pulumi_databricks-1.74.0a1753335781-py3-none-any.whl → pulumi_databricks-1.77.0-py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
Files changed (252)
  1. pulumi_databricks/__init__.py +185 -12
  2. pulumi_databricks/_inputs.py +30467 -20225
  3. pulumi_databricks/access_control_rule_set.py +28 -29
  4. pulumi_databricks/account_federation_policy.py +473 -0
  5. pulumi_databricks/account_network_policy.py +74 -43
  6. pulumi_databricks/account_setting_v2.py +759 -0
  7. pulumi_databricks/aibi_dashboard_embedding_access_policy_setting.py +30 -31
  8. pulumi_databricks/aibi_dashboard_embedding_approved_domains_setting.py +30 -31
  9. pulumi_databricks/alert.py +185 -186
  10. pulumi_databricks/alert_v2.py +369 -211
  11. pulumi_databricks/app.py +315 -209
  12. pulumi_databricks/apps_settings_custom_template.py +531 -0
  13. pulumi_databricks/artifact_allowlist.py +72 -73
  14. pulumi_databricks/automatic_cluster_update_workspace_setting.py +30 -31
  15. pulumi_databricks/budget.py +84 -85
  16. pulumi_databricks/budget_policy.py +75 -53
  17. pulumi_databricks/catalog.py +544 -225
  18. pulumi_databricks/catalog_workspace_binding.py +82 -83
  19. pulumi_databricks/cluster.py +561 -683
  20. pulumi_databricks/cluster_policy.py +115 -116
  21. pulumi_databricks/compliance_security_profile_workspace_setting.py +30 -31
  22. pulumi_databricks/config/__init__.py +1 -1
  23. pulumi_databricks/config/__init__.pyi +3 -2
  24. pulumi_databricks/config/vars.py +40 -37
  25. pulumi_databricks/connection.py +346 -211
  26. pulumi_databricks/credential.py +262 -263
  27. pulumi_databricks/custom_app_integration.py +175 -176
  28. pulumi_databricks/dashboard.py +207 -208
  29. pulumi_databricks/data_quality_monitor.py +453 -0
  30. pulumi_databricks/data_quality_refresh.py +492 -0
  31. pulumi_databricks/database_database_catalog.py +437 -0
  32. pulumi_databricks/database_instance.py +938 -137
  33. pulumi_databricks/database_synced_database_table.py +589 -0
  34. pulumi_databricks/dbfs_file.py +87 -84
  35. pulumi_databricks/default_namespace_setting.py +30 -31
  36. pulumi_databricks/directory.py +61 -62
  37. pulumi_databricks/disable_legacy_access_setting.py +109 -33
  38. pulumi_databricks/disable_legacy_dbfs_setting.py +58 -39
  39. pulumi_databricks/disable_legacy_features_setting.py +62 -37
  40. pulumi_databricks/enhanced_security_monitoring_workspace_setting.py +30 -31
  41. pulumi_databricks/entitlements.py +148 -149
  42. pulumi_databricks/entity_tag_assignment.py +409 -0
  43. pulumi_databricks/external_location.py +276 -290
  44. pulumi_databricks/external_metadata.py +726 -0
  45. pulumi_databricks/feature_engineering_feature.py +480 -0
  46. pulumi_databricks/feature_engineering_materialized_feature.py +397 -0
  47. pulumi_databricks/file.py +96 -97
  48. pulumi_databricks/get_account_federation_policies.py +127 -0
  49. pulumi_databricks/get_account_federation_policy.py +214 -0
  50. pulumi_databricks/get_account_network_policies.py +36 -7
  51. pulumi_databricks/get_account_network_policy.py +33 -29
  52. pulumi_databricks/get_account_setting_v2.py +331 -0
  53. pulumi_databricks/get_alert_v2.py +100 -84
  54. pulumi_databricks/get_alerts_v2.py +35 -17
  55. pulumi_databricks/get_app.py +31 -20
  56. pulumi_databricks/get_apps.py +26 -15
  57. pulumi_databricks/get_apps_settings_custom_template.py +207 -0
  58. pulumi_databricks/get_apps_settings_custom_templates.py +133 -0
  59. pulumi_databricks/get_aws_assume_role_policy.py +27 -28
  60. pulumi_databricks/get_aws_bucket_policy.py +39 -40
  61. pulumi_databricks/get_aws_cross_account_policy.py +47 -48
  62. pulumi_databricks/get_aws_unity_catalog_assume_role_policy.py +35 -36
  63. pulumi_databricks/get_aws_unity_catalog_policy.py +35 -36
  64. pulumi_databricks/get_budget_policies.py +74 -11
  65. pulumi_databricks/get_budget_policy.py +43 -39
  66. pulumi_databricks/get_catalog.py +35 -19
  67. pulumi_databricks/get_catalogs.py +32 -14
  68. pulumi_databricks/get_cluster.py +41 -25
  69. pulumi_databricks/get_cluster_policy.py +72 -54
  70. pulumi_databricks/get_clusters.py +41 -25
  71. pulumi_databricks/get_current_config.py +23 -24
  72. pulumi_databricks/get_current_metastore.py +29 -13
  73. pulumi_databricks/get_current_user.py +17 -18
  74. pulumi_databricks/get_dashboards.py +34 -15
  75. pulumi_databricks/get_data_quality_monitor.py +210 -0
  76. pulumi_databricks/get_data_quality_monitors.py +143 -0
  77. pulumi_databricks/get_data_quality_refresh.py +270 -0
  78. pulumi_databricks/get_data_quality_refreshes.py +207 -0
  79. pulumi_databricks/get_database_database_catalog.py +176 -0
  80. pulumi_databricks/get_database_database_catalogs.py +120 -0
  81. pulumi_databricks/get_database_instance.py +274 -50
  82. pulumi_databricks/get_database_instances.py +35 -11
  83. pulumi_databricks/get_database_synced_database_table.py +225 -0
  84. pulumi_databricks/get_database_synced_database_tables.py +120 -0
  85. pulumi_databricks/get_dbfs_file.py +19 -20
  86. pulumi_databricks/get_dbfs_file_paths.py +16 -17
  87. pulumi_databricks/get_directory.py +43 -25
  88. pulumi_databricks/get_entity_tag_assignment.py +202 -0
  89. pulumi_databricks/get_entity_tag_assignments.py +187 -0
  90. pulumi_databricks/get_external_location.py +35 -19
  91. pulumi_databricks/get_external_locations.py +32 -14
  92. pulumi_databricks/get_external_metadata.py +292 -0
  93. pulumi_databricks/get_external_metadatas.py +135 -0
  94. pulumi_databricks/get_feature_engineering_feature.py +179 -0
  95. pulumi_databricks/get_feature_engineering_features.py +103 -0
  96. pulumi_databricks/get_feature_engineering_materialized_feature.py +180 -0
  97. pulumi_databricks/get_feature_engineering_materialized_features.py +123 -0
  98. pulumi_databricks/get_functions.py +38 -24
  99. pulumi_databricks/get_group.py +85 -86
  100. pulumi_databricks/get_instance_pool.py +10 -11
  101. pulumi_databricks/get_instance_profiles.py +25 -9
  102. pulumi_databricks/get_job.py +22 -23
  103. pulumi_databricks/get_jobs.py +44 -26
  104. pulumi_databricks/get_materialized_features_feature_tag.py +102 -0
  105. pulumi_databricks/get_materialized_features_feature_tags.py +133 -0
  106. pulumi_databricks/get_metastore.py +28 -29
  107. pulumi_databricks/get_metastores.py +9 -10
  108. pulumi_databricks/get_mlflow_experiment.py +62 -46
  109. pulumi_databricks/get_mlflow_model.py +47 -31
  110. pulumi_databricks/get_mlflow_models.py +32 -14
  111. pulumi_databricks/get_mws_credentials.py +9 -10
  112. pulumi_databricks/get_mws_network_connectivity_config.py +40 -41
  113. pulumi_databricks/get_mws_network_connectivity_configs.py +15 -16
  114. pulumi_databricks/get_mws_workspaces.py +5 -6
  115. pulumi_databricks/get_node_type.py +123 -87
  116. pulumi_databricks/get_notebook.py +37 -38
  117. pulumi_databricks/get_notebook_paths.py +16 -17
  118. pulumi_databricks/get_notification_destinations.py +33 -18
  119. pulumi_databricks/get_online_store.py +36 -27
  120. pulumi_databricks/get_online_stores.py +33 -13
  121. pulumi_databricks/get_pipelines.py +49 -31
  122. pulumi_databricks/get_policy_info.py +338 -0
  123. pulumi_databricks/get_policy_infos.py +169 -0
  124. pulumi_databricks/get_quality_monitor_v2.py +21 -18
  125. pulumi_databricks/get_quality_monitors_v2.py +26 -8
  126. pulumi_databricks/get_registered_model.py +41 -27
  127. pulumi_databricks/get_registered_model_versions.py +29 -15
  128. pulumi_databricks/get_rfa_access_request_destinations.py +126 -0
  129. pulumi_databricks/get_schema.py +32 -16
  130. pulumi_databricks/get_schemas.py +38 -20
  131. pulumi_databricks/get_service_principal.py +80 -58
  132. pulumi_databricks/get_service_principal_federation_policies.py +151 -0
  133. pulumi_databricks/get_service_principal_federation_policy.py +220 -0
  134. pulumi_databricks/get_service_principals.py +16 -17
  135. pulumi_databricks/get_serving_endpoints.py +23 -9
  136. pulumi_databricks/get_share.py +130 -32
  137. pulumi_databricks/get_shares.py +29 -11
  138. pulumi_databricks/get_spark_version.py +87 -69
  139. pulumi_databricks/get_sql_warehouse.py +124 -110
  140. pulumi_databricks/get_sql_warehouses.py +37 -19
  141. pulumi_databricks/get_storage_credential.py +32 -16
  142. pulumi_databricks/get_storage_credentials.py +32 -14
  143. pulumi_databricks/get_table.py +30 -14
  144. pulumi_databricks/get_tables.py +41 -23
  145. pulumi_databricks/get_tag_policies.py +139 -0
  146. pulumi_databricks/get_tag_policy.py +175 -0
  147. pulumi_databricks/get_user.py +33 -34
  148. pulumi_databricks/get_views.py +41 -23
  149. pulumi_databricks/get_volume.py +32 -16
  150. pulumi_databricks/get_volumes.py +41 -23
  151. pulumi_databricks/get_workspace_network_option.py +45 -26
  152. pulumi_databricks/get_workspace_setting_v2.py +331 -0
  153. pulumi_databricks/get_zones.py +39 -21
  154. pulumi_databricks/git_credential.py +242 -76
  155. pulumi_databricks/global_init_script.py +99 -100
  156. pulumi_databricks/grant.py +215 -212
  157. pulumi_databricks/grants.py +200 -211
  158. pulumi_databricks/group.py +176 -177
  159. pulumi_databricks/group_instance_profile.py +37 -38
  160. pulumi_databricks/group_member.py +37 -38
  161. pulumi_databricks/group_role.py +37 -38
  162. pulumi_databricks/instance_pool.py +168 -169
  163. pulumi_databricks/instance_profile.py +69 -70
  164. pulumi_databricks/ip_access_list.py +71 -72
  165. pulumi_databricks/job.py +426 -346
  166. pulumi_databricks/lakehouse_monitor.py +199 -200
  167. pulumi_databricks/library.py +264 -99
  168. pulumi_databricks/materialized_features_feature_tag.py +213 -0
  169. pulumi_databricks/metastore.py +254 -255
  170. pulumi_databricks/metastore_assignment.py +52 -53
  171. pulumi_databricks/metastore_data_access.py +153 -154
  172. pulumi_databricks/metastore_provider.py +69 -70
  173. pulumi_databricks/mlflow_experiment.py +108 -109
  174. pulumi_databricks/mlflow_model.py +45 -46
  175. pulumi_databricks/mlflow_webhook.py +75 -76
  176. pulumi_databricks/model_serving.py +195 -74
  177. pulumi_databricks/model_serving_provisioned_throughput.py +100 -54
  178. pulumi_databricks/mount.py +103 -104
  179. pulumi_databricks/mws_credentials.py +99 -100
  180. pulumi_databricks/mws_customer_managed_keys.py +75 -76
  181. pulumi_databricks/mws_log_delivery.py +188 -189
  182. pulumi_databricks/mws_ncc_binding.py +35 -36
  183. pulumi_databricks/mws_ncc_private_endpoint_rule.py +312 -289
  184. pulumi_databricks/mws_network_connectivity_config.py +136 -98
  185. pulumi_databricks/mws_networks.py +159 -160
  186. pulumi_databricks/mws_permission_assignment.py +70 -71
  187. pulumi_databricks/mws_private_access_settings.py +116 -117
  188. pulumi_databricks/mws_storage_configurations.py +74 -75
  189. pulumi_databricks/mws_vpc_endpoint.py +149 -150
  190. pulumi_databricks/mws_workspaces.py +464 -418
  191. pulumi_databricks/notebook.py +143 -144
  192. pulumi_databricks/notification_destination.py +38 -39
  193. pulumi_databricks/obo_token.py +63 -64
  194. pulumi_databricks/online_store.py +121 -70
  195. pulumi_databricks/online_table.py +41 -42
  196. pulumi_databricks/outputs.py +47821 -30802
  197. pulumi_databricks/permission_assignment.py +298 -57
  198. pulumi_databricks/permissions.py +523 -362
  199. pulumi_databricks/pipeline.py +515 -480
  200. pulumi_databricks/policy_info.py +916 -0
  201. pulumi_databricks/provider.py +283 -269
  202. pulumi_databricks/pulumi-plugin.json +1 -1
  203. pulumi_databricks/quality_monitor.py +263 -217
  204. pulumi_databricks/quality_monitor_v2.py +49 -46
  205. pulumi_databricks/query.py +227 -228
  206. pulumi_databricks/recipient.py +208 -209
  207. pulumi_databricks/registered_model.py +393 -122
  208. pulumi_databricks/repo.py +117 -118
  209. pulumi_databricks/restrict_workspace_admins_setting.py +30 -31
  210. pulumi_databricks/rfa_access_request_destinations.py +286 -0
  211. pulumi_databricks/schema.py +159 -160
  212. pulumi_databricks/secret.py +72 -73
  213. pulumi_databricks/secret_acl.py +52 -53
  214. pulumi_databricks/secret_scope.py +55 -56
  215. pulumi_databricks/service_principal.py +279 -278
  216. pulumi_databricks/service_principal_federation_policy.py +469 -0
  217. pulumi_databricks/service_principal_role.py +35 -36
  218. pulumi_databricks/service_principal_secret.py +156 -157
  219. pulumi_databricks/share.py +186 -187
  220. pulumi_databricks/sql_alert.py +98 -99
  221. pulumi_databricks/sql_dashboard.py +94 -95
  222. pulumi_databricks/sql_endpoint.py +298 -266
  223. pulumi_databricks/sql_global_config.py +103 -104
  224. pulumi_databricks/sql_permissions.py +121 -122
  225. pulumi_databricks/sql_query.py +150 -151
  226. pulumi_databricks/sql_table.py +310 -283
  227. pulumi_databricks/sql_visualization.py +92 -93
  228. pulumi_databricks/sql_widget.py +91 -88
  229. pulumi_databricks/storage_credential.py +236 -184
  230. pulumi_databricks/system_schema.py +50 -51
  231. pulumi_databricks/table.py +147 -148
  232. pulumi_databricks/tag_policy.py +357 -0
  233. pulumi_databricks/token.py +83 -84
  234. pulumi_databricks/user.py +284 -285
  235. pulumi_databricks/user_instance_profile.py +35 -36
  236. pulumi_databricks/user_role.py +35 -36
  237. pulumi_databricks/vector_search_endpoint.py +117 -118
  238. pulumi_databricks/vector_search_index.py +86 -87
  239. pulumi_databricks/volume.py +129 -130
  240. pulumi_databricks/workspace_binding.py +82 -131
  241. pulumi_databricks/workspace_conf.py +18 -19
  242. pulumi_databricks/workspace_file.py +100 -101
  243. pulumi_databricks/workspace_network_option.py +89 -40
  244. pulumi_databricks/workspace_setting_v2.py +759 -0
  245. {pulumi_databricks-1.74.0a1753335781.dist-info → pulumi_databricks-1.77.0.dist-info}/METADATA +1 -1
  246. pulumi_databricks-1.77.0.dist-info/RECORD +250 -0
  247. pulumi_databricks/get_recipient_federation_policies.py +0 -83
  248. pulumi_databricks/get_recipient_federation_policy.py +0 -166
  249. pulumi_databricks/recipient_federation_policy.py +0 -347
  250. pulumi_databricks-1.74.0a1753335781.dist-info/RECORD +0 -205
  251. {pulumi_databricks-1.74.0a1753335781.dist-info → pulumi_databricks-1.77.0.dist-info}/WHEEL +0 -0
  252. {pulumi_databricks-1.74.0a1753335781.dist-info → pulumi_databricks-1.77.0.dist-info}/top_level.txt +0 -0
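Most of the churn in the `cluster.py` hunks below is mechanical: the generated module now does `import builtins as _builtins` and refers to `_builtins.str`, `_builtins.bool`, `_builtins.int`, and `@_builtins.property` throughout, and `ClusterArgs` gains one new optional argument, `provider_config`. As context for reading those hunks, here is a minimal sketch of constructing a cluster with this SDK, adapted from the autoscaling example that 1.77.0 removes from the `no_wait` docstring; it assumes a configured Databricks provider, and the node type, runtime, and cache settings are illustrative only:

```python
import pulumi
import pulumi_databricks as databricks

# Smallest node type with local disk, and the latest LTS Databricks runtime.
smallest = databricks.get_node_type(local_disk=True)
latest_lts = databricks.get_spark_version(long_term_support=True)

# Autoscaling cluster with Delta Cache enabled, auto-terminating after 20 idle minutes.
shared_autoscaling = databricks.Cluster(
    "shared_autoscaling",
    cluster_name="Shared Autoscaling",
    spark_version=latest_lts.id,
    node_type_id=smallest.id,
    autotermination_minutes=20,
    autoscale={
        "min_workers": 1,
        "max_workers": 50,
    },
    spark_conf={
        "spark.databricks.io.cache.enabled": "true",
        "spark.databricks.io.cache.maxDiskUsage": "50g",
        "spark.databricks.io.cache.maxMetaDataCache": "1g",
    },
)
```

The new `provider_config` argument takes a `ClusterProviderConfigArgs` block for management through the account provider; its fields are not shown in this section, so it is omitted from the sketch above.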
@@ -2,8 +2,7 @@
  # *** WARNING: this file was generated by pulumi-language-python. ***
  # *** Do not edit by hand unless you're certain you know what you are doing! ***

- import builtins
- import copy
+ import builtins as _builtins
  import warnings
  import sys
  import pulumi
@@ -22,50 +21,51 @@ __all__ = ['ClusterArgs', 'Cluster']
  @pulumi.input_type
  class ClusterArgs:
  def __init__(__self__, *,
- spark_version: pulumi.Input[builtins.str],
- apply_policy_default_values: Optional[pulumi.Input[builtins.bool]] = None,
+ spark_version: pulumi.Input[_builtins.str],
+ apply_policy_default_values: Optional[pulumi.Input[_builtins.bool]] = None,
  autoscale: Optional[pulumi.Input['ClusterAutoscaleArgs']] = None,
- autotermination_minutes: Optional[pulumi.Input[builtins.int]] = None,
+ autotermination_minutes: Optional[pulumi.Input[_builtins.int]] = None,
  aws_attributes: Optional[pulumi.Input['ClusterAwsAttributesArgs']] = None,
  azure_attributes: Optional[pulumi.Input['ClusterAzureAttributesArgs']] = None,
  cluster_log_conf: Optional[pulumi.Input['ClusterClusterLogConfArgs']] = None,
  cluster_mount_infos: Optional[pulumi.Input[Sequence[pulumi.Input['ClusterClusterMountInfoArgs']]]] = None,
- cluster_name: Optional[pulumi.Input[builtins.str]] = None,
- custom_tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]]] = None,
- data_security_mode: Optional[pulumi.Input[builtins.str]] = None,
+ cluster_name: Optional[pulumi.Input[_builtins.str]] = None,
+ custom_tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]] = None,
+ data_security_mode: Optional[pulumi.Input[_builtins.str]] = None,
  docker_image: Optional[pulumi.Input['ClusterDockerImageArgs']] = None,
- driver_instance_pool_id: Optional[pulumi.Input[builtins.str]] = None,
- driver_node_type_id: Optional[pulumi.Input[builtins.str]] = None,
- enable_elastic_disk: Optional[pulumi.Input[builtins.bool]] = None,
- enable_local_disk_encryption: Optional[pulumi.Input[builtins.bool]] = None,
+ driver_instance_pool_id: Optional[pulumi.Input[_builtins.str]] = None,
+ driver_node_type_id: Optional[pulumi.Input[_builtins.str]] = None,
+ enable_elastic_disk: Optional[pulumi.Input[_builtins.bool]] = None,
+ enable_local_disk_encryption: Optional[pulumi.Input[_builtins.bool]] = None,
  gcp_attributes: Optional[pulumi.Input['ClusterGcpAttributesArgs']] = None,
- idempotency_token: Optional[pulumi.Input[builtins.str]] = None,
+ idempotency_token: Optional[pulumi.Input[_builtins.str]] = None,
  init_scripts: Optional[pulumi.Input[Sequence[pulumi.Input['ClusterInitScriptArgs']]]] = None,
- instance_pool_id: Optional[pulumi.Input[builtins.str]] = None,
- is_pinned: Optional[pulumi.Input[builtins.bool]] = None,
- is_single_node: Optional[pulumi.Input[builtins.bool]] = None,
- kind: Optional[pulumi.Input[builtins.str]] = None,
+ instance_pool_id: Optional[pulumi.Input[_builtins.str]] = None,
+ is_pinned: Optional[pulumi.Input[_builtins.bool]] = None,
+ is_single_node: Optional[pulumi.Input[_builtins.bool]] = None,
+ kind: Optional[pulumi.Input[_builtins.str]] = None,
  libraries: Optional[pulumi.Input[Sequence[pulumi.Input['ClusterLibraryArgs']]]] = None,
- no_wait: Optional[pulumi.Input[builtins.bool]] = None,
- node_type_id: Optional[pulumi.Input[builtins.str]] = None,
- num_workers: Optional[pulumi.Input[builtins.int]] = None,
- policy_id: Optional[pulumi.Input[builtins.str]] = None,
- remote_disk_throughput: Optional[pulumi.Input[builtins.int]] = None,
- runtime_engine: Optional[pulumi.Input[builtins.str]] = None,
- single_user_name: Optional[pulumi.Input[builtins.str]] = None,
- spark_conf: Optional[pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]]] = None,
- spark_env_vars: Optional[pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]]] = None,
- ssh_public_keys: Optional[pulumi.Input[Sequence[pulumi.Input[builtins.str]]]] = None,
- total_initial_remote_disk_size: Optional[pulumi.Input[builtins.int]] = None,
- use_ml_runtime: Optional[pulumi.Input[builtins.bool]] = None,
+ no_wait: Optional[pulumi.Input[_builtins.bool]] = None,
+ node_type_id: Optional[pulumi.Input[_builtins.str]] = None,
+ num_workers: Optional[pulumi.Input[_builtins.int]] = None,
+ policy_id: Optional[pulumi.Input[_builtins.str]] = None,
+ provider_config: Optional[pulumi.Input['ClusterProviderConfigArgs']] = None,
+ remote_disk_throughput: Optional[pulumi.Input[_builtins.int]] = None,
+ runtime_engine: Optional[pulumi.Input[_builtins.str]] = None,
+ single_user_name: Optional[pulumi.Input[_builtins.str]] = None,
+ spark_conf: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]] = None,
+ spark_env_vars: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]] = None,
+ ssh_public_keys: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]] = None,
+ total_initial_remote_disk_size: Optional[pulumi.Input[_builtins.int]] = None,
+ use_ml_runtime: Optional[pulumi.Input[_builtins.bool]] = None,
  workload_type: Optional[pulumi.Input['ClusterWorkloadTypeArgs']] = None):
  """
  The set of arguments for constructing a Cluster resource.
- :param pulumi.Input[builtins.str] spark_version: [Runtime version](https://docs.databricks.com/runtime/index.html) of the cluster. Any supported get_spark_version id. We advise using Cluster Policies to restrict the list of versions for simplicity while maintaining enough control.
- :param pulumi.Input[builtins.bool] apply_policy_default_values: Whether to use policy default values for missing cluster attributes.
- :param pulumi.Input[builtins.int] autotermination_minutes: Automatically terminate the cluster after being inactive for this time in minutes. If specified, the threshold must be between 10 and 10000 minutes. You can also set this value to 0 to explicitly disable automatic termination. Defaults to `60`. *We highly recommend having this setting present for Interactive/BI clusters.*
- :param pulumi.Input[builtins.str] cluster_name: Cluster name, which doesn't have to be unique. If not specified at creation, the cluster name will be an empty string.
- :param pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]] custom_tags: should have tag `ResourceClass` set to value `Serverless`
+ :param pulumi.Input[_builtins.str] spark_version: [Runtime version](https://docs.databricks.com/runtime/index.html) of the cluster. Any supported get_spark_version id. We advise using Cluster Policies to restrict the list of versions for simplicity while maintaining enough control.
+ :param pulumi.Input[_builtins.bool] apply_policy_default_values: Whether to use policy default values for missing cluster attributes.
+ :param pulumi.Input[_builtins.int] autotermination_minutes: Automatically terminate the cluster after being inactive for this time in minutes. If specified, the threshold must be between 10 and 10000 minutes. You can also set this value to 0 to explicitly disable automatic termination. Defaults to `60`. *We highly recommend having this setting present for Interactive/BI clusters.*
+ :param pulumi.Input[_builtins.str] cluster_name: Cluster name, which doesn't have to be unique. If not specified at creation, the cluster name will be an empty string.
+ :param pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]] custom_tags: should have tag `ResourceClass` set to value `Serverless`

  For example:

@@ -86,55 +86,32 @@ class ClusterArgs:
  "ResourceClass": "Serverless",
  })
  ```
- :param pulumi.Input[builtins.str] data_security_mode: Select the security features of the cluster (see [API docs](https://docs.databricks.com/api/workspace/clusters/create#data_security_mode) for full list of values). [Unity Catalog requires](https://docs.databricks.com/data-governance/unity-catalog/compute.html#create-clusters--sql-warehouses-with-unity-catalog-access) `SINGLE_USER` or `USER_ISOLATION` mode. `LEGACY_PASSTHROUGH` for passthrough cluster and `LEGACY_TABLE_ACL` for Table ACL cluster. If omitted, default security features are enabled. To disable security features use `NONE` or legacy mode `NO_ISOLATION`. If `kind` is specified, then the following options are available:
+ :param pulumi.Input[_builtins.str] data_security_mode: Select the security features of the cluster (see [API docs](https://docs.databricks.com/api/workspace/clusters/create#data_security_mode) for full list of values). [Unity Catalog requires](https://docs.databricks.com/data-governance/unity-catalog/compute.html#create-clusters--sql-warehouses-with-unity-catalog-access) `SINGLE_USER` or `USER_ISOLATION` mode. `LEGACY_PASSTHROUGH` for passthrough cluster and `LEGACY_TABLE_ACL` for Table ACL cluster. If omitted, default security features are enabled. To disable security features use `NONE` or legacy mode `NO_ISOLATION`. If `kind` is specified, then the following options are available:
  * `DATA_SECURITY_MODE_AUTO`: Databricks will choose the most appropriate access mode depending on your compute configuration.
  * `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`.
  * `DATA_SECURITY_MODE_DEDICATED`: Alias for `SINGLE_USER`.
- :param pulumi.Input[builtins.str] driver_instance_pool_id: similar to `instance_pool_id`, but for driver node. If omitted, and `instance_pool_id` is specified, then the driver will be allocated from that pool.
- :param pulumi.Input[builtins.str] driver_node_type_id: The node type of the Spark driver. This field is optional; if unset, API will set the driver node type to the same value as `node_type_id` defined above.
- :param pulumi.Input[builtins.bool] enable_elastic_disk: If you don't want to allocate a fixed number of EBS volumes at cluster creation time, use autoscaling local storage. With autoscaling local storage, Databricks monitors the amount of free disk space available on your cluster's Spark workers. If a worker begins to run too low on disk, Databricks automatically attaches a new EBS volume to the worker before it runs out of disk space. EBS volumes are attached up to a limit of 5 TB of total disk space per instance (including the instance's local storage). To scale down EBS usage, make sure you have `autotermination_minutes` and `autoscale` attributes set. More documentation available at [cluster configuration page](https://docs.databricks.com/clusters/configure.html#autoscaling-local-storage-1).
- :param pulumi.Input[builtins.bool] enable_local_disk_encryption: Some instance types you use to run clusters may have locally attached disks. Databricks may store shuffle data or temporary data on these locally attached disks. To ensure that all data at rest is encrypted for all storage types, including shuffle data stored temporarily on your cluster's local disks, you can enable local disk encryption. When local disk encryption is enabled, Databricks generates an encryption key locally unique to each cluster node and uses it to encrypt all data stored on local disks. The scope of the key is local to each cluster node and is destroyed along with the cluster node itself. During its lifetime, the key resides in memory for encryption and decryption and is stored encrypted on the disk. *Your workloads may run more slowly because of the performance impact of reading and writing encrypted data to and from local volumes. This feature is not available for all Azure Databricks subscriptions. Contact your Microsoft or Databricks account representative to request access.*
- :param pulumi.Input[builtins.str] idempotency_token: An optional token to guarantee the idempotency of cluster creation requests. If an active cluster with the provided token already exists, the request will not create a new cluster, but it will return the existing running cluster's ID instead. If you specify the idempotency token, upon failure, you can retry until the request succeeds. Databricks platform guarantees to launch exactly one cluster with that idempotency token. This token should have at most 64 characters.
- :param pulumi.Input[builtins.str] instance_pool_id: To reduce cluster start time, you can attach a cluster to a predefined pool of idle instances. When attached to a pool, a cluster allocates its driver and worker nodes from the pool. If the pool does not have sufficient idle resources to accommodate the cluster's request, it expands by allocating new instances from the instance provider. When an attached cluster changes its state to `TERMINATED`, the instances it used are returned to the pool and reused by a different cluster.
- :param pulumi.Input[builtins.bool] is_pinned: boolean value specifying if the cluster is pinned (not pinned by default). You must be a Databricks administrator to use this. The pinned clusters' maximum number is [limited to 100](https://docs.databricks.com/clusters/clusters-manage.html#pin-a-cluster), so `apply` may fail if you have more than that (this number may change over time, so check Databricks documentation for actual number).
- :param pulumi.Input[builtins.bool] is_single_node: When set to true, Databricks will automatically set single node related `custom_tags`, `spark_conf`, and `num_workers`.
- :param pulumi.Input[builtins.str] kind: The kind of compute described by this compute specification. Possible values (see [API docs](https://docs.databricks.com/api/workspace/clusters/create#kind) for full list): `CLASSIC_PREVIEW` (if corresponding public preview is enabled).
- :param pulumi.Input[builtins.bool] no_wait: If true, the provider will not wait for the cluster to reach `RUNNING` state when creating the cluster, allowing cluster creation and library installation to continue asynchronously. Defaults to false (the provider will wait for cluster creation and library installation to succeed).
-
- The following example demonstrates how to create an autoscaling cluster with [Delta Cache](https://docs.databricks.com/delta/optimizations/delta-cache.html) enabled:
-
- ```python
- import pulumi
- import pulumi_databricks as databricks
-
- smallest = databricks.get_node_type(local_disk=True)
- latest_lts = databricks.get_spark_version(long_term_support=True)
- shared_autoscaling = databricks.Cluster("shared_autoscaling",
- cluster_name="Shared Autoscaling",
- spark_version=latest_lts.id,
- node_type_id=smallest.id,
- autotermination_minutes=20,
- autoscale={
- "min_workers": 1,
- "max_workers": 50,
- },
- spark_conf={
- "spark.databricks.io.cache.enabled": "true",
- "spark.databricks.io.cache.maxDiskUsage": "50g",
- "spark.databricks.io.cache.maxMetaDataCache": "1g",
- })
- ```
- :param pulumi.Input[builtins.str] node_type_id: Any supported get_node_type id. If `instance_pool_id` is specified, this field is not needed.
- :param pulumi.Input[builtins.int] num_workers: Number of worker nodes that this cluster should have. A cluster has one Spark driver and `num_workers` executors for a total of `num_workers` + 1 Spark nodes.
- :param pulumi.Input[builtins.str] policy_id: Identifier of Cluster Policy to validate cluster and preset certain defaults. *The primary use for cluster policies is to allow users to create policy-scoped clusters via UI rather than sharing configuration for API-created clusters.* For example, when you specify `policy_id` of [external metastore](https://docs.databricks.com/administration-guide/clusters/policies.html#external-metastore-policy) policy, you still have to fill in relevant keys for `spark_conf`. If relevant fields aren't filled in, then it will cause the configuration drift detected on each plan/apply, and Pulumi will try to apply the detected changes.
- :param pulumi.Input[builtins.str] runtime_engine: The type of runtime engine to use. If not specified, the runtime engine type is inferred based on the spark_version value. Allowed values include: `PHOTON`, `STANDARD`.
- :param pulumi.Input[builtins.str] single_user_name: The optional user name of the user (or group name if `kind` if specified) to assign to an interactive cluster. This field is required when using `data_security_mode` set to `SINGLE_USER` or AAD Passthrough for Azure Data Lake Storage (ADLS) with a single-user cluster (i.e., not high-concurrency clusters).
- :param pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]] spark_conf: should have following items:
+ :param pulumi.Input[_builtins.str] driver_instance_pool_id: similar to `instance_pool_id`, but for driver node. If omitted, and `instance_pool_id` is specified, then the driver will be allocated from that pool.
+ :param pulumi.Input[_builtins.str] driver_node_type_id: The node type of the Spark driver. This field is optional; if unset, API will set the driver node type to the same value as `node_type_id` defined above.
+ :param pulumi.Input[_builtins.bool] enable_elastic_disk: If you don't want to allocate a fixed number of EBS volumes at cluster creation time, use autoscaling local storage. With autoscaling local storage, Databricks monitors the amount of free disk space available on your cluster's Spark workers. If a worker begins to run too low on disk, Databricks automatically attaches a new EBS volume to the worker before it runs out of disk space. EBS volumes are attached up to a limit of 5 TB of total disk space per instance (including the instance's local storage). To scale down EBS usage, make sure you have `autotermination_minutes` and `autoscale` attributes set. More documentation available at [cluster configuration page](https://docs.databricks.com/clusters/configure.html#autoscaling-local-storage-1).
+ :param pulumi.Input[_builtins.bool] enable_local_disk_encryption: Some instance types you use to run clusters may have locally attached disks. Databricks may store shuffle data or temporary data on these locally attached disks. To ensure that all data at rest is encrypted for all storage types, including shuffle data stored temporarily on your cluster's local disks, you can enable local disk encryption. When local disk encryption is enabled, Databricks generates an encryption key locally unique to each cluster node and uses it to encrypt all data stored on local disks. The scope of the key is local to each cluster node and is destroyed along with the cluster node itself. During its lifetime, the key resides in memory for encryption and decryption and is stored encrypted on the disk. *Your workloads may run more slowly because of the performance impact of reading and writing encrypted data to and from local volumes. This feature is not available for all Azure Databricks subscriptions. Contact your Microsoft or Databricks account representative to request access.*
+ :param pulumi.Input[_builtins.str] idempotency_token: An optional token to guarantee the idempotency of cluster creation requests. If an active cluster with the provided token already exists, the request will not create a new cluster, but it will return the existing running cluster's ID instead. If you specify the idempotency token, upon failure, you can retry until the request succeeds. Databricks platform guarantees to launch exactly one cluster with that idempotency token. This token should have at most 64 characters.
+ :param pulumi.Input[_builtins.str] instance_pool_id: To reduce cluster start time, you can attach a cluster to a predefined pool of idle instances. When attached to a pool, a cluster allocates its driver and worker nodes from the pool. If the pool does not have sufficient idle resources to accommodate the cluster's request, it expands by allocating new instances from the instance provider. When an attached cluster changes its state to `TERMINATED`, the instances it used are returned to the pool and reused by a different cluster.
+ :param pulumi.Input[_builtins.bool] is_pinned: boolean value specifying if the cluster is pinned (not pinned by default). You must be a Databricks administrator to use this. The pinned clusters' maximum number is [limited to 100](https://docs.databricks.com/clusters/clusters-manage.html#pin-a-cluster), so `apply` may fail if you have more than that (this number may change over time, so check Databricks documentation for actual number).
+ :param pulumi.Input[_builtins.bool] is_single_node: When set to true, Databricks will automatically set single node related `custom_tags`, `spark_conf`, and `num_workers`.
+ :param pulumi.Input[_builtins.str] kind: The kind of compute described by this compute specification. Possible values (see [API docs](https://docs.databricks.com/api/workspace/clusters/create#kind) for full list): `CLASSIC_PREVIEW` (if corresponding public preview is enabled).
+ :param pulumi.Input[_builtins.bool] no_wait: If true, the provider will not wait for the cluster to reach `RUNNING` state when creating the cluster, allowing cluster creation and library installation to continue asynchronously. Defaults to false (the provider will wait for cluster creation and library installation to succeed).
+ :param pulumi.Input[_builtins.str] node_type_id: Any supported get_node_type id. If `instance_pool_id` is specified, this field is not needed.
+ :param pulumi.Input[_builtins.int] num_workers: Number of worker nodes that this cluster should have. A cluster has one Spark driver and `num_workers` executors for a total of `num_workers` + 1 Spark nodes.
+ :param pulumi.Input[_builtins.str] policy_id: Identifier of Cluster Policy to validate cluster and preset certain defaults. *The primary use for cluster policies is to allow users to create policy-scoped clusters via UI rather than sharing configuration for API-created clusters.* For example, when you specify `policy_id` of [external metastore](https://docs.databricks.com/administration-guide/clusters/policies.html#external-metastore-policy) policy, you still have to fill in relevant keys for `spark_conf`. If relevant fields aren't filled in, then it will cause the configuration drift detected on each plan/apply, and Pulumi will try to apply the detected changes.
+ :param pulumi.Input['ClusterProviderConfigArgs'] provider_config: Configure the provider for management through account provider. This block consists of the following fields:
+ :param pulumi.Input[_builtins.str] runtime_engine: The type of runtime engine to use. If not specified, the runtime engine type is inferred based on the spark_version value. Allowed values include: `PHOTON`, `STANDARD`.
+ :param pulumi.Input[_builtins.str] single_user_name: The optional user name of the user (or group name if `kind` if specified) to assign to an interactive cluster. This field is required when using `data_security_mode` set to `SINGLE_USER` or AAD Passthrough for Azure Data Lake Storage (ADLS) with a single-user cluster (i.e., not high-concurrency clusters).
+ :param pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]] spark_conf: should have following items:
  * `spark.databricks.repl.allowedLanguages` set to a list of supported languages, for example: `python,sql`, or `python,sql,r`. Scala is not supported!
  * `spark.databricks.cluster.profile` set to `serverless`
- :param pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]] spark_env_vars: Map with environment variable key-value pairs to fine-tune Spark clusters. Key-value pairs of the form (X,Y) are exported (i.e., X='Y') while launching the driver and workers.
- :param pulumi.Input[Sequence[pulumi.Input[builtins.str]]] ssh_public_keys: SSH public key contents that will be added to each Spark node in this cluster. The corresponding private keys can be used to login with the user name ubuntu on port 2200. You can specify up to 10 keys.
- :param pulumi.Input[builtins.bool] use_ml_runtime: Whenever ML runtime should be selected or not. Actual runtime is determined by `spark_version` (DBR release), this field `use_ml_runtime`, and whether `node_type_id` is GPU node or not.
+ :param pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]] spark_env_vars: Map with environment variable key-value pairs to fine-tune Spark clusters. Key-value pairs of the form (X,Y) are exported (i.e., X='Y') while launching the driver and workers.
+ :param pulumi.Input[Sequence[pulumi.Input[_builtins.str]]] ssh_public_keys: SSH public key contents that will be added to each Spark node in this cluster. The corresponding private keys can be used to login with the user name ubuntu on port 2200. You can specify up to 10 keys.
+ :param pulumi.Input[_builtins.bool] use_ml_runtime: Whenever ML runtime should be selected or not. Actual runtime is determined by `spark_version` (DBR release), this field `use_ml_runtime`, and whether `node_type_id` is GPU node or not.
  """
  pulumi.set(__self__, "spark_version", spark_version)
  if apply_policy_default_values is not None:
@@ -191,6 +168,8 @@ class ClusterArgs:
  pulumi.set(__self__, "num_workers", num_workers)
  if policy_id is not None:
  pulumi.set(__self__, "policy_id", policy_id)
+ if provider_config is not None:
+ pulumi.set(__self__, "provider_config", provider_config)
  if remote_disk_throughput is not None:
  pulumi.set(__self__, "remote_disk_throughput", remote_disk_throughput)
  if runtime_engine is not None:
@@ -210,31 +189,31 @@ class ClusterArgs:
  if workload_type is not None:
  pulumi.set(__self__, "workload_type", workload_type)

- @property
+ @_builtins.property
  @pulumi.getter(name="sparkVersion")
- def spark_version(self) -> pulumi.Input[builtins.str]:
+ def spark_version(self) -> pulumi.Input[_builtins.str]:
  """
  [Runtime version](https://docs.databricks.com/runtime/index.html) of the cluster. Any supported get_spark_version id. We advise using Cluster Policies to restrict the list of versions for simplicity while maintaining enough control.
  """
  return pulumi.get(self, "spark_version")

  @spark_version.setter
- def spark_version(self, value: pulumi.Input[builtins.str]):
+ def spark_version(self, value: pulumi.Input[_builtins.str]):
  pulumi.set(self, "spark_version", value)

- @property
+ @_builtins.property
  @pulumi.getter(name="applyPolicyDefaultValues")
- def apply_policy_default_values(self) -> Optional[pulumi.Input[builtins.bool]]:
+ def apply_policy_default_values(self) -> Optional[pulumi.Input[_builtins.bool]]:
  """
  Whether to use policy default values for missing cluster attributes.
  """
  return pulumi.get(self, "apply_policy_default_values")

  @apply_policy_default_values.setter
- def apply_policy_default_values(self, value: Optional[pulumi.Input[builtins.bool]]):
+ def apply_policy_default_values(self, value: Optional[pulumi.Input[_builtins.bool]]):
  pulumi.set(self, "apply_policy_default_values", value)

- @property
+ @_builtins.property
  @pulumi.getter
  def autoscale(self) -> Optional[pulumi.Input['ClusterAutoscaleArgs']]:
  return pulumi.get(self, "autoscale")
@@ -243,19 +222,19 @@ class ClusterArgs:
  def autoscale(self, value: Optional[pulumi.Input['ClusterAutoscaleArgs']]):
  pulumi.set(self, "autoscale", value)

- @property
+ @_builtins.property
  @pulumi.getter(name="autoterminationMinutes")
- def autotermination_minutes(self) -> Optional[pulumi.Input[builtins.int]]:
+ def autotermination_minutes(self) -> Optional[pulumi.Input[_builtins.int]]:
  """
  Automatically terminate the cluster after being inactive for this time in minutes. If specified, the threshold must be between 10 and 10000 minutes. You can also set this value to 0 to explicitly disable automatic termination. Defaults to `60`. *We highly recommend having this setting present for Interactive/BI clusters.*
  """
  return pulumi.get(self, "autotermination_minutes")

  @autotermination_minutes.setter
- def autotermination_minutes(self, value: Optional[pulumi.Input[builtins.int]]):
+ def autotermination_minutes(self, value: Optional[pulumi.Input[_builtins.int]]):
  pulumi.set(self, "autotermination_minutes", value)

- @property
+ @_builtins.property
  @pulumi.getter(name="awsAttributes")
  def aws_attributes(self) -> Optional[pulumi.Input['ClusterAwsAttributesArgs']]:
  return pulumi.get(self, "aws_attributes")
@@ -264,7 +243,7 @@ class ClusterArgs:
  def aws_attributes(self, value: Optional[pulumi.Input['ClusterAwsAttributesArgs']]):
  pulumi.set(self, "aws_attributes", value)

- @property
+ @_builtins.property
  @pulumi.getter(name="azureAttributes")
  def azure_attributes(self) -> Optional[pulumi.Input['ClusterAzureAttributesArgs']]:
  return pulumi.get(self, "azure_attributes")
@@ -273,7 +252,7 @@ class ClusterArgs:
  def azure_attributes(self, value: Optional[pulumi.Input['ClusterAzureAttributesArgs']]):
  pulumi.set(self, "azure_attributes", value)

- @property
+ @_builtins.property
  @pulumi.getter(name="clusterLogConf")
  def cluster_log_conf(self) -> Optional[pulumi.Input['ClusterClusterLogConfArgs']]:
  return pulumi.get(self, "cluster_log_conf")
@@ -282,7 +261,7 @@ class ClusterArgs:
  def cluster_log_conf(self, value: Optional[pulumi.Input['ClusterClusterLogConfArgs']]):
  pulumi.set(self, "cluster_log_conf", value)

- @property
+ @_builtins.property
  @pulumi.getter(name="clusterMountInfos")
  def cluster_mount_infos(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ClusterClusterMountInfoArgs']]]]:
  return pulumi.get(self, "cluster_mount_infos")
@@ -291,21 +270,21 @@ class ClusterArgs:
  def cluster_mount_infos(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ClusterClusterMountInfoArgs']]]]):
  pulumi.set(self, "cluster_mount_infos", value)

- @property
+ @_builtins.property
  @pulumi.getter(name="clusterName")
- def cluster_name(self) -> Optional[pulumi.Input[builtins.str]]:
+ def cluster_name(self) -> Optional[pulumi.Input[_builtins.str]]:
  """
  Cluster name, which doesn't have to be unique. If not specified at creation, the cluster name will be an empty string.
  """
  return pulumi.get(self, "cluster_name")

  @cluster_name.setter
- def cluster_name(self, value: Optional[pulumi.Input[builtins.str]]):
+ def cluster_name(self, value: Optional[pulumi.Input[_builtins.str]]):
  pulumi.set(self, "cluster_name", value)

- @property
+ @_builtins.property
  @pulumi.getter(name="customTags")
- def custom_tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]]]:
+ def custom_tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]]:
  """
  should have tag `ResourceClass` set to value `Serverless`

@@ -332,12 +311,12 @@ class ClusterArgs:
  return pulumi.get(self, "custom_tags")

  @custom_tags.setter
- def custom_tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]]]):
+ def custom_tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]]):
  pulumi.set(self, "custom_tags", value)

- @property
+ @_builtins.property
  @pulumi.getter(name="dataSecurityMode")
- def data_security_mode(self) -> Optional[pulumi.Input[builtins.str]]:
+ def data_security_mode(self) -> Optional[pulumi.Input[_builtins.str]]:
  """
  Select the security features of the cluster (see [API docs](https://docs.databricks.com/api/workspace/clusters/create#data_security_mode) for full list of values). [Unity Catalog requires](https://docs.databricks.com/data-governance/unity-catalog/compute.html#create-clusters--sql-warehouses-with-unity-catalog-access) `SINGLE_USER` or `USER_ISOLATION` mode. `LEGACY_PASSTHROUGH` for passthrough cluster and `LEGACY_TABLE_ACL` for Table ACL cluster. If omitted, default security features are enabled. To disable security features use `NONE` or legacy mode `NO_ISOLATION`. If `kind` is specified, then the following options are available:
  * `DATA_SECURITY_MODE_AUTO`: Databricks will choose the most appropriate access mode depending on your compute configuration.
@@ -347,10 +326,10 @@ class ClusterArgs:
  return pulumi.get(self, "data_security_mode")

  @data_security_mode.setter
- def data_security_mode(self, value: Optional[pulumi.Input[builtins.str]]):
+ def data_security_mode(self, value: Optional[pulumi.Input[_builtins.str]]):
  pulumi.set(self, "data_security_mode", value)

- @property
+ @_builtins.property
  @pulumi.getter(name="dockerImage")
  def docker_image(self) -> Optional[pulumi.Input['ClusterDockerImageArgs']]:
  return pulumi.get(self, "docker_image")
@@ -359,55 +338,55 @@ class ClusterArgs:
  def docker_image(self, value: Optional[pulumi.Input['ClusterDockerImageArgs']]):
  pulumi.set(self, "docker_image", value)

- @property
+ @_builtins.property
  @pulumi.getter(name="driverInstancePoolId")
- def driver_instance_pool_id(self) -> Optional[pulumi.Input[builtins.str]]:
+ def driver_instance_pool_id(self) -> Optional[pulumi.Input[_builtins.str]]:
  """
  similar to `instance_pool_id`, but for driver node. If omitted, and `instance_pool_id` is specified, then the driver will be allocated from that pool.
  """
  return pulumi.get(self, "driver_instance_pool_id")

  @driver_instance_pool_id.setter
- def driver_instance_pool_id(self, value: Optional[pulumi.Input[builtins.str]]):
+ def driver_instance_pool_id(self, value: Optional[pulumi.Input[_builtins.str]]):
  pulumi.set(self, "driver_instance_pool_id", value)

- @property
+ @_builtins.property
  @pulumi.getter(name="driverNodeTypeId")
- def driver_node_type_id(self) -> Optional[pulumi.Input[builtins.str]]:
+ def driver_node_type_id(self) -> Optional[pulumi.Input[_builtins.str]]:
  """
  The node type of the Spark driver. This field is optional; if unset, API will set the driver node type to the same value as `node_type_id` defined above.
  """
  return pulumi.get(self, "driver_node_type_id")

  @driver_node_type_id.setter
- def driver_node_type_id(self, value: Optional[pulumi.Input[builtins.str]]):
+ def driver_node_type_id(self, value: Optional[pulumi.Input[_builtins.str]]):
  pulumi.set(self, "driver_node_type_id", value)

- @property
+ @_builtins.property
  @pulumi.getter(name="enableElasticDisk")
- def enable_elastic_disk(self) -> Optional[pulumi.Input[builtins.bool]]:
+ def enable_elastic_disk(self) -> Optional[pulumi.Input[_builtins.bool]]:
  """
  If you don't want to allocate a fixed number of EBS volumes at cluster creation time, use autoscaling local storage. With autoscaling local storage, Databricks monitors the amount of free disk space available on your cluster's Spark workers. If a worker begins to run too low on disk, Databricks automatically attaches a new EBS volume to the worker before it runs out of disk space. EBS volumes are attached up to a limit of 5 TB of total disk space per instance (including the instance's local storage). To scale down EBS usage, make sure you have `autotermination_minutes` and `autoscale` attributes set. More documentation available at [cluster configuration page](https://docs.databricks.com/clusters/configure.html#autoscaling-local-storage-1).
  """
  return pulumi.get(self, "enable_elastic_disk")

  @enable_elastic_disk.setter
- def enable_elastic_disk(self, value: Optional[pulumi.Input[builtins.bool]]):
+ def enable_elastic_disk(self, value: Optional[pulumi.Input[_builtins.bool]]):
  pulumi.set(self, "enable_elastic_disk", value)

- @property
+ @_builtins.property
  @pulumi.getter(name="enableLocalDiskEncryption")
- def enable_local_disk_encryption(self) -> Optional[pulumi.Input[builtins.bool]]:
+ def enable_local_disk_encryption(self) -> Optional[pulumi.Input[_builtins.bool]]:
  """
  Some instance types you use to run clusters may have locally attached disks. Databricks may store shuffle data or temporary data on these locally attached disks. To ensure that all data at rest is encrypted for all storage types, including shuffle data stored temporarily on your cluster's local disks, you can enable local disk encryption. When local disk encryption is enabled, Databricks generates an encryption key locally unique to each cluster node and uses it to encrypt all data stored on local disks. The scope of the key is local to each cluster node and is destroyed along with the cluster node itself. During its lifetime, the key resides in memory for encryption and decryption and is stored encrypted on the disk. *Your workloads may run more slowly because of the performance impact of reading and writing encrypted data to and from local volumes. This feature is not available for all Azure Databricks subscriptions. Contact your Microsoft or Databricks account representative to request access.*
  """
  return pulumi.get(self, "enable_local_disk_encryption")

  @enable_local_disk_encryption.setter
- def enable_local_disk_encryption(self, value: Optional[pulumi.Input[builtins.bool]]):
+ def enable_local_disk_encryption(self, value: Optional[pulumi.Input[_builtins.bool]]):
  pulumi.set(self, "enable_local_disk_encryption", value)

- @property
+ @_builtins.property
  @pulumi.getter(name="gcpAttributes")
  def gcp_attributes(self) -> Optional[pulumi.Input['ClusterGcpAttributesArgs']]:
  return pulumi.get(self, "gcp_attributes")
@@ -416,19 +395,19 @@ class ClusterArgs:
  def gcp_attributes(self, value: Optional[pulumi.Input['ClusterGcpAttributesArgs']]):
  pulumi.set(self, "gcp_attributes", value)

- @property
+ @_builtins.property
  @pulumi.getter(name="idempotencyToken")
- def idempotency_token(self) -> Optional[pulumi.Input[builtins.str]]:
+ def idempotency_token(self) -> Optional[pulumi.Input[_builtins.str]]:
  """
  An optional token to guarantee the idempotency of cluster creation requests. If an active cluster with the provided token already exists, the request will not create a new cluster, but it will return the existing running cluster's ID instead. If you specify the idempotency token, upon failure, you can retry until the request succeeds. Databricks platform guarantees to launch exactly one cluster with that idempotency token. This token should have at most 64 characters.
  """
  return pulumi.get(self, "idempotency_token")

  @idempotency_token.setter
- def idempotency_token(self, value: Optional[pulumi.Input[builtins.str]]):
+ def idempotency_token(self, value: Optional[pulumi.Input[_builtins.str]]):
  pulumi.set(self, "idempotency_token", value)

- @property
+ @_builtins.property
  @pulumi.getter(name="initScripts")
  def init_scripts(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ClusterInitScriptArgs']]]]:
  return pulumi.get(self, "init_scripts")
@@ -437,55 +416,55 @@ class ClusterArgs:
437
416
  def init_scripts(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ClusterInitScriptArgs']]]]):
438
417
  pulumi.set(self, "init_scripts", value)
439
418
 
440
- @property
419
+ @_builtins.property
441
420
  @pulumi.getter(name="instancePoolId")
442
- def instance_pool_id(self) -> Optional[pulumi.Input[builtins.str]]:
421
+ def instance_pool_id(self) -> Optional[pulumi.Input[_builtins.str]]:
443
422
  """
444
423
  To reduce cluster start time, you can attach a cluster to a predefined pool of idle instances. When attached to a pool, a cluster allocates its driver and worker nodes from the pool. If the pool does not have sufficient idle resources to accommodate the cluster's request, it expands by allocating new instances from the instance provider. When an attached cluster changes its state to `TERMINATED`, the instances it used are returned to the pool and reused by a different cluster.
445
424
  """
446
425
  return pulumi.get(self, "instance_pool_id")
447
426
 
448
427
  @instance_pool_id.setter
449
- def instance_pool_id(self, value: Optional[pulumi.Input[builtins.str]]):
428
+ def instance_pool_id(self, value: Optional[pulumi.Input[_builtins.str]]):
450
429
  pulumi.set(self, "instance_pool_id", value)
451
430
 
452
- @property
431
+ @_builtins.property
453
432
  @pulumi.getter(name="isPinned")
454
- def is_pinned(self) -> Optional[pulumi.Input[builtins.bool]]:
433
+ def is_pinned(self) -> Optional[pulumi.Input[_builtins.bool]]:
455
434
  """
456
435
  boolean value specifying if the cluster is pinned (not pinned by default). You must be a Databricks administrator to use this. The pinned clusters' maximum number is [limited to 100](https://docs.databricks.com/clusters/clusters-manage.html#pin-a-cluster), so `apply` may fail if you have more than that (this number may change over time, so check Databricks documentation for actual number).
457
436
  """
458
437
  return pulumi.get(self, "is_pinned")
459
438
 
460
439
  @is_pinned.setter
461
- def is_pinned(self, value: Optional[pulumi.Input[builtins.bool]]):
440
+ def is_pinned(self, value: Optional[pulumi.Input[_builtins.bool]]):
462
441
  pulumi.set(self, "is_pinned", value)
463
442
 
464
- @property
443
+ @_builtins.property
465
444
  @pulumi.getter(name="isSingleNode")
466
- def is_single_node(self) -> Optional[pulumi.Input[builtins.bool]]:
445
+ def is_single_node(self) -> Optional[pulumi.Input[_builtins.bool]]:
467
446
  """
468
447
  When set to true, Databricks will automatically set single node related `custom_tags`, `spark_conf`, and `num_workers`.
469
448
  """
470
449
  return pulumi.get(self, "is_single_node")
471
450
 
472
451
  @is_single_node.setter
473
- def is_single_node(self, value: Optional[pulumi.Input[builtins.bool]]):
452
+ def is_single_node(self, value: Optional[pulumi.Input[_builtins.bool]]):
474
453
  pulumi.set(self, "is_single_node", value)
475
454
 
476
- @property
455
+ @_builtins.property
477
456
  @pulumi.getter
478
- def kind(self) -> Optional[pulumi.Input[builtins.str]]:
457
+ def kind(self) -> Optional[pulumi.Input[_builtins.str]]:
479
458
  """
480
459
  The kind of compute described by this compute specification. Possible values (see [API docs](https://docs.databricks.com/api/workspace/clusters/create#kind) for full list): `CLASSIC_PREVIEW` (if corresponding public preview is enabled).
481
460
  """
482
461
  return pulumi.get(self, "kind")
483
462
 
484
463
  @kind.setter
485
- def kind(self, value: Optional[pulumi.Input[builtins.str]]):
464
+ def kind(self, value: Optional[pulumi.Input[_builtins.str]]):
486
465
  pulumi.set(self, "kind", value)
487
466
 
488
- @property
467
+ @_builtins.property
489
468
  @pulumi.getter
490
469
  def libraries(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ClusterLibraryArgs']]]]:
491
470
  return pulumi.get(self, "libraries")
@@ -494,114 +473,102 @@ class ClusterArgs:
494
473
  def libraries(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ClusterLibraryArgs']]]]):
495
474
  pulumi.set(self, "libraries", value)
496
475
 
497
- @property
476
+ @_builtins.property
498
477
  @pulumi.getter(name="noWait")
499
- def no_wait(self) -> Optional[pulumi.Input[builtins.bool]]:
478
+ def no_wait(self) -> Optional[pulumi.Input[_builtins.bool]]:
500
479
  """
501
480
  If true, the provider will not wait for the cluster to reach `RUNNING` state when creating the cluster, allowing cluster creation and library installation to continue asynchronously. Defaults to false (the provider will wait for cluster creation and library installation to succeed).
502
-
503
- The following example demonstrates how to create an autoscaling cluster with [Delta Cache](https://docs.databricks.com/delta/optimizations/delta-cache.html) enabled:
504
-
505
- ```python
506
- import pulumi
507
- import pulumi_databricks as databricks
508
-
509
- smallest = databricks.get_node_type(local_disk=True)
510
- latest_lts = databricks.get_spark_version(long_term_support=True)
511
- shared_autoscaling = databricks.Cluster("shared_autoscaling",
512
- cluster_name="Shared Autoscaling",
513
- spark_version=latest_lts.id,
514
- node_type_id=smallest.id,
515
- autotermination_minutes=20,
516
- autoscale={
517
- "min_workers": 1,
518
- "max_workers": 50,
519
- },
520
- spark_conf={
521
- "spark.databricks.io.cache.enabled": "true",
522
- "spark.databricks.io.cache.maxDiskUsage": "50g",
523
- "spark.databricks.io.cache.maxMetaDataCache": "1g",
524
- })
525
- ```
526
481
  """
527
482
  return pulumi.get(self, "no_wait")
528
483
 
529
484
  @no_wait.setter
530
- def no_wait(self, value: Optional[pulumi.Input[builtins.bool]]):
485
+ def no_wait(self, value: Optional[pulumi.Input[_builtins.bool]]):
531
486
  pulumi.set(self, "no_wait", value)
532
487
 
533
- @property
488
+ @_builtins.property
534
489
  @pulumi.getter(name="nodeTypeId")
535
- def node_type_id(self) -> Optional[pulumi.Input[builtins.str]]:
490
+ def node_type_id(self) -> Optional[pulumi.Input[_builtins.str]]:
536
491
  """
537
492
  Any supported get_node_type id. If `instance_pool_id` is specified, this field is not needed.
538
493
  """
539
494
  return pulumi.get(self, "node_type_id")
540
495
 
541
496
  @node_type_id.setter
542
- def node_type_id(self, value: Optional[pulumi.Input[builtins.str]]):
497
+ def node_type_id(self, value: Optional[pulumi.Input[_builtins.str]]):
543
498
  pulumi.set(self, "node_type_id", value)
544
499
 
545
- @property
500
+ @_builtins.property
546
501
  @pulumi.getter(name="numWorkers")
547
- def num_workers(self) -> Optional[pulumi.Input[builtins.int]]:
502
+ def num_workers(self) -> Optional[pulumi.Input[_builtins.int]]:
548
503
  """
549
504
  Number of worker nodes that this cluster should have. A cluster has one Spark driver and `num_workers` executors for a total of `num_workers` + 1 Spark nodes.
550
505
  """
551
506
  return pulumi.get(self, "num_workers")
552
507
 
553
508
  @num_workers.setter
554
- def num_workers(self, value: Optional[pulumi.Input[builtins.int]]):
509
+ def num_workers(self, value: Optional[pulumi.Input[_builtins.int]]):
555
510
  pulumi.set(self, "num_workers", value)
556
511
 
557
- @property
512
+ @_builtins.property
558
513
  @pulumi.getter(name="policyId")
559
- def policy_id(self) -> Optional[pulumi.Input[builtins.str]]:
514
+ def policy_id(self) -> Optional[pulumi.Input[_builtins.str]]:
560
515
  """
561
516
  Identifier of Cluster Policy to validate cluster and preset certain defaults. *The primary use for cluster policies is to allow users to create policy-scoped clusters via the UI rather than sharing configuration for API-created clusters.* For example, when you specify `policy_id` of the [external metastore](https://docs.databricks.com/administration-guide/clusters/policies.html#external-metastore-policy) policy, you still have to fill in the relevant keys for `spark_conf`. If the relevant fields aren't filled in, this will cause configuration drift to be detected on each plan/apply, and Pulumi will try to apply the detected changes.
562
517
  """
563
518
  return pulumi.get(self, "policy_id")
564
519
 
565
520
  @policy_id.setter
566
- def policy_id(self, value: Optional[pulumi.Input[builtins.str]]):
521
+ def policy_id(self, value: Optional[pulumi.Input[_builtins.str]]):
567
522
  pulumi.set(self, "policy_id", value)
568
523
 
569
- @property
524
+ @_builtins.property
525
+ @pulumi.getter(name="providerConfig")
526
+ def provider_config(self) -> Optional[pulumi.Input['ClusterProviderConfigArgs']]:
527
+ """
528
+ Configure the provider for management through the account provider. This block consists of the following fields:
529
+ """
530
+ return pulumi.get(self, "provider_config")
531
+
532
+ @provider_config.setter
533
+ def provider_config(self, value: Optional[pulumi.Input['ClusterProviderConfigArgs']]):
534
+ pulumi.set(self, "provider_config", value)
535
+
536
+ @_builtins.property
570
537
  @pulumi.getter(name="remoteDiskThroughput")
571
- def remote_disk_throughput(self) -> Optional[pulumi.Input[builtins.int]]:
538
+ def remote_disk_throughput(self) -> Optional[pulumi.Input[_builtins.int]]:
572
539
  return pulumi.get(self, "remote_disk_throughput")
573
540
 
574
541
  @remote_disk_throughput.setter
575
- def remote_disk_throughput(self, value: Optional[pulumi.Input[builtins.int]]):
542
+ def remote_disk_throughput(self, value: Optional[pulumi.Input[_builtins.int]]):
576
543
  pulumi.set(self, "remote_disk_throughput", value)
577
544
 
578
- @property
545
+ @_builtins.property
579
546
  @pulumi.getter(name="runtimeEngine")
580
- def runtime_engine(self) -> Optional[pulumi.Input[builtins.str]]:
547
+ def runtime_engine(self) -> Optional[pulumi.Input[_builtins.str]]:
581
548
  """
582
549
  The type of runtime engine to use. If not specified, the runtime engine type is inferred based on the spark_version value. Allowed values include: `PHOTON`, `STANDARD`.
583
550
  """
584
551
  return pulumi.get(self, "runtime_engine")
585
552
 
586
553
  @runtime_engine.setter
587
- def runtime_engine(self, value: Optional[pulumi.Input[builtins.str]]):
554
+ def runtime_engine(self, value: Optional[pulumi.Input[_builtins.str]]):
588
555
  pulumi.set(self, "runtime_engine", value)
589
556
 
590
- @property
557
+ @_builtins.property
591
558
  @pulumi.getter(name="singleUserName")
592
- def single_user_name(self) -> Optional[pulumi.Input[builtins.str]]:
559
+ def single_user_name(self) -> Optional[pulumi.Input[_builtins.str]]:
593
560
  """
594
561
  The optional user name of the user (or group name if `kind` is specified) to assign to an interactive cluster. This field is required when `data_security_mode` is set to `SINGLE_USER` or AAD Passthrough for Azure Data Lake Storage (ADLS) with a single-user cluster (i.e., not high-concurrency clusters).
595
562
  """
596
563
  return pulumi.get(self, "single_user_name")
597
564
 
598
565
  @single_user_name.setter
599
- def single_user_name(self, value: Optional[pulumi.Input[builtins.str]]):
566
+ def single_user_name(self, value: Optional[pulumi.Input[_builtins.str]]):
600
567
  pulumi.set(self, "single_user_name", value)
601
568
 
602
- @property
569
+ @_builtins.property
603
570
  @pulumi.getter(name="sparkConf")
604
- def spark_conf(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]]]:
571
+ def spark_conf(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]]:
605
572
  """
606
573
  should have the following items:
607
574
  * `spark.databricks.repl.allowedLanguages` set to a list of supported languages, for example: `python,sql`, or `python,sql,r`. Scala is not supported!
@@ -610,55 +577,55 @@ class ClusterArgs:
610
577
  return pulumi.get(self, "spark_conf")
611
578
 
612
579
  @spark_conf.setter
613
- def spark_conf(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]]]):
580
+ def spark_conf(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]]):
614
581
  pulumi.set(self, "spark_conf", value)
615
582
 
616
- @property
583
+ @_builtins.property
617
584
  @pulumi.getter(name="sparkEnvVars")
618
- def spark_env_vars(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]]]:
585
+ def spark_env_vars(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]]:
619
586
  """
620
587
  Map with environment variable key-value pairs to fine-tune Spark clusters. Key-value pairs of the form (X,Y) are exported (i.e., X='Y') while launching the driver and workers.
621
588
  """
622
589
  return pulumi.get(self, "spark_env_vars")
623
590
 
624
591
  @spark_env_vars.setter
625
- def spark_env_vars(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]]]):
592
+ def spark_env_vars(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]]):
626
593
  pulumi.set(self, "spark_env_vars", value)
627
594
 
628
- @property
595
+ @_builtins.property
629
596
  @pulumi.getter(name="sshPublicKeys")
630
- def ssh_public_keys(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[builtins.str]]]]:
597
+ def ssh_public_keys(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]]:
631
598
  """
632
599
  SSH public key contents that will be added to each Spark node in this cluster. The corresponding private keys can be used to login with the user name ubuntu on port 2200. You can specify up to 10 keys.
633
600
  """
634
601
  return pulumi.get(self, "ssh_public_keys")
635
602
 
636
603
  @ssh_public_keys.setter
637
- def ssh_public_keys(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[builtins.str]]]]):
604
+ def ssh_public_keys(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]]):
638
605
  pulumi.set(self, "ssh_public_keys", value)
639
606
 
640
- @property
607
+ @_builtins.property
641
608
  @pulumi.getter(name="totalInitialRemoteDiskSize")
642
- def total_initial_remote_disk_size(self) -> Optional[pulumi.Input[builtins.int]]:
609
+ def total_initial_remote_disk_size(self) -> Optional[pulumi.Input[_builtins.int]]:
643
610
  return pulumi.get(self, "total_initial_remote_disk_size")
644
611
 
645
612
  @total_initial_remote_disk_size.setter
646
- def total_initial_remote_disk_size(self, value: Optional[pulumi.Input[builtins.int]]):
613
+ def total_initial_remote_disk_size(self, value: Optional[pulumi.Input[_builtins.int]]):
647
614
  pulumi.set(self, "total_initial_remote_disk_size", value)
648
615
 
649
- @property
616
+ @_builtins.property
650
617
  @pulumi.getter(name="useMlRuntime")
651
- def use_ml_runtime(self) -> Optional[pulumi.Input[builtins.bool]]:
618
+ def use_ml_runtime(self) -> Optional[pulumi.Input[_builtins.bool]]:
652
619
  """
653
620
  Whether the ML runtime should be selected or not. The actual runtime is determined by `spark_version` (DBR release), this field `use_ml_runtime`, and whether `node_type_id` is a GPU node or not.
654
621
  """
655
622
  return pulumi.get(self, "use_ml_runtime")
656
623
 
657
624
  @use_ml_runtime.setter
658
- def use_ml_runtime(self, value: Optional[pulumi.Input[builtins.bool]]):
625
+ def use_ml_runtime(self, value: Optional[pulumi.Input[_builtins.bool]]):
659
626
  pulumi.set(self, "use_ml_runtime", value)
660
627
 
661
- @property
628
+ @_builtins.property
662
629
  @pulumi.getter(name="workloadType")
663
630
  def workload_type(self) -> Optional[pulumi.Input['ClusterWorkloadTypeArgs']]:
664
631
  return pulumi.get(self, "workload_type")
@@ -671,53 +638,54 @@ class ClusterArgs:
671
638
  @pulumi.input_type
672
639
  class _ClusterState:
673
640
  def __init__(__self__, *,
674
- apply_policy_default_values: Optional[pulumi.Input[builtins.bool]] = None,
641
+ apply_policy_default_values: Optional[pulumi.Input[_builtins.bool]] = None,
675
642
  autoscale: Optional[pulumi.Input['ClusterAutoscaleArgs']] = None,
676
- autotermination_minutes: Optional[pulumi.Input[builtins.int]] = None,
643
+ autotermination_minutes: Optional[pulumi.Input[_builtins.int]] = None,
677
644
  aws_attributes: Optional[pulumi.Input['ClusterAwsAttributesArgs']] = None,
678
645
  azure_attributes: Optional[pulumi.Input['ClusterAzureAttributesArgs']] = None,
679
- cluster_id: Optional[pulumi.Input[builtins.str]] = None,
646
+ cluster_id: Optional[pulumi.Input[_builtins.str]] = None,
680
647
  cluster_log_conf: Optional[pulumi.Input['ClusterClusterLogConfArgs']] = None,
681
648
  cluster_mount_infos: Optional[pulumi.Input[Sequence[pulumi.Input['ClusterClusterMountInfoArgs']]]] = None,
682
- cluster_name: Optional[pulumi.Input[builtins.str]] = None,
683
- custom_tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]]] = None,
684
- data_security_mode: Optional[pulumi.Input[builtins.str]] = None,
685
- default_tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]]] = None,
649
+ cluster_name: Optional[pulumi.Input[_builtins.str]] = None,
650
+ custom_tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]] = None,
651
+ data_security_mode: Optional[pulumi.Input[_builtins.str]] = None,
652
+ default_tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]] = None,
686
653
  docker_image: Optional[pulumi.Input['ClusterDockerImageArgs']] = None,
687
- driver_instance_pool_id: Optional[pulumi.Input[builtins.str]] = None,
688
- driver_node_type_id: Optional[pulumi.Input[builtins.str]] = None,
689
- enable_elastic_disk: Optional[pulumi.Input[builtins.bool]] = None,
690
- enable_local_disk_encryption: Optional[pulumi.Input[builtins.bool]] = None,
654
+ driver_instance_pool_id: Optional[pulumi.Input[_builtins.str]] = None,
655
+ driver_node_type_id: Optional[pulumi.Input[_builtins.str]] = None,
656
+ enable_elastic_disk: Optional[pulumi.Input[_builtins.bool]] = None,
657
+ enable_local_disk_encryption: Optional[pulumi.Input[_builtins.bool]] = None,
691
658
  gcp_attributes: Optional[pulumi.Input['ClusterGcpAttributesArgs']] = None,
692
- idempotency_token: Optional[pulumi.Input[builtins.str]] = None,
659
+ idempotency_token: Optional[pulumi.Input[_builtins.str]] = None,
693
660
  init_scripts: Optional[pulumi.Input[Sequence[pulumi.Input['ClusterInitScriptArgs']]]] = None,
694
- instance_pool_id: Optional[pulumi.Input[builtins.str]] = None,
695
- is_pinned: Optional[pulumi.Input[builtins.bool]] = None,
696
- is_single_node: Optional[pulumi.Input[builtins.bool]] = None,
697
- kind: Optional[pulumi.Input[builtins.str]] = None,
661
+ instance_pool_id: Optional[pulumi.Input[_builtins.str]] = None,
662
+ is_pinned: Optional[pulumi.Input[_builtins.bool]] = None,
663
+ is_single_node: Optional[pulumi.Input[_builtins.bool]] = None,
664
+ kind: Optional[pulumi.Input[_builtins.str]] = None,
698
665
  libraries: Optional[pulumi.Input[Sequence[pulumi.Input['ClusterLibraryArgs']]]] = None,
699
- no_wait: Optional[pulumi.Input[builtins.bool]] = None,
700
- node_type_id: Optional[pulumi.Input[builtins.str]] = None,
701
- num_workers: Optional[pulumi.Input[builtins.int]] = None,
702
- policy_id: Optional[pulumi.Input[builtins.str]] = None,
703
- remote_disk_throughput: Optional[pulumi.Input[builtins.int]] = None,
704
- runtime_engine: Optional[pulumi.Input[builtins.str]] = None,
705
- single_user_name: Optional[pulumi.Input[builtins.str]] = None,
706
- spark_conf: Optional[pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]]] = None,
707
- spark_env_vars: Optional[pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]]] = None,
708
- spark_version: Optional[pulumi.Input[builtins.str]] = None,
709
- ssh_public_keys: Optional[pulumi.Input[Sequence[pulumi.Input[builtins.str]]]] = None,
710
- state: Optional[pulumi.Input[builtins.str]] = None,
711
- total_initial_remote_disk_size: Optional[pulumi.Input[builtins.int]] = None,
712
- url: Optional[pulumi.Input[builtins.str]] = None,
713
- use_ml_runtime: Optional[pulumi.Input[builtins.bool]] = None,
666
+ no_wait: Optional[pulumi.Input[_builtins.bool]] = None,
667
+ node_type_id: Optional[pulumi.Input[_builtins.str]] = None,
668
+ num_workers: Optional[pulumi.Input[_builtins.int]] = None,
669
+ policy_id: Optional[pulumi.Input[_builtins.str]] = None,
670
+ provider_config: Optional[pulumi.Input['ClusterProviderConfigArgs']] = None,
671
+ remote_disk_throughput: Optional[pulumi.Input[_builtins.int]] = None,
672
+ runtime_engine: Optional[pulumi.Input[_builtins.str]] = None,
673
+ single_user_name: Optional[pulumi.Input[_builtins.str]] = None,
674
+ spark_conf: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]] = None,
675
+ spark_env_vars: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]] = None,
676
+ spark_version: Optional[pulumi.Input[_builtins.str]] = None,
677
+ ssh_public_keys: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]] = None,
678
+ state: Optional[pulumi.Input[_builtins.str]] = None,
679
+ total_initial_remote_disk_size: Optional[pulumi.Input[_builtins.int]] = None,
680
+ url: Optional[pulumi.Input[_builtins.str]] = None,
681
+ use_ml_runtime: Optional[pulumi.Input[_builtins.bool]] = None,
714
682
  workload_type: Optional[pulumi.Input['ClusterWorkloadTypeArgs']] = None):
715
683
  """
716
684
  Input properties used for looking up and filtering Cluster resources.
717
- :param pulumi.Input[builtins.bool] apply_policy_default_values: Whether to use policy default values for missing cluster attributes.
718
- :param pulumi.Input[builtins.int] autotermination_minutes: Automatically terminate the cluster after being inactive for this time in minutes. If specified, the threshold must be between 10 and 10000 minutes. You can also set this value to 0 to explicitly disable automatic termination. Defaults to `60`. *We highly recommend having this setting present for Interactive/BI clusters.*
719
- :param pulumi.Input[builtins.str] cluster_name: Cluster name, which doesn't have to be unique. If not specified at creation, the cluster name will be an empty string.
720
- :param pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]] custom_tags: should have tag `ResourceClass` set to value `Serverless`
685
+ :param pulumi.Input[_builtins.bool] apply_policy_default_values: Whether to use policy default values for missing cluster attributes.
686
+ :param pulumi.Input[_builtins.int] autotermination_minutes: Automatically terminate the cluster after being inactive for this time in minutes. If specified, the threshold must be between 10 and 10000 minutes. You can also set this value to 0 to explicitly disable automatic termination. Defaults to `60`. *We highly recommend having this setting present for Interactive/BI clusters.*
687
+ :param pulumi.Input[_builtins.str] cluster_name: Cluster name, which doesn't have to be unique. If not specified at creation, the cluster name will be an empty string.
688
+ :param pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]] custom_tags: should have tag `ResourceClass` set to value `Serverless`
721
689
 
722
690
  For example:
723
691
 
@@ -738,58 +706,35 @@ class _ClusterState:
738
706
  "ResourceClass": "Serverless",
739
707
  })
740
708
  ```
741
- :param pulumi.Input[builtins.str] data_security_mode: Select the security features of the cluster (see [API docs](https://docs.databricks.com/api/workspace/clusters/create#data_security_mode) for full list of values). [Unity Catalog requires](https://docs.databricks.com/data-governance/unity-catalog/compute.html#create-clusters--sql-warehouses-with-unity-catalog-access) `SINGLE_USER` or `USER_ISOLATION` mode. `LEGACY_PASSTHROUGH` for passthrough cluster and `LEGACY_TABLE_ACL` for Table ACL cluster. If omitted, default security features are enabled. To disable security features use `NONE` or legacy mode `NO_ISOLATION`. If `kind` is specified, then the following options are available:
709
+ :param pulumi.Input[_builtins.str] data_security_mode: Select the security features of the cluster (see [API docs](https://docs.databricks.com/api/workspace/clusters/create#data_security_mode) for full list of values). [Unity Catalog requires](https://docs.databricks.com/data-governance/unity-catalog/compute.html#create-clusters--sql-warehouses-with-unity-catalog-access) `SINGLE_USER` or `USER_ISOLATION` mode. `LEGACY_PASSTHROUGH` for passthrough cluster and `LEGACY_TABLE_ACL` for Table ACL cluster. If omitted, default security features are enabled. To disable security features use `NONE` or legacy mode `NO_ISOLATION`. If `kind` is specified, then the following options are available:
742
710
  * `DATA_SECURITY_MODE_AUTO`: Databricks will choose the most appropriate access mode depending on your compute configuration.
743
711
  * `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`.
744
712
  * `DATA_SECURITY_MODE_DEDICATED`: Alias for `SINGLE_USER`.
745
- :param pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]] default_tags: (map) Tags that are added by Databricks by default, regardless of any `custom_tags` that may have been added. These include: Vendor: Databricks, Creator: <username_of_creator>, ClusterName: <name_of_cluster>, ClusterId: <id_of_cluster>, Name: <Databricks internal use>, and any workspace and pool tags.
746
- :param pulumi.Input[builtins.str] driver_instance_pool_id: similar to `instance_pool_id`, but for driver node. If omitted, and `instance_pool_id` is specified, then the driver will be allocated from that pool.
747
- :param pulumi.Input[builtins.str] driver_node_type_id: The node type of the Spark driver. This field is optional; if unset, API will set the driver node type to the same value as `node_type_id` defined above.
748
- :param pulumi.Input[builtins.bool] enable_elastic_disk: If you don't want to allocate a fixed number of EBS volumes at cluster creation time, use autoscaling local storage. With autoscaling local storage, Databricks monitors the amount of free disk space available on your cluster's Spark workers. If a worker begins to run too low on disk, Databricks automatically attaches a new EBS volume to the worker before it runs out of disk space. EBS volumes are attached up to a limit of 5 TB of total disk space per instance (including the instance's local storage). To scale down EBS usage, make sure you have `autotermination_minutes` and `autoscale` attributes set. More documentation available at [cluster configuration page](https://docs.databricks.com/clusters/configure.html#autoscaling-local-storage-1).
749
- :param pulumi.Input[builtins.bool] enable_local_disk_encryption: Some instance types you use to run clusters may have locally attached disks. Databricks may store shuffle data or temporary data on these locally attached disks. To ensure that all data at rest is encrypted for all storage types, including shuffle data stored temporarily on your cluster's local disks, you can enable local disk encryption. When local disk encryption is enabled, Databricks generates an encryption key locally unique to each cluster node and uses it to encrypt all data stored on local disks. The scope of the key is local to each cluster node and is destroyed along with the cluster node itself. During its lifetime, the key resides in memory for encryption and decryption and is stored encrypted on the disk. *Your workloads may run more slowly because of the performance impact of reading and writing encrypted data to and from local volumes. This feature is not available for all Azure Databricks subscriptions. Contact your Microsoft or Databricks account representative to request access.*
750
- :param pulumi.Input[builtins.str] idempotency_token: An optional token to guarantee the idempotency of cluster creation requests. If an active cluster with the provided token already exists, the request will not create a new cluster, but it will return the existing running cluster's ID instead. If you specify the idempotency token, upon failure, you can retry until the request succeeds. Databricks platform guarantees to launch exactly one cluster with that idempotency token. This token should have at most 64 characters.
751
- :param pulumi.Input[builtins.str] instance_pool_id: To reduce cluster start time, you can attach a cluster to a predefined pool of idle instances. When attached to a pool, a cluster allocates its driver and worker nodes from the pool. If the pool does not have sufficient idle resources to accommodate the cluster's request, it expands by allocating new instances from the instance provider. When an attached cluster changes its state to `TERMINATED`, the instances it used are returned to the pool and reused by a different cluster.
752
- :param pulumi.Input[builtins.bool] is_pinned: boolean value specifying if the cluster is pinned (not pinned by default). You must be a Databricks administrator to use this. The pinned clusters' maximum number is [limited to 100](https://docs.databricks.com/clusters/clusters-manage.html#pin-a-cluster), so `apply` may fail if you have more than that (this number may change over time, so check Databricks documentation for actual number).
753
- :param pulumi.Input[builtins.bool] is_single_node: When set to true, Databricks will automatically set single node related `custom_tags`, `spark_conf`, and `num_workers`.
754
- :param pulumi.Input[builtins.str] kind: The kind of compute described by this compute specification. Possible values (see [API docs](https://docs.databricks.com/api/workspace/clusters/create#kind) for full list): `CLASSIC_PREVIEW` (if corresponding public preview is enabled).
755
- :param pulumi.Input[builtins.bool] no_wait: If true, the provider will not wait for the cluster to reach `RUNNING` state when creating the cluster, allowing cluster creation and library installation to continue asynchronously. Defaults to false (the provider will wait for cluster creation and library installation to succeed).
756
-
757
- The following example demonstrates how to create an autoscaling cluster with [Delta Cache](https://docs.databricks.com/delta/optimizations/delta-cache.html) enabled:
758
-
759
- ```python
760
- import pulumi
761
- import pulumi_databricks as databricks
762
-
763
- smallest = databricks.get_node_type(local_disk=True)
764
- latest_lts = databricks.get_spark_version(long_term_support=True)
765
- shared_autoscaling = databricks.Cluster("shared_autoscaling",
766
- cluster_name="Shared Autoscaling",
767
- spark_version=latest_lts.id,
768
- node_type_id=smallest.id,
769
- autotermination_minutes=20,
770
- autoscale={
771
- "min_workers": 1,
772
- "max_workers": 50,
773
- },
774
- spark_conf={
775
- "spark.databricks.io.cache.enabled": "true",
776
- "spark.databricks.io.cache.maxDiskUsage": "50g",
777
- "spark.databricks.io.cache.maxMetaDataCache": "1g",
778
- })
779
- ```
780
- :param pulumi.Input[builtins.str] node_type_id: Any supported get_node_type id. If `instance_pool_id` is specified, this field is not needed.
781
- :param pulumi.Input[builtins.int] num_workers: Number of worker nodes that this cluster should have. A cluster has one Spark driver and `num_workers` executors for a total of `num_workers` + 1 Spark nodes.
782
- :param pulumi.Input[builtins.str] policy_id: Identifier of Cluster Policy to validate cluster and preset certain defaults. *The primary use for cluster policies is to allow users to create policy-scoped clusters via UI rather than sharing configuration for API-created clusters.* For example, when you specify `policy_id` of [external metastore](https://docs.databricks.com/administration-guide/clusters/policies.html#external-metastore-policy) policy, you still have to fill in relevant keys for `spark_conf`. If relevant fields aren't filled in, then it will cause the configuration drift detected on each plan/apply, and Pulumi will try to apply the detected changes.
783
- :param pulumi.Input[builtins.str] runtime_engine: The type of runtime engine to use. If not specified, the runtime engine type is inferred based on the spark_version value. Allowed values include: `PHOTON`, `STANDARD`.
784
- :param pulumi.Input[builtins.str] single_user_name: The optional user name of the user (or group name if `kind` if specified) to assign to an interactive cluster. This field is required when using `data_security_mode` set to `SINGLE_USER` or AAD Passthrough for Azure Data Lake Storage (ADLS) with a single-user cluster (i.e., not high-concurrency clusters).
785
- :param pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]] spark_conf: should have following items:
713
+ :param pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]] default_tags: (map) Tags that are added by Databricks by default, regardless of any `custom_tags` that may have been added. These include: Vendor: Databricks, Creator: <username_of_creator>, ClusterName: <name_of_cluster>, ClusterId: <id_of_cluster>, Name: <Databricks internal use>, and any workspace and pool tags.
714
+ :param pulumi.Input[_builtins.str] driver_instance_pool_id: similar to `instance_pool_id`, but for driver node. If omitted, and `instance_pool_id` is specified, then the driver will be allocated from that pool.
715
+ :param pulumi.Input[_builtins.str] driver_node_type_id: The node type of the Spark driver. This field is optional; if unset, API will set the driver node type to the same value as `node_type_id` defined above.
716
+ :param pulumi.Input[_builtins.bool] enable_elastic_disk: If you don't want to allocate a fixed number of EBS volumes at cluster creation time, use autoscaling local storage. With autoscaling local storage, Databricks monitors the amount of free disk space available on your cluster's Spark workers. If a worker begins to run too low on disk, Databricks automatically attaches a new EBS volume to the worker before it runs out of disk space. EBS volumes are attached up to a limit of 5 TB of total disk space per instance (including the instance's local storage). To scale down EBS usage, make sure you have `autotermination_minutes` and `autoscale` attributes set. More documentation available at [cluster configuration page](https://docs.databricks.com/clusters/configure.html#autoscaling-local-storage-1).
717
+ :param pulumi.Input[_builtins.bool] enable_local_disk_encryption: Some instance types you use to run clusters may have locally attached disks. Databricks may store shuffle data or temporary data on these locally attached disks. To ensure that all data at rest is encrypted for all storage types, including shuffle data stored temporarily on your cluster's local disks, you can enable local disk encryption. When local disk encryption is enabled, Databricks generates an encryption key locally unique to each cluster node and uses it to encrypt all data stored on local disks. The scope of the key is local to each cluster node and is destroyed along with the cluster node itself. During its lifetime, the key resides in memory for encryption and decryption and is stored encrypted on the disk. *Your workloads may run more slowly because of the performance impact of reading and writing encrypted data to and from local volumes. This feature is not available for all Azure Databricks subscriptions. Contact your Microsoft or Databricks account representative to request access.*
718
+ :param pulumi.Input[_builtins.str] idempotency_token: An optional token to guarantee the idempotency of cluster creation requests. If an active cluster with the provided token already exists, the request will not create a new cluster, but it will return the existing running cluster's ID instead. If you specify the idempotency token, upon failure, you can retry until the request succeeds. Databricks platform guarantees to launch exactly one cluster with that idempotency token. This token should have at most 64 characters.
719
+ :param pulumi.Input[_builtins.str] instance_pool_id: To reduce cluster start time, you can attach a cluster to a predefined pool of idle instances. When attached to a pool, a cluster allocates its driver and worker nodes from the pool. If the pool does not have sufficient idle resources to accommodate the cluster's request, it expands by allocating new instances from the instance provider. When an attached cluster changes its state to `TERMINATED`, the instances it used are returned to the pool and reused by a different cluster.
720
+ :param pulumi.Input[_builtins.bool] is_pinned: boolean value specifying if the cluster is pinned (not pinned by default). You must be a Databricks administrator to use this. The pinned clusters' maximum number is [limited to 100](https://docs.databricks.com/clusters/clusters-manage.html#pin-a-cluster), so `apply` may fail if you have more than that (this number may change over time, so check Databricks documentation for actual number).
721
+ :param pulumi.Input[_builtins.bool] is_single_node: When set to true, Databricks will automatically set single node related `custom_tags`, `spark_conf`, and `num_workers`.
722
+ :param pulumi.Input[_builtins.str] kind: The kind of compute described by this compute specification. Possible values (see [API docs](https://docs.databricks.com/api/workspace/clusters/create#kind) for full list): `CLASSIC_PREVIEW` (if corresponding public preview is enabled).
723
+ :param pulumi.Input[_builtins.bool] no_wait: If true, the provider will not wait for the cluster to reach `RUNNING` state when creating the cluster, allowing cluster creation and library installation to continue asynchronously. Defaults to false (the provider will wait for cluster creation and library installation to succeed).
724
+ :param pulumi.Input[_builtins.str] node_type_id: Any supported get_node_type id. If `instance_pool_id` is specified, this field is not needed.
725
+ :param pulumi.Input[_builtins.int] num_workers: Number of worker nodes that this cluster should have. A cluster has one Spark driver and `num_workers` executors for a total of `num_workers` + 1 Spark nodes.
726
+ :param pulumi.Input[_builtins.str] policy_id: Identifier of Cluster Policy to validate cluster and preset certain defaults. *The primary use for cluster policies is to allow users to create policy-scoped clusters via the UI rather than sharing configuration for API-created clusters.* For example, when you specify `policy_id` of the [external metastore](https://docs.databricks.com/administration-guide/clusters/policies.html#external-metastore-policy) policy, you still have to fill in the relevant keys for `spark_conf`. If the relevant fields aren't filled in, this will cause configuration drift to be detected on each plan/apply, and Pulumi will try to apply the detected changes.
727
+ :param pulumi.Input['ClusterProviderConfigArgs'] provider_config: Configure the provider for management through the account provider. This block consists of the following fields:
728
+ :param pulumi.Input[_builtins.str] runtime_engine: The type of runtime engine to use. If not specified, the runtime engine type is inferred based on the spark_version value. Allowed values include: `PHOTON`, `STANDARD`.
729
+ :param pulumi.Input[_builtins.str] single_user_name: The optional user name of the user (or group name if `kind` is specified) to assign to an interactive cluster. This field is required when `data_security_mode` is set to `SINGLE_USER` or AAD Passthrough for Azure Data Lake Storage (ADLS) with a single-user cluster (i.e., not high-concurrency clusters).
730
+ :param pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]] spark_conf: should have the following items:
786
731
  * `spark.databricks.repl.allowedLanguages` set to a list of supported languages, for example: `python,sql`, or `python,sql,r`. Scala is not supported!
787
732
  * `spark.databricks.cluster.profile` set to `serverless`
788
- :param pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]] spark_env_vars: Map with environment variable key-value pairs to fine-tune Spark clusters. Key-value pairs of the form (X,Y) are exported (i.e., X='Y') while launching the driver and workers.
789
- :param pulumi.Input[builtins.str] spark_version: [Runtime version](https://docs.databricks.com/runtime/index.html) of the cluster. Any supported get_spark_version id. We advise using Cluster Policies to restrict the list of versions for simplicity while maintaining enough control.
790
- :param pulumi.Input[Sequence[pulumi.Input[builtins.str]]] ssh_public_keys: SSH public key contents that will be added to each Spark node in this cluster. The corresponding private keys can be used to login with the user name ubuntu on port 2200. You can specify up to 10 keys.
791
- :param pulumi.Input[builtins.str] state: (string) State of the cluster.
792
- :param pulumi.Input[builtins.bool] use_ml_runtime: Whenever ML runtime should be selected or not. Actual runtime is determined by `spark_version` (DBR release), this field `use_ml_runtime`, and whether `node_type_id` is GPU node or not.
733
+ :param pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]] spark_env_vars: Map with environment variable key-value pairs to fine-tune Spark clusters. Key-value pairs of the form (X,Y) are exported (i.e., X='Y') while launching the driver and workers.
734
+ :param pulumi.Input[_builtins.str] spark_version: [Runtime version](https://docs.databricks.com/runtime/index.html) of the cluster. Any supported get_spark_version id. We advise using Cluster Policies to restrict the list of versions for simplicity while maintaining enough control.
735
+ :param pulumi.Input[Sequence[pulumi.Input[_builtins.str]]] ssh_public_keys: SSH public key contents that will be added to each Spark node in this cluster. The corresponding private keys can be used to login with the user name ubuntu on port 2200. You can specify up to 10 keys.
736
+ :param pulumi.Input[_builtins.str] state: (string) State of the cluster.
737
+ :param pulumi.Input[_builtins.bool] use_ml_runtime: Whether the ML runtime should be selected or not. The actual runtime is determined by `spark_version` (DBR release), this field `use_ml_runtime`, and whether `node_type_id` is a GPU node or not.
793
738
  """
794
739
  if apply_policy_default_values is not None:
795
740
  pulumi.set(__self__, "apply_policy_default_values", apply_policy_default_values)
@@ -849,6 +794,8 @@ class _ClusterState:
849
794
  pulumi.set(__self__, "num_workers", num_workers)
850
795
  if policy_id is not None:
851
796
  pulumi.set(__self__, "policy_id", policy_id)
797
+ if provider_config is not None:
798
+ pulumi.set(__self__, "provider_config", provider_config)
852
799
  if remote_disk_throughput is not None:
853
800
  pulumi.set(__self__, "remote_disk_throughput", remote_disk_throughput)
854
801
  if runtime_engine is not None:
@@ -874,19 +821,19 @@ class _ClusterState:
874
821
  if workload_type is not None:
875
822
  pulumi.set(__self__, "workload_type", workload_type)
876
823
 
877
- @property
824
+ @_builtins.property
878
825
  @pulumi.getter(name="applyPolicyDefaultValues")
879
- def apply_policy_default_values(self) -> Optional[pulumi.Input[builtins.bool]]:
826
+ def apply_policy_default_values(self) -> Optional[pulumi.Input[_builtins.bool]]:
880
827
  """
881
828
  Whether to use policy default values for missing cluster attributes.
882
829
  """
883
830
  return pulumi.get(self, "apply_policy_default_values")
884
831
 
885
832
  @apply_policy_default_values.setter
886
- def apply_policy_default_values(self, value: Optional[pulumi.Input[builtins.bool]]):
833
+ def apply_policy_default_values(self, value: Optional[pulumi.Input[_builtins.bool]]):
887
834
  pulumi.set(self, "apply_policy_default_values", value)
888
835
 
889
- @property
836
+ @_builtins.property
890
837
  @pulumi.getter
891
838
  def autoscale(self) -> Optional[pulumi.Input['ClusterAutoscaleArgs']]:
892
839
  return pulumi.get(self, "autoscale")
@@ -895,19 +842,19 @@ class _ClusterState:
895
842
  def autoscale(self, value: Optional[pulumi.Input['ClusterAutoscaleArgs']]):
896
843
  pulumi.set(self, "autoscale", value)
897
844
 
898
- @property
845
+ @_builtins.property
899
846
  @pulumi.getter(name="autoterminationMinutes")
900
- def autotermination_minutes(self) -> Optional[pulumi.Input[builtins.int]]:
847
+ def autotermination_minutes(self) -> Optional[pulumi.Input[_builtins.int]]:
901
848
  """
902
849
  Automatically terminate the cluster after being inactive for this time in minutes. If specified, the threshold must be between 10 and 10000 minutes. You can also set this value to 0 to explicitly disable automatic termination. Defaults to `60`. *We highly recommend having this setting present for Interactive/BI clusters.*
903
850
  """
904
851
  return pulumi.get(self, "autotermination_minutes")
905
852
 
906
853
  @autotermination_minutes.setter
907
- def autotermination_minutes(self, value: Optional[pulumi.Input[builtins.int]]):
854
+ def autotermination_minutes(self, value: Optional[pulumi.Input[_builtins.int]]):
908
855
  pulumi.set(self, "autotermination_minutes", value)
909
856
 
910
- @property
857
+ @_builtins.property
911
858
  @pulumi.getter(name="awsAttributes")
912
859
  def aws_attributes(self) -> Optional[pulumi.Input['ClusterAwsAttributesArgs']]:
913
860
  return pulumi.get(self, "aws_attributes")
@@ -916,7 +863,7 @@ class _ClusterState:
916
863
  def aws_attributes(self, value: Optional[pulumi.Input['ClusterAwsAttributesArgs']]):
917
864
  pulumi.set(self, "aws_attributes", value)
918
865
 
919
- @property
866
+ @_builtins.property
920
867
  @pulumi.getter(name="azureAttributes")
921
868
  def azure_attributes(self) -> Optional[pulumi.Input['ClusterAzureAttributesArgs']]:
922
869
  return pulumi.get(self, "azure_attributes")
@@ -925,16 +872,16 @@ class _ClusterState:
925
872
  def azure_attributes(self, value: Optional[pulumi.Input['ClusterAzureAttributesArgs']]):
926
873
  pulumi.set(self, "azure_attributes", value)
927
874
 
928
- @property
875
+ @_builtins.property
929
876
  @pulumi.getter(name="clusterId")
930
- def cluster_id(self) -> Optional[pulumi.Input[builtins.str]]:
877
+ def cluster_id(self) -> Optional[pulumi.Input[_builtins.str]]:
931
878
  return pulumi.get(self, "cluster_id")
932
879
 
933
880
  @cluster_id.setter
934
- def cluster_id(self, value: Optional[pulumi.Input[builtins.str]]):
881
+ def cluster_id(self, value: Optional[pulumi.Input[_builtins.str]]):
935
882
  pulumi.set(self, "cluster_id", value)
936
883
 
937
- @property
884
+ @_builtins.property
938
885
  @pulumi.getter(name="clusterLogConf")
939
886
  def cluster_log_conf(self) -> Optional[pulumi.Input['ClusterClusterLogConfArgs']]:
940
887
  return pulumi.get(self, "cluster_log_conf")
@@ -943,7 +890,7 @@ class _ClusterState:
943
890
  def cluster_log_conf(self, value: Optional[pulumi.Input['ClusterClusterLogConfArgs']]):
944
891
  pulumi.set(self, "cluster_log_conf", value)
945
892
 
946
- @property
893
+ @_builtins.property
947
894
  @pulumi.getter(name="clusterMountInfos")
948
895
  def cluster_mount_infos(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ClusterClusterMountInfoArgs']]]]:
949
896
  return pulumi.get(self, "cluster_mount_infos")
@@ -952,21 +899,21 @@ class _ClusterState:
952
899
  def cluster_mount_infos(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ClusterClusterMountInfoArgs']]]]):
953
900
  pulumi.set(self, "cluster_mount_infos", value)
954
901
 
955
- @property
902
+ @_builtins.property
956
903
  @pulumi.getter(name="clusterName")
957
- def cluster_name(self) -> Optional[pulumi.Input[builtins.str]]:
904
+ def cluster_name(self) -> Optional[pulumi.Input[_builtins.str]]:
958
905
  """
959
906
  Cluster name, which doesn't have to be unique. If not specified at creation, the cluster name will be an empty string.
960
907
  """
961
908
  return pulumi.get(self, "cluster_name")
962
909
 
963
910
  @cluster_name.setter
964
- def cluster_name(self, value: Optional[pulumi.Input[builtins.str]]):
911
+ def cluster_name(self, value: Optional[pulumi.Input[_builtins.str]]):
965
912
  pulumi.set(self, "cluster_name", value)
966
913
 
967
- @property
914
+ @_builtins.property
968
915
  @pulumi.getter(name="customTags")
969
- def custom_tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]]]:
916
+ def custom_tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]]:
970
917
  """
971
918
  should have tag `ResourceClass` set to value `Serverless`
972
919
 
@@ -993,12 +940,12 @@ class _ClusterState:
993
940
  return pulumi.get(self, "custom_tags")
994
941
 
995
942
  @custom_tags.setter
996
- def custom_tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]]]):
943
+ def custom_tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]]):
997
944
  pulumi.set(self, "custom_tags", value)
998
945
 
999
- @property
946
+ @_builtins.property
1000
947
  @pulumi.getter(name="dataSecurityMode")
1001
- def data_security_mode(self) -> Optional[pulumi.Input[builtins.str]]:
948
+ def data_security_mode(self) -> Optional[pulumi.Input[_builtins.str]]:
1002
949
  """
1003
950
  Select the security features of the cluster (see [API docs](https://docs.databricks.com/api/workspace/clusters/create#data_security_mode) for full list of values). [Unity Catalog requires](https://docs.databricks.com/data-governance/unity-catalog/compute.html#create-clusters--sql-warehouses-with-unity-catalog-access) `SINGLE_USER` or `USER_ISOLATION` mode. `LEGACY_PASSTHROUGH` for passthrough cluster and `LEGACY_TABLE_ACL` for Table ACL cluster. If omitted, default security features are enabled. To disable security features use `NONE` or legacy mode `NO_ISOLATION`. If `kind` is specified, then the following options are available:
1004
951
  * `DATA_SECURITY_MODE_AUTO`: Databricks will choose the most appropriate access mode depending on your compute configuration.
@@ -1008,22 +955,22 @@ class _ClusterState:
1008
955
  return pulumi.get(self, "data_security_mode")
1009
956
 
1010
957
  @data_security_mode.setter
1011
- def data_security_mode(self, value: Optional[pulumi.Input[builtins.str]]):
958
+ def data_security_mode(self, value: Optional[pulumi.Input[_builtins.str]]):
1012
959
  pulumi.set(self, "data_security_mode", value)
1013
960
 
1014
- @property
961
+ @_builtins.property
1015
962
  @pulumi.getter(name="defaultTags")
1016
- def default_tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]]]:
963
+ def default_tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]]:
1017
964
  """
1018
965
  (map) Tags that are added by Databricks by default, regardless of any `custom_tags` that may have been added. These include: Vendor: Databricks, Creator: <username_of_creator>, ClusterName: <name_of_cluster>, ClusterId: <id_of_cluster>, Name: <Databricks internal use>, and any workspace and pool tags.
1019
966
  """
1020
967
  return pulumi.get(self, "default_tags")
1021
968
 
1022
969
  @default_tags.setter
1023
- def default_tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]]]):
970
+ def default_tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]]):
1024
971
  pulumi.set(self, "default_tags", value)
1025
972
 
1026
- @property
973
+ @_builtins.property
1027
974
  @pulumi.getter(name="dockerImage")
1028
975
  def docker_image(self) -> Optional[pulumi.Input['ClusterDockerImageArgs']]:
1029
976
  return pulumi.get(self, "docker_image")
@@ -1032,55 +979,55 @@ class _ClusterState:
1032
979
  def docker_image(self, value: Optional[pulumi.Input['ClusterDockerImageArgs']]):
1033
980
  pulumi.set(self, "docker_image", value)
1034
981
 
1035
- @property
982
+ @_builtins.property
1036
983
  @pulumi.getter(name="driverInstancePoolId")
1037
- def driver_instance_pool_id(self) -> Optional[pulumi.Input[builtins.str]]:
984
+ def driver_instance_pool_id(self) -> Optional[pulumi.Input[_builtins.str]]:
1038
985
  """
1039
986
  similar to `instance_pool_id`, but for driver node. If omitted, and `instance_pool_id` is specified, then the driver will be allocated from that pool.
1040
987
  """
1041
988
  return pulumi.get(self, "driver_instance_pool_id")
1042
989
 
1043
990
  @driver_instance_pool_id.setter
1044
- def driver_instance_pool_id(self, value: Optional[pulumi.Input[builtins.str]]):
991
+ def driver_instance_pool_id(self, value: Optional[pulumi.Input[_builtins.str]]):
1045
992
  pulumi.set(self, "driver_instance_pool_id", value)
1046
993
 
1047
- @property
994
+ @_builtins.property
1048
995
  @pulumi.getter(name="driverNodeTypeId")
1049
- def driver_node_type_id(self) -> Optional[pulumi.Input[builtins.str]]:
996
+ def driver_node_type_id(self) -> Optional[pulumi.Input[_builtins.str]]:
1050
997
  """
1051
998
  The node type of the Spark driver. This field is optional; if unset, API will set the driver node type to the same value as `node_type_id` defined above.
1052
999
  """
1053
1000
  return pulumi.get(self, "driver_node_type_id")
1054
1001
 
1055
1002
  @driver_node_type_id.setter
1056
- def driver_node_type_id(self, value: Optional[pulumi.Input[builtins.str]]):
1003
+ def driver_node_type_id(self, value: Optional[pulumi.Input[_builtins.str]]):
1057
1004
  pulumi.set(self, "driver_node_type_id", value)
1058
1005
 
1059
- @property
1006
+ @_builtins.property
1060
1007
  @pulumi.getter(name="enableElasticDisk")
1061
- def enable_elastic_disk(self) -> Optional[pulumi.Input[builtins.bool]]:
1008
+ def enable_elastic_disk(self) -> Optional[pulumi.Input[_builtins.bool]]:
1062
1009
  """
1063
1010
  If you don't want to allocate a fixed number of EBS volumes at cluster creation time, use autoscaling local storage. With autoscaling local storage, Databricks monitors the amount of free disk space available on your cluster's Spark workers. If a worker begins to run too low on disk, Databricks automatically attaches a new EBS volume to the worker before it runs out of disk space. EBS volumes are attached up to a limit of 5 TB of total disk space per instance (including the instance's local storage). To scale down EBS usage, make sure you have `autotermination_minutes` and `autoscale` attributes set. More documentation available at [cluster configuration page](https://docs.databricks.com/clusters/configure.html#autoscaling-local-storage-1).
1064
1011
  """
1065
1012
  return pulumi.get(self, "enable_elastic_disk")
1066
1013
 
1067
1014
  @enable_elastic_disk.setter
1068
- def enable_elastic_disk(self, value: Optional[pulumi.Input[builtins.bool]]):
1015
+ def enable_elastic_disk(self, value: Optional[pulumi.Input[_builtins.bool]]):
1069
1016
  pulumi.set(self, "enable_elastic_disk", value)
1070
1017
 
1071
- @property
1018
+ @_builtins.property
1072
1019
  @pulumi.getter(name="enableLocalDiskEncryption")
1073
- def enable_local_disk_encryption(self) -> Optional[pulumi.Input[builtins.bool]]:
1020
+ def enable_local_disk_encryption(self) -> Optional[pulumi.Input[_builtins.bool]]:
1074
1021
  """
1075
1022
  Some instance types you use to run clusters may have locally attached disks. Databricks may store shuffle data or temporary data on these locally attached disks. To ensure that all data at rest is encrypted for all storage types, including shuffle data stored temporarily on your cluster's local disks, you can enable local disk encryption. When local disk encryption is enabled, Databricks generates an encryption key locally unique to each cluster node and uses it to encrypt all data stored on local disks. The scope of the key is local to each cluster node and is destroyed along with the cluster node itself. During its lifetime, the key resides in memory for encryption and decryption and is stored encrypted on the disk. *Your workloads may run more slowly because of the performance impact of reading and writing encrypted data to and from local volumes. This feature is not available for all Azure Databricks subscriptions. Contact your Microsoft or Databricks account representative to request access.*
1076
1023
  """
1077
1024
  return pulumi.get(self, "enable_local_disk_encryption")
1078
1025
 
1079
1026
  @enable_local_disk_encryption.setter
1080
- def enable_local_disk_encryption(self, value: Optional[pulumi.Input[builtins.bool]]):
1027
+ def enable_local_disk_encryption(self, value: Optional[pulumi.Input[_builtins.bool]]):
1081
1028
  pulumi.set(self, "enable_local_disk_encryption", value)
1082
1029
 
1083
- @property
1030
+ @_builtins.property
1084
1031
  @pulumi.getter(name="gcpAttributes")
1085
1032
  def gcp_attributes(self) -> Optional[pulumi.Input['ClusterGcpAttributesArgs']]:
1086
1033
  return pulumi.get(self, "gcp_attributes")
@@ -1089,19 +1036,19 @@ class _ClusterState:
1089
1036
  def gcp_attributes(self, value: Optional[pulumi.Input['ClusterGcpAttributesArgs']]):
1090
1037
  pulumi.set(self, "gcp_attributes", value)
1091
1038
 
1092
- @property
1039
+ @_builtins.property
1093
1040
  @pulumi.getter(name="idempotencyToken")
1094
- def idempotency_token(self) -> Optional[pulumi.Input[builtins.str]]:
1041
+ def idempotency_token(self) -> Optional[pulumi.Input[_builtins.str]]:
1095
1042
  """
1096
1043
  An optional token to guarantee the idempotency of cluster creation requests. If an active cluster with the provided token already exists, the request will not create a new cluster, but it will return the existing running cluster's ID instead. If you specify the idempotency token, upon failure, you can retry until the request succeeds. Databricks platform guarantees to launch exactly one cluster with that idempotency token. This token should have at most 64 characters.
1097
1044
  """
1098
1045
  return pulumi.get(self, "idempotency_token")
1099
1046
 
1100
1047
  @idempotency_token.setter
1101
- def idempotency_token(self, value: Optional[pulumi.Input[builtins.str]]):
1048
+ def idempotency_token(self, value: Optional[pulumi.Input[_builtins.str]]):
1102
1049
  pulumi.set(self, "idempotency_token", value)
1103
1050
 
1104
- @property
1051
+ @_builtins.property
1105
1052
  @pulumi.getter(name="initScripts")
1106
1053
  def init_scripts(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ClusterInitScriptArgs']]]]:
1107
1054
  return pulumi.get(self, "init_scripts")
@@ -1110,55 +1057,55 @@ class _ClusterState:
1110
1057
  def init_scripts(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ClusterInitScriptArgs']]]]):
1111
1058
  pulumi.set(self, "init_scripts", value)
1112
1059
 
1113
- @property
1060
+ @_builtins.property
1114
1061
  @pulumi.getter(name="instancePoolId")
1115
- def instance_pool_id(self) -> Optional[pulumi.Input[builtins.str]]:
1062
+ def instance_pool_id(self) -> Optional[pulumi.Input[_builtins.str]]:
1116
1063
  """
1117
1064
  To reduce cluster start time, you can attach a cluster to a predefined pool of idle instances. When attached to a pool, a cluster allocates its driver and worker nodes from the pool. If the pool does not have sufficient idle resources to accommodate the cluster's request, it expands by allocating new instances from the instance provider. When an attached cluster changes its state to `TERMINATED`, the instances it used are returned to the pool and reused by a different cluster.
1118
1065
  """
1119
1066
  return pulumi.get(self, "instance_pool_id")
1120
1067
 
1121
1068
  @instance_pool_id.setter
1122
- def instance_pool_id(self, value: Optional[pulumi.Input[builtins.str]]):
1069
+ def instance_pool_id(self, value: Optional[pulumi.Input[_builtins.str]]):
1123
1070
  pulumi.set(self, "instance_pool_id", value)
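A minimal sketch of the pool-backed behaviour described above, assuming an instance pool already exists in the workspace; the pool ID is a placeholder and `node_type_id` is omitted because the pool supplies the node type:

```python
import pulumi
import pulumi_databricks as databricks

latest_lts = databricks.get_spark_version(long_term_support=True)

# "1234-567890-pool12" is a placeholder for an existing instance pool ID.
pooled = databricks.Cluster("pooled",
    cluster_name="Pool Backed",
    spark_version=latest_lts.id,
    instance_pool_id="1234-567890-pool12",
    num_workers=2,
    autotermination_minutes=20)
```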
1124
1071
 
1125
- @property
1072
+ @_builtins.property
1126
1073
  @pulumi.getter(name="isPinned")
1127
- def is_pinned(self) -> Optional[pulumi.Input[builtins.bool]]:
1074
+ def is_pinned(self) -> Optional[pulumi.Input[_builtins.bool]]:
1128
1075
  """
1129
1076
  boolean value specifying if the cluster is pinned (not pinned by default). You must be a Databricks administrator to use this. The pinned clusters' maximum number is [limited to 100](https://docs.databricks.com/clusters/clusters-manage.html#pin-a-cluster), so `apply` may fail if you have more than that (this number may change over time, so check Databricks documentation for actual number).
1130
1077
  """
1131
1078
  return pulumi.get(self, "is_pinned")
1132
1079
 
1133
1080
  @is_pinned.setter
1134
- def is_pinned(self, value: Optional[pulumi.Input[builtins.bool]]):
1081
+ def is_pinned(self, value: Optional[pulumi.Input[_builtins.bool]]):
1135
1082
  pulumi.set(self, "is_pinned", value)
1136
1083
 
1137
- @property
1084
+ @_builtins.property
1138
1085
  @pulumi.getter(name="isSingleNode")
1139
- def is_single_node(self) -> Optional[pulumi.Input[builtins.bool]]:
1086
+ def is_single_node(self) -> Optional[pulumi.Input[_builtins.bool]]:
1140
1087
  """
1141
1088
  When set to true, Databricks will automatically set single node related `custom_tags`, `spark_conf`, and `num_workers`.
1142
1089
  """
1143
1090
  return pulumi.get(self, "is_single_node")
1144
1091
 
1145
1092
  @is_single_node.setter
1146
- def is_single_node(self, value: Optional[pulumi.Input[builtins.bool]]):
1093
+ def is_single_node(self, value: Optional[pulumi.Input[_builtins.bool]]):
1147
1094
  pulumi.set(self, "is_single_node", value)
1148
1095
 
1149
- @property
1096
+ @_builtins.property
1150
1097
  @pulumi.getter
1151
- def kind(self) -> Optional[pulumi.Input[builtins.str]]:
1098
+ def kind(self) -> Optional[pulumi.Input[_builtins.str]]:
1152
1099
  """
1153
1100
  The kind of compute described by this compute specification. Possible values (see [API docs](https://docs.databricks.com/api/workspace/clusters/create#kind) for full list): `CLASSIC_PREVIEW` (if corresponding public preview is enabled).
1154
1101
  """
1155
1102
  return pulumi.get(self, "kind")
1156
1103
 
1157
1104
  @kind.setter
1158
- def kind(self, value: Optional[pulumi.Input[builtins.str]]):
1105
+ def kind(self, value: Optional[pulumi.Input[_builtins.str]]):
1159
1106
  pulumi.set(self, "kind", value)
1160
1107
 
1161
- @property
1108
+ @_builtins.property
1162
1109
  @pulumi.getter
1163
1110
  def libraries(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ClusterLibraryArgs']]]]:
1164
1111
  return pulumi.get(self, "libraries")
@@ -1167,114 +1114,102 @@ class _ClusterState:
1167
1114
  def libraries(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ClusterLibraryArgs']]]]):
1168
1115
  pulumi.set(self, "libraries", value)
1169
1116
 
1170
- @property
1117
+ @_builtins.property
1171
1118
  @pulumi.getter(name="noWait")
1172
- def no_wait(self) -> Optional[pulumi.Input[builtins.bool]]:
1119
+ def no_wait(self) -> Optional[pulumi.Input[_builtins.bool]]:
1173
1120
  """
1174
1121
  If true, the provider will not wait for the cluster to reach `RUNNING` state when creating the cluster, allowing cluster creation and library installation to continue asynchronously. Defaults to false (the provider will wait for cluster creation and library installation to succeed).
1175
-
1176
- The following example demonstrates how to create an autoscaling cluster with [Delta Cache](https://docs.databricks.com/delta/optimizations/delta-cache.html) enabled:
1177
-
1178
- ```python
1179
- import pulumi
1180
- import pulumi_databricks as databricks
1181
-
1182
- smallest = databricks.get_node_type(local_disk=True)
1183
- latest_lts = databricks.get_spark_version(long_term_support=True)
1184
- shared_autoscaling = databricks.Cluster("shared_autoscaling",
1185
- cluster_name="Shared Autoscaling",
1186
- spark_version=latest_lts.id,
1187
- node_type_id=smallest.id,
1188
- autotermination_minutes=20,
1189
- autoscale={
1190
- "min_workers": 1,
1191
- "max_workers": 50,
1192
- },
1193
- spark_conf={
1194
- "spark.databricks.io.cache.enabled": "true",
1195
- "spark.databricks.io.cache.maxDiskUsage": "50g",
1196
- "spark.databricks.io.cache.maxMetaDataCache": "1g",
1197
- })
1198
- ```
1199
1122
  """
1200
1123
  return pulumi.get(self, "no_wait")
1201
1124
 
1202
1125
  @no_wait.setter
1203
- def no_wait(self, value: Optional[pulumi.Input[builtins.bool]]):
1126
+ def no_wait(self, value: Optional[pulumi.Input[_builtins.bool]]):
1204
1127
  pulumi.set(self, "no_wait", value)
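A short sketch of asynchronous creation with `no_wait`; apart from the invented names, every argument mirrors the signature documented in this file:

```python
import pulumi
import pulumi_databricks as databricks

smallest = databricks.get_node_type(local_disk=True)
latest_lts = databricks.get_spark_version(long_term_support=True)

# With no_wait=True the provider returns once the create call is accepted,
# so cluster start-up and library installation continue in the background.
async_cluster = databricks.Cluster("async_cluster",
    cluster_name="Async Create",
    spark_version=latest_lts.id,
    node_type_id=smallest.id,
    num_workers=1,
    autotermination_minutes=20,
    no_wait=True)
```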
1205
1128
 
1206
- @property
1129
+ @_builtins.property
1207
1130
  @pulumi.getter(name="nodeTypeId")
1208
- def node_type_id(self) -> Optional[pulumi.Input[builtins.str]]:
1131
+ def node_type_id(self) -> Optional[pulumi.Input[_builtins.str]]:
1209
1132
  """
1210
1133
  Any supported get_node_type id. If `instance_pool_id` is specified, this field is not needed.
1211
1134
  """
1212
1135
  return pulumi.get(self, "node_type_id")
1213
1136
 
1214
1137
  @node_type_id.setter
1215
- def node_type_id(self, value: Optional[pulumi.Input[builtins.str]]):
1138
+ def node_type_id(self, value: Optional[pulumi.Input[_builtins.str]]):
1216
1139
  pulumi.set(self, "node_type_id", value)
1217
1140
 
1218
- @property
1141
+ @_builtins.property
1219
1142
  @pulumi.getter(name="numWorkers")
1220
- def num_workers(self) -> Optional[pulumi.Input[builtins.int]]:
1143
+ def num_workers(self) -> Optional[pulumi.Input[_builtins.int]]:
1221
1144
  """
1222
1145
  Number of worker nodes that this cluster should have. A cluster has one Spark driver and `num_workers` executors for a total of `num_workers` + 1 Spark nodes.
1223
1146
  """
1224
1147
  return pulumi.get(self, "num_workers")
1225
1148
 
1226
1149
  @num_workers.setter
1227
- def num_workers(self, value: Optional[pulumi.Input[builtins.int]]):
1150
+ def num_workers(self, value: Optional[pulumi.Input[_builtins.int]]):
1228
1151
  pulumi.set(self, "num_workers", value)
1229
1152
 
1230
- @property
1153
+ @_builtins.property
1231
1154
  @pulumi.getter(name="policyId")
1232
- def policy_id(self) -> Optional[pulumi.Input[builtins.str]]:
1155
+ def policy_id(self) -> Optional[pulumi.Input[_builtins.str]]:
1233
1156
  """
1234
1157
  Identifier of Cluster Policy to validate cluster and preset certain defaults. *The primary use for cluster policies is to allow users to create policy-scoped clusters via UI rather than sharing configuration for API-created clusters.* For example, when you specify `policy_id` of [external metastore](https://docs.databricks.com/administration-guide/clusters/policies.html#external-metastore-policy) policy, you still have to fill in relevant keys for `spark_conf`. If relevant fields aren't filled in, then it will cause the configuration drift detected on each plan/apply, and Pulumi will try to apply the detected changes.
1235
1158
  """
1236
1159
  return pulumi.get(self, "policy_id")
1237
1160
 
1238
1161
  @policy_id.setter
1239
- def policy_id(self, value: Optional[pulumi.Input[builtins.str]]):
1162
+ def policy_id(self, value: Optional[pulumi.Input[_builtins.str]]):
1240
1163
  pulumi.set(self, "policy_id", value)
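A sketch combining `policy_id` with `apply_policy_default_values`; the policy ID shown is hypothetical, and in practice it would come from a `databricks.ClusterPolicy` resource or an existing policy in the workspace:

```python
import pulumi
import pulumi_databricks as databricks

smallest = databricks.get_node_type(local_disk=True)
latest_lts = databricks.get_spark_version(long_term_support=True)

# "ABC123DEF4567890" stands in for a real cluster policy ID.
# apply_policy_default_values lets the policy fill in attributes not set here.
policy_backed = databricks.Cluster("policy_backed",
    cluster_name="Policy Backed",
    spark_version=latest_lts.id,
    node_type_id=smallest.id,
    num_workers=1,
    autotermination_minutes=20,
    policy_id="ABC123DEF4567890",
    apply_policy_default_values=True)
```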
1241
1164
 
1242
- @property
1165
+ @_builtins.property
1166
+ @pulumi.getter(name="providerConfig")
1167
+ def provider_config(self) -> Optional[pulumi.Input['ClusterProviderConfigArgs']]:
1168
+ """
1169
+ Configure the provider for management through account provider. This block consists of the following fields:
1170
+ """
1171
+ return pulumi.get(self, "provider_config")
1172
+
1173
+ @provider_config.setter
1174
+ def provider_config(self, value: Optional[pulumi.Input['ClusterProviderConfigArgs']]):
1175
+ pulumi.set(self, "provider_config", value)
1176
+
1177
+ @_builtins.property
1243
1178
  @pulumi.getter(name="remoteDiskThroughput")
1244
- def remote_disk_throughput(self) -> Optional[pulumi.Input[builtins.int]]:
1179
+ def remote_disk_throughput(self) -> Optional[pulumi.Input[_builtins.int]]:
1245
1180
  return pulumi.get(self, "remote_disk_throughput")
1246
1181
 
1247
1182
  @remote_disk_throughput.setter
1248
- def remote_disk_throughput(self, value: Optional[pulumi.Input[builtins.int]]):
1183
+ def remote_disk_throughput(self, value: Optional[pulumi.Input[_builtins.int]]):
1249
1184
  pulumi.set(self, "remote_disk_throughput", value)
1250
1185
 
1251
- @property
1186
+ @_builtins.property
1252
1187
  @pulumi.getter(name="runtimeEngine")
1253
- def runtime_engine(self) -> Optional[pulumi.Input[builtins.str]]:
1188
+ def runtime_engine(self) -> Optional[pulumi.Input[_builtins.str]]:
1254
1189
  """
1255
1190
  The type of runtime engine to use. If not specified, the runtime engine type is inferred based on the spark_version value. Allowed values include: `PHOTON`, `STANDARD`.
1256
1191
  """
1257
1192
  return pulumi.get(self, "runtime_engine")
1258
1193
 
1259
1194
  @runtime_engine.setter
1260
- def runtime_engine(self, value: Optional[pulumi.Input[builtins.str]]):
1195
+ def runtime_engine(self, value: Optional[pulumi.Input[_builtins.str]]):
1261
1196
  pulumi.set(self, "runtime_engine", value)
1262
1197
 
1263
- @property
1198
+ @_builtins.property
1264
1199
  @pulumi.getter(name="singleUserName")
1265
- def single_user_name(self) -> Optional[pulumi.Input[builtins.str]]:
1200
+ def single_user_name(self) -> Optional[pulumi.Input[_builtins.str]]:
1266
1201
  """
1267
1202
  The optional user name of the user (or group name if `kind` is specified) to assign to an interactive cluster. This field is required when using `data_security_mode` set to `SINGLE_USER` or AAD Passthrough for Azure Data Lake Storage (ADLS) with a single-user cluster (i.e., not high-concurrency clusters).
1268
1203
  """
1269
1204
  return pulumi.get(self, "single_user_name")
1270
1205
 
1271
1206
  @single_user_name.setter
1272
- def single_user_name(self, value: Optional[pulumi.Input[builtins.str]]):
1207
+ def single_user_name(self, value: Optional[pulumi.Input[_builtins.str]]):
1273
1208
  pulumi.set(self, "single_user_name", value)
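The `SINGLE_USER` access mode described above pairs `data_security_mode` with `single_user_name`; a minimal sketch, with the user name being a placeholder:

```python
import pulumi
import pulumi_databricks as databricks

smallest = databricks.get_node_type(local_disk=True)
latest_lts = databricks.get_spark_version(long_term_support=True)

# A dedicated (single-user) cluster: SINGLE_USER mode requires single_user_name.
dedicated = databricks.Cluster("dedicated",
    cluster_name="Alice's Cluster",
    spark_version=latest_lts.id,
    node_type_id=smallest.id,
    num_workers=1,
    autotermination_minutes=20,
    data_security_mode="SINGLE_USER",
    single_user_name="alice@example.com")
```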
1274
1209
 
1275
- @property
1210
+ @_builtins.property
1276
1211
  @pulumi.getter(name="sparkConf")
1277
- def spark_conf(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]]]:
1212
+ def spark_conf(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]]:
1278
1213
  """
1279
1214
  should have the following items:
1280
1215
  * `spark.databricks.repl.allowedLanguages` set to a list of supported languages, for example: `python,sql`, or `python,sql,r`. Scala is not supported!
@@ -1283,88 +1218,88 @@ class _ClusterState:
1283
1218
  return pulumi.get(self, "spark_conf")
1284
1219
 
1285
1220
  @spark_conf.setter
1286
- def spark_conf(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]]]):
1221
+ def spark_conf(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]]):
1287
1222
  pulumi.set(self, "spark_conf", value)
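The `spark_conf` items and the `ResourceClass` tag requirement described in these docstrings fit together as shown in the sketch below; the resource names are invented, and the pattern follows the autoscaling example that appears elsewhere in this file:

```python
import pulumi
import pulumi_databricks as databricks

smallest = databricks.get_node_type(local_disk=True)
latest_lts = databricks.get_spark_version(long_term_support=True)

# The spark_conf keys and the ResourceClass tag follow the requirements
# listed in the spark_conf and custom_tags docstrings above.
high_concurrency = databricks.Cluster("high_concurrency",
    cluster_name="High Concurrency",
    spark_version=latest_lts.id,
    node_type_id=smallest.id,
    autotermination_minutes=20,
    autoscale={
        "min_workers": 1,
        "max_workers": 10,
    },
    spark_conf={
        "spark.databricks.repl.allowedLanguages": "python,sql",
        "spark.databricks.cluster.profile": "serverless",
    },
    custom_tags={
        "ResourceClass": "Serverless",
    })
```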
1288
1223
 
1289
- @property
1224
+ @_builtins.property
1290
1225
  @pulumi.getter(name="sparkEnvVars")
1291
- def spark_env_vars(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]]]:
1226
+ def spark_env_vars(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]]:
1292
1227
  """
1293
1228
  Map with environment variable key-value pairs to fine-tune Spark clusters. Key-value pairs of the form (X,Y) are exported (i.e., X='Y') while launching the driver and workers.
1294
1229
  """
1295
1230
  return pulumi.get(self, "spark_env_vars")
1296
1231
 
1297
1232
  @spark_env_vars.setter
1298
- def spark_env_vars(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]]]):
1233
+ def spark_env_vars(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]]):
1299
1234
  pulumi.set(self, "spark_env_vars", value)
1300
1235
 
1301
- @property
1236
+ @_builtins.property
1302
1237
  @pulumi.getter(name="sparkVersion")
1303
- def spark_version(self) -> Optional[pulumi.Input[builtins.str]]:
1238
+ def spark_version(self) -> Optional[pulumi.Input[_builtins.str]]:
1304
1239
  """
1305
1240
  [Runtime version](https://docs.databricks.com/runtime/index.html) of the cluster. Any supported get_spark_version id. We advise using Cluster Policies to restrict the list of versions for simplicity while maintaining enough control.
1306
1241
  """
1307
1242
  return pulumi.get(self, "spark_version")
1308
1243
 
1309
1244
  @spark_version.setter
1310
- def spark_version(self, value: Optional[pulumi.Input[builtins.str]]):
1245
+ def spark_version(self, value: Optional[pulumi.Input[_builtins.str]]):
1311
1246
  pulumi.set(self, "spark_version", value)
1312
1247
 
1313
- @property
1248
+ @_builtins.property
1314
1249
  @pulumi.getter(name="sshPublicKeys")
1315
- def ssh_public_keys(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[builtins.str]]]]:
1250
+ def ssh_public_keys(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]]:
1316
1251
  """
1317
1252
  SSH public key contents that will be added to each Spark node in this cluster. The corresponding private keys can be used to login with the user name ubuntu on port 2200. You can specify up to 10 keys.
1318
1253
  """
1319
1254
  return pulumi.get(self, "ssh_public_keys")
1320
1255
 
1321
1256
  @ssh_public_keys.setter
1322
- def ssh_public_keys(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[builtins.str]]]]):
1257
+ def ssh_public_keys(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]]):
1323
1258
  pulumi.set(self, "ssh_public_keys", value)
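A brief sketch of supplying SSH keys; the key material below is a placeholder:

```python
import pulumi
import pulumi_databricks as databricks

smallest = databricks.get_node_type(local_disk=True)
latest_lts = databricks.get_spark_version(long_term_support=True)

# The key is a placeholder; up to 10 keys can be supplied, and the matching
# private key then allows SSH as user "ubuntu" on port 2200.
ssh_cluster = databricks.Cluster("ssh_cluster",
    cluster_name="SSH Enabled",
    spark_version=latest_lts.id,
    node_type_id=smallest.id,
    num_workers=1,
    autotermination_minutes=30,
    ssh_public_keys=["ssh-ed25519 AAAAC3Nza... user@example.com"])
```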
1324
1259
 
1325
- @property
1260
+ @_builtins.property
1326
1261
  @pulumi.getter
1327
- def state(self) -> Optional[pulumi.Input[builtins.str]]:
1262
+ def state(self) -> Optional[pulumi.Input[_builtins.str]]:
1328
1263
  """
1329
1264
  (string) State of the cluster.
1330
1265
  """
1331
1266
  return pulumi.get(self, "state")
1332
1267
 
1333
1268
  @state.setter
1334
- def state(self, value: Optional[pulumi.Input[builtins.str]]):
1269
+ def state(self, value: Optional[pulumi.Input[_builtins.str]]):
1335
1270
  pulumi.set(self, "state", value)
1336
1271
 
1337
- @property
1272
+ @_builtins.property
1338
1273
  @pulumi.getter(name="totalInitialRemoteDiskSize")
1339
- def total_initial_remote_disk_size(self) -> Optional[pulumi.Input[builtins.int]]:
1274
+ def total_initial_remote_disk_size(self) -> Optional[pulumi.Input[_builtins.int]]:
1340
1275
  return pulumi.get(self, "total_initial_remote_disk_size")
1341
1276
 
1342
1277
  @total_initial_remote_disk_size.setter
1343
- def total_initial_remote_disk_size(self, value: Optional[pulumi.Input[builtins.int]]):
1278
+ def total_initial_remote_disk_size(self, value: Optional[pulumi.Input[_builtins.int]]):
1344
1279
  pulumi.set(self, "total_initial_remote_disk_size", value)
1345
1280
 
1346
- @property
1281
+ @_builtins.property
1347
1282
  @pulumi.getter
1348
- def url(self) -> Optional[pulumi.Input[builtins.str]]:
1283
+ def url(self) -> Optional[pulumi.Input[_builtins.str]]:
1349
1284
  return pulumi.get(self, "url")
1350
1285
 
1351
1286
  @url.setter
1352
- def url(self, value: Optional[pulumi.Input[builtins.str]]):
1287
+ def url(self, value: Optional[pulumi.Input[_builtins.str]]):
1353
1288
  pulumi.set(self, "url", value)
1354
1289
 
1355
- @property
1290
+ @_builtins.property
1356
1291
  @pulumi.getter(name="useMlRuntime")
1357
- def use_ml_runtime(self) -> Optional[pulumi.Input[builtins.bool]]:
1292
+ def use_ml_runtime(self) -> Optional[pulumi.Input[_builtins.bool]]:
1358
1293
  """
1359
1294
  Whether the ML runtime should be selected or not. The actual runtime is determined by `spark_version` (DBR release), this field `use_ml_runtime`, and whether `node_type_id` is a GPU node or not.
1360
1295
  """
1361
1296
  return pulumi.get(self, "use_ml_runtime")
1362
1297
 
1363
1298
  @use_ml_runtime.setter
1364
- def use_ml_runtime(self, value: Optional[pulumi.Input[builtins.bool]]):
1299
+ def use_ml_runtime(self, value: Optional[pulumi.Input[_builtins.bool]]):
1365
1300
  pulumi.set(self, "use_ml_runtime", value)
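A sketch of the newer kind-based fields (`kind`, `is_single_node`, `use_ml_runtime`) used together; this assumes the `CLASSIC_PREVIEW` preview mentioned in the `kind` docstring is enabled for the workspace, and the combination is illustrative rather than taken from the package:

```python
import pulumi
import pulumi_databricks as databricks

smallest = databricks.get_node_type(local_disk=True)
latest_lts = databricks.get_spark_version(long_term_support=True)

# Illustrative only: with is_single_node=True, Databricks sets the single-node
# custom_tags, spark_conf and num_workers automatically (see docstring above).
ml_single_node = databricks.Cluster("ml_single_node",
    cluster_name="ML Single Node",
    spark_version=latest_lts.id,
    node_type_id=smallest.id,
    autotermination_minutes=20,
    kind="CLASSIC_PREVIEW",
    is_single_node=True,
    use_ml_runtime=True)
```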
1366
1301
 
1367
- @property
1302
+ @_builtins.property
1368
1303
  @pulumi.getter(name="workloadType")
1369
1304
  def workload_type(self) -> Optional[pulumi.Input['ClusterWorkloadTypeArgs']]:
1370
1305
  return pulumi.get(self, "workload_type")
@@ -1380,42 +1315,43 @@ class Cluster(pulumi.CustomResource):
1380
1315
  def __init__(__self__,
1381
1316
  resource_name: str,
1382
1317
  opts: Optional[pulumi.ResourceOptions] = None,
1383
- apply_policy_default_values: Optional[pulumi.Input[builtins.bool]] = None,
1318
+ apply_policy_default_values: Optional[pulumi.Input[_builtins.bool]] = None,
1384
1319
  autoscale: Optional[pulumi.Input[Union['ClusterAutoscaleArgs', 'ClusterAutoscaleArgsDict']]] = None,
1385
- autotermination_minutes: Optional[pulumi.Input[builtins.int]] = None,
1320
+ autotermination_minutes: Optional[pulumi.Input[_builtins.int]] = None,
1386
1321
  aws_attributes: Optional[pulumi.Input[Union['ClusterAwsAttributesArgs', 'ClusterAwsAttributesArgsDict']]] = None,
1387
1322
  azure_attributes: Optional[pulumi.Input[Union['ClusterAzureAttributesArgs', 'ClusterAzureAttributesArgsDict']]] = None,
1388
1323
  cluster_log_conf: Optional[pulumi.Input[Union['ClusterClusterLogConfArgs', 'ClusterClusterLogConfArgsDict']]] = None,
1389
1324
  cluster_mount_infos: Optional[pulumi.Input[Sequence[pulumi.Input[Union['ClusterClusterMountInfoArgs', 'ClusterClusterMountInfoArgsDict']]]]] = None,
1390
- cluster_name: Optional[pulumi.Input[builtins.str]] = None,
1391
- custom_tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]]] = None,
1392
- data_security_mode: Optional[pulumi.Input[builtins.str]] = None,
1325
+ cluster_name: Optional[pulumi.Input[_builtins.str]] = None,
1326
+ custom_tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]] = None,
1327
+ data_security_mode: Optional[pulumi.Input[_builtins.str]] = None,
1393
1328
  docker_image: Optional[pulumi.Input[Union['ClusterDockerImageArgs', 'ClusterDockerImageArgsDict']]] = None,
1394
- driver_instance_pool_id: Optional[pulumi.Input[builtins.str]] = None,
1395
- driver_node_type_id: Optional[pulumi.Input[builtins.str]] = None,
1396
- enable_elastic_disk: Optional[pulumi.Input[builtins.bool]] = None,
1397
- enable_local_disk_encryption: Optional[pulumi.Input[builtins.bool]] = None,
1329
+ driver_instance_pool_id: Optional[pulumi.Input[_builtins.str]] = None,
1330
+ driver_node_type_id: Optional[pulumi.Input[_builtins.str]] = None,
1331
+ enable_elastic_disk: Optional[pulumi.Input[_builtins.bool]] = None,
1332
+ enable_local_disk_encryption: Optional[pulumi.Input[_builtins.bool]] = None,
1398
1333
  gcp_attributes: Optional[pulumi.Input[Union['ClusterGcpAttributesArgs', 'ClusterGcpAttributesArgsDict']]] = None,
1399
- idempotency_token: Optional[pulumi.Input[builtins.str]] = None,
1334
+ idempotency_token: Optional[pulumi.Input[_builtins.str]] = None,
1400
1335
  init_scripts: Optional[pulumi.Input[Sequence[pulumi.Input[Union['ClusterInitScriptArgs', 'ClusterInitScriptArgsDict']]]]] = None,
1401
- instance_pool_id: Optional[pulumi.Input[builtins.str]] = None,
1402
- is_pinned: Optional[pulumi.Input[builtins.bool]] = None,
1403
- is_single_node: Optional[pulumi.Input[builtins.bool]] = None,
1404
- kind: Optional[pulumi.Input[builtins.str]] = None,
1336
+ instance_pool_id: Optional[pulumi.Input[_builtins.str]] = None,
1337
+ is_pinned: Optional[pulumi.Input[_builtins.bool]] = None,
1338
+ is_single_node: Optional[pulumi.Input[_builtins.bool]] = None,
1339
+ kind: Optional[pulumi.Input[_builtins.str]] = None,
1405
1340
  libraries: Optional[pulumi.Input[Sequence[pulumi.Input[Union['ClusterLibraryArgs', 'ClusterLibraryArgsDict']]]]] = None,
1406
- no_wait: Optional[pulumi.Input[builtins.bool]] = None,
1407
- node_type_id: Optional[pulumi.Input[builtins.str]] = None,
1408
- num_workers: Optional[pulumi.Input[builtins.int]] = None,
1409
- policy_id: Optional[pulumi.Input[builtins.str]] = None,
1410
- remote_disk_throughput: Optional[pulumi.Input[builtins.int]] = None,
1411
- runtime_engine: Optional[pulumi.Input[builtins.str]] = None,
1412
- single_user_name: Optional[pulumi.Input[builtins.str]] = None,
1413
- spark_conf: Optional[pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]]] = None,
1414
- spark_env_vars: Optional[pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]]] = None,
1415
- spark_version: Optional[pulumi.Input[builtins.str]] = None,
1416
- ssh_public_keys: Optional[pulumi.Input[Sequence[pulumi.Input[builtins.str]]]] = None,
1417
- total_initial_remote_disk_size: Optional[pulumi.Input[builtins.int]] = None,
1418
- use_ml_runtime: Optional[pulumi.Input[builtins.bool]] = None,
1341
+ no_wait: Optional[pulumi.Input[_builtins.bool]] = None,
1342
+ node_type_id: Optional[pulumi.Input[_builtins.str]] = None,
1343
+ num_workers: Optional[pulumi.Input[_builtins.int]] = None,
1344
+ policy_id: Optional[pulumi.Input[_builtins.str]] = None,
1345
+ provider_config: Optional[pulumi.Input[Union['ClusterProviderConfigArgs', 'ClusterProviderConfigArgsDict']]] = None,
1346
+ remote_disk_throughput: Optional[pulumi.Input[_builtins.int]] = None,
1347
+ runtime_engine: Optional[pulumi.Input[_builtins.str]] = None,
1348
+ single_user_name: Optional[pulumi.Input[_builtins.str]] = None,
1349
+ spark_conf: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]] = None,
1350
+ spark_env_vars: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]] = None,
1351
+ spark_version: Optional[pulumi.Input[_builtins.str]] = None,
1352
+ ssh_public_keys: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]] = None,
1353
+ total_initial_remote_disk_size: Optional[pulumi.Input[_builtins.int]] = None,
1354
+ use_ml_runtime: Optional[pulumi.Input[_builtins.bool]] = None,
1419
1355
  workload_type: Optional[pulumi.Input[Union['ClusterWorkloadTypeArgs', 'ClusterWorkloadTypeArgsDict']]] = None,
1420
1356
  __props__=None):
1421
1357
  """
@@ -1431,10 +1367,10 @@ class Cluster(pulumi.CustomResource):
1431
1367
 
1432
1368
  :param str resource_name: The name of the resource.
1433
1369
  :param pulumi.ResourceOptions opts: Options for the resource.
1434
- :param pulumi.Input[builtins.bool] apply_policy_default_values: Whether to use policy default values for missing cluster attributes.
1435
- :param pulumi.Input[builtins.int] autotermination_minutes: Automatically terminate the cluster after being inactive for this time in minutes. If specified, the threshold must be between 10 and 10000 minutes. You can also set this value to 0 to explicitly disable automatic termination. Defaults to `60`. *We highly recommend having this setting present for Interactive/BI clusters.*
1436
- :param pulumi.Input[builtins.str] cluster_name: Cluster name, which doesn't have to be unique. If not specified at creation, the cluster name will be an empty string.
1437
- :param pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]] custom_tags: should have tag `ResourceClass` set to value `Serverless`
1370
+ :param pulumi.Input[_builtins.bool] apply_policy_default_values: Whether to use policy default values for missing cluster attributes.
1371
+ :param pulumi.Input[_builtins.int] autotermination_minutes: Automatically terminate the cluster after being inactive for this time in minutes. If specified, the threshold must be between 10 and 10000 minutes. You can also set this value to 0 to explicitly disable automatic termination. Defaults to `60`. *We highly recommend having this setting present for Interactive/BI clusters.*
1372
+ :param pulumi.Input[_builtins.str] cluster_name: Cluster name, which doesn't have to be unique. If not specified at creation, the cluster name will be an empty string.
1373
+ :param pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]] custom_tags: should have tag `ResourceClass` set to value `Serverless`
1438
1374
 
1439
1375
  For example:
1440
1376
 
@@ -1455,56 +1391,33 @@ class Cluster(pulumi.CustomResource):
1455
1391
  "ResourceClass": "Serverless",
1456
1392
  })
1457
1393
  ```
1458
- :param pulumi.Input[builtins.str] data_security_mode: Select the security features of the cluster (see [API docs](https://docs.databricks.com/api/workspace/clusters/create#data_security_mode) for full list of values). [Unity Catalog requires](https://docs.databricks.com/data-governance/unity-catalog/compute.html#create-clusters--sql-warehouses-with-unity-catalog-access) `SINGLE_USER` or `USER_ISOLATION` mode. `LEGACY_PASSTHROUGH` for passthrough cluster and `LEGACY_TABLE_ACL` for Table ACL cluster. If omitted, default security features are enabled. To disable security features use `NONE` or legacy mode `NO_ISOLATION`. If `kind` is specified, then the following options are available:
1394
+ :param pulumi.Input[_builtins.str] data_security_mode: Select the security features of the cluster (see [API docs](https://docs.databricks.com/api/workspace/clusters/create#data_security_mode) for full list of values). [Unity Catalog requires](https://docs.databricks.com/data-governance/unity-catalog/compute.html#create-clusters--sql-warehouses-with-unity-catalog-access) `SINGLE_USER` or `USER_ISOLATION` mode. `LEGACY_PASSTHROUGH` for passthrough cluster and `LEGACY_TABLE_ACL` for Table ACL cluster. If omitted, default security features are enabled. To disable security features use `NONE` or legacy mode `NO_ISOLATION`. If `kind` is specified, then the following options are available:
1459
1395
  * `DATA_SECURITY_MODE_AUTO`: Databricks will choose the most appropriate access mode depending on your compute configuration.
1460
1396
  * `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`.
1461
1397
  * `DATA_SECURITY_MODE_DEDICATED`: Alias for `SINGLE_USER`.
1462
- :param pulumi.Input[builtins.str] driver_instance_pool_id: similar to `instance_pool_id`, but for driver node. If omitted, and `instance_pool_id` is specified, then the driver will be allocated from that pool.
1463
- :param pulumi.Input[builtins.str] driver_node_type_id: The node type of the Spark driver. This field is optional; if unset, API will set the driver node type to the same value as `node_type_id` defined above.
1464
- :param pulumi.Input[builtins.bool] enable_elastic_disk: If you don't want to allocate a fixed number of EBS volumes at cluster creation time, use autoscaling local storage. With autoscaling local storage, Databricks monitors the amount of free disk space available on your cluster's Spark workers. If a worker begins to run too low on disk, Databricks automatically attaches a new EBS volume to the worker before it runs out of disk space. EBS volumes are attached up to a limit of 5 TB of total disk space per instance (including the instance's local storage). To scale down EBS usage, make sure you have `autotermination_minutes` and `autoscale` attributes set. More documentation available at [cluster configuration page](https://docs.databricks.com/clusters/configure.html#autoscaling-local-storage-1).
1465
- :param pulumi.Input[builtins.bool] enable_local_disk_encryption: Some instance types you use to run clusters may have locally attached disks. Databricks may store shuffle data or temporary data on these locally attached disks. To ensure that all data at rest is encrypted for all storage types, including shuffle data stored temporarily on your cluster's local disks, you can enable local disk encryption. When local disk encryption is enabled, Databricks generates an encryption key locally unique to each cluster node and uses it to encrypt all data stored on local disks. The scope of the key is local to each cluster node and is destroyed along with the cluster node itself. During its lifetime, the key resides in memory for encryption and decryption and is stored encrypted on the disk. *Your workloads may run more slowly because of the performance impact of reading and writing encrypted data to and from local volumes. This feature is not available for all Azure Databricks subscriptions. Contact your Microsoft or Databricks account representative to request access.*
1466
- :param pulumi.Input[builtins.str] idempotency_token: An optional token to guarantee the idempotency of cluster creation requests. If an active cluster with the provided token already exists, the request will not create a new cluster, but it will return the existing running cluster's ID instead. If you specify the idempotency token, upon failure, you can retry until the request succeeds. Databricks platform guarantees to launch exactly one cluster with that idempotency token. This token should have at most 64 characters.
1467
- :param pulumi.Input[builtins.str] instance_pool_id: To reduce cluster start time, you can attach a cluster to a predefined pool of idle instances. When attached to a pool, a cluster allocates its driver and worker nodes from the pool. If the pool does not have sufficient idle resources to accommodate the cluster's request, it expands by allocating new instances from the instance provider. When an attached cluster changes its state to `TERMINATED`, the instances it used are returned to the pool and reused by a different cluster.
1468
- :param pulumi.Input[builtins.bool] is_pinned: boolean value specifying if the cluster is pinned (not pinned by default). You must be a Databricks administrator to use this. The pinned clusters' maximum number is [limited to 100](https://docs.databricks.com/clusters/clusters-manage.html#pin-a-cluster), so `apply` may fail if you have more than that (this number may change over time, so check Databricks documentation for actual number).
1469
- :param pulumi.Input[builtins.bool] is_single_node: When set to true, Databricks will automatically set single node related `custom_tags`, `spark_conf`, and `num_workers`.
1470
- :param pulumi.Input[builtins.str] kind: The kind of compute described by this compute specification. Possible values (see [API docs](https://docs.databricks.com/api/workspace/clusters/create#kind) for full list): `CLASSIC_PREVIEW` (if corresponding public preview is enabled).
1471
- :param pulumi.Input[builtins.bool] no_wait: If true, the provider will not wait for the cluster to reach `RUNNING` state when creating the cluster, allowing cluster creation and library installation to continue asynchronously. Defaults to false (the provider will wait for cluster creation and library installation to succeed).
1472
-
1473
- The following example demonstrates how to create an autoscaling cluster with [Delta Cache](https://docs.databricks.com/delta/optimizations/delta-cache.html) enabled:
1474
-
1475
- ```python
1476
- import pulumi
1477
- import pulumi_databricks as databricks
1478
-
1479
- smallest = databricks.get_node_type(local_disk=True)
1480
- latest_lts = databricks.get_spark_version(long_term_support=True)
1481
- shared_autoscaling = databricks.Cluster("shared_autoscaling",
1482
- cluster_name="Shared Autoscaling",
1483
- spark_version=latest_lts.id,
1484
- node_type_id=smallest.id,
1485
- autotermination_minutes=20,
1486
- autoscale={
1487
- "min_workers": 1,
1488
- "max_workers": 50,
1489
- },
1490
- spark_conf={
1491
- "spark.databricks.io.cache.enabled": "true",
1492
- "spark.databricks.io.cache.maxDiskUsage": "50g",
1493
- "spark.databricks.io.cache.maxMetaDataCache": "1g",
1494
- })
1495
- ```
1496
- :param pulumi.Input[builtins.str] node_type_id: Any supported get_node_type id. If `instance_pool_id` is specified, this field is not needed.
1497
- :param pulumi.Input[builtins.int] num_workers: Number of worker nodes that this cluster should have. A cluster has one Spark driver and `num_workers` executors for a total of `num_workers` + 1 Spark nodes.
1498
- :param pulumi.Input[builtins.str] policy_id: Identifier of Cluster Policy to validate cluster and preset certain defaults. *The primary use for cluster policies is to allow users to create policy-scoped clusters via UI rather than sharing configuration for API-created clusters.* For example, when you specify `policy_id` of [external metastore](https://docs.databricks.com/administration-guide/clusters/policies.html#external-metastore-policy) policy, you still have to fill in relevant keys for `spark_conf`. If relevant fields aren't filled in, then it will cause the configuration drift detected on each plan/apply, and Pulumi will try to apply the detected changes.
1499
- :param pulumi.Input[builtins.str] runtime_engine: The type of runtime engine to use. If not specified, the runtime engine type is inferred based on the spark_version value. Allowed values include: `PHOTON`, `STANDARD`.
1500
- :param pulumi.Input[builtins.str] single_user_name: The optional user name of the user (or group name if `kind` if specified) to assign to an interactive cluster. This field is required when using `data_security_mode` set to `SINGLE_USER` or AAD Passthrough for Azure Data Lake Storage (ADLS) with a single-user cluster (i.e., not high-concurrency clusters).
1501
- :param pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]] spark_conf: should have following items:
1398
+ :param pulumi.Input[_builtins.str] driver_instance_pool_id: similar to `instance_pool_id`, but for driver node. If omitted, and `instance_pool_id` is specified, then the driver will be allocated from that pool.
1399
+ :param pulumi.Input[_builtins.str] driver_node_type_id: The node type of the Spark driver. This field is optional; if unset, API will set the driver node type to the same value as `node_type_id` defined above.
1400
+ :param pulumi.Input[_builtins.bool] enable_elastic_disk: If you don't want to allocate a fixed number of EBS volumes at cluster creation time, use autoscaling local storage. With autoscaling local storage, Databricks monitors the amount of free disk space available on your cluster's Spark workers. If a worker begins to run too low on disk, Databricks automatically attaches a new EBS volume to the worker before it runs out of disk space. EBS volumes are attached up to a limit of 5 TB of total disk space per instance (including the instance's local storage). To scale down EBS usage, make sure you have `autotermination_minutes` and `autoscale` attributes set. More documentation available at [cluster configuration page](https://docs.databricks.com/clusters/configure.html#autoscaling-local-storage-1).
1401
+ :param pulumi.Input[_builtins.bool] enable_local_disk_encryption: Some instance types you use to run clusters may have locally attached disks. Databricks may store shuffle data or temporary data on these locally attached disks. To ensure that all data at rest is encrypted for all storage types, including shuffle data stored temporarily on your cluster's local disks, you can enable local disk encryption. When local disk encryption is enabled, Databricks generates an encryption key locally unique to each cluster node and uses it to encrypt all data stored on local disks. The scope of the key is local to each cluster node and is destroyed along with the cluster node itself. During its lifetime, the key resides in memory for encryption and decryption and is stored encrypted on the disk. *Your workloads may run more slowly because of the performance impact of reading and writing encrypted data to and from local volumes. This feature is not available for all Azure Databricks subscriptions. Contact your Microsoft or Databricks account representative to request access.*
1402
+ :param pulumi.Input[_builtins.str] idempotency_token: An optional token to guarantee the idempotency of cluster creation requests. If an active cluster with the provided token already exists, the request will not create a new cluster, but it will return the existing running cluster's ID instead. If you specify the idempotency token, upon failure, you can retry until the request succeeds. Databricks platform guarantees to launch exactly one cluster with that idempotency token. This token should have at most 64 characters.
1403
+ :param pulumi.Input[_builtins.str] instance_pool_id: To reduce cluster start time, you can attach a cluster to a predefined pool of idle instances. When attached to a pool, a cluster allocates its driver and worker nodes from the pool. If the pool does not have sufficient idle resources to accommodate the cluster's request, it expands by allocating new instances from the instance provider. When an attached cluster changes its state to `TERMINATED`, the instances it used are returned to the pool and reused by a different cluster.
1404
+ :param pulumi.Input[_builtins.bool] is_pinned: boolean value specifying if the cluster is pinned (not pinned by default). You must be a Databricks administrator to use this. The pinned clusters' maximum number is [limited to 100](https://docs.databricks.com/clusters/clusters-manage.html#pin-a-cluster), so `apply` may fail if you have more than that (this number may change over time, so check Databricks documentation for actual number).
1405
+ :param pulumi.Input[_builtins.bool] is_single_node: When set to true, Databricks will automatically set single node related `custom_tags`, `spark_conf`, and `num_workers`.
1406
+ :param pulumi.Input[_builtins.str] kind: The kind of compute described by this compute specification. Possible values (see [API docs](https://docs.databricks.com/api/workspace/clusters/create#kind) for full list): `CLASSIC_PREVIEW` (if corresponding public preview is enabled).
1407
+ :param pulumi.Input[_builtins.bool] no_wait: If true, the provider will not wait for the cluster to reach `RUNNING` state when creating the cluster, allowing cluster creation and library installation to continue asynchronously. Defaults to false (the provider will wait for cluster creation and library installation to succeed).
1408
+ :param pulumi.Input[_builtins.str] node_type_id: Any supported get_node_type id. If `instance_pool_id` is specified, this field is not needed.
1409
+ :param pulumi.Input[_builtins.int] num_workers: Number of worker nodes that this cluster should have. A cluster has one Spark driver and `num_workers` executors for a total of `num_workers` + 1 Spark nodes.
1410
+ :param pulumi.Input[_builtins.str] policy_id: Identifier of Cluster Policy to validate cluster and preset certain defaults. *The primary use for cluster policies is to allow users to create policy-scoped clusters via UI rather than sharing configuration for API-created clusters.* For example, when you specify `policy_id` of [external metastore](https://docs.databricks.com/administration-guide/clusters/policies.html#external-metastore-policy) policy, you still have to fill in relevant keys for `spark_conf`. If relevant fields aren't filled in, then it will cause the configuration drift detected on each plan/apply, and Pulumi will try to apply the detected changes.
1411
+ :param pulumi.Input[Union['ClusterProviderConfigArgs', 'ClusterProviderConfigArgsDict']] provider_config: Configure the provider for management through account provider. This block consists of the following fields:
1412
+ :param pulumi.Input[_builtins.str] runtime_engine: The type of runtime engine to use. If not specified, the runtime engine type is inferred based on the spark_version value. Allowed values include: `PHOTON`, `STANDARD`.
1413
+ :param pulumi.Input[_builtins.str] single_user_name: The optional user name of the user (or group name if `kind` is specified) to assign to an interactive cluster. This field is required when using `data_security_mode` set to `SINGLE_USER` or AAD Passthrough for Azure Data Lake Storage (ADLS) with a single-user cluster (i.e., not high-concurrency clusters).
1414
+ :param pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]] spark_conf: should have the following items:
1502
1415
  * `spark.databricks.repl.allowedLanguages` set to a list of supported languages, for example: `python,sql`, or `python,sql,r`. Scala is not supported!
1503
1416
  * `spark.databricks.cluster.profile` set to `serverless`
1504
- :param pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]] spark_env_vars: Map with environment variable key-value pairs to fine-tune Spark clusters. Key-value pairs of the form (X,Y) are exported (i.e., X='Y') while launching the driver and workers.
1505
- :param pulumi.Input[builtins.str] spark_version: [Runtime version](https://docs.databricks.com/runtime/index.html) of the cluster. Any supported get_spark_version id. We advise using Cluster Policies to restrict the list of versions for simplicity while maintaining enough control.
1506
- :param pulumi.Input[Sequence[pulumi.Input[builtins.str]]] ssh_public_keys: SSH public key contents that will be added to each Spark node in this cluster. The corresponding private keys can be used to login with the user name ubuntu on port 2200. You can specify up to 10 keys.
1507
- :param pulumi.Input[builtins.bool] use_ml_runtime: Whenever ML runtime should be selected or not. Actual runtime is determined by `spark_version` (DBR release), this field `use_ml_runtime`, and whether `node_type_id` is GPU node or not.
1417
+ :param pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]] spark_env_vars: Map with environment variable key-value pairs to fine-tune Spark clusters. Key-value pairs of the form (X,Y) are exported (i.e., X='Y') while launching the driver and workers.
1418
+ :param pulumi.Input[_builtins.str] spark_version: [Runtime version](https://docs.databricks.com/runtime/index.html) of the cluster. Any supported get_spark_version id. We advise using Cluster Policies to restrict the list of versions for simplicity while maintaining enough control.
1419
+ :param pulumi.Input[Sequence[pulumi.Input[_builtins.str]]] ssh_public_keys: SSH public key contents that will be added to each Spark node in this cluster. The corresponding private keys can be used to login with the user name ubuntu on port 2200. You can specify up to 10 keys.
1420
+ :param pulumi.Input[_builtins.bool] use_ml_runtime: Whether the ML runtime should be selected or not. The actual runtime is determined by `spark_version` (DBR release), this field `use_ml_runtime`, and whether `node_type_id` is a GPU node or not.
1508
1421
  """
1509
1422
  ...
1510
1423
  @overload
@@ -1538,42 +1451,43 @@ class Cluster(pulumi.CustomResource):
1538
1451
  def _internal_init(__self__,
1539
1452
  resource_name: str,
1540
1453
  opts: Optional[pulumi.ResourceOptions] = None,
1541
- apply_policy_default_values: Optional[pulumi.Input[builtins.bool]] = None,
1454
+ apply_policy_default_values: Optional[pulumi.Input[_builtins.bool]] = None,
1542
1455
  autoscale: Optional[pulumi.Input[Union['ClusterAutoscaleArgs', 'ClusterAutoscaleArgsDict']]] = None,
1543
- autotermination_minutes: Optional[pulumi.Input[builtins.int]] = None,
1456
+ autotermination_minutes: Optional[pulumi.Input[_builtins.int]] = None,
1544
1457
  aws_attributes: Optional[pulumi.Input[Union['ClusterAwsAttributesArgs', 'ClusterAwsAttributesArgsDict']]] = None,
1545
1458
  azure_attributes: Optional[pulumi.Input[Union['ClusterAzureAttributesArgs', 'ClusterAzureAttributesArgsDict']]] = None,
1546
1459
  cluster_log_conf: Optional[pulumi.Input[Union['ClusterClusterLogConfArgs', 'ClusterClusterLogConfArgsDict']]] = None,
1547
1460
  cluster_mount_infos: Optional[pulumi.Input[Sequence[pulumi.Input[Union['ClusterClusterMountInfoArgs', 'ClusterClusterMountInfoArgsDict']]]]] = None,
1548
- cluster_name: Optional[pulumi.Input[builtins.str]] = None,
1549
- custom_tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]]] = None,
1550
- data_security_mode: Optional[pulumi.Input[builtins.str]] = None,
1461
+ cluster_name: Optional[pulumi.Input[_builtins.str]] = None,
1462
+ custom_tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]] = None,
1463
+ data_security_mode: Optional[pulumi.Input[_builtins.str]] = None,
1551
1464
  docker_image: Optional[pulumi.Input[Union['ClusterDockerImageArgs', 'ClusterDockerImageArgsDict']]] = None,
1552
- driver_instance_pool_id: Optional[pulumi.Input[builtins.str]] = None,
1553
- driver_node_type_id: Optional[pulumi.Input[builtins.str]] = None,
1554
- enable_elastic_disk: Optional[pulumi.Input[builtins.bool]] = None,
1555
- enable_local_disk_encryption: Optional[pulumi.Input[builtins.bool]] = None,
1465
+ driver_instance_pool_id: Optional[pulumi.Input[_builtins.str]] = None,
1466
+ driver_node_type_id: Optional[pulumi.Input[_builtins.str]] = None,
1467
+ enable_elastic_disk: Optional[pulumi.Input[_builtins.bool]] = None,
1468
+ enable_local_disk_encryption: Optional[pulumi.Input[_builtins.bool]] = None,
1556
1469
  gcp_attributes: Optional[pulumi.Input[Union['ClusterGcpAttributesArgs', 'ClusterGcpAttributesArgsDict']]] = None,
1557
- idempotency_token: Optional[pulumi.Input[builtins.str]] = None,
1470
+ idempotency_token: Optional[pulumi.Input[_builtins.str]] = None,
1558
1471
  init_scripts: Optional[pulumi.Input[Sequence[pulumi.Input[Union['ClusterInitScriptArgs', 'ClusterInitScriptArgsDict']]]]] = None,
1559
- instance_pool_id: Optional[pulumi.Input[builtins.str]] = None,
1560
- is_pinned: Optional[pulumi.Input[builtins.bool]] = None,
1561
- is_single_node: Optional[pulumi.Input[builtins.bool]] = None,
1562
- kind: Optional[pulumi.Input[builtins.str]] = None,
1472
+ instance_pool_id: Optional[pulumi.Input[_builtins.str]] = None,
1473
+ is_pinned: Optional[pulumi.Input[_builtins.bool]] = None,
1474
+ is_single_node: Optional[pulumi.Input[_builtins.bool]] = None,
1475
+ kind: Optional[pulumi.Input[_builtins.str]] = None,
1563
1476
  libraries: Optional[pulumi.Input[Sequence[pulumi.Input[Union['ClusterLibraryArgs', 'ClusterLibraryArgsDict']]]]] = None,
1564
- no_wait: Optional[pulumi.Input[builtins.bool]] = None,
1565
- node_type_id: Optional[pulumi.Input[builtins.str]] = None,
1566
- num_workers: Optional[pulumi.Input[builtins.int]] = None,
1567
- policy_id: Optional[pulumi.Input[builtins.str]] = None,
1568
- remote_disk_throughput: Optional[pulumi.Input[builtins.int]] = None,
1569
- runtime_engine: Optional[pulumi.Input[builtins.str]] = None,
1570
- single_user_name: Optional[pulumi.Input[builtins.str]] = None,
1571
- spark_conf: Optional[pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]]] = None,
1572
- spark_env_vars: Optional[pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]]] = None,
1573
- spark_version: Optional[pulumi.Input[builtins.str]] = None,
1574
- ssh_public_keys: Optional[pulumi.Input[Sequence[pulumi.Input[builtins.str]]]] = None,
1575
- total_initial_remote_disk_size: Optional[pulumi.Input[builtins.int]] = None,
1576
- use_ml_runtime: Optional[pulumi.Input[builtins.bool]] = None,
1477
+ no_wait: Optional[pulumi.Input[_builtins.bool]] = None,
1478
+ node_type_id: Optional[pulumi.Input[_builtins.str]] = None,
1479
+ num_workers: Optional[pulumi.Input[_builtins.int]] = None,
1480
+ policy_id: Optional[pulumi.Input[_builtins.str]] = None,
1481
+ provider_config: Optional[pulumi.Input[Union['ClusterProviderConfigArgs', 'ClusterProviderConfigArgsDict']]] = None,
1482
+ remote_disk_throughput: Optional[pulumi.Input[_builtins.int]] = None,
1483
+ runtime_engine: Optional[pulumi.Input[_builtins.str]] = None,
1484
+ single_user_name: Optional[pulumi.Input[_builtins.str]] = None,
1485
+ spark_conf: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]] = None,
1486
+ spark_env_vars: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]] = None,
1487
+ spark_version: Optional[pulumi.Input[_builtins.str]] = None,
1488
+ ssh_public_keys: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]] = None,
1489
+ total_initial_remote_disk_size: Optional[pulumi.Input[_builtins.int]] = None,
1490
+ use_ml_runtime: Optional[pulumi.Input[_builtins.bool]] = None,
1577
1491
  workload_type: Optional[pulumi.Input[Union['ClusterWorkloadTypeArgs', 'ClusterWorkloadTypeArgsDict']]] = None,
1578
1492
  __props__=None):
1579
1493
  opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts)
@@ -1611,6 +1525,7 @@ class Cluster(pulumi.CustomResource):
1611
1525
  __props__.__dict__["node_type_id"] = node_type_id
1612
1526
  __props__.__dict__["num_workers"] = num_workers
1613
1527
  __props__.__dict__["policy_id"] = policy_id
1528
+ __props__.__dict__["provider_config"] = provider_config
1614
1529
  __props__.__dict__["remote_disk_throughput"] = remote_disk_throughput
1615
1530
  __props__.__dict__["runtime_engine"] = runtime_engine
1616
1531
  __props__.__dict__["single_user_name"] = single_user_name
@@ -1637,46 +1552,47 @@ class Cluster(pulumi.CustomResource):
1637
1552
  def get(resource_name: str,
1638
1553
  id: pulumi.Input[str],
1639
1554
  opts: Optional[pulumi.ResourceOptions] = None,
1640
- apply_policy_default_values: Optional[pulumi.Input[builtins.bool]] = None,
1555
+ apply_policy_default_values: Optional[pulumi.Input[_builtins.bool]] = None,
1641
1556
  autoscale: Optional[pulumi.Input[Union['ClusterAutoscaleArgs', 'ClusterAutoscaleArgsDict']]] = None,
1642
- autotermination_minutes: Optional[pulumi.Input[builtins.int]] = None,
1557
+ autotermination_minutes: Optional[pulumi.Input[_builtins.int]] = None,
1643
1558
  aws_attributes: Optional[pulumi.Input[Union['ClusterAwsAttributesArgs', 'ClusterAwsAttributesArgsDict']]] = None,
1644
1559
  azure_attributes: Optional[pulumi.Input[Union['ClusterAzureAttributesArgs', 'ClusterAzureAttributesArgsDict']]] = None,
1645
- cluster_id: Optional[pulumi.Input[builtins.str]] = None,
1560
+ cluster_id: Optional[pulumi.Input[_builtins.str]] = None,
1646
1561
  cluster_log_conf: Optional[pulumi.Input[Union['ClusterClusterLogConfArgs', 'ClusterClusterLogConfArgsDict']]] = None,
1647
1562
  cluster_mount_infos: Optional[pulumi.Input[Sequence[pulumi.Input[Union['ClusterClusterMountInfoArgs', 'ClusterClusterMountInfoArgsDict']]]]] = None,
1648
- cluster_name: Optional[pulumi.Input[builtins.str]] = None,
1649
- custom_tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]]] = None,
1650
- data_security_mode: Optional[pulumi.Input[builtins.str]] = None,
1651
- default_tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]]] = None,
1563
+ cluster_name: Optional[pulumi.Input[_builtins.str]] = None,
1564
+ custom_tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]] = None,
1565
+ data_security_mode: Optional[pulumi.Input[_builtins.str]] = None,
1566
+ default_tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]] = None,
1652
1567
  docker_image: Optional[pulumi.Input[Union['ClusterDockerImageArgs', 'ClusterDockerImageArgsDict']]] = None,
1653
- driver_instance_pool_id: Optional[pulumi.Input[builtins.str]] = None,
1654
- driver_node_type_id: Optional[pulumi.Input[builtins.str]] = None,
1655
- enable_elastic_disk: Optional[pulumi.Input[builtins.bool]] = None,
1656
- enable_local_disk_encryption: Optional[pulumi.Input[builtins.bool]] = None,
1568
+ driver_instance_pool_id: Optional[pulumi.Input[_builtins.str]] = None,
1569
+ driver_node_type_id: Optional[pulumi.Input[_builtins.str]] = None,
1570
+ enable_elastic_disk: Optional[pulumi.Input[_builtins.bool]] = None,
1571
+ enable_local_disk_encryption: Optional[pulumi.Input[_builtins.bool]] = None,
1657
1572
  gcp_attributes: Optional[pulumi.Input[Union['ClusterGcpAttributesArgs', 'ClusterGcpAttributesArgsDict']]] = None,
1658
- idempotency_token: Optional[pulumi.Input[builtins.str]] = None,
1573
+ idempotency_token: Optional[pulumi.Input[_builtins.str]] = None,
1659
1574
  init_scripts: Optional[pulumi.Input[Sequence[pulumi.Input[Union['ClusterInitScriptArgs', 'ClusterInitScriptArgsDict']]]]] = None,
1660
- instance_pool_id: Optional[pulumi.Input[builtins.str]] = None,
1661
- is_pinned: Optional[pulumi.Input[builtins.bool]] = None,
1662
- is_single_node: Optional[pulumi.Input[builtins.bool]] = None,
1663
- kind: Optional[pulumi.Input[builtins.str]] = None,
1575
+ instance_pool_id: Optional[pulumi.Input[_builtins.str]] = None,
1576
+ is_pinned: Optional[pulumi.Input[_builtins.bool]] = None,
1577
+ is_single_node: Optional[pulumi.Input[_builtins.bool]] = None,
1578
+ kind: Optional[pulumi.Input[_builtins.str]] = None,
1664
1579
  libraries: Optional[pulumi.Input[Sequence[pulumi.Input[Union['ClusterLibraryArgs', 'ClusterLibraryArgsDict']]]]] = None,
1665
- no_wait: Optional[pulumi.Input[builtins.bool]] = None,
1666
- node_type_id: Optional[pulumi.Input[builtins.str]] = None,
1667
- num_workers: Optional[pulumi.Input[builtins.int]] = None,
1668
- policy_id: Optional[pulumi.Input[builtins.str]] = None,
1669
- remote_disk_throughput: Optional[pulumi.Input[builtins.int]] = None,
1670
- runtime_engine: Optional[pulumi.Input[builtins.str]] = None,
1671
- single_user_name: Optional[pulumi.Input[builtins.str]] = None,
1672
- spark_conf: Optional[pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]]] = None,
1673
- spark_env_vars: Optional[pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]]] = None,
1674
- spark_version: Optional[pulumi.Input[builtins.str]] = None,
1675
- ssh_public_keys: Optional[pulumi.Input[Sequence[pulumi.Input[builtins.str]]]] = None,
1676
- state: Optional[pulumi.Input[builtins.str]] = None,
1677
- total_initial_remote_disk_size: Optional[pulumi.Input[builtins.int]] = None,
1678
- url: Optional[pulumi.Input[builtins.str]] = None,
1679
- use_ml_runtime: Optional[pulumi.Input[builtins.bool]] = None,
1580
+ no_wait: Optional[pulumi.Input[_builtins.bool]] = None,
1581
+ node_type_id: Optional[pulumi.Input[_builtins.str]] = None,
1582
+ num_workers: Optional[pulumi.Input[_builtins.int]] = None,
1583
+ policy_id: Optional[pulumi.Input[_builtins.str]] = None,
1584
+ provider_config: Optional[pulumi.Input[Union['ClusterProviderConfigArgs', 'ClusterProviderConfigArgsDict']]] = None,
1585
+ remote_disk_throughput: Optional[pulumi.Input[_builtins.int]] = None,
1586
+ runtime_engine: Optional[pulumi.Input[_builtins.str]] = None,
1587
+ single_user_name: Optional[pulumi.Input[_builtins.str]] = None,
1588
+ spark_conf: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]] = None,
1589
+ spark_env_vars: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]] = None,
1590
+ spark_version: Optional[pulumi.Input[_builtins.str]] = None,
1591
+ ssh_public_keys: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]] = None,
1592
+ state: Optional[pulumi.Input[_builtins.str]] = None,
1593
+ total_initial_remote_disk_size: Optional[pulumi.Input[_builtins.int]] = None,
1594
+ url: Optional[pulumi.Input[_builtins.str]] = None,
1595
+ use_ml_runtime: Optional[pulumi.Input[_builtins.bool]] = None,
1680
1596
  workload_type: Optional[pulumi.Input[Union['ClusterWorkloadTypeArgs', 'ClusterWorkloadTypeArgsDict']]] = None) -> 'Cluster':
1681
1597
  """
1682
1598
  Get an existing Cluster resource's state with the given name, id, and optional extra
@@ -1685,10 +1601,10 @@ class Cluster(pulumi.CustomResource):
1685
1601
  :param str resource_name: The unique name of the resulting resource.
1686
1602
  :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
1687
1603
  :param pulumi.ResourceOptions opts: Options for the resource.
1688
- :param pulumi.Input[builtins.bool] apply_policy_default_values: Whether to use policy default values for missing cluster attributes.
1689
- :param pulumi.Input[builtins.int] autotermination_minutes: Automatically terminate the cluster after being inactive for this time in minutes. If specified, the threshold must be between 10 and 10000 minutes. You can also set this value to 0 to explicitly disable automatic termination. Defaults to `60`. *We highly recommend having this setting present for Interactive/BI clusters.*
1690
- :param pulumi.Input[builtins.str] cluster_name: Cluster name, which doesn't have to be unique. If not specified at creation, the cluster name will be an empty string.
1691
- :param pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]] custom_tags: should have tag `ResourceClass` set to value `Serverless`
1604
+ :param pulumi.Input[_builtins.bool] apply_policy_default_values: Whether to use policy default values for missing cluster attributes.
1605
+ :param pulumi.Input[_builtins.int] autotermination_minutes: Automatically terminate the cluster after being inactive for this time in minutes. If specified, the threshold must be between 10 and 10000 minutes. You can also set this value to 0 to explicitly disable automatic termination. Defaults to `60`. *We highly recommend having this setting present for Interactive/BI clusters.*
1606
+ :param pulumi.Input[_builtins.str] cluster_name: Cluster name, which doesn't have to be unique. If not specified at creation, the cluster name will be an empty string.
1607
+ :param pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]] custom_tags: should have the tag `ResourceClass` set to the value `Serverless`
1692
1608
 
1693
1609
  For example:
1694
1610
 
@@ -1709,58 +1625,35 @@ class Cluster(pulumi.CustomResource):
1709
1625
  "ResourceClass": "Serverless",
1710
1626
  })
1711
1627
  ```
1712
- :param pulumi.Input[builtins.str] data_security_mode: Select the security features of the cluster (see [API docs](https://docs.databricks.com/api/workspace/clusters/create#data_security_mode) for full list of values). [Unity Catalog requires](https://docs.databricks.com/data-governance/unity-catalog/compute.html#create-clusters--sql-warehouses-with-unity-catalog-access) `SINGLE_USER` or `USER_ISOLATION` mode. `LEGACY_PASSTHROUGH` for passthrough cluster and `LEGACY_TABLE_ACL` for Table ACL cluster. If omitted, default security features are enabled. To disable security features use `NONE` or legacy mode `NO_ISOLATION`. If `kind` is specified, then the following options are available:
1628
+ :param pulumi.Input[_builtins.str] data_security_mode: Select the security features of the cluster (see [API docs](https://docs.databricks.com/api/workspace/clusters/create#data_security_mode) for full list of values). [Unity Catalog requires](https://docs.databricks.com/data-governance/unity-catalog/compute.html#create-clusters--sql-warehouses-with-unity-catalog-access) `SINGLE_USER` or `USER_ISOLATION` mode. `LEGACY_PASSTHROUGH` for passthrough cluster and `LEGACY_TABLE_ACL` for Table ACL cluster. If omitted, default security features are enabled. To disable security features use `NONE` or legacy mode `NO_ISOLATION`. If `kind` is specified, then the following options are available:
1713
1629
  * `DATA_SECURITY_MODE_AUTO`: Databricks will choose the most appropriate access mode depending on your compute configuration.
1714
1630
  * `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`.
1715
1631
  * `DATA_SECURITY_MODE_DEDICATED`: Alias for `SINGLE_USER`.
1716
- :param pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]] default_tags: (map) Tags that are added by Databricks by default, regardless of any `custom_tags` that may have been added. These include: Vendor: Databricks, Creator: <username_of_creator>, ClusterName: <name_of_cluster>, ClusterId: <id_of_cluster>, Name: <Databricks internal use>, and any workspace and pool tags.
1717
- :param pulumi.Input[builtins.str] driver_instance_pool_id: similar to `instance_pool_id`, but for driver node. If omitted, and `instance_pool_id` is specified, then the driver will be allocated from that pool.
1718
- :param pulumi.Input[builtins.str] driver_node_type_id: The node type of the Spark driver. This field is optional; if unset, API will set the driver node type to the same value as `node_type_id` defined above.
1719
- :param pulumi.Input[builtins.bool] enable_elastic_disk: If you don't want to allocate a fixed number of EBS volumes at cluster creation time, use autoscaling local storage. With autoscaling local storage, Databricks monitors the amount of free disk space available on your cluster's Spark workers. If a worker begins to run too low on disk, Databricks automatically attaches a new EBS volume to the worker before it runs out of disk space. EBS volumes are attached up to a limit of 5 TB of total disk space per instance (including the instance's local storage). To scale down EBS usage, make sure you have `autotermination_minutes` and `autoscale` attributes set. More documentation available at [cluster configuration page](https://docs.databricks.com/clusters/configure.html#autoscaling-local-storage-1).
1720
- :param pulumi.Input[builtins.bool] enable_local_disk_encryption: Some instance types you use to run clusters may have locally attached disks. Databricks may store shuffle data or temporary data on these locally attached disks. To ensure that all data at rest is encrypted for all storage types, including shuffle data stored temporarily on your cluster's local disks, you can enable local disk encryption. When local disk encryption is enabled, Databricks generates an encryption key locally unique to each cluster node and uses it to encrypt all data stored on local disks. The scope of the key is local to each cluster node and is destroyed along with the cluster node itself. During its lifetime, the key resides in memory for encryption and decryption and is stored encrypted on the disk. *Your workloads may run more slowly because of the performance impact of reading and writing encrypted data to and from local volumes. This feature is not available for all Azure Databricks subscriptions. Contact your Microsoft or Databricks account representative to request access.*
1721
- :param pulumi.Input[builtins.str] idempotency_token: An optional token to guarantee the idempotency of cluster creation requests. If an active cluster with the provided token already exists, the request will not create a new cluster, but it will return the existing running cluster's ID instead. If you specify the idempotency token, upon failure, you can retry until the request succeeds. Databricks platform guarantees to launch exactly one cluster with that idempotency token. This token should have at most 64 characters.
1722
- :param pulumi.Input[builtins.str] instance_pool_id: To reduce cluster start time, you can attach a cluster to a predefined pool of idle instances. When attached to a pool, a cluster allocates its driver and worker nodes from the pool. If the pool does not have sufficient idle resources to accommodate the cluster's request, it expands by allocating new instances from the instance provider. When an attached cluster changes its state to `TERMINATED`, the instances it used are returned to the pool and reused by a different cluster.
1723
- :param pulumi.Input[builtins.bool] is_pinned: boolean value specifying if the cluster is pinned (not pinned by default). You must be a Databricks administrator to use this. The pinned clusters' maximum number is [limited to 100](https://docs.databricks.com/clusters/clusters-manage.html#pin-a-cluster), so `apply` may fail if you have more than that (this number may change over time, so check Databricks documentation for actual number).
1724
- :param pulumi.Input[builtins.bool] is_single_node: When set to true, Databricks will automatically set single node related `custom_tags`, `spark_conf`, and `num_workers`.
1725
- :param pulumi.Input[builtins.str] kind: The kind of compute described by this compute specification. Possible values (see [API docs](https://docs.databricks.com/api/workspace/clusters/create#kind) for full list): `CLASSIC_PREVIEW` (if corresponding public preview is enabled).
1726
- :param pulumi.Input[builtins.bool] no_wait: If true, the provider will not wait for the cluster to reach `RUNNING` state when creating the cluster, allowing cluster creation and library installation to continue asynchronously. Defaults to false (the provider will wait for cluster creation and library installation to succeed).
1727
-
1728
- The following example demonstrates how to create an autoscaling cluster with [Delta Cache](https://docs.databricks.com/delta/optimizations/delta-cache.html) enabled:
1729
-
1730
- ```python
1731
- import pulumi
1732
- import pulumi_databricks as databricks
1733
-
1734
- smallest = databricks.get_node_type(local_disk=True)
1735
- latest_lts = databricks.get_spark_version(long_term_support=True)
1736
- shared_autoscaling = databricks.Cluster("shared_autoscaling",
1737
- cluster_name="Shared Autoscaling",
1738
- spark_version=latest_lts.id,
1739
- node_type_id=smallest.id,
1740
- autotermination_minutes=20,
1741
- autoscale={
1742
- "min_workers": 1,
1743
- "max_workers": 50,
1744
- },
1745
- spark_conf={
1746
- "spark.databricks.io.cache.enabled": "true",
1747
- "spark.databricks.io.cache.maxDiskUsage": "50g",
1748
- "spark.databricks.io.cache.maxMetaDataCache": "1g",
1749
- })
1750
- ```
1751
- :param pulumi.Input[builtins.str] node_type_id: Any supported get_node_type id. If `instance_pool_id` is specified, this field is not needed.
1752
- :param pulumi.Input[builtins.int] num_workers: Number of worker nodes that this cluster should have. A cluster has one Spark driver and `num_workers` executors for a total of `num_workers` + 1 Spark nodes.
1753
- :param pulumi.Input[builtins.str] policy_id: Identifier of Cluster Policy to validate cluster and preset certain defaults. *The primary use for cluster policies is to allow users to create policy-scoped clusters via UI rather than sharing configuration for API-created clusters.* For example, when you specify `policy_id` of [external metastore](https://docs.databricks.com/administration-guide/clusters/policies.html#external-metastore-policy) policy, you still have to fill in relevant keys for `spark_conf`. If relevant fields aren't filled in, then it will cause the configuration drift detected on each plan/apply, and Pulumi will try to apply the detected changes.
1754
- :param pulumi.Input[builtins.str] runtime_engine: The type of runtime engine to use. If not specified, the runtime engine type is inferred based on the spark_version value. Allowed values include: `PHOTON`, `STANDARD`.
1755
- :param pulumi.Input[builtins.str] single_user_name: The optional user name of the user (or group name if `kind` if specified) to assign to an interactive cluster. This field is required when using `data_security_mode` set to `SINGLE_USER` or AAD Passthrough for Azure Data Lake Storage (ADLS) with a single-user cluster (i.e., not high-concurrency clusters).
1756
- :param pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]] spark_conf: should have following items:
1632
+ :param pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]] default_tags: (map) Tags that are added by Databricks by default, regardless of any `custom_tags` that may have been added. These include: Vendor: Databricks, Creator: <username_of_creator>, ClusterName: <name_of_cluster>, ClusterId: <id_of_cluster>, Name: <Databricks internal use>, and any workspace and pool tags.
1633
+ :param pulumi.Input[_builtins.str] driver_instance_pool_id: similar to `instance_pool_id`, but for driver node. If omitted, and `instance_pool_id` is specified, then the driver will be allocated from that pool.
1634
+ :param pulumi.Input[_builtins.str] driver_node_type_id: The node type of the Spark driver. This field is optional; if unset, the API will set the driver node type to the same value as `node_type_id` defined above.
1635
+ :param pulumi.Input[_builtins.bool] enable_elastic_disk: If you don't want to allocate a fixed number of EBS volumes at cluster creation time, use autoscaling local storage. With autoscaling local storage, Databricks monitors the amount of free disk space available on your cluster's Spark workers. If a worker begins to run too low on disk, Databricks automatically attaches a new EBS volume to the worker before it runs out of disk space. EBS volumes are attached up to a limit of 5 TB of total disk space per instance (including the instance's local storage). To scale down EBS usage, make sure you have `autotermination_minutes` and `autoscale` attributes set. More documentation available at [cluster configuration page](https://docs.databricks.com/clusters/configure.html#autoscaling-local-storage-1).
1636
+ :param pulumi.Input[_builtins.bool] enable_local_disk_encryption: Some instance types you use to run clusters may have locally attached disks. Databricks may store shuffle data or temporary data on these locally attached disks. To ensure that all data at rest is encrypted for all storage types, including shuffle data stored temporarily on your cluster's local disks, you can enable local disk encryption. When local disk encryption is enabled, Databricks generates an encryption key locally unique to each cluster node and uses it to encrypt all data stored on local disks. The scope of the key is local to each cluster node and is destroyed along with the cluster node itself. During its lifetime, the key resides in memory for encryption and decryption and is stored encrypted on the disk. *Your workloads may run more slowly because of the performance impact of reading and writing encrypted data to and from local volumes. This feature is not available for all Azure Databricks subscriptions. Contact your Microsoft or Databricks account representative to request access.*
1637
+ :param pulumi.Input[_builtins.str] idempotency_token: An optional token to guarantee the idempotency of cluster creation requests. If an active cluster with the provided token already exists, the request will not create a new cluster, but it will return the existing running cluster's ID instead. If you specify the idempotency token, upon failure, you can retry until the request succeeds. Databricks platform guarantees to launch exactly one cluster with that idempotency token. This token should have at most 64 characters.
1638
+ :param pulumi.Input[_builtins.str] instance_pool_id: To reduce cluster start time, you can attach a cluster to a predefined pool of idle instances. When attached to a pool, a cluster allocates its driver and worker nodes from the pool. If the pool does not have sufficient idle resources to accommodate the cluster's request, it expands by allocating new instances from the instance provider. When an attached cluster changes its state to `TERMINATED`, the instances it used are returned to the pool and reused by a different cluster.
1639
+ :param pulumi.Input[_builtins.bool] is_pinned: boolean value specifying if the cluster is pinned (not pinned by default). You must be a Databricks administrator to use this. The pinned clusters' maximum number is [limited to 100](https://docs.databricks.com/clusters/clusters-manage.html#pin-a-cluster), so `apply` may fail if you have more than that (this number may change over time, so check Databricks documentation for actual number).
1640
+ :param pulumi.Input[_builtins.bool] is_single_node: When set to true, Databricks will automatically set single node related `custom_tags`, `spark_conf`, and `num_workers`.
1641
+ :param pulumi.Input[_builtins.str] kind: The kind of compute described by this compute specification. Possible values (see [API docs](https://docs.databricks.com/api/workspace/clusters/create#kind) for full list): `CLASSIC_PREVIEW` (if corresponding public preview is enabled).
1642
+ :param pulumi.Input[_builtins.bool] no_wait: If true, the provider will not wait for the cluster to reach `RUNNING` state when creating the cluster, allowing cluster creation and library installation to continue asynchronously. Defaults to false (the provider will wait for cluster creation and library installation to succeed).
1643
+ :param pulumi.Input[_builtins.str] node_type_id: Any supported get_node_type id. If `instance_pool_id` is specified, this field is not needed.
1644
+ :param pulumi.Input[_builtins.int] num_workers: Number of worker nodes that this cluster should have. A cluster has one Spark driver and `num_workers` executors for a total of `num_workers` + 1 Spark nodes.
1645
+ :param pulumi.Input[_builtins.str] policy_id: Identifier of Cluster Policy to validate cluster and preset certain defaults. *The primary use for cluster policies is to allow users to create policy-scoped clusters via UI rather than sharing configuration for API-created clusters.* For example, when you specify `policy_id` of [external metastore](https://docs.databricks.com/administration-guide/clusters/policies.html#external-metastore-policy) policy, you still have to fill in relevant keys for `spark_conf`. If relevant fields aren't filled in, then it will cause the configuration drift detected on each plan/apply, and Pulumi will try to apply the detected changes.
1646
+ :param pulumi.Input[Union['ClusterProviderConfigArgs', 'ClusterProviderConfigArgsDict']] provider_config: Configure the provider for management through the account provider. This block consists of the following fields:
1647
+ :param pulumi.Input[_builtins.str] runtime_engine: The type of runtime engine to use. If not specified, the runtime engine type is inferred based on the spark_version value. Allowed values include: `PHOTON`, `STANDARD`.
1648
+ :param pulumi.Input[_builtins.str] single_user_name: The optional user name of the user (or group name if `kind` is specified) to assign to an interactive cluster. This field is required when using `data_security_mode` set to `SINGLE_USER` or AAD Passthrough for Azure Data Lake Storage (ADLS) with a single-user cluster (i.e., not high-concurrency clusters).
1649
+ :param pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]] spark_conf: should have the following items:
1757
1650
  * `spark.databricks.repl.allowedLanguages` set to a list of supported languages, for example: `python,sql`, or `python,sql,r`. Scala is not supported!
1758
1651
  * `spark.databricks.cluster.profile` set to `serverless`
1759
- :param pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]] spark_env_vars: Map with environment variable key-value pairs to fine-tune Spark clusters. Key-value pairs of the form (X,Y) are exported (i.e., X='Y') while launching the driver and workers.
1760
- :param pulumi.Input[builtins.str] spark_version: [Runtime version](https://docs.databricks.com/runtime/index.html) of the cluster. Any supported get_spark_version id. We advise using Cluster Policies to restrict the list of versions for simplicity while maintaining enough control.
1761
- :param pulumi.Input[Sequence[pulumi.Input[builtins.str]]] ssh_public_keys: SSH public key contents that will be added to each Spark node in this cluster. The corresponding private keys can be used to login with the user name ubuntu on port 2200. You can specify up to 10 keys.
1762
- :param pulumi.Input[builtins.str] state: (string) State of the cluster.
1763
- :param pulumi.Input[builtins.bool] use_ml_runtime: Whenever ML runtime should be selected or not. Actual runtime is determined by `spark_version` (DBR release), this field `use_ml_runtime`, and whether `node_type_id` is GPU node or not.
1652
+ :param pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]] spark_env_vars: Map with environment variable key-value pairs to fine-tune Spark clusters. Key-value pairs of the form (X,Y) are exported (i.e., X='Y') while launching the driver and workers.
1653
+ :param pulumi.Input[_builtins.str] spark_version: [Runtime version](https://docs.databricks.com/runtime/index.html) of the cluster. Any supported get_spark_version id. We advise using Cluster Policies to restrict the list of versions for simplicity while maintaining enough control.
1654
+ :param pulumi.Input[Sequence[pulumi.Input[_builtins.str]]] ssh_public_keys: SSH public key contents that will be added to each Spark node in this cluster. The corresponding private keys can be used to login with the user name ubuntu on port 2200. You can specify up to 10 keys.
1655
+ :param pulumi.Input[_builtins.str] state: (string) State of the cluster.
1656
+ :param pulumi.Input[_builtins.bool] use_ml_runtime: Whether the ML runtime should be selected or not. The actual runtime is determined by `spark_version` (DBR release), this field `use_ml_runtime`, and whether `node_type_id` is a GPU node or not.
1764
1657
  """
1765
1658
  opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
1766
1659
 
@@ -1795,6 +1688,7 @@ class Cluster(pulumi.CustomResource):
1795
1688
  __props__.__dict__["node_type_id"] = node_type_id
1796
1689
  __props__.__dict__["num_workers"] = num_workers
1797
1690
  __props__.__dict__["policy_id"] = policy_id
1691
+ __props__.__dict__["provider_config"] = provider_config
1798
1692
  __props__.__dict__["remote_disk_throughput"] = remote_disk_throughput
1799
1693
  __props__.__dict__["runtime_engine"] = runtime_engine
1800
1694
  __props__.__dict__["single_user_name"] = single_user_name
@@ -1809,63 +1703,63 @@ class Cluster(pulumi.CustomResource):
1809
1703
  __props__.__dict__["workload_type"] = workload_type
1810
1704
  return Cluster(resource_name, opts=opts, __props__=__props__)
1811
1705
 
1812
- @property
1706
+ @_builtins.property
1813
1707
  @pulumi.getter(name="applyPolicyDefaultValues")
1814
- def apply_policy_default_values(self) -> pulumi.Output[Optional[builtins.bool]]:
1708
+ def apply_policy_default_values(self) -> pulumi.Output[Optional[_builtins.bool]]:
1815
1709
  """
1816
1710
  Whether to use policy default values for missing cluster attributes.
1817
1711
  """
1818
1712
  return pulumi.get(self, "apply_policy_default_values")
1819
1713
 
1820
- @property
1714
+ @_builtins.property
1821
1715
  @pulumi.getter
1822
1716
  def autoscale(self) -> pulumi.Output[Optional['outputs.ClusterAutoscale']]:
1823
1717
  return pulumi.get(self, "autoscale")
1824
1718
 
1825
- @property
1719
+ @_builtins.property
1826
1720
  @pulumi.getter(name="autoterminationMinutes")
1827
- def autotermination_minutes(self) -> pulumi.Output[Optional[builtins.int]]:
1721
+ def autotermination_minutes(self) -> pulumi.Output[Optional[_builtins.int]]:
1828
1722
  """
1829
1723
  Automatically terminate the cluster after being inactive for this time in minutes. If specified, the threshold must be between 10 and 10000 minutes. You can also set this value to 0 to explicitly disable automatic termination. Defaults to `60`. *We highly recommend having this setting present for Interactive/BI clusters.*
1830
1724
  """
1831
1725
  return pulumi.get(self, "autotermination_minutes")
1832
1726
 
1833
- @property
1727
+ @_builtins.property
1834
1728
  @pulumi.getter(name="awsAttributes")
1835
1729
  def aws_attributes(self) -> pulumi.Output[Optional['outputs.ClusterAwsAttributes']]:
1836
1730
  return pulumi.get(self, "aws_attributes")
1837
1731
 
1838
- @property
1732
+ @_builtins.property
1839
1733
  @pulumi.getter(name="azureAttributes")
1840
1734
  def azure_attributes(self) -> pulumi.Output[Optional['outputs.ClusterAzureAttributes']]:
1841
1735
  return pulumi.get(self, "azure_attributes")
1842
1736
 
1843
- @property
1737
+ @_builtins.property
1844
1738
  @pulumi.getter(name="clusterId")
1845
- def cluster_id(self) -> pulumi.Output[builtins.str]:
1739
+ def cluster_id(self) -> pulumi.Output[_builtins.str]:
1846
1740
  return pulumi.get(self, "cluster_id")
1847
1741
 
1848
- @property
1742
+ @_builtins.property
1849
1743
  @pulumi.getter(name="clusterLogConf")
1850
1744
  def cluster_log_conf(self) -> pulumi.Output[Optional['outputs.ClusterClusterLogConf']]:
1851
1745
  return pulumi.get(self, "cluster_log_conf")
1852
1746
 
1853
- @property
1747
+ @_builtins.property
1854
1748
  @pulumi.getter(name="clusterMountInfos")
1855
1749
  def cluster_mount_infos(self) -> pulumi.Output[Optional[Sequence['outputs.ClusterClusterMountInfo']]]:
1856
1750
  return pulumi.get(self, "cluster_mount_infos")
1857
1751
 
1858
- @property
1752
+ @_builtins.property
1859
1753
  @pulumi.getter(name="clusterName")
1860
- def cluster_name(self) -> pulumi.Output[Optional[builtins.str]]:
1754
+ def cluster_name(self) -> pulumi.Output[Optional[_builtins.str]]:
1861
1755
  """
1862
1756
  Cluster name, which doesn't have to be unique. If not specified at creation, the cluster name will be an empty string.
1863
1757
  """
1864
1758
  return pulumi.get(self, "cluster_name")
1865
1759
 
1866
- @property
1760
+ @_builtins.property
1867
1761
  @pulumi.getter(name="customTags")
1868
- def custom_tags(self) -> pulumi.Output[Optional[Mapping[str, builtins.str]]]:
1762
+ def custom_tags(self) -> pulumi.Output[Optional[Mapping[str, _builtins.str]]]:
1869
1763
  """
1870
1764
  should have the tag `ResourceClass` set to the value `Serverless`
1871
1765
 
@@ -1891,9 +1785,9 @@ class Cluster(pulumi.CustomResource):
1891
1785
  """
1892
1786
  return pulumi.get(self, "custom_tags")
1893
1787
 
1894
- @property
1788
+ @_builtins.property
1895
1789
  @pulumi.getter(name="dataSecurityMode")
1896
- def data_security_mode(self) -> pulumi.Output[Optional[builtins.str]]:
1790
+ def data_security_mode(self) -> pulumi.Output[Optional[_builtins.str]]:
1897
1791
  """
1898
1792
  Select the security features of the cluster (see [API docs](https://docs.databricks.com/api/workspace/clusters/create#data_security_mode) for full list of values). [Unity Catalog requires](https://docs.databricks.com/data-governance/unity-catalog/compute.html#create-clusters--sql-warehouses-with-unity-catalog-access) `SINGLE_USER` or `USER_ISOLATION` mode. `LEGACY_PASSTHROUGH` for passthrough cluster and `LEGACY_TABLE_ACL` for Table ACL cluster. If omitted, default security features are enabled. To disable security features use `NONE` or legacy mode `NO_ISOLATION`. If `kind` is specified, then the following options are available:
1899
1793
  * `DATA_SECURITY_MODE_AUTO`: Databricks will choose the most appropriate access mode depending on your compute configuration.
@@ -1902,186 +1796,170 @@ class Cluster(pulumi.CustomResource):
1902
1796
  """
1903
1797
  return pulumi.get(self, "data_security_mode")
1904
1798
 
1905
- @property
1799
+ @_builtins.property
1906
1800
  @pulumi.getter(name="defaultTags")
1907
- def default_tags(self) -> pulumi.Output[Mapping[str, builtins.str]]:
1801
+ def default_tags(self) -> pulumi.Output[Mapping[str, _builtins.str]]:
1908
1802
  """
1909
1803
  (map) Tags that are added by Databricks by default, regardless of any `custom_tags` that may have been added. These include: Vendor: Databricks, Creator: <username_of_creator>, ClusterName: <name_of_cluster>, ClusterId: <id_of_cluster>, Name: <Databricks internal use>, and any workspace and pool tags.
1910
1804
  """
1911
1805
  return pulumi.get(self, "default_tags")
1912
1806
 
1913
- @property
1807
+ @_builtins.property
1914
1808
  @pulumi.getter(name="dockerImage")
1915
1809
  def docker_image(self) -> pulumi.Output[Optional['outputs.ClusterDockerImage']]:
1916
1810
  return pulumi.get(self, "docker_image")
1917
1811
 
1918
- @property
1812
+ @_builtins.property
1919
1813
  @pulumi.getter(name="driverInstancePoolId")
1920
- def driver_instance_pool_id(self) -> pulumi.Output[builtins.str]:
1814
+ def driver_instance_pool_id(self) -> pulumi.Output[_builtins.str]:
1921
1815
  """
1922
1816
  similar to `instance_pool_id`, but for driver node. If omitted, and `instance_pool_id` is specified, then the driver will be allocated from that pool.
1923
1817
  """
1924
1818
  return pulumi.get(self, "driver_instance_pool_id")
1925
1819
 
1926
- @property
1820
+ @_builtins.property
1927
1821
  @pulumi.getter(name="driverNodeTypeId")
1928
- def driver_node_type_id(self) -> pulumi.Output[builtins.str]:
1822
+ def driver_node_type_id(self) -> pulumi.Output[_builtins.str]:
1929
1823
  """
1930
1824
  The node type of the Spark driver. This field is optional; if unset, the API will set the driver node type to the same value as `node_type_id` defined above.
1931
1825
  """
1932
1826
  return pulumi.get(self, "driver_node_type_id")
1933
1827
 
1934
- @property
1828
+ @_builtins.property
1935
1829
  @pulumi.getter(name="enableElasticDisk")
1936
- def enable_elastic_disk(self) -> pulumi.Output[builtins.bool]:
1830
+ def enable_elastic_disk(self) -> pulumi.Output[_builtins.bool]:
1937
1831
  """
1938
1832
  If you don't want to allocate a fixed number of EBS volumes at cluster creation time, use autoscaling local storage. With autoscaling local storage, Databricks monitors the amount of free disk space available on your cluster's Spark workers. If a worker begins to run too low on disk, Databricks automatically attaches a new EBS volume to the worker before it runs out of disk space. EBS volumes are attached up to a limit of 5 TB of total disk space per instance (including the instance's local storage). To scale down EBS usage, make sure you have `autotermination_minutes` and `autoscale` attributes set. More documentation available at [cluster configuration page](https://docs.databricks.com/clusters/configure.html#autoscaling-local-storage-1).
1939
1833
  """
1940
1834
  return pulumi.get(self, "enable_elastic_disk")
1941
1835
 
1942
- @property
1836
+ @_builtins.property
1943
1837
  @pulumi.getter(name="enableLocalDiskEncryption")
1944
- def enable_local_disk_encryption(self) -> pulumi.Output[builtins.bool]:
1838
+ def enable_local_disk_encryption(self) -> pulumi.Output[_builtins.bool]:
1945
1839
  """
1946
1840
  Some instance types you use to run clusters may have locally attached disks. Databricks may store shuffle data or temporary data on these locally attached disks. To ensure that all data at rest is encrypted for all storage types, including shuffle data stored temporarily on your cluster's local disks, you can enable local disk encryption. When local disk encryption is enabled, Databricks generates an encryption key locally unique to each cluster node and uses it to encrypt all data stored on local disks. The scope of the key is local to each cluster node and is destroyed along with the cluster node itself. During its lifetime, the key resides in memory for encryption and decryption and is stored encrypted on the disk. *Your workloads may run more slowly because of the performance impact of reading and writing encrypted data to and from local volumes. This feature is not available for all Azure Databricks subscriptions. Contact your Microsoft or Databricks account representative to request access.*
1947
1841
  """
1948
1842
  return pulumi.get(self, "enable_local_disk_encryption")
1949
1843
 
1950
- @property
1844
+ @_builtins.property
1951
1845
  @pulumi.getter(name="gcpAttributes")
1952
1846
  def gcp_attributes(self) -> pulumi.Output[Optional['outputs.ClusterGcpAttributes']]:
1953
1847
  return pulumi.get(self, "gcp_attributes")
1954
1848
 
1955
- @property
1849
+ @_builtins.property
1956
1850
  @pulumi.getter(name="idempotencyToken")
1957
- def idempotency_token(self) -> pulumi.Output[Optional[builtins.str]]:
1851
+ def idempotency_token(self) -> pulumi.Output[Optional[_builtins.str]]:
1958
1852
  """
1959
1853
  An optional token to guarantee the idempotency of cluster creation requests. If an active cluster with the provided token already exists, the request will not create a new cluster, but it will return the existing running cluster's ID instead. If you specify the idempotency token, upon failure, you can retry until the request succeeds. Databricks platform guarantees to launch exactly one cluster with that idempotency token. This token should have at most 64 characters.
1960
1854
  """
1961
1855
  return pulumi.get(self, "idempotency_token")
1962
1856
 
1963
- @property
1857
+ @_builtins.property
1964
1858
  @pulumi.getter(name="initScripts")
1965
1859
  def init_scripts(self) -> pulumi.Output[Optional[Sequence['outputs.ClusterInitScript']]]:
1966
1860
  return pulumi.get(self, "init_scripts")
1967
1861
 
1968
- @property
1862
+ @_builtins.property
1969
1863
  @pulumi.getter(name="instancePoolId")
1970
- def instance_pool_id(self) -> pulumi.Output[Optional[builtins.str]]:
1864
+ def instance_pool_id(self) -> pulumi.Output[Optional[_builtins.str]]:
1971
1865
  """
1972
1866
  To reduce cluster start time, you can attach a cluster to a predefined pool of idle instances. When attached to a pool, a cluster allocates its driver and worker nodes from the pool. If the pool does not have sufficient idle resources to accommodate the cluster's request, it expands by allocating new instances from the instance provider. When an attached cluster changes its state to `TERMINATED`, the instances it used are returned to the pool and reused by a different cluster.
1973
1867
  """
1974
1868
  return pulumi.get(self, "instance_pool_id")
1975
1869
 
1976
- @property
1870
+ @_builtins.property
1977
1871
  @pulumi.getter(name="isPinned")
1978
- def is_pinned(self) -> pulumi.Output[Optional[builtins.bool]]:
1872
+ def is_pinned(self) -> pulumi.Output[Optional[_builtins.bool]]:
1979
1873
  """
1980
1874
  boolean value specifying if the cluster is pinned (not pinned by default). You must be a Databricks administrator to use this. The pinned clusters' maximum number is [limited to 100](https://docs.databricks.com/clusters/clusters-manage.html#pin-a-cluster), so `apply` may fail if you have more than that (this number may change over time, so check Databricks documentation for actual number).
1981
1875
  """
1982
1876
  return pulumi.get(self, "is_pinned")
1983
1877
 
1984
- @property
1878
+ @_builtins.property
1985
1879
  @pulumi.getter(name="isSingleNode")
1986
- def is_single_node(self) -> pulumi.Output[Optional[builtins.bool]]:
1880
+ def is_single_node(self) -> pulumi.Output[Optional[_builtins.bool]]:
1987
1881
  """
1988
1882
  When set to true, Databricks will automatically set single node related `custom_tags`, `spark_conf`, and `num_workers`.
1989
1883
  """
1990
1884
  return pulumi.get(self, "is_single_node")
1991
1885
 
1992
- @property
1886
+ @_builtins.property
1993
1887
  @pulumi.getter
1994
- def kind(self) -> pulumi.Output[Optional[builtins.str]]:
1888
+ def kind(self) -> pulumi.Output[Optional[_builtins.str]]:
1995
1889
  """
1996
1890
  The kind of compute described by this compute specification. Possible values (see [API docs](https://docs.databricks.com/api/workspace/clusters/create#kind) for full list): `CLASSIC_PREVIEW` (if corresponding public preview is enabled).
1997
1891
  """
1998
1892
  return pulumi.get(self, "kind")
1999
1893
 
2000
- @property
1894
+ @_builtins.property
2001
1895
  @pulumi.getter
2002
1896
  def libraries(self) -> pulumi.Output[Optional[Sequence['outputs.ClusterLibrary']]]:
2003
1897
  return pulumi.get(self, "libraries")
2004
1898
 
2005
- @property
1899
+ @_builtins.property
2006
1900
  @pulumi.getter(name="noWait")
2007
- def no_wait(self) -> pulumi.Output[Optional[builtins.bool]]:
1901
+ def no_wait(self) -> pulumi.Output[Optional[_builtins.bool]]:
2008
1902
  """
2009
1903
  If true, the provider will not wait for the cluster to reach `RUNNING` state when creating the cluster, allowing cluster creation and library installation to continue asynchronously. Defaults to false (the provider will wait for cluster creation and library installation to succeed).
2010
-
2011
- The following example demonstrates how to create an autoscaling cluster with [Delta Cache](https://docs.databricks.com/delta/optimizations/delta-cache.html) enabled:
2012
-
2013
- ```python
2014
- import pulumi
2015
- import pulumi_databricks as databricks
2016
-
2017
- smallest = databricks.get_node_type(local_disk=True)
2018
- latest_lts = databricks.get_spark_version(long_term_support=True)
2019
- shared_autoscaling = databricks.Cluster("shared_autoscaling",
2020
- cluster_name="Shared Autoscaling",
2021
- spark_version=latest_lts.id,
2022
- node_type_id=smallest.id,
2023
- autotermination_minutes=20,
2024
- autoscale={
2025
- "min_workers": 1,
2026
- "max_workers": 50,
2027
- },
2028
- spark_conf={
2029
- "spark.databricks.io.cache.enabled": "true",
2030
- "spark.databricks.io.cache.maxDiskUsage": "50g",
2031
- "spark.databricks.io.cache.maxMetaDataCache": "1g",
2032
- })
2033
- ```
2034
1904
  """
2035
1905
  return pulumi.get(self, "no_wait")
2036
1906
 
2037
- @property
1907
+ @_builtins.property
2038
1908
  @pulumi.getter(name="nodeTypeId")
2039
- def node_type_id(self) -> pulumi.Output[builtins.str]:
1909
+ def node_type_id(self) -> pulumi.Output[_builtins.str]:
2040
1910
  """
2041
1911
  Any supported get_node_type id. If `instance_pool_id` is specified, this field is not needed.
2042
1912
  """
2043
1913
  return pulumi.get(self, "node_type_id")
2044
1914
 
2045
- @property
1915
+ @_builtins.property
2046
1916
  @pulumi.getter(name="numWorkers")
2047
- def num_workers(self) -> pulumi.Output[Optional[builtins.int]]:
1917
+ def num_workers(self) -> pulumi.Output[Optional[_builtins.int]]:
2048
1918
  """
2049
1919
  Number of worker nodes that this cluster should have. A cluster has one Spark driver and `num_workers` executors for a total of `num_workers` + 1 Spark nodes.
2050
1920
  """
2051
1921
  return pulumi.get(self, "num_workers")
2052
1922
 
2053
- @property
1923
+ @_builtins.property
2054
1924
  @pulumi.getter(name="policyId")
2055
- def policy_id(self) -> pulumi.Output[Optional[builtins.str]]:
1925
+ def policy_id(self) -> pulumi.Output[Optional[_builtins.str]]:
2056
1926
  """
2057
1927
  Identifier of Cluster Policy to validate cluster and preset certain defaults. *The primary use for cluster policies is to allow users to create policy-scoped clusters via UI rather than sharing configuration for API-created clusters.* For example, when you specify `policy_id` of [external metastore](https://docs.databricks.com/administration-guide/clusters/policies.html#external-metastore-policy) policy, you still have to fill in relevant keys for `spark_conf`. If relevant fields aren't filled in, then it will cause the configuration drift detected on each plan/apply, and Pulumi will try to apply the detected changes.
2058
1928
  """
2059
1929
  return pulumi.get(self, "policy_id")
2060
1930
 
2061
- @property
1931
+ @_builtins.property
1932
+ @pulumi.getter(name="providerConfig")
1933
+ def provider_config(self) -> pulumi.Output[Optional['outputs.ClusterProviderConfig']]:
1934
+ """
1935
+ Configure the provider for management through the account provider. This block consists of the following fields:
1936
+ """
1937
+ return pulumi.get(self, "provider_config")
1938
+
1939
+ @_builtins.property
2062
1940
  @pulumi.getter(name="remoteDiskThroughput")
2063
- def remote_disk_throughput(self) -> pulumi.Output[Optional[builtins.int]]:
1941
+ def remote_disk_throughput(self) -> pulumi.Output[Optional[_builtins.int]]:
2064
1942
  return pulumi.get(self, "remote_disk_throughput")
2065
1943
 
2066
- @property
1944
+ @_builtins.property
2067
1945
  @pulumi.getter(name="runtimeEngine")
2068
- def runtime_engine(self) -> pulumi.Output[Optional[builtins.str]]:
1946
+ def runtime_engine(self) -> pulumi.Output[Optional[_builtins.str]]:
2069
1947
  """
2070
1948
  The type of runtime engine to use. If not specified, the runtime engine type is inferred based on the spark_version value. Allowed values include: `PHOTON`, `STANDARD`.
2071
1949
  """
2072
1950
  return pulumi.get(self, "runtime_engine")
2073
1951
 
2074
- @property
1952
+ @_builtins.property
2075
1953
  @pulumi.getter(name="singleUserName")
2076
- def single_user_name(self) -> pulumi.Output[Optional[builtins.str]]:
1954
+ def single_user_name(self) -> pulumi.Output[Optional[_builtins.str]]:
2077
1955
  """
2078
1956
  The optional user name of the user (or group name if `kind` is specified) to assign to an interactive cluster. This field is required when using `data_security_mode` set to `SINGLE_USER` or AAD Passthrough for Azure Data Lake Storage (ADLS) with a single-user cluster (i.e., not high-concurrency clusters).
2079
1957
  """
2080
1958
  return pulumi.get(self, "single_user_name")
2081
1959
 
2082
- @property
1960
+ @_builtins.property
2083
1961
  @pulumi.getter(name="sparkConf")
2084
- def spark_conf(self) -> pulumi.Output[Optional[Mapping[str, builtins.str]]]:
1962
+ def spark_conf(self) -> pulumi.Output[Optional[Mapping[str, _builtins.str]]]:
2085
1963
  """
2086
1964
  should have the following items:
2087
1965
  * `spark.databricks.repl.allowedLanguages` set to a list of supported languages, for example: `python,sql`, or `python,sql,r`. Scala is not supported!
@@ -2089,57 +1967,57 @@ class Cluster(pulumi.CustomResource):
2089
1967
  """
2090
1968
  return pulumi.get(self, "spark_conf")
2091
1969
 
2092
- @property
1970
+ @_builtins.property
2093
1971
  @pulumi.getter(name="sparkEnvVars")
2094
- def spark_env_vars(self) -> pulumi.Output[Optional[Mapping[str, builtins.str]]]:
1972
+ def spark_env_vars(self) -> pulumi.Output[Optional[Mapping[str, _builtins.str]]]:
2095
1973
  """
2096
1974
  Map with environment variable key-value pairs to fine-tune Spark clusters. Key-value pairs of the form (X,Y) are exported (i.e., X='Y') while launching the driver and workers.
2097
1975
  """
2098
1976
  return pulumi.get(self, "spark_env_vars")
2099
1977
 
2100
- @property
1978
+ @_builtins.property
2101
1979
  @pulumi.getter(name="sparkVersion")
2102
- def spark_version(self) -> pulumi.Output[builtins.str]:
1980
+ def spark_version(self) -> pulumi.Output[_builtins.str]:
2103
1981
  """
2104
1982
  [Runtime version](https://docs.databricks.com/runtime/index.html) of the cluster. Any supported get_spark_version id. We advise using Cluster Policies to restrict the list of versions for simplicity while maintaining enough control.
2105
1983
  """
2106
1984
  return pulumi.get(self, "spark_version")
2107
1985
 
2108
- @property
1986
+ @_builtins.property
2109
1987
  @pulumi.getter(name="sshPublicKeys")
2110
- def ssh_public_keys(self) -> pulumi.Output[Optional[Sequence[builtins.str]]]:
1988
+ def ssh_public_keys(self) -> pulumi.Output[Optional[Sequence[_builtins.str]]]:
2111
1989
  """
2112
1990
  SSH public key contents that will be added to each Spark node in this cluster. The corresponding private keys can be used to login with the user name ubuntu on port 2200. You can specify up to 10 keys.
2113
1991
  """
2114
1992
  return pulumi.get(self, "ssh_public_keys")
2115
1993
 
2116
- @property
1994
+ @_builtins.property
2117
1995
  @pulumi.getter
2118
- def state(self) -> pulumi.Output[builtins.str]:
1996
+ def state(self) -> pulumi.Output[_builtins.str]:
2119
1997
  """
2120
1998
  (string) State of the cluster.
2121
1999
  """
2122
2000
  return pulumi.get(self, "state")
2123
2001
 
2124
- @property
2002
+ @_builtins.property
2125
2003
  @pulumi.getter(name="totalInitialRemoteDiskSize")
2126
- def total_initial_remote_disk_size(self) -> pulumi.Output[Optional[builtins.int]]:
2004
+ def total_initial_remote_disk_size(self) -> pulumi.Output[Optional[_builtins.int]]:
2127
2005
  return pulumi.get(self, "total_initial_remote_disk_size")
2128
2006
 
2129
- @property
2007
+ @_builtins.property
2130
2008
  @pulumi.getter
2131
- def url(self) -> pulumi.Output[builtins.str]:
2009
+ def url(self) -> pulumi.Output[_builtins.str]:
2132
2010
  return pulumi.get(self, "url")
2133
2011
 
2134
- @property
2012
+ @_builtins.property
2135
2013
  @pulumi.getter(name="useMlRuntime")
2136
- def use_ml_runtime(self) -> pulumi.Output[Optional[builtins.bool]]:
2014
+ def use_ml_runtime(self) -> pulumi.Output[Optional[_builtins.bool]]:
2137
2015
  """
2138
2016
  Whether the ML runtime should be selected or not. The actual runtime is determined by `spark_version` (DBR release), this field `use_ml_runtime`, and whether `node_type_id` is a GPU node or not.
2139
2017
  """
2140
2018
  return pulumi.get(self, "use_ml_runtime")
2141
2019
 
2142
- @property
2020
+ @_builtins.property
2143
2021
  @pulumi.getter(name="workloadType")
2144
2022
  def workload_type(self) -> pulumi.Output[Optional['outputs.ClusterWorkloadType']]:
2145
2023
  return pulumi.get(self, "workload_type")