pulumi-databricks 1.77.0a1762276204__py3-none-any.whl → 1.78.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (48)
  1. pulumi_databricks/_inputs.py +117 -120
  2. pulumi_databricks/account_federation_policy.py +24 -0
  3. pulumi_databricks/account_network_policy.py +50 -0
  4. pulumi_databricks/alert_v2.py +72 -0
  5. pulumi_databricks/app.py +64 -0
  6. pulumi_databricks/apps_settings_custom_template.py +66 -0
  7. pulumi_databricks/catalog.py +7 -7
  8. pulumi_databricks/cluster_policy.py +120 -0
  9. pulumi_databricks/config/__init__.pyi +4 -0
  10. pulumi_databricks/config/vars.py +8 -0
  11. pulumi_databricks/database_synced_database_table.py +212 -0
  12. pulumi_databricks/external_location.py +131 -7
  13. pulumi_databricks/file.py +2 -2
  14. pulumi_databricks/get_aws_assume_role_policy.py +14 -14
  15. pulumi_databricks/get_aws_bucket_policy.py +10 -10
  16. pulumi_databricks/get_aws_unity_catalog_assume_role_policy.py +10 -10
  17. pulumi_databricks/get_aws_unity_catalog_policy.py +10 -10
  18. pulumi_databricks/get_cluster.py +54 -0
  19. pulumi_databricks/get_current_config.py +4 -4
  20. pulumi_databricks/get_metastore.py +6 -6
  21. pulumi_databricks/get_notebook.py +20 -1
  22. pulumi_databricks/get_service_principals.py +64 -0
  23. pulumi_databricks/instance_profile.py +0 -182
  24. pulumi_databricks/metastore.py +81 -7
  25. pulumi_databricks/metastore_data_access.py +48 -0
  26. pulumi_databricks/mlflow_webhook.py +4 -4
  27. pulumi_databricks/mws_credentials.py +10 -10
  28. pulumi_databricks/mws_customer_managed_keys.py +0 -288
  29. pulumi_databricks/mws_log_delivery.py +146 -0
  30. pulumi_databricks/mws_storage_configurations.py +16 -16
  31. pulumi_databricks/mws_vpc_endpoint.py +56 -56
  32. pulumi_databricks/mws_workspaces.py +85 -51
  33. pulumi_databricks/notebook.py +49 -0
  34. pulumi_databricks/outputs.py +99 -76
  35. pulumi_databricks/permission_assignment.py +49 -0
  36. pulumi_databricks/permissions.py +6 -6
  37. pulumi_databricks/provider.py +36 -1
  38. pulumi_databricks/pulumi-plugin.json +1 -1
  39. pulumi_databricks/recipient.py +74 -0
  40. pulumi_databricks/registered_model.py +7 -7
  41. pulumi_databricks/schema.py +7 -7
  42. pulumi_databricks/service_principal_federation_policy.py +28 -0
  43. pulumi_databricks/sql_table.py +7 -7
  44. pulumi_databricks/volume.py +7 -7
  45. {pulumi_databricks-1.77.0a1762276204.dist-info → pulumi_databricks-1.78.0.dist-info}/METADATA +1 -1
  46. {pulumi_databricks-1.77.0a1762276204.dist-info → pulumi_databricks-1.78.0.dist-info}/RECORD +48 -48
  47. {pulumi_databricks-1.77.0a1762276204.dist-info → pulumi_databricks-1.78.0.dist-info}/WHEEL +0 -0
  48. {pulumi_databricks-1.77.0a1762276204.dist-info → pulumi_databricks-1.78.0.dist-info}/top_level.txt +0 -0
@@ -290,6 +290,39 @@ class AppsSettingsCustomTemplate(pulumi.CustomResource):
 
  This example defines a template that requests specific workspace resources with permissions granted.
 
+ ```python
+ import pulumi
+ import pulumi_databricks as databricks
+
+ resources_example = databricks.AppsSettingsCustomTemplate("resources_example",
+     name="my-resource-template",
+     description="Template that requires secret and SQL warehouse access",
+     git_repo="https://github.com/example/resource-app.git",
+     path="resource-template",
+     git_provider="github",
+     manifest={
+         "version": 1,
+         "name": "resource-consuming-app",
+         "description": "This app requires access to a secret and SQL warehouse.",
+         "resource_specs": [
+             {
+                 "name": "my-secret",
+                 "description": "A secret needed by the app",
+                 "secret_spec": {
+                     "permission": "READ",
+                 },
+             },
+             {
+                 "name": "warehouse",
+                 "description": "Warehouse access",
+                 "sql_warehouse_spec": {
+                     "permission": "CAN_USE",
+                 },
+             },
+         ],
+     })
+ ```
+
  ## Import
 
  As of Pulumi v1.5, resources can be imported through configuration.
@@ -361,6 +394,39 @@ class AppsSettingsCustomTemplate(pulumi.CustomResource):
 
  This example defines a template that requests specific workspace resources with permissions granted.
 
+ ```python
+ import pulumi
+ import pulumi_databricks as databricks
+
+ resources_example = databricks.AppsSettingsCustomTemplate("resources_example",
+     name="my-resource-template",
+     description="Template that requires secret and SQL warehouse access",
+     git_repo="https://github.com/example/resource-app.git",
+     path="resource-template",
+     git_provider="github",
+     manifest={
+         "version": 1,
+         "name": "resource-consuming-app",
+         "description": "This app requires access to a secret and SQL warehouse.",
+         "resource_specs": [
+             {
+                 "name": "my-secret",
+                 "description": "A secret needed by the app",
+                 "secret_spec": {
+                     "permission": "READ",
+                 },
+             },
+             {
+                 "name": "warehouse",
+                 "description": "Warehouse access",
+                 "sql_warehouse_spec": {
+                     "permission": "CAN_USE",
+                 },
+             },
+         ],
+     })
+ ```
+
  ## Import
 
  As of Pulumi v1.5, resources can be imported through configuration.
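The repeated example above declares the manifest inline. A minimal alternative sketch loads the same manifest from a checked-in JSON file instead; the `app.json` file name is hypothetical:

```python
import json
import pulumi_databricks as databricks

# Hypothetical file containing the manifest shown in the hunks above.
with open("app.json") as f:
    manifest = json.load(f)

template = databricks.AppsSettingsCustomTemplate("from_file",
    name="my-resource-template",
    description="Template that requires secret and SQL warehouse access",
    git_repo="https://github.com/example/resource-app.git",
    path="resource-template",
    git_provider="github",
    manifest=manifest)
```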
@@ -51,7 +51,7 @@ class CatalogArgs:
  :param pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]] properties: Extensible Catalog properties.
  :param pulumi.Input[_builtins.str] provider_name: For Delta Sharing Catalogs: the name of the delta sharing provider. Change forces creation of a new resource.
  :param pulumi.Input[_builtins.str] share_name: For Delta Sharing Catalogs: the name of the share under the share provider. Change forces creation of a new resource.
- :param pulumi.Input[_builtins.str] storage_root: Managed location of the catalog. Location in cloud storage where data for managed tables will be stored. If not specified, the location will default to the metastore root location. Change forces creation of a new resource.
+ :param pulumi.Input[_builtins.str] storage_root: Managed location of the catalog. Location in cloud storage where data for managed tables will be stored. If the URL contains special characters, such as space, `&`, etc., they should be percent-encoded (space > `%20`, etc.). If not specified, the location will default to the metastore root location. Change forces creation of a new resource.
  """
  if browse_only is not None:
  pulumi.set(__self__, "browse_only", browse_only)
@@ -261,7 +261,7 @@ class CatalogArgs:
  @pulumi.getter(name="storageRoot")
  def storage_root(self) -> Optional[pulumi.Input[_builtins.str]]:
  """
- Managed location of the catalog. Location in cloud storage where data for managed tables will be stored. If not specified, the location will default to the metastore root location. Change forces creation of a new resource.
+ Managed location of the catalog. Location in cloud storage where data for managed tables will be stored. If the URL contains special characters, such as space, `&`, etc., they should be percent-encoded (space > `%20`, etc.). If not specified, the location will default to the metastore root location. Change forces creation of a new resource.
  """
  return pulumi.get(self, "storage_root")
 
@@ -316,7 +316,7 @@ class _CatalogState:
  :param pulumi.Input[_builtins.str] securable_type: the type of Unity Catalog securable.
  :param pulumi.Input[_builtins.str] share_name: For Delta Sharing Catalogs: the name of the share under the share provider. Change forces creation of a new resource.
  :param pulumi.Input[_builtins.str] storage_location: effective storage Location URL (full path) for managed tables within catalog.
- :param pulumi.Input[_builtins.str] storage_root: Managed location of the catalog. Location in cloud storage where data for managed tables will be stored. If not specified, the location will default to the metastore root location. Change forces creation of a new resource.
+ :param pulumi.Input[_builtins.str] storage_root: Managed location of the catalog. Location in cloud storage where data for managed tables will be stored. If the URL contains special characters, such as space, `&`, etc., they should be percent-encoded (space > `%20`, etc.). If not specified, the location will default to the metastore root location. Change forces creation of a new resource.
  :param pulumi.Input[_builtins.int] updated_at: time at which this catalog was last modified, in epoch milliseconds..
  :param pulumi.Input[_builtins.str] updated_by: username of user who last modified catalog.
  """
@@ -613,7 +613,7 @@ class _CatalogState:
  @pulumi.getter(name="storageRoot")
  def storage_root(self) -> Optional[pulumi.Input[_builtins.str]]:
  """
- Managed location of the catalog. Location in cloud storage where data for managed tables will be stored. If not specified, the location will default to the metastore root location. Change forces creation of a new resource.
+ Managed location of the catalog. Location in cloud storage where data for managed tables will be stored. If the URL contains special characters, such as space, `&`, etc., they should be percent-encoded (space > `%20`, etc.). If not specified, the location will default to the metastore root location. Change forces creation of a new resource.
  """
  return pulumi.get(self, "storage_root")
 
@@ -734,7 +734,7 @@ class Catalog(pulumi.CustomResource):
  :param pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]] properties: Extensible Catalog properties.
  :param pulumi.Input[_builtins.str] provider_name: For Delta Sharing Catalogs: the name of the delta sharing provider. Change forces creation of a new resource.
  :param pulumi.Input[_builtins.str] share_name: For Delta Sharing Catalogs: the name of the share under the share provider. Change forces creation of a new resource.
- :param pulumi.Input[_builtins.str] storage_root: Managed location of the catalog. Location in cloud storage where data for managed tables will be stored. If not specified, the location will default to the metastore root location. Change forces creation of a new resource.
+ :param pulumi.Input[_builtins.str] storage_root: Managed location of the catalog. Location in cloud storage where data for managed tables will be stored. If the URL contains special characters, such as space, `&`, etc., they should be percent-encoded (space > `%20`, etc.). If not specified, the location will default to the metastore root location. Change forces creation of a new resource.
  """
  ...
  @overload
@@ -915,7 +915,7 @@ class Catalog(pulumi.CustomResource):
  :param pulumi.Input[_builtins.str] securable_type: the type of Unity Catalog securable.
  :param pulumi.Input[_builtins.str] share_name: For Delta Sharing Catalogs: the name of the share under the share provider. Change forces creation of a new resource.
  :param pulumi.Input[_builtins.str] storage_location: effective storage Location URL (full path) for managed tables within catalog.
- :param pulumi.Input[_builtins.str] storage_root: Managed location of the catalog. Location in cloud storage where data for managed tables will be stored. If not specified, the location will default to the metastore root location. Change forces creation of a new resource.
+ :param pulumi.Input[_builtins.str] storage_root: Managed location of the catalog. Location in cloud storage where data for managed tables will be stored. If the URL contains special characters, such as space, `&`, etc., they should be percent-encoded (space > `%20`, etc.). If not specified, the location will default to the metastore root location. Change forces creation of a new resource.
  :param pulumi.Input[_builtins.int] updated_at: time at which this catalog was last modified, in epoch milliseconds..
  :param pulumi.Input[_builtins.str] updated_by: username of user who last modified catalog.
  """
@@ -1109,7 +1109,7 @@ class Catalog(pulumi.CustomResource):
  @pulumi.getter(name="storageRoot")
  def storage_root(self) -> pulumi.Output[Optional[_builtins.str]]:
  """
- Managed location of the catalog. Location in cloud storage where data for managed tables will be stored. If not specified, the location will default to the metastore root location. Change forces creation of a new resource.
+ Managed location of the catalog. Location in cloud storage where data for managed tables will be stored. If the URL contains special characters, such as space, `&`, etc., they should be percent-encoded (space > `%20`, etc.). If not specified, the location will default to the metastore root location. Change forces creation of a new resource.
  """
  return pulumi.get(self, "storage_root")
 
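Every version of the `storage_root` docstring above now requires percent-encoding of special characters. A minimal sketch of satisfying that with the standard library; the bucket path and catalog name are hypothetical:

```python
from urllib.parse import quote

import pulumi_databricks as databricks

# Hypothetical managed location containing a space; per the updated
# docstring, special characters must be percent-encoded.
raw_root = "s3://my-bucket/catalog root"
storage_root = quote(raw_root, safe=":/")  # -> "s3://my-bucket/catalog%20root"

catalog = databricks.Catalog("example",
    name="example_catalog",
    storage_root=storage_root)
```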
@@ -298,6 +298,66 @@ class ClusterPolicy(pulumi.CustomResource):
  * A user who has both cluster create permission and access to cluster policies can select the Free form policy and policies they have access to.
  * A user that has access to only cluster policies, can select the policies they have access to.
 
+ ## Example Usage
+
+ Let us take a look at an example of how you can manage two teams: Marketing and Data Engineering. In the following scenario we want the marketing team to have a really good query experience, so we enabled delta cache for them. On the other hand we want the data engineering team to be able to utilize bigger clusters so we increased the dbus per hour that they can spend. This strategy allows your marketing users and data engineering users to use Databricks in a self service manner but have a different experience in regards to security and performance. And down the line if you need to add more global settings you can propagate them through the "base cluster policy".
+
+ `modules/base-cluster-policy/main.tf` could look like:
+
+ ```python
+ import pulumi
+ import json
+ import pulumi_databricks as databricks
+ import pulumi_std as std
+
+ config = pulumi.Config()
+ # Team that performs the work
+ team = config.require_object("team")
+ # Cluster policy overrides
+ policy_overrides = config.require_object("policyOverrides")
+ default_policy = {
+     "dbus_per_hour": {
+         "type": "range",
+         "maxValue": 10,
+     },
+     "autotermination_minutes": {
+         "type": "fixed",
+         "value": 20,
+         "hidden": True,
+     },
+     "custom_tags.Team": {
+         "type": "fixed",
+         "value": team,
+     },
+ }
+ fair_use = databricks.ClusterPolicy("fair_use",
+     name=f"{team} cluster policy",
+     definition=json.dumps(std.merge(input=[
+         default_policy,
+         policy_overrides,
+     ]).result),
+     libraries=[
+         {
+             "pypi": {
+                 "package": "databricks-sdk==0.12.0",
+             },
+         },
+         {
+             "maven": {
+                 "coordinates": "com.oracle.database.jdbc:ojdbc8:XXXX",
+             },
+         },
+     ])
+ can_use_cluster_policyinstance_profile = databricks.Permissions("can_use_cluster_policyinstance_profile",
+     cluster_policy_id=fair_use.id,
+     access_controls=[{
+         "group_name": team,
+         "permission_level": "CAN_USE",
+     }])
+ ```
+
+ And custom instances of that base policy module for our marketing and data engineering teams would look like:
+
  ### Overriding the built-in cluster policies
 
  You can override built-in cluster policies by creating a `ClusterPolicy` resource with following attributes:
@@ -405,6 +465,66 @@ class ClusterPolicy(pulumi.CustomResource):
  * A user who has both cluster create permission and access to cluster policies can select the Free form policy and policies they have access to.
  * A user that has access to only cluster policies, can select the policies they have access to.
 
+ ## Example Usage
+
+ Let us take a look at an example of how you can manage two teams: Marketing and Data Engineering. In the following scenario we want the marketing team to have a really good query experience, so we enabled delta cache for them. On the other hand we want the data engineering team to be able to utilize bigger clusters so we increased the dbus per hour that they can spend. This strategy allows your marketing users and data engineering users to use Databricks in a self service manner but have a different experience in regards to security and performance. And down the line if you need to add more global settings you can propagate them through the "base cluster policy".
+
+ `modules/base-cluster-policy/main.tf` could look like:
+
+ ```python
+ import pulumi
+ import json
+ import pulumi_databricks as databricks
+ import pulumi_std as std
+
+ config = pulumi.Config()
+ # Team that performs the work
+ team = config.require_object("team")
+ # Cluster policy overrides
+ policy_overrides = config.require_object("policyOverrides")
+ default_policy = {
+     "dbus_per_hour": {
+         "type": "range",
+         "maxValue": 10,
+     },
+     "autotermination_minutes": {
+         "type": "fixed",
+         "value": 20,
+         "hidden": True,
+     },
+     "custom_tags.Team": {
+         "type": "fixed",
+         "value": team,
+     },
+ }
+ fair_use = databricks.ClusterPolicy("fair_use",
+     name=f"{team} cluster policy",
+     definition=json.dumps(std.merge(input=[
+         default_policy,
+         policy_overrides,
+     ]).result),
+     libraries=[
+         {
+             "pypi": {
+                 "package": "databricks-sdk==0.12.0",
+             },
+         },
+         {
+             "maven": {
+                 "coordinates": "com.oracle.database.jdbc:ojdbc8:XXXX",
+             },
+         },
+     ])
+ can_use_cluster_policyinstance_profile = databricks.Permissions("can_use_cluster_policyinstance_profile",
+     cluster_policy_id=fair_use.id,
+     access_controls=[{
+         "group_name": team,
+         "permission_level": "CAN_USE",
+     }])
+ ```
+
+ And custom instances of that base policy module for our marketing and data engineering teams would look like:
+
  ### Overriding the built-in cluster policies
 
  You can override built-in cluster policies by creating a `ClusterPolicy` resource with following attributes:
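The new example merges the base policy with per-team overrides through `pulumi_std`. When both maps are plain Python dictionaries, the same shallow merge can be sketched without the extra provider, assuming overrides only replace top-level keys (the override values here are hypothetical):

```python
import json

default_policy = {
    "dbus_per_hour": {"type": "range", "maxValue": 10},
    "autotermination_minutes": {"type": "fixed", "value": 20, "hidden": True},
}
# Hypothetical per-team override: raise the DBU ceiling.
policy_overrides = {
    "dbus_per_hour": {"type": "range", "maxValue": 25},
}

# Later keys win, mirroring the top-level semantics of std.merge.
definition = json.dumps({**default_policy, **policy_overrides})
```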
@@ -54,6 +54,8 @@ debugHeaders: Optional[bool]
 
  debugTruncateBytes: Optional[int]
 
+ experimentalIsUnifiedHost: Optional[bool]
+
  googleCredentials: Optional[str]
 
  googleServiceAccount: Optional[str]
@@ -86,3 +88,5 @@ username: Optional[str]
 
  warehouseId: Optional[str]
 
+ workspaceId: Optional[str]
+
@@ -100,6 +100,10 @@ class _ExportableConfig(types.ModuleType):
  def debug_truncate_bytes(self) -> Optional[int]:
  return __config__.get_int('debugTruncateBytes')
 
+ @_builtins.property
+ def experimental_is_unified_host(self) -> Optional[bool]:
+     return __config__.get_bool('experimentalIsUnifiedHost')
+
  @_builtins.property
  def google_credentials(self) -> Optional[str]:
  return __config__.get('googleCredentials')
@@ -164,3 +168,7 @@ class _ExportableConfig(types.ModuleType):
  def warehouse_id(self) -> Optional[str]:
  return __config__.get('warehouseId')
 
+ @_builtins.property
+ def workspace_id(self) -> Optional[str]:
+     return __config__.get('workspaceId')
+
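The two new provider options surface through the generated config module via the `_ExportableConfig` properties shown above. A sketch of reading them from stack configuration; the config values are hypothetical and this assumes the usual Pulumi pattern of setting them with `pulumi config set`:

```python
import pulumi_databricks as databricks

# Assumes values were set beforehand, e.g.:
#   pulumi config set databricks:workspaceId 1234567890123456
#   pulumi config set databricks:experimentalIsUnifiedHost true
workspace_id = databricks.config.workspace_id                     # Optional[str]
is_unified_host = databricks.config.experimental_is_unified_host  # Optional[bool]
```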
@@ -298,15 +298,88 @@ class DatabaseSyncedDatabaseTable(pulumi.CustomResource):
 
  This example creates a Synced Database Table inside a Database Catalog.
 
+ ```python
+ import pulumi
+ import pulumi_databricks as databricks
+
+ this = databricks.DatabaseSyncedDatabaseTable("this",
+     name="my_database_catalog.public.synced_table",
+     logical_database_name="databricks_postgres",
+     spec={
+         "scheduling_policy": "SNAPSHOT",
+         "source_table_full_name": "source_delta.tpch.customer",
+         "primary_key_columns": ["c_custkey"],
+         "create_database_objects_if_missing": True,
+         "new_pipeline_spec": {
+             "storage_catalog": "source_delta",
+             "storage_schema": "tpch",
+         },
+     })
+ ```
+
  ### Creating a Synced Database Table inside a Standard Catalog
 
  This example creates a Synced Database Table inside a Standard Catalog.
 
+ ```python
+ import pulumi
+ import pulumi_databricks as databricks
+
+ this = databricks.DatabaseSyncedDatabaseTable("this",
+     name="my_standard_catalog.public.synced_table",
+     logical_database_name="databricks_postgres",
+     database_instance_name="my-database-instance",
+     spec={
+         "scheduling_policy": "SNAPSHOT",
+         "source_table_full_name": "source_delta.tpch.customer",
+         "primary_key_columns": ["c_custkey"],
+         "create_database_objects_if_missing": True,
+         "new_pipeline_spec": {
+             "storage_catalog": "source_delta",
+             "storage_schema": "tpch",
+         },
+     })
+ ```
+
  ### Creating multiple Synced Database Tables and bin packing them into a single pipeline
 
  This example creates two Synced Database Tables. The first one specifies a new pipeline spec,
  which generates a new pipeline. The second one utilizes the pipeline ID of the first table.
 
+ ```python
+ import pulumi
+ import pulumi_databricks as databricks
+
+ instance = databricks.DatabaseInstance("instance",
+     name="my-database-instance",
+     capacity="CU_1")
+ synced_table1 = databricks.DatabaseSyncedDatabaseTable("synced_table_1",
+     name="my_standard_catalog.public.synced_table1",
+     logical_database_name="databricks_postgres",
+     database_instance_name=instance.name,
+     spec={
+         "scheduling_policy": "SNAPSHOT",
+         "source_table_full_name": "source_delta.tpch.customer",
+         "primary_key_columns": ["c_custkey"],
+         "create_database_objects_if_missing": True,
+         "new_pipeline_spec": {
+             "storage_catalog": "source_delta",
+             "storage_schema": "tpch",
+         },
+     })
+ synced_table2 = databricks.DatabaseSyncedDatabaseTable("synced_table_2",
+     name="my_standard_catalog.public.synced_table2",
+     logical_database_name="databricks_postgres",
+     database_instance_name=instance.name,
+     spec={
+         "scheduling_policy": "SNAPSHOT",
+         "source_table_full_name": "source_delta.tpch.customer",
+         "primary_key_columns": ["c_custkey"],
+         "create_database_objects_if_missing": True,
+         "existing_pipeline_id": synced_table1.data_synchronization_status.pipeline_id,
+     })
+ ```
+
  ### Creating a Synced Database Table with a custom Jobs schedule
 
  This example creates a Synced Database Table and customizes the pipeline schedule. It assumes you already have
@@ -316,6 +389,39 @@ class DatabaseSyncedDatabaseTable(pulumi.CustomResource):
  - A schema in the standard catalog named `"default"`
  - A source delta table named `"source_delta.schema.customer"` with the primary key `"c_custkey"`
 
+ ```python
+ import pulumi
+ import pulumi_databricks as databricks
+
+ synced_table = databricks.DatabaseSyncedDatabaseTable("synced_table",
+     name="my_standard_catalog.default.my_synced_table",
+     logical_database_name="terraform_test_db",
+     database_instance_name="my-database-instance",
+     spec={
+         "scheduling_policy": "SNAPSHOT",
+         "source_table_full_name": "source_delta.schema.customer",
+         "primary_key_columns": ["c_custkey"],
+         "create_database_objects_if_missing": True,
+         "new_pipeline_spec": {
+             "storage_catalog": "source_delta",
+             "storage_schema": "schema",
+         },
+     })
+ sync_pipeline_schedule_job = databricks.Job("sync_pipeline_schedule_job",
+     name="Synced Pipeline Refresh",
+     description="Job to schedule synced database table pipeline. ",
+     tasks=[{
+         "task_key": "synced-table-pipeline",
+         "pipeline_task": {
+             "pipeline_id": synced_table.data_synchronization_status.pipeline_id,
+         },
+     }],
+     schedule={
+         "quartz_cron_expression": "0 0 0 * * ?",
+         "timezone_id": "Europe/Helsinki",
+     })
+ ```
+
  ## Import
 
  As of Pulumi v1.5, resources can be imported through configuration.
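The bin-packing and job-schedule examples above both hang further resources off `data_synchronization_status.pipeline_id`. A sketch of additionally exporting that ID as a stack output so other stacks or scripts can reuse the generated pipeline; the resource arguments are copied from the example above:

```python
import pulumi
import pulumi_databricks as databricks

synced_table = databricks.DatabaseSyncedDatabaseTable("synced_table",
    name="my_standard_catalog.default.my_synced_table",
    logical_database_name="terraform_test_db",
    database_instance_name="my-database-instance",
    spec={
        "scheduling_policy": "SNAPSHOT",
        "source_table_full_name": "source_delta.schema.customer",
        "primary_key_columns": ["c_custkey"],
        "create_database_objects_if_missing": True,
        "new_pipeline_spec": {
            "storage_catalog": "source_delta",
            "storage_schema": "schema",
        },
    })

# Surface the generated pipeline ID for reuse outside this program.
pulumi.export("synced_pipeline_id",
    synced_table.data_synchronization_status.pipeline_id)
```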
@@ -375,15 +481,88 @@ class DatabaseSyncedDatabaseTable(pulumi.CustomResource):
 
  This example creates a Synced Database Table inside a Database Catalog.
 
+ ```python
+ import pulumi
+ import pulumi_databricks as databricks
+
+ this = databricks.DatabaseSyncedDatabaseTable("this",
+     name="my_database_catalog.public.synced_table",
+     logical_database_name="databricks_postgres",
+     spec={
+         "scheduling_policy": "SNAPSHOT",
+         "source_table_full_name": "source_delta.tpch.customer",
+         "primary_key_columns": ["c_custkey"],
+         "create_database_objects_if_missing": True,
+         "new_pipeline_spec": {
+             "storage_catalog": "source_delta",
+             "storage_schema": "tpch",
+         },
+     })
+ ```
+
  ### Creating a Synced Database Table inside a Standard Catalog
 
  This example creates a Synced Database Table inside a Standard Catalog.
 
+ ```python
+ import pulumi
+ import pulumi_databricks as databricks
+
+ this = databricks.DatabaseSyncedDatabaseTable("this",
+     name="my_standard_catalog.public.synced_table",
+     logical_database_name="databricks_postgres",
+     database_instance_name="my-database-instance",
+     spec={
+         "scheduling_policy": "SNAPSHOT",
+         "source_table_full_name": "source_delta.tpch.customer",
+         "primary_key_columns": ["c_custkey"],
+         "create_database_objects_if_missing": True,
+         "new_pipeline_spec": {
+             "storage_catalog": "source_delta",
+             "storage_schema": "tpch",
+         },
+     })
+ ```
+
  ### Creating multiple Synced Database Tables and bin packing them into a single pipeline
 
  This example creates two Synced Database Tables. The first one specifies a new pipeline spec,
  which generates a new pipeline. The second one utilizes the pipeline ID of the first table.
 
+ ```python
+ import pulumi
+ import pulumi_databricks as databricks
+
+ instance = databricks.DatabaseInstance("instance",
+     name="my-database-instance",
+     capacity="CU_1")
+ synced_table1 = databricks.DatabaseSyncedDatabaseTable("synced_table_1",
+     name="my_standard_catalog.public.synced_table1",
+     logical_database_name="databricks_postgres",
+     database_instance_name=instance.name,
+     spec={
+         "scheduling_policy": "SNAPSHOT",
+         "source_table_full_name": "source_delta.tpch.customer",
+         "primary_key_columns": ["c_custkey"],
+         "create_database_objects_if_missing": True,
+         "new_pipeline_spec": {
+             "storage_catalog": "source_delta",
+             "storage_schema": "tpch",
+         },
+     })
+ synced_table2 = databricks.DatabaseSyncedDatabaseTable("synced_table_2",
+     name="my_standard_catalog.public.synced_table2",
+     logical_database_name="databricks_postgres",
+     database_instance_name=instance.name,
+     spec={
+         "scheduling_policy": "SNAPSHOT",
+         "source_table_full_name": "source_delta.tpch.customer",
+         "primary_key_columns": ["c_custkey"],
+         "create_database_objects_if_missing": True,
+         "existing_pipeline_id": synced_table1.data_synchronization_status.pipeline_id,
+     })
+ ```
+
  ### Creating a Synced Database Table with a custom Jobs schedule
 
  This example creates a Synced Database Table and customizes the pipeline schedule. It assumes you already have
@@ -393,6 +572,39 @@ class DatabaseSyncedDatabaseTable(pulumi.CustomResource):
  - A schema in the standard catalog named `"default"`
  - A source delta table named `"source_delta.schema.customer"` with the primary key `"c_custkey"`
 
+ ```python
+ import pulumi
+ import pulumi_databricks as databricks
+
+ synced_table = databricks.DatabaseSyncedDatabaseTable("synced_table",
+     name="my_standard_catalog.default.my_synced_table",
+     logical_database_name="terraform_test_db",
+     database_instance_name="my-database-instance",
+     spec={
+         "scheduling_policy": "SNAPSHOT",
+         "source_table_full_name": "source_delta.schema.customer",
+         "primary_key_columns": ["c_custkey"],
+         "create_database_objects_if_missing": True,
+         "new_pipeline_spec": {
+             "storage_catalog": "source_delta",
+             "storage_schema": "schema",
+         },
+     })
+ sync_pipeline_schedule_job = databricks.Job("sync_pipeline_schedule_job",
+     name="Synced Pipeline Refresh",
+     description="Job to schedule synced database table pipeline. ",
+     tasks=[{
+         "task_key": "synced-table-pipeline",
+         "pipeline_task": {
+             "pipeline_id": synced_table.data_synchronization_status.pipeline_id,
+         },
+     }],
+     schedule={
+         "quartz_cron_expression": "0 0 0 * * ?",
+         "timezone_id": "Europe/Helsinki",
+     })
+ ```
+
  ## Import
 
  As of Pulumi v1.5, resources can be imported through configuration.