@pulumi/databricks 1.43.0 → 1.44.0-alpha.1717175555

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@pulumi/databricks",
- "version": "1.43.0",
+ "version": "1.44.0-alpha.1717175555",
  "description": "A Pulumi package for creating and managing databricks cloud resources.",
  "keywords": [
  "pulumi",
@@ -24,6 +24,6 @@
  "pulumi": {
  "resource": true,
  "name": "databricks",
- "version": "1.43.0"
+ "version": "1.44.0-alpha.1717175555"
  }
  }
package/types/input.d.ts CHANGED
@@ -1725,22 +1725,22 @@ export interface GetJobJobSettingsSettingsHealthArgs {
  rules: pulumi.Input<pulumi.Input<inputs.GetJobJobSettingsSettingsHealthRuleArgs>[]>;
  }
  export interface GetJobJobSettingsSettingsHealthRule {
- metric?: string;
- op?: string;
- value?: number;
+ metric: string;
+ op: string;
+ value: number;
  }
  export interface GetJobJobSettingsSettingsHealthRuleArgs {
- metric?: pulumi.Input<string>;
- op?: pulumi.Input<string>;
- value?: pulumi.Input<number>;
+ metric: pulumi.Input<string>;
+ op: pulumi.Input<string>;
+ value: pulumi.Input<number>;
  }
  export interface GetJobJobSettingsSettingsJobCluster {
- jobClusterKey?: string;
- newCluster?: inputs.GetJobJobSettingsSettingsJobClusterNewCluster;
+ jobClusterKey: string;
+ newCluster: inputs.GetJobJobSettingsSettingsJobClusterNewCluster;
  }
  export interface GetJobJobSettingsSettingsJobClusterArgs {
- jobClusterKey?: pulumi.Input<string>;
- newCluster?: pulumi.Input<inputs.GetJobJobSettingsSettingsJobClusterNewClusterArgs>;
+ jobClusterKey: pulumi.Input<string>;
+ newCluster: pulumi.Input<inputs.GetJobJobSettingsSettingsJobClusterNewClusterArgs>;
  }
  export interface GetJobJobSettingsSettingsJobClusterNewCluster {
  applyPolicyDefaultValues?: boolean;
@@ -2500,7 +2500,7 @@ export interface GetJobJobSettingsSettingsTask {
  sparkPythonTask?: inputs.GetJobJobSettingsSettingsTaskSparkPythonTask;
  sparkSubmitTask?: inputs.GetJobJobSettingsSettingsTaskSparkSubmitTask;
  sqlTask?: inputs.GetJobJobSettingsSettingsTaskSqlTask;
- taskKey?: string;
+ taskKey: string;
  timeoutSeconds?: number;
  webhookNotifications?: inputs.GetJobJobSettingsSettingsTaskWebhookNotifications;
  }
@@ -2530,7 +2530,7 @@ export interface GetJobJobSettingsSettingsTaskArgs {
  sparkPythonTask?: pulumi.Input<inputs.GetJobJobSettingsSettingsTaskSparkPythonTaskArgs>;
  sparkSubmitTask?: pulumi.Input<inputs.GetJobJobSettingsSettingsTaskSparkSubmitTaskArgs>;
  sqlTask?: pulumi.Input<inputs.GetJobJobSettingsSettingsTaskSqlTaskArgs>;
- taskKey?: pulumi.Input<string>;
+ taskKey: pulumi.Input<string>;
  timeoutSeconds?: pulumi.Input<number>;
  webhookNotifications?: pulumi.Input<inputs.GetJobJobSettingsSettingsTaskWebhookNotificationsArgs>;
  }
@@ -2619,7 +2619,7 @@ export interface GetJobJobSettingsSettingsTaskForEachTaskTask {
  sparkPythonTask?: inputs.GetJobJobSettingsSettingsTaskForEachTaskTaskSparkPythonTask;
  sparkSubmitTask?: inputs.GetJobJobSettingsSettingsTaskForEachTaskTaskSparkSubmitTask;
  sqlTask?: inputs.GetJobJobSettingsSettingsTaskForEachTaskTaskSqlTask;
- taskKey?: string;
+ taskKey: string;
  timeoutSeconds?: number;
  webhookNotifications?: inputs.GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotifications;
  }
@@ -2648,7 +2648,7 @@ export interface GetJobJobSettingsSettingsTaskForEachTaskTaskArgs {
  sparkPythonTask?: pulumi.Input<inputs.GetJobJobSettingsSettingsTaskForEachTaskTaskSparkPythonTaskArgs>;
  sparkSubmitTask?: pulumi.Input<inputs.GetJobJobSettingsSettingsTaskForEachTaskTaskSparkSubmitTaskArgs>;
  sqlTask?: pulumi.Input<inputs.GetJobJobSettingsSettingsTaskForEachTaskTaskSqlTaskArgs>;
- taskKey?: pulumi.Input<string>;
+ taskKey: pulumi.Input<string>;
  timeoutSeconds?: pulumi.Input<number>;
  webhookNotifications?: pulumi.Input<inputs.GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsArgs>;
  }
@@ -2709,14 +2709,14 @@ export interface GetJobJobSettingsSettingsTaskForEachTaskTaskHealthArgs {
  rules: pulumi.Input<pulumi.Input<inputs.GetJobJobSettingsSettingsTaskForEachTaskTaskHealthRuleArgs>[]>;
  }
  export interface GetJobJobSettingsSettingsTaskForEachTaskTaskHealthRule {
- metric?: string;
- op?: string;
- value?: number;
+ metric: string;
+ op: string;
+ value: number;
  }
  export interface GetJobJobSettingsSettingsTaskForEachTaskTaskHealthRuleArgs {
- metric?: pulumi.Input<string>;
- op?: pulumi.Input<string>;
- value?: pulumi.Input<number>;
+ metric: pulumi.Input<string>;
+ op: pulumi.Input<string>;
+ value: pulumi.Input<number>;
  }
  export interface GetJobJobSettingsSettingsTaskForEachTaskTaskLibrary {
  cran?: inputs.GetJobJobSettingsSettingsTaskForEachTaskTaskLibraryCran;
@@ -3140,7 +3140,7 @@ export interface GetJobJobSettingsSettingsTaskForEachTaskTaskSqlTask {
  [key: string]: any;
  };
  query?: inputs.GetJobJobSettingsSettingsTaskForEachTaskTaskSqlTaskQuery;
- warehouseId?: string;
+ warehouseId: string;
  }
  export interface GetJobJobSettingsSettingsTaskForEachTaskTaskSqlTaskArgs {
  alert?: pulumi.Input<inputs.GetJobJobSettingsSettingsTaskForEachTaskTaskSqlTaskAlertArgs>;
@@ -3150,7 +3150,7 @@ export interface GetJobJobSettingsSettingsTaskForEachTaskTaskSqlTaskArgs {
  [key: string]: any;
  }>;
  query?: pulumi.Input<inputs.GetJobJobSettingsSettingsTaskForEachTaskTaskSqlTaskQueryArgs>;
- warehouseId?: pulumi.Input<string>;
+ warehouseId: pulumi.Input<string>;
  }
  export interface GetJobJobSettingsSettingsTaskForEachTaskTaskSqlTaskAlert {
  alertId: string;
@@ -3271,14 +3271,14 @@ export interface GetJobJobSettingsSettingsTaskHealthArgs {
  rules: pulumi.Input<pulumi.Input<inputs.GetJobJobSettingsSettingsTaskHealthRuleArgs>[]>;
  }
  export interface GetJobJobSettingsSettingsTaskHealthRule {
- metric?: string;
- op?: string;
- value?: number;
+ metric: string;
+ op: string;
+ value: number;
  }
  export interface GetJobJobSettingsSettingsTaskHealthRuleArgs {
- metric?: pulumi.Input<string>;
- op?: pulumi.Input<string>;
- value?: pulumi.Input<number>;
+ metric: pulumi.Input<string>;
+ op: pulumi.Input<string>;
+ value: pulumi.Input<number>;
  }
  export interface GetJobJobSettingsSettingsTaskLibrary {
  cran?: inputs.GetJobJobSettingsSettingsTaskLibraryCran;
@@ -3702,7 +3702,7 @@ export interface GetJobJobSettingsSettingsTaskSqlTask {
  [key: string]: any;
  };
  query?: inputs.GetJobJobSettingsSettingsTaskSqlTaskQuery;
- warehouseId?: string;
+ warehouseId: string;
  }
  export interface GetJobJobSettingsSettingsTaskSqlTaskArgs {
  alert?: pulumi.Input<inputs.GetJobJobSettingsSettingsTaskSqlTaskAlertArgs>;
@@ -3712,7 +3712,7 @@ export interface GetJobJobSettingsSettingsTaskSqlTaskArgs {
  [key: string]: any;
  }>;
  query?: pulumi.Input<inputs.GetJobJobSettingsSettingsTaskSqlTaskQueryArgs>;
- warehouseId?: pulumi.Input<string>;
+ warehouseId: pulumi.Input<string>;
  }
  export interface GetJobJobSettingsSettingsTaskSqlTaskAlert {
  alertId: string;
@@ -4958,30 +4958,29 @@ export interface JobHealthRule {
  /**
  * string specifying the metric to check. The only supported metric is `RUN_DURATION_SECONDS` (check [Jobs REST API documentation](https://docs.databricks.com/api/workspace/jobs/create) for the latest information).
  */
- metric?: pulumi.Input<string>;
+ metric: pulumi.Input<string>;
  /**
  * string specifying the operation used to evaluate the given metric. The only supported operation is `GREATER_THAN`.
  */
- op?: pulumi.Input<string>;
+ op: pulumi.Input<string>;
  /**
  * integer value used to compare to the given metric.
  */
- value?: pulumi.Input<number>;
+ value: pulumi.Input<number>;
  }
  export interface JobJobCluster {
  /**
  * Identifier that can be referenced in `task` block, so that cluster is shared between tasks
  */
- jobClusterKey?: pulumi.Input<string>;
+ jobClusterKey: pulumi.Input<string>;
  /**
  * Same set of parameters as for databricks.Cluster resource.
  */
- newCluster?: pulumi.Input<inputs.JobJobClusterNewCluster>;
+ newCluster: pulumi.Input<inputs.JobJobClusterNewCluster>;
  }
  export interface JobJobClusterNewCluster {
  applyPolicyDefaultValues?: pulumi.Input<boolean>;
  autoscale?: pulumi.Input<inputs.JobJobClusterNewClusterAutoscale>;
- autoterminationMinutes?: pulumi.Input<number>;
  awsAttributes?: pulumi.Input<inputs.JobJobClusterNewClusterAwsAttributes>;
  azureAttributes?: pulumi.Input<inputs.JobJobClusterNewClusterAzureAttributes>;
  clusterId?: pulumi.Input<string>;
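The hunk above makes `metric`, `op`, and `value` required on `JobHealthRule`, makes `jobClusterKey` and `newCluster` required on `JobJobCluster`, and drops `autoterminationMinutes` from `JobJobClusterNewCluster`. A minimal sketch of input values that satisfy the new shapes; the Spark version and node type are placeholders, not values taken from this package:

```typescript
import * as databricks from "@pulumi/databricks";

// Health rule: all three fields must now be provided.
const healthRule: databricks.types.input.JobHealthRule = {
    metric: "RUN_DURATION_SECONDS",
    op: "GREATER_THAN",
    value: 3600,
};

// Shared job cluster: key and cluster spec are both required now.
const sharedCluster: databricks.types.input.JobJobCluster = {
    jobClusterKey: "shared-cluster",
    newCluster: {
        sparkVersion: "14.3.x-scala2.12", // placeholder
        nodeTypeId: "i3.xlarge",          // placeholder
        numWorkers: 2,
        // autoterminationMinutes is no longer part of this shape in 1.44.0-alpha,
        // so it is simply omitted here.
    },
};
```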
@@ -5183,7 +5182,6 @@ export interface JobLibraryPypi {
  export interface JobNewCluster {
  applyPolicyDefaultValues?: pulumi.Input<boolean>;
  autoscale?: pulumi.Input<inputs.JobNewClusterAutoscale>;
- autoterminationMinutes?: pulumi.Input<number>;
  awsAttributes?: pulumi.Input<inputs.JobNewClusterAwsAttributes>;
  azureAttributes?: pulumi.Input<inputs.JobNewClusterAzureAttributes>;
  clusterId?: pulumi.Input<string>;
@@ -5592,7 +5590,7 @@ export interface JobTask {
  * string specifying an unique key for a given task.
  * * `*_task` - (Required) one of the specific task blocks described below:
  */
- taskKey?: pulumi.Input<string>;
+ taskKey: pulumi.Input<string>;
  /**
  * (Integer) An optional timeout applied to each run of this job. The default behavior is to have no timeout.
  */
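With `taskKey` no longer optional on `JobTask`, every task entry has to name its key explicitly. A hypothetical task list illustrating the stricter type; the notebook path is a placeholder:

```typescript
import * as databricks from "@pulumi/databricks";

// Each task now declares its key; jobClusterKey refers to a shared job cluster.
const tasks: databricks.types.input.JobTask[] = [{
    taskKey: "ingest",
    jobClusterKey: "shared-cluster",
    notebookTask: { notebookPath: "/Workspace/example/ingest" }, // placeholder path
}];
```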
@@ -5768,7 +5766,7 @@ export interface JobTaskForEachTaskTask {
  * string specifying an unique key for a given task.
  * * `*_task` - (Required) one of the specific task blocks described below:
  */
- taskKey?: pulumi.Input<string>;
+ taskKey: pulumi.Input<string>;
  /**
  * (Integer) An optional timeout applied to each run of this job. The default behavior is to have no timeout.
  */
@@ -5869,15 +5867,15 @@ export interface JobTaskForEachTaskTaskHealthRule {
  /**
  * string specifying the metric to check. The only supported metric is `RUN_DURATION_SECONDS` (check [Jobs REST API documentation](https://docs.databricks.com/api/workspace/jobs/create) for the latest information).
  */
- metric?: pulumi.Input<string>;
+ metric: pulumi.Input<string>;
  /**
  * string specifying the operation used to evaluate the given metric. The only supported operation is `GREATER_THAN`.
  */
- op?: pulumi.Input<string>;
+ op: pulumi.Input<string>;
  /**
  * integer value used to compare to the given metric.
  */
- value?: pulumi.Input<number>;
+ value: pulumi.Input<number>;
  }
  export interface JobTaskForEachTaskTaskLibrary {
  cran?: pulumi.Input<inputs.JobTaskForEachTaskTaskLibraryCran>;
@@ -5904,7 +5902,6 @@ export interface JobTaskForEachTaskTaskLibraryPypi {
  export interface JobTaskForEachTaskTaskNewCluster {
  applyPolicyDefaultValues?: pulumi.Input<boolean>;
  autoscale?: pulumi.Input<inputs.JobTaskForEachTaskTaskNewClusterAutoscale>;
- autoterminationMinutes?: pulumi.Input<number>;
  awsAttributes?: pulumi.Input<inputs.JobTaskForEachTaskTaskNewClusterAwsAttributes>;
  azureAttributes?: pulumi.Input<inputs.JobTaskForEachTaskTaskNewClusterAzureAttributes>;
  clusterId?: pulumi.Input<string>;
@@ -6235,7 +6232,7 @@ export interface JobTaskForEachTaskTaskSqlTask {
  /**
  * ID of the (the databricks_sql_endpoint) that will be used to execute the task. Only Serverless & Pro warehouses are supported right now.
  */
- warehouseId?: pulumi.Input<string>;
+ warehouseId: pulumi.Input<string>;
  }
  export interface JobTaskForEachTaskTaskSqlTaskAlert {
  /**
@@ -6392,15 +6389,15 @@ export interface JobTaskHealthRule {
  /**
  * string specifying the metric to check. The only supported metric is `RUN_DURATION_SECONDS` (check [Jobs REST API documentation](https://docs.databricks.com/api/workspace/jobs/create) for the latest information).
  */
- metric?: pulumi.Input<string>;
+ metric: pulumi.Input<string>;
  /**
  * string specifying the operation used to evaluate the given metric. The only supported operation is `GREATER_THAN`.
  */
- op?: pulumi.Input<string>;
+ op: pulumi.Input<string>;
  /**
  * integer value used to compare to the given metric.
  */
- value?: pulumi.Input<number>;
+ value: pulumi.Input<number>;
  }
  export interface JobTaskLibrary {
  cran?: pulumi.Input<inputs.JobTaskLibraryCran>;
@@ -6427,7 +6424,6 @@ export interface JobTaskLibraryPypi {
  export interface JobTaskNewCluster {
  applyPolicyDefaultValues?: pulumi.Input<boolean>;
  autoscale?: pulumi.Input<inputs.JobTaskNewClusterAutoscale>;
- autoterminationMinutes?: pulumi.Input<number>;
  awsAttributes?: pulumi.Input<inputs.JobTaskNewClusterAwsAttributes>;
  azureAttributes?: pulumi.Input<inputs.JobTaskNewClusterAzureAttributes>;
  clusterId?: pulumi.Input<string>;
@@ -6758,7 +6754,7 @@ export interface JobTaskSqlTask {
  /**
  * ID of the (the databricks_sql_endpoint) that will be used to execute the task. Only Serverless & Pro warehouses are supported right now.
  */
- warehouseId?: pulumi.Input<string>;
+ warehouseId: pulumi.Input<string>;
  }
  export interface JobTaskSqlTaskAlert {
  /**
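`warehouseId` is now required wherever a `sqlTask` block appears, for both regular tasks and for-each tasks. A sketch of a SQL task under the new typing; the warehouse ID and query ID below are placeholders, not values from this package:

```typescript
import * as databricks from "@pulumi/databricks";

// SQL task: the warehouse must always be named now.
const reportTask: databricks.types.input.JobTask = {
    taskKey: "daily-report",
    sqlTask: {
        warehouseId: "1234567890abcdef",  // placeholder warehouse ID
        query: { queryId: "my-query-id" }, // placeholder query ID
    },
};
```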
@@ -6915,9 +6911,6 @@ export interface JobTrigger {
  */
  pauseStatus?: pulumi.Input<string>;
  table?: pulumi.Input<inputs.JobTriggerTable>;
- /**
- * configuration block to define a trigger for Table Update events consisting of following attributes:
- */
  tableUpdate?: pulumi.Input<inputs.JobTriggerTableUpdate>;
  }
  export interface JobTriggerFileArrival {
@@ -6941,21 +6934,9 @@ export interface JobTriggerTable {
  waitAfterLastChangeSeconds?: pulumi.Input<number>;
  }
  export interface JobTriggerTableUpdate {
- /**
- * The table(s) condition based on which to trigger a job run. Valid values are `ANY_UPDATED` or `ALL_UPDATED`.
- */
  condition?: pulumi.Input<string>;
- /**
- * If set, the trigger starts a run only after the specified amount of time passed since the last time the trigger fired. The minimum allowed value is 60 seconds.
- */
  minTimeBetweenTriggersSeconds?: pulumi.Input<number>;
- /**
- * A list of Delta tables to monitor for changes. The table name must be in the format `catalog_name.schema_name.table_name`.
- */
  tableNames: pulumi.Input<pulumi.Input<string>[]>;
- /**
- * If set, the trigger starts a run only after no file activity has occurred for the specified amount of time. This makes it possible to wait for a batch of incoming files to arrive before triggering a run. The minimum allowed value is 60 seconds.
- */
  waitAfterLastChangeSeconds?: pulumi.Input<number>;
  }
  export interface JobWebhookNotifications {
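The removed lines in the two hunks above only strip doc comments from `JobTrigger` and `JobTriggerTableUpdate`; the shape of the trigger itself is unchanged. For reference, a hypothetical table-update trigger that still type-checks against these inputs (the table name is a placeholder):

```typescript
import * as databricks from "@pulumi/databricks";

// Table-update trigger: fires when any of the listed Delta tables changes.
const trigger: databricks.types.input.JobTrigger = {
    pauseStatus: "UNPAUSED",
    tableUpdate: {
        tableNames: ["main.analytics.orders"], // catalog_name.schema_name.table_name (placeholder)
        condition: "ANY_UPDATED",
        minTimeBetweenTriggersSeconds: 60,
    },
};
```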
package/types/output.d.ts CHANGED
@@ -1068,13 +1068,13 @@ export interface GetJobJobSettingsSettingsHealth {
  rules: outputs.GetJobJobSettingsSettingsHealthRule[];
  }
  export interface GetJobJobSettingsSettingsHealthRule {
- metric?: string;
- op?: string;
- value?: number;
+ metric: string;
+ op: string;
+ value: number;
  }
  export interface GetJobJobSettingsSettingsJobCluster {
- jobClusterKey?: string;
- newCluster?: outputs.GetJobJobSettingsSettingsJobClusterNewCluster;
+ jobClusterKey: string;
+ newCluster: outputs.GetJobJobSettingsSettingsJobClusterNewCluster;
  }
  export interface GetJobJobSettingsSettingsJobClusterNewCluster {
  applyPolicyDefaultValues?: boolean;
@@ -1468,7 +1468,7 @@ export interface GetJobJobSettingsSettingsTask {
  sparkPythonTask?: outputs.GetJobJobSettingsSettingsTaskSparkPythonTask;
  sparkSubmitTask?: outputs.GetJobJobSettingsSettingsTaskSparkSubmitTask;
  sqlTask?: outputs.GetJobJobSettingsSettingsTaskSqlTask;
- taskKey?: string;
+ taskKey: string;
  timeoutSeconds?: number;
  webhookNotifications?: outputs.GetJobJobSettingsSettingsTaskWebhookNotifications;
  }
@@ -1527,7 +1527,7 @@ export interface GetJobJobSettingsSettingsTaskForEachTaskTask {
  sparkPythonTask?: outputs.GetJobJobSettingsSettingsTaskForEachTaskTaskSparkPythonTask;
  sparkSubmitTask?: outputs.GetJobJobSettingsSettingsTaskForEachTaskTaskSparkSubmitTask;
  sqlTask?: outputs.GetJobJobSettingsSettingsTaskForEachTaskTaskSqlTask;
- taskKey?: string;
+ taskKey: string;
  timeoutSeconds?: number;
  webhookNotifications?: outputs.GetJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotifications;
  }
@@ -1560,9 +1560,9 @@ export interface GetJobJobSettingsSettingsTaskForEachTaskTaskHealth {
  rules: outputs.GetJobJobSettingsSettingsTaskForEachTaskTaskHealthRule[];
  }
  export interface GetJobJobSettingsSettingsTaskForEachTaskTaskHealthRule {
- metric?: string;
- op?: string;
- value?: number;
+ metric: string;
+ op: string;
+ value: number;
  }
  export interface GetJobJobSettingsSettingsTaskForEachTaskTaskLibrary {
  cran?: outputs.GetJobJobSettingsSettingsTaskForEachTaskTaskLibraryCran;
@@ -1779,7 +1779,7 @@ export interface GetJobJobSettingsSettingsTaskForEachTaskTaskSqlTask {
  [key: string]: any;
  };
  query?: outputs.GetJobJobSettingsSettingsTaskForEachTaskTaskSqlTaskQuery;
- warehouseId?: string;
+ warehouseId: string;
  }
  export interface GetJobJobSettingsSettingsTaskForEachTaskTaskSqlTaskAlert {
  alertId: string;
@@ -1841,9 +1841,9 @@ export interface GetJobJobSettingsSettingsTaskHealth {
  rules: outputs.GetJobJobSettingsSettingsTaskHealthRule[];
  }
  export interface GetJobJobSettingsSettingsTaskHealthRule {
- metric?: string;
- op?: string;
- value?: number;
+ metric: string;
+ op: string;
+ value: number;
  }
  export interface GetJobJobSettingsSettingsTaskLibrary {
  cran?: outputs.GetJobJobSettingsSettingsTaskLibraryCran;
@@ -2060,7 +2060,7 @@ export interface GetJobJobSettingsSettingsTaskSqlTask {
  [key: string]: any;
  };
  query?: outputs.GetJobJobSettingsSettingsTaskSqlTaskQuery;
- warehouseId?: string;
+ warehouseId: string;
  }
  export interface GetJobJobSettingsSettingsTaskSqlTaskAlert {
  alertId: string;
@@ -2804,30 +2804,29 @@ export interface JobHealthRule {
  /**
  * string specifying the metric to check. The only supported metric is `RUN_DURATION_SECONDS` (check [Jobs REST API documentation](https://docs.databricks.com/api/workspace/jobs/create) for the latest information).
  */
- metric?: string;
+ metric: string;
  /**
  * string specifying the operation used to evaluate the given metric. The only supported operation is `GREATER_THAN`.
  */
- op?: string;
+ op: string;
  /**
  * integer value used to compare to the given metric.
  */
- value?: number;
+ value: number;
  }
  export interface JobJobCluster {
  /**
  * Identifier that can be referenced in `task` block, so that cluster is shared between tasks
  */
- jobClusterKey?: string;
+ jobClusterKey: string;
  /**
  * Same set of parameters as for databricks.Cluster resource.
  */
- newCluster?: outputs.JobJobClusterNewCluster;
+ newCluster: outputs.JobJobClusterNewCluster;
  }
  export interface JobJobClusterNewCluster {
  applyPolicyDefaultValues?: boolean;
  autoscale?: outputs.JobJobClusterNewClusterAutoscale;
- autoterminationMinutes?: number;
  awsAttributes?: outputs.JobJobClusterNewClusterAwsAttributes;
  azureAttributes?: outputs.JobJobClusterNewClusterAzureAttributes;
  clusterId?: string;
@@ -3029,7 +3028,6 @@ export interface JobLibraryPypi {
  export interface JobNewCluster {
  applyPolicyDefaultValues?: boolean;
  autoscale?: outputs.JobNewClusterAutoscale;
- autoterminationMinutes?: number;
  awsAttributes?: outputs.JobNewClusterAwsAttributes;
  azureAttributes?: outputs.JobNewClusterAzureAttributes;
  clusterId?: string;
@@ -3438,7 +3436,7 @@ export interface JobTask {
  * string specifying an unique key for a given task.
  * * `*_task` - (Required) one of the specific task blocks described below:
  */
- taskKey?: string;
+ taskKey: string;
  /**
  * (Integer) An optional timeout applied to each run of this job. The default behavior is to have no timeout.
  */
@@ -3614,7 +3612,7 @@ export interface JobTaskForEachTaskTask {
  * string specifying an unique key for a given task.
  * * `*_task` - (Required) one of the specific task blocks described below:
  */
- taskKey?: string;
+ taskKey: string;
  /**
  * (Integer) An optional timeout applied to each run of this job. The default behavior is to have no timeout.
  */
@@ -3715,15 +3713,15 @@ export interface JobTaskForEachTaskTaskHealthRule {
  /**
  * string specifying the metric to check. The only supported metric is `RUN_DURATION_SECONDS` (check [Jobs REST API documentation](https://docs.databricks.com/api/workspace/jobs/create) for the latest information).
  */
- metric?: string;
+ metric: string;
  /**
  * string specifying the operation used to evaluate the given metric. The only supported operation is `GREATER_THAN`.
  */
- op?: string;
+ op: string;
  /**
  * integer value used to compare to the given metric.
  */
- value?: number;
+ value: number;
  }
  export interface JobTaskForEachTaskTaskLibrary {
  cran?: outputs.JobTaskForEachTaskTaskLibraryCran;
@@ -3750,7 +3748,6 @@ export interface JobTaskForEachTaskTaskLibraryPypi {
  export interface JobTaskForEachTaskTaskNewCluster {
  applyPolicyDefaultValues?: boolean;
  autoscale?: outputs.JobTaskForEachTaskTaskNewClusterAutoscale;
- autoterminationMinutes?: number;
  awsAttributes?: outputs.JobTaskForEachTaskTaskNewClusterAwsAttributes;
  azureAttributes?: outputs.JobTaskForEachTaskTaskNewClusterAzureAttributes;
  clusterId?: string;
@@ -4081,7 +4078,7 @@ export interface JobTaskForEachTaskTaskSqlTask {
  /**
  * ID of the (the databricks_sql_endpoint) that will be used to execute the task. Only Serverless & Pro warehouses are supported right now.
  */
- warehouseId?: string;
+ warehouseId: string;
  }
  export interface JobTaskForEachTaskTaskSqlTaskAlert {
  /**
@@ -4238,15 +4235,15 @@ export interface JobTaskHealthRule {
  /**
  * string specifying the metric to check. The only supported metric is `RUN_DURATION_SECONDS` (check [Jobs REST API documentation](https://docs.databricks.com/api/workspace/jobs/create) for the latest information).
  */
- metric?: string;
+ metric: string;
  /**
  * string specifying the operation used to evaluate the given metric. The only supported operation is `GREATER_THAN`.
  */
- op?: string;
+ op: string;
  /**
  * integer value used to compare to the given metric.
  */
- value?: number;
+ value: number;
  }
  export interface JobTaskLibrary {
  cran?: outputs.JobTaskLibraryCran;
@@ -4273,7 +4270,6 @@ export interface JobTaskLibraryPypi {
  export interface JobTaskNewCluster {
  applyPolicyDefaultValues?: boolean;
  autoscale?: outputs.JobTaskNewClusterAutoscale;
- autoterminationMinutes?: number;
  awsAttributes?: outputs.JobTaskNewClusterAwsAttributes;
  azureAttributes?: outputs.JobTaskNewClusterAzureAttributes;
  clusterId?: string;
@@ -4604,7 +4600,7 @@ export interface JobTaskSqlTask {
  /**
  * ID of the (the databricks_sql_endpoint) that will be used to execute the task. Only Serverless & Pro warehouses are supported right now.
  */
- warehouseId?: string;
+ warehouseId: string;
  }
  export interface JobTaskSqlTaskAlert {
  /**
@@ -4761,9 +4757,6 @@ export interface JobTrigger {
  */
  pauseStatus?: string;
  table?: outputs.JobTriggerTable;
- /**
- * configuration block to define a trigger for Table Update events consisting of following attributes:
- */
  tableUpdate?: outputs.JobTriggerTableUpdate;
  }
  export interface JobTriggerFileArrival {
@@ -4787,21 +4780,9 @@ export interface JobTriggerTable {
  waitAfterLastChangeSeconds?: number;
  }
  export interface JobTriggerTableUpdate {
- /**
- * The table(s) condition based on which to trigger a job run. Valid values are `ANY_UPDATED` or `ALL_UPDATED`.
- */
  condition?: string;
- /**
- * If set, the trigger starts a run only after the specified amount of time passed since the last time the trigger fired. The minimum allowed value is 60 seconds.
- */
  minTimeBetweenTriggersSeconds?: number;
- /**
- * A list of Delta tables to monitor for changes. The table name must be in the format `catalog_name.schema_name.table_name`.
- */
  tableNames: string[];
- /**
- * If set, the trigger starts a run only after no file activity has occurred for the specified amount of time. This makes it possible to wait for a batch of incoming files to arrive before triggering a run. The minimum allowed value is 60 seconds.
- */
  waitAfterLastChangeSeconds?: number;
  }
  export interface JobWebhookNotifications {
@@ -5100,11 +5081,11 @@ export interface ModelServingConfigServedEntity {
  /**
  * The workload size of the served entity. The workload size corresponds to a range of provisioned concurrency that the compute autoscales between. A single unit of provisioned concurrency can process one request at a time. Valid workload sizes are `Small` (4 - 4 provisioned concurrency), `Medium` (8 - 16 provisioned concurrency), and `Large` (16 - 64 provisioned concurrency). If `scale-to-zero` is enabled, the lower bound of the provisioned concurrency for each workload size is 0.
  */
- workloadSize?: string;
+ workloadSize: string;
  /**
  * The workload type of the served entity. The workload type selects which type of compute to use in the endpoint. The default value for this parameter is `CPU`. For deep learning workloads, GPU acceleration is available by selecting workload types like `GPU_SMALL` and others. See the available [GPU types](https://docs.databricks.com/machine-learning/model-serving/create-manage-serving-endpoints.html#gpu-workload-types).
  */
- workloadType?: string;
+ workloadType: string;
  }
  export interface ModelServingConfigServedEntityExternalModel {
  ai21labsConfig?: outputs.ModelServingConfigServedEntityExternalModelAi21labsConfig;
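On the output side, `workloadSize` and `workloadType` are now declared as plain strings on `ModelServingConfigServedEntity`. A rough sketch of how that surfaces in a program; the endpoint name and served-entity settings below are placeholders, not values defined by this package:

```typescript
import * as databricks from "@pulumi/databricks";

// Hypothetical serving endpoint with a single served entity.
const endpoint = new databricks.ModelServing("example", {
    name: "example-endpoint",                 // placeholder
    config: {
        servedEntities: [{
            entityName: "main.models.example", // placeholder registered model
            entityVersion: "1",
            workloadSize: "Small",
            scaleToZeroEnabled: true,
        }],
    },
});

// workloadSize is typed as a required string on each served entity in the
// output shape; the optional chaining below only guards the array access.
export const servedWorkloadSize = endpoint.config.apply(
    c => c.servedEntities?.[0]?.workloadSize,
);
```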