@pulumi/databricks 1.48.0-alpha.1723454038 → 1.48.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (48)
  1. package/getAwsUnityCatalogAssumeRolePolicy.d.ts +2 -2
  2. package/getAwsUnityCatalogAssumeRolePolicy.js +2 -2
  3. package/getAwsUnityCatalogPolicy.d.ts +4 -4
  4. package/getAwsUnityCatalogPolicy.js +2 -2
  5. package/getNotebook.d.ts +4 -0
  6. package/getNotebook.js.map +1 -1
  7. package/getSchema.d.ts +68 -0
  8. package/getSchema.js +68 -0
  9. package/getSchema.js.map +1 -1
  10. package/getUser.d.ts +4 -0
  11. package/getUser.js.map +1 -1
  12. package/index.d.ts +3 -0
  13. package/index.js +6 -1
  14. package/index.js.map +1 -1
  15. package/metastoreAssignment.d.ts +3 -3
  16. package/metastoreDataAccess.d.ts +3 -0
  17. package/metastoreDataAccess.js +2 -0
  18. package/metastoreDataAccess.js.map +1 -1
  19. package/notebook.d.ts +8 -0
  20. package/notebook.js +2 -0
  21. package/notebook.js.map +1 -1
  22. package/notificationDestination.d.ts +157 -0
  23. package/notificationDestination.js +139 -0
  24. package/notificationDestination.js.map +1 -0
  25. package/package.json +2 -2
  26. package/permissions.d.ts +70 -5
  27. package/permissions.js +70 -5
  28. package/permissions.js.map +1 -1
  29. package/pipeline.d.ts +79 -3
  30. package/pipeline.js +25 -1
  31. package/pipeline.js.map +1 -1
  32. package/sqlDashboard.d.ts +4 -2
  33. package/sqlDashboard.js +4 -2
  34. package/sqlDashboard.js.map +1 -1
  35. package/sqlTable.d.ts +3 -3
  36. package/sqlWidget.d.ts +5 -3
  37. package/sqlWidget.js +5 -3
  38. package/sqlWidget.js.map +1 -1
  39. package/storageCredential.d.ts +3 -0
  40. package/storageCredential.js +2 -0
  41. package/storageCredential.js.map +1 -1
  42. package/types/input.d.ts +233 -25
  43. package/types/output.d.ts +233 -25
  44. package/volume.d.ts +1 -1
  45. package/volume.js +1 -1
  46. package/workspaceConf.d.ts +7 -5
  47. package/workspaceConf.js +7 -5
  48. package/workspaceConf.js.map +1 -1
package/types/output.d.ts CHANGED
@@ -143,7 +143,7 @@ export interface ClusterAzureAttributes {
  firstOnDemand?: number;
  logAnalyticsInfo?: outputs.ClusterAzureAttributesLogAnalyticsInfo;
  /**
- * The max price for Azure spot instances. Use `-1` to specify the lowest price.
+ * The max bid price used for Azure spot instances. You can set this to greater than or equal to the current spot price. You can also set this to `-1`, which specifies that the instance cannot be evicted on the basis of price. The price for the instance will be the current price for spot instances or the price for a standard instance.
  */
  spotBidMaxPrice?: number;
  }
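
The reworded doc comment changes what `-1` means: it no longer asks for the lowest price, it disables price-based eviction. A minimal sketch of how this might look on a cluster's `azureAttributes`; the cluster name, node type, and Spark version are illustrative placeholders, not taken from this diff:

```typescript
import * as databricks from "@pulumi/databricks";

// Hypothetical Azure spot cluster; node type and Spark version are placeholders.
const spotCluster = new databricks.Cluster("spot-cluster", {
    clusterName: "azure-spot-example",
    sparkVersion: "14.3.x-scala2.12",
    nodeTypeId: "Standard_DS3_v2",
    numWorkers: 2,
    azureAttributes: {
        availability: "SPOT_WITH_FALLBACK_AZURE",
        firstOnDemand: 1,
        // -1: the instance is never evicted on the basis of price;
        // billing follows the current spot (or standard) price.
        spotBidMaxPrice: -1,
    },
});
```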
@@ -3100,7 +3100,7 @@ export interface InstancePoolAzureAttributes {
  */
  availability?: string;
  /**
- * The max price for Azure spot instances. Use `-1` to specify the lowest price.
+ * The max bid price used for Azure spot instances. You can set this to greater than or equal to the current spot price. You can also set this to `-1`, which specifies that the instance cannot be evicted on the basis of price. The price for the instance will be the current price for spot instances or the price for a standard instance.
  */
  spotBidMaxPrice?: number;
  }
@@ -3895,12 +3895,15 @@ export interface JobTask {
  */
  dependsOns?: outputs.JobTaskDependsOn[];
  /**
- * An optional description for the job. The maximum length is 1024 characters in UTF-8 encoding.
+ * description for this task.
  */
  description?: string;
+ /**
+ * A flag to disable auto optimization in serverless tasks.
+ */
  disableAutoOptimization?: boolean;
  /**
- * (List) An optional set of email addresses notified when this task begins, completes or fails. The default behavior is to not send any emails. This field is a block and is documented below.
+ * An optional block to specify a set of email addresses notified when this task begins, completes or fails. The default behavior is to not send any emails. This block is documented below.
  */
  emailNotifications?: outputs.JobTaskEmailNotifications;
  /**
@@ -3914,8 +3917,6 @@ export interface JobTask {
  forEachTask?: outputs.JobTaskForEachTask;
  /**
  * block described below that specifies health conditions for a given task.
- *
- * > **Note** If no `jobClusterKey`, `existingClusterId`, or `newCluster` were specified in task definition, then task will executed using serverless compute.
  */
  health?: outputs.JobTaskHealth;
  /**
@@ -3969,6 +3970,8 @@ export interface JobTask {
  timeoutSeconds?: number;
  /**
  * (List) An optional set of system destinations (for example, webhook destinations or Slack) to be notified when runs of this task begins, completes or fails. The default behavior is to not send any notifications. This field is a block and is documented below.
+ *
+ * > **Note** If no `jobClusterKey`, `existingClusterId`, or `newCluster` were specified in task definition, then task will executed using serverless compute.
  */
  webhookNotifications?: outputs.JobTaskWebhookNotifications;
  }
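
The relocated note is the behavioral detail worth calling out: a task that sets none of `jobClusterKey`, `existingClusterId`, or `newCluster` runs on serverless compute, which is also where the new `disableAutoOptimization` flag applies. A hedged sketch of such a task; the job name and notebook path are placeholders:

```typescript
import * as databricks from "@pulumi/databricks";

// Hypothetical job; the notebook path is a placeholder.
const serverlessJob = new databricks.Job("serverless-job", {
    name: "serverless-example",
    tasks: [{
        taskKey: "ingest",
        // No jobClusterKey, existingClusterId, or newCluster:
        // per the note above, the task runs on serverless compute.
        notebookTask: {
            notebookPath: "/Workspace/Shared/ingest",
        },
        // Only meaningful for serverless tasks.
        disableAutoOptimization: true,
    }],
});
```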
@@ -4081,12 +4084,15 @@ export interface JobTaskForEachTaskTask {
  */
  dependsOns?: outputs.JobTaskForEachTaskTaskDependsOn[];
  /**
- * An optional description for the job. The maximum length is 1024 characters in UTF-8 encoding.
+ * description for this task.
  */
  description?: string;
+ /**
+ * A flag to disable auto optimization in serverless tasks.
+ */
  disableAutoOptimization?: boolean;
  /**
- * (List) An optional set of email addresses notified when this task begins, completes or fails. The default behavior is to not send any emails. This field is a block and is documented below.
+ * An optional block to specify a set of email addresses notified when this task begins, completes or fails. The default behavior is to not send any emails. This block is documented below.
  */
  emailNotifications?: outputs.JobTaskForEachTaskTaskEmailNotifications;
  /**
@@ -4099,8 +4105,6 @@ export interface JobTaskForEachTaskTask {
  existingClusterId?: string;
  /**
  * block described below that specifies health conditions for a given task.
- *
- * > **Note** If no `jobClusterKey`, `existingClusterId`, or `newCluster` were specified in task definition, then task will executed using serverless compute.
  */
  health?: outputs.JobTaskForEachTaskTaskHealth;
  /**
@@ -4154,6 +4158,8 @@ export interface JobTaskForEachTaskTask {
  timeoutSeconds?: number;
  /**
  * (List) An optional set of system destinations (for example, webhook destinations or Slack) to be notified when runs of this task begins, completes or fails. The default behavior is to not send any notifications. This field is a block and is documented below.
+ *
+ * > **Note** If no `jobClusterKey`, `existingClusterId`, or `newCluster` were specified in task definition, then task will executed using serverless compute.
  */
  webhookNotifications?: outputs.JobTaskForEachTaskTaskWebhookNotifications;
  }
@@ -5578,6 +5584,11 @@ export interface MetastoreDataAccessAzureServicePrincipal {
  clientSecret: string;
  directoryId: string;
  }
+ export interface MetastoreDataAccessCloudflareApiToken {
+ accessKeyId: string;
+ accountId: string;
+ secretAccessKey: string;
+ }
  export interface MetastoreDataAccessDatabricksGcpServiceAccount {
  credentialId: string;
  email: string;
@@ -5730,6 +5741,7 @@ export interface ModelServingConfigServedEntityExternalModel {
  * Databricks Model Serving Config
  */
  databricksModelServingConfig?: outputs.ModelServingConfigServedEntityExternalModelDatabricksModelServingConfig;
+ googleCloudVertexAiConfig?: outputs.ModelServingConfigServedEntityExternalModelGoogleCloudVertexAiConfig;
  /**
  * The name of the external model.
  */
@@ -5755,13 +5767,15 @@ export interface ModelServingConfigServedEntityExternalModelAi21labsConfig {
  /**
  * The Databricks secret key reference for an AI21Labs API key.
  */
- ai21labsApiKey: string;
+ ai21labsApiKey?: string;
+ ai21labsApiKeyPlaintext?: string;
  }
  export interface ModelServingConfigServedEntityExternalModelAmazonBedrockConfig {
  /**
  * The Databricks secret key reference for an AWS Access Key ID with permissions to interact with Bedrock services.
  */
- awsAccessKeyId: string;
+ awsAccessKeyId?: string;
+ awsAccessKeyIdPlaintext?: string;
  /**
  * The AWS region to use. Bedrock has to be enabled there.
  */
@@ -5769,7 +5783,8 @@ export interface ModelServingConfigServedEntityExternalModelAmazonBedrockConfig
  /**
  * The Databricks secret key reference for an AWS Secret Access Key paired with the access key ID, with permissions to interact with Bedrock services.
  */
- awsSecretAccessKey: string;
+ awsSecretAccessKey?: string;
+ awsSecretAccessKeyPlaintext?: string;
  /**
  * The underlying provider in Amazon Bedrock. Supported values (case insensitive) include: `Anthropic`, `Cohere`, `AI21Labs`, `Amazon`.
  */
@@ -5780,27 +5795,38 @@ export interface ModelServingConfigServedEntityExternalModelAnthropicConfig {
  * The Databricks secret key reference for an Anthropic API key.
  * The Databricks secret key reference for an Anthropic API key.
  */
- anthropicApiKey: string;
+ anthropicApiKey?: string;
+ anthropicApiKeyPlaintext?: string;
  }
  export interface ModelServingConfigServedEntityExternalModelCohereConfig {
+ cohereApiBase?: string;
  /**
  * The Databricks secret key reference for a Cohere API key.
  */
- cohereApiKey: string;
+ cohereApiKey?: string;
+ cohereApiKeyPlaintext?: string;
  }
  export interface ModelServingConfigServedEntityExternalModelDatabricksModelServingConfig {
  /**
  * The Databricks secret key reference for a Databricks API token that corresponds to a user or service principal with Can Query access to the model serving endpoint pointed to by this external model.
  */
- databricksApiToken: string;
+ databricksApiToken?: string;
+ databricksApiTokenPlaintext?: string;
  /**
  * The URL of the Databricks workspace containing the model serving endpoint pointed to by this external model.
  */
  databricksWorkspaceUrl: string;
  }
+ export interface ModelServingConfigServedEntityExternalModelGoogleCloudVertexAiConfig {
+ privateKey?: string;
+ privateKeyPlaintext?: string;
+ projectId?: string;
+ region?: string;
+ }
  export interface ModelServingConfigServedEntityExternalModelOpenaiConfig {
  microsoftEntraClientId?: string;
  microsoftEntraClientSecret?: string;
+ microsoftEntraClientSecretPlaintext?: string;
  microsoftEntraTenantId?: string;
  /**
  * This is the base URL for the OpenAI API (default: "https://api.openai.com/v1"). For Azure OpenAI, this field is required, and is the base URL for the Azure OpenAI API service provided by Azure.
@@ -5810,6 +5836,7 @@ export interface ModelServingConfigServedEntityExternalModelOpenaiConfig {
  * The Databricks secret key reference for an OpenAI or Azure OpenAI API key.
  */
  openaiApiKey?: string;
+ openaiApiKeyPlaintext?: string;
  /**
  * This is an optional field to specify the type of OpenAI API to use. For Azure OpenAI, this field is required, and adjust this parameter to represent the preferred security access validation protocol. For access token validation, use azure. For authentication using Azure Active Directory (Azure AD) use, azuread.
  */
@@ -5831,7 +5858,8 @@ export interface ModelServingConfigServedEntityExternalModelPalmConfig {
  /**
  * The Databricks secret key reference for a PaLM API key.
  */
- palmApiKey: string;
+ palmApiKey?: string;
+ palmApiKeyPlaintext?: string;
  }
  export interface ModelServingConfigServedModel {
  /**
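
The pattern across these external-model blocks is consistent: each secret-reference field (`openaiApiKey`, `palmApiKey`, `awsSecretAccessKey`, and so on) becomes optional and gains a `...Plaintext` sibling, so you supply one of the pair. A hedged sketch against the OpenAI provider, assuming the input shapes mirror these output types; the endpoint name, model, and secret scope/key are placeholders:

```typescript
import * as databricks from "@pulumi/databricks";

// Hypothetical external-model serving endpoint; names and secret scope are placeholders.
const openaiEndpoint = new databricks.ModelServing("openai-endpoint", {
    name: "openai-chat",
    config: {
        servedEntities: [{
            name: "gpt-4o",
            externalModel: {
                provider: "openai",
                name: "gpt-4o",
                task: "llm/v1/chat",
                openaiConfig: {
                    // Either a Databricks secret reference ...
                    openaiApiKey: "{{secrets/llm/openai_api_key}}",
                    // ... or, with this release, the raw key instead:
                    // openaiApiKeyPlaintext: "sk-...",
                },
            },
        }],
    },
});
```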
@@ -6122,6 +6150,74 @@ export interface MwsWorkspacesToken {
  tokenId: string;
  tokenValue: string;
  }
+ export interface NotificationDestinationConfig {
+ /**
+ * The email configuration of the Notification Destination. It must contain the following:
+ */
+ email?: outputs.NotificationDestinationConfigEmail;
+ /**
+ * The Generic Webhook configuration of the Notification Destination. It must contain the following:
+ */
+ genericWebhook?: outputs.NotificationDestinationConfigGenericWebhook;
+ /**
+ * The Microsoft Teams configuration of the Notification Destination. It must contain the following:
+ */
+ microsoftTeams?: outputs.NotificationDestinationConfigMicrosoftTeams;
+ /**
+ * The PagerDuty configuration of the Notification Destination. It must contain the following:
+ */
+ pagerduty?: outputs.NotificationDestinationConfigPagerduty;
+ /**
+ * The Slack configuration of the Notification Destination. It must contain the following:
+ */
+ slack?: outputs.NotificationDestinationConfigSlack;
+ }
+ export interface NotificationDestinationConfigEmail {
+ /**
+ * The list of email addresses to send notifications to.
+ */
+ addresses?: string[];
+ }
+ export interface NotificationDestinationConfigGenericWebhook {
+ /**
+ * The password for basic authentication.
+ *
+ * > **NOTE** If the type of notification destination is changed, the existing notification destination will be deleted and a new notification destination will be created with the new type.
+ */
+ password?: string;
+ passwordSet: boolean;
+ /**
+ * The Generic Webhook URL.
+ */
+ url?: string;
+ urlSet: boolean;
+ /**
+ * The username for basic authentication.
+ */
+ username?: string;
+ usernameSet: boolean;
+ }
+ export interface NotificationDestinationConfigMicrosoftTeams {
+ /**
+ * The Microsoft Teams webhook URL.
+ */
+ url?: string;
+ urlSet: boolean;
+ }
+ export interface NotificationDestinationConfigPagerduty {
+ /**
+ * The PagerDuty integration key.
+ */
+ integrationKey?: string;
+ integrationKeySet: boolean;
+ }
+ export interface NotificationDestinationConfigSlack {
+ /**
+ * The Slack webhook URL.
+ */
+ url?: string;
+ urlSet: boolean;
+ }
  export interface OnlineTableSpec {
  /**
  * Whether to create a full-copy pipeline -- a pipeline that stops after creates a full copy of the source table upon initialization and does not process any change data feeds (CDFs) afterwards. The pipeline can still be manually triggered afterwards, but it always perform a full copy of the source table and there are no incremental updates. This mode is useful for syncing views or tables without CDFs to online tables. Note that the full-copy pipeline only supports "triggered" scheduling policy.
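
These config shapes back the new `databricks.NotificationDestination` resource added in this release (see `notificationDestination.d.ts` in the file list). Assuming the input block mirrors the output interfaces above, a minimal Slack destination might look like this; the webhook URL is a placeholder:

```typescript
import * as databricks from "@pulumi/databricks";

// Hypothetical Slack destination; the webhook URL is a placeholder.
const slackAlerts = new databricks.NotificationDestination("slack-alerts", {
    displayName: "Slack #alerts",
    config: {
        slack: {
            url: "https://hooks.slack.com/services/T000/B000/XXXX",
        },
    },
});
```

Per the note on `NotificationDestinationConfigGenericWebhook`, switching the config type (say from `slack` to `genericWebhook`) recreates the destination rather than updating it in place.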
@@ -6256,14 +6352,16 @@ export interface PipelineCluster {
  sshPublicKeys?: string[];
  }
  export interface PipelineClusterAutoscale {
- maxWorkers?: number;
- minWorkers?: number;
+ maxWorkers: number;
+ minWorkers: number;
  mode?: string;
  }
  export interface PipelineClusterAwsAttributes {
  availability?: string;
  ebsVolumeCount?: number;
+ ebsVolumeIops?: number;
  ebsVolumeSize?: number;
+ ebsVolumeThroughput?: number;
  ebsVolumeType?: string;
  firstOnDemand?: number;
  instanceProfileArn?: string;
@@ -6273,8 +6371,13 @@ export interface PipelineClusterAwsAttributes {
  export interface PipelineClusterAzureAttributes {
  availability?: string;
  firstOnDemand?: number;
+ logAnalyticsInfo?: outputs.PipelineClusterAzureAttributesLogAnalyticsInfo;
  spotBidMaxPrice?: number;
  }
+ export interface PipelineClusterAzureAttributesLogAnalyticsInfo {
+ logAnalyticsPrimaryKey?: string;
+ logAnalyticsWorkspaceId?: string;
+ }
  export interface PipelineClusterClusterLogConf {
  dbfs?: outputs.PipelineClusterClusterLogConfDbfs;
  s3?: outputs.PipelineClusterClusterLogConfS3;
@@ -6337,22 +6440,101 @@ export interface PipelineClusterInitScriptWorkspace {
  destination: string;
  }
  export interface PipelineDeployment {
+ /**
+ * The deployment method that manages the pipeline.
+ */
  kind?: string;
+ /**
+ * The path to the file containing metadata about the deployment.
+ */
  metadataFilePath?: string;
  }
  export interface PipelineFilters {
+ /**
+ * Paths to exclude.
+ */
  excludes?: string[];
+ /**
+ * Paths to include.
+ */
  includes?: string[];
  }
+ export interface PipelineGatewayDefinition {
+ /**
+ * Immutable. The Unity Catalog connection this gateway pipeline uses to communicate with the source.
+ */
+ connectionId?: string;
+ /**
+ * Required, Immutable. The name of the catalog for the gateway pipeline's storage location.
+ */
+ gatewayStorageCatalog?: string;
+ /**
+ * Required. The Unity Catalog-compatible naming for the gateway storage location. This is the destination to use for the data that is extracted by the gateway. Delta Live Tables system will automatically create the storage location under the catalog and schema.
+ */
+ gatewayStorageName?: string;
+ /**
+ * Required, Immutable. The name of the schema for the gateway pipelines's storage location.
+ */
+ gatewayStorageSchema?: string;
+ }
+ export interface PipelineIngestionDefinition {
+ connectionName?: string;
+ ingestionGatewayId?: string;
+ objects?: outputs.PipelineIngestionDefinitionObject[];
+ tableConfiguration?: outputs.PipelineIngestionDefinitionTableConfiguration;
+ }
+ export interface PipelineIngestionDefinitionObject {
+ schema?: outputs.PipelineIngestionDefinitionObjectSchema;
+ table?: outputs.PipelineIngestionDefinitionObjectTable;
+ }
+ export interface PipelineIngestionDefinitionObjectSchema {
+ destinationCatalog?: string;
+ destinationSchema?: string;
+ sourceCatalog?: string;
+ sourceSchema?: string;
+ tableConfiguration?: outputs.PipelineIngestionDefinitionObjectSchemaTableConfiguration;
+ }
+ export interface PipelineIngestionDefinitionObjectSchemaTableConfiguration {
+ primaryKeys?: string[];
+ salesforceIncludeFormulaFields?: boolean;
+ scdType?: string;
+ }
+ export interface PipelineIngestionDefinitionObjectTable {
+ destinationCatalog?: string;
+ destinationSchema?: string;
+ destinationTable?: string;
+ sourceCatalog?: string;
+ sourceSchema?: string;
+ sourceTable?: string;
+ tableConfiguration?: outputs.PipelineIngestionDefinitionObjectTableTableConfiguration;
+ }
+ export interface PipelineIngestionDefinitionObjectTableTableConfiguration {
+ primaryKeys?: string[];
+ salesforceIncludeFormulaFields?: boolean;
+ scdType?: string;
+ }
+ export interface PipelineIngestionDefinitionTableConfiguration {
+ primaryKeys?: string[];
+ salesforceIncludeFormulaFields?: boolean;
+ scdType?: string;
+ }
+ export interface PipelineLatestUpdate {
+ creationTime?: string;
+ state?: string;
+ updateId?: string;
+ }
  export interface PipelineLibrary {
  file?: outputs.PipelineLibraryFile;
  jar?: string;
  maven?: outputs.PipelineLibraryMaven;
  notebook?: outputs.PipelineLibraryNotebook;
+ /**
+ * @deprecated The 'whl' field is deprecated
+ */
  whl?: string;
  }
  export interface PipelineLibraryFile {
- path: string;
+ path?: string;
  }
  export interface PipelineLibraryMaven {
  coordinates: string;
@@ -6360,7 +6542,7 @@ export interface PipelineLibraryMaven {
  repo?: string;
  }
  export interface PipelineLibraryNotebook {
- path: string;
+ path?: string;
  }
  export interface PipelineNotification {
  /**
@@ -6370,11 +6552,21 @@ export interface PipelineNotification {
  * * `on-update-fatal-failure` - a pipeline update fails with a non-retryable (fatal) error.
  * * `on-flow-failure` - a single data flow fails.
  */
- alerts: string[];
+ alerts?: string[];
  /**
  * non-empty list of emails to notify.
  */
- emailRecipients: string[];
+ emailRecipients?: string[];
+ }
+ export interface PipelineTrigger {
+ cron?: outputs.PipelineTriggerCron;
+ manual?: outputs.PipelineTriggerManual;
+ }
+ export interface PipelineTriggerCron {
+ quartzCronSchedule?: string;
+ timezoneId?: string;
+ }
+ export interface PipelineTriggerManual {
  }
  export interface QualityMonitorCustomMetric {
  /**
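
Two of these pipeline changes are easy to miss: `minWorkers`/`maxWorkers` are now required inside an autoscale block, while `alerts` and `emailRecipients` on a notification block become optional. A hedged sketch touching both, assuming the input shapes mirror these output types; catalog, target schema, and notebook path are placeholders:

```typescript
import * as databricks from "@pulumi/databricks";

// Hypothetical DLT pipeline; catalog, target schema, and notebook path are placeholders.
const dltPipeline = new databricks.Pipeline("example-pipeline", {
    name: "example-pipeline",
    catalog: "main",
    target: "dlt_example",
    clusters: [{
        label: "default",
        autoscale: {
            // Both bounds are now required whenever an autoscale block is set.
            minWorkers: 1,
            maxWorkers: 4,
            mode: "ENHANCED",
        },
    }],
    libraries: [{
        notebook: {
            path: "/Workspace/Shared/dlt/example",
        },
    }],
    notifications: [{
        // Both fields are optional now, though an empty block is not useful.
        emailRecipients: ["ops@example.com"],
        alerts: ["on-update-failure", "on-flow-failure"],
    }],
});
```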
@@ -6873,12 +7065,28 @@ export interface StorageCredentialAzureServicePrincipal {
  */
  directoryId: string;
  }
+ export interface StorageCredentialCloudflareApiToken {
+ /**
+ * R2 API token access key ID
+ */
+ accessKeyId: string;
+ /**
+ * R2 account ID
+ */
+ accountId: string;
+ /**
+ * R2 API token secret access key
+ *
+ * `azureServicePrincipal` optional configuration block to use service principal as credential details for Azure (Legacy):
+ */
+ secretAccessKey: string;
+ }
  export interface StorageCredentialDatabricksGcpServiceAccount {
  credentialId: string;
  /**
  * The email of the GCP service account created, to be granted access to relevant buckets.
  *
- * `azureServicePrincipal` optional configuration block to use service principal as credential details for Azure (Legacy):
+ * `cloudflareApiToken` optional configuration block for using a Cloudflare API Token as credential details. This requires account admin access:
  */
  email: string;
  }
@@ -6886,7 +7094,7 @@ export interface StorageCredentialGcpServiceAccountKey {
  /**
  * The email of the GCP service account created, to be granted access to relevant buckets.
  *
- * `azureServicePrincipal` optional configuration block to use service principal as credential details for Azure (Legacy):
+ * `cloudflareApiToken` optional configuration block for using a Cloudflare API Token as credential details. This requires account admin access:
  */
  email: string;
  privateKey: string;
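
The new `StorageCredentialCloudflareApiToken` shape corresponds to a `cloudflareApiToken` block on `databricks.StorageCredential`, as the updated doc comments indicate. A minimal sketch; the account ID and key values are placeholders:

```typescript
import * as databricks from "@pulumi/databricks";

// Hypothetical Cloudflare R2 credential; all IDs and keys are placeholders.
const r2Credential = new databricks.StorageCredential("r2", {
    name: "cloudflare-r2",
    comment: "R2 credential managed by Pulumi",
    cloudflareApiToken: {
        accountId: "0123456789abcdef0123456789abcdef",
        accessKeyId: "<r2-api-token-access-key-id>",
        secretAccessKey: "<r2-api-token-secret-access-key>",
    },
});
```

As the doc comment notes, using a Cloudflare API token as credential details requires account admin access.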
package/volume.d.ts CHANGED
@@ -58,7 +58,7 @@ import * as pulumi from "@pulumi/pulumi";
  * },
  * });
  * const some = new databricks.ExternalLocation("some", {
- * name: "external-location",
+ * name: "external_location",
  * url: `s3://${externalAwsS3Bucket.id}/some`,
  * credentialName: external.name,
  * });
package/volume.js CHANGED
@@ -64,7 +64,7 @@ const utilities = require("./utilities");
  * },
  * });
  * const some = new databricks.ExternalLocation("some", {
- * name: "external-location",
+ * name: "external_location",
  * url: `s3://${externalAwsS3Bucket.id}/some`,
  * credentialName: external.name,
  * });
package/workspaceConf.d.ts CHANGED
@@ -4,15 +4,17 @@ import * as pulumi from "@pulumi/pulumi";
  *
  * Manages workspace configuration for expert usage. Currently, more than one instance of resource can exist in Pulumi state, though there's no deterministic behavior, when they manage the same property. We strongly recommend to use a single `databricks.WorkspaceConf` per workspace.
  *
+ * > **Note** Deleting `databricks.WorkspaceConf` resources may fail depending on the configuration properties set, including but not limited to `enableIpAccessLists`, `enableGp3`, and `maxTokenLifetimeDays`. The provider will print a warning if this occurs. You can verify the workspace configuration by reviewing [the workspace settings in the UI](https://docs.databricks.com/en/admin/workspace-settings/index.html).
+ *
  * ## Example Usage
  *
  * Allows specification of custom configuration properties for expert usage:
  *
- * * `enableIpAccessLists` - enables the use of databricks.IpAccessList resources
- * * `maxTokenLifetimeDays` - (string) Maximum token lifetime of new tokens in days, as an integer. If zero, new tokens are permitted to have no lifetime limit. Negative numbers are unsupported. **WARNING:** This limit only applies to new tokens, so there may be tokens with lifetimes longer than this value, including unlimited lifetime. Such tokens may have been created before the current maximum token lifetime was set.
- * * `enableTokensConfig` - (boolean) Enable or disable personal access tokens for this workspace.
- * * `enableDeprecatedClusterNamedInitScripts` - (boolean) Enable or disable [legacy cluster-named init scripts](https://docs.databricks.com/clusters/init-scripts.html#disable-legacy-cluster-named-init-scripts-for-a-workspace) for this workspace.
- * * `enableDeprecatedGlobalInitScripts` - (boolean) Enable or disable [legacy global init scripts](https://docs.databricks.com/clusters/init-scripts.html#migrate-legacy-scripts) for this workspace.
+ * - `enableIpAccessLists` - enables the use of databricks.IpAccessList resources
+ * - `maxTokenLifetimeDays` - (string) Maximum token lifetime of new tokens in days, as an integer. If zero, new tokens are permitted to have no lifetime limit. Negative numbers are unsupported. **WARNING:** This limit only applies to new tokens, so there may be tokens with lifetimes longer than this value, including unlimited lifetime. Such tokens may have been created before the current maximum token lifetime was set.
+ * - `enableTokensConfig` - (boolean) Enable or disable personal access tokens for this workspace.
+ * - `enableDeprecatedClusterNamedInitScripts` - (boolean) Enable or disable [legacy cluster-named init scripts](https://docs.databricks.com/clusters/init-scripts.html#disable-legacy-cluster-named-init-scripts-for-a-workspace) for this workspace.
+ * - `enableDeprecatedGlobalInitScripts` - (boolean) Enable or disable [legacy global init scripts](https://docs.databricks.com/clusters/init-scripts.html#migrate-legacy-scripts) for this workspace.
  *
  * ```typescript
  * import * as pulumi from "@pulumi/pulumi";
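
The documented keys above are passed through the resource's `customConfig` map, and the new note warns that deleting the resource may fail depending on which keys were set. A minimal sketch enabling IP access lists; the resource name and values are illustrative:

```typescript
import * as databricks from "@pulumi/databricks";

// Hypothetical workspace configuration; values are strings even for boolean settings.
const workspaceConf = new databricks.WorkspaceConf("this", {
    customConfig: {
        enableIpAccessLists: "true",
        maxTokenLifetimeDays: "90",
    },
});
```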
package/workspaceConf.js CHANGED
@@ -10,15 +10,17 @@ const utilities = require("./utilities");
  *
  * Manages workspace configuration for expert usage. Currently, more than one instance of resource can exist in Pulumi state, though there's no deterministic behavior, when they manage the same property. We strongly recommend to use a single `databricks.WorkspaceConf` per workspace.
  *
+ * > **Note** Deleting `databricks.WorkspaceConf` resources may fail depending on the configuration properties set, including but not limited to `enableIpAccessLists`, `enableGp3`, and `maxTokenLifetimeDays`. The provider will print a warning if this occurs. You can verify the workspace configuration by reviewing [the workspace settings in the UI](https://docs.databricks.com/en/admin/workspace-settings/index.html).
+ *
  * ## Example Usage
  *
  * Allows specification of custom configuration properties for expert usage:
  *
- * * `enableIpAccessLists` - enables the use of databricks.IpAccessList resources
- * * `maxTokenLifetimeDays` - (string) Maximum token lifetime of new tokens in days, as an integer. If zero, new tokens are permitted to have no lifetime limit. Negative numbers are unsupported. **WARNING:** This limit only applies to new tokens, so there may be tokens with lifetimes longer than this value, including unlimited lifetime. Such tokens may have been created before the current maximum token lifetime was set.
- * * `enableTokensConfig` - (boolean) Enable or disable personal access tokens for this workspace.
- * * `enableDeprecatedClusterNamedInitScripts` - (boolean) Enable or disable [legacy cluster-named init scripts](https://docs.databricks.com/clusters/init-scripts.html#disable-legacy-cluster-named-init-scripts-for-a-workspace) for this workspace.
- * * `enableDeprecatedGlobalInitScripts` - (boolean) Enable or disable [legacy global init scripts](https://docs.databricks.com/clusters/init-scripts.html#migrate-legacy-scripts) for this workspace.
+ * - `enableIpAccessLists` - enables the use of databricks.IpAccessList resources
+ * - `maxTokenLifetimeDays` - (string) Maximum token lifetime of new tokens in days, as an integer. If zero, new tokens are permitted to have no lifetime limit. Negative numbers are unsupported. **WARNING:** This limit only applies to new tokens, so there may be tokens with lifetimes longer than this value, including unlimited lifetime. Such tokens may have been created before the current maximum token lifetime was set.
+ * - `enableTokensConfig` - (boolean) Enable or disable personal access tokens for this workspace.
+ * - `enableDeprecatedClusterNamedInitScripts` - (boolean) Enable or disable [legacy cluster-named init scripts](https://docs.databricks.com/clusters/init-scripts.html#disable-legacy-cluster-named-init-scripts-for-a-workspace) for this workspace.
+ * - `enableDeprecatedGlobalInitScripts` - (boolean) Enable or disable [legacy global init scripts](https://docs.databricks.com/clusters/init-scripts.html#migrate-legacy-scripts) for this workspace.
  *
  * ```typescript
  * import * as pulumi from "@pulumi/pulumi";
package/workspaceConf.js.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"workspaceConf.js","sourceRoot":"","sources":["../workspaceConf.ts"],"names":[],"mappings":";AAAA,wFAAwF;AACxF,iFAAiF;;;AAEjF,yCAAyC;AACzC,yCAAyC;AAEzC;;;;;;;;;;;;;;;;;;;;;;;;;;;GA2BG;AACH,MAAa,aAAc,SAAQ,MAAM,CAAC,cAAc;IACpD;;;;;;;;OAQG;IACI,MAAM,CAAC,GAAG,CAAC,IAAY,EAAE,EAA2B,EAAE,KAA0B,EAAE,IAAmC;QACxH,OAAO,IAAI,aAAa,CAAC,IAAI,EAAO,KAAK,kCAAO,IAAI,KAAE,EAAE,EAAE,EAAE,IAAG,CAAC;IACpE,CAAC;IAKD;;;OAGG;IACI,MAAM,CAAC,UAAU,CAAC,GAAQ;QAC7B,IAAI,GAAG,KAAK,SAAS,IAAI,GAAG,KAAK,IAAI,EAAE;YACnC,OAAO,KAAK,CAAC;SAChB;QACD,OAAO,GAAG,CAAC,cAAc,CAAC,KAAK,aAAa,CAAC,YAAY,CAAC;IAC9D,CAAC;IAeD,YAAY,IAAY,EAAE,WAAoD,EAAE,IAAmC;QAC/G,IAAI,cAAc,GAAkB,EAAE,CAAC;QACvC,IAAI,GAAG,IAAI,IAAI,EAAE,CAAC;QAClB,IAAI,IAAI,CAAC,EAAE,EAAE;YACT,MAAM,KAAK,GAAG,WAA6C,CAAC;YAC5D,cAAc,CAAC,cAAc,CAAC,GAAG,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC,YAAY,CAAC,CAAC,CAAC,SAAS,CAAC;SAC3E;aAAM;YACH,MAAM,IAAI,GAAG,WAA4C,CAAC;YAC1D,cAAc,CAAC,cAAc,CAAC,GAAG,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC,YAAY,CAAC,CAAC,CAAC,SAAS,CAAC;SACzE;QACD,IAAI,GAAG,MAAM,CAAC,YAAY,CAAC,SAAS,CAAC,oBAAoB,EAAE,EAAE,IAAI,CAAC,CAAC;QACnE,KAAK,CAAC,aAAa,CAAC,YAAY,EAAE,IAAI,EAAE,cAAc,EAAE,IAAI,CAAC,CAAC;IAClE,CAAC;;AArDL,sCAsDC;AAxCG,gBAAgB;AACO,0BAAY,GAAG,8CAA8C,CAAC"}
+ {"version":3,"file":"workspaceConf.js","sourceRoot":"","sources":["../workspaceConf.ts"],"names":[],"mappings":";AAAA,wFAAwF;AACxF,iFAAiF;;;AAEjF,yCAAyC;AACzC,yCAAyC;AAEzC;;;;;;;;;;;;;;;;;;;;;;;;;;;;;GA6BG;AACH,MAAa,aAAc,SAAQ,MAAM,CAAC,cAAc;IACpD;;;;;;;;OAQG;IACI,MAAM,CAAC,GAAG,CAAC,IAAY,EAAE,EAA2B,EAAE,KAA0B,EAAE,IAAmC;QACxH,OAAO,IAAI,aAAa,CAAC,IAAI,EAAO,KAAK,kCAAO,IAAI,KAAE,EAAE,EAAE,EAAE,IAAG,CAAC;IACpE,CAAC;IAKD;;;OAGG;IACI,MAAM,CAAC,UAAU,CAAC,GAAQ;QAC7B,IAAI,GAAG,KAAK,SAAS,IAAI,GAAG,KAAK,IAAI,EAAE;YACnC,OAAO,KAAK,CAAC;SAChB;QACD,OAAO,GAAG,CAAC,cAAc,CAAC,KAAK,aAAa,CAAC,YAAY,CAAC;IAC9D,CAAC;IAeD,YAAY,IAAY,EAAE,WAAoD,EAAE,IAAmC;QAC/G,IAAI,cAAc,GAAkB,EAAE,CAAC;QACvC,IAAI,GAAG,IAAI,IAAI,EAAE,CAAC;QAClB,IAAI,IAAI,CAAC,EAAE,EAAE;YACT,MAAM,KAAK,GAAG,WAA6C,CAAC;YAC5D,cAAc,CAAC,cAAc,CAAC,GAAG,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC,YAAY,CAAC,CAAC,CAAC,SAAS,CAAC;SAC3E;aAAM;YACH,MAAM,IAAI,GAAG,WAA4C,CAAC;YAC1D,cAAc,CAAC,cAAc,CAAC,GAAG,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC,YAAY,CAAC,CAAC,CAAC,SAAS,CAAC;SACzE;QACD,IAAI,GAAG,MAAM,CAAC,YAAY,CAAC,SAAS,CAAC,oBAAoB,EAAE,EAAE,IAAI,CAAC,CAAC;QACnE,KAAK,CAAC,aAAa,CAAC,YAAY,EAAE,IAAI,EAAE,cAAc,EAAE,IAAI,CAAC,CAAC;IAClE,CAAC;;AArDL,sCAsDC;AAxCG,gBAAgB;AACO,0BAAY,GAAG,8CAA8C,CAAC"}