@pulumi/databricks 1.48.0 → 1.49.0-alpha.1724218263

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/catalog.d.ts CHANGED
@@ -87,7 +87,7 @@ export declare class Catalog extends pulumi.CustomResource {
87
87
  * For Foreign Catalogs: the name of the entity from an external data source that maps to a catalog. For example, the database name in a PostgreSQL server.
88
88
  */
89
89
  readonly options: pulumi.Output<{
90
- [key: string]: any;
90
+ [key: string]: string;
91
91
  } | undefined>;
92
92
  /**
93
93
  * Username/groupname/sp applicationId of the catalog owner.
@@ -97,7 +97,7 @@ export declare class Catalog extends pulumi.CustomResource {
97
97
  * Extensible Catalog properties.
98
98
  */
99
99
  readonly properties: pulumi.Output<{
100
- [key: string]: any;
100
+ [key: string]: string;
101
101
  } | undefined>;
102
102
  /**
103
103
  * For Delta Sharing Catalogs: the name of the delta sharing provider. Change forces creation of a new resource.
@@ -156,7 +156,7 @@ export interface CatalogState {
156
156
  * For Foreign Catalogs: the name of the entity from an external data source that maps to a catalog. For example, the database name in a PostgreSQL server.
157
157
  */
158
158
  options?: pulumi.Input<{
159
- [key: string]: any;
159
+ [key: string]: pulumi.Input<string>;
160
160
  }>;
161
161
  /**
162
162
  * Username/groupname/sp applicationId of the catalog owner.
@@ -166,7 +166,7 @@ export interface CatalogState {
166
166
  * Extensible Catalog properties.
167
167
  */
168
168
  properties?: pulumi.Input<{
169
- [key: string]: any;
169
+ [key: string]: pulumi.Input<string>;
170
170
  }>;
171
171
  /**
172
172
  * For Delta Sharing Catalogs: the name of the delta sharing provider. Change forces creation of a new resource.
@@ -217,7 +217,7 @@ export interface CatalogArgs {
217
217
  * For Foreign Catalogs: the name of the entity from an external data source that maps to a catalog. For example, the database name in a PostgreSQL server.
218
218
  */
219
219
  options?: pulumi.Input<{
220
- [key: string]: any;
220
+ [key: string]: pulumi.Input<string>;
221
221
  }>;
222
222
  /**
223
223
  * Username/groupname/sp applicationId of the catalog owner.
@@ -227,7 +227,7 @@ export interface CatalogArgs {
227
227
  * Extensible Catalog properties.
228
228
  */
229
229
  properties?: pulumi.Input<{
230
- [key: string]: any;
230
+ [key: string]: pulumi.Input<string>;
231
231
  }>;
232
232
  /**
233
233
  * For Delta Sharing Catalogs: the name of the delta sharing provider. Change forces creation of a new resource.
package/cluster.d.ts CHANGED
@@ -125,7 +125,7 @@ export declare class Cluster extends pulumi.CustomResource {
125
125
  * ```
126
126
  */
127
127
  readonly customTags: pulumi.Output<{
128
- [key: string]: any;
128
+ [key: string]: string;
129
129
  } | undefined>;
130
130
  /**
131
131
  * Select the security features of the cluster. [Unity Catalog requires](https://docs.databricks.com/data-governance/unity-catalog/compute.html#create-clusters--sql-warehouses-with-unity-catalog-access) `SINGLE_USER` or `USER_ISOLATION` mode. `LEGACY_PASSTHROUGH` for passthrough cluster and `LEGACY_TABLE_ACL` for Table ACL cluster. If omitted, default security features are enabled. To disable security features use `NONE` or legacy mode `NO_ISOLATION`. In the Databricks UI, this has recently been renamed *Access Mode* and `USER_ISOLATION` has been renamed *Shared*, but use these terms here.
@@ -135,7 +135,7 @@ export declare class Cluster extends pulumi.CustomResource {
135
135
  * (map) Tags that are added by Databricks by default, regardless of any `customTags` that may have been added. These include: Vendor: Databricks, Creator: <username_of_creator>, ClusterName: <name_of_cluster>, ClusterId: <id_of_cluster>, Name: <Databricks internal use>, and any workspace and pool tags.
136
136
  */
137
137
  readonly defaultTags: pulumi.Output<{
138
- [key: string]: any;
138
+ [key: string]: string;
139
139
  }>;
140
140
  readonly dockerImage: pulumi.Output<outputs.ClusterDockerImage | undefined>;
141
141
  /**
@@ -189,7 +189,7 @@ export declare class Cluster extends pulumi.CustomResource {
189
189
  * maxWorkers: 50,
190
190
  * },
191
191
  * sparkConf: {
192
- * "spark.databricks.io.cache.enabled": true,
192
+ * "spark.databricks.io.cache.enabled": "true",
193
193
  * "spark.databricks.io.cache.maxDiskUsage": "50g",
194
194
  * "spark.databricks.io.cache.maxMetaDataCache": "1g",
195
195
  * },
@@ -224,13 +224,13 @@ export declare class Cluster extends pulumi.CustomResource {
224
224
  * * `spark.databricks.cluster.profile` set to `serverless`
225
225
  */
226
226
  readonly sparkConf: pulumi.Output<{
227
- [key: string]: any;
227
+ [key: string]: string;
228
228
  } | undefined>;
229
229
  /**
230
230
  * Map with environment variable key-value pairs to fine-tune Spark clusters. Key-value pairs of the form (X,Y) are exported (i.e., X='Y') while launching the driver and workers.
231
231
  */
232
232
  readonly sparkEnvVars: pulumi.Output<{
233
- [key: string]: any;
233
+ [key: string]: string;
234
234
  } | undefined>;
235
235
  /**
236
236
  * [Runtime version](https://docs.databricks.com/runtime/index.html) of the cluster. Any supported databricks.getSparkVersion id. We advise using Cluster Policies to restrict the list of versions for simplicity while maintaining enough control.
@@ -302,7 +302,7 @@ export interface ClusterState {
302
302
  * ```
303
303
  */
304
304
  customTags?: pulumi.Input<{
305
- [key: string]: any;
305
+ [key: string]: pulumi.Input<string>;
306
306
  }>;
307
307
  /**
308
308
  * Select the security features of the cluster. [Unity Catalog requires](https://docs.databricks.com/data-governance/unity-catalog/compute.html#create-clusters--sql-warehouses-with-unity-catalog-access) `SINGLE_USER` or `USER_ISOLATION` mode. `LEGACY_PASSTHROUGH` for passthrough cluster and `LEGACY_TABLE_ACL` for Table ACL cluster. If omitted, default security features are enabled. To disable security features use `NONE` or legacy mode `NO_ISOLATION`. In the Databricks UI, this has recently been renamed *Access Mode* and `USER_ISOLATION` has been renamed *Shared*, but use these terms here.
@@ -312,7 +312,7 @@ export interface ClusterState {
312
312
  * (map) Tags that are added by Databricks by default, regardless of any `customTags` that may have been added. These include: Vendor: Databricks, Creator: <username_of_creator>, ClusterName: <name_of_cluster>, ClusterId: <id_of_cluster>, Name: <Databricks internal use>, and any workspace and pool tags.
313
313
  */
314
314
  defaultTags?: pulumi.Input<{
315
- [key: string]: any;
315
+ [key: string]: pulumi.Input<string>;
316
316
  }>;
317
317
  dockerImage?: pulumi.Input<inputs.ClusterDockerImage>;
318
318
  /**
@@ -366,7 +366,7 @@ export interface ClusterState {
366
366
  * maxWorkers: 50,
367
367
  * },
368
368
  * sparkConf: {
369
- * "spark.databricks.io.cache.enabled": true,
369
+ * "spark.databricks.io.cache.enabled": "true",
370
370
  * "spark.databricks.io.cache.maxDiskUsage": "50g",
371
371
  * "spark.databricks.io.cache.maxMetaDataCache": "1g",
372
372
  * },
@@ -401,13 +401,13 @@ export interface ClusterState {
401
401
  * * `spark.databricks.cluster.profile` set to `serverless`
402
402
  */
403
403
  sparkConf?: pulumi.Input<{
404
- [key: string]: any;
404
+ [key: string]: pulumi.Input<string>;
405
405
  }>;
406
406
  /**
407
407
  * Map with environment variable key-value pairs to fine-tune Spark clusters. Key-value pairs of the form (X,Y) are exported (i.e., X='Y') while launching the driver and workers.
408
408
  */
409
409
  sparkEnvVars?: pulumi.Input<{
410
- [key: string]: any;
410
+ [key: string]: pulumi.Input<string>;
411
411
  }>;
412
412
  /**
413
413
  * [Runtime version](https://docs.databricks.com/runtime/index.html) of the cluster. Any supported databricks.getSparkVersion id. We advise using Cluster Policies to restrict the list of versions for simplicity while maintaining enough control.
@@ -470,7 +470,7 @@ export interface ClusterArgs {
470
470
  * ```
471
471
  */
472
472
  customTags?: pulumi.Input<{
473
- [key: string]: any;
473
+ [key: string]: pulumi.Input<string>;
474
474
  }>;
475
475
  /**
476
476
  * Select the security features of the cluster. [Unity Catalog requires](https://docs.databricks.com/data-governance/unity-catalog/compute.html#create-clusters--sql-warehouses-with-unity-catalog-access) `SINGLE_USER` or `USER_ISOLATION` mode. `LEGACY_PASSTHROUGH` for passthrough cluster and `LEGACY_TABLE_ACL` for Table ACL cluster. If omitted, default security features are enabled. To disable security features use `NONE` or legacy mode `NO_ISOLATION`. In the Databricks UI, this has recently been renamed *Access Mode* and `USER_ISOLATION` has been renamed *Shared*, but use these terms here.
@@ -528,7 +528,7 @@ export interface ClusterArgs {
528
528
  * maxWorkers: 50,
529
529
  * },
530
530
  * sparkConf: {
531
- * "spark.databricks.io.cache.enabled": true,
531
+ * "spark.databricks.io.cache.enabled": "true",
532
532
  * "spark.databricks.io.cache.maxDiskUsage": "50g",
533
533
  * "spark.databricks.io.cache.maxMetaDataCache": "1g",
534
534
  * },
@@ -563,13 +563,13 @@ export interface ClusterArgs {
563
563
  * * `spark.databricks.cluster.profile` set to `serverless`
564
564
  */
565
565
  sparkConf?: pulumi.Input<{
566
- [key: string]: any;
566
+ [key: string]: pulumi.Input<string>;
567
567
  }>;
568
568
  /**
569
569
  * Map with environment variable key-value pairs to fine-tune Spark clusters. Key-value pairs of the form (X,Y) are exported (i.e., X='Y') while launching the driver and workers.
570
570
  */
571
571
  sparkEnvVars?: pulumi.Input<{
572
- [key: string]: any;
572
+ [key: string]: pulumi.Input<string>;
573
573
  }>;
574
574
  /**
575
575
  * [Runtime version](https://docs.databricks.com/runtime/index.html) of the cluster. Any supported databricks.getSparkVersion id. We advise using Cluster Policies to restrict the list of versions for simplicity while maintaining enough control.
package/connection.d.ts CHANGED
@@ -110,7 +110,7 @@ export declare class Connection extends pulumi.CustomResource {
110
110
  * The key value of options required by the connection, e.g. `host`, `port`, `user`, `password` or `GoogleServiceAccountKeyJson`. Please consult the [documentation](https://docs.databricks.com/query-federation/index.html#supported-data-sources) for the required option.
111
111
  */
112
112
  readonly options: pulumi.Output<{
113
- [key: string]: any;
113
+ [key: string]: string;
114
114
  }>;
115
115
  /**
116
116
  * Name of the connection owner.
@@ -120,7 +120,7 @@ export declare class Connection extends pulumi.CustomResource {
120
120
  * Free-form connection properties.
121
121
  */
122
122
  readonly properties: pulumi.Output<{
123
- [key: string]: any;
123
+ [key: string]: string;
124
124
  } | undefined>;
125
125
  readonly readOnly: pulumi.Output<boolean>;
126
126
  /**
@@ -153,7 +153,7 @@ export interface ConnectionState {
153
153
  * The key value of options required by the connection, e.g. `host`, `port`, `user`, `password` or `GoogleServiceAccountKeyJson`. Please consult the [documentation](https://docs.databricks.com/query-federation/index.html#supported-data-sources) for the required option.
154
154
  */
155
155
  options?: pulumi.Input<{
156
- [key: string]: any;
156
+ [key: string]: pulumi.Input<string>;
157
157
  }>;
158
158
  /**
159
159
  * Name of the connection owner.
@@ -163,7 +163,7 @@ export interface ConnectionState {
163
163
  * Free-form connection properties.
164
164
  */
165
165
  properties?: pulumi.Input<{
166
- [key: string]: any;
166
+ [key: string]: pulumi.Input<string>;
167
167
  }>;
168
168
  readOnly?: pulumi.Input<boolean>;
169
169
  }
@@ -188,7 +188,7 @@ export interface ConnectionArgs {
188
188
  * The key value of options required by the connection, e.g. `host`, `port`, `user`, `password` or `GoogleServiceAccountKeyJson`. Please consult the [documentation](https://docs.databricks.com/query-federation/index.html#supported-data-sources) for the required option.
189
189
  */
190
190
  options: pulumi.Input<{
191
- [key: string]: any;
191
+ [key: string]: pulumi.Input<string>;
192
192
  }>;
193
193
  /**
194
194
  * Name of the connection owner.
@@ -198,7 +198,7 @@ export interface ConnectionArgs {
198
198
  * Free-form connection properties.
199
199
  */
200
200
  properties?: pulumi.Input<{
201
- [key: string]: any;
201
+ [key: string]: pulumi.Input<string>;
202
202
  }>;
203
203
  readOnly?: pulumi.Input<boolean>;
204
204
  }
package/getJobs.d.ts CHANGED
@@ -54,7 +54,7 @@ export interface GetJobsArgs {
54
54
  * map of databricks.Job names to ids
55
55
  */
56
56
  ids?: {
57
- [key: string]: any;
57
+ [key: string]: string;
58
58
  };
59
59
  }
60
60
  /**
@@ -69,7 +69,7 @@ export interface GetJobsResult {
69
69
  * map of databricks.Job names to ids
70
70
  */
71
71
  readonly ids: {
72
- [key: string]: any;
72
+ [key: string]: string;
73
73
  };
74
74
  }
75
75
  /**
@@ -127,6 +127,6 @@ export interface GetJobsOutputArgs {
127
127
  * map of databricks.Job names to ids
128
128
  */
129
129
  ids?: pulumi.Input<{
130
- [key: string]: any;
130
+ [key: string]: pulumi.Input<string>;
131
131
  }>;
132
132
  }
@@ -35,7 +35,7 @@ export interface GetMetastoresArgs {
35
35
  * Mapping of name to id of databricks_metastore
36
36
  */
37
37
  ids?: {
38
- [key: string]: any;
38
+ [key: string]: string;
39
39
  };
40
40
  }
41
41
  /**
@@ -50,7 +50,7 @@ export interface GetMetastoresResult {
50
50
  * Mapping of name to id of databricks_metastore
51
51
  */
52
52
  readonly ids: {
53
- [key: string]: any;
53
+ [key: string]: string;
54
54
  };
55
55
  }
56
56
  /**
@@ -89,6 +89,6 @@ export interface GetMetastoresOutputArgs {
89
89
  * Mapping of name to id of databricks_metastore
90
90
  */
91
91
  ids?: pulumi.Input<{
92
- [key: string]: any;
92
+ [key: string]: pulumi.Input<string>;
93
93
  }>;
94
94
  }
@@ -38,7 +38,7 @@ export interface GetMwsCredentialsArgs {
38
38
  * name-to-id map for all of the credentials in the account
39
39
  */
40
40
  ids?: {
41
- [key: string]: any;
41
+ [key: string]: string;
42
42
  };
43
43
  }
44
44
  /**
@@ -53,7 +53,7 @@ export interface GetMwsCredentialsResult {
53
53
  * name-to-id map for all of the credentials in the account
54
54
  */
55
55
  readonly ids: {
56
- [key: string]: any;
56
+ [key: string]: string;
57
57
  };
58
58
  }
59
59
  /**
@@ -95,6 +95,6 @@ export interface GetMwsCredentialsOutputArgs {
95
95
  * name-to-id map for all of the credentials in the account
96
96
  */
97
97
  ids?: pulumi.Input<{
98
- [key: string]: any;
98
+ [key: string]: pulumi.Input<string>;
99
99
  }>;
100
100
  }
@@ -34,7 +34,7 @@ export interface GetMwsWorkspacesArgs {
34
34
  * name-to-id map for all of the workspaces in the account
35
35
  */
36
36
  ids?: {
37
- [key: string]: any;
37
+ [key: string]: string;
38
38
  };
39
39
  }
40
40
  /**
@@ -49,7 +49,7 @@ export interface GetMwsWorkspacesResult {
49
49
  * name-to-id map for all of the workspaces in the account
50
50
  */
51
51
  readonly ids: {
52
- [key: string]: any;
52
+ [key: string]: string;
53
53
  };
54
54
  }
55
55
  /**
@@ -87,6 +87,6 @@ export interface GetMwsWorkspacesOutputArgs {
87
87
  * name-to-id map for all of the workspaces in the account
88
88
  */
89
89
  ids?: pulumi.Input<{
90
- [key: string]: any;
90
+ [key: string]: pulumi.Input<string>;
91
91
  }>;
92
92
  }
package/instancePool.d.ts CHANGED
@@ -71,7 +71,7 @@ export declare class InstancePool extends pulumi.CustomResource {
71
71
  * (Map) Additional tags for instance pool resources. Databricks tags all pool resources (e.g. AWS & Azure instances and Disk volumes). The tags of the instance pool will propagate to the clusters using the pool (see the [official documentation](https://docs.databricks.com/administration-guide/account-settings/usage-detail-tags-aws.html#tag-propagation)). Attempting to set the same tags in both cluster and instance pool will raise an error. *Databricks allows at most 43 custom tags.*
72
72
  */
73
73
  readonly customTags: pulumi.Output<{
74
- [key: string]: any;
74
+ [key: string]: string;
75
75
  } | undefined>;
76
76
  readonly diskSpec: pulumi.Output<outputs.InstancePoolDiskSpec | undefined>;
77
77
  /**
@@ -125,7 +125,7 @@ export interface InstancePoolState {
125
125
  * (Map) Additional tags for instance pool resources. Databricks tags all pool resources (e.g. AWS & Azure instances and Disk volumes). The tags of the instance pool will propagate to the clusters using the pool (see the [official documentation](https://docs.databricks.com/administration-guide/account-settings/usage-detail-tags-aws.html#tag-propagation)). Attempting to set the same tags in both cluster and instance pool will raise an error. *Databricks allows at most 43 custom tags.*
126
126
  */
127
127
  customTags?: pulumi.Input<{
128
- [key: string]: any;
128
+ [key: string]: pulumi.Input<string>;
129
129
  }>;
130
130
  diskSpec?: pulumi.Input<inputs.InstancePoolDiskSpec>;
131
131
  /**
@@ -171,7 +171,7 @@ export interface InstancePoolArgs {
171
171
  * (Map) Additional tags for instance pool resources. Databricks tags all pool resources (e.g. AWS & Azure instances and Disk volumes). The tags of the instance pool will propagate to the clusters using the pool (see the [official documentation](https://docs.databricks.com/administration-guide/account-settings/usage-detail-tags-aws.html#tag-propagation)). Attempting to set the same tags in both cluster and instance pool will raise an error. *Databricks allows at most 43 custom tags.*
172
172
  */
173
173
  customTags?: pulumi.Input<{
174
- [key: string]: any;
174
+ [key: string]: pulumi.Input<string>;
175
175
  }>;
176
176
  diskSpec?: pulumi.Input<inputs.InstancePoolDiskSpec>;
177
177
  /**
package/ipAccessList.d.ts CHANGED
@@ -11,7 +11,7 @@ import * as pulumi from "@pulumi/pulumi";
11
11
  * import * as databricks from "@pulumi/databricks";
12
12
  *
13
13
  * const _this = new databricks.WorkspaceConf("this", {customConfig: {
14
- * enableIpAccessLists: true,
14
+ * enableIpAccessLists: "true",
15
15
  * }});
16
16
  * const allowed_list = new databricks.IpAccessList("allowed-list", {
17
17
  * label: "allow_in",
package/ipAccessList.js CHANGED
@@ -17,7 +17,7 @@ const utilities = require("./utilities");
17
17
  * import * as databricks from "@pulumi/databricks";
18
18
  *
19
19
  * const _this = new databricks.WorkspaceConf("this", {customConfig: {
20
- * enableIpAccessLists: true,
20
+ * enableIpAccessLists: "true",
21
21
  * }});
22
22
  * const allowed_list = new databricks.IpAccessList("allowed-list", {
23
23
  * label: "allow_in",
package/job.d.ts CHANGED
@@ -221,7 +221,7 @@ export declare class Job extends pulumi.CustomResource {
221
221
  * An optional map of the tags associated with the job. See tags Configuration Map
222
222
  */
223
223
  readonly tags: pulumi.Output<{
224
- [key: string]: any;
224
+ [key: string]: string;
225
225
  } | undefined>;
226
226
  /**
227
227
  * A list of task specification that the job will execute. See task Configuration Block below.
@@ -380,7 +380,7 @@ export interface JobState {
380
380
  * An optional map of the tags associated with the job. See tags Configuration Map
381
381
  */
382
382
  tags?: pulumi.Input<{
383
- [key: string]: any;
383
+ [key: string]: pulumi.Input<string>;
384
384
  }>;
385
385
  /**
386
386
  * A list of task specification that the job will execute. See task Configuration Block below.
@@ -531,7 +531,7 @@ export interface JobArgs {
531
531
  * An optional map of the tags associated with the job. See tags Configuration Map
532
532
  */
533
533
  tags?: pulumi.Input<{
534
- [key: string]: any;
534
+ [key: string]: pulumi.Input<string>;
535
535
  }>;
536
536
  /**
537
537
  * A list of task specification that the job will execute. See task Configuration Block below.
package/mount.d.ts CHANGED
@@ -342,7 +342,7 @@ export declare class Mount extends pulumi.CustomResource {
342
342
  readonly clusterId: pulumi.Output<string>;
343
343
  readonly encryptionType: pulumi.Output<string | undefined>;
344
344
  readonly extraConfigs: pulumi.Output<{
345
- [key: string]: any;
345
+ [key: string]: string;
346
346
  } | undefined>;
347
347
  readonly gs: pulumi.Output<outputs.MountGs | undefined>;
348
348
  readonly name: pulumi.Output<string>;
@@ -372,7 +372,7 @@ export interface MountState {
372
372
  clusterId?: pulumi.Input<string>;
373
373
  encryptionType?: pulumi.Input<string>;
374
374
  extraConfigs?: pulumi.Input<{
375
- [key: string]: any;
375
+ [key: string]: pulumi.Input<string>;
376
376
  }>;
377
377
  gs?: pulumi.Input<inputs.MountGs>;
378
378
  name?: pulumi.Input<string>;
@@ -394,7 +394,7 @@ export interface MountArgs {
394
394
  clusterId?: pulumi.Input<string>;
395
395
  encryptionType?: pulumi.Input<string>;
396
396
  extraConfigs?: pulumi.Input<{
397
- [key: string]: any;
397
+ [key: string]: pulumi.Input<string>;
398
398
  }>;
399
399
  gs?: pulumi.Input<inputs.MountGs>;
400
400
  name?: pulumi.Input<string>;
@@ -242,7 +242,7 @@ export declare class MwsWorkspaces extends pulumi.CustomResource {
242
242
  * The custom tags key-value pairing that is attached to this workspace. These tags will be applied to clusters automatically in addition to any `defaultTags` or `customTags` on a cluster level. Please note it can take up to an hour for customTags to be set due to scheduling on Control Plane. After custom tags are applied, they can be modified however they can never be completely removed.
243
243
  */
244
244
  readonly customTags: pulumi.Output<{
245
- [key: string]: any;
245
+ [key: string]: string;
246
246
  } | undefined>;
247
247
  /**
248
248
  * @deprecated Use managedServicesCustomerManagedKeyId instead
@@ -347,7 +347,7 @@ export interface MwsWorkspacesState {
347
347
  * The custom tags key-value pairing that is attached to this workspace. These tags will be applied to clusters automatically in addition to any `defaultTags` or `customTags` on a cluster level. Please note it can take up to an hour for customTags to be set due to scheduling on Control Plane. After custom tags are applied, they can be modified however they can never be completely removed.
348
348
  */
349
349
  customTags?: pulumi.Input<{
350
- [key: string]: any;
350
+ [key: string]: pulumi.Input<string>;
351
351
  }>;
352
352
  /**
353
353
  * @deprecated Use managedServicesCustomerManagedKeyId instead
@@ -444,7 +444,7 @@ export interface MwsWorkspacesArgs {
444
444
  * The custom tags key-value pairing that is attached to this workspace. These tags will be applied to clusters automatically in addition to any `defaultTags` or `customTags` on a cluster level. Please note it can take up to an hour for customTags to be set due to scheduling on Control Plane. After custom tags are applied, they can be modified however they can never be completely removed.
445
445
  */
446
446
  customTags?: pulumi.Input<{
447
- [key: string]: any;
447
+ [key: string]: pulumi.Input<string>;
448
448
  }>;
449
449
  /**
450
450
  * @deprecated Use managedServicesCustomerManagedKeyId instead
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@pulumi/databricks",
3
- "version": "1.48.0",
3
+ "version": "1.49.0-alpha.1724218263",
4
4
  "description": "A Pulumi package for creating and managing databricks cloud resources.",
5
5
  "keywords": [
6
6
  "pulumi",
@@ -24,6 +24,6 @@
24
24
  "pulumi": {
25
25
  "resource": true,
26
26
  "name": "databricks",
27
- "version": "1.48.0"
27
+ "version": "1.49.0-alpha.1724218263"
28
28
  }
29
29
  }
package/pipeline.d.ts CHANGED
@@ -121,7 +121,7 @@ export declare class Pipeline extends pulumi.CustomResource {
121
121
  * An optional map of values to apply to the entire pipeline. Elements must be formatted as key:value pairs.
122
122
  */
123
123
  readonly configuration: pulumi.Output<{
124
- [key: string]: any;
124
+ [key: string]: string;
125
125
  } | undefined>;
126
126
  /**
127
127
  * A flag indicating whether to run the pipeline continuously. The default value is `false`.
@@ -220,7 +220,7 @@ export interface PipelineState {
220
220
  * An optional map of values to apply to the entire pipeline. Elements must be formatted as key:value pairs.
221
221
  */
222
222
  configuration?: pulumi.Input<{
223
- [key: string]: any;
223
+ [key: string]: pulumi.Input<string>;
224
224
  }>;
225
225
  /**
226
226
  * A flag indicating whether to run the pipeline continuously. The default value is `false`.
@@ -311,7 +311,7 @@ export interface PipelineArgs {
311
311
  * An optional map of values to apply to the entire pipeline. Elements must be formatted as key:value pairs.
312
312
  */
313
313
  configuration?: pulumi.Input<{
314
- [key: string]: any;
314
+ [key: string]: pulumi.Input<string>;
315
315
  }>;
316
316
  /**
317
317
  * A flag indicating whether to run the pipeline continuously. The default value is `false`.
package/schema.d.ts CHANGED
@@ -92,7 +92,7 @@ export declare class Schema extends pulumi.CustomResource {
92
92
  * Extensible Schema properties.
93
93
  */
94
94
  readonly properties: pulumi.Output<{
95
- [key: string]: any;
95
+ [key: string]: string;
96
96
  } | undefined>;
97
97
  /**
98
98
  * Managed location of the schema. Location in cloud storage where data for managed tables will be stored. If not specified, the location will default to the catalog root location. Change forces creation of a new resource.
@@ -140,7 +140,7 @@ export interface SchemaState {
140
140
  * Extensible Schema properties.
141
141
  */
142
142
  properties?: pulumi.Input<{
143
- [key: string]: any;
143
+ [key: string]: pulumi.Input<string>;
144
144
  }>;
145
145
  /**
146
146
  * Managed location of the schema. Location in cloud storage where data for managed tables will be stored. If not specified, the location will default to the catalog root location. Change forces creation of a new resource.
@@ -180,7 +180,7 @@ export interface SchemaArgs {
180
180
  * Extensible Schema properties.
181
181
  */
182
182
  properties?: pulumi.Input<{
183
- [key: string]: any;
183
+ [key: string]: pulumi.Input<string>;
184
184
  }>;
185
185
  /**
186
186
  * Managed location of the schema. Location in cloud storage where data for managed tables will be stored. If not specified, the location will default to the catalog root location. Change forces creation of a new resource.
@@ -82,7 +82,7 @@ export declare class SqlGlobalConfig extends pulumi.CustomResource {
82
82
  * Data access configuration for databricks_sql_endpoint, such as configuration for an external Hive metastore, Hadoop Filesystem configuration, etc. Please note that the list of supported configuration properties is limited, so refer to the [documentation](https://docs.databricks.com/sql/admin/data-access-configuration.html#supported-properties) for a full list. Apply will fail if you're specifying not permitted configuration.
83
83
  */
84
84
  readonly dataAccessConfig: pulumi.Output<{
85
- [key: string]: any;
85
+ [key: string]: string;
86
86
  } | undefined>;
87
87
  /**
88
88
  * @deprecated This field is intended as an internal API and may be removed from the Databricks Terraform provider in the future
@@ -104,7 +104,7 @@ export declare class SqlGlobalConfig extends pulumi.CustomResource {
104
104
  * SQL Configuration Parameters let you override the default behavior for all sessions with all endpoints.
105
105
  */
106
106
  readonly sqlConfigParams: pulumi.Output<{
107
- [key: string]: any;
107
+ [key: string]: string;
108
108
  } | undefined>;
109
109
  /**
110
110
  * Create a SqlGlobalConfig resource with the given unique name, arguments, and options.
@@ -123,7 +123,7 @@ export interface SqlGlobalConfigState {
123
123
  * Data access configuration for databricks_sql_endpoint, such as configuration for an external Hive metastore, Hadoop Filesystem configuration, etc. Please note that the list of supported configuration properties is limited, so refer to the [documentation](https://docs.databricks.com/sql/admin/data-access-configuration.html#supported-properties) for a full list. Apply will fail if you're specifying not permitted configuration.
124
124
  */
125
125
  dataAccessConfig?: pulumi.Input<{
126
- [key: string]: any;
126
+ [key: string]: pulumi.Input<string>;
127
127
  }>;
128
128
  /**
129
129
  * @deprecated This field is intended as an internal API and may be removed from the Databricks Terraform provider in the future
@@ -145,7 +145,7 @@ export interface SqlGlobalConfigState {
145
145
  * SQL Configuration Parameters let you override the default behavior for all sessions with all endpoints.
146
146
  */
147
147
  sqlConfigParams?: pulumi.Input<{
148
- [key: string]: any;
148
+ [key: string]: pulumi.Input<string>;
149
149
  }>;
150
150
  }
151
151
  /**
@@ -156,7 +156,7 @@ export interface SqlGlobalConfigArgs {
156
156
  * Data access configuration for databricks_sql_endpoint, such as configuration for an external Hive metastore, Hadoop Filesystem configuration, etc. Please note that the list of supported configuration properties is limited, so refer to the [documentation](https://docs.databricks.com/sql/admin/data-access-configuration.html#supported-properties) for a full list. Apply will fail if you're specifying not permitted configuration.
157
157
  */
158
158
  dataAccessConfig?: pulumi.Input<{
159
- [key: string]: any;
159
+ [key: string]: pulumi.Input<string>;
160
160
  }>;
161
161
  /**
162
162
  * @deprecated This field is intended as an internal API and may be removed from the Databricks Terraform provider in the future
@@ -178,6 +178,6 @@ export interface SqlGlobalConfigArgs {
178
178
  * SQL Configuration Parameters let you override the default behavior for all sessions with all endpoints.
179
179
  */
180
180
  sqlConfigParams?: pulumi.Input<{
181
- [key: string]: any;
181
+ [key: string]: pulumi.Input<string>;
182
182
  }>;
183
183
  }
package/sqlTable.d.ts CHANGED
@@ -60,7 +60,7 @@ export declare class SqlTable extends pulumi.CustomResource {
60
60
  * Map of user defined table options. Change forces creation of a new resource.
61
61
  */
62
62
  readonly options: pulumi.Output<{
63
- [key: string]: any;
63
+ [key: string]: string;
64
64
  } | undefined>;
65
65
  /**
66
66
  * Username/groupname/sp applicationId of the schema owner.
@@ -74,7 +74,7 @@ export declare class SqlTable extends pulumi.CustomResource {
74
74
  * Map of table properties.
75
75
  */
76
76
  readonly properties: pulumi.Output<{
77
- [key: string]: any;
77
+ [key: string]: string;
78
78
  }>;
79
79
  /**
80
80
  * Name of parent Schema relative to parent Catalog. Change forces creation of a new resource.
@@ -139,7 +139,7 @@ export interface SqlTableState {
139
139
  * Map of user defined table options. Change forces creation of a new resource.
140
140
  */
141
141
  options?: pulumi.Input<{
142
- [key: string]: any;
142
+ [key: string]: pulumi.Input<string>;
143
143
  }>;
144
144
  /**
145
145
  * Username/groupname/sp applicationId of the schema owner.
@@ -153,7 +153,7 @@ export interface SqlTableState {
153
153
  * Map of table properties.
154
154
  */
155
155
  properties?: pulumi.Input<{
156
- [key: string]: any;
156
+ [key: string]: pulumi.Input<string>;
157
157
  }>;
158
158
  /**
159
159
  * Name of parent Schema relative to parent Catalog. Change forces creation of a new resource.
@@ -210,7 +210,7 @@ export interface SqlTableArgs {
210
210
  * Map of user defined table options. Change forces creation of a new resource.
211
211
  */
212
212
  options?: pulumi.Input<{
213
- [key: string]: any;
213
+ [key: string]: pulumi.Input<string>;
214
214
  }>;
215
215
  /**
216
216
  * Username/groupname/sp applicationId of the schema owner.
@@ -224,7 +224,7 @@ export interface SqlTableArgs {
224
224
  * Map of table properties.
225
225
  */
226
226
  properties?: pulumi.Input<{
227
- [key: string]: any;
227
+ [key: string]: pulumi.Input<string>;
228
228
  }>;
229
229
  /**
230
230
  * Name of parent Schema relative to parent Catalog. Change forces creation of a new resource.