@pulumi/databricks 1.50.0-alpha.1724995860 → 1.50.0-alpha.1725644892

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (98)
  1. package/cluster.d.ts +0 -54
  2. package/cluster.js +0 -54
  3. package/cluster.js.map +1 -1
  4. package/getAwsAssumeRolePolicy.d.ts +2 -2
  5. package/getAwsAssumeRolePolicy.js +2 -2
  6. package/getCatalog.d.ts +2 -2
  7. package/getCatalog.js +2 -2
  8. package/getCatalogs.d.ts +2 -2
  9. package/getCatalogs.js +2 -2
  10. package/getCluster.d.ts +2 -2
  11. package/getCluster.js +2 -2
  12. package/getClusterPolicy.d.ts +2 -2
  13. package/getClusterPolicy.js +2 -2
  14. package/getClusters.d.ts +2 -2
  15. package/getClusters.js +2 -2
  16. package/getCurrentMetastore.d.ts +2 -2
  17. package/getCurrentMetastore.js +2 -2
  18. package/getCurrentUser.d.ts +2 -2
  19. package/getCurrentUser.js +2 -2
  20. package/getDbfsFile.d.ts +2 -2
  21. package/getDbfsFile.js +2 -2
  22. package/getDbfsFilePaths.d.ts +2 -2
  23. package/getDbfsFilePaths.js +2 -2
  24. package/getDirectory.d.ts +2 -2
  25. package/getDirectory.js +2 -2
  26. package/getGroup.d.ts +2 -2
  27. package/getGroup.js +2 -2
  28. package/getInstancePool.d.ts +2 -2
  29. package/getInstancePool.js +2 -2
  30. package/getJob.d.ts +2 -2
  31. package/getJob.js +2 -2
  32. package/getJobs.d.ts +2 -2
  33. package/getJobs.js +2 -2
  34. package/getMetastore.d.ts +2 -2
  35. package/getMetastore.js +2 -2
  36. package/getMlflowExperiment.d.ts +2 -2
  37. package/getMlflowExperiment.js +2 -2
  38. package/getMlflowModel.d.ts +2 -2
  39. package/getMlflowModel.js +2 -2
  40. package/getMwsCredentials.d.ts +2 -2
  41. package/getMwsCredentials.js +2 -2
  42. package/getMwsWorkspaces.d.ts +4 -4
  43. package/getMwsWorkspaces.js +4 -4
  44. package/getNodeType.d.ts +2 -2
  45. package/getNodeType.js +2 -2
  46. package/getNotebook.d.ts +2 -2
  47. package/getNotebook.js +2 -2
  48. package/getNotebookPaths.d.ts +2 -2
  49. package/getNotebookPaths.js +2 -2
  50. package/getPipelines.d.ts +2 -2
  51. package/getPipelines.js +2 -2
  52. package/getSchemas.d.ts +2 -2
  53. package/getSchemas.js +2 -2
  54. package/getServicePrincipal.d.ts +2 -2
  55. package/getServicePrincipal.js +2 -2
  56. package/getServicePrincipals.d.ts +2 -2
  57. package/getServicePrincipals.js +2 -2
  58. package/getSparkVersion.d.ts +2 -2
  59. package/getSparkVersion.js +2 -2
  60. package/getSqlWarehouse.d.ts +2 -2
  61. package/getSqlWarehouse.js +2 -2
  62. package/getSqlWarehouses.d.ts +2 -2
  63. package/getSqlWarehouses.js +2 -2
  64. package/getTable.d.ts +2 -2
  65. package/getTable.js +2 -2
  66. package/getTables.d.ts +2 -2
  67. package/getTables.js +2 -2
  68. package/getUser.d.ts +2 -2
  69. package/getUser.js +2 -2
  70. package/getViews.d.ts +2 -2
  71. package/getViews.js +2 -2
  72. package/getZones.d.ts +2 -2
  73. package/getZones.js +2 -2
  74. package/gitCredential.d.ts +3 -3
  75. package/mount.d.ts +0 -315
  76. package/mount.js +0 -315
  77. package/mount.js.map +1 -1
  78. package/mwsCredentials.d.ts +52 -5
  79. package/mwsCredentials.js +28 -5
  80. package/mwsCredentials.js.map +1 -1
  81. package/mwsCustomerManagedKeys.d.ts +0 -9
  82. package/mwsCustomerManagedKeys.js +0 -9
  83. package/mwsCustomerManagedKeys.js.map +1 -1
  84. package/mwsNetworks.d.ts +78 -43
  85. package/mwsNetworks.js +80 -45
  86. package/mwsNetworks.js.map +1 -1
  87. package/mwsPrivateAccessSettings.d.ts +1 -1
  88. package/mwsPrivateAccessSettings.js +1 -1
  89. package/mwsStorageConfigurations.d.ts +51 -6
  90. package/mwsStorageConfigurations.js +24 -6
  91. package/mwsStorageConfigurations.js.map +1 -1
  92. package/mwsVpcEndpoint.d.ts +1 -1
  93. package/mwsVpcEndpoint.js +1 -1
  94. package/package.json +2 -2
  95. package/sqlPermissions.d.ts +0 -9
  96. package/sqlPermissions.js.map +1 -1
  97. package/types/input.d.ts +2 -2
  98. package/types/output.d.ts +2 -2
package/mount.d.ts CHANGED
@@ -2,321 +2,6 @@ import * as pulumi from "@pulumi/pulumi";
2
2
  import * as inputs from "./types/input";
3
3
  import * as outputs from "./types/output";
4
4
  /**
5
- * This resource will mount your cloud storage:
6
- * * `gs` - to [mount Google Cloud Storage](https://docs.gcp.databricks.com/data/data-sources/google/gcs.html)
7
- * * `abfs` - to [mount ADLS Gen2](https://docs.microsoft.com/en-us/azure/databricks/data/data-sources/azure/adls-gen2/) using Azure Blob Filesystem (ABFS) driver
8
- * * `adl` - to [mount ADLS Gen1](https://docs.microsoft.com/en-us/azure/databricks/data/data-sources/azure/azure-datalake) using Azure Data Lake (ADL) driver
9
- * * `wasb` - to [mount Azure Blob Storage](https://docs.microsoft.com/en-us/azure/databricks/data/data-sources/azure/azure-storage) using Windows Azure Storage Blob (WASB) driver
10
- *
11
- * 1. Use generic arguments - you are responsible for providing all parameters required to mount the specific storage. This is the most flexible option.
12
- *
13
- * ## Common arguments
14
- *
15
- * * `clusterId` - (Optional, String) Cluster to use for mounting. If no cluster is specified, a new cluster will be created and will mount the bucket for all of the clusters in this workspace. If the cluster is not running, it will be started, so be sure to set auto-termination rules on it.
16
- * * `name` - (Optional, String) Name under which the mount will be accessible in `dbfs:/mnt/<MOUNT_NAME>`. If not specified, the provider will try to infer it depending on the resource type:
17
- * * `bucketName` for AWS S3 and Google Cloud Storage
18
- * * `containerName` for ADLS Gen2 and Azure Blob Storage
19
- * * `storageResourceName` for ADLS Gen1
20
- * * `uri` - (Optional, String) the URI for accessing specific storage (`s3a://....`, `abfss://....`, `gs://....`, etc.)
21
- * * `extraConfigs` - (Optional, String map) configuration parameters that are necessary for mounting of specific storage
22
- * * `resourceId` - (Optional, String) resource ID for a given storage account. Could be used to fill defaults, such as storage account & container names on Azure.
23
- * * `encryptionType` - (Optional, String) encryption type. Currently used only for [AWS S3 mounts](https://docs.databricks.com/data/data-sources/aws/amazon-s3.html#encrypt-data-in-s3-buckets)
24
- *
25
- * ### Example mounting ADLS Gen2 using uri and extraConfigs
26
- *
27
- * ```typescript
28
- * import * as pulumi from "@pulumi/pulumi";
29
- * import * as databricks from "@pulumi/databricks";
30
- *
31
- * const tenantId = "00000000-1111-2222-3333-444444444444";
32
- * const clientId = "55555555-6666-7777-8888-999999999999";
33
- * const secretScope = "some-kv";
34
- * const secretKey = "some-sp-secret";
35
- * const container = "test";
36
- * const storageAcc = "lrs";
37
- * const _this = new databricks.Mount("this", {
38
- * name: "tf-abfss",
39
- * uri: `abfss://${container}@${storageAcc}.dfs.core.windows.net`,
40
- * extraConfigs: {
41
- * "fs.azure.account.auth.type": "OAuth",
42
- * "fs.azure.account.oauth.provider.type": "org.apache.hadoop.fs.azurebfs.oauth2.ClientCredsTokenProvider",
43
- * "fs.azure.account.oauth2.client.id": clientId,
44
- * "fs.azure.account.oauth2.client.secret": `{{secrets/${secretScope}/${secretKey}}}`,
45
- * "fs.azure.account.oauth2.client.endpoint": `https://login.microsoftonline.com/${tenantId}/oauth2/token`,
46
- * "fs.azure.createRemoteFileSystemDuringInitialization": "false",
47
- * },
48
- * });
49
- * ```
50
- *
51
- * ### Example mounting ADLS Gen2 with AAD passthrough
52
- *
53
- * > **Note** AAD passthrough is considered a legacy data access pattern. Use Unity Catalog for fine-grained data access control.
54
- *
55
- * > **Note** Mounts using AAD passthrough cannot be created using a service principal.
56
- *
57
- * To mount ADLS Gen2 with Azure Active Directory credentials passthrough, execute the mount commands on a cluster configured with AAD credentials passthrough and provide the necessary configuration parameters (see [documentation](https://docs.microsoft.com/en-us/azure/databricks/security/credential-passthrough/adls-passthrough#--mount-azure-data-lake-storage-to-dbfs-using-credential-passthrough) for more details).
58
- *
59
- * ```typescript
60
- * import * as pulumi from "@pulumi/pulumi";
61
- * import * as azure from "@pulumi/azure";
62
- * import * as databricks from "@pulumi/databricks";
63
- *
64
- * const config = new pulumi.Config();
65
- * // Resource group for Databricks Workspace
66
- * const resourceGroup = config.require("resourceGroup");
67
- * // Name of the Databricks Workspace
68
- * const workspaceName = config.require("workspaceName");
69
- * const thisWorkspace = azure.databricks.getWorkspace({
70
- * name: workspaceName,
71
- * resourceGroupName: resourceGroup,
72
- * });
73
- * const smallest = databricks.getNodeType({
74
- * localDisk: true,
75
- * });
76
- * const latest = databricks.getSparkVersion({});
77
- * const sharedPassthrough = new databricks.Cluster("shared_passthrough", {
78
- * clusterName: "Shared Passthrough for mount",
79
- * sparkVersion: latest.then(latest => latest.id),
80
- * nodeTypeId: smallest.then(smallest => smallest.id),
81
- * autoterminationMinutes: 10,
82
- * numWorkers: 1,
83
- * sparkConf: {
84
- * "spark.databricks.cluster.profile": "serverless",
85
- * "spark.databricks.repl.allowedLanguages": "python,sql",
86
- * "spark.databricks.passthrough.enabled": "true",
87
- * "spark.databricks.pyspark.enableProcessIsolation": "true",
88
- * },
89
- * customTags: {
90
- * ResourceClass: "Serverless",
91
- * },
92
- * });
93
- * // Name of the ADLS Gen2 storage container
94
- * const storageAcc = config.require("storageAcc");
95
- * // Name of container inside storage account
96
- * const container = config.require("container");
97
- * const passthrough = new databricks.Mount("passthrough", {
98
- * name: "passthrough-test",
99
- * clusterId: sharedPassthrough.id,
100
- * uri: `abfss://${container}@${storageAcc}.dfs.core.windows.net`,
101
- * extraConfigs: {
102
- * "fs.azure.account.auth.type": "CustomAccessToken",
103
- * "fs.azure.account.custom.token.provider.class": "{{sparkconf/spark.databricks.passthrough.adls.gen2.tokenProviderClassName}}",
104
- * },
105
- * });
106
- * ```
107
- *
108
- * ## s3 block
109
- *
110
- * This block allows specifying parameters for mounting an AWS S3 bucket. The following arguments are supported inside the `s3` block:
111
- *
112
- * * `instanceProfile` - (Optional) (String) ARN of registered instance profile for data access. If it's not specified, then the `clusterId` should be provided, and the cluster should have an instance profile attached to it. If both `clusterId` & `instanceProfile` are specified, then `clusterId` takes precedence.
113
- * * `bucketName` - (Required) (String) S3 bucket name to be mounted.
114
- *
115
- * ### Example of mounting S3
116
- *
117
- * ```typescript
118
- * import * as pulumi from "@pulumi/pulumi";
119
- * import * as databricks from "@pulumi/databricks";
120
- *
121
- * // now you can do `%fs ls /mnt/experiments` in notebooks
122
- * const _this = new databricks.Mount("this", {
123
- * name: "experiments",
124
- * s3: {
125
- * instanceProfile: ds.id,
126
- * bucketName: thisAwsS3Bucket.bucket,
127
- * },
128
- * });
129
- * ```
130
- *
131
- * ## abfs block
132
- *
133
- * This block allows specifying parameters for mounting of the ADLS Gen2. The following arguments are required inside the `abfs` block:
134
- *
135
- * * `clientId` - (Required) (String) This is the clientId (Application Object ID) for the enterprise application for the service principal.
136
- * * `tenantId` - (Optional) (String) This is your azure directory tenant id. It is required for creating the mount. (Could be omitted if Azure authentication is used, and we can extract `tenantId` from it).
137
- * * `clientSecretKey` - (Required) (String) This is the secret key in which your service principal/enterprise app client secret will be stored.
138
- * * `clientSecretScope` - (Required) (String) This is the secret scope in which your service principal/enterprise app client secret will be stored.
139
- * * `containerName` - (Required) (String) ADLS gen2 container name. (Could be omitted if `resourceId` is provided)
140
- * * `storageAccountName` - (Required) (String) The name of the storage resource in which the data is. (Could be omitted if `resourceId` is provided)
141
- * * `directory` - (Computed) (String) Optional; an additional directory within the storage to mount. If specified, it must start with a "/".
142
- * * `initializeFileSystem` - (Required) (Bool) whether or not to initialize the file system on first use
143
- *
144
- * ### Creating mount for ADLS Gen2 using abfs block
145
- *
146
- * In this example, we're using Azure authentication, so we can omit some parameters (`tenantId`, `storageAccountName`, and `containerName`) that will be detected automatically.
147
- *
148
- * ```typescript
149
- * import * as pulumi from "@pulumi/pulumi";
150
- * import * as azure from "@pulumi/azure";
151
- * import * as databricks from "@pulumi/databricks";
152
- *
153
- * const terraform = new databricks.SecretScope("terraform", {
154
- * name: "application",
155
- * initialManagePrincipal: "users",
156
- * });
157
- * const servicePrincipalKey = new databricks.Secret("service_principal_key", {
158
- * key: "service_principal_key",
159
- * stringValue: ARM_CLIENT_SECRET,
160
- * scope: terraform.name,
161
- * });
162
- * const _this = new azure.storage.Account("this", {
163
- * name: `${prefix}datalake`,
164
- * resourceGroupName: resourceGroupName,
165
- * location: resourceGroupLocation,
166
- * accountTier: "Standard",
167
- * accountReplicationType: "GRS",
168
- * accountKind: "StorageV2",
169
- * isHnsEnabled: true,
170
- * });
171
- * const thisAssignment = new azure.authorization.Assignment("this", {
172
- * scope: _this.id,
173
- * roleDefinitionName: "Storage Blob Data Contributor",
174
- * principalId: current.objectId,
175
- * });
176
- * const thisContainer = new azure.storage.Container("this", {
177
- * name: "marketing",
178
- * storageAccountName: _this.name,
179
- * containerAccessType: "private",
180
- * });
181
- * const marketing = new databricks.Mount("marketing", {
182
- * name: "marketing",
183
- * resourceId: thisContainer.resourceManagerId,
184
- * abfs: {
185
- * clientId: current.clientId,
186
- * clientSecretScope: terraform.name,
187
- * clientSecretKey: servicePrincipalKey.key,
188
- * initializeFileSystem: true,
189
- * },
190
- * });
191
- * ```
192
- *
193
- * ## gs block
194
- *
195
- * This block allows specifying parameters for mounting of the Google Cloud Storage. The following arguments are required inside the `gs` block:
196
- *
197
- * * `serviceAccount` - (Optional) (String) email of registered [Google Service Account](https://docs.gcp.databricks.com/data/data-sources/google/gcs.html#step-1-set-up-google-cloud-service-account-using-google-cloud-console) for data access. If it's not specified, then the `clusterId` should be provided, and the cluster should have a Google service account attached to it.
198
- * * `bucketName` - (Required) (String) GCS bucket name to be mounted.
199
- *
200
- * ### Example mounting Google Cloud Storage
201
- *
202
- * ```typescript
203
- * import * as pulumi from "@pulumi/pulumi";
204
- * import * as databricks from "@pulumi/databricks";
205
- *
206
- * const thisGs = new databricks.Mount("this_gs", {
207
- * name: "gs-mount",
208
- * gs: {
209
- * serviceAccount: "acc@company.iam.gserviceaccount.com",
210
- * bucketName: "mybucket",
211
- * },
212
- * });
213
- * ```
214
- *
215
- * ## adl block
216
- *
217
- * This block allows specifying parameters for mounting of the ADLS Gen1. The following arguments are required inside the `adl` block:
218
- *
219
- * * `clientId` - (Required) (String) This is the clientId for the enterprise application for the service principal.
220
- * * `tenantId` - (Optional) (String) This is your azure directory tenant id. It is required for creating the mount. (Could be omitted if Azure authentication is used, and we can extract `tenantId` from it)
221
- * * `clientSecretKey` - (Required) (String) This is the secret key in which your service principal/enterprise app client secret will be stored.
222
- * * `clientSecretScope` - (Required) (String) This is the secret scope in which your service principal/enterprise app client secret will be stored.
223
- *
224
- * * `storageResourceName` - (Required) (String) The name of the storage resource in which the data is for ADLS gen 1. This is what you are trying to mount. (Could be omitted if `resourceId` is provided)
225
- * * `sparkConfPrefix` - (Optional) (String) This is the Spark configuration prefix for the ADLS Gen1 mount. The options are `fs.adl` and `dfs.adls`. Use `fs.adl` for clusters on runtime 6.0 and above; otherwise use `dfs.adls`. The default value is `fs.adl`.
226
- * * `directory` - (Computed) (String) Optional; an additional directory within the storage to mount. If specified, it must start with a "/".
227
- *
228
- * ### Example mounting ADLS Gen1
229
- *
230
- * ```typescript
231
- * import * as pulumi from "@pulumi/pulumi";
232
- * import * as databricks from "@pulumi/databricks";
233
- *
234
- * const mount = new databricks.Mount("mount", {
235
- * name: "{var.RANDOM}",
236
- * adl: {
237
- * storageResourceName: "{env.TEST_STORAGE_ACCOUNT_NAME}",
238
- * tenantId: current.tenantId,
239
- * clientId: current.clientId,
240
- * clientSecretScope: terraform.name,
241
- * clientSecretKey: servicePrincipalKey.key,
242
- * sparkConfPrefix: "fs.adl",
243
- * },
244
- * });
245
- * ```
246
- *
247
- * ## wasb block
248
- *
249
- * This block allows specifying parameters for mounting of the Azure Blob Storage. The following arguments are required inside the `wasb` block:
250
- *
251
- * * `authType` - (Required) (String) This is the auth type for blob storage. This can either be SAS tokens (`SAS`) or account access keys (`ACCESS_KEY`).
252
- * * `tokenSecretScope` - (Required) (String) This is the secret scope in which your auth type token is stored.
253
- * * `tokenSecretKey` - (Required) (String) This is the secret key in which your auth type token is stored.
254
- * * `containerName` - (Required) (String) The container in which the data is. This is what you are trying to mount. (Could be omitted if `resourceId` is provided)
255
- * * `storageAccountName` - (Required) (String) The name of the storage resource in which the data is. (Could be omitted if `resourceId` is provided)
256
- * * `directory` - (Computed) (String) Optional; an additional directory within the storage to mount. If specified, it must start with a "/".
257
- *
258
- * ### Example mounting Azure Blob Storage
259
- *
260
- * ```typescript
261
- * import * as pulumi from "@pulumi/pulumi";
262
- * import * as azure from "@pulumi/azure";
263
- * import * as databricks from "@pulumi/databricks";
264
- *
265
- * const blobaccount = new azure.storage.Account("blobaccount", {
266
- * name: `${prefix}blob`,
267
- * resourceGroupName: resourceGroupName,
268
- * location: resourceGroupLocation,
269
- * accountTier: "Standard",
270
- * accountReplicationType: "LRS",
271
- * accountKind: "StorageV2",
272
- * });
273
- * const marketing = new azure.storage.Container("marketing", {
274
- * name: "marketing",
275
- * storageAccountName: blobaccount.name,
276
- * containerAccessType: "private",
277
- * });
278
- * const terraform = new databricks.SecretScope("terraform", {
279
- * name: "application",
280
- * initialManagePrincipal: "users",
281
- * });
282
- * const storageKey = new databricks.Secret("storage_key", {
283
- * key: "blob_storage_key",
284
- * stringValue: blobaccount.primaryAccessKey,
285
- * scope: terraform.name,
286
- * });
287
- * const marketingMount = new databricks.Mount("marketing", {
288
- * name: "marketing",
289
- * wasb: {
290
- * containerName: marketing.name,
291
- * storageAccountName: blobaccount.name,
292
- * authType: "ACCESS_KEY",
293
- * tokenSecretScope: terraform.name,
294
- * tokenSecretKey: storageKey.key,
295
- * },
296
- * });
297
- * ```
298
- *
299
- * ## Migration from other mount resources
300
- *
301
- * Migration from the specific mount resource is straightforward:
302
- *
303
- * * rename `mountName` to `name`
304
- * * wrap storage-specific settings (`containerName`, ...) into the corresponding block (`adl`, `abfs`, `s3`, `wasb`)
305
- * * for S3 mounts, rename `s3BucketName` to `bucketName`
306
- *
307
- * ## Related Resources
308
- *
309
- * The following resources are often used in the same context:
310
- *
311
- * * End to end workspace management guide.
312
- * * databricks.getAwsBucketPolicy data to configure a simple access policy for AWS S3 buckets, so that Databricks can access data in them.
313
- * * databricks.Cluster to create [Databricks Clusters](https://docs.databricks.com/clusters/index.html).
314
- * * databricks.DbfsFile data to get file content from [Databricks File System (DBFS)](https://docs.databricks.com/data/databricks-file-system.html).
315
- * * databricks.getDbfsFilePaths data to get the list of file names from [Databricks File System (DBFS)](https://docs.databricks.com/data/databricks-file-system.html).
316
- * * databricks.DbfsFile to manage relatively small files on [Databricks File System (DBFS)](https://docs.databricks.com/data/databricks-file-system.html).
317
- * * databricks.InstanceProfile to manage AWS EC2 instance profiles that users can use to launch databricks.Cluster and access data, like databricks_mount.
318
- * * databricks.Library to install a [library](https://docs.databricks.com/libraries/index.html) on databricks_cluster.
319
- *
320
5
  * ## Import
321
6
  *
322
7
  * -> **Note** Importing this resource is not currently supported.
package/mount.js CHANGED
@@ -6,321 +6,6 @@ exports.Mount = void 0;
6
6
  const pulumi = require("@pulumi/pulumi");
7
7
  const utilities = require("./utilities");
8
8
  /**
9
- * This resource will mount your cloud storage:
10
- * * `gs` - to [mount Google Cloud Storage](https://docs.gcp.databricks.com/data/data-sources/google/gcs.html)
11
- * * `abfs` - to [mount ADLS Gen2](https://docs.microsoft.com/en-us/azure/databricks/data/data-sources/azure/adls-gen2/) using Azure Blob Filesystem (ABFS) driver
12
- * * `adl` - to [mount ADLS Gen1](https://docs.microsoft.com/en-us/azure/databricks/data/data-sources/azure/azure-datalake) using Azure Data Lake (ADL) driver
13
- * * `wasb` - to [mount Azure Blob Storage](https://docs.microsoft.com/en-us/azure/databricks/data/data-sources/azure/azure-storage) using Windows Azure Storage Blob (WASB) driver
14
- *
15
- * 1. Use generic arguments - you are responsible for providing all parameters required to mount the specific storage. This is the most flexible option.
16
- *
17
- * ## Common arguments
18
- *
19
- * * `clusterId` - (Optional, String) Cluster to use for mounting. If no cluster is specified, a new cluster will be created and will mount the bucket for all of the clusters in this workspace. If the cluster is not running, it will be started, so be sure to set auto-termination rules on it.
20
- * * `name` - (Optional, String) Name under which the mount will be accessible in `dbfs:/mnt/<MOUNT_NAME>`. If not specified, the provider will try to infer it depending on the resource type:
21
- * * `bucketName` for AWS S3 and Google Cloud Storage
22
- * * `containerName` for ADLS Gen2 and Azure Blob Storage
23
- * * `storageResourceName` for ADLS Gen1
24
- * * `uri` - (Optional, String) the URI for accessing specific storage (`s3a://....`, `abfss://....`, `gs://....`, etc.)
25
- * * `extraConfigs` - (Optional, String map) configuration parameters that are necessary for mounting of specific storage
26
- * * `resourceId` - (Optional, String) resource ID for a given storage account. Could be used to fill defaults, such as storage account & container names on Azure.
27
- * * `encryptionType` - (Optional, String) encryption type. Currently used only for [AWS S3 mounts](https://docs.databricks.com/data/data-sources/aws/amazon-s3.html#encrypt-data-in-s3-buckets)
28
- *
29
- * ### Example mounting ADLS Gen2 using uri and extraConfigs
30
- *
31
- * ```typescript
32
- * import * as pulumi from "@pulumi/pulumi";
33
- * import * as databricks from "@pulumi/databricks";
34
- *
35
- * const tenantId = "00000000-1111-2222-3333-444444444444";
36
- * const clientId = "55555555-6666-7777-8888-999999999999";
37
- * const secretScope = "some-kv";
38
- * const secretKey = "some-sp-secret";
39
- * const container = "test";
40
- * const storageAcc = "lrs";
41
- * const _this = new databricks.Mount("this", {
42
- * name: "tf-abfss",
43
- * uri: `abfss://${container}@${storageAcc}.dfs.core.windows.net`,
44
- * extraConfigs: {
45
- * "fs.azure.account.auth.type": "OAuth",
46
- * "fs.azure.account.oauth.provider.type": "org.apache.hadoop.fs.azurebfs.oauth2.ClientCredsTokenProvider",
47
- * "fs.azure.account.oauth2.client.id": clientId,
48
- * "fs.azure.account.oauth2.client.secret": `{{secrets/${secretScope}/${secretKey}}}`,
49
- * "fs.azure.account.oauth2.client.endpoint": `https://login.microsoftonline.com/${tenantId}/oauth2/token`,
50
- * "fs.azure.createRemoteFileSystemDuringInitialization": "false",
51
- * },
52
- * });
53
- * ```
54
- *
55
- * ### Example mounting ADLS Gen2 with AAD passthrough
56
- *
57
- * > **Note** AAD passthrough is considered a legacy data access pattern. Use Unity Catalog for fine-grained data access control.
58
- *
59
- * > **Note** Mounts using AAD passthrough cannot be created using a service principal.
60
- *
61
- * To mount ADLS Gen2 with Azure Active Directory credentials passthrough, execute the mount commands on a cluster configured with AAD credentials passthrough and provide the necessary configuration parameters (see [documentation](https://docs.microsoft.com/en-us/azure/databricks/security/credential-passthrough/adls-passthrough#--mount-azure-data-lake-storage-to-dbfs-using-credential-passthrough) for more details).
62
- *
63
- * ```typescript
64
- * import * as pulumi from "@pulumi/pulumi";
65
- * import * as azure from "@pulumi/azure";
66
- * import * as databricks from "@pulumi/databricks";
67
- *
68
- * const config = new pulumi.Config();
69
- * // Resource group for Databricks Workspace
70
- * const resourceGroup = config.require("resourceGroup");
71
- * // Name of the Databricks Workspace
72
- * const workspaceName = config.require("workspaceName");
73
- * const thisWorkspace = azure.databricks.getWorkspace({
74
- * name: workspaceName,
75
- * resourceGroupName: resourceGroup,
76
- * });
77
- * const smallest = databricks.getNodeType({
78
- * localDisk: true,
79
- * });
80
- * const latest = databricks.getSparkVersion({});
81
- * const sharedPassthrough = new databricks.Cluster("shared_passthrough", {
82
- * clusterName: "Shared Passthrough for mount",
83
- * sparkVersion: latest.then(latest => latest.id),
84
- * nodeTypeId: smallest.then(smallest => smallest.id),
85
- * autoterminationMinutes: 10,
86
- * numWorkers: 1,
87
- * sparkConf: {
88
- * "spark.databricks.cluster.profile": "serverless",
89
- * "spark.databricks.repl.allowedLanguages": "python,sql",
90
- * "spark.databricks.passthrough.enabled": "true",
91
- * "spark.databricks.pyspark.enableProcessIsolation": "true",
92
- * },
93
- * customTags: {
94
- * ResourceClass: "Serverless",
95
- * },
96
- * });
97
- * // Name of the ADLS Gen2 storage container
98
- * const storageAcc = config.require("storageAcc");
99
- * // Name of container inside storage account
100
- * const container = config.require("container");
101
- * const passthrough = new databricks.Mount("passthrough", {
102
- * name: "passthrough-test",
103
- * clusterId: sharedPassthrough.id,
104
- * uri: `abfss://${container}@${storageAcc}.dfs.core.windows.net`,
105
- * extraConfigs: {
106
- * "fs.azure.account.auth.type": "CustomAccessToken",
107
- * "fs.azure.account.custom.token.provider.class": "{{sparkconf/spark.databricks.passthrough.adls.gen2.tokenProviderClassName}}",
108
- * },
109
- * });
110
- * ```
111
- *
112
- * ## s3 block
113
- *
114
- * This block allows specifying parameters for mounting an AWS S3 bucket. The following arguments are supported inside the `s3` block:
115
- *
116
- * * `instanceProfile` - (Optional) (String) ARN of registered instance profile for data access. If it's not specified, then the `clusterId` should be provided, and the cluster should have an instance profile attached to it. If both `clusterId` & `instanceProfile` are specified, then `clusterId` takes precedence.
117
- * * `bucketName` - (Required) (String) S3 bucket name to be mounted.
118
- *
119
- * ### Example of mounting S3
120
- *
121
- * ```typescript
122
- * import * as pulumi from "@pulumi/pulumi";
123
- * import * as databricks from "@pulumi/databricks";
124
- *
125
- * // now you can do `%fs ls /mnt/experiments` in notebooks
126
- * const _this = new databricks.Mount("this", {
127
- * name: "experiments",
128
- * s3: {
129
- * instanceProfile: ds.id,
130
- * bucketName: thisAwsS3Bucket.bucket,
131
- * },
132
- * });
133
- * ```
134
- *
135
- * ## abfs block
136
- *
137
- * This block allows specifying parameters for mounting of the ADLS Gen2. The following arguments are required inside the `abfs` block:
138
- *
139
- * * `clientId` - (Required) (String) This is the clientId (Application Object ID) for the enterprise application for the service principal.
140
- * * `tenantId` - (Optional) (String) This is your azure directory tenant id. It is required for creating the mount. (Could be omitted if Azure authentication is used, and we can extract `tenantId` from it).
141
- * * `clientSecretKey` - (Required) (String) This is the secret key in which your service principal/enterprise app client secret will be stored.
142
- * * `clientSecretScope` - (Required) (String) This is the secret scope in which your service principal/enterprise app client secret will be stored.
143
- * * `containerName` - (Required) (String) ADLS gen2 container name. (Could be omitted if `resourceId` is provided)
144
- * * `storageAccountName` - (Required) (String) The name of the storage resource in which the data is. (Could be omitted if `resourceId` is provided)
145
- * * `directory` - (Computed) (String) Optional; an additional directory within the storage to mount. If specified, it must start with a "/".
146
- * * `initializeFileSystem` - (Required) (Bool) whether or not to initialize the file system on first use
147
- *
148
- * ### Creating mount for ADLS Gen2 using abfs block
149
- *
150
- * In this example, we're using Azure authentication, so we can omit some parameters (`tenantId`, `storageAccountName`, and `containerName`) that will be detected automatically.
151
- *
152
- * ```typescript
153
- * import * as pulumi from "@pulumi/pulumi";
154
- * import * as azure from "@pulumi/azure";
155
- * import * as databricks from "@pulumi/databricks";
156
- *
157
- * const terraform = new databricks.SecretScope("terraform", {
158
- * name: "application",
159
- * initialManagePrincipal: "users",
160
- * });
161
- * const servicePrincipalKey = new databricks.Secret("service_principal_key", {
162
- * key: "service_principal_key",
163
- * stringValue: ARM_CLIENT_SECRET,
164
- * scope: terraform.name,
165
- * });
166
- * const _this = new azure.storage.Account("this", {
167
- * name: `${prefix}datalake`,
168
- * resourceGroupName: resourceGroupName,
169
- * location: resourceGroupLocation,
170
- * accountTier: "Standard",
171
- * accountReplicationType: "GRS",
172
- * accountKind: "StorageV2",
173
- * isHnsEnabled: true,
174
- * });
175
- * const thisAssignment = new azure.authorization.Assignment("this", {
176
- * scope: _this.id,
177
- * roleDefinitionName: "Storage Blob Data Contributor",
178
- * principalId: current.objectId,
179
- * });
180
- * const thisContainer = new azure.storage.Container("this", {
181
- * name: "marketing",
182
- * storageAccountName: _this.name,
183
- * containerAccessType: "private",
184
- * });
185
- * const marketing = new databricks.Mount("marketing", {
186
- * name: "marketing",
187
- * resourceId: thisContainer.resourceManagerId,
188
- * abfs: {
189
- * clientId: current.clientId,
190
- * clientSecretScope: terraform.name,
191
- * clientSecretKey: servicePrincipalKey.key,
192
- * initializeFileSystem: true,
193
- * },
194
- * });
195
- * ```
196
- *
197
- * ## gs block
198
- *
199
- * This block allows specifying parameters for mounting of the Google Cloud Storage. The following arguments are required inside the `gs` block:
200
- *
201
- * * `serviceAccount` - (Optional) (String) email of registered [Google Service Account](https://docs.gcp.databricks.com/data/data-sources/google/gcs.html#step-1-set-up-google-cloud-service-account-using-google-cloud-console) for data access. If it's not specified, then the `clusterId` should be provided, and the cluster should have a Google service account attached to it.
202
- * * `bucketName` - (Required) (String) GCS bucket name to be mounted.
203
- *
204
- * ### Example mounting Google Cloud Storage
205
- *
206
- * ```typescript
207
- * import * as pulumi from "@pulumi/pulumi";
208
- * import * as databricks from "@pulumi/databricks";
209
- *
210
- * const thisGs = new databricks.Mount("this_gs", {
211
- * name: "gs-mount",
212
- * gs: {
213
- * serviceAccount: "acc@company.iam.gserviceaccount.com",
214
- * bucketName: "mybucket",
215
- * },
216
- * });
217
- * ```
218
- *
219
- * ## adl block
220
- *
221
- * This block allows specifying parameters for mounting of the ADLS Gen1. The following arguments are required inside the `adl` block:
222
- *
223
- * * `clientId` - (Required) (String) This is the clientId for the enterprise application for the service principal.
224
- * * `tenantId` - (Optional) (String) This is your azure directory tenant id. It is required for creating the mount. (Could be omitted if Azure authentication is used, and we can extract `tenantId` from it)
225
- * * `clientSecretKey` - (Required) (String) This is the secret key in which your service principal/enterprise app client secret will be stored.
226
- * * `clientSecretScope` - (Required) (String) This is the secret scope in which your service principal/enterprise app client secret will be stored.
227
- *
228
- * * `storageResourceName` - (Required) (String) The name of the storage resource in which the data is for ADLS gen 1. This is what you are trying to mount. (Could be omitted if `resourceId` is provided)
229
- * * `sparkConfPrefix` - (Optional) (String) This is the Spark configuration prefix for the ADLS Gen1 mount. The options are `fs.adl` and `dfs.adls`. Use `fs.adl` for clusters on runtime 6.0 and above; otherwise use `dfs.adls`. The default value is `fs.adl`.
230
- * * `directory` - (Computed) (String) Optional; an additional directory within the storage to mount. If specified, it must start with a "/".
231
- *
232
- * ### Example mounting ADLS Gen1
233
- *
234
- * ```typescript
235
- * import * as pulumi from "@pulumi/pulumi";
236
- * import * as databricks from "@pulumi/databricks";
237
- *
238
- * const mount = new databricks.Mount("mount", {
239
- * name: "{var.RANDOM}",
240
- * adl: {
241
- * storageResourceName: "{env.TEST_STORAGE_ACCOUNT_NAME}",
242
- * tenantId: current.tenantId,
243
- * clientId: current.clientId,
244
- * clientSecretScope: terraform.name,
245
- * clientSecretKey: servicePrincipalKey.key,
246
- * sparkConfPrefix: "fs.adl",
247
- * },
248
- * });
249
- * ```
250
- *
251
- * ## wasb block
252
- *
253
- * This block allows specifying parameters for mounting of the Azure Blob Storage. The following arguments are required inside the `wasb` block:
254
- *
255
- * * `authType` - (Required) (String) This is the auth type for blob storage. This can either be SAS tokens (`SAS`) or account access keys (`ACCESS_KEY`).
256
- * * `tokenSecretScope` - (Required) (String) This is the secret scope in which your auth type token is stored.
257
- * * `tokenSecretKey` - (Required) (String) This is the secret key in which your auth type token is stored.
258
- * * `containerName` - (Required) (String) The container in which the data is. This is what you are trying to mount. (Could be omitted if `resourceId` is provided)
259
- * * `storageAccountName` - (Required) (String) The name of the storage resource in which the data is. (Could be omitted if `resourceId` is provided)
260
- * * `directory` - (Computed) (String) Optional; an additional directory within the storage to mount. If specified, it must start with a "/".
261
- *
262
- * ### Example mounting Azure Blob Storage
263
- *
264
- * ```typescript
265
- * import * as pulumi from "@pulumi/pulumi";
266
- * import * as azure from "@pulumi/azure";
267
- * import * as databricks from "@pulumi/databricks";
268
- *
269
- * const blobaccount = new azure.storage.Account("blobaccount", {
270
- * name: `${prefix}blob`,
271
- * resourceGroupName: resourceGroupName,
272
- * location: resourceGroupLocation,
273
- * accountTier: "Standard",
274
- * accountReplicationType: "LRS",
275
- * accountKind: "StorageV2",
276
- * });
277
- * const marketing = new azure.storage.Container("marketing", {
278
- * name: "marketing",
279
- * storageAccountName: blobaccount.name,
280
- * containerAccessType: "private",
281
- * });
282
- * const terraform = new databricks.SecretScope("terraform", {
283
- * name: "application",
284
- * initialManagePrincipal: "users",
285
- * });
286
- * const storageKey = new databricks.Secret("storage_key", {
287
- * key: "blob_storage_key",
288
- * stringValue: blobaccount.primaryAccessKey,
289
- * scope: terraform.name,
290
- * });
291
- * const marketingMount = new databricks.Mount("marketing", {
292
- * name: "marketing",
293
- * wasb: {
294
- * containerName: marketing.name,
295
- * storageAccountName: blobaccount.name,
296
- * authType: "ACCESS_KEY",
297
- * tokenSecretScope: terraform.name,
298
- * tokenSecretKey: storageKey.key,
299
- * },
300
- * });
301
- * ```
302
- *
303
- * ## Migration from other mount resources
304
- *
305
- * Migration from the specific mount resource is straightforward:
306
- *
307
- * * rename `mountName` to `name`
308
- * * wrap storage-specific settings (`containerName`, ...) into the corresponding block (`adl`, `abfs`, `s3`, `wasb`)
309
- * * for S3 mounts, rename `s3BucketName` to `bucketName`
310
- *
311
- * ## Related Resources
312
- *
313
- * The following resources are often used in the same context:
314
- *
315
- * * End to end workspace management guide.
316
- * * databricks.getAwsBucketPolicy data to configure a simple access policy for AWS S3 buckets, so that Databricks can access data in them.
317
- * * databricks.Cluster to create [Databricks Clusters](https://docs.databricks.com/clusters/index.html).
318
- * * databricks.DbfsFile data to get file content from [Databricks File System (DBFS)](https://docs.databricks.com/data/databricks-file-system.html).
319
- * * databricks.getDbfsFilePaths data to get the list of file names from [Databricks File System (DBFS)](https://docs.databricks.com/data/databricks-file-system.html).
320
- * * databricks.DbfsFile to manage relatively small files on [Databricks File System (DBFS)](https://docs.databricks.com/data/databricks-file-system.html).
321
- * * databricks.InstanceProfile to manage AWS EC2 instance profiles that users can use to launch databricks.Cluster and access data, like databricks_mount.
322
- * * databricks.Library to install a [library](https://docs.databricks.com/libraries/index.html) on databricks_cluster.
323
- *
324
9
  * ## Import
325
10
  *
326
11
  * -> **Note** Importing this resource is not currently supported.
package/mount.js.map CHANGED
@@ -1 +1 @@
1
- {"version":3,"file":"mount.js","sourceRoot":"","sources":["../mount.ts"],"names":[],"mappings":";AAAA,wFAAwF;AACxF,iFAAiF;;;AAEjF,yCAAyC;AAGzC,yCAAyC;AAEzC;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;GA+TG;AACH,MAAa,KAAM,SAAQ,MAAM,CAAC,cAAc;IAC5C;;;;;;;;OAQG;IACI,MAAM,CAAC,GAAG,CAAC,IAAY,EAAE,EAA2B,EAAE,KAAkB,EAAE,IAAmC;QAChH,OAAO,IAAI,KAAK,CAAC,IAAI,EAAO,KAAK,kCAAO,IAAI,KAAE,EAAE,EAAE,EAAE,IAAG,CAAC;IAC5D,CAAC;IAKD;;;OAGG;IACI,MAAM,CAAC,UAAU,CAAC,GAAQ;QAC7B,IAAI,GAAG,KAAK,SAAS,IAAI,GAAG,KAAK,IAAI,EAAE;YACnC,OAAO,KAAK,CAAC;SAChB;QACD,OAAO,GAAG,CAAC,cAAc,CAAC,KAAK,KAAK,CAAC,YAAY,CAAC;IACtD,CAAC;IA0BD,YAAY,IAAY,EAAE,WAAoC,EAAE,IAAmC;QAC/F,IAAI,cAAc,GAAkB,EAAE,CAAC;QACvC,IAAI,GAAG,IAAI,IAAI,EAAE,CAAC;QAClB,IAAI,IAAI,CAAC,EAAE,EAAE;YACT,MAAM,KAAK,GAAG,WAAqC,CAAC;YACpD,cAAc,CAAC,MAAM,CAAC,GAAG,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC,IAAI,CAAC,CAAC,CAAC,SAAS,CAAC;YACxD,cAAc,CAAC,KAAK,CAAC,GAAG,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC,GAAG,CAAC,CAAC,CAAC,SAAS,CAAC;YACtD,cAAc,CAAC,WAAW,CAAC,GAAG,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC,SAAS,CAAC,CAAC,CAAC,SAAS,CAAC;YAClE,cAAc,CAAC,gBAAgB,CAAC,GAAG,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC,cAAc,CAAC,CAAC,CAAC,SAAS,CAAC;YAC5E,cAAc,CAAC,cAAc,CAAC,GAAG,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC,YAAY,CAAC,CAAC,CAAC,SAAS,CAAC;YACxE,cAAc,CAAC,IAAI,CAAC,GAAG,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC,EAAE,CAAC,CAAC,CAAC,SAAS,CAAC;YACpD,cAAc,CAAC,MAAM,CAAC,GAAG,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC,IAAI,CAAC,CAAC,CAAC,SAAS,CAAC;YACxD,cAAc,CAAC,YAAY,CAAC,GAAG,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC,UAAU,CAAC,CAAC,CAAC,SAAS,CAAC;YACpE,cAAc,CAAC,IAAI,CAAC,GAAG,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC,EAAE,CAAC,CAAC,CAAC,SAAS,CAAC;YACpD,cAAc,CAAC,QAAQ,CAAC,GAAG,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC,MAAM,CAAC,CAAC,CAAC,SAAS,CAAC;YAC5D,cAAc,CAAC,KAAK,CAAC,GAAG,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC,GAAG,CAAC,CAAC,CAAC,SAAS,CAAC;YACtD,cAAc,CAAC,MAAM,CAAC,GAAG,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC,IAAI,CAAC,CAAC,CAAC,SAAS,CAAC;SAC3D;aAAM;YACH,MAAM,IAAI,GAAG,WAAoC,CAAC;YAClD,cAAc,CAAC,MAAM,CAAC,GAAG,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC,CAAC,SAAS,CAAC;YACtD,cAAc,CAAC,KAAK,CAAC,GAAG,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,CAAC,SAAS,CAAC;YACpD,cAAc,CAAC,WAAW,CAAC,GAAG,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC,SAAS,CAAC,CAAC,CAAC,SAAS,CAAC;YAChE,cAAc,CAAC,gBAAgB,CAAC,GAAG,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC,cAAc,CAAC,CAAC,CAAC,SAAS,CAAC;YAC1E,cAAc,CAAC,cAAc,CAAC,GAAG,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC,YAAY,CAAC,CAAC,CAAC,SAAS,CAAC;YACtE,cAAc,CAAC,IAAI,CAAC,GAAG,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC,EAAE,CAAC,CAAC,CAAC,SAAS,CAAC;YAClD,cAAc,CAAC,MAAM,CAAC,GAAG,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC,CAAC,SAAS,CAAC;YACtD,cAAc,CAAC,YAAY,CAAC,GAAG,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC,UAAU,CAAC,CAAC,CAAC,SAAS,CAAC;YAClE,cAAc,CAAC,IAAI,CAAC,GAAG,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC,EAAE,CAAC,CAAC,CAAC,SAAS,CAAC;YAClD,cAAc,CAAC,KAAK,CAAC,GAAG,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,CAAC,SAAS,CAAC;YACpD,cAAc,CAAC,MAAM,CAAC,GAAG,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC,CAAC,SAAS,CAAC;YACtD,cAAc,CAAC,QAAQ,CAAC,GAAG,SAAS,CAAC,OAAO,CAAC;SAChD;QACD,IAAI,GAAG,MAAM,CAAC,YAAY,CAAC,SAAS,CAAC,oBAAoB,EAAE,EAAE,IAAI,CAAC,CAAC;QACnE,KAAK,CAAC,KAAK,CAAC,YAAY,EAAE,IAAI,EAAE,cAAc,EAAE,IAAI,CAAC,CAAC;IAC1D,CAAC;;AAtFL,sBAuFC;AAzEG,gBAAgB;AACO,kBAAY,GAAG,8BAA8B,CAAC"}
1
+ {"version":3,"file":"mount.js","sourceRoot":"","sources":["../mount.ts"],"names":[],"mappings":";AAAA,wFAAwF;AACxF,iFAAiF;;;AAEjF,yCAAyC;AAGzC,yCAAyC;AAEzC;;;;GAIG;AACH,MAAa,KAAM,SAAQ,MAAM,CAAC,cAAc;IAC5C;;;;;;;;OAQG;IACI,MAAM,CAAC,GAAG,CAAC,IAAY,EAAE,EAA2B,EAAE,KAAkB,EAAE,IAAmC;QAChH,OAAO,IAAI,KAAK,CAAC,IAAI,EAAO,KAAK,kCAAO,IAAI,KAAE,EAAE,EAAE,EAAE,IAAG,CAAC;IAC5D,CAAC;IAKD;;;OAGG;IACI,MAAM,CAAC,UAAU,CAAC,GAAQ;QAC7B,IAAI,GAAG,KAAK,SAAS,IAAI,GAAG,KAAK,IAAI,EAAE;YACnC,OAAO,KAAK,CAAC;SAChB;QACD,OAAO,GAAG,CAAC,cAAc,CAAC,KAAK,KAAK,CAAC,YAAY,CAAC;IACtD,CAAC;IA0BD,YAAY,IAAY,EAAE,WAAoC,EAAE,IAAmC;QAC/F,IAAI,cAAc,GAAkB,EAAE,CAAC;QACvC,IAAI,GAAG,IAAI,IAAI,EAAE,CAAC;QAClB,IAAI,IAAI,CAAC,EAAE,EAAE;YACT,MAAM,KAAK,GAAG,WAAqC,CAAC;YACpD,cAAc,CAAC,MAAM,CAAC,GAAG,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC,IAAI,CAAC,CAAC,CAAC,SAAS,CAAC;YACxD,cAAc,CAAC,KAAK,CAAC,GAAG,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC,GAAG,CAAC,CAAC,CAAC,SAAS,CAAC;YACtD,cAAc,CAAC,WAAW,CAAC,GAAG,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC,SAAS,CAAC,CAAC,CAAC,SAAS,CAAC;YAClE,cAAc,CAAC,gBAAgB,CAAC,GAAG,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC,cAAc,CAAC,CAAC,CAAC,SAAS,CAAC;YAC5E,cAAc,CAAC,cAAc,CAAC,GAAG,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC,YAAY,CAAC,CAAC,CAAC,SAAS,CAAC;YACxE,cAAc,CAAC,IAAI,CAAC,GAAG,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC,EAAE,CAAC,CAAC,CAAC,SAAS,CAAC;YACpD,cAAc,CAAC,MAAM,CAAC,GAAG,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC,IAAI,CAAC,CAAC,CAAC,SAAS,CAAC;YACxD,cAAc,CAAC,YAAY,CAAC,GAAG,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC,UAAU,CAAC,CAAC,CAAC,SAAS,CAAC;YACpE,cAAc,CAAC,IAAI,CAAC,GAAG,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC,EAAE,CAAC,CAAC,CAAC,SAAS,CAAC;YACpD,cAAc,CAAC,QAAQ,CAAC,GAAG,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC,MAAM,CAAC,CAAC,CAAC,SAAS,CAAC;YAC5D,cAAc,CAAC,KAAK,CAAC,GAAG,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC,GAAG,CAAC,CAAC,CAAC,SAAS,CAAC;YACtD,cAAc,CAAC,MAAM,CAAC,GAAG,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC,IAAI,CAAC,CAAC,CAAC,SAAS,CAAC;SAC3D;aAAM;YACH,MAAM,IAAI,GAAG,WAAoC,CAAC;YAClD,cAAc,CAAC,MAAM,CAAC,GAAG,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC,CAAC,SAAS,CAAC;YACtD,cAAc,CAAC,KAAK,CAAC,GAAG,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,CAAC,SAAS,CAAC;YACpD,cAAc,CAAC,WAAW,CAAC,GAAG,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC,SAAS,CAAC,CAAC,CAAC,SAAS,CAAC;YAChE,cAAc,CAAC,gBAAgB,CAAC,GAAG,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC,cAAc,CAAC,CAAC,CAAC,SAAS,CAAC;YAC1E,cAAc,CAAC,cAAc,CAAC,GAAG,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC,YAAY,CAAC,CAAC,CAAC,SAAS,CAAC;YACtE,cAAc,CAAC,IAAI,CAAC,GAAG,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC,EAAE,CAAC,CAAC,CAAC,SAAS,CAAC;YAClD,cAAc,CAAC,MAAM,CAAC,GAAG,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC,CAAC,SAAS,CAAC;YACtD,cAAc,CAAC,YAAY,CAAC,GAAG,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC,UAAU,CAAC,CAAC,CAAC,SAAS,CAAC;YAClE,cAAc,CAAC,IAAI,CAAC,GAAG,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC,EAAE,CAAC,CAAC,CAAC,SAAS,CAAC;YAClD,cAAc,CAAC,KAAK,CAAC,GAAG,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,CAAC,SAAS,CAAC;YACpD,cAAc,CAAC,MAAM,CAAC,GAAG,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC,CAAC,SAAS,CAAC;YACtD,cAAc,CAAC,QAAQ,CAAC,GAAG,SAAS,CAAC,OAAO,CAAC;SAChD;QACD,IAAI,GAAG,MAAM,CAAC,YAAY,CAAC,SAAS,CAAC,oBAAoB,EAAE,EAAE,IAAI,CAAC,CAAC;QACnE,KAAK,CAAC,KAAK,CAAC,YAAY,EAAE,IAAI,EAAE,cAAc,EAAE,IAAI,CAAC,CAAC;IAC1D,CAAC;;AAtFL,sBAuFC;AAzEG,gBAAgB;AACO,kBAAY,GAAG,8BAA8B,CAAC"}