@pulumi/databricks 0.0.1-alpha.1648473134
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +202 -0
- package/README.md +303 -0
- package/awsS3Mount.d.ts +50 -0
- package/awsS3Mount.js +63 -0
- package/awsS3Mount.js.map +1 -0
- package/azureAdlsGen1Mount.d.ts +65 -0
- package/azureAdlsGen1Mount.js +85 -0
- package/azureAdlsGen1Mount.js.map +1 -0
- package/azureAdlsGen2Mount.d.ts +68 -0
- package/azureAdlsGen2Mount.js +93 -0
- package/azureAdlsGen2Mount.js.map +1 -0
- package/azureBlobMount.d.ts +62 -0
- package/azureBlobMount.js +83 -0
- package/azureBlobMount.js.map +1 -0
- package/catalog.d.ts +124 -0
- package/catalog.js +88 -0
- package/catalog.js.map +1 -0
- package/cluster.d.ts +332 -0
- package/cluster.js +121 -0
- package/cluster.js.map +1 -0
- package/clusterPolicy.d.ts +112 -0
- package/clusterPolicy.js +97 -0
- package/clusterPolicy.js.map +1 -0
- package/config/index.d.ts +1 -0
- package/config/index.js +21 -0
- package/config/index.js.map +1 -0
- package/config/vars.d.ts +20 -0
- package/config/vars.js +127 -0
- package/config/vars.js.map +1 -0
- package/dbfsFile.d.ts +91 -0
- package/dbfsFile.js +71 -0
- package/dbfsFile.js.map +1 -0
- package/directory.d.ts +72 -0
- package/directory.js +65 -0
- package/directory.js.map +1 -0
- package/externalLocation.d.ts +114 -0
- package/externalLocation.js +80 -0
- package/externalLocation.js.map +1 -0
- package/getAwsAssumeRolePolicy.d.ts +47 -0
- package/getAwsAssumeRolePolicy.js +24 -0
- package/getAwsAssumeRolePolicy.js.map +1 -0
- package/getAwsBucketPolicy.d.ts +59 -0
- package/getAwsBucketPolicy.js +36 -0
- package/getAwsBucketPolicy.js.map +1 -0
- package/getAwsCrossAccountPolicy.d.ts +59 -0
- package/getAwsCrossAccountPolicy.js +47 -0
- package/getAwsCrossAccountPolicy.js.map +1 -0
- package/getCatalogs.d.ts +54 -0
- package/getCatalogs.js +43 -0
- package/getCatalogs.js.map +1 -0
- package/getClusters.d.ts +69 -0
- package/getClusters.js +57 -0
- package/getClusters.js.map +1 -0
- package/getCurrentUser.d.ts +39 -0
- package/getCurrentUser.js +38 -0
- package/getCurrentUser.js.map +1 -0
- package/getDbfsFile.d.ts +69 -0
- package/getDbfsFile.js +44 -0
- package/getDbfsFile.js.map +1 -0
- package/getDbfsFilePaths.d.ts +68 -0
- package/getDbfsFilePaths.js +46 -0
- package/getDbfsFilePaths.js.map +1 -0
- package/getGroup.d.ts +166 -0
- package/getGroup.js +46 -0
- package/getGroup.js.map +1 -0
- package/getJobs.d.ts +47 -0
- package/getJobs.js +30 -0
- package/getJobs.js.map +1 -0
- package/getNodeType.d.ts +173 -0
- package/getNodeType.js +71 -0
- package/getNodeType.js.map +1 -0
- package/getNotebook.d.ts +93 -0
- package/getNotebook.js +39 -0
- package/getNotebook.js.map +1 -0
- package/getNotebookPaths.d.ts +58 -0
- package/getNotebookPaths.js +36 -0
- package/getNotebookPaths.js.map +1 -0
- package/getSchemas.d.ts +65 -0
- package/getSchemas.js +45 -0
- package/getSchemas.js.map +1 -0
- package/getSparkVersion.d.ts +150 -0
- package/getSparkVersion.js +70 -0
- package/getSparkVersion.js.map +1 -0
- package/getTables.d.ts +75 -0
- package/getTables.js +47 -0
- package/getTables.js.map +1 -0
- package/getUser.d.ts +78 -0
- package/getUser.js +39 -0
- package/getUser.js.map +1 -0
- package/getZones.d.ts +29 -0
- package/getZones.js +26 -0
- package/getZones.js.map +1 -0
- package/gitCredential.d.ts +48 -0
- package/gitCredential.js +64 -0
- package/gitCredential.js.map +1 -0
- package/globalInitScript.d.ts +99 -0
- package/globalInitScript.js +68 -0
- package/globalInitScript.js.map +1 -0
- package/grants.d.ts +58 -0
- package/grants.js +64 -0
- package/grants.js.map +1 -0
- package/group.d.ts +131 -0
- package/group.js +83 -0
- package/group.js.map +1 -0
- package/groupInstanceProfile.d.ts +96 -0
- package/groupInstanceProfile.js +93 -0
- package/groupInstanceProfile.js.map +1 -0
- package/index.d.ts +81 -0
- package/index.js +362 -0
- package/index.js.map +1 -0
- package/instancePool.d.ts +166 -0
- package/instancePool.js +93 -0
- package/instancePool.js.map +1 -0
- package/instanceProfile.d.ts +180 -0
- package/instanceProfile.js +161 -0
- package/instanceProfile.js.map +1 -0
- package/ipAccessList.d.ts +128 -0
- package/ipAccessList.js +108 -0
- package/ipAccessList.js.map +1 -0
- package/job.d.ts +228 -0
- package/job.js +102 -0
- package/job.js.map +1 -0
- package/library.d.ts +183 -0
- package/library.js +189 -0
- package/library.js.map +1 -0
- package/metastore.d.ts +102 -0
- package/metastore.js +75 -0
- package/metastore.js.map +1 -0
- package/metastoreAssignment.d.ts +94 -0
- package/metastoreAssignment.js +81 -0
- package/metastoreAssignment.js.map +1 -0
- package/metastoreDataAccess.d.ts +85 -0
- package/metastoreDataAccess.js +73 -0
- package/metastoreDataAccess.js.map +1 -0
- package/mlflowExperiment.d.ts +122 -0
- package/mlflowExperiment.js +99 -0
- package/mlflowExperiment.js.map +1 -0
- package/mlflowModel.d.ts +131 -0
- package/mlflowModel.js +107 -0
- package/mlflowModel.js.map +1 -0
- package/mlflowWebhook.d.ts +129 -0
- package/mlflowWebhook.js +100 -0
- package/mlflowWebhook.js.map +1 -0
- package/mount.d.ts +89 -0
- package/mount.js +76 -0
- package/mount.js.map +1 -0
- package/mwsCredentials.d.ts +137 -0
- package/mwsCredentials.js +115 -0
- package/mwsCredentials.js.map +1 -0
- package/mwsCustomerManagedKeys.d.ts +257 -0
- package/mwsCustomerManagedKeys.js +226 -0
- package/mwsCustomerManagedKeys.js.map +1 -0
- package/mwsLogDelivery.d.ts +219 -0
- package/mwsLogDelivery.js +144 -0
- package/mwsLogDelivery.js.map +1 -0
- package/mwsNetworks.d.ts +129 -0
- package/mwsNetworks.js +89 -0
- package/mwsNetworks.js.map +1 -0
- package/mwsPrivateAccessSettings.d.ts +137 -0
- package/mwsPrivateAccessSettings.js +74 -0
- package/mwsPrivateAccessSettings.js.map +1 -0
- package/mwsStorageConfigurations.d.ts +122 -0
- package/mwsStorageConfigurations.js +106 -0
- package/mwsStorageConfigurations.js.map +1 -0
- package/mwsVpcEndpoint.d.ts +122 -0
- package/mwsVpcEndpoint.js +79 -0
- package/mwsVpcEndpoint.js.map +1 -0
- package/mwsWorkspaces.d.ts +222 -0
- package/mwsWorkspaces.js +106 -0
- package/mwsWorkspaces.js.map +1 -0
- package/notebook.d.ts +128 -0
- package/notebook.js +77 -0
- package/notebook.js.map +1 -0
- package/oboToken.d.ts +98 -0
- package/oboToken.js +82 -0
- package/oboToken.js.map +1 -0
- package/package.json +28 -0
- package/package.json.bak +28 -0
- package/package.json.dev +28 -0
- package/permissions.d.ts +211 -0
- package/permissions.js +97 -0
- package/permissions.js.map +1 -0
- package/pipeline.d.ts +200 -0
- package/pipeline.js +134 -0
- package/pipeline.js.map +1 -0
- package/provider.d.ts +61 -0
- package/provider.js +64 -0
- package/provider.js.map +1 -0
- package/repo.d.ts +117 -0
- package/repo.js +71 -0
- package/repo.js.map +1 -0
- package/schema.d.ts +149 -0
- package/schema.js +106 -0
- package/schema.js.map +1 -0
- package/scripts/install-pulumi-plugin.js +21 -0
- package/secret.d.ts +115 -0
- package/secret.js +99 -0
- package/secret.js.map +1 -0
- package/secretAcl.d.ts +115 -0
- package/secretAcl.js +105 -0
- package/secretAcl.js.map +1 -0
- package/secretScope.d.ts +85 -0
- package/secretScope.js +64 -0
- package/secretScope.js.map +1 -0
- package/servicePrincipal.d.ts +142 -0
- package/servicePrincipal.js +83 -0
- package/servicePrincipal.js.map +1 -0
- package/sqlDashboard.d.ts +90 -0
- package/sqlDashboard.js +99 -0
- package/sqlDashboard.js.map +1 -0
- package/sqlEndpoint.d.ts +249 -0
- package/sqlEndpoint.js +128 -0
- package/sqlEndpoint.js.map +1 -0
- package/sqlGlobalConfig.d.ts +157 -0
- package/sqlGlobalConfig.js +115 -0
- package/sqlGlobalConfig.js.map +1 -0
- package/sqlPermissions.d.ts +191 -0
- package/sqlPermissions.js +139 -0
- package/sqlPermissions.js.map +1 -0
- package/sqlQuery.d.ts +131 -0
- package/sqlQuery.js +139 -0
- package/sqlQuery.js.map +1 -0
- package/sqlVisualization.d.ts +105 -0
- package/sqlVisualization.js +119 -0
- package/sqlVisualization.js.map +1 -0
- package/sqlWidget.d.ts +109 -0
- package/sqlWidget.js +114 -0
- package/sqlWidget.js.map +1 -0
- package/storageCredential.d.ts +122 -0
- package/storageCredential.js +118 -0
- package/storageCredential.js.map +1 -0
- package/table.d.ts +249 -0
- package/table.js +157 -0
- package/table.js.map +1 -0
- package/token.d.ts +102 -0
- package/token.js +84 -0
- package/token.js.map +1 -0
- package/types/index.d.ts +3 -0
- package/types/index.js +11 -0
- package/types/index.js.map +1 -0
- package/types/input.d.ts +1209 -0
- package/types/input.js +5 -0
- package/types/input.js.map +1 -0
- package/types/output.d.ts +1222 -0
- package/types/output.js +5 -0
- package/types/output.js.map +1 -0
- package/user.d.ts +149 -0
- package/user.js +91 -0
- package/user.js.map +1 -0
- package/userInstanceProfile.d.ts +93 -0
- package/userInstanceProfile.js +90 -0
- package/userInstanceProfile.js.map +1 -0
- package/userRole.d.ts +106 -0
- package/userRole.js +103 -0
- package/userRole.js.map +1 -0
- package/utilities.d.ts +4 -0
- package/utilities.js +57 -0
- package/utilities.js.map +1 -0
- package/workspaceConf.d.ts +76 -0
- package/workspaceConf.js +71 -0
- package/workspaceConf.js.map +1 -0
package/catalog.js
ADDED
|
@@ -0,0 +1,88 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
|
|
3
|
+
// *** Do not edit by hand unless you're certain you know what you are doing! ***
|
|
4
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
5
|
+
exports.Catalog = void 0;
|
|
6
|
+
const pulumi = require("@pulumi/pulumi");
|
|
7
|
+
const utilities = require("./utilities");
|
|
8
|
+
/**
|
|
9
|
+
* ## Example Usage
|
|
10
|
+
*
|
|
11
|
+
* ```typescript
|
|
12
|
+
* import * as pulumi from "@pulumi/pulumi";
|
|
13
|
+
* import * as databricks from "@pulumi/databricks";
|
|
14
|
+
*
|
|
15
|
+
* const sandbox = new databricks.Catalog("sandbox", {
|
|
16
|
+
* metastoreId: databricks_metastore["this"].id,
|
|
17
|
+
* comment: "this catalog is managed by terraform",
|
|
18
|
+
* properties: {
|
|
19
|
+
* purpose: "testing",
|
|
20
|
+
* },
|
|
21
|
+
* });
|
|
22
|
+
* ```
|
|
23
|
+
* ## Related Resources
|
|
24
|
+
*
|
|
25
|
+
* The following resources are used in the same context:
|
|
26
|
+
*
|
|
27
|
+
* * databricks.Table data to list tables within Unity Catalog.
|
|
28
|
+
* * databricks.Schema data to list schemas within Unity Catalog.
|
|
29
|
+
* * databricks.Catalog data to list catalogs within Unity Catalog.
|
|
30
|
+
*
|
|
31
|
+
* ## Import
|
|
32
|
+
*
|
|
33
|
+
* This resource can be imported by name:
|
|
34
|
+
*
|
|
35
|
+
* ```sh
|
|
36
|
+
* $ pulumi import databricks:index/catalog:Catalog this <name>
|
|
37
|
+
* ```
|
|
38
|
+
*/
|
|
39
|
+
class Catalog extends pulumi.CustomResource { // generated resource class; wraps the databricks Catalog Terraform resource
|
|
40
|
+
constructor(name, argsOrState, opts) { // argsOrState is the creation args normally, or prior state when opts.id is set
|
|
41
|
+
let resourceInputs = {};
|
|
42
|
+
opts = opts || {}; // default to empty options so opts.id can be checked safely
|
|
43
|
+
if (opts.id) { // an id means we are looking up existing state rather than creating
|
|
44
|
+
const state = argsOrState;
|
|
45
|
+
resourceInputs["comment"] = state ? state.comment : undefined;
|
|
46
|
+
resourceInputs["metastoreId"] = state ? state.metastoreId : undefined;
|
|
47
|
+
resourceInputs["name"] = state ? state.name : undefined;
|
|
48
|
+
resourceInputs["owner"] = state ? state.owner : undefined;
|
|
49
|
+
resourceInputs["properties"] = state ? state.properties : undefined;
|
|
50
|
+
}
|
|
51
|
+
else {
|
|
52
|
+
const args = argsOrState; // creation path: copy the same five inputs from args
|
|
53
|
+
resourceInputs["comment"] = args ? args.comment : undefined;
|
|
54
|
+
resourceInputs["metastoreId"] = args ? args.metastoreId : undefined;
|
|
55
|
+
resourceInputs["name"] = args ? args.name : undefined;
|
|
56
|
+
resourceInputs["owner"] = args ? args.owner : undefined;
|
|
57
|
+
resourceInputs["properties"] = args ? args.properties : undefined;
|
|
58
|
+
}
|
|
59
|
+
opts = pulumi.mergeOptions(utilities.resourceOptsDefaults(), opts); // merge in package-wide default resource options before registering
|
|
60
|
+
super(Catalog.__pulumiType, name, resourceInputs, opts);
|
|
61
|
+
}
|
|
62
|
+
/**
|
|
63
|
+
* Get an existing Catalog resource's state with the given name, ID, and optional extra
|
|
64
|
+
* properties used to qualify the lookup.
|
|
65
|
+
*
|
|
66
|
+
* @param name The _unique_ name of the resulting resource.
|
|
67
|
+
* @param id The _unique_ provider ID of the resource to lookup.
|
|
68
|
+
* @param state Any extra arguments used during the lookup.
|
|
69
|
+
* @param opts Optional settings to control the behavior of the CustomResource.
|
|
70
|
+
*/
|
|
71
|
+
static get(name, id, state, opts) {
|
|
72
|
+
return new Catalog(name, state, Object.assign(Object.assign({}, opts), { id: id })); // supplying id routes the constructor into its state-lookup branch
|
|
73
|
+
}
|
|
74
|
+
/**
|
|
75
|
+
* Returns true if the given object is an instance of Catalog. This is designed to work even
|
|
76
|
+
* when multiple copies of the Pulumi SDK have been loaded into the same process.
|
|
77
|
+
*/
|
|
78
|
+
static isInstance(obj) {
|
|
79
|
+
if (obj === undefined || obj === null) { // explicit null/undefined guard before the property access below
|
|
80
|
+
return false;
|
|
81
|
+
}
|
|
82
|
+
return obj['__pulumiType'] === Catalog.__pulumiType; // brand-property check instead of instanceof, so duplicate SDK copies still match
|
|
83
|
+
}
|
|
84
|
+
}
|
|
85
|
+
exports.Catalog = Catalog;
|
|
86
|
+
/** @internal */
|
|
87
|
+
Catalog.__pulumiType = 'databricks:index/catalog:Catalog'; // Pulumi type token: <package>:<module>:<type>
|
|
88
|
+
//# sourceMappingURL=catalog.js.map
|
package/catalog.js.map
ADDED
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"catalog.js","sourceRoot":"","sources":["../catalog.ts"],"names":[],"mappings":";AAAA,wFAAwF;AACxF,iFAAiF;;;AAEjF,yCAAyC;AACzC,yCAAyC;AAEzC;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;GA8BG;AACH,MAAa,OAAQ,SAAQ,MAAM,CAAC,cAAc;IAsD9C,YAAY,IAAY,EAAE,WAAwC,EAAE,IAAmC;QACnG,IAAI,cAAc,GAAkB,EAAE,CAAC;QACvC,IAAI,GAAG,IAAI,IAAI,EAAE,CAAC;QAClB,IAAI,IAAI,CAAC,EAAE,EAAE;YACT,MAAM,KAAK,GAAG,WAAuC,CAAC;YACtD,cAAc,CAAC,SAAS,CAAC,GAAG,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC,OAAO,CAAC,CAAC,CAAC,SAAS,CAAC;YAC9D,cAAc,CAAC,aAAa,CAAC,GAAG,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC,WAAW,CAAC,CAAC,CAAC,SAAS,CAAC;YACtE,cAAc,CAAC,MAAM,CAAC,GAAG,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC,IAAI,CAAC,CAAC,CAAC,SAAS,CAAC;YACxD,cAAc,CAAC,OAAO,CAAC,GAAG,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC,KAAK,CAAC,CAAC,CAAC,SAAS,CAAC;YAC1D,cAAc,CAAC,YAAY,CAAC,GAAG,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC,UAAU,CAAC,CAAC,CAAC,SAAS,CAAC;SACvE;aAAM;YACH,MAAM,IAAI,GAAG,WAAsC,CAAC;YACpD,cAAc,CAAC,SAAS,CAAC,GAAG,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC,OAAO,CAAC,CAAC,CAAC,SAAS,CAAC;YAC5D,cAAc,CAAC,aAAa,CAAC,GAAG,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC,WAAW,CAAC,CAAC,CAAC,SAAS,CAAC;YACpE,cAAc,CAAC,MAAM,CAAC,GAAG,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC,CAAC,SAAS,CAAC;YACtD,cAAc,CAAC,OAAO,CAAC,GAAG,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC,KAAK,CAAC,CAAC,CAAC,SAAS,CAAC;YACxD,cAAc,CAAC,YAAY,CAAC,GAAG,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC,UAAU,CAAC,CAAC,CAAC,SAAS,CAAC;SACrE;QACD,IAAI,GAAG,MAAM,CAAC,YAAY,CAAC,SAAS,CAAC,oBAAoB,EAAE,EAAE,IAAI,CAAC,CAAC;QACnE,KAAK,CAAC,OAAO,CAAC,YAAY,EAAE,IAAI,EAAE,cAAc,EAAE,IAAI,CAAC,CAAC;IAC5D,CAAC;IAzED;;;;;;;;OAQG;IACI,MAAM,CAAC,GAAG,CAAC,IAAY,EAAE,EAA2B,EAAE,KAAoB,EAAE,IAAmC;QAClH,OAAO,IAAI,OAAO,CAAC,IAAI,EAAO,KAAK,kCAAO,IAAI,KAAE,EAAE,EAAE,EAAE,IAAG,CAAC;IAC9D,CAAC;IAKD;;;OAGG;IACI,MAAM,CAAC,UAAU,CAAC,GAAQ;QAC7B,IAAI,GAAG,KAAK,SAAS,IAAI,GAAG,KAAK,IAAI,EAAE;YACnC,OAAO,KAAK,CAAC;SAChB;QACD,OAAO,GAAG,CAAC,cAAc,CAAC,KAAK,OAAO,CAAC,YAAY,CAAC;IACxD,CAAC;;AA1BL,0BA2EC;AA7DG,gBAAgB;AACO,oBAAY,GAAG,kCAAkC,CAAC"}
|
package/cluster.d.ts
ADDED
|
@@ -0,0 +1,332 @@
|
|
|
1
|
+
import * as pulumi from "@pulumi/pulumi";
|
|
2
|
+
import { input as inputs, output as outputs } from "./types";
|
|
3
|
+
/**
|
|
4
|
+
* ## Import
|
|
5
|
+
*
|
|
6
|
+
* The resource cluster can be imported using its cluster id:
|
|
7
|
+
*
|
|
8
|
+
* ```sh
|
|
9
|
+
* $ pulumi import databricks:index/cluster:Cluster this <cluster-id>
|
|
10
|
+
* ```
|
|
11
|
+
*/
|
|
12
|
+
export declare class Cluster extends pulumi.CustomResource {
|
|
13
|
+
/**
|
|
14
|
+
* Get an existing Cluster resource's state with the given name, ID, and optional extra
|
|
15
|
+
* properties used to qualify the lookup.
|
|
16
|
+
*
|
|
17
|
+
* @param name The _unique_ name of the resulting resource.
|
|
18
|
+
* @param id The _unique_ provider ID of the resource to lookup.
|
|
19
|
+
* @param state Any extra arguments used during the lookup.
|
|
20
|
+
* @param opts Optional settings to control the behavior of the CustomResource.
|
|
21
|
+
*/
|
|
22
|
+
static get(name: string, id: pulumi.Input<pulumi.ID>, state?: ClusterState, opts?: pulumi.CustomResourceOptions): Cluster;
|
|
23
|
+
/**
|
|
24
|
+
* Returns true if the given object is an instance of Cluster. This is designed to work even
|
|
25
|
+
* when multiple copies of the Pulumi SDK have been loaded into the same process.
|
|
26
|
+
*/
|
|
27
|
+
static isInstance(obj: any): obj is Cluster;
|
|
28
|
+
readonly autoscale: pulumi.Output<outputs.ClusterAutoscale | undefined>;
|
|
29
|
+
/**
|
|
30
|
+
* Automatically terminate the cluster after being inactive for this time in minutes. If not set, Databricks won't automatically terminate an inactive cluster. If specified, the threshold must be between 10 and 10000 minutes. You can also set this value to 0 to explicitly disable automatic termination. _We highly recommend having this setting present for Interactive/BI clusters._
|
|
31
|
+
*/
|
|
32
|
+
readonly autoterminationMinutes: pulumi.Output<number | undefined>;
|
|
33
|
+
readonly awsAttributes: pulumi.Output<outputs.ClusterAwsAttributes | undefined>;
|
|
34
|
+
readonly azureAttributes: pulumi.Output<outputs.ClusterAzureAttributes | undefined>;
|
|
35
|
+
readonly clusterId: pulumi.Output<string>;
|
|
36
|
+
readonly clusterLogConf: pulumi.Output<outputs.ClusterClusterLogConf | undefined>;
|
|
37
|
+
/**
|
|
38
|
+
* Cluster name, which doesn’t have to be unique. If not specified at creation, the cluster name will be an empty string.
|
|
39
|
+
*/
|
|
40
|
+
readonly clusterName: pulumi.Output<string | undefined>;
|
|
41
|
+
/**
|
|
42
|
+
* Additional tags for cluster resources. Databricks will tag all cluster resources (e.g., AWS EC2 instances and EBS volumes) with these tags in addition to `defaultTags`.
|
|
43
|
+
*/
|
|
44
|
+
readonly customTags: pulumi.Output<{
|
|
45
|
+
[key: string]: any;
|
|
46
|
+
} | undefined>;
|
|
47
|
+
/**
|
|
48
|
+
* Select the security features of the cluster. Unity Catalog requires `SINGLE_USER` or `USER_ISOLATION` mode. `LEGACY_PASSTHROUGH` for passthrough cluster and `LEGACY_TABLE_ACL` for Table ACL cluster. Default to `NONE`, i.e. no security feature enabled.
|
|
49
|
+
*/
|
|
50
|
+
readonly dataSecurityMode: pulumi.Output<string | undefined>;
|
|
51
|
+
/**
|
|
52
|
+
* (map) Tags that are added by Databricks by default, regardless of any customTags that may have been added. These include: Vendor: Databricks, Creator: <username_of_creator>, ClusterName: <name_of_cluster>, ClusterId: <id_of_cluster>, Name: <Databricks internal use>
|
|
53
|
+
*/
|
|
54
|
+
readonly defaultTags: pulumi.Output<{
|
|
55
|
+
[key: string]: any;
|
|
56
|
+
}>;
|
|
57
|
+
readonly dockerImage: pulumi.Output<outputs.ClusterDockerImage | undefined>;
|
|
58
|
+
/**
|
|
59
|
+
* similar to `instancePoolId`, but for driver node. If omitted, and `instancePoolId` is specified, then driver will be allocated from that pool.
|
|
60
|
+
*/
|
|
61
|
+
readonly driverInstancePoolId: pulumi.Output<string>;
|
|
62
|
+
/**
|
|
63
|
+
* The node type of the Spark driver. This field is optional; if unset, API will set the driver node type to the same value as `nodeTypeId` defined above.
|
|
64
|
+
*/
|
|
65
|
+
readonly driverNodeTypeId: pulumi.Output<string>;
|
|
66
|
+
/**
|
|
67
|
+
* If you don’t want to allocate a fixed number of EBS volumes at cluster creation time, use autoscaling local storage. With autoscaling local storage, Databricks monitors the amount of free disk space available on your cluster’s Spark workers. If a worker begins to run too low on disk, Databricks automatically attaches a new EBS volume to the worker before it runs out of disk space. EBS volumes are attached up to a limit of 5 TB of total disk space per instance (including the instance’s local storage). To scale down EBS usage, make sure you have `autoterminationMinutes` and `autoscale` attributes set. More documentation available at [cluster configuration page](https://docs.databricks.com/clusters/configure.html#autoscaling-local-storage-1).
|
|
68
|
+
*/
|
|
69
|
+
readonly enableElasticDisk: pulumi.Output<boolean>;
|
|
70
|
+
/**
|
|
71
|
+
* Some instance types you use to run clusters may have locally attached disks. Databricks may store shuffle data or temporary data on these locally attached disks. To ensure that all data at rest is encrypted for all storage types, including shuffle data stored temporarily on your cluster’s local disks, you can enable local disk encryption. When local disk encryption is enabled, Databricks generates an encryption key locally unique to each cluster node and encrypting all data stored on local disks. The scope of the key is local to each cluster node and is destroyed along with the cluster node itself. During its lifetime, the key resides in memory for encryption and decryption and is stored encrypted on the disk. _Your workloads may run more slowly because of the performance impact of reading and writing encrypted data to and from local volumes. This feature is not available for all Azure Databricks subscriptions. Contact your Microsoft or Databricks account representative to request access._
|
|
72
|
+
*/
|
|
73
|
+
readonly enableLocalDiskEncryption: pulumi.Output<boolean>;
|
|
74
|
+
readonly gcpAttributes: pulumi.Output<outputs.ClusterGcpAttributes | undefined>;
|
|
75
|
+
/**
|
|
76
|
+
* An optional token to guarantee the idempotency of cluster creation requests. If an active cluster with the provided token already exists, the request will not create a new cluster, but it will return the existing running cluster's ID instead. If you specify the idempotency token, upon failure, you can retry until the request succeeds. Databricks platform guarantees to launch exactly one cluster with that idempotency token. This token should have at most 64 characters.
|
|
77
|
+
*/
|
|
78
|
+
readonly idempotencyToken: pulumi.Output<string | undefined>;
|
|
79
|
+
readonly initScripts: pulumi.Output<outputs.ClusterInitScript[] | undefined>;
|
|
80
|
+
/**
|
|
81
|
+
* - To reduce cluster start time, you can attach a cluster to a predefined pool of idle instances. When attached to a pool, a cluster allocates its driver and worker nodes from the pool. If the pool does not have sufficient idle resources to accommodate the cluster’s request, it expands by allocating new instances from the instance provider. When an attached cluster changes its state to `TERMINATED`, the instances it used are returned to the pool and reused by a different cluster.
|
|
82
|
+
*/
|
|
83
|
+
readonly instancePoolId: pulumi.Output<string | undefined>;
|
|
84
|
+
/**
|
|
85
|
+
* boolean value specifying if cluster is pinned (not pinned by default). You must be a Databricks administrator to use this. The pinned clusters' maximum number is [limited to 70](https://docs.databricks.com/clusters/clusters-manage.html#pin-a-cluster), so `apply` may fail if you have more than that.
|
|
86
|
+
*/
|
|
87
|
+
readonly isPinned: pulumi.Output<boolean | undefined>;
|
|
88
|
+
readonly libraries: pulumi.Output<outputs.ClusterLibrary[] | undefined>;
|
|
89
|
+
/**
|
|
90
|
+
* Any supported databricks.getNodeType id. If `instancePoolId` is specified, this field is not needed.
|
|
91
|
+
*/
|
|
92
|
+
readonly nodeTypeId: pulumi.Output<string>;
|
|
93
|
+
readonly numWorkers: pulumi.Output<number | undefined>;
|
|
94
|
+
/**
|
|
95
|
+
* Identifier of Cluster Policy to validate cluster and preset certain defaults. *The primary use for cluster policies is to allow users to create policy-scoped clusters via UI rather than sharing configuration for API-created clusters.* For example, when you specify `policyId` of [external metastore](https://docs.databricks.com/administration-guide/clusters/policies.html#external-metastore-policy) policy, you still have to fill in relevant keys for `sparkConf`.
|
|
96
|
+
*/
|
|
97
|
+
readonly policyId: pulumi.Output<string | undefined>;
|
|
98
|
+
/**
|
|
99
|
+
* The optional user name of the user to assign to an interactive cluster. This field is required when using standard AAD Passthrough for Azure Data Lake Storage (ADLS) with a single-user cluster (i.e., not high-concurrency clusters).
|
|
100
|
+
*/
|
|
101
|
+
readonly singleUserName: pulumi.Output<string | undefined>;
|
|
102
|
+
/**
|
|
103
|
+
* Map with key-value pairs to fine-tune Spark clusters, where you can provide custom [Spark configuration properties](https://spark.apache.org/docs/latest/configuration.html) in a cluster configuration.
|
|
104
|
+
*/
|
|
105
|
+
readonly sparkConf: pulumi.Output<{
|
|
106
|
+
[key: string]: any;
|
|
107
|
+
} | undefined>;
|
|
108
|
+
/**
|
|
109
|
+
* Map with environment variable key-value pairs to fine-tune Spark clusters. Key-value pairs of the form (X,Y) are exported (i.e., X='Y') while launching the driver and workers.
|
|
110
|
+
*/
|
|
111
|
+
readonly sparkEnvVars: pulumi.Output<{
|
|
112
|
+
[key: string]: any;
|
|
113
|
+
} | undefined>;
|
|
114
|
+
/**
|
|
115
|
+
* [Runtime version](https://docs.databricks.com/runtime/index.html) of the cluster. Any supported databricks.getSparkVersion id. We advise using Cluster Policies to restrict the list of versions for simplicity while maintaining enough control.
|
|
116
|
+
*/
|
|
117
|
+
readonly sparkVersion: pulumi.Output<string>;
|
|
118
|
+
/**
|
|
119
|
+
* SSH public key contents that will be added to each Spark node in this cluster. The corresponding private keys can be used to login with the user name ubuntu on port 2200. You can specify up to 10 keys.
|
|
120
|
+
*/
|
|
121
|
+
readonly sshPublicKeys: pulumi.Output<string[] | undefined>;
|
|
122
|
+
/**
|
|
123
|
+
* (string) State of the cluster.
|
|
124
|
+
*/
|
|
125
|
+
readonly state: pulumi.Output<string>;
|
|
126
|
+
readonly url: pulumi.Output<string>;
|
|
127
|
+
/**
|
|
128
|
+
* Create a Cluster resource with the given unique name, arguments, and options.
|
|
129
|
+
*
|
|
130
|
+
* @param name The _unique_ name of the resource.
|
|
131
|
+
* @param args The arguments to use to populate this resource's properties.
|
|
132
|
+
* @param opts A bag of options that control this resource's behavior.
|
|
133
|
+
*/
|
|
134
|
+
constructor(name: string, args: ClusterArgs, opts?: pulumi.CustomResourceOptions);
|
|
135
|
+
}
|
|
136
|
+
/**
|
|
137
|
+
* Input properties used for looking up and filtering Cluster resources.
|
|
138
|
+
*/
|
|
139
|
+
export interface ClusterState {
|
|
140
|
+
autoscale?: pulumi.Input<inputs.ClusterAutoscale>;
|
|
141
|
+
/**
|
|
142
|
+
* Automatically terminate the cluster after being inactive for this time in minutes. If not set, Databricks won't automatically terminate an inactive cluster. If specified, the threshold must be between 10 and 10000 minutes. You can also set this value to 0 to explicitly disable automatic termination. _We highly recommend having this setting present for Interactive/BI clusters._
|
|
143
|
+
*/
|
|
144
|
+
autoterminationMinutes?: pulumi.Input<number>;
|
|
145
|
+
awsAttributes?: pulumi.Input<inputs.ClusterAwsAttributes>;
|
|
146
|
+
azureAttributes?: pulumi.Input<inputs.ClusterAzureAttributes>;
|
|
147
|
+
clusterId?: pulumi.Input<string>;
|
|
148
|
+
clusterLogConf?: pulumi.Input<inputs.ClusterClusterLogConf>;
|
|
149
|
+
/**
|
|
150
|
+
* Cluster name, which doesn’t have to be unique. If not specified at creation, the cluster name will be an empty string.
|
|
151
|
+
*/
|
|
152
|
+
clusterName?: pulumi.Input<string>;
|
|
153
|
+
/**
|
|
154
|
+
* Additional tags for cluster resources. Databricks will tag all cluster resources (e.g., AWS EC2 instances and EBS volumes) with these tags in addition to `defaultTags`.
|
|
155
|
+
*/
|
|
156
|
+
customTags?: pulumi.Input<{
|
|
157
|
+
[key: string]: any;
|
|
158
|
+
}>;
|
|
159
|
+
/**
|
|
160
|
+
* Select the security features of the cluster. Unity Catalog requires `SINGLE_USER` or `USER_ISOLATION` mode. `LEGACY_PASSTHROUGH` for passthrough cluster and `LEGACY_TABLE_ACL` for Table ACL cluster. Default to `NONE`, i.e. no security feature enabled.
|
|
161
|
+
*/
|
|
162
|
+
dataSecurityMode?: pulumi.Input<string>;
|
|
163
|
+
/**
|
|
164
|
+
* (map) Tags that are added by Databricks by default, regardless of any customTags that may have been added. These include: Vendor: Databricks, Creator: <username_of_creator>, ClusterName: <name_of_cluster>, ClusterId: <id_of_cluster>, Name: <Databricks internal use>
|
|
165
|
+
*/
|
|
166
|
+
defaultTags?: pulumi.Input<{
|
|
167
|
+
[key: string]: any;
|
|
168
|
+
}>;
|
|
169
|
+
dockerImage?: pulumi.Input<inputs.ClusterDockerImage>;
|
|
170
|
+
/**
|
|
171
|
+
* similar to `instancePoolId`, but for driver node. If omitted, and `instancePoolId` is specified, then driver will be allocated from that pool.
|
|
172
|
+
*/
|
|
173
|
+
driverInstancePoolId?: pulumi.Input<string>;
|
|
174
|
+
/**
|
|
175
|
+
* The node type of the Spark driver. This field is optional; if unset, API will set the driver node type to the same value as `nodeTypeId` defined above.
|
|
176
|
+
*/
|
|
177
|
+
driverNodeTypeId?: pulumi.Input<string>;
|
|
178
|
+
/**
|
|
179
|
+
* If you don’t want to allocate a fixed number of EBS volumes at cluster creation time, use autoscaling local storage. With autoscaling local storage, Databricks monitors the amount of free disk space available on your cluster’s Spark workers. If a worker begins to run too low on disk, Databricks automatically attaches a new EBS volume to the worker before it runs out of disk space. EBS volumes are attached up to a limit of 5 TB of total disk space per instance (including the instance’s local storage). To scale down EBS usage, make sure you have `autoterminationMinutes` and `autoscale` attributes set. More documentation available at [cluster configuration page](https://docs.databricks.com/clusters/configure.html#autoscaling-local-storage-1).
|
|
180
|
+
*/
|
|
181
|
+
enableElasticDisk?: pulumi.Input<boolean>;
|
|
182
|
+
/**
|
|
183
|
+
* Some instance types you use to run clusters may have locally attached disks. Databricks may store shuffle data or temporary data on these locally attached disks. To ensure that all data at rest is encrypted for all storage types, including shuffle data stored temporarily on your cluster’s local disks, you can enable local disk encryption. When local disk encryption is enabled, Databricks generates an encryption key locally unique to each cluster node and encrypting all data stored on local disks. The scope of the key is local to each cluster node and is destroyed along with the cluster node itself. During its lifetime, the key resides in memory for encryption and decryption and is stored encrypted on the disk. _Your workloads may run more slowly because of the performance impact of reading and writing encrypted data to and from local volumes. This feature is not available for all Azure Databricks subscriptions. Contact your Microsoft or Databricks account representative to request access._
|
|
184
|
+
*/
|
|
185
|
+
enableLocalDiskEncryption?: pulumi.Input<boolean>;
|
|
186
|
+
gcpAttributes?: pulumi.Input<inputs.ClusterGcpAttributes>;
|
|
187
|
+
/**
|
|
188
|
+
* An optional token to guarantee the idempotency of cluster creation requests. If an active cluster with the provided token already exists, the request will not create a new cluster, but it will return the existing running cluster's ID instead. If you specify the idempotency token, upon failure, you can retry until the request succeeds. Databricks platform guarantees to launch exactly one cluster with that idempotency token. This token should have at most 64 characters.
|
|
189
|
+
*/
|
|
190
|
+
idempotencyToken?: pulumi.Input<string>;
|
|
191
|
+
initScripts?: pulumi.Input<pulumi.Input<inputs.ClusterInitScript>[]>;
|
|
192
|
+
/**
|
|
193
|
+
* - To reduce cluster start time, you can attach a cluster to a predefined pool of idle instances. When attached to a pool, a cluster allocates its driver and worker nodes from the pool. If the pool does not have sufficient idle resources to accommodate the cluster’s request, it expands by allocating new instances from the instance provider. When an attached cluster changes its state to `TERMINATED`, the instances it used are returned to the pool and reused by a different cluster.
|
|
194
|
+
*/
|
|
195
|
+
instancePoolId?: pulumi.Input<string>;
|
|
196
|
+
/**
|
|
197
|
+
* boolean value specifying if cluster is pinned (not pinned by default). You must be a Databricks administrator to use this. The pinned clusters' maximum number is [limited to 70](https://docs.databricks.com/clusters/clusters-manage.html#pin-a-cluster), so `apply` may fail if you have more than that.
|
|
198
|
+
*/
|
|
199
|
+
isPinned?: pulumi.Input<boolean>;
|
|
200
|
+
libraries?: pulumi.Input<pulumi.Input<inputs.ClusterLibrary>[]>;
|
|
201
|
+
/**
|
|
202
|
+
* Any supported databricks.getNodeType id. If `instancePoolId` is specified, this field is not needed.
|
|
203
|
+
*/
|
|
204
|
+
nodeTypeId?: pulumi.Input<string>;
|
|
205
|
+
numWorkers?: pulumi.Input<number>;
|
|
206
|
+
/**
|
|
207
|
+
* Identifier of Cluster Policy to validate cluster and preset certain defaults. *The primary use for cluster policies is to allow users to create policy-scoped clusters via UI rather than sharing configuration for API-created clusters.* For example, when you specify `policyId` of [external metastore](https://docs.databricks.com/administration-guide/clusters/policies.html#external-metastore-policy) policy, you still have to fill in relevant keys for `sparkConf`.
|
|
208
|
+
*/
|
|
209
|
+
policyId?: pulumi.Input<string>;
|
|
210
|
+
/**
|
|
211
|
+
* The optional user name of the user to assign to an interactive cluster. This field is required when using standard AAD Passthrough for Azure Data Lake Storage (ADLS) with a single-user cluster (i.e., not high-concurrency clusters).
|
|
212
|
+
*/
|
|
213
|
+
singleUserName?: pulumi.Input<string>;
|
|
214
|
+
/**
|
|
215
|
+
* Map with key-value pairs to fine-tune Spark clusters, where you can provide custom [Spark configuration properties](https://spark.apache.org/docs/latest/configuration.html) in a cluster configuration.
|
|
216
|
+
*/
|
|
217
|
+
sparkConf?: pulumi.Input<{
|
|
218
|
+
[key: string]: any;
|
|
219
|
+
}>;
|
|
220
|
+
/**
|
|
221
|
+
* Map with environment variable key-value pairs to fine-tune Spark clusters. Key-value pairs of the form (X,Y) are exported (i.e., X='Y') while launching the driver and workers.
|
|
222
|
+
*/
|
|
223
|
+
sparkEnvVars?: pulumi.Input<{
|
|
224
|
+
[key: string]: any;
|
|
225
|
+
}>;
|
|
226
|
+
/**
|
|
227
|
+
* [Runtime version](https://docs.databricks.com/runtime/index.html) of the cluster. Any supported databricks.getSparkVersion id. We advise using Cluster Policies to restrict the list of versions for simplicity while maintaining enough control.
|
|
228
|
+
*/
|
|
229
|
+
sparkVersion?: pulumi.Input<string>;
|
|
230
|
+
/**
|
|
231
|
+
* SSH public key contents that will be added to each Spark node in this cluster. The corresponding private keys can be used to login with the user name ubuntu on port 2200. You can specify up to 10 keys.
|
|
232
|
+
*/
|
|
233
|
+
sshPublicKeys?: pulumi.Input<pulumi.Input<string>[]>;
|
|
234
|
+
/**
|
|
235
|
+
* (string) State of the cluster.
|
|
236
|
+
*/
|
|
237
|
+
state?: pulumi.Input<string>;
|
|
238
|
+
url?: pulumi.Input<string>;
|
|
239
|
+
}
|
|
240
|
+
/**
|
|
241
|
+
* The set of arguments for constructing a Cluster resource.
|
|
242
|
+
*/
|
|
243
|
+
export interface ClusterArgs {
|
|
244
|
+
autoscale?: pulumi.Input<inputs.ClusterAutoscale>;
|
|
245
|
+
/**
|
|
246
|
+
* Automatically terminate the cluster after being inactive for this time in minutes. If not set, Databricks won't automatically terminate an inactive cluster. If specified, the threshold must be between 10 and 10000 minutes. You can also set this value to 0 to explicitly disable automatic termination. _We highly recommend having this setting present for Interactive/BI clusters._
|
|
247
|
+
*/
|
|
248
|
+
autoterminationMinutes?: pulumi.Input<number>;
|
|
249
|
+
awsAttributes?: pulumi.Input<inputs.ClusterAwsAttributes>;
|
|
250
|
+
azureAttributes?: pulumi.Input<inputs.ClusterAzureAttributes>;
|
|
251
|
+
clusterId?: pulumi.Input<string>;
|
|
252
|
+
clusterLogConf?: pulumi.Input<inputs.ClusterClusterLogConf>;
|
|
253
|
+
/**
|
|
254
|
+
* Cluster name, which doesn’t have to be unique. If not specified at creation, the cluster name will be an empty string.
|
|
255
|
+
*/
|
|
256
|
+
clusterName?: pulumi.Input<string>;
|
|
257
|
+
/**
|
|
258
|
+
* Additional tags for cluster resources. Databricks will tag all cluster resources (e.g., AWS EC2 instances and EBS volumes) with these tags in addition to `defaultTags`.
|
|
259
|
+
*/
|
|
260
|
+
customTags?: pulumi.Input<{
|
|
261
|
+
[key: string]: any;
|
|
262
|
+
}>;
|
|
263
|
+
/**
|
|
264
|
+
* Select the security features of the cluster. Unity Catalog requires `SINGLE_USER` or `USER_ISOLATION` mode. `LEGACY_PASSTHROUGH` for passthrough cluster and `LEGACY_TABLE_ACL` for Table ACL cluster. Default to `NONE`, i.e. no security feature enabled.
|
|
265
|
+
*/
|
|
266
|
+
dataSecurityMode?: pulumi.Input<string>;
|
|
267
|
+
dockerImage?: pulumi.Input<inputs.ClusterDockerImage>;
|
|
268
|
+
/**
|
|
269
|
+
* similar to `instancePoolId`, but for driver node. If omitted, and `instancePoolId` is specified, then driver will be allocated from that pool.
|
|
270
|
+
*/
|
|
271
|
+
driverInstancePoolId?: pulumi.Input<string>;
|
|
272
|
+
/**
|
|
273
|
+
* The node type of the Spark driver. This field is optional; if unset, API will set the driver node type to the same value as `nodeTypeId` defined above.
|
|
274
|
+
*/
|
|
275
|
+
driverNodeTypeId?: pulumi.Input<string>;
|
|
276
|
+
/**
|
|
277
|
+
* If you don’t want to allocate a fixed number of EBS volumes at cluster creation time, use autoscaling local storage. With autoscaling local storage, Databricks monitors the amount of free disk space available on your cluster’s Spark workers. If a worker begins to run too low on disk, Databricks automatically attaches a new EBS volume to the worker before it runs out of disk space. EBS volumes are attached up to a limit of 5 TB of total disk space per instance (including the instance’s local storage). To scale down EBS usage, make sure you have `autoterminationMinutes` and `autoscale` attributes set. More documentation available at [cluster configuration page](https://docs.databricks.com/clusters/configure.html#autoscaling-local-storage-1).
|
|
278
|
+
*/
|
|
279
|
+
enableElasticDisk?: pulumi.Input<boolean>;
|
|
280
|
+
/**
|
|
281
|
+
* Some instance types you use to run clusters may have locally attached disks. Databricks may store shuffle data or temporary data on these locally attached disks. To ensure that all data at rest is encrypted for all storage types, including shuffle data stored temporarily on your cluster’s local disks, you can enable local disk encryption. When local disk encryption is enabled, Databricks generates an encryption key locally unique to each cluster node and encrypting all data stored on local disks. The scope of the key is local to each cluster node and is destroyed along with the cluster node itself. During its lifetime, the key resides in memory for encryption and decryption and is stored encrypted on the disk. _Your workloads may run more slowly because of the performance impact of reading and writing encrypted data to and from local volumes. This feature is not available for all Azure Databricks subscriptions. Contact your Microsoft or Databricks account representative to request access._
|
|
282
|
+
*/
|
|
283
|
+
enableLocalDiskEncryption?: pulumi.Input<boolean>;
|
|
284
|
+
gcpAttributes?: pulumi.Input<inputs.ClusterGcpAttributes>;
|
|
285
|
+
/**
|
|
286
|
+
* An optional token to guarantee the idempotency of cluster creation requests. If an active cluster with the provided token already exists, the request will not create a new cluster, but it will return the existing running cluster's ID instead. If you specify the idempotency token, upon failure, you can retry until the request succeeds. Databricks platform guarantees to launch exactly one cluster with that idempotency token. This token should have at most 64 characters.
|
|
287
|
+
*/
|
|
288
|
+
idempotencyToken?: pulumi.Input<string>;
|
|
289
|
+
initScripts?: pulumi.Input<pulumi.Input<inputs.ClusterInitScript>[]>;
|
|
290
|
+
/**
|
|
291
|
+
* - To reduce cluster start time, you can attach a cluster to a predefined pool of idle instances. When attached to a pool, a cluster allocates its driver and worker nodes from the pool. If the pool does not have sufficient idle resources to accommodate the cluster’s request, it expands by allocating new instances from the instance provider. When an attached cluster changes its state to `TERMINATED`, the instances it used are returned to the pool and reused by a different cluster.
|
|
292
|
+
*/
|
|
293
|
+
instancePoolId?: pulumi.Input<string>;
|
|
294
|
+
/**
|
|
295
|
+
* boolean value specifying if cluster is pinned (not pinned by default). You must be a Databricks administrator to use this. The pinned clusters' maximum number is [limited to 70](https://docs.databricks.com/clusters/clusters-manage.html#pin-a-cluster), so `apply` may fail if you have more than that.
|
|
296
|
+
*/
|
|
297
|
+
isPinned?: pulumi.Input<boolean>;
|
|
298
|
+
libraries?: pulumi.Input<pulumi.Input<inputs.ClusterLibrary>[]>;
|
|
299
|
+
/**
|
|
300
|
+
* Any supported databricks.getNodeType id. If `instancePoolId` is specified, this field is not needed.
|
|
301
|
+
*/
|
|
302
|
+
nodeTypeId?: pulumi.Input<string>;
|
|
303
|
+
numWorkers?: pulumi.Input<number>;
|
|
304
|
+
/**
|
|
305
|
+
* Identifier of Cluster Policy to validate cluster and preset certain defaults. *The primary use for cluster policies is to allow users to create policy-scoped clusters via UI rather than sharing configuration for API-created clusters.* For example, when you specify `policyId` of [external metastore](https://docs.databricks.com/administration-guide/clusters/policies.html#external-metastore-policy) policy, you still have to fill in relevant keys for `sparkConf`.
|
|
306
|
+
*/
|
|
307
|
+
policyId?: pulumi.Input<string>;
|
|
308
|
+
/**
|
|
309
|
+
* The optional user name of the user to assign to an interactive cluster. This field is required when using standard AAD Passthrough for Azure Data Lake Storage (ADLS) with a single-user cluster (i.e., not high-concurrency clusters).
|
|
310
|
+
*/
|
|
311
|
+
singleUserName?: pulumi.Input<string>;
|
|
312
|
+
/**
|
|
313
|
+
* Map with key-value pairs to fine-tune Spark clusters, where you can provide custom [Spark configuration properties](https://spark.apache.org/docs/latest/configuration.html) in a cluster configuration.
|
|
314
|
+
*/
|
|
315
|
+
sparkConf?: pulumi.Input<{
|
|
316
|
+
[key: string]: any;
|
|
317
|
+
}>;
|
|
318
|
+
/**
|
|
319
|
+
* Map with environment variable key-value pairs to fine-tune Spark clusters. Key-value pairs of the form (X,Y) are exported (i.e., X='Y') while launching the driver and workers.
|
|
320
|
+
*/
|
|
321
|
+
sparkEnvVars?: pulumi.Input<{
|
|
322
|
+
[key: string]: any;
|
|
323
|
+
}>;
|
|
324
|
+
/**
|
|
325
|
+
* [Runtime version](https://docs.databricks.com/runtime/index.html) of the cluster. Any supported databricks.getSparkVersion id. We advise using Cluster Policies to restrict the list of versions for simplicity while maintaining enough control.
|
|
326
|
+
*/
|
|
327
|
+
sparkVersion: pulumi.Input<string>;
|
|
328
|
+
/**
|
|
329
|
+
* SSH public key contents that will be added to each Spark node in this cluster. The corresponding private keys can be used to login with the user name ubuntu on port 2200. You can specify up to 10 keys.
|
|
330
|
+
*/
|
|
331
|
+
sshPublicKeys?: pulumi.Input<pulumi.Input<string>[]>;
|
|
332
|
+
}
|
package/cluster.js
ADDED
|
@@ -0,0 +1,121 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
|
|
3
|
+
// *** Do not edit by hand unless you're certain you know what you are doing! ***
|
|
4
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
5
|
+
exports.Cluster = void 0;
|
|
6
|
+
const pulumi = require("@pulumi/pulumi");
|
|
7
|
+
const utilities = require("./utilities");
|
|
8
|
+
/**
|
|
9
|
+
* ## Import
|
|
10
|
+
*
|
|
11
|
+
* The resource cluster can be imported using cluster id. bash
|
|
12
|
+
*
|
|
13
|
+
* ```sh
|
|
14
|
+
* $ pulumi import databricks:index/cluster:Cluster this <cluster-id>
|
|
15
|
+
* ```
|
|
16
|
+
*/
|
|
17
|
+
class Cluster extends pulumi.CustomResource {
|
|
18
|
+
constructor(name, argsOrState, opts) {
|
|
19
|
+
let resourceInputs = {};
|
|
20
|
+
opts = opts || {};
|
|
21
|
+
if (opts.id) {
|
|
22
|
+
const state = argsOrState;
|
|
23
|
+
resourceInputs["autoscale"] = state ? state.autoscale : undefined;
|
|
24
|
+
resourceInputs["autoterminationMinutes"] = state ? state.autoterminationMinutes : undefined;
|
|
25
|
+
resourceInputs["awsAttributes"] = state ? state.awsAttributes : undefined;
|
|
26
|
+
resourceInputs["azureAttributes"] = state ? state.azureAttributes : undefined;
|
|
27
|
+
resourceInputs["clusterId"] = state ? state.clusterId : undefined;
|
|
28
|
+
resourceInputs["clusterLogConf"] = state ? state.clusterLogConf : undefined;
|
|
29
|
+
resourceInputs["clusterName"] = state ? state.clusterName : undefined;
|
|
30
|
+
resourceInputs["customTags"] = state ? state.customTags : undefined;
|
|
31
|
+
resourceInputs["dataSecurityMode"] = state ? state.dataSecurityMode : undefined;
|
|
32
|
+
resourceInputs["defaultTags"] = state ? state.defaultTags : undefined;
|
|
33
|
+
resourceInputs["dockerImage"] = state ? state.dockerImage : undefined;
|
|
34
|
+
resourceInputs["driverInstancePoolId"] = state ? state.driverInstancePoolId : undefined;
|
|
35
|
+
resourceInputs["driverNodeTypeId"] = state ? state.driverNodeTypeId : undefined;
|
|
36
|
+
resourceInputs["enableElasticDisk"] = state ? state.enableElasticDisk : undefined;
|
|
37
|
+
resourceInputs["enableLocalDiskEncryption"] = state ? state.enableLocalDiskEncryption : undefined;
|
|
38
|
+
resourceInputs["gcpAttributes"] = state ? state.gcpAttributes : undefined;
|
|
39
|
+
resourceInputs["idempotencyToken"] = state ? state.idempotencyToken : undefined;
|
|
40
|
+
resourceInputs["initScripts"] = state ? state.initScripts : undefined;
|
|
41
|
+
resourceInputs["instancePoolId"] = state ? state.instancePoolId : undefined;
|
|
42
|
+
resourceInputs["isPinned"] = state ? state.isPinned : undefined;
|
|
43
|
+
resourceInputs["libraries"] = state ? state.libraries : undefined;
|
|
44
|
+
resourceInputs["nodeTypeId"] = state ? state.nodeTypeId : undefined;
|
|
45
|
+
resourceInputs["numWorkers"] = state ? state.numWorkers : undefined;
|
|
46
|
+
resourceInputs["policyId"] = state ? state.policyId : undefined;
|
|
47
|
+
resourceInputs["singleUserName"] = state ? state.singleUserName : undefined;
|
|
48
|
+
resourceInputs["sparkConf"] = state ? state.sparkConf : undefined;
|
|
49
|
+
resourceInputs["sparkEnvVars"] = state ? state.sparkEnvVars : undefined;
|
|
50
|
+
resourceInputs["sparkVersion"] = state ? state.sparkVersion : undefined;
|
|
51
|
+
resourceInputs["sshPublicKeys"] = state ? state.sshPublicKeys : undefined;
|
|
52
|
+
resourceInputs["state"] = state ? state.state : undefined;
|
|
53
|
+
resourceInputs["url"] = state ? state.url : undefined;
|
|
54
|
+
}
|
|
55
|
+
else {
|
|
56
|
+
const args = argsOrState;
|
|
57
|
+
if ((!args || args.sparkVersion === undefined) && !opts.urn) {
|
|
58
|
+
throw new Error("Missing required property 'sparkVersion'");
|
|
59
|
+
}
|
|
60
|
+
resourceInputs["autoscale"] = args ? args.autoscale : undefined;
|
|
61
|
+
resourceInputs["autoterminationMinutes"] = args ? args.autoterminationMinutes : undefined;
|
|
62
|
+
resourceInputs["awsAttributes"] = args ? args.awsAttributes : undefined;
|
|
63
|
+
resourceInputs["azureAttributes"] = args ? args.azureAttributes : undefined;
|
|
64
|
+
resourceInputs["clusterId"] = args ? args.clusterId : undefined;
|
|
65
|
+
resourceInputs["clusterLogConf"] = args ? args.clusterLogConf : undefined;
|
|
66
|
+
resourceInputs["clusterName"] = args ? args.clusterName : undefined;
|
|
67
|
+
resourceInputs["customTags"] = args ? args.customTags : undefined;
|
|
68
|
+
resourceInputs["dataSecurityMode"] = args ? args.dataSecurityMode : undefined;
|
|
69
|
+
resourceInputs["dockerImage"] = args ? args.dockerImage : undefined;
|
|
70
|
+
resourceInputs["driverInstancePoolId"] = args ? args.driverInstancePoolId : undefined;
|
|
71
|
+
resourceInputs["driverNodeTypeId"] = args ? args.driverNodeTypeId : undefined;
|
|
72
|
+
resourceInputs["enableElasticDisk"] = args ? args.enableElasticDisk : undefined;
|
|
73
|
+
resourceInputs["enableLocalDiskEncryption"] = args ? args.enableLocalDiskEncryption : undefined;
|
|
74
|
+
resourceInputs["gcpAttributes"] = args ? args.gcpAttributes : undefined;
|
|
75
|
+
resourceInputs["idempotencyToken"] = args ? args.idempotencyToken : undefined;
|
|
76
|
+
resourceInputs["initScripts"] = args ? args.initScripts : undefined;
|
|
77
|
+
resourceInputs["instancePoolId"] = args ? args.instancePoolId : undefined;
|
|
78
|
+
resourceInputs["isPinned"] = args ? args.isPinned : undefined;
|
|
79
|
+
resourceInputs["libraries"] = args ? args.libraries : undefined;
|
|
80
|
+
resourceInputs["nodeTypeId"] = args ? args.nodeTypeId : undefined;
|
|
81
|
+
resourceInputs["numWorkers"] = args ? args.numWorkers : undefined;
|
|
82
|
+
resourceInputs["policyId"] = args ? args.policyId : undefined;
|
|
83
|
+
resourceInputs["singleUserName"] = args ? args.singleUserName : undefined;
|
|
84
|
+
resourceInputs["sparkConf"] = args ? args.sparkConf : undefined;
|
|
85
|
+
resourceInputs["sparkEnvVars"] = args ? args.sparkEnvVars : undefined;
|
|
86
|
+
resourceInputs["sparkVersion"] = args ? args.sparkVersion : undefined;
|
|
87
|
+
resourceInputs["sshPublicKeys"] = args ? args.sshPublicKeys : undefined;
|
|
88
|
+
resourceInputs["defaultTags"] = undefined /*out*/;
|
|
89
|
+
resourceInputs["state"] = undefined /*out*/;
|
|
90
|
+
resourceInputs["url"] = undefined /*out*/;
|
|
91
|
+
}
|
|
92
|
+
opts = pulumi.mergeOptions(utilities.resourceOptsDefaults(), opts);
|
|
93
|
+
super(Cluster.__pulumiType, name, resourceInputs, opts);
|
|
94
|
+
}
|
|
95
|
+
/**
|
|
96
|
+
* Get an existing Cluster resource's state with the given name, ID, and optional extra
|
|
97
|
+
* properties used to qualify the lookup.
|
|
98
|
+
*
|
|
99
|
+
* @param name The _unique_ name of the resulting resource.
|
|
100
|
+
* @param id The _unique_ provider ID of the resource to lookup.
|
|
101
|
+
* @param state Any extra arguments used during the lookup.
|
|
102
|
+
* @param opts Optional settings to control the behavior of the CustomResource.
|
|
103
|
+
*/
|
|
104
|
+
static get(name, id, state, opts) {
|
|
105
|
+
return new Cluster(name, state, Object.assign(Object.assign({}, opts), { id: id }));
|
|
106
|
+
}
|
|
107
|
+
/**
|
|
108
|
+
* Returns true if the given object is an instance of Cluster. This is designed to work even
|
|
109
|
+
* when multiple copies of the Pulumi SDK have been loaded into the same process.
|
|
110
|
+
*/
|
|
111
|
+
static isInstance(obj) {
|
|
112
|
+
if (obj === undefined || obj === null) {
|
|
113
|
+
return false;
|
|
114
|
+
}
|
|
115
|
+
return obj['__pulumiType'] === Cluster.__pulumiType;
|
|
116
|
+
}
|
|
117
|
+
}
|
|
118
|
+
exports.Cluster = Cluster;
|
|
119
|
+
/** @internal */
|
|
120
|
+
Cluster.__pulumiType = 'databricks:index/cluster:Cluster';
|
|
121
|
+
//# sourceMappingURL=cluster.js.map
|
package/cluster.js.map
ADDED
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"cluster.js","sourceRoot":"","sources":["../cluster.ts"],"names":[],"mappings":";AAAA,wFAAwF;AACxF,iFAAiF;;;AAEjF,yCAAyC;AAEzC,yCAAyC;AAEzC;;;;;;;;GAQG;AACH,MAAa,OAAQ,SAAQ,MAAM,CAAC,cAAc;IAgI9C,YAAY,IAAY,EAAE,WAAwC,EAAE,IAAmC;QACnG,IAAI,cAAc,GAAkB,EAAE,CAAC;QACvC,IAAI,GAAG,IAAI,IAAI,EAAE,CAAC;QAClB,IAAI,IAAI,CAAC,EAAE,EAAE;YACT,MAAM,KAAK,GAAG,WAAuC,CAAC;YACtD,cAAc,CAAC,WAAW,CAAC,GAAG,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC,SAAS,CAAC,CAAC,CAAC,SAAS,CAAC;YAClE,cAAc,CAAC,wBAAwB,CAAC,GAAG,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC,sBAAsB,CAAC,CAAC,CAAC,SAAS,CAAC;YAC5F,cAAc,CAAC,eAAe,CAAC,GAAG,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC,aAAa,CAAC,CAAC,CAAC,SAAS,CAAC;YAC1E,cAAc,CAAC,iBAAiB,CAAC,GAAG,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC,eAAe,CAAC,CAAC,CAAC,SAAS,CAAC;YAC9E,cAAc,CAAC,WAAW,CAAC,GAAG,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC,SAAS,CAAC,CAAC,CAAC,SAAS,CAAC;YAClE,cAAc,CAAC,gBAAgB,CAAC,GAAG,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC,cAAc,CAAC,CAAC,CAAC,SAAS,CAAC;YAC5E,cAAc,CAAC,aAAa,CAAC,GAAG,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC,WAAW,CAAC,CAAC,CAAC,SAAS,CAAC;YACtE,cAAc,CAAC,YAAY,CAAC,GAAG,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC,UAAU,CAAC,CAAC,CAAC,SAAS,CAAC;YACpE,cAAc,CAAC,kBAAkB,CAAC,GAAG,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC,gBAAgB,CAAC,CAAC,CAAC,SAAS,CAAC;YAChF,cAAc,CAAC,aAAa,CAAC,GAAG,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC,WAAW,CAAC,CAAC,CAAC,SAAS,CAAC;YACtE,cAAc,CAAC,aAAa,CAAC,GAAG,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC,WAAW,CAAC,CAAC,CAAC,SAAS,CAAC;YACtE,cAAc,CAAC,sBAAsB,CAAC,GAAG,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC,oBAAoB,CAAC,CAAC,CAAC,SAAS,CAAC;YACxF,cAAc,CAAC,kBAAkB,CAAC,GAAG,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC,gBAAgB,CAAC,CAAC,CAAC,SAAS,CAAC;YAChF,cAAc,CAAC,mBAAmB,CAAC,GAAG,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC,iBAAiB,CAAC,CAAC,CAAC,SAAS,CAAC;YAClF,cAAc,CAAC,2BAA2B,CAAC,GAAG,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC,yBAAyB,CAAC,CAAC,CAAC,SAAS,CAAC;YAClG,cAAc,CAAC,eAAe,CAAC,GAAG,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC,aAAa,CAAC,CAAC,CAAC,SAAS,CAAC;YAC1E,cAAc,CAAC,kBAAkB,CAAC,GAAG,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC,gBAAgB,CAAC,CAAC,CAAC,SAAS,CAAC;YAChF,cAAc,CAAC,aAAa,CAAC,GAAG,KAAK,CAAC,CAAC
,CAAC,KAAK,CAAC,WAAW,CAAC,CAAC,CAAC,SAAS,CAAC;YACtE,cAAc,CAAC,gBAAgB,CAAC,GAAG,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC,cAAc,CAAC,CAAC,CAAC,SAAS,CAAC;YAC5E,cAAc,CAAC,UAAU,CAAC,GAAG,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC,QAAQ,CAAC,CAAC,CAAC,SAAS,CAAC;YAChE,cAAc,CAAC,WAAW,CAAC,GAAG,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC,SAAS,CAAC,CAAC,CAAC,SAAS,CAAC;YAClE,cAAc,CAAC,YAAY,CAAC,GAAG,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC,UAAU,CAAC,CAAC,CAAC,SAAS,CAAC;YACpE,cAAc,CAAC,YAAY,CAAC,GAAG,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC,UAAU,CAAC,CAAC,CAAC,SAAS,CAAC;YACpE,cAAc,CAAC,UAAU,CAAC,GAAG,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC,QAAQ,CAAC,CAAC,CAAC,SAAS,CAAC;YAChE,cAAc,CAAC,gBAAgB,CAAC,GAAG,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC,cAAc,CAAC,CAAC,CAAC,SAAS,CAAC;YAC5E,cAAc,CAAC,WAAW,CAAC,GAAG,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC,SAAS,CAAC,CAAC,CAAC,SAAS,CAAC;YAClE,cAAc,CAAC,cAAc,CAAC,GAAG,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC,YAAY,CAAC,CAAC,CAAC,SAAS,CAAC;YACxE,cAAc,CAAC,cAAc,CAAC,GAAG,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC,YAAY,CAAC,CAAC,CAAC,SAAS,CAAC;YACxE,cAAc,CAAC,eAAe,CAAC,GAAG,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC,aAAa,CAAC,CAAC,CAAC,SAAS,CAAC;YAC1E,cAAc,CAAC,OAAO,CAAC,GAAG,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC,KAAK,CAAC,CAAC,CAAC,SAAS,CAAC;YAC1D,cAAc,CAAC,KAAK,CAAC,GAAG,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC,GAAG,CAAC,CAAC,CAAC,SAAS,CAAC;SACzD;aAAM;YACH,MAAM,IAAI,GAAG,WAAsC,CAAC;YACpD,IAAI,CAAC,CAAC,IAAI,IAAI,IAAI,CAAC,YAAY,KAAK,SAAS,CAAC,IAAI,CAAC,IAAI,CAAC,GAAG,EAAE;gBACzD,MAAM,IAAI,KAAK,CAAC,0CAA0C,CAAC,CAAC;aAC/D;YACD,cAAc,CAAC,WAAW,CAAC,GAAG,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC,SAAS,CAAC,CAAC,CAAC,SAAS,CAAC;YAChE,cAAc,CAAC,wBAAwB,CAAC,GAAG,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC,sBAAsB,CAAC,CAAC,CAAC,SAAS,CAAC;YAC1F,cAAc,CAAC,eAAe,CAAC,GAAG,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC,aAAa,CAAC,CAAC,CAAC,SAAS,CAAC;YACxE,cAAc,CAAC,iBAAiB,CAAC,GAAG,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC,eAAe,CAAC,CAAC,CAAC,SAAS,CAAC;YAC5E,cAAc,CAAC,WAAW,CAAC,GAAG,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC,SAAS,CAAC,CAAC,CAAC,SAAS,CAAC;YAChE,cAAc,CAAC,gBAAgB,CAAC,GAAG,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC,cAAc,CAAC,CAAC,CAAC,SAAS,CAAC;YAC1E,cAAc,CAAC,aAAa,CAAC,GAAG,
IAAI,CAAC,CAAC,CAAC,IAAI,CAAC,WAAW,CAAC,CAAC,CAAC,SAAS,CAAC;YACpE,cAAc,CAAC,YAAY,CAAC,GAAG,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC,UAAU,CAAC,CAAC,CAAC,SAAS,CAAC;YAClE,cAAc,CAAC,kBAAkB,CAAC,GAAG,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC,gBAAgB,CAAC,CAAC,CAAC,SAAS,CAAC;YAC9E,cAAc,CAAC,aAAa,CAAC,GAAG,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC,WAAW,CAAC,CAAC,CAAC,SAAS,CAAC;YACpE,cAAc,CAAC,sBAAsB,CAAC,GAAG,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC,oBAAoB,CAAC,CAAC,CAAC,SAAS,CAAC;YACtF,cAAc,CAAC,kBAAkB,CAAC,GAAG,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC,gBAAgB,CAAC,CAAC,CAAC,SAAS,CAAC;YAC9E,cAAc,CAAC,mBAAmB,CAAC,GAAG,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC,iBAAiB,CAAC,CAAC,CAAC,SAAS,CAAC;YAChF,cAAc,CAAC,2BAA2B,CAAC,GAAG,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC,yBAAyB,CAAC,CAAC,CAAC,SAAS,CAAC;YAChG,cAAc,CAAC,eAAe,CAAC,GAAG,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC,aAAa,CAAC,CAAC,CAAC,SAAS,CAAC;YACxE,cAAc,CAAC,kBAAkB,CAAC,GAAG,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC,gBAAgB,CAAC,CAAC,CAAC,SAAS,CAAC;YAC9E,cAAc,CAAC,aAAa,CAAC,GAAG,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC,WAAW,CAAC,CAAC,CAAC,SAAS,CAAC;YACpE,cAAc,CAAC,gBAAgB,CAAC,GAAG,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC,cAAc,CAAC,CAAC,CAAC,SAAS,CAAC;YAC1E,cAAc,CAAC,UAAU,CAAC,GAAG,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC,QAAQ,CAAC,CAAC,CAAC,SAAS,CAAC;YAC9D,cAAc,CAAC,WAAW,CAAC,GAAG,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC,SAAS,CAAC,CAAC,CAAC,SAAS,CAAC;YAChE,cAAc,CAAC,YAAY,CAAC,GAAG,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC,UAAU,CAAC,CAAC,CAAC,SAAS,CAAC;YAClE,cAAc,CAAC,YAAY,CAAC,GAAG,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC,UAAU,CAAC,CAAC,CAAC,SAAS,CAAC;YAClE,cAAc,CAAC,UAAU,CAAC,GAAG,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC,QAAQ,CAAC,CAAC,CAAC,SAAS,CAAC;YAC9D,cAAc,CAAC,gBAAgB,CAAC,GAAG,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC,cAAc,CAAC,CAAC,CAAC,SAAS,CAAC;YAC1E,cAAc,CAAC,WAAW,CAAC,GAAG,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC,SAAS,CAAC,CAAC,CAAC,SAAS,CAAC;YAChE,cAAc,CAAC,cAAc,CAAC,GAAG,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC,YAAY,CAAC,CAAC,CAAC,SAAS,CAAC;YACtE,cAAc,CAAC,cAAc,CAAC,GAAG,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC,YAAY,CAAC,CAAC,CAAC,SAAS,CAAC;YACtE,cAAc,CAAC,eAAe,CAAC,GAAG,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC,aAAa,CAAC,CAAC,CAAC,SAAS,CAAC;Y
ACxE,cAAc,CAAC,aAAa,CAAC,GAAG,SAAS,CAAC,OAAO,CAAC;YAClD,cAAc,CAAC,OAAO,CAAC,GAAG,SAAS,CAAC,OAAO,CAAC;YAC5C,cAAc,CAAC,KAAK,CAAC,GAAG,SAAS,CAAC,OAAO,CAAC;SAC7C;QACD,IAAI,GAAG,MAAM,CAAC,YAAY,CAAC,SAAS,CAAC,oBAAoB,EAAE,EAAE,IAAI,CAAC,CAAC;QACnE,KAAK,CAAC,OAAO,CAAC,YAAY,EAAE,IAAI,EAAE,cAAc,EAAE,IAAI,CAAC,CAAC;IAC5D,CAAC;IA1MD;;;;;;;;OAQG;IACI,MAAM,CAAC,GAAG,CAAC,IAAY,EAAE,EAA2B,EAAE,KAAoB,EAAE,IAAmC;QAClH,OAAO,IAAI,OAAO,CAAC,IAAI,EAAO,KAAK,kCAAO,IAAI,KAAE,EAAE,EAAE,EAAE,IAAG,CAAC;IAC9D,CAAC;IAKD;;;OAGG;IACI,MAAM,CAAC,UAAU,CAAC,GAAQ;QAC7B,IAAI,GAAG,KAAK,SAAS,IAAI,GAAG,KAAK,IAAI,EAAE;YACnC,OAAO,KAAK,CAAC;SAChB;QACD,OAAO,GAAG,CAAC,cAAc,CAAC,KAAK,OAAO,CAAC,YAAY,CAAC;IACxD,CAAC;;AA1BL,0BA4MC;AA9LG,gBAAgB;AACO,oBAAY,GAAG,kCAAkC,CAAC"}
|