@pulumi/databricks 1.47.0-alpha.1721236148 → 1.47.0-alpha.1721667289

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/types/output.d.ts CHANGED
@@ -584,7 +584,7 @@ export interface GetClusterClusterInfo {
   * The exact name of the cluster to search
   */
  clusterName?: string;
- clusterSource: string;
+ clusterSource?: string;
  creatorUserName?: string;
  /**
   * Additional tags for cluster resources.
@@ -596,7 +596,7 @@ export interface GetClusterClusterInfo {
   * Security features of the cluster. Unity Catalog requires `SINGLE_USER` or `USER_ISOLATION` mode. `LEGACY_PASSTHROUGH` for passthrough cluster and `LEGACY_TABLE_ACL` for Table ACL cluster. Default to `NONE`, i.e. no security feature enabled.
   */
  dataSecurityMode?: string;
- defaultTags: {
+ defaultTags?: {
  [key: string]: any;
  };
  dockerImage?: outputs.GetClusterClusterInfoDockerImage;
@@ -604,7 +604,7 @@ export interface GetClusterClusterInfo {
  /**
   * similar to `instancePoolId`, but for driver node.
   */
- driverInstancePoolId: string;
+ driverInstancePoolId?: string;
  /**
   * The node type of the Spark driver.
   */
@@ -625,7 +625,7 @@ export interface GetClusterClusterInfo {
   */
  instancePoolId?: string;
  jdbcPort?: number;
- lastActivityTime?: number;
+ lastRestartedTime?: number;
  lastStateLossTime?: number;
  /**
   * Any supported databricks.getNodeType id.
@@ -660,16 +660,18 @@ export interface GetClusterClusterInfo {
  /**
   * [Runtime version](https://docs.databricks.com/runtime/index.html) of the cluster.
   */
- sparkVersion: string;
+ sparkVersion?: string;
+ spec?: outputs.GetClusterClusterInfoSpec;
  /**
   * SSH public key contents that will be added to each Spark node in this cluster.
   */
  sshPublicKeys?: string[];
  startTime?: number;
- state: string;
+ state?: string;
  stateMessage?: string;
- terminateTime?: number;
+ terminatedTime?: number;
  terminationReason?: outputs.GetClusterClusterInfoTerminationReason;
+ workloadType?: outputs.GetClusterClusterInfoWorkloadType;
  }
  export interface GetClusterClusterInfoAutoscale {
  maxWorkers?: number;
@@ -678,7 +680,9 @@ export interface GetClusterClusterInfoAutoscale {
  export interface GetClusterClusterInfoAwsAttributes {
  availability?: string;
  ebsVolumeCount?: number;
+ ebsVolumeIops?: number;
  ebsVolumeSize?: number;
+ ebsVolumeThroughput?: number;
  ebsVolumeType?: string;
  firstOnDemand?: number;
  instanceProfileArn?: string;
@@ -688,8 +692,13 @@ export interface GetClusterClusterInfoAwsAttributes {
  export interface GetClusterClusterInfoAzureAttributes {
  availability?: string;
  firstOnDemand?: number;
+ logAnalyticsInfo?: outputs.GetClusterClusterInfoAzureAttributesLogAnalyticsInfo;
  spotBidMaxPrice?: number;
  }
+ export interface GetClusterClusterInfoAzureAttributesLogAnalyticsInfo {
+ logAnalyticsPrimaryKey?: string;
+ logAnalyticsWorkspaceId?: string;
+ }
  export interface GetClusterClusterInfoClusterLogConf {
  dbfs?: outputs.GetClusterClusterInfoClusterLogConfDbfs;
  s3?: outputs.GetClusterClusterInfoClusterLogConfS3;
@@ -712,11 +721,11 @@ export interface GetClusterClusterInfoClusterLogStatus {
  }
  export interface GetClusterClusterInfoDockerImage {
  basicAuth?: outputs.GetClusterClusterInfoDockerImageBasicAuth;
- url: string;
+ url?: string;
  }
  export interface GetClusterClusterInfoDockerImageBasicAuth {
- password: string;
- username: string;
+ password?: string;
+ username?: string;
  }
  export interface GetClusterClusterInfoDriver {
  hostPrivateIp?: string;
@@ -786,6 +795,233 @@ export interface GetClusterClusterInfoInitScriptVolumes {
  export interface GetClusterClusterInfoInitScriptWorkspace {
  destination: string;
  }
+ export interface GetClusterClusterInfoSpec {
+ applyPolicyDefaultValues?: boolean;
+ autoscale?: outputs.GetClusterClusterInfoSpecAutoscale;
+ awsAttributes?: outputs.GetClusterClusterInfoSpecAwsAttributes;
+ azureAttributes?: outputs.GetClusterClusterInfoSpecAzureAttributes;
+ /**
+ * The id of the cluster
+ */
+ clusterId: string;
+ clusterLogConf?: outputs.GetClusterClusterInfoSpecClusterLogConf;
+ clusterMountInfos?: outputs.GetClusterClusterInfoSpecClusterMountInfo[];
+ /**
+ * The exact name of the cluster to search
+ */
+ clusterName?: string;
+ /**
+ * Additional tags for cluster resources.
+ */
+ customTags?: {
+ [key: string]: any;
+ };
+ /**
+ * Security features of the cluster. Unity Catalog requires `SINGLE_USER` or `USER_ISOLATION` mode. `LEGACY_PASSTHROUGH` for passthrough cluster and `LEGACY_TABLE_ACL` for Table ACL cluster. Default to `NONE`, i.e. no security feature enabled.
+ */
+ dataSecurityMode?: string;
+ dockerImage?: outputs.GetClusterClusterInfoSpecDockerImage;
+ /**
+ * similar to `instancePoolId`, but for driver node.
+ */
+ driverInstancePoolId: string;
+ /**
+ * The node type of the Spark driver.
+ */
+ driverNodeTypeId: string;
+ /**
+ * Use autoscaling local storage.
+ */
+ enableElasticDisk: boolean;
+ /**
+ * Enable local disk encryption.
+ */
+ enableLocalDiskEncryption: boolean;
+ gcpAttributes?: outputs.GetClusterClusterInfoSpecGcpAttributes;
+ /**
+ * An optional token to guarantee the idempotency of cluster creation requests.
+ */
+ idempotencyToken?: string;
+ initScripts?: outputs.GetClusterClusterInfoSpecInitScript[];
+ /**
+ * The pool of idle instances the cluster is attached to.
+ */
+ instancePoolId?: string;
+ libraries?: outputs.GetClusterClusterInfoSpecLibrary[];
+ /**
+ * Any supported databricks.getNodeType id.
+ */
+ nodeTypeId: string;
+ numWorkers?: number;
+ /**
+ * Identifier of Cluster Policy to validate cluster and preset certain defaults.
+ */
+ policyId?: string;
+ /**
+ * The type of runtime of the cluster
+ */
+ runtimeEngine?: string;
+ /**
+ * The optional user name of the user to assign to an interactive cluster. This field is required when using standard AAD Passthrough for Azure Data Lake Storage (ADLS) with a single-user cluster (i.e., not high-concurrency clusters).
+ */
+ singleUserName?: string;
+ /**
+ * Map with key-value pairs to fine-tune Spark clusters.
+ */
+ sparkConf?: {
+ [key: string]: any;
+ };
+ /**
+ * Map with environment variable key-value pairs to fine-tune Spark clusters. Key-value pairs of the form (X,Y) are exported (i.e., X='Y') while launching the driver and workers.
+ */
+ sparkEnvVars?: {
+ [key: string]: any;
+ };
+ /**
+ * [Runtime version](https://docs.databricks.com/runtime/index.html) of the cluster.
+ */
+ sparkVersion: string;
+ /**
+ * SSH public key contents that will be added to each Spark node in this cluster.
+ */
+ sshPublicKeys?: string[];
+ workloadType?: outputs.GetClusterClusterInfoSpecWorkloadType;
+ }
+ export interface GetClusterClusterInfoSpecAutoscale {
+ maxWorkers?: number;
+ minWorkers?: number;
+ }
+ export interface GetClusterClusterInfoSpecAwsAttributes {
+ availability?: string;
+ ebsVolumeCount?: number;
+ ebsVolumeIops?: number;
+ ebsVolumeSize?: number;
+ ebsVolumeThroughput?: number;
+ ebsVolumeType?: string;
+ firstOnDemand?: number;
+ instanceProfileArn?: string;
+ spotBidPricePercent?: number;
+ zoneId?: string;
+ }
+ export interface GetClusterClusterInfoSpecAzureAttributes {
+ availability?: string;
+ firstOnDemand?: number;
+ logAnalyticsInfo?: outputs.GetClusterClusterInfoSpecAzureAttributesLogAnalyticsInfo;
+ spotBidMaxPrice?: number;
+ }
+ export interface GetClusterClusterInfoSpecAzureAttributesLogAnalyticsInfo {
+ logAnalyticsPrimaryKey?: string;
+ logAnalyticsWorkspaceId?: string;
+ }
+ export interface GetClusterClusterInfoSpecClusterLogConf {
+ dbfs?: outputs.GetClusterClusterInfoSpecClusterLogConfDbfs;
+ s3?: outputs.GetClusterClusterInfoSpecClusterLogConfS3;
+ }
+ export interface GetClusterClusterInfoSpecClusterLogConfDbfs {
+ destination: string;
+ }
+ export interface GetClusterClusterInfoSpecClusterLogConfS3 {
+ cannedAcl?: string;
+ destination: string;
+ enableEncryption?: boolean;
+ encryptionType?: string;
+ endpoint?: string;
+ kmsKey?: string;
+ region?: string;
+ }
+ export interface GetClusterClusterInfoSpecClusterMountInfo {
+ localMountDirPath: string;
+ networkFilesystemInfo: outputs.GetClusterClusterInfoSpecClusterMountInfoNetworkFilesystemInfo;
+ remoteMountDirPath?: string;
+ }
+ export interface GetClusterClusterInfoSpecClusterMountInfoNetworkFilesystemInfo {
+ mountOptions?: string;
+ serverAddress: string;
+ }
+ export interface GetClusterClusterInfoSpecDockerImage {
+ basicAuth?: outputs.GetClusterClusterInfoSpecDockerImageBasicAuth;
+ url: string;
+ }
+ export interface GetClusterClusterInfoSpecDockerImageBasicAuth {
+ password: string;
+ username: string;
+ }
+ export interface GetClusterClusterInfoSpecGcpAttributes {
+ availability?: string;
+ bootDiskSize?: number;
+ googleServiceAccount?: string;
+ localSsdCount?: number;
+ usePreemptibleExecutors?: boolean;
+ zoneId?: string;
+ }
+ export interface GetClusterClusterInfoSpecInitScript {
+ abfss?: outputs.GetClusterClusterInfoSpecInitScriptAbfss;
+ /**
+ * @deprecated For init scripts use 'volumes', 'workspace' or cloud storage location instead of 'dbfs'.
+ */
+ dbfs?: outputs.GetClusterClusterInfoSpecInitScriptDbfs;
+ file?: outputs.GetClusterClusterInfoSpecInitScriptFile;
+ gcs?: outputs.GetClusterClusterInfoSpecInitScriptGcs;
+ s3?: outputs.GetClusterClusterInfoSpecInitScriptS3;
+ volumes?: outputs.GetClusterClusterInfoSpecInitScriptVolumes;
+ workspace?: outputs.GetClusterClusterInfoSpecInitScriptWorkspace;
+ }
+ export interface GetClusterClusterInfoSpecInitScriptAbfss {
+ destination: string;
+ }
+ export interface GetClusterClusterInfoSpecInitScriptDbfs {
+ destination: string;
+ }
+ export interface GetClusterClusterInfoSpecInitScriptFile {
+ destination: string;
+ }
+ export interface GetClusterClusterInfoSpecInitScriptGcs {
+ destination: string;
+ }
+ export interface GetClusterClusterInfoSpecInitScriptS3 {
+ cannedAcl?: string;
+ destination: string;
+ enableEncryption?: boolean;
+ encryptionType?: string;
+ endpoint?: string;
+ kmsKey?: string;
+ region?: string;
+ }
+ export interface GetClusterClusterInfoSpecInitScriptVolumes {
+ destination: string;
+ }
+ export interface GetClusterClusterInfoSpecInitScriptWorkspace {
+ destination: string;
+ }
+ export interface GetClusterClusterInfoSpecLibrary {
+ cran?: outputs.GetClusterClusterInfoSpecLibraryCran;
+ egg?: string;
+ jar?: string;
+ maven?: outputs.GetClusterClusterInfoSpecLibraryMaven;
+ pypi?: outputs.GetClusterClusterInfoSpecLibraryPypi;
+ requirements?: string;
+ whl?: string;
+ }
+ export interface GetClusterClusterInfoSpecLibraryCran {
+ package: string;
+ repo?: string;
+ }
+ export interface GetClusterClusterInfoSpecLibraryMaven {
+ coordinates: string;
+ exclusions?: string[];
+ repo?: string;
+ }
+ export interface GetClusterClusterInfoSpecLibraryPypi {
+ package: string;
+ repo?: string;
+ }
+ export interface GetClusterClusterInfoSpecWorkloadType {
+ clients: outputs.GetClusterClusterInfoSpecWorkloadTypeClients;
+ }
+ export interface GetClusterClusterInfoSpecWorkloadTypeClients {
+ jobs?: boolean;
+ notebooks?: boolean;
+ }
  export interface GetClusterClusterInfoTerminationReason {
  code?: string;
  parameters?: {
@@ -2347,6 +2590,87 @@ export interface GetNotebookPathsNotebookPathList {
   */
  path?: string;
  }
+ export interface GetSchemaSchemaInfo {
+ /**
+ * indicates whether the principal is limited to retrieving metadata for the schema through the BROWSE privilege.
+ */
+ browseOnly?: boolean;
+ /**
+ * the name of the catalog where the schema is.
+ */
+ catalogName?: string;
+ /**
+ * the type of the parent catalog.
+ */
+ catalogType?: string;
+ /**
+ * the comment attached to the volume
+ */
+ comment?: string;
+ /**
+ * time at which this schema was created, in epoch milliseconds.
+ */
+ createdAt?: number;
+ /**
+ * username of schema creator.
+ */
+ createdBy?: string;
+ /**
+ * information about actual state of predictive optimization.
+ */
+ effectivePredictiveOptimizationFlag?: outputs.GetSchemaSchemaInfoEffectivePredictiveOptimizationFlag;
+ /**
+ * whether predictive optimization should be enabled for this object and objects under it.
+ */
+ enablePredictiveOptimization?: string;
+ /**
+ * the two-level (fully qualified) name of the schema
+ */
+ fullName?: string;
+ /**
+ * the unique identifier of the metastore
+ */
+ metastoreId?: string;
+ /**
+ * a fully qualified name of databricks_schema: *`catalog`.`schema`*
+ */
+ name?: string;
+ /**
+ * the identifier of the user who owns the schema
+ */
+ owner?: string;
+ /**
+ * map of properties set on the schema
+ */
+ properties?: {
+ [key: string]: any;
+ };
+ /**
+ * the unique identifier of the volume
+ */
+ schemaId?: string;
+ /**
+ * the storage location on the cloud.
+ */
+ storageLocation?: string;
+ /**
+ * storage root URL for managed tables within schema.
+ */
+ storageRoot?: string;
+ /**
+ * the timestamp of the last time changes were made to the schema
+ */
+ updatedAt?: number;
+ /**
+ * the identifier of the user who updated the schema last time
+ */
+ updatedBy?: string;
+ }
+ export interface GetSchemaSchemaInfoEffectivePredictiveOptimizationFlag {
+ inheritedFromName?: string;
+ inheritedFromType?: string;
+ value: string;
+ }
  export interface GetShareObject {
  addedAt: number;
  addedBy: string;
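`GetSchemaSchemaInfo` is a new output shape describing a Unity Catalog schema. A hedged sketch of reading it, assuming the matching `databricks.getSchemaOutput` data source is exported by this version; the fully qualified schema name below is hypothetical:

```typescript
import * as databricks from "@pulumi/databricks";

// Look up an existing Unity Catalog schema as "catalog.schema" (hypothetical name).
const schema = databricks.getSchemaOutput({ name: "main.analytics" });

// Every informational field is optional, so default them when exporting.
export const schemaOwner = schema.schemaInfo.apply(info => info.owner ?? "unknown");
export const schemaCreatedAt = schema.schemaInfo.apply(info => info.createdAt);
```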
@@ -2672,6 +2996,83 @@ export interface GetTableTableInfoViewDependenciesDependencyFunction {
  export interface GetTableTableInfoViewDependenciesDependencyTable {
  tableFullName: string;
  }
+ export interface GetVolumeVolumeInfo {
+ /**
+ * the AWS access point to use when accessing s3 bucket for this volume's external location
+ */
+ accessPoint?: string;
+ /**
+ * indicates whether the principal is limited to retrieving metadata for the volume through the BROWSE privilege when includeBrowse is enabled in the request.
+ */
+ browseOnly?: boolean;
+ /**
+ * the name of the catalog where the schema and the volume are
+ */
+ catalogName?: string;
+ /**
+ * the comment attached to the volume
+ */
+ comment?: string;
+ /**
+ * the Unix timestamp at the volume's creation
+ */
+ createdAt?: number;
+ /**
+ * the identifier of the user who created the volume
+ */
+ createdBy?: string;
+ /**
+ * encryption options that apply to clients connecting to cloud storage
+ */
+ encryptionDetails?: outputs.GetVolumeVolumeInfoEncryptionDetails;
+ /**
+ * the three-level (fully qualified) name of the volume
+ */
+ fullName?: string;
+ /**
+ * the unique identifier of the metastore
+ */
+ metastoreId?: string;
+ /**
+ * a fully qualified name of databricks_volume: *`catalog`.`schema`.`volume`*
+ */
+ name?: string;
+ /**
+ * the identifier of the user who owns the volume
+ */
+ owner?: string;
+ /**
+ * the name of the schema where the volume is
+ */
+ schemaName?: string;
+ /**
+ * the storage location on the cloud
+ */
+ storageLocation?: string;
+ /**
+ * the timestamp of the last time changes were made to the volume
+ */
+ updatedAt?: number;
+ /**
+ * the identifier of the user who updated the volume last time
+ */
+ updatedBy?: string;
+ /**
+ * the unique identifier of the volume
+ */
+ volumeId?: string;
+ /**
+ * whether the volume is `MANAGED` or `EXTERNAL`
+ */
+ volumeType?: string;
+ }
+ export interface GetVolumeVolumeInfoEncryptionDetails {
+ sseEncryptionDetails?: outputs.GetVolumeVolumeInfoEncryptionDetailsSseEncryptionDetails;
+ }
+ export interface GetVolumeVolumeInfoEncryptionDetailsSseEncryptionDetails {
+ algorithm?: string;
+ awsKmsKeyArn?: string;
+ }
  export interface GrantsGrant {
  principal: string;
  privileges: string[];
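`GetVolumeVolumeInfo` mirrors the Unity Catalog volumes API in the same way. A similar sketch, assuming a matching `databricks.getVolumeOutput` data source exists in this release; the volume name is hypothetical:

```typescript
import * as databricks from "@pulumi/databricks";

// Look up a volume as "catalog.schema.volume" (hypothetical name).
const volume = databricks.getVolumeOutput({ name: "main.analytics.raw_files" });

// `volumeType` distinguishes MANAGED from EXTERNAL volumes; `storageLocation`
// is only meaningful for external volumes, so both stay optional.
export const volumeType = volume.volumeInfo.apply(info => info.volumeType);
export const volumeLocation = volume.volumeInfo.apply(info => info.storageLocation);
```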
@@ -2850,11 +3251,23 @@ export interface JobEmailNotifications {
  onSuccesses?: string[];
  }
  export interface JobEnvironment {
+ /**
+ * an unique identifier of the Environment. It will be referenced from `environmentKey` attribute of corresponding task.
+ */
  environmentKey: string;
+ /**
+ * block describing the Environment. Consists of following attributes:
+ */
  spec?: outputs.JobEnvironmentSpec;
  }
  export interface JobEnvironmentSpec {
+ /**
+ * client version used by the environment.
+ */
  client: string;
+ /**
+ * List of pip dependencies, as supported by the version of pip in this environment. Each dependency is a pip requirement file line. See [API docs](https://docs.databricks.com/api/workspace/jobs/create#environments-spec-dependencies) for more information.
+ */
  dependencies?: string[];
  }
  export interface JobGitSource {
@@ -3487,6 +3900,9 @@ export interface JobTask {
   * (List) An optional set of email addresses notified when this task begins, completes or fails. The default behavior is to not send any emails. This field is a block and is documented below.
   */
  emailNotifications?: outputs.JobTaskEmailNotifications;
+ /**
+ * identifier of an `environment` block that is used to specify libraries. Required for some tasks (`sparkPythonTask`, `pythonWheelTask`, ...) running on serverless compute.
+ */
  environmentKey?: string;
  /**
   * Identifier of the interactive cluster to run job on. *Note: running tasks on interactive clusters may lead to increased costs!*
@@ -3665,6 +4081,9 @@ export interface JobTaskForEachTaskTask {
   * (List) An optional set of email addresses notified when this task begins, completes or fails. The default behavior is to not send any emails. This field is a block and is documented below.
   */
  emailNotifications?: outputs.JobTaskForEachTaskTaskEmailNotifications;
+ /**
+ * identifier of an `environment` block that is used to specify libraries. Required for some tasks (`sparkPythonTask`, `pythonWheelTask`, ...) running on serverless compute.
+ */
  environmentKey?: string;
  /**
   * Identifier of the interactive cluster to run job on. *Note: running tasks on interactive clusters may lead to increased costs!*
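The new doc comments explain that an `environments` block carries pip dependencies for serverless jobs and is referenced from a task via `environmentKey`. A hedged sketch of wiring the two together on a `databricks.Job`; the job name, task key, dependency pin, and notebook/file path are illustrative only:

```typescript
import * as databricks from "@pulumi/databricks";

// Illustrative job: a Python task on serverless compute that takes its
// libraries from an environment block referenced via `environmentKey`.
const job = new databricks.Job("serverless-example", {
    name: "serverless-example",
    environments: [{
        environmentKey: "default",
        spec: {
            client: "1",
            // Each entry is a pip requirement file line.
            dependencies: ["requests==2.32.3"],
        },
    }],
    tasks: [{
        taskKey: "ingest",
        environmentKey: "default",
        sparkPythonTask: {
            pythonFile: "/Workspace/Users/someone@example.com/ingest.py",
        },
    }],
});
```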
@@ -5096,7 +5515,7 @@ export interface LakehouseMonitorSchedule {
  /**
   * optional string field that indicates whether a schedule is paused (`PAUSED`) or not (`UNPAUSED`).
   */
- pauseStatus?: string;
+ pauseStatus: string;
  /**
   * string expression that determines when to run the monitor. See [Quartz documentation](https://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/crontrigger.html) for examples.
   */
@@ -5219,7 +5638,7 @@ export interface ModelServingConfigAutoCaptureConfig {
  /**
   * If inference tables are enabled or not. NOTE: If you have already disabled payload logging once, you cannot enable again.
   */
- enabled?: boolean;
+ enabled: boolean;
  /**
   * The name of the schema in Unity Catalog. NOTE: On update, you cannot change the schema name if it was already set.
   */
@@ -5227,7 +5646,7 @@ export interface ModelServingConfigAutoCaptureConfig {
  /**
   * The prefix of the table in Unity Catalog. NOTE: On update, you cannot change the prefix name if it was already set.
   */
- tableNamePrefix?: string;
+ tableNamePrefix: string;
  }
  export interface ModelServingConfigServedEntity {
  /**
@@ -5435,7 +5854,7 @@ export interface ModelServingConfigServedModel {
  /**
   * The workload type of the served model. The workload type selects which type of compute to use in the endpoint. For deep learning workloads, GPU acceleration is available by selecting workload types like `GPU_SMALL` and others. See documentation for all options. The default value is `CPU`.
   */
- workloadType?: string;
+ workloadType: string;
  }
  export interface ModelServingConfigTrafficConfig {
  /**
@@ -5530,25 +5949,49 @@ export interface MwsCustomerManagedKeysGcpKeyInfo {
  kmsKeyId: string;
  }
  export interface MwsNetworkConnectivityConfigEgressConfig {
+ /**
+ * block describing network connectivity rules that are applied by default without resource specific configurations. Consists of the following fields:
+ */
  defaultRules?: outputs.MwsNetworkConnectivityConfigEgressConfigDefaultRules;
+ /**
+ * block describing network connectivity rules that configured for each destinations. These rules override default rules. Consists of the following fields:
+ */
  targetRules?: outputs.MwsNetworkConnectivityConfigEgressConfigTargetRules;
  }
  export interface MwsNetworkConnectivityConfigEgressConfigDefaultRules {
+ /**
+ * (AWS only) - block with information about stable AWS IP CIDR blocks. You can use these to configure the firewall of your resources to allow traffic from your Databricks workspace. Consists of the following fields:
+ */
  awsStableIpRule?: outputs.MwsNetworkConnectivityConfigEgressConfigDefaultRulesAwsStableIpRule;
  /**
- * This provides a list of subnets. These subnets need to be allowed in your Azure resources in order for Databricks to access. See `default_rules.azure_service_endpoint_rule.target_services` for the supported Azure services.
+ * (Azure only) - block with information about stable Azure service endpoints. You can configure the firewall of your Azure resources to allow traffic from your Databricks serverless compute resources. Consists of the following fields:
   */
  azureServiceEndpointRule?: outputs.MwsNetworkConnectivityConfigEgressConfigDefaultRulesAzureServiceEndpointRule;
  }
  export interface MwsNetworkConnectivityConfigEgressConfigDefaultRulesAwsStableIpRule {
+ /**
+ * list of IP CIDR blocks.
+ */
  cidrBlocks?: string[];
  }
  export interface MwsNetworkConnectivityConfigEgressConfigDefaultRulesAzureServiceEndpointRule {
+ /**
+ * list of subnets from which Databricks network traffic originates when accessing your Azure resources.
+ */
  subnets?: string[];
+ /**
+ * the Azure region in which this service endpoint rule applies.
+ */
  targetRegion?: string;
+ /**
+ * the Azure services to which this service endpoint rule applies to.
+ */
  targetServices?: string[];
  }
  export interface MwsNetworkConnectivityConfigEgressConfigTargetRules {
+ /**
+ * (Azure only) - list containing information about configure Azure Private Endpoints.
+ */
  azurePrivateEndpointRules?: outputs.MwsNetworkConnectivityConfigEgressConfigTargetRulesAzurePrivateEndpointRule[];
  }
  export interface MwsNetworkConnectivityConfigEgressConfigTargetRulesAzurePrivateEndpointRule {
@@ -5989,10 +6432,7 @@ export interface QualityMonitorNotificationsOnNewClassificationTagDetected {
  emailAddresses?: string[];
  }
  export interface QualityMonitorSchedule {
- /**
- * optional string field that indicates whether a schedule is paused (`PAUSED`) or not (`UNPAUSED`).
- */
- pauseStatus?: string;
+ pauseStatus: string;
  /**
   * string expression that determines when to run the monitor. See [Quartz documentation](https://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/crontrigger.html) for examples.
   */
package/user.d.ts CHANGED
@@ -1,6 +1,6 @@
  import * as pulumi from "@pulumi/pulumi";
  /**
- * This resource allows you to manage [users in Databricks Workspace](https://docs.databricks.com/administration-guide/users-groups/users.html), [Databricks Account Console](https://accounts.cloud.databricks.com/) or [Azure Databricks Account Console](https://accounts.azuredatabricks.net). You can also associate Databricks users to databricks_group. Upon user creation the user will receive a password reset email. You can also get information about caller identity using databricks.getCurrentUser data source.
+ * This resource allows you to manage [users in Databricks Workspace](https://docs.databricks.com/administration-guide/users-groups/users.html), [Databricks Account Console](https://accounts.cloud.databricks.com/) or [Azure Databricks Account Console](https://accounts.azuredatabricks.net). You can also associate Databricks users to databricks_group. Upon user creation the user will receive a welcome email. You can also get information about caller identity using databricks.getCurrentUser data source.
  *
  * > **Note** To assign account level users to workspace use databricks_mws_permission_assignment.
  *
package/user.js CHANGED
@@ -6,7 +6,7 @@ exports.User = void 0;
  const pulumi = require("@pulumi/pulumi");
  const utilities = require("./utilities");
  /**
- * This resource allows you to manage [users in Databricks Workspace](https://docs.databricks.com/administration-guide/users-groups/users.html), [Databricks Account Console](https://accounts.cloud.databricks.com/) or [Azure Databricks Account Console](https://accounts.azuredatabricks.net). You can also associate Databricks users to databricks_group. Upon user creation the user will receive a password reset email. You can also get information about caller identity using databricks.getCurrentUser data source.
+ * This resource allows you to manage [users in Databricks Workspace](https://docs.databricks.com/administration-guide/users-groups/users.html), [Databricks Account Console](https://accounts.cloud.databricks.com/) or [Azure Databricks Account Console](https://accounts.azuredatabricks.net). You can also associate Databricks users to databricks_group. Upon user creation the user will receive a welcome email. You can also get information about caller identity using databricks.getCurrentUser data source.
  *
  * > **Note** To assign account level users to workspace use databricks_mws_permission_assignment.
  *