@pulumi/databricks 1.2.1 → 1.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (81)
  1. package/catalog.d.ts +12 -0
  2. package/catalog.js +2 -0
  3. package/catalog.js.map +1 -1
  4. package/config/vars.d.ts +1 -0
  5. package/config/vars.js +6 -0
  6. package/config/vars.js.map +1 -1
  7. package/entitlements.d.ts +183 -0
  8. package/entitlements.js +133 -0
  9. package/entitlements.js.map +1 -0
  10. package/externalLocation.d.ts +0 -2
  11. package/externalLocation.js +0 -2
  12. package/externalLocation.js.map +1 -1
  13. package/getCatalogs.d.ts +0 -1
  14. package/getCatalogs.js +0 -1
  15. package/getCatalogs.js.map +1 -1
  16. package/getJob.d.ts +80 -0
  17. package/getJob.js +46 -0
  18. package/getJob.js.map +1 -0
  19. package/getSchemas.d.ts +0 -1
  20. package/getSchemas.js +0 -1
  21. package/getSchemas.js.map +1 -1
  22. package/getTables.d.ts +0 -1
  23. package/getTables.js +0 -1
  24. package/getTables.js.map +1 -1
  25. package/getViews.d.ts +3 -4
  26. package/getViews.js +0 -1
  27. package/getViews.js.map +1 -1
  28. package/group.d.ts +0 -10
  29. package/group.js +0 -10
  30. package/group.js.map +1 -1
  31. package/groupInstanceProfile.d.ts +1 -1
  32. package/groupInstanceProfile.js +1 -1
  33. package/groupRole.d.ts +93 -0
  34. package/groupRole.js +90 -0
  35. package/groupRole.js.map +1 -0
  36. package/index.d.ts +4 -0
  37. package/index.js +16 -0
  38. package/index.js.map +1 -1
  39. package/instancePool.d.ts +3 -3
  40. package/job.d.ts +3 -0
  41. package/job.js +2 -0
  42. package/job.js.map +1 -1
  43. package/metastore.d.ts +3 -5
  44. package/metastore.js +0 -2
  45. package/metastore.js.map +1 -1
  46. package/metastoreAssignment.d.ts +0 -2
  47. package/metastoreAssignment.js +0 -2
  48. package/metastoreAssignment.js.map +1 -1
  49. package/metastoreDataAccess.d.ts +0 -2
  50. package/metastoreDataAccess.js +0 -2
  51. package/metastoreDataAccess.js.map +1 -1
  52. package/mwsPrivateAccessSettings.d.ts +3 -3
  53. package/package.json +2 -2
  54. package/package.json.dev +2 -2
  55. package/pipeline.d.ts +3 -3
  56. package/provider.d.ts +2 -0
  57. package/provider.js +1 -0
  58. package/provider.js.map +1 -1
  59. package/recipient.d.ts +146 -0
  60. package/recipient.js +98 -0
  61. package/recipient.js.map +1 -0
  62. package/schema.d.ts +12 -2
  63. package/schema.js +2 -2
  64. package/schema.js.map +1 -1
  65. package/servicePrincipal.d.ts +0 -4
  66. package/servicePrincipal.js +0 -4
  67. package/servicePrincipal.js.map +1 -1
  68. package/sqlQuery.d.ts +0 -65
  69. package/sqlQuery.js +0 -65
  70. package/sqlQuery.js.map +1 -1
  71. package/sqlVisualization.d.ts +0 -46
  72. package/sqlVisualization.js +0 -46
  73. package/sqlVisualization.js.map +1 -1
  74. package/storageCredential.d.ts +0 -2
  75. package/storageCredential.js +0 -2
  76. package/storageCredential.js.map +1 -1
  77. package/types/input.d.ts +1442 -227
  78. package/types/output.d.ts +656 -15
  79. package/user.d.ts +0 -4
  80. package/user.js +0 -4
  81. package/user.js.map +1 -1
package/types/output.d.ts CHANGED
@@ -310,6 +310,580 @@ export interface GetDbfsFilePathsPathList {
310
310
  */
311
311
  path?: string;
312
312
  }
313
+ export interface GetJobJobSettings {
314
+ createdTime?: number;
315
+ creatorUserName?: string;
316
+ /**
317
+ * the id of databricks.Job if the resource was matched by name.
318
+ */
319
+ jobId?: number;
320
+ settings?: outputs.GetJobJobSettingsSettings;
321
+ }
322
+ export interface GetJobJobSettingsSettings {
323
+ dbtTask?: outputs.GetJobJobSettingsSettingsDbtTask;
324
+ emailNotifications?: outputs.GetJobJobSettingsSettingsEmailNotifications;
325
+ existingClusterId?: string;
326
+ format: string;
327
+ gitSource?: outputs.GetJobJobSettingsSettingsGitSource;
328
+ jobClusters?: outputs.GetJobJobSettingsSettingsJobCluster[];
329
+ libraries?: outputs.GetJobJobSettingsSettingsLibrary[];
330
+ maxConcurrentRuns?: number;
331
+ maxRetries?: number;
332
+ minRetryIntervalMillis?: number;
333
+ name?: string;
334
+ newCluster?: outputs.GetJobJobSettingsSettingsNewCluster;
335
+ notebookTask?: outputs.GetJobJobSettingsSettingsNotebookTask;
336
+ pipelineTask?: outputs.GetJobJobSettingsSettingsPipelineTask;
337
+ pythonWheelTask?: outputs.GetJobJobSettingsSettingsPythonWheelTask;
338
+ retryOnTimeout?: boolean;
339
+ schedule?: outputs.GetJobJobSettingsSettingsSchedule;
340
+ sparkJarTask?: outputs.GetJobJobSettingsSettingsSparkJarTask;
341
+ sparkPythonTask?: outputs.GetJobJobSettingsSettingsSparkPythonTask;
342
+ sparkSubmitTask?: outputs.GetJobJobSettingsSettingsSparkSubmitTask;
343
+ tags?: {
344
+ [key: string]: any;
345
+ };
346
+ tasks?: outputs.GetJobJobSettingsSettingsTask[];
347
+ timeoutSeconds?: number;
348
+ }
349
+ export interface GetJobJobSettingsSettingsDbtTask {
350
+ commands: string[];
351
+ profilesDirectory?: string;
352
+ projectDirectory?: string;
353
+ schema?: string;
354
+ warehouseId?: string;
355
+ }
356
+ export interface GetJobJobSettingsSettingsEmailNotifications {
357
+ alertOnLastAttempt?: boolean;
358
+ noAlertForSkippedRuns?: boolean;
359
+ onFailures?: string[];
360
+ onStarts?: string[];
361
+ onSuccesses?: string[];
362
+ }
363
+ export interface GetJobJobSettingsSettingsGitSource {
364
+ branch?: string;
365
+ commit?: string;
366
+ provider?: string;
367
+ tag?: string;
368
+ url: string;
369
+ }
370
+ export interface GetJobJobSettingsSettingsJobCluster {
371
+ jobClusterKey?: string;
372
+ newCluster?: outputs.GetJobJobSettingsSettingsJobClusterNewCluster;
373
+ }
374
+ export interface GetJobJobSettingsSettingsJobClusterNewCluster {
375
+ applyPolicyDefaultValues?: boolean;
376
+ autoscale?: outputs.GetJobJobSettingsSettingsJobClusterNewClusterAutoscale;
377
+ autoterminationMinutes?: number;
378
+ awsAttributes?: outputs.GetJobJobSettingsSettingsJobClusterNewClusterAwsAttributes;
379
+ azureAttributes?: outputs.GetJobJobSettingsSettingsJobClusterNewClusterAzureAttributes;
380
+ clusterId?: string;
381
+ clusterLogConf?: outputs.GetJobJobSettingsSettingsJobClusterNewClusterClusterLogConf;
382
+ clusterName?: string;
383
+ customTags?: {
384
+ [key: string]: any;
385
+ };
386
+ dataSecurityMode?: string;
387
+ dockerImage?: outputs.GetJobJobSettingsSettingsJobClusterNewClusterDockerImage;
388
+ driverInstancePoolId: string;
389
+ driverNodeTypeId: string;
390
+ enableElasticDisk: boolean;
391
+ enableLocalDiskEncryption: boolean;
392
+ gcpAttributes?: outputs.GetJobJobSettingsSettingsJobClusterNewClusterGcpAttributes;
393
+ idempotencyToken?: string;
394
+ initScripts?: outputs.GetJobJobSettingsSettingsJobClusterNewClusterInitScript[];
395
+ instancePoolId?: string;
396
+ nodeTypeId: string;
397
+ numWorkers: number;
398
+ policyId?: string;
399
+ singleUserName?: string;
400
+ sparkConf?: {
401
+ [key: string]: any;
402
+ };
403
+ sparkEnvVars?: {
404
+ [key: string]: any;
405
+ };
406
+ sparkVersion: string;
407
+ sshPublicKeys?: string[];
408
+ workloadType?: outputs.GetJobJobSettingsSettingsJobClusterNewClusterWorkloadType;
409
+ }
410
+ export interface GetJobJobSettingsSettingsJobClusterNewClusterAutoscale {
411
+ maxWorkers?: number;
412
+ minWorkers?: number;
413
+ }
414
+ export interface GetJobJobSettingsSettingsJobClusterNewClusterAwsAttributes {
415
+ availability?: string;
416
+ ebsVolumeCount?: number;
417
+ ebsVolumeSize?: number;
418
+ ebsVolumeType?: string;
419
+ firstOnDemand?: number;
420
+ instanceProfileArn?: string;
421
+ spotBidPricePercent?: number;
422
+ zoneId?: string;
423
+ }
424
+ export interface GetJobJobSettingsSettingsJobClusterNewClusterAzureAttributes {
425
+ availability?: string;
426
+ firstOnDemand?: number;
427
+ spotBidMaxPrice?: number;
428
+ }
429
+ export interface GetJobJobSettingsSettingsJobClusterNewClusterClusterLogConf {
430
+ dbfs?: outputs.GetJobJobSettingsSettingsJobClusterNewClusterClusterLogConfDbfs;
431
+ s3?: outputs.GetJobJobSettingsSettingsJobClusterNewClusterClusterLogConfS3;
432
+ }
433
+ export interface GetJobJobSettingsSettingsJobClusterNewClusterClusterLogConfDbfs {
434
+ destination: string;
435
+ }
436
+ export interface GetJobJobSettingsSettingsJobClusterNewClusterClusterLogConfS3 {
437
+ cannedAcl?: string;
438
+ destination: string;
439
+ enableEncryption?: boolean;
440
+ encryptionType?: string;
441
+ endpoint?: string;
442
+ kmsKey?: string;
443
+ region?: string;
444
+ }
445
+ export interface GetJobJobSettingsSettingsJobClusterNewClusterDockerImage {
446
+ basicAuth?: outputs.GetJobJobSettingsSettingsJobClusterNewClusterDockerImageBasicAuth;
447
+ url: string;
448
+ }
449
+ export interface GetJobJobSettingsSettingsJobClusterNewClusterDockerImageBasicAuth {
450
+ password: string;
451
+ username: string;
452
+ }
453
+ export interface GetJobJobSettingsSettingsJobClusterNewClusterGcpAttributes {
454
+ availability?: string;
455
+ bootDiskSize?: number;
456
+ googleServiceAccount?: string;
457
+ usePreemptibleExecutors?: boolean;
458
+ zoneId?: string;
459
+ }
460
+ export interface GetJobJobSettingsSettingsJobClusterNewClusterInitScript {
461
+ dbfs?: outputs.GetJobJobSettingsSettingsJobClusterNewClusterInitScriptDbfs;
462
+ file?: outputs.GetJobJobSettingsSettingsJobClusterNewClusterInitScriptFile;
463
+ gcs?: outputs.GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcs;
464
+ s3?: outputs.GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3;
465
+ }
466
+ export interface GetJobJobSettingsSettingsJobClusterNewClusterInitScriptDbfs {
467
+ destination: string;
468
+ }
469
+ export interface GetJobJobSettingsSettingsJobClusterNewClusterInitScriptFile {
470
+ destination?: string;
471
+ }
472
+ export interface GetJobJobSettingsSettingsJobClusterNewClusterInitScriptGcs {
473
+ destination?: string;
474
+ }
475
+ export interface GetJobJobSettingsSettingsJobClusterNewClusterInitScriptS3 {
476
+ cannedAcl?: string;
477
+ destination: string;
478
+ enableEncryption?: boolean;
479
+ encryptionType?: string;
480
+ endpoint?: string;
481
+ kmsKey?: string;
482
+ region?: string;
483
+ }
484
+ export interface GetJobJobSettingsSettingsJobClusterNewClusterWorkloadType {
485
+ clients: outputs.GetJobJobSettingsSettingsJobClusterNewClusterWorkloadTypeClients;
486
+ }
487
+ export interface GetJobJobSettingsSettingsJobClusterNewClusterWorkloadTypeClients {
488
+ jobs?: boolean;
489
+ notebooks?: boolean;
490
+ }
491
+ export interface GetJobJobSettingsSettingsLibrary {
492
+ cran?: outputs.GetJobJobSettingsSettingsLibraryCran;
493
+ egg?: string;
494
+ jar?: string;
495
+ maven?: outputs.GetJobJobSettingsSettingsLibraryMaven;
496
+ pypi?: outputs.GetJobJobSettingsSettingsLibraryPypi;
497
+ whl?: string;
498
+ }
499
+ export interface GetJobJobSettingsSettingsLibraryCran {
500
+ package: string;
501
+ repo?: string;
502
+ }
503
+ export interface GetJobJobSettingsSettingsLibraryMaven {
504
+ coordinates: string;
505
+ exclusions?: string[];
506
+ repo?: string;
507
+ }
508
+ export interface GetJobJobSettingsSettingsLibraryPypi {
509
+ package: string;
510
+ repo?: string;
511
+ }
512
+ export interface GetJobJobSettingsSettingsNewCluster {
513
+ applyPolicyDefaultValues?: boolean;
514
+ autoscale?: outputs.GetJobJobSettingsSettingsNewClusterAutoscale;
515
+ autoterminationMinutes?: number;
516
+ awsAttributes?: outputs.GetJobJobSettingsSettingsNewClusterAwsAttributes;
517
+ azureAttributes?: outputs.GetJobJobSettingsSettingsNewClusterAzureAttributes;
518
+ clusterId?: string;
519
+ clusterLogConf?: outputs.GetJobJobSettingsSettingsNewClusterClusterLogConf;
520
+ clusterName?: string;
521
+ customTags?: {
522
+ [key: string]: any;
523
+ };
524
+ dataSecurityMode?: string;
525
+ dockerImage?: outputs.GetJobJobSettingsSettingsNewClusterDockerImage;
526
+ driverInstancePoolId: string;
527
+ driverNodeTypeId: string;
528
+ enableElasticDisk: boolean;
529
+ enableLocalDiskEncryption: boolean;
530
+ gcpAttributes?: outputs.GetJobJobSettingsSettingsNewClusterGcpAttributes;
531
+ idempotencyToken?: string;
532
+ initScripts?: outputs.GetJobJobSettingsSettingsNewClusterInitScript[];
533
+ instancePoolId?: string;
534
+ nodeTypeId: string;
535
+ numWorkers: number;
536
+ policyId?: string;
537
+ singleUserName?: string;
538
+ sparkConf?: {
539
+ [key: string]: any;
540
+ };
541
+ sparkEnvVars?: {
542
+ [key: string]: any;
543
+ };
544
+ sparkVersion: string;
545
+ sshPublicKeys?: string[];
546
+ workloadType?: outputs.GetJobJobSettingsSettingsNewClusterWorkloadType;
547
+ }
548
+ export interface GetJobJobSettingsSettingsNewClusterAutoscale {
549
+ maxWorkers?: number;
550
+ minWorkers?: number;
551
+ }
552
+ export interface GetJobJobSettingsSettingsNewClusterAwsAttributes {
553
+ availability?: string;
554
+ ebsVolumeCount?: number;
555
+ ebsVolumeSize?: number;
556
+ ebsVolumeType?: string;
557
+ firstOnDemand?: number;
558
+ instanceProfileArn?: string;
559
+ spotBidPricePercent?: number;
560
+ zoneId?: string;
561
+ }
562
+ export interface GetJobJobSettingsSettingsNewClusterAzureAttributes {
563
+ availability?: string;
564
+ firstOnDemand?: number;
565
+ spotBidMaxPrice?: number;
566
+ }
567
+ export interface GetJobJobSettingsSettingsNewClusterClusterLogConf {
568
+ dbfs?: outputs.GetJobJobSettingsSettingsNewClusterClusterLogConfDbfs;
569
+ s3?: outputs.GetJobJobSettingsSettingsNewClusterClusterLogConfS3;
570
+ }
571
+ export interface GetJobJobSettingsSettingsNewClusterClusterLogConfDbfs {
572
+ destination: string;
573
+ }
574
+ export interface GetJobJobSettingsSettingsNewClusterClusterLogConfS3 {
575
+ cannedAcl?: string;
576
+ destination: string;
577
+ enableEncryption?: boolean;
578
+ encryptionType?: string;
579
+ endpoint?: string;
580
+ kmsKey?: string;
581
+ region?: string;
582
+ }
583
+ export interface GetJobJobSettingsSettingsNewClusterDockerImage {
584
+ basicAuth?: outputs.GetJobJobSettingsSettingsNewClusterDockerImageBasicAuth;
585
+ url: string;
586
+ }
587
+ export interface GetJobJobSettingsSettingsNewClusterDockerImageBasicAuth {
588
+ password: string;
589
+ username: string;
590
+ }
591
+ export interface GetJobJobSettingsSettingsNewClusterGcpAttributes {
592
+ availability?: string;
593
+ bootDiskSize?: number;
594
+ googleServiceAccount?: string;
595
+ usePreemptibleExecutors?: boolean;
596
+ zoneId?: string;
597
+ }
598
+ export interface GetJobJobSettingsSettingsNewClusterInitScript {
599
+ dbfs?: outputs.GetJobJobSettingsSettingsNewClusterInitScriptDbfs;
600
+ file?: outputs.GetJobJobSettingsSettingsNewClusterInitScriptFile;
601
+ gcs?: outputs.GetJobJobSettingsSettingsNewClusterInitScriptGcs;
602
+ s3?: outputs.GetJobJobSettingsSettingsNewClusterInitScriptS3;
603
+ }
604
+ export interface GetJobJobSettingsSettingsNewClusterInitScriptDbfs {
605
+ destination: string;
606
+ }
607
+ export interface GetJobJobSettingsSettingsNewClusterInitScriptFile {
608
+ destination?: string;
609
+ }
610
+ export interface GetJobJobSettingsSettingsNewClusterInitScriptGcs {
611
+ destination?: string;
612
+ }
613
+ export interface GetJobJobSettingsSettingsNewClusterInitScriptS3 {
614
+ cannedAcl?: string;
615
+ destination: string;
616
+ enableEncryption?: boolean;
617
+ encryptionType?: string;
618
+ endpoint?: string;
619
+ kmsKey?: string;
620
+ region?: string;
621
+ }
622
+ export interface GetJobJobSettingsSettingsNewClusterWorkloadType {
623
+ clients: outputs.GetJobJobSettingsSettingsNewClusterWorkloadTypeClients;
624
+ }
625
+ export interface GetJobJobSettingsSettingsNewClusterWorkloadTypeClients {
626
+ jobs?: boolean;
627
+ notebooks?: boolean;
628
+ }
629
+ export interface GetJobJobSettingsSettingsNotebookTask {
630
+ baseParameters?: {
631
+ [key: string]: any;
632
+ };
633
+ notebookPath: string;
634
+ }
635
+ export interface GetJobJobSettingsSettingsPipelineTask {
636
+ pipelineId: string;
637
+ }
638
+ export interface GetJobJobSettingsSettingsPythonWheelTask {
639
+ entryPoint?: string;
640
+ namedParameters?: {
641
+ [key: string]: any;
642
+ };
643
+ packageName?: string;
644
+ parameters?: string[];
645
+ }
646
+ export interface GetJobJobSettingsSettingsSchedule {
647
+ pauseStatus: string;
648
+ quartzCronExpression: string;
649
+ timezoneId: string;
650
+ }
651
+ export interface GetJobJobSettingsSettingsSparkJarTask {
652
+ jarUri?: string;
653
+ mainClassName?: string;
654
+ parameters?: string[];
655
+ }
656
+ export interface GetJobJobSettingsSettingsSparkPythonTask {
657
+ parameters?: string[];
658
+ pythonFile: string;
659
+ }
660
+ export interface GetJobJobSettingsSettingsSparkSubmitTask {
661
+ parameters?: string[];
662
+ }
663
+ export interface GetJobJobSettingsSettingsTask {
664
+ dbtTask?: outputs.GetJobJobSettingsSettingsTaskDbtTask;
665
+ dependsOns?: outputs.GetJobJobSettingsSettingsTaskDependsOn[];
666
+ description?: string;
667
+ emailNotifications?: outputs.GetJobJobSettingsSettingsTaskEmailNotifications;
668
+ existingClusterId?: string;
669
+ jobClusterKey?: string;
670
+ libraries?: outputs.GetJobJobSettingsSettingsTaskLibrary[];
671
+ maxRetries?: number;
672
+ minRetryIntervalMillis?: number;
673
+ newCluster?: outputs.GetJobJobSettingsSettingsTaskNewCluster;
674
+ notebookTask?: outputs.GetJobJobSettingsSettingsTaskNotebookTask;
675
+ pipelineTask?: outputs.GetJobJobSettingsSettingsTaskPipelineTask;
676
+ pythonWheelTask?: outputs.GetJobJobSettingsSettingsTaskPythonWheelTask;
677
+ retryOnTimeout: boolean;
678
+ sparkJarTask?: outputs.GetJobJobSettingsSettingsTaskSparkJarTask;
679
+ sparkPythonTask?: outputs.GetJobJobSettingsSettingsTaskSparkPythonTask;
680
+ sparkSubmitTask?: outputs.GetJobJobSettingsSettingsTaskSparkSubmitTask;
681
+ sqlTask?: outputs.GetJobJobSettingsSettingsTaskSqlTask;
682
+ taskKey?: string;
683
+ timeoutSeconds?: number;
684
+ }
685
+ export interface GetJobJobSettingsSettingsTaskDbtTask {
686
+ commands: string[];
687
+ profilesDirectory?: string;
688
+ projectDirectory?: string;
689
+ schema?: string;
690
+ warehouseId?: string;
691
+ }
692
+ export interface GetJobJobSettingsSettingsTaskDependsOn {
693
+ taskKey?: string;
694
+ }
695
+ export interface GetJobJobSettingsSettingsTaskEmailNotifications {
696
+ alertOnLastAttempt?: boolean;
697
+ noAlertForSkippedRuns?: boolean;
698
+ onFailures?: string[];
699
+ onStarts?: string[];
700
+ onSuccesses?: string[];
701
+ }
702
+ export interface GetJobJobSettingsSettingsTaskLibrary {
703
+ cran?: outputs.GetJobJobSettingsSettingsTaskLibraryCran;
704
+ egg?: string;
705
+ jar?: string;
706
+ maven?: outputs.GetJobJobSettingsSettingsTaskLibraryMaven;
707
+ pypi?: outputs.GetJobJobSettingsSettingsTaskLibraryPypi;
708
+ whl?: string;
709
+ }
710
+ export interface GetJobJobSettingsSettingsTaskLibraryCran {
711
+ package: string;
712
+ repo?: string;
713
+ }
714
+ export interface GetJobJobSettingsSettingsTaskLibraryMaven {
715
+ coordinates: string;
716
+ exclusions?: string[];
717
+ repo?: string;
718
+ }
719
+ export interface GetJobJobSettingsSettingsTaskLibraryPypi {
720
+ package: string;
721
+ repo?: string;
722
+ }
723
+ export interface GetJobJobSettingsSettingsTaskNewCluster {
724
+ applyPolicyDefaultValues?: boolean;
725
+ autoscale?: outputs.GetJobJobSettingsSettingsTaskNewClusterAutoscale;
726
+ autoterminationMinutes?: number;
727
+ awsAttributes?: outputs.GetJobJobSettingsSettingsTaskNewClusterAwsAttributes;
728
+ azureAttributes?: outputs.GetJobJobSettingsSettingsTaskNewClusterAzureAttributes;
729
+ clusterId?: string;
730
+ clusterLogConf?: outputs.GetJobJobSettingsSettingsTaskNewClusterClusterLogConf;
731
+ clusterName?: string;
732
+ customTags?: {
733
+ [key: string]: any;
734
+ };
735
+ dataSecurityMode?: string;
736
+ dockerImage?: outputs.GetJobJobSettingsSettingsTaskNewClusterDockerImage;
737
+ driverInstancePoolId: string;
738
+ driverNodeTypeId: string;
739
+ enableElasticDisk: boolean;
740
+ enableLocalDiskEncryption: boolean;
741
+ gcpAttributes?: outputs.GetJobJobSettingsSettingsTaskNewClusterGcpAttributes;
742
+ idempotencyToken?: string;
743
+ initScripts?: outputs.GetJobJobSettingsSettingsTaskNewClusterInitScript[];
744
+ instancePoolId?: string;
745
+ nodeTypeId: string;
746
+ numWorkers: number;
747
+ policyId?: string;
748
+ singleUserName?: string;
749
+ sparkConf?: {
750
+ [key: string]: any;
751
+ };
752
+ sparkEnvVars?: {
753
+ [key: string]: any;
754
+ };
755
+ sparkVersion: string;
756
+ sshPublicKeys?: string[];
757
+ workloadType?: outputs.GetJobJobSettingsSettingsTaskNewClusterWorkloadType;
758
+ }
759
+ export interface GetJobJobSettingsSettingsTaskNewClusterAutoscale {
760
+ maxWorkers?: number;
761
+ minWorkers?: number;
762
+ }
763
+ export interface GetJobJobSettingsSettingsTaskNewClusterAwsAttributes {
764
+ availability?: string;
765
+ ebsVolumeCount?: number;
766
+ ebsVolumeSize?: number;
767
+ ebsVolumeType?: string;
768
+ firstOnDemand?: number;
769
+ instanceProfileArn?: string;
770
+ spotBidPricePercent?: number;
771
+ zoneId?: string;
772
+ }
773
+ export interface GetJobJobSettingsSettingsTaskNewClusterAzureAttributes {
774
+ availability?: string;
775
+ firstOnDemand?: number;
776
+ spotBidMaxPrice?: number;
777
+ }
778
+ export interface GetJobJobSettingsSettingsTaskNewClusterClusterLogConf {
779
+ dbfs?: outputs.GetJobJobSettingsSettingsTaskNewClusterClusterLogConfDbfs;
780
+ s3?: outputs.GetJobJobSettingsSettingsTaskNewClusterClusterLogConfS3;
781
+ }
782
+ export interface GetJobJobSettingsSettingsTaskNewClusterClusterLogConfDbfs {
783
+ destination: string;
784
+ }
785
+ export interface GetJobJobSettingsSettingsTaskNewClusterClusterLogConfS3 {
786
+ cannedAcl?: string;
787
+ destination: string;
788
+ enableEncryption?: boolean;
789
+ encryptionType?: string;
790
+ endpoint?: string;
791
+ kmsKey?: string;
792
+ region?: string;
793
+ }
794
+ export interface GetJobJobSettingsSettingsTaskNewClusterDockerImage {
795
+ basicAuth?: outputs.GetJobJobSettingsSettingsTaskNewClusterDockerImageBasicAuth;
796
+ url: string;
797
+ }
798
+ export interface GetJobJobSettingsSettingsTaskNewClusterDockerImageBasicAuth {
799
+ password: string;
800
+ username: string;
801
+ }
802
+ export interface GetJobJobSettingsSettingsTaskNewClusterGcpAttributes {
803
+ availability?: string;
804
+ bootDiskSize?: number;
805
+ googleServiceAccount?: string;
806
+ usePreemptibleExecutors?: boolean;
807
+ zoneId?: string;
808
+ }
809
+ export interface GetJobJobSettingsSettingsTaskNewClusterInitScript {
810
+ dbfs?: outputs.GetJobJobSettingsSettingsTaskNewClusterInitScriptDbfs;
811
+ file?: outputs.GetJobJobSettingsSettingsTaskNewClusterInitScriptFile;
812
+ gcs?: outputs.GetJobJobSettingsSettingsTaskNewClusterInitScriptGcs;
813
+ s3?: outputs.GetJobJobSettingsSettingsTaskNewClusterInitScriptS3;
814
+ }
815
+ export interface GetJobJobSettingsSettingsTaskNewClusterInitScriptDbfs {
816
+ destination: string;
817
+ }
818
+ export interface GetJobJobSettingsSettingsTaskNewClusterInitScriptFile {
819
+ destination?: string;
820
+ }
821
+ export interface GetJobJobSettingsSettingsTaskNewClusterInitScriptGcs {
822
+ destination?: string;
823
+ }
824
+ export interface GetJobJobSettingsSettingsTaskNewClusterInitScriptS3 {
825
+ cannedAcl?: string;
826
+ destination: string;
827
+ enableEncryption?: boolean;
828
+ encryptionType?: string;
829
+ endpoint?: string;
830
+ kmsKey?: string;
831
+ region?: string;
832
+ }
833
+ export interface GetJobJobSettingsSettingsTaskNewClusterWorkloadType {
834
+ clients: outputs.GetJobJobSettingsSettingsTaskNewClusterWorkloadTypeClients;
835
+ }
836
+ export interface GetJobJobSettingsSettingsTaskNewClusterWorkloadTypeClients {
837
+ jobs?: boolean;
838
+ notebooks?: boolean;
839
+ }
840
+ export interface GetJobJobSettingsSettingsTaskNotebookTask {
841
+ baseParameters?: {
842
+ [key: string]: any;
843
+ };
844
+ notebookPath: string;
845
+ }
846
+ export interface GetJobJobSettingsSettingsTaskPipelineTask {
847
+ pipelineId: string;
848
+ }
849
+ export interface GetJobJobSettingsSettingsTaskPythonWheelTask {
850
+ entryPoint?: string;
851
+ namedParameters?: {
852
+ [key: string]: any;
853
+ };
854
+ packageName?: string;
855
+ parameters?: string[];
856
+ }
857
+ export interface GetJobJobSettingsSettingsTaskSparkJarTask {
858
+ jarUri?: string;
859
+ mainClassName?: string;
860
+ parameters?: string[];
861
+ }
862
+ export interface GetJobJobSettingsSettingsTaskSparkPythonTask {
863
+ parameters?: string[];
864
+ pythonFile: string;
865
+ }
866
+ export interface GetJobJobSettingsSettingsTaskSparkSubmitTask {
867
+ parameters?: string[];
868
+ }
869
+ export interface GetJobJobSettingsSettingsTaskSqlTask {
870
+ alert?: outputs.GetJobJobSettingsSettingsTaskSqlTaskAlert;
871
+ dashboard?: outputs.GetJobJobSettingsSettingsTaskSqlTaskDashboard;
872
+ parameters?: {
873
+ [key: string]: any;
874
+ };
875
+ query?: outputs.GetJobJobSettingsSettingsTaskSqlTaskQuery;
876
+ warehouseId?: string;
877
+ }
878
+ export interface GetJobJobSettingsSettingsTaskSqlTaskAlert {
879
+ alertId: string;
880
+ }
881
+ export interface GetJobJobSettingsSettingsTaskSqlTaskDashboard {
882
+ dashboardId: string;
883
+ }
884
+ export interface GetJobJobSettingsSettingsTaskSqlTaskQuery {
885
+ queryId: string;
886
+ }
313
887
  export interface GetNotebookPathsNotebookPathList {
314
888
  language?: string;
315
889
  /**
@@ -351,7 +925,7 @@ export interface InstancePoolAwsAttributes {
351
925
  */
352
926
  spotBidPricePercent?: number;
353
927
  /**
354
- * (String) Identifier for the availability zone/datacenter in which the instance pool resides. This string is of the form like `"us-west-2a"`. The provided availability zone must be in the same region as the Databricks deployment. For example, `"us-west-2a"` is not a valid zone ID if the Databricks deployment resides in the `"us-east-1"` region. This is an optional field. If not specified, a default zone is used. You can find the list of available zones as well as the default value by using the [List Zones API](https://docs.databricks.com/dev-tools/api/latest/clusters.html#clusterclusterservicelistavailablezones).
928
+ * (String) Identifier for the availability zone/datacenter in which the instance pool resides. This string is of the form like `"us-west-2a"`. The provided availability zone must be in the same region as the Databricks deployment. For example, `"us-west-2a"` is not a valid zone ID if the Databricks deployment resides in the `"us-east-1"` region. If not specified, a default zone is used. You can find the list of available zones as well as the default value by using the [List Zones API](https://docs.databricks.com/dev-tools/api/latest/clusters.html#clusterclusterservicelistavailablezones).
355
929
  */
356
930
  zoneId: string;
357
931
  }
@@ -381,10 +955,7 @@ export interface InstancePoolDiskSpecDiskType {
381
955
  ebsVolumeType?: string;
382
956
  }
383
957
  export interface InstancePoolGcpAttributes {
384
- /**
385
- * Availability type used for all nodes. Valid values are `PREEMPTIBLE_GCP`, `PREEMPTIBLE_WITH_FALLBACK_GCP` and `ON_DEMAND_GCP`, default: `ON_DEMAND_GCP`.
386
- */
387
- availability?: string;
958
+ gcpAvailability?: string;
388
959
  }
389
960
  export interface InstancePoolInstancePoolFleetAttributes {
390
961
  fleetOnDemandOption?: outputs.InstancePoolInstancePoolFleetAttributesFleetOnDemandOption;
@@ -411,6 +982,28 @@ export interface InstancePoolPreloadedDockerImageBasicAuth {
411
982
  password: string;
412
983
  username: string;
413
984
  }
985
+ export interface JobDbtTask {
986
+ /**
987
+ * (Array) Series of dbt commands to execute in sequence. Every command must start with "dbt".
988
+ */
989
+ commands: string[];
990
+ /**
991
+ * The relative path to the directory in the repository specified by `gitSource` where dbt should look in for the `profiles.yml` file. If not specified, defaults to the repository's root directory. Equivalent to passing `--profile-dir` to a dbt command.
992
+ */
993
+ profilesDirectory?: string;
994
+ /**
995
+ * The relative path to the directory in the repository specified in `gitSource` where dbt should look in for the `dbt_project.yml` file. If not specified, defaults to the repository's root directory. Equivalent to passing `--project-dir` to a dbt command.
996
+ */
997
+ projectDirectory?: string;
998
+ /**
999
+ * The name of the schema dbt should run in. Defaults to `default`.
1000
+ */
1001
+ schema?: string;
1002
+ /**
1003
+ * ID of the (the databricks_sql_endpoint) that will be used to execute the task. Only serverless warehouses are supported right now.
1004
+ */
1005
+ warehouseId?: string;
1006
+ }
414
1007
  export interface JobEmailNotifications {
415
1008
  alertOnLastAttempt?: boolean;
416
1009
  /**
@@ -757,7 +1350,7 @@ export interface JobPythonWheelTask {
757
1350
  */
758
1351
  packageName?: string;
759
1352
  /**
760
- * Parameters for the task
1353
+ * (Map) parameters to be used for each run of this task. The SQL alert task does not support custom parameters.
761
1354
  */
762
1355
  parameters?: string[];
763
1356
  }
@@ -782,13 +1375,13 @@ export interface JobSparkJarTask {
782
1375
  */
783
1376
  mainClassName?: string;
784
1377
  /**
785
- * Parameters for the task
1378
+ * (Map) parameters to be used for each run of this task. The SQL alert task does not support custom parameters.
786
1379
  */
787
1380
  parameters?: string[];
788
1381
  }
789
1382
  export interface JobSparkPythonTask {
790
1383
  /**
791
- * Parameters for the task
1384
+ * (Map) parameters to be used for each run of this task. The SQL alert task does not support custom parameters.
792
1385
  */
793
1386
  parameters?: string[];
794
1387
  /**
@@ -798,7 +1391,7 @@ export interface JobSparkPythonTask {
798
1391
  }
799
1392
  export interface JobSparkSubmitTask {
800
1393
  /**
801
- * Parameters for the task
1394
+ * (Map) parameters to be used for each run of this task. The SQL alert task does not support custom parameters.
802
1395
  */
803
1396
  parameters?: string[];
804
1397
  }
@@ -852,9 +1445,26 @@ export interface JobTask {
852
1445
  timeoutSeconds?: number;
853
1446
  }
854
1447
  export interface JobTaskDbtTask {
1448
+ /**
1449
+ * (Array) Series of dbt commands to execute in sequence. Every command must start with "dbt".
1450
+ */
855
1451
  commands: string[];
1452
+ /**
1453
+ * The relative path to the directory in the repository specified by `gitSource` where dbt should look in for the `profiles.yml` file. If not specified, defaults to the repository's root directory. Equivalent to passing `--profile-dir` to a dbt command.
1454
+ */
1455
+ profilesDirectory?: string;
1456
+ /**
1457
+ * The relative path to the directory in the repository specified in `gitSource` where dbt should look in for the `dbt_project.yml` file. If not specified, defaults to the repository's root directory. Equivalent to passing `--project-dir` to a dbt command.
1458
+ */
856
1459
  projectDirectory?: string;
1460
+ /**
1461
+ * The name of the schema dbt should run in. Defaults to `default`.
1462
+ */
857
1463
  schema?: string;
1464
+ /**
1465
+ * ID of the (the databricks_sql_endpoint) that will be used to execute the task. Only serverless warehouses are supported right now.
1466
+ */
1467
+ warehouseId?: string;
858
1468
  }
859
1469
  export interface JobTaskDependsOn {
860
1470
  taskKey?: string;
@@ -1053,7 +1663,7 @@ export interface JobTaskPythonWheelTask {
1053
1663
  */
1054
1664
  packageName?: string;
1055
1665
  /**
1056
- * Parameters for the task
1666
+ * (Map) parameters to be used for each run of this task. The SQL alert task does not support custom parameters.
1057
1667
  */
1058
1668
  parameters?: string[];
1059
1669
  }
@@ -1064,13 +1674,13 @@ export interface JobTaskSparkJarTask {
1064
1674
  */
1065
1675
  mainClassName?: string;
1066
1676
  /**
1067
- * Parameters for the task
1677
+ * (List) parameters to be used for each run of this task.
1068
1678
  */
1069
1679
  parameters?: string[];
1070
1680
  }
1071
1681
  export interface JobTaskSparkPythonTask {
1072
1682
  /**
1073
- * Parameters for the task
1683
+ * (List) parameters to be used for each run of this task.
1074
1684
  */
1075
1685
  parameters?: string[];
1076
1686
  /**
@@ -1080,20 +1690,32 @@ export interface JobTaskSparkPythonTask {
1080
1690
  }
1081
1691
  export interface JobTaskSparkSubmitTask {
1082
1692
  /**
1083
- * Parameters for the task
1693
+ * (List) parameters to be used for each run of this task.
1084
1694
  */
1085
1695
  parameters?: string[];
1086
1696
  }
1087
1697
  export interface JobTaskSqlTask {
1698
+ /**
1699
+ * block consisting of single string field: `alertId` - identifier of the Databricks SQL Alert.
1700
+ */
1088
1701
  alert?: outputs.JobTaskSqlTaskAlert;
1702
+ /**
1703
+ * block consisting of single string field: `dashboardId` - identifier of the Databricks SQL Dashboard (databricks_sql_dashboard).
1704
+ */
1089
1705
  dashboard?: outputs.JobTaskSqlTaskDashboard;
1090
1706
  /**
1091
- * Parameters for the task
1707
+ * (Map) parameters to be used for each run of this task. The SQL alert task does not support custom parameters.
1092
1708
  */
1093
1709
  parameters?: {
1094
1710
  [key: string]: any;
1095
1711
  };
1712
+ /**
1713
+ * block consisting of single string field: `queryId` - identifier of the Databricks SQL Query (databricks_sql_query).
1714
+ */
1096
1715
  query?: outputs.JobTaskSqlTaskQuery;
1716
+ /**
1717
+ * ID of the SQL warehouse (databricks_sql_endpoint) that will be used to execute the task. Only serverless warehouses are supported right now.
1718
+ */
1097
1719
  warehouseId?: string;
1098
1720
  }
1099
1721
  export interface JobTaskSqlTaskAlert {
@@ -1157,7 +1779,10 @@ export interface MlflowWebhookHttpUrlSpec {
1157
1779
  * Enable/disable SSL certificate validation. Default is `true`. For self-signed certificates, this field must be `false` AND the destination server must disable certificate validation as well. For security purposes, it is encouraged to perform secret validation with the HMAC-encoded portion of the payload and acknowledge the risk associated with disabling hostname validation whereby it becomes more likely that requests can be maliciously routed to an unintended host.
1158
1780
  */
1159
1781
  enableSslVerification?: boolean;
1160
- string?: string;
1782
+ /**
1783
+ * Shared secret required for HMAC encoding payload. The HMAC-encoded payload will be sent in the header as `X-Databricks-Signature: encodedPayload`.
1784
+ */
1785
+ secret?: string;
1161
1786
  /**
1162
1787
  * External HTTPS URL called on event trigger (by using a POST request). Structure of payload depends on the event type, refer to [documentation](https://docs.databricks.com/applications/mlflow/model-registry-webhooks.html) for more details.
1163
1788
  */
@@ -1311,6 +1936,7 @@ export interface PipelineCluster {
1311
1936
  export interface PipelineClusterAutoscale {
1312
1937
  maxWorkers?: number;
1313
1938
  minWorkers?: number;
1939
+ mode?: string;
1314
1940
  }
1315
1941
  export interface PipelineClusterAwsAttributes {
1316
1942
  firstOnDemand?: number;
@@ -1378,6 +2004,21 @@ export interface PipelineLibraryMaven {
1378
2004
  export interface PipelineLibraryNotebook {
1379
2005
  path: string;
1380
2006
  }
2007
+ export interface RecipientIpAccessList {
2008
+ /**
2009
+ * Allowed IP Addresses in CIDR notation. Limit of 100.
2010
+ */
2011
+ allowedIpAddresses: string[];
2012
+ }
2013
+ export interface RecipientToken {
2014
+ activationUrl: string;
2015
+ createdAt: number;
2016
+ createdBy: string;
2017
+ expirationTime: number;
2018
+ id: string;
2019
+ updatedAt: number;
2020
+ updatedBy: string;
2021
+ }
1381
2022
  export interface SecretScopeKeyvaultMetadata {
1382
2023
  dnsName: string;
1383
2024
  resourceId: string;