@pulumi/dbtcloud 0.2.0-alpha.1724909482 → 0.2.0-alpha.1725644297

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/types/output.d.ts CHANGED
@@ -45,6 +45,426 @@ export interface GetEnvironmentsEnvironment {
  */
  useCustomBranch: boolean;
  }
+ export interface GetGlobalConnectionApacheSpark {
+ /**
+ * Auth
+ */
+ auth: string;
+ /**
+ * Spark cluster for the connection
+ */
+ cluster: string;
+ /**
+ * Connection retries. Default=0
+ */
+ connectRetries: number;
+ /**
+ * Connection time out in seconds. Default=10
+ */
+ connectTimeout: number;
+ /**
+ * Hostname of the connection
+ */
+ host: string;
+ /**
+ * Authentication method for the connection (http or thrift).
+ */
+ method: string;
+ /**
+ * Organization ID
+ */
+ organization: string;
+ /**
+ * Port for the connection. Default=443
+ */
+ port: number;
+ /**
+ * User
+ */
+ user: string;
+ }
+ export interface GetGlobalConnectionAthena {
+ /**
+ * Specify the database (data catalog) to build models into (lowercase only).
+ */
+ database: string;
+ /**
+ * Number of times to retry boto3 requests (e.g. deleting S3 files for materialized tables).
+ */
+ numBoto3Retries: number;
+ /**
+ * Number of times to retry iceberg commit queries to fix ICEBERG_COMMIT_ERROR.
+ */
+ numIcebergRetries: number;
+ /**
+ * Number of times to retry a failing query.
+ */
+ numRetries: number;
+ /**
+ * Interval in seconds to use for polling the status of query results in Athena.
+ */
+ pollInterval: number;
+ /**
+ * AWS region of your Athena instance.
+ */
+ regionName: string;
+ /**
+ * Prefix for storing tables, if different from the connection's S3 staging directory.
+ */
+ s3DataDir: string;
+ /**
+ * How to generate table paths in the S3 data directory.
+ */
+ s3DataNaming: string;
+ /**
+ * S3 location to store Athena query results and metadata.
+ */
+ s3StagingDir: string;
+ /**
+ * Prefix for storing temporary tables, if different from the connection's S3 data directory.
+ */
+ s3TmpTableDir: string;
+ /**
+ * Identifier of Athena Spark workgroup for running Python models.
+ */
+ sparkWorkGroup: string;
+ /**
+ * Identifier of Athena workgroup.
+ */
+ workGroup: string;
+ }
+ export interface GetGlobalConnectionBigquery {
+ /**
+ * OAuth Client ID
+ */
+ applicationId: string;
+ /**
+ * OAuth Client Secret
+ */
+ applicationSecret: string;
+ /**
+ * Auth Provider X509 Cert URL for the Service Account
+ */
+ authProviderX509CertUrl: string;
+ /**
+ * Auth URI for the Service Account
+ */
+ authUri: string;
+ /**
+ * Service Account email
+ */
+ clientEmail: string;
+ /**
+ * Client ID of the Service Account
+ */
+ clientId: string;
+ /**
+ * Client X509 Cert URL for the Service Account
+ */
+ clientX509CertUrl: string;
+ /**
+ * Dataproc cluster name for PySpark workloads
+ */
+ dataprocClusterName: string;
+ /**
+ * Google Cloud region for PySpark workloads on Dataproc
+ */
+ dataprocRegion: string;
+ /**
+ * Project to bill for query execution
+ */
+ executionProject: string;
+ /**
+ * The GCP project ID to use for the connection
+ */
+ gcpProjectId: string;
+ /**
+ * URI for a Google Cloud Storage bucket to host Python code executed via Datapro
+ */
+ gcsBucket: string;
+ /**
+ * Service Account to impersonate when running queries
+ */
+ impersonateServiceAccount: string;
+ /**
+ * Maximum timeout for the job creation step
+ */
+ jobCreationTimeoutSeconds: number;
+ /**
+ * Total number of seconds to wait while retrying the same query
+ */
+ jobRetryDeadlineSeconds: number;
+ /**
+ * Location to create new Datasets in
+ */
+ location: string;
+ /**
+ * Max number of bytes that can be billed for a given BigQuery query
+ */
+ maximumBytesBilled: number;
+ /**
+ * The priority with which to execute BigQuery queries (batch or interactive)
+ */
+ priority: string;
+ /**
+ * Private Key for the Service Account
+ */
+ privateKey: string;
+ /**
+ * Private Key ID for the Service Account
+ */
+ privateKeyId: string;
+ /**
+ * Number of retries for queries
+ */
+ retries: number;
+ /**
+ * OAuth scopes for the BigQuery connection
+ */
+ scopes: string[];
+ /**
+ * Timeout in seconds for queries
+ */
+ timeoutSeconds: number;
+ /**
+ * Token URI for the Service Account
+ */
+ tokenUri: string;
+ }
+ export interface GetGlobalConnectionDatabricks {
+ /**
+ * Catalog name if Unity Catalog is enabled in your Databricks workspace.
+ */
+ catalog: string;
+ /**
+ * Required to enable Databricks OAuth authentication for IDE developers.
+ */
+ clientId: string;
+ /**
+ * Required to enable Databricks OAuth authentication for IDE developers.
+ */
+ clientSecret: string;
+ /**
+ * The hostname of the Databricks cluster or SQL warehouse.
+ */
+ host: string;
+ /**
+ * The HTTP path of the Databricks cluster or SQL warehouse.
+ */
+ httpPath: string;
+ }
+ export interface GetGlobalConnectionFabric {
+ /**
+ * The database to connect to for this connection.
+ */
+ database: string;
+ /**
+ * The number of seconds used to establish a connection before failing. Defaults to 0, which means that the timeout is disabled or uses the default system settings.
+ */
+ loginTimeout: number;
+ /**
+ * The port to connect to for this connection. Default=1433
+ */
+ port: number;
+ /**
+ * The number of seconds used to wait for a query before failing. Defaults to 0, which means that the timeout is disabled or uses the default system settings.
+ */
+ queryTimeout: number;
+ /**
+ * The number of automatic times to retry a query before failing. Defaults to 1. Queries with syntax errors will not be retried. This setting can be used to overcome intermittent network issues.
+ */
+ retries: number;
+ /**
+ * The server hostname.
+ */
+ server: string;
+ }
+ export interface GetGlobalConnectionPostgres {
+ /**
+ * The database name for this connection.
+ */
+ dbname: string;
+ /**
+ * The hostname of the database.
+ */
+ hostname: string;
+ /**
+ * The port to connect to for this connection. Default=5432
+ */
+ port: number;
+ /**
+ * PostgreSQL SSH Tunnel configuration
+ */
+ sshTunnel: outputs.GetGlobalConnectionPostgresSshTunnel;
+ }
+ export interface GetGlobalConnectionPostgresSshTunnel {
+ /**
+ * The hostname for the SSH tunnel.
+ */
+ hostname: string;
+ /**
+ * The ID of the SSH tunnel connection.
+ */
+ id: number;
+ /**
+ * The HTTP port for the SSH tunnel.
+ */
+ port: number;
+ /**
+ * The SSH public key generated to allow connecting via SSH tunnel.
+ */
+ publicKey: string;
+ /**
+ * The username to use for the SSH tunnel.
+ */
+ username: string;
+ }
+ export interface GetGlobalConnectionRedshift {
+ /**
+ * The database name for this connection.
+ */
+ dbname: string;
+ /**
+ * The hostname of the data warehouse.
+ */
+ hostname: string;
+ /**
+ * The port to connect to for this connection. Default=5432
+ */
+ port: number;
+ /**
+ * Redshift SSH Tunnel configuration
+ */
+ sshTunnel: outputs.GetGlobalConnectionRedshiftSshTunnel;
+ }
+ export interface GetGlobalConnectionRedshiftSshTunnel {
+ /**
+ * The hostname for the SSH tunnel.
+ */
+ hostname: string;
+ /**
+ * The ID of the SSH tunnel connection.
+ */
+ id: number;
+ /**
+ * The HTTP port for the SSH tunnel.
+ */
+ port: number;
+ /**
+ * The SSH public key generated to allow connecting via SSH tunnel.
+ */
+ publicKey: string;
+ /**
+ * The username to use for the SSH tunnel.
+ */
+ username: string;
+ }
+ export interface GetGlobalConnectionSnowflake {
+ /**
+ * The Snowflake account name
+ */
+ account: string;
+ /**
+ * Whether to allow Snowflake OAuth for the connection. If true, the `oauthClientId` and `oauthClientSecret` fields must be set
+ */
+ allowSso: boolean;
+ /**
+ * If true, the snowflake client will keep connections for longer than the default 4 hours. This is helpful when particularly long-running queries are executing (> 4 hours)
+ */
+ clientSessionKeepAlive: boolean;
+ /**
+ * The default database for the connection
+ */
+ database: string;
+ /**
+ * OAuth Client ID. Required to allow OAuth between dbt Cloud and Snowflake
+ */
+ oauthClientId: string;
+ /**
+ * OAuth Client Secret. Required to allow OAuth between dbt Cloud and Snowflake
+ */
+ oauthClientSecret: string;
+ /**
+ * The Snowflake role to use when running queries on the connection
+ */
+ role: string;
+ /**
+ * The default Snowflake Warehouse to use for the connection
+ */
+ warehouse: string;
+ }
+ export interface GetGlobalConnectionStarburst {
+ /**
+ * The hostname of the account to connect to.
+ */
+ host: string;
+ /**
+ * The authentication method. Only LDAP for now.
+ */
+ method: string;
+ /**
+ * The port to connect to for this connection. Default=443
+ */
+ port: number;
+ }
+ export interface GetGlobalConnectionSynapse {
+ /**
+ * The database to connect to for this connection.
+ */
+ database: string;
+ /**
+ * The server hostname.
+ */
+ host: string;
+ /**
+ * The number of seconds used to establish a connection before failing. Defaults to 0, which means that the timeout is disabled or uses the default system settings.
+ */
+ loginTimeout: number;
+ /**
+ * The port to connect to for this connection. Default=1433
+ */
+ port: number;
+ /**
+ * The number of seconds used to wait for a query before failing. Defaults to 0, which means that the timeout is disabled or uses the default system settings.
+ */
+ queryTimeout: number;
+ /**
+ * The number of automatic times to retry a query before failing. Defaults to 1. Queries with syntax errors will not be retried. This setting can be used to overcome intermittent network issues.
+ */
+ retries: number;
+ }
+ export interface GetGlobalConnectionsConnection {
+ /**
+ * Type of adapter used for the connection
+ */
+ adapterVersion: string;
+ /**
+ * When the connection was created
+ */
+ createdAt: string;
+ /**
+ * Number of environments using this connection
+ */
+ environmentCount: number;
+ /**
+ * Connection Identifier
+ */
+ id: number;
+ isSshTunnelEnabled: boolean;
+ /**
+ * Connection name
+ */
+ name: string;
+ oauthConfigurationId: number;
+ /**
+ * Private Link Endpoint ID.
+ */
+ privateLinkEndpointId: number;
+ /**
+ * When the connection was updated
+ */
+ updatedAt: string;
+ }
  export interface GetGroupGroupPermission {
  /**
  * Whether access should be provided for all projects or not.
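The hunk above only adds read-side output shapes for the global connection data sources. As a hedged sketch of how they might be consumed — the `getGlobalConnection` invoke, its `id` argument, and the `snowflake` property on its result are assumptions; only the nested `GetGlobalConnectionSnowflake` fields (account, database, warehouse, role) come from this diff:

```typescript
import * as dbtcloud from "@pulumi/dbtcloud";

// Hypothetical lookup: the invoke name, the `id` argument, and the `snowflake`
// result property are assumptions; the fields read from it follow the
// GetGlobalConnectionSnowflake shape added in the hunk above.
export async function describeConnection(connectionId: number): Promise<string> {
    const conn = await dbtcloud.getGlobalConnection({ id: connectionId });
    const sf = conn.snowflake;
    if (sf && sf.account) {
        return `Snowflake ${sf.account}/${sf.database} (warehouse=${sf.warehouse}, role=${sf.role})`;
    }
    return `Connection ${connectionId} does not use the Snowflake adapter`;
}
```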
@@ -214,6 +634,72 @@ export interface GetJobsJobTriggers {
  */
  schedule: boolean;
  }
+ export interface GetProjectsProject {
+ /**
+ * Details for the connection linked to the project
+ */
+ connection: outputs.GetProjectsProjectConnection;
+ /**
+ * When the project was created
+ */
+ createdAt: string;
+ /**
+ * Subdirectory for the dbt project inside the git repo
+ */
+ dbtProjectSubdirectory: string;
+ /**
+ * Project description
+ */
+ description: string;
+ /**
+ * Project ID
+ */
+ id: number;
+ /**
+ * Project name
+ */
+ name: string;
+ /**
+ * Details for the repository linked to the project
+ */
+ repository: outputs.GetProjectsProjectRepository;
+ /**
+ * Semantic layer config ID
+ */
+ semanticLayerConfigId: number;
+ /**
+ * When the project was last updated
+ */
+ updatedAt: string;
+ }
+ export interface GetProjectsProjectConnection {
+ /**
+ * Version of the adapter for the connection. Will tell what connection type it is
+ */
+ adapterVersion: string;
+ /**
+ * Connection ID
+ */
+ id: number;
+ /**
+ * Connection name
+ */
+ name: string;
+ }
+ export interface GetProjectsProjectRepository {
+ /**
+ * Repository ID
+ */
+ id: number;
+ /**
+ * URL template for PRs
+ */
+ pullRequestUrlTemplate: string;
+ /**
+ * URL of the git repo remote
+ */
+ remoteUrl: string;
+ }
  export interface GetServiceTokenServiceTokenPermission {
  /**
  * Whether or not to apply this permission to all projects for this service token
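A hedged usage sketch for the new project shapes — the `getProjects` invoke and its `projects` result property are assumptions; the `GetProjectsProject`, `GetProjectsProjectConnection`, and `GetProjectsProjectRepository` fields are taken from the hunk above:

```typescript
import * as dbtcloud from "@pulumi/dbtcloud";

// Hypothetical sketch: `getProjects` and the `projects` result property are
// assumptions; each element follows GetProjectsProject from the hunk above.
export async function listProjectRepositories(): Promise<void> {
    const result = await dbtcloud.getProjects({});
    for (const project of result.projects) {
        console.log(`${project.name} (#${project.id})`);
        console.log(`  adapter: ${project.connection.adapterVersion}`);
        console.log(`  repo:    ${project.repository.remoteUrl}`);
        console.log(`  subdir:  ${project.dbtProjectSubdirectory}`);
    }
}
```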
@@ -246,6 +732,94 @@ export interface GetUsersUser {
  */
  id: number;
  }
+ export interface GlobalConnectionApacheSpark {
+ /**
+ * Auth
+ */
+ auth?: string;
+ /**
+ * Spark cluster for the connection
+ */
+ cluster: string;
+ /**
+ * Connection retries. Default=0
+ */
+ connectRetries: number;
+ /**
+ * Connection time out in seconds. Default=10
+ */
+ connectTimeout: number;
+ /**
+ * Hostname of the connection
+ */
+ host: string;
+ /**
+ * Authentication method for the connection (http or thrift).
+ */
+ method: string;
+ /**
+ * Organization ID
+ */
+ organization?: string;
+ /**
+ * Port for the connection. Default=443
+ */
+ port: number;
+ /**
+ * User
+ */
+ user?: string;
+ }
+ export interface GlobalConnectionAthena {
+ /**
+ * Specify the database (data catalog) to build models into (lowercase only).
+ */
+ database: string;
+ /**
+ * Number of times to retry boto3 requests (e.g. deleting S3 files for materialized tables).
+ */
+ numBoto3Retries?: number;
+ /**
+ * Number of times to retry iceberg commit queries to fix ICEBERG_COMMIT_ERROR.
+ */
+ numIcebergRetries?: number;
+ /**
+ * Number of times to retry a failing query.
+ */
+ numRetries?: number;
+ /**
+ * Interval in seconds to use for polling the status of query results in Athena.
+ */
+ pollInterval?: number;
+ /**
+ * AWS region of your Athena instance.
+ */
+ regionName: string;
+ /**
+ * Prefix for storing tables, if different from the connection's S3 staging directory.
+ */
+ s3DataDir?: string;
+ /**
+ * How to generate table paths in the S3 data directory.
+ */
+ s3DataNaming?: string;
+ /**
+ * S3 location to store Athena query results and metadata.
+ */
+ s3StagingDir: string;
+ /**
+ * Prefix for storing temporary tables, if different from the connection's S3 data directory.
+ */
+ s3TmpTableDir?: string;
+ /**
+ * Identifier of Athena Spark workgroup for running Python models.
+ */
+ sparkWorkGroup?: string;
+ /**
+ * Identifier of Athena workgroup.
+ */
+ workGroup?: string;
+ }
  export interface GlobalConnectionBigquery {
  /**
  * OAuth Client ID
@@ -366,6 +940,112 @@ export interface GlobalConnectionDatabricks {
  */
  httpPath: string;
  }
+ export interface GlobalConnectionFabric {
+ /**
+ * The database to connect to for this connection.
+ */
+ database: string;
+ /**
+ * The number of seconds used to establish a connection before failing. Defaults to 0, which means that the timeout is disabled or uses the default system settings.
+ */
+ loginTimeout: number;
+ /**
+ * The port to connect to for this connection. Default=1433
+ */
+ port: number;
+ /**
+ * The number of seconds used to wait for a query before failing. Defaults to 0, which means that the timeout is disabled or uses the default system settings.
+ */
+ queryTimeout: number;
+ /**
+ * The number of automatic times to retry a query before failing. Defaults to 1. Queries with syntax errors will not be retried. This setting can be used to overcome intermittent network issues.
+ */
+ retries: number;
+ /**
+ * The server hostname.
+ */
+ server: string;
+ }
+ export interface GlobalConnectionPostgres {
+ /**
+ * The database name for this connection.
+ */
+ dbname?: string;
+ /**
+ * The hostname of the database.
+ */
+ hostname: string;
+ /**
+ * The port to connect to for this connection. Default=5432
+ */
+ port: number;
+ /**
+ * PostgreSQL SSH Tunnel configuration
+ */
+ sshTunnel?: outputs.GlobalConnectionPostgresSshTunnel;
+ }
+ export interface GlobalConnectionPostgresSshTunnel {
+ /**
+ * The hostname for the SSH tunnel.
+ */
+ hostname: string;
+ /**
+ * The ID of the SSH tunnel connection.
+ */
+ id: number;
+ /**
+ * The HTTP port for the SSH tunnel.
+ */
+ port: number;
+ /**
+ * The SSH public key generated to allow connecting via SSH tunnel.
+ */
+ publicKey: string;
+ /**
+ * The username to use for the SSH tunnel.
+ */
+ username: string;
+ }
+ export interface GlobalConnectionRedshift {
+ /**
+ * The database name for this connection.
+ */
+ dbname?: string;
+ /**
+ * The hostname of the data warehouse.
+ */
+ hostname: string;
+ /**
+ * The port to connect to for this connection. Default=5432
+ */
+ port: number;
+ /**
+ * Redshift SSH Tunnel configuration
+ */
+ sshTunnel?: outputs.GlobalConnectionRedshiftSshTunnel;
+ }
+ export interface GlobalConnectionRedshiftSshTunnel {
+ /**
+ * The hostname for the SSH tunnel.
+ */
+ hostname: string;
+ /**
+ * The ID of the SSH tunnel connection.
+ */
+ id: number;
+ /**
+ * The HTTP port for the SSH tunnel.
+ */
+ port: number;
+ /**
+ * The SSH public key generated to allow connecting via SSH tunnel.
+ */
+ publicKey: string;
+ /**
+ * The username to use for the SSH tunnel.
+ */
+ username: string;
+ }
  export interface GlobalConnectionSnowflake {
  /**
  * The Snowflake account name
@@ -400,6 +1080,46 @@ export interface GlobalConnectionSnowflake {
  */
  warehouse: string;
  }
+ export interface GlobalConnectionStarburst {
+ /**
+ * The hostname of the account to connect to.
+ */
+ host: string;
+ /**
+ * The authentication method. Only LDAP for now.
+ */
+ method: string;
+ /**
+ * The port to connect to for this connection. Default=443
+ */
+ port: number;
+ }
+ export interface GlobalConnectionSynapse {
+ /**
+ * The database to connect to for this connection.
+ */
+ database: string;
+ /**
+ * The server hostname.
+ */
+ host: string;
+ /**
+ * The number of seconds used to establish a connection before failing. Defaults to 0, which means that the timeout is disabled or uses the default system settings.
+ */
+ loginTimeout: number;
+ /**
+ * The port to connect to for this connection. Default=1433
+ */
+ port: number;
+ /**
+ * The number of seconds used to wait for a query before failing. Defaults to 0, which means that the timeout is disabled or uses the default system settings.
+ */
+ queryTimeout: number;
+ /**
+ * The number of automatic times to retry a query before failing. Defaults to 1. Queries with syntax errors will not be retried. This setting can be used to overcome intermittent network issues.
+ */
+ retries: number;
+ }
  export interface GroupGroupPermission {
  /**
  * Whether access should be provided for all projects or not.
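The `GlobalConnection*` shapes in the last three hunks are the resource-side counterparts of the data-source outputs earlier in the diff. A hedged sketch of a `GlobalConnection` resource using the Postgres block and its SSH tunnel — the resource name, its top-level `name` argument, and the exact required inputs are assumptions; the nested `postgres`/`sshTunnel` structure mirrors `GlobalConnectionPostgres` and `GlobalConnectionPostgresSshTunnel` above:

```typescript
import * as dbtcloud from "@pulumi/dbtcloud";

// Hypothetical sketch: the GlobalConnection resource and its `name` argument are
// assumptions; the nested block shapes follow the output types in this diff.
const warehouse = new dbtcloud.GlobalConnection("analytics-postgres", {
    name: "Analytics Postgres",
    postgres: {
        hostname: "db.internal.example.com",  // example value, not from the diff
        port: 5432,                           // Default=5432 per the doc comment above
        dbname: "analytics",
        sshTunnel: {
            hostname: "bastion.example.com",
            port: 22,
            username: "dbt_tunnel",
        },
    },
});

// `publicKey` is computed by dbt Cloud, so it only appears on the output side.
export const tunnelPublicKey = warehouse.postgres.apply(p => p?.sshTunnel?.publicKey);
```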