@pulumi/confluentcloud 2.67.0-alpha.1777530259 → 2.67.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/flinkMaterializedTable.d.ts +268 -0
- package/flinkMaterializedTable.js +199 -0
- package/flinkMaterializedTable.js.map +1 -0
- package/getFlinkMaterializedTable.d.ts +175 -0
- package/getFlinkMaterializedTable.js +124 -0
- package/getFlinkMaterializedTable.js.map +1 -0
- package/index.d.ts +6 -0
- package/index.js +12 -4
- package/index.js.map +1 -1
- package/package.json +2 -2
- package/schema.d.ts +52 -0
- package/schema.js +52 -0
- package/schema.js.map +1 -1
- package/types/input.d.ts +410 -4
- package/types/output.d.ts +424 -2
|
@@ -0,0 +1,268 @@
|
|
|
1
|
+
import * as pulumi from "@pulumi/pulumi";
|
|
2
|
+
import * as inputs from "./types/input";
|
|
3
|
+
import * as outputs from "./types/output";
|
|
4
|
+
/**
|
|
5
|
+
* [](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy)
|
|
6
|
+
*
|
|
7
|
+
* `confluentcloud.FlinkMaterializedTable` provides a Flink Materialized Table resource that enables creating, editing, and deleting Flink Materialized Tables on Confluent Cloud.
|
|
8
|
+
*
|
|
9
|
+
* > **Note:** It is recommended to set `lifecycle { preventDestroy = true }` on production instances to prevent accidental Flink Materialized Table deletion. This setting rejects plans that would destroy or recreate the Flink Materialized Table, such as attempting to change uneditable attributes. Read more about it in the Terraform docs.
|
|
10
|
+
*
|
|
11
|
+
* ## Example Usage
|
|
12
|
+
*
|
|
13
|
+
* ### Option #1: Manage multiple Flink Materialized Tables in the same Pulumi Stack
|
|
14
|
+
*
|
|
15
|
+
* ```typescript
|
|
16
|
+
* import * as pulumi from "@pulumi/pulumi";
|
|
17
|
+
* import * as confluentcloud from "@pulumi/confluentcloud";
|
|
18
|
+
*
|
|
19
|
+
* const example = new confluentcloud.FlinkMaterializedTable("example", {
|
|
20
|
+
* organization: {
|
|
21
|
+
* id: main.id,
|
|
22
|
+
* },
|
|
23
|
+
* environment: {
|
|
24
|
+
* id: staging.id,
|
|
25
|
+
* },
|
|
26
|
+
* computePool: {
|
|
27
|
+
* id: exampleConfluentFlinkComputePool.id,
|
|
28
|
+
* },
|
|
29
|
+
* principal: {
|
|
30
|
+
* id: app_manager_flink.id,
|
|
31
|
+
* },
|
|
32
|
+
* restEndpoint: mainConfluentFlinkRegion.restEndpoint,
|
|
33
|
+
* credentials: {
|
|
34
|
+
* key: flink.id,
|
|
35
|
+
* secret: flink.secret,
|
|
36
|
+
* },
|
|
37
|
+
* displayName: "my_materialized_table",
|
|
38
|
+
* kafkaCluster: {
|
|
39
|
+
* id: basic_cluster.id,
|
|
40
|
+
* },
|
|
41
|
+
* query: "SELECT user_id, product_id, price, quantity FROM orders WHERE price > 1000;",
|
|
42
|
+
* });
|
|
43
|
+
* ```
|
|
44
|
+
*
|
|
45
|
+
* ### Option #2: Manage a single Flink Materialized Table in the same Pulumi Stack
|
|
46
|
+
*
|
|
47
|
+
* ```typescript
|
|
48
|
+
* import * as pulumi from "@pulumi/pulumi";
|
|
49
|
+
* import * as confluentcloud from "@pulumi/confluentcloud";
|
|
50
|
+
*
|
|
51
|
+
* const example = new confluentcloud.FlinkMaterializedTable("example", {
|
|
52
|
+
* displayName: "my_materialized_table",
|
|
53
|
+
* kafkaCluster: {
|
|
54
|
+
* id: basic_cluster.id,
|
|
55
|
+
* },
|
|
56
|
+
* query: "SELECT user_id, product_id, price, quantity FROM orders WHERE price > 1000;",
|
|
57
|
+
* });
|
|
58
|
+
* ```
|
|
59
|
+
*
|
|
60
|
+
* ### Stopping and Resuming a Materialized Table
|
|
61
|
+
*
|
|
62
|
+
* A running Materialized Table can be paused and later resumed by toggling the `stopped` attribute. The default value is `false` (running). To stop the table, set `stopped = true` and run `pulumi up`; to resume it, set `stopped = false` (or remove the attribute) and apply again. The Materialized Table is preserved across stop/resume — only its execution is paused.
|
|
63
|
+
*
|
|
64
|
+
* ```typescript
|
|
65
|
+
* import * as pulumi from "@pulumi/pulumi";
|
|
66
|
+
* import * as confluentcloud from "@pulumi/confluentcloud";
|
|
67
|
+
*
|
|
68
|
+
* const example = new confluentcloud.FlinkMaterializedTable("example", {
|
|
69
|
+
* organization: {
|
|
70
|
+
* id: main.id,
|
|
71
|
+
* },
|
|
72
|
+
* environment: {
|
|
73
|
+
* id: staging.id,
|
|
74
|
+
* },
|
|
75
|
+
* computePool: {
|
|
76
|
+
* id: exampleConfluentFlinkComputePool.id,
|
|
77
|
+
* },
|
|
78
|
+
* principal: {
|
|
79
|
+
* id: app_manager_flink.id,
|
|
80
|
+
* },
|
|
81
|
+
* restEndpoint: mainConfluentFlinkRegion.restEndpoint,
|
|
82
|
+
* credentials: {
|
|
83
|
+
* key: flink.id,
|
|
84
|
+
* secret: flink.secret,
|
|
85
|
+
* },
|
|
86
|
+
* displayName: "my_materialized_table",
|
|
87
|
+
* kafkaCluster: {
|
|
88
|
+
* id: basic_cluster.id,
|
|
89
|
+
* },
|
|
90
|
+
* query: "SELECT user_id, product_id, price, quantity FROM orders WHERE price > 1000;",
|
|
91
|
+
* stopped: false,
|
|
92
|
+
* });
|
|
93
|
+
* ```
|
|
94
|
+
*
|
|
95
|
+
* ## Import
|
|
96
|
+
*
|
|
97
|
+
* You can import a Flink Materialized Table by using the Materialized Table name, for example:
|
|
98
|
+
*
|
|
99
|
+
* Option #1: Manage multiple Flink Materialized Tables in the same Pulumi Stack
|
|
100
|
+
*
|
|
101
|
+
* ```sh
|
|
102
|
+
* $ export IMPORT_CONFLUENT_ORGANIZATION_ID="<organization_id>"
|
|
103
|
+
* $ export IMPORT_CONFLUENT_ENVIRONMENT_ID="<environment_id>"
|
|
104
|
+
* $ export IMPORT_FLINK_COMPUTE_POOL_ID="<flink_compute_pool_id>"
|
|
105
|
+
* $ export IMPORT_FLINK_API_KEY="<flink_api_key>"
|
|
106
|
+
* $ export IMPORT_FLINK_API_SECRET="<flink_api_secret>"
|
|
107
|
+
* $ export IMPORT_FLINK_REST_ENDPOINT="<flink_rest_endpoint>"
|
|
108
|
+
* $ export IMPORT_FLINK_PRINCIPAL_ID="<flink_principal_id>"
|
|
109
|
+
* $ pulumi import confluentcloud:index/flinkMaterializedTable:FlinkMaterializedTable example env-abc123/lkc-xyz123/my_materialized_table
|
|
110
|
+
* ```
|
|
111
|
+
*
|
|
112
|
+
* Option #2: Manage a single Flink Materialized Table in the same Pulumi Stack
|
|
113
|
+
*
|
|
114
|
+
* ```sh
|
|
115
|
+
* $ pulumi import confluentcloud:index/flinkMaterializedTable:FlinkMaterializedTable example env-abc123/lkc-xyz123/my_materialized_table
|
|
116
|
+
* ```
|
|
117
|
+
*
|
|
118
|
+
* !> **Warning:** Do not forget to delete terminal command history afterwards for security purposes.
|
|
119
|
+
*/
|
|
120
|
+
export declare class FlinkMaterializedTable extends pulumi.CustomResource {
|
|
121
|
+
/**
|
|
122
|
+
* Get an existing FlinkMaterializedTable resource's state with the given name, ID, and optional extra
|
|
123
|
+
* properties used to qualify the lookup.
|
|
124
|
+
*
|
|
125
|
+
* @param name The _unique_ name of the resulting resource.
|
|
126
|
+
* @param id The _unique_ provider ID of the resource to lookup.
|
|
127
|
+
* @param state Any extra arguments used during the lookup.
|
|
128
|
+
* @param opts Optional settings to control the behavior of the CustomResource.
|
|
129
|
+
*/
|
|
130
|
+
static get(name: string, id: pulumi.Input<pulumi.ID>, state?: FlinkMaterializedTableState, opts?: pulumi.CustomResourceOptions): FlinkMaterializedTable;
|
|
131
|
+
/**
|
|
132
|
+
* Returns true if the given object is an instance of FlinkMaterializedTable. This is designed to work even
|
|
133
|
+
* when multiple copies of the Pulumi SDK have been loaded into the same process.
|
|
134
|
+
*/
|
|
135
|
+
static isInstance(obj: any): obj is FlinkMaterializedTable;
|
|
136
|
+
/**
|
|
137
|
+
* A list of column definitions. Each `columns` block supports the following sub-blocks:
|
|
138
|
+
*/
|
|
139
|
+
readonly columns: pulumi.Output<outputs.FlinkMaterializedTableColumn[] | undefined>;
|
|
140
|
+
readonly computePool: pulumi.Output<outputs.FlinkMaterializedTableComputePool>;
|
|
141
|
+
readonly constraints: pulumi.Output<outputs.FlinkMaterializedTableConstraint[] | undefined>;
|
|
142
|
+
/**
|
|
143
|
+
* The Cluster API Credentials.
|
|
144
|
+
*/
|
|
145
|
+
readonly credentials: pulumi.Output<outputs.FlinkMaterializedTableCredentials | undefined>;
|
|
146
|
+
/**
|
|
147
|
+
* The unique name of the Materialized Table.
|
|
148
|
+
*/
|
|
149
|
+
readonly displayName: pulumi.Output<string>;
|
|
150
|
+
/**
|
|
151
|
+
* The distribution definition for the Materialized Table. Supports the following:
|
|
152
|
+
*/
|
|
153
|
+
readonly distribution: pulumi.Output<outputs.FlinkMaterializedTableDistribution | undefined>;
|
|
154
|
+
readonly environment: pulumi.Output<outputs.FlinkMaterializedTableEnvironment>;
|
|
155
|
+
readonly kafkaCluster: pulumi.Output<outputs.FlinkMaterializedTableKafkaCluster>;
|
|
156
|
+
readonly organization: pulumi.Output<outputs.FlinkMaterializedTableOrganization>;
|
|
157
|
+
readonly principal: pulumi.Output<outputs.FlinkMaterializedTablePrincipal>;
|
|
158
|
+
/**
|
|
159
|
+
* The SQL query that defines the Materialized Table, for example, `SELECT user_id, product_id, price, quantity FROM orders WHERE price > 1000;`.
|
|
160
|
+
*/
|
|
161
|
+
readonly query: pulumi.Output<string | undefined>;
|
|
162
|
+
/**
|
|
163
|
+
* The REST endpoint of the Flink region, for example, `https://flink.us-east-1.aws.confluent.cloud`.
|
|
164
|
+
*/
|
|
165
|
+
readonly restEndpoint: pulumi.Output<string | undefined>;
|
|
166
|
+
/**
|
|
167
|
+
* Indicates whether the Materialized Table is stopped. Defaults to `false`. Update it to `true` to stop the Materialized Table. Subsequently update it to `false` to resume it.
|
|
168
|
+
*/
|
|
169
|
+
readonly stopped: pulumi.Output<boolean | undefined>;
|
|
170
|
+
/**
|
|
171
|
+
* The watermark definition for the Materialized Table. Supports the following:
|
|
172
|
+
*/
|
|
173
|
+
readonly watermark: pulumi.Output<outputs.FlinkMaterializedTableWatermark | undefined>;
|
|
174
|
+
/**
|
|
175
|
+
* Create a FlinkMaterializedTable resource with the given unique name, arguments, and options.
|
|
176
|
+
*
|
|
177
|
+
* @param name The _unique_ name of the resource.
|
|
178
|
+
* @param args The arguments to use to populate this resource's properties.
|
|
179
|
+
* @param opts A bag of options that control this resource's behavior.
|
|
180
|
+
*/
|
|
181
|
+
constructor(name: string, args: FlinkMaterializedTableArgs, opts?: pulumi.CustomResourceOptions);
|
|
182
|
+
}
|
|
183
|
+
/**
|
|
184
|
+
* Input properties used for looking up and filtering FlinkMaterializedTable resources.
|
|
185
|
+
*/
|
|
186
|
+
export interface FlinkMaterializedTableState {
|
|
187
|
+
/**
|
|
188
|
+
* A list of column definitions. Each `columns` block supports the following sub-blocks:
|
|
189
|
+
*/
|
|
190
|
+
columns?: pulumi.Input<pulumi.Input<inputs.FlinkMaterializedTableColumn>[]>;
|
|
191
|
+
computePool?: pulumi.Input<inputs.FlinkMaterializedTableComputePool>;
|
|
192
|
+
constraints?: pulumi.Input<pulumi.Input<inputs.FlinkMaterializedTableConstraint>[]>;
|
|
193
|
+
/**
|
|
194
|
+
* The Cluster API Credentials.
|
|
195
|
+
*/
|
|
196
|
+
credentials?: pulumi.Input<inputs.FlinkMaterializedTableCredentials>;
|
|
197
|
+
/**
|
|
198
|
+
* The unique name of the Materialized Table.
|
|
199
|
+
*/
|
|
200
|
+
displayName?: pulumi.Input<string>;
|
|
201
|
+
/**
|
|
202
|
+
* The distribution definition for the Materialized Table. Supports the following:
|
|
203
|
+
*/
|
|
204
|
+
distribution?: pulumi.Input<inputs.FlinkMaterializedTableDistribution>;
|
|
205
|
+
environment?: pulumi.Input<inputs.FlinkMaterializedTableEnvironment>;
|
|
206
|
+
kafkaCluster?: pulumi.Input<inputs.FlinkMaterializedTableKafkaCluster>;
|
|
207
|
+
organization?: pulumi.Input<inputs.FlinkMaterializedTableOrganization>;
|
|
208
|
+
principal?: pulumi.Input<inputs.FlinkMaterializedTablePrincipal>;
|
|
209
|
+
/**
|
|
210
|
+
* The SQL query that defines the Materialized Table, for example, `SELECT user_id, product_id, price, quantity FROM orders WHERE price > 1000;`.
|
|
211
|
+
*/
|
|
212
|
+
query?: pulumi.Input<string>;
|
|
213
|
+
/**
|
|
214
|
+
* The REST endpoint of the Flink region, for example, `https://flink.us-east-1.aws.confluent.cloud`.
|
|
215
|
+
*/
|
|
216
|
+
restEndpoint?: pulumi.Input<string>;
|
|
217
|
+
/**
|
|
218
|
+
* Indicates whether the Materialized Table is stopped. Defaults to `false`. Update it to `true` to stop the Materialized Table. Subsequently update it to `false` to resume it.
|
|
219
|
+
*/
|
|
220
|
+
stopped?: pulumi.Input<boolean>;
|
|
221
|
+
/**
|
|
222
|
+
* The watermark definition for the Materialized Table. Supports the following:
|
|
223
|
+
*/
|
|
224
|
+
watermark?: pulumi.Input<inputs.FlinkMaterializedTableWatermark>;
|
|
225
|
+
}
|
|
226
|
+
/**
|
|
227
|
+
* The set of arguments for constructing a FlinkMaterializedTable resource.
|
|
228
|
+
*/
|
|
229
|
+
export interface FlinkMaterializedTableArgs {
|
|
230
|
+
/**
|
|
231
|
+
* A list of column definitions. Each `columns` block supports the following sub-blocks:
|
|
232
|
+
*/
|
|
233
|
+
columns?: pulumi.Input<pulumi.Input<inputs.FlinkMaterializedTableColumn>[]>;
|
|
234
|
+
computePool?: pulumi.Input<inputs.FlinkMaterializedTableComputePool>;
|
|
235
|
+
constraints?: pulumi.Input<pulumi.Input<inputs.FlinkMaterializedTableConstraint>[]>;
|
|
236
|
+
/**
|
|
237
|
+
* The Cluster API Credentials.
|
|
238
|
+
*/
|
|
239
|
+
credentials?: pulumi.Input<inputs.FlinkMaterializedTableCredentials>;
|
|
240
|
+
/**
|
|
241
|
+
* The unique name of the Materialized Table.
|
|
242
|
+
*/
|
|
243
|
+
displayName: pulumi.Input<string>;
|
|
244
|
+
/**
|
|
245
|
+
* The distribution definition for the Materialized Table. Supports the following:
|
|
246
|
+
*/
|
|
247
|
+
distribution?: pulumi.Input<inputs.FlinkMaterializedTableDistribution>;
|
|
248
|
+
environment?: pulumi.Input<inputs.FlinkMaterializedTableEnvironment>;
|
|
249
|
+
kafkaCluster: pulumi.Input<inputs.FlinkMaterializedTableKafkaCluster>;
|
|
250
|
+
organization?: pulumi.Input<inputs.FlinkMaterializedTableOrganization>;
|
|
251
|
+
principal?: pulumi.Input<inputs.FlinkMaterializedTablePrincipal>;
|
|
252
|
+
/**
|
|
253
|
+
* The SQL query that defines the Materialized Table, for example, `SELECT user_id, product_id, price, quantity FROM orders WHERE price > 1000;`.
|
|
254
|
+
*/
|
|
255
|
+
query?: pulumi.Input<string>;
|
|
256
|
+
/**
|
|
257
|
+
* The REST endpoint of the Flink region, for example, `https://flink.us-east-1.aws.confluent.cloud`.
|
|
258
|
+
*/
|
|
259
|
+
restEndpoint?: pulumi.Input<string>;
|
|
260
|
+
/**
|
|
261
|
+
* Indicates whether the Materialized Table is stopped. Defaults to `false`. Update it to `true` to stop the Materialized Table. Subsequently update it to `false` to resume it.
|
|
262
|
+
*/
|
|
263
|
+
stopped?: pulumi.Input<boolean>;
|
|
264
|
+
/**
|
|
265
|
+
* The watermark definition for the Materialized Table. Supports the following:
|
|
266
|
+
*/
|
|
267
|
+
watermark?: pulumi.Input<inputs.FlinkMaterializedTableWatermark>;
|
|
268
|
+
}
|
|
@@ -0,0 +1,199 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
// *** WARNING: this file was generated by pulumi-language-nodejs. ***
|
|
3
|
+
// *** Do not edit by hand unless you're certain you know what you are doing! ***
|
|
4
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
5
|
+
exports.FlinkMaterializedTable = void 0;
|
|
6
|
+
const pulumi = require("@pulumi/pulumi");
|
|
7
|
+
const utilities = require("./utilities");
|
|
8
|
+
/**
|
|
9
|
+
* [](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy)
|
|
10
|
+
*
|
|
11
|
+
* `confluentcloud.FlinkMaterializedTable` provides a Flink Materialized Table resource that enables creating, editing, and deleting Flink Materialized Tables on Confluent Cloud.
|
|
12
|
+
*
|
|
13
|
+
* > **Note:** It is recommended to set `lifecycle { preventDestroy = true }` on production instances to prevent accidental Flink Materialized Table deletion. This setting rejects plans that would destroy or recreate the Flink Materialized Table, such as attempting to change uneditable attributes. Read more about it in the Terraform docs.
|
|
14
|
+
*
|
|
15
|
+
* ## Example Usage
|
|
16
|
+
*
|
|
17
|
+
* ### Option #1: Manage multiple Flink Materialized Tables in the same Pulumi Stack
|
|
18
|
+
*
|
|
19
|
+
* ```typescript
|
|
20
|
+
* import * as pulumi from "@pulumi/pulumi";
|
|
21
|
+
* import * as confluentcloud from "@pulumi/confluentcloud";
|
|
22
|
+
*
|
|
23
|
+
* const example = new confluentcloud.FlinkMaterializedTable("example", {
|
|
24
|
+
* organization: {
|
|
25
|
+
* id: main.id,
|
|
26
|
+
* },
|
|
27
|
+
* environment: {
|
|
28
|
+
* id: staging.id,
|
|
29
|
+
* },
|
|
30
|
+
* computePool: {
|
|
31
|
+
* id: exampleConfluentFlinkComputePool.id,
|
|
32
|
+
* },
|
|
33
|
+
* principal: {
|
|
34
|
+
* id: app_manager_flink.id,
|
|
35
|
+
* },
|
|
36
|
+
* restEndpoint: mainConfluentFlinkRegion.restEndpoint,
|
|
37
|
+
* credentials: {
|
|
38
|
+
* key: flink.id,
|
|
39
|
+
* secret: flink.secret,
|
|
40
|
+
* },
|
|
41
|
+
* displayName: "my_materialized_table",
|
|
42
|
+
* kafkaCluster: {
|
|
43
|
+
* id: basic_cluster.id,
|
|
44
|
+
* },
|
|
45
|
+
* query: "SELECT user_id, product_id, price, quantity FROM orders WHERE price > 1000;",
|
|
46
|
+
* });
|
|
47
|
+
* ```
|
|
48
|
+
*
|
|
49
|
+
* ### Option #2: Manage a single Flink Materialized Table in the same Pulumi Stack
|
|
50
|
+
*
|
|
51
|
+
* ```typescript
|
|
52
|
+
* import * as pulumi from "@pulumi/pulumi";
|
|
53
|
+
* import * as confluentcloud from "@pulumi/confluentcloud";
|
|
54
|
+
*
|
|
55
|
+
* const example = new confluentcloud.FlinkMaterializedTable("example", {
|
|
56
|
+
* displayName: "my_materialized_table",
|
|
57
|
+
* kafkaCluster: {
|
|
58
|
+
* id: basic_cluster.id,
|
|
59
|
+
* },
|
|
60
|
+
* query: "SELECT user_id, product_id, price, quantity FROM orders WHERE price > 1000;",
|
|
61
|
+
* });
|
|
62
|
+
* ```
|
|
63
|
+
*
|
|
64
|
+
* ### Stopping and Resuming a Materialized Table
|
|
65
|
+
*
|
|
66
|
+
* A running Materialized Table can be paused and later resumed by toggling the `stopped` attribute. The default value is `false` (running). To stop the table, set `stopped = true` and run `pulumi up`; to resume it, set `stopped = false` (or remove the attribute) and apply again. The Materialized Table is preserved across stop/resume — only its execution is paused.
|
|
67
|
+
*
|
|
68
|
+
* ```typescript
|
|
69
|
+
* import * as pulumi from "@pulumi/pulumi";
|
|
70
|
+
* import * as confluentcloud from "@pulumi/confluentcloud";
|
|
71
|
+
*
|
|
72
|
+
* const example = new confluentcloud.FlinkMaterializedTable("example", {
|
|
73
|
+
* organization: {
|
|
74
|
+
* id: main.id,
|
|
75
|
+
* },
|
|
76
|
+
* environment: {
|
|
77
|
+
* id: staging.id,
|
|
78
|
+
* },
|
|
79
|
+
* computePool: {
|
|
80
|
+
* id: exampleConfluentFlinkComputePool.id,
|
|
81
|
+
* },
|
|
82
|
+
* principal: {
|
|
83
|
+
* id: app_manager_flink.id,
|
|
84
|
+
* },
|
|
85
|
+
* restEndpoint: mainConfluentFlinkRegion.restEndpoint,
|
|
86
|
+
* credentials: {
|
|
87
|
+
* key: flink.id,
|
|
88
|
+
* secret: flink.secret,
|
|
89
|
+
* },
|
|
90
|
+
* displayName: "my_materialized_table",
|
|
91
|
+
* kafkaCluster: {
|
|
92
|
+
* id: basic_cluster.id,
|
|
93
|
+
* },
|
|
94
|
+
* query: "SELECT user_id, product_id, price, quantity FROM orders WHERE price > 1000;",
|
|
95
|
+
* stopped: false,
|
|
96
|
+
* });
|
|
97
|
+
* ```
|
|
98
|
+
*
|
|
99
|
+
* ## Import
|
|
100
|
+
*
|
|
101
|
+
* You can import a Flink Materialized Table by using the Materialized Table name, for example:
|
|
102
|
+
*
|
|
103
|
+
* Option #1: Manage multiple Flink Materialized Tables in the same Pulumi Stack
|
|
104
|
+
*
|
|
105
|
+
* ```sh
|
|
106
|
+
* $ export IMPORT_CONFLUENT_ORGANIZATION_ID="<organization_id>"
|
|
107
|
+
* $ export IMPORT_CONFLUENT_ENVIRONMENT_ID="<environment_id>"
|
|
108
|
+
* $ export IMPORT_FLINK_COMPUTE_POOL_ID="<flink_compute_pool_id>"
|
|
109
|
+
* $ export IMPORT_FLINK_API_KEY="<flink_api_key>"
|
|
110
|
+
* $ export IMPORT_FLINK_API_SECRET="<flink_api_secret>"
|
|
111
|
+
* $ export IMPORT_FLINK_REST_ENDPOINT="<flink_rest_endpoint>"
|
|
112
|
+
* $ export IMPORT_FLINK_PRINCIPAL_ID="<flink_principal_id>"
|
|
113
|
+
* $ pulumi import confluentcloud:index/flinkMaterializedTable:FlinkMaterializedTable example env-abc123/lkc-xyz123/my_materialized_table
|
|
114
|
+
* ```
|
|
115
|
+
*
|
|
116
|
+
* Option #2: Manage a single Flink Materialized Table in the same Pulumi Stack
|
|
117
|
+
*
|
|
118
|
+
* ```sh
|
|
119
|
+
* $ pulumi import confluentcloud:index/flinkMaterializedTable:FlinkMaterializedTable example env-abc123/lkc-xyz123/my_materialized_table
|
|
120
|
+
* ```
|
|
121
|
+
*
|
|
122
|
+
* !> **Warning:** Do not forget to delete terminal command history afterwards for security purposes.
|
|
123
|
+
*/
|
|
124
|
+
class FlinkMaterializedTable extends pulumi.CustomResource {
|
|
125
|
+
/**
|
|
126
|
+
* Get an existing FlinkMaterializedTable resource's state with the given name, ID, and optional extra
|
|
127
|
+
* properties used to qualify the lookup.
|
|
128
|
+
*
|
|
129
|
+
* @param name The _unique_ name of the resulting resource.
|
|
130
|
+
* @param id The _unique_ provider ID of the resource to lookup.
|
|
131
|
+
* @param state Any extra arguments used during the lookup.
|
|
132
|
+
* @param opts Optional settings to control the behavior of the CustomResource.
|
|
133
|
+
*/
|
|
134
|
+
static get(name, id, state, opts) {
|
|
135
|
+
return new FlinkMaterializedTable(name, state, { ...opts, id: id });
|
|
136
|
+
}
|
|
137
|
+
/**
|
|
138
|
+
* Returns true if the given object is an instance of FlinkMaterializedTable. This is designed to work even
|
|
139
|
+
* when multiple copies of the Pulumi SDK have been loaded into the same process.
|
|
140
|
+
*/
|
|
141
|
+
static isInstance(obj) {
|
|
142
|
+
if (obj === undefined || obj === null) {
|
|
143
|
+
return false;
|
|
144
|
+
}
|
|
145
|
+
return obj['__pulumiType'] === FlinkMaterializedTable.__pulumiType;
|
|
146
|
+
}
|
|
147
|
+
constructor(name, argsOrState, opts) {
|
|
148
|
+
let resourceInputs = {};
|
|
149
|
+
opts = opts || {};
|
|
150
|
+
if (opts.id) {
|
|
151
|
+
const state = argsOrState;
|
|
152
|
+
resourceInputs["columns"] = state?.columns;
|
|
153
|
+
resourceInputs["computePool"] = state?.computePool;
|
|
154
|
+
resourceInputs["constraints"] = state?.constraints;
|
|
155
|
+
resourceInputs["credentials"] = state?.credentials;
|
|
156
|
+
resourceInputs["displayName"] = state?.displayName;
|
|
157
|
+
resourceInputs["distribution"] = state?.distribution;
|
|
158
|
+
resourceInputs["environment"] = state?.environment;
|
|
159
|
+
resourceInputs["kafkaCluster"] = state?.kafkaCluster;
|
|
160
|
+
resourceInputs["organization"] = state?.organization;
|
|
161
|
+
resourceInputs["principal"] = state?.principal;
|
|
162
|
+
resourceInputs["query"] = state?.query;
|
|
163
|
+
resourceInputs["restEndpoint"] = state?.restEndpoint;
|
|
164
|
+
resourceInputs["stopped"] = state?.stopped;
|
|
165
|
+
resourceInputs["watermark"] = state?.watermark;
|
|
166
|
+
}
|
|
167
|
+
else {
|
|
168
|
+
const args = argsOrState;
|
|
169
|
+
if (args?.displayName === undefined && !opts.urn) {
|
|
170
|
+
throw new Error("Missing required property 'displayName'");
|
|
171
|
+
}
|
|
172
|
+
if (args?.kafkaCluster === undefined && !opts.urn) {
|
|
173
|
+
throw new Error("Missing required property 'kafkaCluster'");
|
|
174
|
+
}
|
|
175
|
+
resourceInputs["columns"] = args?.columns;
|
|
176
|
+
resourceInputs["computePool"] = args?.computePool;
|
|
177
|
+
resourceInputs["constraints"] = args?.constraints;
|
|
178
|
+
resourceInputs["credentials"] = args?.credentials ? pulumi.secret(args.credentials) : undefined;
|
|
179
|
+
resourceInputs["displayName"] = args?.displayName;
|
|
180
|
+
resourceInputs["distribution"] = args?.distribution;
|
|
181
|
+
resourceInputs["environment"] = args?.environment;
|
|
182
|
+
resourceInputs["kafkaCluster"] = args?.kafkaCluster;
|
|
183
|
+
resourceInputs["organization"] = args?.organization;
|
|
184
|
+
resourceInputs["principal"] = args?.principal;
|
|
185
|
+
resourceInputs["query"] = args?.query;
|
|
186
|
+
resourceInputs["restEndpoint"] = args?.restEndpoint;
|
|
187
|
+
resourceInputs["stopped"] = args?.stopped;
|
|
188
|
+
resourceInputs["watermark"] = args?.watermark;
|
|
189
|
+
}
|
|
190
|
+
opts = pulumi.mergeOptions(utilities.resourceOptsDefaults(), opts);
|
|
191
|
+
const secretOpts = { additionalSecretOutputs: ["credentials"] };
|
|
192
|
+
opts = pulumi.mergeOptions(opts, secretOpts);
|
|
193
|
+
super(FlinkMaterializedTable.__pulumiType, name, resourceInputs, opts);
|
|
194
|
+
}
|
|
195
|
+
}
|
|
196
|
+
exports.FlinkMaterializedTable = FlinkMaterializedTable;
|
|
197
|
+
/** @internal */
|
|
198
|
+
FlinkMaterializedTable.__pulumiType = 'confluentcloud:index/flinkMaterializedTable:FlinkMaterializedTable';
|
|
199
|
+
//# sourceMappingURL=flinkMaterializedTable.js.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"flinkMaterializedTable.js","sourceRoot":"","sources":["../flinkMaterializedTable.ts"],"names":[],"mappings":";AAAA,sEAAsE;AACtE,iFAAiF;;;AAEjF,yCAAyC;AAGzC,yCAAyC;AAEzC;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;GAmHG;AACH,MAAa,sBAAuB,SAAQ,MAAM,CAAC,cAAc;IAC7D;;;;;;;;OAQG;IACI,MAAM,CAAC,GAAG,CAAC,IAAY,EAAE,EAA2B,EAAE,KAAmC,EAAE,IAAmC;QACjI,OAAO,IAAI,sBAAsB,CAAC,IAAI,EAAO,KAAK,EAAE,EAAE,GAAG,IAAI,EAAE,EAAE,EAAE,EAAE,EAAE,CAAC,CAAC;IAC7E,CAAC;IAKD;;;OAGG;IACI,MAAM,CAAC,UAAU,CAAC,GAAQ;QAC7B,IAAI,GAAG,KAAK,SAAS,IAAI,GAAG,KAAK,IAAI,EAAE;YACnC,OAAO,KAAK,CAAC;SAChB;QACD,OAAO,GAAG,CAAC,cAAc,CAAC,KAAK,sBAAsB,CAAC,YAAY,CAAC;IACvE,CAAC;IAiDD,YAAY,IAAY,EAAE,WAAsE,EAAE,IAAmC;QACjI,IAAI,cAAc,GAAkB,EAAE,CAAC;QACvC,IAAI,GAAG,IAAI,IAAI,EAAE,CAAC;QAClB,IAAI,IAAI,CAAC,EAAE,EAAE;YACT,MAAM,KAAK,GAAG,WAAsD,CAAC;YACrE,cAAc,CAAC,SAAS,CAAC,GAAG,KAAK,EAAE,OAAO,CAAC;YAC3C,cAAc,CAAC,aAAa,CAAC,GAAG,KAAK,EAAE,WAAW,CAAC;YACnD,cAAc,CAAC,aAAa,CAAC,GAAG,KAAK,EAAE,WAAW,CAAC;YACnD,cAAc,CAAC,aAAa,CAAC,GAAG,KAAK,EAAE,WAAW,CAAC;YACnD,cAAc,CAAC,aAAa,CAAC,GAAG,KAAK,EAAE,WAAW,CAAC;YACnD,cAAc,CAAC,cAAc,CAAC,GAAG,KAAK,EAAE,YAAY,CAAC;YACrD,cAAc,CAAC,aAAa,CAAC,GAAG,KAAK,EAAE,WAAW,CAAC;YACnD,cAAc,CAAC,cAAc,CAAC,GAAG,KAAK,EAAE,YAAY,CAAC;YACrD,cAAc,CAAC,cAAc,CAAC,GAAG,KAAK,EAAE,YAAY,CAAC;YACrD,cAAc,CAAC,WAAW,CAAC,GAAG,KAAK,EAAE,SAAS,CAAC;YAC/C,cAAc,CAAC,OAAO,CAAC,GAAG,KAAK,EAAE,KAAK,CAAC;YACvC,cAAc,CAAC,cAAc,CAAC,GAAG,KAAK,EAAE,YAAY,CAAC;YACrD,cAAc,CAAC,SAAS,CAAC,GAAG,KAAK,EAAE,OAAO,CAAC;YAC3C,cAAc,CAAC,WAAW,CAAC,GAAG,KAAK,EAAE,SAAS,CAAC;SAClD;aAAM;YACH,MAAM,IAAI,GAAG,WAAqD,CAAC;YACnE,IAAI,IAAI,EAAE,WAAW,KAAK,SAAS,IAAI,CAAC,IAAI,CAAC,GAAG,EAAE;gBAC9C,MAAM,IAAI,KAAK,CAAC,yCAAyC,CAAC,CAAC;aAC9D;YACD,IAAI,IAAI,EAAE,YAAY,KAAK,SAAS,IAAI,CAAC,IAAI,CAAC,GAAG,EAAE;gBAC/C,MAAM,IAAI,KAAK,CAAC,0CAA0C,CAAC,CAAC;aAC/D;YACD,cAAc,CAAC,SAAS,CAAC,GAAG,IAAI,EAAE,OAAO,CAAC;YAC1C,cAAc,CAAC,aAAa,CAAC,GAAG,IAAI,EAAE,WAAW,CAAC;YAClD,cAAc,CAAC,aAA
a,CAAC,GAAG,IAAI,EAAE,WAAW,CAAC;YAClD,cAAc,CAAC,aAAa,CAAC,GAAG,IAAI,EAAE,WAAW,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,CAAC,IAAI,CAAC,WAAW,CAAC,CAAC,CAAC,CAAC,SAAS,CAAC;YAChG,cAAc,CAAC,aAAa,CAAC,GAAG,IAAI,EAAE,WAAW,CAAC;YAClD,cAAc,CAAC,cAAc,CAAC,GAAG,IAAI,EAAE,YAAY,CAAC;YACpD,cAAc,CAAC,aAAa,CAAC,GAAG,IAAI,EAAE,WAAW,CAAC;YAClD,cAAc,CAAC,cAAc,CAAC,GAAG,IAAI,EAAE,YAAY,CAAC;YACpD,cAAc,CAAC,cAAc,CAAC,GAAG,IAAI,EAAE,YAAY,CAAC;YACpD,cAAc,CAAC,WAAW,CAAC,GAAG,IAAI,EAAE,SAAS,CAAC;YAC9C,cAAc,CAAC,OAAO,CAAC,GAAG,IAAI,EAAE,KAAK,CAAC;YACtC,cAAc,CAAC,cAAc,CAAC,GAAG,IAAI,EAAE,YAAY,CAAC;YACpD,cAAc,CAAC,SAAS,CAAC,GAAG,IAAI,EAAE,OAAO,CAAC;YAC1C,cAAc,CAAC,WAAW,CAAC,GAAG,IAAI,EAAE,SAAS,CAAC;SACjD;QACD,IAAI,GAAG,MAAM,CAAC,YAAY,CAAC,SAAS,CAAC,oBAAoB,EAAE,EAAE,IAAI,CAAC,CAAC;QACnE,MAAM,UAAU,GAAG,EAAE,uBAAuB,EAAE,CAAC,aAAa,CAAC,EAAE,CAAC;QAChE,IAAI,GAAG,MAAM,CAAC,YAAY,CAAC,IAAI,EAAE,UAAU,CAAC,CAAC;QAC7C,KAAK,CAAC,sBAAsB,CAAC,YAAY,EAAE,IAAI,EAAE,cAAc,EAAE,IAAI,CAAC,CAAC;IAC3E,CAAC;;AAzHL,wDA0HC;AA5GG,gBAAgB;AACO,mCAAY,GAAG,oEAAoE,CAAC"}
|
|
@@ -0,0 +1,175 @@
|
|
|
1
|
+
import * as pulumi from "@pulumi/pulumi";
|
|
2
|
+
import * as inputs from "./types/input";
|
|
3
|
+
import * as outputs from "./types/output";
|
|
4
|
+
/**
|
|
5
|
+
* [](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy)
|
|
6
|
+
*
|
|
7
|
+
* `confluentcloud.FlinkMaterializedTable` describes a Flink Materialized Table data source.
|
|
8
|
+
*
|
|
9
|
+
* ## Example Usage
|
|
10
|
+
*
|
|
11
|
+
* ### Option #1: Manage multiple Flink Materialized Tables in the same Pulumi Stack
|
|
12
|
+
*
|
|
13
|
+
* ```typescript
|
|
14
|
+
* import * as pulumi from "@pulumi/pulumi";
|
|
15
|
+
* import * as confluentcloud from "@pulumi/confluentcloud";
|
|
16
|
+
*
|
|
17
|
+
* const example = confluentcloud.getFlinkMaterializedTable({
|
|
18
|
+
* organization: {
|
|
19
|
+
* id: main.id,
|
|
20
|
+
* },
|
|
21
|
+
* environment: {
|
|
22
|
+
* id: staging.id,
|
|
23
|
+
* },
|
|
24
|
+
* computePool: {
|
|
25
|
+
* id: exampleConfluentFlinkComputePool.id,
|
|
26
|
+
* },
|
|
27
|
+
* restEndpoint: mainConfluentFlinkRegion.restEndpoint,
|
|
28
|
+
* credentials: {
|
|
29
|
+
* key: flink.id,
|
|
30
|
+
* secret: flink.secret,
|
|
31
|
+
* },
|
|
32
|
+
* displayName: "my_materialized_table",
|
|
33
|
+
* });
|
|
34
|
+
* export const materializedTableOutput = example;
|
|
35
|
+
* ```
|
|
36
|
+
*
|
|
37
|
+
* ### Option #2: Manage a single Flink Materialized Table in the same Pulumi Stack
|
|
38
|
+
*
|
|
39
|
+
* ```typescript
|
|
40
|
+
* import * as pulumi from "@pulumi/pulumi";
|
|
41
|
+
* import * as confluentcloud from "@pulumi/confluentcloud";
|
|
42
|
+
*
|
|
43
|
+
* const example = confluentcloud.getFlinkMaterializedTable({
|
|
44
|
+
* displayName: "my_materialized_table",
|
|
45
|
+
* });
|
|
46
|
+
* export const materializedTableOutput = example;
|
|
47
|
+
* ```
|
|
48
|
+
*/
|
|
49
|
+
export declare function getFlinkMaterializedTable(args: GetFlinkMaterializedTableArgs, opts?: pulumi.InvokeOptions): Promise<GetFlinkMaterializedTableResult>;
|
|
50
|
+
/**
|
|
51
|
+
* A collection of arguments for invoking getFlinkMaterializedTable.
|
|
52
|
+
*/
|
|
53
|
+
export interface GetFlinkMaterializedTableArgs {
|
|
54
|
+
computePool?: inputs.GetFlinkMaterializedTableComputePool;
|
|
55
|
+
credentials?: inputs.GetFlinkMaterializedTableCredentials;
|
|
56
|
+
/**
|
|
57
|
+
* The unique name of the Materialized Table.
|
|
58
|
+
*/
|
|
59
|
+
displayName: string;
|
|
60
|
+
environment?: inputs.GetFlinkMaterializedTableEnvironment;
|
|
61
|
+
organization?: inputs.GetFlinkMaterializedTableOrganization;
|
|
62
|
+
principal?: inputs.GetFlinkMaterializedTablePrincipal;
|
|
63
|
+
/**
|
|
64
|
+
* The REST endpoint of the Flink region, for example, `https://flink.us-east-1.aws.confluent.cloud`.
|
|
65
|
+
*/
|
|
66
|
+
restEndpoint?: string;
|
|
67
|
+
}
|
|
68
|
+
/**
 * A collection of values returned by getFlinkMaterializedTable.
 */
export interface GetFlinkMaterializedTableResult {
    /**
     * (List of Blocks) The columns of the Materialized Table.
     * NOTE(review): the upstream doc read "(Set of Strings) The column names of the
     * constraint", which does not match the `GetFlinkMaterializedTableColumn[]`
     * element type — verify the intended description against the provider schema.
     */
    readonly columns: outputs.GetFlinkMaterializedTableColumn[];
    /** The Flink Compute Pool the lookup was scoped to. */
    readonly computePool: outputs.GetFlinkMaterializedTableComputePool;
    /**
     * (List of Blocks) The table constraints of the Materialized Table. Each `constraints` block supports the following:
     */
    readonly constraints: outputs.GetFlinkMaterializedTableConstraint[];
    /** Credentials for the lookup — optional in the result; NOTE(review): presumably present only when supplied in the arguments. */
    readonly credentials?: outputs.GetFlinkMaterializedTableCredentials;
    /** (String) The unique name of the Materialized Table. */
    readonly displayName: string;
    /**
     * (Configuration Block) The distribution definition for the Materialized Table. Supports the following:
     */
    readonly distributions: outputs.GetFlinkMaterializedTableDistribution[];
    /** The Environment the lookup was scoped to. */
    readonly environment: outputs.GetFlinkMaterializedTableEnvironment;
    /**
     * The provider-assigned unique ID for this managed resource.
     */
    readonly id: string;
    /**
     * (Configuration Block) supports the following:
     */
    readonly kafkaClusters: outputs.GetFlinkMaterializedTableKafkaCluster[];
    /** The Organization the lookup was scoped to. */
    readonly organization: outputs.GetFlinkMaterializedTableOrganization;
    /** The Flink principal associated with the Materialized Table. */
    readonly principal: outputs.GetFlinkMaterializedTablePrincipal;
    /**
     * (String) The SQL query that defines the Materialized Table.
     */
    readonly query: string;
    /** The REST endpoint of the Flink region — optional in the result; NOTE(review): presumably present only when supplied in the arguments. */
    readonly restEndpoint?: string;
    /**
     * (Boolean) Whether the Materialized Table is stopped.
     */
    readonly stopped: boolean;
    /**
     * (Configuration Block) The watermark definition for the Materialized Table. Supports the following:
     */
    readonly watermarks: outputs.GetFlinkMaterializedTableWatermark[];
}
|
|
112
|
+
/**
 * [](https://docs.confluent.io/cloud/current/api.html#section/Versioning/API-Lifecycle-Policy)
 *
 * `confluentcloud.getFlinkMaterializedTable` describes a Flink Materialized Table data source.
 *
 * This is the `pulumi.Output`-returning variant of `getFlinkMaterializedTable`, for use
 * when the arguments are themselves Outputs of other resources.
 *
 * ## Example Usage
 *
 * ### Option #1: Manage multiple Flink Materialized Tables in the same Pulumi Stack
 *
 * ```typescript
 * import * as pulumi from "@pulumi/pulumi";
 * import * as confluentcloud from "@pulumi/confluentcloud";
 *
 * const example = confluentcloud.getFlinkMaterializedTable({
 *     organization: {
 *         id: main.id,
 *     },
 *     environment: {
 *         id: staging.id,
 *     },
 *     computePool: {
 *         id: exampleConfluentFlinkComputePool.id,
 *     },
 *     restEndpoint: mainConfluentFlinkRegion.restEndpoint,
 *     credentials: {
 *         key: flink.id,
 *         secret: flink.secret,
 *     },
 *     displayName: "my_materialized_table",
 * });
 * export const materializedTableOutput = example;
 * ```
 *
 * ### Option #2: Manage a single Flink Materialized Table in the same Pulumi Stack
 *
 * ```typescript
 * import * as pulumi from "@pulumi/pulumi";
 * import * as confluentcloud from "@pulumi/confluentcloud";
 *
 * const example = confluentcloud.getFlinkMaterializedTable({
 *     displayName: "my_materialized_table",
 * });
 * export const materializedTableOutput = example;
 * ```
 */
export declare function getFlinkMaterializedTableOutput(args: GetFlinkMaterializedTableOutputArgs, opts?: pulumi.InvokeOutputOptions): pulumi.Output<GetFlinkMaterializedTableResult>;
|
|
158
|
+
/**
 * A collection of arguments for invoking getFlinkMaterializedTable.
 * Mirrors GetFlinkMaterializedTableArgs with each field accepted as a `pulumi.Input`.
 */
export interface GetFlinkMaterializedTableOutputArgs {
    /** The Flink Compute Pool scope of the lookup (referenced by `id` in the usage examples). */
    computePool?: pulumi.Input<inputs.GetFlinkMaterializedTableComputePoolArgs>;
    /** Flink API `key`/`secret` pair used to authenticate the lookup (see the usage examples). */
    credentials?: pulumi.Input<inputs.GetFlinkMaterializedTableCredentialsArgs>;
    /**
     * The unique name of the Materialized Table.
     */
    displayName: pulumi.Input<string>;
    /** The Environment scope of the lookup (referenced by `id` in the usage examples). */
    environment?: pulumi.Input<inputs.GetFlinkMaterializedTableEnvironmentArgs>;
    /** The Organization scope of the lookup (referenced by `id` in the usage examples). */
    organization?: pulumi.Input<inputs.GetFlinkMaterializedTableOrganizationArgs>;
    /** The Flink principal for the lookup — NOTE(review): not shown in the examples; confirm semantics against the provider docs. */
    principal?: pulumi.Input<inputs.GetFlinkMaterializedTablePrincipalArgs>;
    /**
     * The REST endpoint of the Flink region, for example, `https://flink.us-east-1.aws.confluent.cloud`.
     */
    restEndpoint?: pulumi.Input<string>;
}
|