pulumi-confluentcloud 2.42.0__py3-none-any.whl → 2.54.0a1766503424__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -446,6 +446,120 @@ class Schema(pulumi.CustomResource):
  """
  ## Example Usage

+ ### Option #1: Manage multiple Schema Registry clusters in the same Pulumi Stack
+
+ ```python
+ import pulumi
+ import pulumi_confluentcloud as confluentcloud
+ import pulumi_std as std
+
+ avro_purchase = confluentcloud.Schema("avro-purchase",
+     schema_registry_cluster={
+         "id": essentials["id"],
+     },
+     rest_endpoint=essentials["restEndpoint"],
+     subject_name="avro-purchase-value",
+     format="AVRO",
+     schema=std.index.file(input="./schemas/avro/purchase.avsc")["result"],
+     credentials={
+         "key": "<Schema Registry API Key for data.confluent_schema_registry_cluster.essentials>",
+         "secret": "<Schema Registry API Secret for data.confluent_schema_registry_cluster.essentials>",
+     })
+ ```
+
+ ### Option #2: Manage a single Schema Registry cluster in the same Pulumi Stack
+
+ ```python
+ import pulumi
+ import pulumi_confluentcloud as confluentcloud
+ import pulumi_std as std
+
+ avro_purchase = confluentcloud.Schema("avro-purchase",
+     subject_name="avro-purchase-value",
+     format="AVRO",
+     schema=std.index.file(input="./schemas/avro/purchase.avsc")["result"])
+ ```
+
+ ## Getting Started
+
+ The following end-to-end examples might help you get started with the `Schema` resource:
+ * single-event-types-avro-schema
+ * single-event-types-proto-schema
+ * single-event-types-proto-schema-with-alias
+ * multiple-event-types-avro-schema
+ * multiple-event-types-proto-schema
+ * field-level-encryption-schema
+
+ ## Additional Examples
+
+ ### Default Option A: Manage only the latest schema version. The resource instance always points to the latest schema version and supports in-place updates
+
+ ```python
+ import pulumi
+ import pulumi_confluentcloud as confluentcloud
+ import pulumi_std as std
+
+ # confluent_schema.avro-purchase points to v1.
+ avro_purchase = confluentcloud.Schema("avro-purchase",
+     subject_name="avro-purchase-value",
+     format="AVRO",
+     schema=std.index.file(input="./schemas/avro/purchase.avsc")["result"],
+     metadata={
+         "properties": {
+             "owner": "Bob Jones",
+             "email": "bob@acme.com",
+         },
+         "sensitives": [
+             "s1",
+             "s2",
+         ],
+         "tags": [
+             {
+                 "key": "tag1",
+                 "values": ["PII"],
+             },
+             {
+                 "key": "tag2",
+                 "values": ["PIIIII"],
+             },
+         ],
+     },
+     ruleset={
+         "domain_rules": [
+             {
+                 "name": "encryptPII",
+                 "kind": "TRANSFORM",
+                 "type": "ENCRYPT",
+                 "mode": "WRITEREAD",
+                 "tags": ["PII"],
+                 "params": {
+                     "encrypt.kek.name": "testkek2",
+                 },
+             },
+             {
+                 "name": "encrypt",
+                 "kind": "TRANSFORM",
+                 "type": "ENCRYPT",
+                 "mode": "WRITEREAD",
+                 "tags": ["PIIIII"],
+                 "params": {
+                     "encrypt.kek.name": "testkek2",
+                 },
+             },
+         ],
+         "migration_rules": [{
+             "name": "encrypt",
+             "kind": "TRANSFORM",
+             "type": "ENCRYPT",
+             "mode": "WRITEREAD",
+             "tags": ["PIM"],
+             "params": {
+                 "encrypt.kek.name": "testkekM",
+             },
+         }],
+     })
+ ```
+
  ## Import

  You can import a Schema by using the Schema Registry cluster ID, Subject name, and the Schema's unique identifier (or `latest` when `recreate_on_update = false`), in the format `<Schema Registry cluster ID>/<Subject name>/<Schema identifier>`, for example:
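A note on the `latest` identifier above: it applies only when the schema was created with `recreate_on_update = false`. A minimal sketch of such a resource, reusing the subject name and file path placeholders from the examples in this hunk:

```python
import pulumi_confluentcloud as confluentcloud
import pulumi_std as std

# With recreate_on_update=False the resource tracks the latest schema
# version in place, so it can later be imported with the `latest` identifier.
avro_purchase = confluentcloud.Schema("avro-purchase",
    subject_name="avro-purchase-value",
    format="AVRO",
    schema=std.index.file(input="./schemas/avro/purchase.avsc")["result"],
    recreate_on_update=False)
```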
@@ -498,6 +612,120 @@ class Schema(pulumi.CustomResource):
  """
  ## Example Usage

+ ### Option #1: Manage multiple Schema Registry clusters in the same Pulumi Stack
+
+ ```python
+ import pulumi
+ import pulumi_confluentcloud as confluentcloud
+ import pulumi_std as std
+
+ avro_purchase = confluentcloud.Schema("avro-purchase",
+     schema_registry_cluster={
+         "id": essentials["id"],
+     },
+     rest_endpoint=essentials["restEndpoint"],
+     subject_name="avro-purchase-value",
+     format="AVRO",
+     schema=std.index.file(input="./schemas/avro/purchase.avsc")["result"],
+     credentials={
+         "key": "<Schema Registry API Key for data.confluent_schema_registry_cluster.essentials>",
+         "secret": "<Schema Registry API Secret for data.confluent_schema_registry_cluster.essentials>",
+     })
+ ```
+
+ ### Option #2: Manage a single Schema Registry cluster in the same Pulumi Stack
+
+ ```python
+ import pulumi
+ import pulumi_confluentcloud as confluentcloud
+ import pulumi_std as std
+
+ avro_purchase = confluentcloud.Schema("avro-purchase",
+     subject_name="avro-purchase-value",
+     format="AVRO",
+     schema=std.index.file(input="./schemas/avro/purchase.avsc")["result"])
+ ```
+
+ ## Getting Started
+
+ The following end-to-end examples might help you get started with the `Schema` resource:
+ * single-event-types-avro-schema
+ * single-event-types-proto-schema
+ * single-event-types-proto-schema-with-alias
+ * multiple-event-types-avro-schema
+ * multiple-event-types-proto-schema
+ * field-level-encryption-schema
+
+ ## Additional Examples
+
+ ### Default Option A: Manage only the latest schema version. The resource instance always points to the latest schema version and supports in-place updates
+
+ ```python
+ import pulumi
+ import pulumi_confluentcloud as confluentcloud
+ import pulumi_std as std
+
+ # confluent_schema.avro-purchase points to v1.
+ avro_purchase = confluentcloud.Schema("avro-purchase",
+     subject_name="avro-purchase-value",
+     format="AVRO",
+     schema=std.index.file(input="./schemas/avro/purchase.avsc")["result"],
+     metadata={
+         "properties": {
+             "owner": "Bob Jones",
+             "email": "bob@acme.com",
+         },
+         "sensitives": [
+             "s1",
+             "s2",
+         ],
+         "tags": [
+             {
+                 "key": "tag1",
+                 "values": ["PII"],
+             },
+             {
+                 "key": "tag2",
+                 "values": ["PIIIII"],
+             },
+         ],
+     },
+     ruleset={
+         "domain_rules": [
+             {
+                 "name": "encryptPII",
+                 "kind": "TRANSFORM",
+                 "type": "ENCRYPT",
+                 "mode": "WRITEREAD",
+                 "tags": ["PII"],
+                 "params": {
+                     "encrypt.kek.name": "testkek2",
+                 },
+             },
+             {
+                 "name": "encrypt",
+                 "kind": "TRANSFORM",
+                 "type": "ENCRYPT",
+                 "mode": "WRITEREAD",
+                 "tags": ["PIIIII"],
+                 "params": {
+                     "encrypt.kek.name": "testkek2",
+                 },
+             },
+         ],
+         "migration_rules": [{
+             "name": "encrypt",
+             "kind": "TRANSFORM",
+             "type": "ENCRYPT",
+             "mode": "WRITEREAD",
+             "tags": ["PIM"],
+             "params": {
+                 "encrypt.kek.name": "testkekM",
+             },
+         }],
+     })
+ ```
+
  ## Import

  You can import a Schema by using the Schema Registry cluster ID, Subject name, and the Schema's unique identifier (or `latest` when `recreate_on_update = false`), in the format `<Schema Registry cluster ID>/<Subject name>/<Schema identifier>`, for example:
@@ -24,8 +24,10 @@ class TableflowTopicArgs:
  display_name: pulumi.Input[_builtins.str],
  environment: pulumi.Input['TableflowTopicEnvironmentArgs'],
  kafka_cluster: pulumi.Input['TableflowTopicKafkaClusterArgs'],
+ azure_data_lake_storage_gen2: Optional[pulumi.Input['TableflowTopicAzureDataLakeStorageGen2Args']] = None,
  byob_aws: Optional[pulumi.Input['TableflowTopicByobAwsArgs']] = None,
  credentials: Optional[pulumi.Input['TableflowTopicCredentialsArgs']] = None,
+ error_handling: Optional[pulumi.Input['TableflowTopicErrorHandlingArgs']] = None,
  managed_storages: Optional[pulumi.Input[Sequence[pulumi.Input['TableflowTopicManagedStorageArgs']]]] = None,
  record_failure_strategy: Optional[pulumi.Input[_builtins.str]] = None,
  retention_ms: Optional[pulumi.Input[_builtins.str]] = None,
@@ -34,6 +36,7 @@ class TableflowTopicArgs:
  The set of arguments for constructing a TableflowTopic resource.
  :param pulumi.Input[_builtins.str] display_name: The name of the Kafka topic for which Tableflow is enabled.
  :param pulumi.Input['TableflowTopicEnvironmentArgs'] environment: Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
+ :param pulumi.Input['TableflowTopicAzureDataLakeStorageGen2Args'] azure_data_lake_storage_gen2: (Optional Configuration Block) supports the following:
  :param pulumi.Input['TableflowTopicByobAwsArgs'] byob_aws: supports the following (See [Quick Start with Custom Storage](https://docs.confluent.io/cloud/current/topics/tableflow/get-started/quick-start-custom-storage-glue.html#cloud-tableflow-quick-start) for more details):
  :param pulumi.Input['TableflowTopicCredentialsArgs'] credentials: The Cluster API Credentials.
  :param pulumi.Input[Sequence[pulumi.Input['TableflowTopicManagedStorageArgs']]] managed_storages: The configuration of the Confluent managed storage. See [Quick Start with Managed Storage](https://docs.confluent.io/cloud/current/topics/tableflow/get-started/quick-start-managed-storage.html#cloud-tableflow-quick-start-managed-storage) for more details.
@@ -44,12 +47,19 @@ class TableflowTopicArgs:
  pulumi.set(__self__, "display_name", display_name)
  pulumi.set(__self__, "environment", environment)
  pulumi.set(__self__, "kafka_cluster", kafka_cluster)
+ if azure_data_lake_storage_gen2 is not None:
+     pulumi.set(__self__, "azure_data_lake_storage_gen2", azure_data_lake_storage_gen2)
  if byob_aws is not None:
      pulumi.set(__self__, "byob_aws", byob_aws)
  if credentials is not None:
      pulumi.set(__self__, "credentials", credentials)
+ if error_handling is not None:
+     pulumi.set(__self__, "error_handling", error_handling)
  if managed_storages is not None:
      pulumi.set(__self__, "managed_storages", managed_storages)
+ if record_failure_strategy is not None:
+     warnings.warn("""This attribute is deprecated and will be removed in a future release.""", DeprecationWarning)
+     pulumi.log.warn("""record_failure_strategy is deprecated: This attribute is deprecated and will be removed in a future release.""")
  if record_failure_strategy is not None:
      pulumi.set(__self__, "record_failure_strategy", record_failure_strategy)
  if retention_ms is not None:
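The added `warnings.warn` / `pulumi.log.warn` pair means the deprecation now surfaces as soon as the args object is constructed with the old field. A rough sketch of the new behavior (the IDs are placeholders, and it assumes the nested `environment` and `kafka_cluster` blocks take an `id`, as elsewhere in this provider):

```python
import warnings

import pulumi_confluentcloud as confluentcloud

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    # Passing the deprecated field triggers the new warnings.warn(...) call.
    confluentcloud.TableflowTopicArgs(
        display_name="orders",
        environment=confluentcloud.TableflowTopicEnvironmentArgs(id="env-abc123"),
        kafka_cluster=confluentcloud.TableflowTopicKafkaClusterArgs(id="lkc-abc123"),
        record_failure_strategy="SKIP")

assert any(issubclass(w.category, DeprecationWarning) for w in caught)
```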
@@ -90,6 +100,18 @@ class TableflowTopicArgs:
  def kafka_cluster(self, value: pulumi.Input['TableflowTopicKafkaClusterArgs']):
      pulumi.set(self, "kafka_cluster", value)

+ @_builtins.property
+ @pulumi.getter(name="azureDataLakeStorageGen2")
+ def azure_data_lake_storage_gen2(self) -> Optional[pulumi.Input['TableflowTopicAzureDataLakeStorageGen2Args']]:
+     """
+     (Optional Configuration Block) supports the following:
+     """
+     return pulumi.get(self, "azure_data_lake_storage_gen2")
+
+ @azure_data_lake_storage_gen2.setter
+ def azure_data_lake_storage_gen2(self, value: Optional[pulumi.Input['TableflowTopicAzureDataLakeStorageGen2Args']]):
+     pulumi.set(self, "azure_data_lake_storage_gen2", value)
+
  @_builtins.property
  @pulumi.getter(name="byobAws")
  def byob_aws(self) -> Optional[pulumi.Input['TableflowTopicByobAwsArgs']]:
@@ -114,6 +136,15 @@ class TableflowTopicArgs:
  def credentials(self, value: Optional[pulumi.Input['TableflowTopicCredentialsArgs']]):
      pulumi.set(self, "credentials", value)

+ @_builtins.property
+ @pulumi.getter(name="errorHandling")
+ def error_handling(self) -> Optional[pulumi.Input['TableflowTopicErrorHandlingArgs']]:
+     return pulumi.get(self, "error_handling")
+
+ @error_handling.setter
+ def error_handling(self, value: Optional[pulumi.Input['TableflowTopicErrorHandlingArgs']]):
+     pulumi.set(self, "error_handling", value)
+
  @_builtins.property
  @pulumi.getter(name="managedStorages")
  def managed_storages(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['TableflowTopicManagedStorageArgs']]]]:
@@ -128,6 +159,7 @@ class TableflowTopicArgs:

  @_builtins.property
  @pulumi.getter(name="recordFailureStrategy")
+ @_utilities.deprecated("""This attribute is deprecated and will be removed in a future release.""")
  def record_failure_strategy(self) -> Optional[pulumi.Input[_builtins.str]]:
      """
      The strategy to handle record failures in the Tableflow enabled topic during materialization. Accepted values are `SKIP` and `SUSPEND`: `SKIP` skips bad records and moves on to the next record, while `SUSPEND` suspends materialization of the topic.
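`_utilities.deprecated` is the provider SDK's property-level deprecation shim. The idea, reduced to a minimal generic sketch (not this package's actual implementation), is a decorator that warns on every access to the wrapped getter:

```python
import functools
import warnings

def deprecated(message: str):
    """Wrap an accessor so each call emits a DeprecationWarning."""
    def decorator(fn):
        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            warnings.warn(message, DeprecationWarning, stacklevel=2)
            return fn(*args, **kwargs)
        return wrapper
    return decorator
```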
@@ -166,12 +198,14 @@ class TableflowTopicArgs:
  @pulumi.input_type
  class _TableflowTopicState:
      def __init__(__self__, *,
+         azure_data_lake_storage_gen2: Optional[pulumi.Input['TableflowTopicAzureDataLakeStorageGen2Args']] = None,
          byob_aws: Optional[pulumi.Input['TableflowTopicByobAwsArgs']] = None,
          credentials: Optional[pulumi.Input['TableflowTopicCredentialsArgs']] = None,
          display_name: Optional[pulumi.Input[_builtins.str]] = None,
          enable_compaction: Optional[pulumi.Input[_builtins.bool]] = None,
          enable_partitioning: Optional[pulumi.Input[_builtins.bool]] = None,
          environment: Optional[pulumi.Input['TableflowTopicEnvironmentArgs']] = None,
+         error_handling: Optional[pulumi.Input['TableflowTopicErrorHandlingArgs']] = None,
          kafka_cluster: Optional[pulumi.Input['TableflowTopicKafkaClusterArgs']] = None,
          managed_storages: Optional[pulumi.Input[Sequence[pulumi.Input['TableflowTopicManagedStorageArgs']]]] = None,
          record_failure_strategy: Optional[pulumi.Input[_builtins.str]] = None,
@@ -182,6 +216,7 @@ class _TableflowTopicState:
  write_mode: Optional[pulumi.Input[_builtins.str]] = None):
  """
  Input properties used for looking up and filtering TableflowTopic resources.
+ :param pulumi.Input['TableflowTopicAzureDataLakeStorageGen2Args'] azure_data_lake_storage_gen2: (Optional Configuration Block) supports the following:
  :param pulumi.Input['TableflowTopicByobAwsArgs'] byob_aws: supports the following (See [Quick Start with Custom Storage](https://docs.confluent.io/cloud/current/topics/tableflow/get-started/quick-start-custom-storage-glue.html#cloud-tableflow-quick-start) for more details):
  :param pulumi.Input['TableflowTopicCredentialsArgs'] credentials: The Cluster API Credentials.
  :param pulumi.Input[_builtins.str] display_name: The name of the Kafka topic for which Tableflow is enabled.
@@ -196,6 +231,8 @@ class _TableflowTopicState:
  :param pulumi.Input[_builtins.str] table_path: (Optional String) The current storage path where the data and metadata are stored for this table.
  :param pulumi.Input[_builtins.str] write_mode: (Optional String) Indicates the write mode of the Tableflow topic.
  """
+ if azure_data_lake_storage_gen2 is not None:
+     pulumi.set(__self__, "azure_data_lake_storage_gen2", azure_data_lake_storage_gen2)
  if byob_aws is not None:
      pulumi.set(__self__, "byob_aws", byob_aws)
  if credentials is not None:
@@ -208,10 +245,15 @@ class _TableflowTopicState:
      pulumi.set(__self__, "enable_partitioning", enable_partitioning)
  if environment is not None:
      pulumi.set(__self__, "environment", environment)
+ if error_handling is not None:
+     pulumi.set(__self__, "error_handling", error_handling)
  if kafka_cluster is not None:
      pulumi.set(__self__, "kafka_cluster", kafka_cluster)
  if managed_storages is not None:
      pulumi.set(__self__, "managed_storages", managed_storages)
+ if record_failure_strategy is not None:
+     warnings.warn("""This attribute is deprecated and will be removed in a future release.""", DeprecationWarning)
+     pulumi.log.warn("""record_failure_strategy is deprecated: This attribute is deprecated and will be removed in a future release.""")
  if record_failure_strategy is not None:
      pulumi.set(__self__, "record_failure_strategy", record_failure_strategy)
  if retention_ms is not None:
@@ -225,6 +267,18 @@ class _TableflowTopicState:
  if write_mode is not None:
      pulumi.set(__self__, "write_mode", write_mode)

+ @_builtins.property
+ @pulumi.getter(name="azureDataLakeStorageGen2")
+ def azure_data_lake_storage_gen2(self) -> Optional[pulumi.Input['TableflowTopicAzureDataLakeStorageGen2Args']]:
+     """
+     (Optional Configuration Block) supports the following:
+     """
+     return pulumi.get(self, "azure_data_lake_storage_gen2")
+
+ @azure_data_lake_storage_gen2.setter
+ def azure_data_lake_storage_gen2(self, value: Optional[pulumi.Input['TableflowTopicAzureDataLakeStorageGen2Args']]):
+     pulumi.set(self, "azure_data_lake_storage_gen2", value)
+
  @_builtins.property
  @pulumi.getter(name="byobAws")
  def byob_aws(self) -> Optional[pulumi.Input['TableflowTopicByobAwsArgs']]:
@@ -297,6 +351,15 @@ class _TableflowTopicState:
  def environment(self, value: Optional[pulumi.Input['TableflowTopicEnvironmentArgs']]):
      pulumi.set(self, "environment", value)

+ @_builtins.property
+ @pulumi.getter(name="errorHandling")
+ def error_handling(self) -> Optional[pulumi.Input['TableflowTopicErrorHandlingArgs']]:
+     return pulumi.get(self, "error_handling")
+
+ @error_handling.setter
+ def error_handling(self, value: Optional[pulumi.Input['TableflowTopicErrorHandlingArgs']]):
+     pulumi.set(self, "error_handling", value)
+
  @_builtins.property
  @pulumi.getter(name="kafkaCluster")
  def kafka_cluster(self) -> Optional[pulumi.Input['TableflowTopicKafkaClusterArgs']]:
@@ -320,6 +383,7 @@ class _TableflowTopicState:

  @_builtins.property
  @pulumi.getter(name="recordFailureStrategy")
+ @_utilities.deprecated("""This attribute is deprecated and will be removed in a future release.""")
  def record_failure_strategy(self) -> Optional[pulumi.Input[_builtins.str]]:
      """
      The strategy to handle record failures in the Tableflow enabled topic during materialization. Accepted values are `SKIP` and `SUSPEND`: `SKIP` skips bad records and moves on to the next record, while `SUSPEND` suspends materialization of the topic.
@@ -397,10 +461,12 @@ class TableflowTopic(pulumi.CustomResource):
  def __init__(__self__,
          resource_name: str,
          opts: Optional[pulumi.ResourceOptions] = None,
+         azure_data_lake_storage_gen2: Optional[pulumi.Input[Union['TableflowTopicAzureDataLakeStorageGen2Args', 'TableflowTopicAzureDataLakeStorageGen2ArgsDict']]] = None,
          byob_aws: Optional[pulumi.Input[Union['TableflowTopicByobAwsArgs', 'TableflowTopicByobAwsArgsDict']]] = None,
          credentials: Optional[pulumi.Input[Union['TableflowTopicCredentialsArgs', 'TableflowTopicCredentialsArgsDict']]] = None,
          display_name: Optional[pulumi.Input[_builtins.str]] = None,
          environment: Optional[pulumi.Input[Union['TableflowTopicEnvironmentArgs', 'TableflowTopicEnvironmentArgsDict']]] = None,
+         error_handling: Optional[pulumi.Input[Union['TableflowTopicErrorHandlingArgs', 'TableflowTopicErrorHandlingArgsDict']]] = None,
          kafka_cluster: Optional[pulumi.Input[Union['TableflowTopicKafkaClusterArgs', 'TableflowTopicKafkaClusterArgsDict']]] = None,
          managed_storages: Optional[pulumi.Input[Sequence[pulumi.Input[Union['TableflowTopicManagedStorageArgs', 'TableflowTopicManagedStorageArgsDict']]]]] = None,
          record_failure_strategy: Optional[pulumi.Input[_builtins.str]] = None,
@@ -454,15 +520,6 @@ class TableflowTopic(pulumi.CustomResource):
      "provider_integration_id": main["id"],
  })
  ```
-
- ## Getting Started
-
- The following end-to-end examples might help to get started with `TableflowTopic` resource:
- * confluent-managed-storage: Tableflow topic with Confluent-managed storage.
- * byob-aws-storage: Tableflow topic with custom (BYOB AWS) storage.
- * datagen-connector-byob-aws-storage: Datagen Source connector with a Tableflow topic with custom (BYOB AWS) storage.
- * datagen-connector-confluent-managed-storage: Datagen Source connector with a Tableflow topic with Confluent-managed storage.
-
  ## Import

  You can import a Tableflow Topic by using the Tableflow Topic name, Environment ID, and Kafka Cluster ID, in the format `<Environment ID>/<Kafka Cluster ID>/<Tableflow Topic name>`, for example:
@@ -487,6 +544,7 @@ class TableflowTopic(pulumi.CustomResource):

  :param str resource_name: The name of the resource.
  :param pulumi.ResourceOptions opts: Options for the resource.
+ :param pulumi.Input[Union['TableflowTopicAzureDataLakeStorageGen2Args', 'TableflowTopicAzureDataLakeStorageGen2ArgsDict']] azure_data_lake_storage_gen2: (Optional Configuration Block) supports the following:
  :param pulumi.Input[Union['TableflowTopicByobAwsArgs', 'TableflowTopicByobAwsArgsDict']] byob_aws: supports the following (See [Quick Start with Custom Storage](https://docs.confluent.io/cloud/current/topics/tableflow/get-started/quick-start-custom-storage-glue.html#cloud-tableflow-quick-start) for more details):
  :param pulumi.Input[Union['TableflowTopicCredentialsArgs', 'TableflowTopicCredentialsArgsDict']] credentials: The Cluster API Credentials.
  :param pulumi.Input[_builtins.str] display_name: The name of the Kafka topic for which Tableflow is enabled.
@@ -549,15 +607,6 @@ class TableflowTopic(pulumi.CustomResource):
      "provider_integration_id": main["id"],
  })
  ```
-
- ## Getting Started
-
- The following end-to-end examples might help to get started with `TableflowTopic` resource:
- * confluent-managed-storage: Tableflow topic with Confluent-managed storage.
- * byob-aws-storage: Tableflow topic with custom (BYOB AWS) storage.
- * datagen-connector-byob-aws-storage: Datagen Source connector with a Tableflow topic with custom (BYOB AWS) storage.
- * datagen-connector-confluent-managed-storage: Datagen Source connector with a Tableflow topic with Confluent-managed storage.
-
  ## Import

  You can import a Tableflow Topic by using the Tableflow Topic name, Environment ID, and Kafka Cluster ID, in the format `<Environment ID>/<Kafka Cluster ID>/<Tableflow Topic name>`, for example:
@@ -595,10 +644,12 @@ class TableflowTopic(pulumi.CustomResource):
  def _internal_init(__self__,
          resource_name: str,
          opts: Optional[pulumi.ResourceOptions] = None,
+         azure_data_lake_storage_gen2: Optional[pulumi.Input[Union['TableflowTopicAzureDataLakeStorageGen2Args', 'TableflowTopicAzureDataLakeStorageGen2ArgsDict']]] = None,
          byob_aws: Optional[pulumi.Input[Union['TableflowTopicByobAwsArgs', 'TableflowTopicByobAwsArgsDict']]] = None,
          credentials: Optional[pulumi.Input[Union['TableflowTopicCredentialsArgs', 'TableflowTopicCredentialsArgsDict']]] = None,
          display_name: Optional[pulumi.Input[_builtins.str]] = None,
          environment: Optional[pulumi.Input[Union['TableflowTopicEnvironmentArgs', 'TableflowTopicEnvironmentArgsDict']]] = None,
+         error_handling: Optional[pulumi.Input[Union['TableflowTopicErrorHandlingArgs', 'TableflowTopicErrorHandlingArgsDict']]] = None,
          kafka_cluster: Optional[pulumi.Input[Union['TableflowTopicKafkaClusterArgs', 'TableflowTopicKafkaClusterArgsDict']]] = None,
          managed_storages: Optional[pulumi.Input[Sequence[pulumi.Input[Union['TableflowTopicManagedStorageArgs', 'TableflowTopicManagedStorageArgsDict']]]]] = None,
          record_failure_strategy: Optional[pulumi.Input[_builtins.str]] = None,
@@ -613,6 +664,7 @@ class TableflowTopic(pulumi.CustomResource):
      raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
  __props__ = TableflowTopicArgs.__new__(TableflowTopicArgs)

+ __props__.__dict__["azure_data_lake_storage_gen2"] = azure_data_lake_storage_gen2
  __props__.__dict__["byob_aws"] = byob_aws
  __props__.__dict__["credentials"] = None if credentials is None else pulumi.Output.secret(credentials)
  if display_name is None and not opts.urn:
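Note the `credentials` line in this hunk: inputs wrapped with `pulumi.Output.secret` are encrypted in the stack state and masked in CLI output. A generic illustration of that API (values are placeholders):

```python
import pulumi

# Output.secret marks a value as secret; Pulumi encrypts it in state and
# masks it when rendering previews and stack outputs.
api_secret = pulumi.Output.secret("example-secret-value")
pulumi.export("masked", api_secret)  # shown as [secret] by the CLI
```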
@@ -621,6 +673,7 @@ class TableflowTopic(pulumi.CustomResource):
  if environment is None and not opts.urn:
      raise TypeError("Missing required property 'environment'")
  __props__.__dict__["environment"] = environment
+ __props__.__dict__["error_handling"] = error_handling
  if kafka_cluster is None and not opts.urn:
      raise TypeError("Missing required property 'kafka_cluster'")
  __props__.__dict__["kafka_cluster"] = kafka_cluster
@@ -645,12 +698,14 @@ class TableflowTopic(pulumi.CustomResource):
  def get(resource_name: str,
          id: pulumi.Input[str],
          opts: Optional[pulumi.ResourceOptions] = None,
+         azure_data_lake_storage_gen2: Optional[pulumi.Input[Union['TableflowTopicAzureDataLakeStorageGen2Args', 'TableflowTopicAzureDataLakeStorageGen2ArgsDict']]] = None,
          byob_aws: Optional[pulumi.Input[Union['TableflowTopicByobAwsArgs', 'TableflowTopicByobAwsArgsDict']]] = None,
          credentials: Optional[pulumi.Input[Union['TableflowTopicCredentialsArgs', 'TableflowTopicCredentialsArgsDict']]] = None,
          display_name: Optional[pulumi.Input[_builtins.str]] = None,
          enable_compaction: Optional[pulumi.Input[_builtins.bool]] = None,
          enable_partitioning: Optional[pulumi.Input[_builtins.bool]] = None,
          environment: Optional[pulumi.Input[Union['TableflowTopicEnvironmentArgs', 'TableflowTopicEnvironmentArgsDict']]] = None,
+         error_handling: Optional[pulumi.Input[Union['TableflowTopicErrorHandlingArgs', 'TableflowTopicErrorHandlingArgsDict']]] = None,
          kafka_cluster: Optional[pulumi.Input[Union['TableflowTopicKafkaClusterArgs', 'TableflowTopicKafkaClusterArgsDict']]] = None,
          managed_storages: Optional[pulumi.Input[Sequence[pulumi.Input[Union['TableflowTopicManagedStorageArgs', 'TableflowTopicManagedStorageArgsDict']]]]] = None,
          record_failure_strategy: Optional[pulumi.Input[_builtins.str]] = None,
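Both new fields also flow through `TableflowTopic.get`, which rehydrates an existing resource from its provider ID. A hedged sketch, assuming the ID matches the composite format documented in the Import section (all IDs below are placeholders):

```python
import pulumi
import pulumi_confluentcloud as confluentcloud

# Look up an existing Tableflow topic by
# <Environment ID>/<Kafka Cluster ID>/<Tableflow Topic name>.
existing = confluentcloud.TableflowTopic.get(
    "imported-orders",
    id="env-abc123/lkc-abc123/orders")
pulumi.export("write_mode", existing.write_mode)
```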
@@ -666,6 +721,7 @@ class TableflowTopic(pulumi.CustomResource):
  :param str resource_name: The unique name of the resulting resource.
  :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
  :param pulumi.ResourceOptions opts: Options for the resource.
+ :param pulumi.Input[Union['TableflowTopicAzureDataLakeStorageGen2Args', 'TableflowTopicAzureDataLakeStorageGen2ArgsDict']] azure_data_lake_storage_gen2: (Optional Configuration Block) supports the following:
  :param pulumi.Input[Union['TableflowTopicByobAwsArgs', 'TableflowTopicByobAwsArgsDict']] byob_aws: supports the following (See [Quick Start with Custom Storage](https://docs.confluent.io/cloud/current/topics/tableflow/get-started/quick-start-custom-storage-glue.html#cloud-tableflow-quick-start) for more details):
  :param pulumi.Input[Union['TableflowTopicCredentialsArgs', 'TableflowTopicCredentialsArgsDict']] credentials: The Cluster API Credentials.
  :param pulumi.Input[_builtins.str] display_name: The name of the Kafka topic for which Tableflow is enabled.
@@ -684,12 +740,14 @@ class TableflowTopic(pulumi.CustomResource):

  __props__ = _TableflowTopicState.__new__(_TableflowTopicState)

+ __props__.__dict__["azure_data_lake_storage_gen2"] = azure_data_lake_storage_gen2
  __props__.__dict__["byob_aws"] = byob_aws
  __props__.__dict__["credentials"] = credentials
  __props__.__dict__["display_name"] = display_name
  __props__.__dict__["enable_compaction"] = enable_compaction
  __props__.__dict__["enable_partitioning"] = enable_partitioning
  __props__.__dict__["environment"] = environment
+ __props__.__dict__["error_handling"] = error_handling
  __props__.__dict__["kafka_cluster"] = kafka_cluster
  __props__.__dict__["managed_storages"] = managed_storages
  __props__.__dict__["record_failure_strategy"] = record_failure_strategy
@@ -700,6 +758,14 @@ class TableflowTopic(pulumi.CustomResource):
  __props__.__dict__["write_mode"] = write_mode
  return TableflowTopic(resource_name, opts=opts, __props__=__props__)

+ @_builtins.property
+ @pulumi.getter(name="azureDataLakeStorageGen2")
+ def azure_data_lake_storage_gen2(self) -> pulumi.Output[Optional['outputs.TableflowTopicAzureDataLakeStorageGen2']]:
+     """
+     (Optional Configuration Block) supports the following:
+     """
+     return pulumi.get(self, "azure_data_lake_storage_gen2")
+
  @_builtins.property
  @pulumi.getter(name="byobAws")
  def byob_aws(self) -> pulumi.Output[Optional['outputs.TableflowTopicByobAws']]:
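The nested schema of the new block lives in `outputs.TableflowTopicAzureDataLakeStorageGen2` and its args class, which sit outside this diff, so its field names are not shown here. A hedged wiring sketch that deliberately leaves the block's contents as a placeholder to fill in from the provider docs (the environment and cluster IDs are also placeholders):

```python
import pulumi_confluentcloud as confluentcloud

# The ADLS Gen2 block's field names are not visible in this diff, so they
# are intentionally omitted rather than guessed.
adls_gen2_config = {
    # ... storage account / container settings per the provider docs
}

example = confluentcloud.TableflowTopic("example",
    display_name="orders",
    environment={"id": "env-abc123"},
    kafka_cluster={"id": "lkc-abc123"},
    azure_data_lake_storage_gen2=adls_gen2_config)
```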
@@ -748,6 +814,11 @@ class TableflowTopic(pulumi.CustomResource):
  """
  return pulumi.get(self, "environment")

+ @_builtins.property
+ @pulumi.getter(name="errorHandling")
+ def error_handling(self) -> pulumi.Output['outputs.TableflowTopicErrorHandling']:
+     return pulumi.get(self, "error_handling")
+
  @_builtins.property
  @pulumi.getter(name="kafkaCluster")
  def kafka_cluster(self) -> pulumi.Output['outputs.TableflowTopicKafkaCluster']:
@@ -763,6 +834,7 @@ class TableflowTopic(pulumi.CustomResource):

  @_builtins.property
  @pulumi.getter(name="recordFailureStrategy")
+ @_utilities.deprecated("""This attribute is deprecated and will be removed in a future release.""")
  def record_failure_strategy(self) -> pulumi.Output[_builtins.str]:
      """
      The strategy to handle record failures in the Tableflow enabled topic during materialization. Accepted values are `SKIP` and `SUSPEND`: `SKIP` skips bad records and moves on to the next record, while `SUSPEND` suspends materialization of the topic.
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: pulumi_confluentcloud
- Version: 2.42.0
+ Version: 2.54.0a1766503424
  Summary: A Pulumi package for creating and managing Confluent cloud resources.
  License: Apache-2.0
  Project-URL: Homepage, https://www.pulumi.com