pulumi-confluentcloud 2.10.0a1731389320__py3-none-any.whl → 2.54.0a1766503424__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (134) hide show
  1. pulumi_confluentcloud/__init__.py +102 -1
  2. pulumi_confluentcloud/_inputs.py +4870 -2499
  3. pulumi_confluentcloud/_utilities.py +9 -5
  4. pulumi_confluentcloud/access_point.py +188 -33
  5. pulumi_confluentcloud/api_key.py +178 -144
  6. pulumi_confluentcloud/business_metadata.py +76 -73
  7. pulumi_confluentcloud/business_metadata_binding.py +101 -98
  8. pulumi_confluentcloud/byok_key.py +12 -11
  9. pulumi_confluentcloud/catalog_entity_attributes.py +133 -92
  10. pulumi_confluentcloud/catalog_integration.py +544 -0
  11. pulumi_confluentcloud/certificate_authority.py +158 -157
  12. pulumi_confluentcloud/certificate_pool.py +76 -75
  13. pulumi_confluentcloud/cluster_link.py +168 -91
  14. pulumi_confluentcloud/config/__init__.py +2 -1
  15. pulumi_confluentcloud/config/__init__.pyi +23 -2
  16. pulumi_confluentcloud/config/outputs.py +110 -0
  17. pulumi_confluentcloud/config/vars.py +50 -21
  18. pulumi_confluentcloud/connect_artifact.py +478 -0
  19. pulumi_confluentcloud/connector.py +399 -87
  20. pulumi_confluentcloud/custom_connector_plugin.py +142 -141
  21. pulumi_confluentcloud/custom_connector_plugin_version.py +614 -0
  22. pulumi_confluentcloud/dns_forwarder.py +136 -45
  23. pulumi_confluentcloud/dns_record.py +46 -45
  24. pulumi_confluentcloud/environment.py +74 -73
  25. pulumi_confluentcloud/flink_artifact.py +254 -198
  26. pulumi_confluentcloud/flink_compute_pool.py +101 -100
  27. pulumi_confluentcloud/flink_connection.py +935 -0
  28. pulumi_confluentcloud/flink_statement.py +218 -124
  29. pulumi_confluentcloud/gateway.py +88 -28
  30. pulumi_confluentcloud/get_access_point.py +51 -17
  31. pulumi_confluentcloud/get_business_metadata.py +27 -25
  32. pulumi_confluentcloud/get_business_metadata_binding.py +36 -34
  33. pulumi_confluentcloud/get_byok_key.py +13 -13
  34. pulumi_confluentcloud/get_catalog_integration.py +248 -0
  35. pulumi_confluentcloud/get_certificate_authority.py +42 -42
  36. pulumi_confluentcloud/get_certificate_pool.py +21 -21
  37. pulumi_confluentcloud/get_cluster_link.py +274 -0
  38. pulumi_confluentcloud/get_connect_artifact.py +191 -0
  39. pulumi_confluentcloud/get_dns_record.py +17 -17
  40. pulumi_confluentcloud/get_environment.py +19 -19
  41. pulumi_confluentcloud/get_environments.py +8 -8
  42. pulumi_confluentcloud/get_flink_artifact.py +61 -43
  43. pulumi_confluentcloud/get_flink_compute_pool.py +29 -29
  44. pulumi_confluentcloud/get_flink_connection.py +267 -0
  45. pulumi_confluentcloud/get_flink_region.py +26 -26
  46. pulumi_confluentcloud/get_gateway.py +60 -18
  47. pulumi_confluentcloud/get_group_mapping.py +20 -20
  48. pulumi_confluentcloud/get_identity_pool.py +23 -23
  49. pulumi_confluentcloud/get_identity_provider.py +37 -23
  50. pulumi_confluentcloud/get_invitation.py +22 -22
  51. pulumi_confluentcloud/get_ip_addresses.py +8 -8
  52. pulumi_confluentcloud/get_ip_filter.py +175 -0
  53. pulumi_confluentcloud/get_ip_group.py +133 -0
  54. pulumi_confluentcloud/get_kafka_client_quota.py +19 -19
  55. pulumi_confluentcloud/get_kafka_cluster.py +57 -43
  56. pulumi_confluentcloud/get_kafka_clusters.py +136 -0
  57. pulumi_confluentcloud/get_kafka_topic.py +24 -24
  58. pulumi_confluentcloud/get_ksql_cluster.py +35 -35
  59. pulumi_confluentcloud/get_network.py +56 -42
  60. pulumi_confluentcloud/get_network_link_endpoint.py +19 -19
  61. pulumi_confluentcloud/get_network_link_service.py +25 -19
  62. pulumi_confluentcloud/get_organization.py +8 -8
  63. pulumi_confluentcloud/get_peering.py +21 -21
  64. pulumi_confluentcloud/get_private_link_access.py +21 -21
  65. pulumi_confluentcloud/get_private_link_attachment.py +31 -29
  66. pulumi_confluentcloud/get_private_link_attachment_connection.py +26 -23
  67. pulumi_confluentcloud/get_provider_integration.py +30 -20
  68. pulumi_confluentcloud/get_provider_integration_authorization.py +142 -0
  69. pulumi_confluentcloud/get_provider_integration_setup.py +270 -0
  70. pulumi_confluentcloud/get_role_binding.py +16 -16
  71. pulumi_confluentcloud/get_schema.py +42 -42
  72. pulumi_confluentcloud/get_schema_registry_cluster.py +53 -38
  73. pulumi_confluentcloud/get_schema_registry_cluster_config.py +31 -17
  74. pulumi_confluentcloud/get_schema_registry_cluster_mode.py +16 -16
  75. pulumi_confluentcloud/get_schema_registry_clusters.py +8 -8
  76. pulumi_confluentcloud/get_schema_registry_dek.py +44 -44
  77. pulumi_confluentcloud/get_schema_registry_kek.py +32 -32
  78. pulumi_confluentcloud/get_schemas.py +16 -16
  79. pulumi_confluentcloud/get_service_account.py +22 -22
  80. pulumi_confluentcloud/get_subject_config.py +37 -23
  81. pulumi_confluentcloud/get_subject_mode.py +22 -22
  82. pulumi_confluentcloud/get_tableflow_topic.py +374 -0
  83. pulumi_confluentcloud/get_tag.py +28 -26
  84. pulumi_confluentcloud/get_tag_binding.py +34 -32
  85. pulumi_confluentcloud/get_transit_gateway_attachment.py +19 -19
  86. pulumi_confluentcloud/get_user.py +26 -26
  87. pulumi_confluentcloud/get_users.py +8 -8
  88. pulumi_confluentcloud/group_mapping.py +54 -53
  89. pulumi_confluentcloud/identity_pool.py +74 -73
  90. pulumi_confluentcloud/identity_provider.py +138 -76
  91. pulumi_confluentcloud/invitation.py +81 -80
  92. pulumi_confluentcloud/ip_filter.py +420 -0
  93. pulumi_confluentcloud/ip_group.py +264 -0
  94. pulumi_confluentcloud/kafka_acl.py +173 -172
  95. pulumi_confluentcloud/kafka_client_quota.py +63 -62
  96. pulumi_confluentcloud/kafka_cluster.py +227 -207
  97. pulumi_confluentcloud/kafka_cluster_config.py +43 -42
  98. pulumi_confluentcloud/kafka_mirror_topic.py +46 -45
  99. pulumi_confluentcloud/kafka_topic.py +132 -131
  100. pulumi_confluentcloud/ksql_cluster.py +117 -120
  101. pulumi_confluentcloud/network.py +235 -198
  102. pulumi_confluentcloud/network_link_endpoint.py +62 -68
  103. pulumi_confluentcloud/network_link_service.py +62 -68
  104. pulumi_confluentcloud/outputs.py +4735 -2224
  105. pulumi_confluentcloud/peering.py +128 -48
  106. pulumi_confluentcloud/plugin.py +428 -0
  107. pulumi_confluentcloud/private_link_access.py +54 -60
  108. pulumi_confluentcloud/private_link_attachment.py +93 -88
  109. pulumi_confluentcloud/private_link_attachment_connection.py +70 -47
  110. pulumi_confluentcloud/provider.py +318 -192
  111. pulumi_confluentcloud/provider_integration.py +45 -34
  112. pulumi_confluentcloud/provider_integration_authorization.py +320 -0
  113. pulumi_confluentcloud/provider_integration_setup.py +448 -0
  114. pulumi_confluentcloud/pulumi-plugin.json +1 -1
  115. pulumi_confluentcloud/role_binding.py +153 -56
  116. pulumi_confluentcloud/schema.py +416 -212
  117. pulumi_confluentcloud/schema_exporter.py +217 -164
  118. pulumi_confluentcloud/schema_registry_cluster_config.py +96 -44
  119. pulumi_confluentcloud/schema_registry_cluster_mode.py +90 -42
  120. pulumi_confluentcloud/schema_registry_dek.py +140 -146
  121. pulumi_confluentcloud/schema_registry_kek.py +148 -154
  122. pulumi_confluentcloud/service_account.py +97 -96
  123. pulumi_confluentcloud/subject_config.py +142 -62
  124. pulumi_confluentcloud/subject_mode.py +107 -59
  125. pulumi_confluentcloud/tableflow_topic.py +883 -0
  126. pulumi_confluentcloud/tag.py +82 -79
  127. pulumi_confluentcloud/tag_binding.py +121 -84
  128. pulumi_confluentcloud/tf_importer.py +39 -36
  129. pulumi_confluentcloud/transit_gateway_attachment.py +38 -44
  130. {pulumi_confluentcloud-2.10.0a1731389320.dist-info → pulumi_confluentcloud-2.54.0a1766503424.dist-info}/METADATA +7 -7
  131. pulumi_confluentcloud-2.54.0a1766503424.dist-info/RECORD +134 -0
  132. {pulumi_confluentcloud-2.10.0a1731389320.dist-info → pulumi_confluentcloud-2.54.0a1766503424.dist-info}/WHEEL +1 -1
  133. pulumi_confluentcloud-2.10.0a1731389320.dist-info/RECORD +0 -113
  134. {pulumi_confluentcloud-2.10.0a1731389320.dist-info → pulumi_confluentcloud-2.54.0a1766503424.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,883 @@
1
+ # coding=utf-8
2
+ # *** WARNING: this file was generated by pulumi-language-python. ***
3
+ # *** Do not edit by hand unless you're certain you know what you are doing! ***
4
+
5
+ import builtins as _builtins
6
+ import warnings
7
+ import sys
8
+ import pulumi
9
+ import pulumi.runtime
10
+ from typing import Any, Mapping, Optional, Sequence, Union, overload
11
+ if sys.version_info >= (3, 11):
12
+ from typing import NotRequired, TypedDict, TypeAlias
13
+ else:
14
+ from typing_extensions import NotRequired, TypedDict, TypeAlias
15
+ from . import _utilities
16
+ from . import outputs
17
+ from ._inputs import *
18
+
19
+ __all__ = ['TableflowTopicArgs', 'TableflowTopic']
20
+
21
@pulumi.input_type
class TableflowTopicArgs:
    # Generated input-argument bag for the TableflowTopic resource; the
    # @pulumi.input_type decorator derives the wire-level property map from
    # the __init__ signature and the @pulumi.getter-decorated properties.
    def __init__(__self__, *,
                 display_name: pulumi.Input[_builtins.str],
                 environment: pulumi.Input['TableflowTopicEnvironmentArgs'],
                 kafka_cluster: pulumi.Input['TableflowTopicKafkaClusterArgs'],
                 azure_data_lake_storage_gen2: Optional[pulumi.Input['TableflowTopicAzureDataLakeStorageGen2Args']] = None,
                 byob_aws: Optional[pulumi.Input['TableflowTopicByobAwsArgs']] = None,
                 credentials: Optional[pulumi.Input['TableflowTopicCredentialsArgs']] = None,
                 error_handling: Optional[pulumi.Input['TableflowTopicErrorHandlingArgs']] = None,
                 managed_storages: Optional[pulumi.Input[Sequence[pulumi.Input['TableflowTopicManagedStorageArgs']]]] = None,
                 record_failure_strategy: Optional[pulumi.Input[_builtins.str]] = None,
                 retention_ms: Optional[pulumi.Input[_builtins.str]] = None,
                 table_formats: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]] = None):
        """
        The set of arguments for constructing a TableflowTopic resource.
        :param pulumi.Input[_builtins.str] display_name: The name of the Kafka topic for which Tableflow is enabled.
        :param pulumi.Input['TableflowTopicEnvironmentArgs'] environment: Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
        :param pulumi.Input['TableflowTopicAzureDataLakeStorageGen2Args'] azure_data_lake_storage_gen2: (Optional Configuration Block) supports the following:
        :param pulumi.Input['TableflowTopicByobAwsArgs'] byob_aws: supports the following (See [Quick Start with Custom Storage](https://docs.confluent.io/cloud/current/topics/tableflow/get-started/quick-start-custom-storage-glue.html#cloud-tableflow-quick-start) for more details):
        :param pulumi.Input['TableflowTopicCredentialsArgs'] credentials: The Cluster API Credentials.
        :param pulumi.Input[Sequence[pulumi.Input['TableflowTopicManagedStorageArgs']]] managed_storages: The configuration of the Confluent managed storage. See [Quick Start with Managed Storage](https://docs.confluent.io/cloud/current/topics/tableflow/get-started/quick-start-managed-storage.html#cloud-tableflow-quick-start-managed-storage) for more details.
        :param pulumi.Input[_builtins.str] record_failure_strategy: The strategy to handle record failures in the Tableflow enabled topic during materialization. Accepted values are `SKIP`, `SUSPEND`. For `SKIP`, we skip the bad records and move to the next record. For `SUSPEND`, we suspend the materialization of the topic.
        :param pulumi.Input[_builtins.str] retention_ms: The max age of snapshots (Iceberg) or versions (Delta) (snapshot/version expiration) to keep on the table in milliseconds for the Tableflow enabled topic.
        :param pulumi.Input[Sequence[pulumi.Input[_builtins.str]]] table_formats: The supported table formats for the Tableflow-enabled topic. Accepted values are `DELTA`, `ICEBERG`.
        """
        # Required arguments are stored unconditionally.
        pulumi.set(__self__, "display_name", display_name)
        pulumi.set(__self__, "environment", environment)
        pulumi.set(__self__, "kafka_cluster", kafka_cluster)
        # Optional arguments are only recorded when explicitly supplied, so
        # absent values are never sent to the provider.
        if azure_data_lake_storage_gen2 is not None:
            pulumi.set(__self__, "azure_data_lake_storage_gen2", azure_data_lake_storage_gen2)
        if byob_aws is not None:
            pulumi.set(__self__, "byob_aws", byob_aws)
        if credentials is not None:
            pulumi.set(__self__, "credentials", credentials)
        if error_handling is not None:
            pulumi.set(__self__, "error_handling", error_handling)
        if managed_storages is not None:
            pulumi.set(__self__, "managed_storages", managed_storages)
        # Generated deprecation pattern: the first guard emits the warnings,
        # the second (intentionally repeated) guard stores the value.
        if record_failure_strategy is not None:
            warnings.warn("""This attribute is deprecated and will be removed in a future release.""", DeprecationWarning)
            pulumi.log.warn("""record_failure_strategy is deprecated: This attribute is deprecated and will be removed in a future release.""")
        if record_failure_strategy is not None:
            pulumi.set(__self__, "record_failure_strategy", record_failure_strategy)
        if retention_ms is not None:
            pulumi.set(__self__, "retention_ms", retention_ms)
        if table_formats is not None:
            pulumi.set(__self__, "table_formats", table_formats)

    @_builtins.property
    @pulumi.getter(name="displayName")
    def display_name(self) -> pulumi.Input[_builtins.str]:
        """
        The name of the Kafka topic for which Tableflow is enabled.
        """
        return pulumi.get(self, "display_name")

    @display_name.setter
    def display_name(self, value: pulumi.Input[_builtins.str]):
        pulumi.set(self, "display_name", value)

    @_builtins.property
    @pulumi.getter
    def environment(self) -> pulumi.Input['TableflowTopicEnvironmentArgs']:
        """
        Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
        """
        return pulumi.get(self, "environment")

    @environment.setter
    def environment(self, value: pulumi.Input['TableflowTopicEnvironmentArgs']):
        pulumi.set(self, "environment", value)

    @_builtins.property
    @pulumi.getter(name="kafkaCluster")
    # NOTE(review): the provider schema supplies no description for this
    # field, so the generator emitted no docstring here.
    def kafka_cluster(self) -> pulumi.Input['TableflowTopicKafkaClusterArgs']:
        return pulumi.get(self, "kafka_cluster")

    @kafka_cluster.setter
    def kafka_cluster(self, value: pulumi.Input['TableflowTopicKafkaClusterArgs']):
        pulumi.set(self, "kafka_cluster", value)

    @_builtins.property
    @pulumi.getter(name="azureDataLakeStorageGen2")
    def azure_data_lake_storage_gen2(self) -> Optional[pulumi.Input['TableflowTopicAzureDataLakeStorageGen2Args']]:
        """
        (Optional Configuration Block) supports the following:
        """
        return pulumi.get(self, "azure_data_lake_storage_gen2")

    @azure_data_lake_storage_gen2.setter
    def azure_data_lake_storage_gen2(self, value: Optional[pulumi.Input['TableflowTopicAzureDataLakeStorageGen2Args']]):
        pulumi.set(self, "azure_data_lake_storage_gen2", value)

    @_builtins.property
    @pulumi.getter(name="byobAws")
    def byob_aws(self) -> Optional[pulumi.Input['TableflowTopicByobAwsArgs']]:
        """
        supports the following (See [Quick Start with Custom Storage](https://docs.confluent.io/cloud/current/topics/tableflow/get-started/quick-start-custom-storage-glue.html#cloud-tableflow-quick-start) for more details):
        """
        return pulumi.get(self, "byob_aws")

    @byob_aws.setter
    def byob_aws(self, value: Optional[pulumi.Input['TableflowTopicByobAwsArgs']]):
        pulumi.set(self, "byob_aws", value)

    @_builtins.property
    @pulumi.getter
    def credentials(self) -> Optional[pulumi.Input['TableflowTopicCredentialsArgs']]:
        """
        The Cluster API Credentials.
        """
        return pulumi.get(self, "credentials")

    @credentials.setter
    def credentials(self, value: Optional[pulumi.Input['TableflowTopicCredentialsArgs']]):
        pulumi.set(self, "credentials", value)

    @_builtins.property
    @pulumi.getter(name="errorHandling")
    # NOTE(review): no schema description for this field; see the
    # TableflowTopicErrorHandlingArgs input type for its shape.
    def error_handling(self) -> Optional[pulumi.Input['TableflowTopicErrorHandlingArgs']]:
        return pulumi.get(self, "error_handling")

    @error_handling.setter
    def error_handling(self, value: Optional[pulumi.Input['TableflowTopicErrorHandlingArgs']]):
        pulumi.set(self, "error_handling", value)

    @_builtins.property
    @pulumi.getter(name="managedStorages")
    def managed_storages(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['TableflowTopicManagedStorageArgs']]]]:
        """
        The configuration of the Confluent managed storage. See [Quick Start with Managed Storage](https://docs.confluent.io/cloud/current/topics/tableflow/get-started/quick-start-managed-storage.html#cloud-tableflow-quick-start-managed-storage) for more details.
        """
        return pulumi.get(self, "managed_storages")

    @managed_storages.setter
    def managed_storages(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['TableflowTopicManagedStorageArgs']]]]):
        pulumi.set(self, "managed_storages", value)

    @_builtins.property
    @pulumi.getter(name="recordFailureStrategy")
    @_utilities.deprecated("""This attribute is deprecated and will be removed in a future release.""")
    def record_failure_strategy(self) -> Optional[pulumi.Input[_builtins.str]]:
        """
        The strategy to handle record failures in the Tableflow enabled topic during materialization. Accepted values are `SKIP`, `SUSPEND`. For `SKIP`, we skip the bad records and move to the next record. For `SUSPEND`, we suspend the materialization of the topic.
        """
        return pulumi.get(self, "record_failure_strategy")

    @record_failure_strategy.setter
    def record_failure_strategy(self, value: Optional[pulumi.Input[_builtins.str]]):
        pulumi.set(self, "record_failure_strategy", value)

    @_builtins.property
    @pulumi.getter(name="retentionMs")
    def retention_ms(self) -> Optional[pulumi.Input[_builtins.str]]:
        """
        The max age of snapshots (Iceberg) or versions (Delta) (snapshot/version expiration) to keep on the table in milliseconds for the Tableflow enabled topic.
        """
        return pulumi.get(self, "retention_ms")

    @retention_ms.setter
    def retention_ms(self, value: Optional[pulumi.Input[_builtins.str]]):
        pulumi.set(self, "retention_ms", value)

    @_builtins.property
    @pulumi.getter(name="tableFormats")
    def table_formats(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]]:
        """
        The supported table formats for the Tableflow-enabled topic. Accepted values are `DELTA`, `ICEBERG`.
        """
        return pulumi.get(self, "table_formats")

    @table_formats.setter
    def table_formats(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]]):
        pulumi.set(self, "table_formats", value)
197
+
198
@pulumi.input_type
class _TableflowTopicState:
    # Generated state bag used when importing/refreshing a TableflowTopic:
    # every field is optional here (unlike TableflowTopicArgs) because state
    # lookups may carry any subset of properties, including provider-computed
    # outputs such as suspended, table_path and write_mode.
    def __init__(__self__, *,
                 azure_data_lake_storage_gen2: Optional[pulumi.Input['TableflowTopicAzureDataLakeStorageGen2Args']] = None,
                 byob_aws: Optional[pulumi.Input['TableflowTopicByobAwsArgs']] = None,
                 credentials: Optional[pulumi.Input['TableflowTopicCredentialsArgs']] = None,
                 display_name: Optional[pulumi.Input[_builtins.str]] = None,
                 enable_compaction: Optional[pulumi.Input[_builtins.bool]] = None,
                 enable_partitioning: Optional[pulumi.Input[_builtins.bool]] = None,
                 environment: Optional[pulumi.Input['TableflowTopicEnvironmentArgs']] = None,
                 error_handling: Optional[pulumi.Input['TableflowTopicErrorHandlingArgs']] = None,
                 kafka_cluster: Optional[pulumi.Input['TableflowTopicKafkaClusterArgs']] = None,
                 managed_storages: Optional[pulumi.Input[Sequence[pulumi.Input['TableflowTopicManagedStorageArgs']]]] = None,
                 record_failure_strategy: Optional[pulumi.Input[_builtins.str]] = None,
                 retention_ms: Optional[pulumi.Input[_builtins.str]] = None,
                 suspended: Optional[pulumi.Input[_builtins.bool]] = None,
                 table_formats: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]] = None,
                 table_path: Optional[pulumi.Input[_builtins.str]] = None,
                 write_mode: Optional[pulumi.Input[_builtins.str]] = None):
        """
        Input properties used for looking up and filtering TableflowTopic resources.
        :param pulumi.Input['TableflowTopicAzureDataLakeStorageGen2Args'] azure_data_lake_storage_gen2: (Optional Configuration Block) supports the following:
        :param pulumi.Input['TableflowTopicByobAwsArgs'] byob_aws: supports the following (See [Quick Start with Custom Storage](https://docs.confluent.io/cloud/current/topics/tableflow/get-started/quick-start-custom-storage-glue.html#cloud-tableflow-quick-start) for more details):
        :param pulumi.Input['TableflowTopicCredentialsArgs'] credentials: The Cluster API Credentials.
        :param pulumi.Input[_builtins.str] display_name: The name of the Kafka topic for which Tableflow is enabled.
        :param pulumi.Input[_builtins.bool] enable_compaction: (Optional Boolean) This flag determines whether to enable compaction for the Tableflow enabled topic.
        :param pulumi.Input[_builtins.bool] enable_partitioning: (Optional Boolean) This flag determines whether to enable partitioning for the Tableflow enabled topic.
        :param pulumi.Input['TableflowTopicEnvironmentArgs'] environment: Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
        :param pulumi.Input[Sequence[pulumi.Input['TableflowTopicManagedStorageArgs']]] managed_storages: The configuration of the Confluent managed storage. See [Quick Start with Managed Storage](https://docs.confluent.io/cloud/current/topics/tableflow/get-started/quick-start-managed-storage.html#cloud-tableflow-quick-start-managed-storage) for more details.
        :param pulumi.Input[_builtins.str] record_failure_strategy: The strategy to handle record failures in the Tableflow enabled topic during materialization. Accepted values are `SKIP`, `SUSPEND`. For `SKIP`, we skip the bad records and move to the next record. For `SUSPEND`, we suspend the materialization of the topic.
        :param pulumi.Input[_builtins.str] retention_ms: The max age of snapshots (Iceberg) or versions (Delta) (snapshot/version expiration) to keep on the table in milliseconds for the Tableflow enabled topic.
        :param pulumi.Input[_builtins.bool] suspended: (Optional Boolean) Indicates whether the Tableflow should be suspended.
        :param pulumi.Input[Sequence[pulumi.Input[_builtins.str]]] table_formats: The supported table formats for the Tableflow-enabled topic. Accepted values are `DELTA`, `ICEBERG`.
        :param pulumi.Input[_builtins.str] table_path: (Optional String) The current storage path where the data and metadata is stored for this table.
        :param pulumi.Input[_builtins.str] write_mode: (Optional String) Indicates the write mode of the Tableflow topic.
        """
        # Only record properties that were explicitly supplied; unset state
        # fields stay absent rather than being sent as None.
        if azure_data_lake_storage_gen2 is not None:
            pulumi.set(__self__, "azure_data_lake_storage_gen2", azure_data_lake_storage_gen2)
        if byob_aws is not None:
            pulumi.set(__self__, "byob_aws", byob_aws)
        if credentials is not None:
            pulumi.set(__self__, "credentials", credentials)
        if display_name is not None:
            pulumi.set(__self__, "display_name", display_name)
        if enable_compaction is not None:
            pulumi.set(__self__, "enable_compaction", enable_compaction)
        if enable_partitioning is not None:
            pulumi.set(__self__, "enable_partitioning", enable_partitioning)
        if environment is not None:
            pulumi.set(__self__, "environment", environment)
        if error_handling is not None:
            pulumi.set(__self__, "error_handling", error_handling)
        if kafka_cluster is not None:
            pulumi.set(__self__, "kafka_cluster", kafka_cluster)
        if managed_storages is not None:
            pulumi.set(__self__, "managed_storages", managed_storages)
        # Generated deprecation pattern: the first guard emits the warnings,
        # the second (intentionally repeated) guard stores the value.
        if record_failure_strategy is not None:
            warnings.warn("""This attribute is deprecated and will be removed in a future release.""", DeprecationWarning)
            pulumi.log.warn("""record_failure_strategy is deprecated: This attribute is deprecated and will be removed in a future release.""")
        if record_failure_strategy is not None:
            pulumi.set(__self__, "record_failure_strategy", record_failure_strategy)
        if retention_ms is not None:
            pulumi.set(__self__, "retention_ms", retention_ms)
        if suspended is not None:
            pulumi.set(__self__, "suspended", suspended)
        if table_formats is not None:
            pulumi.set(__self__, "table_formats", table_formats)
        if table_path is not None:
            pulumi.set(__self__, "table_path", table_path)
        if write_mode is not None:
            pulumi.set(__self__, "write_mode", write_mode)

    @_builtins.property
    @pulumi.getter(name="azureDataLakeStorageGen2")
    def azure_data_lake_storage_gen2(self) -> Optional[pulumi.Input['TableflowTopicAzureDataLakeStorageGen2Args']]:
        """
        (Optional Configuration Block) supports the following:
        """
        return pulumi.get(self, "azure_data_lake_storage_gen2")

    @azure_data_lake_storage_gen2.setter
    def azure_data_lake_storage_gen2(self, value: Optional[pulumi.Input['TableflowTopicAzureDataLakeStorageGen2Args']]):
        pulumi.set(self, "azure_data_lake_storage_gen2", value)

    @_builtins.property
    @pulumi.getter(name="byobAws")
    def byob_aws(self) -> Optional[pulumi.Input['TableflowTopicByobAwsArgs']]:
        """
        supports the following (See [Quick Start with Custom Storage](https://docs.confluent.io/cloud/current/topics/tableflow/get-started/quick-start-custom-storage-glue.html#cloud-tableflow-quick-start) for more details):
        """
        return pulumi.get(self, "byob_aws")

    @byob_aws.setter
    def byob_aws(self, value: Optional[pulumi.Input['TableflowTopicByobAwsArgs']]):
        pulumi.set(self, "byob_aws", value)

    @_builtins.property
    @pulumi.getter
    def credentials(self) -> Optional[pulumi.Input['TableflowTopicCredentialsArgs']]:
        """
        The Cluster API Credentials.
        """
        return pulumi.get(self, "credentials")

    @credentials.setter
    def credentials(self, value: Optional[pulumi.Input['TableflowTopicCredentialsArgs']]):
        pulumi.set(self, "credentials", value)

    @_builtins.property
    @pulumi.getter(name="displayName")
    def display_name(self) -> Optional[pulumi.Input[_builtins.str]]:
        """
        The name of the Kafka topic for which Tableflow is enabled.
        """
        return pulumi.get(self, "display_name")

    @display_name.setter
    def display_name(self, value: Optional[pulumi.Input[_builtins.str]]):
        pulumi.set(self, "display_name", value)

    @_builtins.property
    @pulumi.getter(name="enableCompaction")
    def enable_compaction(self) -> Optional[pulumi.Input[_builtins.bool]]:
        """
        (Optional Boolean) This flag determines whether to enable compaction for the Tableflow enabled topic.
        """
        return pulumi.get(self, "enable_compaction")

    @enable_compaction.setter
    def enable_compaction(self, value: Optional[pulumi.Input[_builtins.bool]]):
        pulumi.set(self, "enable_compaction", value)

    @_builtins.property
    @pulumi.getter(name="enablePartitioning")
    def enable_partitioning(self) -> Optional[pulumi.Input[_builtins.bool]]:
        """
        (Optional Boolean) This flag determines whether to enable partitioning for the Tableflow enabled topic.
        """
        return pulumi.get(self, "enable_partitioning")

    @enable_partitioning.setter
    def enable_partitioning(self, value: Optional[pulumi.Input[_builtins.bool]]):
        pulumi.set(self, "enable_partitioning", value)

    @_builtins.property
    @pulumi.getter
    def environment(self) -> Optional[pulumi.Input['TableflowTopicEnvironmentArgs']]:
        """
        Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
        """
        return pulumi.get(self, "environment")

    @environment.setter
    def environment(self, value: Optional[pulumi.Input['TableflowTopicEnvironmentArgs']]):
        pulumi.set(self, "environment", value)

    @_builtins.property
    @pulumi.getter(name="errorHandling")
    # NOTE(review): no schema description for this field; see the
    # TableflowTopicErrorHandlingArgs input type for its shape.
    def error_handling(self) -> Optional[pulumi.Input['TableflowTopicErrorHandlingArgs']]:
        return pulumi.get(self, "error_handling")

    @error_handling.setter
    def error_handling(self, value: Optional[pulumi.Input['TableflowTopicErrorHandlingArgs']]):
        pulumi.set(self, "error_handling", value)

    @_builtins.property
    @pulumi.getter(name="kafkaCluster")
    # NOTE(review): the provider schema supplies no description for this
    # field, so the generator emitted no docstring here.
    def kafka_cluster(self) -> Optional[pulumi.Input['TableflowTopicKafkaClusterArgs']]:
        return pulumi.get(self, "kafka_cluster")

    @kafka_cluster.setter
    def kafka_cluster(self, value: Optional[pulumi.Input['TableflowTopicKafkaClusterArgs']]):
        pulumi.set(self, "kafka_cluster", value)

    @_builtins.property
    @pulumi.getter(name="managedStorages")
    def managed_storages(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['TableflowTopicManagedStorageArgs']]]]:
        """
        The configuration of the Confluent managed storage. See [Quick Start with Managed Storage](https://docs.confluent.io/cloud/current/topics/tableflow/get-started/quick-start-managed-storage.html#cloud-tableflow-quick-start-managed-storage) for more details.
        """
        return pulumi.get(self, "managed_storages")

    @managed_storages.setter
    def managed_storages(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['TableflowTopicManagedStorageArgs']]]]):
        pulumi.set(self, "managed_storages", value)

    @_builtins.property
    @pulumi.getter(name="recordFailureStrategy")
    @_utilities.deprecated("""This attribute is deprecated and will be removed in a future release.""")
    def record_failure_strategy(self) -> Optional[pulumi.Input[_builtins.str]]:
        """
        The strategy to handle record failures in the Tableflow enabled topic during materialization. Accepted values are `SKIP`, `SUSPEND`. For `SKIP`, we skip the bad records and move to the next record. For `SUSPEND`, we suspend the materialization of the topic.
        """
        return pulumi.get(self, "record_failure_strategy")

    @record_failure_strategy.setter
    def record_failure_strategy(self, value: Optional[pulumi.Input[_builtins.str]]):
        pulumi.set(self, "record_failure_strategy", value)

    @_builtins.property
    @pulumi.getter(name="retentionMs")
    def retention_ms(self) -> Optional[pulumi.Input[_builtins.str]]:
        """
        The max age of snapshots (Iceberg) or versions (Delta) (snapshot/version expiration) to keep on the table in milliseconds for the Tableflow enabled topic.
        """
        return pulumi.get(self, "retention_ms")

    @retention_ms.setter
    def retention_ms(self, value: Optional[pulumi.Input[_builtins.str]]):
        pulumi.set(self, "retention_ms", value)

    @_builtins.property
    @pulumi.getter
    def suspended(self) -> Optional[pulumi.Input[_builtins.bool]]:
        """
        (Optional Boolean) Indicates whether the Tableflow should be suspended.
        """
        return pulumi.get(self, "suspended")

    @suspended.setter
    def suspended(self, value: Optional[pulumi.Input[_builtins.bool]]):
        pulumi.set(self, "suspended", value)

    @_builtins.property
    @pulumi.getter(name="tableFormats")
    def table_formats(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]]:
        """
        The supported table formats for the Tableflow-enabled topic. Accepted values are `DELTA`, `ICEBERG`.
        """
        return pulumi.get(self, "table_formats")

    @table_formats.setter
    def table_formats(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]]):
        pulumi.set(self, "table_formats", value)

    @_builtins.property
    @pulumi.getter(name="tablePath")
    def table_path(self) -> Optional[pulumi.Input[_builtins.str]]:
        """
        (Optional String) The current storage path where the data and metadata is stored for this table.
        """
        return pulumi.get(self, "table_path")

    @table_path.setter
    def table_path(self, value: Optional[pulumi.Input[_builtins.str]]):
        pulumi.set(self, "table_path", value)

    @_builtins.property
    @pulumi.getter(name="writeMode")
    def write_mode(self) -> Optional[pulumi.Input[_builtins.str]]:
        """
        (Optional String) Indicates the write mode of the Tableflow topic.
        """
        return pulumi.get(self, "write_mode")

    @write_mode.setter
    def write_mode(self, value: Optional[pulumi.Input[_builtins.str]]):
        pulumi.set(self, "write_mode", value)
457
+
458
@pulumi.type_token("confluentcloud:index/tableflowTopic:TableflowTopic")
class TableflowTopic(pulumi.CustomResource):
    # Auto-generated Pulumi resource wrapping the Confluent Cloud Tableflow Topic API.
    # Usage and import instructions live in the __init__ overload docstrings below.
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 azure_data_lake_storage_gen2: Optional[pulumi.Input[Union['TableflowTopicAzureDataLakeStorageGen2Args', 'TableflowTopicAzureDataLakeStorageGen2ArgsDict']]] = None,
                 byob_aws: Optional[pulumi.Input[Union['TableflowTopicByobAwsArgs', 'TableflowTopicByobAwsArgsDict']]] = None,
                 credentials: Optional[pulumi.Input[Union['TableflowTopicCredentialsArgs', 'TableflowTopicCredentialsArgsDict']]] = None,
                 display_name: Optional[pulumi.Input[_builtins.str]] = None,
                 environment: Optional[pulumi.Input[Union['TableflowTopicEnvironmentArgs', 'TableflowTopicEnvironmentArgsDict']]] = None,
                 error_handling: Optional[pulumi.Input[Union['TableflowTopicErrorHandlingArgs', 'TableflowTopicErrorHandlingArgsDict']]] = None,
                 kafka_cluster: Optional[pulumi.Input[Union['TableflowTopicKafkaClusterArgs', 'TableflowTopicKafkaClusterArgsDict']]] = None,
                 managed_storages: Optional[pulumi.Input[Sequence[pulumi.Input[Union['TableflowTopicManagedStorageArgs', 'TableflowTopicManagedStorageArgsDict']]]]] = None,
                 record_failure_strategy: Optional[pulumi.Input[_builtins.str]] = None,
                 retention_ms: Optional[pulumi.Input[_builtins.str]] = None,
                 table_formats: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]] = None,
                 __props__=None):
        """
        ## Example Usage

        ### Option #1: Manage multiple Tableflow Topics in the same Pulumi Stack

        ```python
        import pulumi
        import pulumi_confluentcloud as confluentcloud

        example = confluentcloud.TableflowTopic("example",
            managed_storages=[{}],
            environment={
                "id": staging["id"],
            },
            kafka_cluster={
                "id": staging_confluent_kafka_cluster["id"],
            },
            display_name=orders["topicName"],
            table_formats=[
                "ICEBERG",
                "DELTA",
            ],
            credentials={
                "key": env_admin_tableflow_api_key["id"],
                "secret": env_admin_tableflow_api_key["secret"],
            })
        ```

        ### Option #2: Manage a single Tableflow Topic in the same Pulumi Stack

        ```python
        import pulumi
        import pulumi_confluentcloud as confluentcloud

        example = confluentcloud.TableflowTopic("example",
            environment={
                "id": staging["id"],
            },
            kafka_cluster={
                "id": staging_confluent_kafka_cluster["id"],
            },
            display_name=orders["topicName"],
            byob_aws={
                "bucket_name": "bucket_1",
                "provider_integration_id": main["id"],
            })
        ```
        ## Import

        You can import a Tableflow Topic by using the Tableflow Topic name, Environment ID, and Kafka Cluster ID, in the format `<Environment ID>/<Kafka Cluster ID>/<Tableflow Topic name>`, for example:

        Option #1: Manage multiple Tableflow Topics in the same Pulumi Stack

        $ export IMPORT_TABLEFLOW_API_KEY="<tableflow_api_key>"

        $ export IMPORT_TABLEFLOW_API_SECRET="<tableflow_api_secret>"

        ```sh
        $ pulumi import confluentcloud:index/tableflowTopic:TableflowTopic example env-abc123/lkc-abc123/orders
        ```

        Option #2: Manage a single Tableflow Topic in the same Pulumi Stack

        ```sh
        $ pulumi import confluentcloud:index/tableflowTopic:TableflowTopic example env-abc123/lkc-abc123/orders
        ```

        !> **Warning:** Do not forget to delete terminal command history afterwards for security purposes.

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[Union['TableflowTopicAzureDataLakeStorageGen2Args', 'TableflowTopicAzureDataLakeStorageGen2ArgsDict']] azure_data_lake_storage_gen2: (Optional Configuration Block) supports the following:
        :param pulumi.Input[Union['TableflowTopicByobAwsArgs', 'TableflowTopicByobAwsArgsDict']] byob_aws: supports the following (See [Quick Start with Custom Storage](https://docs.confluent.io/cloud/current/topics/tableflow/get-started/quick-start-custom-storage-glue.html#cloud-tableflow-quick-start) for more details):
        :param pulumi.Input[Union['TableflowTopicCredentialsArgs', 'TableflowTopicCredentialsArgsDict']] credentials: The Cluster API Credentials.
        :param pulumi.Input[_builtins.str] display_name: The name of the Kafka topic for which Tableflow is enabled.
        :param pulumi.Input[Union['TableflowTopicEnvironmentArgs', 'TableflowTopicEnvironmentArgsDict']] environment: Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
        :param pulumi.Input[Sequence[pulumi.Input[Union['TableflowTopicManagedStorageArgs', 'TableflowTopicManagedStorageArgsDict']]]] managed_storages: The configuration of the Confluent managed storage. See [Quick Start with Managed Storage](https://docs.confluent.io/cloud/current/topics/tableflow/get-started/quick-start-managed-storage.html#cloud-tableflow-quick-start-managed-storage) for more details.
        :param pulumi.Input[_builtins.str] record_failure_strategy: The strategy to handle record failures in the Tableflow enabled topic during materialization. Accepted values are `SKIP`, `SUSPEND`. For `SKIP`, we skip the bad records and move to the next record. For `SUSPEND`, we suspend the materialization of the topic.
        :param pulumi.Input[_builtins.str] retention_ms: The max age of snapshots (Iceberg) or versions (Delta) (snapshot/version expiration) to keep on the table in milliseconds for the Tableflow enabled topic.
        :param pulumi.Input[Sequence[pulumi.Input[_builtins.str]]] table_formats: The supported table formats for the Tableflow-enabled topic. Accepted values are `DELTA`, `ICEBERG`.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: TableflowTopicArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        ## Example Usage

        ### Option #1: Manage multiple Tableflow Topics in the same Pulumi Stack

        ```python
        import pulumi
        import pulumi_confluentcloud as confluentcloud

        example = confluentcloud.TableflowTopic("example",
            managed_storages=[{}],
            environment={
                "id": staging["id"],
            },
            kafka_cluster={
                "id": staging_confluent_kafka_cluster["id"],
            },
            display_name=orders["topicName"],
            table_formats=[
                "ICEBERG",
                "DELTA",
            ],
            credentials={
                "key": env_admin_tableflow_api_key["id"],
                "secret": env_admin_tableflow_api_key["secret"],
            })
        ```

        ### Option #2: Manage a single Tableflow Topic in the same Pulumi Stack

        ```python
        import pulumi
        import pulumi_confluentcloud as confluentcloud

        example = confluentcloud.TableflowTopic("example",
            environment={
                "id": staging["id"],
            },
            kafka_cluster={
                "id": staging_confluent_kafka_cluster["id"],
            },
            display_name=orders["topicName"],
            byob_aws={
                "bucket_name": "bucket_1",
                "provider_integration_id": main["id"],
            })
        ```
        ## Import

        You can import a Tableflow Topic by using the Tableflow Topic name, Environment ID, and Kafka Cluster ID, in the format `<Environment ID>/<Kafka Cluster ID>/<Tableflow Topic name>`, for example:

        Option #1: Manage multiple Tableflow Topics in the same Pulumi Stack

        $ export IMPORT_TABLEFLOW_API_KEY="<tableflow_api_key>"

        $ export IMPORT_TABLEFLOW_API_SECRET="<tableflow_api_secret>"

        ```sh
        $ pulumi import confluentcloud:index/tableflowTopic:TableflowTopic example env-abc123/lkc-abc123/orders
        ```

        Option #2: Manage a single Tableflow Topic in the same Pulumi Stack

        ```sh
        $ pulumi import confluentcloud:index/tableflowTopic:TableflowTopic example env-abc123/lkc-abc123/orders
        ```

        !> **Warning:** Do not forget to delete terminal command history afterwards for security purposes.

        :param str resource_name: The name of the resource.
        :param TableflowTopicArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Overload dispatcher: accept either a TableflowTopicArgs object or
        # individual keyword arguments, then funnel both into _internal_init.
        resource_args, opts = _utilities.get_resource_args_opts(TableflowTopicArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 azure_data_lake_storage_gen2: Optional[pulumi.Input[Union['TableflowTopicAzureDataLakeStorageGen2Args', 'TableflowTopicAzureDataLakeStorageGen2ArgsDict']]] = None,
                 byob_aws: Optional[pulumi.Input[Union['TableflowTopicByobAwsArgs', 'TableflowTopicByobAwsArgsDict']]] = None,
                 credentials: Optional[pulumi.Input[Union['TableflowTopicCredentialsArgs', 'TableflowTopicCredentialsArgsDict']]] = None,
                 display_name: Optional[pulumi.Input[_builtins.str]] = None,
                 environment: Optional[pulumi.Input[Union['TableflowTopicEnvironmentArgs', 'TableflowTopicEnvironmentArgsDict']]] = None,
                 error_handling: Optional[pulumi.Input[Union['TableflowTopicErrorHandlingArgs', 'TableflowTopicErrorHandlingArgsDict']]] = None,
                 kafka_cluster: Optional[pulumi.Input[Union['TableflowTopicKafkaClusterArgs', 'TableflowTopicKafkaClusterArgsDict']]] = None,
                 managed_storages: Optional[pulumi.Input[Sequence[pulumi.Input[Union['TableflowTopicManagedStorageArgs', 'TableflowTopicManagedStorageArgsDict']]]]] = None,
                 record_failure_strategy: Optional[pulumi.Input[_builtins.str]] = None,
                 retention_ms: Optional[pulumi.Input[_builtins.str]] = None,
                 table_formats: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]] = None,
                 __props__=None):
        # Apply provider-wide default resource options before validating.
        opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts)
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.id is None:
            # Creating a new resource: __props__ may only be supplied when
            # looking up an existing resource by id.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = TableflowTopicArgs.__new__(TableflowTopicArgs)

            __props__.__dict__["azure_data_lake_storage_gen2"] = azure_data_lake_storage_gen2
            __props__.__dict__["byob_aws"] = byob_aws
            # Wrap credentials as a secret output so the API key/secret are
            # never stored in plain text in the state file.
            __props__.__dict__["credentials"] = None if credentials is None else pulumi.Output.secret(credentials)
            # display_name, environment, and kafka_cluster are required unless
            # the resource is being rehydrated from an existing URN.
            if display_name is None and not opts.urn:
                raise TypeError("Missing required property 'display_name'")
            __props__.__dict__["display_name"] = display_name
            if environment is None and not opts.urn:
                raise TypeError("Missing required property 'environment'")
            __props__.__dict__["environment"] = environment
            __props__.__dict__["error_handling"] = error_handling
            if kafka_cluster is None and not opts.urn:
                raise TypeError("Missing required property 'kafka_cluster'")
            __props__.__dict__["kafka_cluster"] = kafka_cluster
            __props__.__dict__["managed_storages"] = managed_storages
            __props__.__dict__["record_failure_strategy"] = record_failure_strategy
            __props__.__dict__["retention_ms"] = retention_ms
            __props__.__dict__["table_formats"] = table_formats
            # Output-only attributes: populated by the provider, never set by callers.
            __props__.__dict__["enable_compaction"] = None
            __props__.__dict__["enable_partitioning"] = None
            __props__.__dict__["suspended"] = None
            __props__.__dict__["table_path"] = None
            __props__.__dict__["write_mode"] = None
        # Always mark "credentials" as an additional secret output.
        secret_opts = pulumi.ResourceOptions(additional_secret_outputs=["credentials"])
        opts = pulumi.ResourceOptions.merge(opts, secret_opts)
        super(TableflowTopic, __self__).__init__(
            'confluentcloud:index/tableflowTopic:TableflowTopic',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            azure_data_lake_storage_gen2: Optional[pulumi.Input[Union['TableflowTopicAzureDataLakeStorageGen2Args', 'TableflowTopicAzureDataLakeStorageGen2ArgsDict']]] = None,
            byob_aws: Optional[pulumi.Input[Union['TableflowTopicByobAwsArgs', 'TableflowTopicByobAwsArgsDict']]] = None,
            credentials: Optional[pulumi.Input[Union['TableflowTopicCredentialsArgs', 'TableflowTopicCredentialsArgsDict']]] = None,
            display_name: Optional[pulumi.Input[_builtins.str]] = None,
            enable_compaction: Optional[pulumi.Input[_builtins.bool]] = None,
            enable_partitioning: Optional[pulumi.Input[_builtins.bool]] = None,
            environment: Optional[pulumi.Input[Union['TableflowTopicEnvironmentArgs', 'TableflowTopicEnvironmentArgsDict']]] = None,
            error_handling: Optional[pulumi.Input[Union['TableflowTopicErrorHandlingArgs', 'TableflowTopicErrorHandlingArgsDict']]] = None,
            kafka_cluster: Optional[pulumi.Input[Union['TableflowTopicKafkaClusterArgs', 'TableflowTopicKafkaClusterArgsDict']]] = None,
            managed_storages: Optional[pulumi.Input[Sequence[pulumi.Input[Union['TableflowTopicManagedStorageArgs', 'TableflowTopicManagedStorageArgsDict']]]]] = None,
            record_failure_strategy: Optional[pulumi.Input[_builtins.str]] = None,
            retention_ms: Optional[pulumi.Input[_builtins.str]] = None,
            suspended: Optional[pulumi.Input[_builtins.bool]] = None,
            table_formats: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]] = None,
            table_path: Optional[pulumi.Input[_builtins.str]] = None,
            write_mode: Optional[pulumi.Input[_builtins.str]] = None) -> 'TableflowTopic':
        """
        Get an existing TableflowTopic resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[Union['TableflowTopicAzureDataLakeStorageGen2Args', 'TableflowTopicAzureDataLakeStorageGen2ArgsDict']] azure_data_lake_storage_gen2: (Optional Configuration Block) supports the following:
        :param pulumi.Input[Union['TableflowTopicByobAwsArgs', 'TableflowTopicByobAwsArgsDict']] byob_aws: supports the following (See [Quick Start with Custom Storage](https://docs.confluent.io/cloud/current/topics/tableflow/get-started/quick-start-custom-storage-glue.html#cloud-tableflow-quick-start) for more details):
        :param pulumi.Input[Union['TableflowTopicCredentialsArgs', 'TableflowTopicCredentialsArgsDict']] credentials: The Cluster API Credentials.
        :param pulumi.Input[_builtins.str] display_name: The name of the Kafka topic for which Tableflow is enabled.
        :param pulumi.Input[_builtins.bool] enable_compaction: (Optional Boolean) This flag determines whether to enable compaction for the Tableflow enabled topic.
        :param pulumi.Input[_builtins.bool] enable_partitioning: (Optional Boolean) This flag determines whether to enable partitioning for the Tableflow enabled topic.
        :param pulumi.Input[Union['TableflowTopicEnvironmentArgs', 'TableflowTopicEnvironmentArgsDict']] environment: Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
        :param pulumi.Input[Sequence[pulumi.Input[Union['TableflowTopicManagedStorageArgs', 'TableflowTopicManagedStorageArgsDict']]]] managed_storages: The configuration of the Confluent managed storage. See [Quick Start with Managed Storage](https://docs.confluent.io/cloud/current/topics/tableflow/get-started/quick-start-managed-storage.html#cloud-tableflow-quick-start-managed-storage) for more details.
        :param pulumi.Input[_builtins.str] record_failure_strategy: The strategy to handle record failures in the Tableflow enabled topic during materialization. Accepted values are `SKIP`, `SUSPEND`. For `SKIP`, we skip the bad records and move to the next record. For `SUSPEND`, we suspend the materialization of the topic.
        :param pulumi.Input[_builtins.str] retention_ms: The max age of snapshots (Iceberg) or versions (Delta) (snapshot/version expiration) to keep on the table in milliseconds for the Tableflow enabled topic.
        :param pulumi.Input[_builtins.bool] suspended: (Optional Boolean) Indicates whether the Tableflow should be suspended.
        :param pulumi.Input[Sequence[pulumi.Input[_builtins.str]]] table_formats: The supported table formats for the Tableflow-enabled topic. Accepted values are `DELTA`, `ICEBERG`.
        :param pulumi.Input[_builtins.str] table_path: (Optional String) The current storage path where the data and metadata is stored for this table.
        :param pulumi.Input[_builtins.str] write_mode: (Optional String) Indicates the write mode of the Tableflow topic.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        # Build a state bag from the caller-supplied values; unspecified
        # properties are refreshed from the provider.
        __props__ = _TableflowTopicState.__new__(_TableflowTopicState)

        __props__.__dict__["azure_data_lake_storage_gen2"] = azure_data_lake_storage_gen2
        __props__.__dict__["byob_aws"] = byob_aws
        __props__.__dict__["credentials"] = credentials
        __props__.__dict__["display_name"] = display_name
        __props__.__dict__["enable_compaction"] = enable_compaction
        __props__.__dict__["enable_partitioning"] = enable_partitioning
        __props__.__dict__["environment"] = environment
        __props__.__dict__["error_handling"] = error_handling
        __props__.__dict__["kafka_cluster"] = kafka_cluster
        __props__.__dict__["managed_storages"] = managed_storages
        __props__.__dict__["record_failure_strategy"] = record_failure_strategy
        __props__.__dict__["retention_ms"] = retention_ms
        __props__.__dict__["suspended"] = suspended
        __props__.__dict__["table_formats"] = table_formats
        __props__.__dict__["table_path"] = table_path
        __props__.__dict__["write_mode"] = write_mode
        return TableflowTopic(resource_name, opts=opts, __props__=__props__)

    @_builtins.property
    @pulumi.getter(name="azureDataLakeStorageGen2")
    def azure_data_lake_storage_gen2(self) -> pulumi.Output[Optional['outputs.TableflowTopicAzureDataLakeStorageGen2']]:
        """
        (Optional Configuration Block) supports the following:
        """
        return pulumi.get(self, "azure_data_lake_storage_gen2")

    @_builtins.property
    @pulumi.getter(name="byobAws")
    def byob_aws(self) -> pulumi.Output[Optional['outputs.TableflowTopicByobAws']]:
        """
        supports the following (See [Quick Start with Custom Storage](https://docs.confluent.io/cloud/current/topics/tableflow/get-started/quick-start-custom-storage-glue.html#cloud-tableflow-quick-start) for more details):
        """
        return pulumi.get(self, "byob_aws")

    @_builtins.property
    @pulumi.getter
    def credentials(self) -> pulumi.Output[Optional['outputs.TableflowTopicCredentials']]:
        """
        The Cluster API Credentials.
        """
        return pulumi.get(self, "credentials")

    @_builtins.property
    @pulumi.getter(name="displayName")
    def display_name(self) -> pulumi.Output[_builtins.str]:
        """
        The name of the Kafka topic for which Tableflow is enabled.
        """
        return pulumi.get(self, "display_name")

    @_builtins.property
    @pulumi.getter(name="enableCompaction")
    def enable_compaction(self) -> pulumi.Output[_builtins.bool]:
        """
        (Optional Boolean) This flag determines whether to enable compaction for the Tableflow enabled topic.
        """
        return pulumi.get(self, "enable_compaction")

    @_builtins.property
    @pulumi.getter(name="enablePartitioning")
    def enable_partitioning(self) -> pulumi.Output[_builtins.bool]:
        """
        (Optional Boolean) This flag determines whether to enable partitioning for the Tableflow enabled topic.
        """
        return pulumi.get(self, "enable_partitioning")

    @_builtins.property
    @pulumi.getter
    def environment(self) -> pulumi.Output['outputs.TableflowTopicEnvironment']:
        """
        Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
        """
        return pulumi.get(self, "environment")

    @_builtins.property
    @pulumi.getter(name="errorHandling")
    def error_handling(self) -> pulumi.Output['outputs.TableflowTopicErrorHandling']:
        """
        The error-handling configuration block for the Tableflow enabled topic.
        """
        # NOTE(review): no upstream description was generated for this attribute;
        # confirm semantics against the provider schema.
        return pulumi.get(self, "error_handling")

    @_builtins.property
    @pulumi.getter(name="kafkaCluster")
    def kafka_cluster(self) -> pulumi.Output['outputs.TableflowTopicKafkaCluster']:
        """
        The Kafka cluster associated with this Tableflow enabled topic.
        """
        # NOTE(review): no upstream description was generated for this attribute;
        # confirm semantics against the provider schema.
        return pulumi.get(self, "kafka_cluster")

    @_builtins.property
    @pulumi.getter(name="managedStorages")
    def managed_storages(self) -> pulumi.Output[Optional[Sequence['outputs.TableflowTopicManagedStorage']]]:
        """
        The configuration of the Confluent managed storage. See [Quick Start with Managed Storage](https://docs.confluent.io/cloud/current/topics/tableflow/get-started/quick-start-managed-storage.html#cloud-tableflow-quick-start-managed-storage) for more details.
        """
        return pulumi.get(self, "managed_storages")

    @_builtins.property
    @pulumi.getter(name="recordFailureStrategy")
    @_utilities.deprecated("""This attribute is deprecated and will be removed in a future release.""")
    def record_failure_strategy(self) -> pulumi.Output[_builtins.str]:
        """
        The strategy to handle record failures in the Tableflow enabled topic during materialization. Accepted values are `SKIP`, `SUSPEND`. For `SKIP`, we skip the bad records and move to the next record. For `SUSPEND`, we suspend the materialization of the topic.
        """
        return pulumi.get(self, "record_failure_strategy")

    @_builtins.property
    @pulumi.getter(name="retentionMs")
    def retention_ms(self) -> pulumi.Output[Optional[_builtins.str]]:
        """
        The max age of snapshots (Iceberg) or versions (Delta) (snapshot/version expiration) to keep on the table in milliseconds for the Tableflow enabled topic.
        """
        return pulumi.get(self, "retention_ms")

    @_builtins.property
    @pulumi.getter
    def suspended(self) -> pulumi.Output[_builtins.bool]:
        """
        (Optional Boolean) Indicates whether the Tableflow should be suspended.
        """
        return pulumi.get(self, "suspended")

    @_builtins.property
    @pulumi.getter(name="tableFormats")
    def table_formats(self) -> pulumi.Output[Sequence[_builtins.str]]:
        """
        The supported table formats for the Tableflow-enabled topic. Accepted values are `DELTA`, `ICEBERG`.
        """
        return pulumi.get(self, "table_formats")

    @_builtins.property
    @pulumi.getter(name="tablePath")
    def table_path(self) -> pulumi.Output[_builtins.str]:
        """
        (Optional String) The current storage path where the data and metadata is stored for this table.
        """
        return pulumi.get(self, "table_path")

    @_builtins.property
    @pulumi.getter(name="writeMode")
    def write_mode(self) -> pulumi.Output[_builtins.str]:
        """
        (Optional String) Indicates the write mode of the Tableflow topic.
        """
        return pulumi.get(self, "write_mode")