pulumi-confluentcloud 2.52.0a1763617028-py3-none-any.whl → 2.53.0-py3-none-any.whl

This diff reflects the changes between two publicly released versions of the package, as they appear in their respective public registries, and is provided for informational purposes only.
@@ -327,6 +327,8 @@ __all__ = [
327
327
  'SubjectModeCredentialsArgsDict',
328
328
  'SubjectModeSchemaRegistryClusterArgs',
329
329
  'SubjectModeSchemaRegistryClusterArgsDict',
330
+ 'TableflowTopicAzureDataLakeStorageGen2Args',
331
+ 'TableflowTopicAzureDataLakeStorageGen2ArgsDict',
330
332
  'TableflowTopicByobAwsArgs',
331
333
  'TableflowTopicByobAwsArgsDict',
332
334
  'TableflowTopicCredentialsArgs',
@@ -3952,14 +3954,34 @@ class KafkaClientQuotaThroughputArgs:
3952
3954
 
3953
3955
  if not MYPY:
3954
3956
  class KafkaClusterBasicArgsDict(TypedDict):
3955
- pass
3957
+ max_ecku: NotRequired[pulumi.Input[_builtins.int]]
3958
+ """
3959
+ The maximum number of Elastic Confluent Kafka Units (eCKUs) that Kafka clusters should auto-scale to. Kafka clusters with "HIGH" availability must have at least two eCKUs.
3960
+ """
3956
3961
  elif False:
3957
3962
  KafkaClusterBasicArgsDict: TypeAlias = Mapping[str, Any]
3958
3963
 
3959
3964
  @pulumi.input_type
3960
3965
  class KafkaClusterBasicArgs:
3961
- def __init__(__self__):
3962
- pass
3966
+ def __init__(__self__, *,
3967
+ max_ecku: Optional[pulumi.Input[_builtins.int]] = None):
3968
+ """
3969
+ :param pulumi.Input[_builtins.int] max_ecku: The maximum number of Elastic Confluent Kafka Units (eCKUs) that Kafka clusters should auto-scale to. Kafka clusters with "HIGH" availability must have at least two eCKUs.
3970
+ """
3971
+ if max_ecku is not None:
3972
+ pulumi.set(__self__, "max_ecku", max_ecku)
3973
+
3974
+ @_builtins.property
3975
+ @pulumi.getter(name="maxEcku")
3976
+ def max_ecku(self) -> Optional[pulumi.Input[_builtins.int]]:
3977
+ """
3978
+ The maximum number of Elastic Confluent Kafka Units (eCKUs) that Kafka clusters should auto-scale to. Kafka clusters with "HIGH" availability must have at least two eCKUs.
3979
+ """
3980
+ return pulumi.get(self, "max_ecku")
3981
+
3982
+ @max_ecku.setter
3983
+ def max_ecku(self, value: Optional[pulumi.Input[_builtins.int]]):
3984
+ pulumi.set(self, "max_ecku", value)
3963
3985
 
3964
3986
 
3965
3987
  if not MYPY:
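
Not from the diff itself: a minimal Pulumi (Python) sketch of how the new max_ecku input could be set. The same optional field is added to the basic, standard, enterprise, and freight blocks in this release, so one example stands in for all of them; the environment ID, region, and the cap of 2 eCKUs are placeholder values, not recommendations.

import pulumi
import pulumi_confluentcloud as confluentcloud

# Cap elastic auto-scaling for a Standard cluster at 2 eCKUs.
# "env-abc123" is a placeholder environment ID.
cluster = confluentcloud.KafkaCluster(
    "inventory-cluster",
    display_name="inventory",
    availability="SINGLE_ZONE",
    cloud="AWS",
    region="us-east-2",
    standard=confluentcloud.KafkaClusterStandardArgs(max_ecku=2),
    environment=confluentcloud.KafkaClusterEnvironmentArgs(id="env-abc123"),
)

pulumi.export("cluster-id", cluster.id)
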
@@ -4099,6 +4121,8 @@ if not MYPY:
4099
4121
  !> **Warning:** You can only upgrade clusters from `basic` to `standard`.
4100
4122
 
4101
4123
  > **Note:** Currently, provisioning of a Dedicated Kafka cluster takes around 25 minutes on average but might take up to 24 hours. If you can't wait for the `pulumi up` step to finish, you can exit it and import the cluster by using the `pulumi import` command once it has been provisioned. When the cluster is provisioned, you will receive an email notification, and you can also follow updates on the Target Environment web page of the Confluent Cloud website.
4124
+
4125
+ > **Note:** Refer to [eCKU/CKU comparison](https://docs.confluent.io/cloud/current/clusters/cluster-types.html#ecku-cku-comparison) documentation for the minimum/maximum eCKU requirements for each cluster type.
4102
4126
  """
4103
4127
  encryption_key: NotRequired[pulumi.Input[_builtins.str]]
4104
4128
  """
@@ -4128,6 +4152,8 @@ class KafkaClusterDedicatedArgs:
4128
4152
  !> **Warning:** You can only upgrade clusters from `basic` to `standard`.
4129
4153
 
4130
4154
  > **Note:** Currently, provisioning of a Dedicated Kafka cluster takes around 25 minutes on average but might take up to 24 hours. If you can't wait for the `pulumi up` step to finish, you can exit it and import the cluster by using the `pulumi import` command once it has been provisioned. When the cluster is provisioned, you will receive an email notification, and you can also follow updates on the Target Environment web page of the Confluent Cloud website.
4155
+
4156
+ > **Note:** Refer to [eCKU/CKU comparison](https://docs.confluent.io/cloud/current/clusters/cluster-types.html#ecku-cku-comparison) documentation for the minimum/maximum eCKU requirements for each cluster type.
4131
4157
  :param pulumi.Input[_builtins.str] encryption_key: The ID of the encryption key that is used to encrypt the data in the Kafka cluster.
4132
4158
  :param pulumi.Input[Sequence[pulumi.Input[_builtins.str]]] zones: (Required List of String) The list of zones the cluster is in.
4133
4159
  - On AWS, zones are AWS [AZ IDs](https://docs.aws.amazon.com/ram/latest/userguide/working-with-az-ids.html), for example, `use1-az3`.
@@ -4151,6 +4177,8 @@ class KafkaClusterDedicatedArgs:
4151
4177
  !> **Warning:** You can only upgrade clusters from `basic` to `standard`.
4152
4178
 
4153
4179
  > **Note:** Currently, provisioning of a Dedicated Kafka cluster takes around 25 minutes on average but might take up to 24 hours. If you can't wait for the `pulumi up` step to finish, you can exit it and import the cluster by using the `pulumi import` command once it has been provisioned. When the cluster is provisioned, you will receive an email notification, and you can also follow updates on the Target Environment web page of the Confluent Cloud website.
4180
+
4181
+ > **Note:** Refer to [eCKU/CKU comparison](https://docs.confluent.io/cloud/current/clusters/cluster-types.html#ecku-cku-comparison) documentation for the minimum/maximum eCKU requirements for each cluster type.
4154
4182
  """
4155
4183
  return pulumi.get(self, "cku")
4156
4184
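
For contrast with the elastic cluster types, a hedged sketch of a Dedicated cluster, which is still sized explicitly through the cku field this docstring describes; the IDs, region, and the value 2 are placeholders.

import pulumi_confluentcloud as confluentcloud

# Dedicated clusters are provisioned with a fixed CKU count instead of max_ecku.
dedicated = confluentcloud.KafkaCluster(
    "orders-cluster",
    display_name="orders",
    availability="MULTI_ZONE",
    cloud="AWS",
    region="us-west-2",
    dedicated=confluentcloud.KafkaClusterDedicatedArgs(cku=2),
    environment=confluentcloud.KafkaClusterEnvironmentArgs(id="env-abc123"),
)
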
 
@@ -4278,14 +4306,34 @@ class KafkaClusterEndpointArgs:
4278
4306
 
4279
4307
  if not MYPY:
4280
4308
  class KafkaClusterEnterpriseArgsDict(TypedDict):
4281
- pass
4309
+ max_ecku: NotRequired[pulumi.Input[_builtins.int]]
4310
+ """
4311
+ The maximum number of Elastic Confluent Kafka Units (eCKUs) that Kafka clusters should auto-scale to. Kafka clusters with "HIGH" availability must have at least two eCKUs.
4312
+ """
4282
4313
  elif False:
4283
4314
  KafkaClusterEnterpriseArgsDict: TypeAlias = Mapping[str, Any]
4284
4315
 
4285
4316
  @pulumi.input_type
4286
4317
  class KafkaClusterEnterpriseArgs:
4287
- def __init__(__self__):
4288
- pass
4318
+ def __init__(__self__, *,
4319
+ max_ecku: Optional[pulumi.Input[_builtins.int]] = None):
4320
+ """
4321
+ :param pulumi.Input[_builtins.int] max_ecku: The maximum number of Elastic Confluent Kafka Units (eCKUs) that Kafka clusters should auto-scale to. Kafka clusters with "HIGH" availability must have at least two eCKUs.
4322
+ """
4323
+ if max_ecku is not None:
4324
+ pulumi.set(__self__, "max_ecku", max_ecku)
4325
+
4326
+ @_builtins.property
4327
+ @pulumi.getter(name="maxEcku")
4328
+ def max_ecku(self) -> Optional[pulumi.Input[_builtins.int]]:
4329
+ """
4330
+ The maximum number of Elastic Confluent Kafka Units (eCKUs) that Kafka clusters should auto-scale to. Kafka clusters with "HIGH" availability must have at least two eCKUs.
4331
+ """
4332
+ return pulumi.get(self, "max_ecku")
4333
+
4334
+ @max_ecku.setter
4335
+ def max_ecku(self, value: Optional[pulumi.Input[_builtins.int]]):
4336
+ pulumi.set(self, "max_ecku", value)
4289
4337
 
4290
4338
 
4291
4339
  if not MYPY:
@@ -4321,6 +4369,10 @@ class KafkaClusterEnvironmentArgs:
4321
4369
 
4322
4370
  if not MYPY:
4323
4371
  class KafkaClusterFreightArgsDict(TypedDict):
4372
+ max_ecku: NotRequired[pulumi.Input[_builtins.int]]
4373
+ """
4374
+ The maximum number of Elastic Confluent Kafka Units (eCKUs) that Kafka clusters should auto-scale to. Kafka clusters with "HIGH" availability must have at least two eCKUs.
4375
+ """
4324
4376
  zones: NotRequired[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]]
4325
4377
  """
4326
4378
  (Required List of String) The list of zones the cluster is in.
@@ -4332,14 +4384,30 @@ elif False:
4332
4384
  @pulumi.input_type
4333
4385
  class KafkaClusterFreightArgs:
4334
4386
  def __init__(__self__, *,
4387
+ max_ecku: Optional[pulumi.Input[_builtins.int]] = None,
4335
4388
  zones: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]] = None):
4336
4389
  """
4390
+ :param pulumi.Input[_builtins.int] max_ecku: The maximum number of Elastic Confluent Kafka Units (eCKUs) that Kafka clusters should auto-scale to. Kafka clusters with "HIGH" availability must have at least two eCKUs.
4337
4391
  :param pulumi.Input[Sequence[pulumi.Input[_builtins.str]]] zones: (Required List of String) The list of zones the cluster is in.
4338
4392
  - On AWS, zones are AWS [AZ IDs](https://docs.aws.amazon.com/ram/latest/userguide/working-with-az-ids.html), for example, `use1-az3`.
4339
4393
  """
4394
+ if max_ecku is not None:
4395
+ pulumi.set(__self__, "max_ecku", max_ecku)
4340
4396
  if zones is not None:
4341
4397
  pulumi.set(__self__, "zones", zones)
4342
4398
 
4399
+ @_builtins.property
4400
+ @pulumi.getter(name="maxEcku")
4401
+ def max_ecku(self) -> Optional[pulumi.Input[_builtins.int]]:
4402
+ """
4403
+ The maximum number of Elastic Confluent Kafka Units (eCKUs) that Kafka clusters should auto-scale to. Kafka clusters with "HIGH" availability must have at least two eCKUs.
4404
+ """
4405
+ return pulumi.get(self, "max_ecku")
4406
+
4407
+ @max_ecku.setter
4408
+ def max_ecku(self, value: Optional[pulumi.Input[_builtins.int]]):
4409
+ pulumi.set(self, "max_ecku", value)
4410
+
4343
4411
  @_builtins.property
4344
4412
  @pulumi.getter
4345
4413
  def zones(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]]:
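
A similar hedged sketch for the Freight block touched above, which now accepts max_ecku alongside the computed zones; the availability value, region, and the cap of 4 are assumptions for illustration only.

import pulumi_confluentcloud as confluentcloud

# Freight clusters take the same optional eCKU cap; `zones` is returned by the provider.
freight = confluentcloud.KafkaCluster(
    "telemetry-cluster",
    display_name="telemetry",
    availability="HIGH",
    cloud="AWS",
    region="us-east-1",
    freight=confluentcloud.KafkaClusterFreightArgs(max_ecku=4),
    environment=confluentcloud.KafkaClusterEnvironmentArgs(id="env-abc123"),
)
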
@@ -4387,14 +4455,34 @@ class KafkaClusterNetworkArgs:
4387
4455
 
4388
4456
  if not MYPY:
4389
4457
  class KafkaClusterStandardArgsDict(TypedDict):
4390
- pass
4458
+ max_ecku: NotRequired[pulumi.Input[_builtins.int]]
4459
+ """
4460
+ The maximum number of Elastic Confluent Kafka Units (eCKUs) that Kafka clusters should auto-scale to. Kafka clusters with "HIGH" availability must have at least two eCKUs.
4461
+ """
4391
4462
  elif False:
4392
4463
  KafkaClusterStandardArgsDict: TypeAlias = Mapping[str, Any]
4393
4464
 
4394
4465
  @pulumi.input_type
4395
4466
  class KafkaClusterStandardArgs:
4396
- def __init__(__self__):
4397
- pass
4467
+ def __init__(__self__, *,
4468
+ max_ecku: Optional[pulumi.Input[_builtins.int]] = None):
4469
+ """
4470
+ :param pulumi.Input[_builtins.int] max_ecku: The maximum number of Elastic Confluent Kafka Units (eCKUs) that Kafka clusters should auto-scale to. Kafka clusters with "HIGH" availability must have at least two eCKUs.
4471
+ """
4472
+ if max_ecku is not None:
4473
+ pulumi.set(__self__, "max_ecku", max_ecku)
4474
+
4475
+ @_builtins.property
4476
+ @pulumi.getter(name="maxEcku")
4477
+ def max_ecku(self) -> Optional[pulumi.Input[_builtins.int]]:
4478
+ """
4479
+ The maximum number of Elastic Confluent Kafka Units (eCKUs) that Kafka clusters should auto-scale to. Kafka clusters with "HIGH" availability must have at least two eCKUs.
4480
+ """
4481
+ return pulumi.get(self, "max_ecku")
4482
+
4483
+ @max_ecku.setter
4484
+ def max_ecku(self, value: Optional[pulumi.Input[_builtins.int]]):
4485
+ pulumi.set(self, "max_ecku", value)
4398
4486
 
4399
4487
 
4400
4488
  if not MYPY:
@@ -7840,6 +7928,95 @@ class SubjectModeSchemaRegistryClusterArgs:
7840
7928
  pulumi.set(self, "id", value)
7841
7929
 
7842
7930
 
7931
+ if not MYPY:
7932
+ class TableflowTopicAzureDataLakeStorageGen2ArgsDict(TypedDict):
7933
+ container_name: pulumi.Input[_builtins.str]
7934
+ """
7935
+ The container name.
7936
+ """
7937
+ provider_integration_id: pulumi.Input[_builtins.str]
7938
+ """
7939
+ The provider integration id.
7940
+ """
7941
+ storage_account_name: pulumi.Input[_builtins.str]
7942
+ """
7943
+ The storage account name.
7944
+ """
7945
+ storage_region: NotRequired[pulumi.Input[_builtins.str]]
7946
+ """
7947
+ (Required String) The storage region.
7948
+ """
7949
+ elif False:
7950
+ TableflowTopicAzureDataLakeStorageGen2ArgsDict: TypeAlias = Mapping[str, Any]
7951
+
7952
+ @pulumi.input_type
7953
+ class TableflowTopicAzureDataLakeStorageGen2Args:
7954
+ def __init__(__self__, *,
7955
+ container_name: pulumi.Input[_builtins.str],
7956
+ provider_integration_id: pulumi.Input[_builtins.str],
7957
+ storage_account_name: pulumi.Input[_builtins.str],
7958
+ storage_region: Optional[pulumi.Input[_builtins.str]] = None):
7959
+ """
7960
+ :param pulumi.Input[_builtins.str] container_name: The container name.
7961
+ :param pulumi.Input[_builtins.str] provider_integration_id: The provider integration id.
7962
+ :param pulumi.Input[_builtins.str] storage_account_name: The storage account name.
7963
+ :param pulumi.Input[_builtins.str] storage_region: (Required String) The storage region.
7964
+ """
7965
+ pulumi.set(__self__, "container_name", container_name)
7966
+ pulumi.set(__self__, "provider_integration_id", provider_integration_id)
7967
+ pulumi.set(__self__, "storage_account_name", storage_account_name)
7968
+ if storage_region is not None:
7969
+ pulumi.set(__self__, "storage_region", storage_region)
7970
+
7971
+ @_builtins.property
7972
+ @pulumi.getter(name="containerName")
7973
+ def container_name(self) -> pulumi.Input[_builtins.str]:
7974
+ """
7975
+ The container name.
7976
+ """
7977
+ return pulumi.get(self, "container_name")
7978
+
7979
+ @container_name.setter
7980
+ def container_name(self, value: pulumi.Input[_builtins.str]):
7981
+ pulumi.set(self, "container_name", value)
7982
+
7983
+ @_builtins.property
7984
+ @pulumi.getter(name="providerIntegrationId")
7985
+ def provider_integration_id(self) -> pulumi.Input[_builtins.str]:
7986
+ """
7987
+ The provider integration id.
7988
+ """
7989
+ return pulumi.get(self, "provider_integration_id")
7990
+
7991
+ @provider_integration_id.setter
7992
+ def provider_integration_id(self, value: pulumi.Input[_builtins.str]):
7993
+ pulumi.set(self, "provider_integration_id", value)
7994
+
7995
+ @_builtins.property
7996
+ @pulumi.getter(name="storageAccountName")
7997
+ def storage_account_name(self) -> pulumi.Input[_builtins.str]:
7998
+ """
7999
+ The storage account name.
8000
+ """
8001
+ return pulumi.get(self, "storage_account_name")
8002
+
8003
+ @storage_account_name.setter
8004
+ def storage_account_name(self, value: pulumi.Input[_builtins.str]):
8005
+ pulumi.set(self, "storage_account_name", value)
8006
+
8007
+ @_builtins.property
8008
+ @pulumi.getter(name="storageRegion")
8009
+ def storage_region(self) -> Optional[pulumi.Input[_builtins.str]]:
8010
+ """
8011
+ (Required String) The storage region.
8012
+ """
8013
+ return pulumi.get(self, "storage_region")
8014
+
8015
+ @storage_region.setter
8016
+ def storage_region(self, value: Optional[pulumi.Input[_builtins.str]]):
8017
+ pulumi.set(self, "storage_region", value)
8018
+
8019
+
7843
8020
  if not MYPY:
7844
8021
  class TableflowTopicByobAwsArgsDict(TypedDict):
7845
8022
  bucket_name: pulumi.Input[_builtins.str]
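
Not part of the diff: a sketch of wiring the new Azure Data Lake Storage Gen2 block into a TableflowTopic. The container, storage account, and integration values are placeholders, and the id/key/secret field names on the environment, kafka_cluster, and credentials blocks are assumed from the provider's usual shape rather than shown in this hunk.

import pulumi_confluentcloud as confluentcloud

tableflow = confluentcloud.TableflowTopic(
    "orders-tableflow",
    display_name="orders",  # the Kafka topic that Tableflow materializes
    environment=confluentcloud.TableflowTopicEnvironmentArgs(id="env-abc123"),
    kafka_cluster=confluentcloud.TableflowTopicKafkaClusterArgs(id="lkc-abc123"),
    azure_data_lake_storage_gen2=confluentcloud.TableflowTopicAzureDataLakeStorageGen2Args(
        container_name="tableflow-data",
        storage_account_name="mystorageaccount",
        provider_integration_id="cspi-abc123",
    ),
    credentials=confluentcloud.TableflowTopicCredentialsArgs(
        key="<tableflow-api-key>",
        secret="<tableflow-api-secret>",
    ),
)
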
@@ -9425,14 +9602,33 @@ class GetIpAddressesFilterArgs:
9425
9602
 
9426
9603
  if not MYPY:
9427
9604
  class GetKafkaClusterBasicArgsDict(TypedDict):
9428
- pass
9605
+ max_ecku: _builtins.int
9606
+ """
9607
+ (Optional Number) The maximum number of Elastic Confluent Kafka Units (eCKUs) that Kafka clusters should auto-scale to. Kafka clusters with "HIGH" availability must have at least two eCKUs. For more details, see [Maximum eCKU requirements](https://docs.confluent.io/cloud/current/clusters/cluster-types.html#minimum-maximum-ecku-requirements).
9608
+ """
9429
9609
  elif False:
9430
9610
  GetKafkaClusterBasicArgsDict: TypeAlias = Mapping[str, Any]
9431
9611
 
9432
9612
  @pulumi.input_type
9433
9613
  class GetKafkaClusterBasicArgs:
9434
- def __init__(__self__):
9435
- pass
9614
+ def __init__(__self__, *,
9615
+ max_ecku: _builtins.int):
9616
+ """
9617
+ :param _builtins.int max_ecku: (Optional Number) The maximum number of Elastic Confluent Kafka Units (eCKUs) that Kafka clusters should auto-scale to. Kafka clusters with "HIGH" availability must have at least two eCKUs. For more details, see [Maximum eCKU requirements](https://docs.confluent.io/cloud/current/clusters/cluster-types.html#minimum-maximum-ecku-requirements).
9618
+ """
9619
+ pulumi.set(__self__, "max_ecku", max_ecku)
9620
+
9621
+ @_builtins.property
9622
+ @pulumi.getter(name="maxEcku")
9623
+ def max_ecku(self) -> _builtins.int:
9624
+ """
9625
+ (Optional Number) The maximum number of Elastic Confluent Kafka Units (eCKUs) that Kafka clusters should auto-scale to. Kafka clusters with "HIGH" availability must have at least two eCKUs. For more details, see [Maximum eCKU requirements](https://docs.confluent.io/cloud/current/clusters/cluster-types.html#minimum-maximum-ecku-requirements).
9626
+ """
9627
+ return pulumi.get(self, "max_ecku")
9628
+
9629
+ @max_ecku.setter
9630
+ def max_ecku(self, value: _builtins.int):
9631
+ pulumi.set(self, "max_ecku", value)
9436
9632
 
9437
9633
 
9438
9634
  if not MYPY:
@@ -9515,14 +9711,33 @@ class GetKafkaClusterDedicatedArgs:
9515
9711
 
9516
9712
  if not MYPY:
9517
9713
  class GetKafkaClusterEnterpriseArgsDict(TypedDict):
9518
- pass
9714
+ max_ecku: _builtins.int
9715
+ """
9716
+ (Optional Number) The maximum number of Elastic Confluent Kafka Units (eCKUs) that Kafka clusters should auto-scale to. Kafka clusters with "HIGH" availability must have at least two eCKUs. For more details, see [Maximum eCKU requirements](https://docs.confluent.io/cloud/current/clusters/cluster-types.html#minimum-maximum-ecku-requirements).
9717
+ """
9519
9718
  elif False:
9520
9719
  GetKafkaClusterEnterpriseArgsDict: TypeAlias = Mapping[str, Any]
9521
9720
 
9522
9721
  @pulumi.input_type
9523
9722
  class GetKafkaClusterEnterpriseArgs:
9524
- def __init__(__self__):
9525
- pass
9723
+ def __init__(__self__, *,
9724
+ max_ecku: _builtins.int):
9725
+ """
9726
+ :param _builtins.int max_ecku: (Optional Number) The maximum number of Elastic Confluent Kafka Units (eCKUs) that Kafka clusters should auto-scale to. Kafka clusters with "HIGH" availability must have at least two eCKUs. For more details, see [Maximum eCKU requirements](https://docs.confluent.io/cloud/current/clusters/cluster-types.html#minimum-maximum-ecku-requirements).
9727
+ """
9728
+ pulumi.set(__self__, "max_ecku", max_ecku)
9729
+
9730
+ @_builtins.property
9731
+ @pulumi.getter(name="maxEcku")
9732
+ def max_ecku(self) -> _builtins.int:
9733
+ """
9734
+ (Optional Number) The maximum number of Elastic Confluent Kafka Units (eCKUs) that Kafka clusters should auto-scale to. Kafka clusters with "HIGH" availability must have at least two eCKUs. For more details, see [Maximum eCKU requirements](https://docs.confluent.io/cloud/current/clusters/cluster-types.html#minimum-maximum-ecku-requirements).
9735
+ """
9736
+ return pulumi.get(self, "max_ecku")
9737
+
9738
+ @max_ecku.setter
9739
+ def max_ecku(self, value: _builtins.int):
9740
+ pulumi.set(self, "max_ecku", value)
9526
9741
 
9527
9742
 
9528
9743
  if not MYPY:
@@ -9564,6 +9779,10 @@ class GetKafkaClusterEnvironmentArgs:
9564
9779
 
9565
9780
  if not MYPY:
9566
9781
  class GetKafkaClusterFreightArgsDict(TypedDict):
9782
+ max_ecku: _builtins.int
9783
+ """
9784
+ (Optional Number) The maximum number of Elastic Confluent Kafka Units (eCKUs) that Kafka clusters should auto-scale to. Kafka clusters with "HIGH" availability must have at least two eCKUs. For more details, see [Maximum eCKU requirements](https://docs.confluent.io/cloud/current/clusters/cluster-types.html#minimum-maximum-ecku-requirements).
9785
+ """
9567
9786
  zones: Sequence[_builtins.str]
9568
9787
  """
9569
9788
  (Required List of String) The list of zones the cluster is in.
@@ -9577,15 +9796,30 @@ elif False:
9577
9796
  @pulumi.input_type
9578
9797
  class GetKafkaClusterFreightArgs:
9579
9798
  def __init__(__self__, *,
9799
+ max_ecku: _builtins.int,
9580
9800
  zones: Sequence[_builtins.str]):
9581
9801
  """
9802
+ :param _builtins.int max_ecku: (Optional Number) The maximum number of Elastic Confluent Kafka Units (eCKUs) that Kafka clusters should auto-scale to. Kafka clusters with "HIGH" availability must have at least two eCKUs. For more details, see [Maximum eCKU requirements](https://docs.confluent.io/cloud/current/clusters/cluster-types.html#minimum-maximum-ecku-requirements).
9582
9803
  :param Sequence[_builtins.str] zones: (Required List of String) The list of zones the cluster is in.
9583
9804
  - On AWS, zones are AWS [AZ IDs](https://docs.aws.amazon.com/ram/latest/userguide/working-with-az-ids.html), for example, `use1-az3`.
9584
9805
  - On GCP, zones are GCP [zones](https://cloud.google.com/compute/docs/regions-zones), for example, `us-central1-c`.
9585
9806
  - On Azure, zones are Confluent-chosen names (for example, `1`, `2`, `3`) since Azure does not have universal zone identifiers.
9586
9807
  """
9808
+ pulumi.set(__self__, "max_ecku", max_ecku)
9587
9809
  pulumi.set(__self__, "zones", zones)
9588
9810
 
9811
+ @_builtins.property
9812
+ @pulumi.getter(name="maxEcku")
9813
+ def max_ecku(self) -> _builtins.int:
9814
+ """
9815
+ (Optional Number) The maximum number of Elastic Confluent Kafka Units (eCKUs) that Kafka clusters should auto-scale to. Kafka clusters with "HIGH" availability must have at least two eCKUs. For more details, see [Maximum eCKU requirements](https://docs.confluent.io/cloud/current/clusters/cluster-types.html#minimum-maximum-ecku-requirements).
9816
+ """
9817
+ return pulumi.get(self, "max_ecku")
9818
+
9819
+ @max_ecku.setter
9820
+ def max_ecku(self, value: _builtins.int):
9821
+ pulumi.set(self, "max_ecku", value)
9822
+
9589
9823
  @_builtins.property
9590
9824
  @pulumi.getter
9591
9825
  def zones(self) -> Sequence[_builtins.str]:
@@ -9604,14 +9838,33 @@ class GetKafkaClusterFreightArgs:
9604
9838
 
9605
9839
  if not MYPY:
9606
9840
  class GetKafkaClusterStandardArgsDict(TypedDict):
9607
- pass
9841
+ max_ecku: _builtins.int
9842
+ """
9843
+ (Optional Number) The maximum number of Elastic Confluent Kafka Units (eCKUs) that Kafka clusters should auto-scale to. Kafka clusters with "HIGH" availability must have at least two eCKUs. For more details, see [Maximum eCKU requirements](https://docs.confluent.io/cloud/current/clusters/cluster-types.html#minimum-maximum-ecku-requirements).
9844
+ """
9608
9845
  elif False:
9609
9846
  GetKafkaClusterStandardArgsDict: TypeAlias = Mapping[str, Any]
9610
9847
 
9611
9848
  @pulumi.input_type
9612
9849
  class GetKafkaClusterStandardArgs:
9613
- def __init__(__self__):
9614
- pass
9850
+ def __init__(__self__, *,
9851
+ max_ecku: _builtins.int):
9852
+ """
9853
+ :param _builtins.int max_ecku: (Optional Number) The maximum number of Elastic Confluent Kafka Units (eCKUs) that Kafka clusters should auto-scale to. Kafka clusters with "HIGH" availability must have at least two eCKUs. For more details, see [Maximum eCKU requirements](https://docs.confluent.io/cloud/current/clusters/cluster-types.html#minimum-maximum-ecku-requirements).
9854
+ """
9855
+ pulumi.set(__self__, "max_ecku", max_ecku)
9856
+
9857
+ @_builtins.property
9858
+ @pulumi.getter(name="maxEcku")
9859
+ def max_ecku(self) -> _builtins.int:
9860
+ """
9861
+ (Optional Number) The maximum number of Elastic Confluent Kafka Units (eCKUs) that Kafka clusters should auto-scale to. Kafka clusters with "HIGH" availability must have at least two eCKUs. For more details, see [Maximum eCKU requirements](https://docs.confluent.io/cloud/current/clusters/cluster-types.html#minimum-maximum-ecku-requirements).
9862
+ """
9863
+ return pulumi.get(self, "max_ecku")
9864
+
9865
+ @max_ecku.setter
9866
+ def max_ecku(self, value: _builtins.int):
9867
+ pulumi.set(self, "max_ecku", value)
9615
9868
 
9616
9869
 
9617
9870
  if not MYPY:
@@ -28,7 +28,10 @@ class GetTableflowTopicResult:
28
28
  """
29
29
  A collection of values returned by getTableflowTopic.
30
30
  """
31
- def __init__(__self__, byob_aws=None, credentials=None, display_name=None, enable_compaction=None, enable_partitioning=None, environment=None, error_handlings=None, id=None, kafka_cluster=None, managed_storages=None, record_failure_strategy=None, retention_ms=None, suspended=None, table_formats=None, table_path=None, write_mode=None):
31
+ def __init__(__self__, azure_data_lake_storage_gen2s=None, byob_aws=None, credentials=None, display_name=None, enable_compaction=None, enable_partitioning=None, environment=None, error_handlings=None, id=None, kafka_cluster=None, managed_storages=None, record_failure_strategy=None, retention_ms=None, suspended=None, table_formats=None, table_path=None, write_mode=None):
32
+ if azure_data_lake_storage_gen2s and not isinstance(azure_data_lake_storage_gen2s, list):
33
+ raise TypeError("Expected argument 'azure_data_lake_storage_gen2s' to be a list")
34
+ pulumi.set(__self__, "azure_data_lake_storage_gen2s", azure_data_lake_storage_gen2s)
32
35
  if byob_aws and not isinstance(byob_aws, list):
33
36
  raise TypeError("Expected argument 'byob_aws' to be a list")
34
37
  pulumi.set(__self__, "byob_aws", byob_aws)
@@ -78,6 +81,14 @@ class GetTableflowTopicResult:
78
81
  raise TypeError("Expected argument 'write_mode' to be a str")
79
82
  pulumi.set(__self__, "write_mode", write_mode)
80
83
 
84
+ @_builtins.property
85
+ @pulumi.getter(name="azureDataLakeStorageGen2s")
86
+ def azure_data_lake_storage_gen2s(self) -> Sequence['outputs.GetTableflowTopicAzureDataLakeStorageGen2Result']:
87
+ """
88
+ (Optional Configuration Block) supports the following:
89
+ """
90
+ return pulumi.get(self, "azure_data_lake_storage_gen2s")
91
+
81
92
  @_builtins.property
82
93
  @pulumi.getter(name="byobAws")
83
94
  def byob_aws(self) -> Sequence['outputs.GetTableflowTopicByobAwResult']:
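
A hedged sketch of reading the new azure_data_lake_storage_gen2s output from the getTableflowTopic data source; the lookup arguments (display_name, environment, kafka_cluster) are assumed from the resource's shape since the full signature is truncated in this diff, and all IDs are placeholders.

import pulumi
import pulumi_confluentcloud as confluentcloud

topic = confluentcloud.get_tableflow_topic(
    display_name="orders",
    environment={"id": "env-abc123"},
    kafka_cluster={"id": "lkc-abc123"},
)

# The ADLS Gen2 block comes back as a (possibly empty) list of result objects.
if topic.azure_data_lake_storage_gen2s:
    adls = topic.azure_data_lake_storage_gen2s[0]
    pulumi.export("tableflow-adls-account", adls.storage_account_name)
    pulumi.export("tableflow-adls-container", adls.container_name)
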
@@ -201,6 +212,7 @@ class AwaitableGetTableflowTopicResult(GetTableflowTopicResult):
201
212
  if False:
202
213
  yield self
203
214
  return GetTableflowTopicResult(
215
+ azure_data_lake_storage_gen2s=self.azure_data_lake_storage_gen2s,
204
216
  byob_aws=self.byob_aws,
205
217
  credentials=self.credentials,
206
218
  display_name=self.display_name,
@@ -273,6 +285,7 @@ def get_tableflow_topic(credentials: Optional[Union['GetTableflowTopicCredential
273
285
  __ret__ = pulumi.runtime.invoke('confluentcloud:index/getTableflowTopic:getTableflowTopic', __args__, opts=opts, typ=GetTableflowTopicResult).value
274
286
 
275
287
  return AwaitableGetTableflowTopicResult(
288
+ azure_data_lake_storage_gen2s=pulumi.get(__ret__, 'azure_data_lake_storage_gen2s'),
276
289
  byob_aws=pulumi.get(__ret__, 'byob_aws'),
277
290
  credentials=pulumi.get(__ret__, 'credentials'),
278
291
  display_name=pulumi.get(__ret__, 'display_name'),
@@ -342,6 +355,7 @@ def get_tableflow_topic_output(credentials: Optional[pulumi.Input[Optional[Union
342
355
  opts = pulumi.InvokeOutputOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
343
356
  __ret__ = pulumi.runtime.invoke_output('confluentcloud:index/getTableflowTopic:getTableflowTopic', __args__, opts=opts, typ=GetTableflowTopicResult)
344
357
  return __ret__.apply(lambda __response__: GetTableflowTopicResult(
358
+ azure_data_lake_storage_gen2s=pulumi.get(__response__, 'azure_data_lake_storage_gen2s'),
345
359
  byob_aws=pulumi.get(__response__, 'byob_aws'),
346
360
  credentials=pulumi.get(__response__, 'credentials'),
347
361
  display_name=pulumi.get(__response__, 'display_name'),
@@ -171,6 +171,7 @@ __all__ = [
171
171
  'SubjectConfigSchemaRegistryCluster',
172
172
  'SubjectModeCredentials',
173
173
  'SubjectModeSchemaRegistryCluster',
174
+ 'TableflowTopicAzureDataLakeStorageGen2',
174
175
  'TableflowTopicByobAws',
175
176
  'TableflowTopicCredentials',
176
177
  'TableflowTopicEnvironment',
@@ -327,6 +328,7 @@ __all__ = [
327
328
  'GetSubjectConfigSchemaRegistryClusterResult',
328
329
  'GetSubjectModeCredentialsResult',
329
330
  'GetSubjectModeSchemaRegistryClusterResult',
331
+ 'GetTableflowTopicAzureDataLakeStorageGen2Result',
330
332
  'GetTableflowTopicByobAwResult',
331
333
  'GetTableflowTopicCredentialsResult',
332
334
  'GetTableflowTopicEnvironmentResult',
@@ -2755,8 +2757,38 @@ class KafkaClientQuotaThroughput(dict):
2755
2757
 
2756
2758
  @pulumi.output_type
2757
2759
  class KafkaClusterBasic(dict):
2758
- def __init__(__self__):
2759
- pass
2760
+ @staticmethod
2761
+ def __key_warning(key: str):
2762
+ suggest = None
2763
+ if key == "maxEcku":
2764
+ suggest = "max_ecku"
2765
+
2766
+ if suggest:
2767
+ pulumi.log.warn(f"Key '{key}' not found in KafkaClusterBasic. Access the value via the '{suggest}' property getter instead.")
2768
+
2769
+ def __getitem__(self, key: str) -> Any:
2770
+ KafkaClusterBasic.__key_warning(key)
2771
+ return super().__getitem__(key)
2772
+
2773
+ def get(self, key: str, default = None) -> Any:
2774
+ KafkaClusterBasic.__key_warning(key)
2775
+ return super().get(key, default)
2776
+
2777
+ def __init__(__self__, *,
2778
+ max_ecku: Optional[_builtins.int] = None):
2779
+ """
2780
+ :param _builtins.int max_ecku: The maximum number of Elastic Confluent Kafka Units (eCKUs) that Kafka clusters should auto-scale to. Kafka clusters with "HIGH" availability must have at least two eCKUs.
2781
+ """
2782
+ if max_ecku is not None:
2783
+ pulumi.set(__self__, "max_ecku", max_ecku)
2784
+
2785
+ @_builtins.property
2786
+ @pulumi.getter(name="maxEcku")
2787
+ def max_ecku(self) -> Optional[_builtins.int]:
2788
+ """
2789
+ The maximum number of Elastic Confluent Kafka Units (eCKUs) that Kafka clusters should auto-scale to. Kafka clusters with "HIGH" availability must have at least two eCKUs.
2790
+ """
2791
+ return pulumi.get(self, "max_ecku")
2760
2792
 
2761
2793
 
2762
2794
  @pulumi.output_type
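
Since KafkaClusterBasic is now a non-empty output type, a short sketch of reading the field back; KafkaCluster.get adopts an existing cluster by ID (a placeholder here), and the snake_case property getter is the access path the __key_warning hook above steers callers toward.

import pulumi
import pulumi_confluentcloud as confluentcloud

# Adopt an existing cluster purely to read its outputs; "lkc-abc123" is a placeholder.
cluster = confluentcloud.KafkaCluster.get("lookup", "lkc-abc123")

# `basic` is an Output of the dict-backed type defined above; prefer the
# snake_case `max_ecku` property over indexing with the camelCase key.
pulumi.export("max-ecku", cluster.basic.apply(lambda b: b.max_ecku if b else None))
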
@@ -2865,6 +2897,8 @@ class KafkaClusterDedicated(dict):
2865
2897
  !> **Warning:** You can only upgrade clusters from `basic` to `standard`.
2866
2898
 
2867
2899
  > **Note:** Currently, provisioning of a Dedicated Kafka cluster takes around 25 minutes on average but might take up to 24 hours. If you can't wait for the `pulumi up` step to finish, you can exit it and import the cluster by using the `pulumi import` command once it has been provisioned. When the cluster is provisioned, you will receive an email notification, and you can also follow updates on the Target Environment web page of the Confluent Cloud website.
2900
+
2901
+ > **Note:** Refer to [eCKU/CKU comparison](https://docs.confluent.io/cloud/current/clusters/cluster-types.html#ecku-cku-comparison) documentation for the minimum/maximum eCKU requirements for each cluster type.
2868
2902
  :param _builtins.str encryption_key: The ID of the encryption key that is used to encrypt the data in the Kafka cluster.
2869
2903
  :param Sequence[_builtins.str] zones: (Required List of String) The list of zones the cluster is in.
2870
2904
  - On AWS, zones are AWS [AZ IDs](https://docs.aws.amazon.com/ram/latest/userguide/working-with-az-ids.html), for example, `use1-az3`.
@@ -2888,6 +2922,8 @@ class KafkaClusterDedicated(dict):
2888
2922
  !> **Warning:** You can only upgrade clusters from `basic` to `standard`.
2889
2923
 
2890
2924
  > **Note:** Currently, provisioning of a Dedicated Kafka cluster takes around 25 minutes on average but might take up to 24 hours. If you can't wait for the `pulumi up` step to finish, you can exit it and import the cluster by using the `pulumi import` command once it has been provisioned. When the cluster is provisioned, you will receive an email notification, and you can also follow updates on the Target Environment web page of the Confluent Cloud website.
2925
+
2926
+ > **Note:** Refer to [eCKU/CKU comparison](https://docs.confluent.io/cloud/current/clusters/cluster-types.html#ecku-cku-comparison) documentation for the minimum/maximum eCKU requirements for each cluster type.
2891
2927
  """
2892
2928
  return pulumi.get(self, "cku")
2893
2929
 
@@ -2989,8 +3025,38 @@ class KafkaClusterEndpoint(dict):
2989
3025
 
2990
3026
  @pulumi.output_type
2991
3027
  class KafkaClusterEnterprise(dict):
2992
- def __init__(__self__):
2993
- pass
3028
+ @staticmethod
3029
+ def __key_warning(key: str):
3030
+ suggest = None
3031
+ if key == "maxEcku":
3032
+ suggest = "max_ecku"
3033
+
3034
+ if suggest:
3035
+ pulumi.log.warn(f"Key '{key}' not found in KafkaClusterEnterprise. Access the value via the '{suggest}' property getter instead.")
3036
+
3037
+ def __getitem__(self, key: str) -> Any:
3038
+ KafkaClusterEnterprise.__key_warning(key)
3039
+ return super().__getitem__(key)
3040
+
3041
+ def get(self, key: str, default = None) -> Any:
3042
+ KafkaClusterEnterprise.__key_warning(key)
3043
+ return super().get(key, default)
3044
+
3045
+ def __init__(__self__, *,
3046
+ max_ecku: Optional[_builtins.int] = None):
3047
+ """
3048
+ :param _builtins.int max_ecku: The maximum number of Elastic Confluent Kafka Units (eCKUs) that Kafka clusters should auto-scale to. Kafka clusters with "HIGH" availability must have at least two eCKUs.
3049
+ """
3050
+ if max_ecku is not None:
3051
+ pulumi.set(__self__, "max_ecku", max_ecku)
3052
+
3053
+ @_builtins.property
3054
+ @pulumi.getter(name="maxEcku")
3055
+ def max_ecku(self) -> Optional[_builtins.int]:
3056
+ """
3057
+ The maximum number of Elastic Confluent Kafka Units (eCKUs) that Kafka clusters should auto-scale to. Kafka clusters with "HIGH" availability must have at least two eCKUs.
3058
+ """
3059
+ return pulumi.get(self, "max_ecku")
2994
3060
 
2995
3061
 
2996
3062
  @pulumi.output_type
@@ -3013,15 +3079,44 @@ class KafkaClusterEnvironment(dict):
3013
3079
 
3014
3080
  @pulumi.output_type
3015
3081
  class KafkaClusterFreight(dict):
3082
+ @staticmethod
3083
+ def __key_warning(key: str):
3084
+ suggest = None
3085
+ if key == "maxEcku":
3086
+ suggest = "max_ecku"
3087
+
3088
+ if suggest:
3089
+ pulumi.log.warn(f"Key '{key}' not found in KafkaClusterFreight. Access the value via the '{suggest}' property getter instead.")
3090
+
3091
+ def __getitem__(self, key: str) -> Any:
3092
+ KafkaClusterFreight.__key_warning(key)
3093
+ return super().__getitem__(key)
3094
+
3095
+ def get(self, key: str, default = None) -> Any:
3096
+ KafkaClusterFreight.__key_warning(key)
3097
+ return super().get(key, default)
3098
+
3016
3099
  def __init__(__self__, *,
3100
+ max_ecku: Optional[_builtins.int] = None,
3017
3101
  zones: Optional[Sequence[_builtins.str]] = None):
3018
3102
  """
3103
+ :param _builtins.int max_ecku: The maximum number of Elastic Confluent Kafka Units (eCKUs) that Kafka clusters should auto-scale to. Kafka clusters with "HIGH" availability must have at least two eCKUs.
3019
3104
  :param Sequence[_builtins.str] zones: (Required List of String) The list of zones the cluster is in.
3020
3105
  - On AWS, zones are AWS [AZ IDs](https://docs.aws.amazon.com/ram/latest/userguide/working-with-az-ids.html), for example, `use1-az3`.
3021
3106
  """
3107
+ if max_ecku is not None:
3108
+ pulumi.set(__self__, "max_ecku", max_ecku)
3022
3109
  if zones is not None:
3023
3110
  pulumi.set(__self__, "zones", zones)
3024
3111
 
3112
+ @_builtins.property
3113
+ @pulumi.getter(name="maxEcku")
3114
+ def max_ecku(self) -> Optional[_builtins.int]:
3115
+ """
3116
+ The maximum number of Elastic Confluent Kafka Units (eCKUs) that Kafka clusters should auto-scale to. Kafka clusters with "HIGH" availability must have at least two eCKUs.
3117
+ """
3118
+ return pulumi.get(self, "max_ecku")
3119
+
3025
3120
  @_builtins.property
3026
3121
  @pulumi.getter
3027
3122
  def zones(self) -> Optional[Sequence[_builtins.str]]:
@@ -3052,8 +3147,38 @@ class KafkaClusterNetwork(dict):
3052
3147
 
3053
3148
  @pulumi.output_type
3054
3149
  class KafkaClusterStandard(dict):
3055
- def __init__(__self__):
3056
- pass
3150
+ @staticmethod
3151
+ def __key_warning(key: str):
3152
+ suggest = None
3153
+ if key == "maxEcku":
3154
+ suggest = "max_ecku"
3155
+
3156
+ if suggest:
3157
+ pulumi.log.warn(f"Key '{key}' not found in KafkaClusterStandard. Access the value via the '{suggest}' property getter instead.")
3158
+
3159
+ def __getitem__(self, key: str) -> Any:
3160
+ KafkaClusterStandard.__key_warning(key)
3161
+ return super().__getitem__(key)
3162
+
3163
+ def get(self, key: str, default = None) -> Any:
3164
+ KafkaClusterStandard.__key_warning(key)
3165
+ return super().get(key, default)
3166
+
3167
+ def __init__(__self__, *,
3168
+ max_ecku: Optional[_builtins.int] = None):
3169
+ """
3170
+ :param _builtins.int max_ecku: The maximum number of Elastic Confluent Kafka Units (eCKUs) that Kafka clusters should auto-scale to. Kafka clusters with "HIGH" availability must have at least two eCKUs.
3171
+ """
3172
+ if max_ecku is not None:
3173
+ pulumi.set(__self__, "max_ecku", max_ecku)
3174
+
3175
+ @_builtins.property
3176
+ @pulumi.getter(name="maxEcku")
3177
+ def max_ecku(self) -> Optional[_builtins.int]:
3178
+ """
3179
+ The maximum number of Elastic Confluent Kafka Units (eCKUs) that Kafka clusters should auto-scale to. Kafka clusters with "HIGH" availability must have at least two eCKUs.
3180
+ """
3181
+ return pulumi.get(self, "max_ecku")
3057
3182
 
3058
3183
 
3059
3184
  @pulumi.output_type
@@ -5419,6 +5544,81 @@ class SubjectModeSchemaRegistryCluster(dict):
5419
5544
  return pulumi.get(self, "id")
5420
5545
 
5421
5546
 
5547
+ @pulumi.output_type
5548
+ class TableflowTopicAzureDataLakeStorageGen2(dict):
5549
+ @staticmethod
5550
+ def __key_warning(key: str):
5551
+ suggest = None
5552
+ if key == "containerName":
5553
+ suggest = "container_name"
5554
+ elif key == "providerIntegrationId":
5555
+ suggest = "provider_integration_id"
5556
+ elif key == "storageAccountName":
5557
+ suggest = "storage_account_name"
5558
+ elif key == "storageRegion":
5559
+ suggest = "storage_region"
5560
+
5561
+ if suggest:
5562
+ pulumi.log.warn(f"Key '{key}' not found in TableflowTopicAzureDataLakeStorageGen2. Access the value via the '{suggest}' property getter instead.")
5563
+
5564
+ def __getitem__(self, key: str) -> Any:
5565
+ TableflowTopicAzureDataLakeStorageGen2.__key_warning(key)
5566
+ return super().__getitem__(key)
5567
+
5568
+ def get(self, key: str, default = None) -> Any:
5569
+ TableflowTopicAzureDataLakeStorageGen2.__key_warning(key)
5570
+ return super().get(key, default)
5571
+
5572
+ def __init__(__self__, *,
5573
+ container_name: _builtins.str,
5574
+ provider_integration_id: _builtins.str,
5575
+ storage_account_name: _builtins.str,
5576
+ storage_region: Optional[_builtins.str] = None):
5577
+ """
5578
+ :param _builtins.str container_name: The container name.
5579
+ :param _builtins.str provider_integration_id: The provider integration id.
5580
+ :param _builtins.str storage_account_name: The storage account name.
5581
+ :param _builtins.str storage_region: (Required String) The storage region.
5582
+ """
5583
+ pulumi.set(__self__, "container_name", container_name)
5584
+ pulumi.set(__self__, "provider_integration_id", provider_integration_id)
5585
+ pulumi.set(__self__, "storage_account_name", storage_account_name)
5586
+ if storage_region is not None:
5587
+ pulumi.set(__self__, "storage_region", storage_region)
5588
+
5589
+ @_builtins.property
5590
+ @pulumi.getter(name="containerName")
5591
+ def container_name(self) -> _builtins.str:
5592
+ """
5593
+ The container name.
5594
+ """
5595
+ return pulumi.get(self, "container_name")
5596
+
5597
+ @_builtins.property
5598
+ @pulumi.getter(name="providerIntegrationId")
5599
+ def provider_integration_id(self) -> _builtins.str:
5600
+ """
5601
+ The provider integration id.
5602
+ """
5603
+ return pulumi.get(self, "provider_integration_id")
5604
+
5605
+ @_builtins.property
5606
+ @pulumi.getter(name="storageAccountName")
5607
+ def storage_account_name(self) -> _builtins.str:
5608
+ """
5609
+ The storage account name.
5610
+ """
5611
+ return pulumi.get(self, "storage_account_name")
5612
+
5613
+ @_builtins.property
5614
+ @pulumi.getter(name="storageRegion")
5615
+ def storage_region(self) -> Optional[_builtins.str]:
5616
+ """
5617
+ (Required String) The storage region.
5618
+ """
5619
+ return pulumi.get(self, "storage_region")
5620
+
5621
+
5422
5622
  @pulumi.output_type
5423
5623
  class TableflowTopicByobAws(dict):
5424
5624
  @staticmethod
@@ -7254,8 +7454,20 @@ class GetKafkaClientQuotaThroughputResult(dict):
7254
7454
 
7255
7455
  @pulumi.output_type
7256
7456
  class GetKafkaClusterBasicResult(dict):
7257
- def __init__(__self__):
7258
- pass
7457
+ def __init__(__self__, *,
7458
+ max_ecku: _builtins.int):
7459
+ """
7460
+ :param _builtins.int max_ecku: (Optional Number) The maximum number of Elastic Confluent Kafka Units (eCKUs) that Kafka clusters should auto-scale to. Kafka clusters with "HIGH" availability must have at least two eCKUs. For more details, see [Maximum eCKU requirements](https://docs.confluent.io/cloud/current/clusters/cluster-types.html#minimum-maximum-ecku-requirements).
7461
+ """
7462
+ pulumi.set(__self__, "max_ecku", max_ecku)
7463
+
7464
+ @_builtins.property
7465
+ @pulumi.getter(name="maxEcku")
7466
+ def max_ecku(self) -> _builtins.int:
7467
+ """
7468
+ (Optional Number) The maximum number of Elastic Confluent Kafka Units (eCKUs) that Kafka clusters should auto-scale to. Kafka clusters with "HIGH" availability must have at least two eCKUs. For more details, see [Maximum eCKU requirements](https://docs.confluent.io/cloud/current/clusters/cluster-types.html#minimum-maximum-ecku-requirements).
7469
+ """
7470
+ return pulumi.get(self, "max_ecku")
7259
7471
 
7260
7472
 
7261
7473
  @pulumi.output_type
@@ -7375,8 +7587,20 @@ class GetKafkaClusterEndpointResult(dict):
7375
7587
 
7376
7588
  @pulumi.output_type
7377
7589
  class GetKafkaClusterEnterpriseResult(dict):
7378
- def __init__(__self__):
7379
- pass
7590
+ def __init__(__self__, *,
7591
+ max_ecku: _builtins.int):
7592
+ """
7593
+ :param _builtins.int max_ecku: (Optional Number) The maximum number of Elastic Confluent Kafka Units (eCKUs) that Kafka clusters should auto-scale to. Kafka clusters with "HIGH" availability must have at least two eCKUs. For more details, see [Maximum eCKU requirements](https://docs.confluent.io/cloud/current/clusters/cluster-types.html#minimum-maximum-ecku-requirements).
7594
+ """
7595
+ pulumi.set(__self__, "max_ecku", max_ecku)
7596
+
7597
+ @_builtins.property
7598
+ @pulumi.getter(name="maxEcku")
7599
+ def max_ecku(self) -> _builtins.int:
7600
+ """
7601
+ (Optional Number) The maximum number of Elastic Confluent Kafka Units (eCKUs) that Kafka clusters should auto-scale to. Kafka clusters with "HIGH" availability must have at least two eCKUs. For more details, see [Maximum eCKU requirements](https://docs.confluent.io/cloud/current/clusters/cluster-types.html#minimum-maximum-ecku-requirements).
7602
+ """
7603
+ return pulumi.get(self, "max_ecku")
7380
7604
 
7381
7605
 
7382
7606
  @pulumi.output_type
@@ -7404,15 +7628,26 @@ class GetKafkaClusterEnvironmentResult(dict):
7404
7628
  @pulumi.output_type
7405
7629
  class GetKafkaClusterFreightResult(dict):
7406
7630
  def __init__(__self__, *,
7631
+ max_ecku: _builtins.int,
7407
7632
  zones: Sequence[_builtins.str]):
7408
7633
  """
7634
+ :param _builtins.int max_ecku: (Optional Number) The maximum number of Elastic Confluent Kafka Units (eCKUs) that Kafka clusters should auto-scale to. Kafka clusters with "HIGH" availability must have at least two eCKUs. For more details, see [Maximum eCKU requirements](https://docs.confluent.io/cloud/current/clusters/cluster-types.html#minimum-maximum-ecku-requirements).
7409
7635
  :param Sequence[_builtins.str] zones: (Required List of String) The list of zones the cluster is in.
7410
7636
  - On AWS, zones are AWS [AZ IDs](https://docs.aws.amazon.com/ram/latest/userguide/working-with-az-ids.html), for example, `use1-az3`.
7411
7637
  - On GCP, zones are GCP [zones](https://cloud.google.com/compute/docs/regions-zones), for example, `us-central1-c`.
7412
7638
  - On Azure, zones are Confluent-chosen names (for example, `1`, `2`, `3`) since Azure does not have universal zone identifiers.
7413
7639
  """
7640
+ pulumi.set(__self__, "max_ecku", max_ecku)
7414
7641
  pulumi.set(__self__, "zones", zones)
7415
7642
 
7643
+ @_builtins.property
7644
+ @pulumi.getter(name="maxEcku")
7645
+ def max_ecku(self) -> _builtins.int:
7646
+ """
7647
+ (Optional Number) The maximum number of Elastic Confluent Kafka Units (eCKUs) that Kafka clusters should auto-scale to. Kafka clusters with "HIGH" availability must have at least two eCKUs. For more details, see [Maximum eCKU requirements](https://docs.confluent.io/cloud/current/clusters/cluster-types.html#minimum-maximum-ecku-requirements).
7648
+ """
7649
+ return pulumi.get(self, "max_ecku")
7650
+
7416
7651
  @_builtins.property
7417
7652
  @pulumi.getter
7418
7653
  def zones(self) -> Sequence[_builtins.str]:
@@ -7445,8 +7680,20 @@ class GetKafkaClusterNetworkResult(dict):
7445
7680
 
7446
7681
  @pulumi.output_type
7447
7682
  class GetKafkaClusterStandardResult(dict):
7448
- def __init__(__self__):
7449
- pass
7683
+ def __init__(__self__, *,
7684
+ max_ecku: _builtins.int):
7685
+ """
7686
+ :param _builtins.int max_ecku: (Optional Number) The maximum number of Elastic Confluent Kafka Units (eCKUs) that Kafka clusters should auto-scale to. Kafka clusters with "HIGH" availability must have at least two eCKUs. For more details, see [Maximum eCKU requirements](https://docs.confluent.io/cloud/current/clusters/cluster-types.html#minimum-maximum-ecku-requirements).
7687
+ """
7688
+ pulumi.set(__self__, "max_ecku", max_ecku)
7689
+
7690
+ @_builtins.property
7691
+ @pulumi.getter(name="maxEcku")
7692
+ def max_ecku(self) -> _builtins.int:
7693
+ """
7694
+ (Optional Number) The maximum number of Elastic Confluent Kafka Units (eCKUs) that Kafka clusters should auto-scale to. Kafka clusters with "HIGH" availability must have at least two eCKUs. For more details, see [Maximum eCKU requirements](https://docs.confluent.io/cloud/current/clusters/cluster-types.html#minimum-maximum-ecku-requirements).
7695
+ """
7696
+ return pulumi.get(self, "max_ecku")
7450
7697
 
7451
7698
 
7452
7699
  @pulumi.output_type
@@ -7672,8 +7919,20 @@ class GetKafkaClustersClusterResult(dict):
7672
7919
 
7673
7920
  @pulumi.output_type
7674
7921
  class GetKafkaClustersClusterBasicResult(dict):
7675
- def __init__(__self__):
7676
- pass
7922
+ def __init__(__self__, *,
7923
+ max_ecku: _builtins.int):
7924
+ """
7925
+ :param _builtins.int max_ecku: The maximum number of Elastic Confluent Kafka Units (eCKUs) that Kafka clusters should auto-scale to. Kafka clusters with HIGH availability must have at least two eCKUs.
7926
+ """
7927
+ pulumi.set(__self__, "max_ecku", max_ecku)
7928
+
7929
+ @_builtins.property
7930
+ @pulumi.getter(name="maxEcku")
7931
+ def max_ecku(self) -> _builtins.int:
7932
+ """
7933
+ The maximum number of Elastic Confluent Kafka Units (eCKUs) that Kafka clusters should auto-scale to. Kafka clusters with HIGH availability must have at least two eCKUs.
7934
+ """
7935
+ return pulumi.get(self, "max_ecku")
7677
7936
 
7678
7937
 
7679
7938
  @pulumi.output_type
@@ -7793,8 +8052,20 @@ class GetKafkaClustersClusterEndpointResult(dict):
7793
8052
 
7794
8053
  @pulumi.output_type
7795
8054
  class GetKafkaClustersClusterEnterpriseResult(dict):
7796
- def __init__(__self__):
7797
- pass
8055
+ def __init__(__self__, *,
8056
+ max_ecku: _builtins.int):
8057
+ """
8058
+ :param _builtins.int max_ecku: The maximum number of Elastic Confluent Kafka Units (eCKUs) that Kafka clusters should auto-scale to. Kafka clusters with HIGH availability must have at least two eCKUs.
8059
+ """
8060
+ pulumi.set(__self__, "max_ecku", max_ecku)
8061
+
8062
+ @_builtins.property
8063
+ @pulumi.getter(name="maxEcku")
8064
+ def max_ecku(self) -> _builtins.int:
8065
+ """
8066
+ The maximum number of Elastic Confluent Kafka Units (eCKUs) that Kafka clusters should auto-scale to. Kafka clusters with HIGH availability must have at least two eCKUs.
8067
+ """
8068
+ return pulumi.get(self, "max_ecku")
7798
8069
 
7799
8070
 
7800
8071
  @pulumi.output_type
@@ -7818,15 +8089,26 @@ class GetKafkaClustersClusterEnvironmentResult(dict):
7818
8089
  @pulumi.output_type
7819
8090
  class GetKafkaClustersClusterFreightResult(dict):
7820
8091
  def __init__(__self__, *,
8092
+ max_ecku: _builtins.int,
7821
8093
  zones: Sequence[_builtins.str]):
7822
8094
  """
8095
+ :param _builtins.int max_ecku: The maximum number of Elastic Confluent Kafka Units (eCKUs) that Kafka clusters should auto-scale to. Kafka clusters with HIGH availability must have at least two eCKUs.
7823
8096
  :param Sequence[_builtins.str] zones: (Required List of String) The list of zones the cluster is in.
7824
8097
  - On AWS, zones are AWS [AZ IDs](https://docs.aws.amazon.com/ram/latest/userguide/working-with-az-ids.html), for example, `use1-az3`.
7825
8098
  - On GCP, zones are GCP [zones](https://cloud.google.com/compute/docs/regions-zones), for example, `us-central1-c`.
7826
8099
  - On Azure, zones are Confluent-chosen names (for example, `1`, `2`, `3`) since Azure does not have universal zone identifiers.
7827
8100
  """
8101
+ pulumi.set(__self__, "max_ecku", max_ecku)
7828
8102
  pulumi.set(__self__, "zones", zones)
7829
8103
 
8104
+ @_builtins.property
8105
+ @pulumi.getter(name="maxEcku")
8106
+ def max_ecku(self) -> _builtins.int:
8107
+ """
8108
+ The maximum number of Elastic Confluent Kafka Units (eCKUs) that Kafka clusters should auto-scale to. Kafka clusters with HIGH availability must have at least two eCKUs.
8109
+ """
8110
+ return pulumi.get(self, "max_ecku")
8111
+
7830
8112
  @_builtins.property
7831
8113
  @pulumi.getter
7832
8114
  def zones(self) -> Sequence[_builtins.str]:
@@ -7859,8 +8141,20 @@ class GetKafkaClustersClusterNetworkResult(dict):
7859
8141
 
7860
8142
  @pulumi.output_type
7861
8143
  class GetKafkaClustersClusterStandardResult(dict):
7862
- def __init__(__self__):
7863
- pass
8144
+ def __init__(__self__, *,
8145
+ max_ecku: _builtins.int):
8146
+ """
8147
+ :param _builtins.int max_ecku: The maximum number of Elastic Confluent Kafka Units (eCKUs) that Kafka clusters should auto-scale to. Kafka clusters with HIGH availability must have at least two eCKUs.
8148
+ """
8149
+ pulumi.set(__self__, "max_ecku", max_ecku)
8150
+
8151
+ @_builtins.property
8152
+ @pulumi.getter(name="maxEcku")
8153
+ def max_ecku(self) -> _builtins.int:
8154
+ """
8155
+ The maximum number of Elastic Confluent Kafka Units (eCKUs) that Kafka clusters should auto-scale to. Kafka clusters with HIGH availability must have at least two eCKUs.
8156
+ """
8157
+ return pulumi.get(self, "max_ecku")
7864
8158
 
7865
8159
 
7866
8160
  @pulumi.output_type
@@ -10043,6 +10337,68 @@ class GetSubjectModeSchemaRegistryClusterResult(dict):
10043
10337
  return pulumi.get(self, "id")
10044
10338
 
10045
10339
 
10340
+ @pulumi.output_type
10341
+ class GetTableflowTopicAzureDataLakeStorageGen2Result(dict):
10342
+ def __init__(__self__, *,
10343
+ container_name: _builtins.str,
10344
+ provider_integration_id: _builtins.str,
10345
+ storage_account_name: _builtins.str,
10346
+ storage_region: _builtins.str,
10347
+ table_path: _builtins.str):
10348
+ """
10349
+ :param _builtins.str container_name: (Required String) The container name.
10350
+ :param _builtins.str provider_integration_id: (Required String) The provider integration id.
10351
+ :param _builtins.str storage_account_name: (Required String) The storage account name.
10352
+ :param _builtins.str storage_region: (Required String) The storage region.
10353
+ :param _builtins.str table_path: (Optional String) The current storage path where the data and metadata is stored for this table.
10354
+ """
10355
+ pulumi.set(__self__, "container_name", container_name)
10356
+ pulumi.set(__self__, "provider_integration_id", provider_integration_id)
10357
+ pulumi.set(__self__, "storage_account_name", storage_account_name)
10358
+ pulumi.set(__self__, "storage_region", storage_region)
10359
+ pulumi.set(__self__, "table_path", table_path)
10360
+
10361
+ @_builtins.property
10362
+ @pulumi.getter(name="containerName")
10363
+ def container_name(self) -> _builtins.str:
10364
+ """
10365
+ (Required String) The container name.
10366
+ """
10367
+ return pulumi.get(self, "container_name")
10368
+
10369
+ @_builtins.property
10370
+ @pulumi.getter(name="providerIntegrationId")
10371
+ def provider_integration_id(self) -> _builtins.str:
10372
+ """
10373
+ (Required String) The provider integration id.
10374
+ """
10375
+ return pulumi.get(self, "provider_integration_id")
10376
+
10377
+ @_builtins.property
10378
+ @pulumi.getter(name="storageAccountName")
10379
+ def storage_account_name(self) -> _builtins.str:
10380
+ """
10381
+ (Required String) The storage account name.
10382
+ """
10383
+ return pulumi.get(self, "storage_account_name")
10384
+
10385
+ @_builtins.property
10386
+ @pulumi.getter(name="storageRegion")
10387
+ def storage_region(self) -> _builtins.str:
10388
+ """
10389
+ (Required String) The storage region.
10390
+ """
10391
+ return pulumi.get(self, "storage_region")
10392
+
10393
+ @_builtins.property
10394
+ @pulumi.getter(name="tablePath")
10395
+ def table_path(self) -> _builtins.str:
10396
+ """
10397
+ (Optional String) The current storage path where the data and metadata is stored for this table.
10398
+ """
10399
+ return pulumi.get(self, "table_path")
10400
+
10401
+
10046
10402
  @pulumi.output_type
10047
10403
  class GetTableflowTopicByobAwResult(dict):
10048
10404
  def __init__(__self__, *,
@@ -1,5 +1,5 @@
1
1
  {
2
2
  "resource": true,
3
3
  "name": "confluentcloud",
4
- "version": "2.52.0-alpha.1763617028"
4
+ "version": "2.53.0"
5
5
  }
@@ -24,6 +24,7 @@ class TableflowTopicArgs:
24
24
  display_name: pulumi.Input[_builtins.str],
25
25
  environment: pulumi.Input['TableflowTopicEnvironmentArgs'],
26
26
  kafka_cluster: pulumi.Input['TableflowTopicKafkaClusterArgs'],
27
+ azure_data_lake_storage_gen2: Optional[pulumi.Input['TableflowTopicAzureDataLakeStorageGen2Args']] = None,
27
28
  byob_aws: Optional[pulumi.Input['TableflowTopicByobAwsArgs']] = None,
28
29
  credentials: Optional[pulumi.Input['TableflowTopicCredentialsArgs']] = None,
29
30
  error_handling: Optional[pulumi.Input['TableflowTopicErrorHandlingArgs']] = None,
@@ -35,6 +36,7 @@ class TableflowTopicArgs:
35
36
  The set of arguments for constructing a TableflowTopic resource.
36
37
  :param pulumi.Input[_builtins.str] display_name: The name of the Kafka topic for which Tableflow is enabled.
37
38
  :param pulumi.Input['TableflowTopicEnvironmentArgs'] environment: Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
39
+ :param pulumi.Input['TableflowTopicAzureDataLakeStorageGen2Args'] azure_data_lake_storage_gen2: (Optional Configuration Block) supports the following:
38
40
  :param pulumi.Input['TableflowTopicByobAwsArgs'] byob_aws: supports the following (See [Quick Start with Custom Storage](https://docs.confluent.io/cloud/current/topics/tableflow/get-started/quick-start-custom-storage-glue.html#cloud-tableflow-quick-start) for more details):
39
41
  :param pulumi.Input['TableflowTopicCredentialsArgs'] credentials: The Cluster API Credentials.
40
42
  :param pulumi.Input[Sequence[pulumi.Input['TableflowTopicManagedStorageArgs']]] managed_storages: The configuration of the Confluent managed storage. See [Quick Start with Managed Storage](https://docs.confluent.io/cloud/current/topics/tableflow/get-started/quick-start-managed-storage.html#cloud-tableflow-quick-start-managed-storage) for more details.
@@ -45,6 +47,8 @@ class TableflowTopicArgs:
45
47
  pulumi.set(__self__, "display_name", display_name)
46
48
  pulumi.set(__self__, "environment", environment)
47
49
  pulumi.set(__self__, "kafka_cluster", kafka_cluster)
50
+ if azure_data_lake_storage_gen2 is not None:
51
+ pulumi.set(__self__, "azure_data_lake_storage_gen2", azure_data_lake_storage_gen2)
48
52
  if byob_aws is not None:
49
53
  pulumi.set(__self__, "byob_aws", byob_aws)
50
54
  if credentials is not None:
@@ -96,6 +100,18 @@ class TableflowTopicArgs:
96
100
  def kafka_cluster(self, value: pulumi.Input['TableflowTopicKafkaClusterArgs']):
97
101
  pulumi.set(self, "kafka_cluster", value)
98
102
 
103
+ @_builtins.property
104
+ @pulumi.getter(name="azureDataLakeStorageGen2")
105
+ def azure_data_lake_storage_gen2(self) -> Optional[pulumi.Input['TableflowTopicAzureDataLakeStorageGen2Args']]:
106
+ """
107
+ (Optional Configuration Block) supports the following:
108
+ """
109
+ return pulumi.get(self, "azure_data_lake_storage_gen2")
110
+
111
+ @azure_data_lake_storage_gen2.setter
112
+ def azure_data_lake_storage_gen2(self, value: Optional[pulumi.Input['TableflowTopicAzureDataLakeStorageGen2Args']]):
113
+ pulumi.set(self, "azure_data_lake_storage_gen2", value)
114
+
99
115
  @_builtins.property
100
116
  @pulumi.getter(name="byobAws")
101
117
  def byob_aws(self) -> Optional[pulumi.Input['TableflowTopicByobAwsArgs']]:
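
The new `azure_data_lake_storage_gen2` input slots in alongside the existing `byob_aws` and `managed_storages` storage options on `TableflowTopic`. Below is a minimal sketch of how the new block might be wired up in a Pulumi program; the nested keys inside the block are illustrative assumptions only (the real nested arguments are defined by the newly added `TableflowTopicAzureDataLakeStorageGen2Args`), and all IDs are placeholders.

```python
import pulumi
import pulumi_confluentcloud as confluentcloud

# Sketch: enable Tableflow on an existing Kafka topic, materializing into
# Azure Data Lake Storage Gen2. The nested keys in azure_data_lake_storage_gen2
# are hypothetical placeholders, not confirmed field names.
orders_tableflow = confluentcloud.TableflowTopic(
    "orders-tableflow",
    display_name="orders",                # Kafka topic to enable Tableflow for
    environment={"id": "env-abc123"},     # placeholder environment ID
    kafka_cluster={"id": "lkc-abc123"},   # placeholder cluster ID
    azure_data_lake_storage_gen2={
        # hypothetical nested fields, for illustration only
        "provider_integration_id": "cspi-abc123",
        "container_name": "tableflow-data",
    },
)
```
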
@@ -182,6 +198,7 @@ class TableflowTopicArgs:
182
198
  @pulumi.input_type
183
199
  class _TableflowTopicState:
184
200
  def __init__(__self__, *,
201
+ azure_data_lake_storage_gen2: Optional[pulumi.Input['TableflowTopicAzureDataLakeStorageGen2Args']] = None,
185
202
  byob_aws: Optional[pulumi.Input['TableflowTopicByobAwsArgs']] = None,
186
203
  credentials: Optional[pulumi.Input['TableflowTopicCredentialsArgs']] = None,
187
204
  display_name: Optional[pulumi.Input[_builtins.str]] = None,
@@ -199,6 +216,7 @@ class _TableflowTopicState:
199
216
  write_mode: Optional[pulumi.Input[_builtins.str]] = None):
200
217
  """
201
218
  Input properties used for looking up and filtering TableflowTopic resources.
219
+ :param pulumi.Input['TableflowTopicAzureDataLakeStorageGen2Args'] azure_data_lake_storage_gen2: (Optional Configuration Block) The Azure Data Lake Storage Gen2 storage configuration for the Tableflow topic.
202
220
  :param pulumi.Input['TableflowTopicByobAwsArgs'] byob_aws: supports the following (See [Quick Start with Custom Storage](https://docs.confluent.io/cloud/current/topics/tableflow/get-started/quick-start-custom-storage-glue.html#cloud-tableflow-quick-start) for more details):
203
221
  :param pulumi.Input['TableflowTopicCredentialsArgs'] credentials: The Cluster API Credentials.
204
222
  :param pulumi.Input[_builtins.str] display_name: The name of the Kafka topic for which Tableflow is enabled.
@@ -213,6 +231,8 @@ class _TableflowTopicState:
213
231
  :param pulumi.Input[_builtins.str] table_path: (Optional String) The current storage path where the data and metadata is stored for this table.
214
232
  :param pulumi.Input[_builtins.str] write_mode: (Optional String) Indicates the write mode of the Tableflow topic.
215
233
  """
234
+ if azure_data_lake_storage_gen2 is not None:
235
+ pulumi.set(__self__, "azure_data_lake_storage_gen2", azure_data_lake_storage_gen2)
216
236
  if byob_aws is not None:
217
237
  pulumi.set(__self__, "byob_aws", byob_aws)
218
238
  if credentials is not None:
@@ -247,6 +267,18 @@ class _TableflowTopicState:
247
267
  if write_mode is not None:
248
268
  pulumi.set(__self__, "write_mode", write_mode)
249
269
 
270
+ @_builtins.property
271
+ @pulumi.getter(name="azureDataLakeStorageGen2")
272
+ def azure_data_lake_storage_gen2(self) -> Optional[pulumi.Input['TableflowTopicAzureDataLakeStorageGen2Args']]:
273
+ """
274
+ (Optional Configuration Block) The Azure Data Lake Storage Gen2 storage configuration for the Tableflow topic.
275
+ """
276
+ return pulumi.get(self, "azure_data_lake_storage_gen2")
277
+
278
+ @azure_data_lake_storage_gen2.setter
279
+ def azure_data_lake_storage_gen2(self, value: Optional[pulumi.Input['TableflowTopicAzureDataLakeStorageGen2Args']]):
280
+ pulumi.set(self, "azure_data_lake_storage_gen2", value)
281
+
250
282
  @_builtins.property
251
283
  @pulumi.getter(name="byobAws")
252
284
  def byob_aws(self) -> Optional[pulumi.Input['TableflowTopicByobAwsArgs']]:
@@ -429,6 +461,7 @@ class TableflowTopic(pulumi.CustomResource):
429
461
  def __init__(__self__,
430
462
  resource_name: str,
431
463
  opts: Optional[pulumi.ResourceOptions] = None,
464
+ azure_data_lake_storage_gen2: Optional[pulumi.Input[Union['TableflowTopicAzureDataLakeStorageGen2Args', 'TableflowTopicAzureDataLakeStorageGen2ArgsDict']]] = None,
432
465
  byob_aws: Optional[pulumi.Input[Union['TableflowTopicByobAwsArgs', 'TableflowTopicByobAwsArgsDict']]] = None,
433
466
  credentials: Optional[pulumi.Input[Union['TableflowTopicCredentialsArgs', 'TableflowTopicCredentialsArgsDict']]] = None,
434
467
  display_name: Optional[pulumi.Input[_builtins.str]] = None,
@@ -487,15 +520,6 @@ class TableflowTopic(pulumi.CustomResource):
487
520
  "provider_integration_id": main["id"],
488
521
  })
489
522
  ```
490
-
491
- ## Getting Started
492
-
493
- The following end-to-end examples might help to get started with `TableflowTopic` resource:
494
- * confluent-managed-storage: Tableflow topic with Confluent-managed storage.
495
- * byob-aws-storage: Tableflow topic with custom (BYOB AWS) storage.
496
- * datagen-connector-byob-aws-storage: Datagen Source connector with a Tableflow topic with custom (BYOB AWS) storage.
497
- * datagen-connector-confluent-managed-storage: Datagen Source connector with a Tableflow topic with Confluent-managed storage.
498
-
499
523
  ## Import
500
524
 
501
525
  You can import a Tableflow Topic by using the Tableflow Topic name, Environment ID, and Kafka Cluster ID, in the format `<Environment ID>/<Kafka Cluster ID>/<Tableflow Topic name>`, for example:
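
The same `<Environment ID>/<Kafka Cluster ID>/<Tableflow Topic name>` identifier can also be used to adopt an existing Tableflow Topic directly from a Python program via Pulumi's `import_` resource option. A minimal sketch with placeholder IDs follows; the declared arguments must match the existing resource's state for the adoption to succeed.

```python
import pulumi
import pulumi_confluentcloud as confluentcloud

# Adopt an already-provisioned Tableflow topic into the Pulumi state instead of
# creating a new one. The import ID follows the documented
# <Environment ID>/<Kafka Cluster ID>/<Tableflow Topic name> format (placeholders below).
existing = confluentcloud.TableflowTopic(
    "orders",
    display_name="orders",
    environment={"id": "env-abc123"},
    kafka_cluster={"id": "lkc-abc123"},
    opts=pulumi.ResourceOptions(import_="env-abc123/lkc-abc123/orders"),
)
```
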
@@ -520,6 +544,7 @@ class TableflowTopic(pulumi.CustomResource):
520
544
 
521
545
  :param str resource_name: The name of the resource.
522
546
  :param pulumi.ResourceOptions opts: Options for the resource.
547
+ :param pulumi.Input[Union['TableflowTopicAzureDataLakeStorageGen2Args', 'TableflowTopicAzureDataLakeStorageGen2ArgsDict']] azure_data_lake_storage_gen2: (Optional Configuration Block) The Azure Data Lake Storage Gen2 storage configuration for the Tableflow topic.
523
548
  :param pulumi.Input[Union['TableflowTopicByobAwsArgs', 'TableflowTopicByobAwsArgsDict']] byob_aws: supports the following (See [Quick Start with Custom Storage](https://docs.confluent.io/cloud/current/topics/tableflow/get-started/quick-start-custom-storage-glue.html#cloud-tableflow-quick-start) for more details):
524
549
  :param pulumi.Input[Union['TableflowTopicCredentialsArgs', 'TableflowTopicCredentialsArgsDict']] credentials: The Cluster API Credentials.
525
550
  :param pulumi.Input[_builtins.str] display_name: The name of the Kafka topic for which Tableflow is enabled.
@@ -582,15 +607,6 @@ class TableflowTopic(pulumi.CustomResource):
582
607
  "provider_integration_id": main["id"],
583
608
  })
584
609
  ```
585
-
586
- ## Getting Started
587
-
588
- The following end-to-end examples might help to get started with `TableflowTopic` resource:
589
- * confluent-managed-storage: Tableflow topic with Confluent-managed storage.
590
- * byob-aws-storage: Tableflow topic with custom (BYOB AWS) storage.
591
- * datagen-connector-byob-aws-storage: Datagen Source connector with a Tableflow topic with custom (BYOB AWS) storage.
592
- * datagen-connector-confluent-managed-storage: Datagen Source connector with a Tableflow topic with Confluent-managed storage.
593
-
594
610
  ## Import
595
611
 
596
612
  You can import a Tableflow Topic by using the Tableflow Topic name, Environment ID, and Kafka Cluster ID, in the format `<Environment ID>/<Kafka Cluster ID>/<Tableflow Topic name>`, for example:
@@ -628,6 +644,7 @@ class TableflowTopic(pulumi.CustomResource):
628
644
  def _internal_init(__self__,
629
645
  resource_name: str,
630
646
  opts: Optional[pulumi.ResourceOptions] = None,
647
+ azure_data_lake_storage_gen2: Optional[pulumi.Input[Union['TableflowTopicAzureDataLakeStorageGen2Args', 'TableflowTopicAzureDataLakeStorageGen2ArgsDict']]] = None,
631
648
  byob_aws: Optional[pulumi.Input[Union['TableflowTopicByobAwsArgs', 'TableflowTopicByobAwsArgsDict']]] = None,
632
649
  credentials: Optional[pulumi.Input[Union['TableflowTopicCredentialsArgs', 'TableflowTopicCredentialsArgsDict']]] = None,
633
650
  display_name: Optional[pulumi.Input[_builtins.str]] = None,
@@ -647,6 +664,7 @@ class TableflowTopic(pulumi.CustomResource):
647
664
  raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
648
665
  __props__ = TableflowTopicArgs.__new__(TableflowTopicArgs)
649
666
 
667
+ __props__.__dict__["azure_data_lake_storage_gen2"] = azure_data_lake_storage_gen2
650
668
  __props__.__dict__["byob_aws"] = byob_aws
651
669
  __props__.__dict__["credentials"] = None if credentials is None else pulumi.Output.secret(credentials)
652
670
  if display_name is None and not opts.urn:
@@ -680,6 +698,7 @@ class TableflowTopic(pulumi.CustomResource):
680
698
  def get(resource_name: str,
681
699
  id: pulumi.Input[str],
682
700
  opts: Optional[pulumi.ResourceOptions] = None,
701
+ azure_data_lake_storage_gen2: Optional[pulumi.Input[Union['TableflowTopicAzureDataLakeStorageGen2Args', 'TableflowTopicAzureDataLakeStorageGen2ArgsDict']]] = None,
683
702
  byob_aws: Optional[pulumi.Input[Union['TableflowTopicByobAwsArgs', 'TableflowTopicByobAwsArgsDict']]] = None,
684
703
  credentials: Optional[pulumi.Input[Union['TableflowTopicCredentialsArgs', 'TableflowTopicCredentialsArgsDict']]] = None,
685
704
  display_name: Optional[pulumi.Input[_builtins.str]] = None,
@@ -702,6 +721,7 @@ class TableflowTopic(pulumi.CustomResource):
702
721
  :param str resource_name: The unique name of the resulting resource.
703
722
  :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
704
723
  :param pulumi.ResourceOptions opts: Options for the resource.
724
+ :param pulumi.Input[Union['TableflowTopicAzureDataLakeStorageGen2Args', 'TableflowTopicAzureDataLakeStorageGen2ArgsDict']] azure_data_lake_storage_gen2: (Optional Configuration Block) The Azure Data Lake Storage Gen2 storage configuration for the Tableflow topic.
705
725
  :param pulumi.Input[Union['TableflowTopicByobAwsArgs', 'TableflowTopicByobAwsArgsDict']] byob_aws: supports the following (See [Quick Start with Custom Storage](https://docs.confluent.io/cloud/current/topics/tableflow/get-started/quick-start-custom-storage-glue.html#cloud-tableflow-quick-start) for more details):
706
726
  :param pulumi.Input[Union['TableflowTopicCredentialsArgs', 'TableflowTopicCredentialsArgsDict']] credentials: The Cluster API Credentials.
707
727
  :param pulumi.Input[_builtins.str] display_name: The name of the Kafka topic for which Tableflow is enabled.
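
For read-only lookups that do not adopt the resource into the stack's state, the static `get` method shown above returns the outputs of an existing Tableflow Topic. A minimal sketch, assuming the lookup ID uses the same `<Environment ID>/<Kafka Cluster ID>/<Tableflow Topic name>` format as import (all IDs are placeholders):

```python
import pulumi
import pulumi_confluentcloud as confluentcloud

# Fetch the state of an existing Tableflow topic by its provider ID
# (assumed here to follow the <Environment ID>/<Kafka Cluster ID>/<topic name> format).
orders = confluentcloud.TableflowTopic.get(
    "orders-lookup",
    id="env-abc123/lkc-abc123/orders",
)

# Output properties such as table_path (and the new azure_data_lake_storage_gen2
# block, when configured) are then available on the returned resource.
pulumi.export("ordersTablePath", orders.table_path)
```
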
@@ -720,6 +740,7 @@ class TableflowTopic(pulumi.CustomResource):
720
740
 
721
741
  __props__ = _TableflowTopicState.__new__(_TableflowTopicState)
722
742
 
743
+ __props__.__dict__["azure_data_lake_storage_gen2"] = azure_data_lake_storage_gen2
723
744
  __props__.__dict__["byob_aws"] = byob_aws
724
745
  __props__.__dict__["credentials"] = credentials
725
746
  __props__.__dict__["display_name"] = display_name
@@ -737,6 +758,14 @@ class TableflowTopic(pulumi.CustomResource):
737
758
  __props__.__dict__["write_mode"] = write_mode
738
759
  return TableflowTopic(resource_name, opts=opts, __props__=__props__)
739
760
 
761
+ @_builtins.property
762
+ @pulumi.getter(name="azureDataLakeStorageGen2")
763
+ def azure_data_lake_storage_gen2(self) -> pulumi.Output[Optional['outputs.TableflowTopicAzureDataLakeStorageGen2']]:
764
+ """
765
+ (Optional Configuration Block) The Azure Data Lake Storage Gen2 storage configuration for the Tableflow topic.
766
+ """
767
+ return pulumi.get(self, "azure_data_lake_storage_gen2")
768
+
740
769
  @_builtins.property
741
770
  @pulumi.getter(name="byobAws")
742
771
  def byob_aws(self) -> pulumi.Output[Optional['outputs.TableflowTopicByobAws']]:
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: pulumi_confluentcloud
3
- Version: 2.52.0a1763617028
3
+ Version: 2.53.0
4
4
  Summary: A Pulumi package for creating and managing Confluent cloud resources.
5
5
  License: Apache-2.0
6
6
  Project-URL: Homepage, https://www.pulumi.com
@@ -1,5 +1,5 @@
1
1
  pulumi_confluentcloud/__init__.py,sha256=njoOocldDmVmxvwjY0h3gDa9vLDo_vpJsKmu8LK1Uks,16765
2
- pulumi_confluentcloud/_inputs.py,sha256=3saWgVNL2700CGB6W2Jhshc_jEwLEnINGZz4MWURJC0,475774
2
+ pulumi_confluentcloud/_inputs.py,sha256=yoo6vKQAeNBel1RiUSwlgNnrQr6QE_VmGXOh3AtsQ7c,490648
3
3
  pulumi_confluentcloud/_utilities.py,sha256=66uLGQDI1oMFOI3Fe5igAphtexWhcSLDyuVW50jW3ik,10789
4
4
  pulumi_confluentcloud/access_point.py,sha256=EWjw5oS5Zvch3deV4n8oUv9AV73aYy7qUrEJ6ji0Db0,32976
5
5
  pulumi_confluentcloud/api_key.py,sha256=-jTRJ8ztmZQoqb3T9IzR7bhBdV9PxwUHDj8sG4O93Dc,36850
@@ -75,7 +75,7 @@ pulumi_confluentcloud/get_schemas.py,sha256=Mr-OM-3oJsdPPXbq1J38Z2N8aybF6WXBq-bM
75
75
  pulumi_confluentcloud/get_service_account.py,sha256=3lk7TNkGjmi4c1oa4o8O4XjMw8EIlZg332Ije6jUqfg,7331
76
76
  pulumi_confluentcloud/get_subject_config.py,sha256=dKd28r2YSQZZ5Wtjn5CxfiX1sCYQBubvbCWDWc9-jhU,9328
77
77
  pulumi_confluentcloud/get_subject_mode.py,sha256=RJKGulZlRGGkoGVgEW8_Mv7wkVJfu5wB5RU2Po9PJ_E,10545
78
- pulumi_confluentcloud/get_tableflow_topic.py,sha256=7SFh2xCXl34dADEhEJ0lSzWxHgzFnRsCtWHwCyUs59g,16521
78
+ pulumi_confluentcloud/get_tableflow_topic.py,sha256=dYeXZbuN815Z4BE5usmhQSdqIkvRfMRwA8P-uzDRkpA,17455
79
79
  pulumi_confluentcloud/get_tag.py,sha256=vBOXia2VjmhKoQfRY0A9wv8KXO5IcPOdDa_CAtDfd6M,11178
80
80
  pulumi_confluentcloud/get_tag_binding.py,sha256=1yXnizQsxxxD5YaTezzbC8bx4W_guaXD4UAJbaD94Tc,13071
81
81
  pulumi_confluentcloud/get_transit_gateway_attachment.py,sha256=TUOrt6fi7LD4QukfQkthoSRprS9cMoSZMpZehK_yhIU,8504
@@ -97,7 +97,7 @@ pulumi_confluentcloud/ksql_cluster.py,sha256=2iHuwHft1nFCJHHUZ_OQ6za0Wwhc01VyfdV
97
97
  pulumi_confluentcloud/network.py,sha256=8CtwHx8oTbnbgX7F4WLcmyFHUR5v-d6mt4XtcYdGShw,65777
98
98
  pulumi_confluentcloud/network_link_endpoint.py,sha256=mIDIlsxhKa5wwaLJzBn9aMGGl_lYRcZvUDjLFEh5aYI,21019
99
99
  pulumi_confluentcloud/network_link_service.py,sha256=1EYvdSSxGVyf9EYUb9KQEGF_d7ct74B4BTQcy5VvTto,21597
100
- pulumi_confluentcloud/outputs.py,sha256=JpWdKPjiDZ-saZ8ZfkZGWcfQr1KmPvLmX_2WZAQdRvY,416836
100
+ pulumi_confluentcloud/outputs.py,sha256=1iYkIO1caI4h3izic5HCj_pSmiUPc_-WekBVxGJjRnc,434591
101
101
  pulumi_confluentcloud/peering.py,sha256=JLZhrSeVal5O2KcWqgKjcA_NVxm4jCgdmKkeVqEcLAI,26219
102
102
  pulumi_confluentcloud/plugin.py,sha256=CKzFzVS27r-z8IC6uQKg6JMTAY-58RfRp9WZfybQC9c,19241
103
103
  pulumi_confluentcloud/private_link_access.py,sha256=RXRS63n0MtU1rqscLqDiZXCPTyB6Agn4CwKu5BEg6BY,26465
@@ -107,7 +107,7 @@ pulumi_confluentcloud/provider.py,sha256=sN-VlTRE0cOeq78IKZopdjt1JLVRJpXA3LYYbYi
107
107
  pulumi_confluentcloud/provider_integration.py,sha256=RmBOJN9EFsKlRiBAEP2_hXl-LR0FMWA6_qC61HWocPQ,16497
108
108
  pulumi_confluentcloud/provider_integration_authorization.py,sha256=iESH7OhbtppHB3wY8oBgpWAl9mrvp6Iar9vWtuXjHso,17537
109
109
  pulumi_confluentcloud/provider_integration_setup.py,sha256=q06Qw844J2N0uoctoACyySDy9piyuqjHzAjZS_WsHIc,19706
110
- pulumi_confluentcloud/pulumi-plugin.json,sha256=U6O5ZLajzGQB73QC_RyrBLIWBmGVDxpiub8tqskIGmI,91
110
+ pulumi_confluentcloud/pulumi-plugin.json,sha256=D_DqYBDIACo_GBFRRe7eopvXz-qTerwzUgq4UYwVMI4,74
111
111
  pulumi_confluentcloud/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
112
112
  pulumi_confluentcloud/role_binding.py,sha256=MbeRwcuWNj_KSnNP2zEqNSthGBV1z1haBjbk97pvMZk,20463
113
113
  pulumi_confluentcloud/schema.py,sha256=CYdHdFIw-VYplimUSaTWGittfo5wUOEWuFYfD_kUbvI,49055
@@ -119,7 +119,7 @@ pulumi_confluentcloud/schema_registry_kek.py,sha256=zRcr3nKOwOguV99lC8W3TuUVjsH3
119
119
  pulumi_confluentcloud/service_account.py,sha256=cGwmSYtZN0DW3l2_stxjfgV6FDIks9a77a2nO2xptQE,19583
120
120
  pulumi_confluentcloud/subject_config.py,sha256=OXU_91oUU5FLf8rRjTc2J7aUzCPzZvLlAygSge5eaQE,30180
121
121
  pulumi_confluentcloud/subject_mode.py,sha256=0wyvTJS2EARMEloedqC_Ux4nQUSh5f3wuLe1TAXtIh8,24612
122
- pulumi_confluentcloud/tableflow_topic.py,sha256=TSjIoglzQzL1qwQniNWpEZCvOO6EN_MfAm_fo-GsT-4,47770
122
+ pulumi_confluentcloud/tableflow_topic.py,sha256=3cX-zNsZwzg0nR3zGgk_zxKtUomn52fFWtjaFlokHqg,50173
123
123
  pulumi_confluentcloud/tag.py,sha256=knvRKPcNHRddvzLYwnra8vwW5cxycZOG71Cli_lhLmg,23746
124
124
  pulumi_confluentcloud/tag_binding.py,sha256=lk5cphmn882kG4deW2vQue69aTLOLsM6kJ1quwZT3NY,30739
125
125
  pulumi_confluentcloud/tf_importer.py,sha256=D0oj3ocsPCHOdzVOGHcBLDJ0AenFOVPK13R9BMTcGfY,13500
@@ -128,7 +128,7 @@ pulumi_confluentcloud/config/__init__.py,sha256=XWnQfVtc2oPapjSXXCdORFJvMpXt_SMJ
128
128
  pulumi_confluentcloud/config/__init__.pyi,sha256=wUpGQFTVXK9rFefT-KLKGEPtajQG_D4Due_TzbOT5jE,2151
129
129
  pulumi_confluentcloud/config/outputs.py,sha256=j9KabfxdzVhzLBDXzRsfQbM3kPvizCnfA4jT1GiYu7I,5369
130
130
  pulumi_confluentcloud/config/vars.py,sha256=a6jklkyhkLNyX1ZeL2snOeaA6uX4dqwUZl5fUDp3wMQ,4915
131
- pulumi_confluentcloud-2.52.0a1763617028.dist-info/METADATA,sha256=l1aGAwTSzGC_LUPUsMAJhC3TDCCH5LuQSRUxYYq_2cA,2898
132
- pulumi_confluentcloud-2.52.0a1763617028.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
133
- pulumi_confluentcloud-2.52.0a1763617028.dist-info/top_level.txt,sha256=0spb6Wqsv3xa9v5poWmP3cWll3tbfOwOKwneN7S2DjM,22
134
- pulumi_confluentcloud-2.52.0a1763617028.dist-info/RECORD,,
131
+ pulumi_confluentcloud-2.53.0.dist-info/METADATA,sha256=GpL5GDkQHWCUcALqsvwQzHoRXIFXBsrC6K6De-956pk,2887
132
+ pulumi_confluentcloud-2.53.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
133
+ pulumi_confluentcloud-2.53.0.dist-info/top_level.txt,sha256=0spb6Wqsv3xa9v5poWmP3cWll3tbfOwOKwneN7S2DjM,22
134
+ pulumi_confluentcloud-2.53.0.dist-info/RECORD,,