pulumi-aiven 6.20.0__py3-none-any.whl → 6.21.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of pulumi-aiven might be problematic. Click here for more details.

pulumi_aiven/_inputs.py CHANGED
@@ -100,6 +100,7 @@ __all__ = [
100
100
  'KafkaConnectorTaskArgs',
101
101
  'KafkaKafkaArgs',
102
102
  'KafkaKafkaUserConfigArgs',
103
+ 'KafkaKafkaUserConfigFollowerFetchingArgs',
103
104
  'KafkaKafkaUserConfigIpFilterObjectArgs',
104
105
  'KafkaKafkaUserConfigKafkaArgs',
105
106
  'KafkaKafkaUserConfigKafkaAuthenticationMethodsArgs',
@@ -2373,6 +2374,7 @@ class DragonflyDragonflyUserConfigMigrationArgs:
2373
2374
  port: pulumi.Input[int],
2374
2375
  dbname: Optional[pulumi.Input[str]] = None,
2375
2376
  ignore_dbs: Optional[pulumi.Input[str]] = None,
2377
+ ignore_roles: Optional[pulumi.Input[str]] = None,
2376
2378
  method: Optional[pulumi.Input[str]] = None,
2377
2379
  password: Optional[pulumi.Input[str]] = None,
2378
2380
  ssl: Optional[pulumi.Input[bool]] = None,
@@ -2382,6 +2384,7 @@ class DragonflyDragonflyUserConfigMigrationArgs:
2382
2384
  :param pulumi.Input[int] port: Port number of the server where to migrate data from. Example: `1234`.
2383
2385
  :param pulumi.Input[str] dbname: Database name for bootstrapping the initial connection. Example: `defaultdb`.
2384
2386
  :param pulumi.Input[str] ignore_dbs: Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment). Example: `db1,db2`.
2387
+ :param pulumi.Input[str] ignore_roles: Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment). Example: `role1,role2`.
2385
2388
  :param pulumi.Input[str] method: Enum: `dump`, `replication`. The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
2386
2389
  :param pulumi.Input[str] password: Password for authentication with the server where to migrate data from. Example: `jjKk45Nnd`.
2387
2390
  :param pulumi.Input[bool] ssl: The server where to migrate data from is secured with SSL. Default: `true`.
@@ -2393,6 +2396,8 @@ class DragonflyDragonflyUserConfigMigrationArgs:
2393
2396
  pulumi.set(__self__, "dbname", dbname)
2394
2397
  if ignore_dbs is not None:
2395
2398
  pulumi.set(__self__, "ignore_dbs", ignore_dbs)
2399
+ if ignore_roles is not None:
2400
+ pulumi.set(__self__, "ignore_roles", ignore_roles)
2396
2401
  if method is not None:
2397
2402
  pulumi.set(__self__, "method", method)
2398
2403
  if password is not None:
@@ -2450,6 +2455,18 @@ class DragonflyDragonflyUserConfigMigrationArgs:
2450
2455
  def ignore_dbs(self, value: Optional[pulumi.Input[str]]):
2451
2456
  pulumi.set(self, "ignore_dbs", value)
2452
2457
 
2458
+ @property
2459
+ @pulumi.getter(name="ignoreRoles")
2460
+ def ignore_roles(self) -> Optional[pulumi.Input[str]]:
2461
+ """
2462
+ Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment). Example: `role1,role2`.
2463
+ """
2464
+ return pulumi.get(self, "ignore_roles")
2465
+
2466
+ @ignore_roles.setter
2467
+ def ignore_roles(self, value: Optional[pulumi.Input[str]]):
2468
+ pulumi.set(self, "ignore_roles", value)
2469
+
2453
2470
  @property
2454
2471
  @pulumi.getter
2455
2472
  def method(self) -> Optional[pulumi.Input[str]]:
@@ -7344,6 +7361,7 @@ class KafkaKafkaUserConfigArgs:
7344
7361
  additional_backup_regions: Optional[pulumi.Input[str]] = None,
7345
7362
  aiven_kafka_topic_messages: Optional[pulumi.Input[bool]] = None,
7346
7363
  custom_domain: Optional[pulumi.Input[str]] = None,
7364
+ follower_fetching: Optional[pulumi.Input['KafkaKafkaUserConfigFollowerFetchingArgs']] = None,
7347
7365
  ip_filter_objects: Optional[pulumi.Input[Sequence[pulumi.Input['KafkaKafkaUserConfigIpFilterObjectArgs']]]] = None,
7348
7366
  ip_filter_strings: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
7349
7367
  ip_filters: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
@@ -7369,6 +7387,7 @@ class KafkaKafkaUserConfigArgs:
7369
7387
  :param pulumi.Input[str] additional_backup_regions: Additional Cloud Regions for Backup Replication.
7370
7388
  :param pulumi.Input[bool] aiven_kafka_topic_messages: Allow access to read Kafka topic messages in the Aiven Console and REST API.
7371
7389
  :param pulumi.Input[str] custom_domain: Serve the web frontend using a custom CNAME pointing to the Aiven DNS name. Example: `grafana.example.org`.
7390
+ :param pulumi.Input['KafkaKafkaUserConfigFollowerFetchingArgs'] follower_fetching: Enable follower fetching
7372
7391
  :param pulumi.Input[Sequence[pulumi.Input['KafkaKafkaUserConfigIpFilterObjectArgs']]] ip_filter_objects: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`
7373
7392
  :param pulumi.Input[Sequence[pulumi.Input[str]]] ip_filter_strings: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`.
7374
7393
  :param pulumi.Input[Sequence[pulumi.Input[str]]] ip_filters: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`.
@@ -7399,6 +7418,8 @@ class KafkaKafkaUserConfigArgs:
7399
7418
  pulumi.set(__self__, "aiven_kafka_topic_messages", aiven_kafka_topic_messages)
7400
7419
  if custom_domain is not None:
7401
7420
  pulumi.set(__self__, "custom_domain", custom_domain)
7421
+ if follower_fetching is not None:
7422
+ pulumi.set(__self__, "follower_fetching", follower_fetching)
7402
7423
  if ip_filter_objects is not None:
7403
7424
  pulumi.set(__self__, "ip_filter_objects", ip_filter_objects)
7404
7425
  if ip_filter_strings is not None:
@@ -7482,6 +7503,18 @@ class KafkaKafkaUserConfigArgs:
7482
7503
  def custom_domain(self, value: Optional[pulumi.Input[str]]):
7483
7504
  pulumi.set(self, "custom_domain", value)
7484
7505
 
7506
+ @property
7507
+ @pulumi.getter(name="followerFetching")
7508
+ def follower_fetching(self) -> Optional[pulumi.Input['KafkaKafkaUserConfigFollowerFetchingArgs']]:
7509
+ """
7510
+ Enable follower fetching
7511
+ """
7512
+ return pulumi.get(self, "follower_fetching")
7513
+
7514
+ @follower_fetching.setter
7515
+ def follower_fetching(self, value: Optional[pulumi.Input['KafkaKafkaUserConfigFollowerFetchingArgs']]):
7516
+ pulumi.set(self, "follower_fetching", value)
7517
+
7485
7518
  @property
7486
7519
  @pulumi.getter(name="ipFilterObjects")
7487
7520
  def ip_filter_objects(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['KafkaKafkaUserConfigIpFilterObjectArgs']]]]:
@@ -7733,6 +7766,29 @@ class KafkaKafkaUserConfigArgs:
7733
7766
  pulumi.set(self, "tiered_storage", value)
7734
7767
 
7735
7768
 
7769
+ @pulumi.input_type
7770
+ class KafkaKafkaUserConfigFollowerFetchingArgs:
7771
+ def __init__(__self__, *,
7772
+ enabled: Optional[pulumi.Input[bool]] = None):
7773
+ """
7774
+ :param pulumi.Input[bool] enabled: Whether to enable the follower fetching functionality.
7775
+ """
7776
+ if enabled is not None:
7777
+ pulumi.set(__self__, "enabled", enabled)
7778
+
7779
+ @property
7780
+ @pulumi.getter
7781
+ def enabled(self) -> Optional[pulumi.Input[bool]]:
7782
+ """
7783
+ Whether to enable the follower fetching functionality.
7784
+ """
7785
+ return pulumi.get(self, "enabled")
7786
+
7787
+ @enabled.setter
7788
+ def enabled(self, value: Optional[pulumi.Input[bool]]):
7789
+ pulumi.set(self, "enabled", value)
7790
+
7791
+
7736
7792
  @pulumi.input_type
7737
7793
  class KafkaKafkaUserConfigIpFilterObjectArgs:
7738
7794
  def __init__(__self__, *,
@@ -7821,52 +7877,52 @@ class KafkaKafkaUserConfigKafkaArgs:
7821
7877
  transaction_remove_expired_transaction_cleanup_interval_ms: Optional[pulumi.Input[int]] = None,
7822
7878
  transaction_state_log_segment_bytes: Optional[pulumi.Input[int]] = None):
7823
7879
  """
7824
- :param pulumi.Input[bool] auto_create_topics_enable: Enable auto creation of topics.
7825
- :param pulumi.Input[str] compression_type: Enum: `gzip`, `snappy`, `lz4`, `zstd`, `uncompressed`, `producer`. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `uncompressed` which is equivalent to no compression; and `producer` which means retain the original compression codec set by the producer.
7826
- :param pulumi.Input[int] connections_max_idle_ms: Idle connections timeout: the server socket processor threads close the connections that idle for longer than this. Example: `540000`.
7827
- :param pulumi.Input[int] default_replication_factor: Replication factor for autocreated topics.
7828
- :param pulumi.Input[int] group_initial_rebalance_delay_ms: The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. Example: `3000`.
7829
- :param pulumi.Input[int] group_max_session_timeout_ms: The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. Example: `1800000`.
7830
- :param pulumi.Input[int] group_min_session_timeout_ms: The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. Example: `6000`.
7831
- :param pulumi.Input[int] log_cleaner_delete_retention_ms: How long are delete records retained? Example: `86400000`.
7832
- :param pulumi.Input[int] log_cleaner_max_compaction_lag_ms: The maximum amount of time message will remain uncompacted. Only applicable for logs that are being compacted.
7833
- :param pulumi.Input[float] log_cleaner_min_cleanable_ratio: Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option. Example: `0.5`.
7834
- :param pulumi.Input[int] log_cleaner_min_compaction_lag_ms: The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.
7835
- :param pulumi.Input[str] log_cleanup_policy: Enum: `delete`, `compact`, `compact,delete`. The default cleanup policy for segments beyond the retention window.
7836
- :param pulumi.Input[int] log_flush_interval_messages: The number of messages accumulated on a log partition before messages are flushed to disk. Example: `9223372036854775807`.
7837
- :param pulumi.Input[int] log_flush_interval_ms: The maximum time in ms that a message in any topic is kept in memory before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used.
7838
- :param pulumi.Input[int] log_index_interval_bytes: The interval with which Kafka adds an entry to the offset index. Example: `4096`.
7839
- :param pulumi.Input[int] log_index_size_max_bytes: The maximum size in bytes of the offset index. Example: `10485760`.
7840
- :param pulumi.Input[int] log_local_retention_bytes: The maximum size of local log segments that can grow for a partition before it gets eligible for deletion. If set to -2, the value of log.retention.bytes is used. The effective value should always be less than or equal to log.retention.bytes value.
7841
- :param pulumi.Input[int] log_local_retention_ms: The number of milliseconds to keep the local log segments before it gets eligible for deletion. If set to -2, the value of log.retention.ms is used. The effective value should always be less than or equal to log.retention.ms value.
7842
- :param pulumi.Input[bool] log_message_downconversion_enable: This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests.
7843
- :param pulumi.Input[int] log_message_timestamp_difference_max_ms: The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message.
7844
- :param pulumi.Input[str] log_message_timestamp_type: Enum: `CreateTime`, `LogAppendTime`. Define whether the timestamp in the message is message create time or log append time.
7845
- :param pulumi.Input[bool] log_preallocate: Should pre allocate file when create new segment?
7846
- :param pulumi.Input[int] log_retention_bytes: The maximum size of the log before deleting messages.
7847
- :param pulumi.Input[int] log_retention_hours: The number of hours to keep a log file before deleting it.
7848
- :param pulumi.Input[int] log_retention_ms: The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied.
7849
- :param pulumi.Input[int] log_roll_jitter_ms: The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used.
7850
- :param pulumi.Input[int] log_roll_ms: The maximum time before a new log segment is rolled out (in milliseconds).
7851
- :param pulumi.Input[int] log_segment_bytes: The maximum size of a single log file.
7852
- :param pulumi.Input[int] log_segment_delete_delay_ms: The amount of time to wait before deleting a file from the filesystem. Example: `60000`.
7853
- :param pulumi.Input[int] max_connections_per_ip: The maximum number of connections allowed from each ip address (defaults to 2147483647).
7854
- :param pulumi.Input[int] max_incremental_fetch_session_cache_slots: The maximum number of incremental fetch sessions that the broker will maintain. Example: `1000`.
7855
- :param pulumi.Input[int] message_max_bytes: The maximum size of message that the server can receive. Example: `1048588`.
7856
- :param pulumi.Input[int] min_insync_replicas: When a producer sets acks to `all` (or `-1`), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful. Example: `1`.
7857
- :param pulumi.Input[int] num_partitions: Number of partitions for autocreated topics.
7858
- :param pulumi.Input[int] offsets_retention_minutes: Log retention window in minutes for offsets topic. Example: `10080`.
7859
- :param pulumi.Input[int] producer_purgatory_purge_interval_requests: The purge interval (in number of requests) of the producer request purgatory(defaults to 1000).
7860
- :param pulumi.Input[int] replica_fetch_max_bytes: The number of bytes of messages to attempt to fetch for each partition (defaults to 1048576). This is not an absolute maximum, if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made.
7861
- :param pulumi.Input[int] replica_fetch_response_max_bytes: Maximum bytes expected for the entire fetch response (defaults to 10485760). Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum.
7862
- :param pulumi.Input[str] sasl_oauthbearer_expected_audience: The (optional) comma-delimited setting for the broker to use to verify that the JWT was issued for one of the expected audiences.
7863
- :param pulumi.Input[str] sasl_oauthbearer_expected_issuer: Optional setting for the broker to use to verify that the JWT was created by the expected issuer.
7864
- :param pulumi.Input[str] sasl_oauthbearer_jwks_endpoint_url: OIDC JWKS endpoint URL. By setting this the SASL SSL OAuth2/OIDC authentication is enabled. See also other options for SASL OAuth2/OIDC.
7865
- :param pulumi.Input[str] sasl_oauthbearer_sub_claim_name: Name of the scope from which to extract the subject claim from the JWT. Defaults to sub.
7866
- :param pulumi.Input[int] socket_request_max_bytes: The maximum number of bytes in a socket request (defaults to 104857600).
7867
- :param pulumi.Input[bool] transaction_partition_verification_enable: Enable verification that checks that the partition has been added to the transaction before writing transactional records to the partition.
7868
- :param pulumi.Input[int] transaction_remove_expired_transaction_cleanup_interval_ms: The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (defaults to 3600000 (1 hour)).
7869
- :param pulumi.Input[int] transaction_state_log_segment_bytes: The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (defaults to 104857600 (100 mebibytes)).
7880
+ :param pulumi.Input[bool] auto_create_topics_enable: Enable auto-creation of topics. (Default: true).
7881
+ :param pulumi.Input[str] compression_type: Enum: `gzip`, `snappy`, `lz4`, `zstd`, `uncompressed`, `producer`. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `uncompressed` which is equivalent to no compression; and `producer` which means retain the original compression codec set by the producer.(Default: producer).
7882
+ :param pulumi.Input[int] connections_max_idle_ms: Idle connections timeout: the server socket processor threads close the connections that idle for longer than this. (Default: 600000 ms (10 minutes)). Example: `540000`.
7883
+ :param pulumi.Input[int] default_replication_factor: Replication factor for auto-created topics (Default: 3).
7884
+ :param pulumi.Input[int] group_initial_rebalance_delay_ms: The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. (Default: 3000 ms (3 seconds)). Example: `3000`.
7885
+ :param pulumi.Input[int] group_max_session_timeout_ms: The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. Default: 1800000 ms (30 minutes). Example: `1800000`.
7886
+ :param pulumi.Input[int] group_min_session_timeout_ms: The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. (Default: 6000 ms (6 seconds)). Example: `6000`.
7887
+ :param pulumi.Input[int] log_cleaner_delete_retention_ms: How long are delete records retained? (Default: 86400000 (1 day)). Example: `86400000`.
7888
+ :param pulumi.Input[int] log_cleaner_max_compaction_lag_ms: The maximum amount of time message will remain uncompacted. Only applicable for logs that are being compacted. (Default: 9223372036854775807 ms (Long.MAX_VALUE)).
7889
+ :param pulumi.Input[float] log_cleaner_min_cleanable_ratio: Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option. (Default: 0.5). Example: `0.5`.
7890
+ :param pulumi.Input[int] log_cleaner_min_compaction_lag_ms: The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted. (Default: 0 ms).
7891
+ :param pulumi.Input[str] log_cleanup_policy: Enum: `delete`, `compact`, `compact,delete`. The default cleanup policy for segments beyond the retention window (Default: delete).
7892
+ :param pulumi.Input[int] log_flush_interval_messages: The number of messages accumulated on a log partition before messages are flushed to disk (Default: 9223372036854775807 (Long.MAX_VALUE)). Example: `9223372036854775807`.
7893
+ :param pulumi.Input[int] log_flush_interval_ms: The maximum time in ms that a message in any topic is kept in memory (page-cache) before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used (Default: null).
7894
+ :param pulumi.Input[int] log_index_interval_bytes: The interval with which Kafka adds an entry to the offset index (Default: 4096 bytes (4 kibibytes)). Example: `4096`.
7895
+ :param pulumi.Input[int] log_index_size_max_bytes: The maximum size in bytes of the offset index (Default: 10485760 (10 mebibytes)). Example: `10485760`.
7896
+ :param pulumi.Input[int] log_local_retention_bytes: The maximum size of local log segments that can grow for a partition before it gets eligible for deletion. If set to -2, the value of log.retention.bytes is used. The effective value should always be less than or equal to log.retention.bytes value. (Default: -2).
7897
+ :param pulumi.Input[int] log_local_retention_ms: The number of milliseconds to keep the local log segments before it gets eligible for deletion. If set to -2, the value of log.retention.ms is used. The effective value should always be less than or equal to log.retention.ms value. (Default: -2).
7898
+ :param pulumi.Input[bool] log_message_downconversion_enable: This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. (Default: true).
7899
+ :param pulumi.Input[int] log_message_timestamp_difference_max_ms: The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message (Default: 9223372036854775807 (Long.MAX_VALUE)).
7900
+ :param pulumi.Input[str] log_message_timestamp_type: Enum: `CreateTime`, `LogAppendTime`. Define whether the timestamp in the message is message create time or log append time. (Default: CreateTime).
7901
+ :param pulumi.Input[bool] log_preallocate: Should pre allocate file when create new segment? (Default: false).
7902
+ :param pulumi.Input[int] log_retention_bytes: The maximum size of the log before deleting messages (Default: -1).
7903
+ :param pulumi.Input[int] log_retention_hours: The number of hours to keep a log file before deleting it (Default: 168 hours (1 week)).
7904
+ :param pulumi.Input[int] log_retention_ms: The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied. (Default: null, log.retention.hours applies).
7905
+ :param pulumi.Input[int] log_roll_jitter_ms: The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used (Default: null).
7906
+ :param pulumi.Input[int] log_roll_ms: The maximum time before a new log segment is rolled out (in milliseconds). (Default: null, log.roll.hours applies (Default: 168, 7 days)).
7907
+ :param pulumi.Input[int] log_segment_bytes: The maximum size of a single log file (Default: 1073741824 bytes (1 gibibyte)).
7908
+ :param pulumi.Input[int] log_segment_delete_delay_ms: The amount of time to wait before deleting a file from the filesystem (Default: 60000 ms (1 minute)). Example: `60000`.
7909
+ :param pulumi.Input[int] max_connections_per_ip: The maximum number of connections allowed from each ip address (Default: 2147483647).
7910
+ :param pulumi.Input[int] max_incremental_fetch_session_cache_slots: The maximum number of incremental fetch sessions that the broker will maintain. (Default: 1000). Example: `1000`.
7911
+ :param pulumi.Input[int] message_max_bytes: The maximum size of message that the server can receive. (Default: 1048588 bytes (1 mebibyte + 12 bytes)). Example: `1048588`.
7912
+ :param pulumi.Input[int] min_insync_replicas: When a producer sets acks to `all` (or `-1`), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful. (Default: 1). Example: `1`.
7913
+ :param pulumi.Input[int] num_partitions: Number of partitions for auto-created topics (Default: 1).
7914
+ :param pulumi.Input[int] offsets_retention_minutes: Log retention window in minutes for offsets topic (Default: 10080 minutes (7 days)). Example: `10080`.
7915
+ :param pulumi.Input[int] producer_purgatory_purge_interval_requests: The purge interval (in number of requests) of the producer request purgatory (Default: 1000).
7916
+ :param pulumi.Input[int] replica_fetch_max_bytes: The number of bytes of messages to attempt to fetch for each partition . This is not an absolute maximum, if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. (Default: 1048576 bytes (1 mebibytes)).
7917
+ :param pulumi.Input[int] replica_fetch_response_max_bytes: Maximum bytes expected for the entire fetch response. Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum. (Default: 10485760 bytes (10 mebibytes)).
7918
+ :param pulumi.Input[str] sasl_oauthbearer_expected_audience: The (optional) comma-delimited setting for the broker to use to verify that the JWT was issued for one of the expected audiences. (Default: null).
7919
+ :param pulumi.Input[str] sasl_oauthbearer_expected_issuer: Optional setting for the broker to use to verify that the JWT was created by the expected issuer.(Default: null).
7920
+ :param pulumi.Input[str] sasl_oauthbearer_jwks_endpoint_url: OIDC JWKS endpoint URL. By setting this the SASL SSL OAuth2/OIDC authentication is enabled. See also other options for SASL OAuth2/OIDC. (Default: null).
7921
+ :param pulumi.Input[str] sasl_oauthbearer_sub_claim_name: Name of the scope from which to extract the subject claim from the JWT.(Default: sub).
7922
+ :param pulumi.Input[int] socket_request_max_bytes: The maximum number of bytes in a socket request (Default: 104857600 bytes).
7923
+ :param pulumi.Input[bool] transaction_partition_verification_enable: Enable verification that checks that the partition has been added to the transaction before writing transactional records to the partition. (Default: false).
7924
+ :param pulumi.Input[int] transaction_remove_expired_transaction_cleanup_interval_ms: The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (Default: 3600000 ms (1 hour)). Example: `3600000`.
7925
+ :param pulumi.Input[int] transaction_state_log_segment_bytes: The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (Default: 104857600 bytes (100 mebibytes)). Example: `104857600`.
7870
7926
  """
7871
7927
  if auto_create_topics_enable is not None:
7872
7928
  pulumi.set(__self__, "auto_create_topics_enable", auto_create_topics_enable)
@@ -7965,7 +8021,7 @@ class KafkaKafkaUserConfigKafkaArgs:
7965
8021
  @pulumi.getter(name="autoCreateTopicsEnable")
7966
8022
  def auto_create_topics_enable(self) -> Optional[pulumi.Input[bool]]:
7967
8023
  """
7968
- Enable auto creation of topics.
8024
+ Enable auto-creation of topics. (Default: true).
7969
8025
  """
7970
8026
  return pulumi.get(self, "auto_create_topics_enable")
7971
8027
 
@@ -7977,7 +8033,7 @@ class KafkaKafkaUserConfigKafkaArgs:
7977
8033
  @pulumi.getter(name="compressionType")
7978
8034
  def compression_type(self) -> Optional[pulumi.Input[str]]:
7979
8035
  """
7980
- Enum: `gzip`, `snappy`, `lz4`, `zstd`, `uncompressed`, `producer`. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `uncompressed` which is equivalent to no compression; and `producer` which means retain the original compression codec set by the producer.
8036
+ Enum: `gzip`, `snappy`, `lz4`, `zstd`, `uncompressed`, `producer`. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `uncompressed` which is equivalent to no compression; and `producer` which means retain the original compression codec set by the producer.(Default: producer).
7981
8037
  """
7982
8038
  return pulumi.get(self, "compression_type")
7983
8039
 
@@ -7989,7 +8045,7 @@ class KafkaKafkaUserConfigKafkaArgs:
7989
8045
  @pulumi.getter(name="connectionsMaxIdleMs")
7990
8046
  def connections_max_idle_ms(self) -> Optional[pulumi.Input[int]]:
7991
8047
  """
7992
- Idle connections timeout: the server socket processor threads close the connections that idle for longer than this. Example: `540000`.
8048
+ Idle connections timeout: the server socket processor threads close the connections that idle for longer than this. (Default: 600000 ms (10 minutes)). Example: `540000`.
7993
8049
  """
7994
8050
  return pulumi.get(self, "connections_max_idle_ms")
7995
8051
 
@@ -8001,7 +8057,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8001
8057
  @pulumi.getter(name="defaultReplicationFactor")
8002
8058
  def default_replication_factor(self) -> Optional[pulumi.Input[int]]:
8003
8059
  """
8004
- Replication factor for autocreated topics.
8060
+ Replication factor for auto-created topics (Default: 3).
8005
8061
  """
8006
8062
  return pulumi.get(self, "default_replication_factor")
8007
8063
 
@@ -8013,7 +8069,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8013
8069
  @pulumi.getter(name="groupInitialRebalanceDelayMs")
8014
8070
  def group_initial_rebalance_delay_ms(self) -> Optional[pulumi.Input[int]]:
8015
8071
  """
8016
- The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. Example: `3000`.
8072
+ The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. (Default: 3000 ms (3 seconds)). Example: `3000`.
8017
8073
  """
8018
8074
  return pulumi.get(self, "group_initial_rebalance_delay_ms")
8019
8075
 
@@ -8025,7 +8081,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8025
8081
  @pulumi.getter(name="groupMaxSessionTimeoutMs")
8026
8082
  def group_max_session_timeout_ms(self) -> Optional[pulumi.Input[int]]:
8027
8083
  """
8028
- The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. Example: `1800000`.
8084
+ The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. Default: 1800000 ms (30 minutes). Example: `1800000`.
8029
8085
  """
8030
8086
  return pulumi.get(self, "group_max_session_timeout_ms")
8031
8087
 
@@ -8037,7 +8093,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8037
8093
  @pulumi.getter(name="groupMinSessionTimeoutMs")
8038
8094
  def group_min_session_timeout_ms(self) -> Optional[pulumi.Input[int]]:
8039
8095
  """
8040
- The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. Example: `6000`.
8096
+ The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. (Default: 6000 ms (6 seconds)). Example: `6000`.
8041
8097
  """
8042
8098
  return pulumi.get(self, "group_min_session_timeout_ms")
8043
8099
 
@@ -8049,7 +8105,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8049
8105
  @pulumi.getter(name="logCleanerDeleteRetentionMs")
8050
8106
  def log_cleaner_delete_retention_ms(self) -> Optional[pulumi.Input[int]]:
8051
8107
  """
8052
- How long are delete records retained? Example: `86400000`.
8108
+ How long are delete records retained? (Default: 86400000 (1 day)). Example: `86400000`.
8053
8109
  """
8054
8110
  return pulumi.get(self, "log_cleaner_delete_retention_ms")
8055
8111
 
@@ -8061,7 +8117,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8061
8117
  @pulumi.getter(name="logCleanerMaxCompactionLagMs")
8062
8118
  def log_cleaner_max_compaction_lag_ms(self) -> Optional[pulumi.Input[int]]:
8063
8119
  """
8064
- The maximum amount of time message will remain uncompacted. Only applicable for logs that are being compacted.
8120
+ The maximum amount of time message will remain uncompacted. Only applicable for logs that are being compacted. (Default: 9223372036854775807 ms (Long.MAX_VALUE)).
8065
8121
  """
8066
8122
  return pulumi.get(self, "log_cleaner_max_compaction_lag_ms")
8067
8123
 
@@ -8073,7 +8129,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8073
8129
  @pulumi.getter(name="logCleanerMinCleanableRatio")
8074
8130
  def log_cleaner_min_cleanable_ratio(self) -> Optional[pulumi.Input[float]]:
8075
8131
  """
8076
- Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option. Example: `0.5`.
8132
+ Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option. (Default: 0.5). Example: `0.5`.
8077
8133
  """
8078
8134
  return pulumi.get(self, "log_cleaner_min_cleanable_ratio")
8079
8135
 
@@ -8085,7 +8141,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8085
8141
  @pulumi.getter(name="logCleanerMinCompactionLagMs")
8086
8142
  def log_cleaner_min_compaction_lag_ms(self) -> Optional[pulumi.Input[int]]:
8087
8143
  """
8088
- The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.
8144
+ The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted. (Default: 0 ms).
8089
8145
  """
8090
8146
  return pulumi.get(self, "log_cleaner_min_compaction_lag_ms")
8091
8147
 
@@ -8097,7 +8153,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8097
8153
  @pulumi.getter(name="logCleanupPolicy")
8098
8154
  def log_cleanup_policy(self) -> Optional[pulumi.Input[str]]:
8099
8155
  """
8100
- Enum: `delete`, `compact`, `compact,delete`. The default cleanup policy for segments beyond the retention window.
8156
+ Enum: `delete`, `compact`, `compact,delete`. The default cleanup policy for segments beyond the retention window (Default: delete).
8101
8157
  """
8102
8158
  return pulumi.get(self, "log_cleanup_policy")
8103
8159
 
@@ -8109,7 +8165,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8109
8165
  @pulumi.getter(name="logFlushIntervalMessages")
8110
8166
  def log_flush_interval_messages(self) -> Optional[pulumi.Input[int]]:
8111
8167
  """
8112
- The number of messages accumulated on a log partition before messages are flushed to disk. Example: `9223372036854775807`.
8168
+ The number of messages accumulated on a log partition before messages are flushed to disk (Default: 9223372036854775807 (Long.MAX_VALUE)). Example: `9223372036854775807`.
8113
8169
  """
8114
8170
  return pulumi.get(self, "log_flush_interval_messages")
8115
8171
 
@@ -8121,7 +8177,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8121
8177
  @pulumi.getter(name="logFlushIntervalMs")
8122
8178
  def log_flush_interval_ms(self) -> Optional[pulumi.Input[int]]:
8123
8179
  """
8124
- The maximum time in ms that a message in any topic is kept in memory before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used.
8180
+ The maximum time in ms that a message in any topic is kept in memory (page-cache) before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used (Default: null).
8125
8181
  """
8126
8182
  return pulumi.get(self, "log_flush_interval_ms")
8127
8183
 
@@ -8133,7 +8189,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8133
8189
  @pulumi.getter(name="logIndexIntervalBytes")
8134
8190
  def log_index_interval_bytes(self) -> Optional[pulumi.Input[int]]:
8135
8191
  """
8136
- The interval with which Kafka adds an entry to the offset index. Example: `4096`.
8192
+ The interval with which Kafka adds an entry to the offset index (Default: 4096 bytes (4 kibibytes)). Example: `4096`.
8137
8193
  """
8138
8194
  return pulumi.get(self, "log_index_interval_bytes")
8139
8195
 
@@ -8145,7 +8201,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8145
8201
  @pulumi.getter(name="logIndexSizeMaxBytes")
8146
8202
  def log_index_size_max_bytes(self) -> Optional[pulumi.Input[int]]:
8147
8203
  """
8148
- The maximum size in bytes of the offset index. Example: `10485760`.
8204
+ The maximum size in bytes of the offset index (Default: 10485760 (10 mebibytes)). Example: `10485760`.
8149
8205
  """
8150
8206
  return pulumi.get(self, "log_index_size_max_bytes")
8151
8207
 
@@ -8157,7 +8213,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8157
8213
  @pulumi.getter(name="logLocalRetentionBytes")
8158
8214
  def log_local_retention_bytes(self) -> Optional[pulumi.Input[int]]:
8159
8215
  """
8160
- The maximum size of local log segments that can grow for a partition before it gets eligible for deletion. If set to -2, the value of log.retention.bytes is used. The effective value should always be less than or equal to log.retention.bytes value.
8216
+ The maximum size of local log segments that can grow for a partition before it gets eligible for deletion. If set to -2, the value of log.retention.bytes is used. The effective value should always be less than or equal to log.retention.bytes value. (Default: -2).
8161
8217
  """
8162
8218
  return pulumi.get(self, "log_local_retention_bytes")
8163
8219
 
@@ -8169,7 +8225,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8169
8225
  @pulumi.getter(name="logLocalRetentionMs")
8170
8226
  def log_local_retention_ms(self) -> Optional[pulumi.Input[int]]:
8171
8227
  """
8172
- The number of milliseconds to keep the local log segments before it gets eligible for deletion. If set to -2, the value of log.retention.ms is used. The effective value should always be less than or equal to log.retention.ms value.
8228
+ The number of milliseconds to keep the local log segments before it gets eligible for deletion. If set to -2, the value of log.retention.ms is used. The effective value should always be less than or equal to log.retention.ms value. (Default: -2).
8173
8229
  """
8174
8230
  return pulumi.get(self, "log_local_retention_ms")
8175
8231
 
@@ -8181,7 +8237,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8181
8237
  @pulumi.getter(name="logMessageDownconversionEnable")
8182
8238
  def log_message_downconversion_enable(self) -> Optional[pulumi.Input[bool]]:
8183
8239
  """
8184
- This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests.
8240
+ This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. (Default: true).
8185
8241
  """
8186
8242
  return pulumi.get(self, "log_message_downconversion_enable")
8187
8243
 
@@ -8193,7 +8249,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8193
8249
  @pulumi.getter(name="logMessageTimestampDifferenceMaxMs")
8194
8250
  def log_message_timestamp_difference_max_ms(self) -> Optional[pulumi.Input[int]]:
8195
8251
  """
8196
- The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message.
8252
+ The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message (Default: 9223372036854775807 (Long.MAX_VALUE)).
8197
8253
  """
8198
8254
  return pulumi.get(self, "log_message_timestamp_difference_max_ms")
8199
8255
 
@@ -8205,7 +8261,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8205
8261
  @pulumi.getter(name="logMessageTimestampType")
8206
8262
  def log_message_timestamp_type(self) -> Optional[pulumi.Input[str]]:
8207
8263
  """
8208
- Enum: `CreateTime`, `LogAppendTime`. Define whether the timestamp in the message is message create time or log append time.
8264
+ Enum: `CreateTime`, `LogAppendTime`. Define whether the timestamp in the message is message create time or log append time. (Default: CreateTime).
8209
8265
  """
8210
8266
  return pulumi.get(self, "log_message_timestamp_type")
8211
8267
 
@@ -8217,7 +8273,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8217
8273
  @pulumi.getter(name="logPreallocate")
8218
8274
  def log_preallocate(self) -> Optional[pulumi.Input[bool]]:
8219
8275
  """
8220
- Should pre allocate file when create new segment?
8276
+ Should pre allocate file when create new segment? (Default: false).
8221
8277
  """
8222
8278
  return pulumi.get(self, "log_preallocate")
8223
8279
 
@@ -8229,7 +8285,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8229
8285
  @pulumi.getter(name="logRetentionBytes")
8230
8286
  def log_retention_bytes(self) -> Optional[pulumi.Input[int]]:
8231
8287
  """
8232
- The maximum size of the log before deleting messages.
8288
+ The maximum size of the log before deleting messages (Default: -1).
8233
8289
  """
8234
8290
  return pulumi.get(self, "log_retention_bytes")
8235
8291
 
@@ -8241,7 +8297,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8241
8297
  @pulumi.getter(name="logRetentionHours")
8242
8298
  def log_retention_hours(self) -> Optional[pulumi.Input[int]]:
8243
8299
  """
8244
- The number of hours to keep a log file before deleting it.
8300
+ The number of hours to keep a log file before deleting it (Default: 168 hours (1 week)).
8245
8301
  """
8246
8302
  return pulumi.get(self, "log_retention_hours")
8247
8303
 
@@ -8253,7 +8309,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8253
8309
  @pulumi.getter(name="logRetentionMs")
8254
8310
  def log_retention_ms(self) -> Optional[pulumi.Input[int]]:
8255
8311
  """
8256
- The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied.
8312
+ The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied. (Default: null, log.retention.hours applies).
8257
8313
  """
8258
8314
  return pulumi.get(self, "log_retention_ms")
8259
8315
 
@@ -8265,7 +8321,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8265
8321
  @pulumi.getter(name="logRollJitterMs")
8266
8322
  def log_roll_jitter_ms(self) -> Optional[pulumi.Input[int]]:
8267
8323
  """
8268
- The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used.
8324
+ The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used (Default: null).
8269
8325
  """
8270
8326
  return pulumi.get(self, "log_roll_jitter_ms")
8271
8327
 
@@ -8277,7 +8333,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8277
8333
  @pulumi.getter(name="logRollMs")
8278
8334
  def log_roll_ms(self) -> Optional[pulumi.Input[int]]:
8279
8335
  """
8280
- The maximum time before a new log segment is rolled out (in milliseconds).
8336
+ The maximum time before a new log segment is rolled out (in milliseconds). (Default: null, log.roll.hours applies (Default: 168, 7 days)).
8281
8337
  """
8282
8338
  return pulumi.get(self, "log_roll_ms")
8283
8339
 
@@ -8289,7 +8345,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8289
8345
  @pulumi.getter(name="logSegmentBytes")
8290
8346
  def log_segment_bytes(self) -> Optional[pulumi.Input[int]]:
8291
8347
  """
8292
- The maximum size of a single log file.
8348
+ The maximum size of a single log file (Default: 1073741824 bytes (1 gibibyte)).
8293
8349
  """
8294
8350
  return pulumi.get(self, "log_segment_bytes")
8295
8351
 
@@ -8301,7 +8357,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8301
8357
  @pulumi.getter(name="logSegmentDeleteDelayMs")
8302
8358
  def log_segment_delete_delay_ms(self) -> Optional[pulumi.Input[int]]:
8303
8359
  """
8304
- The amount of time to wait before deleting a file from the filesystem. Example: `60000`.
8360
+ The amount of time to wait before deleting a file from the filesystem (Default: 60000 ms (1 minute)). Example: `60000`.
8305
8361
  """
8306
8362
  return pulumi.get(self, "log_segment_delete_delay_ms")
8307
8363
 
@@ -8313,7 +8369,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8313
8369
  @pulumi.getter(name="maxConnectionsPerIp")
8314
8370
  def max_connections_per_ip(self) -> Optional[pulumi.Input[int]]:
8315
8371
  """
8316
- The maximum number of connections allowed from each ip address (defaults to 2147483647).
8372
+ The maximum number of connections allowed from each ip address (Default: 2147483647).
8317
8373
  """
8318
8374
  return pulumi.get(self, "max_connections_per_ip")
8319
8375
 
@@ -8325,7 +8381,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8325
8381
  @pulumi.getter(name="maxIncrementalFetchSessionCacheSlots")
8326
8382
  def max_incremental_fetch_session_cache_slots(self) -> Optional[pulumi.Input[int]]:
8327
8383
  """
8328
- The maximum number of incremental fetch sessions that the broker will maintain. Example: `1000`.
8384
+ The maximum number of incremental fetch sessions that the broker will maintain. (Default: 1000). Example: `1000`.
8329
8385
  """
8330
8386
  return pulumi.get(self, "max_incremental_fetch_session_cache_slots")
8331
8387
 
@@ -8337,7 +8393,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8337
8393
  @pulumi.getter(name="messageMaxBytes")
8338
8394
  def message_max_bytes(self) -> Optional[pulumi.Input[int]]:
8339
8395
  """
8340
- The maximum size of message that the server can receive. Example: `1048588`.
8396
+ The maximum size of message that the server can receive. (Default: 1048588 bytes (1 mebibyte + 12 bytes)). Example: `1048588`.
8341
8397
  """
8342
8398
  return pulumi.get(self, "message_max_bytes")
8343
8399
 
@@ -8349,7 +8405,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8349
8405
  @pulumi.getter(name="minInsyncReplicas")
8350
8406
  def min_insync_replicas(self) -> Optional[pulumi.Input[int]]:
8351
8407
  """
8352
- When a producer sets acks to `all` (or `-1`), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful. Example: `1`.
8408
+ When a producer sets acks to `all` (or `-1`), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful. (Default: 1). Example: `1`.
8353
8409
  """
8354
8410
  return pulumi.get(self, "min_insync_replicas")
8355
8411
 
@@ -8361,7 +8417,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8361
8417
  @pulumi.getter(name="numPartitions")
8362
8418
  def num_partitions(self) -> Optional[pulumi.Input[int]]:
8363
8419
  """
8364
- Number of partitions for autocreated topics.
8420
+ Number of partitions for auto-created topics (Default: 1).
8365
8421
  """
8366
8422
  return pulumi.get(self, "num_partitions")
8367
8423
 
@@ -8373,7 +8429,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8373
8429
  @pulumi.getter(name="offsetsRetentionMinutes")
8374
8430
  def offsets_retention_minutes(self) -> Optional[pulumi.Input[int]]:
8375
8431
  """
8376
- Log retention window in minutes for offsets topic. Example: `10080`.
8432
+ Log retention window in minutes for offsets topic (Default: 10080 minutes (7 days)). Example: `10080`.
8377
8433
  """
8378
8434
  return pulumi.get(self, "offsets_retention_minutes")
8379
8435
 
@@ -8385,7 +8441,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8385
8441
  @pulumi.getter(name="producerPurgatoryPurgeIntervalRequests")
8386
8442
  def producer_purgatory_purge_interval_requests(self) -> Optional[pulumi.Input[int]]:
8387
8443
  """
8388
- The purge interval (in number of requests) of the producer request purgatory(defaults to 1000).
8444
+ The purge interval (in number of requests) of the producer request purgatory (Default: 1000).
8389
8445
  """
8390
8446
  return pulumi.get(self, "producer_purgatory_purge_interval_requests")
8391
8447
 
@@ -8397,7 +8453,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8397
8453
  @pulumi.getter(name="replicaFetchMaxBytes")
8398
8454
  def replica_fetch_max_bytes(self) -> Optional[pulumi.Input[int]]:
8399
8455
  """
8400
- The number of bytes of messages to attempt to fetch for each partition (defaults to 1048576). This is not an absolute maximum, if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made.
8456
+ The number of bytes of messages to attempt to fetch for each partition . This is not an absolute maximum, if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. (Default: 1048576 bytes (1 mebibytes)).
8401
8457
  """
8402
8458
  return pulumi.get(self, "replica_fetch_max_bytes")
8403
8459
 
@@ -8409,7 +8465,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8409
8465
  @pulumi.getter(name="replicaFetchResponseMaxBytes")
8410
8466
  def replica_fetch_response_max_bytes(self) -> Optional[pulumi.Input[int]]:
8411
8467
  """
8412
- Maximum bytes expected for the entire fetch response (defaults to 10485760). Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum.
8468
+ Maximum bytes expected for the entire fetch response. Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum. (Default: 10485760 bytes (10 mebibytes)).
8413
8469
  """
8414
8470
  return pulumi.get(self, "replica_fetch_response_max_bytes")
8415
8471
 
@@ -8421,7 +8477,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8421
8477
  @pulumi.getter(name="saslOauthbearerExpectedAudience")
8422
8478
  def sasl_oauthbearer_expected_audience(self) -> Optional[pulumi.Input[str]]:
8423
8479
  """
8424
- The (optional) comma-delimited setting for the broker to use to verify that the JWT was issued for one of the expected audiences.
8480
+ The (optional) comma-delimited setting for the broker to use to verify that the JWT was issued for one of the expected audiences. (Default: null).
8425
8481
  """
8426
8482
  return pulumi.get(self, "sasl_oauthbearer_expected_audience")
8427
8483
 
@@ -8433,7 +8489,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8433
8489
  @pulumi.getter(name="saslOauthbearerExpectedIssuer")
8434
8490
  def sasl_oauthbearer_expected_issuer(self) -> Optional[pulumi.Input[str]]:
8435
8491
  """
8436
- Optional setting for the broker to use to verify that the JWT was created by the expected issuer.
8492
+ Optional setting for the broker to use to verify that the JWT was created by the expected issuer. (Default: null).
8437
8493
  """
8438
8494
  return pulumi.get(self, "sasl_oauthbearer_expected_issuer")
8439
8495
 
@@ -8445,7 +8501,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8445
8501
  @pulumi.getter(name="saslOauthbearerJwksEndpointUrl")
8446
8502
  def sasl_oauthbearer_jwks_endpoint_url(self) -> Optional[pulumi.Input[str]]:
8447
8503
  """
8448
- OIDC JWKS endpoint URL. By setting this the SASL SSL OAuth2/OIDC authentication is enabled. See also other options for SASL OAuth2/OIDC.
8504
+ OIDC JWKS endpoint URL. By setting this the SASL SSL OAuth2/OIDC authentication is enabled. See also other options for SASL OAuth2/OIDC. (Default: null).
8449
8505
  """
8450
8506
  return pulumi.get(self, "sasl_oauthbearer_jwks_endpoint_url")
8451
8507
 
@@ -8457,7 +8513,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8457
8513
  @pulumi.getter(name="saslOauthbearerSubClaimName")
8458
8514
  def sasl_oauthbearer_sub_claim_name(self) -> Optional[pulumi.Input[str]]:
8459
8515
  """
8460
- Name of the scope from which to extract the subject claim from the JWT. Defaults to sub.
8516
+ Name of the scope from which to extract the subject claim from the JWT. (Default: sub).
8461
8517
  """
8462
8518
  return pulumi.get(self, "sasl_oauthbearer_sub_claim_name")
8463
8519
 
@@ -8469,7 +8525,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8469
8525
  @pulumi.getter(name="socketRequestMaxBytes")
8470
8526
  def socket_request_max_bytes(self) -> Optional[pulumi.Input[int]]:
8471
8527
  """
8472
- The maximum number of bytes in a socket request (defaults to 104857600).
8528
+ The maximum number of bytes in a socket request (Default: 104857600 bytes).
8473
8529
  """
8474
8530
  return pulumi.get(self, "socket_request_max_bytes")
8475
8531
 
@@ -8481,7 +8537,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8481
8537
  @pulumi.getter(name="transactionPartitionVerificationEnable")
8482
8538
  def transaction_partition_verification_enable(self) -> Optional[pulumi.Input[bool]]:
8483
8539
  """
8484
- Enable verification that checks that the partition has been added to the transaction before writing transactional records to the partition.
8540
+ Enable verification that checks that the partition has been added to the transaction before writing transactional records to the partition. (Default: false).
8485
8541
  """
8486
8542
  return pulumi.get(self, "transaction_partition_verification_enable")
8487
8543
 
@@ -8493,7 +8549,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8493
8549
  @pulumi.getter(name="transactionRemoveExpiredTransactionCleanupIntervalMs")
8494
8550
  def transaction_remove_expired_transaction_cleanup_interval_ms(self) -> Optional[pulumi.Input[int]]:
8495
8551
  """
8496
- The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (defaults to 3600000 (1 hour)).
8552
+ The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (Default: 3600000 ms (1 hour)). Example: `3600000`.
8497
8553
  """
8498
8554
  return pulumi.get(self, "transaction_remove_expired_transaction_cleanup_interval_ms")
8499
8555
 
@@ -8505,7 +8561,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8505
8561
  @pulumi.getter(name="transactionStateLogSegmentBytes")
8506
8562
  def transaction_state_log_segment_bytes(self) -> Optional[pulumi.Input[int]]:
8507
8563
  """
8508
- The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (defaults to 104857600 (100 mebibytes)).
8564
+ The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (Default: 104857600 bytes (100 mebibytes)). Example: `104857600`.
8509
8565
  """
8510
8566
  return pulumi.get(self, "transaction_state_log_segment_bytes")
8511
8567
 
@@ -13225,6 +13281,7 @@ class MySqlMysqlUserConfigMigrationArgs:
13225
13281
  port: pulumi.Input[int],
13226
13282
  dbname: Optional[pulumi.Input[str]] = None,
13227
13283
  ignore_dbs: Optional[pulumi.Input[str]] = None,
13284
+ ignore_roles: Optional[pulumi.Input[str]] = None,
13228
13285
  method: Optional[pulumi.Input[str]] = None,
13229
13286
  password: Optional[pulumi.Input[str]] = None,
13230
13287
  ssl: Optional[pulumi.Input[bool]] = None,
@@ -13234,6 +13291,7 @@ class MySqlMysqlUserConfigMigrationArgs:
13234
13291
  :param pulumi.Input[int] port: Port number of the server where to migrate data from. Example: `1234`.
13235
13292
  :param pulumi.Input[str] dbname: Database name for bootstrapping the initial connection. Example: `defaultdb`.
13236
13293
  :param pulumi.Input[str] ignore_dbs: Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment). Example: `db1,db2`.
13294
+ :param pulumi.Input[str] ignore_roles: Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment). Example: `role1,role2`.
13237
13295
  :param pulumi.Input[str] method: Enum: `dump`, `replication`. The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
13238
13296
  :param pulumi.Input[str] password: Password for authentication with the server where to migrate data from. Example: `jjKk45Nnd`.
13239
13297
  :param pulumi.Input[bool] ssl: The server where to migrate data from is secured with SSL. Default: `true`.
@@ -13245,6 +13303,8 @@ class MySqlMysqlUserConfigMigrationArgs:
13245
13303
  pulumi.set(__self__, "dbname", dbname)
13246
13304
  if ignore_dbs is not None:
13247
13305
  pulumi.set(__self__, "ignore_dbs", ignore_dbs)
13306
+ if ignore_roles is not None:
13307
+ pulumi.set(__self__, "ignore_roles", ignore_roles)
13248
13308
  if method is not None:
13249
13309
  pulumi.set(__self__, "method", method)
13250
13310
  if password is not None:
@@ -13302,6 +13362,18 @@ class MySqlMysqlUserConfigMigrationArgs:
13302
13362
  def ignore_dbs(self, value: Optional[pulumi.Input[str]]):
13303
13363
  pulumi.set(self, "ignore_dbs", value)
13304
13364
 
13365
+ @property
13366
+ @pulumi.getter(name="ignoreRoles")
13367
+ def ignore_roles(self) -> Optional[pulumi.Input[str]]:
13368
+ """
13369
+ Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment). Example: `role1,role2`.
13370
+ """
13371
+ return pulumi.get(self, "ignore_roles")
13372
+
13373
+ @ignore_roles.setter
13374
+ def ignore_roles(self, value: Optional[pulumi.Input[str]]):
13375
+ pulumi.set(self, "ignore_roles", value)
13376
+
13305
13377
  @property
13306
13378
  @pulumi.getter
13307
13379
  def method(self) -> Optional[pulumi.Input[str]]:
@@ -17704,6 +17776,7 @@ class PgPgUserConfigMigrationArgs:
17704
17776
  port: pulumi.Input[int],
17705
17777
  dbname: Optional[pulumi.Input[str]] = None,
17706
17778
  ignore_dbs: Optional[pulumi.Input[str]] = None,
17779
+ ignore_roles: Optional[pulumi.Input[str]] = None,
17707
17780
  method: Optional[pulumi.Input[str]] = None,
17708
17781
  password: Optional[pulumi.Input[str]] = None,
17709
17782
  ssl: Optional[pulumi.Input[bool]] = None,
@@ -17713,6 +17786,7 @@ class PgPgUserConfigMigrationArgs:
17713
17786
  :param pulumi.Input[int] port: Port number of the server where to migrate data from. Example: `1234`.
17714
17787
  :param pulumi.Input[str] dbname: Database name for bootstrapping the initial connection. Example: `defaultdb`.
17715
17788
  :param pulumi.Input[str] ignore_dbs: Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment). Example: `db1,db2`.
17789
+ :param pulumi.Input[str] ignore_roles: Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment). Example: `role1,role2`.
17716
17790
  :param pulumi.Input[str] method: Enum: `dump`, `replication`. The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
17717
17791
  :param pulumi.Input[str] password: Password for authentication with the server where to migrate data from. Example: `jjKk45Nnd`.
17718
17792
  :param pulumi.Input[bool] ssl: The server where to migrate data from is secured with SSL. Default: `true`.
@@ -17724,6 +17798,8 @@ class PgPgUserConfigMigrationArgs:
17724
17798
  pulumi.set(__self__, "dbname", dbname)
17725
17799
  if ignore_dbs is not None:
17726
17800
  pulumi.set(__self__, "ignore_dbs", ignore_dbs)
17801
+ if ignore_roles is not None:
17802
+ pulumi.set(__self__, "ignore_roles", ignore_roles)
17727
17803
  if method is not None:
17728
17804
  pulumi.set(__self__, "method", method)
17729
17805
  if password is not None:
@@ -17781,6 +17857,18 @@ class PgPgUserConfigMigrationArgs:
17781
17857
  def ignore_dbs(self, value: Optional[pulumi.Input[str]]):
17782
17858
  pulumi.set(self, "ignore_dbs", value)
17783
17859
 
17860
+ @property
17861
+ @pulumi.getter(name="ignoreRoles")
17862
+ def ignore_roles(self) -> Optional[pulumi.Input[str]]:
17863
+ """
17864
+ Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment). Example: `role1,role2`.
17865
+ """
17866
+ return pulumi.get(self, "ignore_roles")
17867
+
17868
+ @ignore_roles.setter
17869
+ def ignore_roles(self, value: Optional[pulumi.Input[str]]):
17870
+ pulumi.set(self, "ignore_roles", value)
17871
+
17784
17872
  @property
17785
17873
  @pulumi.getter
17786
17874
  def method(self) -> Optional[pulumi.Input[str]]:
@@ -19023,6 +19111,7 @@ class PgPgUserConfigPgbouncerArgs:
19023
19111
  autodb_pool_mode: Optional[pulumi.Input[str]] = None,
19024
19112
  autodb_pool_size: Optional[pulumi.Input[int]] = None,
19025
19113
  ignore_startup_parameters: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
19114
+ max_prepared_statements: Optional[pulumi.Input[int]] = None,
19026
19115
  min_pool_size: Optional[pulumi.Input[int]] = None,
19027
19116
  server_idle_timeout: Optional[pulumi.Input[int]] = None,
19028
19117
  server_lifetime: Optional[pulumi.Input[int]] = None,
@@ -19033,6 +19122,7 @@ class PgPgUserConfigPgbouncerArgs:
19033
19122
  :param pulumi.Input[str] autodb_pool_mode: Enum: `session`, `transaction`, `statement`. PGBouncer pool mode. Default: `transaction`.
19034
19123
  :param pulumi.Input[int] autodb_pool_size: If non-zero then create automatically a pool of that size per user when a pool doesn't exist. Default: `0`.
19035
19124
  :param pulumi.Input[Sequence[pulumi.Input[str]]] ignore_startup_parameters: List of parameters to ignore when given in startup packet.
19125
+ :param pulumi.Input[int] max_prepared_statements: PgBouncer tracks protocol-level named prepared statements related commands sent by the client in transaction and statement pooling modes when max_prepared_statements is set to a non-zero value. Setting it to 0 disables prepared statements. max_prepared_statements defaults to 100, and its maximum is 3000. Default: `100`.
19036
19126
  :param pulumi.Input[int] min_pool_size: Add more server connections to pool if below this number. Improves behavior when usual load comes suddenly back after period of total inactivity. The value is effectively capped at the pool size. Default: `0`.
19037
19127
  :param pulumi.Input[int] server_idle_timeout: If a server connection has been idle more than this many seconds it will be dropped. If 0 then timeout is disabled. (seconds). Default: `600`.
19038
19128
  :param pulumi.Input[int] server_lifetime: The pooler will close an unused server connection that has been connected longer than this. (seconds). Default: `3600`.
@@ -19048,6 +19138,8 @@ class PgPgUserConfigPgbouncerArgs:
19048
19138
  pulumi.set(__self__, "autodb_pool_size", autodb_pool_size)
19049
19139
  if ignore_startup_parameters is not None:
19050
19140
  pulumi.set(__self__, "ignore_startup_parameters", ignore_startup_parameters)
19141
+ if max_prepared_statements is not None:
19142
+ pulumi.set(__self__, "max_prepared_statements", max_prepared_statements)
19051
19143
  if min_pool_size is not None:
19052
19144
  pulumi.set(__self__, "min_pool_size", min_pool_size)
19053
19145
  if server_idle_timeout is not None:
@@ -19117,6 +19209,18 @@ class PgPgUserConfigPgbouncerArgs:
19117
19209
  def ignore_startup_parameters(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
19118
19210
  pulumi.set(self, "ignore_startup_parameters", value)
19119
19211
 
19212
+ @property
19213
+ @pulumi.getter(name="maxPreparedStatements")
19214
+ def max_prepared_statements(self) -> Optional[pulumi.Input[int]]:
19215
+ """
19216
+ PgBouncer tracks protocol-level named prepared statements related commands sent by the client in transaction and statement pooling modes when max*prepared*statements is set to a non-zero value. Setting it to 0 disables prepared statements. max*prepared*statements defaults to 100, and its maximum is 3000. Default: `100`.
19217
+ """
19218
+ return pulumi.get(self, "max_prepared_statements")
19219
+
19220
+ @max_prepared_statements.setter
19221
+ def max_prepared_statements(self, value: Optional[pulumi.Input[int]]):
19222
+ pulumi.set(self, "max_prepared_statements", value)
19223
+
19120
19224
  @property
19121
19225
  @pulumi.getter(name="minPoolSize")
19122
19226
  def min_pool_size(self) -> Optional[pulumi.Input[int]]:
@@ -20172,6 +20276,7 @@ class RedisRedisUserConfigMigrationArgs:
20172
20276
  port: pulumi.Input[int],
20173
20277
  dbname: Optional[pulumi.Input[str]] = None,
20174
20278
  ignore_dbs: Optional[pulumi.Input[str]] = None,
20279
+ ignore_roles: Optional[pulumi.Input[str]] = None,
20175
20280
  method: Optional[pulumi.Input[str]] = None,
20176
20281
  password: Optional[pulumi.Input[str]] = None,
20177
20282
  ssl: Optional[pulumi.Input[bool]] = None,
@@ -20181,6 +20286,7 @@ class RedisRedisUserConfigMigrationArgs:
20181
20286
  :param pulumi.Input[int] port: Port number of the server where to migrate data from. Example: `1234`.
20182
20287
  :param pulumi.Input[str] dbname: Database name for bootstrapping the initial connection. Example: `defaultdb`.
20183
20288
  :param pulumi.Input[str] ignore_dbs: Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment). Example: `db1,db2`.
20289
+ :param pulumi.Input[str] ignore_roles: Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment). Example: `role1,role2`.
20184
20290
  :param pulumi.Input[str] method: Enum: `dump`, `replication`. The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
20185
20291
  :param pulumi.Input[str] password: Password for authentication with the server where to migrate data from. Example: `jjKk45Nnd`.
20186
20292
  :param pulumi.Input[bool] ssl: The server where to migrate data from is secured with SSL. Default: `true`.
@@ -20192,6 +20298,8 @@ class RedisRedisUserConfigMigrationArgs:
20192
20298
  pulumi.set(__self__, "dbname", dbname)
20193
20299
  if ignore_dbs is not None:
20194
20300
  pulumi.set(__self__, "ignore_dbs", ignore_dbs)
20301
+ if ignore_roles is not None:
20302
+ pulumi.set(__self__, "ignore_roles", ignore_roles)
20195
20303
  if method is not None:
20196
20304
  pulumi.set(__self__, "method", method)
20197
20305
  if password is not None:
@@ -20249,6 +20357,18 @@ class RedisRedisUserConfigMigrationArgs:
20249
20357
  def ignore_dbs(self, value: Optional[pulumi.Input[str]]):
20250
20358
  pulumi.set(self, "ignore_dbs", value)
20251
20359
 
20360
+ @property
20361
+ @pulumi.getter(name="ignoreRoles")
20362
+ def ignore_roles(self) -> Optional[pulumi.Input[str]]:
20363
+ """
20364
+ Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment). Example: `role1,role2`.
20365
+ """
20366
+ return pulumi.get(self, "ignore_roles")
20367
+
20368
+ @ignore_roles.setter
20369
+ def ignore_roles(self, value: Optional[pulumi.Input[str]]):
20370
+ pulumi.set(self, "ignore_roles", value)
20371
+
20252
20372
  @property
20253
20373
  @pulumi.getter
20254
20374
  def method(self) -> Optional[pulumi.Input[str]]:
@@ -20549,7 +20669,9 @@ class ServiceIntegrationClickhouseKafkaUserConfigTableArgs:
20549
20669
  max_rows_per_message: Optional[pulumi.Input[int]] = None,
20550
20670
  num_consumers: Optional[pulumi.Input[int]] = None,
20551
20671
  poll_max_batch_size: Optional[pulumi.Input[int]] = None,
20552
- skip_broken_messages: Optional[pulumi.Input[int]] = None):
20672
+ poll_max_timeout_ms: Optional[pulumi.Input[int]] = None,
20673
+ skip_broken_messages: Optional[pulumi.Input[int]] = None,
20674
+ thread_per_consumer: Optional[pulumi.Input[bool]] = None):
20553
20675
  """
20554
20676
  :param pulumi.Input[Sequence[pulumi.Input['ServiceIntegrationClickhouseKafkaUserConfigTableColumnArgs']]] columns: Table columns
20555
20677
  :param pulumi.Input[str] data_format: Enum: `Avro`, `CSV`, `JSONAsString`, `JSONCompactEachRow`, `JSONCompactStringsEachRow`, `JSONEachRow`, `JSONStringsEachRow`, `MsgPack`, `TSKV`, `TSV`, `TabSeparated`, `RawBLOB`, `AvroConfluent`, `Parquet`. Message data format. Default: `JSONEachRow`.
@@ -20563,7 +20685,9 @@ class ServiceIntegrationClickhouseKafkaUserConfigTableArgs:
20563
20685
  :param pulumi.Input[int] max_rows_per_message: The maximum number of rows produced in one kafka message for row-based formats. Default: `1`.
20564
20686
  :param pulumi.Input[int] num_consumers: The number of consumers per table per replica. Default: `1`.
20565
20687
  :param pulumi.Input[int] poll_max_batch_size: Maximum amount of messages to be polled in a single Kafka poll. Default: `0`.
20688
+ :param pulumi.Input[int] poll_max_timeout_ms: Timeout in milliseconds for a single poll from Kafka. Takes the value of the stream*flush*interval_ms server setting by default (500ms). Default: `0`.
20566
20689
  :param pulumi.Input[int] skip_broken_messages: Skip at least this number of broken messages from Kafka topic per block. Default: `0`.
20690
+ :param pulumi.Input[bool] thread_per_consumer: Provide an independent thread for each consumer. All consumers run in the same thread by default. Default: `false`.
20567
20691
  """
20568
20692
  pulumi.set(__self__, "columns", columns)
20569
20693
  pulumi.set(__self__, "data_format", data_format)
@@ -20584,8 +20708,12 @@ class ServiceIntegrationClickhouseKafkaUserConfigTableArgs:
20584
20708
  pulumi.set(__self__, "num_consumers", num_consumers)
20585
20709
  if poll_max_batch_size is not None:
20586
20710
  pulumi.set(__self__, "poll_max_batch_size", poll_max_batch_size)
20711
+ if poll_max_timeout_ms is not None:
20712
+ pulumi.set(__self__, "poll_max_timeout_ms", poll_max_timeout_ms)
20587
20713
  if skip_broken_messages is not None:
20588
20714
  pulumi.set(__self__, "skip_broken_messages", skip_broken_messages)
20715
+ if thread_per_consumer is not None:
20716
+ pulumi.set(__self__, "thread_per_consumer", thread_per_consumer)
20589
20717
 
20590
20718
  @property
20591
20719
  @pulumi.getter
@@ -20731,6 +20859,18 @@ class ServiceIntegrationClickhouseKafkaUserConfigTableArgs:
20731
20859
  def poll_max_batch_size(self, value: Optional[pulumi.Input[int]]):
20732
20860
  pulumi.set(self, "poll_max_batch_size", value)
20733
20861
 
20862
+ @property
20863
+ @pulumi.getter(name="pollMaxTimeoutMs")
20864
+ def poll_max_timeout_ms(self) -> Optional[pulumi.Input[int]]:
20865
+ """
20866
+ Timeout in milliseconds for a single poll from Kafka. Takes the value of the stream*flush*interval_ms server setting by default (500ms). Default: `0`.
20867
+ """
20868
+ return pulumi.get(self, "poll_max_timeout_ms")
20869
+
20870
+ @poll_max_timeout_ms.setter
20871
+ def poll_max_timeout_ms(self, value: Optional[pulumi.Input[int]]):
20872
+ pulumi.set(self, "poll_max_timeout_ms", value)
20873
+
20734
20874
  @property
20735
20875
  @pulumi.getter(name="skipBrokenMessages")
20736
20876
  def skip_broken_messages(self) -> Optional[pulumi.Input[int]]:
@@ -20743,6 +20883,18 @@ class ServiceIntegrationClickhouseKafkaUserConfigTableArgs:
20743
20883
  def skip_broken_messages(self, value: Optional[pulumi.Input[int]]):
20744
20884
  pulumi.set(self, "skip_broken_messages", value)
20745
20885
 
20886
+ @property
20887
+ @pulumi.getter(name="threadPerConsumer")
20888
+ def thread_per_consumer(self) -> Optional[pulumi.Input[bool]]:
20889
+ """
20890
+ Provide an independent thread for each consumer. All consumers run in the same thread by default. Default: `false`.
20891
+ """
20892
+ return pulumi.get(self, "thread_per_consumer")
20893
+
20894
+ @thread_per_consumer.setter
20895
+ def thread_per_consumer(self, value: Optional[pulumi.Input[bool]]):
20896
+ pulumi.set(self, "thread_per_consumer", value)
20897
+
20746
20898
 
20747
20899
  @pulumi.input_type
20748
20900
  class ServiceIntegrationClickhouseKafkaUserConfigTableColumnArgs:
@@ -25083,6 +25235,7 @@ class ValkeyValkeyUserConfigMigrationArgs:
25083
25235
  port: pulumi.Input[int],
25084
25236
  dbname: Optional[pulumi.Input[str]] = None,
25085
25237
  ignore_dbs: Optional[pulumi.Input[str]] = None,
25238
+ ignore_roles: Optional[pulumi.Input[str]] = None,
25086
25239
  method: Optional[pulumi.Input[str]] = None,
25087
25240
  password: Optional[pulumi.Input[str]] = None,
25088
25241
  ssl: Optional[pulumi.Input[bool]] = None,
@@ -25092,6 +25245,7 @@ class ValkeyValkeyUserConfigMigrationArgs:
25092
25245
  :param pulumi.Input[int] port: Port number of the server where to migrate data from. Example: `1234`.
25093
25246
  :param pulumi.Input[str] dbname: Database name for bootstrapping the initial connection. Example: `defaultdb`.
25094
25247
  :param pulumi.Input[str] ignore_dbs: Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment). Example: `db1,db2`.
25248
+ :param pulumi.Input[str] ignore_roles: Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment). Example: `role1,role2`.
25095
25249
  :param pulumi.Input[str] method: Enum: `dump`, `replication`. The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
25096
25250
  :param pulumi.Input[str] password: Password for authentication with the server where to migrate data from. Example: `jjKk45Nnd`.
25097
25251
  :param pulumi.Input[bool] ssl: The server where to migrate data from is secured with SSL. Default: `true`.
@@ -25103,6 +25257,8 @@ class ValkeyValkeyUserConfigMigrationArgs:
25103
25257
  pulumi.set(__self__, "dbname", dbname)
25104
25258
  if ignore_dbs is not None:
25105
25259
  pulumi.set(__self__, "ignore_dbs", ignore_dbs)
25260
+ if ignore_roles is not None:
25261
+ pulumi.set(__self__, "ignore_roles", ignore_roles)
25106
25262
  if method is not None:
25107
25263
  pulumi.set(__self__, "method", method)
25108
25264
  if password is not None:
@@ -25160,6 +25316,18 @@ class ValkeyValkeyUserConfigMigrationArgs:
25160
25316
  def ignore_dbs(self, value: Optional[pulumi.Input[str]]):
25161
25317
  pulumi.set(self, "ignore_dbs", value)
25162
25318
 
25319
+ @property
25320
+ @pulumi.getter(name="ignoreRoles")
25321
+ def ignore_roles(self) -> Optional[pulumi.Input[str]]:
25322
+ """
25323
+ Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment). Example: `role1,role2`.
25324
+ """
25325
+ return pulumi.get(self, "ignore_roles")
25326
+
25327
+ @ignore_roles.setter
25328
+ def ignore_roles(self, value: Optional[pulumi.Input[str]]):
25329
+ pulumi.set(self, "ignore_roles", value)
25330
+
25163
25331
  @property
25164
25332
  @pulumi.getter
25165
25333
  def method(self) -> Optional[pulumi.Input[str]]: