pulumi-aiven 6.21.0a1722057299__py3-none-any.whl → 6.21.0a1722856076__py3-none-any.whl

This diff shows the changes between publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the package contents as they appear in their respective public registries.

Note: this version of pulumi-aiven has been flagged as a potentially problematic release.

pulumi_aiven/outputs.py CHANGED
@@ -101,6 +101,7 @@ __all__ = [
101
101
  'KafkaConnectorTask',
102
102
  'KafkaKafka',
103
103
  'KafkaKafkaUserConfig',
104
+ 'KafkaKafkaUserConfigFollowerFetching',
104
105
  'KafkaKafkaUserConfigIpFilterObject',
105
106
  'KafkaKafkaUserConfigKafka',
106
107
  'KafkaKafkaUserConfigKafkaAuthenticationMethods',
@@ -380,6 +381,7 @@ __all__ = [
380
381
  'GetKafkaConnectorTaskResult',
381
382
  'GetKafkaKafkaResult',
382
383
  'GetKafkaKafkaUserConfigResult',
384
+ 'GetKafkaKafkaUserConfigFollowerFetchingResult',
383
385
  'GetKafkaKafkaUserConfigIpFilterObjectResult',
384
386
  'GetKafkaKafkaUserConfigKafkaResult',
385
387
  'GetKafkaKafkaUserConfigKafkaAuthenticationMethodsResult',
@@ -2520,6 +2522,8 @@ class DragonflyDragonflyUserConfigMigration(dict):
2520
2522
  suggest = None
2521
2523
  if key == "ignoreDbs":
2522
2524
  suggest = "ignore_dbs"
2525
+ elif key == "ignoreRoles":
2526
+ suggest = "ignore_roles"
2523
2527
 
2524
2528
  if suggest:
2525
2529
  pulumi.log.warn(f"Key '{key}' not found in DragonflyDragonflyUserConfigMigration. Access the value via the '{suggest}' property getter instead.")
@@ -2537,6 +2541,7 @@ class DragonflyDragonflyUserConfigMigration(dict):
2537
2541
  port: int,
2538
2542
  dbname: Optional[str] = None,
2539
2543
  ignore_dbs: Optional[str] = None,
2544
+ ignore_roles: Optional[str] = None,
2540
2545
  method: Optional[str] = None,
2541
2546
  password: Optional[str] = None,
2542
2547
  ssl: Optional[bool] = None,
@@ -2546,6 +2551,7 @@ class DragonflyDragonflyUserConfigMigration(dict):
2546
2551
  :param int port: Port number of the server where to migrate data from. Example: `1234`.
2547
2552
  :param str dbname: Database name for bootstrapping the initial connection. Example: `defaultdb`.
2548
2553
  :param str ignore_dbs: Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment). Example: `db1,db2`.
2554
+ :param str ignore_roles: Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment). Example: `role1,role2`.
2549
2555
  :param str method: Enum: `dump`, `replication`. The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
2550
2556
  :param str password: Password for authentication with the server where to migrate data from. Example: `jjKk45Nnd`.
2551
2557
  :param bool ssl: The server where to migrate data from is secured with SSL. Default: `true`.
@@ -2557,6 +2563,8 @@ class DragonflyDragonflyUserConfigMigration(dict):
2557
2563
  pulumi.set(__self__, "dbname", dbname)
2558
2564
  if ignore_dbs is not None:
2559
2565
  pulumi.set(__self__, "ignore_dbs", ignore_dbs)
2566
+ if ignore_roles is not None:
2567
+ pulumi.set(__self__, "ignore_roles", ignore_roles)
2560
2568
  if method is not None:
2561
2569
  pulumi.set(__self__, "method", method)
2562
2570
  if password is not None:
@@ -2598,6 +2606,14 @@ class DragonflyDragonflyUserConfigMigration(dict):
2598
2606
  """
2599
2607
  return pulumi.get(self, "ignore_dbs")
2600
2608
 
2609
+ @property
2610
+ @pulumi.getter(name="ignoreRoles")
2611
+ def ignore_roles(self) -> Optional[str]:
2612
+ """
2613
+ Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment). Example: `role1,role2`.
2614
+ """
2615
+ return pulumi.get(self, "ignore_roles")
2616
+
2601
2617
  @property
2602
2618
  @pulumi.getter
2603
2619
  def method(self) -> Optional[str]:
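
The new ignore_roles field sits alongside ignore_dbs in the Dragonfly migration block. A minimal sketch of supplying it on the input side, assuming the matching DragonflyDragonflyUserConfigMigrationArgs input class and placeholder connection details:

import pulumi_aiven as aiven

# Placeholder source-database coordinates. ignore_roles skips the listed
# PostgreSQL roles during migration, in the same way ignore_dbs skips databases.
migration = aiven.DragonflyDragonflyUserConfigMigrationArgs(
    host="source.example.com",
    port=5432,
    dbname="defaultdb",
    ignore_dbs="db1,db2",
    ignore_roles="role1,role2",
)
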
@@ -7230,6 +7246,8 @@ class KafkaKafkaUserConfig(dict):
7230
7246
  suggest = "aiven_kafka_topic_messages"
7231
7247
  elif key == "customDomain":
7232
7248
  suggest = "custom_domain"
7249
+ elif key == "followerFetching":
7250
+ suggest = "follower_fetching"
7233
7251
  elif key == "ipFilterObjects":
7234
7252
  suggest = "ip_filter_objects"
7235
7253
  elif key == "ipFilterStrings":
@@ -7286,6 +7304,7 @@ class KafkaKafkaUserConfig(dict):
7286
7304
  additional_backup_regions: Optional[str] = None,
7287
7305
  aiven_kafka_topic_messages: Optional[bool] = None,
7288
7306
  custom_domain: Optional[str] = None,
7307
+ follower_fetching: Optional['outputs.KafkaKafkaUserConfigFollowerFetching'] = None,
7289
7308
  ip_filter_objects: Optional[Sequence['outputs.KafkaKafkaUserConfigIpFilterObject']] = None,
7290
7309
  ip_filter_strings: Optional[Sequence[str]] = None,
7291
7310
  ip_filters: Optional[Sequence[str]] = None,
@@ -7311,6 +7330,7 @@ class KafkaKafkaUserConfig(dict):
7311
7330
  :param str additional_backup_regions: Additional Cloud Regions for Backup Replication.
7312
7331
  :param bool aiven_kafka_topic_messages: Allow access to read Kafka topic messages in the Aiven Console and REST API.
7313
7332
  :param str custom_domain: Serve the web frontend using a custom CNAME pointing to the Aiven DNS name. Example: `grafana.example.org`.
7333
+ :param 'KafkaKafkaUserConfigFollowerFetchingArgs' follower_fetching: Enable follower fetching
7314
7334
  :param Sequence['KafkaKafkaUserConfigIpFilterObjectArgs'] ip_filter_objects: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`
7315
7335
  :param Sequence[str] ip_filter_strings: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`.
7316
7336
  :param Sequence[str] ip_filters: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`.
@@ -7338,6 +7358,8 @@ class KafkaKafkaUserConfig(dict):
7338
7358
  pulumi.set(__self__, "aiven_kafka_topic_messages", aiven_kafka_topic_messages)
7339
7359
  if custom_domain is not None:
7340
7360
  pulumi.set(__self__, "custom_domain", custom_domain)
7361
+ if follower_fetching is not None:
7362
+ pulumi.set(__self__, "follower_fetching", follower_fetching)
7341
7363
  if ip_filter_objects is not None:
7342
7364
  pulumi.set(__self__, "ip_filter_objects", ip_filter_objects)
7343
7365
  if ip_filter_strings is not None:
@@ -7406,6 +7428,14 @@ class KafkaKafkaUserConfig(dict):
7406
7428
  """
7407
7429
  return pulumi.get(self, "custom_domain")
7408
7430
 
7431
+ @property
7432
+ @pulumi.getter(name="followerFetching")
7433
+ def follower_fetching(self) -> Optional['outputs.KafkaKafkaUserConfigFollowerFetching']:
7434
+ """
7435
+ Enable follower fetching
7436
+ """
7437
+ return pulumi.get(self, "follower_fetching")
7438
+
7409
7439
  @property
7410
7440
  @pulumi.getter(name="ipFilterObjects")
7411
7441
  def ip_filter_objects(self) -> Optional[Sequence['outputs.KafkaKafkaUserConfigIpFilterObject']]:
@@ -7573,6 +7603,25 @@ class KafkaKafkaUserConfig(dict):
7573
7603
  return pulumi.get(self, "tiered_storage")
7574
7604
 
7575
7605
 
7606
+ @pulumi.output_type
7607
+ class KafkaKafkaUserConfigFollowerFetching(dict):
7608
+ def __init__(__self__, *,
7609
+ enabled: Optional[bool] = None):
7610
+ """
7611
+ :param bool enabled: Whether to enable the follower fetching functionality.
7612
+ """
7613
+ if enabled is not None:
7614
+ pulumi.set(__self__, "enabled", enabled)
7615
+
7616
+ @property
7617
+ @pulumi.getter
7618
+ def enabled(self) -> Optional[bool]:
7619
+ """
7620
+ Whether to enable the follower fetching functionality.
7621
+ """
7622
+ return pulumi.get(self, "enabled")
7623
+
7624
+
7576
7625
  @pulumi.output_type
7577
7626
  class KafkaKafkaUserConfigIpFilterObject(dict):
7578
7627
  def __init__(__self__, *,
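
The follower_fetching block added above is referenced from KafkaKafkaUserConfig via the KafkaKafkaUserConfigFollowerFetchingArgs input type mentioned in its docstring. A minimal sketch of enabling it on a Kafka service; the project, cloud and plan values are placeholders:

import pulumi_aiven as aiven

kafka = aiven.Kafka(
    "example-kafka",
    project="my-project",                 # placeholder
    cloud_name="google-europe-west1",     # placeholder
    plan="business-4",                    # placeholder
    service_name="example-kafka",
    kafka_user_config=aiven.KafkaKafkaUserConfigArgs(
        # New in this release: enable follower fetching on the service.
        follower_fetching=aiven.KafkaKafkaUserConfigFollowerFetchingArgs(
            enabled=True,
        ),
    ),
)
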
@@ -7760,52 +7809,52 @@ class KafkaKafkaUserConfigKafka(dict):
7760
7809
  transaction_remove_expired_transaction_cleanup_interval_ms: Optional[int] = None,
7761
7810
  transaction_state_log_segment_bytes: Optional[int] = None):
7762
7811
  """
7763
- :param bool auto_create_topics_enable: Enable auto creation of topics.
7764
- :param str compression_type: Enum: `gzip`, `snappy`, `lz4`, `zstd`, `uncompressed`, `producer`. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `uncompressed` which is equivalent to no compression; and `producer` which means retain the original compression codec set by the producer.
7765
- :param int connections_max_idle_ms: Idle connections timeout: the server socket processor threads close the connections that idle for longer than this. Example: `540000`.
7766
- :param int default_replication_factor: Replication factor for autocreated topics.
7767
- :param int group_initial_rebalance_delay_ms: The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. Example: `3000`.
7768
- :param int group_max_session_timeout_ms: The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. Example: `1800000`.
7769
- :param int group_min_session_timeout_ms: The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. Example: `6000`.
7770
- :param int log_cleaner_delete_retention_ms: How long are delete records retained? Example: `86400000`.
7771
- :param int log_cleaner_max_compaction_lag_ms: The maximum amount of time message will remain uncompacted. Only applicable for logs that are being compacted.
7772
- :param float log_cleaner_min_cleanable_ratio: Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option. Example: `0.5`.
7773
- :param int log_cleaner_min_compaction_lag_ms: The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.
7774
- :param str log_cleanup_policy: Enum: `delete`, `compact`, `compact,delete`. The default cleanup policy for segments beyond the retention window.
7775
- :param int log_flush_interval_messages: The number of messages accumulated on a log partition before messages are flushed to disk. Example: `9223372036854775807`.
7776
- :param int log_flush_interval_ms: The maximum time in ms that a message in any topic is kept in memory before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used.
7777
- :param int log_index_interval_bytes: The interval with which Kafka adds an entry to the offset index. Example: `4096`.
7778
- :param int log_index_size_max_bytes: The maximum size in bytes of the offset index. Example: `10485760`.
7779
- :param int log_local_retention_bytes: The maximum size of local log segments that can grow for a partition before it gets eligible for deletion. If set to -2, the value of log.retention.bytes is used. The effective value should always be less than or equal to log.retention.bytes value.
7780
- :param int log_local_retention_ms: The number of milliseconds to keep the local log segments before it gets eligible for deletion. If set to -2, the value of log.retention.ms is used. The effective value should always be less than or equal to log.retention.ms value.
7781
- :param bool log_message_downconversion_enable: This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests.
7782
- :param int log_message_timestamp_difference_max_ms: The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message.
7783
- :param str log_message_timestamp_type: Enum: `CreateTime`, `LogAppendTime`. Define whether the timestamp in the message is message create time or log append time.
7784
- :param bool log_preallocate: Should pre allocate file when create new segment?
7785
- :param int log_retention_bytes: The maximum size of the log before deleting messages.
7786
- :param int log_retention_hours: The number of hours to keep a log file before deleting it.
7787
- :param int log_retention_ms: The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied.
7788
- :param int log_roll_jitter_ms: The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used.
7789
- :param int log_roll_ms: The maximum time before a new log segment is rolled out (in milliseconds).
7790
- :param int log_segment_bytes: The maximum size of a single log file.
7791
- :param int log_segment_delete_delay_ms: The amount of time to wait before deleting a file from the filesystem. Example: `60000`.
7792
- :param int max_connections_per_ip: The maximum number of connections allowed from each ip address (defaults to 2147483647).
7793
- :param int max_incremental_fetch_session_cache_slots: The maximum number of incremental fetch sessions that the broker will maintain. Example: `1000`.
7794
- :param int message_max_bytes: The maximum size of message that the server can receive. Example: `1048588`.
7795
- :param int min_insync_replicas: When a producer sets acks to `all` (or `-1`), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful. Example: `1`.
7796
- :param int num_partitions: Number of partitions for autocreated topics.
7797
- :param int offsets_retention_minutes: Log retention window in minutes for offsets topic. Example: `10080`.
7798
- :param int producer_purgatory_purge_interval_requests: The purge interval (in number of requests) of the producer request purgatory(defaults to 1000).
7799
- :param int replica_fetch_max_bytes: The number of bytes of messages to attempt to fetch for each partition (defaults to 1048576). This is not an absolute maximum, if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made.
7800
- :param int replica_fetch_response_max_bytes: Maximum bytes expected for the entire fetch response (defaults to 10485760). Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum.
7801
- :param str sasl_oauthbearer_expected_audience: The (optional) comma-delimited setting for the broker to use to verify that the JWT was issued for one of the expected audiences.
7802
- :param str sasl_oauthbearer_expected_issuer: Optional setting for the broker to use to verify that the JWT was created by the expected issuer.
7803
- :param str sasl_oauthbearer_jwks_endpoint_url: OIDC JWKS endpoint URL. By setting this the SASL SSL OAuth2/OIDC authentication is enabled. See also other options for SASL OAuth2/OIDC.
7804
- :param str sasl_oauthbearer_sub_claim_name: Name of the scope from which to extract the subject claim from the JWT. Defaults to sub.
7805
- :param int socket_request_max_bytes: The maximum number of bytes in a socket request (defaults to 104857600).
7806
- :param bool transaction_partition_verification_enable: Enable verification that checks that the partition has been added to the transaction before writing transactional records to the partition.
7807
- :param int transaction_remove_expired_transaction_cleanup_interval_ms: The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (defaults to 3600000 (1 hour)).
7808
- :param int transaction_state_log_segment_bytes: The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (defaults to 104857600 (100 mebibytes)).
7812
+ :param bool auto_create_topics_enable: Enable auto-creation of topics. (Default: true).
7813
+ :param str compression_type: Enum: `gzip`, `snappy`, `lz4`, `zstd`, `uncompressed`, `producer`. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `uncompressed` which is equivalent to no compression; and `producer` which means retain the original compression codec set by the producer.(Default: producer).
7814
+ :param int connections_max_idle_ms: Idle connections timeout: the server socket processor threads close the connections that idle for longer than this. (Default: 600000 ms (10 minutes)). Example: `540000`.
7815
+ :param int default_replication_factor: Replication factor for auto-created topics (Default: 3).
7816
+ :param int group_initial_rebalance_delay_ms: The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. (Default: 3000 ms (3 seconds)). Example: `3000`.
7817
+ :param int group_max_session_timeout_ms: The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. Default: 1800000 ms (30 minutes). Example: `1800000`.
7818
+ :param int group_min_session_timeout_ms: The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. (Default: 6000 ms (6 seconds)). Example: `6000`.
7819
+ :param int log_cleaner_delete_retention_ms: How long are delete records retained? (Default: 86400000 (1 day)). Example: `86400000`.
7820
+ :param int log_cleaner_max_compaction_lag_ms: The maximum amount of time message will remain uncompacted. Only applicable for logs that are being compacted. (Default: 9223372036854775807 ms (Long.MAX_VALUE)).
7821
+ :param float log_cleaner_min_cleanable_ratio: Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option. (Default: 0.5). Example: `0.5`.
7822
+ :param int log_cleaner_min_compaction_lag_ms: The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted. (Default: 0 ms).
7823
+ :param str log_cleanup_policy: Enum: `delete`, `compact`, `compact,delete`. The default cleanup policy for segments beyond the retention window (Default: delete).
7824
+ :param int log_flush_interval_messages: The number of messages accumulated on a log partition before messages are flushed to disk (Default: 9223372036854775807 (Long.MAX_VALUE)). Example: `9223372036854775807`.
7825
+ :param int log_flush_interval_ms: The maximum time in ms that a message in any topic is kept in memory (page-cache) before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used (Default: null).
7826
+ :param int log_index_interval_bytes: The interval with which Kafka adds an entry to the offset index (Default: 4096 bytes (4 kibibytes)). Example: `4096`.
7827
+ :param int log_index_size_max_bytes: The maximum size in bytes of the offset index (Default: 10485760 (10 mebibytes)). Example: `10485760`.
7828
+ :param int log_local_retention_bytes: The maximum size of local log segments that can grow for a partition before it gets eligible for deletion. If set to -2, the value of log.retention.bytes is used. The effective value should always be less than or equal to log.retention.bytes value. (Default: -2).
7829
+ :param int log_local_retention_ms: The number of milliseconds to keep the local log segments before it gets eligible for deletion. If set to -2, the value of log.retention.ms is used. The effective value should always be less than or equal to log.retention.ms value. (Default: -2).
7830
+ :param bool log_message_downconversion_enable: This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. (Default: true).
7831
+ :param int log_message_timestamp_difference_max_ms: The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message (Default: 9223372036854775807 (Long.MAX_VALUE)).
7832
+ :param str log_message_timestamp_type: Enum: `CreateTime`, `LogAppendTime`. Define whether the timestamp in the message is message create time or log append time. (Default: CreateTime).
7833
+ :param bool log_preallocate: Should pre allocate file when create new segment? (Default: false).
7834
+ :param int log_retention_bytes: The maximum size of the log before deleting messages (Default: -1).
7835
+ :param int log_retention_hours: The number of hours to keep a log file before deleting it (Default: 168 hours (1 week)).
7836
+ :param int log_retention_ms: The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied. (Default: null, log.retention.hours applies).
7837
+ :param int log_roll_jitter_ms: The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used (Default: null).
7838
+ :param int log_roll_ms: The maximum time before a new log segment is rolled out (in milliseconds). (Default: null, log.roll.hours applies (Default: 168, 7 days)).
7839
+ :param int log_segment_bytes: The maximum size of a single log file (Default: 1073741824 bytes (1 gibibyte)).
7840
+ :param int log_segment_delete_delay_ms: The amount of time to wait before deleting a file from the filesystem (Default: 60000 ms (1 minute)). Example: `60000`.
7841
+ :param int max_connections_per_ip: The maximum number of connections allowed from each ip address (Default: 2147483647).
7842
+ :param int max_incremental_fetch_session_cache_slots: The maximum number of incremental fetch sessions that the broker will maintain. (Default: 1000). Example: `1000`.
7843
+ :param int message_max_bytes: The maximum size of message that the server can receive. (Default: 1048588 bytes (1 mebibyte + 12 bytes)). Example: `1048588`.
7844
+ :param int min_insync_replicas: When a producer sets acks to `all` (or `-1`), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful. (Default: 1). Example: `1`.
7845
+ :param int num_partitions: Number of partitions for auto-created topics (Default: 1).
7846
+ :param int offsets_retention_minutes: Log retention window in minutes for offsets topic (Default: 10080 minutes (7 days)). Example: `10080`.
7847
+ :param int producer_purgatory_purge_interval_requests: The purge interval (in number of requests) of the producer request purgatory (Default: 1000).
7848
+ :param int replica_fetch_max_bytes: The number of bytes of messages to attempt to fetch for each partition . This is not an absolute maximum, if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. (Default: 1048576 bytes (1 mebibytes)).
7849
+ :param int replica_fetch_response_max_bytes: Maximum bytes expected for the entire fetch response. Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum. (Default: 10485760 bytes (10 mebibytes)).
7850
+ :param str sasl_oauthbearer_expected_audience: The (optional) comma-delimited setting for the broker to use to verify that the JWT was issued for one of the expected audiences. (Default: null).
7851
+ :param str sasl_oauthbearer_expected_issuer: Optional setting for the broker to use to verify that the JWT was created by the expected issuer.(Default: null).
7852
+ :param str sasl_oauthbearer_jwks_endpoint_url: OIDC JWKS endpoint URL. By setting this the SASL SSL OAuth2/OIDC authentication is enabled. See also other options for SASL OAuth2/OIDC. (Default: null).
7853
+ :param str sasl_oauthbearer_sub_claim_name: Name of the scope from which to extract the subject claim from the JWT.(Default: sub).
7854
+ :param int socket_request_max_bytes: The maximum number of bytes in a socket request (Default: 104857600 bytes).
7855
+ :param bool transaction_partition_verification_enable: Enable verification that checks that the partition has been added to the transaction before writing transactional records to the partition. (Default: false).
7856
+ :param int transaction_remove_expired_transaction_cleanup_interval_ms: The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (Default: 3600000 ms (1 hour)). Example: `3600000`.
7857
+ :param int transaction_state_log_segment_bytes: The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (Default: 104857600 bytes (100 mebibytes)). Example: `104857600`.
7809
7858
  """
7810
7859
  if auto_create_topics_enable is not None:
7811
7860
  pulumi.set(__self__, "auto_create_topics_enable", auto_create_topics_enable)
@@ -7904,7 +7953,7 @@ class KafkaKafkaUserConfigKafka(dict):
7904
7953
  @pulumi.getter(name="autoCreateTopicsEnable")
7905
7954
  def auto_create_topics_enable(self) -> Optional[bool]:
7906
7955
  """
7907
- Enable auto creation of topics.
7956
+ Enable auto-creation of topics. (Default: true).
7908
7957
  """
7909
7958
  return pulumi.get(self, "auto_create_topics_enable")
7910
7959
 
@@ -7912,7 +7961,7 @@ class KafkaKafkaUserConfigKafka(dict):
7912
7961
  @pulumi.getter(name="compressionType")
7913
7962
  def compression_type(self) -> Optional[str]:
7914
7963
  """
7915
- Enum: `gzip`, `snappy`, `lz4`, `zstd`, `uncompressed`, `producer`. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `uncompressed` which is equivalent to no compression; and `producer` which means retain the original compression codec set by the producer.
7964
+ Enum: `gzip`, `snappy`, `lz4`, `zstd`, `uncompressed`, `producer`. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `uncompressed` which is equivalent to no compression; and `producer` which means retain the original compression codec set by the producer.(Default: producer).
7916
7965
  """
7917
7966
  return pulumi.get(self, "compression_type")
7918
7967
 
@@ -7920,7 +7969,7 @@ class KafkaKafkaUserConfigKafka(dict):
7920
7969
  @pulumi.getter(name="connectionsMaxIdleMs")
7921
7970
  def connections_max_idle_ms(self) -> Optional[int]:
7922
7971
  """
7923
- Idle connections timeout: the server socket processor threads close the connections that idle for longer than this. Example: `540000`.
7972
+ Idle connections timeout: the server socket processor threads close the connections that idle for longer than this. (Default: 600000 ms (10 minutes)). Example: `540000`.
7924
7973
  """
7925
7974
  return pulumi.get(self, "connections_max_idle_ms")
7926
7975
 
@@ -7928,7 +7977,7 @@ class KafkaKafkaUserConfigKafka(dict):
7928
7977
  @pulumi.getter(name="defaultReplicationFactor")
7929
7978
  def default_replication_factor(self) -> Optional[int]:
7930
7979
  """
7931
- Replication factor for autocreated topics.
7980
+ Replication factor for auto-created topics (Default: 3).
7932
7981
  """
7933
7982
  return pulumi.get(self, "default_replication_factor")
7934
7983
 
@@ -7936,7 +7985,7 @@ class KafkaKafkaUserConfigKafka(dict):
7936
7985
  @pulumi.getter(name="groupInitialRebalanceDelayMs")
7937
7986
  def group_initial_rebalance_delay_ms(self) -> Optional[int]:
7938
7987
  """
7939
- The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. Example: `3000`.
7988
+ The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. (Default: 3000 ms (3 seconds)). Example: `3000`.
7940
7989
  """
7941
7990
  return pulumi.get(self, "group_initial_rebalance_delay_ms")
7942
7991
 
@@ -7944,7 +7993,7 @@ class KafkaKafkaUserConfigKafka(dict):
7944
7993
  @pulumi.getter(name="groupMaxSessionTimeoutMs")
7945
7994
  def group_max_session_timeout_ms(self) -> Optional[int]:
7946
7995
  """
7947
- The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. Example: `1800000`.
7996
+ The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. Default: 1800000 ms (30 minutes). Example: `1800000`.
7948
7997
  """
7949
7998
  return pulumi.get(self, "group_max_session_timeout_ms")
7950
7999
 
@@ -7952,7 +8001,7 @@ class KafkaKafkaUserConfigKafka(dict):
7952
8001
  @pulumi.getter(name="groupMinSessionTimeoutMs")
7953
8002
  def group_min_session_timeout_ms(self) -> Optional[int]:
7954
8003
  """
7955
- The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. Example: `6000`.
8004
+ The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. (Default: 6000 ms (6 seconds)). Example: `6000`.
7956
8005
  """
7957
8006
  return pulumi.get(self, "group_min_session_timeout_ms")
7958
8007
 
@@ -7960,7 +8009,7 @@ class KafkaKafkaUserConfigKafka(dict):
7960
8009
  @pulumi.getter(name="logCleanerDeleteRetentionMs")
7961
8010
  def log_cleaner_delete_retention_ms(self) -> Optional[int]:
7962
8011
  """
7963
- How long are delete records retained? Example: `86400000`.
8012
+ How long are delete records retained? (Default: 86400000 (1 day)). Example: `86400000`.
7964
8013
  """
7965
8014
  return pulumi.get(self, "log_cleaner_delete_retention_ms")
7966
8015
 
@@ -7968,7 +8017,7 @@ class KafkaKafkaUserConfigKafka(dict):
7968
8017
  @pulumi.getter(name="logCleanerMaxCompactionLagMs")
7969
8018
  def log_cleaner_max_compaction_lag_ms(self) -> Optional[int]:
7970
8019
  """
7971
- The maximum amount of time message will remain uncompacted. Only applicable for logs that are being compacted.
8020
+ The maximum amount of time message will remain uncompacted. Only applicable for logs that are being compacted. (Default: 9223372036854775807 ms (Long.MAX_VALUE)).
7972
8021
  """
7973
8022
  return pulumi.get(self, "log_cleaner_max_compaction_lag_ms")
7974
8023
 
@@ -7976,7 +8025,7 @@ class KafkaKafkaUserConfigKafka(dict):
7976
8025
  @pulumi.getter(name="logCleanerMinCleanableRatio")
7977
8026
  def log_cleaner_min_cleanable_ratio(self) -> Optional[float]:
7978
8027
  """
7979
- Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option. Example: `0.5`.
8028
+ Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option. (Default: 0.5). Example: `0.5`.
7980
8029
  """
7981
8030
  return pulumi.get(self, "log_cleaner_min_cleanable_ratio")
7982
8031
 
@@ -7984,7 +8033,7 @@ class KafkaKafkaUserConfigKafka(dict):
7984
8033
  @pulumi.getter(name="logCleanerMinCompactionLagMs")
7985
8034
  def log_cleaner_min_compaction_lag_ms(self) -> Optional[int]:
7986
8035
  """
7987
- The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.
8036
+ The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted. (Default: 0 ms).
7988
8037
  """
7989
8038
  return pulumi.get(self, "log_cleaner_min_compaction_lag_ms")
7990
8039
 
@@ -7992,7 +8041,7 @@ class KafkaKafkaUserConfigKafka(dict):
7992
8041
  @pulumi.getter(name="logCleanupPolicy")
7993
8042
  def log_cleanup_policy(self) -> Optional[str]:
7994
8043
  """
7995
- Enum: `delete`, `compact`, `compact,delete`. The default cleanup policy for segments beyond the retention window.
8044
+ Enum: `delete`, `compact`, `compact,delete`. The default cleanup policy for segments beyond the retention window (Default: delete).
7996
8045
  """
7997
8046
  return pulumi.get(self, "log_cleanup_policy")
7998
8047
 
@@ -8000,7 +8049,7 @@ class KafkaKafkaUserConfigKafka(dict):
8000
8049
  @pulumi.getter(name="logFlushIntervalMessages")
8001
8050
  def log_flush_interval_messages(self) -> Optional[int]:
8002
8051
  """
8003
- The number of messages accumulated on a log partition before messages are flushed to disk. Example: `9223372036854775807`.
8052
+ The number of messages accumulated on a log partition before messages are flushed to disk (Default: 9223372036854775807 (Long.MAX_VALUE)). Example: `9223372036854775807`.
8004
8053
  """
8005
8054
  return pulumi.get(self, "log_flush_interval_messages")
8006
8055
 
@@ -8008,7 +8057,7 @@ class KafkaKafkaUserConfigKafka(dict):
8008
8057
  @pulumi.getter(name="logFlushIntervalMs")
8009
8058
  def log_flush_interval_ms(self) -> Optional[int]:
8010
8059
  """
8011
- The maximum time in ms that a message in any topic is kept in memory before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used.
8060
+ The maximum time in ms that a message in any topic is kept in memory (page-cache) before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used (Default: null).
8012
8061
  """
8013
8062
  return pulumi.get(self, "log_flush_interval_ms")
8014
8063
 
@@ -8016,7 +8065,7 @@ class KafkaKafkaUserConfigKafka(dict):
8016
8065
  @pulumi.getter(name="logIndexIntervalBytes")
8017
8066
  def log_index_interval_bytes(self) -> Optional[int]:
8018
8067
  """
8019
- The interval with which Kafka adds an entry to the offset index. Example: `4096`.
8068
+ The interval with which Kafka adds an entry to the offset index (Default: 4096 bytes (4 kibibytes)). Example: `4096`.
8020
8069
  """
8021
8070
  return pulumi.get(self, "log_index_interval_bytes")
8022
8071
 
@@ -8024,7 +8073,7 @@ class KafkaKafkaUserConfigKafka(dict):
8024
8073
  @pulumi.getter(name="logIndexSizeMaxBytes")
8025
8074
  def log_index_size_max_bytes(self) -> Optional[int]:
8026
8075
  """
8027
- The maximum size in bytes of the offset index. Example: `10485760`.
8076
+ The maximum size in bytes of the offset index (Default: 10485760 (10 mebibytes)). Example: `10485760`.
8028
8077
  """
8029
8078
  return pulumi.get(self, "log_index_size_max_bytes")
8030
8079
 
@@ -8032,7 +8081,7 @@ class KafkaKafkaUserConfigKafka(dict):
8032
8081
  @pulumi.getter(name="logLocalRetentionBytes")
8033
8082
  def log_local_retention_bytes(self) -> Optional[int]:
8034
8083
  """
8035
- The maximum size of local log segments that can grow for a partition before it gets eligible for deletion. If set to -2, the value of log.retention.bytes is used. The effective value should always be less than or equal to log.retention.bytes value.
8084
+ The maximum size of local log segments that can grow for a partition before it gets eligible for deletion. If set to -2, the value of log.retention.bytes is used. The effective value should always be less than or equal to log.retention.bytes value. (Default: -2).
8036
8085
  """
8037
8086
  return pulumi.get(self, "log_local_retention_bytes")
8038
8087
 
@@ -8040,7 +8089,7 @@ class KafkaKafkaUserConfigKafka(dict):
8040
8089
  @pulumi.getter(name="logLocalRetentionMs")
8041
8090
  def log_local_retention_ms(self) -> Optional[int]:
8042
8091
  """
8043
- The number of milliseconds to keep the local log segments before it gets eligible for deletion. If set to -2, the value of log.retention.ms is used. The effective value should always be less than or equal to log.retention.ms value.
8092
+ The number of milliseconds to keep the local log segments before it gets eligible for deletion. If set to -2, the value of log.retention.ms is used. The effective value should always be less than or equal to log.retention.ms value. (Default: -2).
8044
8093
  """
8045
8094
  return pulumi.get(self, "log_local_retention_ms")
8046
8095
 
@@ -8048,7 +8097,7 @@ class KafkaKafkaUserConfigKafka(dict):
8048
8097
  @pulumi.getter(name="logMessageDownconversionEnable")
8049
8098
  def log_message_downconversion_enable(self) -> Optional[bool]:
8050
8099
  """
8051
- This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests.
8100
+ This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. (Default: true).
8052
8101
  """
8053
8102
  return pulumi.get(self, "log_message_downconversion_enable")
8054
8103
 
@@ -8056,7 +8105,7 @@ class KafkaKafkaUserConfigKafka(dict):
8056
8105
  @pulumi.getter(name="logMessageTimestampDifferenceMaxMs")
8057
8106
  def log_message_timestamp_difference_max_ms(self) -> Optional[int]:
8058
8107
  """
8059
- The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message.
8108
+ The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message (Default: 9223372036854775807 (Long.MAX_VALUE)).
8060
8109
  """
8061
8110
  return pulumi.get(self, "log_message_timestamp_difference_max_ms")
8062
8111
 
@@ -8064,7 +8113,7 @@ class KafkaKafkaUserConfigKafka(dict):
8064
8113
  @pulumi.getter(name="logMessageTimestampType")
8065
8114
  def log_message_timestamp_type(self) -> Optional[str]:
8066
8115
  """
8067
- Enum: `CreateTime`, `LogAppendTime`. Define whether the timestamp in the message is message create time or log append time.
8116
+ Enum: `CreateTime`, `LogAppendTime`. Define whether the timestamp in the message is message create time or log append time. (Default: CreateTime).
8068
8117
  """
8069
8118
  return pulumi.get(self, "log_message_timestamp_type")
8070
8119
 
@@ -8072,7 +8121,7 @@ class KafkaKafkaUserConfigKafka(dict):
8072
8121
  @pulumi.getter(name="logPreallocate")
8073
8122
  def log_preallocate(self) -> Optional[bool]:
8074
8123
  """
8075
- Should pre allocate file when create new segment?
8124
+ Should pre allocate file when create new segment? (Default: false).
8076
8125
  """
8077
8126
  return pulumi.get(self, "log_preallocate")
8078
8127
 
@@ -8080,7 +8129,7 @@ class KafkaKafkaUserConfigKafka(dict):
8080
8129
  @pulumi.getter(name="logRetentionBytes")
8081
8130
  def log_retention_bytes(self) -> Optional[int]:
8082
8131
  """
8083
- The maximum size of the log before deleting messages.
8132
+ The maximum size of the log before deleting messages (Default: -1).
8084
8133
  """
8085
8134
  return pulumi.get(self, "log_retention_bytes")
8086
8135
 
@@ -8088,7 +8137,7 @@ class KafkaKafkaUserConfigKafka(dict):
8088
8137
  @pulumi.getter(name="logRetentionHours")
8089
8138
  def log_retention_hours(self) -> Optional[int]:
8090
8139
  """
8091
- The number of hours to keep a log file before deleting it.
8140
+ The number of hours to keep a log file before deleting it (Default: 168 hours (1 week)).
8092
8141
  """
8093
8142
  return pulumi.get(self, "log_retention_hours")
8094
8143
 
@@ -8096,7 +8145,7 @@ class KafkaKafkaUserConfigKafka(dict):
8096
8145
  @pulumi.getter(name="logRetentionMs")
8097
8146
  def log_retention_ms(self) -> Optional[int]:
8098
8147
  """
8099
- The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied.
8148
+ The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied. (Default: null, log.retention.hours applies).
8100
8149
  """
8101
8150
  return pulumi.get(self, "log_retention_ms")
8102
8151
 
@@ -8104,7 +8153,7 @@ class KafkaKafkaUserConfigKafka(dict):
8104
8153
  @pulumi.getter(name="logRollJitterMs")
8105
8154
  def log_roll_jitter_ms(self) -> Optional[int]:
8106
8155
  """
8107
- The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used.
8156
+ The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used (Default: null).
8108
8157
  """
8109
8158
  return pulumi.get(self, "log_roll_jitter_ms")
8110
8159
 
@@ -8112,7 +8161,7 @@ class KafkaKafkaUserConfigKafka(dict):
8112
8161
  @pulumi.getter(name="logRollMs")
8113
8162
  def log_roll_ms(self) -> Optional[int]:
8114
8163
  """
8115
- The maximum time before a new log segment is rolled out (in milliseconds).
8164
+ The maximum time before a new log segment is rolled out (in milliseconds). (Default: null, log.roll.hours applies (Default: 168, 7 days)).
8116
8165
  """
8117
8166
  return pulumi.get(self, "log_roll_ms")
8118
8167
 
@@ -8120,7 +8169,7 @@ class KafkaKafkaUserConfigKafka(dict):
8120
8169
  @pulumi.getter(name="logSegmentBytes")
8121
8170
  def log_segment_bytes(self) -> Optional[int]:
8122
8171
  """
8123
- The maximum size of a single log file.
8172
+ The maximum size of a single log file (Default: 1073741824 bytes (1 gibibyte)).
8124
8173
  """
8125
8174
  return pulumi.get(self, "log_segment_bytes")
8126
8175
 
@@ -8128,7 +8177,7 @@ class KafkaKafkaUserConfigKafka(dict):
8128
8177
  @pulumi.getter(name="logSegmentDeleteDelayMs")
8129
8178
  def log_segment_delete_delay_ms(self) -> Optional[int]:
8130
8179
  """
8131
- The amount of time to wait before deleting a file from the filesystem. Example: `60000`.
8180
+ The amount of time to wait before deleting a file from the filesystem (Default: 60000 ms (1 minute)). Example: `60000`.
8132
8181
  """
8133
8182
  return pulumi.get(self, "log_segment_delete_delay_ms")
8134
8183
 
@@ -8136,7 +8185,7 @@ class KafkaKafkaUserConfigKafka(dict):
8136
8185
  @pulumi.getter(name="maxConnectionsPerIp")
8137
8186
  def max_connections_per_ip(self) -> Optional[int]:
8138
8187
  """
8139
- The maximum number of connections allowed from each ip address (defaults to 2147483647).
8188
+ The maximum number of connections allowed from each ip address (Default: 2147483647).
8140
8189
  """
8141
8190
  return pulumi.get(self, "max_connections_per_ip")
8142
8191
 
@@ -8144,7 +8193,7 @@ class KafkaKafkaUserConfigKafka(dict):
8144
8193
  @pulumi.getter(name="maxIncrementalFetchSessionCacheSlots")
8145
8194
  def max_incremental_fetch_session_cache_slots(self) -> Optional[int]:
8146
8195
  """
8147
- The maximum number of incremental fetch sessions that the broker will maintain. Example: `1000`.
8196
+ The maximum number of incremental fetch sessions that the broker will maintain. (Default: 1000). Example: `1000`.
8148
8197
  """
8149
8198
  return pulumi.get(self, "max_incremental_fetch_session_cache_slots")
8150
8199
 
@@ -8152,7 +8201,7 @@ class KafkaKafkaUserConfigKafka(dict):
8152
8201
  @pulumi.getter(name="messageMaxBytes")
8153
8202
  def message_max_bytes(self) -> Optional[int]:
8154
8203
  """
8155
- The maximum size of message that the server can receive. Example: `1048588`.
8204
+ The maximum size of message that the server can receive. (Default: 1048588 bytes (1 mebibyte + 12 bytes)). Example: `1048588`.
8156
8205
  """
8157
8206
  return pulumi.get(self, "message_max_bytes")
8158
8207
 
@@ -8160,7 +8209,7 @@ class KafkaKafkaUserConfigKafka(dict):
8160
8209
  @pulumi.getter(name="minInsyncReplicas")
8161
8210
  def min_insync_replicas(self) -> Optional[int]:
8162
8211
  """
8163
- When a producer sets acks to `all` (or `-1`), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful. Example: `1`.
8212
+ When a producer sets acks to `all` (or `-1`), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful. (Default: 1). Example: `1`.
8164
8213
  """
8165
8214
  return pulumi.get(self, "min_insync_replicas")
8166
8215
 
@@ -8168,7 +8217,7 @@ class KafkaKafkaUserConfigKafka(dict):
8168
8217
  @pulumi.getter(name="numPartitions")
8169
8218
  def num_partitions(self) -> Optional[int]:
8170
8219
  """
8171
- Number of partitions for autocreated topics.
8220
+ Number of partitions for auto-created topics (Default: 1).
8172
8221
  """
8173
8222
  return pulumi.get(self, "num_partitions")
8174
8223
 
@@ -8176,7 +8225,7 @@ class KafkaKafkaUserConfigKafka(dict):
8176
8225
  @pulumi.getter(name="offsetsRetentionMinutes")
8177
8226
  def offsets_retention_minutes(self) -> Optional[int]:
8178
8227
  """
8179
- Log retention window in minutes for offsets topic. Example: `10080`.
8228
+ Log retention window in minutes for offsets topic (Default: 10080 minutes (7 days)). Example: `10080`.
8180
8229
  """
8181
8230
  return pulumi.get(self, "offsets_retention_minutes")
8182
8231
 
@@ -8184,7 +8233,7 @@ class KafkaKafkaUserConfigKafka(dict):
8184
8233
  @pulumi.getter(name="producerPurgatoryPurgeIntervalRequests")
8185
8234
  def producer_purgatory_purge_interval_requests(self) -> Optional[int]:
8186
8235
  """
8187
- The purge interval (in number of requests) of the producer request purgatory(defaults to 1000).
8236
+ The purge interval (in number of requests) of the producer request purgatory (Default: 1000).
8188
8237
  """
8189
8238
  return pulumi.get(self, "producer_purgatory_purge_interval_requests")
8190
8239
 
@@ -8192,7 +8241,7 @@ class KafkaKafkaUserConfigKafka(dict):
8192
8241
  @pulumi.getter(name="replicaFetchMaxBytes")
8193
8242
  def replica_fetch_max_bytes(self) -> Optional[int]:
8194
8243
  """
8195
- The number of bytes of messages to attempt to fetch for each partition (defaults to 1048576). This is not an absolute maximum, if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made.
8244
+ The number of bytes of messages to attempt to fetch for each partition . This is not an absolute maximum, if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. (Default: 1048576 bytes (1 mebibytes)).
8196
8245
  """
8197
8246
  return pulumi.get(self, "replica_fetch_max_bytes")
8198
8247
 
@@ -8200,7 +8249,7 @@ class KafkaKafkaUserConfigKafka(dict):
8200
8249
  @pulumi.getter(name="replicaFetchResponseMaxBytes")
8201
8250
  def replica_fetch_response_max_bytes(self) -> Optional[int]:
8202
8251
  """
8203
- Maximum bytes expected for the entire fetch response (defaults to 10485760). Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum.
8252
+ Maximum bytes expected for the entire fetch response. Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum. (Default: 10485760 bytes (10 mebibytes)).
8204
8253
  """
8205
8254
  return pulumi.get(self, "replica_fetch_response_max_bytes")
8206
8255
 
@@ -8208,7 +8257,7 @@ class KafkaKafkaUserConfigKafka(dict):
8208
8257
  @pulumi.getter(name="saslOauthbearerExpectedAudience")
8209
8258
  def sasl_oauthbearer_expected_audience(self) -> Optional[str]:
8210
8259
  """
8211
- The (optional) comma-delimited setting for the broker to use to verify that the JWT was issued for one of the expected audiences.
8260
+ The (optional) comma-delimited setting for the broker to use to verify that the JWT was issued for one of the expected audiences. (Default: null).
8212
8261
  """
8213
8262
  return pulumi.get(self, "sasl_oauthbearer_expected_audience")
8214
8263
 
@@ -8216,7 +8265,7 @@ class KafkaKafkaUserConfigKafka(dict):
8216
8265
  @pulumi.getter(name="saslOauthbearerExpectedIssuer")
8217
8266
  def sasl_oauthbearer_expected_issuer(self) -> Optional[str]:
8218
8267
  """
8219
- Optional setting for the broker to use to verify that the JWT was created by the expected issuer.
8268
+ Optional setting for the broker to use to verify that the JWT was created by the expected issuer.(Default: null).
8220
8269
  """
8221
8270
  return pulumi.get(self, "sasl_oauthbearer_expected_issuer")
8222
8271
 
@@ -8224,7 +8273,7 @@ class KafkaKafkaUserConfigKafka(dict):
8224
8273
  @pulumi.getter(name="saslOauthbearerJwksEndpointUrl")
8225
8274
  def sasl_oauthbearer_jwks_endpoint_url(self) -> Optional[str]:
8226
8275
  """
8227
- OIDC JWKS endpoint URL. By setting this the SASL SSL OAuth2/OIDC authentication is enabled. See also other options for SASL OAuth2/OIDC.
8276
+ OIDC JWKS endpoint URL. By setting this the SASL SSL OAuth2/OIDC authentication is enabled. See also other options for SASL OAuth2/OIDC. (Default: null).
8228
8277
  """
8229
8278
  return pulumi.get(self, "sasl_oauthbearer_jwks_endpoint_url")
8230
8279
 
@@ -8232,7 +8281,7 @@ class KafkaKafkaUserConfigKafka(dict):
8232
8281
  @pulumi.getter(name="saslOauthbearerSubClaimName")
8233
8282
  def sasl_oauthbearer_sub_claim_name(self) -> Optional[str]:
8234
8283
  """
8235
- Name of the scope from which to extract the subject claim from the JWT. Defaults to sub.
8284
+ Name of the scope from which to extract the subject claim from the JWT.(Default: sub).
8236
8285
  """
8237
8286
  return pulumi.get(self, "sasl_oauthbearer_sub_claim_name")
8238
8287
 
@@ -8240,7 +8289,7 @@ class KafkaKafkaUserConfigKafka(dict):
8240
8289
  @pulumi.getter(name="socketRequestMaxBytes")
8241
8290
  def socket_request_max_bytes(self) -> Optional[int]:
8242
8291
  """
8243
- The maximum number of bytes in a socket request (defaults to 104857600).
8292
+ The maximum number of bytes in a socket request (Default: 104857600 bytes).
8244
8293
  """
8245
8294
  return pulumi.get(self, "socket_request_max_bytes")
8246
8295
 
@@ -8248,7 +8297,7 @@ class KafkaKafkaUserConfigKafka(dict):
8248
8297
  @pulumi.getter(name="transactionPartitionVerificationEnable")
8249
8298
  def transaction_partition_verification_enable(self) -> Optional[bool]:
8250
8299
  """
8251
- Enable verification that checks that the partition has been added to the transaction before writing transactional records to the partition.
8300
+ Enable verification that checks that the partition has been added to the transaction before writing transactional records to the partition. (Default: false).
8252
8301
  """
8253
8302
  return pulumi.get(self, "transaction_partition_verification_enable")
8254
8303
 
@@ -8256,7 +8305,7 @@ class KafkaKafkaUserConfigKafka(dict):
8256
8305
  @pulumi.getter(name="transactionRemoveExpiredTransactionCleanupIntervalMs")
8257
8306
  def transaction_remove_expired_transaction_cleanup_interval_ms(self) -> Optional[int]:
8258
8307
  """
8259
- The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (defaults to 3600000 (1 hour)).
8308
+ The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (Default: 3600000 ms (1 hour)). Example: `3600000`.
8260
8309
  """
8261
8310
  return pulumi.get(self, "transaction_remove_expired_transaction_cleanup_interval_ms")
8262
8311
 
@@ -8264,7 +8313,7 @@ class KafkaKafkaUserConfigKafka(dict):
8264
8313
  @pulumi.getter(name="transactionStateLogSegmentBytes")
8265
8314
  def transaction_state_log_segment_bytes(self) -> Optional[int]:
8266
8315
  """
8267
- The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (defaults to 104857600 (100 mebibytes)).
8316
+ The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (Default: 104857600 bytes (100 mebibytes)). Example: `104857600`.
8268
8317
  """
8269
8318
  return pulumi.get(self, "transaction_state_log_segment_bytes")
8270
8319
 
@@ -12714,6 +12763,8 @@ class MySqlMysqlUserConfigMigration(dict):
12714
12763
  suggest = None
12715
12764
  if key == "ignoreDbs":
12716
12765
  suggest = "ignore_dbs"
12766
+ elif key == "ignoreRoles":
12767
+ suggest = "ignore_roles"
12717
12768
 
12718
12769
  if suggest:
12719
12770
  pulumi.log.warn(f"Key '{key}' not found in MySqlMysqlUserConfigMigration. Access the value via the '{suggest}' property getter instead.")
@@ -12731,6 +12782,7 @@ class MySqlMysqlUserConfigMigration(dict):
12731
12782
  port: int,
12732
12783
  dbname: Optional[str] = None,
12733
12784
  ignore_dbs: Optional[str] = None,
12785
+ ignore_roles: Optional[str] = None,
12734
12786
  method: Optional[str] = None,
12735
12787
  password: Optional[str] = None,
12736
12788
  ssl: Optional[bool] = None,
@@ -12740,6 +12792,7 @@ class MySqlMysqlUserConfigMigration(dict):
12740
12792
  :param int port: Port number of the server where to migrate data from. Example: `1234`.
12741
12793
  :param str dbname: Database name for bootstrapping the initial connection. Example: `defaultdb`.
12742
12794
  :param str ignore_dbs: Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment). Example: `db1,db2`.
12795
+ :param str ignore_roles: Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment). Example: `role1,role2`.
12743
12796
  :param str method: Enum: `dump`, `replication`. The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
12744
12797
  :param str password: Password for authentication with the server where to migrate data from. Example: `jjKk45Nnd`.
12745
12798
  :param bool ssl: The server where to migrate data from is secured with SSL. Default: `true`.
@@ -12751,6 +12804,8 @@ class MySqlMysqlUserConfigMigration(dict):
12751
12804
  pulumi.set(__self__, "dbname", dbname)
12752
12805
  if ignore_dbs is not None:
12753
12806
  pulumi.set(__self__, "ignore_dbs", ignore_dbs)
12807
+ if ignore_roles is not None:
12808
+ pulumi.set(__self__, "ignore_roles", ignore_roles)
12754
12809
  if method is not None:
12755
12810
  pulumi.set(__self__, "method", method)
12756
12811
  if password is not None:
@@ -12792,6 +12847,14 @@ class MySqlMysqlUserConfigMigration(dict):
12792
12847
  """
12793
12848
  return pulumi.get(self, "ignore_dbs")
12794
12849
 
12850
+ @property
12851
+ @pulumi.getter(name="ignoreRoles")
12852
+ def ignore_roles(self) -> Optional[str]:
12853
+ """
12854
+ Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment). Example: `role1,role2`.
12855
+ """
12856
+ return pulumi.get(self, "ignore_roles")
12857
+
12795
12858
  @property
12796
12859
  @pulumi.getter
12797
12860
  def method(self) -> Optional[str]:
@@ -16812,6 +16875,8 @@ class PgPgUserConfigMigration(dict):
16812
16875
  suggest = None
16813
16876
  if key == "ignoreDbs":
16814
16877
  suggest = "ignore_dbs"
16878
+ elif key == "ignoreRoles":
16879
+ suggest = "ignore_roles"
16815
16880
 
16816
16881
  if suggest:
16817
16882
  pulumi.log.warn(f"Key '{key}' not found in PgPgUserConfigMigration. Access the value via the '{suggest}' property getter instead.")
@@ -16829,6 +16894,7 @@ class PgPgUserConfigMigration(dict):
16829
16894
  port: int,
16830
16895
  dbname: Optional[str] = None,
16831
16896
  ignore_dbs: Optional[str] = None,
16897
+ ignore_roles: Optional[str] = None,
16832
16898
  method: Optional[str] = None,
16833
16899
  password: Optional[str] = None,
16834
16900
  ssl: Optional[bool] = None,
@@ -16838,6 +16904,7 @@ class PgPgUserConfigMigration(dict):
16838
16904
  :param int port: Port number of the server where to migrate data from. Example: `1234`.
16839
16905
  :param str dbname: Database name for bootstrapping the initial connection. Example: `defaultdb`.
16840
16906
  :param str ignore_dbs: Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment). Example: `db1,db2`.
16907
+ :param str ignore_roles: Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment). Example: `role1,role2`.
16841
16908
  :param str method: Enum: `dump`, `replication`. The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
16842
16909
  :param str password: Password for authentication with the server where to migrate data from. Example: `jjKk45Nnd`.
16843
16910
  :param bool ssl: The server where to migrate data from is secured with SSL. Default: `true`.
@@ -16849,6 +16916,8 @@ class PgPgUserConfigMigration(dict):
16849
16916
  pulumi.set(__self__, "dbname", dbname)
16850
16917
  if ignore_dbs is not None:
16851
16918
  pulumi.set(__self__, "ignore_dbs", ignore_dbs)
16919
+ if ignore_roles is not None:
16920
+ pulumi.set(__self__, "ignore_roles", ignore_roles)
16852
16921
  if method is not None:
16853
16922
  pulumi.set(__self__, "method", method)
16854
16923
  if password is not None:
@@ -16890,6 +16959,14 @@ class PgPgUserConfigMigration(dict):
16890
16959
  """
16891
16960
  return pulumi.get(self, "ignore_dbs")
16892
16961
 
16962
+ @property
16963
+ @pulumi.getter(name="ignoreRoles")
16964
+ def ignore_roles(self) -> Optional[str]:
16965
+ """
16966
+ Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment). Example: `role1,role2`.
16967
+ """
16968
+ return pulumi.get(self, "ignore_roles")
16969
+
16893
16970
  @property
16894
16971
  @pulumi.getter
16895
16972
  def method(self) -> Optional[str]:
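The same `ignore_roles` field is added to the MySQL, Redis, Dragonfly and Valkey migration blocks in this release, but the docstring notes it is currently honoured only for PostgreSQL, so a single hedged sketch against `aiven.Pg` is shown here. The source connection details, plan and names are placeholders; the Args classes are the input-side counterparts of the output types in this file.

```python
import pulumi_aiven as aiven

# Minimal sketch: migrate into an Aiven PostgreSQL service while skipping
# selected databases and roles. Connection details are placeholders.
pg = aiven.Pg(
    "example-pg",
    project="my-project",
    cloud_name="google-europe-west1",
    plan="startup-4",
    service_name="example-pg",
    pg_user_config=aiven.PgPgUserConfigArgs(
        migration=aiven.PgPgUserConfigMigrationArgs(
            host="source-db.example.com",
            port=5432,
            username="migration_user",
            password="REPLACE_ME",
            ssl=True,
            method="dump",
            ignore_dbs="db1,db2",
            # New in this release: roles to leave out of the migration.
            ignore_roles="role1,role2",
        ),
    ),
)
```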
@@ -17965,6 +18042,8 @@ class PgPgUserConfigPgbouncer(dict):
17965
18042
  suggest = "autodb_pool_size"
17966
18043
  elif key == "ignoreStartupParameters":
17967
18044
  suggest = "ignore_startup_parameters"
18045
+ elif key == "maxPreparedStatements":
18046
+ suggest = "max_prepared_statements"
17968
18047
  elif key == "minPoolSize":
17969
18048
  suggest = "min_pool_size"
17970
18049
  elif key == "serverIdleTimeout":
@@ -17991,6 +18070,7 @@ class PgPgUserConfigPgbouncer(dict):
17991
18070
  autodb_pool_mode: Optional[str] = None,
17992
18071
  autodb_pool_size: Optional[int] = None,
17993
18072
  ignore_startup_parameters: Optional[Sequence[str]] = None,
18073
+ max_prepared_statements: Optional[int] = None,
17994
18074
  min_pool_size: Optional[int] = None,
17995
18075
  server_idle_timeout: Optional[int] = None,
17996
18076
  server_lifetime: Optional[int] = None,
@@ -18001,6 +18081,7 @@ class PgPgUserConfigPgbouncer(dict):
18001
18081
  :param str autodb_pool_mode: Enum: `session`, `transaction`, `statement`. PGBouncer pool mode. Default: `transaction`.
18002
18082
  :param int autodb_pool_size: If non-zero then create automatically a pool of that size per user when a pool doesn't exist. Default: `0`.
18003
18083
  :param Sequence[str] ignore_startup_parameters: List of parameters to ignore when given in startup packet.
18084
+ :param int max_prepared_statements: PgBouncer tracks protocol-level named prepared statements related commands sent by the client in transaction and statement pooling modes when max_prepared_statements is set to a non-zero value. Setting it to 0 disables prepared statements. max_prepared_statements defaults to 100, and its maximum is 3000. Default: `100`.
18004
18085
  :param int min_pool_size: Add more server connections to pool if below this number. Improves behavior when usual load comes suddenly back after period of total inactivity. The value is effectively capped at the pool size. Default: `0`.
18005
18086
  :param int server_idle_timeout: If a server connection has been idle more than this many seconds it will be dropped. If 0 then timeout is disabled. (seconds). Default: `600`.
18006
18087
  :param int server_lifetime: The pooler will close an unused server connection that has been connected longer than this. (seconds). Default: `3600`.
@@ -18016,6 +18097,8 @@ class PgPgUserConfigPgbouncer(dict):
18016
18097
  pulumi.set(__self__, "autodb_pool_size", autodb_pool_size)
18017
18098
  if ignore_startup_parameters is not None:
18018
18099
  pulumi.set(__self__, "ignore_startup_parameters", ignore_startup_parameters)
18100
+ if max_prepared_statements is not None:
18101
+ pulumi.set(__self__, "max_prepared_statements", max_prepared_statements)
18019
18102
  if min_pool_size is not None:
18020
18103
  pulumi.set(__self__, "min_pool_size", min_pool_size)
18021
18104
  if server_idle_timeout is not None:
@@ -18065,6 +18148,14 @@ class PgPgUserConfigPgbouncer(dict):
18065
18148
  """
18066
18149
  return pulumi.get(self, "ignore_startup_parameters")
18067
18150
 
18151
+ @property
18152
+ @pulumi.getter(name="maxPreparedStatements")
18153
+ def max_prepared_statements(self) -> Optional[int]:
18154
+ """
18155
+ PgBouncer tracks protocol-level named prepared statements related commands sent by the client in transaction and statement pooling modes when max_prepared_statements is set to a non-zero value. Setting it to 0 disables prepared statements. max_prepared_statements defaults to 100, and its maximum is 3000. Default: `100`.
18156
+ """
18157
+ return pulumi.get(self, "max_prepared_statements")
18158
+
18068
18159
  @property
18069
18160
  @pulumi.getter(name="minPoolSize")
18070
18161
  def min_pool_size(self) -> Optional[int]:
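A hedged sketch of the new PgBouncer option on an `aiven.Pg` resource; only `max_prepared_statements` comes from this diff, and the remaining values are placeholders.

```python
import pulumi_aiven as aiven

# Minimal sketch: raise the new PgBouncer prepared-statement budget
# (default 100, maximum 3000). Project, plan and service names are placeholders.
pg = aiven.Pg(
    "example-pg-pooler",
    project="my-project",
    cloud_name="google-europe-west1",
    plan="startup-4",
    service_name="example-pg-pooler",
    pg_user_config=aiven.PgPgUserConfigArgs(
        pgbouncer=aiven.PgPgUserConfigPgbouncerArgs(
            autodb_pool_mode="transaction",
            # Setting this to 0 disables prepared-statement tracking entirely.
            max_prepared_statements=500,
        ),
    ),
)
```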
@@ -19027,6 +19118,8 @@ class RedisRedisUserConfigMigration(dict):
19027
19118
  suggest = None
19028
19119
  if key == "ignoreDbs":
19029
19120
  suggest = "ignore_dbs"
19121
+ elif key == "ignoreRoles":
19122
+ suggest = "ignore_roles"
19030
19123
 
19031
19124
  if suggest:
19032
19125
  pulumi.log.warn(f"Key '{key}' not found in RedisRedisUserConfigMigration. Access the value via the '{suggest}' property getter instead.")
@@ -19044,6 +19137,7 @@ class RedisRedisUserConfigMigration(dict):
19044
19137
  port: int,
19045
19138
  dbname: Optional[str] = None,
19046
19139
  ignore_dbs: Optional[str] = None,
19140
+ ignore_roles: Optional[str] = None,
19047
19141
  method: Optional[str] = None,
19048
19142
  password: Optional[str] = None,
19049
19143
  ssl: Optional[bool] = None,
@@ -19053,6 +19147,7 @@ class RedisRedisUserConfigMigration(dict):
19053
19147
  :param int port: Port number of the server where to migrate data from. Example: `1234`.
19054
19148
  :param str dbname: Database name for bootstrapping the initial connection. Example: `defaultdb`.
19055
19149
  :param str ignore_dbs: Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment). Example: `db1,db2`.
19150
+ :param str ignore_roles: Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment). Example: `role1,role2`.
19056
19151
  :param str method: Enum: `dump`, `replication`. The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
19057
19152
  :param str password: Password for authentication with the server where to migrate data from. Example: `jjKk45Nnd`.
19058
19153
  :param bool ssl: The server where to migrate data from is secured with SSL. Default: `true`.
@@ -19064,6 +19159,8 @@ class RedisRedisUserConfigMigration(dict):
19064
19159
  pulumi.set(__self__, "dbname", dbname)
19065
19160
  if ignore_dbs is not None:
19066
19161
  pulumi.set(__self__, "ignore_dbs", ignore_dbs)
19162
+ if ignore_roles is not None:
19163
+ pulumi.set(__self__, "ignore_roles", ignore_roles)
19067
19164
  if method is not None:
19068
19165
  pulumi.set(__self__, "method", method)
19069
19166
  if password is not None:
@@ -19105,6 +19202,14 @@ class RedisRedisUserConfigMigration(dict):
19105
19202
  """
19106
19203
  return pulumi.get(self, "ignore_dbs")
19107
19204
 
19205
+ @property
19206
+ @pulumi.getter(name="ignoreRoles")
19207
+ def ignore_roles(self) -> Optional[str]:
19208
+ """
19209
+ Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment). Example: `role1,role2`.
19210
+ """
19211
+ return pulumi.get(self, "ignore_roles")
19212
+
19108
19213
  @property
19109
19214
  @pulumi.getter
19110
19215
  def method(self) -> Optional[str]:
@@ -19368,8 +19473,12 @@ class ServiceIntegrationClickhouseKafkaUserConfigTable(dict):
19368
19473
  suggest = "num_consumers"
19369
19474
  elif key == "pollMaxBatchSize":
19370
19475
  suggest = "poll_max_batch_size"
19476
+ elif key == "pollMaxTimeoutMs":
19477
+ suggest = "poll_max_timeout_ms"
19371
19478
  elif key == "skipBrokenMessages":
19372
19479
  suggest = "skip_broken_messages"
19480
+ elif key == "threadPerConsumer":
19481
+ suggest = "thread_per_consumer"
19373
19482
 
19374
19483
  if suggest:
19375
19484
  pulumi.log.warn(f"Key '{key}' not found in ServiceIntegrationClickhouseKafkaUserConfigTable. Access the value via the '{suggest}' property getter instead.")
@@ -19395,7 +19504,9 @@ class ServiceIntegrationClickhouseKafkaUserConfigTable(dict):
19395
19504
  max_rows_per_message: Optional[int] = None,
19396
19505
  num_consumers: Optional[int] = None,
19397
19506
  poll_max_batch_size: Optional[int] = None,
19398
- skip_broken_messages: Optional[int] = None):
19507
+ poll_max_timeout_ms: Optional[int] = None,
19508
+ skip_broken_messages: Optional[int] = None,
19509
+ thread_per_consumer: Optional[bool] = None):
19399
19510
  """
19400
19511
  :param Sequence['ServiceIntegrationClickhouseKafkaUserConfigTableColumnArgs'] columns: Table columns
19401
19512
  :param str data_format: Enum: `Avro`, `CSV`, `JSONAsString`, `JSONCompactEachRow`, `JSONCompactStringsEachRow`, `JSONEachRow`, `JSONStringsEachRow`, `MsgPack`, `TSKV`, `TSV`, `TabSeparated`, `RawBLOB`, `AvroConfluent`, `Parquet`. Message data format. Default: `JSONEachRow`.
@@ -19409,7 +19520,9 @@ class ServiceIntegrationClickhouseKafkaUserConfigTable(dict):
19409
19520
  :param int max_rows_per_message: The maximum number of rows produced in one kafka message for row-based formats. Default: `1`.
19410
19521
  :param int num_consumers: The number of consumers per table per replica. Default: `1`.
19411
19522
  :param int poll_max_batch_size: Maximum amount of messages to be polled in a single Kafka poll. Default: `0`.
19523
+ :param int poll_max_timeout_ms: Timeout in milliseconds for a single poll from Kafka. Takes the value of the stream_flush_interval_ms server setting by default (500ms). Default: `0`.
19412
19524
  :param int skip_broken_messages: Skip at least this number of broken messages from Kafka topic per block. Default: `0`.
19525
+ :param bool thread_per_consumer: Provide an independent thread for each consumer. All consumers run in the same thread by default. Default: `false`.
19413
19526
  """
19414
19527
  pulumi.set(__self__, "columns", columns)
19415
19528
  pulumi.set(__self__, "data_format", data_format)
@@ -19430,8 +19543,12 @@ class ServiceIntegrationClickhouseKafkaUserConfigTable(dict):
19430
19543
  pulumi.set(__self__, "num_consumers", num_consumers)
19431
19544
  if poll_max_batch_size is not None:
19432
19545
  pulumi.set(__self__, "poll_max_batch_size", poll_max_batch_size)
19546
+ if poll_max_timeout_ms is not None:
19547
+ pulumi.set(__self__, "poll_max_timeout_ms", poll_max_timeout_ms)
19433
19548
  if skip_broken_messages is not None:
19434
19549
  pulumi.set(__self__, "skip_broken_messages", skip_broken_messages)
19550
+ if thread_per_consumer is not None:
19551
+ pulumi.set(__self__, "thread_per_consumer", thread_per_consumer)
19435
19552
 
19436
19553
  @property
19437
19554
  @pulumi.getter
@@ -19529,6 +19646,14 @@ class ServiceIntegrationClickhouseKafkaUserConfigTable(dict):
19529
19646
  """
19530
19647
  return pulumi.get(self, "poll_max_batch_size")
19531
19648
 
19649
+ @property
19650
+ @pulumi.getter(name="pollMaxTimeoutMs")
19651
+ def poll_max_timeout_ms(self) -> Optional[int]:
19652
+ """
19653
+ Timeout in milliseconds for a single poll from Kafka. Takes the value of the stream_flush_interval_ms server setting by default (500ms). Default: `0`.
19654
+ """
19655
+ return pulumi.get(self, "poll_max_timeout_ms")
19656
+
19532
19657
  @property
19533
19658
  @pulumi.getter(name="skipBrokenMessages")
19534
19659
  def skip_broken_messages(self) -> Optional[int]:
@@ -19537,6 +19662,14 @@ class ServiceIntegrationClickhouseKafkaUserConfigTable(dict):
19537
19662
  """
19538
19663
  return pulumi.get(self, "skip_broken_messages")
19539
19664
 
19665
+ @property
19666
+ @pulumi.getter(name="threadPerConsumer")
19667
+ def thread_per_consumer(self) -> Optional[bool]:
19668
+ """
19669
+ Provide an independent thread for each consumer. All consumers run in the same thread by default. Default: `false`.
19670
+ """
19671
+ return pulumi.get(self, "thread_per_consumer")
19672
+
19540
19673
 
19541
19674
  @pulumi.output_type
19542
19675
  class ServiceIntegrationClickhouseKafkaUserConfigTableColumn(dict):
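The two new table options are set through a `clickhouse_kafka` service integration. The sketch below assumes the usual table schema fields (name, group_name, columns, topics) and the corresponding Args classes; only `poll_max_timeout_ms` and `thread_per_consumer` are taken from this diff, and the project, service, topic and column values are placeholders.

```python
import pulumi_aiven as aiven

# Minimal sketch: a ClickHouse-Kafka integration table using the new
# poll_max_timeout_ms and thread_per_consumer options. All names are placeholders.
integration = aiven.ServiceIntegration(
    "clickhouse-kafka",
    project="my-project",
    integration_type="clickhouse_kafka",
    source_service_name="example-kafka",
    destination_service_name="example-clickhouse",
    clickhouse_kafka_user_config=aiven.ServiceIntegrationClickhouseKafkaUserConfigArgs(
        tables=[
            aiven.ServiceIntegrationClickhouseKafkaUserConfigTableArgs(
                name="events",
                group_name="events_consumer_group",
                data_format="JSONEachRow",
                columns=[
                    aiven.ServiceIntegrationClickhouseKafkaUserConfigTableColumnArgs(
                        name="id",
                        type="UInt64",
                    ),
                ],
                topics=[
                    aiven.ServiceIntegrationClickhouseKafkaUserConfigTableTopicArgs(
                        name="events",
                    ),
                ],
                # New in this release:
                poll_max_timeout_ms=1000,   # single poll timeout; 0 falls back to the server default
                thread_per_consumer=True,   # one thread per consumer instead of a shared thread
            ),
        ],
    ),
)
```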
@@ -23861,6 +23994,8 @@ class ValkeyValkeyUserConfigMigration(dict):
23861
23994
  suggest = None
23862
23995
  if key == "ignoreDbs":
23863
23996
  suggest = "ignore_dbs"
23997
+ elif key == "ignoreRoles":
23998
+ suggest = "ignore_roles"
23864
23999
 
23865
24000
  if suggest:
23866
24001
  pulumi.log.warn(f"Key '{key}' not found in ValkeyValkeyUserConfigMigration. Access the value via the '{suggest}' property getter instead.")
@@ -23878,6 +24013,7 @@ class ValkeyValkeyUserConfigMigration(dict):
23878
24013
  port: int,
23879
24014
  dbname: Optional[str] = None,
23880
24015
  ignore_dbs: Optional[str] = None,
24016
+ ignore_roles: Optional[str] = None,
23881
24017
  method: Optional[str] = None,
23882
24018
  password: Optional[str] = None,
23883
24019
  ssl: Optional[bool] = None,
@@ -23887,6 +24023,7 @@ class ValkeyValkeyUserConfigMigration(dict):
23887
24023
  :param int port: Port number of the server where to migrate data from. Example: `1234`.
23888
24024
  :param str dbname: Database name for bootstrapping the initial connection. Example: `defaultdb`.
23889
24025
  :param str ignore_dbs: Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment). Example: `db1,db2`.
24026
+ :param str ignore_roles: Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment). Example: `role1,role2`.
23890
24027
  :param str method: Enum: `dump`, `replication`. The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
23891
24028
  :param str password: Password for authentication with the server where to migrate data from. Example: `jjKk45Nnd`.
23892
24029
  :param bool ssl: The server where to migrate data from is secured with SSL. Default: `true`.
@@ -23898,6 +24035,8 @@ class ValkeyValkeyUserConfigMigration(dict):
23898
24035
  pulumi.set(__self__, "dbname", dbname)
23899
24036
  if ignore_dbs is not None:
23900
24037
  pulumi.set(__self__, "ignore_dbs", ignore_dbs)
24038
+ if ignore_roles is not None:
24039
+ pulumi.set(__self__, "ignore_roles", ignore_roles)
23901
24040
  if method is not None:
23902
24041
  pulumi.set(__self__, "method", method)
23903
24042
  if password is not None:
@@ -23939,6 +24078,14 @@ class ValkeyValkeyUserConfigMigration(dict):
23939
24078
  """
23940
24079
  return pulumi.get(self, "ignore_dbs")
23941
24080
 
24081
+ @property
24082
+ @pulumi.getter(name="ignoreRoles")
24083
+ def ignore_roles(self) -> Optional[str]:
24084
+ """
24085
+ Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment). Example: `role1,role2`.
24086
+ """
24087
+ return pulumi.get(self, "ignore_roles")
24088
+
23942
24089
  @property
23943
24090
  @pulumi.getter
23944
24091
  def method(self) -> Optional[str]:
@@ -26078,6 +26225,7 @@ class GetDragonflyDragonflyUserConfigMigrationResult(dict):
26078
26225
  port: int,
26079
26226
  dbname: Optional[str] = None,
26080
26227
  ignore_dbs: Optional[str] = None,
26228
+ ignore_roles: Optional[str] = None,
26081
26229
  method: Optional[str] = None,
26082
26230
  password: Optional[str] = None,
26083
26231
  ssl: Optional[bool] = None,
@@ -26087,6 +26235,7 @@ class GetDragonflyDragonflyUserConfigMigrationResult(dict):
26087
26235
  :param int port: Port number of the server where to migrate data from. Example: `1234`.
26088
26236
  :param str dbname: Database name for bootstrapping the initial connection. Example: `defaultdb`.
26089
26237
  :param str ignore_dbs: Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment). Example: `db1,db2`.
26238
+ :param str ignore_roles: Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment). Example: `role1,role2`.
26090
26239
  :param str method: Enum: `dump`, `replication`. The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
26091
26240
  :param str password: Password for authentication with the server where to migrate data from. Example: `jjKk45Nnd`.
26092
26241
  :param bool ssl: The server where to migrate data from is secured with SSL. Default: `true`.
@@ -26098,6 +26247,8 @@ class GetDragonflyDragonflyUserConfigMigrationResult(dict):
26098
26247
  pulumi.set(__self__, "dbname", dbname)
26099
26248
  if ignore_dbs is not None:
26100
26249
  pulumi.set(__self__, "ignore_dbs", ignore_dbs)
26250
+ if ignore_roles is not None:
26251
+ pulumi.set(__self__, "ignore_roles", ignore_roles)
26101
26252
  if method is not None:
26102
26253
  pulumi.set(__self__, "method", method)
26103
26254
  if password is not None:
@@ -26139,6 +26290,14 @@ class GetDragonflyDragonflyUserConfigMigrationResult(dict):
26139
26290
  """
26140
26291
  return pulumi.get(self, "ignore_dbs")
26141
26292
 
26293
+ @property
26294
+ @pulumi.getter(name="ignoreRoles")
26295
+ def ignore_roles(self) -> Optional[str]:
26296
+ """
26297
+ Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment). Example: `role1,role2`.
26298
+ """
26299
+ return pulumi.get(self, "ignore_roles")
26300
+
26142
26301
  @property
26143
26302
  @pulumi.getter
26144
26303
  def method(self) -> Optional[str]:
@@ -29830,6 +29989,7 @@ class GetKafkaKafkaUserConfigResult(dict):
29830
29989
  additional_backup_regions: Optional[str] = None,
29831
29990
  aiven_kafka_topic_messages: Optional[bool] = None,
29832
29991
  custom_domain: Optional[str] = None,
29992
+ follower_fetching: Optional['outputs.GetKafkaKafkaUserConfigFollowerFetchingResult'] = None,
29833
29993
  ip_filter_objects: Optional[Sequence['outputs.GetKafkaKafkaUserConfigIpFilterObjectResult']] = None,
29834
29994
  ip_filter_strings: Optional[Sequence[str]] = None,
29835
29995
  ip_filters: Optional[Sequence[str]] = None,
@@ -29855,6 +30015,7 @@ class GetKafkaKafkaUserConfigResult(dict):
29855
30015
  :param str additional_backup_regions: Additional Cloud Regions for Backup Replication.
29856
30016
  :param bool aiven_kafka_topic_messages: Allow access to read Kafka topic messages in the Aiven Console and REST API.
29857
30017
  :param str custom_domain: Serve the web frontend using a custom CNAME pointing to the Aiven DNS name. Example: `grafana.example.org`.
30018
+ :param 'GetKafkaKafkaUserConfigFollowerFetchingArgs' follower_fetching: Enable follower fetching
29858
30019
  :param Sequence['GetKafkaKafkaUserConfigIpFilterObjectArgs'] ip_filter_objects: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`
29859
30020
  :param Sequence[str] ip_filter_strings: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`.
29860
30021
  :param Sequence[str] ip_filters: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`.
@@ -29882,6 +30043,8 @@ class GetKafkaKafkaUserConfigResult(dict):
29882
30043
  pulumi.set(__self__, "aiven_kafka_topic_messages", aiven_kafka_topic_messages)
29883
30044
  if custom_domain is not None:
29884
30045
  pulumi.set(__self__, "custom_domain", custom_domain)
30046
+ if follower_fetching is not None:
30047
+ pulumi.set(__self__, "follower_fetching", follower_fetching)
29885
30048
  if ip_filter_objects is not None:
29886
30049
  pulumi.set(__self__, "ip_filter_objects", ip_filter_objects)
29887
30050
  if ip_filter_strings is not None:
@@ -29950,6 +30113,14 @@ class GetKafkaKafkaUserConfigResult(dict):
29950
30113
  """
29951
30114
  return pulumi.get(self, "custom_domain")
29952
30115
 
30116
+ @property
30117
+ @pulumi.getter(name="followerFetching")
30118
+ def follower_fetching(self) -> Optional['outputs.GetKafkaKafkaUserConfigFollowerFetchingResult']:
30119
+ """
30120
+ Enable follower fetching
30121
+ """
30122
+ return pulumi.get(self, "follower_fetching")
30123
+
29953
30124
  @property
29954
30125
  @pulumi.getter(name="ipFilterObjects")
29955
30126
  def ip_filter_objects(self) -> Optional[Sequence['outputs.GetKafkaKafkaUserConfigIpFilterObjectResult']]:
@@ -30117,6 +30288,25 @@ class GetKafkaKafkaUserConfigResult(dict):
30117
30288
  return pulumi.get(self, "tiered_storage")
30118
30289
 
30119
30290
 
30291
+ @pulumi.output_type
30292
+ class GetKafkaKafkaUserConfigFollowerFetchingResult(dict):
30293
+ def __init__(__self__, *,
30294
+ enabled: Optional[bool] = None):
30295
+ """
30296
+ :param bool enabled: Whether to enable the follower fetching functionality.
30297
+ """
30298
+ if enabled is not None:
30299
+ pulumi.set(__self__, "enabled", enabled)
30300
+
30301
+ @property
30302
+ @pulumi.getter
30303
+ def enabled(self) -> Optional[bool]:
30304
+ """
30305
+ Whether to enable the follower fetching functionality.
30306
+ """
30307
+ return pulumi.get(self, "enabled")
30308
+
30309
+
30120
30310
  @pulumi.output_type
30121
30311
  class GetKafkaKafkaUserConfigIpFilterObjectResult(dict):
30122
30312
  def __init__(__self__, *,
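Follower fetching is a new top-level block in the Kafka user config. A minimal sketch of enabling it on the `aiven.Kafka` resource follows; the `KafkaKafkaUserConfigFollowerFetchingArgs` input class is assumed to mirror the result type added here, and the project, plan and service names are placeholders.

```python
import pulumi_aiven as aiven

# Minimal sketch: enable follower fetching on a Kafka service.
# The FollowerFetching Args class is assumed to mirror the output type above.
kafka = aiven.Kafka(
    "example-kafka",
    project="my-project",
    cloud_name="google-europe-west1",
    plan="business-4",
    service_name="example-kafka",
    kafka_user_config=aiven.KafkaKafkaUserConfigArgs(
        follower_fetching=aiven.KafkaKafkaUserConfigFollowerFetchingArgs(
            enabled=True,
        ),
    ),
)
```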
@@ -30197,52 +30387,52 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
30197
30387
  transaction_remove_expired_transaction_cleanup_interval_ms: Optional[int] = None,
30198
30388
  transaction_state_log_segment_bytes: Optional[int] = None):
30199
30389
  """
30200
- :param bool auto_create_topics_enable: Enable auto creation of topics.
30201
- :param str compression_type: Enum: `gzip`, `snappy`, `lz4`, `zstd`, `uncompressed`, `producer`. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `uncompressed` which is equivalent to no compression; and `producer` which means retain the original compression codec set by the producer.
30202
- :param int connections_max_idle_ms: Idle connections timeout: the server socket processor threads close the connections that idle for longer than this. Example: `540000`.
30203
- :param int default_replication_factor: Replication factor for autocreated topics.
30204
- :param int group_initial_rebalance_delay_ms: The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. Example: `3000`.
30205
- :param int group_max_session_timeout_ms: The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. Example: `1800000`.
30206
- :param int group_min_session_timeout_ms: The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. Example: `6000`.
30207
- :param int log_cleaner_delete_retention_ms: How long are delete records retained? Example: `86400000`.
30208
- :param int log_cleaner_max_compaction_lag_ms: The maximum amount of time message will remain uncompacted. Only applicable for logs that are being compacted.
30209
- :param float log_cleaner_min_cleanable_ratio: Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option. Example: `0.5`.
30210
- :param int log_cleaner_min_compaction_lag_ms: The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.
30211
- :param str log_cleanup_policy: Enum: `delete`, `compact`, `compact,delete`. The default cleanup policy for segments beyond the retention window.
30212
- :param int log_flush_interval_messages: The number of messages accumulated on a log partition before messages are flushed to disk. Example: `9223372036854775807`.
30213
- :param int log_flush_interval_ms: The maximum time in ms that a message in any topic is kept in memory before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used.
30214
- :param int log_index_interval_bytes: The interval with which Kafka adds an entry to the offset index. Example: `4096`.
30215
- :param int log_index_size_max_bytes: The maximum size in bytes of the offset index. Example: `10485760`.
30216
- :param int log_local_retention_bytes: The maximum size of local log segments that can grow for a partition before it gets eligible for deletion. If set to -2, the value of log.retention.bytes is used. The effective value should always be less than or equal to log.retention.bytes value.
30217
- :param int log_local_retention_ms: The number of milliseconds to keep the local log segments before it gets eligible for deletion. If set to -2, the value of log.retention.ms is used. The effective value should always be less than or equal to log.retention.ms value.
30218
- :param bool log_message_downconversion_enable: This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests.
30219
- :param int log_message_timestamp_difference_max_ms: The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message.
30220
- :param str log_message_timestamp_type: Enum: `CreateTime`, `LogAppendTime`. Define whether the timestamp in the message is message create time or log append time.
30221
- :param bool log_preallocate: Should pre allocate file when create new segment?
30222
- :param int log_retention_bytes: The maximum size of the log before deleting messages.
30223
- :param int log_retention_hours: The number of hours to keep a log file before deleting it.
30224
- :param int log_retention_ms: The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied.
30225
- :param int log_roll_jitter_ms: The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used.
30226
- :param int log_roll_ms: The maximum time before a new log segment is rolled out (in milliseconds).
30227
- :param int log_segment_bytes: The maximum size of a single log file.
30228
- :param int log_segment_delete_delay_ms: The amount of time to wait before deleting a file from the filesystem. Example: `60000`.
30229
- :param int max_connections_per_ip: The maximum number of connections allowed from each ip address (defaults to 2147483647).
30230
- :param int max_incremental_fetch_session_cache_slots: The maximum number of incremental fetch sessions that the broker will maintain. Example: `1000`.
30231
- :param int message_max_bytes: The maximum size of message that the server can receive. Example: `1048588`.
30232
- :param int min_insync_replicas: When a producer sets acks to `all` (or `-1`), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful. Example: `1`.
30233
- :param int num_partitions: Number of partitions for autocreated topics.
30234
- :param int offsets_retention_minutes: Log retention window in minutes for offsets topic. Example: `10080`.
30235
- :param int producer_purgatory_purge_interval_requests: The purge interval (in number of requests) of the producer request purgatory(defaults to 1000).
30236
- :param int replica_fetch_max_bytes: The number of bytes of messages to attempt to fetch for each partition (defaults to 1048576). This is not an absolute maximum, if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made.
30237
- :param int replica_fetch_response_max_bytes: Maximum bytes expected for the entire fetch response (defaults to 10485760). Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum.
30238
- :param str sasl_oauthbearer_expected_audience: The (optional) comma-delimited setting for the broker to use to verify that the JWT was issued for one of the expected audiences.
30239
- :param str sasl_oauthbearer_expected_issuer: Optional setting for the broker to use to verify that the JWT was created by the expected issuer.
30240
- :param str sasl_oauthbearer_jwks_endpoint_url: OIDC JWKS endpoint URL. By setting this the SASL SSL OAuth2/OIDC authentication is enabled. See also other options for SASL OAuth2/OIDC.
30241
- :param str sasl_oauthbearer_sub_claim_name: Name of the scope from which to extract the subject claim from the JWT. Defaults to sub.
30242
- :param int socket_request_max_bytes: The maximum number of bytes in a socket request (defaults to 104857600).
30243
- :param bool transaction_partition_verification_enable: Enable verification that checks that the partition has been added to the transaction before writing transactional records to the partition.
30244
- :param int transaction_remove_expired_transaction_cleanup_interval_ms: The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (defaults to 3600000 (1 hour)).
30245
- :param int transaction_state_log_segment_bytes: The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (defaults to 104857600 (100 mebibytes)).
30390
+ :param bool auto_create_topics_enable: Enable auto-creation of topics. (Default: true).
30391
+ :param str compression_type: Enum: `gzip`, `snappy`, `lz4`, `zstd`, `uncompressed`, `producer`. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `uncompressed` which is equivalent to no compression; and `producer` which means retain the original compression codec set by the producer. (Default: producer).
30392
+ :param int connections_max_idle_ms: Idle connections timeout: the server socket processor threads close the connections that idle for longer than this. (Default: 600000 ms (10 minutes)). Example: `540000`.
30393
+ :param int default_replication_factor: Replication factor for auto-created topics (Default: 3).
30394
+ :param int group_initial_rebalance_delay_ms: The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. (Default: 3000 ms (3 seconds)). Example: `3000`.
30395
+ :param int group_max_session_timeout_ms: The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. Default: 1800000 ms (30 minutes). Example: `1800000`.
30396
+ :param int group_min_session_timeout_ms: The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. (Default: 6000 ms (6 seconds)). Example: `6000`.
30397
+ :param int log_cleaner_delete_retention_ms: How long are delete records retained? (Default: 86400000 (1 day)). Example: `86400000`.
30398
+ :param int log_cleaner_max_compaction_lag_ms: The maximum amount of time message will remain uncompacted. Only applicable for logs that are being compacted. (Default: 9223372036854775807 ms (Long.MAX_VALUE)).
30399
+ :param float log_cleaner_min_cleanable_ratio: Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option. (Default: 0.5). Example: `0.5`.
30400
+ :param int log_cleaner_min_compaction_lag_ms: The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted. (Default: 0 ms).
30401
+ :param str log_cleanup_policy: Enum: `delete`, `compact`, `compact,delete`. The default cleanup policy for segments beyond the retention window (Default: delete).
30402
+ :param int log_flush_interval_messages: The number of messages accumulated on a log partition before messages are flushed to disk (Default: 9223372036854775807 (Long.MAX_VALUE)). Example: `9223372036854775807`.
30403
+ :param int log_flush_interval_ms: The maximum time in ms that a message in any topic is kept in memory (page-cache) before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used (Default: null).
30404
+ :param int log_index_interval_bytes: The interval with which Kafka adds an entry to the offset index (Default: 4096 bytes (4 kibibytes)). Example: `4096`.
30405
+ :param int log_index_size_max_bytes: The maximum size in bytes of the offset index (Default: 10485760 (10 mebibytes)). Example: `10485760`.
30406
+ :param int log_local_retention_bytes: The maximum size of local log segments that can grow for a partition before it gets eligible for deletion. If set to -2, the value of log.retention.bytes is used. The effective value should always be less than or equal to log.retention.bytes value. (Default: -2).
30407
+ :param int log_local_retention_ms: The number of milliseconds to keep the local log segments before it gets eligible for deletion. If set to -2, the value of log.retention.ms is used. The effective value should always be less than or equal to log.retention.ms value. (Default: -2).
30408
+ :param bool log_message_downconversion_enable: This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. (Default: true).
30409
+ :param int log_message_timestamp_difference_max_ms: The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message (Default: 9223372036854775807 (Long.MAX_VALUE)).
30410
+ :param str log_message_timestamp_type: Enum: `CreateTime`, `LogAppendTime`. Define whether the timestamp in the message is message create time or log append time. (Default: CreateTime).
30411
+ :param bool log_preallocate: Should pre allocate file when create new segment? (Default: false).
30412
+ :param int log_retention_bytes: The maximum size of the log before deleting messages (Default: -1).
30413
+ :param int log_retention_hours: The number of hours to keep a log file before deleting it (Default: 168 hours (1 week)).
30414
+ :param int log_retention_ms: The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied. (Default: null, log.retention.hours applies).
30415
+ :param int log_roll_jitter_ms: The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used (Default: null).
30416
+ :param int log_roll_ms: The maximum time before a new log segment is rolled out (in milliseconds). (Default: null, log.roll.hours applies (Default: 168, 7 days)).
30417
+ :param int log_segment_bytes: The maximum size of a single log file (Default: 1073741824 bytes (1 gibibyte)).
30418
+ :param int log_segment_delete_delay_ms: The amount of time to wait before deleting a file from the filesystem (Default: 60000 ms (1 minute)). Example: `60000`.
30419
+ :param int max_connections_per_ip: The maximum number of connections allowed from each ip address (Default: 2147483647).
30420
+ :param int max_incremental_fetch_session_cache_slots: The maximum number of incremental fetch sessions that the broker will maintain. (Default: 1000). Example: `1000`.
30421
+ :param int message_max_bytes: The maximum size of message that the server can receive. (Default: 1048588 bytes (1 mebibyte + 12 bytes)). Example: `1048588`.
30422
+ :param int min_insync_replicas: When a producer sets acks to `all` (or `-1`), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful. (Default: 1). Example: `1`.
30423
+ :param int num_partitions: Number of partitions for auto-created topics (Default: 1).
30424
+ :param int offsets_retention_minutes: Log retention window in minutes for offsets topic (Default: 10080 minutes (7 days)). Example: `10080`.
30425
+ :param int producer_purgatory_purge_interval_requests: The purge interval (in number of requests) of the producer request purgatory (Default: 1000).
30426
+ :param int replica_fetch_max_bytes: The number of bytes of messages to attempt to fetch for each partition. This is not an absolute maximum, if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. (Default: 1048576 bytes (1 mebibyte)).
30427
+ :param int replica_fetch_response_max_bytes: Maximum bytes expected for the entire fetch response. Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum. (Default: 10485760 bytes (10 mebibytes)).
30428
+ :param str sasl_oauthbearer_expected_audience: The (optional) comma-delimited setting for the broker to use to verify that the JWT was issued for one of the expected audiences. (Default: null).
30429
+ :param str sasl_oauthbearer_expected_issuer: Optional setting for the broker to use to verify that the JWT was created by the expected issuer. (Default: null).
30430
+ :param str sasl_oauthbearer_jwks_endpoint_url: OIDC JWKS endpoint URL. By setting this the SASL SSL OAuth2/OIDC authentication is enabled. See also other options for SASL OAuth2/OIDC. (Default: null).
30431
+ :param str sasl_oauthbearer_sub_claim_name: Name of the scope from which to extract the subject claim from the JWT. (Default: sub).
30432
+ :param int socket_request_max_bytes: The maximum number of bytes in a socket request (Default: 104857600 bytes).
30433
+ :param bool transaction_partition_verification_enable: Enable verification that checks that the partition has been added to the transaction before writing transactional records to the partition. (Default: false).
30434
+ :param int transaction_remove_expired_transaction_cleanup_interval_ms: The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (Default: 3600000 ms (1 hour)). Example: `3600000`.
30435
+ :param int transaction_state_log_segment_bytes: The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (Default: 104857600 bytes (100 mebibytes)). Example: `104857600`.
30246
30436
  """
30247
30437
  if auto_create_topics_enable is not None:
30248
30438
  pulumi.set(__self__, "auto_create_topics_enable", auto_create_topics_enable)
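For completeness, a hedged sketch of reading these values back through the Kafka data source; the `kafka_user_configs` attribute name on the lookup result is an assumption based on the result types above and is not introduced in this diff.

```python
import pulumi
import pulumi_aiven as aiven

# Minimal sketch: look up an existing Kafka service and export its user config
# so the broker defaults documented above can be inspected.
# project/service_name are placeholders; kafka_user_configs is an assumed attribute.
looked_up = aiven.get_kafka(project="my-project", service_name="example-kafka")
pulumi.export("kafka_user_config", looked_up.kafka_user_configs)
```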
@@ -30341,7 +30531,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
30341
30531
  @pulumi.getter(name="autoCreateTopicsEnable")
30342
30532
  def auto_create_topics_enable(self) -> Optional[bool]:
30343
30533
  """
30344
- Enable auto creation of topics.
30534
+ Enable auto-creation of topics. (Default: true).
30345
30535
  """
30346
30536
  return pulumi.get(self, "auto_create_topics_enable")
30347
30537
 
@@ -30349,7 +30539,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
30349
30539
  @pulumi.getter(name="compressionType")
30350
30540
  def compression_type(self) -> Optional[str]:
30351
30541
  """
30352
- Enum: `gzip`, `snappy`, `lz4`, `zstd`, `uncompressed`, `producer`. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `uncompressed` which is equivalent to no compression; and `producer` which means retain the original compression codec set by the producer.
30542
+ Enum: `gzip`, `snappy`, `lz4`, `zstd`, `uncompressed`, `producer`. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `uncompressed` which is equivalent to no compression; and `producer` which means retain the original compression codec set by the producer. (Default: producer).
30353
30543
  """
30354
30544
  return pulumi.get(self, "compression_type")
30355
30545
 
@@ -30357,7 +30547,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
30357
30547
  @pulumi.getter(name="connectionsMaxIdleMs")
30358
30548
  def connections_max_idle_ms(self) -> Optional[int]:
30359
30549
  """
30360
- Idle connections timeout: the server socket processor threads close the connections that idle for longer than this. Example: `540000`.
30550
+ Idle connections timeout: the server socket processor threads close the connections that idle for longer than this. (Default: 600000 ms (10 minutes)). Example: `540000`.
30361
30551
  """
30362
30552
  return pulumi.get(self, "connections_max_idle_ms")
30363
30553
 
@@ -30365,7 +30555,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
30365
30555
  @pulumi.getter(name="defaultReplicationFactor")
30366
30556
  def default_replication_factor(self) -> Optional[int]:
30367
30557
  """
30368
- Replication factor for autocreated topics.
30558
+ Replication factor for auto-created topics (Default: 3).
30369
30559
  """
30370
30560
  return pulumi.get(self, "default_replication_factor")
30371
30561
 
@@ -30373,7 +30563,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
30373
30563
  @pulumi.getter(name="groupInitialRebalanceDelayMs")
30374
30564
  def group_initial_rebalance_delay_ms(self) -> Optional[int]:
30375
30565
  """
30376
- The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. Example: `3000`.
30566
+ The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. (Default: 3000 ms (3 seconds)). Example: `3000`.
30377
30567
  """
30378
30568
  return pulumi.get(self, "group_initial_rebalance_delay_ms")
30379
30569
 
@@ -30381,7 +30571,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
30381
30571
  @pulumi.getter(name="groupMaxSessionTimeoutMs")
30382
30572
  def group_max_session_timeout_ms(self) -> Optional[int]:
30383
30573
  """
30384
- The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. Example: `1800000`.
30574
+ The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. Default: 1800000 ms (30 minutes). Example: `1800000`.
30385
30575
  """
30386
30576
  return pulumi.get(self, "group_max_session_timeout_ms")
30387
30577
 
@@ -30389,7 +30579,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
30389
30579
  @pulumi.getter(name="groupMinSessionTimeoutMs")
30390
30580
  def group_min_session_timeout_ms(self) -> Optional[int]:
30391
30581
  """
30392
- The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. Example: `6000`.
30582
+ The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. (Default: 6000 ms (6 seconds)). Example: `6000`.
30393
30583
  """
30394
30584
  return pulumi.get(self, "group_min_session_timeout_ms")
30395
30585
 
@@ -30397,7 +30587,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
30397
30587
  @pulumi.getter(name="logCleanerDeleteRetentionMs")
30398
30588
  def log_cleaner_delete_retention_ms(self) -> Optional[int]:
30399
30589
  """
30400
- How long are delete records retained? Example: `86400000`.
30590
+ How long are delete records retained? (Default: 86400000 (1 day)). Example: `86400000`.
30401
30591
  """
30402
30592
  return pulumi.get(self, "log_cleaner_delete_retention_ms")
30403
30593
 
@@ -30405,7 +30595,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
30405
30595
  @pulumi.getter(name="logCleanerMaxCompactionLagMs")
30406
30596
  def log_cleaner_max_compaction_lag_ms(self) -> Optional[int]:
30407
30597
  """
30408
- The maximum amount of time message will remain uncompacted. Only applicable for logs that are being compacted.
30598
+ The maximum amount of time message will remain uncompacted. Only applicable for logs that are being compacted. (Default: 9223372036854775807 ms (Long.MAX_VALUE)).
30409
30599
  """
30410
30600
  return pulumi.get(self, "log_cleaner_max_compaction_lag_ms")
30411
30601
 
@@ -30413,7 +30603,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
30413
30603
  @pulumi.getter(name="logCleanerMinCleanableRatio")
30414
30604
  def log_cleaner_min_cleanable_ratio(self) -> Optional[float]:
30415
30605
  """
30416
- Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option. Example: `0.5`.
30606
+ Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option. (Default: 0.5). Example: `0.5`.
30417
30607
  """
30418
30608
  return pulumi.get(self, "log_cleaner_min_cleanable_ratio")
30419
30609
 
@@ -30421,7 +30611,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
30421
30611
  @pulumi.getter(name="logCleanerMinCompactionLagMs")
30422
30612
  def log_cleaner_min_compaction_lag_ms(self) -> Optional[int]:
30423
30613
  """
30424
- The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.
30614
+ The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted. (Default: 0 ms).
30425
30615
  """
30426
30616
  return pulumi.get(self, "log_cleaner_min_compaction_lag_ms")
30427
30617
 
@@ -30429,7 +30619,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
30429
30619
  @pulumi.getter(name="logCleanupPolicy")
30430
30620
  def log_cleanup_policy(self) -> Optional[str]:
30431
30621
  """
30432
- Enum: `delete`, `compact`, `compact,delete`. The default cleanup policy for segments beyond the retention window.
30622
+ Enum: `delete`, `compact`, `compact,delete`. The default cleanup policy for segments beyond the retention window (Default: delete).
30433
30623
  """
30434
30624
  return pulumi.get(self, "log_cleanup_policy")
30435
30625
 
@@ -30437,7 +30627,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
30437
30627
  @pulumi.getter(name="logFlushIntervalMessages")
30438
30628
  def log_flush_interval_messages(self) -> Optional[int]:
30439
30629
  """
30440
- The number of messages accumulated on a log partition before messages are flushed to disk. Example: `9223372036854775807`.
30630
+ The number of messages accumulated on a log partition before messages are flushed to disk (Default: 9223372036854775807 (Long.MAX_VALUE)). Example: `9223372036854775807`.
30441
30631
  """
30442
30632
  return pulumi.get(self, "log_flush_interval_messages")
30443
30633
 
@@ -30445,7 +30635,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
30445
30635
  @pulumi.getter(name="logFlushIntervalMs")
30446
30636
  def log_flush_interval_ms(self) -> Optional[int]:
30447
30637
  """
30448
- The maximum time in ms that a message in any topic is kept in memory before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used.
30638
+ The maximum time in ms that a message in any topic is kept in memory (page-cache) before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used (Default: null).
30449
30639
  """
30450
30640
  return pulumi.get(self, "log_flush_interval_ms")
30451
30641
 
@@ -30453,7 +30643,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
30453
30643
  @pulumi.getter(name="logIndexIntervalBytes")
30454
30644
  def log_index_interval_bytes(self) -> Optional[int]:
30455
30645
  """
30456
- The interval with which Kafka adds an entry to the offset index. Example: `4096`.
30646
+ The interval with which Kafka adds an entry to the offset index (Default: 4096 bytes (4 kibibytes)). Example: `4096`.
30457
30647
  """
30458
30648
  return pulumi.get(self, "log_index_interval_bytes")
30459
30649
 
@@ -30461,7 +30651,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
30461
30651
  @pulumi.getter(name="logIndexSizeMaxBytes")
30462
30652
  def log_index_size_max_bytes(self) -> Optional[int]:
30463
30653
  """
30464
- The maximum size in bytes of the offset index. Example: `10485760`.
30654
+ The maximum size in bytes of the offset index (Default: 10485760 (10 mebibytes)). Example: `10485760`.
30465
30655
  """
30466
30656
  return pulumi.get(self, "log_index_size_max_bytes")
30467
30657
 
@@ -30469,7 +30659,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
30469
30659
  @pulumi.getter(name="logLocalRetentionBytes")
30470
30660
  def log_local_retention_bytes(self) -> Optional[int]:
30471
30661
  """
30472
- The maximum size of local log segments that can grow for a partition before it gets eligible for deletion. If set to -2, the value of log.retention.bytes is used. The effective value should always be less than or equal to log.retention.bytes value.
30662
+ The maximum size of local log segments that can grow for a partition before it gets eligible for deletion. If set to -2, the value of log.retention.bytes is used. The effective value should always be less than or equal to log.retention.bytes value. (Default: -2).
30473
30663
  """
30474
30664
  return pulumi.get(self, "log_local_retention_bytes")
30475
30665
 
@@ -30477,7 +30667,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
30477
30667
  @pulumi.getter(name="logLocalRetentionMs")
30478
30668
  def log_local_retention_ms(self) -> Optional[int]:
30479
30669
  """
30480
- The number of milliseconds to keep the local log segments before it gets eligible for deletion. If set to -2, the value of log.retention.ms is used. The effective value should always be less than or equal to log.retention.ms value.
30670
+ The number of milliseconds to keep the local log segments before it gets eligible for deletion. If set to -2, the value of log.retention.ms is used. The effective value should always be less than or equal to log.retention.ms value. (Default: -2).
30481
30671
  """
30482
30672
  return pulumi.get(self, "log_local_retention_ms")
30483
30673
 
@@ -30485,7 +30675,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
30485
30675
  @pulumi.getter(name="logMessageDownconversionEnable")
30486
30676
  def log_message_downconversion_enable(self) -> Optional[bool]:
30487
30677
  """
30488
- This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests.
30678
+ This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. (Default: true).
30489
30679
  """
30490
30680
  return pulumi.get(self, "log_message_downconversion_enable")
30491
30681
 
@@ -30493,7 +30683,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
30493
30683
  @pulumi.getter(name="logMessageTimestampDifferenceMaxMs")
30494
30684
  def log_message_timestamp_difference_max_ms(self) -> Optional[int]:
30495
30685
  """
30496
- The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message.
30686
+ The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message (Default: 9223372036854775807 (Long.MAX_VALUE)).
30497
30687
  """
30498
30688
  return pulumi.get(self, "log_message_timestamp_difference_max_ms")
30499
30689
 
@@ -30501,7 +30691,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
30501
30691
  @pulumi.getter(name="logMessageTimestampType")
30502
30692
  def log_message_timestamp_type(self) -> Optional[str]:
30503
30693
  """
30504
- Enum: `CreateTime`, `LogAppendTime`. Define whether the timestamp in the message is message create time or log append time.
30694
+ Enum: `CreateTime`, `LogAppendTime`. Define whether the timestamp in the message is message create time or log append time. (Default: CreateTime).
30505
30695
  """
30506
30696
  return pulumi.get(self, "log_message_timestamp_type")
30507
30697
 
@@ -30509,7 +30699,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
30509
30699
  @pulumi.getter(name="logPreallocate")
30510
30700
  def log_preallocate(self) -> Optional[bool]:
30511
30701
  """
30512
- Should pre allocate file when create new segment?
30702
+ Should pre-allocate the file when creating a new segment? (Default: false).
30513
30703
  """
30514
30704
  return pulumi.get(self, "log_preallocate")
30515
30705
 
@@ -30517,7 +30707,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
30517
30707
  @pulumi.getter(name="logRetentionBytes")
30518
30708
  def log_retention_bytes(self) -> Optional[int]:
30519
30709
  """
30520
- The maximum size of the log before deleting messages.
30710
+ The maximum size of the log before deleting messages (Default: -1).
30521
30711
  """
30522
30712
  return pulumi.get(self, "log_retention_bytes")
30523
30713
 
@@ -30525,7 +30715,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
30525
30715
  @pulumi.getter(name="logRetentionHours")
30526
30716
  def log_retention_hours(self) -> Optional[int]:
30527
30717
  """
30528
- The number of hours to keep a log file before deleting it.
30718
+ The number of hours to keep a log file before deleting it (Default: 168 hours (1 week)).
30529
30719
  """
30530
30720
  return pulumi.get(self, "log_retention_hours")
30531
30721
 
@@ -30533,7 +30723,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
30533
30723
  @pulumi.getter(name="logRetentionMs")
30534
30724
  def log_retention_ms(self) -> Optional[int]:
30535
30725
  """
30536
- The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied.
30726
+ The number of milliseconds to keep a log file before deleting it. If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied. (Default: null, log.retention.hours applies).
30537
30727
  """
30538
30728
  return pulumi.get(self, "log_retention_ms")
30539
30729
 
@@ -30541,7 +30731,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
30541
30731
  @pulumi.getter(name="logRollJitterMs")
30542
30732
  def log_roll_jitter_ms(self) -> Optional[int]:
30543
30733
  """
30544
- The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used.
30734
+ The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used (Default: null).
30545
30735
  """
30546
30736
  return pulumi.get(self, "log_roll_jitter_ms")
30547
30737
 
@@ -30549,7 +30739,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
30549
30739
  @pulumi.getter(name="logRollMs")
30550
30740
  def log_roll_ms(self) -> Optional[int]:
30551
30741
  """
30552
- The maximum time before a new log segment is rolled out (in milliseconds).
30742
+ The maximum time before a new log segment is rolled out (in milliseconds). (Default: null, log.roll.hours applies (Default: 168, 7 days)).
30553
30743
  """
30554
30744
  return pulumi.get(self, "log_roll_ms")
30555
30745
 
@@ -30557,7 +30747,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
30557
30747
  @pulumi.getter(name="logSegmentBytes")
30558
30748
  def log_segment_bytes(self) -> Optional[int]:
30559
30749
  """
30560
- The maximum size of a single log file.
30750
+ The maximum size of a single log file (Default: 1073741824 bytes (1 gibibyte)).
30561
30751
  """
30562
30752
  return pulumi.get(self, "log_segment_bytes")
30563
30753
 
@@ -30565,7 +30755,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
30565
30755
  @pulumi.getter(name="logSegmentDeleteDelayMs")
30566
30756
  def log_segment_delete_delay_ms(self) -> Optional[int]:
30567
30757
  """
30568
- The amount of time to wait before deleting a file from the filesystem. Example: `60000`.
30758
+ The amount of time to wait before deleting a file from the filesystem (Default: 60000 ms (1 minute)). Example: `60000`.
30569
30759
  """
30570
30760
  return pulumi.get(self, "log_segment_delete_delay_ms")
30571
30761
 
@@ -30573,7 +30763,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
30573
30763
  @pulumi.getter(name="maxConnectionsPerIp")
30574
30764
  def max_connections_per_ip(self) -> Optional[int]:
30575
30765
  """
30576
- The maximum number of connections allowed from each ip address (defaults to 2147483647).
30766
+ The maximum number of connections allowed from each IP address (Default: 2147483647).
30577
30767
  """
30578
30768
  return pulumi.get(self, "max_connections_per_ip")
30579
30769
 
@@ -30581,7 +30771,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
30581
30771
  @pulumi.getter(name="maxIncrementalFetchSessionCacheSlots")
30582
30772
  def max_incremental_fetch_session_cache_slots(self) -> Optional[int]:
30583
30773
  """
30584
- The maximum number of incremental fetch sessions that the broker will maintain. Example: `1000`.
30774
+ The maximum number of incremental fetch sessions that the broker will maintain. (Default: 1000). Example: `1000`.
30585
30775
  """
30586
30776
  return pulumi.get(self, "max_incremental_fetch_session_cache_slots")
30587
30777
 
@@ -30589,7 +30779,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
30589
30779
  @pulumi.getter(name="messageMaxBytes")
30590
30780
  def message_max_bytes(self) -> Optional[int]:
30591
30781
  """
30592
- The maximum size of message that the server can receive. Example: `1048588`.
30782
+ The maximum size of message that the server can receive. (Default: 1048588 bytes (1 mebibyte + 12 bytes)). Example: `1048588`.
30593
30783
  """
30594
30784
  return pulumi.get(self, "message_max_bytes")
30595
30785
 
@@ -30597,7 +30787,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
30597
30787
  @pulumi.getter(name="minInsyncReplicas")
30598
30788
  def min_insync_replicas(self) -> Optional[int]:
30599
30789
  """
30600
- When a producer sets acks to `all` (or `-1`), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful. Example: `1`.
30790
+ When a producer sets acks to `all` (or `-1`), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful. (Default: 1). Example: `1`.
30601
30791
  """
30602
30792
  return pulumi.get(self, "min_insync_replicas")
30603
30793
 
@@ -30605,7 +30795,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
30605
30795
  @pulumi.getter(name="numPartitions")
30606
30796
  def num_partitions(self) -> Optional[int]:
30607
30797
  """
30608
- Number of partitions for autocreated topics.
30798
+ Number of partitions for auto-created topics (Default: 1).
30609
30799
  """
30610
30800
  return pulumi.get(self, "num_partitions")
30611
30801
 
@@ -30613,7 +30803,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
30613
30803
  @pulumi.getter(name="offsetsRetentionMinutes")
30614
30804
  def offsets_retention_minutes(self) -> Optional[int]:
30615
30805
  """
30616
- Log retention window in minutes for offsets topic. Example: `10080`.
30806
+ Log retention window in minutes for offsets topic (Default: 10080 minutes (7 days)). Example: `10080`.
30617
30807
  """
30618
30808
  return pulumi.get(self, "offsets_retention_minutes")
30619
30809
 
@@ -30621,7 +30811,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
30621
30811
  @pulumi.getter(name="producerPurgatoryPurgeIntervalRequests")
30622
30812
  def producer_purgatory_purge_interval_requests(self) -> Optional[int]:
30623
30813
  """
30624
- The purge interval (in number of requests) of the producer request purgatory(defaults to 1000).
30814
+ The purge interval (in number of requests) of the producer request purgatory (Default: 1000).
30625
30815
  """
30626
30816
  return pulumi.get(self, "producer_purgatory_purge_interval_requests")
30627
30817
 
@@ -30629,7 +30819,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
30629
30819
  @pulumi.getter(name="replicaFetchMaxBytes")
30630
30820
  def replica_fetch_max_bytes(self) -> Optional[int]:
30631
30821
  """
30632
- The number of bytes of messages to attempt to fetch for each partition (defaults to 1048576). This is not an absolute maximum, if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made.
30822
+ The number of bytes of messages to attempt to fetch for each partition. This is not an absolute maximum; if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. (Default: 1048576 bytes (1 mebibyte)).
30633
30823
  """
30634
30824
  return pulumi.get(self, "replica_fetch_max_bytes")
30635
30825
 
@@ -30637,7 +30827,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
30637
30827
  @pulumi.getter(name="replicaFetchResponseMaxBytes")
30638
30828
  def replica_fetch_response_max_bytes(self) -> Optional[int]:
30639
30829
  """
30640
- Maximum bytes expected for the entire fetch response (defaults to 10485760). Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum.
30830
+ Maximum bytes expected for the entire fetch response. Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum. (Default: 10485760 bytes (10 mebibytes)).
30641
30831
  """
30642
30832
  return pulumi.get(self, "replica_fetch_response_max_bytes")
30643
30833
 
@@ -30645,7 +30835,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
30645
30835
  @pulumi.getter(name="saslOauthbearerExpectedAudience")
30646
30836
  def sasl_oauthbearer_expected_audience(self) -> Optional[str]:
30647
30837
  """
30648
- The (optional) comma-delimited setting for the broker to use to verify that the JWT was issued for one of the expected audiences.
30838
+ The (optional) comma-delimited setting for the broker to use to verify that the JWT was issued for one of the expected audiences. (Default: null).
30649
30839
  """
30650
30840
  return pulumi.get(self, "sasl_oauthbearer_expected_audience")
30651
30841
 
@@ -30653,7 +30843,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
30653
30843
  @pulumi.getter(name="saslOauthbearerExpectedIssuer")
30654
30844
  def sasl_oauthbearer_expected_issuer(self) -> Optional[str]:
30655
30845
  """
30656
- Optional setting for the broker to use to verify that the JWT was created by the expected issuer.
30846
+ Optional setting for the broker to use to verify that the JWT was created by the expected issuer. (Default: null).
30657
30847
  """
30658
30848
  return pulumi.get(self, "sasl_oauthbearer_expected_issuer")
30659
30849
 
@@ -30661,7 +30851,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
30661
30851
  @pulumi.getter(name="saslOauthbearerJwksEndpointUrl")
30662
30852
  def sasl_oauthbearer_jwks_endpoint_url(self) -> Optional[str]:
30663
30853
  """
30664
- OIDC JWKS endpoint URL. By setting this the SASL SSL OAuth2/OIDC authentication is enabled. See also other options for SASL OAuth2/OIDC.
30854
+ OIDC JWKS endpoint URL. By setting this the SASL SSL OAuth2/OIDC authentication is enabled. See also other options for SASL OAuth2/OIDC. (Default: null).
30665
30855
  """
30666
30856
  return pulumi.get(self, "sasl_oauthbearer_jwks_endpoint_url")
30667
30857
 
@@ -30669,7 +30859,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
30669
30859
  @pulumi.getter(name="saslOauthbearerSubClaimName")
30670
30860
  def sasl_oauthbearer_sub_claim_name(self) -> Optional[str]:
30671
30861
  """
30672
- Name of the scope from which to extract the subject claim from the JWT. Defaults to sub.
30862
+ Name of the scope from which to extract the subject claim from the JWT. (Default: sub).
30673
30863
  """
30674
30864
  return pulumi.get(self, "sasl_oauthbearer_sub_claim_name")
30675
30865
 
@@ -30677,7 +30867,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
30677
30867
  @pulumi.getter(name="socketRequestMaxBytes")
30678
30868
  def socket_request_max_bytes(self) -> Optional[int]:
30679
30869
  """
30680
- The maximum number of bytes in a socket request (defaults to 104857600).
30870
+ The maximum number of bytes in a socket request (Default: 104857600 bytes).
30681
30871
  """
30682
30872
  return pulumi.get(self, "socket_request_max_bytes")
30683
30873
 
@@ -30685,7 +30875,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
30685
30875
  @pulumi.getter(name="transactionPartitionVerificationEnable")
30686
30876
  def transaction_partition_verification_enable(self) -> Optional[bool]:
30687
30877
  """
30688
- Enable verification that checks that the partition has been added to the transaction before writing transactional records to the partition.
30878
+ Enable verification that checks that the partition has been added to the transaction before writing transactional records to the partition. (Default: false).
30689
30879
  """
30690
30880
  return pulumi.get(self, "transaction_partition_verification_enable")
30691
30881
 
@@ -30693,7 +30883,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
30693
30883
  @pulumi.getter(name="transactionRemoveExpiredTransactionCleanupIntervalMs")
30694
30884
  def transaction_remove_expired_transaction_cleanup_interval_ms(self) -> Optional[int]:
30695
30885
  """
30696
- The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (defaults to 3600000 (1 hour)).
30886
+ The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (Default: 3600000 ms (1 hour)). Example: `3600000`.
30697
30887
  """
30698
30888
  return pulumi.get(self, "transaction_remove_expired_transaction_cleanup_interval_ms")
30699
30889
 
@@ -30701,7 +30891,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
30701
30891
  @pulumi.getter(name="transactionStateLogSegmentBytes")
30702
30892
  def transaction_state_log_segment_bytes(self) -> Optional[int]:
30703
30893
  """
30704
- The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (defaults to 104857600 (100 mebibytes)).
30894
+ The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (Default: 104857600 bytes (100 mebibytes)). Example: `104857600`.
30705
30895
  """
30706
30896
  return pulumi.get(self, "transaction_state_log_segment_bytes")
30707
30897
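
The broker-level defaults documented in this hunk are informational; to override any of them on a service, the input-side counterparts of these getters can be set when declaring the Kafka resource. A minimal, hedged sketch (project, cloud, plan, and service names are placeholders, and the *Args class names are assumed to mirror the result types shown above):

import pulumi_aiven as aiven

# Sketch only: overrides a handful of the broker settings whose defaults are
# documented in this diff (log.retention.*, min.insync.replicas, num.partitions).
kafka = aiven.Kafka(
    "example-kafka",
    project="my-project",                 # placeholder project name
    cloud_name="google-europe-west1",     # placeholder cloud/region
    plan="business-4",                    # placeholder plan
    service_name="example-kafka",
    kafka_user_config=aiven.KafkaKafkaUserConfigArgs(
        kafka=aiven.KafkaKafkaUserConfigKafkaArgs(
            log_retention_bytes=-1,       # documented default: no size-based limit
            log_retention_hours=168,      # documented default: 1 week
            min_insync_replicas=2,        # documented default is 1
            num_partitions=3,             # documented default is 1 for auto-created topics
        ),
    ),
)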
 
@@ -34259,6 +34449,7 @@ class GetMySqlMysqlUserConfigMigrationResult(dict):
34259
34449
  port: int,
34260
34450
  dbname: Optional[str] = None,
34261
34451
  ignore_dbs: Optional[str] = None,
34452
+ ignore_roles: Optional[str] = None,
34262
34453
  method: Optional[str] = None,
34263
34454
  password: Optional[str] = None,
34264
34455
  ssl: Optional[bool] = None,
@@ -34268,6 +34459,7 @@ class GetMySqlMysqlUserConfigMigrationResult(dict):
34268
34459
  :param int port: Port number of the server where to migrate data from. Example: `1234`.
34269
34460
  :param str dbname: Database name for bootstrapping the initial connection. Example: `defaultdb`.
34270
34461
  :param str ignore_dbs: Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment). Example: `db1,db2`.
34462
+ :param str ignore_roles: Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment). Example: `role1,role2`.
34271
34463
  :param str method: Enum: `dump`, `replication`. The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
34272
34464
  :param str password: Password for authentication with the server where to migrate data from. Example: `jjKk45Nnd`.
34273
34465
  :param bool ssl: The server where to migrate data from is secured with SSL. Default: `true`.
@@ -34279,6 +34471,8 @@ class GetMySqlMysqlUserConfigMigrationResult(dict):
34279
34471
  pulumi.set(__self__, "dbname", dbname)
34280
34472
  if ignore_dbs is not None:
34281
34473
  pulumi.set(__self__, "ignore_dbs", ignore_dbs)
34474
+ if ignore_roles is not None:
34475
+ pulumi.set(__self__, "ignore_roles", ignore_roles)
34282
34476
  if method is not None:
34283
34477
  pulumi.set(__self__, "method", method)
34284
34478
  if password is not None:
@@ -34320,6 +34514,14 @@ class GetMySqlMysqlUserConfigMigrationResult(dict):
34320
34514
  """
34321
34515
  return pulumi.get(self, "ignore_dbs")
34322
34516
 
34517
+ @property
34518
+ @pulumi.getter(name="ignoreRoles")
34519
+ def ignore_roles(self) -> Optional[str]:
34520
+ """
34521
+ Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment). Example: `role1,role2`.
34522
+ """
34523
+ return pulumi.get(self, "ignore_roles")
34524
+
34323
34525
  @property
34324
34526
  @pulumi.getter
34325
34527
  def method(self) -> Optional[str]:
@@ -37460,6 +37662,7 @@ class GetPgPgUserConfigMigrationResult(dict):
37460
37662
  port: int,
37461
37663
  dbname: Optional[str] = None,
37462
37664
  ignore_dbs: Optional[str] = None,
37665
+ ignore_roles: Optional[str] = None,
37463
37666
  method: Optional[str] = None,
37464
37667
  password: Optional[str] = None,
37465
37668
  ssl: Optional[bool] = None,
@@ -37469,6 +37672,7 @@ class GetPgPgUserConfigMigrationResult(dict):
37469
37672
  :param int port: Port number of the server where to migrate data from. Example: `1234`.
37470
37673
  :param str dbname: Database name for bootstrapping the initial connection. Example: `defaultdb`.
37471
37674
  :param str ignore_dbs: Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment). Example: `db1,db2`.
37675
+ :param str ignore_roles: Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment). Example: `role1,role2`.
37472
37676
  :param str method: Enum: `dump`, `replication`. The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
37473
37677
  :param str password: Password for authentication with the server where to migrate data from. Example: `jjKk45Nnd`.
37474
37678
  :param bool ssl: The server where to migrate data from is secured with SSL. Default: `true`.
@@ -37480,6 +37684,8 @@ class GetPgPgUserConfigMigrationResult(dict):
37480
37684
  pulumi.set(__self__, "dbname", dbname)
37481
37685
  if ignore_dbs is not None:
37482
37686
  pulumi.set(__self__, "ignore_dbs", ignore_dbs)
37687
+ if ignore_roles is not None:
37688
+ pulumi.set(__self__, "ignore_roles", ignore_roles)
37483
37689
  if method is not None:
37484
37690
  pulumi.set(__self__, "method", method)
37485
37691
  if password is not None:
@@ -37521,6 +37727,14 @@ class GetPgPgUserConfigMigrationResult(dict):
37521
37727
  """
37522
37728
  return pulumi.get(self, "ignore_dbs")
37523
37729
 
37730
+ @property
37731
+ @pulumi.getter(name="ignoreRoles")
37732
+ def ignore_roles(self) -> Optional[str]:
37733
+ """
37734
+ Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment). Example: `role1,role2`.
37735
+ """
37736
+ return pulumi.get(self, "ignore_roles")
37737
+
37524
37738
  @property
37525
37739
  @pulumi.getter
37526
37740
  def method(self) -> Optional[str]:
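
The new `ignore_roles` field sits next to `ignore_dbs` and, per its docstring, is honoured only for PostgreSQL sources; the same field is added to the MySQL, Redis, Dragonfly, and Valkey migration blocks elsewhere in this file. A hedged sketch of the input-side usage (connection details and role names are placeholders):

import pulumi_aiven as aiven

pg = aiven.Pg(
    "example-pg",
    project="my-project",                  # placeholder
    plan="startup-4",                      # placeholder
    service_name="example-pg",
    pg_user_config=aiven.PgPgUserConfigArgs(
        migration=aiven.PgPgUserConfigMigrationArgs(
            host="source-db.example.com",  # placeholder source host
            port=5432,
            ssl=True,
            ignore_dbs="db1,db2",          # databases skipped during migration
            ignore_roles="role1,role2",    # new: roles skipped (PostgreSQL sources only)
        ),
    ),
)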
@@ -38418,6 +38632,7 @@ class GetPgPgUserConfigPgbouncerResult(dict):
38418
38632
  autodb_pool_mode: Optional[str] = None,
38419
38633
  autodb_pool_size: Optional[int] = None,
38420
38634
  ignore_startup_parameters: Optional[Sequence[str]] = None,
38635
+ max_prepared_statements: Optional[int] = None,
38421
38636
  min_pool_size: Optional[int] = None,
38422
38637
  server_idle_timeout: Optional[int] = None,
38423
38638
  server_lifetime: Optional[int] = None,
@@ -38428,6 +38643,7 @@ class GetPgPgUserConfigPgbouncerResult(dict):
38428
38643
  :param str autodb_pool_mode: Enum: `session`, `transaction`, `statement`. PGBouncer pool mode. Default: `transaction`.
38429
38644
  :param int autodb_pool_size: If non-zero then create automatically a pool of that size per user when a pool doesn't exist. Default: `0`.
38430
38645
  :param Sequence[str] ignore_startup_parameters: List of parameters to ignore when given in startup packet.
38646
+ :param int max_prepared_statements: PgBouncer tracks protocol-level commands related to named prepared statements sent by the client in transaction and statement pooling modes when max_prepared_statements is set to a non-zero value. Setting it to 0 disables prepared statements. max_prepared_statements defaults to 100, and its maximum is 3000. Default: `100`.
38431
38647
  :param int min_pool_size: Add more server connections to pool if below this number. Improves behavior when usual load comes suddenly back after period of total inactivity. The value is effectively capped at the pool size. Default: `0`.
38432
38648
  :param int server_idle_timeout: If a server connection has been idle more than this many seconds it will be dropped. If 0 then timeout is disabled. (seconds). Default: `600`.
38433
38649
  :param int server_lifetime: The pooler will close an unused server connection that has been connected longer than this. (seconds). Default: `3600`.
@@ -38443,6 +38659,8 @@ class GetPgPgUserConfigPgbouncerResult(dict):
38443
38659
  pulumi.set(__self__, "autodb_pool_size", autodb_pool_size)
38444
38660
  if ignore_startup_parameters is not None:
38445
38661
  pulumi.set(__self__, "ignore_startup_parameters", ignore_startup_parameters)
38662
+ if max_prepared_statements is not None:
38663
+ pulumi.set(__self__, "max_prepared_statements", max_prepared_statements)
38446
38664
  if min_pool_size is not None:
38447
38665
  pulumi.set(__self__, "min_pool_size", min_pool_size)
38448
38666
  if server_idle_timeout is not None:
@@ -38492,6 +38710,14 @@ class GetPgPgUserConfigPgbouncerResult(dict):
38492
38710
  """
38493
38711
  return pulumi.get(self, "ignore_startup_parameters")
38494
38712
 
38713
+ @property
38714
+ @pulumi.getter(name="maxPreparedStatements")
38715
+ def max_prepared_statements(self) -> Optional[int]:
38716
+ """
38717
+ PgBouncer tracks protocol-level commands related to named prepared statements sent by the client in transaction and statement pooling modes when max_prepared_statements is set to a non-zero value. Setting it to 0 disables prepared statements. max_prepared_statements defaults to 100, and its maximum is 3000. Default: `100`.
38718
+ """
38719
+ return pulumi.get(self, "max_prepared_statements")
38720
+
38495
38721
  @property
38496
38722
  @pulumi.getter(name="minPoolSize")
38497
38723
  def min_pool_size(self) -> Optional[int]:
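
`max_prepared_statements` is the new PgBouncer knob in this release: 0 disables prepared-statement tracking, the default is 100, and the maximum is 3000. A hedged sketch of setting it through the input-side config (resource names are placeholders):

import pulumi_aiven as aiven

pg = aiven.Pg(
    "example-pg",
    project="my-project",                  # placeholder
    plan="startup-4",                      # placeholder
    service_name="example-pg",
    pg_user_config=aiven.PgPgUserConfigArgs(
        pgbouncer=aiven.PgPgUserConfigPgbouncerArgs(
            autodb_pool_mode="transaction",    # documented default
            max_prepared_statements=200,       # new: 0 disables; default 100, maximum 3000
        ),
    ),
)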
@@ -39288,6 +39514,7 @@ class GetRedisRedisUserConfigMigrationResult(dict):
39288
39514
  port: int,
39289
39515
  dbname: Optional[str] = None,
39290
39516
  ignore_dbs: Optional[str] = None,
39517
+ ignore_roles: Optional[str] = None,
39291
39518
  method: Optional[str] = None,
39292
39519
  password: Optional[str] = None,
39293
39520
  ssl: Optional[bool] = None,
@@ -39297,6 +39524,7 @@ class GetRedisRedisUserConfigMigrationResult(dict):
39297
39524
  :param int port: Port number of the server where to migrate data from. Example: `1234`.
39298
39525
  :param str dbname: Database name for bootstrapping the initial connection. Example: `defaultdb`.
39299
39526
  :param str ignore_dbs: Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment). Example: `db1,db2`.
39527
+ :param str ignore_roles: Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment). Example: `role1,role2`.
39300
39528
  :param str method: Enum: `dump`, `replication`. The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
39301
39529
  :param str password: Password for authentication with the server where to migrate data from. Example: `jjKk45Nnd`.
39302
39530
  :param bool ssl: The server where to migrate data from is secured with SSL. Default: `true`.
@@ -39308,6 +39536,8 @@ class GetRedisRedisUserConfigMigrationResult(dict):
39308
39536
  pulumi.set(__self__, "dbname", dbname)
39309
39537
  if ignore_dbs is not None:
39310
39538
  pulumi.set(__self__, "ignore_dbs", ignore_dbs)
39539
+ if ignore_roles is not None:
39540
+ pulumi.set(__self__, "ignore_roles", ignore_roles)
39311
39541
  if method is not None:
39312
39542
  pulumi.set(__self__, "method", method)
39313
39543
  if password is not None:
@@ -39349,6 +39579,14 @@ class GetRedisRedisUserConfigMigrationResult(dict):
39349
39579
  """
39350
39580
  return pulumi.get(self, "ignore_dbs")
39351
39581
 
39582
+ @property
39583
+ @pulumi.getter(name="ignoreRoles")
39584
+ def ignore_roles(self) -> Optional[str]:
39585
+ """
39586
+ Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment). Example: `role1,role2`.
39587
+ """
39588
+ return pulumi.get(self, "ignore_roles")
39589
+
39352
39590
  @property
39353
39591
  @pulumi.getter
39354
39592
  def method(self) -> Optional[str]:
@@ -39585,7 +39823,9 @@ class GetServiceIntegrationClickhouseKafkaUserConfigTableResult(dict):
39585
39823
  max_rows_per_message: Optional[int] = None,
39586
39824
  num_consumers: Optional[int] = None,
39587
39825
  poll_max_batch_size: Optional[int] = None,
39588
- skip_broken_messages: Optional[int] = None):
39826
+ poll_max_timeout_ms: Optional[int] = None,
39827
+ skip_broken_messages: Optional[int] = None,
39828
+ thread_per_consumer: Optional[bool] = None):
39589
39829
  """
39590
39830
  :param Sequence['GetServiceIntegrationClickhouseKafkaUserConfigTableColumnArgs'] columns: Table columns
39591
39831
  :param str data_format: Enum: `Avro`, `CSV`, `JSONAsString`, `JSONCompactEachRow`, `JSONCompactStringsEachRow`, `JSONEachRow`, `JSONStringsEachRow`, `MsgPack`, `TSKV`, `TSV`, `TabSeparated`, `RawBLOB`, `AvroConfluent`, `Parquet`. Message data format. Default: `JSONEachRow`.
@@ -39599,7 +39839,9 @@ class GetServiceIntegrationClickhouseKafkaUserConfigTableResult(dict):
39599
39839
  :param int max_rows_per_message: The maximum number of rows produced in one kafka message for row-based formats. Default: `1`.
39600
39840
  :param int num_consumers: The number of consumers per table per replica. Default: `1`.
39601
39841
  :param int poll_max_batch_size: Maximum amount of messages to be polled in a single Kafka poll. Default: `0`.
39842
+ :param int poll_max_timeout_ms: Timeout in milliseconds for a single poll from Kafka. Takes the value of the stream_flush_interval_ms server setting by default (500ms). Default: `0`.
39602
39843
  :param int skip_broken_messages: Skip at least this number of broken messages from Kafka topic per block. Default: `0`.
39844
+ :param bool thread_per_consumer: Provide an independent thread for each consumer. All consumers run in the same thread by default. Default: `false`.
39603
39845
  """
39604
39846
  pulumi.set(__self__, "columns", columns)
39605
39847
  pulumi.set(__self__, "data_format", data_format)
@@ -39620,8 +39862,12 @@ class GetServiceIntegrationClickhouseKafkaUserConfigTableResult(dict):
39620
39862
  pulumi.set(__self__, "num_consumers", num_consumers)
39621
39863
  if poll_max_batch_size is not None:
39622
39864
  pulumi.set(__self__, "poll_max_batch_size", poll_max_batch_size)
39865
+ if poll_max_timeout_ms is not None:
39866
+ pulumi.set(__self__, "poll_max_timeout_ms", poll_max_timeout_ms)
39623
39867
  if skip_broken_messages is not None:
39624
39868
  pulumi.set(__self__, "skip_broken_messages", skip_broken_messages)
39869
+ if thread_per_consumer is not None:
39870
+ pulumi.set(__self__, "thread_per_consumer", thread_per_consumer)
39625
39871
 
39626
39872
  @property
39627
39873
  @pulumi.getter
@@ -39719,6 +39965,14 @@ class GetServiceIntegrationClickhouseKafkaUserConfigTableResult(dict):
39719
39965
  """
39720
39966
  return pulumi.get(self, "poll_max_batch_size")
39721
39967
 
39968
+ @property
39969
+ @pulumi.getter(name="pollMaxTimeoutMs")
39970
+ def poll_max_timeout_ms(self) -> Optional[int]:
39971
+ """
39972
+ Timeout in milliseconds for a single poll from Kafka. Takes the value of the stream_flush_interval_ms server setting by default (500ms). Default: `0`.
39973
+ """
39974
+ return pulumi.get(self, "poll_max_timeout_ms")
39975
+
39722
39976
  @property
39723
39977
  @pulumi.getter(name="skipBrokenMessages")
39724
39978
  def skip_broken_messages(self) -> Optional[int]:
@@ -39727,6 +39981,14 @@ class GetServiceIntegrationClickhouseKafkaUserConfigTableResult(dict):
39727
39981
  """
39728
39982
  return pulumi.get(self, "skip_broken_messages")
39729
39983
 
39984
+ @property
39985
+ @pulumi.getter(name="threadPerConsumer")
39986
+ def thread_per_consumer(self) -> Optional[bool]:
39987
+ """
39988
+ Provide an independent thread for each consumer. All consumers run in the same thread by default. Default: `false`.
39989
+ """
39990
+ return pulumi.get(self, "thread_per_consumer")
39991
+
39730
39992
 
39731
39993
  @pulumi.output_type
39732
39994
  class GetServiceIntegrationClickhouseKafkaUserConfigTableColumnResult(dict):
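
The ClickHouse-Kafka integration table gains `poll_max_timeout_ms` (0 falls back to the stream_flush_interval_ms server setting, about 500 ms) and `thread_per_consumer`. A hedged sketch of the input side, assuming the *Args classes mirror the result types above and that the named Kafka and ClickHouse services already exist:

import pulumi_aiven as aiven

integration = aiven.ServiceIntegration(
    "clickhouse-kafka",
    project="my-project",                            # placeholder
    integration_type="clickhouse_kafka",
    source_service_name="example-kafka",             # assumed existing Kafka service
    destination_service_name="example-clickhouse",   # assumed existing ClickHouse service
    clickhouse_kafka_user_config=aiven.ServiceIntegrationClickhouseKafkaUserConfigArgs(
        tables=[aiven.ServiceIntegrationClickhouseKafkaUserConfigTableArgs(
            name="events",
            data_format="JSONEachRow",               # documented default format
            group_name="clickhouse-ingest",
            columns=[aiven.ServiceIntegrationClickhouseKafkaUserConfigTableColumnArgs(
                name="id",
                type="UInt64",
            )],
            topics=[aiven.ServiceIntegrationClickhouseKafkaUserConfigTableTopicArgs(
                name="events",
            )],
            poll_max_timeout_ms=500,      # new: single-poll timeout; 0 inherits stream_flush_interval_ms
            thread_per_consumer=True,     # new: one thread per consumer instead of a shared thread
        )],
    ),
)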
@@ -43059,6 +43321,7 @@ class GetValkeyValkeyUserConfigMigrationResult(dict):
43059
43321
  port: int,
43060
43322
  dbname: Optional[str] = None,
43061
43323
  ignore_dbs: Optional[str] = None,
43324
+ ignore_roles: Optional[str] = None,
43062
43325
  method: Optional[str] = None,
43063
43326
  password: Optional[str] = None,
43064
43327
  ssl: Optional[bool] = None,
@@ -43068,6 +43331,7 @@ class GetValkeyValkeyUserConfigMigrationResult(dict):
43068
43331
  :param int port: Port number of the server where to migrate data from. Example: `1234`.
43069
43332
  :param str dbname: Database name for bootstrapping the initial connection. Example: `defaultdb`.
43070
43333
  :param str ignore_dbs: Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment). Example: `db1,db2`.
43334
+ :param str ignore_roles: Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment). Example: `role1,role2`.
43071
43335
  :param str method: Enum: `dump`, `replication`. The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
43072
43336
  :param str password: Password for authentication with the server where to migrate data from. Example: `jjKk45Nnd`.
43073
43337
  :param bool ssl: The server where to migrate data from is secured with SSL. Default: `true`.
@@ -43079,6 +43343,8 @@ class GetValkeyValkeyUserConfigMigrationResult(dict):
43079
43343
  pulumi.set(__self__, "dbname", dbname)
43080
43344
  if ignore_dbs is not None:
43081
43345
  pulumi.set(__self__, "ignore_dbs", ignore_dbs)
43346
+ if ignore_roles is not None:
43347
+ pulumi.set(__self__, "ignore_roles", ignore_roles)
43082
43348
  if method is not None:
43083
43349
  pulumi.set(__self__, "method", method)
43084
43350
  if password is not None:
@@ -43120,6 +43386,14 @@ class GetValkeyValkeyUserConfigMigrationResult(dict):
43120
43386
  """
43121
43387
  return pulumi.get(self, "ignore_dbs")
43122
43388
 
43389
+ @property
43390
+ @pulumi.getter(name="ignoreRoles")
43391
+ def ignore_roles(self) -> Optional[str]:
43392
+ """
43393
+ Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment). Example: `role1,role2`.
43394
+ """
43395
+ return pulumi.get(self, "ignore_roles")
43396
+
43123
43397
  @property
43124
43398
  @pulumi.getter
43125
43399
  def method(self) -> Optional[str]: