pulumi-aiven 6.21.0__py3-none-any.whl → 6.21.0a1721884436__py3-none-any.whl

This diff shows the differences between publicly released versions of this package as published to one of the supported registries. It is provided for informational purposes only and reflects the package contents as they appear in their respective public registries.

Potentially problematic release.


This version of pulumi-aiven might be problematic; see the advisory details for more information.

pulumi_aiven/_inputs.py CHANGED
@@ -100,7 +100,6 @@ __all__ = [
100
100
  'KafkaConnectorTaskArgs',
101
101
  'KafkaKafkaArgs',
102
102
  'KafkaKafkaUserConfigArgs',
103
- 'KafkaKafkaUserConfigFollowerFetchingArgs',
104
103
  'KafkaKafkaUserConfigIpFilterObjectArgs',
105
104
  'KafkaKafkaUserConfigKafkaArgs',
106
105
  'KafkaKafkaUserConfigKafkaAuthenticationMethodsArgs',
@@ -2374,7 +2373,6 @@ class DragonflyDragonflyUserConfigMigrationArgs:
2374
2373
  port: pulumi.Input[int],
2375
2374
  dbname: Optional[pulumi.Input[str]] = None,
2376
2375
  ignore_dbs: Optional[pulumi.Input[str]] = None,
2377
- ignore_roles: Optional[pulumi.Input[str]] = None,
2378
2376
  method: Optional[pulumi.Input[str]] = None,
2379
2377
  password: Optional[pulumi.Input[str]] = None,
2380
2378
  ssl: Optional[pulumi.Input[bool]] = None,
@@ -2384,7 +2382,6 @@ class DragonflyDragonflyUserConfigMigrationArgs:
2384
2382
  :param pulumi.Input[int] port: Port number of the server where to migrate data from. Example: `1234`.
2385
2383
  :param pulumi.Input[str] dbname: Database name for bootstrapping the initial connection. Example: `defaultdb`.
2386
2384
  :param pulumi.Input[str] ignore_dbs: Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment). Example: `db1,db2`.
2387
- :param pulumi.Input[str] ignore_roles: Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment). Example: `role1,role2`.
2388
2385
  :param pulumi.Input[str] method: Enum: `dump`, `replication`. The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
2389
2386
  :param pulumi.Input[str] password: Password for authentication with the server where to migrate data from. Example: `jjKk45Nnd`.
2390
2387
  :param pulumi.Input[bool] ssl: The server where to migrate data from is secured with SSL. Default: `true`.
@@ -2396,8 +2393,6 @@ class DragonflyDragonflyUserConfigMigrationArgs:
2396
2393
  pulumi.set(__self__, "dbname", dbname)
2397
2394
  if ignore_dbs is not None:
2398
2395
  pulumi.set(__self__, "ignore_dbs", ignore_dbs)
2399
- if ignore_roles is not None:
2400
- pulumi.set(__self__, "ignore_roles", ignore_roles)
2401
2396
  if method is not None:
2402
2397
  pulumi.set(__self__, "method", method)
2403
2398
  if password is not None:
@@ -2455,18 +2450,6 @@ class DragonflyDragonflyUserConfigMigrationArgs:
2455
2450
  def ignore_dbs(self, value: Optional[pulumi.Input[str]]):
2456
2451
  pulumi.set(self, "ignore_dbs", value)
2457
2452
 
2458
- @property
2459
- @pulumi.getter(name="ignoreRoles")
2460
- def ignore_roles(self) -> Optional[pulumi.Input[str]]:
2461
- """
2462
- Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment). Example: `role1,role2`.
2463
- """
2464
- return pulumi.get(self, "ignore_roles")
2465
-
2466
- @ignore_roles.setter
2467
- def ignore_roles(self, value: Optional[pulumi.Input[str]]):
2468
- pulumi.set(self, "ignore_roles", value)
2469
-
2470
2453
  @property
2471
2454
  @pulumi.getter
2472
2455
  def method(self) -> Optional[pulumi.Input[str]]:
@@ -7361,7 +7344,6 @@ class KafkaKafkaUserConfigArgs:
7361
7344
  additional_backup_regions: Optional[pulumi.Input[str]] = None,
7362
7345
  aiven_kafka_topic_messages: Optional[pulumi.Input[bool]] = None,
7363
7346
  custom_domain: Optional[pulumi.Input[str]] = None,
7364
- follower_fetching: Optional[pulumi.Input['KafkaKafkaUserConfigFollowerFetchingArgs']] = None,
7365
7347
  ip_filter_objects: Optional[pulumi.Input[Sequence[pulumi.Input['KafkaKafkaUserConfigIpFilterObjectArgs']]]] = None,
7366
7348
  ip_filter_strings: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
7367
7349
  ip_filters: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
@@ -7387,7 +7369,6 @@ class KafkaKafkaUserConfigArgs:
7387
7369
  :param pulumi.Input[str] additional_backup_regions: Additional Cloud Regions for Backup Replication.
7388
7370
  :param pulumi.Input[bool] aiven_kafka_topic_messages: Allow access to read Kafka topic messages in the Aiven Console and REST API.
7389
7371
  :param pulumi.Input[str] custom_domain: Serve the web frontend using a custom CNAME pointing to the Aiven DNS name. Example: `grafana.example.org`.
7390
- :param pulumi.Input['KafkaKafkaUserConfigFollowerFetchingArgs'] follower_fetching: Enable follower fetching
7391
7372
  :param pulumi.Input[Sequence[pulumi.Input['KafkaKafkaUserConfigIpFilterObjectArgs']]] ip_filter_objects: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`
7392
7373
  :param pulumi.Input[Sequence[pulumi.Input[str]]] ip_filter_strings: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`.
7393
7374
  :param pulumi.Input[Sequence[pulumi.Input[str]]] ip_filters: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`.
@@ -7418,8 +7399,6 @@ class KafkaKafkaUserConfigArgs:
7418
7399
  pulumi.set(__self__, "aiven_kafka_topic_messages", aiven_kafka_topic_messages)
7419
7400
  if custom_domain is not None:
7420
7401
  pulumi.set(__self__, "custom_domain", custom_domain)
7421
- if follower_fetching is not None:
7422
- pulumi.set(__self__, "follower_fetching", follower_fetching)
7423
7402
  if ip_filter_objects is not None:
7424
7403
  pulumi.set(__self__, "ip_filter_objects", ip_filter_objects)
7425
7404
  if ip_filter_strings is not None:
@@ -7503,18 +7482,6 @@ class KafkaKafkaUserConfigArgs:
7503
7482
  def custom_domain(self, value: Optional[pulumi.Input[str]]):
7504
7483
  pulumi.set(self, "custom_domain", value)
7505
7484
 
7506
- @property
7507
- @pulumi.getter(name="followerFetching")
7508
- def follower_fetching(self) -> Optional[pulumi.Input['KafkaKafkaUserConfigFollowerFetchingArgs']]:
7509
- """
7510
- Enable follower fetching
7511
- """
7512
- return pulumi.get(self, "follower_fetching")
7513
-
7514
- @follower_fetching.setter
7515
- def follower_fetching(self, value: Optional[pulumi.Input['KafkaKafkaUserConfigFollowerFetchingArgs']]):
7516
- pulumi.set(self, "follower_fetching", value)
7517
-
7518
7485
  @property
7519
7486
  @pulumi.getter(name="ipFilterObjects")
7520
7487
  def ip_filter_objects(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['KafkaKafkaUserConfigIpFilterObjectArgs']]]]:
@@ -7766,29 +7733,6 @@ class KafkaKafkaUserConfigArgs:
7766
7733
  pulumi.set(self, "tiered_storage", value)
7767
7734
 
7768
7735
 
7769
- @pulumi.input_type
7770
- class KafkaKafkaUserConfigFollowerFetchingArgs:
7771
- def __init__(__self__, *,
7772
- enabled: Optional[pulumi.Input[bool]] = None):
7773
- """
7774
- :param pulumi.Input[bool] enabled: Whether to enable the follower fetching functionality.
7775
- """
7776
- if enabled is not None:
7777
- pulumi.set(__self__, "enabled", enabled)
7778
-
7779
- @property
7780
- @pulumi.getter
7781
- def enabled(self) -> Optional[pulumi.Input[bool]]:
7782
- """
7783
- Whether to enable the follower fetching functionality.
7784
- """
7785
- return pulumi.get(self, "enabled")
7786
-
7787
- @enabled.setter
7788
- def enabled(self, value: Optional[pulumi.Input[bool]]):
7789
- pulumi.set(self, "enabled", value)
7790
-
7791
-
7792
7736
  @pulumi.input_type
7793
7737
  class KafkaKafkaUserConfigIpFilterObjectArgs:
7794
7738
  def __init__(__self__, *,
@@ -7877,52 +7821,52 @@ class KafkaKafkaUserConfigKafkaArgs:
7877
7821
  transaction_remove_expired_transaction_cleanup_interval_ms: Optional[pulumi.Input[int]] = None,
7878
7822
  transaction_state_log_segment_bytes: Optional[pulumi.Input[int]] = None):
7879
7823
  """
7880
- :param pulumi.Input[bool] auto_create_topics_enable: Enable auto-creation of topics. (Default: true).
7881
- :param pulumi.Input[str] compression_type: Enum: `gzip`, `snappy`, `lz4`, `zstd`, `uncompressed`, `producer`. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `uncompressed` which is equivalent to no compression; and `producer` which means retain the original compression codec set by the producer.(Default: producer).
7882
- :param pulumi.Input[int] connections_max_idle_ms: Idle connections timeout: the server socket processor threads close the connections that idle for longer than this. (Default: 600000 ms (10 minutes)). Example: `540000`.
7883
- :param pulumi.Input[int] default_replication_factor: Replication factor for auto-created topics (Default: 3).
7884
- :param pulumi.Input[int] group_initial_rebalance_delay_ms: The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. (Default: 3000 ms (3 seconds)). Example: `3000`.
7885
- :param pulumi.Input[int] group_max_session_timeout_ms: The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. Default: 1800000 ms (30 minutes). Example: `1800000`.
7886
- :param pulumi.Input[int] group_min_session_timeout_ms: The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. (Default: 6000 ms (6 seconds)). Example: `6000`.
7887
- :param pulumi.Input[int] log_cleaner_delete_retention_ms: How long are delete records retained? (Default: 86400000 (1 day)). Example: `86400000`.
7888
- :param pulumi.Input[int] log_cleaner_max_compaction_lag_ms: The maximum amount of time message will remain uncompacted. Only applicable for logs that are being compacted. (Default: 9223372036854775807 ms (Long.MAX_VALUE)).
7889
- :param pulumi.Input[float] log_cleaner_min_cleanable_ratio: Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option. (Default: 0.5). Example: `0.5`.
7890
- :param pulumi.Input[int] log_cleaner_min_compaction_lag_ms: The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted. (Default: 0 ms).
7891
- :param pulumi.Input[str] log_cleanup_policy: Enum: `delete`, `compact`, `compact,delete`. The default cleanup policy for segments beyond the retention window (Default: delete).
7892
- :param pulumi.Input[int] log_flush_interval_messages: The number of messages accumulated on a log partition before messages are flushed to disk (Default: 9223372036854775807 (Long.MAX_VALUE)). Example: `9223372036854775807`.
7893
- :param pulumi.Input[int] log_flush_interval_ms: The maximum time in ms that a message in any topic is kept in memory (page-cache) before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used (Default: null).
7894
- :param pulumi.Input[int] log_index_interval_bytes: The interval with which Kafka adds an entry to the offset index (Default: 4096 bytes (4 kibibytes)). Example: `4096`.
7895
- :param pulumi.Input[int] log_index_size_max_bytes: The maximum size in bytes of the offset index (Default: 10485760 (10 mebibytes)). Example: `10485760`.
7896
- :param pulumi.Input[int] log_local_retention_bytes: The maximum size of local log segments that can grow for a partition before it gets eligible for deletion. If set to -2, the value of log.retention.bytes is used. The effective value should always be less than or equal to log.retention.bytes value. (Default: -2).
7897
- :param pulumi.Input[int] log_local_retention_ms: The number of milliseconds to keep the local log segments before it gets eligible for deletion. If set to -2, the value of log.retention.ms is used. The effective value should always be less than or equal to log.retention.ms value. (Default: -2).
7898
- :param pulumi.Input[bool] log_message_downconversion_enable: This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. (Default: true).
7899
- :param pulumi.Input[int] log_message_timestamp_difference_max_ms: The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message (Default: 9223372036854775807 (Long.MAX_VALUE)).
7900
- :param pulumi.Input[str] log_message_timestamp_type: Enum: `CreateTime`, `LogAppendTime`. Define whether the timestamp in the message is message create time or log append time. (Default: CreateTime).
7901
- :param pulumi.Input[bool] log_preallocate: Should pre allocate file when create new segment? (Default: false).
7902
- :param pulumi.Input[int] log_retention_bytes: The maximum size of the log before deleting messages (Default: -1).
7903
- :param pulumi.Input[int] log_retention_hours: The number of hours to keep a log file before deleting it (Default: 168 hours (1 week)).
7904
- :param pulumi.Input[int] log_retention_ms: The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied. (Default: null, log.retention.hours applies).
7905
- :param pulumi.Input[int] log_roll_jitter_ms: The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used (Default: null).
7906
- :param pulumi.Input[int] log_roll_ms: The maximum time before a new log segment is rolled out (in milliseconds). (Default: null, log.roll.hours applies (Default: 168, 7 days)).
7907
- :param pulumi.Input[int] log_segment_bytes: The maximum size of a single log file (Default: 1073741824 bytes (1 gibibyte)).
7908
- :param pulumi.Input[int] log_segment_delete_delay_ms: The amount of time to wait before deleting a file from the filesystem (Default: 60000 ms (1 minute)). Example: `60000`.
7909
- :param pulumi.Input[int] max_connections_per_ip: The maximum number of connections allowed from each ip address (Default: 2147483647).
7910
- :param pulumi.Input[int] max_incremental_fetch_session_cache_slots: The maximum number of incremental fetch sessions that the broker will maintain. (Default: 1000). Example: `1000`.
7911
- :param pulumi.Input[int] message_max_bytes: The maximum size of message that the server can receive. (Default: 1048588 bytes (1 mebibyte + 12 bytes)). Example: `1048588`.
7912
- :param pulumi.Input[int] min_insync_replicas: When a producer sets acks to `all` (or `-1`), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful. (Default: 1). Example: `1`.
7913
- :param pulumi.Input[int] num_partitions: Number of partitions for auto-created topics (Default: 1).
7914
- :param pulumi.Input[int] offsets_retention_minutes: Log retention window in minutes for offsets topic (Default: 10080 minutes (7 days)). Example: `10080`.
7915
- :param pulumi.Input[int] producer_purgatory_purge_interval_requests: The purge interval (in number of requests) of the producer request purgatory (Default: 1000).
7916
- :param pulumi.Input[int] replica_fetch_max_bytes: The number of bytes of messages to attempt to fetch for each partition . This is not an absolute maximum, if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. (Default: 1048576 bytes (1 mebibytes)).
7917
- :param pulumi.Input[int] replica_fetch_response_max_bytes: Maximum bytes expected for the entire fetch response. Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum. (Default: 10485760 bytes (10 mebibytes)).
7918
- :param pulumi.Input[str] sasl_oauthbearer_expected_audience: The (optional) comma-delimited setting for the broker to use to verify that the JWT was issued for one of the expected audiences. (Default: null).
7919
- :param pulumi.Input[str] sasl_oauthbearer_expected_issuer: Optional setting for the broker to use to verify that the JWT was created by the expected issuer.(Default: null).
7920
- :param pulumi.Input[str] sasl_oauthbearer_jwks_endpoint_url: OIDC JWKS endpoint URL. By setting this the SASL SSL OAuth2/OIDC authentication is enabled. See also other options for SASL OAuth2/OIDC. (Default: null).
7921
- :param pulumi.Input[str] sasl_oauthbearer_sub_claim_name: Name of the scope from which to extract the subject claim from the JWT.(Default: sub).
7922
- :param pulumi.Input[int] socket_request_max_bytes: The maximum number of bytes in a socket request (Default: 104857600 bytes).
7923
- :param pulumi.Input[bool] transaction_partition_verification_enable: Enable verification that checks that the partition has been added to the transaction before writing transactional records to the partition. (Default: false).
7924
- :param pulumi.Input[int] transaction_remove_expired_transaction_cleanup_interval_ms: The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (Default: 3600000 ms (1 hour)). Example: `3600000`.
7925
- :param pulumi.Input[int] transaction_state_log_segment_bytes: The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (Default: 104857600 bytes (100 mebibytes)). Example: `104857600`.
7824
+ :param pulumi.Input[bool] auto_create_topics_enable: Enable auto creation of topics.
7825
+ :param pulumi.Input[str] compression_type: Enum: `gzip`, `snappy`, `lz4`, `zstd`, `uncompressed`, `producer`. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `uncompressed` which is equivalent to no compression; and `producer` which means retain the original compression codec set by the producer.
7826
+ :param pulumi.Input[int] connections_max_idle_ms: Idle connections timeout: the server socket processor threads close the connections that idle for longer than this. Example: `540000`.
7827
+ :param pulumi.Input[int] default_replication_factor: Replication factor for autocreated topics.
7828
+ :param pulumi.Input[int] group_initial_rebalance_delay_ms: The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. Example: `3000`.
7829
+ :param pulumi.Input[int] group_max_session_timeout_ms: The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. Example: `1800000`.
7830
+ :param pulumi.Input[int] group_min_session_timeout_ms: The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. Example: `6000`.
7831
+ :param pulumi.Input[int] log_cleaner_delete_retention_ms: How long are delete records retained? Example: `86400000`.
7832
+ :param pulumi.Input[int] log_cleaner_max_compaction_lag_ms: The maximum amount of time message will remain uncompacted. Only applicable for logs that are being compacted.
7833
+ :param pulumi.Input[float] log_cleaner_min_cleanable_ratio: Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option. Example: `0.5`.
7834
+ :param pulumi.Input[int] log_cleaner_min_compaction_lag_ms: The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.
7835
+ :param pulumi.Input[str] log_cleanup_policy: Enum: `delete`, `compact`, `compact,delete`. The default cleanup policy for segments beyond the retention window.
7836
+ :param pulumi.Input[int] log_flush_interval_messages: The number of messages accumulated on a log partition before messages are flushed to disk. Example: `9223372036854775807`.
7837
+ :param pulumi.Input[int] log_flush_interval_ms: The maximum time in ms that a message in any topic is kept in memory before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used.
7838
+ :param pulumi.Input[int] log_index_interval_bytes: The interval with which Kafka adds an entry to the offset index. Example: `4096`.
7839
+ :param pulumi.Input[int] log_index_size_max_bytes: The maximum size in bytes of the offset index. Example: `10485760`.
7840
+ :param pulumi.Input[int] log_local_retention_bytes: The maximum size of local log segments that can grow for a partition before it gets eligible for deletion. If set to -2, the value of log.retention.bytes is used. The effective value should always be less than or equal to log.retention.bytes value.
7841
+ :param pulumi.Input[int] log_local_retention_ms: The number of milliseconds to keep the local log segments before it gets eligible for deletion. If set to -2, the value of log.retention.ms is used. The effective value should always be less than or equal to log.retention.ms value.
7842
+ :param pulumi.Input[bool] log_message_downconversion_enable: This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests.
7843
+ :param pulumi.Input[int] log_message_timestamp_difference_max_ms: The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message.
7844
+ :param pulumi.Input[str] log_message_timestamp_type: Enum: `CreateTime`, `LogAppendTime`. Define whether the timestamp in the message is message create time or log append time.
7845
+ :param pulumi.Input[bool] log_preallocate: Should pre allocate file when create new segment?
7846
+ :param pulumi.Input[int] log_retention_bytes: The maximum size of the log before deleting messages.
7847
+ :param pulumi.Input[int] log_retention_hours: The number of hours to keep a log file before deleting it.
7848
+ :param pulumi.Input[int] log_retention_ms: The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied.
7849
+ :param pulumi.Input[int] log_roll_jitter_ms: The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used.
7850
+ :param pulumi.Input[int] log_roll_ms: The maximum time before a new log segment is rolled out (in milliseconds).
7851
+ :param pulumi.Input[int] log_segment_bytes: The maximum size of a single log file.
7852
+ :param pulumi.Input[int] log_segment_delete_delay_ms: The amount of time to wait before deleting a file from the filesystem. Example: `60000`.
7853
+ :param pulumi.Input[int] max_connections_per_ip: The maximum number of connections allowed from each ip address (defaults to 2147483647).
7854
+ :param pulumi.Input[int] max_incremental_fetch_session_cache_slots: The maximum number of incremental fetch sessions that the broker will maintain. Example: `1000`.
7855
+ :param pulumi.Input[int] message_max_bytes: The maximum size of message that the server can receive. Example: `1048588`.
7856
+ :param pulumi.Input[int] min_insync_replicas: When a producer sets acks to `all` (or `-1`), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful. Example: `1`.
7857
+ :param pulumi.Input[int] num_partitions: Number of partitions for autocreated topics.
7858
+ :param pulumi.Input[int] offsets_retention_minutes: Log retention window in minutes for offsets topic. Example: `10080`.
7859
+ :param pulumi.Input[int] producer_purgatory_purge_interval_requests: The purge interval (in number of requests) of the producer request purgatory(defaults to 1000).
7860
+ :param pulumi.Input[int] replica_fetch_max_bytes: The number of bytes of messages to attempt to fetch for each partition (defaults to 1048576). This is not an absolute maximum, if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made.
7861
+ :param pulumi.Input[int] replica_fetch_response_max_bytes: Maximum bytes expected for the entire fetch response (defaults to 10485760). Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum.
7862
+ :param pulumi.Input[str] sasl_oauthbearer_expected_audience: The (optional) comma-delimited setting for the broker to use to verify that the JWT was issued for one of the expected audiences.
7863
+ :param pulumi.Input[str] sasl_oauthbearer_expected_issuer: Optional setting for the broker to use to verify that the JWT was created by the expected issuer.
7864
+ :param pulumi.Input[str] sasl_oauthbearer_jwks_endpoint_url: OIDC JWKS endpoint URL. By setting this the SASL SSL OAuth2/OIDC authentication is enabled. See also other options for SASL OAuth2/OIDC.
7865
+ :param pulumi.Input[str] sasl_oauthbearer_sub_claim_name: Name of the scope from which to extract the subject claim from the JWT. Defaults to sub.
7866
+ :param pulumi.Input[int] socket_request_max_bytes: The maximum number of bytes in a socket request (defaults to 104857600).
7867
+ :param pulumi.Input[bool] transaction_partition_verification_enable: Enable verification that checks that the partition has been added to the transaction before writing transactional records to the partition.
7868
+ :param pulumi.Input[int] transaction_remove_expired_transaction_cleanup_interval_ms: The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (defaults to 3600000 (1 hour)).
7869
+ :param pulumi.Input[int] transaction_state_log_segment_bytes: The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (defaults to 104857600 (100 mebibytes)).
7926
7870
  """
7927
7871
  if auto_create_topics_enable is not None:
7928
7872
  pulumi.set(__self__, "auto_create_topics_enable", auto_create_topics_enable)
@@ -8021,7 +7965,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8021
7965
  @pulumi.getter(name="autoCreateTopicsEnable")
8022
7966
  def auto_create_topics_enable(self) -> Optional[pulumi.Input[bool]]:
8023
7967
  """
8024
- Enable auto-creation of topics. (Default: true).
7968
+ Enable auto creation of topics.
8025
7969
  """
8026
7970
  return pulumi.get(self, "auto_create_topics_enable")
8027
7971
 
@@ -8033,7 +7977,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8033
7977
  @pulumi.getter(name="compressionType")
8034
7978
  def compression_type(self) -> Optional[pulumi.Input[str]]:
8035
7979
  """
8036
- Enum: `gzip`, `snappy`, `lz4`, `zstd`, `uncompressed`, `producer`. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `uncompressed` which is equivalent to no compression; and `producer` which means retain the original compression codec set by the producer.(Default: producer).
7980
+ Enum: `gzip`, `snappy`, `lz4`, `zstd`, `uncompressed`, `producer`. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `uncompressed` which is equivalent to no compression; and `producer` which means retain the original compression codec set by the producer.
8037
7981
  """
8038
7982
  return pulumi.get(self, "compression_type")
8039
7983
 
@@ -8045,7 +7989,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8045
7989
  @pulumi.getter(name="connectionsMaxIdleMs")
8046
7990
  def connections_max_idle_ms(self) -> Optional[pulumi.Input[int]]:
8047
7991
  """
8048
- Idle connections timeout: the server socket processor threads close the connections that idle for longer than this. (Default: 600000 ms (10 minutes)). Example: `540000`.
7992
+ Idle connections timeout: the server socket processor threads close the connections that idle for longer than this. Example: `540000`.
8049
7993
  """
8050
7994
  return pulumi.get(self, "connections_max_idle_ms")
8051
7995
 
@@ -8057,7 +8001,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8057
8001
  @pulumi.getter(name="defaultReplicationFactor")
8058
8002
  def default_replication_factor(self) -> Optional[pulumi.Input[int]]:
8059
8003
  """
8060
- Replication factor for auto-created topics (Default: 3).
8004
+ Replication factor for autocreated topics.
8061
8005
  """
8062
8006
  return pulumi.get(self, "default_replication_factor")
8063
8007
 
@@ -8069,7 +8013,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8069
8013
  @pulumi.getter(name="groupInitialRebalanceDelayMs")
8070
8014
  def group_initial_rebalance_delay_ms(self) -> Optional[pulumi.Input[int]]:
8071
8015
  """
8072
- The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. (Default: 3000 ms (3 seconds)). Example: `3000`.
8016
+ The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. Example: `3000`.
8073
8017
  """
8074
8018
  return pulumi.get(self, "group_initial_rebalance_delay_ms")
8075
8019
 
@@ -8081,7 +8025,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8081
8025
  @pulumi.getter(name="groupMaxSessionTimeoutMs")
8082
8026
  def group_max_session_timeout_ms(self) -> Optional[pulumi.Input[int]]:
8083
8027
  """
8084
- The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. Default: 1800000 ms (30 minutes). Example: `1800000`.
8028
+ The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. Example: `1800000`.
8085
8029
  """
8086
8030
  return pulumi.get(self, "group_max_session_timeout_ms")
8087
8031
 
@@ -8093,7 +8037,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8093
8037
  @pulumi.getter(name="groupMinSessionTimeoutMs")
8094
8038
  def group_min_session_timeout_ms(self) -> Optional[pulumi.Input[int]]:
8095
8039
  """
8096
- The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. (Default: 6000 ms (6 seconds)). Example: `6000`.
8040
+ The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. Example: `6000`.
8097
8041
  """
8098
8042
  return pulumi.get(self, "group_min_session_timeout_ms")
8099
8043
 
@@ -8105,7 +8049,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8105
8049
  @pulumi.getter(name="logCleanerDeleteRetentionMs")
8106
8050
  def log_cleaner_delete_retention_ms(self) -> Optional[pulumi.Input[int]]:
8107
8051
  """
8108
- How long are delete records retained? (Default: 86400000 (1 day)). Example: `86400000`.
8052
+ How long are delete records retained? Example: `86400000`.
8109
8053
  """
8110
8054
  return pulumi.get(self, "log_cleaner_delete_retention_ms")
8111
8055
 
@@ -8117,7 +8061,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8117
8061
  @pulumi.getter(name="logCleanerMaxCompactionLagMs")
8118
8062
  def log_cleaner_max_compaction_lag_ms(self) -> Optional[pulumi.Input[int]]:
8119
8063
  """
8120
- The maximum amount of time message will remain uncompacted. Only applicable for logs that are being compacted. (Default: 9223372036854775807 ms (Long.MAX_VALUE)).
8064
+ The maximum amount of time message will remain uncompacted. Only applicable for logs that are being compacted.
8121
8065
  """
8122
8066
  return pulumi.get(self, "log_cleaner_max_compaction_lag_ms")
8123
8067
 
@@ -8129,7 +8073,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8129
8073
  @pulumi.getter(name="logCleanerMinCleanableRatio")
8130
8074
  def log_cleaner_min_cleanable_ratio(self) -> Optional[pulumi.Input[float]]:
8131
8075
  """
8132
- Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option. (Default: 0.5). Example: `0.5`.
8076
+ Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option. Example: `0.5`.
8133
8077
  """
8134
8078
  return pulumi.get(self, "log_cleaner_min_cleanable_ratio")
8135
8079
 
@@ -8141,7 +8085,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8141
8085
  @pulumi.getter(name="logCleanerMinCompactionLagMs")
8142
8086
  def log_cleaner_min_compaction_lag_ms(self) -> Optional[pulumi.Input[int]]:
8143
8087
  """
8144
- The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted. (Default: 0 ms).
8088
+ The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.
8145
8089
  """
8146
8090
  return pulumi.get(self, "log_cleaner_min_compaction_lag_ms")
8147
8091
 
@@ -8153,7 +8097,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8153
8097
  @pulumi.getter(name="logCleanupPolicy")
8154
8098
  def log_cleanup_policy(self) -> Optional[pulumi.Input[str]]:
8155
8099
  """
8156
- Enum: `delete`, `compact`, `compact,delete`. The default cleanup policy for segments beyond the retention window (Default: delete).
8100
+ Enum: `delete`, `compact`, `compact,delete`. The default cleanup policy for segments beyond the retention window.
8157
8101
  """
8158
8102
  return pulumi.get(self, "log_cleanup_policy")
8159
8103
 
@@ -8165,7 +8109,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8165
8109
  @pulumi.getter(name="logFlushIntervalMessages")
8166
8110
  def log_flush_interval_messages(self) -> Optional[pulumi.Input[int]]:
8167
8111
  """
8168
- The number of messages accumulated on a log partition before messages are flushed to disk (Default: 9223372036854775807 (Long.MAX_VALUE)). Example: `9223372036854775807`.
8112
+ The number of messages accumulated on a log partition before messages are flushed to disk. Example: `9223372036854775807`.
8169
8113
  """
8170
8114
  return pulumi.get(self, "log_flush_interval_messages")
8171
8115
 
@@ -8177,7 +8121,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8177
8121
  @pulumi.getter(name="logFlushIntervalMs")
8178
8122
  def log_flush_interval_ms(self) -> Optional[pulumi.Input[int]]:
8179
8123
  """
8180
- The maximum time in ms that a message in any topic is kept in memory (page-cache) before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used (Default: null).
8124
+ The maximum time in ms that a message in any topic is kept in memory before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used.
8181
8125
  """
8182
8126
  return pulumi.get(self, "log_flush_interval_ms")
8183
8127
 
@@ -8189,7 +8133,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8189
8133
  @pulumi.getter(name="logIndexIntervalBytes")
8190
8134
  def log_index_interval_bytes(self) -> Optional[pulumi.Input[int]]:
8191
8135
  """
8192
- The interval with which Kafka adds an entry to the offset index (Default: 4096 bytes (4 kibibytes)). Example: `4096`.
8136
+ The interval with which Kafka adds an entry to the offset index. Example: `4096`.
8193
8137
  """
8194
8138
  return pulumi.get(self, "log_index_interval_bytes")
8195
8139
 
@@ -8201,7 +8145,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8201
8145
  @pulumi.getter(name="logIndexSizeMaxBytes")
8202
8146
  def log_index_size_max_bytes(self) -> Optional[pulumi.Input[int]]:
8203
8147
  """
8204
- The maximum size in bytes of the offset index (Default: 10485760 (10 mebibytes)). Example: `10485760`.
8148
+ The maximum size in bytes of the offset index. Example: `10485760`.
8205
8149
  """
8206
8150
  return pulumi.get(self, "log_index_size_max_bytes")
8207
8151
 
@@ -8213,7 +8157,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8213
8157
  @pulumi.getter(name="logLocalRetentionBytes")
8214
8158
  def log_local_retention_bytes(self) -> Optional[pulumi.Input[int]]:
8215
8159
  """
8216
- The maximum size of local log segments that can grow for a partition before it gets eligible for deletion. If set to -2, the value of log.retention.bytes is used. The effective value should always be less than or equal to log.retention.bytes value. (Default: -2).
8160
+ The maximum size of local log segments that can grow for a partition before it gets eligible for deletion. If set to -2, the value of log.retention.bytes is used. The effective value should always be less than or equal to log.retention.bytes value.
8217
8161
  """
8218
8162
  return pulumi.get(self, "log_local_retention_bytes")
8219
8163
 
@@ -8225,7 +8169,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8225
8169
  @pulumi.getter(name="logLocalRetentionMs")
8226
8170
  def log_local_retention_ms(self) -> Optional[pulumi.Input[int]]:
8227
8171
  """
8228
- The number of milliseconds to keep the local log segments before it gets eligible for deletion. If set to -2, the value of log.retention.ms is used. The effective value should always be less than or equal to log.retention.ms value. (Default: -2).
8172
+ The number of milliseconds to keep the local log segments before it gets eligible for deletion. If set to -2, the value of log.retention.ms is used. The effective value should always be less than or equal to log.retention.ms value.
8229
8173
  """
8230
8174
  return pulumi.get(self, "log_local_retention_ms")
8231
8175
 
@@ -8237,7 +8181,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8237
8181
  @pulumi.getter(name="logMessageDownconversionEnable")
8238
8182
  def log_message_downconversion_enable(self) -> Optional[pulumi.Input[bool]]:
8239
8183
  """
8240
- This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. (Default: true).
8184
+ This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests.
8241
8185
  """
8242
8186
  return pulumi.get(self, "log_message_downconversion_enable")
8243
8187
 
@@ -8249,7 +8193,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8249
8193
  @pulumi.getter(name="logMessageTimestampDifferenceMaxMs")
8250
8194
  def log_message_timestamp_difference_max_ms(self) -> Optional[pulumi.Input[int]]:
8251
8195
  """
8252
- The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message (Default: 9223372036854775807 (Long.MAX_VALUE)).
8196
+ The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message.
8253
8197
  """
8254
8198
  return pulumi.get(self, "log_message_timestamp_difference_max_ms")
8255
8199
 
@@ -8261,7 +8205,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8261
8205
  @pulumi.getter(name="logMessageTimestampType")
8262
8206
  def log_message_timestamp_type(self) -> Optional[pulumi.Input[str]]:
8263
8207
  """
8264
- Enum: `CreateTime`, `LogAppendTime`. Define whether the timestamp in the message is message create time or log append time. (Default: CreateTime).
8208
+ Enum: `CreateTime`, `LogAppendTime`. Define whether the timestamp in the message is message create time or log append time.
8265
8209
  """
8266
8210
  return pulumi.get(self, "log_message_timestamp_type")
8267
8211
 
@@ -8273,7 +8217,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8273
8217
  @pulumi.getter(name="logPreallocate")
8274
8218
  def log_preallocate(self) -> Optional[pulumi.Input[bool]]:
8275
8219
  """
8276
- Should pre allocate file when create new segment? (Default: false).
8220
+ Should pre allocate file when create new segment?
8277
8221
  """
8278
8222
  return pulumi.get(self, "log_preallocate")
8279
8223
 
@@ -8285,7 +8229,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8285
8229
  @pulumi.getter(name="logRetentionBytes")
8286
8230
  def log_retention_bytes(self) -> Optional[pulumi.Input[int]]:
8287
8231
  """
8288
- The maximum size of the log before deleting messages (Default: -1).
8232
+ The maximum size of the log before deleting messages.
8289
8233
  """
8290
8234
  return pulumi.get(self, "log_retention_bytes")
8291
8235
 
@@ -8297,7 +8241,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8297
8241
  @pulumi.getter(name="logRetentionHours")
8298
8242
  def log_retention_hours(self) -> Optional[pulumi.Input[int]]:
8299
8243
  """
8300
- The number of hours to keep a log file before deleting it (Default: 168 hours (1 week)).
8244
+ The number of hours to keep a log file before deleting it.
8301
8245
  """
8302
8246
  return pulumi.get(self, "log_retention_hours")
8303
8247
 
@@ -8309,7 +8253,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8309
8253
  @pulumi.getter(name="logRetentionMs")
8310
8254
  def log_retention_ms(self) -> Optional[pulumi.Input[int]]:
8311
8255
  """
8312
- The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied. (Default: null, log.retention.hours applies).
8256
+ The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied.
8313
8257
  """
8314
8258
  return pulumi.get(self, "log_retention_ms")
8315
8259
 
@@ -8321,7 +8265,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8321
8265
  @pulumi.getter(name="logRollJitterMs")
8322
8266
  def log_roll_jitter_ms(self) -> Optional[pulumi.Input[int]]:
8323
8267
  """
8324
- The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used (Default: null).
8268
+ The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used.
8325
8269
  """
8326
8270
  return pulumi.get(self, "log_roll_jitter_ms")
8327
8271
 
@@ -8333,7 +8277,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8333
8277
  @pulumi.getter(name="logRollMs")
8334
8278
  def log_roll_ms(self) -> Optional[pulumi.Input[int]]:
8335
8279
  """
8336
- The maximum time before a new log segment is rolled out (in milliseconds). (Default: null, log.roll.hours applies (Default: 168, 7 days)).
8280
+ The maximum time before a new log segment is rolled out (in milliseconds).
8337
8281
  """
8338
8282
  return pulumi.get(self, "log_roll_ms")
8339
8283
 
@@ -8345,7 +8289,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8345
8289
  @pulumi.getter(name="logSegmentBytes")
8346
8290
  def log_segment_bytes(self) -> Optional[pulumi.Input[int]]:
8347
8291
  """
8348
- The maximum size of a single log file (Default: 1073741824 bytes (1 gibibyte)).
8292
+ The maximum size of a single log file.
8349
8293
  """
8350
8294
  return pulumi.get(self, "log_segment_bytes")
8351
8295
 
@@ -8357,7 +8301,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8357
8301
  @pulumi.getter(name="logSegmentDeleteDelayMs")
8358
8302
  def log_segment_delete_delay_ms(self) -> Optional[pulumi.Input[int]]:
8359
8303
  """
8360
- The amount of time to wait before deleting a file from the filesystem (Default: 60000 ms (1 minute)). Example: `60000`.
8304
+ The amount of time to wait before deleting a file from the filesystem. Example: `60000`.
8361
8305
  """
8362
8306
  return pulumi.get(self, "log_segment_delete_delay_ms")
8363
8307
 
@@ -8369,7 +8313,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8369
8313
  @pulumi.getter(name="maxConnectionsPerIp")
8370
8314
  def max_connections_per_ip(self) -> Optional[pulumi.Input[int]]:
8371
8315
  """
8372
- The maximum number of connections allowed from each ip address (Default: 2147483647).
8316
+ The maximum number of connections allowed from each ip address (defaults to 2147483647).
8373
8317
  """
8374
8318
  return pulumi.get(self, "max_connections_per_ip")
8375
8319
 
@@ -8381,7 +8325,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8381
8325
  @pulumi.getter(name="maxIncrementalFetchSessionCacheSlots")
8382
8326
  def max_incremental_fetch_session_cache_slots(self) -> Optional[pulumi.Input[int]]:
8383
8327
  """
8384
- The maximum number of incremental fetch sessions that the broker will maintain. (Default: 1000). Example: `1000`.
8328
+ The maximum number of incremental fetch sessions that the broker will maintain. Example: `1000`.
8385
8329
  """
8386
8330
  return pulumi.get(self, "max_incremental_fetch_session_cache_slots")
8387
8331
 
@@ -8393,7 +8337,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8393
8337
  @pulumi.getter(name="messageMaxBytes")
8394
8338
  def message_max_bytes(self) -> Optional[pulumi.Input[int]]:
8395
8339
  """
8396
- The maximum size of message that the server can receive. (Default: 1048588 bytes (1 mebibyte + 12 bytes)). Example: `1048588`.
8340
+ The maximum size of message that the server can receive. Example: `1048588`.
8397
8341
  """
8398
8342
  return pulumi.get(self, "message_max_bytes")
8399
8343
 
@@ -8405,7 +8349,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8405
8349
  @pulumi.getter(name="minInsyncReplicas")
8406
8350
  def min_insync_replicas(self) -> Optional[pulumi.Input[int]]:
8407
8351
  """
8408
- When a producer sets acks to `all` (or `-1`), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful. (Default: 1). Example: `1`.
8352
+ When a producer sets acks to `all` (or `-1`), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful. Example: `1`.
8409
8353
  """
8410
8354
  return pulumi.get(self, "min_insync_replicas")
8411
8355
 
@@ -8417,7 +8361,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8417
8361
  @pulumi.getter(name="numPartitions")
8418
8362
  def num_partitions(self) -> Optional[pulumi.Input[int]]:
8419
8363
  """
8420
- Number of partitions for auto-created topics (Default: 1).
8364
+ Number of partitions for autocreated topics.
8421
8365
  """
8422
8366
  return pulumi.get(self, "num_partitions")
8423
8367
 
@@ -8429,7 +8373,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8429
8373
  @pulumi.getter(name="offsetsRetentionMinutes")
8430
8374
  def offsets_retention_minutes(self) -> Optional[pulumi.Input[int]]:
8431
8375
  """
8432
- Log retention window in minutes for offsets topic (Default: 10080 minutes (7 days)). Example: `10080`.
8376
+ Log retention window in minutes for offsets topic. Example: `10080`.
8433
8377
  """
8434
8378
  return pulumi.get(self, "offsets_retention_minutes")
8435
8379
 
@@ -8441,7 +8385,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8441
8385
  @pulumi.getter(name="producerPurgatoryPurgeIntervalRequests")
8442
8386
  def producer_purgatory_purge_interval_requests(self) -> Optional[pulumi.Input[int]]:
8443
8387
  """
8444
- The purge interval (in number of requests) of the producer request purgatory (Default: 1000).
8388
+ The purge interval (in number of requests) of the producer request purgatory(defaults to 1000).
8445
8389
  """
8446
8390
  return pulumi.get(self, "producer_purgatory_purge_interval_requests")
8447
8391
 
@@ -8453,7 +8397,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8453
8397
  @pulumi.getter(name="replicaFetchMaxBytes")
8454
8398
  def replica_fetch_max_bytes(self) -> Optional[pulumi.Input[int]]:
8455
8399
  """
8456
- The number of bytes of messages to attempt to fetch for each partition . This is not an absolute maximum, if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. (Default: 1048576 bytes (1 mebibytes)).
8400
+ The number of bytes of messages to attempt to fetch for each partition (defaults to 1048576). This is not an absolute maximum, if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made.
8457
8401
  """
8458
8402
  return pulumi.get(self, "replica_fetch_max_bytes")
8459
8403
 
@@ -8465,7 +8409,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8465
8409
  @pulumi.getter(name="replicaFetchResponseMaxBytes")
8466
8410
  def replica_fetch_response_max_bytes(self) -> Optional[pulumi.Input[int]]:
8467
8411
  """
8468
- Maximum bytes expected for the entire fetch response. Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum. (Default: 10485760 bytes (10 mebibytes)).
8412
+ Maximum bytes expected for the entire fetch response (defaults to 10485760). Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum.
8469
8413
  """
8470
8414
  return pulumi.get(self, "replica_fetch_response_max_bytes")
8471
8415
 
@@ -8477,7 +8421,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8477
8421
  @pulumi.getter(name="saslOauthbearerExpectedAudience")
8478
8422
  def sasl_oauthbearer_expected_audience(self) -> Optional[pulumi.Input[str]]:
8479
8423
  """
8480
- The (optional) comma-delimited setting for the broker to use to verify that the JWT was issued for one of the expected audiences. (Default: null).
8424
+ The (optional) comma-delimited setting for the broker to use to verify that the JWT was issued for one of the expected audiences.
8481
8425
  """
8482
8426
  return pulumi.get(self, "sasl_oauthbearer_expected_audience")
8483
8427
 
@@ -8489,7 +8433,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8489
8433
  @pulumi.getter(name="saslOauthbearerExpectedIssuer")
8490
8434
  def sasl_oauthbearer_expected_issuer(self) -> Optional[pulumi.Input[str]]:
8491
8435
  """
8492
- Optional setting for the broker to use to verify that the JWT was created by the expected issuer.(Default: null).
8436
+ Optional setting for the broker to use to verify that the JWT was created by the expected issuer.
8493
8437
  """
8494
8438
  return pulumi.get(self, "sasl_oauthbearer_expected_issuer")
8495
8439
 
@@ -8501,7 +8445,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8501
8445
  @pulumi.getter(name="saslOauthbearerJwksEndpointUrl")
8502
8446
  def sasl_oauthbearer_jwks_endpoint_url(self) -> Optional[pulumi.Input[str]]:
8503
8447
  """
8504
- OIDC JWKS endpoint URL. By setting this the SASL SSL OAuth2/OIDC authentication is enabled. See also other options for SASL OAuth2/OIDC. (Default: null).
8448
+ OIDC JWKS endpoint URL. By setting this the SASL SSL OAuth2/OIDC authentication is enabled. See also other options for SASL OAuth2/OIDC.
8505
8449
  """
8506
8450
  return pulumi.get(self, "sasl_oauthbearer_jwks_endpoint_url")
8507
8451
 
@@ -8513,7 +8457,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8513
8457
  @pulumi.getter(name="saslOauthbearerSubClaimName")
8514
8458
  def sasl_oauthbearer_sub_claim_name(self) -> Optional[pulumi.Input[str]]:
8515
8459
  """
8516
- Name of the scope from which to extract the subject claim from the JWT.(Default: sub).
8460
+ Name of the scope from which to extract the subject claim from the JWT. Defaults to sub.
8517
8461
  """
8518
8462
  return pulumi.get(self, "sasl_oauthbearer_sub_claim_name")
8519
8463
 
@@ -8525,7 +8469,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8525
8469
  @pulumi.getter(name="socketRequestMaxBytes")
8526
8470
  def socket_request_max_bytes(self) -> Optional[pulumi.Input[int]]:
8527
8471
  """
8528
- The maximum number of bytes in a socket request (Default: 104857600 bytes).
8472
+ The maximum number of bytes in a socket request (defaults to 104857600).
8529
8473
  """
8530
8474
  return pulumi.get(self, "socket_request_max_bytes")
8531
8475
 
@@ -8537,7 +8481,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8537
8481
  @pulumi.getter(name="transactionPartitionVerificationEnable")
8538
8482
  def transaction_partition_verification_enable(self) -> Optional[pulumi.Input[bool]]:
8539
8483
  """
8540
- Enable verification that checks that the partition has been added to the transaction before writing transactional records to the partition. (Default: false).
8484
+ Enable verification that checks that the partition has been added to the transaction before writing transactional records to the partition.
8541
8485
  """
8542
8486
  return pulumi.get(self, "transaction_partition_verification_enable")
8543
8487
 
@@ -8549,7 +8493,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8549
8493
  @pulumi.getter(name="transactionRemoveExpiredTransactionCleanupIntervalMs")
8550
8494
  def transaction_remove_expired_transaction_cleanup_interval_ms(self) -> Optional[pulumi.Input[int]]:
8551
8495
  """
8552
- The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (Default: 3600000 ms (1 hour)). Example: `3600000`.
8496
+ The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (defaults to 3600000 (1 hour)).
8553
8497
  """
8554
8498
  return pulumi.get(self, "transaction_remove_expired_transaction_cleanup_interval_ms")
8555
8499
 
@@ -8561,7 +8505,7 @@ class KafkaKafkaUserConfigKafkaArgs:
8561
8505
  @pulumi.getter(name="transactionStateLogSegmentBytes")
8562
8506
  def transaction_state_log_segment_bytes(self) -> Optional[pulumi.Input[int]]:
8563
8507
  """
8564
- The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (Default: 104857600 bytes (100 mebibytes)). Example: `104857600`.
8508
+ The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (defaults to 104857600 (100 mebibytes)).
8565
8509
  """
8566
8510
  return pulumi.get(self, "transaction_state_log_segment_bytes")
8567
8511
 
@@ -13281,7 +13225,6 @@ class MySqlMysqlUserConfigMigrationArgs:
13281
13225
  port: pulumi.Input[int],
13282
13226
  dbname: Optional[pulumi.Input[str]] = None,
13283
13227
  ignore_dbs: Optional[pulumi.Input[str]] = None,
13284
- ignore_roles: Optional[pulumi.Input[str]] = None,
13285
13228
  method: Optional[pulumi.Input[str]] = None,
13286
13229
  password: Optional[pulumi.Input[str]] = None,
13287
13230
  ssl: Optional[pulumi.Input[bool]] = None,
@@ -13291,7 +13234,6 @@ class MySqlMysqlUserConfigMigrationArgs:
13291
13234
  :param pulumi.Input[int] port: Port number of the server where to migrate data from. Example: `1234`.
13292
13235
  :param pulumi.Input[str] dbname: Database name for bootstrapping the initial connection. Example: `defaultdb`.
13293
13236
  :param pulumi.Input[str] ignore_dbs: Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment). Example: `db1,db2`.
13294
- :param pulumi.Input[str] ignore_roles: Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment). Example: `role1,role2`.
13295
13237
  :param pulumi.Input[str] method: Enum: `dump`, `replication`. The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
13296
13238
  :param pulumi.Input[str] password: Password for authentication with the server where to migrate data from. Example: `jjKk45Nnd`.
13297
13239
  :param pulumi.Input[bool] ssl: The server where to migrate data from is secured with SSL. Default: `true`.
@@ -13303,8 +13245,6 @@ class MySqlMysqlUserConfigMigrationArgs:
13303
13245
  pulumi.set(__self__, "dbname", dbname)
13304
13246
  if ignore_dbs is not None:
13305
13247
  pulumi.set(__self__, "ignore_dbs", ignore_dbs)
13306
- if ignore_roles is not None:
13307
- pulumi.set(__self__, "ignore_roles", ignore_roles)
13308
13248
  if method is not None:
13309
13249
  pulumi.set(__self__, "method", method)
13310
13250
  if password is not None:
@@ -13362,18 +13302,6 @@ class MySqlMysqlUserConfigMigrationArgs:
13362
13302
  def ignore_dbs(self, value: Optional[pulumi.Input[str]]):
13363
13303
  pulumi.set(self, "ignore_dbs", value)
13364
13304
 
13365
- @property
13366
- @pulumi.getter(name="ignoreRoles")
13367
- def ignore_roles(self) -> Optional[pulumi.Input[str]]:
13368
- """
13369
- Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment). Example: `role1,role2`.
13370
- """
13371
- return pulumi.get(self, "ignore_roles")
13372
-
13373
- @ignore_roles.setter
13374
- def ignore_roles(self, value: Optional[pulumi.Input[str]]):
13375
- pulumi.set(self, "ignore_roles", value)
13376
-
13377
13305
  @property
13378
13306
  @pulumi.getter
13379
13307
  def method(self) -> Optional[pulumi.Input[str]]:
@@ -17776,7 +17704,6 @@ class PgPgUserConfigMigrationArgs:
17776
17704
  port: pulumi.Input[int],
17777
17705
  dbname: Optional[pulumi.Input[str]] = None,
17778
17706
  ignore_dbs: Optional[pulumi.Input[str]] = None,
17779
- ignore_roles: Optional[pulumi.Input[str]] = None,
17780
17707
  method: Optional[pulumi.Input[str]] = None,
17781
17708
  password: Optional[pulumi.Input[str]] = None,
17782
17709
  ssl: Optional[pulumi.Input[bool]] = None,
@@ -17786,7 +17713,6 @@ class PgPgUserConfigMigrationArgs:
17786
17713
  :param pulumi.Input[int] port: Port number of the server where to migrate data from. Example: `1234`.
17787
17714
  :param pulumi.Input[str] dbname: Database name for bootstrapping the initial connection. Example: `defaultdb`.
17788
17715
  :param pulumi.Input[str] ignore_dbs: Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment). Example: `db1,db2`.
17789
- :param pulumi.Input[str] ignore_roles: Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment). Example: `role1,role2`.
17790
17716
  :param pulumi.Input[str] method: Enum: `dump`, `replication`. The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
17791
17717
  :param pulumi.Input[str] password: Password for authentication with the server where to migrate data from. Example: `jjKk45Nnd`.
17792
17718
  :param pulumi.Input[bool] ssl: The server where to migrate data from is secured with SSL. Default: `true`.
@@ -17798,8 +17724,6 @@ class PgPgUserConfigMigrationArgs:
17798
17724
  pulumi.set(__self__, "dbname", dbname)
17799
17725
  if ignore_dbs is not None:
17800
17726
  pulumi.set(__self__, "ignore_dbs", ignore_dbs)
17801
- if ignore_roles is not None:
17802
- pulumi.set(__self__, "ignore_roles", ignore_roles)
17803
17727
  if method is not None:
17804
17728
  pulumi.set(__self__, "method", method)
17805
17729
  if password is not None:
@@ -17857,18 +17781,6 @@ class PgPgUserConfigMigrationArgs:
17857
17781
  def ignore_dbs(self, value: Optional[pulumi.Input[str]]):
17858
17782
  pulumi.set(self, "ignore_dbs", value)
17859
17783
 
17860
- @property
17861
- @pulumi.getter(name="ignoreRoles")
17862
- def ignore_roles(self) -> Optional[pulumi.Input[str]]:
17863
- """
17864
- Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment). Example: `role1,role2`.
17865
- """
17866
- return pulumi.get(self, "ignore_roles")
17867
-
17868
- @ignore_roles.setter
17869
- def ignore_roles(self, value: Optional[pulumi.Input[str]]):
17870
- pulumi.set(self, "ignore_roles", value)
17871
-
17872
17784
  @property
17873
17785
  @pulumi.getter
17874
17786
  def method(self) -> Optional[pulumi.Input[str]]:
@@ -19111,7 +19023,6 @@ class PgPgUserConfigPgbouncerArgs:
19111
19023
  autodb_pool_mode: Optional[pulumi.Input[str]] = None,
19112
19024
  autodb_pool_size: Optional[pulumi.Input[int]] = None,
19113
19025
  ignore_startup_parameters: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
19114
- max_prepared_statements: Optional[pulumi.Input[int]] = None,
19115
19026
  min_pool_size: Optional[pulumi.Input[int]] = None,
19116
19027
  server_idle_timeout: Optional[pulumi.Input[int]] = None,
19117
19028
  server_lifetime: Optional[pulumi.Input[int]] = None,
@@ -19122,7 +19033,6 @@ class PgPgUserConfigPgbouncerArgs:
19122
19033
  :param pulumi.Input[str] autodb_pool_mode: Enum: `session`, `transaction`, `statement`. PGBouncer pool mode. Default: `transaction`.
19123
19034
  :param pulumi.Input[int] autodb_pool_size: If non-zero then create automatically a pool of that size per user when a pool doesn't exist. Default: `0`.
19124
19035
  :param pulumi.Input[Sequence[pulumi.Input[str]]] ignore_startup_parameters: List of parameters to ignore when given in startup packet.
19125
- :param pulumi.Input[int] max_prepared_statements: PgBouncer tracks protocol-level named prepared statements related commands sent by the client in transaction and statement pooling modes when max*prepared*statements is set to a non-zero value. Setting it to 0 disables prepared statements. max*prepared*statements defaults to 100, and its maximum is 3000. Default: `100`.
19126
19036
  :param pulumi.Input[int] min_pool_size: Add more server connections to pool if below this number. Improves behavior when usual load comes suddenly back after period of total inactivity. The value is effectively capped at the pool size. Default: `0`.
19127
19037
  :param pulumi.Input[int] server_idle_timeout: If a server connection has been idle more than this many seconds it will be dropped. If 0 then timeout is disabled. (seconds). Default: `600`.
19128
19038
  :param pulumi.Input[int] server_lifetime: The pooler will close an unused server connection that has been connected longer than this. (seconds). Default: `3600`.
@@ -19138,8 +19048,6 @@ class PgPgUserConfigPgbouncerArgs:
19138
19048
  pulumi.set(__self__, "autodb_pool_size", autodb_pool_size)
19139
19049
  if ignore_startup_parameters is not None:
19140
19050
  pulumi.set(__self__, "ignore_startup_parameters", ignore_startup_parameters)
19141
- if max_prepared_statements is not None:
19142
- pulumi.set(__self__, "max_prepared_statements", max_prepared_statements)
19143
19051
  if min_pool_size is not None:
19144
19052
  pulumi.set(__self__, "min_pool_size", min_pool_size)
19145
19053
  if server_idle_timeout is not None:
@@ -19209,18 +19117,6 @@ class PgPgUserConfigPgbouncerArgs:
19209
19117
  def ignore_startup_parameters(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
19210
19118
  pulumi.set(self, "ignore_startup_parameters", value)
19211
19119
 
19212
- @property
19213
- @pulumi.getter(name="maxPreparedStatements")
19214
- def max_prepared_statements(self) -> Optional[pulumi.Input[int]]:
19215
- """
19216
- PgBouncer tracks protocol-level named prepared statements related commands sent by the client in transaction and statement pooling modes when max*prepared*statements is set to a non-zero value. Setting it to 0 disables prepared statements. max*prepared*statements defaults to 100, and its maximum is 3000. Default: `100`.
19217
- """
19218
- return pulumi.get(self, "max_prepared_statements")
19219
-
19220
- @max_prepared_statements.setter
19221
- def max_prepared_statements(self, value: Optional[pulumi.Input[int]]):
19222
- pulumi.set(self, "max_prepared_statements", value)
19223
-
19224
19120
  @property
19225
19121
  @pulumi.getter(name="minPoolSize")
19226
19122
  def min_pool_size(self) -> Optional[pulumi.Input[int]]:
@@ -20276,7 +20172,6 @@ class RedisRedisUserConfigMigrationArgs:
20276
20172
  port: pulumi.Input[int],
20277
20173
  dbname: Optional[pulumi.Input[str]] = None,
20278
20174
  ignore_dbs: Optional[pulumi.Input[str]] = None,
20279
- ignore_roles: Optional[pulumi.Input[str]] = None,
20280
20175
  method: Optional[pulumi.Input[str]] = None,
20281
20176
  password: Optional[pulumi.Input[str]] = None,
20282
20177
  ssl: Optional[pulumi.Input[bool]] = None,
@@ -20286,7 +20181,6 @@ class RedisRedisUserConfigMigrationArgs:
20286
20181
  :param pulumi.Input[int] port: Port number of the server where to migrate data from. Example: `1234`.
20287
20182
  :param pulumi.Input[str] dbname: Database name for bootstrapping the initial connection. Example: `defaultdb`.
20288
20183
  :param pulumi.Input[str] ignore_dbs: Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment). Example: `db1,db2`.
20289
- :param pulumi.Input[str] ignore_roles: Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment). Example: `role1,role2`.
20290
20184
  :param pulumi.Input[str] method: Enum: `dump`, `replication`. The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
20291
20185
  :param pulumi.Input[str] password: Password for authentication with the server where to migrate data from. Example: `jjKk45Nnd`.
20292
20186
  :param pulumi.Input[bool] ssl: The server where to migrate data from is secured with SSL. Default: `true`.
@@ -20298,8 +20192,6 @@ class RedisRedisUserConfigMigrationArgs:
20298
20192
  pulumi.set(__self__, "dbname", dbname)
20299
20193
  if ignore_dbs is not None:
20300
20194
  pulumi.set(__self__, "ignore_dbs", ignore_dbs)
20301
- if ignore_roles is not None:
20302
- pulumi.set(__self__, "ignore_roles", ignore_roles)
20303
20195
  if method is not None:
20304
20196
  pulumi.set(__self__, "method", method)
20305
20197
  if password is not None:
@@ -20357,18 +20249,6 @@ class RedisRedisUserConfigMigrationArgs:
20357
20249
  def ignore_dbs(self, value: Optional[pulumi.Input[str]]):
20358
20250
  pulumi.set(self, "ignore_dbs", value)
20359
20251
 
20360
- @property
20361
- @pulumi.getter(name="ignoreRoles")
20362
- def ignore_roles(self) -> Optional[pulumi.Input[str]]:
20363
- """
20364
- Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment). Example: `role1,role2`.
20365
- """
20366
- return pulumi.get(self, "ignore_roles")
20367
-
20368
- @ignore_roles.setter
20369
- def ignore_roles(self, value: Optional[pulumi.Input[str]]):
20370
- pulumi.set(self, "ignore_roles", value)
20371
-
20372
20252
  @property
20373
20253
  @pulumi.getter
20374
20254
  def method(self) -> Optional[pulumi.Input[str]]:
@@ -20669,9 +20549,7 @@ class ServiceIntegrationClickhouseKafkaUserConfigTableArgs:
20669
20549
  max_rows_per_message: Optional[pulumi.Input[int]] = None,
20670
20550
  num_consumers: Optional[pulumi.Input[int]] = None,
20671
20551
  poll_max_batch_size: Optional[pulumi.Input[int]] = None,
20672
- poll_max_timeout_ms: Optional[pulumi.Input[int]] = None,
20673
- skip_broken_messages: Optional[pulumi.Input[int]] = None,
20674
- thread_per_consumer: Optional[pulumi.Input[bool]] = None):
20552
+ skip_broken_messages: Optional[pulumi.Input[int]] = None):
20675
20553
  """
20676
20554
  :param pulumi.Input[Sequence[pulumi.Input['ServiceIntegrationClickhouseKafkaUserConfigTableColumnArgs']]] columns: Table columns
20677
20555
  :param pulumi.Input[str] data_format: Enum: `Avro`, `CSV`, `JSONAsString`, `JSONCompactEachRow`, `JSONCompactStringsEachRow`, `JSONEachRow`, `JSONStringsEachRow`, `MsgPack`, `TSKV`, `TSV`, `TabSeparated`, `RawBLOB`, `AvroConfluent`, `Parquet`. Message data format. Default: `JSONEachRow`.
@@ -20685,9 +20563,7 @@ class ServiceIntegrationClickhouseKafkaUserConfigTableArgs:
20685
20563
  :param pulumi.Input[int] max_rows_per_message: The maximum number of rows produced in one kafka message for row-based formats. Default: `1`.
20686
20564
  :param pulumi.Input[int] num_consumers: The number of consumers per table per replica. Default: `1`.
20687
20565
  :param pulumi.Input[int] poll_max_batch_size: Maximum amount of messages to be polled in a single Kafka poll. Default: `0`.
20688
- :param pulumi.Input[int] poll_max_timeout_ms: Timeout in milliseconds for a single poll from Kafka. Takes the value of the stream*flush*interval_ms server setting by default (500ms). Default: `0`.
20689
20566
  :param pulumi.Input[int] skip_broken_messages: Skip at least this number of broken messages from Kafka topic per block. Default: `0`.
20690
- :param pulumi.Input[bool] thread_per_consumer: Provide an independent thread for each consumer. All consumers run in the same thread by default. Default: `false`.
20691
20567
  """
20692
20568
  pulumi.set(__self__, "columns", columns)
20693
20569
  pulumi.set(__self__, "data_format", data_format)
@@ -20708,12 +20584,8 @@ class ServiceIntegrationClickhouseKafkaUserConfigTableArgs:
20708
20584
  pulumi.set(__self__, "num_consumers", num_consumers)
20709
20585
  if poll_max_batch_size is not None:
20710
20586
  pulumi.set(__self__, "poll_max_batch_size", poll_max_batch_size)
20711
- if poll_max_timeout_ms is not None:
20712
- pulumi.set(__self__, "poll_max_timeout_ms", poll_max_timeout_ms)
20713
20587
  if skip_broken_messages is not None:
20714
20588
  pulumi.set(__self__, "skip_broken_messages", skip_broken_messages)
20715
- if thread_per_consumer is not None:
20716
- pulumi.set(__self__, "thread_per_consumer", thread_per_consumer)
20717
20589
 
20718
20590
  @property
20719
20591
  @pulumi.getter
@@ -20859,18 +20731,6 @@ class ServiceIntegrationClickhouseKafkaUserConfigTableArgs:
20859
20731
  def poll_max_batch_size(self, value: Optional[pulumi.Input[int]]):
20860
20732
  pulumi.set(self, "poll_max_batch_size", value)
20861
20733
 
20862
- @property
20863
- @pulumi.getter(name="pollMaxTimeoutMs")
20864
- def poll_max_timeout_ms(self) -> Optional[pulumi.Input[int]]:
20865
- """
20866
- Timeout in milliseconds for a single poll from Kafka. Takes the value of the stream*flush*interval_ms server setting by default (500ms). Default: `0`.
20867
- """
20868
- return pulumi.get(self, "poll_max_timeout_ms")
20869
-
20870
- @poll_max_timeout_ms.setter
20871
- def poll_max_timeout_ms(self, value: Optional[pulumi.Input[int]]):
20872
- pulumi.set(self, "poll_max_timeout_ms", value)
20873
-
20874
20734
  @property
20875
20735
  @pulumi.getter(name="skipBrokenMessages")
20876
20736
  def skip_broken_messages(self) -> Optional[pulumi.Input[int]]:
@@ -20883,18 +20743,6 @@ class ServiceIntegrationClickhouseKafkaUserConfigTableArgs:
20883
20743
  def skip_broken_messages(self, value: Optional[pulumi.Input[int]]):
20884
20744
  pulumi.set(self, "skip_broken_messages", value)
20885
20745
 
20886
- @property
20887
- @pulumi.getter(name="threadPerConsumer")
20888
- def thread_per_consumer(self) -> Optional[pulumi.Input[bool]]:
20889
- """
20890
- Provide an independent thread for each consumer. All consumers run in the same thread by default. Default: `false`.
20891
- """
20892
- return pulumi.get(self, "thread_per_consumer")
20893
-
20894
- @thread_per_consumer.setter
20895
- def thread_per_consumer(self, value: Optional[pulumi.Input[bool]]):
20896
- pulumi.set(self, "thread_per_consumer", value)
20897
-
20898
20746
 
20899
20747
  @pulumi.input_type
20900
20748
  class ServiceIntegrationClickhouseKafkaUserConfigTableColumnArgs:
@@ -25235,7 +25083,6 @@ class ValkeyValkeyUserConfigMigrationArgs:
25235
25083
  port: pulumi.Input[int],
25236
25084
  dbname: Optional[pulumi.Input[str]] = None,
25237
25085
  ignore_dbs: Optional[pulumi.Input[str]] = None,
25238
- ignore_roles: Optional[pulumi.Input[str]] = None,
25239
25086
  method: Optional[pulumi.Input[str]] = None,
25240
25087
  password: Optional[pulumi.Input[str]] = None,
25241
25088
  ssl: Optional[pulumi.Input[bool]] = None,
@@ -25245,7 +25092,6 @@ class ValkeyValkeyUserConfigMigrationArgs:
25245
25092
  :param pulumi.Input[int] port: Port number of the server where to migrate data from. Example: `1234`.
25246
25093
  :param pulumi.Input[str] dbname: Database name for bootstrapping the initial connection. Example: `defaultdb`.
25247
25094
  :param pulumi.Input[str] ignore_dbs: Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment). Example: `db1,db2`.
25248
- :param pulumi.Input[str] ignore_roles: Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment). Example: `role1,role2`.
25249
25095
  :param pulumi.Input[str] method: Enum: `dump`, `replication`. The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
25250
25096
  :param pulumi.Input[str] password: Password for authentication with the server where to migrate data from. Example: `jjKk45Nnd`.
25251
25097
  :param pulumi.Input[bool] ssl: The server where to migrate data from is secured with SSL. Default: `true`.
@@ -25257,8 +25103,6 @@ class ValkeyValkeyUserConfigMigrationArgs:
25257
25103
  pulumi.set(__self__, "dbname", dbname)
25258
25104
  if ignore_dbs is not None:
25259
25105
  pulumi.set(__self__, "ignore_dbs", ignore_dbs)
25260
- if ignore_roles is not None:
25261
- pulumi.set(__self__, "ignore_roles", ignore_roles)
25262
25106
  if method is not None:
25263
25107
  pulumi.set(__self__, "method", method)
25264
25108
  if password is not None:
@@ -25316,18 +25160,6 @@ class ValkeyValkeyUserConfigMigrationArgs:
25316
25160
  def ignore_dbs(self, value: Optional[pulumi.Input[str]]):
25317
25161
  pulumi.set(self, "ignore_dbs", value)
25318
25162
 
25319
- @property
25320
- @pulumi.getter(name="ignoreRoles")
25321
- def ignore_roles(self) -> Optional[pulumi.Input[str]]:
25322
- """
25323
- Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment). Example: `role1,role2`.
25324
- """
25325
- return pulumi.get(self, "ignore_roles")
25326
-
25327
- @ignore_roles.setter
25328
- def ignore_roles(self, value: Optional[pulumi.Input[str]]):
25329
- pulumi.set(self, "ignore_roles", value)
25330
-
25331
25163
  @property
25332
25164
  @pulumi.getter
25333
25165
  def method(self) -> Optional[pulumi.Input[str]]: