pulumi-aiven 6.21.0__py3-none-any.whl → 6.21.0a1721951240__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of pulumi-aiven might be problematic.
- pulumi_aiven/_inputs.py +93 -261
- pulumi_aiven/outputs.py +186 -460
- pulumi_aiven/pulumi-plugin.json +1 -1
- {pulumi_aiven-6.21.0.dist-info → pulumi_aiven-6.21.0a1721951240.dist-info}/METADATA +1 -1
- {pulumi_aiven-6.21.0.dist-info → pulumi_aiven-6.21.0a1721951240.dist-info}/RECORD +7 -7
- {pulumi_aiven-6.21.0.dist-info → pulumi_aiven-6.21.0a1721951240.dist-info}/WHEEL +1 -1
- {pulumi_aiven-6.21.0.dist-info → pulumi_aiven-6.21.0a1721951240.dist-info}/top_level.txt +0 -0
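The practical impact of this prerelease is that two user-config options disappear from the generated SDK: `follower_fetching` on the Kafka service user config (together with the `KafkaKafkaUserConfigFollowerFetching` output type and its `GetKafkaKafkaUserConfigFollowerFetchingResult` counterpart) and `ignore_roles` on the Dragonfly, MySQL and PostgreSQL migration configs. The following is a minimal sketch of a program that works against 6.21.0 but loses these fields on 6.21.0a1721951240; only the user-config classes appear in this diff, so the surrounding resource arguments are assumed placeholders for illustration.

import pulumi_aiven as aiven

# Works with pulumi-aiven 6.21.0; the follower_fetching block and its
# Args/output classes are removed in 6.21.0a1721951240.
kafka = aiven.Kafka(
    "example-kafka",
    project="my-project",              # assumed placeholder
    cloud_name="google-europe-west1",  # assumed placeholder
    plan="business-4",                 # assumed placeholder
    service_name="example-kafka",
    kafka_user_config=aiven.KafkaKafkaUserConfigArgs(
        follower_fetching=aiven.KafkaKafkaUserConfigFollowerFetchingArgs(
            enabled=True,
        ),
    ),
)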
pulumi_aiven/outputs.py
CHANGED
@@ -101,7 +101,6 @@ __all__ = [
     'KafkaConnectorTask',
     'KafkaKafka',
     'KafkaKafkaUserConfig',
-    'KafkaKafkaUserConfigFollowerFetching',
     'KafkaKafkaUserConfigIpFilterObject',
     'KafkaKafkaUserConfigKafka',
     'KafkaKafkaUserConfigKafkaAuthenticationMethods',
@@ -381,7 +380,6 @@ __all__ = [
     'GetKafkaConnectorTaskResult',
     'GetKafkaKafkaResult',
     'GetKafkaKafkaUserConfigResult',
-    'GetKafkaKafkaUserConfigFollowerFetchingResult',
     'GetKafkaKafkaUserConfigIpFilterObjectResult',
     'GetKafkaKafkaUserConfigKafkaResult',
     'GetKafkaKafkaUserConfigKafkaAuthenticationMethodsResult',
@@ -2522,8 +2520,6 @@ class DragonflyDragonflyUserConfigMigration(dict):
         suggest = None
         if key == "ignoreDbs":
             suggest = "ignore_dbs"
-        elif key == "ignoreRoles":
-            suggest = "ignore_roles"
 
         if suggest:
             pulumi.log.warn(f"Key '{key}' not found in DragonflyDragonflyUserConfigMigration. Access the value via the '{suggest}' property getter instead.")
@@ -2541,7 +2537,6 @@ class DragonflyDragonflyUserConfigMigration(dict):
                  port: int,
                  dbname: Optional[str] = None,
                  ignore_dbs: Optional[str] = None,
-                 ignore_roles: Optional[str] = None,
                  method: Optional[str] = None,
                  password: Optional[str] = None,
                  ssl: Optional[bool] = None,
@@ -2551,7 +2546,6 @@ class DragonflyDragonflyUserConfigMigration(dict):
         :param int port: Port number of the server where to migrate data from. Example: `1234`.
         :param str dbname: Database name for bootstrapping the initial connection. Example: `defaultdb`.
         :param str ignore_dbs: Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment). Example: `db1,db2`.
-        :param str ignore_roles: Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment). Example: `role1,role2`.
         :param str method: Enum: `dump`, `replication`. The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
         :param str password: Password for authentication with the server where to migrate data from. Example: `jjKk45Nnd`.
         :param bool ssl: The server where to migrate data from is secured with SSL. Default: `true`.
@@ -2563,8 +2557,6 @@ class DragonflyDragonflyUserConfigMigration(dict):
             pulumi.set(__self__, "dbname", dbname)
         if ignore_dbs is not None:
             pulumi.set(__self__, "ignore_dbs", ignore_dbs)
-        if ignore_roles is not None:
-            pulumi.set(__self__, "ignore_roles", ignore_roles)
         if method is not None:
             pulumi.set(__self__, "method", method)
         if password is not None:
@@ -2606,14 +2598,6 @@ class DragonflyDragonflyUserConfigMigration(dict):
         """
         return pulumi.get(self, "ignore_dbs")
 
-    @property
-    @pulumi.getter(name="ignoreRoles")
-    def ignore_roles(self) -> Optional[str]:
-        """
-        Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment). Example: `role1,role2`.
-        """
-        return pulumi.get(self, "ignore_roles")
-
     @property
     @pulumi.getter
     def method(self) -> Optional[str]:
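For reference, this is the kind of migration block that sets the now-removed `ignore_roles` option. It is a sketch only: the field names come from the migration class above, while the surrounding Dragonfly resource and its arguments are assumed for illustration and are not part of this diff.

import pulumi_aiven as aiven

dragonfly = aiven.Dragonfly(
    "example-dragonfly",
    project="my-project",              # assumed placeholder
    cloud_name="google-europe-west1",  # assumed placeholder
    plan="startup-4",                  # assumed placeholder
    service_name="example-dragonfly",
    dragonfly_user_config=aiven.DragonflyDragonflyUserConfigArgs(
        migration=aiven.DragonflyDragonflyUserConfigMigrationArgs(
            host="source.example.com",
            port=1234,
            ignore_dbs="db1,db2",
            ignore_roles="role1,role2",  # dropped in 6.21.0a1721951240
        ),
    ),
)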
@@ -7246,8 +7230,6 @@ class KafkaKafkaUserConfig(dict):
             suggest = "aiven_kafka_topic_messages"
         elif key == "customDomain":
             suggest = "custom_domain"
-        elif key == "followerFetching":
-            suggest = "follower_fetching"
         elif key == "ipFilterObjects":
             suggest = "ip_filter_objects"
         elif key == "ipFilterStrings":
@@ -7304,7 +7286,6 @@ class KafkaKafkaUserConfig(dict):
                  additional_backup_regions: Optional[str] = None,
                  aiven_kafka_topic_messages: Optional[bool] = None,
                  custom_domain: Optional[str] = None,
-                 follower_fetching: Optional['outputs.KafkaKafkaUserConfigFollowerFetching'] = None,
                  ip_filter_objects: Optional[Sequence['outputs.KafkaKafkaUserConfigIpFilterObject']] = None,
                  ip_filter_strings: Optional[Sequence[str]] = None,
                  ip_filters: Optional[Sequence[str]] = None,
@@ -7330,7 +7311,6 @@ class KafkaKafkaUserConfig(dict):
         :param str additional_backup_regions: Additional Cloud Regions for Backup Replication.
         :param bool aiven_kafka_topic_messages: Allow access to read Kafka topic messages in the Aiven Console and REST API.
         :param str custom_domain: Serve the web frontend using a custom CNAME pointing to the Aiven DNS name. Example: `grafana.example.org`.
-        :param 'KafkaKafkaUserConfigFollowerFetchingArgs' follower_fetching: Enable follower fetching
         :param Sequence['KafkaKafkaUserConfigIpFilterObjectArgs'] ip_filter_objects: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`
         :param Sequence[str] ip_filter_strings: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`.
         :param Sequence[str] ip_filters: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`.
@@ -7358,8 +7338,6 @@ class KafkaKafkaUserConfig(dict):
             pulumi.set(__self__, "aiven_kafka_topic_messages", aiven_kafka_topic_messages)
         if custom_domain is not None:
             pulumi.set(__self__, "custom_domain", custom_domain)
-        if follower_fetching is not None:
-            pulumi.set(__self__, "follower_fetching", follower_fetching)
         if ip_filter_objects is not None:
             pulumi.set(__self__, "ip_filter_objects", ip_filter_objects)
         if ip_filter_strings is not None:
@@ -7428,14 +7406,6 @@ class KafkaKafkaUserConfig(dict):
         """
         return pulumi.get(self, "custom_domain")
 
-    @property
-    @pulumi.getter(name="followerFetching")
-    def follower_fetching(self) -> Optional['outputs.KafkaKafkaUserConfigFollowerFetching']:
-        """
-        Enable follower fetching
-        """
-        return pulumi.get(self, "follower_fetching")
-
     @property
     @pulumi.getter(name="ipFilterObjects")
     def ip_filter_objects(self) -> Optional[Sequence['outputs.KafkaKafkaUserConfigIpFilterObject']]:
@@ -7603,25 +7573,6 @@ class KafkaKafkaUserConfig(dict):
         return pulumi.get(self, "tiered_storage")
 
 
-@pulumi.output_type
-class KafkaKafkaUserConfigFollowerFetching(dict):
-    def __init__(__self__, *,
-                 enabled: Optional[bool] = None):
-        """
-        :param bool enabled: Whether to enable the follower fetching functionality.
-        """
-        if enabled is not None:
-            pulumi.set(__self__, "enabled", enabled)
-
-    @property
-    @pulumi.getter
-    def enabled(self) -> Optional[bool]:
-        """
-        Whether to enable the follower fetching functionality.
-        """
-        return pulumi.get(self, "enabled")
-
-
 @pulumi.output_type
 class KafkaKafkaUserConfigIpFilterObject(dict):
     def __init__(__self__, *,
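On the read side, the same removal means the provider no longer returns a follower-fetching block. Continuing the hypothetical Kafka program sketched near the top of this page (the `kafka` resource there is an assumption, only the output classes appear in this diff), a 6.21.0-era export such as the following stops working once the property getter shown above is gone.

import pulumi

# kafka is the aiven.Kafka resource from the earlier sketch.
pulumi.export(
    "follower_fetching_enabled",
    kafka.kafka_user_config.apply(
        # On 6.21.0 the output type still has a follower_fetching property;
        # on 6.21.0a1721951240 this attribute no longer exists.
        lambda cfg: cfg.follower_fetching.enabled
        if cfg and cfg.follower_fetching
        else None
    ),
)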
@@ -7809,52 +7760,52 @@ class KafkaKafkaUserConfigKafka(dict):
                  transaction_remove_expired_transaction_cleanup_interval_ms: Optional[int] = None,
                  transaction_state_log_segment_bytes: Optional[int] = None):
         """
-        :param bool auto_create_topics_enable: Enable auto
-        :param str compression_type: Enum: `gzip`, `snappy`, `lz4`, `zstd`, `uncompressed`, `producer`. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `uncompressed` which is equivalent to no compression; and `producer` which means retain the original compression codec set by the producer.
-        :param int connections_max_idle_ms: Idle connections timeout: the server socket processor threads close the connections that idle for longer than this.
-        :param int default_replication_factor: Replication factor for
-        :param int group_initial_rebalance_delay_ms: The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time.
-        :param int group_max_session_timeout_ms: The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures.
-        :param int group_min_session_timeout_ms: The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures.
-        :param int log_cleaner_delete_retention_ms: How long are delete records retained?
-        :param int log_cleaner_max_compaction_lag_ms: The maximum amount of time message will remain uncompacted. Only applicable for logs that are being compacted.
-        :param float log_cleaner_min_cleanable_ratio: Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option.
-        :param int log_cleaner_min_compaction_lag_ms: The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.
-        :param str log_cleanup_policy: Enum: `delete`, `compact`, `compact,delete`. The default cleanup policy for segments beyond the retention window
-        :param int log_flush_interval_messages: The number of messages accumulated on a log partition before messages are flushed to disk
-        :param int log_flush_interval_ms: The maximum time in ms that a message in any topic is kept in memory
-        :param int log_index_interval_bytes: The interval with which Kafka adds an entry to the offset index
-        :param int log_index_size_max_bytes: The maximum size in bytes of the offset index
-        :param int log_local_retention_bytes: The maximum size of local log segments that can grow for a partition before it gets eligible for deletion. If set to -2, the value of log.retention.bytes is used. The effective value should always be less than or equal to log.retention.bytes value.
-        :param int log_local_retention_ms: The number of milliseconds to keep the local log segments before it gets eligible for deletion. If set to -2, the value of log.retention.ms is used. The effective value should always be less than or equal to log.retention.ms value.
-        :param bool log_message_downconversion_enable: This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests.
-        :param int log_message_timestamp_difference_max_ms: The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message
-        :param str log_message_timestamp_type: Enum: `CreateTime`, `LogAppendTime`. Define whether the timestamp in the message is message create time or log append time.
-        :param bool log_preallocate: Should pre allocate file when create new segment?
-        :param int log_retention_bytes: The maximum size of the log before deleting messages
-        :param int log_retention_hours: The number of hours to keep a log file before deleting it
-        :param int log_retention_ms: The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied.
-        :param int log_roll_jitter_ms: The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used
-        :param int log_roll_ms: The maximum time before a new log segment is rolled out (in milliseconds).
-        :param int log_segment_bytes: The maximum size of a single log file
-        :param int log_segment_delete_delay_ms: The amount of time to wait before deleting a file from the filesystem
-        :param int max_connections_per_ip: The maximum number of connections allowed from each ip address (
-        :param int max_incremental_fetch_session_cache_slots: The maximum number of incremental fetch sessions that the broker will maintain.
-        :param int message_max_bytes: The maximum size of message that the server can receive.
-        :param int min_insync_replicas: When a producer sets acks to `all` (or `-1`), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful.
-        :param int num_partitions: Number of partitions for
-        :param int offsets_retention_minutes: Log retention window in minutes for offsets topic
-        :param int producer_purgatory_purge_interval_requests: The purge interval (in number of requests) of the producer request purgatory
-        :param int replica_fetch_max_bytes: The number of bytes of messages to attempt to fetch for each partition . This is not an absolute maximum, if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made.
-        :param int replica_fetch_response_max_bytes: Maximum bytes expected for the entire fetch response. Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum.
-        :param str sasl_oauthbearer_expected_audience: The (optional) comma-delimited setting for the broker to use to verify that the JWT was issued for one of the expected audiences.
-        :param str sasl_oauthbearer_expected_issuer: Optional setting for the broker to use to verify that the JWT was created by the expected issuer.
-        :param str sasl_oauthbearer_jwks_endpoint_url: OIDC JWKS endpoint URL. By setting this the SASL SSL OAuth2/OIDC authentication is enabled. See also other options for SASL OAuth2/OIDC.
-        :param str sasl_oauthbearer_sub_claim_name: Name of the scope from which to extract the subject claim from the JWT.
-        :param int socket_request_max_bytes: The maximum number of bytes in a socket request (
-        :param bool transaction_partition_verification_enable: Enable verification that checks that the partition has been added to the transaction before writing transactional records to the partition.
-        :param int transaction_remove_expired_transaction_cleanup_interval_ms: The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (
-        :param int transaction_state_log_segment_bytes: The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (
+        :param bool auto_create_topics_enable: Enable auto creation of topics.
+        :param str compression_type: Enum: `gzip`, `snappy`, `lz4`, `zstd`, `uncompressed`, `producer`. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `uncompressed` which is equivalent to no compression; and `producer` which means retain the original compression codec set by the producer.
+        :param int connections_max_idle_ms: Idle connections timeout: the server socket processor threads close the connections that idle for longer than this. Example: `540000`.
+        :param int default_replication_factor: Replication factor for autocreated topics.
+        :param int group_initial_rebalance_delay_ms: The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. Example: `3000`.
+        :param int group_max_session_timeout_ms: The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. Example: `1800000`.
+        :param int group_min_session_timeout_ms: The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. Example: `6000`.
+        :param int log_cleaner_delete_retention_ms: How long are delete records retained? Example: `86400000`.
+        :param int log_cleaner_max_compaction_lag_ms: The maximum amount of time message will remain uncompacted. Only applicable for logs that are being compacted.
+        :param float log_cleaner_min_cleanable_ratio: Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option. Example: `0.5`.
+        :param int log_cleaner_min_compaction_lag_ms: The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.
+        :param str log_cleanup_policy: Enum: `delete`, `compact`, `compact,delete`. The default cleanup policy for segments beyond the retention window.
+        :param int log_flush_interval_messages: The number of messages accumulated on a log partition before messages are flushed to disk. Example: `9223372036854775807`.
+        :param int log_flush_interval_ms: The maximum time in ms that a message in any topic is kept in memory before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used.
+        :param int log_index_interval_bytes: The interval with which Kafka adds an entry to the offset index. Example: `4096`.
+        :param int log_index_size_max_bytes: The maximum size in bytes of the offset index. Example: `10485760`.
+        :param int log_local_retention_bytes: The maximum size of local log segments that can grow for a partition before it gets eligible for deletion. If set to -2, the value of log.retention.bytes is used. The effective value should always be less than or equal to log.retention.bytes value.
+        :param int log_local_retention_ms: The number of milliseconds to keep the local log segments before it gets eligible for deletion. If set to -2, the value of log.retention.ms is used. The effective value should always be less than or equal to log.retention.ms value.
+        :param bool log_message_downconversion_enable: This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests.
+        :param int log_message_timestamp_difference_max_ms: The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message.
+        :param str log_message_timestamp_type: Enum: `CreateTime`, `LogAppendTime`. Define whether the timestamp in the message is message create time or log append time.
+        :param bool log_preallocate: Should pre allocate file when create new segment?
+        :param int log_retention_bytes: The maximum size of the log before deleting messages.
+        :param int log_retention_hours: The number of hours to keep a log file before deleting it.
+        :param int log_retention_ms: The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied.
+        :param int log_roll_jitter_ms: The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used.
+        :param int log_roll_ms: The maximum time before a new log segment is rolled out (in milliseconds).
+        :param int log_segment_bytes: The maximum size of a single log file.
+        :param int log_segment_delete_delay_ms: The amount of time to wait before deleting a file from the filesystem. Example: `60000`.
+        :param int max_connections_per_ip: The maximum number of connections allowed from each ip address (defaults to 2147483647).
+        :param int max_incremental_fetch_session_cache_slots: The maximum number of incremental fetch sessions that the broker will maintain. Example: `1000`.
+        :param int message_max_bytes: The maximum size of message that the server can receive. Example: `1048588`.
+        :param int min_insync_replicas: When a producer sets acks to `all` (or `-1`), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful. Example: `1`.
+        :param int num_partitions: Number of partitions for autocreated topics.
+        :param int offsets_retention_minutes: Log retention window in minutes for offsets topic. Example: `10080`.
+        :param int producer_purgatory_purge_interval_requests: The purge interval (in number of requests) of the producer request purgatory(defaults to 1000).
+        :param int replica_fetch_max_bytes: The number of bytes of messages to attempt to fetch for each partition (defaults to 1048576). This is not an absolute maximum, if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made.
+        :param int replica_fetch_response_max_bytes: Maximum bytes expected for the entire fetch response (defaults to 10485760). Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum.
+        :param str sasl_oauthbearer_expected_audience: The (optional) comma-delimited setting for the broker to use to verify that the JWT was issued for one of the expected audiences.
+        :param str sasl_oauthbearer_expected_issuer: Optional setting for the broker to use to verify that the JWT was created by the expected issuer.
+        :param str sasl_oauthbearer_jwks_endpoint_url: OIDC JWKS endpoint URL. By setting this the SASL SSL OAuth2/OIDC authentication is enabled. See also other options for SASL OAuth2/OIDC.
+        :param str sasl_oauthbearer_sub_claim_name: Name of the scope from which to extract the subject claim from the JWT. Defaults to sub.
+        :param int socket_request_max_bytes: The maximum number of bytes in a socket request (defaults to 104857600).
+        :param bool transaction_partition_verification_enable: Enable verification that checks that the partition has been added to the transaction before writing transactional records to the partition.
+        :param int transaction_remove_expired_transaction_cleanup_interval_ms: The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (defaults to 3600000 (1 hour)).
+        :param int transaction_state_log_segment_bytes: The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (defaults to 104857600 (100 mebibytes)).
         """
         if auto_create_topics_enable is not None:
             pulumi.set(__self__, "auto_create_topics_enable", auto_create_topics_enable)
@@ -7953,7 +7904,7 @@ class KafkaKafkaUserConfigKafka(dict):
     @pulumi.getter(name="autoCreateTopicsEnable")
     def auto_create_topics_enable(self) -> Optional[bool]:
         """
-        Enable auto
+        Enable auto creation of topics.
         """
         return pulumi.get(self, "auto_create_topics_enable")
 
@@ -7961,7 +7912,7 @@ class KafkaKafkaUserConfigKafka(dict):
     @pulumi.getter(name="compressionType")
     def compression_type(self) -> Optional[str]:
         """
-        Enum: `gzip`, `snappy`, `lz4`, `zstd`, `uncompressed`, `producer`. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `uncompressed` which is equivalent to no compression; and `producer` which means retain the original compression codec set by the producer.
+        Enum: `gzip`, `snappy`, `lz4`, `zstd`, `uncompressed`, `producer`. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `uncompressed` which is equivalent to no compression; and `producer` which means retain the original compression codec set by the producer.
         """
         return pulumi.get(self, "compression_type")
 
@@ -7969,7 +7920,7 @@ class KafkaKafkaUserConfigKafka(dict):
     @pulumi.getter(name="connectionsMaxIdleMs")
     def connections_max_idle_ms(self) -> Optional[int]:
         """
-        Idle connections timeout: the server socket processor threads close the connections that idle for longer than this.
+        Idle connections timeout: the server socket processor threads close the connections that idle for longer than this. Example: `540000`.
        """
         return pulumi.get(self, "connections_max_idle_ms")
 
@@ -7977,7 +7928,7 @@ class KafkaKafkaUserConfigKafka(dict):
     @pulumi.getter(name="defaultReplicationFactor")
     def default_replication_factor(self) -> Optional[int]:
         """
-        Replication factor for
+        Replication factor for autocreated topics.
         """
         return pulumi.get(self, "default_replication_factor")
 
@@ -7985,7 +7936,7 @@ class KafkaKafkaUserConfigKafka(dict):
     @pulumi.getter(name="groupInitialRebalanceDelayMs")
     def group_initial_rebalance_delay_ms(self) -> Optional[int]:
         """
-        The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time.
+        The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. Example: `3000`.
         """
         return pulumi.get(self, "group_initial_rebalance_delay_ms")
 
@@ -7993,7 +7944,7 @@ class KafkaKafkaUserConfigKafka(dict):
     @pulumi.getter(name="groupMaxSessionTimeoutMs")
     def group_max_session_timeout_ms(self) -> Optional[int]:
         """
-        The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures.
+        The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. Example: `1800000`.
         """
         return pulumi.get(self, "group_max_session_timeout_ms")
 
@@ -8001,7 +7952,7 @@ class KafkaKafkaUserConfigKafka(dict):
     @pulumi.getter(name="groupMinSessionTimeoutMs")
     def group_min_session_timeout_ms(self) -> Optional[int]:
         """
-        The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures.
+        The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. Example: `6000`.
         """
         return pulumi.get(self, "group_min_session_timeout_ms")
 
@@ -8009,7 +7960,7 @@ class KafkaKafkaUserConfigKafka(dict):
     @pulumi.getter(name="logCleanerDeleteRetentionMs")
     def log_cleaner_delete_retention_ms(self) -> Optional[int]:
         """
-        How long are delete records retained?
+        How long are delete records retained? Example: `86400000`.
         """
         return pulumi.get(self, "log_cleaner_delete_retention_ms")
 
@@ -8017,7 +7968,7 @@ class KafkaKafkaUserConfigKafka(dict):
     @pulumi.getter(name="logCleanerMaxCompactionLagMs")
    def log_cleaner_max_compaction_lag_ms(self) -> Optional[int]:
         """
-        The maximum amount of time message will remain uncompacted. Only applicable for logs that are being compacted.
+        The maximum amount of time message will remain uncompacted. Only applicable for logs that are being compacted.
         """
         return pulumi.get(self, "log_cleaner_max_compaction_lag_ms")
 
@@ -8025,7 +7976,7 @@ class KafkaKafkaUserConfigKafka(dict):
     @pulumi.getter(name="logCleanerMinCleanableRatio")
     def log_cleaner_min_cleanable_ratio(self) -> Optional[float]:
         """
-        Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option.
+        Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option. Example: `0.5`.
         """
         return pulumi.get(self, "log_cleaner_min_cleanable_ratio")
 
@@ -8033,7 +7984,7 @@ class KafkaKafkaUserConfigKafka(dict):
     @pulumi.getter(name="logCleanerMinCompactionLagMs")
     def log_cleaner_min_compaction_lag_ms(self) -> Optional[int]:
         """
-        The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.
+        The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.
         """
         return pulumi.get(self, "log_cleaner_min_compaction_lag_ms")
 
@@ -8041,7 +7992,7 @@ class KafkaKafkaUserConfigKafka(dict):
     @pulumi.getter(name="logCleanupPolicy")
     def log_cleanup_policy(self) -> Optional[str]:
         """
-        Enum: `delete`, `compact`, `compact,delete`. The default cleanup policy for segments beyond the retention window
+        Enum: `delete`, `compact`, `compact,delete`. The default cleanup policy for segments beyond the retention window.
         """
         return pulumi.get(self, "log_cleanup_policy")
 
@@ -8049,7 +8000,7 @@ class KafkaKafkaUserConfigKafka(dict):
     @pulumi.getter(name="logFlushIntervalMessages")
     def log_flush_interval_messages(self) -> Optional[int]:
         """
-        The number of messages accumulated on a log partition before messages are flushed to disk
+        The number of messages accumulated on a log partition before messages are flushed to disk. Example: `9223372036854775807`.
         """
         return pulumi.get(self, "log_flush_interval_messages")
 
@@ -8057,7 +8008,7 @@ class KafkaKafkaUserConfigKafka(dict):
     @pulumi.getter(name="logFlushIntervalMs")
     def log_flush_interval_ms(self) -> Optional[int]:
         """
-        The maximum time in ms that a message in any topic is kept in memory
+        The maximum time in ms that a message in any topic is kept in memory before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used.
         """
         return pulumi.get(self, "log_flush_interval_ms")
 
@@ -8065,7 +8016,7 @@ class KafkaKafkaUserConfigKafka(dict):
     @pulumi.getter(name="logIndexIntervalBytes")
     def log_index_interval_bytes(self) -> Optional[int]:
         """
-        The interval with which Kafka adds an entry to the offset index
+        The interval with which Kafka adds an entry to the offset index. Example: `4096`.
         """
         return pulumi.get(self, "log_index_interval_bytes")
 
@@ -8073,7 +8024,7 @@ class KafkaKafkaUserConfigKafka(dict):
     @pulumi.getter(name="logIndexSizeMaxBytes")
     def log_index_size_max_bytes(self) -> Optional[int]:
         """
-        The maximum size in bytes of the offset index
+        The maximum size in bytes of the offset index. Example: `10485760`.
         """
         return pulumi.get(self, "log_index_size_max_bytes")
 
@@ -8081,7 +8032,7 @@ class KafkaKafkaUserConfigKafka(dict):
     @pulumi.getter(name="logLocalRetentionBytes")
     def log_local_retention_bytes(self) -> Optional[int]:
         """
-        The maximum size of local log segments that can grow for a partition before it gets eligible for deletion. If set to -2, the value of log.retention.bytes is used. The effective value should always be less than or equal to log.retention.bytes value.
+        The maximum size of local log segments that can grow for a partition before it gets eligible for deletion. If set to -2, the value of log.retention.bytes is used. The effective value should always be less than or equal to log.retention.bytes value.
         """
         return pulumi.get(self, "log_local_retention_bytes")
 
@@ -8089,7 +8040,7 @@ class KafkaKafkaUserConfigKafka(dict):
     @pulumi.getter(name="logLocalRetentionMs")
     def log_local_retention_ms(self) -> Optional[int]:
         """
-        The number of milliseconds to keep the local log segments before it gets eligible for deletion. If set to -2, the value of log.retention.ms is used. The effective value should always be less than or equal to log.retention.ms value.
+        The number of milliseconds to keep the local log segments before it gets eligible for deletion. If set to -2, the value of log.retention.ms is used. The effective value should always be less than or equal to log.retention.ms value.
         """
         return pulumi.get(self, "log_local_retention_ms")
 
@@ -8097,7 +8048,7 @@ class KafkaKafkaUserConfigKafka(dict):
     @pulumi.getter(name="logMessageDownconversionEnable")
     def log_message_downconversion_enable(self) -> Optional[bool]:
         """
-        This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests.
+        This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests.
         """
         return pulumi.get(self, "log_message_downconversion_enable")
 
@@ -8105,7 +8056,7 @@ class KafkaKafkaUserConfigKafka(dict):
     @pulumi.getter(name="logMessageTimestampDifferenceMaxMs")
     def log_message_timestamp_difference_max_ms(self) -> Optional[int]:
         """
-        The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message
+        The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message.
         """
         return pulumi.get(self, "log_message_timestamp_difference_max_ms")
 
@@ -8113,7 +8064,7 @@ class KafkaKafkaUserConfigKafka(dict):
     @pulumi.getter(name="logMessageTimestampType")
     def log_message_timestamp_type(self) -> Optional[str]:
         """
-        Enum: `CreateTime`, `LogAppendTime`. Define whether the timestamp in the message is message create time or log append time.
+        Enum: `CreateTime`, `LogAppendTime`. Define whether the timestamp in the message is message create time or log append time.
         """
         return pulumi.get(self, "log_message_timestamp_type")
 
@@ -8121,7 +8072,7 @@ class KafkaKafkaUserConfigKafka(dict):
     @pulumi.getter(name="logPreallocate")
     def log_preallocate(self) -> Optional[bool]:
         """
-        Should pre allocate file when create new segment?
+        Should pre allocate file when create new segment?
         """
         return pulumi.get(self, "log_preallocate")
 
@@ -8129,7 +8080,7 @@ class KafkaKafkaUserConfigKafka(dict):
     @pulumi.getter(name="logRetentionBytes")
     def log_retention_bytes(self) -> Optional[int]:
         """
-        The maximum size of the log before deleting messages
+        The maximum size of the log before deleting messages.
         """
         return pulumi.get(self, "log_retention_bytes")
 
@@ -8137,7 +8088,7 @@ class KafkaKafkaUserConfigKafka(dict):
     @pulumi.getter(name="logRetentionHours")
     def log_retention_hours(self) -> Optional[int]:
         """
-        The number of hours to keep a log file before deleting it
+        The number of hours to keep a log file before deleting it.
         """
         return pulumi.get(self, "log_retention_hours")
 
@@ -8145,7 +8096,7 @@ class KafkaKafkaUserConfigKafka(dict):
     @pulumi.getter(name="logRetentionMs")
     def log_retention_ms(self) -> Optional[int]:
         """
-        The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied.
+        The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied.
         """
         return pulumi.get(self, "log_retention_ms")
 
@@ -8153,7 +8104,7 @@ class KafkaKafkaUserConfigKafka(dict):
     @pulumi.getter(name="logRollJitterMs")
     def log_roll_jitter_ms(self) -> Optional[int]:
         """
-        The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used
+        The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used.
         """
         return pulumi.get(self, "log_roll_jitter_ms")
 
@@ -8161,7 +8112,7 @@ class KafkaKafkaUserConfigKafka(dict):
     @pulumi.getter(name="logRollMs")
     def log_roll_ms(self) -> Optional[int]:
         """
-        The maximum time before a new log segment is rolled out (in milliseconds).
+        The maximum time before a new log segment is rolled out (in milliseconds).
         """
         return pulumi.get(self, "log_roll_ms")
 
@@ -8169,7 +8120,7 @@ class KafkaKafkaUserConfigKafka(dict):
     @pulumi.getter(name="logSegmentBytes")
     def log_segment_bytes(self) -> Optional[int]:
         """
-        The maximum size of a single log file
+        The maximum size of a single log file.
         """
         return pulumi.get(self, "log_segment_bytes")
 
@@ -8177,7 +8128,7 @@ class KafkaKafkaUserConfigKafka(dict):
     @pulumi.getter(name="logSegmentDeleteDelayMs")
     def log_segment_delete_delay_ms(self) -> Optional[int]:
         """
-        The amount of time to wait before deleting a file from the filesystem
+        The amount of time to wait before deleting a file from the filesystem. Example: `60000`.
         """
         return pulumi.get(self, "log_segment_delete_delay_ms")
 
@@ -8185,7 +8136,7 @@ class KafkaKafkaUserConfigKafka(dict):
     @pulumi.getter(name="maxConnectionsPerIp")
     def max_connections_per_ip(self) -> Optional[int]:
         """
-        The maximum number of connections allowed from each ip address (
+        The maximum number of connections allowed from each ip address (defaults to 2147483647).
         """
         return pulumi.get(self, "max_connections_per_ip")
 
@@ -8193,7 +8144,7 @@ class KafkaKafkaUserConfigKafka(dict):
     @pulumi.getter(name="maxIncrementalFetchSessionCacheSlots")
     def max_incremental_fetch_session_cache_slots(self) -> Optional[int]:
         """
-        The maximum number of incremental fetch sessions that the broker will maintain.
+        The maximum number of incremental fetch sessions that the broker will maintain. Example: `1000`.
         """
         return pulumi.get(self, "max_incremental_fetch_session_cache_slots")
 
@@ -8201,7 +8152,7 @@ class KafkaKafkaUserConfigKafka(dict):
     @pulumi.getter(name="messageMaxBytes")
     def message_max_bytes(self) -> Optional[int]:
         """
-        The maximum size of message that the server can receive.
+        The maximum size of message that the server can receive. Example: `1048588`.
         """
         return pulumi.get(self, "message_max_bytes")
 
@@ -8209,7 +8160,7 @@ class KafkaKafkaUserConfigKafka(dict):
     @pulumi.getter(name="minInsyncReplicas")
     def min_insync_replicas(self) -> Optional[int]:
         """
-        When a producer sets acks to `all` (or `-1`), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful.
+        When a producer sets acks to `all` (or `-1`), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful. Example: `1`.
         """
         return pulumi.get(self, "min_insync_replicas")
 
@@ -8217,7 +8168,7 @@ class KafkaKafkaUserConfigKafka(dict):
     @pulumi.getter(name="numPartitions")
     def num_partitions(self) -> Optional[int]:
         """
-        Number of partitions for
+        Number of partitions for autocreated topics.
         """
         return pulumi.get(self, "num_partitions")
 
@@ -8225,7 +8176,7 @@ class KafkaKafkaUserConfigKafka(dict):
     @pulumi.getter(name="offsetsRetentionMinutes")
     def offsets_retention_minutes(self) -> Optional[int]:
         """
-        Log retention window in minutes for offsets topic
+        Log retention window in minutes for offsets topic. Example: `10080`.
         """
         return pulumi.get(self, "offsets_retention_minutes")
 
@@ -8233,7 +8184,7 @@ class KafkaKafkaUserConfigKafka(dict):
     @pulumi.getter(name="producerPurgatoryPurgeIntervalRequests")
     def producer_purgatory_purge_interval_requests(self) -> Optional[int]:
         """
-        The purge interval (in number of requests) of the producer request purgatory
+        The purge interval (in number of requests) of the producer request purgatory(defaults to 1000).
         """
         return pulumi.get(self, "producer_purgatory_purge_interval_requests")
 
@@ -8241,7 +8192,7 @@ class KafkaKafkaUserConfigKafka(dict):
     @pulumi.getter(name="replicaFetchMaxBytes")
     def replica_fetch_max_bytes(self) -> Optional[int]:
         """
-        The number of bytes of messages to attempt to fetch for each partition . This is not an absolute maximum, if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made.
+        The number of bytes of messages to attempt to fetch for each partition (defaults to 1048576). This is not an absolute maximum, if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made.
         """
         return pulumi.get(self, "replica_fetch_max_bytes")
 
@@ -8249,7 +8200,7 @@ class KafkaKafkaUserConfigKafka(dict):
     @pulumi.getter(name="replicaFetchResponseMaxBytes")
     def replica_fetch_response_max_bytes(self) -> Optional[int]:
         """
-        Maximum bytes expected for the entire fetch response. Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum.
+        Maximum bytes expected for the entire fetch response (defaults to 10485760). Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum.
         """
         return pulumi.get(self, "replica_fetch_response_max_bytes")
 
@@ -8257,7 +8208,7 @@ class KafkaKafkaUserConfigKafka(dict):
     @pulumi.getter(name="saslOauthbearerExpectedAudience")
     def sasl_oauthbearer_expected_audience(self) -> Optional[str]:
         """
-        The (optional) comma-delimited setting for the broker to use to verify that the JWT was issued for one of the expected audiences.
+        The (optional) comma-delimited setting for the broker to use to verify that the JWT was issued for one of the expected audiences.
         """
         return pulumi.get(self, "sasl_oauthbearer_expected_audience")
 
@@ -8265,7 +8216,7 @@ class KafkaKafkaUserConfigKafka(dict):
     @pulumi.getter(name="saslOauthbearerExpectedIssuer")
     def sasl_oauthbearer_expected_issuer(self) -> Optional[str]:
         """
-        Optional setting for the broker to use to verify that the JWT was created by the expected issuer.
+        Optional setting for the broker to use to verify that the JWT was created by the expected issuer.
         """
         return pulumi.get(self, "sasl_oauthbearer_expected_issuer")
 
@@ -8273,7 +8224,7 @@ class KafkaKafkaUserConfigKafka(dict):
     @pulumi.getter(name="saslOauthbearerJwksEndpointUrl")
     def sasl_oauthbearer_jwks_endpoint_url(self) -> Optional[str]:
         """
-        OIDC JWKS endpoint URL. By setting this the SASL SSL OAuth2/OIDC authentication is enabled. See also other options for SASL OAuth2/OIDC.
+        OIDC JWKS endpoint URL. By setting this the SASL SSL OAuth2/OIDC authentication is enabled. See also other options for SASL OAuth2/OIDC.
         """
         return pulumi.get(self, "sasl_oauthbearer_jwks_endpoint_url")
 
@@ -8281,7 +8232,7 @@ class KafkaKafkaUserConfigKafka(dict):
     @pulumi.getter(name="saslOauthbearerSubClaimName")
     def sasl_oauthbearer_sub_claim_name(self) -> Optional[str]:
         """
-        Name of the scope from which to extract the subject claim from the JWT.
+        Name of the scope from which to extract the subject claim from the JWT. Defaults to sub.
         """
         return pulumi.get(self, "sasl_oauthbearer_sub_claim_name")
 
@@ -8289,7 +8240,7 @@ class KafkaKafkaUserConfigKafka(dict):
     @pulumi.getter(name="socketRequestMaxBytes")
     def socket_request_max_bytes(self) -> Optional[int]:
         """
-        The maximum number of bytes in a socket request (
+        The maximum number of bytes in a socket request (defaults to 104857600).
         """
         return pulumi.get(self, "socket_request_max_bytes")
 
@@ -8297,7 +8248,7 @@ class KafkaKafkaUserConfigKafka(dict):
     @pulumi.getter(name="transactionPartitionVerificationEnable")
     def transaction_partition_verification_enable(self) -> Optional[bool]:
         """
-        Enable verification that checks that the partition has been added to the transaction before writing transactional records to the partition.
+        Enable verification that checks that the partition has been added to the transaction before writing transactional records to the partition.
         """
         return pulumi.get(self, "transaction_partition_verification_enable")
 
@@ -8305,7 +8256,7 @@ class KafkaKafkaUserConfigKafka(dict):
     @pulumi.getter(name="transactionRemoveExpiredTransactionCleanupIntervalMs")
     def transaction_remove_expired_transaction_cleanup_interval_ms(self) -> Optional[int]:
         """
-        The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (
+        The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (defaults to 3600000 (1 hour)).
         """
         return pulumi.get(self, "transaction_remove_expired_transaction_cleanup_interval_ms")
 
@@ -8313,7 +8264,7 @@ class KafkaKafkaUserConfigKafka(dict):
     @pulumi.getter(name="transactionStateLogSegmentBytes")
     def transaction_state_log_segment_bytes(self) -> Optional[int]:
         """
-        The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (
+        The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (defaults to 104857600 (100 mebibytes)).
         """
         return pulumi.get(self, "transaction_state_log_segment_bytes")
 
@@ -12763,8 +12714,6 @@ class MySqlMysqlUserConfigMigration(dict):
         suggest = None
         if key == "ignoreDbs":
             suggest = "ignore_dbs"
-        elif key == "ignoreRoles":
-            suggest = "ignore_roles"
 
         if suggest:
             pulumi.log.warn(f"Key '{key}' not found in MySqlMysqlUserConfigMigration. Access the value via the '{suggest}' property getter instead.")
@@ -12782,7 +12731,6 @@ class MySqlMysqlUserConfigMigration(dict):
                  port: int,
                  dbname: Optional[str] = None,
                  ignore_dbs: Optional[str] = None,
-                 ignore_roles: Optional[str] = None,
                  method: Optional[str] = None,
                  password: Optional[str] = None,
                  ssl: Optional[bool] = None,
@@ -12792,7 +12740,6 @@ class MySqlMysqlUserConfigMigration(dict):
         :param int port: Port number of the server where to migrate data from. Example: `1234`.
         :param str dbname: Database name for bootstrapping the initial connection. Example: `defaultdb`.
         :param str ignore_dbs: Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment). Example: `db1,db2`.
-        :param str ignore_roles: Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment). Example: `role1,role2`.
         :param str method: Enum: `dump`, `replication`. The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
         :param str password: Password for authentication with the server where to migrate data from. Example: `jjKk45Nnd`.
         :param bool ssl: The server where to migrate data from is secured with SSL. Default: `true`.
@@ -12804,8 +12751,6 @@ class MySqlMysqlUserConfigMigration(dict):
             pulumi.set(__self__, "dbname", dbname)
         if ignore_dbs is not None:
             pulumi.set(__self__, "ignore_dbs", ignore_dbs)
-        if ignore_roles is not None:
-            pulumi.set(__self__, "ignore_roles", ignore_roles)
         if method is not None:
             pulumi.set(__self__, "method", method)
         if password is not None:
@@ -12847,14 +12792,6 @@ class MySqlMysqlUserConfigMigration(dict):
         """
         return pulumi.get(self, "ignore_dbs")
 
-    @property
-    @pulumi.getter(name="ignoreRoles")
-    def ignore_roles(self) -> Optional[str]:
-        """
-        Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment). Example: `role1,role2`.
-        """
-        return pulumi.get(self, "ignore_roles")
-
     @property
     @pulumi.getter
     def method(self) -> Optional[str]:
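In this prerelease the `ignore_roles` field is dropped from the migration block, so only the remaining fields shown above can be set. A hedged sketch of wiring up a migration on an Aiven for MySQL service (host, credentials and names are placeholders; `host` and `username` are part of the full migration config but are not shown in this excerpt):

import pulumi_aiven as aiven

# Sketch only: migrate an external MySQL database into an Aiven for MySQL service.
# ignore_roles is no longer part of MySqlMysqlUserConfigMigrationArgs in this version.
mysql = aiven.MySql(
    "example-mysql",
    project="my-project",            # placeholder
    cloud_name="google-europe-west1",
    plan="business-4",
    service_name="example-mysql",
    mysql_user_config=aiven.MySqlMysqlUserConfigArgs(
        migration=aiven.MySqlMysqlUserConfigMigrationArgs(
            host="source-db.example.com",   # assumed required field, not shown in this excerpt
            port=3306,
            dbname="defaultdb",
            ignore_dbs="db1,db2",
            method="dump",
            password="jjKk45Nnd",
            ssl=True,
        ),
    ),
)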
@@ -16875,8 +16812,6 @@ class PgPgUserConfigMigration(dict):
         suggest = None
         if key == "ignoreDbs":
             suggest = "ignore_dbs"
-        elif key == "ignoreRoles":
-            suggest = "ignore_roles"
 
         if suggest:
             pulumi.log.warn(f"Key '{key}' not found in PgPgUserConfigMigration. Access the value via the '{suggest}' property getter instead.")
@@ -16894,7 +16829,6 @@ class PgPgUserConfigMigration(dict):
                  port: int,
                  dbname: Optional[str] = None,
                  ignore_dbs: Optional[str] = None,
-                 ignore_roles: Optional[str] = None,
                  method: Optional[str] = None,
                  password: Optional[str] = None,
                  ssl: Optional[bool] = None,
@@ -16904,7 +16838,6 @@ class PgPgUserConfigMigration(dict):
         :param int port: Port number of the server where to migrate data from. Example: `1234`.
         :param str dbname: Database name for bootstrapping the initial connection. Example: `defaultdb`.
         :param str ignore_dbs: Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment). Example: `db1,db2`.
-        :param str ignore_roles: Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment). Example: `role1,role2`.
         :param str method: Enum: `dump`, `replication`. The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
         :param str password: Password for authentication with the server where to migrate data from. Example: `jjKk45Nnd`.
         :param bool ssl: The server where to migrate data from is secured with SSL. Default: `true`.
@@ -16916,8 +16849,6 @@ class PgPgUserConfigMigration(dict):
             pulumi.set(__self__, "dbname", dbname)
         if ignore_dbs is not None:
             pulumi.set(__self__, "ignore_dbs", ignore_dbs)
-        if ignore_roles is not None:
-            pulumi.set(__self__, "ignore_roles", ignore_roles)
         if method is not None:
             pulumi.set(__self__, "method", method)
         if password is not None:
@@ -16959,14 +16890,6 @@ class PgPgUserConfigMigration(dict):
         """
         return pulumi.get(self, "ignore_dbs")
 
-    @property
-    @pulumi.getter(name="ignoreRoles")
-    def ignore_roles(self) -> Optional[str]:
-        """
-        Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment). Example: `role1,role2`.
-        """
-        return pulumi.get(self, "ignore_roles")
-
     @property
     @pulumi.getter
     def method(self) -> Optional[str]:
@@ -18042,8 +17965,6 @@ class PgPgUserConfigPgbouncer(dict):
             suggest = "autodb_pool_size"
         elif key == "ignoreStartupParameters":
             suggest = "ignore_startup_parameters"
-        elif key == "maxPreparedStatements":
-            suggest = "max_prepared_statements"
         elif key == "minPoolSize":
             suggest = "min_pool_size"
         elif key == "serverIdleTimeout":
@@ -18070,7 +17991,6 @@ class PgPgUserConfigPgbouncer(dict):
                  autodb_pool_mode: Optional[str] = None,
                  autodb_pool_size: Optional[int] = None,
                  ignore_startup_parameters: Optional[Sequence[str]] = None,
-                 max_prepared_statements: Optional[int] = None,
                  min_pool_size: Optional[int] = None,
                  server_idle_timeout: Optional[int] = None,
                  server_lifetime: Optional[int] = None,
@@ -18081,7 +18001,6 @@ class PgPgUserConfigPgbouncer(dict):
         :param str autodb_pool_mode: Enum: `session`, `transaction`, `statement`. PGBouncer pool mode. Default: `transaction`.
         :param int autodb_pool_size: If non-zero then create automatically a pool of that size per user when a pool doesn't exist. Default: `0`.
         :param Sequence[str] ignore_startup_parameters: List of parameters to ignore when given in startup packet.
-        :param int max_prepared_statements: PgBouncer tracks protocol-level named prepared statements related commands sent by the client in transaction and statement pooling modes when max*prepared*statements is set to a non-zero value. Setting it to 0 disables prepared statements. max*prepared*statements defaults to 100, and its maximum is 3000. Default: `100`.
         :param int min_pool_size: Add more server connections to pool if below this number. Improves behavior when usual load comes suddenly back after period of total inactivity. The value is effectively capped at the pool size. Default: `0`.
         :param int server_idle_timeout: If a server connection has been idle more than this many seconds it will be dropped. If 0 then timeout is disabled. (seconds). Default: `600`.
         :param int server_lifetime: The pooler will close an unused server connection that has been connected longer than this. (seconds). Default: `3600`.
@@ -18097,8 +18016,6 @@ class PgPgUserConfigPgbouncer(dict):
             pulumi.set(__self__, "autodb_pool_size", autodb_pool_size)
         if ignore_startup_parameters is not None:
             pulumi.set(__self__, "ignore_startup_parameters", ignore_startup_parameters)
-        if max_prepared_statements is not None:
-            pulumi.set(__self__, "max_prepared_statements", max_prepared_statements)
         if min_pool_size is not None:
             pulumi.set(__self__, "min_pool_size", min_pool_size)
         if server_idle_timeout is not None:
@@ -18148,14 +18065,6 @@ class PgPgUserConfigPgbouncer(dict):
         """
         return pulumi.get(self, "ignore_startup_parameters")
 
-    @property
-    @pulumi.getter(name="maxPreparedStatements")
-    def max_prepared_statements(self) -> Optional[int]:
-        """
-        PgBouncer tracks protocol-level named prepared statements related commands sent by the client in transaction and statement pooling modes when max*prepared*statements is set to a non-zero value. Setting it to 0 disables prepared statements. max*prepared*statements defaults to 100, and its maximum is 3000. Default: `100`.
-        """
-        return pulumi.get(self, "max_prepared_statements")
-
     @property
     @pulumi.getter(name="minPoolSize")
     def min_pool_size(self) -> Optional[int]:
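The PgBouncer block above loses `max_prepared_statements` in this prerelease. A hedged sketch of setting the remaining pooler options on a PostgreSQL service (all names and values are placeholders):

import pulumi_aiven as aiven

# Sketch only: configure the connection pooler; max_prepared_statements is not set
# because the field was removed from PgPgUserConfigPgbouncerArgs in this version.
pg = aiven.Pg(
    "example-pg",
    project="my-project",            # placeholder
    cloud_name="google-europe-west1",
    plan="business-4",
    service_name="example-pg",
    pg_user_config=aiven.PgPgUserConfigArgs(
        pgbouncer=aiven.PgPgUserConfigPgbouncerArgs(
            autodb_pool_mode="transaction",
            autodb_pool_size=10,
            min_pool_size=0,
            server_idle_timeout=600,
            server_lifetime=3600,
        ),
    ),
)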
@@ -19118,8 +19027,6 @@ class RedisRedisUserConfigMigration(dict):
         suggest = None
         if key == "ignoreDbs":
             suggest = "ignore_dbs"
-        elif key == "ignoreRoles":
-            suggest = "ignore_roles"
 
         if suggest:
             pulumi.log.warn(f"Key '{key}' not found in RedisRedisUserConfigMigration. Access the value via the '{suggest}' property getter instead.")
@@ -19137,7 +19044,6 @@ class RedisRedisUserConfigMigration(dict):
                  port: int,
                  dbname: Optional[str] = None,
                  ignore_dbs: Optional[str] = None,
-                 ignore_roles: Optional[str] = None,
                  method: Optional[str] = None,
                  password: Optional[str] = None,
                  ssl: Optional[bool] = None,
@@ -19147,7 +19053,6 @@ class RedisRedisUserConfigMigration(dict):
         :param int port: Port number of the server where to migrate data from. Example: `1234`.
         :param str dbname: Database name for bootstrapping the initial connection. Example: `defaultdb`.
         :param str ignore_dbs: Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment). Example: `db1,db2`.
-        :param str ignore_roles: Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment). Example: `role1,role2`.
         :param str method: Enum: `dump`, `replication`. The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
         :param str password: Password for authentication with the server where to migrate data from. Example: `jjKk45Nnd`.
         :param bool ssl: The server where to migrate data from is secured with SSL. Default: `true`.
@@ -19159,8 +19064,6 @@ class RedisRedisUserConfigMigration(dict):
             pulumi.set(__self__, "dbname", dbname)
         if ignore_dbs is not None:
             pulumi.set(__self__, "ignore_dbs", ignore_dbs)
-        if ignore_roles is not None:
-            pulumi.set(__self__, "ignore_roles", ignore_roles)
         if method is not None:
             pulumi.set(__self__, "method", method)
         if password is not None:
@@ -19202,14 +19105,6 @@ class RedisRedisUserConfigMigration(dict):
         """
         return pulumi.get(self, "ignore_dbs")
 
-    @property
-    @pulumi.getter(name="ignoreRoles")
-    def ignore_roles(self) -> Optional[str]:
-        """
-        Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment). Example: `role1,role2`.
-        """
-        return pulumi.get(self, "ignore_roles")
-
     @property
     @pulumi.getter
     def method(self) -> Optional[str]:
@@ -19473,12 +19368,8 @@ class ServiceIntegrationClickhouseKafkaUserConfigTable(dict):
             suggest = "num_consumers"
         elif key == "pollMaxBatchSize":
             suggest = "poll_max_batch_size"
-        elif key == "pollMaxTimeoutMs":
-            suggest = "poll_max_timeout_ms"
         elif key == "skipBrokenMessages":
             suggest = "skip_broken_messages"
-        elif key == "threadPerConsumer":
-            suggest = "thread_per_consumer"
 
         if suggest:
             pulumi.log.warn(f"Key '{key}' not found in ServiceIntegrationClickhouseKafkaUserConfigTable. Access the value via the '{suggest}' property getter instead.")
@@ -19504,9 +19395,7 @@ class ServiceIntegrationClickhouseKafkaUserConfigTable(dict):
                  max_rows_per_message: Optional[int] = None,
                  num_consumers: Optional[int] = None,
                  poll_max_batch_size: Optional[int] = None,
-                 poll_max_timeout_ms: Optional[int] = None,
-                 skip_broken_messages: Optional[int] = None,
-                 thread_per_consumer: Optional[bool] = None):
+                 skip_broken_messages: Optional[int] = None):
         """
         :param Sequence['ServiceIntegrationClickhouseKafkaUserConfigTableColumnArgs'] columns: Table columns
         :param str data_format: Enum: `Avro`, `CSV`, `JSONAsString`, `JSONCompactEachRow`, `JSONCompactStringsEachRow`, `JSONEachRow`, `JSONStringsEachRow`, `MsgPack`, `TSKV`, `TSV`, `TabSeparated`, `RawBLOB`, `AvroConfluent`, `Parquet`. Message data format. Default: `JSONEachRow`.
@@ -19520,9 +19409,7 @@ class ServiceIntegrationClickhouseKafkaUserConfigTable(dict):
         :param int max_rows_per_message: The maximum number of rows produced in one kafka message for row-based formats. Default: `1`.
         :param int num_consumers: The number of consumers per table per replica. Default: `1`.
         :param int poll_max_batch_size: Maximum amount of messages to be polled in a single Kafka poll. Default: `0`.
-        :param int poll_max_timeout_ms: Timeout in milliseconds for a single poll from Kafka. Takes the value of the stream*flush*interval_ms server setting by default (500ms). Default: `0`.
         :param int skip_broken_messages: Skip at least this number of broken messages from Kafka topic per block. Default: `0`.
-        :param bool thread_per_consumer: Provide an independent thread for each consumer. All consumers run in the same thread by default. Default: `false`.
         """
         pulumi.set(__self__, "columns", columns)
         pulumi.set(__self__, "data_format", data_format)
@@ -19543,12 +19430,8 @@ class ServiceIntegrationClickhouseKafkaUserConfigTable(dict):
             pulumi.set(__self__, "num_consumers", num_consumers)
         if poll_max_batch_size is not None:
             pulumi.set(__self__, "poll_max_batch_size", poll_max_batch_size)
-        if poll_max_timeout_ms is not None:
-            pulumi.set(__self__, "poll_max_timeout_ms", poll_max_timeout_ms)
         if skip_broken_messages is not None:
             pulumi.set(__self__, "skip_broken_messages", skip_broken_messages)
-        if thread_per_consumer is not None:
-            pulumi.set(__self__, "thread_per_consumer", thread_per_consumer)
 
     @property
     @pulumi.getter
@@ -19646,14 +19529,6 @@ class ServiceIntegrationClickhouseKafkaUserConfigTable(dict):
         """
         return pulumi.get(self, "poll_max_batch_size")
 
-    @property
-    @pulumi.getter(name="pollMaxTimeoutMs")
-    def poll_max_timeout_ms(self) -> Optional[int]:
-        """
-        Timeout in milliseconds for a single poll from Kafka. Takes the value of the stream*flush*interval_ms server setting by default (500ms). Default: `0`.
-        """
-        return pulumi.get(self, "poll_max_timeout_ms")
-
     @property
     @pulumi.getter(name="skipBrokenMessages")
     def skip_broken_messages(self) -> Optional[int]:
@@ -19662,14 +19537,6 @@ class ServiceIntegrationClickhouseKafkaUserConfigTable(dict):
         """
         return pulumi.get(self, "skip_broken_messages")
 
-    @property
-    @pulumi.getter(name="threadPerConsumer")
-    def thread_per_consumer(self) -> Optional[bool]:
-        """
-        Provide an independent thread for each consumer. All consumers run in the same thread by default. Default: `false`.
-        """
-        return pulumi.get(self, "thread_per_consumer")
-
 
 @pulumi.output_type
 class ServiceIntegrationClickhouseKafkaUserConfigTableColumn(dict):
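The table block above drops `poll_max_timeout_ms` and `thread_per_consumer`. A hedged sketch of declaring a ClickHouse-Kafka integration table with the fields that remain; service names, the topic, and the columns are placeholders, and `group_name` and `topics` are assumed required fields that do not appear in this excerpt:

import pulumi_aiven as aiven

# Sketch only: read one Kafka topic into a ClickHouse table.
integration = aiven.ServiceIntegration(
    "clickhouse-kafka",
    project="my-project",                      # placeholder
    integration_type="clickhouse_kafka",
    source_service_name="example-kafka",
    destination_service_name="example-clickhouse",
    clickhouse_kafka_user_config=aiven.ServiceIntegrationClickhouseKafkaUserConfigArgs(
        tables=[aiven.ServiceIntegrationClickhouseKafkaUserConfigTableArgs(
            name="events",
            group_name="clickhouse-ingestion",  # assumed field, not shown in this excerpt
            data_format="JSONEachRow",
            columns=[aiven.ServiceIntegrationClickhouseKafkaUserConfigTableColumnArgs(
                name="id",
                type="UInt64",
            )],
            topics=[aiven.ServiceIntegrationClickhouseKafkaUserConfigTableTopicArgs(
                name="events",                  # assumed field, not shown in this excerpt
            )],
            poll_max_batch_size=10000,
            skip_broken_messages=0,
        )],
    ),
)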
@@ -23994,8 +23861,6 @@ class ValkeyValkeyUserConfigMigration(dict):
         suggest = None
         if key == "ignoreDbs":
             suggest = "ignore_dbs"
-        elif key == "ignoreRoles":
-            suggest = "ignore_roles"
 
         if suggest:
             pulumi.log.warn(f"Key '{key}' not found in ValkeyValkeyUserConfigMigration. Access the value via the '{suggest}' property getter instead.")
@@ -24013,7 +23878,6 @@ class ValkeyValkeyUserConfigMigration(dict):
                  port: int,
                  dbname: Optional[str] = None,
                  ignore_dbs: Optional[str] = None,
-                 ignore_roles: Optional[str] = None,
                  method: Optional[str] = None,
                  password: Optional[str] = None,
                  ssl: Optional[bool] = None,
@@ -24023,7 +23887,6 @@ class ValkeyValkeyUserConfigMigration(dict):
         :param int port: Port number of the server where to migrate data from. Example: `1234`.
         :param str dbname: Database name for bootstrapping the initial connection. Example: `defaultdb`.
         :param str ignore_dbs: Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment). Example: `db1,db2`.
-        :param str ignore_roles: Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment). Example: `role1,role2`.
         :param str method: Enum: `dump`, `replication`. The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
         :param str password: Password for authentication with the server where to migrate data from. Example: `jjKk45Nnd`.
         :param bool ssl: The server where to migrate data from is secured with SSL. Default: `true`.
@@ -24035,8 +23898,6 @@ class ValkeyValkeyUserConfigMigration(dict):
             pulumi.set(__self__, "dbname", dbname)
         if ignore_dbs is not None:
             pulumi.set(__self__, "ignore_dbs", ignore_dbs)
-        if ignore_roles is not None:
-            pulumi.set(__self__, "ignore_roles", ignore_roles)
         if method is not None:
             pulumi.set(__self__, "method", method)
         if password is not None:
@@ -24078,14 +23939,6 @@ class ValkeyValkeyUserConfigMigration(dict):
         """
         return pulumi.get(self, "ignore_dbs")
 
-    @property
-    @pulumi.getter(name="ignoreRoles")
-    def ignore_roles(self) -> Optional[str]:
-        """
-        Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment). Example: `role1,role2`.
-        """
-        return pulumi.get(self, "ignore_roles")
-
     @property
     @pulumi.getter
     def method(self) -> Optional[str]:
@@ -26225,7 +26078,6 @@ class GetDragonflyDragonflyUserConfigMigrationResult(dict):
                  port: int,
                  dbname: Optional[str] = None,
                  ignore_dbs: Optional[str] = None,
-                 ignore_roles: Optional[str] = None,
                  method: Optional[str] = None,
                  password: Optional[str] = None,
                  ssl: Optional[bool] = None,
@@ -26235,7 +26087,6 @@ class GetDragonflyDragonflyUserConfigMigrationResult(dict):
         :param int port: Port number of the server where to migrate data from. Example: `1234`.
         :param str dbname: Database name for bootstrapping the initial connection. Example: `defaultdb`.
         :param str ignore_dbs: Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment). Example: `db1,db2`.
-        :param str ignore_roles: Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment). Example: `role1,role2`.
         :param str method: Enum: `dump`, `replication`. The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
         :param str password: Password for authentication with the server where to migrate data from. Example: `jjKk45Nnd`.
         :param bool ssl: The server where to migrate data from is secured with SSL. Default: `true`.
@@ -26247,8 +26098,6 @@ class GetDragonflyDragonflyUserConfigMigrationResult(dict):
             pulumi.set(__self__, "dbname", dbname)
         if ignore_dbs is not None:
             pulumi.set(__self__, "ignore_dbs", ignore_dbs)
-        if ignore_roles is not None:
-            pulumi.set(__self__, "ignore_roles", ignore_roles)
         if method is not None:
             pulumi.set(__self__, "method", method)
         if password is not None:
@@ -26290,14 +26139,6 @@ class GetDragonflyDragonflyUserConfigMigrationResult(dict):
         """
         return pulumi.get(self, "ignore_dbs")
 
-    @property
-    @pulumi.getter(name="ignoreRoles")
-    def ignore_roles(self) -> Optional[str]:
-        """
-        Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment). Example: `role1,role2`.
-        """
-        return pulumi.get(self, "ignore_roles")
-
     @property
     @pulumi.getter
     def method(self) -> Optional[str]:
@@ -29989,7 +29830,6 @@ class GetKafkaKafkaUserConfigResult(dict):
                  additional_backup_regions: Optional[str] = None,
                  aiven_kafka_topic_messages: Optional[bool] = None,
                  custom_domain: Optional[str] = None,
-                 follower_fetching: Optional['outputs.GetKafkaKafkaUserConfigFollowerFetchingResult'] = None,
                  ip_filter_objects: Optional[Sequence['outputs.GetKafkaKafkaUserConfigIpFilterObjectResult']] = None,
                  ip_filter_strings: Optional[Sequence[str]] = None,
                  ip_filters: Optional[Sequence[str]] = None,
@@ -30015,7 +29855,6 @@ class GetKafkaKafkaUserConfigResult(dict):
         :param str additional_backup_regions: Additional Cloud Regions for Backup Replication.
         :param bool aiven_kafka_topic_messages: Allow access to read Kafka topic messages in the Aiven Console and REST API.
         :param str custom_domain: Serve the web frontend using a custom CNAME pointing to the Aiven DNS name. Example: `grafana.example.org`.
-        :param 'GetKafkaKafkaUserConfigFollowerFetchingArgs' follower_fetching: Enable follower fetching
         :param Sequence['GetKafkaKafkaUserConfigIpFilterObjectArgs'] ip_filter_objects: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`
         :param Sequence[str] ip_filter_strings: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`.
         :param Sequence[str] ip_filters: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`.
@@ -30043,8 +29882,6 @@ class GetKafkaKafkaUserConfigResult(dict):
             pulumi.set(__self__, "aiven_kafka_topic_messages", aiven_kafka_topic_messages)
         if custom_domain is not None:
             pulumi.set(__self__, "custom_domain", custom_domain)
-        if follower_fetching is not None:
-            pulumi.set(__self__, "follower_fetching", follower_fetching)
         if ip_filter_objects is not None:
             pulumi.set(__self__, "ip_filter_objects", ip_filter_objects)
         if ip_filter_strings is not None:
@@ -30113,14 +29950,6 @@ class GetKafkaKafkaUserConfigResult(dict):
         """
         return pulumi.get(self, "custom_domain")
 
-    @property
-    @pulumi.getter(name="followerFetching")
-    def follower_fetching(self) -> Optional['outputs.GetKafkaKafkaUserConfigFollowerFetchingResult']:
-        """
-        Enable follower fetching
-        """
-        return pulumi.get(self, "follower_fetching")
-
     @property
     @pulumi.getter(name="ipFilterObjects")
     def ip_filter_objects(self) -> Optional[Sequence['outputs.GetKafkaKafkaUserConfigIpFilterObjectResult']]:
@@ -30288,25 +30117,6 @@ class GetKafkaKafkaUserConfigResult(dict):
         return pulumi.get(self, "tiered_storage")
 
 
-@pulumi.output_type
-class GetKafkaKafkaUserConfigFollowerFetchingResult(dict):
-    def __init__(__self__, *,
-                 enabled: Optional[bool] = None):
-        """
-        :param bool enabled: Whether to enable the follower fetching functionality.
-        """
-        if enabled is not None:
-            pulumi.set(__self__, "enabled", enabled)
-
-    @property
-    @pulumi.getter
-    def enabled(self) -> Optional[bool]:
-        """
-        Whether to enable the follower fetching functionality.
-        """
-        return pulumi.get(self, "enabled")
-
-
 @pulumi.output_type
 class GetKafkaKafkaUserConfigIpFilterObjectResult(dict):
     def __init__(__self__, *,
@@ -30387,52 +30197,52 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
                  transaction_remove_expired_transaction_cleanup_interval_ms: Optional[int] = None,
                  transaction_state_log_segment_bytes: Optional[int] = None):
         """
-        :param bool auto_create_topics_enable: Enable auto
-        :param str compression_type: Enum: `gzip`, `snappy`, `lz4`, `zstd`, `uncompressed`, `producer`. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `uncompressed` which is equivalent to no compression; and `producer` which means retain the original compression codec set by the producer.
-        :param int connections_max_idle_ms: Idle connections timeout: the server socket processor threads close the connections that idle for longer than this.
-        :param int default_replication_factor: Replication factor for
-        :param int group_initial_rebalance_delay_ms: The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time.
-        :param int group_max_session_timeout_ms: The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures.
-        :param int group_min_session_timeout_ms: The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures.
-        :param int log_cleaner_delete_retention_ms: How long are delete records retained?
-        :param int log_cleaner_max_compaction_lag_ms: The maximum amount of time message will remain uncompacted. Only applicable for logs that are being compacted.
-        :param float log_cleaner_min_cleanable_ratio: Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option.
-        :param int log_cleaner_min_compaction_lag_ms: The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.
-        :param str log_cleanup_policy: Enum: `delete`, `compact`, `compact,delete`. The default cleanup policy for segments beyond the retention window
-        :param int log_flush_interval_messages: The number of messages accumulated on a log partition before messages are flushed to disk
-        :param int log_flush_interval_ms: The maximum time in ms that a message in any topic is kept in memory
-        :param int log_index_interval_bytes: The interval with which Kafka adds an entry to the offset index
-        :param int log_index_size_max_bytes: The maximum size in bytes of the offset index
-        :param int log_local_retention_bytes: The maximum size of local log segments that can grow for a partition before it gets eligible for deletion. If set to -2, the value of log.retention.bytes is used. The effective value should always be less than or equal to log.retention.bytes value.
-        :param int log_local_retention_ms: The number of milliseconds to keep the local log segments before it gets eligible for deletion. If set to -2, the value of log.retention.ms is used. The effective value should always be less than or equal to log.retention.ms value.
-        :param bool log_message_downconversion_enable: This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests.
-        :param int log_message_timestamp_difference_max_ms: The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message
-        :param str log_message_timestamp_type: Enum: `CreateTime`, `LogAppendTime`. Define whether the timestamp in the message is message create time or log append time.
-        :param bool log_preallocate: Should pre allocate file when create new segment?
-        :param int log_retention_bytes: The maximum size of the log before deleting messages
-        :param int log_retention_hours: The number of hours to keep a log file before deleting it
-        :param int log_retention_ms: The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied.
-        :param int log_roll_jitter_ms: The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used
-        :param int log_roll_ms: The maximum time before a new log segment is rolled out (in milliseconds).
-        :param int log_segment_bytes: The maximum size of a single log file
-        :param int log_segment_delete_delay_ms: The amount of time to wait before deleting a file from the filesystem
-        :param int max_connections_per_ip: The maximum number of connections allowed from each ip address (
-        :param int max_incremental_fetch_session_cache_slots: The maximum number of incremental fetch sessions that the broker will maintain.
-        :param int message_max_bytes: The maximum size of message that the server can receive.
-        :param int min_insync_replicas: When a producer sets acks to `all` (or `-1`), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful.
-        :param int num_partitions: Number of partitions for
-        :param int offsets_retention_minutes: Log retention window in minutes for offsets topic
-        :param int producer_purgatory_purge_interval_requests: The purge interval (in number of requests) of the producer request purgatory
-        :param int replica_fetch_max_bytes: The number of bytes of messages to attempt to fetch for each partition . This is not an absolute maximum, if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made.
-        :param int replica_fetch_response_max_bytes: Maximum bytes expected for the entire fetch response. Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum.
-        :param str sasl_oauthbearer_expected_audience: The (optional) comma-delimited setting for the broker to use to verify that the JWT was issued for one of the expected audiences.
-        :param str sasl_oauthbearer_expected_issuer: Optional setting for the broker to use to verify that the JWT was created by the expected issuer.
-        :param str sasl_oauthbearer_jwks_endpoint_url: OIDC JWKS endpoint URL. By setting this the SASL SSL OAuth2/OIDC authentication is enabled. See also other options for SASL OAuth2/OIDC.
-        :param str sasl_oauthbearer_sub_claim_name: Name of the scope from which to extract the subject claim from the JWT.
-        :param int socket_request_max_bytes: The maximum number of bytes in a socket request (
-        :param bool transaction_partition_verification_enable: Enable verification that checks that the partition has been added to the transaction before writing transactional records to the partition.
-        :param int transaction_remove_expired_transaction_cleanup_interval_ms: The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (
-        :param int transaction_state_log_segment_bytes: The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (
+        :param bool auto_create_topics_enable: Enable auto creation of topics.
+        :param str compression_type: Enum: `gzip`, `snappy`, `lz4`, `zstd`, `uncompressed`, `producer`. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `uncompressed` which is equivalent to no compression; and `producer` which means retain the original compression codec set by the producer.
+        :param int connections_max_idle_ms: Idle connections timeout: the server socket processor threads close the connections that idle for longer than this. Example: `540000`.
+        :param int default_replication_factor: Replication factor for autocreated topics.
+        :param int group_initial_rebalance_delay_ms: The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. Example: `3000`.
+        :param int group_max_session_timeout_ms: The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. Example: `1800000`.
+        :param int group_min_session_timeout_ms: The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. Example: `6000`.
+        :param int log_cleaner_delete_retention_ms: How long are delete records retained? Example: `86400000`.
+        :param int log_cleaner_max_compaction_lag_ms: The maximum amount of time message will remain uncompacted. Only applicable for logs that are being compacted.
+        :param float log_cleaner_min_cleanable_ratio: Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option. Example: `0.5`.
+        :param int log_cleaner_min_compaction_lag_ms: The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.
+        :param str log_cleanup_policy: Enum: `delete`, `compact`, `compact,delete`. The default cleanup policy for segments beyond the retention window.
+        :param int log_flush_interval_messages: The number of messages accumulated on a log partition before messages are flushed to disk. Example: `9223372036854775807`.
+        :param int log_flush_interval_ms: The maximum time in ms that a message in any topic is kept in memory before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used.
+        :param int log_index_interval_bytes: The interval with which Kafka adds an entry to the offset index. Example: `4096`.
+        :param int log_index_size_max_bytes: The maximum size in bytes of the offset index. Example: `10485760`.
+        :param int log_local_retention_bytes: The maximum size of local log segments that can grow for a partition before it gets eligible for deletion. If set to -2, the value of log.retention.bytes is used. The effective value should always be less than or equal to log.retention.bytes value.
+        :param int log_local_retention_ms: The number of milliseconds to keep the local log segments before it gets eligible for deletion. If set to -2, the value of log.retention.ms is used. The effective value should always be less than or equal to log.retention.ms value.
+        :param bool log_message_downconversion_enable: This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests.
+        :param int log_message_timestamp_difference_max_ms: The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message.
+        :param str log_message_timestamp_type: Enum: `CreateTime`, `LogAppendTime`. Define whether the timestamp in the message is message create time or log append time.
+        :param bool log_preallocate: Should pre allocate file when create new segment?
+        :param int log_retention_bytes: The maximum size of the log before deleting messages.
+        :param int log_retention_hours: The number of hours to keep a log file before deleting it.
+        :param int log_retention_ms: The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied.
+        :param int log_roll_jitter_ms: The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used.
+        :param int log_roll_ms: The maximum time before a new log segment is rolled out (in milliseconds).
+        :param int log_segment_bytes: The maximum size of a single log file.
+        :param int log_segment_delete_delay_ms: The amount of time to wait before deleting a file from the filesystem. Example: `60000`.
+        :param int max_connections_per_ip: The maximum number of connections allowed from each ip address (defaults to 2147483647).
+        :param int max_incremental_fetch_session_cache_slots: The maximum number of incremental fetch sessions that the broker will maintain. Example: `1000`.
+        :param int message_max_bytes: The maximum size of message that the server can receive. Example: `1048588`.
+        :param int min_insync_replicas: When a producer sets acks to `all` (or `-1`), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful. Example: `1`.
+        :param int num_partitions: Number of partitions for autocreated topics.
+        :param int offsets_retention_minutes: Log retention window in minutes for offsets topic. Example: `10080`.
+        :param int producer_purgatory_purge_interval_requests: The purge interval (in number of requests) of the producer request purgatory(defaults to 1000).
+        :param int replica_fetch_max_bytes: The number of bytes of messages to attempt to fetch for each partition (defaults to 1048576). This is not an absolute maximum, if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made.
+        :param int replica_fetch_response_max_bytes: Maximum bytes expected for the entire fetch response (defaults to 10485760). Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum.
+        :param str sasl_oauthbearer_expected_audience: The (optional) comma-delimited setting for the broker to use to verify that the JWT was issued for one of the expected audiences.
+        :param str sasl_oauthbearer_expected_issuer: Optional setting for the broker to use to verify that the JWT was created by the expected issuer.
+        :param str sasl_oauthbearer_jwks_endpoint_url: OIDC JWKS endpoint URL. By setting this the SASL SSL OAuth2/OIDC authentication is enabled. See also other options for SASL OAuth2/OIDC.
+        :param str sasl_oauthbearer_sub_claim_name: Name of the scope from which to extract the subject claim from the JWT. Defaults to sub.
+        :param int socket_request_max_bytes: The maximum number of bytes in a socket request (defaults to 104857600).
+        :param bool transaction_partition_verification_enable: Enable verification that checks that the partition has been added to the transaction before writing transactional records to the partition.
+        :param int transaction_remove_expired_transaction_cleanup_interval_ms: The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (defaults to 3600000 (1 hour)).
+        :param int transaction_state_log_segment_bytes: The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (defaults to 104857600 (100 mebibytes)).
         """
         if auto_create_topics_enable is not None:
             pulumi.set(__self__, "auto_create_topics_enable", auto_create_topics_enable)
@@ -30531,7 +30341,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
     @pulumi.getter(name="autoCreateTopicsEnable")
     def auto_create_topics_enable(self) -> Optional[bool]:
         """
-        Enable auto
+        Enable auto creation of topics.
         """
         return pulumi.get(self, "auto_create_topics_enable")
 
@@ -30539,7 +30349,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
     @pulumi.getter(name="compressionType")
     def compression_type(self) -> Optional[str]:
         """
-        Enum: `gzip`, `snappy`, `lz4`, `zstd`, `uncompressed`, `producer`. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `uncompressed` which is equivalent to no compression; and `producer` which means retain the original compression codec set by the producer.
+        Enum: `gzip`, `snappy`, `lz4`, `zstd`, `uncompressed`, `producer`. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `uncompressed` which is equivalent to no compression; and `producer` which means retain the original compression codec set by the producer.
         """
         return pulumi.get(self, "compression_type")
 
@@ -30547,7 +30357,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
     @pulumi.getter(name="connectionsMaxIdleMs")
    def connections_max_idle_ms(self) -> Optional[int]:
         """
-        Idle connections timeout: the server socket processor threads close the connections that idle for longer than this.
+        Idle connections timeout: the server socket processor threads close the connections that idle for longer than this. Example: `540000`.
         """
         return pulumi.get(self, "connections_max_idle_ms")
 
@@ -30555,7 +30365,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
     @pulumi.getter(name="defaultReplicationFactor")
     def default_replication_factor(self) -> Optional[int]:
         """
-        Replication factor for
+        Replication factor for autocreated topics.
         """
         return pulumi.get(self, "default_replication_factor")
 
@@ -30563,7 +30373,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
     @pulumi.getter(name="groupInitialRebalanceDelayMs")
     def group_initial_rebalance_delay_ms(self) -> Optional[int]:
         """
-        The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time.
+        The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. Example: `3000`.
         """
         return pulumi.get(self, "group_initial_rebalance_delay_ms")
 
@@ -30571,7 +30381,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
     @pulumi.getter(name="groupMaxSessionTimeoutMs")
     def group_max_session_timeout_ms(self) -> Optional[int]:
         """
-        The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures.
+        The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. Example: `1800000`.
         """
         return pulumi.get(self, "group_max_session_timeout_ms")
 
@@ -30579,7 +30389,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
     @pulumi.getter(name="groupMinSessionTimeoutMs")
     def group_min_session_timeout_ms(self) -> Optional[int]:
         """
-        The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures.
+        The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. Example: `6000`.
         """
         return pulumi.get(self, "group_min_session_timeout_ms")
 
@@ -30587,7 +30397,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
     @pulumi.getter(name="logCleanerDeleteRetentionMs")
     def log_cleaner_delete_retention_ms(self) -> Optional[int]:
         """
-        How long are delete records retained?
+        How long are delete records retained? Example: `86400000`.
         """
         return pulumi.get(self, "log_cleaner_delete_retention_ms")
 
@@ -30595,7 +30405,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
     @pulumi.getter(name="logCleanerMaxCompactionLagMs")
     def log_cleaner_max_compaction_lag_ms(self) -> Optional[int]:
         """
-        The maximum amount of time message will remain uncompacted. Only applicable for logs that are being compacted.
+        The maximum amount of time message will remain uncompacted. Only applicable for logs that are being compacted.
         """
         return pulumi.get(self, "log_cleaner_max_compaction_lag_ms")
 
@@ -30603,7 +30413,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
     @pulumi.getter(name="logCleanerMinCleanableRatio")
     def log_cleaner_min_cleanable_ratio(self) -> Optional[float]:
         """
-        Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option.
+        Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option. Example: `0.5`.
         """
         return pulumi.get(self, "log_cleaner_min_cleanable_ratio")
 
@@ -30611,7 +30421,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
     @pulumi.getter(name="logCleanerMinCompactionLagMs")
     def log_cleaner_min_compaction_lag_ms(self) -> Optional[int]:
         """
-        The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.
+        The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.
         """
         return pulumi.get(self, "log_cleaner_min_compaction_lag_ms")
 
@@ -30619,7 +30429,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
     @pulumi.getter(name="logCleanupPolicy")
     def log_cleanup_policy(self) -> Optional[str]:
         """
-        Enum: `delete`, `compact`, `compact,delete`. The default cleanup policy for segments beyond the retention window
+        Enum: `delete`, `compact`, `compact,delete`. The default cleanup policy for segments beyond the retention window.
         """
         return pulumi.get(self, "log_cleanup_policy")
 
@@ -30627,7 +30437,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
     @pulumi.getter(name="logFlushIntervalMessages")
     def log_flush_interval_messages(self) -> Optional[int]:
         """
-        The number of messages accumulated on a log partition before messages are flushed to disk
+        The number of messages accumulated on a log partition before messages are flushed to disk. Example: `9223372036854775807`.
         """
         return pulumi.get(self, "log_flush_interval_messages")
 
@@ -30635,7 +30445,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
     @pulumi.getter(name="logFlushIntervalMs")
     def log_flush_interval_ms(self) -> Optional[int]:
         """
-        The maximum time in ms that a message in any topic is kept in memory
+        The maximum time in ms that a message in any topic is kept in memory before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used.
         """
         return pulumi.get(self, "log_flush_interval_ms")
 
@@ -30643,7 +30453,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
     @pulumi.getter(name="logIndexIntervalBytes")
     def log_index_interval_bytes(self) -> Optional[int]:
         """
-        The interval with which Kafka adds an entry to the offset index
+        The interval with which Kafka adds an entry to the offset index. Example: `4096`.
         """
         return pulumi.get(self, "log_index_interval_bytes")
 
@@ -30651,7 +30461,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
     @pulumi.getter(name="logIndexSizeMaxBytes")
     def log_index_size_max_bytes(self) -> Optional[int]:
         """
-        The maximum size in bytes of the offset index
+        The maximum size in bytes of the offset index. Example: `10485760`.
         """
         return pulumi.get(self, "log_index_size_max_bytes")
 
@@ -30659,7 +30469,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
     @pulumi.getter(name="logLocalRetentionBytes")
     def log_local_retention_bytes(self) -> Optional[int]:
         """
-        The maximum size of local log segments that can grow for a partition before it gets eligible for deletion. If set to -2, the value of log.retention.bytes is used. The effective value should always be less than or equal to log.retention.bytes value.
+        The maximum size of local log segments that can grow for a partition before it gets eligible for deletion. If set to -2, the value of log.retention.bytes is used. The effective value should always be less than or equal to log.retention.bytes value.
         """
         return pulumi.get(self, "log_local_retention_bytes")
 
@@ -30667,7 +30477,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
     @pulumi.getter(name="logLocalRetentionMs")
     def log_local_retention_ms(self) -> Optional[int]:
         """
-        The number of milliseconds to keep the local log segments before it gets eligible for deletion. If set to -2, the value of log.retention.ms is used. The effective value should always be less than or equal to log.retention.ms value.
+        The number of milliseconds to keep the local log segments before it gets eligible for deletion. If set to -2, the value of log.retention.ms is used. The effective value should always be less than or equal to log.retention.ms value.
         """
         return pulumi.get(self, "log_local_retention_ms")
 
@@ -30675,7 +30485,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
     @pulumi.getter(name="logMessageDownconversionEnable")
     def log_message_downconversion_enable(self) -> Optional[bool]:
         """
-        This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests.
+        This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests.
         """
         return pulumi.get(self, "log_message_downconversion_enable")
 
@@ -30683,7 +30493,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
     @pulumi.getter(name="logMessageTimestampDifferenceMaxMs")
     def log_message_timestamp_difference_max_ms(self) -> Optional[int]:
         """
-        The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message
+        The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message.
         """
         return pulumi.get(self, "log_message_timestamp_difference_max_ms")
 
@@ -30691,7 +30501,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
     @pulumi.getter(name="logMessageTimestampType")
     def log_message_timestamp_type(self) -> Optional[str]:
         """
-        Enum: `CreateTime`, `LogAppendTime`. Define whether the timestamp in the message is message create time or log append time.
+        Enum: `CreateTime`, `LogAppendTime`. Define whether the timestamp in the message is message create time or log append time.
         """
         return pulumi.get(self, "log_message_timestamp_type")
 
@@ -30699,7 +30509,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
     @pulumi.getter(name="logPreallocate")
     def log_preallocate(self) -> Optional[bool]:
         """
-        Should pre allocate file when create new segment?
+        Should pre allocate file when create new segment?
         """
         return pulumi.get(self, "log_preallocate")
 
@@ -30707,7 +30517,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
     @pulumi.getter(name="logRetentionBytes")
     def log_retention_bytes(self) -> Optional[int]:
         """
-        The maximum size of the log before deleting messages
+        The maximum size of the log before deleting messages.
         """
         return pulumi.get(self, "log_retention_bytes")
 
@@ -30715,7 +30525,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
     @pulumi.getter(name="logRetentionHours")
     def log_retention_hours(self) -> Optional[int]:
         """
-        The number of hours to keep a log file before deleting it
+        The number of hours to keep a log file before deleting it.
         """
         return pulumi.get(self, "log_retention_hours")
 
@@ -30723,7 +30533,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
     @pulumi.getter(name="logRetentionMs")
     def log_retention_ms(self) -> Optional[int]:
         """
-        The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied.
+        The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied.
         """
         return pulumi.get(self, "log_retention_ms")
 
@@ -30731,7 +30541,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
     @pulumi.getter(name="logRollJitterMs")
     def log_roll_jitter_ms(self) -> Optional[int]:
         """
-        The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used
|
|
30544
|
+
The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used.
|
|
30735
30545
|
"""
|
|
30736
30546
|
return pulumi.get(self, "log_roll_jitter_ms")
|
|
30737
30547
|
|
|
@@ -30739,7 +30549,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
|
|
|
30739
30549
|
@pulumi.getter(name="logRollMs")
|
|
30740
30550
|
def log_roll_ms(self) -> Optional[int]:
|
|
30741
30551
|
"""
|
|
30742
|
-
The maximum time before a new log segment is rolled out (in milliseconds).
|
|
30552
|
+
The maximum time before a new log segment is rolled out (in milliseconds).
|
|
30743
30553
|
"""
|
|
30744
30554
|
return pulumi.get(self, "log_roll_ms")
|
|
30745
30555
|
|
|
@@ -30747,7 +30557,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
|
|
|
30747
30557
|
@pulumi.getter(name="logSegmentBytes")
|
|
30748
30558
|
def log_segment_bytes(self) -> Optional[int]:
|
|
30749
30559
|
"""
|
|
30750
|
-
The maximum size of a single log file
|
|
30560
|
+
The maximum size of a single log file.
|
|
30751
30561
|
"""
|
|
30752
30562
|
return pulumi.get(self, "log_segment_bytes")
|
|
30753
30563
|
|
|
@@ -30755,7 +30565,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
|
|
|
30755
30565
|
@pulumi.getter(name="logSegmentDeleteDelayMs")
|
|
30756
30566
|
def log_segment_delete_delay_ms(self) -> Optional[int]:
|
|
30757
30567
|
"""
|
|
30758
|
-
The amount of time to wait before deleting a file from the filesystem
|
|
30568
|
+
The amount of time to wait before deleting a file from the filesystem. Example: `60000`.
|
|
30759
30569
|
"""
|
|
30760
30570
|
return pulumi.get(self, "log_segment_delete_delay_ms")
|
|
30761
30571
|
|
|
@@ -30763,7 +30573,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
|
|
|
30763
30573
|
@pulumi.getter(name="maxConnectionsPerIp")
|
|
30764
30574
|
def max_connections_per_ip(self) -> Optional[int]:
|
|
30765
30575
|
"""
|
|
30766
|
-
The maximum number of connections allowed from each ip address (
|
|
30576
|
+
The maximum number of connections allowed from each ip address (defaults to 2147483647).
|
|
30767
30577
|
"""
|
|
30768
30578
|
return pulumi.get(self, "max_connections_per_ip")
|
|
30769
30579
|
|
|
@@ -30771,7 +30581,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
|
|
|
30771
30581
|
@pulumi.getter(name="maxIncrementalFetchSessionCacheSlots")
|
|
30772
30582
|
def max_incremental_fetch_session_cache_slots(self) -> Optional[int]:
|
|
30773
30583
|
"""
|
|
30774
|
-
The maximum number of incremental fetch sessions that the broker will maintain.
|
|
30584
|
+
The maximum number of incremental fetch sessions that the broker will maintain. Example: `1000`.
|
|
30775
30585
|
"""
|
|
30776
30586
|
return pulumi.get(self, "max_incremental_fetch_session_cache_slots")
|
|
30777
30587
|
|
|
@@ -30779,7 +30589,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
|
|
|
30779
30589
|
@pulumi.getter(name="messageMaxBytes")
|
|
30780
30590
|
def message_max_bytes(self) -> Optional[int]:
|
|
30781
30591
|
"""
|
|
30782
|
-
The maximum size of message that the server can receive.
|
|
30592
|
+
The maximum size of message that the server can receive. Example: `1048588`.
|
|
30783
30593
|
"""
|
|
30784
30594
|
return pulumi.get(self, "message_max_bytes")
|
|
30785
30595
|
|
|
@@ -30787,7 +30597,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
|
|
|
30787
30597
|
@pulumi.getter(name="minInsyncReplicas")
|
|
30788
30598
|
def min_insync_replicas(self) -> Optional[int]:
|
|
30789
30599
|
"""
|
|
30790
|
-
When a producer sets acks to `all` (or `-1`), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful.
|
|
30600
|
+
When a producer sets acks to `all` (or `-1`), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful. Example: `1`.
|
|
30791
30601
|
"""
|
|
30792
30602
|
return pulumi.get(self, "min_insync_replicas")
|
|
30793
30603
|
|
|
@@ -30795,7 +30605,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
|
|
|
30795
30605
|
@pulumi.getter(name="numPartitions")
|
|
30796
30606
|
def num_partitions(self) -> Optional[int]:
|
|
30797
30607
|
"""
|
|
30798
|
-
Number of partitions for
|
|
30608
|
+
Number of partitions for autocreated topics.
|
|
30799
30609
|
"""
|
|
30800
30610
|
return pulumi.get(self, "num_partitions")
|
|
30801
30611
|
|
|
@@ -30803,7 +30613,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
|
|
|
30803
30613
|
@pulumi.getter(name="offsetsRetentionMinutes")
|
|
30804
30614
|
def offsets_retention_minutes(self) -> Optional[int]:
|
|
30805
30615
|
"""
|
|
30806
|
-
Log retention window in minutes for offsets topic
|
|
30616
|
+
Log retention window in minutes for offsets topic. Example: `10080`.
|
|
30807
30617
|
"""
|
|
30808
30618
|
return pulumi.get(self, "offsets_retention_minutes")
|
|
30809
30619
|
|
|
@@ -30811,7 +30621,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
|
|
|
30811
30621
|
@pulumi.getter(name="producerPurgatoryPurgeIntervalRequests")
|
|
30812
30622
|
def producer_purgatory_purge_interval_requests(self) -> Optional[int]:
|
|
30813
30623
|
"""
|
|
30814
|
-
The purge interval (in number of requests) of the producer request purgatory
|
|
30624
|
+
The purge interval (in number of requests) of the producer request purgatory(defaults to 1000).
|
|
30815
30625
|
"""
|
|
30816
30626
|
return pulumi.get(self, "producer_purgatory_purge_interval_requests")
|
|
30817
30627
|
|
|
@@ -30819,7 +30629,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
|
|
|
30819
30629
|
@pulumi.getter(name="replicaFetchMaxBytes")
|
|
30820
30630
|
def replica_fetch_max_bytes(self) -> Optional[int]:
|
|
30821
30631
|
"""
|
|
30822
|
-
The number of bytes of messages to attempt to fetch for each partition . This is not an absolute maximum, if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made.
|
|
30632
|
+
The number of bytes of messages to attempt to fetch for each partition (defaults to 1048576). This is not an absolute maximum, if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made.
|
|
30823
30633
|
"""
|
|
30824
30634
|
return pulumi.get(self, "replica_fetch_max_bytes")
|
|
30825
30635
|
|
|
@@ -30827,7 +30637,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
|
|
|
30827
30637
|
@pulumi.getter(name="replicaFetchResponseMaxBytes")
|
|
30828
30638
|
def replica_fetch_response_max_bytes(self) -> Optional[int]:
|
|
30829
30639
|
"""
|
|
30830
|
-
Maximum bytes expected for the entire fetch response. Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum.
|
|
30640
|
+
Maximum bytes expected for the entire fetch response (defaults to 10485760). Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum.
|
|
30831
30641
|
"""
|
|
30832
30642
|
return pulumi.get(self, "replica_fetch_response_max_bytes")
|
|
30833
30643
|
|
|
@@ -30835,7 +30645,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
|
|
|
30835
30645
|
@pulumi.getter(name="saslOauthbearerExpectedAudience")
|
|
30836
30646
|
def sasl_oauthbearer_expected_audience(self) -> Optional[str]:
|
|
30837
30647
|
"""
|
|
30838
|
-
The (optional) comma-delimited setting for the broker to use to verify that the JWT was issued for one of the expected audiences.
|
|
30648
|
+
The (optional) comma-delimited setting for the broker to use to verify that the JWT was issued for one of the expected audiences.
|
|
30839
30649
|
"""
|
|
30840
30650
|
return pulumi.get(self, "sasl_oauthbearer_expected_audience")
|
|
30841
30651
|
|
|
@@ -30843,7 +30653,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
|
|
|
30843
30653
|
@pulumi.getter(name="saslOauthbearerExpectedIssuer")
|
|
30844
30654
|
def sasl_oauthbearer_expected_issuer(self) -> Optional[str]:
|
|
30845
30655
|
"""
|
|
30846
|
-
Optional setting for the broker to use to verify that the JWT was created by the expected issuer.
|
|
30656
|
+
Optional setting for the broker to use to verify that the JWT was created by the expected issuer.
|
|
30847
30657
|
"""
|
|
30848
30658
|
return pulumi.get(self, "sasl_oauthbearer_expected_issuer")
|
|
30849
30659
|
|
|
@@ -30851,7 +30661,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
|
|
|
30851
30661
|
@pulumi.getter(name="saslOauthbearerJwksEndpointUrl")
|
|
30852
30662
|
def sasl_oauthbearer_jwks_endpoint_url(self) -> Optional[str]:
|
|
30853
30663
|
"""
|
|
30854
|
-
OIDC JWKS endpoint URL. By setting this the SASL SSL OAuth2/OIDC authentication is enabled. See also other options for SASL OAuth2/OIDC.
|
|
30664
|
+
OIDC JWKS endpoint URL. By setting this the SASL SSL OAuth2/OIDC authentication is enabled. See also other options for SASL OAuth2/OIDC.
|
|
30855
30665
|
"""
|
|
30856
30666
|
return pulumi.get(self, "sasl_oauthbearer_jwks_endpoint_url")
|
|
30857
30667
|
|
|
@@ -30859,7 +30669,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
|
|
|
30859
30669
|
@pulumi.getter(name="saslOauthbearerSubClaimName")
|
|
30860
30670
|
def sasl_oauthbearer_sub_claim_name(self) -> Optional[str]:
|
|
30861
30671
|
"""
|
|
30862
|
-
Name of the scope from which to extract the subject claim from the JWT.
|
|
30672
|
+
Name of the scope from which to extract the subject claim from the JWT. Defaults to sub.
|
|
30863
30673
|
"""
|
|
30864
30674
|
return pulumi.get(self, "sasl_oauthbearer_sub_claim_name")
|
|
30865
30675
|
|
|
@@ -30867,7 +30677,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
|
|
|
30867
30677
|
@pulumi.getter(name="socketRequestMaxBytes")
|
|
30868
30678
|
def socket_request_max_bytes(self) -> Optional[int]:
|
|
30869
30679
|
"""
|
|
30870
|
-
The maximum number of bytes in a socket request (
|
|
30680
|
+
The maximum number of bytes in a socket request (defaults to 104857600).
|
|
30871
30681
|
"""
|
|
30872
30682
|
return pulumi.get(self, "socket_request_max_bytes")
|
|
30873
30683
|
|
|
@@ -30875,7 +30685,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
|
|
|
30875
30685
|
@pulumi.getter(name="transactionPartitionVerificationEnable")
|
|
30876
30686
|
def transaction_partition_verification_enable(self) -> Optional[bool]:
|
|
30877
30687
|
"""
|
|
30878
|
-
Enable verification that checks that the partition has been added to the transaction before writing transactional records to the partition.
|
|
30688
|
+
Enable verification that checks that the partition has been added to the transaction before writing transactional records to the partition.
|
|
30879
30689
|
"""
|
|
30880
30690
|
return pulumi.get(self, "transaction_partition_verification_enable")
|
|
30881
30691
|
|
|
@@ -30883,7 +30693,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
|
|
|
30883
30693
|
@pulumi.getter(name="transactionRemoveExpiredTransactionCleanupIntervalMs")
|
|
30884
30694
|
def transaction_remove_expired_transaction_cleanup_interval_ms(self) -> Optional[int]:
|
|
30885
30695
|
"""
|
|
30886
|
-
The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (
|
|
30696
|
+
The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (defaults to 3600000 (1 hour)).
|
|
30887
30697
|
"""
|
|
30888
30698
|
return pulumi.get(self, "transaction_remove_expired_transaction_cleanup_interval_ms")
|
|
30889
30699
|
|
|
@@ -30891,7 +30701,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
|
|
|
30891
30701
|
@pulumi.getter(name="transactionStateLogSegmentBytes")
|
|
30892
30702
|
def transaction_state_log_segment_bytes(self) -> Optional[int]:
|
|
30893
30703
|
"""
|
|
30894
|
-
The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (
|
|
30704
|
+
The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (defaults to 104857600 (100 mebibytes)).
|
|
30895
30705
|
"""
|
|
30896
30706
|
return pulumi.get(self, "transaction_state_log_segment_bytes")
|
|
30897
30707
|
|
|
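The hunks above only touch docstrings, adding example values and defaults for the Kafka broker settings. A minimal resource-side sketch that exercises two of those settings is shown below; it is not part of the diff, the project, plan and service names are placeholders, and the *Args class names are assumed to mirror the result types in this file.

import pulumi_aiven as aiven

# Hypothetical sketch: a Kafka service whose broker config uses the example
# values now quoted in the docstrings above (messageMaxBytes, minInsyncReplicas).
kafka = aiven.Kafka(
    "example-kafka",
    project="my-project",
    cloud_name="google-europe-west1",
    plan="business-4",
    service_name="example-kafka",
    kafka_user_config=aiven.KafkaKafkaUserConfigArgs(
        kafka=aiven.KafkaKafkaUserConfigKafkaArgs(
            message_max_bytes=1048588,   # example value from the updated docstring
            min_insync_replicas=1,       # example value from the updated docstring
        ),
    ),
)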
@@ -34449,7 +34259,6 @@ class GetMySqlMysqlUserConfigMigrationResult(dict):
port: int,
dbname: Optional[str] = None,
ignore_dbs: Optional[str] = None,
- ignore_roles: Optional[str] = None,
method: Optional[str] = None,
password: Optional[str] = None,
ssl: Optional[bool] = None,

@@ -34459,7 +34268,6 @@ class GetMySqlMysqlUserConfigMigrationResult(dict):
:param int port: Port number of the server where to migrate data from. Example: `1234`.
:param str dbname: Database name for bootstrapping the initial connection. Example: `defaultdb`.
:param str ignore_dbs: Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment). Example: `db1,db2`.
- :param str ignore_roles: Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment). Example: `role1,role2`.
:param str method: Enum: `dump`, `replication`. The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
:param str password: Password for authentication with the server where to migrate data from. Example: `jjKk45Nnd`.
:param bool ssl: The server where to migrate data from is secured with SSL. Default: `true`.

@@ -34471,8 +34279,6 @@ class GetMySqlMysqlUserConfigMigrationResult(dict):
pulumi.set(__self__, "dbname", dbname)
if ignore_dbs is not None:
pulumi.set(__self__, "ignore_dbs", ignore_dbs)
- if ignore_roles is not None:
- pulumi.set(__self__, "ignore_roles", ignore_roles)
if method is not None:
pulumi.set(__self__, "method", method)
if password is not None:

@@ -34514,14 +34320,6 @@ class GetMySqlMysqlUserConfigMigrationResult(dict):
"""
return pulumi.get(self, "ignore_dbs")

- @property
- @pulumi.getter(name="ignoreRoles")
- def ignore_roles(self) -> Optional[str]:
- """
- Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment). Example: `role1,role2`.
- """
- return pulumi.get(self, "ignore_roles")
-
@property
@pulumi.getter
def method(self) -> Optional[str]:
@@ -37662,7 +37460,6 @@ class GetPgPgUserConfigMigrationResult(dict):
port: int,
dbname: Optional[str] = None,
ignore_dbs: Optional[str] = None,
- ignore_roles: Optional[str] = None,
method: Optional[str] = None,
password: Optional[str] = None,
ssl: Optional[bool] = None,

@@ -37672,7 +37469,6 @@ class GetPgPgUserConfigMigrationResult(dict):
:param int port: Port number of the server where to migrate data from. Example: `1234`.
:param str dbname: Database name for bootstrapping the initial connection. Example: `defaultdb`.
:param str ignore_dbs: Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment). Example: `db1,db2`.
- :param str ignore_roles: Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment). Example: `role1,role2`.
:param str method: Enum: `dump`, `replication`. The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
:param str password: Password for authentication with the server where to migrate data from. Example: `jjKk45Nnd`.
:param bool ssl: The server where to migrate data from is secured with SSL. Default: `true`.

@@ -37684,8 +37480,6 @@ class GetPgPgUserConfigMigrationResult(dict):
pulumi.set(__self__, "dbname", dbname)
if ignore_dbs is not None:
pulumi.set(__self__, "ignore_dbs", ignore_dbs)
- if ignore_roles is not None:
- pulumi.set(__self__, "ignore_roles", ignore_roles)
if method is not None:
pulumi.set(__self__, "method", method)
if password is not None:

@@ -37727,14 +37521,6 @@ class GetPgPgUserConfigMigrationResult(dict):
"""
return pulumi.get(self, "ignore_dbs")

- @property
- @pulumi.getter(name="ignoreRoles")
- def ignore_roles(self) -> Optional[str]:
- """
- Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment). Example: `role1,role2`.
- """
- return pulumi.get(self, "ignore_roles")
-
@property
@pulumi.getter
def method(self) -> Optional[str]:
@@ -38632,7 +38418,6 @@ class GetPgPgUserConfigPgbouncerResult(dict):
autodb_pool_mode: Optional[str] = None,
autodb_pool_size: Optional[int] = None,
ignore_startup_parameters: Optional[Sequence[str]] = None,
- max_prepared_statements: Optional[int] = None,
min_pool_size: Optional[int] = None,
server_idle_timeout: Optional[int] = None,
server_lifetime: Optional[int] = None,

@@ -38643,7 +38428,6 @@ class GetPgPgUserConfigPgbouncerResult(dict):
:param str autodb_pool_mode: Enum: `session`, `transaction`, `statement`. PGBouncer pool mode. Default: `transaction`.
:param int autodb_pool_size: If non-zero then create automatically a pool of that size per user when a pool doesn't exist. Default: `0`.
:param Sequence[str] ignore_startup_parameters: List of parameters to ignore when given in startup packet.
- :param int max_prepared_statements: PgBouncer tracks protocol-level named prepared statements related commands sent by the client in transaction and statement pooling modes when max_prepared_statements is set to a non-zero value. Setting it to 0 disables prepared statements. max_prepared_statements defaults to 100, and its maximum is 3000. Default: `100`.
:param int min_pool_size: Add more server connections to pool if below this number. Improves behavior when usual load comes suddenly back after period of total inactivity. The value is effectively capped at the pool size. Default: `0`.
:param int server_idle_timeout: If a server connection has been idle more than this many seconds it will be dropped. If 0 then timeout is disabled. (seconds). Default: `600`.
:param int server_lifetime: The pooler will close an unused server connection that has been connected longer than this. (seconds). Default: `3600`.

@@ -38659,8 +38443,6 @@ class GetPgPgUserConfigPgbouncerResult(dict):
pulumi.set(__self__, "autodb_pool_size", autodb_pool_size)
if ignore_startup_parameters is not None:
pulumi.set(__self__, "ignore_startup_parameters", ignore_startup_parameters)
- if max_prepared_statements is not None:
- pulumi.set(__self__, "max_prepared_statements", max_prepared_statements)
if min_pool_size is not None:
pulumi.set(__self__, "min_pool_size", min_pool_size)
if server_idle_timeout is not None:

@@ -38710,14 +38492,6 @@ class GetPgPgUserConfigPgbouncerResult(dict):
"""
return pulumi.get(self, "ignore_startup_parameters")

- @property
- @pulumi.getter(name="maxPreparedStatements")
- def max_prepared_statements(self) -> Optional[int]:
- """
- PgBouncer tracks protocol-level named prepared statements related commands sent by the client in transaction and statement pooling modes when max_prepared_statements is set to a non-zero value. Setting it to 0 disables prepared statements. max_prepared_statements defaults to 100, and its maximum is 3000. Default: `100`.
- """
- return pulumi.get(self, "max_prepared_statements")
-
@property
@pulumi.getter(name="minPoolSize")
def min_pool_size(self) -> Optional[int]:
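These hunks drop max_prepared_statements from the PgBouncer block of the PostgreSQL user config result. Below is a minimal, hypothetical sketch of a PgBouncer configuration written against this prerelease using only the surviving fields; it is not part of the diff, service names are placeholders, the *Args input classes are assumed to mirror the result types shown here, and the values follow the defaults quoted in the docstrings.

import pulumi_aiven as aiven

# Hypothetical sketch: PgBouncer settings without max_prepared_statements,
# which is no longer exposed on the result type in this build.
pg = aiven.Pg(
    "pooled-pg",
    project="my-project",
    plan="startup-4",
    service_name="pooled-pg",
    pg_user_config=aiven.PgPgUserConfigArgs(
        pgbouncer=aiven.PgPgUserConfigPgbouncerArgs(
            autodb_pool_mode="transaction",  # default per docstring
            autodb_pool_size=0,
            min_pool_size=0,
            server_idle_timeout=600,         # default per docstring
            server_lifetime=3600,            # default per docstring
        ),
    ),
)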
@@ -39514,7 +39288,6 @@ class GetRedisRedisUserConfigMigrationResult(dict):
port: int,
dbname: Optional[str] = None,
ignore_dbs: Optional[str] = None,
- ignore_roles: Optional[str] = None,
method: Optional[str] = None,
password: Optional[str] = None,
ssl: Optional[bool] = None,

@@ -39524,7 +39297,6 @@ class GetRedisRedisUserConfigMigrationResult(dict):
:param int port: Port number of the server where to migrate data from. Example: `1234`.
:param str dbname: Database name for bootstrapping the initial connection. Example: `defaultdb`.
:param str ignore_dbs: Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment). Example: `db1,db2`.
- :param str ignore_roles: Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment). Example: `role1,role2`.
:param str method: Enum: `dump`, `replication`. The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
:param str password: Password for authentication with the server where to migrate data from. Example: `jjKk45Nnd`.
:param bool ssl: The server where to migrate data from is secured with SSL. Default: `true`.

@@ -39536,8 +39308,6 @@ class GetRedisRedisUserConfigMigrationResult(dict):
pulumi.set(__self__, "dbname", dbname)
if ignore_dbs is not None:
pulumi.set(__self__, "ignore_dbs", ignore_dbs)
- if ignore_roles is not None:
- pulumi.set(__self__, "ignore_roles", ignore_roles)
if method is not None:
pulumi.set(__self__, "method", method)
if password is not None:

@@ -39579,14 +39349,6 @@ class GetRedisRedisUserConfigMigrationResult(dict):
"""
return pulumi.get(self, "ignore_dbs")

- @property
- @pulumi.getter(name="ignoreRoles")
- def ignore_roles(self) -> Optional[str]:
- """
- Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment). Example: `role1,role2`.
- """
- return pulumi.get(self, "ignore_roles")
-
@property
@pulumi.getter
def method(self) -> Optional[str]:
@@ -39823,9 +39585,7 @@ class GetServiceIntegrationClickhouseKafkaUserConfigTableResult(dict):
max_rows_per_message: Optional[int] = None,
num_consumers: Optional[int] = None,
poll_max_batch_size: Optional[int] = None,
- poll_max_timeout_ms: Optional[int] = None,
- skip_broken_messages: Optional[int] = None,
- thread_per_consumer: Optional[bool] = None):
+ skip_broken_messages: Optional[int] = None):
"""
:param Sequence['GetServiceIntegrationClickhouseKafkaUserConfigTableColumnArgs'] columns: Table columns
:param str data_format: Enum: `Avro`, `CSV`, `JSONAsString`, `JSONCompactEachRow`, `JSONCompactStringsEachRow`, `JSONEachRow`, `JSONStringsEachRow`, `MsgPack`, `TSKV`, `TSV`, `TabSeparated`, `RawBLOB`, `AvroConfluent`, `Parquet`. Message data format. Default: `JSONEachRow`.

@@ -39839,9 +39599,7 @@ class GetServiceIntegrationClickhouseKafkaUserConfigTableResult(dict):
:param int max_rows_per_message: The maximum number of rows produced in one kafka message for row-based formats. Default: `1`.
:param int num_consumers: The number of consumers per table per replica. Default: `1`.
:param int poll_max_batch_size: Maximum amount of messages to be polled in a single Kafka poll. Default: `0`.
- :param int poll_max_timeout_ms: Timeout in milliseconds for a single poll from Kafka. Takes the value of the stream_flush_interval_ms server setting by default (500ms). Default: `0`.
:param int skip_broken_messages: Skip at least this number of broken messages from Kafka topic per block. Default: `0`.
- :param bool thread_per_consumer: Provide an independent thread for each consumer. All consumers run in the same thread by default. Default: `false`.
"""
pulumi.set(__self__, "columns", columns)
pulumi.set(__self__, "data_format", data_format)

@@ -39862,12 +39620,8 @@ class GetServiceIntegrationClickhouseKafkaUserConfigTableResult(dict):
pulumi.set(__self__, "num_consumers", num_consumers)
if poll_max_batch_size is not None:
pulumi.set(__self__, "poll_max_batch_size", poll_max_batch_size)
- if poll_max_timeout_ms is not None:
- pulumi.set(__self__, "poll_max_timeout_ms", poll_max_timeout_ms)
if skip_broken_messages is not None:
pulumi.set(__self__, "skip_broken_messages", skip_broken_messages)
- if thread_per_consumer is not None:
- pulumi.set(__self__, "thread_per_consumer", thread_per_consumer)

@property
@pulumi.getter

@@ -39965,14 +39719,6 @@ class GetServiceIntegrationClickhouseKafkaUserConfigTableResult(dict):
"""
return pulumi.get(self, "poll_max_batch_size")

- @property
- @pulumi.getter(name="pollMaxTimeoutMs")
- def poll_max_timeout_ms(self) -> Optional[int]:
- """
- Timeout in milliseconds for a single poll from Kafka. Takes the value of the stream_flush_interval_ms server setting by default (500ms). Default: `0`.
- """
- return pulumi.get(self, "poll_max_timeout_ms")
-
@property
@pulumi.getter(name="skipBrokenMessages")
def skip_broken_messages(self) -> Optional[int]:

@@ -39981,14 +39727,6 @@ class GetServiceIntegrationClickhouseKafkaUserConfigTableResult(dict):
"""
return pulumi.get(self, "skip_broken_messages")

- @property
- @pulumi.getter(name="threadPerConsumer")
- def thread_per_consumer(self) -> Optional[bool]:
- """
- Provide an independent thread for each consumer. All consumers run in the same thread by default. Default: `false`.
- """
- return pulumi.get(self, "thread_per_consumer")
-

@pulumi.output_type
class GetServiceIntegrationClickhouseKafkaUserConfigTableColumnResult(dict):
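These hunks remove poll_max_timeout_ms and thread_per_consumer from the clickhouse_kafka integration table result. A hypothetical sketch of a table definition that sticks to the fields still present in this prerelease follows; it is not part of the diff, all project, service and topic names are placeholders, and the input-side *Args classes are assumed to mirror the result types shown in this file.

import pulumi_aiven as aiven

# Hypothetical sketch: a clickhouse_kafka integration table using only fields
# that survive in this build (no poll_max_timeout_ms, no thread_per_consumer).
integration = aiven.ServiceIntegration(
    "clickhouse-kafka",
    project="my-project",
    integration_type="clickhouse_kafka",
    source_service_name="my-kafka",
    destination_service_name="my-clickhouse",
    clickhouse_kafka_user_config=aiven.ServiceIntegrationClickhouseKafkaUserConfigArgs(
        tables=[aiven.ServiceIntegrationClickhouseKafkaUserConfigTableArgs(
            name="events",
            group_name="clickhouse-ingestion",
            data_format="JSONEachRow",   # default per docstring
            columns=[aiven.ServiceIntegrationClickhouseKafkaUserConfigTableColumnArgs(
                name="payload",
                type="String",
            )],
            topics=[aiven.ServiceIntegrationClickhouseKafkaUserConfigTableTopicArgs(
                name="events",
            )],
            poll_max_batch_size=0,       # default per docstring
            skip_broken_messages=0,      # default per docstring
        )],
    ),
)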
@@ -43321,7 +43059,6 @@ class GetValkeyValkeyUserConfigMigrationResult(dict):
port: int,
dbname: Optional[str] = None,
ignore_dbs: Optional[str] = None,
- ignore_roles: Optional[str] = None,
method: Optional[str] = None,
password: Optional[str] = None,
ssl: Optional[bool] = None,

@@ -43331,7 +43068,6 @@ class GetValkeyValkeyUserConfigMigrationResult(dict):
:param int port: Port number of the server where to migrate data from. Example: `1234`.
:param str dbname: Database name for bootstrapping the initial connection. Example: `defaultdb`.
:param str ignore_dbs: Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment). Example: `db1,db2`.
- :param str ignore_roles: Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment). Example: `role1,role2`.
:param str method: Enum: `dump`, `replication`. The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
:param str password: Password for authentication with the server where to migrate data from. Example: `jjKk45Nnd`.
:param bool ssl: The server where to migrate data from is secured with SSL. Default: `true`.

@@ -43343,8 +43079,6 @@ class GetValkeyValkeyUserConfigMigrationResult(dict):
pulumi.set(__self__, "dbname", dbname)
if ignore_dbs is not None:
pulumi.set(__self__, "ignore_dbs", ignore_dbs)
- if ignore_roles is not None:
- pulumi.set(__self__, "ignore_roles", ignore_roles)
if method is not None:
pulumi.set(__self__, "method", method)
if password is not None:

@@ -43386,14 +43120,6 @@ class GetValkeyValkeyUserConfigMigrationResult(dict):
"""
return pulumi.get(self, "ignore_dbs")

- @property
- @pulumi.getter(name="ignoreRoles")
- def ignore_roles(self) -> Optional[str]:
- """
- Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment). Example: `role1,role2`.
- """
- return pulumi.get(self, "ignore_roles")
-
@property
@pulumi.getter
def method(self) -> Optional[str]:
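The ignore_roles removal above repeats across the MySQL, PostgreSQL, Redis and Valkey migration result types (and the Dragonfly one earlier in this file). A minimal, hypothetical sketch of a migration block written against this prerelease is shown below; it is not part of the diff, host, credentials and service names are placeholders, and the input-side *Args classes are assumed to mirror the result types shown here. ignore_dbs remains available; ignore_roles does not.

import pulumi_aiven as aiven

# Hypothetical sketch: a PostgreSQL migration config without ignore_roles,
# which is no longer exposed in this build.
pg = aiven.Pg(
    "example-pg",
    project="my-project",
    plan="startup-4",
    service_name="example-pg",
    pg_user_config=aiven.PgPgUserConfigArgs(
        migration=aiven.PgPgUserConfigMigrationArgs(
            host="source-db.example.com",
            port=5432,
            dbname="defaultdb",
            username="migrator",
            password="jjKk45Nnd",        # example value from the docstrings above
            ssl=True,                    # default per docstring
            method="dump",
            ignore_dbs="db1,db2",        # still supported
            # ignore_roles="role1,role2" # removed in this prerelease
        ),
    ),
)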