pulumi-aiven 6.43.0a1758777024__py3-none-any.whl → 6.44.0a1758949869__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of pulumi-aiven might be problematic.
- pulumi_aiven/_inputs.py +657 -183
- pulumi_aiven/account_team.py +6 -12
- pulumi_aiven/account_team_member.py +6 -12
- pulumi_aiven/account_team_project.py +6 -12
- pulumi_aiven/alloydbomni.py +44 -23
- pulumi_aiven/alloydbomni_database.py +26 -0
- pulumi_aiven/alloydbomni_user.py +26 -0
- pulumi_aiven/cassandra.py +18 -23
- pulumi_aiven/clickhouse.py +18 -23
- pulumi_aiven/dragonfly.py +18 -23
- pulumi_aiven/flink.py +18 -23
- pulumi_aiven/get_account_team.py +6 -12
- pulumi_aiven/get_account_team_member.py +6 -12
- pulumi_aiven/get_account_team_project.py +6 -12
- pulumi_aiven/get_alloydbomni.py +28 -2
- pulumi_aiven/get_alloydbomni_database.py +26 -0
- pulumi_aiven/get_alloydbomni_user.py +26 -0
- pulumi_aiven/get_cassanda.py +2 -2
- pulumi_aiven/get_cassandra.py +2 -2
- pulumi_aiven/get_clickhouse.py +2 -2
- pulumi_aiven/get_dragonfly.py +2 -2
- pulumi_aiven/get_flink.py +2 -2
- pulumi_aiven/get_grafana.py +2 -2
- pulumi_aiven/get_kafka.py +2 -2
- pulumi_aiven/get_kafka_connect.py +2 -2
- pulumi_aiven/get_kafka_mirror_maker.py +2 -2
- pulumi_aiven/get_m3_aggregator.py +2 -111
- pulumi_aiven/get_m3_db.py +2 -123
- pulumi_aiven/get_m3db_user.py +2 -61
- pulumi_aiven/get_my_sql.py +2 -2
- pulumi_aiven/get_open_search.py +2 -2
- pulumi_aiven/get_organization_application_user.py +29 -11
- pulumi_aiven/get_organization_project.py +0 -6
- pulumi_aiven/get_pg.py +2 -2
- pulumi_aiven/get_redis.py +2 -2
- pulumi_aiven/get_redis_user.py +3 -3
- pulumi_aiven/get_thanos.py +2 -2
- pulumi_aiven/get_valkey.py +2 -2
- pulumi_aiven/get_valkey_user.py +3 -3
- pulumi_aiven/grafana.py +18 -23
- pulumi_aiven/influx_db.py +18 -23
- pulumi_aiven/kafka.py +18 -23
- pulumi_aiven/kafka_connect.py +18 -23
- pulumi_aiven/kafka_mirror_maker.py +18 -23
- pulumi_aiven/m3_aggregator.py +20 -75
- pulumi_aiven/m3_db.py +20 -95
- pulumi_aiven/m3db_user.py +2 -54
- pulumi_aiven/my_sql.py +18 -23
- pulumi_aiven/open_search.py +18 -23
- pulumi_aiven/organization_application_user.py +69 -41
- pulumi_aiven/organization_application_user_token.py +127 -91
- pulumi_aiven/organization_billing_group.py +7 -7
- pulumi_aiven/organization_permission.py +80 -47
- pulumi_aiven/organization_project.py +0 -6
- pulumi_aiven/organization_user.py +8 -8
- pulumi_aiven/outputs.py +616 -234
- pulumi_aiven/pg.py +18 -23
- pulumi_aiven/project.py +2 -24
- pulumi_aiven/pulumi-plugin.json +1 -1
- pulumi_aiven/redis.py +18 -23
- pulumi_aiven/redis_user.py +21 -21
- pulumi_aiven/thanos.py +18 -23
- pulumi_aiven/valkey.py +18 -23
- pulumi_aiven/valkey_user.py +21 -21
- {pulumi_aiven-6.43.0a1758777024.dist-info → pulumi_aiven-6.44.0a1758949869.dist-info}/METADATA +1 -1
- {pulumi_aiven-6.43.0a1758777024.dist-info → pulumi_aiven-6.44.0a1758949869.dist-info}/RECORD +68 -68
- {pulumi_aiven-6.43.0a1758777024.dist-info → pulumi_aiven-6.44.0a1758949869.dist-info}/WHEEL +0 -0
- {pulumi_aiven-6.43.0a1758777024.dist-info → pulumi_aiven-6.44.0a1758949869.dist-info}/top_level.txt +0 -0
pulumi_aiven/outputs.py
CHANGED
@@ -139,6 +139,7 @@ __all__ = [
     'KafkaKafkaUserConfigKafkaConnectSecretProvider',
     'KafkaKafkaUserConfigKafkaConnectSecretProviderAws',
     'KafkaKafkaUserConfigKafkaConnectSecretProviderVault',
+    'KafkaKafkaUserConfigKafkaDiskless',
     'KafkaKafkaUserConfigKafkaRestConfig',
     'KafkaKafkaUserConfigKafkaSaslMechanisms',
     'KafkaKafkaUserConfigPrivateAccess',
@@ -204,6 +205,7 @@ __all__ = [
     'OpenSearchOpensearch',
     'OpenSearchOpensearchUserConfig',
     'OpenSearchOpensearchUserConfigAzureMigration',
+    'OpenSearchOpensearchUserConfigCustomKeystore',
     'OpenSearchOpensearchUserConfigGcsMigration',
     'OpenSearchOpensearchUserConfigIndexPattern',
     'OpenSearchOpensearchUserConfigIndexRollup',
@@ -243,9 +245,12 @@ __all__ = [
     'OpenSearchTag',
     'OpenSearchTechEmail',
     'OrganizationAddressTimeouts',
+    'OrganizationApplicationUserTimeouts',
+    'OrganizationApplicationUserTokenTimeouts',
     'OrganizationBillingGroupTimeouts',
     'OrganizationGroupProjectTimeouts',
     'OrganizationPermissionPermission',
+    'OrganizationPermissionTimeouts',
     'OrganizationProjectTag',
     'OrganizationProjectTimeouts',
     'OrganizationTimeouts',
@@ -480,6 +485,7 @@ __all__ = [
     'GetKafkaKafkaUserConfigKafkaConnectSecretProviderResult',
     'GetKafkaKafkaUserConfigKafkaConnectSecretProviderAwsResult',
     'GetKafkaKafkaUserConfigKafkaConnectSecretProviderVaultResult',
+    'GetKafkaKafkaUserConfigKafkaDisklessResult',
     'GetKafkaKafkaUserConfigKafkaRestConfigResult',
     'GetKafkaKafkaUserConfigKafkaSaslMechanismsResult',
     'GetKafkaKafkaUserConfigPrivateAccessResult',
@@ -545,6 +551,7 @@ __all__ = [
     'GetOpenSearchOpensearchResult',
     'GetOpenSearchOpensearchUserConfigResult',
     'GetOpenSearchOpensearchUserConfigAzureMigrationResult',
+    'GetOpenSearchOpensearchUserConfigCustomKeystoreResult',
     'GetOpenSearchOpensearchUserConfigGcsMigrationResult',
     'GetOpenSearchOpensearchUserConfigIndexPatternResult',
     'GetOpenSearchOpensearchUserConfigIndexRollupResult',
@@ -584,6 +591,7 @@ __all__ = [
     'GetOpenSearchTagResult',
     'GetOpenSearchTechEmailResult',
     'GetOrganizationAddressTimeoutsResult',
+    'GetOrganizationApplicationUserTimeoutsResult',
     'GetOrganizationBillingGroupListBillingGroupResult',
     'GetOrganizationBillingGroupListTimeoutsResult',
     'GetOrganizationBillingGroupTimeoutsResult',
@@ -1724,7 +1732,7 @@ class AlloydbomniAlloydbomniUserConfigPg(dict):
         """
         :param _builtins.float autovacuum_analyze_scale_factor: Specifies a fraction of the table size to add to autovacuum*analyze*threshold when deciding whether to trigger an ANALYZE (e.g. `0.2` for 20% of the table size). The default is `0.2`.
         :param _builtins.int autovacuum_analyze_threshold: Specifies the minimum number of inserted, updated or deleted tuples needed to trigger an ANALYZE in any one table. The default is `50`.
-        :param _builtins.int autovacuum_freeze_max_age: Specifies the maximum age (in transactions) that a table's pg_class.relfrozenxid field can attain before a VACUUM operation is forced to prevent transaction ID wraparound within the table. The system launches autovacuum processes to prevent wraparound even when autovacuum is otherwise disabled. Changing this parameter causes a service restart.
+        :param _builtins.int autovacuum_freeze_max_age: Specifies the maximum age (in transactions) that a table's pg_class.relfrozenxid field can attain before a VACUUM operation is forced to prevent transaction ID wraparound within the table. The system launches autovacuum processes to prevent wraparound even when autovacuum is otherwise disabled. Changing this parameter causes a service restart.
         :param _builtins.int autovacuum_max_workers: Specifies the maximum number of autovacuum processes (other than the autovacuum launcher) that may be running at any one time. The default is `3`. Changing this parameter causes a service restart.
         :param _builtins.int autovacuum_naptime: Specifies the minimum delay between autovacuum runs on any given database. The delay is measured in seconds. The default is `60`.
         :param _builtins.int autovacuum_vacuum_cost_delay: Specifies the cost delay value that will be used in automatic VACUUM operations. If `-1` is specified, the regular vacuum*cost*delay value will be used. The default is `2` (upstream default).
@@ -1734,7 +1742,7 @@ class AlloydbomniAlloydbomniUserConfigPg(dict):
         :param _builtins.int bgwriter_delay: Specifies the delay between activity rounds for the background writer in milliseconds. The default is `200`. Example: `200`.
         :param _builtins.int bgwriter_flush_after: Whenever more than bgwriter*flush*after bytes have been written by the background writer, attempt to force the OS to issue these writes to the underlying storage. Specified in kilobytes. Setting of 0 disables forced writeback. The default is `512`. Example: `512`.
         :param _builtins.int bgwriter_lru_maxpages: In each round, no more than this many buffers will be written by the background writer. Setting this to zero disables background writing. The default is `100`. Example: `100`.
-        :param _builtins.float bgwriter_lru_multiplier: The average recent need for new buffers is multiplied by bgwriter*lru*multiplier to arrive at an estimate of the number that will be needed during the next round, (up to bgwriter*lru*maxpages). 1.0 represents a “just in time” policy of writing exactly the number of buffers predicted to be needed. Larger values provide some cushion against spikes in demand, while smaller values intentionally leave writes to be done by server processes. The default is `2.0`. Example: `2
+        :param _builtins.float bgwriter_lru_multiplier: The average recent need for new buffers is multiplied by bgwriter*lru*multiplier to arrive at an estimate of the number that will be needed during the next round, (up to bgwriter*lru*maxpages). 1.0 represents a “just in time” policy of writing exactly the number of buffers predicted to be needed. Larger values provide some cushion against spikes in demand, while smaller values intentionally leave writes to be done by server processes. The default is `2.0`. Example: `2`.
         :param _builtins.int deadlock_timeout: This is the amount of time, in milliseconds, to wait on a lock before checking to see if there is a deadlock condition. The default is `1000` (upstream default). Example: `1000`.
         :param _builtins.str default_toast_compression: Enum: `lz4`, `pglz`. Specifies the default TOAST compression method for values of compressible columns. The default is `lz4`.
         :param _builtins.int idle_in_transaction_session_timeout: Time out sessions with open transactions after this number of milliseconds.
@@ -1763,7 +1771,7 @@ class AlloydbomniAlloydbomniUserConfigPg(dict):
         :param _builtins.int pg_partman_bgw_dot_interval: Sets the time interval in seconds to run pg_partman's scheduled tasks. The default is `3600`. Example: `3600`.
         :param _builtins.str pg_partman_bgw_dot_role: Controls which role to use for pg_partman's scheduled background tasks. Example: `myrolename`.
         :param _builtins.str pg_stat_statements_dot_track: Enum: `all`, `none`, `top`. Controls which statements are counted. Specify top to track top-level statements (those issued directly by clients), all to also track nested statements (such as statements invoked within functions), or none to disable statement statistics collection. The default is `top`.
-        :param _builtins.int temp_file_limit: PostgreSQL temporary file limit in KiB, -1 for unlimited.
+        :param _builtins.int temp_file_limit: PostgreSQL temporary file limit in KiB, -1 for unlimited.
         :param _builtins.str timezone: PostgreSQL service timezone. Example: `Europe/Helsinki`.
         :param _builtins.int track_activity_query_size: Specifies the number of bytes reserved to track the currently executing command for each active session. Changing this parameter causes a service restart. Example: `1024`.
         :param _builtins.str track_commit_timestamp: Enum: `off`, `on`. Record commit time of transactions. Changing this parameter causes a service restart.
@@ -1891,7 +1899,7 @@ class AlloydbomniAlloydbomniUserConfigPg(dict):
     @pulumi.getter(name="autovacuumFreezeMaxAge")
     def autovacuum_freeze_max_age(self) -> Optional[_builtins.int]:
         """
-        Specifies the maximum age (in transactions) that a table's pg_class.relfrozenxid field can attain before a VACUUM operation is forced to prevent transaction ID wraparound within the table. The system launches autovacuum processes to prevent wraparound even when autovacuum is otherwise disabled. Changing this parameter causes a service restart.
+        Specifies the maximum age (in transactions) that a table's pg_class.relfrozenxid field can attain before a VACUUM operation is forced to prevent transaction ID wraparound within the table. The system launches autovacuum processes to prevent wraparound even when autovacuum is otherwise disabled. Changing this parameter causes a service restart.
         """
         return pulumi.get(self, "autovacuum_freeze_max_age")

@@ -1971,7 +1979,7 @@ class AlloydbomniAlloydbomniUserConfigPg(dict):
     @pulumi.getter(name="bgwriterLruMultiplier")
     def bgwriter_lru_multiplier(self) -> Optional[_builtins.float]:
         """
-        The average recent need for new buffers is multiplied by bgwriter*lru*multiplier to arrive at an estimate of the number that will be needed during the next round, (up to bgwriter*lru*maxpages). 1.0 represents a “just in time” policy of writing exactly the number of buffers predicted to be needed. Larger values provide some cushion against spikes in demand, while smaller values intentionally leave writes to be done by server processes. The default is `2.0`. Example: `2
+        The average recent need for new buffers is multiplied by bgwriter*lru*multiplier to arrive at an estimate of the number that will be needed during the next round, (up to bgwriter*lru*maxpages). 1.0 represents a “just in time” policy of writing exactly the number of buffers predicted to be needed. Larger values provide some cushion against spikes in demand, while smaller values intentionally leave writes to be done by server processes. The default is `2.0`. Example: `2`.
         """
         return pulumi.get(self, "bgwriter_lru_multiplier")

@@ -2203,7 +2211,7 @@ class AlloydbomniAlloydbomniUserConfigPg(dict):
     @pulumi.getter(name="tempFileLimit")
     def temp_file_limit(self) -> Optional[_builtins.int]:
         """
-        PostgreSQL temporary file limit in KiB, -1 for unlimited.
+        PostgreSQL temporary file limit in KiB, -1 for unlimited.
         """
         return pulumi.get(self, "temp_file_limit")

@@ -5654,7 +5662,7 @@ class FlinkFlinkUserConfig(dict):
         :param Sequence[_builtins.str] ip_filters: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`.
         :param _builtins.int number_of_task_slots: Task slots per node. For a 3 node plan, total number of task slots is 3x this value. Example: `1`.
         :param _builtins.int pekko_ask_timeout_s: Timeout in seconds used for all futures and blocking Pekko requests. Example: `10`.
-        :param _builtins.int pekko_framesize_b: Maximum size in bytes for messages exchanged between the JobManager and the TaskManagers.
+        :param _builtins.int pekko_framesize_b: Maximum size in bytes for messages exchanged between the JobManager and the TaskManagers.
         :param 'FlinkFlinkUserConfigPrivatelinkAccessArgs' privatelink_access: Allow access to selected service components through Privatelink
         :param 'FlinkFlinkUserConfigPublicAccessArgs' public_access: Allow access to selected service ports from the public Internet
         :param _builtins.bool service_log: Store logs for the service so that they are available in the HTTP API and console.
@@ -5757,7 +5765,7 @@
     @pulumi.getter(name="pekkoFramesizeB")
     def pekko_framesize_b(self) -> Optional[_builtins.int]:
         """
-        Maximum size in bytes for messages exchanged between the JobManager and the TaskManagers.
+        Maximum size in bytes for messages exchanged between the JobManager and the TaskManagers.
         """
         return pulumi.get(self, "pekko_framesize_b")

@@ -9994,9 +10002,9 @@ class KafkaConnectKafkaConnectUserConfigKafkaConnect(dict):
         """
         :param _builtins.str connector_client_config_override_policy: Enum: `All`, `None`. Defines what client configurations can be overridden by the connector. Default is None.
         :param _builtins.str consumer_auto_offset_reset: Enum: `earliest`, `latest`. What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
-        :param _builtins.int consumer_fetch_max_bytes: Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not a absolute maximum.
+        :param _builtins.int consumer_fetch_max_bytes: Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not a absolute maximum.
         :param _builtins.str consumer_isolation_level: Enum: `read_committed`, `read_uncommitted`. Transaction read isolation level. read*uncommitted is the default, but read*committed can be used if consume-exactly-once behavior is desired.
-        :param _builtins.int consumer_max_partition_fetch_bytes: Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress.
+        :param _builtins.int consumer_max_partition_fetch_bytes: Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress.
         :param _builtins.int consumer_max_poll_interval_ms: The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).
         :param _builtins.int consumer_max_poll_records: The maximum number of records returned in a single call to poll() (defaults to 500).
         :param _builtins.int offset_flush_interval_ms: The interval at which to try committing offsets for tasks (defaults to 60000).
@@ -10005,7 +10013,7 @@
         :param _builtins.int producer_buffer_memory: The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432).
         :param _builtins.str producer_compression_type: Enum: `gzip`, `lz4`, `none`, `snappy`, `zstd`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
         :param _builtins.int producer_linger_ms: This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting, however if there are fewer than this many bytes accumulated for this partition the producer will `linger` for the specified time waiting for more records to show up. Defaults to 0.
-        :param _builtins.int producer_max_request_size: This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests.
+        :param _builtins.int producer_max_request_size: This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests.
         :param _builtins.int scheduled_rebalance_max_delay_ms: The maximum delay that is scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. Defaults to 5 minutes.
         :param _builtins.int session_timeout_ms: The timeout in milliseconds used to detect failures when using Kafka’s group management facilities (defaults to 10000).
         """
@@ -10062,7 +10070,7 @@
     @pulumi.getter(name="consumerFetchMaxBytes")
     def consumer_fetch_max_bytes(self) -> Optional[_builtins.int]:
         """
-        Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not a absolute maximum.
+        Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not a absolute maximum.
         """
         return pulumi.get(self, "consumer_fetch_max_bytes")

@@ -10078,7 +10086,7 @@
     @pulumi.getter(name="consumerMaxPartitionFetchBytes")
    def consumer_max_partition_fetch_bytes(self) -> Optional[_builtins.int]:
         """
-        Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress.
+        Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress.
         """
         return pulumi.get(self, "consumer_max_partition_fetch_bytes")

@@ -10150,7 +10158,7 @@
     @pulumi.getter(name="producerMaxRequestSize")
     def producer_max_request_size(self) -> Optional[_builtins.int]:
         """
-        This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests.
+        This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests.
         """
         return pulumi.get(self, "producer_max_request_size")

@@ -10834,6 +10842,8 @@ class KafkaKafkaUserConfig(dict):
             suggest = "kafka_connect_plugin_versions"
         elif key == "kafkaConnectSecretProviders":
             suggest = "kafka_connect_secret_providers"
+        elif key == "kafkaDiskless":
+            suggest = "kafka_diskless"
         elif key == "kafkaRest":
             suggest = "kafka_rest"
         elif key == "kafkaRestAuthorization":
@@ -10890,6 +10900,7 @@
                  kafka_connect_config: Optional['outputs.KafkaKafkaUserConfigKafkaConnectConfig'] = None,
                  kafka_connect_plugin_versions: Optional[Sequence['outputs.KafkaKafkaUserConfigKafkaConnectPluginVersion']] = None,
                  kafka_connect_secret_providers: Optional[Sequence['outputs.KafkaKafkaUserConfigKafkaConnectSecretProvider']] = None,
+                 kafka_diskless: Optional['outputs.KafkaKafkaUserConfigKafkaDiskless'] = None,
                  kafka_rest: Optional[_builtins.bool] = None,
                  kafka_rest_authorization: Optional[_builtins.bool] = None,
                  kafka_rest_config: Optional['outputs.KafkaKafkaUserConfigKafkaRestConfig'] = None,
@@ -10918,11 +10929,12 @@
         :param _builtins.bool kafka_connect: Enable Kafka Connect service. Default: `false`.
         :param 'KafkaKafkaUserConfigKafkaConnectConfigArgs' kafka_connect_config: Kafka Connect configuration values
         :param Sequence['KafkaKafkaUserConfigKafkaConnectPluginVersionArgs'] kafka_connect_plugin_versions: The plugin selected by the user
+        :param 'KafkaKafkaUserConfigKafkaDisklessArgs' kafka_diskless: Kafka Diskless configuration values
         :param _builtins.bool kafka_rest: Enable Kafka-REST service. Default: `false`.
         :param _builtins.bool kafka_rest_authorization: Enable authorization in Kafka-REST service.
         :param 'KafkaKafkaUserConfigKafkaRestConfigArgs' kafka_rest_config: Kafka REST configuration
         :param 'KafkaKafkaUserConfigKafkaSaslMechanismsArgs' kafka_sasl_mechanisms: Kafka SASL mechanisms
-        :param _builtins.str kafka_version: Enum: `3.1`, `3.2`, `3.3`, `3.4`, `3.5`, `3.6`, `3.7`, `3.8`, `3.9`, and newer. Kafka major version.
+        :param _builtins.str kafka_version: Enum: `3.1`, `3.2`, `3.3`, `3.4`, `3.5`, `3.6`, `3.7`, `3.8`, `3.9`, `4.0`, and newer. Kafka major version.
         :param _builtins.bool letsencrypt_sasl_privatelink: Use Letsencrypt CA for Kafka SASL via Privatelink.
         :param 'KafkaKafkaUserConfigPrivateAccessArgs' private_access: Allow access to selected service ports from private networks
         :param 'KafkaKafkaUserConfigPrivatelinkAccessArgs' privatelink_access: Allow access to selected service components through Privatelink
@@ -10960,6 +10972,8 @@
             pulumi.set(__self__, "kafka_connect_plugin_versions", kafka_connect_plugin_versions)
         if kafka_connect_secret_providers is not None:
             pulumi.set(__self__, "kafka_connect_secret_providers", kafka_connect_secret_providers)
+        if kafka_diskless is not None:
+            pulumi.set(__self__, "kafka_diskless", kafka_diskless)
         if kafka_rest is not None:
             pulumi.set(__self__, "kafka_rest", kafka_rest)
         if kafka_rest_authorization is not None:
@@ -11094,6 +11108,14 @@
     def kafka_connect_secret_providers(self) -> Optional[Sequence['outputs.KafkaKafkaUserConfigKafkaConnectSecretProvider']]:
         return pulumi.get(self, "kafka_connect_secret_providers")

+    @_builtins.property
+    @pulumi.getter(name="kafkaDiskless")
+    def kafka_diskless(self) -> Optional['outputs.KafkaKafkaUserConfigKafkaDiskless']:
+        """
+        Kafka Diskless configuration values
+        """
+        return pulumi.get(self, "kafka_diskless")
+
     @_builtins.property
     @pulumi.getter(name="kafkaRest")
     def kafka_rest(self) -> Optional[_builtins.bool]:
@@ -11130,7 +11152,7 @@
     @pulumi.getter(name="kafkaVersion")
     def kafka_version(self) -> Optional[_builtins.str]:
         """
-        Enum: `3.1`, `3.2`, `3.3`, `3.4`, `3.5`, `3.6`, `3.7`, `3.8`, `3.9`, and newer. Kafka major version.
+        Enum: `3.1`, `3.2`, `3.3`, `3.4`, `3.5`, `3.6`, `3.7`, `3.8`, `3.9`, `4.0`, and newer. Kafka major version.
         """
         return pulumi.get(self, "kafka_version")

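Taken together, these hunks add an optional `kafka_diskless` block to the Kafka user config and extend the `kafka_version` enum with `4.0`. A minimal sketch of how the new fields might be set from a Pulumi program (the project, cloud, plan, and service names are illustrative, not taken from this diff):

    import pulumi_aiven as aiven

    # Hypothetical service definition; only kafka_user_config exercises
    # the fields introduced in this release.
    kafka = aiven.Kafka(
        "example-kafka",
        project="my-project",              # illustrative project name
        cloud_name="google-europe-west1",  # illustrative cloud/region
        plan="business-4",                 # illustrative plan
        service_name="example-kafka",
        kafka_user_config={
            "kafka_version": "4.0",               # newly accepted enum value
            "kafka_diskless": {"enabled": True},  # new optional block
        },
    )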
@@ -11307,6 +11329,10 @@ class KafkaKafkaUserConfigKafka(dict):
             suggest = "log_local_retention_ms"
         elif key == "logMessageDownconversionEnable":
             suggest = "log_message_downconversion_enable"
+        elif key == "logMessageTimestampAfterMaxMs":
+            suggest = "log_message_timestamp_after_max_ms"
+        elif key == "logMessageTimestampBeforeMaxMs":
+            suggest = "log_message_timestamp_before_max_ms"
         elif key == "logMessageTimestampDifferenceMaxMs":
             suggest = "log_message_timestamp_difference_max_ms"
         elif key == "logMessageTimestampType":
@@ -11393,6 +11419,8 @@
                  log_local_retention_bytes: Optional[_builtins.int] = None,
                  log_local_retention_ms: Optional[_builtins.int] = None,
                  log_message_downconversion_enable: Optional[_builtins.bool] = None,
+                 log_message_timestamp_after_max_ms: Optional[_builtins.int] = None,
+                 log_message_timestamp_before_max_ms: Optional[_builtins.int] = None,
                  log_message_timestamp_difference_max_ms: Optional[_builtins.int] = None,
                  log_message_timestamp_type: Optional[_builtins.str] = None,
                  log_preallocate: Optional[_builtins.bool] = None,
@@ -11426,9 +11454,9 @@
         :param _builtins.int connections_max_idle_ms: Idle connections timeout: the server socket processor threads close the connections that idle for longer than this. (Default: 600000 ms (10 minutes)). Example: `540000`.
         :param _builtins.int default_replication_factor: Replication factor for auto-created topics (Default: 3).
         :param _builtins.int group_initial_rebalance_delay_ms: The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. (Default: 3000 ms (3 seconds)). Example: `3000`.
-        :param _builtins.int group_max_session_timeout_ms: The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. Default: 1800000 ms (30 minutes).
+        :param _builtins.int group_max_session_timeout_ms: The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. Default: 1800000 ms (30 minutes).
         :param _builtins.int group_min_session_timeout_ms: The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. (Default: 6000 ms (6 seconds)). Example: `6000`.
-        :param _builtins.int log_cleaner_delete_retention_ms: How long are delete records retained? (Default: 86400000 (1 day)).
+        :param _builtins.int log_cleaner_delete_retention_ms: How long are delete records retained? (Default: 86400000 (1 day)).
         :param _builtins.int log_cleaner_max_compaction_lag_ms: The maximum amount of time message will remain uncompacted. Only applicable for logs that are being compacted. (Default: 9223372036854775807 ms (Long.MAX_VALUE)).
         :param _builtins.float log_cleaner_min_cleanable_ratio: Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option. (Default: 0.5). Example: `0.5`.
         :param _builtins.int log_cleaner_min_compaction_lag_ms: The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted. (Default: 0 ms).
@@ -11436,10 +11464,12 @@
         :param _builtins.int log_flush_interval_messages: The number of messages accumulated on a log partition before messages are flushed to disk (Default: 9223372036854775807 (Long.MAX_VALUE)).
         :param _builtins.int log_flush_interval_ms: The maximum time in ms that a message in any topic is kept in memory (page-cache) before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used (Default: null).
         :param _builtins.int log_index_interval_bytes: The interval with which Kafka adds an entry to the offset index (Default: 4096 bytes (4 kibibytes)). Example: `4096`.
-        :param _builtins.int log_index_size_max_bytes: The maximum size in bytes of the offset index (Default: 10485760 (10 mebibytes)).
+        :param _builtins.int log_index_size_max_bytes: The maximum size in bytes of the offset index (Default: 10485760 (10 mebibytes)).
         :param _builtins.int log_local_retention_bytes: The maximum size of local log segments that can grow for a partition before it gets eligible for deletion. If set to -2, the value of log.retention.bytes is used. The effective value should always be less than or equal to log.retention.bytes value. (Default: -2).
         :param _builtins.int log_local_retention_ms: The number of milliseconds to keep the local log segments before it gets eligible for deletion. If set to -2, the value of log.retention.ms is used. The effective value should always be less than or equal to log.retention.ms value. (Default: -2).
         :param _builtins.bool log_message_downconversion_enable: This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. (Default: true).
+        :param _builtins.int log_message_timestamp_after_max_ms: The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message. If message.timestamp.type=CreateTime, a message will be rejected if the difference in timestamp exceeds this threshold. Applies only for messages with timestamps later than the broker's timestamp. (Default: 9223372036854775807 (Long.MAX_VALUE)).
+        :param _builtins.int log_message_timestamp_before_max_ms: The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message. If message.timestamp.type=CreateTime, a message will be rejected if the difference in timestamp exceeds this threshold. Applies only for messages with timestamps earlier than the broker's timestamp. (Default: 9223372036854775807 (Long.MAX_VALUE)).
         :param _builtins.int log_message_timestamp_difference_max_ms: The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message (Default: 9223372036854775807 (Long.MAX_VALUE)).
         :param _builtins.str log_message_timestamp_type: Enum: `CreateTime`, `LogAppendTime`. Define whether the timestamp in the message is message create time or log append time. (Default: CreateTime).
         :param _builtins.bool log_preallocate: Should pre allocate file when create new segment? (Default: false).
@@ -11452,7 +11482,7 @@
         :param _builtins.int log_segment_delete_delay_ms: The amount of time to wait before deleting a file from the filesystem (Default: 60000 ms (1 minute)). Example: `60000`.
         :param _builtins.int max_connections_per_ip: The maximum number of connections allowed from each ip address (Default: 2147483647).
         :param _builtins.int max_incremental_fetch_session_cache_slots: The maximum number of incremental fetch sessions that the broker will maintain. (Default: 1000). Example: `1000`.
-        :param _builtins.int message_max_bytes: The maximum size of message that the server can receive. (Default: 1048588 bytes (1 mebibyte + 12 bytes)).
+        :param _builtins.int message_max_bytes: The maximum size of message that the server can receive. (Default: 1048588 bytes (1 mebibyte + 12 bytes)).
         :param _builtins.int min_insync_replicas: When a producer sets acks to `all` (or `-1`), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful. (Default: 1). Example: `1`.
         :param _builtins.int num_partitions: Number of partitions for auto-created topics (Default: 1).
         :param _builtins.int offsets_retention_minutes: Log retention window in minutes for offsets topic (Default: 10080 minutes (7 days)). Example: `10080`.
@@ -11465,8 +11495,8 @@
         :param _builtins.str sasl_oauthbearer_sub_claim_name: Name of the scope from which to extract the subject claim from the JWT.(Default: sub).
         :param _builtins.int socket_request_max_bytes: The maximum number of bytes in a socket request (Default: 104857600 bytes).
         :param _builtins.bool transaction_partition_verification_enable: Enable verification that checks that the partition has been added to the transaction before writing transactional records to the partition. (Default: true).
-        :param _builtins.int transaction_remove_expired_transaction_cleanup_interval_ms: The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (Default: 3600000 ms (1 hour)).
-        :param _builtins.int transaction_state_log_segment_bytes: The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (Default: 104857600 bytes (100 mebibytes)).
+        :param _builtins.int transaction_remove_expired_transaction_cleanup_interval_ms: The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (Default: 3600000 ms (1 hour)).
+        :param _builtins.int transaction_state_log_segment_bytes: The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (Default: 104857600 bytes (100 mebibytes)).
         """
         if auto_create_topics_enable is not None:
             pulumi.set(__self__, "auto_create_topics_enable", auto_create_topics_enable)
@@ -11506,6 +11536,10 @@
             pulumi.set(__self__, "log_local_retention_ms", log_local_retention_ms)
         if log_message_downconversion_enable is not None:
             pulumi.set(__self__, "log_message_downconversion_enable", log_message_downconversion_enable)
+        if log_message_timestamp_after_max_ms is not None:
+            pulumi.set(__self__, "log_message_timestamp_after_max_ms", log_message_timestamp_after_max_ms)
+        if log_message_timestamp_before_max_ms is not None:
+            pulumi.set(__self__, "log_message_timestamp_before_max_ms", log_message_timestamp_before_max_ms)
         if log_message_timestamp_difference_max_ms is not None:
             pulumi.set(__self__, "log_message_timestamp_difference_max_ms", log_message_timestamp_difference_max_ms)
         if log_message_timestamp_type is not None:
@@ -11605,7 +11639,7 @@
     @pulumi.getter(name="groupMaxSessionTimeoutMs")
     def group_max_session_timeout_ms(self) -> Optional[_builtins.int]:
         """
-        The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. Default: 1800000 ms (30 minutes).
+        The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. Default: 1800000 ms (30 minutes).
         """
         return pulumi.get(self, "group_max_session_timeout_ms")

@@ -11621,7 +11655,7 @@
     @pulumi.getter(name="logCleanerDeleteRetentionMs")
     def log_cleaner_delete_retention_ms(self) -> Optional[_builtins.int]:
         """
-        How long are delete records retained? (Default: 86400000 (1 day)).
+        How long are delete records retained? (Default: 86400000 (1 day)).
         """
         return pulumi.get(self, "log_cleaner_delete_retention_ms")

@@ -11685,7 +11719,7 @@
     @pulumi.getter(name="logIndexSizeMaxBytes")
     def log_index_size_max_bytes(self) -> Optional[_builtins.int]:
         """
-        The maximum size in bytes of the offset index (Default: 10485760 (10 mebibytes)).
+        The maximum size in bytes of the offset index (Default: 10485760 (10 mebibytes)).
         """
         return pulumi.get(self, "log_index_size_max_bytes")

@@ -11713,6 +11747,22 @@
         """
         return pulumi.get(self, "log_message_downconversion_enable")

+    @_builtins.property
+    @pulumi.getter(name="logMessageTimestampAfterMaxMs")
+    def log_message_timestamp_after_max_ms(self) -> Optional[_builtins.int]:
+        """
+        The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message. If message.timestamp.type=CreateTime, a message will be rejected if the difference in timestamp exceeds this threshold. Applies only for messages with timestamps later than the broker's timestamp. (Default: 9223372036854775807 (Long.MAX_VALUE)).
+        """
+        return pulumi.get(self, "log_message_timestamp_after_max_ms")
+
+    @_builtins.property
+    @pulumi.getter(name="logMessageTimestampBeforeMaxMs")
+    def log_message_timestamp_before_max_ms(self) -> Optional[_builtins.int]:
+        """
+        The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message. If message.timestamp.type=CreateTime, a message will be rejected if the difference in timestamp exceeds this threshold. Applies only for messages with timestamps earlier than the broker's timestamp. (Default: 9223372036854775807 (Long.MAX_VALUE)).
+        """
+        return pulumi.get(self, "log_message_timestamp_before_max_ms")
+
     @_builtins.property
     @pulumi.getter(name="logMessageTimestampDifferenceMaxMs")
     def log_message_timestamp_difference_max_ms(self) -> Optional[_builtins.int]:
@@ -11813,7 +11863,7 @@
     @pulumi.getter(name="messageMaxBytes")
     def message_max_bytes(self) -> Optional[_builtins.int]:
         """
-        The maximum size of message that the server can receive. (Default: 1048588 bytes (1 mebibyte + 12 bytes)).
+        The maximum size of message that the server can receive. (Default: 1048588 bytes (1 mebibyte + 12 bytes)).
         """
         return pulumi.get(self, "message_max_bytes")

@@ -11917,7 +11967,7 @@
     @pulumi.getter(name="transactionRemoveExpiredTransactionCleanupIntervalMs")
     def transaction_remove_expired_transaction_cleanup_interval_ms(self) -> Optional[_builtins.int]:
         """
-        The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (Default: 3600000 ms (1 hour)).
+        The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (Default: 3600000 ms (1 hour)).
         """
         return pulumi.get(self, "transaction_remove_expired_transaction_cleanup_interval_ms")

@@ -11925,7 +11975,7 @@
     @pulumi.getter(name="transactionStateLogSegmentBytes")
     def transaction_state_log_segment_bytes(self) -> Optional[_builtins.int]:
         """
-        The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (Default: 104857600 bytes (100 mebibytes)).
+        The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (Default: 104857600 bytes (100 mebibytes)).
         """
         return pulumi.get(self, "transaction_state_log_segment_bytes")

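The two new settings bound future-dated and past-dated message timestamps separately, complementing the older symmetric `log_message_timestamp_difference_max_ms`. A sketch of the nested `kafka` block they live in (the values are illustrative; per the docstrings both default to Long.MAX_VALUE, i.e. effectively unbounded, and rejection applies when message.timestamp.type=CreateTime):

    kafka_user_config = {
        "kafka": {
            # Reject messages stamped more than 1 hour in the future...
            "log_message_timestamp_after_max_ms": 3_600_000,
            # ...or more than 7 days in the past, relative to the broker clock.
            "log_message_timestamp_before_max_ms": 7 * 24 * 3_600_000,
        },
    }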
@@ -12030,9 +12080,9 @@ class KafkaKafkaUserConfigKafkaConnectConfig(dict):
         """
         :param _builtins.str connector_client_config_override_policy: Enum: `All`, `None`. Defines what client configurations can be overridden by the connector. Default is None.
         :param _builtins.str consumer_auto_offset_reset: Enum: `earliest`, `latest`. What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
-        :param _builtins.int consumer_fetch_max_bytes: Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not a absolute maximum.
+        :param _builtins.int consumer_fetch_max_bytes: Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not a absolute maximum.
         :param _builtins.str consumer_isolation_level: Enum: `read_committed`, `read_uncommitted`. Transaction read isolation level. read*uncommitted is the default, but read*committed can be used if consume-exactly-once behavior is desired.
-        :param _builtins.int consumer_max_partition_fetch_bytes: Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress.
+        :param _builtins.int consumer_max_partition_fetch_bytes: Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress.
         :param _builtins.int consumer_max_poll_interval_ms: The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).
         :param _builtins.int consumer_max_poll_records: The maximum number of records returned in a single call to poll() (defaults to 500).
         :param _builtins.int offset_flush_interval_ms: The interval at which to try committing offsets for tasks (defaults to 60000).
@@ -12041,7 +12091,7 @@
         :param _builtins.int producer_buffer_memory: The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432).
         :param _builtins.str producer_compression_type: Enum: `gzip`, `lz4`, `none`, `snappy`, `zstd`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
         :param _builtins.int producer_linger_ms: This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting, however if there are fewer than this many bytes accumulated for this partition the producer will `linger` for the specified time waiting for more records to show up. Defaults to 0.
-        :param _builtins.int producer_max_request_size: This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests.
+        :param _builtins.int producer_max_request_size: This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests.
         :param _builtins.int scheduled_rebalance_max_delay_ms: The maximum delay that is scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. Defaults to 5 minutes.
         :param _builtins.int session_timeout_ms: The timeout in milliseconds used to detect failures when using Kafka’s group management facilities (defaults to 10000).
         """
@@ -12098,7 +12148,7 @@
     @pulumi.getter(name="consumerFetchMaxBytes")
     def consumer_fetch_max_bytes(self) -> Optional[_builtins.int]:
         """
-        Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not a absolute maximum.
+        Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not a absolute maximum.
         """
         return pulumi.get(self, "consumer_fetch_max_bytes")

@@ -12114,7 +12164,7 @@
     @pulumi.getter(name="consumerMaxPartitionFetchBytes")
     def consumer_max_partition_fetch_bytes(self) -> Optional[_builtins.int]:
         """
-        Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress.
+        Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress.
         """
         return pulumi.get(self, "consumer_max_partition_fetch_bytes")

@@ -12186,7 +12236,7 @@
     @pulumi.getter(name="producerMaxRequestSize")
     def producer_max_request_size(self) -> Optional[_builtins.int]:
         """
-        This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests.
+        This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests.
         """
         return pulumi.get(self, "producer_max_request_size")

@@ -12455,6 +12505,24 @@ class KafkaKafkaUserConfigKafkaConnectSecretProviderVault(dict):
         return pulumi.get(self, "token")


+@pulumi.output_type
+class KafkaKafkaUserConfigKafkaDiskless(dict):
+    def __init__(__self__, *,
+                 enabled: _builtins.bool):
+        """
+        :param _builtins.bool enabled: Whether to enable the Diskless functionality.
+        """
+        pulumi.set(__self__, "enabled", enabled)
+
+    @_builtins.property
+    @pulumi.getter
+    def enabled(self) -> _builtins.bool:
+        """
+        Whether to enable the Diskless functionality.
+        """
+        return pulumi.get(self, "enabled")
+
+
 @pulumi.output_type
 class KafkaKafkaUserConfigKafkaRestConfig(dict):
     @staticmethod
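Since `KafkaKafkaUserConfigKafkaDiskless` is an output type whose only field is the required `enabled` flag, the setting can also be read back from a provisioned service. A sketch, reusing the hypothetical `kafka` resource from the earlier example:

    import pulumi

    # kafka_user_config is an Output, so the nested field is unwrapped
    # with apply(); the attribute chain mirrors the new output type.
    pulumi.export(
        "diskless_enabled",
        kafka.kafka_user_config.apply(
            lambda cfg: bool(cfg and cfg.kafka_diskless and cfg.kafka_diskless.enabled)
        ),
    )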
@@ -12509,14 +12577,14 @@ class KafkaKafkaUserConfigKafkaRestConfig(dict):
         """
         :param _builtins.bool consumer_enable_auto_commit: If true the consumer's offset will be periodically committed to Kafka in the background. Default: `true`.
         :param _builtins.int consumer_idle_disconnect_timeout: Specifies the maximum duration (in seconds) a client can remain idle before it is deleted. If a consumer is inactive, it will exit the consumer group, and its state will be discarded. A value of 0 (default) indicates that the consumer will not be disconnected automatically due to inactivity. Default: `0`.
-        :param _builtins.int consumer_request_max_bytes: Maximum number of bytes in unencoded message keys and values by a single request.
+        :param _builtins.int consumer_request_max_bytes: Maximum number of bytes in unencoded message keys and values by a single request.
         :param _builtins.int consumer_request_timeout_ms: Enum: `1000`, `15000`, `30000`. The maximum total time to wait for messages for a request if the maximum number of messages has not yet been reached. Default: `1000`.
         :param _builtins.str name_strategy: Enum: `record_name`, `topic_name`, `topic_record_name`. Name strategy to use when selecting subject for storing schemas. Default: `topic_name`.
         :param _builtins.bool name_strategy_validation: If true, validate that given schema is registered under expected subject name by the used name strategy when producing messages. Default: `true`.
         :param _builtins.str producer_acks: Enum: `-1`, `0`, `1`, `all`. The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to `all` or `-1`, the leader will wait for the full set of in-sync replicas to acknowledge the record. Default: `1`.
         :param _builtins.str producer_compression_type: Enum: `gzip`, `lz4`, `none`, `snappy`, `zstd`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
         :param _builtins.int producer_linger_ms: Wait for up to the given delay to allow batching records together. Default: `0`.
-        :param _builtins.int producer_max_request_size: The maximum size of a request in bytes. Note that Kafka broker can also cap the record batch size.
+        :param _builtins.int producer_max_request_size: The maximum size of a request in bytes. Note that Kafka broker can also cap the record batch size.
         :param _builtins.int simpleconsumer_pool_size_max: Maximum number of SimpleConsumers that can be instantiated per broker. Default: `25`.
         """
         if consumer_enable_auto_commit is not None:
@@ -12562,7 +12630,7 @@
     @pulumi.getter(name="consumerRequestMaxBytes")
     def consumer_request_max_bytes(self) -> Optional[_builtins.int]:
         """
-        Maximum number of bytes in unencoded message keys and values by a single request.
+        Maximum number of bytes in unencoded message keys and values by a single request.
         """
         return pulumi.get(self, "consumer_request_max_bytes")

@@ -12618,7 +12686,7 @@
     @pulumi.getter(name="producerMaxRequestSize")
     def producer_max_request_size(self) -> Optional[_builtins.int]:
         """
-        The maximum size of a request in bytes. Note that Kafka broker can also cap the record batch size.
+        The maximum size of a request in bytes. Note that Kafka broker can also cap the record batch size.
         """
         return pulumi.get(self, "producer_max_request_size")

@@ -13972,7 +14040,7 @@ class KafkaTopicConfig(dict):
         :param _builtins.str max_compaction_lag_ms: The maximum time a message will remain ineligible for compaction in the log. Only applicable for logs that are being compacted.
         :param _builtins.str max_message_bytes: The largest record batch size allowed by Kafka (after compression if compression is enabled). If this is increased and there are consumers older than 0.10.2, the consumers' fetch size must also be increased so that the they can fetch record batches this large. In the latest message format version, records are always grouped into batches for efficiency. In previous message format versions, uncompressed records are not grouped into batches and this limit only applies to a single record in that case.
         :param _builtins.bool message_downconversion_enable: This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. When set to false, broker will not perform down-conversion for consumers expecting an older message format. The broker responds with UNSUPPORTED_VERSION error for consume requests from such older clients. This configuration does not apply to any message format conversion that might be required for replication to followers.
-        :param _builtins.str message_format_version: Specify the message format version the broker will use to append messages to the logs. The value should be a valid ApiVersion. Some examples are: 0.8.2, 0.9.0.0, 0.10.0, check ApiVersion for more details. By setting a particular message format version, the user is certifying that all the existing messages on disk are smaller or equal than the specified version. Setting this value incorrectly will cause consumers with older versions to break as they will receive messages with a format that they don't understand. The possible values are `0.10.0`, `0.10.0-IV0`, `0.10.0-IV1`, `0.10.1`, `0.10.1-IV0`, `0.10.1-IV1`, `0.10.1-IV2`, `0.10.2`, `0.10.2-IV0`, `0.11.0`, `0.11.0-IV0`, `0.11.0-IV1`, `0.11.0-IV2`, `0.8.0`, `0.8.1`, `0.8.2`, `0.9.0`, `1.0`, `1.0-IV0`, `1.1`, `1.1-IV0`, `2.0`, `2.0-IV0`, `2.0-IV1`, `2.1`, `2.1-IV0`, `2.1-IV1`, `2.1-IV2`, `2.2`, `2.2-IV0`, `2.2-IV1`, `2.3`, `2.3-IV0`, `2.3-IV1`, `2.4`, `2.4-IV0`, `2.4-IV1`, `2.5`, `2.5-IV0`, `2.6`, `2.6-IV0`, `2.7`, `2.7-IV0`, `2.7-IV1`, `2.7-IV2`, `2.8`, `2.8-IV0`, `2.8-IV1`, `3.0`, `3.0-IV0`, `3.0-IV1`, `3.1`, `3.1-IV0`, `3.2`, `3.2-IV0`, `3.3`, `3.3-IV0`, `3.3-IV1`, `3.3-IV2`, `3.3-IV3`, `3.4`, `3.4-IV0`, `3.5`, `3.5-IV0`, `3.5-IV1`, `3.5-IV2`, `3.6`, `3.6-IV0`, `3.6-IV1`, `3.6-IV2`, `3.7`, `3.7-IV0`, `3.7-IV1`, `3.7-IV2`, `3.7-IV3`, `3.7-IV4`, `3.8`, `3.8-IV0`, `3.9`, `3.9-IV0`, `3.9-IV1`, `4.0`, `4.0-IV0`, `4.1` and `4.1-IV0`.
+        :param _builtins.str message_format_version: Specify the message format version the broker will use to append messages to the logs. The value should be a valid ApiVersion. Some examples are: 0.8.2, 0.9.0.0, 0.10.0, check ApiVersion for more details. By setting a particular message format version, the user is certifying that all the existing messages on disk are smaller or equal than the specified version. Setting this value incorrectly will cause consumers with older versions to break as they will receive messages with a format that they don't understand. Deprecated in Kafka 4.0+: this configuration is removed and any supplied value will be ignored; for services upgraded to 4.0+, the returned value may be 'None'. The possible values are `0.10.0`, `0.10.0-IV0`, `0.10.0-IV1`, `0.10.1`, `0.10.1-IV0`, `0.10.1-IV1`, `0.10.1-IV2`, `0.10.2`, `0.10.2-IV0`, `0.11.0`, `0.11.0-IV0`, `0.11.0-IV1`, `0.11.0-IV2`, `0.8.0`, `0.8.1`, `0.8.2`, `0.9.0`, `1.0`, `1.0-IV0`, `1.1`, `1.1-IV0`, `2.0`, `2.0-IV0`, `2.0-IV1`, `2.1`, `2.1-IV0`, `2.1-IV1`, `2.1-IV2`, `2.2`, `2.2-IV0`, `2.2-IV1`, `2.3`, `2.3-IV0`, `2.3-IV1`, `2.4`, `2.4-IV0`, `2.4-IV1`, `2.5`, `2.5-IV0`, `2.6`, `2.6-IV0`, `2.7`, `2.7-IV0`, `2.7-IV1`, `2.7-IV2`, `2.8`, `2.8-IV0`, `2.8-IV1`, `3.0`, `3.0-IV0`, `3.0-IV1`, `3.1`, `3.1-IV0`, `3.2`, `3.2-IV0`, `3.3`, `3.3-IV0`, `3.3-IV1`, `3.3-IV2`, `3.3-IV3`, `3.4`, `3.4-IV0`, `3.5`, `3.5-IV0`, `3.5-IV1`, `3.5-IV2`, `3.6`, `3.6-IV0`, `3.6-IV1`, `3.6-IV2`, `3.7`, `3.7-IV0`, `3.7-IV1`, `3.7-IV2`, `3.7-IV3`, `3.7-IV4`, `3.8`, `3.8-IV0`, `3.9`, `3.9-IV0`, `3.9-IV1`, `4.0`, `4.0-IV0`, `4.1` and `4.1-IV0`.
         :param _builtins.str message_timestamp_difference_max_ms: The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message. If message.timestamp.type=CreateTime, a message will be rejected if the difference in timestamp exceeds this threshold. This configuration is ignored if message.timestamp.type=LogAppendTime.
         :param _builtins.str message_timestamp_type: Define whether the timestamp in the message is message create time or log append time. The possible values are `CreateTime` and `LogAppendTime`.
         :param _builtins.float min_cleanable_dirty_ratio: This configuration controls how frequently the log compactor will attempt to clean the log (assuming log compaction is enabled). By default we will avoid cleaning a log where more than 50% of the log has been compacted. This ratio bounds the maximum space wasted in the log by duplicates (at 50% at most 50% of the log could be duplicates). A higher ratio will mean fewer, more efficient cleanings but will mean more wasted space in the log. If the max.compaction.lag.ms or the min.compaction.lag.ms configurations are also specified, then the log compactor considers the log to be eligible for compaction as soon as either: (i) the dirty ratio threshold has been met and the log has had dirty (uncompacted) records for at least the min.compaction.lag.ms duration, or (ii) if the log has had dirty (uncompacted) records for at most the max.compaction.lag.ms period.
@@ -14153,7 +14221,7 @@ class KafkaTopicConfig(dict):
     @pulumi.getter(name="messageFormatVersion")
     def message_format_version(self) -> Optional[_builtins.str]:
         """
-        Specify the message format version the broker will use to append messages to the logs. The value should be a valid ApiVersion. Some examples are: 0.8.2, 0.9.0.0, 0.10.0, check ApiVersion for more details. By setting a particular message format version, the user is certifying that all the existing messages on disk are smaller or equal than the specified version. Setting this value incorrectly will cause consumers with older versions to break as they will receive messages with a format that they don't understand. The possible values are `0.10.0`, `0.10.0-IV0`, `0.10.0-IV1`, `0.10.1`, `0.10.1-IV0`, `0.10.1-IV1`, `0.10.1-IV2`, `0.10.2`, `0.10.2-IV0`, `0.11.0`, `0.11.0-IV0`, `0.11.0-IV1`, `0.11.0-IV2`, `0.8.0`, `0.8.1`, `0.8.2`, `0.9.0`, `1.0`, `1.0-IV0`, `1.1`, `1.1-IV0`, `2.0`, `2.0-IV0`, `2.0-IV1`, `2.1`, `2.1-IV0`, `2.1-IV1`, `2.1-IV2`, `2.2`, `2.2-IV0`, `2.2-IV1`, `2.3`, `2.3-IV0`, `2.3-IV1`, `2.4`, `2.4-IV0`, `2.4-IV1`, `2.5`, `2.5-IV0`, `2.6`, `2.6-IV0`, `2.7`, `2.7-IV0`, `2.7-IV1`, `2.7-IV2`, `2.8`, `2.8-IV0`, `2.8-IV1`, `3.0`, `3.0-IV0`, `3.0-IV1`, `3.1`, `3.1-IV0`, `3.2`, `3.2-IV0`, `3.3`, `3.3-IV0`, `3.3-IV1`, `3.3-IV2`, `3.3-IV3`, `3.4`, `3.4-IV0`, `3.5`, `3.5-IV0`, `3.5-IV1`, `3.5-IV2`, `3.6`, `3.6-IV0`, `3.6-IV1`, `3.6-IV2`, `3.7`, `3.7-IV0`, `3.7-IV1`, `3.7-IV2`, `3.7-IV3`, `3.7-IV4`, `3.8`, `3.8-IV0`, `3.9`, `3.9-IV0`, `3.9-IV1`, `4.0`, `4.0-IV0`, `4.1` and `4.1-IV0`.
+        Specify the message format version the broker will use to append messages to the logs. The value should be a valid ApiVersion. Some examples are: 0.8.2, 0.9.0.0, 0.10.0, check ApiVersion for more details. By setting a particular message format version, the user is certifying that all the existing messages on disk are smaller or equal than the specified version. Setting this value incorrectly will cause consumers with older versions to break as they will receive messages with a format that they don't understand. Deprecated in Kafka 4.0+: this configuration is removed and any supplied value will be ignored; for services upgraded to 4.0+, the returned value may be 'None'. The possible values are `0.10.0`, `0.10.0-IV0`, `0.10.0-IV1`, `0.10.1`, `0.10.1-IV0`, `0.10.1-IV1`, `0.10.1-IV2`, `0.10.2`, `0.10.2-IV0`, `0.11.0`, `0.11.0-IV0`, `0.11.0-IV1`, `0.11.0-IV2`, `0.8.0`, `0.8.1`, `0.8.2`, `0.9.0`, `1.0`, `1.0-IV0`, `1.1`, `1.1-IV0`, `2.0`, `2.0-IV0`, `2.0-IV1`, `2.1`, `2.1-IV0`, `2.1-IV1`, `2.1-IV2`, `2.2`, `2.2-IV0`, `2.2-IV1`, `2.3`, `2.3-IV0`, `2.3-IV1`, `2.4`, `2.4-IV0`, `2.4-IV1`, `2.5`, `2.5-IV0`, `2.6`, `2.6-IV0`, `2.7`, `2.7-IV0`, `2.7-IV1`, `2.7-IV2`, `2.8`, `2.8-IV0`, `2.8-IV1`, `3.0`, `3.0-IV0`, `3.0-IV1`, `3.1`, `3.1-IV0`, `3.2`, `3.2-IV0`, `3.3`, `3.3-IV0`, `3.3-IV1`, `3.3-IV2`, `3.3-IV3`, `3.4`, `3.4-IV0`, `3.5`, `3.5-IV0`, `3.5-IV1`, `3.5-IV2`, `3.6`, `3.6-IV0`, `3.6-IV1`, `3.6-IV2`, `3.7`, `3.7-IV0`, `3.7-IV1`, `3.7-IV2`, `3.7-IV3`, `3.7-IV4`, `3.8`, `3.8-IV0`, `3.9`, `3.9-IV0`, `3.9-IV1`, `4.0`, `4.0-IV0`, `4.1` and `4.1-IV0`.
         """
         return pulumi.get(self, "message_format_version")

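Since `message_format_version` is ignored on Kafka 4.0+ services (and may read back as `None` after an upgrade), pinning it in a topic definition only makes sense on older services. A minimal sketch, with placeholder project and service names:

    import pulumi_aiven as aiven

    topic = aiven.KafkaTopic(
        "example-topic",
        project="my-project",            # placeholder
        service_name="example-kafka",    # placeholder Kafka service
        topic_name="events",
        partitions=3,
        replication=2,
        config={
            # ignored (and possibly returned as None) once the service runs Kafka 4.0+
            "message_format_version": "3.9",
        },
    )
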
@@ -15769,7 +15837,7 @@ class M3DbM3dbUserConfigRulesMapping(dict):
                  namespaces_strings: Optional[Sequence[_builtins.str]] = None,
                  tags: Optional[Sequence['outputs.M3DbM3dbUserConfigRulesMappingTag']] = None):
         """
-        :param _builtins.str filter: Matching metric names with wildcards (using
+        :param _builtins.str filter: Matching metric names with wildcards (using __name__:wildcard) or matching tags and their (optionally wildcarded) values. For value, ! can be used at start of value for negation, and multiple filters can be supplied using space as separator. Example: `__name__:disk_* host:important-42 mount:!*/sda`.
         :param Sequence[_builtins.str] aggregations: List of aggregations to be applied.
         :param _builtins.bool drop: Only store the derived metric (as specified in the roll-up rules), if any.
         :param _builtins.str name: The (optional) name of the rule. Example: `important disk metrics`.
@@ -15798,7 +15866,7 @@ class M3DbM3dbUserConfigRulesMapping(dict):
     @pulumi.getter
     def filter(self) -> _builtins.str:
         """
-        Matching metric names with wildcards (using
+        Matching metric names with wildcards (using __name__:wildcard) or matching tags and their (optionally wildcarded) values. For value, ! can be used at start of value for negation, and multiple filters can be supplied using space as separator. Example: `__name__:disk_* host:important-42 mount:!*/sda`.
         """
         return pulumi.get(self, "filter")

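The restored `filter` description documents the `__name__:wildcard` and `tag:value` syntax, with `!` for negation. A sketch of a mapping rule using it, assuming the `rules`/`mappings` nesting of the M3DB user config mirrors this output type (all names and the aggregation value are placeholders):

    import pulumi_aiven as aiven

    m3db = aiven.M3Db(
        "example-m3db",
        project="my-project",              # placeholder
        cloud_name="google-europe-west1",
        plan="business-8",
        service_name="example-m3db",
        m3db_user_config={
            "rules": {
                "mappings": [{
                    "name": "important disk metrics",
                    # keep disk_* metrics for host important-42, excluding */sda mounts
                    "filter": "__name__:disk_* host:important-42 mount:!*/sda",
                    "aggregations": ["Max"],
                }],
            },
        },
    )
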
@@ -16930,8 +16998,8 @@ class MySqlMysqlUserConfigMysql(dict):
         :param _builtins.int innodb_ft_min_token_size: Minimum length of words that are stored in an InnoDB FULLTEXT index. Changing this parameter will lead to a restart of the MySQL service. Example: `3`.
         :param _builtins.str innodb_ft_server_stopword_table: This option is used to specify your own InnoDB FULLTEXT index stopword list for all InnoDB tables. Example: `db_name/table_name`.
         :param _builtins.int innodb_lock_wait_timeout: The length of time in seconds an InnoDB transaction waits for a row lock before giving up. Default is 120. Example: `50`.
-        :param _builtins.int innodb_log_buffer_size: The size in bytes of the buffer that InnoDB uses to write to the log files on disk.
-        :param _builtins.int innodb_online_alter_log_max_size: The upper limit in bytes on the size of the temporary log files used during online DDL operations for InnoDB tables.
+        :param _builtins.int innodb_log_buffer_size: The size in bytes of the buffer that InnoDB uses to write to the log files on disk.
+        :param _builtins.int innodb_online_alter_log_max_size: The upper limit in bytes on the size of the temporary log files used during online DDL operations for InnoDB tables.
         :param _builtins.bool innodb_print_all_deadlocks: When enabled, information about all deadlocks in InnoDB user transactions is recorded in the error log. Disabled by default.
         :param _builtins.int innodb_read_io_threads: The number of I/O threads for read operations in InnoDB. Default is 4. Changing this parameter will lead to a restart of the MySQL service. Example: `10`.
         :param _builtins.bool innodb_rollback_on_timeout: When enabled a transaction timeout causes InnoDB to abort and roll back the entire transaction. Changing this parameter will lead to a restart of the MySQL service.
@@ -16940,9 +17008,9 @@ class MySqlMysqlUserConfigMysql(dict):
         :param _builtins.int interactive_timeout: The number of seconds the server waits for activity on an interactive connection before closing it. Example: `3600`.
         :param _builtins.str internal_tmp_mem_storage_engine: Enum: `MEMORY`, `TempTable`. The storage engine for in-memory internal temporary tables.
         :param _builtins.str log_output: Enum: `INSIGHTS`, `INSIGHTS,TABLE`, `NONE`, `TABLE`. The slow log output destination when slow*query*log is ON. To enable MySQL AI Insights, choose INSIGHTS. To use MySQL AI Insights and the mysql.slow*log table at the same time, choose INSIGHTS,TABLE. To only use the mysql.slow*log table, choose TABLE. To silence slow logs, choose NONE.
-        :param _builtins.float long_query_time: The slow*query*logs work as SQL statements that take more than long*query*time seconds to execute. Example: `10
-        :param _builtins.int max_allowed_packet: Size of the largest message in bytes that can be received by the server. Default is 67108864 (64M).
-        :param _builtins.int max_heap_table_size: Limits the size of internal in-memory tables. Also set tmp*table*size. Default is 16777216 (16M).
+        :param _builtins.float long_query_time: The slow*query*logs work as SQL statements that take more than long*query*time seconds to execute. Example: `10`.
+        :param _builtins.int max_allowed_packet: Size of the largest message in bytes that can be received by the server. Default is 67108864 (64M).
+        :param _builtins.int max_heap_table_size: Limits the size of internal in-memory tables. Also set tmp*table*size. Default is 16777216 (16M).
         :param _builtins.int net_buffer_length: Start sizes of connection buffer and result buffer. Default is 16384 (16K). Changing this parameter will lead to a restart of the MySQL service. Example: `16384`.
         :param _builtins.int net_read_timeout: The number of seconds to wait for more data from a connection before aborting the read. Example: `30`.
         :param _builtins.int net_write_timeout: The number of seconds to wait for a block to be written to a connection before aborting the write. Example: `30`.
@@ -16950,7 +17018,7 @@ class MySqlMysqlUserConfigMysql(dict):
         :param _builtins.int sort_buffer_size: Sort buffer size in bytes for ORDER BY optimization. Default is 262144 (256K). Example: `262144`.
         :param _builtins.str sql_mode: Global SQL mode. Set to empty to use MySQL server defaults. When creating a new service and not setting this field Aiven default SQL mode (strict, SQL standard compliant) will be assigned. Example: `ANSI,TRADITIONAL`.
         :param _builtins.bool sql_require_primary_key: Require primary key to be defined for new tables or old tables modified with ALTER TABLE and fail if missing. It is recommended to always have primary keys because various functionality may break if any large table is missing them.
-        :param _builtins.int tmp_table_size: Limits the size of internal in-memory tables. Also set max*heap*table_size. Default is 16777216 (16M).
+        :param _builtins.int tmp_table_size: Limits the size of internal in-memory tables. Also set max*heap*table_size. Default is 16777216 (16M).
         :param _builtins.int wait_timeout: The number of seconds the server waits for activity on a noninteractive connection before closing it. Example: `28800`.
         """
         if connect_timeout is not None:
@@ -17092,7 +17160,7 @@ class MySqlMysqlUserConfigMysql(dict):
     @pulumi.getter(name="innodbLogBufferSize")
     def innodb_log_buffer_size(self) -> Optional[_builtins.int]:
         """
-        The size in bytes of the buffer that InnoDB uses to write to the log files on disk.
+        The size in bytes of the buffer that InnoDB uses to write to the log files on disk.
         """
         return pulumi.get(self, "innodb_log_buffer_size")

@@ -17100,7 +17168,7 @@ class MySqlMysqlUserConfigMysql(dict):
     @pulumi.getter(name="innodbOnlineAlterLogMaxSize")
     def innodb_online_alter_log_max_size(self) -> Optional[_builtins.int]:
         """
-        The upper limit in bytes on the size of the temporary log files used during online DDL operations for InnoDB tables.
+        The upper limit in bytes on the size of the temporary log files used during online DDL operations for InnoDB tables.
         """
         return pulumi.get(self, "innodb_online_alter_log_max_size")

@@ -17172,7 +17240,7 @@ class MySqlMysqlUserConfigMysql(dict):
     @pulumi.getter(name="longQueryTime")
     def long_query_time(self) -> Optional[_builtins.float]:
         """
-        The slow*query*logs work as SQL statements that take more than long*query*time seconds to execute. Example: `10
+        The slow*query*logs work as SQL statements that take more than long*query*time seconds to execute. Example: `10`.
         """
         return pulumi.get(self, "long_query_time")

@@ -17180,7 +17248,7 @@ class MySqlMysqlUserConfigMysql(dict):
     @pulumi.getter(name="maxAllowedPacket")
     def max_allowed_packet(self) -> Optional[_builtins.int]:
         """
-        Size of the largest message in bytes that can be received by the server. Default is 67108864 (64M).
+        Size of the largest message in bytes that can be received by the server. Default is 67108864 (64M).
         """
         return pulumi.get(self, "max_allowed_packet")

@@ -17188,7 +17256,7 @@ class MySqlMysqlUserConfigMysql(dict):
     @pulumi.getter(name="maxHeapTableSize")
     def max_heap_table_size(self) -> Optional[_builtins.int]:
         """
-        Limits the size of internal in-memory tables. Also set tmp*table*size. Default is 16777216 (16M).
+        Limits the size of internal in-memory tables. Also set tmp*table*size. Default is 16777216 (16M).
         """
         return pulumi.get(self, "max_heap_table_size")

@@ -17252,7 +17320,7 @@ class MySqlMysqlUserConfigMysql(dict):
     @pulumi.getter(name="tmpTableSize")
     def tmp_table_size(self) -> Optional[_builtins.int]:
         """
-        Limits the size of internal in-memory tables. Also set max*heap*table_size. Default is 16777216 (16M).
+        Limits the size of internal in-memory tables. Also set max*heap*table_size. Default is 16777216 (16M).
         """
         return pulumi.get(self, "tmp_table_size")

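Several of the restored descriptions concern the `mysql` sub-block of the user config; `max_heap_table_size` and `tmp_table_size` explicitly advise setting each other. A minimal sketch keeping the pair in sync (placeholder names):

    import pulumi_aiven as aiven

    mysql = aiven.MySql(
        "example-mysql",
        project="my-project",              # placeholder
        cloud_name="google-europe-west1",
        plan="business-4",
        service_name="example-mysql",
        mysql_user_config={
            "mysql": {
                "long_query_time": 10,
                # the docs advise keeping these two in sync
                "max_heap_table_size": 16777216,
                "tmp_table_size": 16777216,
            },
        },
    )
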
@@ -17770,6 +17838,8 @@ class OpenSearchOpensearchUserConfig(dict):
             suggest = "azure_migration"
         elif key == "customDomain":
             suggest = "custom_domain"
+        elif key == "customKeystores":
+            suggest = "custom_keystores"
         elif key == "disableReplicationFactorAdjustment":
             suggest = "disable_replication_factor_adjustment"
         elif key == "gcsMigration":
@@ -17828,6 +17898,7 @@ class OpenSearchOpensearchUserConfig(dict):
                  additional_backup_regions: Optional[_builtins.str] = None,
                  azure_migration: Optional['outputs.OpenSearchOpensearchUserConfigAzureMigration'] = None,
                  custom_domain: Optional[_builtins.str] = None,
+                 custom_keystores: Optional[Sequence['outputs.OpenSearchOpensearchUserConfigCustomKeystore']] = None,
                  disable_replication_factor_adjustment: Optional[_builtins.bool] = None,
                  gcs_migration: Optional['outputs.OpenSearchOpensearchUserConfigGcsMigration'] = None,
                  index_patterns: Optional[Sequence['outputs.OpenSearchOpensearchUserConfigIndexPattern']] = None,
@@ -17856,6 +17927,7 @@ class OpenSearchOpensearchUserConfig(dict):
         :param _builtins.str additional_backup_regions: Additional Cloud Regions for Backup Replication.
         :param 'OpenSearchOpensearchUserConfigAzureMigrationArgs' azure_migration: Azure migration settings
         :param _builtins.str custom_domain: Serve the web frontend using a custom CNAME pointing to the Aiven DNS name. Example: `grafana.example.org`.
+        :param Sequence['OpenSearchOpensearchUserConfigCustomKeystoreArgs'] custom_keystores: Allow to register custom keystores in OpenSearch
         :param _builtins.bool disable_replication_factor_adjustment: Disable automatic replication factor adjustment for multi-node services. By default, Aiven ensures all indexes are replicated at least to two nodes. Note: Due to potential data loss in case of losing a service node, this setting can not be activated unless specifically allowed for the project.
         :param 'OpenSearchOpensearchUserConfigGcsMigrationArgs' gcs_migration: Google Cloud Storage migration settings
         :param Sequence['OpenSearchOpensearchUserConfigIndexPatternArgs'] index_patterns: Index patterns
@@ -17869,7 +17941,7 @@ class OpenSearchOpensearchUserConfig(dict):
         :param 'OpenSearchOpensearchUserConfigOpenidArgs' openid: OpenSearch OpenID Connect Configuration
         :param 'OpenSearchOpensearchUserConfigOpensearchArgs' opensearch: OpenSearch settings
         :param 'OpenSearchOpensearchUserConfigOpensearchDashboardsArgs' opensearch_dashboards: OpenSearch Dashboards settings
-        :param _builtins.str opensearch_version: Enum: `1`, `2`, and newer. OpenSearch
+        :param _builtins.str opensearch_version: Enum: `1`, `2`, `2.19`, and newer. OpenSearch version.
         :param 'OpenSearchOpensearchUserConfigPrivateAccessArgs' private_access: Allow access to selected service ports from private networks
         :param 'OpenSearchOpensearchUserConfigPrivatelinkAccessArgs' privatelink_access: Allow access to selected service components through Privatelink
         :param _builtins.str project_to_fork_from: Name of another project to fork a service from. This has effect only when a new service is being created. Example: `anotherprojectname`.
@@ -17887,6 +17959,8 @@ class OpenSearchOpensearchUserConfig(dict):
             pulumi.set(__self__, "azure_migration", azure_migration)
         if custom_domain is not None:
             pulumi.set(__self__, "custom_domain", custom_domain)
+        if custom_keystores is not None:
+            pulumi.set(__self__, "custom_keystores", custom_keystores)
         if disable_replication_factor_adjustment is not None:
             pulumi.set(__self__, "disable_replication_factor_adjustment", disable_replication_factor_adjustment)
         if gcs_migration is not None:
@@ -17960,6 +18034,14 @@ class OpenSearchOpensearchUserConfig(dict):
         """
         return pulumi.get(self, "custom_domain")

+    @_builtins.property
+    @pulumi.getter(name="customKeystores")
+    def custom_keystores(self) -> Optional[Sequence['outputs.OpenSearchOpensearchUserConfigCustomKeystore']]:
+        """
+        Allow to register custom keystores in OpenSearch
+        """
+        return pulumi.get(self, "custom_keystores")
+
     @_builtins.property
     @pulumi.getter(name="disableReplicationFactorAdjustment")
     def disable_replication_factor_adjustment(self) -> Optional[_builtins.bool]:
@@ -18069,7 +18151,7 @@ class OpenSearchOpensearchUserConfig(dict):
     @pulumi.getter(name="opensearchVersion")
    def opensearch_version(self) -> Optional[_builtins.str]:
         """
-        Enum: `1`, `2`, and newer. OpenSearch
+        Enum: `1`, `2`, `2.19`, and newer. OpenSearch version.
         """
         return pulumi.get(self, "opensearch_version")

@@ -18341,6 +18423,31 @@ class OpenSearchOpensearchUserConfigAzureMigration(dict):
         return pulumi.get(self, "sas_token")


+@pulumi.output_type
+class OpenSearchOpensearchUserConfigCustomKeystore(dict):
+    def __init__(__self__, *,
+                 name: _builtins.str,
+                 type: _builtins.str):
+        """
+        :param _builtins.str type: Enum: `azure`, `gcs`, `s3`.
+        """
+        pulumi.set(__self__, "name", name)
+        pulumi.set(__self__, "type", type)
+
+    @_builtins.property
+    @pulumi.getter
+    def name(self) -> _builtins.str:
+        return pulumi.get(self, "name")
+
+    @_builtins.property
+    @pulumi.getter
+    def type(self) -> _builtins.str:
+        """
+        Enum: `azure`, `gcs`, `s3`.
+        """
+        return pulumi.get(self, "type")
+
+
 @pulumi.output_type
 class OpenSearchOpensearchUserConfigGcsMigration(dict):
     @staticmethod
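With `OpenSearchOpensearchUserConfigCustomKeystore` in place, registering a keystore is a `name` plus a `type` from `azure`, `gcs`, or `s3`. A sketch, assuming the matching `custom_keystores` input list on the user config (the keystore name and service names are placeholders; any provider-specific credential fields are omitted here):

    import pulumi_aiven as aiven

    opensearch = aiven.OpenSearch(
        "example-opensearch",
        project="my-project",              # placeholder
        cloud_name="google-europe-west1",
        plan="business-4",
        service_name="example-opensearch",
        opensearch_user_config={
            "custom_keystores": [{
                "name": "snapshot-store",   # placeholder keystore name
                "type": "s3",               # one of: azure, gcs, s3
            }],
        },
    )
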
@@ -19134,11 +19241,11 @@ class OpenSearchOpensearchUserConfigOpensearch(dict):
         :param _builtins.bool ism_enabled: Specifies whether ISM is enabled or not.
         :param _builtins.bool ism_history_enabled: Specifies whether audit history is enabled or not. The logs from ISM are automatically indexed to a logs document.
         :param _builtins.int ism_history_max_age: The maximum age before rolling over the audit history index in hours. Example: `24`.
-        :param _builtins.int ism_history_max_docs: The maximum number of documents before rolling over the audit history index.
+        :param _builtins.int ism_history_max_docs: The maximum number of documents before rolling over the audit history index.
         :param _builtins.int ism_history_rollover_check_period: The time between rollover checks for the audit history index in hours. Example: `8`.
         :param _builtins.int ism_history_rollover_retention_period: How long audit history indices are kept in days. Example: `30`.
         :param _builtins.bool knn_memory_circuit_breaker_enabled: Enable or disable KNN memory circuit breaker. Defaults to true.
-        :param _builtins.int knn_memory_circuit_breaker_limit: Maximum amount of memory that can be used for KNN index. Defaults to 50% of the JVM heap size.
+        :param _builtins.int knn_memory_circuit_breaker_limit: Maximum amount of memory in percentage that can be used for the KNN index. Defaults to 50% of the JVM heap size. 0 is used to set it to null which can be used to invalidate caches.
         :param _builtins.str node_search_cache_size: Defines a limit of how much total remote data can be referenced as a ratio of the size of the disk reserved for the file cache. This is designed to be a safeguard to prevent oversubscribing a cluster. Defaults to 5gb. Requires restarting all OpenSearch nodes.
         :param _builtins.bool override_main_response_version: Compatibility mode sets OpenSearch to report its version as 7.10 so clients continue to work. Default is false.
         :param _builtins.bool plugins_alerting_filter_by_backend_roles: Enable or disable filtering of alerting by backend roles. Requires Security plugin. Defaults to false.
@@ -19523,7 +19630,7 @@ class OpenSearchOpensearchUserConfigOpensearch(dict):
     @pulumi.getter(name="ismHistoryMaxDocs")
     def ism_history_max_docs(self) -> Optional[_builtins.int]:
         """
-        The maximum number of documents before rolling over the audit history index.
+        The maximum number of documents before rolling over the audit history index.
         """
         return pulumi.get(self, "ism_history_max_docs")

@@ -19555,7 +19662,7 @@ class OpenSearchOpensearchUserConfigOpensearch(dict):
     @pulumi.getter(name="knnMemoryCircuitBreakerLimit")
     def knn_memory_circuit_breaker_limit(self) -> Optional[_builtins.int]:
         """
-        Maximum amount of memory that can be used for KNN index. Defaults to 50% of the JVM heap size.
+        Maximum amount of memory in percentage that can be used for the KNN index. Defaults to 50% of the JVM heap size. 0 is used to set it to null which can be used to invalidate caches.
         """
         return pulumi.get(self, "knn_memory_circuit_breaker_limit")

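The clarified `knn_memory_circuit_breaker_limit` is a percentage of the JVM heap, with `0` meaning null (which can be used to invalidate caches). A sketch of setting it in the `opensearch` sub-block (placeholder names):

    import pulumi_aiven as aiven

    opensearch = aiven.OpenSearch(
        "knn-tuned-opensearch",
        project="my-project",              # placeholder
        cloud_name="google-europe-west1",
        plan="business-8",
        service_name="knn-tuned-opensearch",
        opensearch_user_config={
            "opensearch": {
                "knn_memory_circuit_breaker_enabled": True,
                "knn_memory_circuit_breaker_limit": 60,  # percent of JVM heap
            },
        },
    )
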
@@ -21972,6 +22079,116 @@ class OrganizationAddressTimeouts(dict):
         return pulumi.get(self, "update")


+@pulumi.output_type
+class OrganizationApplicationUserTimeouts(dict):
+    def __init__(__self__, *,
+                 create: Optional[_builtins.str] = None,
+                 delete: Optional[_builtins.str] = None,
+                 read: Optional[_builtins.str] = None,
+                 update: Optional[_builtins.str] = None):
+        """
+        :param _builtins.str create: A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours).
+        :param _builtins.str delete: A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). Setting a timeout for a Delete operation is only applicable if changes are saved into state before the destroy operation occurs.
+        :param _builtins.str read: A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). Read operations occur during any refresh or planning operation when refresh is enabled.
+        :param _builtins.str update: A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours).
+        """
+        if create is not None:
+            pulumi.set(__self__, "create", create)
+        if delete is not None:
+            pulumi.set(__self__, "delete", delete)
+        if read is not None:
+            pulumi.set(__self__, "read", read)
+        if update is not None:
+            pulumi.set(__self__, "update", update)
+
+    @_builtins.property
+    @pulumi.getter
+    def create(self) -> Optional[_builtins.str]:
+        """
+        A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours).
+        """
+        return pulumi.get(self, "create")
+
+    @_builtins.property
+    @pulumi.getter
+    def delete(self) -> Optional[_builtins.str]:
+        """
+        A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). Setting a timeout for a Delete operation is only applicable if changes are saved into state before the destroy operation occurs.
+        """
+        return pulumi.get(self, "delete")
+
+    @_builtins.property
+    @pulumi.getter
+    def read(self) -> Optional[_builtins.str]:
+        """
+        A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). Read operations occur during any refresh or planning operation when refresh is enabled.
+        """
+        return pulumi.get(self, "read")
+
+    @_builtins.property
+    @pulumi.getter
+    def update(self) -> Optional[_builtins.str]:
+        """
+        A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours).
+        """
+        return pulumi.get(self, "update")
+
+
+@pulumi.output_type
+class OrganizationApplicationUserTokenTimeouts(dict):
+    def __init__(__self__, *,
+                 create: Optional[_builtins.str] = None,
+                 delete: Optional[_builtins.str] = None,
+                 read: Optional[_builtins.str] = None,
+                 update: Optional[_builtins.str] = None):
+        """
+        :param _builtins.str create: A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours).
+        :param _builtins.str delete: A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). Setting a timeout for a Delete operation is only applicable if changes are saved into state before the destroy operation occurs.
+        :param _builtins.str read: A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). Read operations occur during any refresh or planning operation when refresh is enabled.
+        :param _builtins.str update: A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours).
+        """
+        if create is not None:
+            pulumi.set(__self__, "create", create)
+        if delete is not None:
+            pulumi.set(__self__, "delete", delete)
+        if read is not None:
+            pulumi.set(__self__, "read", read)
+        if update is not None:
+            pulumi.set(__self__, "update", update)
+
+    @_builtins.property
+    @pulumi.getter
+    def create(self) -> Optional[_builtins.str]:
+        """
+        A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours).
+        """
+        return pulumi.get(self, "create")
+
+    @_builtins.property
+    @pulumi.getter
+    def delete(self) -> Optional[_builtins.str]:
+        """
+        A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). Setting a timeout for a Delete operation is only applicable if changes are saved into state before the destroy operation occurs.
+        """
+        return pulumi.get(self, "delete")
+
+    @_builtins.property
+    @pulumi.getter
+    def read(self) -> Optional[_builtins.str]:
+        """
+        A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). Read operations occur during any refresh or planning operation when refresh is enabled.
+        """
+        return pulumi.get(self, "read")
+
+    @_builtins.property
+    @pulumi.getter
+    def update(self) -> Optional[_builtins.str]:
+        """
+        A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours).
+        """
+        return pulumi.get(self, "update")
+
+
 @pulumi.output_type
 class OrganizationBillingGroupTimeouts(dict):
     def __init__(__self__, *,
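Both new timeout types take optional Go-style duration strings. A sketch of passing them when creating an application user token, assuming the resource exposes a `timeouts` input matching `OrganizationApplicationUserTokenTimeouts` (the IDs are placeholders):

    import pulumi_aiven as aiven

    token = aiven.OrganizationApplicationUserToken(
        "example-token",
        organization_id="org123456789a",   # placeholder organization ID
        user_id="u123456789abcdef",        # placeholder application user ID
        description="CI token",
        timeouts={                         # assumed input for the new timeouts type
            "create": "2m",
            "delete": "30s",
        },
    )
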
@@ -22114,11 +22331,11 @@ class OrganizationPermissionPermission(dict):
                  create_time: Optional[_builtins.str] = None,
                  update_time: Optional[_builtins.str] = None):
         """
-        :param Sequence[_builtins.str] permissions: List of [roles and permissions](https://aiven.io/docs/platform/concepts/permissions) to grant. The possible values are `admin`, `developer`, `operator`, `organization:app_users:write`, `organization:audit_logs:read`, `organization:billing:read`, `organization:billing:write`, `organization:domains:write`, `organization:groups:write`, `organization:networking:read`, `organization:networking:write`, `organization:projects:write`, `organization:users:write`, `project:audit_logs:read`, `project:integrations:read`, `project:integrations:write`, `project:networking:read`, `project:networking:write`, `project:permissions:read`, `project:services:read`, `project:services:write`, `read_only`, `role:organization:admin`, `role:services:maintenance`, `role:services:recover`, `service:configuration:write`, `service:data:write`, `service:logs:read`, `service:secrets:read` and `service:users:write`.
+        :param Sequence[_builtins.str] permissions: List of [roles and permissions](https://aiven.io/docs/platform/concepts/permissions) to grant". The possible values are `admin`, `developer`, `operator`, `organization:app_users:write`, `organization:audit_logs:read`, `organization:billing:read`, `organization:billing:write`, `organization:domains:write`, `organization:groups:write`, `organization:networking:read`, `organization:networking:write`, `organization:projects:write`, `organization:users:write`, `project:audit_logs:read`, `project:integrations:read`, `project:integrations:write`, `project:networking:read`, `project:networking:write`, `project:permissions:read`, `project:services:read`, `project:services:write`, `read_only`, `role:organization:admin`, `role:services:maintenance`, `role:services:recover`, `service:configuration:write`, `service:data:write`, `service:logs:read`, `service:secrets:read` and `service:users:write`.
         :param _builtins.str principal_id: ID of the user or group to grant permissions to. Only active users who have accepted an [invite](https://aiven.io/docs/platform/howto/manage-org-users) to join the organization can be granted permissions.
-        :param _builtins.str principal_type:
-        :param _builtins.str create_time: Time
-        :param _builtins.str update_time: Time
+        :param _builtins.str principal_type: An enumeration. The possible values are `user` and `user_group`.
+        :param _builtins.str create_time: Create Time.
+        :param _builtins.str update_time: Update Time.
         """
         pulumi.set(__self__, "permissions", permissions)
         pulumi.set(__self__, "principal_id", principal_id)
@@ -22132,7 +22349,7 @@ class OrganizationPermissionPermission(dict):
     @pulumi.getter
     def permissions(self) -> Sequence[_builtins.str]:
         """
-        List of [roles and permissions](https://aiven.io/docs/platform/concepts/permissions) to grant. The possible values are `admin`, `developer`, `operator`, `organization:app_users:write`, `organization:audit_logs:read`, `organization:billing:read`, `organization:billing:write`, `organization:domains:write`, `organization:groups:write`, `organization:networking:read`, `organization:networking:write`, `organization:projects:write`, `organization:users:write`, `project:audit_logs:read`, `project:integrations:read`, `project:integrations:write`, `project:networking:read`, `project:networking:write`, `project:permissions:read`, `project:services:read`, `project:services:write`, `read_only`, `role:organization:admin`, `role:services:maintenance`, `role:services:recover`, `service:configuration:write`, `service:data:write`, `service:logs:read`, `service:secrets:read` and `service:users:write`.
+        List of [roles and permissions](https://aiven.io/docs/platform/concepts/permissions) to grant". The possible values are `admin`, `developer`, `operator`, `organization:app_users:write`, `organization:audit_logs:read`, `organization:billing:read`, `organization:billing:write`, `organization:domains:write`, `organization:groups:write`, `organization:networking:read`, `organization:networking:write`, `organization:projects:write`, `organization:users:write`, `project:audit_logs:read`, `project:integrations:read`, `project:integrations:write`, `project:networking:read`, `project:networking:write`, `project:permissions:read`, `project:services:read`, `project:services:write`, `read_only`, `role:organization:admin`, `role:services:maintenance`, `role:services:recover`, `service:configuration:write`, `service:data:write`, `service:logs:read`, `service:secrets:read` and `service:users:write`.
         """
         return pulumi.get(self, "permissions")

@@ -22148,7 +22365,7 @@ class OrganizationPermissionPermission(dict):
     @pulumi.getter(name="principalType")
     def principal_type(self) -> _builtins.str:
         """
-
+        An enumeration. The possible values are `user` and `user_group`.
         """
         return pulumi.get(self, "principal_type")

@@ -22156,7 +22373,7 @@ class OrganizationPermissionPermission(dict):
     @pulumi.getter(name="createTime")
     def create_time(self) -> Optional[_builtins.str]:
         """
-        Time
+        Create Time.
         """
         return pulumi.get(self, "create_time")

@@ -22164,11 +22381,66 @@ class OrganizationPermissionPermission(dict):
     @pulumi.getter(name="updateTime")
     def update_time(self) -> Optional[_builtins.str]:
         """
-        Time
+        Update Time.
         """
         return pulumi.get(self, "update_time")


+@pulumi.output_type
+class OrganizationPermissionTimeouts(dict):
+    def __init__(__self__, *,
+                 create: Optional[_builtins.str] = None,
+                 delete: Optional[_builtins.str] = None,
+                 read: Optional[_builtins.str] = None,
+                 update: Optional[_builtins.str] = None):
+        """
+        :param _builtins.str create: A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours).
+        :param _builtins.str delete: A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). Setting a timeout for a Delete operation is only applicable if changes are saved into state before the destroy operation occurs.
+        :param _builtins.str read: A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). Read operations occur during any refresh or planning operation when refresh is enabled.
+        :param _builtins.str update: A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours).
+        """
+        if create is not None:
+            pulumi.set(__self__, "create", create)
+        if delete is not None:
+            pulumi.set(__self__, "delete", delete)
+        if read is not None:
+            pulumi.set(__self__, "read", read)
+        if update is not None:
+            pulumi.set(__self__, "update", update)
+
+    @_builtins.property
+    @pulumi.getter
+    def create(self) -> Optional[_builtins.str]:
+        """
+        A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours).
+        """
+        return pulumi.get(self, "create")
+
+    @_builtins.property
+    @pulumi.getter
+    def delete(self) -> Optional[_builtins.str]:
+        """
+        A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). Setting a timeout for a Delete operation is only applicable if changes are saved into state before the destroy operation occurs.
+        """
+        return pulumi.get(self, "delete")
+
+    @_builtins.property
+    @pulumi.getter
+    def read(self) -> Optional[_builtins.str]:
+        """
+        A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). Read operations occur during any refresh or planning operation when refresh is enabled.
+        """
+        return pulumi.get(self, "read")
+
+    @_builtins.property
+    @pulumi.getter
+    def update(self) -> Optional[_builtins.str]:
+        """
+        A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours).
+        """
+        return pulumi.get(self, "update")
+
+
 @pulumi.output_type
 class OrganizationProjectTag(dict):
     def __init__(__self__, *,
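A sketch tying the documented pieces together: `principal_type` is `user` or `user_group`, and the resource presumably now also accepts the new timeouts (the `timeouts` input and all IDs are placeholders/assumptions):

    import pulumi_aiven as aiven

    grant = aiven.OrganizationPermission(
        "example-permission",
        organization_id="org123456789a",         # placeholder
        resource_id="my-project",                # placeholder project name
        resource_type="project",
        permissions=[{
            "principal_id": "u123456789abcdef",  # placeholder user ID
            "principal_type": "user",            # or "user_group"
            "permissions": ["developer", "service:logs:read"],
        }],
        timeouts={"create": "2m"},               # assumed input for OrganizationPermissionTimeouts
    )
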
@@ -23587,7 +23859,7 @@ class PgPgUserConfigPg(dict):
|
|
|
23587
23859
|
"""
|
|
23588
23860
|
:param _builtins.float autovacuum_analyze_scale_factor: Specifies a fraction of the table size to add to autovacuum*analyze*threshold when deciding whether to trigger an ANALYZE (e.g. `0.2` for 20% of the table size). The default is `0.2`.
|
|
23589
23861
|
:param _builtins.int autovacuum_analyze_threshold: Specifies the minimum number of inserted, updated or deleted tuples needed to trigger an ANALYZE in any one table. The default is `50`.
|
|
23590
|
-
:param _builtins.int autovacuum_freeze_max_age: Specifies the maximum age (in transactions) that a table's pg_class.relfrozenxid field can attain before a VACUUM operation is forced to prevent transaction ID wraparound within the table. The system launches autovacuum processes to prevent wraparound even when autovacuum is otherwise disabled. Changing this parameter causes a service restart.
|
|
23862
|
+
:param _builtins.int autovacuum_freeze_max_age: Specifies the maximum age (in transactions) that a table's pg_class.relfrozenxid field can attain before a VACUUM operation is forced to prevent transaction ID wraparound within the table. The system launches autovacuum processes to prevent wraparound even when autovacuum is otherwise disabled. Changing this parameter causes a service restart.
|
|
23591
23863
|
:param _builtins.int autovacuum_max_workers: Specifies the maximum number of autovacuum processes (other than the autovacuum launcher) that may be running at any one time. The default is `3`. Changing this parameter causes a service restart.
|
|
23592
23864
|
:param _builtins.int autovacuum_naptime: Specifies the minimum delay between autovacuum runs on any given database. The delay is measured in seconds. The default is `60`.
|
|
23593
23865
|
:param _builtins.int autovacuum_vacuum_cost_delay: Specifies the cost delay value that will be used in automatic VACUUM operations. If `-1` is specified, the regular vacuum*cost*delay value will be used. The default is `2` (upstream default).
|
|
@@ -23597,7 +23869,7 @@ class PgPgUserConfigPg(dict):
|
|
|
23597
23869
|
:param _builtins.int bgwriter_delay: Specifies the delay between activity rounds for the background writer in milliseconds. The default is `200`. Example: `200`.
|
|
23598
23870
|
:param _builtins.int bgwriter_flush_after: Whenever more than bgwriter*flush*after bytes have been written by the background writer, attempt to force the OS to issue these writes to the underlying storage. Specified in kilobytes. Setting of 0 disables forced writeback. The default is `512`. Example: `512`.
|
|
23599
23871
|
:param _builtins.int bgwriter_lru_maxpages: In each round, no more than this many buffers will be written by the background writer. Setting this to zero disables background writing. The default is `100`. Example: `100`.
|
|
23600
|
-
```diff
-        :param _builtins.float bgwriter_lru_multiplier: The average recent need for new buffers is multiplied by bgwriter_lru_multiplier to arrive at an estimate of the number that will be needed during the next round, (up to bgwriter_lru_maxpages). 1.0 represents a “just in time” policy of writing exactly the number of buffers predicted to be needed. Larger values provide some cushion against spikes in demand, while smaller values intentionally leave writes to be done by server processes. The default is `2.0`. Example: `2
+        :param _builtins.float bgwriter_lru_multiplier: The average recent need for new buffers is multiplied by bgwriter_lru_multiplier to arrive at an estimate of the number that will be needed during the next round, (up to bgwriter_lru_maxpages). 1.0 represents a “just in time” policy of writing exactly the number of buffers predicted to be needed. Larger values provide some cushion against spikes in demand, while smaller values intentionally leave writes to be done by server processes. The default is `2.0`. Example: `2`.
         :param _builtins.int deadlock_timeout: This is the amount of time, in milliseconds, to wait on a lock before checking to see if there is a deadlock condition. The default is `1000` (upstream default). Example: `1000`.
         :param _builtins.str default_toast_compression: Enum: `lz4`, `pglz`. Specifies the default TOAST compression method for values of compressible columns. The default is `lz4`. Only available for PostgreSQL 14+.
         :param _builtins.int idle_in_transaction_session_timeout: Time out sessions with open transactions after this number of milliseconds.
@@ -23629,7 +23901,7 @@ class PgPgUserConfigPg(dict):
         :param _builtins.bool pg_stat_monitor_dot_pgsm_enable_query_plan: Enables or disables query plan monitoring. Changing this parameter causes a service restart. Only available for PostgreSQL 13+.
         :param _builtins.int pg_stat_monitor_dot_pgsm_max_buckets: Sets the maximum number of buckets. Changing this parameter causes a service restart. Only available for PostgreSQL 13+. Example: `10`.
         :param _builtins.str pg_stat_statements_dot_track: Enum: `all`, `none`, `top`. Controls which statements are counted. Specify top to track top-level statements (those issued directly by clients), all to also track nested statements (such as statements invoked within functions), or none to disable statement statistics collection. The default is `top`.
-        :param _builtins.int temp_file_limit: PostgreSQL temporary file limit in KiB, -1 for unlimited.
+        :param _builtins.int temp_file_limit: PostgreSQL temporary file limit in KiB, -1 for unlimited.
         :param _builtins.str timezone: PostgreSQL service timezone. Example: `Europe/Helsinki`.
         :param _builtins.int track_activity_query_size: Specifies the number of bytes reserved to track the currently executing command for each active session. Changing this parameter causes a service restart. Example: `1024`.
         :param _builtins.str track_commit_timestamp: Enum: `off`, `on`. Record commit time of transactions. Changing this parameter causes a service restart.
@@ -23763,7 +24035,7 @@ class PgPgUserConfigPg(dict):
     @pulumi.getter(name="autovacuumFreezeMaxAge")
     def autovacuum_freeze_max_age(self) -> Optional[_builtins.int]:
         """
-        Specifies the maximum age (in transactions) that a table's pg_class.relfrozenxid field can attain before a VACUUM operation is forced to prevent transaction ID wraparound within the table. The system launches autovacuum processes to prevent wraparound even when autovacuum is otherwise disabled. Changing this parameter causes a service restart.
+        Specifies the maximum age (in transactions) that a table's pg_class.relfrozenxid field can attain before a VACUUM operation is forced to prevent transaction ID wraparound within the table. The system launches autovacuum processes to prevent wraparound even when autovacuum is otherwise disabled. Changing this parameter causes a service restart.
         """
         return pulumi.get(self, "autovacuum_freeze_max_age")

@@ -23843,7 +24115,7 @@ class PgPgUserConfigPg(dict):
     @pulumi.getter(name="bgwriterLruMultiplier")
     def bgwriter_lru_multiplier(self) -> Optional[_builtins.float]:
         """
-        The average recent need for new buffers is multiplied by bgwriter_lru_multiplier to arrive at an estimate of the number that will be needed during the next round, (up to bgwriter_lru_maxpages). 1.0 represents a “just in time” policy of writing exactly the number of buffers predicted to be needed. Larger values provide some cushion against spikes in demand, while smaller values intentionally leave writes to be done by server processes. The default is `2.0`. Example: `2
+        The average recent need for new buffers is multiplied by bgwriter_lru_multiplier to arrive at an estimate of the number that will be needed during the next round, (up to bgwriter_lru_maxpages). 1.0 represents a “just in time” policy of writing exactly the number of buffers predicted to be needed. Larger values provide some cushion against spikes in demand, while smaller values intentionally leave writes to be done by server processes. The default is `2.0`. Example: `2`.
         """
         return pulumi.get(self, "bgwriter_lru_multiplier")

@@ -24099,7 +24371,7 @@ class PgPgUserConfigPg(dict):
     @pulumi.getter(name="tempFileLimit")
     def temp_file_limit(self) -> Optional[_builtins.int]:
         """
-        PostgreSQL temporary file limit in KiB, -1 for unlimited.
+        PostgreSQL temporary file limit in KiB, -1 for unlimited.
         """
         return pulumi.get(self, "temp_file_limit")

```
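For orientation, the `PgPgUserConfigPg` fields above are set through the `pg` block of a PostgreSQL service's user config. A minimal sketch, assuming placeholder project, cloud, plan, and service names:

```python
import pulumi_aiven as aiven

# Minimal sketch: tune the background-writer and temp-file parameters
# documented above on a managed PostgreSQL service. Project, cloud,
# plan, and service names are placeholders.
pg = aiven.Pg(
    "example-pg",
    project="my-project",
    cloud_name="google-europe-west1",
    plan="startup-4",
    service_name="example-pg",
    pg_user_config=aiven.PgPgUserConfigArgs(
        pg=aiven.PgPgUserConfigPgArgs(
            bgwriter_lru_multiplier=2.0,      # the "just in time" default
            temp_file_limit=5000000,          # KiB; -1 means unlimited
            deadlock_timeout=1000,            # ms to wait before deadlock check
            default_toast_compression="lz4",  # PostgreSQL 14+ only
        ),
    ),
)
```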
```diff
@@ -25957,7 +26229,7 @@ class ServiceIntegrationClickhouseKafkaUserConfig(dict):
     def __init__(__self__, *,
                  tables: Optional[Sequence['outputs.ServiceIntegrationClickhouseKafkaUserConfigTable']] = None):
         """
-        :param Sequence['ServiceIntegrationClickhouseKafkaUserConfigTableArgs'] tables:
+        :param Sequence['ServiceIntegrationClickhouseKafkaUserConfigTableArgs'] tables: Array of table configurations that define how Kafka topics are mapped to ClickHouse tables. Each table configuration specifies the table structure, associated Kafka topics, and read/write settings
         """
         if tables is not None:
             pulumi.set(__self__, "tables", tables)
@@ -25966,7 +26238,7 @@ class ServiceIntegrationClickhouseKafkaUserConfig(dict):
     @pulumi.getter
     def tables(self) -> Optional[Sequence['outputs.ServiceIntegrationClickhouseKafkaUserConfigTable']]:
         """
-
+        Array of table configurations that define how Kafka topics are mapped to ClickHouse tables. Each table configuration specifies the table structure, associated Kafka topics, and read/write settings
         """
         return pulumi.get(self, "tables")

@@ -26053,29 +26325,29 @@ class ServiceIntegrationClickhouseKafkaUserConfigTable(dict):
                  skip_broken_messages: Optional[_builtins.int] = None,
                  thread_per_consumer: Optional[_builtins.bool] = None):
         """
-        :param Sequence['ServiceIntegrationClickhouseKafkaUserConfigTableColumnArgs'] columns:
-        :param _builtins.str data_format: Enum: `Avro`, `AvroConfluent`, `CSV`, `JSONAsString`, `JSONCompactEachRow`, `JSONCompactStringsEachRow`, `JSONEachRow`, `JSONStringsEachRow`, `MsgPack`, `Parquet`, `RawBLOB`, `TSKV`, `TSV`, `TabSeparated`.
-        :param _builtins.str group_name: Kafka consumers group. Default: `clickhouse`.
-        :param _builtins.str name:
-        :param Sequence['ServiceIntegrationClickhouseKafkaUserConfigTableTopicArgs'] topics: Kafka topics
-        :param _builtins.str auto_offset_reset: Enum: `beginning`, `earliest`, `end`, `largest`, `latest`, `smallest`.
-        :param _builtins.str date_time_input_format: Enum: `basic`, `best_effort`, `best_effort_us`.
-        :param _builtins.str handle_error_mode: Enum: `default`, `stream`.
-        :param _builtins.int max_block_size:
-        :param _builtins.int max_rows_per_message:
-        :param _builtins.int num_consumers:
-        :param _builtins.int poll_max_batch_size: Maximum
+        :param Sequence['ServiceIntegrationClickhouseKafkaUserConfigTableColumnArgs'] columns: Array of column definitions that specify the structure of the ClickHouse table. Each column maps to a field in the Kafka messages
+        :param _builtins.str data_format: Enum: `Avro`, `AvroConfluent`, `CSV`, `JSONAsString`, `JSONCompactEachRow`, `JSONCompactStringsEachRow`, `JSONEachRow`, `JSONStringsEachRow`, `MsgPack`, `Parquet`, `RawBLOB`, `TSKV`, `TSV`, `TabSeparated`. The format of the messages in the Kafka topics. Determines how ClickHouse parses and serializes the data (e.g., JSON, CSV, Avro). Default: `JSONEachRow`.
+        :param _builtins.str group_name: The Kafka consumer group name. Multiple consumers with the same group name will share the workload and maintain offset positions. Default: `clickhouse`.
+        :param _builtins.str name: The name of the ClickHouse table to be created. This table can consume data from and write data to the specified Kafka topics. Example: `events`.
+        :param Sequence['ServiceIntegrationClickhouseKafkaUserConfigTableTopicArgs'] topics: Array of Kafka topics that this table will read data from or write data to. Messages from all specified topics will be inserted into this table, and data inserted into this table will be published to the topics
+        :param _builtins.str auto_offset_reset: Enum: `beginning`, `earliest`, `end`, `largest`, `latest`, `smallest`. Determines where to start reading from Kafka when no offset is stored or the stored offset is out of range. `earliest` starts from the beginning, `latest` starts from the end. Default: `earliest`.
+        :param _builtins.str date_time_input_format: Enum: `basic`, `best_effort`, `best_effort_us`. Specifies how ClickHouse should parse DateTime values from text-based input formats. `basic` uses simple parsing, `best_effort` attempts more flexible parsing. Default: `basic`.
+        :param _builtins.str handle_error_mode: Enum: `default`, `stream`. Defines how ClickHouse should handle errors when processing Kafka messages. `default` stops on errors, `stream` continues processing and logs errors. Default: `default`.
+        :param _builtins.int max_block_size: Maximum number of rows to collect before flushing data between Kafka and ClickHouse. Default: `0`.
+        :param _builtins.int max_rows_per_message: Maximum number of rows that can be processed from a single Kafka message for row-based formats. Useful for controlling memory usage. Default: `1`.
+        :param _builtins.int num_consumers: Number of Kafka consumers to run per table per replica. Increasing this can improve throughput but may increase resource usage. Default: `1`.
+        :param _builtins.int poll_max_batch_size: Maximum number of messages to fetch in a single Kafka poll operation for reading. Default: `0`.
         :param _builtins.int poll_max_timeout_ms: Timeout in milliseconds for a single poll from Kafka. Takes the value of the stream_flush_interval_ms server setting by default (500ms). Default: `0`.
         :param _builtins.int producer_batch_num_messages: The maximum number of messages in a batch sent to Kafka. If the number of messages exceeds this value, the batch is sent. Default: `10000`.
-        :param _builtins.int producer_batch_size: The maximum size in bytes of a batch of messages sent to Kafka. If the batch size is exceeded, the batch is sent.
+        :param _builtins.int producer_batch_size: The maximum size in bytes of a batch of messages sent to Kafka. If the batch size is exceeded, the batch is sent.
         :param _builtins.str producer_compression_codec: Enum: `gzip`, `lz4`, `none`, `snappy`, `zstd`. The compression codec to use when sending a batch of messages to Kafka. Default: `none`.
         :param _builtins.int producer_compression_level: The compression level to use when sending a batch of messages to Kafka. Usable range is algorithm-dependent: [0-9] for gzip; [0-12] for lz4; only 0 for snappy; -1 = codec-dependent default compression level. Default: `-1`.
         :param _builtins.int producer_linger_ms: The time in milliseconds to wait for additional messages before sending a batch. If the time is exceeded, the batch is sent. Default: `5`.
-        :param _builtins.int producer_queue_buffering_max_kbytes: The maximum size of the buffer in kilobytes before sending.
+        :param _builtins.int producer_queue_buffering_max_kbytes: The maximum size of the buffer in kilobytes before sending.
         :param _builtins.int producer_queue_buffering_max_messages: The maximum number of messages to buffer before sending. Default: `100000`.
         :param _builtins.int producer_request_required_acks: The number of acknowledgements the leader broker must receive from ISR brokers before responding to the request: 0=Broker does not send any response/ack to client, -1 will block until message is committed by all in sync replicas (ISRs). Default: `-1`.
-        :param _builtins.int skip_broken_messages:
-        :param _builtins.bool thread_per_consumer:
+        :param _builtins.int skip_broken_messages: Number of broken messages to skip before stopping processing when reading from Kafka. Useful for handling corrupted data without failing the entire integration. Default: `0`.
+        :param _builtins.bool thread_per_consumer: When enabled, each consumer runs in its own thread, providing better isolation and potentially better performance for high-throughput scenarios. Default: `false`.
         """
         pulumi.set(__self__, "columns", columns)
         pulumi.set(__self__, "data_format", data_format)
@@ -26123,7 +26395,7 @@ class ServiceIntegrationClickhouseKafkaUserConfigTable(dict):
     @pulumi.getter
     def columns(self) -> Sequence['outputs.ServiceIntegrationClickhouseKafkaUserConfigTableColumn']:
         """
-
+        Array of column definitions that specify the structure of the ClickHouse table. Each column maps to a field in the Kafka messages
         """
         return pulumi.get(self, "columns")

@@ -26131,7 +26403,7 @@ class ServiceIntegrationClickhouseKafkaUserConfigTable(dict):
     @pulumi.getter(name="dataFormat")
     def data_format(self) -> _builtins.str:
         """
-        Enum: `Avro`, `AvroConfluent`, `CSV`, `JSONAsString`, `JSONCompactEachRow`, `JSONCompactStringsEachRow`, `JSONEachRow`, `JSONStringsEachRow`, `MsgPack`, `Parquet`, `RawBLOB`, `TSKV`, `TSV`, `TabSeparated`.
+        Enum: `Avro`, `AvroConfluent`, `CSV`, `JSONAsString`, `JSONCompactEachRow`, `JSONCompactStringsEachRow`, `JSONEachRow`, `JSONStringsEachRow`, `MsgPack`, `Parquet`, `RawBLOB`, `TSKV`, `TSV`, `TabSeparated`. The format of the messages in the Kafka topics. Determines how ClickHouse parses and serializes the data (e.g., JSON, CSV, Avro). Default: `JSONEachRow`.
         """
         return pulumi.get(self, "data_format")

@@ -26139,7 +26411,7 @@ class ServiceIntegrationClickhouseKafkaUserConfigTable(dict):
     @pulumi.getter(name="groupName")
     def group_name(self) -> _builtins.str:
         """
-        Kafka consumers group. Default: `clickhouse`.
+        The Kafka consumer group name. Multiple consumers with the same group name will share the workload and maintain offset positions. Default: `clickhouse`.
         """
         return pulumi.get(self, "group_name")

@@ -26147,7 +26419,7 @@ class ServiceIntegrationClickhouseKafkaUserConfigTable(dict):
     @pulumi.getter
     def name(self) -> _builtins.str:
         """
-
+        The name of the ClickHouse table to be created. This table can consume data from and write data to the specified Kafka topics. Example: `events`.
         """
         return pulumi.get(self, "name")

@@ -26155,7 +26427,7 @@ class ServiceIntegrationClickhouseKafkaUserConfigTable(dict):
     @pulumi.getter
     def topics(self) -> Sequence['outputs.ServiceIntegrationClickhouseKafkaUserConfigTableTopic']:
         """
-        Kafka topics
+        Array of Kafka topics that this table will read data from or write data to. Messages from all specified topics will be inserted into this table, and data inserted into this table will be published to the topics
        """
         return pulumi.get(self, "topics")

@@ -26163,7 +26435,7 @@ class ServiceIntegrationClickhouseKafkaUserConfigTable(dict):
     @pulumi.getter(name="autoOffsetReset")
     def auto_offset_reset(self) -> Optional[_builtins.str]:
         """
-        Enum: `beginning`, `earliest`, `end`, `largest`, `latest`, `smallest`.
+        Enum: `beginning`, `earliest`, `end`, `largest`, `latest`, `smallest`. Determines where to start reading from Kafka when no offset is stored or the stored offset is out of range. `earliest` starts from the beginning, `latest` starts from the end. Default: `earliest`.
         """
         return pulumi.get(self, "auto_offset_reset")

@@ -26171,7 +26443,7 @@ class ServiceIntegrationClickhouseKafkaUserConfigTable(dict):
     @pulumi.getter(name="dateTimeInputFormat")
     def date_time_input_format(self) -> Optional[_builtins.str]:
         """
-        Enum: `basic`, `best_effort`, `best_effort_us`.
+        Enum: `basic`, `best_effort`, `best_effort_us`. Specifies how ClickHouse should parse DateTime values from text-based input formats. `basic` uses simple parsing, `best_effort` attempts more flexible parsing. Default: `basic`.
         """
         return pulumi.get(self, "date_time_input_format")

@@ -26179,7 +26451,7 @@ class ServiceIntegrationClickhouseKafkaUserConfigTable(dict):
     @pulumi.getter(name="handleErrorMode")
     def handle_error_mode(self) -> Optional[_builtins.str]:
         """
-        Enum: `default`, `stream`.
+        Enum: `default`, `stream`. Defines how ClickHouse should handle errors when processing Kafka messages. `default` stops on errors, `stream` continues processing and logs errors. Default: `default`.
         """
         return pulumi.get(self, "handle_error_mode")

@@ -26187,7 +26459,7 @@ class ServiceIntegrationClickhouseKafkaUserConfigTable(dict):
     @pulumi.getter(name="maxBlockSize")
     def max_block_size(self) -> Optional[_builtins.int]:
         """
-
+        Maximum number of rows to collect before flushing data between Kafka and ClickHouse. Default: `0`.
         """
         return pulumi.get(self, "max_block_size")

@@ -26195,7 +26467,7 @@ class ServiceIntegrationClickhouseKafkaUserConfigTable(dict):
     @pulumi.getter(name="maxRowsPerMessage")
     def max_rows_per_message(self) -> Optional[_builtins.int]:
         """
-
+        Maximum number of rows that can be processed from a single Kafka message for row-based formats. Useful for controlling memory usage. Default: `1`.
         """
         return pulumi.get(self, "max_rows_per_message")

@@ -26203,7 +26475,7 @@ class ServiceIntegrationClickhouseKafkaUserConfigTable(dict):
     @pulumi.getter(name="numConsumers")
     def num_consumers(self) -> Optional[_builtins.int]:
         """
-
+        Number of Kafka consumers to run per table per replica. Increasing this can improve throughput but may increase resource usage. Default: `1`.
         """
         return pulumi.get(self, "num_consumers")

@@ -26211,7 +26483,7 @@ class ServiceIntegrationClickhouseKafkaUserConfigTable(dict):
     @pulumi.getter(name="pollMaxBatchSize")
     def poll_max_batch_size(self) -> Optional[_builtins.int]:
         """
-        Maximum
+        Maximum number of messages to fetch in a single Kafka poll operation for reading. Default: `0`.
         """
         return pulumi.get(self, "poll_max_batch_size")

@@ -26235,7 +26507,7 @@ class ServiceIntegrationClickhouseKafkaUserConfigTable(dict):
     @pulumi.getter(name="producerBatchSize")
     def producer_batch_size(self) -> Optional[_builtins.int]:
         """
-        The maximum size in bytes of a batch of messages sent to Kafka. If the batch size is exceeded, the batch is sent.
+        The maximum size in bytes of a batch of messages sent to Kafka. If the batch size is exceeded, the batch is sent.
         """
         return pulumi.get(self, "producer_batch_size")

@@ -26267,7 +26539,7 @@ class ServiceIntegrationClickhouseKafkaUserConfigTable(dict):
     @pulumi.getter(name="producerQueueBufferingMaxKbytes")
     def producer_queue_buffering_max_kbytes(self) -> Optional[_builtins.int]:
         """
-        The maximum size of the buffer in kilobytes before sending.
+        The maximum size of the buffer in kilobytes before sending.
         """
         return pulumi.get(self, "producer_queue_buffering_max_kbytes")

@@ -26291,7 +26563,7 @@ class ServiceIntegrationClickhouseKafkaUserConfigTable(dict):
     @pulumi.getter(name="skipBrokenMessages")
     def skip_broken_messages(self) -> Optional[_builtins.int]:
         """
-
+        Number of broken messages to skip before stopping processing when reading from Kafka. Useful for handling corrupted data without failing the entire integration. Default: `0`.
         """
         return pulumi.get(self, "skip_broken_messages")

@@ -26299,7 +26571,7 @@ class ServiceIntegrationClickhouseKafkaUserConfigTable(dict):
     @pulumi.getter(name="threadPerConsumer")
     def thread_per_consumer(self) -> Optional[_builtins.bool]:
         """
-
+        When enabled, each consumer runs in its own thread, providing better isolation and potentially better performance for high-throughput scenarios. Default: `false`.
         """
         return pulumi.get(self, "thread_per_consumer")

@@ -26310,8 +26582,8 @@ class ServiceIntegrationClickhouseKafkaUserConfigTableColumn(dict):
                  name: _builtins.str,
                  type: _builtins.str):
         """
-        :param _builtins.str name:
-        :param _builtins.str type:
+        :param _builtins.str name: The name of the column in the ClickHouse table. This should match the field names in your Kafka message format. Example: `key`.
+        :param _builtins.str type: The ClickHouse data type for this column. Must be a valid ClickHouse data type that can handle the data format. Example: `UInt64`.
         """
         pulumi.set(__self__, "name", name)
         pulumi.set(__self__, "type", type)
@@ -26320,7 +26592,7 @@ class ServiceIntegrationClickhouseKafkaUserConfigTableColumn(dict):
     @pulumi.getter
     def name(self) -> _builtins.str:
         """
-
+        The name of the column in the ClickHouse table. This should match the field names in your Kafka message format. Example: `key`.
         """
         return pulumi.get(self, "name")

@@ -26328,7 +26600,7 @@ class ServiceIntegrationClickhouseKafkaUserConfigTableColumn(dict):
     @pulumi.getter
     def type(self) -> _builtins.str:
         """
-
+        The ClickHouse data type for this column. Must be a valid ClickHouse data type that can handle the data format. Example: `UInt64`.
         """
         return pulumi.get(self, "type")

@@ -26338,7 +26610,7 @@ class ServiceIntegrationClickhouseKafkaUserConfigTableTopic(dict):
     def __init__(__self__, *,
                  name: _builtins.str):
         """
-        :param _builtins.str name:
+        :param _builtins.str name: The name of the Kafka topic to read messages from or write messages to. The topic must exist in the Kafka cluster. Example: `topic_name`.
         """
         pulumi.set(__self__, "name", name)

@@ -26346,7 +26618,7 @@ class ServiceIntegrationClickhouseKafkaUserConfigTableTopic(dict):
     @pulumi.getter
     def name(self) -> _builtins.str:
         """
-
+        The name of the Kafka topic to read messages from or write messages to. The topic must exist in the Kafka cluster. Example: `topic_name`.
         """
         return pulumi.get(self, "name")

```
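The table, column, and topic shapes documented above nest inside the `clickhouse_kafka` integration's user config. A minimal sketch, assuming existing Kafka and ClickHouse services with placeholder names:

```python
import pulumi_aiven as aiven

# Minimal sketch: map one Kafka topic onto a ClickHouse table using the
# table/column/topic shapes documented above. Project and service names
# are placeholders for services that already exist.
integration = aiven.ServiceIntegration(
    "clickhouse-kafka",
    project="my-project",
    integration_type="clickhouse_kafka",
    source_service_name="example-kafka",
    destination_service_name="example-clickhouse",
    clickhouse_kafka_user_config=aiven.ServiceIntegrationClickhouseKafkaUserConfigArgs(
        tables=[aiven.ServiceIntegrationClickhouseKafkaUserConfigTableArgs(
            name="events",
            group_name="clickhouse",       # consumers sharing this name share the workload
            data_format="JSONEachRow",     # how messages are parsed and serialized
            auto_offset_reset="earliest",  # read from the beginning when no offset is stored
            columns=[
                aiven.ServiceIntegrationClickhouseKafkaUserConfigTableColumnArgs(
                    name="key", type="UInt64"),
                aiven.ServiceIntegrationClickhouseKafkaUserConfigTableColumnArgs(
                    name="value", type="String"),
            ],
            topics=[aiven.ServiceIntegrationClickhouseKafkaUserConfigTableTopicArgs(
                name="topic_name")],
        )],
    ),
)
```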
```diff
@@ -27304,7 +27576,7 @@ class ServiceIntegrationEndpointExternalElasticsearchLogsUserConfig(dict):
             -----END CERTIFICATE-----
             `.
         :param _builtins.int index_days_max: Maximum number of days of logs to keep. Default: `3`.
-        :param _builtins.float timeout: Elasticsearch request timeout limit. Default: `10
+        :param _builtins.float timeout: Elasticsearch request timeout limit. Default: `10`.
         """
         pulumi.set(__self__, "index_prefix", index_prefix)
         pulumi.set(__self__, "url", url)
@@ -27354,7 +27626,7 @@ class ServiceIntegrationEndpointExternalElasticsearchLogsUserConfig(dict):
     @pulumi.getter
     def timeout(self) -> Optional[_builtins.float]:
         """
-        Elasticsearch request timeout limit. Default: `10
+        Elasticsearch request timeout limit. Default: `10`.
         """
         return pulumi.get(self, "timeout")

```
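This change simply terminates the previously truncated `Default: `10`` in the `timeout` docs. A minimal sketch of the Elasticsearch endpoint (the OpenSearch variant below takes the same shape); the user-config Args class name follows the output class above, and the URL and index prefix are placeholders:

```python
import pulumi_aiven as aiven

# Minimal sketch: an external Elasticsearch logs endpoint with the
# timeout and retention fields documented above. URL and index prefix
# are placeholders; the OpenSearch endpoint type is analogous.
endpoint = aiven.ServiceIntegrationEndpoint(
    "es-logs",
    project="my-project",
    endpoint_name="external-es-logs",
    endpoint_type="external_elasticsearch_logs",
    external_elasticsearch_logs_user_config=aiven.ServiceIntegrationEndpointExternalElasticsearchLogsUserConfigArgs(
        url="https://logs.example.com:9200/",
        index_prefix="service-logs",
        index_days_max=3,  # keep at most 3 days of logs (the default)
        timeout=10.0,      # request timeout limit; default is 10
    ),
)
```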
```diff
@@ -27767,7 +28039,7 @@ class ServiceIntegrationEndpointExternalOpensearchLogsUserConfig(dict):
             -----END CERTIFICATE-----
             `.
         :param _builtins.int index_days_max: Maximum number of days of logs to keep. Default: `3`.
-        :param _builtins.float timeout: OpenSearch request timeout limit. Default: `10
+        :param _builtins.float timeout: OpenSearch request timeout limit. Default: `10`.
         """
         pulumi.set(__self__, "index_prefix", index_prefix)
         pulumi.set(__self__, "url", url)
@@ -27817,7 +28089,7 @@ class ServiceIntegrationEndpointExternalOpensearchLogsUserConfig(dict):
     @pulumi.getter
     def timeout(self) -> Optional[_builtins.float]:
         """
-        OpenSearch request timeout limit. Default: `10
+        OpenSearch request timeout limit. Default: `10`.
         """
         return pulumi.get(self, "timeout")

```
```diff
@@ -28864,10 +29136,10 @@ class ServiceIntegrationKafkaMirrormakerUserConfigKafkaMirrormaker(dict):
         :param _builtins.int consumer_fetch_min_bytes: The minimum amount of data the server should return for a fetch request. Example: `1024`.
         :param _builtins.int consumer_max_poll_records: Set consumer max.poll.records. The default is 500. Example: `500`.
         :param _builtins.int producer_batch_size: The batch size in bytes producer will attempt to collect before publishing to broker. Example: `1024`.
-        :param _builtins.int producer_buffer_memory: The amount of bytes producer can use for buffering data before publishing to broker.
+        :param _builtins.int producer_buffer_memory: The amount of bytes producer can use for buffering data before publishing to broker.
         :param _builtins.str producer_compression_type: Enum: `gzip`, `lz4`, `none`, `snappy`, `zstd`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
         :param _builtins.int producer_linger_ms: The linger time (ms) for waiting new data to arrive for publishing. Example: `100`.
-        :param _builtins.int producer_max_request_size: The maximum request size in bytes.
+        :param _builtins.int producer_max_request_size: The maximum request size in bytes.
         """
         if consumer_auto_offset_reset is not None:
             pulumi.set(__self__, "consumer_auto_offset_reset", consumer_auto_offset_reset)
@@ -28922,7 +29194,7 @@ class ServiceIntegrationKafkaMirrormakerUserConfigKafkaMirrormaker(dict):
     @pulumi.getter(name="producerBufferMemory")
     def producer_buffer_memory(self) -> Optional[_builtins.int]:
         """
-        The amount of bytes producer can use for buffering data before publishing to broker.
+        The amount of bytes producer can use for buffering data before publishing to broker.
         """
         return pulumi.get(self, "producer_buffer_memory")

@@ -28946,7 +29218,7 @@ class ServiceIntegrationKafkaMirrormakerUserConfigKafkaMirrormaker(dict):
     @pulumi.getter(name="producerMaxRequestSize")
     def producer_max_request_size(self) -> Optional[_builtins.int]:
         """
-        The maximum request size in bytes.
+        The maximum request size in bytes.
         """
         return pulumi.get(self, "producer_max_request_size")

```
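The two producer limits above apply per MirrorMaker replication flow. A minimal sketch on a `kafka_mirrormaker` integration, assuming existing services and that the user config takes a `cluster_alias` for the flow (all names are placeholders):

```python
import pulumi_aiven as aiven

# Minimal sketch: tune the MirrorMaker producer buffer and request-size
# limits documented above. Service names and the cluster alias are
# placeholders for an existing Kafka/MirrorMaker pair.
mm = aiven.ServiceIntegration(
    "kafka-to-mirrormaker",
    project="my-project",
    integration_type="kafka_mirrormaker",
    source_service_name="example-kafka",
    destination_service_name="example-mirrormaker",
    kafka_mirrormaker_user_config=aiven.ServiceIntegrationKafkaMirrormakerUserConfigArgs(
        cluster_alias="source-kafka",  # assumed field naming the replication flow
        kafka_mirrormaker=aiven.ServiceIntegrationKafkaMirrormakerUserConfigKafkaMirrormakerArgs(
            producer_buffer_memory=33554432,    # bytes buffered before publishing
            producer_max_request_size=1048576,  # maximum request size in bytes
            producer_compression_type="lz4",
            producer_linger_ms=100,
        ),
    ),
)
```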
```diff
@@ -32346,7 +32618,7 @@ class GetAlloydbomniAlloydbomniUserConfigPgResult(dict):
         """
         :param _builtins.float autovacuum_analyze_scale_factor: Specifies a fraction of the table size to add to autovacuum_analyze_threshold when deciding whether to trigger an ANALYZE (e.g. `0.2` for 20% of the table size). The default is `0.2`.
         :param _builtins.int autovacuum_analyze_threshold: Specifies the minimum number of inserted, updated or deleted tuples needed to trigger an ANALYZE in any one table. The default is `50`.
-        :param _builtins.int autovacuum_freeze_max_age: Specifies the maximum age (in transactions) that a table's pg_class.relfrozenxid field can attain before a VACUUM operation is forced to prevent transaction ID wraparound within the table. The system launches autovacuum processes to prevent wraparound even when autovacuum is otherwise disabled. Changing this parameter causes a service restart.
+        :param _builtins.int autovacuum_freeze_max_age: Specifies the maximum age (in transactions) that a table's pg_class.relfrozenxid field can attain before a VACUUM operation is forced to prevent transaction ID wraparound within the table. The system launches autovacuum processes to prevent wraparound even when autovacuum is otherwise disabled. Changing this parameter causes a service restart.
         :param _builtins.int autovacuum_max_workers: Specifies the maximum number of autovacuum processes (other than the autovacuum launcher) that may be running at any one time. The default is `3`. Changing this parameter causes a service restart.
         :param _builtins.int autovacuum_naptime: Specifies the minimum delay between autovacuum runs on any given database. The delay is measured in seconds. The default is `60`.
         :param _builtins.int autovacuum_vacuum_cost_delay: Specifies the cost delay value that will be used in automatic VACUUM operations. If `-1` is specified, the regular vacuum_cost_delay value will be used. The default is `2` (upstream default).
@@ -32356,7 +32628,7 @@ class GetAlloydbomniAlloydbomniUserConfigPgResult(dict):
         :param _builtins.int bgwriter_delay: Specifies the delay between activity rounds for the background writer in milliseconds. The default is `200`. Example: `200`.
         :param _builtins.int bgwriter_flush_after: Whenever more than bgwriter_flush_after bytes have been written by the background writer, attempt to force the OS to issue these writes to the underlying storage. Specified in kilobytes. Setting of 0 disables forced writeback. The default is `512`. Example: `512`.
         :param _builtins.int bgwriter_lru_maxpages: In each round, no more than this many buffers will be written by the background writer. Setting this to zero disables background writing. The default is `100`. Example: `100`.
-        :param _builtins.float bgwriter_lru_multiplier: The average recent need for new buffers is multiplied by bgwriter_lru_multiplier to arrive at an estimate of the number that will be needed during the next round, (up to bgwriter_lru_maxpages). 1.0 represents a “just in time” policy of writing exactly the number of buffers predicted to be needed. Larger values provide some cushion against spikes in demand, while smaller values intentionally leave writes to be done by server processes. The default is `2.0`. Example: `2
+        :param _builtins.float bgwriter_lru_multiplier: The average recent need for new buffers is multiplied by bgwriter_lru_multiplier to arrive at an estimate of the number that will be needed during the next round, (up to bgwriter_lru_maxpages). 1.0 represents a “just in time” policy of writing exactly the number of buffers predicted to be needed. Larger values provide some cushion against spikes in demand, while smaller values intentionally leave writes to be done by server processes. The default is `2.0`. Example: `2`.
         :param _builtins.int deadlock_timeout: This is the amount of time, in milliseconds, to wait on a lock before checking to see if there is a deadlock condition. The default is `1000` (upstream default). Example: `1000`.
         :param _builtins.str default_toast_compression: Enum: `lz4`, `pglz`. Specifies the default TOAST compression method for values of compressible columns. The default is `lz4`.
         :param _builtins.int idle_in_transaction_session_timeout: Time out sessions with open transactions after this number of milliseconds.
@@ -32385,7 +32657,7 @@ class GetAlloydbomniAlloydbomniUserConfigPgResult(dict):
         :param _builtins.int pg_partman_bgw_dot_interval: Sets the time interval in seconds to run pg_partman's scheduled tasks. The default is `3600`. Example: `3600`.
         :param _builtins.str pg_partman_bgw_dot_role: Controls which role to use for pg_partman's scheduled background tasks. Example: `myrolename`.
         :param _builtins.str pg_stat_statements_dot_track: Enum: `all`, `none`, `top`. Controls which statements are counted. Specify top to track top-level statements (those issued directly by clients), all to also track nested statements (such as statements invoked within functions), or none to disable statement statistics collection. The default is `top`.
-        :param _builtins.int temp_file_limit: PostgreSQL temporary file limit in KiB, -1 for unlimited.
+        :param _builtins.int temp_file_limit: PostgreSQL temporary file limit in KiB, -1 for unlimited.
         :param _builtins.str timezone: PostgreSQL service timezone. Example: `Europe/Helsinki`.
         :param _builtins.int track_activity_query_size: Specifies the number of bytes reserved to track the currently executing command for each active session. Changing this parameter causes a service restart. Example: `1024`.
         :param _builtins.str track_commit_timestamp: Enum: `off`, `on`. Record commit time of transactions. Changing this parameter causes a service restart.
@@ -32513,7 +32785,7 @@ class GetAlloydbomniAlloydbomniUserConfigPgResult(dict):
     @pulumi.getter(name="autovacuumFreezeMaxAge")
     def autovacuum_freeze_max_age(self) -> Optional[_builtins.int]:
         """
-        Specifies the maximum age (in transactions) that a table's pg_class.relfrozenxid field can attain before a VACUUM operation is forced to prevent transaction ID wraparound within the table. The system launches autovacuum processes to prevent wraparound even when autovacuum is otherwise disabled. Changing this parameter causes a service restart.
+        Specifies the maximum age (in transactions) that a table's pg_class.relfrozenxid field can attain before a VACUUM operation is forced to prevent transaction ID wraparound within the table. The system launches autovacuum processes to prevent wraparound even when autovacuum is otherwise disabled. Changing this parameter causes a service restart.
         """
         return pulumi.get(self, "autovacuum_freeze_max_age")

@@ -32593,7 +32865,7 @@ class GetAlloydbomniAlloydbomniUserConfigPgResult(dict):
     @pulumi.getter(name="bgwriterLruMultiplier")
     def bgwriter_lru_multiplier(self) -> Optional[_builtins.float]:
         """
-        The average recent need for new buffers is multiplied by bgwriter_lru_multiplier to arrive at an estimate of the number that will be needed during the next round, (up to bgwriter_lru_maxpages). 1.0 represents a “just in time” policy of writing exactly the number of buffers predicted to be needed. Larger values provide some cushion against spikes in demand, while smaller values intentionally leave writes to be done by server processes. The default is `2.0`. Example: `2
+        The average recent need for new buffers is multiplied by bgwriter_lru_multiplier to arrive at an estimate of the number that will be needed during the next round, (up to bgwriter_lru_maxpages). 1.0 represents a “just in time” policy of writing exactly the number of buffers predicted to be needed. Larger values provide some cushion against spikes in demand, while smaller values intentionally leave writes to be done by server processes. The default is `2.0`. Example: `2`.
         """
         return pulumi.get(self, "bgwriter_lru_multiplier")

@@ -32825,7 +33097,7 @@ class GetAlloydbomniAlloydbomniUserConfigPgResult(dict):
     @pulumi.getter(name="tempFileLimit")
     def temp_file_limit(self) -> Optional[_builtins.int]:
         """
-        PostgreSQL temporary file limit in KiB, -1 for unlimited.
+        PostgreSQL temporary file limit in KiB, -1 for unlimited.
         """
         return pulumi.get(self, "temp_file_limit")

```
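The `GetAlloydbomniAlloydbomniUserConfigPgResult` fields above mirror the Pg settings on the read-only data-source side. A minimal sketch of looking up an existing service (project and service names are placeholders):

```python
import pulumi
import pulumi_aiven as aiven

# Minimal sketch: look up an existing AlloyDB Omni service. The
# Get*Result classes above describe the user-config shape returned
# in the lookup result. Project and service names are placeholders.
svc = aiven.get_alloydbomni(project="my-project",
                            service_name="example-alloydbomni")
pulumi.export("alloydbomni_state", svc.state)
```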
```diff
@@ -36038,7 +36310,7 @@ class GetFlinkFlinkUserConfigResult(dict):
         :param Sequence[_builtins.str] ip_filters: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`.
         :param _builtins.int number_of_task_slots: Task slots per node. For a 3 node plan, total number of task slots is 3x this value. Example: `1`.
         :param _builtins.int pekko_ask_timeout_s: Timeout in seconds used for all futures and blocking Pekko requests. Example: `10`.
-        :param _builtins.int pekko_framesize_b: Maximum size in bytes for messages exchanged between the JobManager and the TaskManagers.
+        :param _builtins.int pekko_framesize_b: Maximum size in bytes for messages exchanged between the JobManager and the TaskManagers.
         :param 'GetFlinkFlinkUserConfigPrivatelinkAccessArgs' privatelink_access: Allow access to selected service components through Privatelink
         :param 'GetFlinkFlinkUserConfigPublicAccessArgs' public_access: Allow access to selected service ports from the public Internet
         :param _builtins.bool service_log: Store logs for the service so that they are available in the HTTP API and console.
@@ -36141,7 +36413,7 @@ class GetFlinkFlinkUserConfigResult(dict):
     @pulumi.getter(name="pekkoFramesizeB")
     def pekko_framesize_b(self) -> Optional[_builtins.int]:
         """
-        Maximum size in bytes for messages exchanged between the JobManager and the TaskManagers.
+        Maximum size in bytes for messages exchanged between the JobManager and the TaskManagers.
         """
         return pulumi.get(self, "pekko_framesize_b")

```
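On the resource side, the same `pekko_framesize_b` cap bounds JobManager/TaskManager messages. A minimal sketch, assuming the mirrored `FlinkFlinkUserConfigArgs` inputs (names and plan are placeholders):

```python
import pulumi_aiven as aiven

# Minimal sketch: raise the Pekko frame size so larger JobManager <->
# TaskManager messages are allowed. Project, cloud, plan, and service
# names are placeholders.
flink = aiven.Flink(
    "example-flink",
    project="my-project",
    cloud_name="google-europe-west1",
    plan="business-4",
    service_name="example-flink",
    flink_user_config=aiven.FlinkFlinkUserConfigArgs(
        number_of_task_slots=1,
        pekko_ask_timeout_s=10,
        pekko_framesize_b=10485760,  # 10 MiB per exchanged message
    ),
)
```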
```diff
@@ -38922,9 +39194,9 @@ class GetKafkaConnectKafkaConnectUserConfigKafkaConnectResult(dict):
         """
         :param _builtins.str connector_client_config_override_policy: Enum: `All`, `None`. Defines what client configurations can be overridden by the connector. Default is None.
         :param _builtins.str consumer_auto_offset_reset: Enum: `earliest`, `latest`. What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
-        :param _builtins.int consumer_fetch_max_bytes: Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not a absolute maximum.
+        :param _builtins.int consumer_fetch_max_bytes: Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not a absolute maximum.
         :param _builtins.str consumer_isolation_level: Enum: `read_committed`, `read_uncommitted`. Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
-        :param _builtins.int consumer_max_partition_fetch_bytes: Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress.
+        :param _builtins.int consumer_max_partition_fetch_bytes: Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress.
         :param _builtins.int consumer_max_poll_interval_ms: The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).
         :param _builtins.int consumer_max_poll_records: The maximum number of records returned in a single call to poll() (defaults to 500).
         :param _builtins.int offset_flush_interval_ms: The interval at which to try committing offsets for tasks (defaults to 60000).
@@ -38933,7 +39205,7 @@ class GetKafkaConnectKafkaConnectUserConfigKafkaConnectResult(dict):
         :param _builtins.int producer_buffer_memory: The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432).
         :param _builtins.str producer_compression_type: Enum: `gzip`, `lz4`, `none`, `snappy`, `zstd`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
         :param _builtins.int producer_linger_ms: This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting, however if there are fewer than this many bytes accumulated for this partition the producer will `linger` for the specified time waiting for more records to show up. Defaults to 0.
-        :param _builtins.int producer_max_request_size: This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests.
+        :param _builtins.int producer_max_request_size: This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests.
         :param _builtins.int scheduled_rebalance_max_delay_ms: The maximum delay that is scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. Defaults to 5 minutes.
         :param _builtins.int session_timeout_ms: The timeout in milliseconds used to detect failures when using Kafka’s group management facilities (defaults to 10000).
         """
@@ -38990,7 +39262,7 @@ class GetKafkaConnectKafkaConnectUserConfigKafkaConnectResult(dict):
     @pulumi.getter(name="consumerFetchMaxBytes")
     def consumer_fetch_max_bytes(self) -> Optional[_builtins.int]:
         """
-        Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not a absolute maximum.
+        Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not a absolute maximum.
         """
         return pulumi.get(self, "consumer_fetch_max_bytes")

@@ -39006,7 +39278,7 @@ class GetKafkaConnectKafkaConnectUserConfigKafkaConnectResult(dict):
     @pulumi.getter(name="consumerMaxPartitionFetchBytes")
     def consumer_max_partition_fetch_bytes(self) -> Optional[_builtins.int]:
         """
-        Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress.
+        Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress.
         """
         return pulumi.get(self, "consumer_max_partition_fetch_bytes")

@@ -39078,7 +39350,7 @@ class GetKafkaConnectKafkaConnectUserConfigKafkaConnectResult(dict):
     @pulumi.getter(name="producerMaxRequestSize")
     def producer_max_request_size(self) -> Optional[_builtins.int]:
         """
-        This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests.
+        This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests.
         """
         return pulumi.get(self, "producer_max_request_size")

```
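The consumer and producer limits above mirror the `KafkaConnect` resource inputs. A minimal sketch of setting them (names and plan are placeholders):

```python
import pulumi_aiven as aiven

# Minimal sketch: set the fetch and request-size limits documented
# above on a Kafka Connect service. Project, cloud, plan, and service
# names are placeholders.
connect = aiven.KafkaConnect(
    "example-kafka-connect",
    project="my-project",
    cloud_name="google-europe-west1",
    plan="startup-4",
    service_name="example-connect",
    kafka_connect_user_config=aiven.KafkaConnectKafkaConnectUserConfigArgs(
        kafka_connect=aiven.KafkaConnectKafkaConnectUserConfigKafkaConnectArgs(
            consumer_fetch_max_bytes=52428800,           # soft 50 MiB fetch ceiling
            consumer_max_partition_fetch_bytes=1048576,  # per-partition batch limit
            producer_max_request_size=5242880,           # cap one request at 5 MiB
            consumer_isolation_level="read_committed",
        ),
    ),
)
```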
```diff
@@ -39587,6 +39859,7 @@ class GetKafkaKafkaUserConfigResult(dict):
                  kafka_connect_config: Optional['outputs.GetKafkaKafkaUserConfigKafkaConnectConfigResult'] = None,
                  kafka_connect_plugin_versions: Optional[Sequence['outputs.GetKafkaKafkaUserConfigKafkaConnectPluginVersionResult']] = None,
                  kafka_connect_secret_providers: Optional[Sequence['outputs.GetKafkaKafkaUserConfigKafkaConnectSecretProviderResult']] = None,
+                 kafka_diskless: Optional['outputs.GetKafkaKafkaUserConfigKafkaDisklessResult'] = None,
                  kafka_rest: Optional[_builtins.bool] = None,
                  kafka_rest_authorization: Optional[_builtins.bool] = None,
                  kafka_rest_config: Optional['outputs.GetKafkaKafkaUserConfigKafkaRestConfigResult'] = None,
@@ -39615,11 +39888,12 @@ class GetKafkaKafkaUserConfigResult(dict):
         :param _builtins.bool kafka_connect: Enable Kafka Connect service. Default: `false`.
         :param 'GetKafkaKafkaUserConfigKafkaConnectConfigArgs' kafka_connect_config: Kafka Connect configuration values
         :param Sequence['GetKafkaKafkaUserConfigKafkaConnectPluginVersionArgs'] kafka_connect_plugin_versions: The plugin selected by the user
+        :param 'GetKafkaKafkaUserConfigKafkaDisklessArgs' kafka_diskless: Kafka Diskless configuration values
         :param _builtins.bool kafka_rest: Enable Kafka-REST service. Default: `false`.
         :param _builtins.bool kafka_rest_authorization: Enable authorization in Kafka-REST service.
         :param 'GetKafkaKafkaUserConfigKafkaRestConfigArgs' kafka_rest_config: Kafka REST configuration
         :param 'GetKafkaKafkaUserConfigKafkaSaslMechanismsArgs' kafka_sasl_mechanisms: Kafka SASL mechanisms
-        :param _builtins.str kafka_version: Enum: `3.1`, `3.2`, `3.3`, `3.4`, `3.5`, `3.6`, `3.7`, `3.8`, `3.9`, and newer. Kafka major version.
+        :param _builtins.str kafka_version: Enum: `3.1`, `3.2`, `3.3`, `3.4`, `3.5`, `3.6`, `3.7`, `3.8`, `3.9`, `4.0`, and newer. Kafka major version.
         :param _builtins.bool letsencrypt_sasl_privatelink: Use Letsencrypt CA for Kafka SASL via Privatelink.
         :param 'GetKafkaKafkaUserConfigPrivateAccessArgs' private_access: Allow access to selected service ports from private networks
         :param 'GetKafkaKafkaUserConfigPrivatelinkAccessArgs' privatelink_access: Allow access to selected service components through Privatelink
@@ -39657,6 +39931,8 @@ class GetKafkaKafkaUserConfigResult(dict):
             pulumi.set(__self__, "kafka_connect_plugin_versions", kafka_connect_plugin_versions)
         if kafka_connect_secret_providers is not None:
             pulumi.set(__self__, "kafka_connect_secret_providers", kafka_connect_secret_providers)
+        if kafka_diskless is not None:
+            pulumi.set(__self__, "kafka_diskless", kafka_diskless)
         if kafka_rest is not None:
             pulumi.set(__self__, "kafka_rest", kafka_rest)
         if kafka_rest_authorization is not None:
@@ -39791,6 +40067,14 @@ class GetKafkaKafkaUserConfigResult(dict):
     def kafka_connect_secret_providers(self) -> Optional[Sequence['outputs.GetKafkaKafkaUserConfigKafkaConnectSecretProviderResult']]:
         return pulumi.get(self, "kafka_connect_secret_providers")

+    @_builtins.property
+    @pulumi.getter(name="kafkaDiskless")
+    def kafka_diskless(self) -> Optional['outputs.GetKafkaKafkaUserConfigKafkaDisklessResult']:
+        """
+        Kafka Diskless configuration values
+        """
+        return pulumi.get(self, "kafka_diskless")
+
     @_builtins.property
     @pulumi.getter(name="kafkaRest")
     def kafka_rest(self) -> Optional[_builtins.bool]:
@@ -39827,7 +40111,7 @@ class GetKafkaKafkaUserConfigResult(dict):
     @pulumi.getter(name="kafkaVersion")
     def kafka_version(self) -> Optional[_builtins.str]:
         """
-        Enum: `3.1`, `3.2`, `3.3`, `3.4`, `3.5`, `3.6`, `3.7`, `3.8`, `3.9`, and newer. Kafka major version.
+        Enum: `3.1`, `3.2`, `3.3`, `3.4`, `3.5`, `3.6`, `3.7`, `3.8`, `3.9`, `4.0`, and newer. Kafka major version.
         """
         return pulumi.get(self, "kafka_version")

```
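The new `kafka_diskless` block and the `4.0` version enum can be read back through the `get_kafka` data source. A sketch, assuming the lookup result exposes the user config as a `kafka_user_configs` list (project and service names are placeholders):

```python
import pulumi
import pulumi_aiven as aiven

# Sketch: read the new kafka_diskless block and kafka_version back from
# an existing service. `kafka_user_configs` is assumed to be the list
# attribute holding the GetKafkaKafkaUserConfigResult values; project
# and service names are placeholders.
kafka = aiven.get_kafka(project="my-project", service_name="example-kafka")
cfg = kafka.kafka_user_configs[0] if kafka.kafka_user_configs else None
pulumi.export("kafka_version", cfg.kafka_version if cfg else None)
pulumi.export("kafka_diskless", cfg.kafka_diskless if cfg else None)
```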
```diff
@@ -39983,6 +40267,8 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
                  log_local_retention_bytes: Optional[_builtins.int] = None,
                  log_local_retention_ms: Optional[_builtins.int] = None,
                  log_message_downconversion_enable: Optional[_builtins.bool] = None,
+                 log_message_timestamp_after_max_ms: Optional[_builtins.int] = None,
+                 log_message_timestamp_before_max_ms: Optional[_builtins.int] = None,
                  log_message_timestamp_difference_max_ms: Optional[_builtins.int] = None,
                  log_message_timestamp_type: Optional[_builtins.str] = None,
                  log_preallocate: Optional[_builtins.bool] = None,
@@ -40016,9 +40302,9 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
         :param _builtins.int connections_max_idle_ms: Idle connections timeout: the server socket processor threads close the connections that idle for longer than this. (Default: 600000 ms (10 minutes)). Example: `540000`.
         :param _builtins.int default_replication_factor: Replication factor for auto-created topics (Default: 3).
         :param _builtins.int group_initial_rebalance_delay_ms: The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. (Default: 3000 ms (3 seconds)). Example: `3000`.
-        :param _builtins.int group_max_session_timeout_ms: The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. Default: 1800000 ms (30 minutes).
+        :param _builtins.int group_max_session_timeout_ms: The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. Default: 1800000 ms (30 minutes).
         :param _builtins.int group_min_session_timeout_ms: The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. (Default: 6000 ms (6 seconds)). Example: `6000`.
-        :param _builtins.int log_cleaner_delete_retention_ms: How long are delete records retained? (Default: 86400000 (1 day)).
+        :param _builtins.int log_cleaner_delete_retention_ms: How long are delete records retained? (Default: 86400000 (1 day)).
         :param _builtins.int log_cleaner_max_compaction_lag_ms: The maximum amount of time message will remain uncompacted. Only applicable for logs that are being compacted. (Default: 9223372036854775807 ms (Long.MAX_VALUE)).
         :param _builtins.float log_cleaner_min_cleanable_ratio: Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option. (Default: 0.5). Example: `0.5`.
         :param _builtins.int log_cleaner_min_compaction_lag_ms: The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted. (Default: 0 ms).
@@ -40026,10 +40312,12 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
         :param _builtins.int log_flush_interval_messages: The number of messages accumulated on a log partition before messages are flushed to disk (Default: 9223372036854775807 (Long.MAX_VALUE)).
         :param _builtins.int log_flush_interval_ms: The maximum time in ms that a message in any topic is kept in memory (page-cache) before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used (Default: null).
         :param _builtins.int log_index_interval_bytes: The interval with which Kafka adds an entry to the offset index (Default: 4096 bytes (4 kibibytes)). Example: `4096`.
-        :param _builtins.int log_index_size_max_bytes: The maximum size in bytes of the offset index (Default: 10485760 (10 mebibytes)).
+        :param _builtins.int log_index_size_max_bytes: The maximum size in bytes of the offset index (Default: 10485760 (10 mebibytes)).
         :param _builtins.int log_local_retention_bytes: The maximum size of local log segments that can grow for a partition before it gets eligible for deletion. If set to -2, the value of log.retention.bytes is used. The effective value should always be less than or equal to log.retention.bytes value. (Default: -2).
         :param _builtins.int log_local_retention_ms: The number of milliseconds to keep the local log segments before it gets eligible for deletion. If set to -2, the value of log.retention.ms is used. The effective value should always be less than or equal to log.retention.ms value. (Default: -2).
         :param _builtins.bool log_message_downconversion_enable: This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. (Default: true).
+        :param _builtins.int log_message_timestamp_after_max_ms: The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message. If message.timestamp.type=CreateTime, a message will be rejected if the difference in timestamp exceeds this threshold. Applies only for messages with timestamps later than the broker's timestamp. (Default: 9223372036854775807 (Long.MAX_VALUE)).
+        :param _builtins.int log_message_timestamp_before_max_ms: The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message. If message.timestamp.type=CreateTime, a message will be rejected if the difference in timestamp exceeds this threshold. Applies only for messages with timestamps earlier than the broker's timestamp. (Default: 9223372036854775807 (Long.MAX_VALUE)).
         :param _builtins.int log_message_timestamp_difference_max_ms: The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message (Default: 9223372036854775807 (Long.MAX_VALUE)).
         :param _builtins.str log_message_timestamp_type: Enum: `CreateTime`, `LogAppendTime`. Define whether the timestamp in the message is message create time or log append time. (Default: CreateTime).
         :param _builtins.bool log_preallocate: Should pre allocate file when create new segment? (Default: false).
@@ -40042,7 +40330,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
         :param _builtins.int log_segment_delete_delay_ms: The amount of time to wait before deleting a file from the filesystem (Default: 60000 ms (1 minute)). Example: `60000`.
         :param _builtins.int max_connections_per_ip: The maximum number of connections allowed from each ip address (Default: 2147483647).
         :param _builtins.int max_incremental_fetch_session_cache_slots: The maximum number of incremental fetch sessions that the broker will maintain. (Default: 1000). Example: `1000`.
-        :param _builtins.int message_max_bytes: The maximum size of message that the server can receive. (Default: 1048588 bytes (1 mebibyte + 12 bytes)).
+        :param _builtins.int message_max_bytes: The maximum size of message that the server can receive. (Default: 1048588 bytes (1 mebibyte + 12 bytes)).
         :param _builtins.int min_insync_replicas: When a producer sets acks to `all` (or `-1`), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful. (Default: 1). Example: `1`.
         :param _builtins.int num_partitions: Number of partitions for auto-created topics (Default: 1).
         :param _builtins.int offsets_retention_minutes: Log retention window in minutes for offsets topic (Default: 10080 minutes (7 days)). Example: `10080`.
@@ -40055,8 +40343,8 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
         :param _builtins.str sasl_oauthbearer_sub_claim_name: Name of the scope from which to extract the subject claim from the JWT.(Default: sub).
         :param _builtins.int socket_request_max_bytes: The maximum number of bytes in a socket request (Default: 104857600 bytes).
         :param _builtins.bool transaction_partition_verification_enable: Enable verification that checks that the partition has been added to the transaction before writing transactional records to the partition. (Default: true).
-        :param _builtins.int transaction_remove_expired_transaction_cleanup_interval_ms: The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (Default: 3600000 ms (1 hour)).
-        :param _builtins.int transaction_state_log_segment_bytes: The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (Default: 104857600 bytes (100 mebibytes)).
+        :param _builtins.int transaction_remove_expired_transaction_cleanup_interval_ms: The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (Default: 3600000 ms (1 hour)).
+        :param _builtins.int transaction_state_log_segment_bytes: The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (Default: 104857600 bytes (100 mebibytes)).
         """
         if auto_create_topics_enable is not None:
             pulumi.set(__self__, "auto_create_topics_enable", auto_create_topics_enable)
```
@@ -40096,6 +40384,10 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
|
|
|
40096
40384
|
pulumi.set(__self__, "log_local_retention_ms", log_local_retention_ms)
|
|
40097
40385
|
if log_message_downconversion_enable is not None:
|
|
40098
40386
|
pulumi.set(__self__, "log_message_downconversion_enable", log_message_downconversion_enable)
|
|
40387
|
+
if log_message_timestamp_after_max_ms is not None:
|
|
40388
|
+
pulumi.set(__self__, "log_message_timestamp_after_max_ms", log_message_timestamp_after_max_ms)
|
|
40389
|
+
if log_message_timestamp_before_max_ms is not None:
|
|
40390
|
+
pulumi.set(__self__, "log_message_timestamp_before_max_ms", log_message_timestamp_before_max_ms)
|
|
40099
40391
|
if log_message_timestamp_difference_max_ms is not None:
|
|
40100
40392
|
pulumi.set(__self__, "log_message_timestamp_difference_max_ms", log_message_timestamp_difference_max_ms)
|
|
40101
40393
|
if log_message_timestamp_type is not None:
|
|
@@ -40195,7 +40487,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
     @pulumi.getter(name="groupMaxSessionTimeoutMs")
     def group_max_session_timeout_ms(self) -> Optional[_builtins.int]:
         """
-        The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. Default: 1800000 ms (30 minutes).
+        The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. Default: 1800000 ms (30 minutes).
         """
         return pulumi.get(self, "group_max_session_timeout_ms")

@@ -40211,7 +40503,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
     @pulumi.getter(name="logCleanerDeleteRetentionMs")
     def log_cleaner_delete_retention_ms(self) -> Optional[_builtins.int]:
         """
-        How long are delete records retained? (Default: 86400000 (1 day)).
+        How long are delete records retained? (Default: 86400000 (1 day)).
         """
         return pulumi.get(self, "log_cleaner_delete_retention_ms")

@@ -40275,7 +40567,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
     @pulumi.getter(name="logIndexSizeMaxBytes")
     def log_index_size_max_bytes(self) -> Optional[_builtins.int]:
         """
-        The maximum size in bytes of the offset index (Default: 10485760 (10 mebibytes)).
+        The maximum size in bytes of the offset index (Default: 10485760 (10 mebibytes)).
         """
         return pulumi.get(self, "log_index_size_max_bytes")

@@ -40303,6 +40595,22 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
         """
         return pulumi.get(self, "log_message_downconversion_enable")

+    @_builtins.property
+    @pulumi.getter(name="logMessageTimestampAfterMaxMs")
+    def log_message_timestamp_after_max_ms(self) -> Optional[_builtins.int]:
+        """
+        The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message. If message.timestamp.type=CreateTime, a message will be rejected if the difference in timestamp exceeds this threshold. Applies only for messages with timestamps later than the broker's timestamp. (Default: 9223372036854775807 (Long.MAX_VALUE)).
+        """
+        return pulumi.get(self, "log_message_timestamp_after_max_ms")
+
+    @_builtins.property
+    @pulumi.getter(name="logMessageTimestampBeforeMaxMs")
+    def log_message_timestamp_before_max_ms(self) -> Optional[_builtins.int]:
+        """
+        The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message. If message.timestamp.type=CreateTime, a message will be rejected if the difference in timestamp exceeds this threshold. Applies only for messages with timestamps earlier than the broker's timestamp. (Default: 9223372036854775807 (Long.MAX_VALUE)).
+        """
+        return pulumi.get(self, "log_message_timestamp_before_max_ms")
+
     @_builtins.property
     @pulumi.getter(name="logMessageTimestampDifferenceMaxMs")
     def log_message_timestamp_difference_max_ms(self) -> Optional[_builtins.int]:
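The pair of `log_message_timestamp_after_max_ms` / `log_message_timestamp_before_max_ms` getters above are the read-side of the new broker timestamp guards. A minimal sketch of setting them on the resource side, assuming the matching inputs landed in `KafkaKafkaUserConfigKafkaArgs` in this same release (project, cloud, plan, and service names are placeholders):

import pulumi_aiven as aiven

kafka = aiven.Kafka(
    "example-kafka",
    project="my-project",              # placeholder
    cloud_name="google-europe-west1",  # placeholder
    plan="business-4",
    service_name="example-kafka",
    kafka_user_config=aiven.KafkaKafkaUserConfigArgs(
        kafka=aiven.KafkaKafkaUserConfigKafkaArgs(
            # Assumed input-side names mirroring the getters above:
            # reject messages stamped more than 30 minutes ahead of broker time,
            log_message_timestamp_after_max_ms=1800000,
            # but keep the permissive documented default for older timestamps.
            log_message_timestamp_before_max_ms=9223372036854775807,
        ),
    ),
)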
@@ -40403,7 +40711,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
     @pulumi.getter(name="messageMaxBytes")
     def message_max_bytes(self) -> Optional[_builtins.int]:
         """
-        The maximum size of message that the server can receive. (Default: 1048588 bytes (1 mebibyte + 12 bytes)).
+        The maximum size of message that the server can receive. (Default: 1048588 bytes (1 mebibyte + 12 bytes)).
         """
         return pulumi.get(self, "message_max_bytes")

@@ -40507,7 +40815,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
     @pulumi.getter(name="transactionRemoveExpiredTransactionCleanupIntervalMs")
     def transaction_remove_expired_transaction_cleanup_interval_ms(self) -> Optional[_builtins.int]:
         """
-        The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (Default: 3600000 ms (1 hour)).
+        The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (Default: 3600000 ms (1 hour)).
         """
         return pulumi.get(self, "transaction_remove_expired_transaction_cleanup_interval_ms")

@@ -40515,7 +40823,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
     @pulumi.getter(name="transactionStateLogSegmentBytes")
     def transaction_state_log_segment_bytes(self) -> Optional[_builtins.int]:
         """
-        The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (Default: 104857600 bytes (100 mebibytes)).
+        The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (Default: 104857600 bytes (100 mebibytes)).
         """
         return pulumi.get(self, "transaction_state_log_segment_bytes")

@@ -40573,9 +40881,9 @@ class GetKafkaKafkaUserConfigKafkaConnectConfigResult(dict):
         """
         :param _builtins.str connector_client_config_override_policy: Enum: `All`, `None`. Defines what client configurations can be overridden by the connector. Default is None.
         :param _builtins.str consumer_auto_offset_reset: Enum: `earliest`, `latest`. What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
-        :param _builtins.int consumer_fetch_max_bytes: Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not a absolute maximum.
+        :param _builtins.int consumer_fetch_max_bytes: Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not a absolute maximum.
         :param _builtins.str consumer_isolation_level: Enum: `read_committed`, `read_uncommitted`. Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
-        :param _builtins.int consumer_max_partition_fetch_bytes: Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress.
+        :param _builtins.int consumer_max_partition_fetch_bytes: Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress.
         :param _builtins.int consumer_max_poll_interval_ms: The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).
         :param _builtins.int consumer_max_poll_records: The maximum number of records returned in a single call to poll() (defaults to 500).
         :param _builtins.int offset_flush_interval_ms: The interval at which to try committing offsets for tasks (defaults to 60000).
@@ -40584,7 +40892,7 @@ class GetKafkaKafkaUserConfigKafkaConnectConfigResult(dict):
         :param _builtins.int producer_buffer_memory: The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432).
         :param _builtins.str producer_compression_type: Enum: `gzip`, `lz4`, `none`, `snappy`, `zstd`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
         :param _builtins.int producer_linger_ms: This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting, however if there are fewer than this many bytes accumulated for this partition the producer will `linger` for the specified time waiting for more records to show up. Defaults to 0.
-        :param _builtins.int producer_max_request_size: This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests.
+        :param _builtins.int producer_max_request_size: This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests.
         :param _builtins.int scheduled_rebalance_max_delay_ms: The maximum delay that is scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. Defaults to 5 minutes.
         :param _builtins.int session_timeout_ms: The timeout in milliseconds used to detect failures when using Kafka’s group management facilities (defaults to 10000).
         """
@@ -40641,7 +40949,7 @@ class GetKafkaKafkaUserConfigKafkaConnectConfigResult(dict):
     @pulumi.getter(name="consumerFetchMaxBytes")
     def consumer_fetch_max_bytes(self) -> Optional[_builtins.int]:
         """
-        Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not a absolute maximum.
+        Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not a absolute maximum.
         """
         return pulumi.get(self, "consumer_fetch_max_bytes")

@@ -40657,7 +40965,7 @@ class GetKafkaKafkaUserConfigKafkaConnectConfigResult(dict):
     @pulumi.getter(name="consumerMaxPartitionFetchBytes")
     def consumer_max_partition_fetch_bytes(self) -> Optional[_builtins.int]:
         """
-        Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress.
+        Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress.
         """
         return pulumi.get(self, "consumer_max_partition_fetch_bytes")

@@ -40729,7 +41037,7 @@ class GetKafkaKafkaUserConfigKafkaConnectConfigResult(dict):
     @pulumi.getter(name="producerMaxRequestSize")
     def producer_max_request_size(self) -> Optional[_builtins.int]:
         """
-        This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests.
+        This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests.
         """
        return pulumi.get(self, "producer_max_request_size")

@@ -40939,6 +41247,24 @@ class GetKafkaKafkaUserConfigKafkaConnectSecretProviderVaultResult(dict):
         return pulumi.get(self, "token")


+@pulumi.output_type
+class GetKafkaKafkaUserConfigKafkaDisklessResult(dict):
+    def __init__(__self__, *,
+                 enabled: _builtins.bool):
+        """
+        :param _builtins.bool enabled: Whether to enable the Diskless functionality.
+        """
+        pulumi.set(__self__, "enabled", enabled)
+
+    @_builtins.property
+    @pulumi.getter
+    def enabled(self) -> _builtins.bool:
+        """
+        Whether to enable the Diskless functionality.
+        """
+        return pulumi.get(self, "enabled")
+
+
 @pulumi.output_type
 class GetKafkaKafkaUserConfigKafkaRestConfigResult(dict):
     def __init__(__self__, *,
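The new `GetKafkaKafkaUserConfigKafkaDisklessResult` only shows the output shape: a single required `enabled` flag. A hedged sketch of reading it back from the `get_kafka` data source; the `kafka_diskless` attribute name is inferred from the class name and is not verified against the schema:

import pulumi
import pulumi_aiven as aiven

kafka = aiven.get_kafka(project="my-project", service_name="example-kafka")

cfg = kafka.kafka_user_configs[0]
# Attribute name assumed from GetKafkaKafkaUserConfigKafkaDisklessResult;
# getattr keeps the sketch safe if the field is named differently.
diskless = getattr(cfg, "kafka_diskless", None)
if diskless is not None:
    pulumi.export("disklessEnabled", diskless.enabled)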
@@ -40956,14 +41282,14 @@ class GetKafkaKafkaUserConfigKafkaRestConfigResult(dict):
         """
         :param _builtins.bool consumer_enable_auto_commit: If true the consumer's offset will be periodically committed to Kafka in the background. Default: `true`.
         :param _builtins.int consumer_idle_disconnect_timeout: Specifies the maximum duration (in seconds) a client can remain idle before it is deleted. If a consumer is inactive, it will exit the consumer group, and its state will be discarded. A value of 0 (default) indicates that the consumer will not be disconnected automatically due to inactivity. Default: `0`.
-        :param _builtins.int consumer_request_max_bytes: Maximum number of bytes in unencoded message keys and values by a single request.
+        :param _builtins.int consumer_request_max_bytes: Maximum number of bytes in unencoded message keys and values by a single request.
         :param _builtins.int consumer_request_timeout_ms: Enum: `1000`, `15000`, `30000`. The maximum total time to wait for messages for a request if the maximum number of messages has not yet been reached. Default: `1000`.
         :param _builtins.str name_strategy: Enum: `record_name`, `topic_name`, `topic_record_name`. Name strategy to use when selecting subject for storing schemas. Default: `topic_name`.
         :param _builtins.bool name_strategy_validation: If true, validate that given schema is registered under expected subject name by the used name strategy when producing messages. Default: `true`.
         :param _builtins.str producer_acks: Enum: `-1`, `0`, `1`, `all`. The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to `all` or `-1`, the leader will wait for the full set of in-sync replicas to acknowledge the record. Default: `1`.
         :param _builtins.str producer_compression_type: Enum: `gzip`, `lz4`, `none`, `snappy`, `zstd`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
         :param _builtins.int producer_linger_ms: Wait for up to the given delay to allow batching records together. Default: `0`.
-        :param _builtins.int producer_max_request_size: The maximum size of a request in bytes. Note that Kafka broker can also cap the record batch size.
+        :param _builtins.int producer_max_request_size: The maximum size of a request in bytes. Note that Kafka broker can also cap the record batch size.
         :param _builtins.int simpleconsumer_pool_size_max: Maximum number of SimpleConsumers that can be instantiated per broker. Default: `25`.
         """
         if consumer_enable_auto_commit is not None:
@@ -41009,7 +41335,7 @@ class GetKafkaKafkaUserConfigKafkaRestConfigResult(dict):
     @pulumi.getter(name="consumerRequestMaxBytes")
     def consumer_request_max_bytes(self) -> Optional[_builtins.int]:
         """
-        Maximum number of bytes in unencoded message keys and values by a single request.
+        Maximum number of bytes in unencoded message keys and values by a single request.
         """
         return pulumi.get(self, "consumer_request_max_bytes")

@@ -41065,7 +41391,7 @@ class GetKafkaKafkaUserConfigKafkaRestConfigResult(dict):
     @pulumi.getter(name="producerMaxRequestSize")
     def producer_max_request_size(self) -> Optional[_builtins.int]:
         """
-        The maximum size of a request in bytes. Note that Kafka broker can also cap the record batch size.
+        The maximum size of a request in bytes. Note that Kafka broker can also cap the record batch size.
         """
         return pulumi.get(self, "producer_max_request_size")

@@ -42073,7 +42399,7 @@ class GetKafkaTopicConfigResult(dict):
         :param _builtins.str max_compaction_lag_ms: The maximum time a message will remain ineligible for compaction in the log. Only applicable for logs that are being compacted.
         :param _builtins.str max_message_bytes: The largest record batch size allowed by Kafka (after compression if compression is enabled). If this is increased and there are consumers older than 0.10.2, the consumers' fetch size must also be increased so that the they can fetch record batches this large. In the latest message format version, records are always grouped into batches for efficiency. In previous message format versions, uncompressed records are not grouped into batches and this limit only applies to a single record in that case.
         :param _builtins.bool message_downconversion_enable: This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. When set to false, broker will not perform down-conversion for consumers expecting an older message format. The broker responds with UNSUPPORTED_VERSION error for consume requests from such older clients. This configuration does not apply to any message format conversion that might be required for replication to followers.
-        :param _builtins.str message_format_version: Specify the message format version the broker will use to append messages to the logs. The value should be a valid ApiVersion. Some examples are: 0.8.2, 0.9.0.0, 0.10.0, check ApiVersion for more details. By setting a particular message format version, the user is certifying that all the existing messages on disk are smaller or equal than the specified version. Setting this value incorrectly will cause consumers with older versions to break as they will receive messages with a format that they don't understand. The possible values are `0.10.0`, `0.10.0-IV0`, `0.10.0-IV1`, `0.10.1`, `0.10.1-IV0`, `0.10.1-IV1`, `0.10.1-IV2`, `0.10.2`, `0.10.2-IV0`, `0.11.0`, `0.11.0-IV0`, `0.11.0-IV1`, `0.11.0-IV2`, `0.8.0`, `0.8.1`, `0.8.2`, `0.9.0`, `1.0`, `1.0-IV0`, `1.1`, `1.1-IV0`, `2.0`, `2.0-IV0`, `2.0-IV1`, `2.1`, `2.1-IV0`, `2.1-IV1`, `2.1-IV2`, `2.2`, `2.2-IV0`, `2.2-IV1`, `2.3`, `2.3-IV0`, `2.3-IV1`, `2.4`, `2.4-IV0`, `2.4-IV1`, `2.5`, `2.5-IV0`, `2.6`, `2.6-IV0`, `2.7`, `2.7-IV0`, `2.7-IV1`, `2.7-IV2`, `2.8`, `2.8-IV0`, `2.8-IV1`, `3.0`, `3.0-IV0`, `3.0-IV1`, `3.1`, `3.1-IV0`, `3.2`, `3.2-IV0`, `3.3`, `3.3-IV0`, `3.3-IV1`, `3.3-IV2`, `3.3-IV3`, `3.4`, `3.4-IV0`, `3.5`, `3.5-IV0`, `3.5-IV1`, `3.5-IV2`, `3.6`, `3.6-IV0`, `3.6-IV1`, `3.6-IV2`, `3.7`, `3.7-IV0`, `3.7-IV1`, `3.7-IV2`, `3.7-IV3`, `3.7-IV4`, `3.8`, `3.8-IV0`, `3.9`, `3.9-IV0`, `3.9-IV1`, `4.0`, `4.0-IV0`, `4.1` and `4.1-IV0`.
+        :param _builtins.str message_format_version: Specify the message format version the broker will use to append messages to the logs. The value should be a valid ApiVersion. Some examples are: 0.8.2, 0.9.0.0, 0.10.0, check ApiVersion for more details. By setting a particular message format version, the user is certifying that all the existing messages on disk are smaller or equal than the specified version. Setting this value incorrectly will cause consumers with older versions to break as they will receive messages with a format that they don't understand. Deprecated in Kafka 4.0+: this configuration is removed and any supplied value will be ignored; for services upgraded to 4.0+, the returned value may be 'None'. The possible values are `0.10.0`, `0.10.0-IV0`, `0.10.0-IV1`, `0.10.1`, `0.10.1-IV0`, `0.10.1-IV1`, `0.10.1-IV2`, `0.10.2`, `0.10.2-IV0`, `0.11.0`, `0.11.0-IV0`, `0.11.0-IV1`, `0.11.0-IV2`, `0.8.0`, `0.8.1`, `0.8.2`, `0.9.0`, `1.0`, `1.0-IV0`, `1.1`, `1.1-IV0`, `2.0`, `2.0-IV0`, `2.0-IV1`, `2.1`, `2.1-IV0`, `2.1-IV1`, `2.1-IV2`, `2.2`, `2.2-IV0`, `2.2-IV1`, `2.3`, `2.3-IV0`, `2.3-IV1`, `2.4`, `2.4-IV0`, `2.4-IV1`, `2.5`, `2.5-IV0`, `2.6`, `2.6-IV0`, `2.7`, `2.7-IV0`, `2.7-IV1`, `2.7-IV2`, `2.8`, `2.8-IV0`, `2.8-IV1`, `3.0`, `3.0-IV0`, `3.0-IV1`, `3.1`, `3.1-IV0`, `3.2`, `3.2-IV0`, `3.3`, `3.3-IV0`, `3.3-IV1`, `3.3-IV2`, `3.3-IV3`, `3.4`, `3.4-IV0`, `3.5`, `3.5-IV0`, `3.5-IV1`, `3.5-IV2`, `3.6`, `3.6-IV0`, `3.6-IV1`, `3.6-IV2`, `3.7`, `3.7-IV0`, `3.7-IV1`, `3.7-IV2`, `3.7-IV3`, `3.7-IV4`, `3.8`, `3.8-IV0`, `3.9`, `3.9-IV0`, `3.9-IV1`, `4.0`, `4.0-IV0`, `4.1` and `4.1-IV0`.
         :param _builtins.str message_timestamp_difference_max_ms: The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message. If message.timestamp.type=CreateTime, a message will be rejected if the difference in timestamp exceeds this threshold. This configuration is ignored if message.timestamp.type=LogAppendTime.
         :param _builtins.str message_timestamp_type: Define whether the timestamp in the message is message create time or log append time. The possible values are `CreateTime` and `LogAppendTime`.
         :param _builtins.float min_cleanable_dirty_ratio: This configuration controls how frequently the log compactor will attempt to clean the log (assuming log compaction is enabled). By default we will avoid cleaning a log where more than 50% of the log has been compacted. This ratio bounds the maximum space wasted in the log by duplicates (at 50% at most 50% of the log could be duplicates). A higher ratio will mean fewer, more efficient cleanings but will mean more wasted space in the log. If the max.compaction.lag.ms or the min.compaction.lag.ms configurations are also specified, then the log compactor considers the log to be eligible for compaction as soon as either: (i) the dirty ratio threshold has been met and the log has had dirty (uncompacted) records for at least the min.compaction.lag.ms duration, or (ii) if the log has had dirty (uncompacted) records for at most the max.compaction.lag.ms period.
@@ -42254,7 +42580,7 @@ class GetKafkaTopicConfigResult(dict):
     @pulumi.getter(name="messageFormatVersion")
     def message_format_version(self) -> Optional[_builtins.str]:
         """
-        Specify the message format version the broker will use to append messages to the logs. The value should be a valid ApiVersion. Some examples are: 0.8.2, 0.9.0.0, 0.10.0, check ApiVersion for more details. By setting a particular message format version, the user is certifying that all the existing messages on disk are smaller or equal than the specified version. Setting this value incorrectly will cause consumers with older versions to break as they will receive messages with a format that they don't understand. The possible values are `0.10.0`, `0.10.0-IV0`, `0.10.0-IV1`, `0.10.1`, `0.10.1-IV0`, `0.10.1-IV1`, `0.10.1-IV2`, `0.10.2`, `0.10.2-IV0`, `0.11.0`, `0.11.0-IV0`, `0.11.0-IV1`, `0.11.0-IV2`, `0.8.0`, `0.8.1`, `0.8.2`, `0.9.0`, `1.0`, `1.0-IV0`, `1.1`, `1.1-IV0`, `2.0`, `2.0-IV0`, `2.0-IV1`, `2.1`, `2.1-IV0`, `2.1-IV1`, `2.1-IV2`, `2.2`, `2.2-IV0`, `2.2-IV1`, `2.3`, `2.3-IV0`, `2.3-IV1`, `2.4`, `2.4-IV0`, `2.4-IV1`, `2.5`, `2.5-IV0`, `2.6`, `2.6-IV0`, `2.7`, `2.7-IV0`, `2.7-IV1`, `2.7-IV2`, `2.8`, `2.8-IV0`, `2.8-IV1`, `3.0`, `3.0-IV0`, `3.0-IV1`, `3.1`, `3.1-IV0`, `3.2`, `3.2-IV0`, `3.3`, `3.3-IV0`, `3.3-IV1`, `3.3-IV2`, `3.3-IV3`, `3.4`, `3.4-IV0`, `3.5`, `3.5-IV0`, `3.5-IV1`, `3.5-IV2`, `3.6`, `3.6-IV0`, `3.6-IV1`, `3.6-IV2`, `3.7`, `3.7-IV0`, `3.7-IV1`, `3.7-IV2`, `3.7-IV3`, `3.7-IV4`, `3.8`, `3.8-IV0`, `3.9`, `3.9-IV0`, `3.9-IV1`, `4.0`, `4.0-IV0`, `4.1` and `4.1-IV0`.
+        Specify the message format version the broker will use to append messages to the logs. The value should be a valid ApiVersion. Some examples are: 0.8.2, 0.9.0.0, 0.10.0, check ApiVersion for more details. By setting a particular message format version, the user is certifying that all the existing messages on disk are smaller or equal than the specified version. Setting this value incorrectly will cause consumers with older versions to break as they will receive messages with a format that they don't understand. Deprecated in Kafka 4.0+: this configuration is removed and any supplied value will be ignored; for services upgraded to 4.0+, the returned value may be 'None'. The possible values are `0.10.0`, `0.10.0-IV0`, `0.10.0-IV1`, `0.10.1`, `0.10.1-IV0`, `0.10.1-IV1`, `0.10.1-IV2`, `0.10.2`, `0.10.2-IV0`, `0.11.0`, `0.11.0-IV0`, `0.11.0-IV1`, `0.11.0-IV2`, `0.8.0`, `0.8.1`, `0.8.2`, `0.9.0`, `1.0`, `1.0-IV0`, `1.1`, `1.1-IV0`, `2.0`, `2.0-IV0`, `2.0-IV1`, `2.1`, `2.1-IV0`, `2.1-IV1`, `2.1-IV2`, `2.2`, `2.2-IV0`, `2.2-IV1`, `2.3`, `2.3-IV0`, `2.3-IV1`, `2.4`, `2.4-IV0`, `2.4-IV1`, `2.5`, `2.5-IV0`, `2.6`, `2.6-IV0`, `2.7`, `2.7-IV0`, `2.7-IV1`, `2.7-IV2`, `2.8`, `2.8-IV0`, `2.8-IV1`, `3.0`, `3.0-IV0`, `3.0-IV1`, `3.1`, `3.1-IV0`, `3.2`, `3.2-IV0`, `3.3`, `3.3-IV0`, `3.3-IV1`, `3.3-IV2`, `3.3-IV3`, `3.4`, `3.4-IV0`, `3.5`, `3.5-IV0`, `3.5-IV1`, `3.5-IV2`, `3.6`, `3.6-IV0`, `3.6-IV1`, `3.6-IV2`, `3.7`, `3.7-IV0`, `3.7-IV1`, `3.7-IV2`, `3.7-IV3`, `3.7-IV4`, `3.8`, `3.8-IV0`, `3.9`, `3.9-IV0`, `3.9-IV1`, `4.0`, `4.0-IV0`, `4.1` and `4.1-IV0`.
         """
         return pulumi.get(self, "message_format_version")

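The deprecation note added to `message_format_version` changes how consumers of the data source should treat the field: on services upgraded to Kafka 4.0+ it may come back as `None`. A small sketch, assuming the topic data source exposes the block as a single `config` attribute (names are placeholders):

import pulumi_aiven as aiven

topic = aiven.get_kafka_topic(project="my-project",
                              service_name="example-kafka",
                              topic_name="example-topic")

fmt = topic.config.message_format_version  # attribute path assumed
if fmt is None:
    print("message.format.version was removed in Kafka 4.0+; nothing to check")
else:
    print(f"topic still reports message format {fmt}")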
@@ -44453,8 +44779,8 @@ class GetMySqlMysqlUserConfigMysqlResult(dict):
         :param _builtins.int innodb_ft_min_token_size: Minimum length of words that are stored in an InnoDB FULLTEXT index. Changing this parameter will lead to a restart of the MySQL service. Example: `3`.
         :param _builtins.str innodb_ft_server_stopword_table: This option is used to specify your own InnoDB FULLTEXT index stopword list for all InnoDB tables. Example: `db_name/table_name`.
         :param _builtins.int innodb_lock_wait_timeout: The length of time in seconds an InnoDB transaction waits for a row lock before giving up. Default is 120. Example: `50`.
-        :param _builtins.int innodb_log_buffer_size: The size in bytes of the buffer that InnoDB uses to write to the log files on disk.
-        :param _builtins.int innodb_online_alter_log_max_size: The upper limit in bytes on the size of the temporary log files used during online DDL operations for InnoDB tables.
+        :param _builtins.int innodb_log_buffer_size: The size in bytes of the buffer that InnoDB uses to write to the log files on disk.
+        :param _builtins.int innodb_online_alter_log_max_size: The upper limit in bytes on the size of the temporary log files used during online DDL operations for InnoDB tables.
         :param _builtins.bool innodb_print_all_deadlocks: When enabled, information about all deadlocks in InnoDB user transactions is recorded in the error log. Disabled by default.
         :param _builtins.int innodb_read_io_threads: The number of I/O threads for read operations in InnoDB. Default is 4. Changing this parameter will lead to a restart of the MySQL service. Example: `10`.
         :param _builtins.bool innodb_rollback_on_timeout: When enabled a transaction timeout causes InnoDB to abort and roll back the entire transaction. Changing this parameter will lead to a restart of the MySQL service.
@@ -44463,9 +44789,9 @@ class GetMySqlMysqlUserConfigMysqlResult(dict):
         :param _builtins.int interactive_timeout: The number of seconds the server waits for activity on an interactive connection before closing it. Example: `3600`.
         :param _builtins.str internal_tmp_mem_storage_engine: Enum: `MEMORY`, `TempTable`. The storage engine for in-memory internal temporary tables.
         :param _builtins.str log_output: Enum: `INSIGHTS`, `INSIGHTS,TABLE`, `NONE`, `TABLE`. The slow log output destination when slow_query_log is ON. To enable MySQL AI Insights, choose INSIGHTS. To use MySQL AI Insights and the mysql.slow_log table at the same time, choose INSIGHTS,TABLE. To only use the mysql.slow_log table, choose TABLE. To silence slow logs, choose NONE.
-        :param _builtins.float long_query_time: The slow_query_logs work as SQL statements that take more than long_query_time seconds to execute. Example: `10
-        :param _builtins.int max_allowed_packet: Size of the largest message in bytes that can be received by the server. Default is 67108864 (64M).
-        :param _builtins.int max_heap_table_size: Limits the size of internal in-memory tables. Also set tmp_table_size. Default is 16777216 (16M).
+        :param _builtins.float long_query_time: The slow_query_logs work as SQL statements that take more than long_query_time seconds to execute. Example: `10`.
+        :param _builtins.int max_allowed_packet: Size of the largest message in bytes that can be received by the server. Default is 67108864 (64M).
+        :param _builtins.int max_heap_table_size: Limits the size of internal in-memory tables. Also set tmp_table_size. Default is 16777216 (16M).
         :param _builtins.int net_buffer_length: Start sizes of connection buffer and result buffer. Default is 16384 (16K). Changing this parameter will lead to a restart of the MySQL service. Example: `16384`.
         :param _builtins.int net_read_timeout: The number of seconds to wait for more data from a connection before aborting the read. Example: `30`.
         :param _builtins.int net_write_timeout: The number of seconds to wait for a block to be written to a connection before aborting the write. Example: `30`.
@@ -44473,7 +44799,7 @@ class GetMySqlMysqlUserConfigMysqlResult(dict):
         :param _builtins.int sort_buffer_size: Sort buffer size in bytes for ORDER BY optimization. Default is 262144 (256K). Example: `262144`.
         :param _builtins.str sql_mode: Global SQL mode. Set to empty to use MySQL server defaults. When creating a new service and not setting this field Aiven default SQL mode (strict, SQL standard compliant) will be assigned. Example: `ANSI,TRADITIONAL`.
         :param _builtins.bool sql_require_primary_key: Require primary key to be defined for new tables or old tables modified with ALTER TABLE and fail if missing. It is recommended to always have primary keys because various functionality may break if any large table is missing them.
-        :param _builtins.int tmp_table_size: Limits the size of internal in-memory tables. Also set max_heap_table_size. Default is 16777216 (16M).
+        :param _builtins.int tmp_table_size: Limits the size of internal in-memory tables. Also set max_heap_table_size. Default is 16777216 (16M).
         :param _builtins.int wait_timeout: The number of seconds the server waits for activity on a noninteractive connection before closing it. Example: `28800`.
         """
         if connect_timeout is not None:
@@ -44615,7 +44941,7 @@ class GetMySqlMysqlUserConfigMysqlResult(dict):
     @pulumi.getter(name="innodbLogBufferSize")
     def innodb_log_buffer_size(self) -> Optional[_builtins.int]:
         """
-        The size in bytes of the buffer that InnoDB uses to write to the log files on disk.
+        The size in bytes of the buffer that InnoDB uses to write to the log files on disk.
         """
         return pulumi.get(self, "innodb_log_buffer_size")

@@ -44623,7 +44949,7 @@ class GetMySqlMysqlUserConfigMysqlResult(dict):
     @pulumi.getter(name="innodbOnlineAlterLogMaxSize")
     def innodb_online_alter_log_max_size(self) -> Optional[_builtins.int]:
         """
-        The upper limit in bytes on the size of the temporary log files used during online DDL operations for InnoDB tables.
+        The upper limit in bytes on the size of the temporary log files used during online DDL operations for InnoDB tables.
         """
         return pulumi.get(self, "innodb_online_alter_log_max_size")

@@ -44695,7 +45021,7 @@ class GetMySqlMysqlUserConfigMysqlResult(dict):
     @pulumi.getter(name="longQueryTime")
     def long_query_time(self) -> Optional[_builtins.float]:
         """
-        The slow_query_logs work as SQL statements that take more than long_query_time seconds to execute. Example: `10
+        The slow_query_logs work as SQL statements that take more than long_query_time seconds to execute. Example: `10`.
         """
         return pulumi.get(self, "long_query_time")

@@ -44703,7 +45029,7 @@ class GetMySqlMysqlUserConfigMysqlResult(dict):
     @pulumi.getter(name="maxAllowedPacket")
     def max_allowed_packet(self) -> Optional[_builtins.int]:
         """
-        Size of the largest message in bytes that can be received by the server. Default is 67108864 (64M).
+        Size of the largest message in bytes that can be received by the server. Default is 67108864 (64M).
         """
         return pulumi.get(self, "max_allowed_packet")

@@ -44711,7 +45037,7 @@ class GetMySqlMysqlUserConfigMysqlResult(dict):
     @pulumi.getter(name="maxHeapTableSize")
     def max_heap_table_size(self) -> Optional[_builtins.int]:
         """
-        Limits the size of internal in-memory tables. Also set tmp_table_size. Default is 16777216 (16M).
+        Limits the size of internal in-memory tables. Also set tmp_table_size. Default is 16777216 (16M).
         """
         return pulumi.get(self, "max_heap_table_size")

@@ -44775,7 +45101,7 @@ class GetMySqlMysqlUserConfigMysqlResult(dict):
     @pulumi.getter(name="tmpTableSize")
     def tmp_table_size(self) -> Optional[_builtins.int]:
         """
-        Limits the size of internal in-memory tables. Also set max_heap_table_size. Default is 16777216 (16M).
+        Limits the size of internal in-memory tables. Also set max_heap_table_size. Default is 16777216 (16M).
         """
         return pulumi.get(self, "tmp_table_size")

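The MySQL doc fixes above mostly repair truncated examples, but they also restate a coupling worth encoding: `max_heap_table_size` and `tmp_table_size` bound the same in-memory tables and should be raised together. A sketch with placeholder project, cloud, and plan values:

import pulumi_aiven as aiven

mysql = aiven.MySql(
    "example-mysql",
    project="my-project",
    cloud_name="google-europe-west1",
    plan="business-4",
    service_name="example-mysql",
    mysql_user_config=aiven.MySqlMysqlUserConfigArgs(
        mysql=aiven.MySqlMysqlUserConfigMysqlArgs(
            # Keep the two limits in lockstep: the smaller one wins.
            max_heap_table_size=33554432,  # 32M
            tmp_table_size=33554432,       # 32M
            long_query_time=10,            # log statements slower than 10s
        ),
    ),
)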
@@ -45198,6 +45524,7 @@ class GetOpenSearchOpensearchUserConfigResult(dict):
                 additional_backup_regions: Optional[_builtins.str] = None,
                 azure_migration: Optional['outputs.GetOpenSearchOpensearchUserConfigAzureMigrationResult'] = None,
                 custom_domain: Optional[_builtins.str] = None,
+                 custom_keystores: Optional[Sequence['outputs.GetOpenSearchOpensearchUserConfigCustomKeystoreResult']] = None,
                 disable_replication_factor_adjustment: Optional[_builtins.bool] = None,
                 gcs_migration: Optional['outputs.GetOpenSearchOpensearchUserConfigGcsMigrationResult'] = None,
                 index_patterns: Optional[Sequence['outputs.GetOpenSearchOpensearchUserConfigIndexPatternResult']] = None,
@@ -45226,6 +45553,7 @@ class GetOpenSearchOpensearchUserConfigResult(dict):
         :param _builtins.str additional_backup_regions: Additional Cloud Regions for Backup Replication.
         :param 'GetOpenSearchOpensearchUserConfigAzureMigrationArgs' azure_migration: Azure migration settings
         :param _builtins.str custom_domain: Serve the web frontend using a custom CNAME pointing to the Aiven DNS name. Example: `grafana.example.org`.
+        :param Sequence['GetOpenSearchOpensearchUserConfigCustomKeystoreArgs'] custom_keystores: Allow to register custom keystores in OpenSearch
         :param _builtins.bool disable_replication_factor_adjustment: Disable automatic replication factor adjustment for multi-node services. By default, Aiven ensures all indexes are replicated at least to two nodes. Note: Due to potential data loss in case of losing a service node, this setting can not be activated unless specifically allowed for the project.
         :param 'GetOpenSearchOpensearchUserConfigGcsMigrationArgs' gcs_migration: Google Cloud Storage migration settings
         :param Sequence['GetOpenSearchOpensearchUserConfigIndexPatternArgs'] index_patterns: Index patterns
@@ -45239,7 +45567,7 @@ class GetOpenSearchOpensearchUserConfigResult(dict):
         :param 'GetOpenSearchOpensearchUserConfigOpenidArgs' openid: OpenSearch OpenID Connect Configuration
         :param 'GetOpenSearchOpensearchUserConfigOpensearchArgs' opensearch: OpenSearch settings
         :param 'GetOpenSearchOpensearchUserConfigOpensearchDashboardsArgs' opensearch_dashboards: OpenSearch Dashboards settings
-        :param _builtins.str opensearch_version: Enum: `1`, `2`, and newer. OpenSearch
+        :param _builtins.str opensearch_version: Enum: `1`, `2`, `2.19`, and newer. OpenSearch version.
         :param 'GetOpenSearchOpensearchUserConfigPrivateAccessArgs' private_access: Allow access to selected service ports from private networks
         :param 'GetOpenSearchOpensearchUserConfigPrivatelinkAccessArgs' privatelink_access: Allow access to selected service components through Privatelink
         :param _builtins.str project_to_fork_from: Name of another project to fork a service from. This has effect only when a new service is being created. Example: `anotherprojectname`.
@@ -45257,6 +45585,8 @@ class GetOpenSearchOpensearchUserConfigResult(dict):
             pulumi.set(__self__, "azure_migration", azure_migration)
         if custom_domain is not None:
             pulumi.set(__self__, "custom_domain", custom_domain)
+        if custom_keystores is not None:
+            pulumi.set(__self__, "custom_keystores", custom_keystores)
         if disable_replication_factor_adjustment is not None:
             pulumi.set(__self__, "disable_replication_factor_adjustment", disable_replication_factor_adjustment)
         if gcs_migration is not None:
@@ -45330,6 +45660,14 @@ class GetOpenSearchOpensearchUserConfigResult(dict):
         """
         return pulumi.get(self, "custom_domain")

+    @_builtins.property
+    @pulumi.getter(name="customKeystores")
+    def custom_keystores(self) -> Optional[Sequence['outputs.GetOpenSearchOpensearchUserConfigCustomKeystoreResult']]:
+        """
+        Allow to register custom keystores in OpenSearch
+        """
+        return pulumi.get(self, "custom_keystores")
+
     @_builtins.property
     @pulumi.getter(name="disableReplicationFactorAdjustment")
     def disable_replication_factor_adjustment(self) -> Optional[_builtins.bool]:
@@ -45439,7 +45777,7 @@ class GetOpenSearchOpensearchUserConfigResult(dict):
     @pulumi.getter(name="opensearchVersion")
     def opensearch_version(self) -> Optional[_builtins.str]:
         """
-        Enum: `1`, `2`, and newer. OpenSearch
+        Enum: `1`, `2`, `2.19`, and newer. OpenSearch version.
         """
         return pulumi.get(self, "opensearch_version")

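`opensearch_version` now accepts a minor-version pin such as `2.19` alongside the old major-only values. Setting it on the resource side (project, cloud, and plan names are placeholders):

import pulumi_aiven as aiven

opensearch = aiven.OpenSearch(
    "example-opensearch",
    project="my-project",
    cloud_name="google-europe-west1",
    plan="business-4",
    service_name="example-opensearch",
    opensearch_user_config=aiven.OpenSearchOpensearchUserConfigArgs(
        opensearch_version="2.19",  # newly valid enum value per this release
    ),
)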
@@ -45682,6 +46020,31 @@ class GetOpenSearchOpensearchUserConfigAzureMigrationResult(dict):
         return pulumi.get(self, "sas_token")


+@pulumi.output_type
+class GetOpenSearchOpensearchUserConfigCustomKeystoreResult(dict):
+    def __init__(__self__, *,
+                 name: _builtins.str,
+                 type: _builtins.str):
+        """
+        :param _builtins.str type: Enum: `azure`, `gcs`, `s3`.
+        """
+        pulumi.set(__self__, "name", name)
+        pulumi.set(__self__, "type", type)
+
+    @_builtins.property
+    @pulumi.getter
+    def name(self) -> _builtins.str:
+        return pulumi.get(self, "name")
+
+    @_builtins.property
+    @pulumi.getter
+    def type(self) -> _builtins.str:
+        """
+        Enum: `azure`, `gcs`, `s3`.
+        """
+        return pulumi.get(self, "type")
+
+
 @pulumi.output_type
 class GetOpenSearchOpensearchUserConfigGcsMigrationResult(dict):
     def __init__(__self__, *,
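The new `GetOpenSearchOpensearchUserConfigCustomKeystoreResult` carries just a keystore `name` and a `type` restricted to `azure`/`gcs`/`s3`. A sketch of enumerating the registered keystores from the data source, reading the `custom_keystores` list surfaced by the getter added above:

import pulumi
import pulumi_aiven as aiven

os_service = aiven.get_open_search(project="my-project",
                                   service_name="example-opensearch")

cfg = os_service.opensearch_user_configs[0]
for ks in (cfg.custom_keystores or []):  # list is optional; may be absent
    pulumi.export(f"keystore-{ks.name}", ks.type)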
@@ -46223,11 +46586,11 @@ class GetOpenSearchOpensearchUserConfigOpensearchResult(dict):
         :param _builtins.bool ism_enabled: Specifies whether ISM is enabled or not.
         :param _builtins.bool ism_history_enabled: Specifies whether audit history is enabled or not. The logs from ISM are automatically indexed to a logs document.
         :param _builtins.int ism_history_max_age: The maximum age before rolling over the audit history index in hours. Example: `24`.
-        :param _builtins.int ism_history_max_docs: The maximum number of documents before rolling over the audit history index.
+        :param _builtins.int ism_history_max_docs: The maximum number of documents before rolling over the audit history index.
         :param _builtins.int ism_history_rollover_check_period: The time between rollover checks for the audit history index in hours. Example: `8`.
         :param _builtins.int ism_history_rollover_retention_period: How long audit history indices are kept in days. Example: `30`.
         :param _builtins.bool knn_memory_circuit_breaker_enabled: Enable or disable KNN memory circuit breaker. Defaults to true.
-        :param _builtins.int knn_memory_circuit_breaker_limit: Maximum amount of memory that can be used for KNN index. Defaults to 50% of the JVM heap size.
+        :param _builtins.int knn_memory_circuit_breaker_limit: Maximum amount of memory in percentage that can be used for the KNN index. Defaults to 50% of the JVM heap size. 0 is used to set it to null which can be used to invalidate caches.
         :param _builtins.str node_search_cache_size: Defines a limit of how much total remote data can be referenced as a ratio of the size of the disk reserved for the file cache. This is designed to be a safeguard to prevent oversubscribing a cluster. Defaults to 5gb. Requires restarting all OpenSearch nodes.
         :param _builtins.bool override_main_response_version: Compatibility mode sets OpenSearch to report its version as 7.10 so clients continue to work. Default is false.
         :param _builtins.bool plugins_alerting_filter_by_backend_roles: Enable or disable filtering of alerting by backend roles. Requires Security plugin. Defaults to false.
@@ -46612,7 +46975,7 @@ class GetOpenSearchOpensearchUserConfigOpensearchResult(dict):
     @pulumi.getter(name="ismHistoryMaxDocs")
     def ism_history_max_docs(self) -> Optional[_builtins.int]:
         """
-        The maximum number of documents before rolling over the audit history index.
+        The maximum number of documents before rolling over the audit history index.
         """
         return pulumi.get(self, "ism_history_max_docs")

@@ -46644,7 +47007,7 @@ class GetOpenSearchOpensearchUserConfigOpensearchResult(dict):
     @pulumi.getter(name="knnMemoryCircuitBreakerLimit")
     def knn_memory_circuit_breaker_limit(self) -> Optional[_builtins.int]:
         """
-        Maximum amount of memory that can be used for KNN index. Defaults to 50% of the JVM heap size.
+        Maximum amount of memory in percentage that can be used for the KNN index. Defaults to 50% of the JVM heap size. 0 is used to set it to null which can be used to invalidate caches.
         """
         return pulumi.get(self, "knn_memory_circuit_breaker_limit")

@@ -48501,6 +48864,25 @@ class GetOrganizationAddressTimeoutsResult(dict):
         return pulumi.get(self, "read")


+@pulumi.output_type
+class GetOrganizationApplicationUserTimeoutsResult(dict):
+    def __init__(__self__, *,
+                 read: Optional[_builtins.str] = None):
+        """
+        :param _builtins.str read: A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours).
+        """
+        if read is not None:
+            pulumi.set(__self__, "read", read)
+
+    @_builtins.property
+    @pulumi.getter
+    def read(self) -> Optional[_builtins.str]:
+        """
+        A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours).
+        """
+        return pulumi.get(self, "read")
+
+
 @pulumi.output_type
 class GetOrganizationBillingGroupListBillingGroupResult(dict):
     def __init__(__self__, *,
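`GetOrganizationApplicationUserTimeoutsResult` mirrors a Terraform-style `timeouts` block with a single optional `read` duration in Go `time.ParseDuration` syntax. A hedged sketch; whether the invoke accepts a `timeouts` argument, and the exact ID formats, are assumptions rather than verified schema facts:

import pulumi_aiven as aiven

app_user = aiven.get_organization_application_user(
    organization_id="org1a2b3c4d5e6",  # placeholder
    user_id="u1a2b3c4d5e6",            # placeholder
    timeouts={"read": "2m30s"},        # parsed per Go time.ParseDuration
)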
@@ -49855,7 +50237,7 @@ class GetPgPgUserConfigPgResult(dict):
|
|
|
49855
50237
|
"""
|
|
49856
50238
|
:param _builtins.float autovacuum_analyze_scale_factor: Specifies a fraction of the table size to add to autovacuum_analyze_threshold when deciding whether to trigger an ANALYZE (e.g. `0.2` for 20% of the table size). The default is `0.2`.
|
|
49857
50239
|
:param _builtins.int autovacuum_analyze_threshold: Specifies the minimum number of inserted, updated or deleted tuples needed to trigger an ANALYZE in any one table. The default is `50`.
|
|
49858
|
-
:param _builtins.int autovacuum_freeze_max_age: Specifies the maximum age (in transactions) that a table's pg_class.relfrozenxid field can attain before a VACUUM operation is forced to prevent transaction ID wraparound within the table. The system launches autovacuum processes to prevent wraparound even when autovacuum is otherwise disabled. Changing this parameter causes a service restart.
|
|
50240
|
+
:param _builtins.int autovacuum_freeze_max_age: Specifies the maximum age (in transactions) that a table's pg_class.relfrozenxid field can attain before a VACUUM operation is forced to prevent transaction ID wraparound within the table. The system launches autovacuum processes to prevent wraparound even when autovacuum is otherwise disabled. Changing this parameter causes a service restart.
|
|
49859
50241
|
:param _builtins.int autovacuum_max_workers: Specifies the maximum number of autovacuum processes (other than the autovacuum launcher) that may be running at any one time. The default is `3`. Changing this parameter causes a service restart.
|
|
49860
50242
|
:param _builtins.int autovacuum_naptime: Specifies the minimum delay between autovacuum runs on any given database. The delay is measured in seconds. The default is `60`.
|
|
49861
50243
|
:param _builtins.int autovacuum_vacuum_cost_delay: Specifies the cost delay value that will be used in automatic VACUUM operations. If `-1` is specified, the regular vacuum_cost_delay value will be used. The default is `2` (upstream default).
|
|
@@ -49865,7 +50247,7 @@ class GetPgPgUserConfigPgResult(dict):
|
|
|
49865
50247
|
:param _builtins.int bgwriter_delay: Specifies the delay between activity rounds for the background writer in milliseconds. The default is `200`. Example: `200`.
|
|
49866
50248
|
:param _builtins.int bgwriter_flush_after: Whenever more than bgwriter_flush_after bytes have been written by the background writer, attempt to force the OS to issue these writes to the underlying storage. Specified in kilobytes. Setting of 0 disables forced writeback. The default is `512`. Example: `512`.
|
|
49867
50249
|
:param _builtins.int bgwriter_lru_maxpages: In each round, no more than this many buffers will be written by the background writer. Setting this to zero disables background writing. The default is `100`. Example: `100`.
|
|
49868
|
-
:param _builtins.float bgwriter_lru_multiplier: The average recent need for new buffers is multiplied by bgwriter_lru_multiplier to arrive at an estimate of the number that will be needed during the next round, (up to bgwriter_lru_maxpages). 1.0 represents a “just in time” policy of writing exactly the number of buffers predicted to be needed. Larger values provide some cushion against spikes in demand, while smaller values intentionally leave writes to be done by server processes. The default is `2.0`. Example: `2
|
|
50250
|
+
:param _builtins.float bgwriter_lru_multiplier: The average recent need for new buffers is multiplied by bgwriter_lru_multiplier to arrive at an estimate of the number that will be needed during the next round, (up to bgwriter_lru_maxpages). 1.0 represents a “just in time” policy of writing exactly the number of buffers predicted to be needed. Larger values provide some cushion against spikes in demand, while smaller values intentionally leave writes to be done by server processes. The default is `2.0`. Example: `2`.
|
|
49869
50251
|
:param _builtins.int deadlock_timeout: This is the amount of time, in milliseconds, to wait on a lock before checking to see if there is a deadlock condition. The default is `1000` (upstream default). Example: `1000`.
|
|
49870
50252
|
:param _builtins.str default_toast_compression: Enum: `lz4`, `pglz`. Specifies the default TOAST compression method for values of compressible columns. The default is `lz4`. Only available for PostgreSQL 14+.
|
|
49871
50253
|
:param _builtins.int idle_in_transaction_session_timeout: Time out sessions with open transactions after this number of milliseconds.
|
|
@@ -49897,7 +50279,7 @@ class GetPgPgUserConfigPgResult(dict):
         :param _builtins.bool pg_stat_monitor_dot_pgsm_enable_query_plan: Enables or disables query plan monitoring. Changing this parameter causes a service restart. Only available for PostgreSQL 13+.
         :param _builtins.int pg_stat_monitor_dot_pgsm_max_buckets: Sets the maximum number of buckets. Changing this parameter causes a service restart. Only available for PostgreSQL 13+. Example: `10`.
         :param _builtins.str pg_stat_statements_dot_track: Enum: `all`, `none`, `top`. Controls which statements are counted. Specify top to track top-level statements (those issued directly by clients), all to also track nested statements (such as statements invoked within functions), or none to disable statement statistics collection. The default is `top`.
-        :param _builtins.int temp_file_limit: PostgreSQL temporary file limit in KiB, -1 for unlimited.
+        :param _builtins.int temp_file_limit: PostgreSQL temporary file limit in KiB, -1 for unlimited.
         :param _builtins.str timezone: PostgreSQL service timezone. Example: `Europe/Helsinki`.
         :param _builtins.int track_activity_query_size: Specifies the number of bytes reserved to track the currently executing command for each active session. Changing this parameter causes a service restart. Example: `1024`.
         :param _builtins.str track_commit_timestamp: Enum: `off`, `on`. Record commit time of transactions. Changing this parameter causes a service restart.
@@ -50031,7 +50413,7 @@ class GetPgPgUserConfigPgResult(dict):
     @pulumi.getter(name="autovacuumFreezeMaxAge")
     def autovacuum_freeze_max_age(self) -> Optional[_builtins.int]:
         """
-        Specifies the maximum age (in transactions) that a table's pg_class.relfrozenxid field can attain before a VACUUM operation is forced to prevent transaction ID wraparound within the table. The system launches autovacuum processes to prevent wraparound even when autovacuum is otherwise disabled. Changing this parameter causes a service restart.
+        Specifies the maximum age (in transactions) that a table's pg_class.relfrozenxid field can attain before a VACUUM operation is forced to prevent transaction ID wraparound within the table. The system launches autovacuum processes to prevent wraparound even when autovacuum is otherwise disabled. Changing this parameter causes a service restart.
         """
         return pulumi.get(self, "autovacuum_freeze_max_age")

@@ -50111,7 +50493,7 @@ class GetPgPgUserConfigPgResult(dict):
     @pulumi.getter(name="bgwriterLruMultiplier")
     def bgwriter_lru_multiplier(self) -> Optional[_builtins.float]:
         """
-        The average recent need for new buffers is multiplied by bgwriter_lru_multiplier to arrive at an estimate of the number that will be needed during the next round, (up to bgwriter_lru_maxpages). 1.0 represents a “just in time” policy of writing exactly the number of buffers predicted to be needed. Larger values provide some cushion against spikes in demand, while smaller values intentionally leave writes to be done by server processes. The default is `2.0`. Example: `2
+        The average recent need for new buffers is multiplied by bgwriter_lru_multiplier to arrive at an estimate of the number that will be needed during the next round, (up to bgwriter_lru_maxpages). 1.0 represents a “just in time” policy of writing exactly the number of buffers predicted to be needed. Larger values provide some cushion against spikes in demand, while smaller values intentionally leave writes to be done by server processes. The default is `2.0`. Example: `2`.
         """
         return pulumi.get(self, "bgwriter_lru_multiplier")

@@ -50367,7 +50749,7 @@ class GetPgPgUserConfigPgResult(dict):
     @pulumi.getter(name="tempFileLimit")
     def temp_file_limit(self) -> Optional[_builtins.int]:
         """
-        PostgreSQL temporary file limit in KiB, -1 for unlimited.
+        PostgreSQL temporary file limit in KiB, -1 for unlimited.
         """
         return pulumi.get(self, "temp_file_limit")

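The PG hunks above repair truncated `Example` values in the user config docstrings; the settings themselves are unchanged. A minimal sketch of how these documented fields are applied through the provider — project, cloud, plan, and service names are placeholders, and the plain-dict user config assumes a pulumi-aiven release that accepts dict-shaped inputs, as recent ones do:

```python
import pulumi_aiven as aiven

# Minimal sketch: tune the PG settings documented above on a managed
# PostgreSQL service. All names here are placeholders.
pg = aiven.Pg(
    "example-pg",
    project="my-project",
    cloud_name="google-europe-west1",
    plan="startup-4",
    service_name="example-pg",
    pg_user_config={
        "pg": {
            "autovacuum_max_workers": 3,     # changing this restarts the service
            "bgwriter_lru_multiplier": 2.0,  # the "just in time" buffer policy default
            "temp_file_limit": -1,           # -1 = unlimited; otherwise a KiB limit
        },
    },
)
```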
@@ -51917,7 +52299,7 @@ class GetServiceIntegrationClickhouseKafkaUserConfigResult(dict):
     def __init__(__self__, *,
                  tables: Optional[Sequence['outputs.GetServiceIntegrationClickhouseKafkaUserConfigTableResult']] = None):
         """
-        :param Sequence['GetServiceIntegrationClickhouseKafkaUserConfigTableArgs'] tables:
+        :param Sequence['GetServiceIntegrationClickhouseKafkaUserConfigTableArgs'] tables: Array of table configurations that define how Kafka topics are mapped to ClickHouse tables. Each table configuration specifies the table structure, associated Kafka topics, and read/write settings
         """
         if tables is not None:
             pulumi.set(__self__, "tables", tables)
@@ -51926,7 +52308,7 @@ class GetServiceIntegrationClickhouseKafkaUserConfigResult(dict):
     @pulumi.getter
     def tables(self) -> Optional[Sequence['outputs.GetServiceIntegrationClickhouseKafkaUserConfigTableResult']]:
         """
-
+        Array of table configurations that define how Kafka topics are mapped to ClickHouse tables. Each table configuration specifies the table structure, associated Kafka topics, and read/write settings
         """
         return pulumi.get(self, "tables")

@@ -51958,29 +52340,29 @@ class GetServiceIntegrationClickhouseKafkaUserConfigTableResult(dict):
                  skip_broken_messages: Optional[_builtins.int] = None,
                  thread_per_consumer: Optional[_builtins.bool] = None):
         """
-        :param Sequence['GetServiceIntegrationClickhouseKafkaUserConfigTableColumnArgs'] columns:
-        :param _builtins.str data_format: Enum: `Avro`, `AvroConfluent`, `CSV`, `JSONAsString`, `JSONCompactEachRow`, `JSONCompactStringsEachRow`, `JSONEachRow`, `JSONStringsEachRow`, `MsgPack`, `Parquet`, `RawBLOB`, `TSKV`, `TSV`, `TabSeparated`.
-        :param _builtins.str group_name: Kafka consumers group. Default: `clickhouse`.
-        :param _builtins.str name:
-        :param Sequence['GetServiceIntegrationClickhouseKafkaUserConfigTableTopicArgs'] topics: Kafka topics
-        :param _builtins.str auto_offset_reset: Enum: `beginning`, `earliest`, `end`, `largest`, `latest`, `smallest`.
-        :param _builtins.str date_time_input_format: Enum: `basic`, `best_effort`, `best_effort_us`.
-        :param _builtins.str handle_error_mode: Enum: `default`, `stream`.
-        :param _builtins.int max_block_size:
-        :param _builtins.int max_rows_per_message:
-        :param _builtins.int num_consumers:
-        :param _builtins.int poll_max_batch_size: Maximum
+        :param Sequence['GetServiceIntegrationClickhouseKafkaUserConfigTableColumnArgs'] columns: Array of column definitions that specify the structure of the ClickHouse table. Each column maps to a field in the Kafka messages
+        :param _builtins.str data_format: Enum: `Avro`, `AvroConfluent`, `CSV`, `JSONAsString`, `JSONCompactEachRow`, `JSONCompactStringsEachRow`, `JSONEachRow`, `JSONStringsEachRow`, `MsgPack`, `Parquet`, `RawBLOB`, `TSKV`, `TSV`, `TabSeparated`. The format of the messages in the Kafka topics. Determines how ClickHouse parses and serializes the data (e.g., JSON, CSV, Avro). Default: `JSONEachRow`.
+        :param _builtins.str group_name: The Kafka consumer group name. Multiple consumers with the same group name will share the workload and maintain offset positions. Default: `clickhouse`.
+        :param _builtins.str name: The name of the ClickHouse table to be created. This table can consume data from and write data to the specified Kafka topics. Example: `events`.
+        :param Sequence['GetServiceIntegrationClickhouseKafkaUserConfigTableTopicArgs'] topics: Array of Kafka topics that this table will read data from or write data to. Messages from all specified topics will be inserted into this table, and data inserted into this table will be published to the topics
+        :param _builtins.str auto_offset_reset: Enum: `beginning`, `earliest`, `end`, `largest`, `latest`, `smallest`. Determines where to start reading from Kafka when no offset is stored or the stored offset is out of range. `earliest` starts from the beginning, `latest` starts from the end. Default: `earliest`.
+        :param _builtins.str date_time_input_format: Enum: `basic`, `best_effort`, `best_effort_us`. Specifies how ClickHouse should parse DateTime values from text-based input formats. `basic` uses simple parsing, `best_effort` attempts more flexible parsing. Default: `basic`.
+        :param _builtins.str handle_error_mode: Enum: `default`, `stream`. Defines how ClickHouse should handle errors when processing Kafka messages. `default` stops on errors, `stream` continues processing and logs errors. Default: `default`.
+        :param _builtins.int max_block_size: Maximum number of rows to collect before flushing data between Kafka and ClickHouse. Default: `0`.
+        :param _builtins.int max_rows_per_message: Maximum number of rows that can be processed from a single Kafka message for row-based formats. Useful for controlling memory usage. Default: `1`.
+        :param _builtins.int num_consumers: Number of Kafka consumers to run per table per replica. Increasing this can improve throughput but may increase resource usage. Default: `1`.
+        :param _builtins.int poll_max_batch_size: Maximum number of messages to fetch in a single Kafka poll operation for reading. Default: `0`.
         :param _builtins.int poll_max_timeout_ms: Timeout in milliseconds for a single poll from Kafka. Takes the value of the stream_flush_interval_ms server setting by default (500ms). Default: `0`.
         :param _builtins.int producer_batch_num_messages: The maximum number of messages in a batch sent to Kafka. If the number of messages exceeds this value, the batch is sent. Default: `10000`.
-        :param _builtins.int producer_batch_size: The maximum size in bytes of a batch of messages sent to Kafka. If the batch size is exceeded, the batch is sent.
+        :param _builtins.int producer_batch_size: The maximum size in bytes of a batch of messages sent to Kafka. If the batch size is exceeded, the batch is sent.
         :param _builtins.str producer_compression_codec: Enum: `gzip`, `lz4`, `none`, `snappy`, `zstd`. The compression codec to use when sending a batch of messages to Kafka. Default: `none`.
         :param _builtins.int producer_compression_level: The compression level to use when sending a batch of messages to Kafka. Usable range is algorithm-dependent: [0-9] for gzip; [0-12] for lz4; only 0 for snappy; -1 = codec-dependent default compression level. Default: `-1`.
         :param _builtins.int producer_linger_ms: The time in milliseconds to wait for additional messages before sending a batch. If the time is exceeded, the batch is sent. Default: `5`.
-        :param _builtins.int producer_queue_buffering_max_kbytes: The maximum size of the buffer in kilobytes before sending.
+        :param _builtins.int producer_queue_buffering_max_kbytes: The maximum size of the buffer in kilobytes before sending.
         :param _builtins.int producer_queue_buffering_max_messages: The maximum number of messages to buffer before sending. Default: `100000`.
         :param _builtins.int producer_request_required_acks: The number of acknowledgements the leader broker must receive from ISR brokers before responding to the request: 0=Broker does not send any response/ack to client, -1 will block until message is committed by all in sync replicas (ISRs). Default: `-1`.
-        :param _builtins.int skip_broken_messages:
-        :param _builtins.bool thread_per_consumer:
+        :param _builtins.int skip_broken_messages: Number of broken messages to skip before stopping processing when reading from Kafka. Useful for handling corrupted data without failing the entire integration. Default: `0`.
+        :param _builtins.bool thread_per_consumer: When enabled, each consumer runs in its own thread, providing better isolation and potentially better performance for high-throughput scenarios. Default: `false`.
         """
         pulumi.set(__self__, "columns", columns)
         pulumi.set(__self__, "data_format", data_format)
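The newly documented table fields map one-to-one onto the `clickhouse_kafka_user_config` input of `aiven.ServiceIntegration`. A minimal sketch under that assumption, with placeholder project and service names:

```python
import pulumi_aiven as aiven

# Minimal sketch: map a Kafka topic onto a ClickHouse table using the
# table/column/topic fields documented above. All names are placeholders.
integration = aiven.ServiceIntegration(
    "clickhouse-kafka",
    project="my-project",
    integration_type="clickhouse_kafka",
    source_service_name="example-kafka",
    destination_service_name="example-clickhouse",
    clickhouse_kafka_user_config={
        "tables": [{
            "name": "events",                 # ClickHouse table to create
            "group_name": "clickhouse",       # consumers sharing this group share offsets
            "data_format": "JSONEachRow",     # how messages are parsed/serialized
            "auto_offset_reset": "earliest",  # read from the beginning when no offset is stored
            "columns": [
                {"name": "key", "type": "UInt64"},
                {"name": "value", "type": "String"},
            ],
            "topics": [{"name": "topic_name"}],
        }],
    },
)
```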
@@ -52028,7 +52410,7 @@ class GetServiceIntegrationClickhouseKafkaUserConfigTableResult(dict):
     @pulumi.getter
     def columns(self) -> Sequence['outputs.GetServiceIntegrationClickhouseKafkaUserConfigTableColumnResult']:
         """
-
+        Array of column definitions that specify the structure of the ClickHouse table. Each column maps to a field in the Kafka messages
         """
         return pulumi.get(self, "columns")

@@ -52036,7 +52418,7 @@ class GetServiceIntegrationClickhouseKafkaUserConfigTableResult(dict):
     @pulumi.getter(name="dataFormat")
     def data_format(self) -> _builtins.str:
         """
-        Enum: `Avro`, `AvroConfluent`, `CSV`, `JSONAsString`, `JSONCompactEachRow`, `JSONCompactStringsEachRow`, `JSONEachRow`, `JSONStringsEachRow`, `MsgPack`, `Parquet`, `RawBLOB`, `TSKV`, `TSV`, `TabSeparated`.
+        Enum: `Avro`, `AvroConfluent`, `CSV`, `JSONAsString`, `JSONCompactEachRow`, `JSONCompactStringsEachRow`, `JSONEachRow`, `JSONStringsEachRow`, `MsgPack`, `Parquet`, `RawBLOB`, `TSKV`, `TSV`, `TabSeparated`. The format of the messages in the Kafka topics. Determines how ClickHouse parses and serializes the data (e.g., JSON, CSV, Avro). Default: `JSONEachRow`.
         """
         return pulumi.get(self, "data_format")

@@ -52044,7 +52426,7 @@ class GetServiceIntegrationClickhouseKafkaUserConfigTableResult(dict):
     @pulumi.getter(name="groupName")
     def group_name(self) -> _builtins.str:
         """
-        Kafka consumers group. Default: `clickhouse`.
+        The Kafka consumer group name. Multiple consumers with the same group name will share the workload and maintain offset positions. Default: `clickhouse`.
         """
         return pulumi.get(self, "group_name")

@@ -52052,7 +52434,7 @@ class GetServiceIntegrationClickhouseKafkaUserConfigTableResult(dict):
     @pulumi.getter
     def name(self) -> _builtins.str:
         """
-
+        The name of the ClickHouse table to be created. This table can consume data from and write data to the specified Kafka topics. Example: `events`.
         """
         return pulumi.get(self, "name")

@@ -52060,7 +52442,7 @@ class GetServiceIntegrationClickhouseKafkaUserConfigTableResult(dict):
     @pulumi.getter
     def topics(self) -> Sequence['outputs.GetServiceIntegrationClickhouseKafkaUserConfigTableTopicResult']:
         """
-        Kafka topics
+        Array of Kafka topics that this table will read data from or write data to. Messages from all specified topics will be inserted into this table, and data inserted into this table will be published to the topics
         """
         return pulumi.get(self, "topics")

@@ -52068,7 +52450,7 @@ class GetServiceIntegrationClickhouseKafkaUserConfigTableResult(dict):
     @pulumi.getter(name="autoOffsetReset")
     def auto_offset_reset(self) -> Optional[_builtins.str]:
         """
-        Enum: `beginning`, `earliest`, `end`, `largest`, `latest`, `smallest`.
+        Enum: `beginning`, `earliest`, `end`, `largest`, `latest`, `smallest`. Determines where to start reading from Kafka when no offset is stored or the stored offset is out of range. `earliest` starts from the beginning, `latest` starts from the end. Default: `earliest`.
         """
         return pulumi.get(self, "auto_offset_reset")

@@ -52076,7 +52458,7 @@ class GetServiceIntegrationClickhouseKafkaUserConfigTableResult(dict):
     @pulumi.getter(name="dateTimeInputFormat")
     def date_time_input_format(self) -> Optional[_builtins.str]:
         """
-        Enum: `basic`, `best_effort`, `best_effort_us`.
+        Enum: `basic`, `best_effort`, `best_effort_us`. Specifies how ClickHouse should parse DateTime values from text-based input formats. `basic` uses simple parsing, `best_effort` attempts more flexible parsing. Default: `basic`.
         """
         return pulumi.get(self, "date_time_input_format")

@@ -52084,7 +52466,7 @@ class GetServiceIntegrationClickhouseKafkaUserConfigTableResult(dict):
     @pulumi.getter(name="handleErrorMode")
     def handle_error_mode(self) -> Optional[_builtins.str]:
         """
-        Enum: `default`, `stream`.
+        Enum: `default`, `stream`. Defines how ClickHouse should handle errors when processing Kafka messages. `default` stops on errors, `stream` continues processing and logs errors. Default: `default`.
         """
         return pulumi.get(self, "handle_error_mode")

@@ -52092,7 +52474,7 @@ class GetServiceIntegrationClickhouseKafkaUserConfigTableResult(dict):
     @pulumi.getter(name="maxBlockSize")
     def max_block_size(self) -> Optional[_builtins.int]:
         """
-
+        Maximum number of rows to collect before flushing data between Kafka and ClickHouse. Default: `0`.
         """
         return pulumi.get(self, "max_block_size")

@@ -52100,7 +52482,7 @@ class GetServiceIntegrationClickhouseKafkaUserConfigTableResult(dict):
     @pulumi.getter(name="maxRowsPerMessage")
     def max_rows_per_message(self) -> Optional[_builtins.int]:
         """
-
+        Maximum number of rows that can be processed from a single Kafka message for row-based formats. Useful for controlling memory usage. Default: `1`.
         """
         return pulumi.get(self, "max_rows_per_message")

@@ -52108,7 +52490,7 @@ class GetServiceIntegrationClickhouseKafkaUserConfigTableResult(dict):
     @pulumi.getter(name="numConsumers")
     def num_consumers(self) -> Optional[_builtins.int]:
         """
-
+        Number of Kafka consumers to run per table per replica. Increasing this can improve throughput but may increase resource usage. Default: `1`.
         """
         return pulumi.get(self, "num_consumers")

@@ -52116,7 +52498,7 @@ class GetServiceIntegrationClickhouseKafkaUserConfigTableResult(dict):
     @pulumi.getter(name="pollMaxBatchSize")
     def poll_max_batch_size(self) -> Optional[_builtins.int]:
         """
-        Maximum
+        Maximum number of messages to fetch in a single Kafka poll operation for reading. Default: `0`.
         """
         return pulumi.get(self, "poll_max_batch_size")

@@ -52140,7 +52522,7 @@ class GetServiceIntegrationClickhouseKafkaUserConfigTableResult(dict):
     @pulumi.getter(name="producerBatchSize")
     def producer_batch_size(self) -> Optional[_builtins.int]:
         """
-        The maximum size in bytes of a batch of messages sent to Kafka. If the batch size is exceeded, the batch is sent.
+        The maximum size in bytes of a batch of messages sent to Kafka. If the batch size is exceeded, the batch is sent.
         """
         return pulumi.get(self, "producer_batch_size")

@@ -52172,7 +52554,7 @@ class GetServiceIntegrationClickhouseKafkaUserConfigTableResult(dict):
     @pulumi.getter(name="producerQueueBufferingMaxKbytes")
     def producer_queue_buffering_max_kbytes(self) -> Optional[_builtins.int]:
         """
-        The maximum size of the buffer in kilobytes before sending.
+        The maximum size of the buffer in kilobytes before sending.
         """
         return pulumi.get(self, "producer_queue_buffering_max_kbytes")

@@ -52196,7 +52578,7 @@ class GetServiceIntegrationClickhouseKafkaUserConfigTableResult(dict):
     @pulumi.getter(name="skipBrokenMessages")
     def skip_broken_messages(self) -> Optional[_builtins.int]:
         """
-
+        Number of broken messages to skip before stopping processing when reading from Kafka. Useful for handling corrupted data without failing the entire integration. Default: `0`.
         """
         return pulumi.get(self, "skip_broken_messages")

@@ -52204,7 +52586,7 @@ class GetServiceIntegrationClickhouseKafkaUserConfigTableResult(dict):
     @pulumi.getter(name="threadPerConsumer")
     def thread_per_consumer(self) -> Optional[_builtins.bool]:
         """
-
+        When enabled, each consumer runs in its own thread, providing better isolation and potentially better performance for high-throughput scenarios. Default: `false`.
         """
         return pulumi.get(self, "thread_per_consumer")

@@ -52215,8 +52597,8 @@ class GetServiceIntegrationClickhouseKafkaUserConfigTableColumnResult(dict):
                  name: _builtins.str,
                  type: _builtins.str):
         """
-        :param _builtins.str name:
-        :param _builtins.str type:
+        :param _builtins.str name: The name of the column in the ClickHouse table. This should match the field names in your Kafka message format. Example: `key`.
+        :param _builtins.str type: The ClickHouse data type for this column. Must be a valid ClickHouse data type that can handle the data format. Example: `UInt64`.
         """
         pulumi.set(__self__, "name", name)
         pulumi.set(__self__, "type", type)
@@ -52225,7 +52607,7 @@ class GetServiceIntegrationClickhouseKafkaUserConfigTableColumnResult(dict):
     @pulumi.getter
     def name(self) -> _builtins.str:
         """
-
+        The name of the column in the ClickHouse table. This should match the field names in your Kafka message format. Example: `key`.
         """
         return pulumi.get(self, "name")

@@ -52233,7 +52615,7 @@ class GetServiceIntegrationClickhouseKafkaUserConfigTableColumnResult(dict):
     @pulumi.getter
     def type(self) -> _builtins.str:
         """
-
+        The ClickHouse data type for this column. Must be a valid ClickHouse data type that can handle the data format. Example: `UInt64`.
         """
         return pulumi.get(self, "type")

@@ -52243,7 +52625,7 @@ class GetServiceIntegrationClickhouseKafkaUserConfigTableTopicResult(dict):
     def __init__(__self__, *,
                  name: _builtins.str):
         """
-        :param _builtins.str name:
+        :param _builtins.str name: The name of the Kafka topic to read messages from or write messages to. The topic must exist in the Kafka cluster. Example: `topic_name`.
         """
         pulumi.set(__self__, "name", name)

@@ -52251,7 +52633,7 @@ class GetServiceIntegrationClickhouseKafkaUserConfigTableTopicResult(dict):
     @pulumi.getter
     def name(self) -> _builtins.str:
         """
-
+        The name of the Kafka topic to read messages from or write messages to. The topic must exist in the Kafka cluster. Example: `topic_name`.
         """
         return pulumi.get(self, "name")

@@ -52991,7 +53373,7 @@ class GetServiceIntegrationEndpointExternalElasticsearchLogsUserConfigResult(dict):
                -----END CERTIFICATE-----
                `.
         :param _builtins.int index_days_max: Maximum number of days of logs to keep. Default: `3`.
-        :param _builtins.float timeout: Elasticsearch request timeout limit. Default: `10
+        :param _builtins.float timeout: Elasticsearch request timeout limit. Default: `10`.
         """
         pulumi.set(__self__, "index_prefix", index_prefix)
         pulumi.set(__self__, "url", url)
@@ -53041,7 +53423,7 @@ class GetServiceIntegrationEndpointExternalElasticsearchLogsUserConfigResult(dict):
     @pulumi.getter
     def timeout(self) -> Optional[_builtins.float]:
         """
-        Elasticsearch request timeout limit. Default: `10
+        Elasticsearch request timeout limit. Default: `10`.
         """
         return pulumi.get(self, "timeout")

@@ -53343,7 +53725,7 @@ class GetServiceIntegrationEndpointExternalOpensearchLogsUserConfigResult(dict):
                -----END CERTIFICATE-----
                `.
         :param _builtins.int index_days_max: Maximum number of days of logs to keep. Default: `3`.
-        :param _builtins.float timeout: OpenSearch request timeout limit. Default: `10
+        :param _builtins.float timeout: OpenSearch request timeout limit. Default: `10`.
         """
         pulumi.set(__self__, "index_prefix", index_prefix)
         pulumi.set(__self__, "url", url)
@@ -53393,7 +53775,7 @@ class GetServiceIntegrationEndpointExternalOpensearchLogsUserConfigResult(dict):
     @pulumi.getter
     def timeout(self) -> Optional[_builtins.float]:
         """
-        OpenSearch request timeout limit. Default: `10
+        OpenSearch request timeout limit. Default: `10`.
         """
         return pulumi.get(self, "timeout")

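Both log-endpoint configs above share the same shape; only the truncated `timeout` default was repaired. A minimal sketch of the Elasticsearch variant (the URL and index prefix are placeholders; the OpenSearch endpoint takes the same fields under `external_opensearch_logs_user_config`):

```python
import pulumi_aiven as aiven

# Minimal sketch: ship service logs to an external Elasticsearch cluster
# using the fields documented above. URL and prefix are placeholders.
log_endpoint = aiven.ServiceIntegrationEndpoint(
    "es-logs",
    project="my-project",
    endpoint_name="external-es-logs",
    endpoint_type="external_elasticsearch_logs",
    external_elasticsearch_logs_user_config={
        "url": "https://logs.example.com:9200",
        "index_prefix": "logs",
        "index_days_max": 3,  # keep at most 3 days of logs (the default)
        "timeout": 10,        # request timeout limit (the default, per the fix above)
    },
)
```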
@@ -54141,10 +54523,10 @@ class GetServiceIntegrationKafkaMirrormakerUserConfigKafkaMirrormakerResult(dict):
         :param _builtins.int consumer_fetch_min_bytes: The minimum amount of data the server should return for a fetch request. Example: `1024`.
         :param _builtins.int consumer_max_poll_records: Set consumer max.poll.records. The default is 500. Example: `500`.
         :param _builtins.int producer_batch_size: The batch size in bytes producer will attempt to collect before publishing to broker. Example: `1024`.
-        :param _builtins.int producer_buffer_memory: The amount of bytes producer can use for buffering data before publishing to broker.
+        :param _builtins.int producer_buffer_memory: The amount of bytes producer can use for buffering data before publishing to broker.
         :param _builtins.str producer_compression_type: Enum: `gzip`, `lz4`, `none`, `snappy`, `zstd`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
         :param _builtins.int producer_linger_ms: The linger time (ms) for waiting new data to arrive for publishing. Example: `100`.
-        :param _builtins.int producer_max_request_size: The maximum request size in bytes.
+        :param _builtins.int producer_max_request_size: The maximum request size in bytes.
         """
         if consumer_auto_offset_reset is not None:
             pulumi.set(__self__, "consumer_auto_offset_reset", consumer_auto_offset_reset)
@@ -54199,7 +54581,7 @@ class GetServiceIntegrationKafkaMirrormakerUserConfigKafkaMirrormakerResult(dict):
     @pulumi.getter(name="producerBufferMemory")
    def producer_buffer_memory(self) -> Optional[_builtins.int]:
         """
-        The amount of bytes producer can use for buffering data before publishing to broker.
+        The amount of bytes producer can use for buffering data before publishing to broker.
         """
         return pulumi.get(self, "producer_buffer_memory")

@@ -54223,7 +54605,7 @@ class GetServiceIntegrationKafkaMirrormakerUserConfigKafkaMirrormakerResult(dict):
     @pulumi.getter(name="producerMaxRequestSize")
     def producer_max_request_size(self) -> Optional[_builtins.int]:
         """
-        The maximum request size in bytes.
+        The maximum request size in bytes.
         """
         return pulumi.get(self, "producer_max_request_size")

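A minimal sketch of setting the MirrorMaker producer fields documented above via `aiven.ServiceIntegration`. The service names are placeholders, and `cluster_alias` is an assumed sibling field of the nested `kafka_mirrormaker` block:

```python
import pulumi_aiven as aiven

# Minimal sketch: tune MirrorMaker producer buffering and request limits
# using the fields documented above. Names and alias are placeholders.
mirror = aiven.ServiceIntegration(
    "kafka-mirrormaker",
    project="my-project",
    integration_type="kafka_mirrormaker",
    source_service_name="example-kafka",
    destination_service_name="example-mirrormaker",
    kafka_mirrormaker_user_config={
        "cluster_alias": "source",  # assumption: alias referenced by replication flows
        "kafka_mirrormaker": {
            "producer_batch_size": 1024,          # bytes collected before publishing
            "producer_buffer_memory": 8388608,    # bytes available for buffering
            "producer_linger_ms": 100,            # wait for more data before sending
            "producer_max_request_size": 1048576, # maximum request size in bytes
        },
    },
)
```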