pulumi-aiven 6.21.0a1722057299__py3-none-any.whl → 6.22.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of pulumi-aiven might be problematic.

Files changed (75)
  1. pulumi_aiven/_inputs.py +980 -158
  2. pulumi_aiven/account_authentication.py +5 -5
  3. pulumi_aiven/account_team_project.py +7 -7
  4. pulumi_aiven/aws_vpc_peering_connection.py +7 -7
  5. pulumi_aiven/azure_privatelink_connection_approval.py +16 -16
  6. pulumi_aiven/azure_vpc_peering_connection.py +7 -7
  7. pulumi_aiven/cassandra.py +46 -46
  8. pulumi_aiven/clickhouse.py +34 -34
  9. pulumi_aiven/clickhouse_grant.py +34 -34
  10. pulumi_aiven/dragonfly.py +40 -40
  11. pulumi_aiven/flink.py +40 -40
  12. pulumi_aiven/flink_application_deployment.py +16 -16
  13. pulumi_aiven/flink_application_version.py +36 -36
  14. pulumi_aiven/gcp_vpc_peering_connection.py +7 -7
  15. pulumi_aiven/get_account_team_project.py +1 -1
  16. pulumi_aiven/get_aws_vpc_peering_connection.py +1 -1
  17. pulumi_aiven/get_azure_vpc_peering_connection.py +1 -1
  18. pulumi_aiven/get_cassanda.py +1 -1
  19. pulumi_aiven/get_cassandra.py +1 -1
  20. pulumi_aiven/get_clickhouse.py +1 -1
  21. pulumi_aiven/get_dragonfly.py +1 -1
  22. pulumi_aiven/get_flink.py +1 -1
  23. pulumi_aiven/get_gcp_vpc_peering_connection.py +1 -1
  24. pulumi_aiven/get_grafana.py +1 -1
  25. pulumi_aiven/get_kafka.py +1 -1
  26. pulumi_aiven/get_kafka_connect.py +1 -1
  27. pulumi_aiven/get_kafka_mirror_maker.py +1 -1
  28. pulumi_aiven/get_m3_aggregator.py +1 -1
  29. pulumi_aiven/get_m3_db.py +1 -1
  30. pulumi_aiven/get_my_sql.py +1 -1
  31. pulumi_aiven/get_mysql_user.py +1 -1
  32. pulumi_aiven/get_open_search.py +1 -1
  33. pulumi_aiven/get_pg.py +8 -8
  34. pulumi_aiven/get_pg_database.py +8 -8
  35. pulumi_aiven/get_pg_user.py +16 -16
  36. pulumi_aiven/get_project_user.py +1 -1
  37. pulumi_aiven/get_project_vpc.py +1 -1
  38. pulumi_aiven/get_redis.py +1 -1
  39. pulumi_aiven/get_service_integration.py +3 -3
  40. pulumi_aiven/get_thanos.py +1 -1
  41. pulumi_aiven/get_transit_gateway_vpc_attachment.py +1 -1
  42. pulumi_aiven/get_valkey.py +1 -1
  43. pulumi_aiven/grafana.py +46 -46
  44. pulumi_aiven/influx_db.py +48 -41
  45. pulumi_aiven/kafka.py +62 -62
  46. pulumi_aiven/kafka_connect.py +59 -59
  47. pulumi_aiven/kafka_connector.py +2 -2
  48. pulumi_aiven/kafka_mirror_maker.py +45 -45
  49. pulumi_aiven/kafka_topic.py +18 -18
  50. pulumi_aiven/m3_aggregator.py +40 -40
  51. pulumi_aiven/m3_db.py +48 -48
  52. pulumi_aiven/my_sql.py +54 -54
  53. pulumi_aiven/mysql_user.py +7 -7
  54. pulumi_aiven/open_search.py +56 -56
  55. pulumi_aiven/organization.py +3 -3
  56. pulumi_aiven/organization_group_project.py +3 -3
  57. pulumi_aiven/organization_user_group_member.py +3 -3
  58. pulumi_aiven/outputs.py +1708 -317
  59. pulumi_aiven/pg.py +43 -43
  60. pulumi_aiven/pg_database.py +12 -12
  61. pulumi_aiven/pg_user.py +47 -47
  62. pulumi_aiven/project.py +5 -5
  63. pulumi_aiven/project_user.py +7 -7
  64. pulumi_aiven/project_vpc.py +4 -4
  65. pulumi_aiven/pulumi-plugin.json +1 -1
  66. pulumi_aiven/redis.py +46 -46
  67. pulumi_aiven/service_integration.py +72 -72
  68. pulumi_aiven/service_integration_endpoint.py +65 -65
  69. pulumi_aiven/thanos.py +34 -34
  70. pulumi_aiven/transit_gateway_vpc_attachment.py +7 -7
  71. pulumi_aiven/valkey.py +40 -40
  72. {pulumi_aiven-6.21.0a1722057299.dist-info → pulumi_aiven-6.22.0.dist-info}/METADATA +1 -1
  73. {pulumi_aiven-6.21.0a1722057299.dist-info → pulumi_aiven-6.22.0.dist-info}/RECORD +75 -75
  74. {pulumi_aiven-6.21.0a1722057299.dist-info → pulumi_aiven-6.22.0.dist-info}/WHEEL +1 -1
  75. {pulumi_aiven-6.21.0a1722057299.dist-info → pulumi_aiven-6.22.0.dist-info}/top_level.txt +0 -0
pulumi_aiven/outputs.py CHANGED
@@ -101,6 +101,7 @@ __all__ = [
     'KafkaConnectorTask',
     'KafkaKafka',
     'KafkaKafkaUserConfig',
+    'KafkaKafkaUserConfigFollowerFetching',
     'KafkaKafkaUserConfigIpFilterObject',
     'KafkaKafkaUserConfigKafka',
     'KafkaKafkaUserConfigKafkaAuthenticationMethods',
@@ -109,6 +110,7 @@ __all__ = [
     'KafkaKafkaUserConfigKafkaConnectSecretProviderAws',
     'KafkaKafkaUserConfigKafkaConnectSecretProviderVault',
     'KafkaKafkaUserConfigKafkaRestConfig',
+    'KafkaKafkaUserConfigKafkaSaslMechanisms',
     'KafkaKafkaUserConfigPrivateAccess',
     'KafkaKafkaUserConfigPrivatelinkAccess',
     'KafkaKafkaUserConfigPublicAccess',
@@ -169,7 +171,10 @@ __all__ = [
     'OpenSearchComponent',
     'OpenSearchOpensearch',
     'OpenSearchOpensearchUserConfig',
+    'OpenSearchOpensearchUserConfigAzureMigration',
+    'OpenSearchOpensearchUserConfigGcsMigration',
     'OpenSearchOpensearchUserConfigIndexPattern',
+    'OpenSearchOpensearchUserConfigIndexRollup',
     'OpenSearchOpensearchUserConfigIndexTemplate',
     'OpenSearchOpensearchUserConfigIpFilterObject',
     'OpenSearchOpensearchUserConfigOpenid',
@@ -181,6 +186,7 @@ __all__ = [
     'OpenSearchOpensearchUserConfigPrivateAccess',
     'OpenSearchOpensearchUserConfigPrivatelinkAccess',
     'OpenSearchOpensearchUserConfigPublicAccess',
+    'OpenSearchOpensearchUserConfigS3Migration',
     'OpenSearchOpensearchUserConfigSaml',
     'OpenSearchServiceIntegration',
     'OpenSearchTag',
@@ -380,6 +386,7 @@ __all__ = [
     'GetKafkaConnectorTaskResult',
     'GetKafkaKafkaResult',
     'GetKafkaKafkaUserConfigResult',
+    'GetKafkaKafkaUserConfigFollowerFetchingResult',
     'GetKafkaKafkaUserConfigIpFilterObjectResult',
     'GetKafkaKafkaUserConfigKafkaResult',
     'GetKafkaKafkaUserConfigKafkaAuthenticationMethodsResult',
@@ -388,6 +395,7 @@ __all__ = [
     'GetKafkaKafkaUserConfigKafkaConnectSecretProviderAwsResult',
     'GetKafkaKafkaUserConfigKafkaConnectSecretProviderVaultResult',
     'GetKafkaKafkaUserConfigKafkaRestConfigResult',
+    'GetKafkaKafkaUserConfigKafkaSaslMechanismsResult',
     'GetKafkaKafkaUserConfigPrivateAccessResult',
     'GetKafkaKafkaUserConfigPrivatelinkAccessResult',
     'GetKafkaKafkaUserConfigPublicAccessResult',
@@ -448,7 +456,10 @@ __all__ = [
     'GetOpenSearchComponentResult',
     'GetOpenSearchOpensearchResult',
     'GetOpenSearchOpensearchUserConfigResult',
+    'GetOpenSearchOpensearchUserConfigAzureMigrationResult',
+    'GetOpenSearchOpensearchUserConfigGcsMigrationResult',
     'GetOpenSearchOpensearchUserConfigIndexPatternResult',
+    'GetOpenSearchOpensearchUserConfigIndexRollupResult',
     'GetOpenSearchOpensearchUserConfigIndexTemplateResult',
     'GetOpenSearchOpensearchUserConfigIpFilterObjectResult',
     'GetOpenSearchOpensearchUserConfigOpenidResult',
@@ -460,6 +471,7 @@ __all__ = [
     'GetOpenSearchOpensearchUserConfigPrivateAccessResult',
     'GetOpenSearchOpensearchUserConfigPrivatelinkAccessResult',
     'GetOpenSearchOpensearchUserConfigPublicAccessResult',
+    'GetOpenSearchOpensearchUserConfigS3MigrationResult',
     'GetOpenSearchOpensearchUserConfigSamlResult',
     'GetOpenSearchServiceIntegrationResult',
     'GetOpenSearchTagResult',
@@ -2520,6 +2532,8 @@ class DragonflyDragonflyUserConfigMigration(dict):
         suggest = None
         if key == "ignoreDbs":
             suggest = "ignore_dbs"
+        elif key == "ignoreRoles":
+            suggest = "ignore_roles"

         if suggest:
             pulumi.log.warn(f"Key '{key}' not found in DragonflyDragonflyUserConfigMigration. Access the value via the '{suggest}' property getter instead.")
@@ -2537,6 +2551,7 @@ class DragonflyDragonflyUserConfigMigration(dict):
                  port: int,
                  dbname: Optional[str] = None,
                  ignore_dbs: Optional[str] = None,
+                 ignore_roles: Optional[str] = None,
                  method: Optional[str] = None,
                  password: Optional[str] = None,
                  ssl: Optional[bool] = None,
@@ -2546,6 +2561,7 @@ class DragonflyDragonflyUserConfigMigration(dict):
         :param int port: Port number of the server where to migrate data from. Example: `1234`.
         :param str dbname: Database name for bootstrapping the initial connection. Example: `defaultdb`.
         :param str ignore_dbs: Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment). Example: `db1,db2`.
+        :param str ignore_roles: Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment). Example: `role1,role2`.
         :param str method: Enum: `dump`, `replication`. The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
         :param str password: Password for authentication with the server where to migrate data from. Example: `jjKk45Nnd`.
         :param bool ssl: The server where to migrate data from is secured with SSL. Default: `true`.
@@ -2557,6 +2573,8 @@ class DragonflyDragonflyUserConfigMigration(dict):
             pulumi.set(__self__, "dbname", dbname)
         if ignore_dbs is not None:
             pulumi.set(__self__, "ignore_dbs", ignore_dbs)
+        if ignore_roles is not None:
+            pulumi.set(__self__, "ignore_roles", ignore_roles)
         if method is not None:
             pulumi.set(__self__, "method", method)
         if password is not None:
@@ -2598,6 +2616,14 @@ class DragonflyDragonflyUserConfigMigration(dict):
         """
         return pulumi.get(self, "ignore_dbs")

+    @property
+    @pulumi.getter(name="ignoreRoles")
+    def ignore_roles(self) -> Optional[str]:
+        """
+        Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment). Example: `role1,role2`.
+        """
+        return pulumi.get(self, "ignore_roles")
+
     @property
     @pulumi.getter
     def method(self) -> Optional[str]:
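To put the new `ignore_roles` option in context, here is a minimal, hypothetical sketch of a Dragonfly service with a migration block, written against the matching `...Args` input types from `_inputs.py`. The `host`, project, cloud, and plan values are placeholders, not taken from this diff.

import pulumi_aiven as aiven

# Sketch only: skip two databases and, new in 6.22.0, two PostgreSQL roles
# during migration into a Dragonfly service.
dragonfly = aiven.Dragonfly(
    "example-dragonfly",
    project="my-project",                   # placeholder
    cloud_name="google-europe-west1",       # placeholder
    plan="startup-4",                       # placeholder
    service_name="example-dragonfly",
    dragonfly_user_config=aiven.DragonflyDragonflyUserConfigArgs(
        migration=aiven.DragonflyDragonflyUserConfigMigrationArgs(
            host="source-db.example.com",   # placeholder source server
            port=5432,
            dbname="defaultdb",
            ignore_dbs="db1,db2",
            ignore_roles="role1,role2",     # new field in this release
            method="replication",
            ssl=True,
        ),
    ),
)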
@@ -3665,7 +3691,8 @@ class GrafanaGrafanaUserConfig(dict):
                  unified_alerting_enabled: Optional[bool] = None,
                  user_auto_assign_org: Optional[bool] = None,
                  user_auto_assign_org_role: Optional[str] = None,
-                 viewers_can_edit: Optional[bool] = None):
+                 viewers_can_edit: Optional[bool] = None,
+                 wal: Optional[bool] = None):
         """
         :param str additional_backup_regions: Additional Cloud Regions for Backup Replication.
         :param bool alerting_enabled: Enable or disable Grafana legacy alerting functionality. This should not be enabled with unified*alerting*enabled.
@@ -3709,6 +3736,7 @@ class GrafanaGrafanaUserConfig(dict):
         :param bool user_auto_assign_org: Auto-assign new users on signup to main organization. Defaults to false.
         :param str user_auto_assign_org_role: Enum: `Viewer`, `Admin`, `Editor`. Set role for new signups. Defaults to Viewer.
         :param bool viewers_can_edit: Users with view-only permission can edit but not save dashboards.
+        :param bool wal: Setting to enable/disable Write-Ahead Logging. The default value is false (disabled).
         """
         if additional_backup_regions is not None:
             pulumi.set(__self__, "additional_backup_regions", additional_backup_regions)
@@ -3794,6 +3822,8 @@ class GrafanaGrafanaUserConfig(dict):
             pulumi.set(__self__, "user_auto_assign_org_role", user_auto_assign_org_role)
         if viewers_can_edit is not None:
             pulumi.set(__self__, "viewers_can_edit", viewers_can_edit)
+        if wal is not None:
+            pulumi.set(__self__, "wal", wal)

     @property
     @pulumi.getter(name="additionalBackupRegions")
@@ -4132,6 +4162,14 @@ class GrafanaGrafanaUserConfig(dict):
         """
         return pulumi.get(self, "viewers_can_edit")

+    @property
+    @pulumi.getter
+    def wal(self) -> Optional[bool]:
+        """
+        Setting to enable/disable Write-Ahead Logging. The default value is false (disabled).
+        """
+        return pulumi.get(self, "wal")
+

 @pulumi.output_type
 class GrafanaGrafanaUserConfigAuthAzuread(dict):
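A minimal, hypothetical sketch of enabling the new `wal` flag from the Pulumi Python SDK (project, cloud, and plan values are placeholders):

import pulumi_aiven as aiven

# Sketch only: turn on Grafana Write-Ahead Logging, which the docstring
# above says defaults to false.
grafana = aiven.Grafana(
    "example-grafana",
    project="my-project",                   # placeholder
    cloud_name="google-europe-west1",       # placeholder
    plan="startup-4",                       # placeholder
    service_name="example-grafana",
    grafana_user_config=aiven.GrafanaGrafanaUserConfigArgs(
        wal=True,
    ),
)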
@@ -7230,6 +7268,8 @@ class KafkaKafkaUserConfig(dict):
             suggest = "aiven_kafka_topic_messages"
         elif key == "customDomain":
             suggest = "custom_domain"
+        elif key == "followerFetching":
+            suggest = "follower_fetching"
         elif key == "ipFilterObjects":
             suggest = "ip_filter_objects"
         elif key == "ipFilterStrings":
@@ -7250,6 +7290,8 @@ class KafkaKafkaUserConfig(dict):
             suggest = "kafka_rest_authorization"
         elif key == "kafkaRestConfig":
             suggest = "kafka_rest_config"
+        elif key == "kafkaSaslMechanisms":
+            suggest = "kafka_sasl_mechanisms"
         elif key == "kafkaVersion":
             suggest = "kafka_version"
         elif key == "letsencryptSaslPrivatelink":
@@ -7286,6 +7328,7 @@ class KafkaKafkaUserConfig(dict):
                  additional_backup_regions: Optional[str] = None,
                  aiven_kafka_topic_messages: Optional[bool] = None,
                  custom_domain: Optional[str] = None,
+                 follower_fetching: Optional['outputs.KafkaKafkaUserConfigFollowerFetching'] = None,
                  ip_filter_objects: Optional[Sequence['outputs.KafkaKafkaUserConfigIpFilterObject']] = None,
                  ip_filter_strings: Optional[Sequence[str]] = None,
                  ip_filters: Optional[Sequence[str]] = None,
@@ -7297,6 +7340,7 @@ class KafkaKafkaUserConfig(dict):
                  kafka_rest: Optional[bool] = None,
                  kafka_rest_authorization: Optional[bool] = None,
                  kafka_rest_config: Optional['outputs.KafkaKafkaUserConfigKafkaRestConfig'] = None,
+                 kafka_sasl_mechanisms: Optional['outputs.KafkaKafkaUserConfigKafkaSaslMechanisms'] = None,
                  kafka_version: Optional[str] = None,
                  letsencrypt_sasl_privatelink: Optional[bool] = None,
                  private_access: Optional['outputs.KafkaKafkaUserConfigPrivateAccess'] = None,
@@ -7311,6 +7355,7 @@ class KafkaKafkaUserConfig(dict):
         :param str additional_backup_regions: Additional Cloud Regions for Backup Replication.
         :param bool aiven_kafka_topic_messages: Allow access to read Kafka topic messages in the Aiven Console and REST API.
         :param str custom_domain: Serve the web frontend using a custom CNAME pointing to the Aiven DNS name. Example: `grafana.example.org`.
+        :param 'KafkaKafkaUserConfigFollowerFetchingArgs' follower_fetching: Enable follower fetching
         :param Sequence['KafkaKafkaUserConfigIpFilterObjectArgs'] ip_filter_objects: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`
         :param Sequence[str] ip_filter_strings: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`.
         :param Sequence[str] ip_filters: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`.
@@ -7321,6 +7366,7 @@ class KafkaKafkaUserConfig(dict):
         :param bool kafka_rest: Enable Kafka-REST service. Default: `false`.
         :param bool kafka_rest_authorization: Enable authorization in Kafka-REST service.
         :param 'KafkaKafkaUserConfigKafkaRestConfigArgs' kafka_rest_config: Kafka REST configuration
+        :param 'KafkaKafkaUserConfigKafkaSaslMechanismsArgs' kafka_sasl_mechanisms: Kafka SASL mechanisms
         :param str kafka_version: Enum: `3.1`, `3.2`, `3.3`, `3.4`, `3.5`, `3.6`, `3.7`, and newer. Kafka major version.
         :param bool letsencrypt_sasl_privatelink: Use Letsencrypt CA for Kafka SASL via Privatelink.
         :param 'KafkaKafkaUserConfigPrivateAccessArgs' private_access: Allow access to selected service ports from private networks
@@ -7338,6 +7384,8 @@ class KafkaKafkaUserConfig(dict):
             pulumi.set(__self__, "aiven_kafka_topic_messages", aiven_kafka_topic_messages)
         if custom_domain is not None:
             pulumi.set(__self__, "custom_domain", custom_domain)
+        if follower_fetching is not None:
+            pulumi.set(__self__, "follower_fetching", follower_fetching)
         if ip_filter_objects is not None:
             pulumi.set(__self__, "ip_filter_objects", ip_filter_objects)
         if ip_filter_strings is not None:
@@ -7360,6 +7408,8 @@ class KafkaKafkaUserConfig(dict):
             pulumi.set(__self__, "kafka_rest_authorization", kafka_rest_authorization)
         if kafka_rest_config is not None:
             pulumi.set(__self__, "kafka_rest_config", kafka_rest_config)
+        if kafka_sasl_mechanisms is not None:
+            pulumi.set(__self__, "kafka_sasl_mechanisms", kafka_sasl_mechanisms)
         if kafka_version is not None:
             pulumi.set(__self__, "kafka_version", kafka_version)
         if letsencrypt_sasl_privatelink is not None:
@@ -7406,6 +7456,14 @@ class KafkaKafkaUserConfig(dict):
         """
         return pulumi.get(self, "custom_domain")

+    @property
+    @pulumi.getter(name="followerFetching")
+    def follower_fetching(self) -> Optional['outputs.KafkaKafkaUserConfigFollowerFetching']:
+        """
+        Enable follower fetching
+        """
+        return pulumi.get(self, "follower_fetching")
+
     @property
     @pulumi.getter(name="ipFilterObjects")
     def ip_filter_objects(self) -> Optional[Sequence['outputs.KafkaKafkaUserConfigIpFilterObject']]:
@@ -7492,6 +7550,14 @@ class KafkaKafkaUserConfig(dict):
         """
         return pulumi.get(self, "kafka_rest_config")

+    @property
+    @pulumi.getter(name="kafkaSaslMechanisms")
+    def kafka_sasl_mechanisms(self) -> Optional['outputs.KafkaKafkaUserConfigKafkaSaslMechanisms']:
+        """
+        Kafka SASL mechanisms
+        """
+        return pulumi.get(self, "kafka_sasl_mechanisms")
+
     @property
     @pulumi.getter(name="kafkaVersion")
     def kafka_version(self) -> Optional[str]:
@@ -7573,6 +7639,25 @@ class KafkaKafkaUserConfig(dict):
         return pulumi.get(self, "tiered_storage")


+@pulumi.output_type
+class KafkaKafkaUserConfigFollowerFetching(dict):
+    def __init__(__self__, *,
+                 enabled: Optional[bool] = None):
+        """
+        :param bool enabled: Whether to enable the follower fetching functionality.
+        """
+        if enabled is not None:
+            pulumi.set(__self__, "enabled", enabled)
+
+    @property
+    @pulumi.getter
+    def enabled(self) -> Optional[bool]:
+        """
+        Whether to enable the follower fetching functionality.
+        """
+        return pulumi.get(self, "enabled")
+
+
 @pulumi.output_type
 class KafkaKafkaUserConfigIpFilterObject(dict):
     def __init__(__self__, *,
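For context, a minimal, hypothetical sketch of a Kafka service using the new follower-fetching toggle from the Pulumi Python SDK. The project, cloud, and plan values are placeholders; the `KafkaKafkaUserConfigFollowerFetchingArgs` input type mirrors the output class above, which per this diff carries a single `enabled` flag.

import pulumi_aiven as aiven

# Sketch only: enable follower fetching on a Kafka service.
kafka = aiven.Kafka(
    "example-kafka",
    project="my-project",                   # placeholder
    cloud_name="google-europe-west1",       # placeholder
    plan="business-4",                      # placeholder
    service_name="example-kafka",
    kafka_user_config=aiven.KafkaKafkaUserConfigArgs(
        kafka_version="3.7",
        follower_fetching=aiven.KafkaKafkaUserConfigFollowerFetchingArgs(
            enabled=True,                   # only field shown in this diff
        ),
    ),
)

The new `kafka_sasl_mechanisms` block would be passed the same way via `KafkaKafkaUserConfigKafkaSaslMechanismsArgs`; its fields are not shown in these hunks, so they are omitted here.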
@@ -7760,52 +7845,52 @@ class KafkaKafkaUserConfigKafka(dict):
                  transaction_remove_expired_transaction_cleanup_interval_ms: Optional[int] = None,
                  transaction_state_log_segment_bytes: Optional[int] = None):
         """
-        :param bool auto_create_topics_enable: Enable auto creation of topics.
-        :param str compression_type: Enum: `gzip`, `snappy`, `lz4`, `zstd`, `uncompressed`, `producer`. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `uncompressed` which is equivalent to no compression; and `producer` which means retain the original compression codec set by the producer.
-        :param int connections_max_idle_ms: Idle connections timeout: the server socket processor threads close the connections that idle for longer than this. Example: `540000`.
-        :param int default_replication_factor: Replication factor for autocreated topics.
-        :param int group_initial_rebalance_delay_ms: The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. Example: `3000`.
-        :param int group_max_session_timeout_ms: The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. Example: `1800000`.
-        :param int group_min_session_timeout_ms: The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. Example: `6000`.
-        :param int log_cleaner_delete_retention_ms: How long are delete records retained? Example: `86400000`.
-        :param int log_cleaner_max_compaction_lag_ms: The maximum amount of time message will remain uncompacted. Only applicable for logs that are being compacted.
-        :param float log_cleaner_min_cleanable_ratio: Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option. Example: `0.5`.
-        :param int log_cleaner_min_compaction_lag_ms: The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.
-        :param str log_cleanup_policy: Enum: `delete`, `compact`, `compact,delete`. The default cleanup policy for segments beyond the retention window.
-        :param int log_flush_interval_messages: The number of messages accumulated on a log partition before messages are flushed to disk. Example: `9223372036854775807`.
-        :param int log_flush_interval_ms: The maximum time in ms that a message in any topic is kept in memory before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used.
-        :param int log_index_interval_bytes: The interval with which Kafka adds an entry to the offset index. Example: `4096`.
-        :param int log_index_size_max_bytes: The maximum size in bytes of the offset index. Example: `10485760`.
-        :param int log_local_retention_bytes: The maximum size of local log segments that can grow for a partition before it gets eligible for deletion. If set to -2, the value of log.retention.bytes is used. The effective value should always be less than or equal to log.retention.bytes value.
-        :param int log_local_retention_ms: The number of milliseconds to keep the local log segments before it gets eligible for deletion. If set to -2, the value of log.retention.ms is used. The effective value should always be less than or equal to log.retention.ms value.
-        :param bool log_message_downconversion_enable: This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests.
-        :param int log_message_timestamp_difference_max_ms: The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message.
-        :param str log_message_timestamp_type: Enum: `CreateTime`, `LogAppendTime`. Define whether the timestamp in the message is message create time or log append time.
-        :param bool log_preallocate: Should pre allocate file when create new segment?
-        :param int log_retention_bytes: The maximum size of the log before deleting messages.
-        :param int log_retention_hours: The number of hours to keep a log file before deleting it.
-        :param int log_retention_ms: The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied.
-        :param int log_roll_jitter_ms: The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used.
-        :param int log_roll_ms: The maximum time before a new log segment is rolled out (in milliseconds).
-        :param int log_segment_bytes: The maximum size of a single log file.
-        :param int log_segment_delete_delay_ms: The amount of time to wait before deleting a file from the filesystem. Example: `60000`.
-        :param int max_connections_per_ip: The maximum number of connections allowed from each ip address (defaults to 2147483647).
-        :param int max_incremental_fetch_session_cache_slots: The maximum number of incremental fetch sessions that the broker will maintain. Example: `1000`.
-        :param int message_max_bytes: The maximum size of message that the server can receive. Example: `1048588`.
-        :param int min_insync_replicas: When a producer sets acks to `all` (or `-1`), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful. Example: `1`.
-        :param int num_partitions: Number of partitions for autocreated topics.
-        :param int offsets_retention_minutes: Log retention window in minutes for offsets topic. Example: `10080`.
-        :param int producer_purgatory_purge_interval_requests: The purge interval (in number of requests) of the producer request purgatory(defaults to 1000).
-        :param int replica_fetch_max_bytes: The number of bytes of messages to attempt to fetch for each partition (defaults to 1048576). This is not an absolute maximum, if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made.
-        :param int replica_fetch_response_max_bytes: Maximum bytes expected for the entire fetch response (defaults to 10485760). Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum.
-        :param str sasl_oauthbearer_expected_audience: The (optional) comma-delimited setting for the broker to use to verify that the JWT was issued for one of the expected audiences.
-        :param str sasl_oauthbearer_expected_issuer: Optional setting for the broker to use to verify that the JWT was created by the expected issuer.
-        :param str sasl_oauthbearer_jwks_endpoint_url: OIDC JWKS endpoint URL. By setting this the SASL SSL OAuth2/OIDC authentication is enabled. See also other options for SASL OAuth2/OIDC.
-        :param str sasl_oauthbearer_sub_claim_name: Name of the scope from which to extract the subject claim from the JWT. Defaults to sub.
-        :param int socket_request_max_bytes: The maximum number of bytes in a socket request (defaults to 104857600).
-        :param bool transaction_partition_verification_enable: Enable verification that checks that the partition has been added to the transaction before writing transactional records to the partition.
-        :param int transaction_remove_expired_transaction_cleanup_interval_ms: The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (defaults to 3600000 (1 hour)).
-        :param int transaction_state_log_segment_bytes: The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (defaults to 104857600 (100 mebibytes)).
+        :param bool auto_create_topics_enable: Enable auto-creation of topics. (Default: true).
+        :param str compression_type: Enum: `gzip`, `snappy`, `lz4`, `zstd`, `uncompressed`, `producer`. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `uncompressed` which is equivalent to no compression; and `producer` which means retain the original compression codec set by the producer.(Default: producer).
+        :param int connections_max_idle_ms: Idle connections timeout: the server socket processor threads close the connections that idle for longer than this. (Default: 600000 ms (10 minutes)). Example: `540000`.
+        :param int default_replication_factor: Replication factor for auto-created topics (Default: 3).
+        :param int group_initial_rebalance_delay_ms: The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. (Default: 3000 ms (3 seconds)). Example: `3000`.
+        :param int group_max_session_timeout_ms: The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. Default: 1800000 ms (30 minutes). Example: `1800000`.
+        :param int group_min_session_timeout_ms: The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. (Default: 6000 ms (6 seconds)). Example: `6000`.
+        :param int log_cleaner_delete_retention_ms: How long are delete records retained? (Default: 86400000 (1 day)). Example: `86400000`.
+        :param int log_cleaner_max_compaction_lag_ms: The maximum amount of time message will remain uncompacted. Only applicable for logs that are being compacted. (Default: 9223372036854775807 ms (Long.MAX_VALUE)).
+        :param float log_cleaner_min_cleanable_ratio: Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option. (Default: 0.5). Example: `0.5`.
+        :param int log_cleaner_min_compaction_lag_ms: The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted. (Default: 0 ms).
+        :param str log_cleanup_policy: Enum: `delete`, `compact`, `compact,delete`. The default cleanup policy for segments beyond the retention window (Default: delete).
+        :param int log_flush_interval_messages: The number of messages accumulated on a log partition before messages are flushed to disk (Default: 9223372036854775807 (Long.MAX_VALUE)). Example: `9223372036854775807`.
+        :param int log_flush_interval_ms: The maximum time in ms that a message in any topic is kept in memory (page-cache) before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used (Default: null).
+        :param int log_index_interval_bytes: The interval with which Kafka adds an entry to the offset index (Default: 4096 bytes (4 kibibytes)). Example: `4096`.
+        :param int log_index_size_max_bytes: The maximum size in bytes of the offset index (Default: 10485760 (10 mebibytes)). Example: `10485760`.
+        :param int log_local_retention_bytes: The maximum size of local log segments that can grow for a partition before it gets eligible for deletion. If set to -2, the value of log.retention.bytes is used. The effective value should always be less than or equal to log.retention.bytes value. (Default: -2).
+        :param int log_local_retention_ms: The number of milliseconds to keep the local log segments before it gets eligible for deletion. If set to -2, the value of log.retention.ms is used. The effective value should always be less than or equal to log.retention.ms value. (Default: -2).
+        :param bool log_message_downconversion_enable: This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. (Default: true).
+        :param int log_message_timestamp_difference_max_ms: The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message (Default: 9223372036854775807 (Long.MAX_VALUE)).
+        :param str log_message_timestamp_type: Enum: `CreateTime`, `LogAppendTime`. Define whether the timestamp in the message is message create time or log append time. (Default: CreateTime).
+        :param bool log_preallocate: Should pre allocate file when create new segment? (Default: false).
+        :param int log_retention_bytes: The maximum size of the log before deleting messages (Default: -1).
+        :param int log_retention_hours: The number of hours to keep a log file before deleting it (Default: 168 hours (1 week)).
+        :param int log_retention_ms: The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied. (Default: null, log.retention.hours applies).
+        :param int log_roll_jitter_ms: The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used (Default: null).
+        :param int log_roll_ms: The maximum time before a new log segment is rolled out (in milliseconds). (Default: null, log.roll.hours applies (Default: 168, 7 days)).
+        :param int log_segment_bytes: The maximum size of a single log file (Default: 1073741824 bytes (1 gibibyte)).
+        :param int log_segment_delete_delay_ms: The amount of time to wait before deleting a file from the filesystem (Default: 60000 ms (1 minute)). Example: `60000`.
+        :param int max_connections_per_ip: The maximum number of connections allowed from each ip address (Default: 2147483647).
+        :param int max_incremental_fetch_session_cache_slots: The maximum number of incremental fetch sessions that the broker will maintain. (Default: 1000). Example: `1000`.
+        :param int message_max_bytes: The maximum size of message that the server can receive. (Default: 1048588 bytes (1 mebibyte + 12 bytes)). Example: `1048588`.
+        :param int min_insync_replicas: When a producer sets acks to `all` (or `-1`), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful. (Default: 1). Example: `1`.
+        :param int num_partitions: Number of partitions for auto-created topics (Default: 1).
+        :param int offsets_retention_minutes: Log retention window in minutes for offsets topic (Default: 10080 minutes (7 days)). Example: `10080`.
+        :param int producer_purgatory_purge_interval_requests: The purge interval (in number of requests) of the producer request purgatory (Default: 1000).
+        :param int replica_fetch_max_bytes: The number of bytes of messages to attempt to fetch for each partition . This is not an absolute maximum, if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. (Default: 1048576 bytes (1 mebibytes)).
+        :param int replica_fetch_response_max_bytes: Maximum bytes expected for the entire fetch response. Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum. (Default: 10485760 bytes (10 mebibytes)).
+        :param str sasl_oauthbearer_expected_audience: The (optional) comma-delimited setting for the broker to use to verify that the JWT was issued for one of the expected audiences. (Default: null).
+        :param str sasl_oauthbearer_expected_issuer: Optional setting for the broker to use to verify that the JWT was created by the expected issuer.(Default: null).
+        :param str sasl_oauthbearer_jwks_endpoint_url: OIDC JWKS endpoint URL. By setting this the SASL SSL OAuth2/OIDC authentication is enabled. See also other options for SASL OAuth2/OIDC. (Default: null).
+        :param str sasl_oauthbearer_sub_claim_name: Name of the scope from which to extract the subject claim from the JWT.(Default: sub).
+        :param int socket_request_max_bytes: The maximum number of bytes in a socket request (Default: 104857600 bytes).
+        :param bool transaction_partition_verification_enable: Enable verification that checks that the partition has been added to the transaction before writing transactional records to the partition. (Default: true).
+        :param int transaction_remove_expired_transaction_cleanup_interval_ms: The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (Default: 3600000 ms (1 hour)). Example: `3600000`.
+        :param int transaction_state_log_segment_bytes: The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (Default: 104857600 bytes (100 mebibytes)). Example: `104857600`.
         """
         if auto_create_topics_enable is not None:
             pulumi.set(__self__, "auto_create_topics_enable", auto_create_topics_enable)
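This hunk, and the per-property hunks that follow, only annotate the docstrings with each broker setting's documented default; the fields themselves are unchanged. For orientation, a hypothetical snippet overriding a few of these settings via the matching `KafkaKafkaUserConfigKafkaArgs` input type (all values illustrative):

import pulumi_aiven as aiven

# Sketch only: override a handful of broker settings whose documented
# defaults were clarified in this release.
kafka_user_config = aiven.KafkaKafkaUserConfigArgs(
    kafka=aiven.KafkaKafkaUserConfigKafkaArgs(
        auto_create_topics_enable=False,  # documented default: true
        default_replication_factor=3,     # documented default: 3
        min_insync_replicas=2,            # documented default: 1
        num_partitions=3,                 # documented default: 1
    ),
)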
@@ -7904,7 +7989,7 @@ class KafkaKafkaUserConfigKafka(dict):
7904
7989
  @pulumi.getter(name="autoCreateTopicsEnable")
7905
7990
  def auto_create_topics_enable(self) -> Optional[bool]:
7906
7991
  """
7907
- Enable auto creation of topics.
7992
+ Enable auto-creation of topics. (Default: true).
7908
7993
  """
7909
7994
  return pulumi.get(self, "auto_create_topics_enable")
7910
7995
 
@@ -7912,7 +7997,7 @@ class KafkaKafkaUserConfigKafka(dict):
7912
7997
  @pulumi.getter(name="compressionType")
7913
7998
  def compression_type(self) -> Optional[str]:
7914
7999
  """
7915
- Enum: `gzip`, `snappy`, `lz4`, `zstd`, `uncompressed`, `producer`. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `uncompressed` which is equivalent to no compression; and `producer` which means retain the original compression codec set by the producer.
8000
+ Enum: `gzip`, `snappy`, `lz4`, `zstd`, `uncompressed`, `producer`. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `uncompressed` which is equivalent to no compression; and `producer` which means retain the original compression codec set by the producer.(Default: producer).
7916
8001
  """
7917
8002
  return pulumi.get(self, "compression_type")
7918
8003
 
@@ -7920,7 +8005,7 @@ class KafkaKafkaUserConfigKafka(dict):
7920
8005
  @pulumi.getter(name="connectionsMaxIdleMs")
7921
8006
  def connections_max_idle_ms(self) -> Optional[int]:
7922
8007
  """
7923
- Idle connections timeout: the server socket processor threads close the connections that idle for longer than this. Example: `540000`.
8008
+ Idle connections timeout: the server socket processor threads close the connections that idle for longer than this. (Default: 600000 ms (10 minutes)). Example: `540000`.
7924
8009
  """
7925
8010
  return pulumi.get(self, "connections_max_idle_ms")
7926
8011
 
@@ -7928,7 +8013,7 @@ class KafkaKafkaUserConfigKafka(dict):
7928
8013
  @pulumi.getter(name="defaultReplicationFactor")
7929
8014
  def default_replication_factor(self) -> Optional[int]:
7930
8015
  """
7931
- Replication factor for autocreated topics.
8016
+ Replication factor for auto-created topics (Default: 3).
7932
8017
  """
7933
8018
  return pulumi.get(self, "default_replication_factor")
7934
8019
 
@@ -7936,7 +8021,7 @@ class KafkaKafkaUserConfigKafka(dict):
7936
8021
  @pulumi.getter(name="groupInitialRebalanceDelayMs")
7937
8022
  def group_initial_rebalance_delay_ms(self) -> Optional[int]:
7938
8023
  """
7939
- The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. Example: `3000`.
8024
+ The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. (Default: 3000 ms (3 seconds)). Example: `3000`.
7940
8025
  """
7941
8026
  return pulumi.get(self, "group_initial_rebalance_delay_ms")
7942
8027
 
@@ -7944,7 +8029,7 @@ class KafkaKafkaUserConfigKafka(dict):
7944
8029
  @pulumi.getter(name="groupMaxSessionTimeoutMs")
7945
8030
  def group_max_session_timeout_ms(self) -> Optional[int]:
7946
8031
  """
7947
- The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. Example: `1800000`.
8032
+ The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. Default: 1800000 ms (30 minutes). Example: `1800000`.
7948
8033
  """
7949
8034
  return pulumi.get(self, "group_max_session_timeout_ms")
7950
8035
 
@@ -7952,7 +8037,7 @@ class KafkaKafkaUserConfigKafka(dict):
7952
8037
  @pulumi.getter(name="groupMinSessionTimeoutMs")
7953
8038
  def group_min_session_timeout_ms(self) -> Optional[int]:
7954
8039
  """
7955
- The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. Example: `6000`.
8040
+ The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. (Default: 6000 ms (6 seconds)). Example: `6000`.
7956
8041
  """
7957
8042
  return pulumi.get(self, "group_min_session_timeout_ms")
7958
8043
 
@@ -7960,7 +8045,7 @@ class KafkaKafkaUserConfigKafka(dict):
7960
8045
  @pulumi.getter(name="logCleanerDeleteRetentionMs")
7961
8046
  def log_cleaner_delete_retention_ms(self) -> Optional[int]:
7962
8047
  """
7963
- How long are delete records retained? Example: `86400000`.
8048
+ How long are delete records retained? (Default: 86400000 (1 day)). Example: `86400000`.
7964
8049
  """
7965
8050
  return pulumi.get(self, "log_cleaner_delete_retention_ms")
7966
8051
 
@@ -7968,7 +8053,7 @@ class KafkaKafkaUserConfigKafka(dict):
7968
8053
  @pulumi.getter(name="logCleanerMaxCompactionLagMs")
7969
8054
  def log_cleaner_max_compaction_lag_ms(self) -> Optional[int]:
7970
8055
  """
7971
- The maximum amount of time message will remain uncompacted. Only applicable for logs that are being compacted.
8056
+ The maximum amount of time message will remain uncompacted. Only applicable for logs that are being compacted. (Default: 9223372036854775807 ms (Long.MAX_VALUE)).
7972
8057
  """
7973
8058
  return pulumi.get(self, "log_cleaner_max_compaction_lag_ms")
7974
8059
 
@@ -7976,7 +8061,7 @@ class KafkaKafkaUserConfigKafka(dict):
7976
8061
  @pulumi.getter(name="logCleanerMinCleanableRatio")
7977
8062
  def log_cleaner_min_cleanable_ratio(self) -> Optional[float]:
7978
8063
  """
7979
- Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option. Example: `0.5`.
8064
+ Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option. (Default: 0.5). Example: `0.5`.
7980
8065
  """
7981
8066
  return pulumi.get(self, "log_cleaner_min_cleanable_ratio")
7982
8067
 
@@ -7984,7 +8069,7 @@ class KafkaKafkaUserConfigKafka(dict):
7984
8069
  @pulumi.getter(name="logCleanerMinCompactionLagMs")
7985
8070
  def log_cleaner_min_compaction_lag_ms(self) -> Optional[int]:
7986
8071
  """
7987
- The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.
8072
+ The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted. (Default: 0 ms).
7988
8073
  """
7989
8074
  return pulumi.get(self, "log_cleaner_min_compaction_lag_ms")
7990
8075
 
@@ -7992,7 +8077,7 @@ class KafkaKafkaUserConfigKafka(dict):
7992
8077
  @pulumi.getter(name="logCleanupPolicy")
7993
8078
  def log_cleanup_policy(self) -> Optional[str]:
7994
8079
  """
7995
- Enum: `delete`, `compact`, `compact,delete`. The default cleanup policy for segments beyond the retention window.
8080
+ Enum: `delete`, `compact`, `compact,delete`. The default cleanup policy for segments beyond the retention window (Default: delete).
7996
8081
  """
7997
8082
  return pulumi.get(self, "log_cleanup_policy")
7998
8083
 
@@ -8000,7 +8085,7 @@ class KafkaKafkaUserConfigKafka(dict):
8000
8085
  @pulumi.getter(name="logFlushIntervalMessages")
8001
8086
  def log_flush_interval_messages(self) -> Optional[int]:
8002
8087
  """
8003
- The number of messages accumulated on a log partition before messages are flushed to disk. Example: `9223372036854775807`.
8088
+ The number of messages accumulated on a log partition before messages are flushed to disk (Default: 9223372036854775807 (Long.MAX_VALUE)). Example: `9223372036854775807`.
8004
8089
  """
8005
8090
  return pulumi.get(self, "log_flush_interval_messages")
8006
8091
 
@@ -8008,7 +8093,7 @@ class KafkaKafkaUserConfigKafka(dict):
8008
8093
  @pulumi.getter(name="logFlushIntervalMs")
8009
8094
  def log_flush_interval_ms(self) -> Optional[int]:
8010
8095
  """
8011
- The maximum time in ms that a message in any topic is kept in memory before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used.
8096
+ The maximum time in ms that a message in any topic is kept in memory (page-cache) before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used (Default: null).
8012
8097
  """
8013
8098
  return pulumi.get(self, "log_flush_interval_ms")
8014
8099
 
@@ -8016,7 +8101,7 @@ class KafkaKafkaUserConfigKafka(dict):
8016
8101
  @pulumi.getter(name="logIndexIntervalBytes")
8017
8102
  def log_index_interval_bytes(self) -> Optional[int]:
8018
8103
  """
8019
- The interval with which Kafka adds an entry to the offset index. Example: `4096`.
8104
+ The interval with which Kafka adds an entry to the offset index (Default: 4096 bytes (4 kibibytes)). Example: `4096`.
8020
8105
  """
8021
8106
  return pulumi.get(self, "log_index_interval_bytes")
8022
8107
 
@@ -8024,7 +8109,7 @@ class KafkaKafkaUserConfigKafka(dict):
8024
8109
  @pulumi.getter(name="logIndexSizeMaxBytes")
8025
8110
  def log_index_size_max_bytes(self) -> Optional[int]:
8026
8111
  """
8027
- The maximum size in bytes of the offset index. Example: `10485760`.
8112
+ The maximum size in bytes of the offset index (Default: 10485760 (10 mebibytes)). Example: `10485760`.
8028
8113
  """
8029
8114
  return pulumi.get(self, "log_index_size_max_bytes")
8030
8115
 
@@ -8032,7 +8117,7 @@ class KafkaKafkaUserConfigKafka(dict):
8032
8117
  @pulumi.getter(name="logLocalRetentionBytes")
8033
8118
  def log_local_retention_bytes(self) -> Optional[int]:
8034
8119
  """
8035
- The maximum size of local log segments that can grow for a partition before it gets eligible for deletion. If set to -2, the value of log.retention.bytes is used. The effective value should always be less than or equal to log.retention.bytes value.
8120
+ The maximum size of local log segments that can grow for a partition before it gets eligible for deletion. If set to -2, the value of log.retention.bytes is used. The effective value should always be less than or equal to log.retention.bytes value. (Default: -2).
8036
8121
  """
8037
8122
  return pulumi.get(self, "log_local_retention_bytes")
8038
8123
 
@@ -8040,7 +8125,7 @@ class KafkaKafkaUserConfigKafka(dict):
8040
8125
  @pulumi.getter(name="logLocalRetentionMs")
8041
8126
  def log_local_retention_ms(self) -> Optional[int]:
8042
8127
  """
8043
- The number of milliseconds to keep the local log segments before it gets eligible for deletion. If set to -2, the value of log.retention.ms is used. The effective value should always be less than or equal to log.retention.ms value.
8128
+ The number of milliseconds to keep the local log segments before it gets eligible for deletion. If set to -2, the value of log.retention.ms is used. The effective value should always be less than or equal to log.retention.ms value. (Default: -2).
8044
8129
  """
8045
8130
  return pulumi.get(self, "log_local_retention_ms")
8046
8131
 
@@ -8048,7 +8133,7 @@ class KafkaKafkaUserConfigKafka(dict):
8048
8133
  @pulumi.getter(name="logMessageDownconversionEnable")
8049
8134
  def log_message_downconversion_enable(self) -> Optional[bool]:
8050
8135
  """
8051
- This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests.
8136
+ This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. (Default: true).
8052
8137
  """
8053
8138
  return pulumi.get(self, "log_message_downconversion_enable")
8054
8139
 
@@ -8056,7 +8141,7 @@ class KafkaKafkaUserConfigKafka(dict):
8056
8141
  @pulumi.getter(name="logMessageTimestampDifferenceMaxMs")
8057
8142
  def log_message_timestamp_difference_max_ms(self) -> Optional[int]:
8058
8143
  """
8059
- The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message.
8144
+ The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message (Default: 9223372036854775807 (Long.MAX_VALUE)).
8060
8145
  """
8061
8146
  return pulumi.get(self, "log_message_timestamp_difference_max_ms")
8062
8147
 
@@ -8064,7 +8149,7 @@ class KafkaKafkaUserConfigKafka(dict):
8064
8149
  @pulumi.getter(name="logMessageTimestampType")
8065
8150
  def log_message_timestamp_type(self) -> Optional[str]:
8066
8151
  """
8067
- Enum: `CreateTime`, `LogAppendTime`. Define whether the timestamp in the message is message create time or log append time.
8152
+ Enum: `CreateTime`, `LogAppendTime`. Define whether the timestamp in the message is message create time or log append time. (Default: CreateTime).
8068
8153
  """
8069
8154
  return pulumi.get(self, "log_message_timestamp_type")
8070
8155
 
@@ -8072,7 +8157,7 @@ class KafkaKafkaUserConfigKafka(dict):
8072
8157
  @pulumi.getter(name="logPreallocate")
8073
8158
  def log_preallocate(self) -> Optional[bool]:
8074
8159
  """
8075
- Should pre allocate file when create new segment?
8160
+ Should pre allocate file when create new segment? (Default: false).
8076
8161
  """
8077
8162
  return pulumi.get(self, "log_preallocate")
8078
8163
 
@@ -8080,7 +8165,7 @@ class KafkaKafkaUserConfigKafka(dict):
8080
8165
  @pulumi.getter(name="logRetentionBytes")
8081
8166
  def log_retention_bytes(self) -> Optional[int]:
8082
8167
  """
8083
- The maximum size of the log before deleting messages.
8168
+ The maximum size of the log before deleting messages (Default: -1).
8084
8169
  """
8085
8170
  return pulumi.get(self, "log_retention_bytes")
8086
8171
 
@@ -8088,7 +8173,7 @@ class KafkaKafkaUserConfigKafka(dict):
8088
8173
  @pulumi.getter(name="logRetentionHours")
8089
8174
  def log_retention_hours(self) -> Optional[int]:
8090
8175
  """
8091
- The number of hours to keep a log file before deleting it.
8176
+ The number of hours to keep a log file before deleting it (Default: 168 hours (1 week)).
8092
8177
  """
8093
8178
  return pulumi.get(self, "log_retention_hours")
8094
8179
 
@@ -8096,7 +8181,7 @@ class KafkaKafkaUserConfigKafka(dict):
8096
8181
  @pulumi.getter(name="logRetentionMs")
8097
8182
  def log_retention_ms(self) -> Optional[int]:
8098
8183
  """
8099
- The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied.
8184
+ The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied. (Default: null, log.retention.hours applies).
8100
8185
  """
8101
8186
  return pulumi.get(self, "log_retention_ms")
8102
8187
 
@@ -8104,7 +8189,7 @@ class KafkaKafkaUserConfigKafka(dict):
  @pulumi.getter(name="logRollJitterMs")
  def log_roll_jitter_ms(self) -> Optional[int]:
  """
- The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used.
+ The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used (Default: null).
  """
  return pulumi.get(self, "log_roll_jitter_ms")

@@ -8112,7 +8197,7 @@ class KafkaKafkaUserConfigKafka(dict):
  @pulumi.getter(name="logRollMs")
  def log_roll_ms(self) -> Optional[int]:
  """
- The maximum time before a new log segment is rolled out (in milliseconds).
+ The maximum time before a new log segment is rolled out (in milliseconds). (Default: null, log.roll.hours applies (Default: 168, 7 days)).
  """
  return pulumi.get(self, "log_roll_ms")

@@ -8120,7 +8205,7 @@ class KafkaKafkaUserConfigKafka(dict):
  @pulumi.getter(name="logSegmentBytes")
  def log_segment_bytes(self) -> Optional[int]:
  """
- The maximum size of a single log file.
+ The maximum size of a single log file (Default: 1073741824 bytes (1 gibibyte)).
  """
  return pulumi.get(self, "log_segment_bytes")

@@ -8128,7 +8213,7 @@ class KafkaKafkaUserConfigKafka(dict):
  @pulumi.getter(name="logSegmentDeleteDelayMs")
  def log_segment_delete_delay_ms(self) -> Optional[int]:
  """
- The amount of time to wait before deleting a file from the filesystem. Example: `60000`.
+ The amount of time to wait before deleting a file from the filesystem (Default: 60000 ms (1 minute)). Example: `60000`.
  """
  return pulumi.get(self, "log_segment_delete_delay_ms")

@@ -8136,7 +8221,7 @@ class KafkaKafkaUserConfigKafka(dict):
  @pulumi.getter(name="maxConnectionsPerIp")
  def max_connections_per_ip(self) -> Optional[int]:
  """
- The maximum number of connections allowed from each ip address (defaults to 2147483647).
+ The maximum number of connections allowed from each IP address (Default: 2147483647).
  """
  return pulumi.get(self, "max_connections_per_ip")

@@ -8144,7 +8229,7 @@ class KafkaKafkaUserConfigKafka(dict):
  @pulumi.getter(name="maxIncrementalFetchSessionCacheSlots")
  def max_incremental_fetch_session_cache_slots(self) -> Optional[int]:
  """
- The maximum number of incremental fetch sessions that the broker will maintain. Example: `1000`.
+ The maximum number of incremental fetch sessions that the broker will maintain. (Default: 1000). Example: `1000`.
  """
  return pulumi.get(self, "max_incremental_fetch_session_cache_slots")

@@ -8152,7 +8237,7 @@ class KafkaKafkaUserConfigKafka(dict):
  @pulumi.getter(name="messageMaxBytes")
  def message_max_bytes(self) -> Optional[int]:
  """
- The maximum size of message that the server can receive. Example: `1048588`.
+ The maximum size of message that the server can receive. (Default: 1048588 bytes (1 mebibyte + 12 bytes)). Example: `1048588`.
  """
  return pulumi.get(self, "message_max_bytes")

@@ -8160,7 +8245,7 @@ class KafkaKafkaUserConfigKafka(dict):
  @pulumi.getter(name="minInsyncReplicas")
  def min_insync_replicas(self) -> Optional[int]:
  """
- When a producer sets acks to `all` (or `-1`), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful. Example: `1`.
+ When a producer sets acks to `all` (or `-1`), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful. (Default: 1). Example: `1`.
  """
  return pulumi.get(self, "min_insync_replicas")

@@ -8168,7 +8253,7 @@ class KafkaKafkaUserConfigKafka(dict):
  @pulumi.getter(name="numPartitions")
  def num_partitions(self) -> Optional[int]:
  """
- Number of partitions for autocreated topics.
+ Number of partitions for auto-created topics (Default: 1).
  """
  return pulumi.get(self, "num_partitions")

@@ -8176,7 +8261,7 @@ class KafkaKafkaUserConfigKafka(dict):
  @pulumi.getter(name="offsetsRetentionMinutes")
  def offsets_retention_minutes(self) -> Optional[int]:
  """
- Log retention window in minutes for offsets topic. Example: `10080`.
+ Log retention window in minutes for offsets topic (Default: 10080 minutes (7 days)). Example: `10080`.
  """
  return pulumi.get(self, "offsets_retention_minutes")

@@ -8184,7 +8269,7 @@ class KafkaKafkaUserConfigKafka(dict):
  @pulumi.getter(name="producerPurgatoryPurgeIntervalRequests")
  def producer_purgatory_purge_interval_requests(self) -> Optional[int]:
  """
- The purge interval (in number of requests) of the producer request purgatory(defaults to 1000).
+ The purge interval (in number of requests) of the producer request purgatory (Default: 1000).
  """
  return pulumi.get(self, "producer_purgatory_purge_interval_requests")

@@ -8192,7 +8277,7 @@ class KafkaKafkaUserConfigKafka(dict):
  @pulumi.getter(name="replicaFetchMaxBytes")
  def replica_fetch_max_bytes(self) -> Optional[int]:
  """
- The number of bytes of messages to attempt to fetch for each partition (defaults to 1048576). This is not an absolute maximum, if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made.
+ The number of bytes of messages to attempt to fetch for each partition. This is not an absolute maximum: if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. (Default: 1048576 bytes (1 mebibyte)).
  """
  return pulumi.get(self, "replica_fetch_max_bytes")

@@ -8200,7 +8285,7 @@ class KafkaKafkaUserConfigKafka(dict):
  @pulumi.getter(name="replicaFetchResponseMaxBytes")
  def replica_fetch_response_max_bytes(self) -> Optional[int]:
  """
- Maximum bytes expected for the entire fetch response (defaults to 10485760). Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum.
+ Maximum bytes expected for the entire fetch response. Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum. (Default: 10485760 bytes (10 mebibytes)).
  """
  return pulumi.get(self, "replica_fetch_response_max_bytes")

@@ -8208,7 +8293,7 @@ class KafkaKafkaUserConfigKafka(dict):
  @pulumi.getter(name="saslOauthbearerExpectedAudience")
  def sasl_oauthbearer_expected_audience(self) -> Optional[str]:
  """
- The (optional) comma-delimited setting for the broker to use to verify that the JWT was issued for one of the expected audiences.
+ The (optional) comma-delimited setting for the broker to use to verify that the JWT was issued for one of the expected audiences. (Default: null).
  """
  return pulumi.get(self, "sasl_oauthbearer_expected_audience")

@@ -8216,7 +8301,7 @@ class KafkaKafkaUserConfigKafka(dict):
  @pulumi.getter(name="saslOauthbearerExpectedIssuer")
  def sasl_oauthbearer_expected_issuer(self) -> Optional[str]:
  """
- Optional setting for the broker to use to verify that the JWT was created by the expected issuer.
+ Optional setting for the broker to use to verify that the JWT was created by the expected issuer. (Default: null).
  """
  return pulumi.get(self, "sasl_oauthbearer_expected_issuer")

@@ -8224,7 +8309,7 @@ class KafkaKafkaUserConfigKafka(dict):
  @pulumi.getter(name="saslOauthbearerJwksEndpointUrl")
  def sasl_oauthbearer_jwks_endpoint_url(self) -> Optional[str]:
  """
- OIDC JWKS endpoint URL. By setting this the SASL SSL OAuth2/OIDC authentication is enabled. See also other options for SASL OAuth2/OIDC.
+ OIDC JWKS endpoint URL. By setting this the SASL SSL OAuth2/OIDC authentication is enabled. See also other options for SASL OAuth2/OIDC. (Default: null).
  """
  return pulumi.get(self, "sasl_oauthbearer_jwks_endpoint_url")

@@ -8232,7 +8317,7 @@ class KafkaKafkaUserConfigKafka(dict):
  @pulumi.getter(name="saslOauthbearerSubClaimName")
  def sasl_oauthbearer_sub_claim_name(self) -> Optional[str]:
  """
- Name of the scope from which to extract the subject claim from the JWT. Defaults to sub.
+ Name of the scope from which to extract the subject claim from the JWT. (Default: sub).
  """
  return pulumi.get(self, "sasl_oauthbearer_sub_claim_name")

@@ -8240,7 +8325,7 @@ class KafkaKafkaUserConfigKafka(dict):
  @pulumi.getter(name="socketRequestMaxBytes")
  def socket_request_max_bytes(self) -> Optional[int]:
  """
- The maximum number of bytes in a socket request (defaults to 104857600).
+ The maximum number of bytes in a socket request (Default: 104857600 bytes).
  """
  return pulumi.get(self, "socket_request_max_bytes")

@@ -8248,7 +8333,7 @@ class KafkaKafkaUserConfigKafka(dict):
  @pulumi.getter(name="transactionPartitionVerificationEnable")
  def transaction_partition_verification_enable(self) -> Optional[bool]:
  """
- Enable verification that checks that the partition has been added to the transaction before writing transactional records to the partition.
+ Enable verification that checks that the partition has been added to the transaction before writing transactional records to the partition. (Default: true).
  """
  return pulumi.get(self, "transaction_partition_verification_enable")

@@ -8256,7 +8341,7 @@ class KafkaKafkaUserConfigKafka(dict):
  @pulumi.getter(name="transactionRemoveExpiredTransactionCleanupIntervalMs")
  def transaction_remove_expired_transaction_cleanup_interval_ms(self) -> Optional[int]:
  """
- The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (defaults to 3600000 (1 hour)).
+ The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (Default: 3600000 ms (1 hour)). Example: `3600000`.
  """
  return pulumi.get(self, "transaction_remove_expired_transaction_cleanup_interval_ms")

@@ -8264,7 +8349,7 @@ class KafkaKafkaUserConfigKafka(dict):
  @pulumi.getter(name="transactionStateLogSegmentBytes")
  def transaction_state_log_segment_bytes(self) -> Optional[int]:
  """
- The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (defaults to 104857600 (100 mebibytes)).
+ The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (Default: 104857600 bytes (100 mebibytes)). Example: `104857600`.
  """
  return pulumi.get(self, "transaction_state_log_segment_bytes")

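The hunks above mostly fold the broker-side defaults into the option docstrings. For orientation, a minimal sketch of how these kafka.* options are set from Pulumi; dict-style inputs are accepted in place of the typed Args classes, and the project, plan, and service names below are placeholders:

import pulumi_aiven as aiven

# Minimal sketch: overriding a few of the documented broker defaults.
kafka = aiven.Kafka(
    "example-kafka",
    project="my-project",                 # placeholder project name
    cloud_name="google-europe-west1",
    plan="business-4",
    service_name="example-kafka",
    kafka_user_config={
        "kafka": {
            "log_retention_ms": -1,        # no time-based retention limit
            "message_max_bytes": 1048588,  # the documented default
            "min_insync_replicas": 2,
            "num_partitions": 3,
        },
    },
)

Options left unset fall back to the defaults now spelled out in the docstrings above.
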
@@ -8896,6 +8981,68 @@ class KafkaKafkaUserConfigKafkaRestConfig(dict):
  return pulumi.get(self, "simpleconsumer_pool_size_max")


+ @pulumi.output_type
+ class KafkaKafkaUserConfigKafkaSaslMechanisms(dict):
+ @staticmethod
+ def __key_warning(key: str):
+ suggest = None
+ if key == "scramSha256":
+ suggest = "scram_sha256"
+ elif key == "scramSha512":
+ suggest = "scram_sha512"
+
+ if suggest:
+ pulumi.log.warn(f"Key '{key}' not found in KafkaKafkaUserConfigKafkaSaslMechanisms. Access the value via the '{suggest}' property getter instead.")
+
+ def __getitem__(self, key: str) -> Any:
+ KafkaKafkaUserConfigKafkaSaslMechanisms.__key_warning(key)
+ return super().__getitem__(key)
+
+ def get(self, key: str, default = None) -> Any:
+ KafkaKafkaUserConfigKafkaSaslMechanisms.__key_warning(key)
+ return super().get(key, default)
+
+ def __init__(__self__, *,
+ plain: Optional[bool] = None,
+ scram_sha256: Optional[bool] = None,
+ scram_sha512: Optional[bool] = None):
+ """
+ :param bool plain: Enable PLAIN mechanism. Default: `true`.
+ :param bool scram_sha256: Enable SCRAM-SHA-256 mechanism. Default: `true`.
+ :param bool scram_sha512: Enable SCRAM-SHA-512 mechanism. Default: `true`.
+ """
+ if plain is not None:
+ pulumi.set(__self__, "plain", plain)
+ if scram_sha256 is not None:
+ pulumi.set(__self__, "scram_sha256", scram_sha256)
+ if scram_sha512 is not None:
+ pulumi.set(__self__, "scram_sha512", scram_sha512)
+
+ @property
+ @pulumi.getter
+ def plain(self) -> Optional[bool]:
+ """
+ Enable PLAIN mechanism. Default: `true`.
+ """
+ return pulumi.get(self, "plain")
+
+ @property
+ @pulumi.getter(name="scramSha256")
+ def scram_sha256(self) -> Optional[bool]:
+ """
+ Enable SCRAM-SHA-256 mechanism. Default: `true`.
+ """
+ return pulumi.get(self, "scram_sha256")
+
+ @property
+ @pulumi.getter(name="scramSha512")
+ def scram_sha512(self) -> Optional[bool]:
+ """
+ Enable SCRAM-SHA-512 mechanism. Default: `true`.
+ """
+ return pulumi.get(self, "scram_sha512")
+
+
  @pulumi.output_type
  class KafkaKafkaUserConfigPrivateAccess(dict):
  @staticmethod
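The new KafkaKafkaUserConfigKafkaSaslMechanisms output type surfaces per-mechanism toggles. A hedged sketch of how it would be configured, assuming the type maps to a kafka_sasl_mechanisms block (and an existing kafka_authentication_methods block) in kafka_user_config:

import pulumi_aiven as aiven

# Sketch: allow only SCRAM, disable PLAIN. The config keys are assumptions
# inferred from the output-type name above; names are placeholders.
kafka = aiven.Kafka(
    "sasl-kafka",
    project="my-project",
    cloud_name="aws-eu-west-1",
    plan="startup-2",
    service_name="sasl-kafka",
    kafka_user_config={
        "kafka_authentication_methods": {"sasl": True},  # assumed key
        "kafka_sasl_mechanisms": {
            "plain": False,
            "scram_sha256": True,
            "scram_sha512": True,
        },
    },
)
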
@@ -9570,7 +9717,9 @@ class KafkaMirrorMakerKafkaMirrormakerUserConfigKafkaMirrormaker(dict):
  @staticmethod
  def __key_warning(key: str):
  suggest = None
- if key == "emitCheckpointsEnabled":
+ if key == "adminTimeoutMs":
+ suggest = "admin_timeout_ms"
+ elif key == "emitCheckpointsEnabled":
  suggest = "emit_checkpoints_enabled"
  elif key == "emitCheckpointsIntervalSeconds":
  suggest = "emit_checkpoints_interval_seconds"
@@ -9607,6 +9756,7 @@ class KafkaMirrorMakerKafkaMirrormakerUserConfigKafkaMirrormaker(dict):
  return super().get(key, default)

  def __init__(__self__, *,
+ admin_timeout_ms: Optional[int] = None,
  emit_checkpoints_enabled: Optional[bool] = None,
  emit_checkpoints_interval_seconds: Optional[int] = None,
  groups: Optional[str] = None,
@@ -9621,6 +9771,7 @@ class KafkaMirrorMakerKafkaMirrormakerUserConfigKafkaMirrormaker(dict):
  sync_topic_configs_enabled: Optional[bool] = None,
  tasks_max_per_cpu: Optional[int] = None):
  """
+ :param int admin_timeout_ms: Timeout for administrative tasks, e.g. detecting new topics, loading of consumer group and offsets. Defaults to 60000 milliseconds (1 minute).
  :param bool emit_checkpoints_enabled: Whether to emit consumer group offset checkpoints to target cluster periodically (default: true).
  :param int emit_checkpoints_interval_seconds: Frequency at which consumer group offset checkpoints are emitted (default: 60, every minute). Example: `60`.
  :param str groups: Consumer groups to replicate. Supports comma-separated group IDs and regexes. Example: `.*`.
@@ -9635,6 +9786,8 @@ class KafkaMirrorMakerKafkaMirrormakerUserConfigKafkaMirrormaker(dict):
  :param bool sync_topic_configs_enabled: Whether to periodically configure remote topics to match their corresponding upstream topics.
  :param int tasks_max_per_cpu: `tasks.max` is set to this multiplied by the number of CPUs in the service. Default: `1`.
  """
+ if admin_timeout_ms is not None:
+ pulumi.set(__self__, "admin_timeout_ms", admin_timeout_ms)
  if emit_checkpoints_enabled is not None:
  pulumi.set(__self__, "emit_checkpoints_enabled", emit_checkpoints_enabled)
  if emit_checkpoints_interval_seconds is not None:
@@ -9662,6 +9815,14 @@ class KafkaMirrorMakerKafkaMirrormakerUserConfigKafkaMirrormaker(dict):
  if tasks_max_per_cpu is not None:
  pulumi.set(__self__, "tasks_max_per_cpu", tasks_max_per_cpu)

+ @property
+ @pulumi.getter(name="adminTimeoutMs")
+ def admin_timeout_ms(self) -> Optional[int]:
+ """
+ Timeout for administrative tasks, e.g. detecting new topics, loading of consumer group and offsets. Defaults to 60000 milliseconds (1 minute).
+ """
+ return pulumi.get(self, "admin_timeout_ms")
+
  @property
  @pulumi.getter(name="emitCheckpointsEnabled")
  def emit_checkpoints_enabled(self) -> Optional[bool]:
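The new admin_timeout_ms option bounds MirrorMaker's administrative calls (topic detection, consumer-group and offset loading). A sketch of raising it on a service, with placeholder names:

import pulumi_aiven as aiven

# Sketch: give slow source clusters more time for admin operations.
mirrormaker = aiven.KafkaMirrorMaker(
    "example-mirrormaker",
    project="my-project",                # placeholder
    cloud_name="google-europe-west1",
    plan="startup-4",
    service_name="example-mirrormaker",
    kafka_mirrormaker_user_config={
        "kafka_mirrormaker": {
            "admin_timeout_ms": 120000,  # double the 60000 ms default
            "emit_checkpoints_enabled": True,
        },
    },
)
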
@@ -12714,6 +12875,8 @@ class MySqlMysqlUserConfigMigration(dict):
  suggest = None
  if key == "ignoreDbs":
  suggest = "ignore_dbs"
+ elif key == "ignoreRoles":
+ suggest = "ignore_roles"

  if suggest:
  pulumi.log.warn(f"Key '{key}' not found in MySqlMysqlUserConfigMigration. Access the value via the '{suggest}' property getter instead.")
@@ -12731,6 +12894,7 @@ class MySqlMysqlUserConfigMigration(dict):
  port: int,
  dbname: Optional[str] = None,
  ignore_dbs: Optional[str] = None,
+ ignore_roles: Optional[str] = None,
  method: Optional[str] = None,
  password: Optional[str] = None,
  ssl: Optional[bool] = None,
@@ -12740,6 +12904,7 @@ class MySqlMysqlUserConfigMigration(dict):
  :param int port: Port number of the server where to migrate data from. Example: `1234`.
  :param str dbname: Database name for bootstrapping the initial connection. Example: `defaultdb`.
  :param str ignore_dbs: Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment). Example: `db1,db2`.
+ :param str ignore_roles: Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment). Example: `role1,role2`.
  :param str method: Enum: `dump`, `replication`. The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
  :param str password: Password for authentication with the server where to migrate data from. Example: `jjKk45Nnd`.
  :param bool ssl: The server where to migrate data from is secured with SSL. Default: `true`.
@@ -12751,6 +12916,8 @@ class MySqlMysqlUserConfigMigration(dict):
  pulumi.set(__self__, "dbname", dbname)
  if ignore_dbs is not None:
  pulumi.set(__self__, "ignore_dbs", ignore_dbs)
+ if ignore_roles is not None:
+ pulumi.set(__self__, "ignore_roles", ignore_roles)
  if method is not None:
  pulumi.set(__self__, "method", method)
  if password is not None:
@@ -12792,6 +12959,14 @@ class MySqlMysqlUserConfigMigration(dict):
  """
  return pulumi.get(self, "ignore_dbs")

+ @property
+ @pulumi.getter(name="ignoreRoles")
+ def ignore_roles(self) -> Optional[str]:
+ """
+ Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment). Example: `role1,role2`.
+ """
+ return pulumi.get(self, "ignore_roles")
+
  @property
  @pulumi.getter
  def method(self) -> Optional[str]:
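A sketch of the new ignore_roles field in a migration block. Hostname and credentials are placeholders; per the docstring above, role filtering currently applies to PostgreSQL sources only:

import pulumi_aiven as aiven

# Sketch: skip selected databases and roles while importing an external server.
mysql = aiven.MySql(
    "migrated-mysql",
    project="my-project",            # placeholder
    cloud_name="aws-eu-west-1",
    plan="business-4",
    service_name="migrated-mysql",
    mysql_user_config={
        "migration": {
            "host": "legacy-db.example.com",
            "port": 3306,
            "ssl": True,
            "ignore_dbs": "tempdb,scratch",
            "ignore_roles": "role1,role2",  # effective for PostgreSQL sources
        },
    },
)
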
@@ -13706,12 +13881,18 @@ class OpenSearchOpensearchUserConfig(dict):
  suggest = None
  if key == "additionalBackupRegions":
  suggest = "additional_backup_regions"
+ elif key == "azureMigration":
+ suggest = "azure_migration"
  elif key == "customDomain":
  suggest = "custom_domain"
  elif key == "disableReplicationFactorAdjustment":
  suggest = "disable_replication_factor_adjustment"
+ elif key == "gcsMigration":
+ suggest = "gcs_migration"
  elif key == "indexPatterns":
  suggest = "index_patterns"
+ elif key == "indexRollup":
+ suggest = "index_rollup"
  elif key == "indexTemplate":
  suggest = "index_template"
  elif key == "ipFilterObjects":
@@ -13738,6 +13919,8 @@ class OpenSearchOpensearchUserConfig(dict):
  suggest = "public_access"
  elif key == "recoveryBasebackupName":
  suggest = "recovery_basebackup_name"
+ elif key == "s3Migration":
+ suggest = "s3_migration"
  elif key == "serviceLog":
  suggest = "service_log"
  elif key == "serviceToForkFrom":
@@ -13758,9 +13941,12 @@ class OpenSearchOpensearchUserConfig(dict):

  def __init__(__self__, *,
  additional_backup_regions: Optional[str] = None,
+ azure_migration: Optional['outputs.OpenSearchOpensearchUserConfigAzureMigration'] = None,
  custom_domain: Optional[str] = None,
  disable_replication_factor_adjustment: Optional[bool] = None,
+ gcs_migration: Optional['outputs.OpenSearchOpensearchUserConfigGcsMigration'] = None,
  index_patterns: Optional[Sequence['outputs.OpenSearchOpensearchUserConfigIndexPattern']] = None,
+ index_rollup: Optional['outputs.OpenSearchOpensearchUserConfigIndexRollup'] = None,
  index_template: Optional['outputs.OpenSearchOpensearchUserConfigIndexTemplate'] = None,
  ip_filter_objects: Optional[Sequence['outputs.OpenSearchOpensearchUserConfigIpFilterObject']] = None,
  ip_filter_strings: Optional[Sequence[str]] = None,
@@ -13776,6 +13962,7 @@ class OpenSearchOpensearchUserConfig(dict):
  project_to_fork_from: Optional[str] = None,
  public_access: Optional['outputs.OpenSearchOpensearchUserConfigPublicAccess'] = None,
  recovery_basebackup_name: Optional[str] = None,
+ s3_migration: Optional['outputs.OpenSearchOpensearchUserConfigS3Migration'] = None,
  saml: Optional['outputs.OpenSearchOpensearchUserConfigSaml'] = None,
  service_log: Optional[bool] = None,
  service_to_fork_from: Optional[str] = None,
@@ -13785,6 +13972,7 @@ class OpenSearchOpensearchUserConfig(dict):
  :param str custom_domain: Serve the web frontend using a custom CNAME pointing to the Aiven DNS name. Example: `grafana.example.org`.
  :param bool disable_replication_factor_adjustment: Disable automatic replication factor adjustment for multi-node services. By default, Aiven ensures all indexes are replicated at least to two nodes. Note: Due to potential data loss in case of losing a service node, this setting can no longer be activated.
  :param Sequence['OpenSearchOpensearchUserConfigIndexPatternArgs'] index_patterns: Index patterns
+ :param 'OpenSearchOpensearchUserConfigIndexRollupArgs' index_rollup: Index rollup settings
  :param 'OpenSearchOpensearchUserConfigIndexTemplateArgs' index_template: Template settings for all new indexes
  :param Sequence['OpenSearchOpensearchUserConfigIpFilterObjectArgs'] ip_filter_objects: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`
  :param Sequence[str] ip_filter_strings: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`.
@@ -13807,12 +13995,18 @@ class OpenSearchOpensearchUserConfig(dict):
  """
  if additional_backup_regions is not None:
  pulumi.set(__self__, "additional_backup_regions", additional_backup_regions)
+ if azure_migration is not None:
+ pulumi.set(__self__, "azure_migration", azure_migration)
  if custom_domain is not None:
  pulumi.set(__self__, "custom_domain", custom_domain)
  if disable_replication_factor_adjustment is not None:
  pulumi.set(__self__, "disable_replication_factor_adjustment", disable_replication_factor_adjustment)
+ if gcs_migration is not None:
+ pulumi.set(__self__, "gcs_migration", gcs_migration)
  if index_patterns is not None:
  pulumi.set(__self__, "index_patterns", index_patterns)
+ if index_rollup is not None:
+ pulumi.set(__self__, "index_rollup", index_rollup)
  if index_template is not None:
  pulumi.set(__self__, "index_template", index_template)
  if ip_filter_objects is not None:
@@ -13843,6 +14037,8 @@ class OpenSearchOpensearchUserConfig(dict):
  pulumi.set(__self__, "public_access", public_access)
  if recovery_basebackup_name is not None:
  pulumi.set(__self__, "recovery_basebackup_name", recovery_basebackup_name)
+ if s3_migration is not None:
+ pulumi.set(__self__, "s3_migration", s3_migration)
  if saml is not None:
  pulumi.set(__self__, "saml", saml)
  if service_log is not None:
@@ -13860,6 +14056,11 @@ class OpenSearchOpensearchUserConfig(dict):
  """
  return pulumi.get(self, "additional_backup_regions")

+ @property
+ @pulumi.getter(name="azureMigration")
+ def azure_migration(self) -> Optional['outputs.OpenSearchOpensearchUserConfigAzureMigration']:
+ return pulumi.get(self, "azure_migration")
+
  @property
  @pulumi.getter(name="customDomain")
  def custom_domain(self) -> Optional[str]:
@@ -13876,6 +14077,11 @@ class OpenSearchOpensearchUserConfig(dict):
  """
  return pulumi.get(self, "disable_replication_factor_adjustment")

+ @property
+ @pulumi.getter(name="gcsMigration")
+ def gcs_migration(self) -> Optional['outputs.OpenSearchOpensearchUserConfigGcsMigration']:
+ return pulumi.get(self, "gcs_migration")
+
  @property
  @pulumi.getter(name="indexPatterns")
  def index_patterns(self) -> Optional[Sequence['outputs.OpenSearchOpensearchUserConfigIndexPattern']]:
@@ -13884,6 +14090,14 @@ class OpenSearchOpensearchUserConfig(dict):
  """
  return pulumi.get(self, "index_patterns")

+ @property
+ @pulumi.getter(name="indexRollup")
+ def index_rollup(self) -> Optional['outputs.OpenSearchOpensearchUserConfigIndexRollup']:
+ """
+ Index rollup settings
+ """
+ return pulumi.get(self, "index_rollup")
+
  @property
  @pulumi.getter(name="indexTemplate")
  def index_template(self) -> Optional['outputs.OpenSearchOpensearchUserConfigIndexTemplate']:
@@ -14005,6 +14219,11 @@ class OpenSearchOpensearchUserConfig(dict):
  """
  return pulumi.get(self, "recovery_basebackup_name")

+ @property
+ @pulumi.getter(name="s3Migration")
+ def s3_migration(self) -> Optional['outputs.OpenSearchOpensearchUserConfigS3Migration']:
+ return pulumi.get(self, "s3_migration")
+
  @property
  @pulumi.getter
  def saml(self) -> Optional['outputs.OpenSearchOpensearchUserConfigSaml']:
@@ -14038,6 +14257,238 @@ class OpenSearchOpensearchUserConfig(dict):
  return pulumi.get(self, "static_ips")


+ @pulumi.output_type
+ class OpenSearchOpensearchUserConfigAzureMigration(dict):
+ @staticmethod
+ def __key_warning(key: str):
+ suggest = None
+ if key == "basePath":
+ suggest = "base_path"
+ elif key == "snapshotName":
+ suggest = "snapshot_name"
+ elif key == "chunkSize":
+ suggest = "chunk_size"
+ elif key == "endpointSuffix":
+ suggest = "endpoint_suffix"
+ elif key == "sasToken":
+ suggest = "sas_token"
+
+ if suggest:
+ pulumi.log.warn(f"Key '{key}' not found in OpenSearchOpensearchUserConfigAzureMigration. Access the value via the '{suggest}' property getter instead.")
+
+ def __getitem__(self, key: str) -> Any:
+ OpenSearchOpensearchUserConfigAzureMigration.__key_warning(key)
+ return super().__getitem__(key)
+
+ def get(self, key: str, default = None) -> Any:
+ OpenSearchOpensearchUserConfigAzureMigration.__key_warning(key)
+ return super().get(key, default)
+
+ def __init__(__self__, *,
+ account: str,
+ base_path: str,
+ container: str,
+ snapshot_name: str,
+ chunk_size: Optional[str] = None,
+ compress: Optional[bool] = None,
+ endpoint_suffix: Optional[str] = None,
+ key: Optional[str] = None,
+ sas_token: Optional[str] = None):
+ """
+ :param str account: Azure account name.
+ :param str base_path: The path to the repository data within its container. The value of this setting should not start or end with a /.
+ :param str container: Azure container name.
+ :param str snapshot_name: The snapshot name to restore from.
+ :param str chunk_size: Big files can be broken down into chunks during snapshotting if needed. Should be the same as for the 3rd party repository.
+ :param bool compress: When set to true metadata files are stored in compressed format.
+ :param str endpoint_suffix: Defines the DNS suffix for Azure Storage endpoints.
+ :param str key: Azure account secret key. One of key or sas_token should be specified.
+ :param str sas_token: A shared access signature (SAS) token. One of key or sas_token should be specified.
+ """
+ pulumi.set(__self__, "account", account)
+ pulumi.set(__self__, "base_path", base_path)
+ pulumi.set(__self__, "container", container)
+ pulumi.set(__self__, "snapshot_name", snapshot_name)
+ if chunk_size is not None:
+ pulumi.set(__self__, "chunk_size", chunk_size)
+ if compress is not None:
+ pulumi.set(__self__, "compress", compress)
+ if endpoint_suffix is not None:
+ pulumi.set(__self__, "endpoint_suffix", endpoint_suffix)
+ if key is not None:
+ pulumi.set(__self__, "key", key)
+ if sas_token is not None:
+ pulumi.set(__self__, "sas_token", sas_token)
+
+ @property
+ @pulumi.getter
+ def account(self) -> str:
+ """
+ Azure account name.
+ """
+ return pulumi.get(self, "account")
+
+ @property
+ @pulumi.getter(name="basePath")
+ def base_path(self) -> str:
+ """
+ The path to the repository data within its container. The value of this setting should not start or end with a /.
+ """
+ return pulumi.get(self, "base_path")
+
+ @property
+ @pulumi.getter
+ def container(self) -> str:
+ """
+ Azure container name.
+ """
+ return pulumi.get(self, "container")
+
+ @property
+ @pulumi.getter(name="snapshotName")
+ def snapshot_name(self) -> str:
+ """
+ The snapshot name to restore from.
+ """
+ return pulumi.get(self, "snapshot_name")
+
+ @property
+ @pulumi.getter(name="chunkSize")
+ def chunk_size(self) -> Optional[str]:
+ """
+ Big files can be broken down into chunks during snapshotting if needed. Should be the same as for the 3rd party repository.
+ """
+ return pulumi.get(self, "chunk_size")
+
+ @property
+ @pulumi.getter
+ def compress(self) -> Optional[bool]:
+ """
+ When set to true metadata files are stored in compressed format.
+ """
+ return pulumi.get(self, "compress")
+
+ @property
+ @pulumi.getter(name="endpointSuffix")
+ def endpoint_suffix(self) -> Optional[str]:
+ """
+ Defines the DNS suffix for Azure Storage endpoints.
+ """
+ return pulumi.get(self, "endpoint_suffix")
+
+ @property
+ @pulumi.getter
+ def key(self) -> Optional[str]:
+ """
+ Azure account secret key. One of key or sas_token should be specified.
+ """
+ return pulumi.get(self, "key")
+
+ @property
+ @pulumi.getter(name="sasToken")
+ def sas_token(self) -> Optional[str]:
+ """
+ A shared access signature (SAS) token. One of key or sas_token should be specified.
+ """
+ return pulumi.get(self, "sas_token")
+
+
+ @pulumi.output_type
+ class OpenSearchOpensearchUserConfigGcsMigration(dict):
+ @staticmethod
+ def __key_warning(key: str):
+ suggest = None
+ if key == "basePath":
+ suggest = "base_path"
+ elif key == "snapshotName":
+ suggest = "snapshot_name"
+ elif key == "chunkSize":
+ suggest = "chunk_size"
+
+ if suggest:
+ pulumi.log.warn(f"Key '{key}' not found in OpenSearchOpensearchUserConfigGcsMigration. Access the value via the '{suggest}' property getter instead.")
+
+ def __getitem__(self, key: str) -> Any:
+ OpenSearchOpensearchUserConfigGcsMigration.__key_warning(key)
+ return super().__getitem__(key)
+
+ def get(self, key: str, default = None) -> Any:
+ OpenSearchOpensearchUserConfigGcsMigration.__key_warning(key)
+ return super().get(key, default)
+
+ def __init__(__self__, *,
+ base_path: str,
+ bucket: str,
+ credentials: str,
+ snapshot_name: str,
+ chunk_size: Optional[str] = None,
+ compress: Optional[bool] = None):
+ """
+ :param str base_path: The path to the repository data within its container. The value of this setting should not start or end with a /.
+ :param str bucket: Google Cloud Storage bucket name.
+ :param str credentials: Google Cloud Storage credentials file content.
+ :param str snapshot_name: The snapshot name to restore from.
+ :param str chunk_size: Big files can be broken down into chunks during snapshotting if needed. Should be the same as for the 3rd party repository.
+ :param bool compress: When set to true metadata files are stored in compressed format.
+ """
+ pulumi.set(__self__, "base_path", base_path)
+ pulumi.set(__self__, "bucket", bucket)
+ pulumi.set(__self__, "credentials", credentials)
+ pulumi.set(__self__, "snapshot_name", snapshot_name)
+ if chunk_size is not None:
+ pulumi.set(__self__, "chunk_size", chunk_size)
+ if compress is not None:
+ pulumi.set(__self__, "compress", compress)
+
+ @property
+ @pulumi.getter(name="basePath")
+ def base_path(self) -> str:
+ """
+ The path to the repository data within its container. The value of this setting should not start or end with a /.
+ """
+ return pulumi.get(self, "base_path")
+
+ @property
+ @pulumi.getter
+ def bucket(self) -> str:
+ """
+ Google Cloud Storage bucket name.
+ """
+ return pulumi.get(self, "bucket")
+
+ @property
+ @pulumi.getter
+ def credentials(self) -> str:
+ """
+ Google Cloud Storage credentials file content.
+ """
+ return pulumi.get(self, "credentials")
+
+ @property
+ @pulumi.getter(name="snapshotName")
+ def snapshot_name(self) -> str:
+ """
+ The snapshot name to restore from.
+ """
+ return pulumi.get(self, "snapshot_name")
+
+ @property
+ @pulumi.getter(name="chunkSize")
+ def chunk_size(self) -> Optional[str]:
+ """
+ Big files can be broken down into chunks during snapshotting if needed. Should be the same as for the 3rd party repository.
+ """
+ return pulumi.get(self, "chunk_size")
+
+ @property
+ @pulumi.getter
+ def compress(self) -> Optional[bool]:
+ """
+ When set to true metadata files are stored in compressed format.
+ """
+ return pulumi.get(self, "compress")
+
+
  @pulumi.output_type
  class OpenSearchOpensearchUserConfigIndexPattern(dict):
  @staticmethod
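The new azure_migration and gcs_migration blocks drive snapshot-based restores from third-party object storage. A sketch using the GCS variant; the bucket, path, and snapshot names are placeholders, and the credentials string is the service-account JSON content:

import pulumi_aiven as aiven

# Sketch: restore an OpenSearch service from a snapshot stored in GCS.
opensearch = aiven.OpenSearch(
    "restored-os",
    project="my-project",            # placeholder
    cloud_name="google-europe-west1",
    plan="business-4",
    service_name="restored-os",
    opensearch_user_config={
        "gcs_migration": {
            "base_path": "snapshots/os",              # no leading/trailing slash
            "bucket": "my-snapshot-bucket",
            "credentials": "<service-account-json>",  # placeholder secret
            "snapshot_name": "snapshot-2024-07-01",
            "compress": True,
        },
    },
)

The azure_migration block is analogous, taking account, container, and either key or sas_token.
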
@@ -14098,6 +14549,98 @@ class OpenSearchOpensearchUserConfigIndexPattern(dict):
  return pulumi.get(self, "sorting_algorithm")


+ @pulumi.output_type
+ class OpenSearchOpensearchUserConfigIndexRollup(dict):
+ @staticmethod
+ def __key_warning(key: str):
+ suggest = None
+ if key == "rollupDashboardsEnabled":
+ suggest = "rollup_dashboards_enabled"
+ elif key == "rollupEnabled":
+ suggest = "rollup_enabled"
+ elif key == "rollupSearchBackoffCount":
+ suggest = "rollup_search_backoff_count"
+ elif key == "rollupSearchBackoffMillis":
+ suggest = "rollup_search_backoff_millis"
+ elif key == "rollupSearchSearchAllJobs":
+ suggest = "rollup_search_search_all_jobs"
+
+ if suggest:
+ pulumi.log.warn(f"Key '{key}' not found in OpenSearchOpensearchUserConfigIndexRollup. Access the value via the '{suggest}' property getter instead.")
+
+ def __getitem__(self, key: str) -> Any:
+ OpenSearchOpensearchUserConfigIndexRollup.__key_warning(key)
+ return super().__getitem__(key)
+
+ def get(self, key: str, default = None) -> Any:
+ OpenSearchOpensearchUserConfigIndexRollup.__key_warning(key)
+ return super().get(key, default)
+
+ def __init__(__self__, *,
+ rollup_dashboards_enabled: Optional[bool] = None,
+ rollup_enabled: Optional[bool] = None,
+ rollup_search_backoff_count: Optional[int] = None,
+ rollup_search_backoff_millis: Optional[int] = None,
+ rollup_search_search_all_jobs: Optional[bool] = None):
+ """
+ :param bool rollup_dashboards_enabled: Whether rollups are enabled in OpenSearch Dashboards. Defaults to true.
+ :param bool rollup_enabled: Whether the rollup plugin is enabled. Defaults to true.
+ :param int rollup_search_backoff_count: How many retries the plugin should attempt for failed rollup jobs. Defaults to 5.
+ :param int rollup_search_backoff_millis: The backoff time between retries for failed rollup jobs. Defaults to 1000ms.
+ :param bool rollup_search_search_all_jobs: Whether OpenSearch should return all jobs that match all specified search terms. If disabled, OpenSearch returns just one, as opposed to all, of the jobs that match the search terms. Defaults to false.
+ """
+ if rollup_dashboards_enabled is not None:
+ pulumi.set(__self__, "rollup_dashboards_enabled", rollup_dashboards_enabled)
+ if rollup_enabled is not None:
+ pulumi.set(__self__, "rollup_enabled", rollup_enabled)
+ if rollup_search_backoff_count is not None:
+ pulumi.set(__self__, "rollup_search_backoff_count", rollup_search_backoff_count)
+ if rollup_search_backoff_millis is not None:
+ pulumi.set(__self__, "rollup_search_backoff_millis", rollup_search_backoff_millis)
+ if rollup_search_search_all_jobs is not None:
+ pulumi.set(__self__, "rollup_search_search_all_jobs", rollup_search_search_all_jobs)
+
+ @property
+ @pulumi.getter(name="rollupDashboardsEnabled")
+ def rollup_dashboards_enabled(self) -> Optional[bool]:
+ """
+ Whether rollups are enabled in OpenSearch Dashboards. Defaults to true.
+ """
+ return pulumi.get(self, "rollup_dashboards_enabled")
+
+ @property
+ @pulumi.getter(name="rollupEnabled")
+ def rollup_enabled(self) -> Optional[bool]:
+ """
+ Whether the rollup plugin is enabled. Defaults to true.
+ """
+ return pulumi.get(self, "rollup_enabled")
+
+ @property
+ @pulumi.getter(name="rollupSearchBackoffCount")
+ def rollup_search_backoff_count(self) -> Optional[int]:
+ """
+ How many retries the plugin should attempt for failed rollup jobs. Defaults to 5.
+ """
+ return pulumi.get(self, "rollup_search_backoff_count")
+
+ @property
+ @pulumi.getter(name="rollupSearchBackoffMillis")
+ def rollup_search_backoff_millis(self) -> Optional[int]:
+ """
+ The backoff time between retries for failed rollup jobs. Defaults to 1000ms.
+ """
+ return pulumi.get(self, "rollup_search_backoff_millis")
+
+ @property
+ @pulumi.getter(name="rollupSearchSearchAllJobs")
+ def rollup_search_search_all_jobs(self) -> Optional[bool]:
+ """
+ Whether OpenSearch should return all jobs that match all specified search terms. If disabled, OpenSearch returns just one, as opposed to all, of the jobs that match the search terms. Defaults to false.
+ """
+ return pulumi.get(self, "rollup_search_search_all_jobs")
+
+
  @pulumi.output_type
  class OpenSearchOpensearchUserConfigIndexTemplate(dict):
  @staticmethod
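A sketch of wiring the new index rollup block, assuming the dict key index_rollup implied by the key-warning table above (all names are placeholders):

import pulumi_aiven as aiven

# Sketch: tame retry pressure from failed rollup jobs.
opensearch = aiven.OpenSearch(
    "rollup-os",
    project="my-project",            # placeholder
    cloud_name="aws-eu-west-1",
    plan="business-4",
    service_name="rollup-os",
    opensearch_user_config={
        "index_rollup": {
            "rollup_enabled": True,
            "rollup_search_backoff_count": 3,      # default is 5
            "rollup_search_backoff_millis": 2000,  # default is 1000 ms
        },
    },
)
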
@@ -14241,8 +14784,8 @@ class OpenSearchOpensearchUserConfigOpenid(dict):
  scope: Optional[str] = None,
  subject_key: Optional[str] = None):
  """
- :param str client_id: The ID of the OpenID Connect client configured in your IdP. Required. Example: ``.
- :param str client_secret: The client secret of the OpenID Connect client configured in your IdP. Required. Example: ``.
+ :param str client_id: The ID of the OpenID Connect client configured in your IdP. Required.
+ :param str client_secret: The client secret of the OpenID Connect client configured in your IdP. Required.
  :param str connect_url: The URL of your IdP where the Security plugin can find the OpenID Connect metadata/configuration settings. Example: `https://test-account.okta.com/app/exk491jujcVc83LEX697/sso/saml/metadata`.
  :param bool enabled: Enables or disables OpenID Connect authentication for OpenSearch. When enabled, users can authenticate using OpenID Connect with an Identity Provider. Default: `true`.
  :param str header: HTTP header name of the JWT token. Optional. Default is Authorization. Default: `Authorization`.
@@ -14251,7 +14794,7 @@ class OpenSearchOpensearchUserConfigOpenid(dict):
  :param int refresh_rate_limit_count: The maximum number of unknown key IDs in the time frame. Default is 10. Optional. Default: `10`.
  :param int refresh_rate_limit_time_window_ms: The time frame to use when checking the maximum number of unknown key IDs, in milliseconds. Optional.Default is 10000 (10 seconds). Default: `10000`.
  :param str roles_key: The key in the JSON payload that stores the user’s roles. The value of this key must be a comma-separated list of roles. Required only if you want to use roles in the JWT. Example: `roles`.
- :param str scope: The scope of the identity token issued by the IdP. Optional. Default is openid profile email address phone. Example: ``.
+ :param str scope: The scope of the identity token issued by the IdP. Optional. Default is openid profile email address phone.
  :param str subject_key: The key in the JSON payload that stores the user’s name. If not defined, the subject registered claim is used. Most IdP providers use the preferred_username claim. Optional. Example: `preferred_username`.
  """
  pulumi.set(__self__, "client_id", client_id)
@@ -14279,7 +14822,7 @@ class OpenSearchOpensearchUserConfigOpenid(dict):
  @pulumi.getter(name="clientId")
  def client_id(self) -> str:
  """
- The ID of the OpenID Connect client configured in your IdP. Required. Example: ``.
+ The ID of the OpenID Connect client configured in your IdP. Required.
  """
  return pulumi.get(self, "client_id")

@@ -14287,7 +14830,7 @@ class OpenSearchOpensearchUserConfigOpenid(dict):
  @pulumi.getter(name="clientSecret")
  def client_secret(self) -> str:
  """
- The client secret of the OpenID Connect client configured in your IdP. Required. Example: ``.
+ The client secret of the OpenID Connect client configured in your IdP. Required.
  """
  return pulumi.get(self, "client_secret")

@@ -14359,7 +14902,7 @@ class OpenSearchOpensearchUserConfigOpenid(dict):
  @pulumi.getter
  def scope(self) -> Optional[str]:
  """
- The scope of the identity token issued by the IdP. Optional. Default is openid profile email address phone. Example: ``.
+ The scope of the identity token issued by the IdP. Optional. Default is openid profile email address phone.
  """
  return pulumi.get(self, "scope")

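These hunks drop the empty "Example: ``" fragments from the OpenID docs. For context, a sketch of the corresponding block; the openid key name is an assumption inferred from the class name, and the IDs and URLs are placeholders:

import pulumi_aiven as aiven

# Sketch: OpenID Connect authentication for OpenSearch.
opensearch = aiven.OpenSearch(
    "sso-os",
    project="my-project",            # placeholder
    cloud_name="aws-eu-west-1",
    plan="business-4",
    service_name="sso-os",
    opensearch_user_config={
        "openid": {                  # assumed key for ...UserConfigOpenid
            "enabled": True,
            "client_id": "my-oidc-client",
            "client_secret": "<secret>",  # placeholder; keep in Pulumi secrets
            "connect_url": "https://idp.example.com/.well-known/openid-configuration",
            "roles_key": "roles",
        },
    },
)
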
@@ -14531,7 +15074,7 @@ class OpenSearchOpensearchUserConfigOpensearch(dict):
  :param str email_sender_name: Sender name placeholder to be used in Opensearch Dashboards and Opensearch keystore. Example: `alert-sender`.
  :param str email_sender_password: Sender password for Opensearch alerts to authenticate with SMTP server. Example: `very-secure-mail-password`.
  :param str email_sender_username: Sender username for Opensearch alerts. Example: `jane@example.com`.
- :param bool enable_security_audit: Enable/Disable security audit. Default: `false`.
+ :param bool enable_security_audit: Enable/Disable security audit.
  :param int http_max_content_length: Maximum content length for HTTP requests to the OpenSearch HTTP API, in bytes.
  :param int http_max_header_size: The max size of allowed headers, in bytes. Example: `8192`.
  :param int http_max_initial_line_length: The max length of an HTTP URL, in bytes. Example: `4096`.
@@ -14543,14 +15086,14 @@ class OpenSearchOpensearchUserConfigOpensearch(dict):
  :param int indices_query_bool_max_clause_count: Maximum number of clauses Lucene BooleanQuery can have. The default value (1024) is relatively high, and increasing it may cause performance issues. Investigate other approaches first before increasing this value.
  :param int indices_recovery_max_bytes_per_sec: Limits total inbound and outbound recovery traffic for each node. Applies to both peer recoveries as well as snapshot recoveries (i.e., restores from a snapshot). Defaults to 40mb.
  :param int indices_recovery_max_concurrent_file_chunks: Number of file chunks sent in parallel for each recovery. Defaults to 2.
- :param bool ism_enabled: Specifies whether ISM is enabled or not. Default: `true`.
- :param bool ism_history_enabled: Specifies whether audit history is enabled or not. The logs from ISM are automatically indexed to a logs document. Default: `true`.
- :param int ism_history_max_age: The maximum age before rolling over the audit history index in hours. Default: `24`.
- :param int ism_history_max_docs: The maximum number of documents before rolling over the audit history index. Default: `2500000`.
- :param int ism_history_rollover_check_period: The time between rollover checks for the audit history index in hours. Default: `8`.
- :param int ism_history_rollover_retention_period: How long audit history indices are kept in days. Default: `30`.
- :param bool knn_memory_circuit_breaker_enabled: Enable or disable KNN memory circuit breaker. Defaults to true. Default: `true`.
- :param int knn_memory_circuit_breaker_limit: Maximum amount of memory that can be used for KNN index. Defaults to 50% of the JVM heap size. Default: `50`.
+ :param bool ism_enabled: Specifies whether ISM is enabled or not.
+ :param bool ism_history_enabled: Specifies whether audit history is enabled or not. The logs from ISM are automatically indexed to a logs document.
+ :param int ism_history_max_age: The maximum age before rolling over the audit history index in hours. Example: `24`.
+ :param int ism_history_max_docs: The maximum number of documents before rolling over the audit history index. Example: `2500000`.
+ :param int ism_history_rollover_check_period: The time between rollover checks for the audit history index in hours. Example: `8`.
+ :param int ism_history_rollover_retention_period: How long audit history indices are kept in days. Example: `30`.
+ :param bool knn_memory_circuit_breaker_enabled: Enable or disable KNN memory circuit breaker. Defaults to true.
+ :param int knn_memory_circuit_breaker_limit: Maximum amount of memory that can be used for KNN index. Defaults to 50% of the JVM heap size.
  :param bool override_main_response_version: Compatibility mode sets OpenSearch to report its version as 7.10 so clients continue to work. Default is false.
  :param bool plugins_alerting_filter_by_backend_roles: Enable or disable filtering of alerting by backend roles. Requires Security plugin. Defaults to false.
  :param Sequence[str] reindex_remote_whitelists: Whitelisted addresses for reindexing. Changing this value will cause all OpenSearch instances to restart.
@@ -14725,7 +15268,7 @@ class OpenSearchOpensearchUserConfigOpensearch(dict):
  @pulumi.getter(name="enableSecurityAudit")
  def enable_security_audit(self) -> Optional[bool]:
  """
- Enable/Disable security audit. Default: `false`.
+ Enable/Disable security audit.
  """
  return pulumi.get(self, "enable_security_audit")

@@ -14821,7 +15364,7 @@ class OpenSearchOpensearchUserConfigOpensearch(dict):
  @pulumi.getter(name="ismEnabled")
  def ism_enabled(self) -> Optional[bool]:
  """
- Specifies whether ISM is enabled or not. Default: `true`.
+ Specifies whether ISM is enabled or not.
  """
  return pulumi.get(self, "ism_enabled")

@@ -14829,7 +15372,7 @@ class OpenSearchOpensearchUserConfigOpensearch(dict):
  @pulumi.getter(name="ismHistoryEnabled")
  def ism_history_enabled(self) -> Optional[bool]:
  """
- Specifies whether audit history is enabled or not. The logs from ISM are automatically indexed to a logs document. Default: `true`.
+ Specifies whether audit history is enabled or not. The logs from ISM are automatically indexed to a logs document.
  """
  return pulumi.get(self, "ism_history_enabled")

@@ -14837,7 +15380,7 @@ class OpenSearchOpensearchUserConfigOpensearch(dict):
  @pulumi.getter(name="ismHistoryMaxAge")
  def ism_history_max_age(self) -> Optional[int]:
  """
- The maximum age before rolling over the audit history index in hours. Default: `24`.
+ The maximum age before rolling over the audit history index in hours. Example: `24`.
  """
  return pulumi.get(self, "ism_history_max_age")

@@ -14845,7 +15388,7 @@ class OpenSearchOpensearchUserConfigOpensearch(dict):
  @pulumi.getter(name="ismHistoryMaxDocs")
  def ism_history_max_docs(self) -> Optional[int]:
  """
- The maximum number of documents before rolling over the audit history index. Default: `2500000`.
+ The maximum number of documents before rolling over the audit history index. Example: `2500000`.
  """
  return pulumi.get(self, "ism_history_max_docs")

@@ -14853,7 +15396,7 @@ class OpenSearchOpensearchUserConfigOpensearch(dict):
  @pulumi.getter(name="ismHistoryRolloverCheckPeriod")
  def ism_history_rollover_check_period(self) -> Optional[int]:
  """
- The time between rollover checks for the audit history index in hours. Default: `8`.
+ The time between rollover checks for the audit history index in hours. Example: `8`.
  """
  return pulumi.get(self, "ism_history_rollover_check_period")

@@ -14861,7 +15404,7 @@ class OpenSearchOpensearchUserConfigOpensearch(dict):
  @pulumi.getter(name="ismHistoryRolloverRetentionPeriod")
  def ism_history_rollover_retention_period(self) -> Optional[int]:
  """
- How long audit history indices are kept in days. Default: `30`.
+ How long audit history indices are kept in days. Example: `30`.
  """
  return pulumi.get(self, "ism_history_rollover_retention_period")

@@ -14869,7 +15412,7 @@ class OpenSearchOpensearchUserConfigOpensearch(dict):
  @pulumi.getter(name="knnMemoryCircuitBreakerEnabled")
  def knn_memory_circuit_breaker_enabled(self) -> Optional[bool]:
  """
- Enable or disable KNN memory circuit breaker. Defaults to true. Default: `true`.
+ Enable or disable KNN memory circuit breaker. Defaults to true.
  """
  return pulumi.get(self, "knn_memory_circuit_breaker_enabled")

@@ -14877,7 +15420,7 @@ class OpenSearchOpensearchUserConfigOpensearch(dict):
  @pulumi.getter(name="knnMemoryCircuitBreakerLimit")
  def knn_memory_circuit_breaker_limit(self) -> Optional[int]:
  """
- Maximum amount of memory that can be used for KNN index. Defaults to 50% of the JVM heap size. Default: `50`.
+ Maximum amount of memory that can be used for KNN index. Defaults to 50% of the JVM heap size.
  """
  return pulumi.get(self, "knn_memory_circuit_breaker_limit")

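These hunks replace hard-coded "Default:" annotations with "Example:" hints, since the effective defaults are applied server-side. A sketch of pinning a few of the affected options explicitly (dict-style config; names are placeholders):

import pulumi_aiven as aiven

# Sketch: set ISM history and KNN circuit-breaker behavior explicitly rather
# than relying on server-side defaults.
opensearch = aiven.OpenSearch(
    "tuned-os",
    project="my-project",            # placeholder
    cloud_name="google-europe-west1",
    plan="business-8",
    service_name="tuned-os",
    opensearch_user_config={
        "opensearch": {
            "ism_enabled": True,
            "ism_history_max_age": 24,                    # hours
            "ism_history_rollover_retention_period": 30,  # days
            "knn_memory_circuit_breaker_enabled": True,
            "knn_memory_circuit_breaker_limit": 50,       # % of JVM heap
        },
    },
)
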
@@ -15520,6 +16063,154 @@ class OpenSearchOpensearchUserConfigPublicAccess(dict):
15520
16063
  return pulumi.get(self, "prometheus")
15521
16064
 
15522
16065
 
16066
+ @pulumi.output_type
16067
+ class OpenSearchOpensearchUserConfigS3Migration(dict):
16068
+ @staticmethod
16069
+ def __key_warning(key: str):
16070
+ suggest = None
16071
+ if key == "accessKey":
16072
+ suggest = "access_key"
16073
+ elif key == "basePath":
16074
+ suggest = "base_path"
16075
+ elif key == "secretKey":
16076
+ suggest = "secret_key"
16077
+ elif key == "snapshotName":
16078
+ suggest = "snapshot_name"
16079
+ elif key == "chunkSize":
16080
+ suggest = "chunk_size"
16081
+ elif key == "serverSideEncryption":
16082
+ suggest = "server_side_encryption"
16083
+
16084
+ if suggest:
16085
+ pulumi.log.warn(f"Key '{key}' not found in OpenSearchOpensearchUserConfigS3Migration. Access the value via the '{suggest}' property getter instead.")
16086
+
16087
+ def __getitem__(self, key: str) -> Any:
16088
+ OpenSearchOpensearchUserConfigS3Migration.__key_warning(key)
16089
+ return super().__getitem__(key)
16090
+
16091
+ def get(self, key: str, default = None) -> Any:
16092
+ OpenSearchOpensearchUserConfigS3Migration.__key_warning(key)
16093
+ return super().get(key, default)
16094
+
16095
+ def __init__(__self__, *,
16096
+ access_key: str,
16097
+ base_path: str,
16098
+ bucket: str,
16099
+ region: str,
16100
+ secret_key: str,
16101
+ snapshot_name: str,
16102
+ chunk_size: Optional[str] = None,
16103
+ compress: Optional[bool] = None,
16104
+ endpoint: Optional[str] = None,
16105
+ server_side_encryption: Optional[bool] = None):
16106
+ """
16107
+ :param str access_key: AWS Access key.
16108
+ :param str base_path: The path to the repository data within its container. The value of this setting should not start or end with a /.
16109
+ :param str bucket: S3 bucket name.
16110
+ :param str region: S3 region.
16111
+ :param str secret_key: AWS secret key.
16112
+ :param str snapshot_name: The snapshot name to restore from.
16113
+ :param str chunk_size: Big files can be broken down into chunks during snapshotting if needed. Should be the same as for the 3rd party repository.
16114
+ :param bool compress: When set to true metadata files are stored in compressed format.
16115
+ :param str endpoint: The S3 service endpoint to connect to. If you are using an S3-compatible service then you should set this to the service’s endpoint.
16116
+ :param bool server_side_encryption: When set to true files are encrypted on server side.
16117
+ """
16118
+ pulumi.set(__self__, "access_key", access_key)
16119
+ pulumi.set(__self__, "base_path", base_path)
16120
+ pulumi.set(__self__, "bucket", bucket)
16121
+ pulumi.set(__self__, "region", region)
16122
+ pulumi.set(__self__, "secret_key", secret_key)
16123
+ pulumi.set(__self__, "snapshot_name", snapshot_name)
16124
+ if chunk_size is not None:
16125
+ pulumi.set(__self__, "chunk_size", chunk_size)
16126
+ if compress is not None:
16127
+ pulumi.set(__self__, "compress", compress)
16128
+ if endpoint is not None:
16129
+ pulumi.set(__self__, "endpoint", endpoint)
16130
+ if server_side_encryption is not None:
16131
+ pulumi.set(__self__, "server_side_encryption", server_side_encryption)
16132
+
16133
+ @property
16134
+ @pulumi.getter(name="accessKey")
16135
+ def access_key(self) -> str:
16136
+ """
16137
+ AWS Access key.
16138
+ """
16139
+ return pulumi.get(self, "access_key")
16140
+
16141
+ @property
16142
+ @pulumi.getter(name="basePath")
16143
+ def base_path(self) -> str:
16144
+ """
16145
+ The path to the repository data within its container. The value of this setting should not start or end with a /.
16146
+ """
16147
+ return pulumi.get(self, "base_path")
16148
+
16149
+ @property
16150
+ @pulumi.getter
16151
+ def bucket(self) -> str:
16152
+ """
16153
+ S3 bucket name.
16154
+ """
16155
+ return pulumi.get(self, "bucket")
16156
+
16157
+ @property
16158
+ @pulumi.getter
16159
+ def region(self) -> str:
16160
+ """
16161
+ S3 region.
16162
+ """
16163
+ return pulumi.get(self, "region")
16164
+
16165
+ @property
16166
+ @pulumi.getter(name="secretKey")
16167
+ def secret_key(self) -> str:
16168
+ """
16169
+ AWS secret key.
16170
+ """
16171
+ return pulumi.get(self, "secret_key")
16172
+
16173
+ @property
16174
+ @pulumi.getter(name="snapshotName")
16175
+ def snapshot_name(self) -> str:
16176
+ """
16177
+ The snapshot name to restore from.
16178
+ """
16179
+ return pulumi.get(self, "snapshot_name")
16180
+
16181
+ @property
16182
+ @pulumi.getter(name="chunkSize")
16183
+ def chunk_size(self) -> Optional[str]:
16184
+ """
16185
+ Big files can be broken down into chunks during snapshotting if needed. Should match the chunk size used by the third-party repository.
16186
+ """
16187
+ return pulumi.get(self, "chunk_size")
16188
+
16189
+ @property
16190
+ @pulumi.getter
16191
+ def compress(self) -> Optional[bool]:
16192
+ """
16193
+ When set to true, metadata files are stored in compressed format.
16194
+ """
16195
+ return pulumi.get(self, "compress")
16196
+
16197
+ @property
16198
+ @pulumi.getter
16199
+ def endpoint(self) -> Optional[str]:
16200
+ """
16201
+ The S3 service endpoint to connect to. If you are using an S3-compatible service, set this to the service’s endpoint.
16202
+ """
16203
+ return pulumi.get(self, "endpoint")
16204
+
16205
+ @property
16206
+ @pulumi.getter(name="serverSideEncryption")
16207
+ def server_side_encryption(self) -> Optional[bool]:
16208
+ """
16209
+ When set to true, files are encrypted on the server side.
16210
+ """
16211
+ return pulumi.get(self, "server_side_encryption")
16212
+
16213
+
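Note: the new `OpenSearchOpensearchUserConfigS3Migration` output type above corresponds to an S3 snapshot-restore block on the OpenSearch service. A minimal sketch of how it might be used, assuming the matching `...Args` input classes added in `_inputs.py`; the project, plan, and credential values below are placeholders:

```python
import pulumi_aiven as aiven

# Sketch only: restore an OpenSearch service from an existing S3 snapshot
# repository. Field names mirror the output type in this diff.
opensearch = aiven.OpenSearch(
    "restored-search",
    project="my-project",                   # placeholder project name
    cloud_name="aws-eu-west-1",
    plan="startup-4",
    service_name="restored-search",
    opensearch_user_config=aiven.OpenSearchOpensearchUserConfigArgs(
        s3_migration=aiven.OpenSearchOpensearchUserConfigS3MigrationArgs(
            access_key="AKIA...",           # AWS access key (placeholder)
            secret_key="...",               # AWS secret key (placeholder)
            bucket="my-snapshots",
            region="eu-west-1",
            base_path="snapshots/prod",     # must not start or end with "/"
            snapshot_name="snap-2024-07-01",
            chunk_size="512mb",             # optional; match the source repository
            compress=True,                  # optional; compress metadata files
        ),
    ),
)
```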
15523
16214
  @pulumi.output_type
15524
16215
  class OpenSearchOpensearchUserConfigSaml(dict):
15525
16216
  @staticmethod
@@ -16063,20 +16754,20 @@ class PgPg(dict):
16063
16754
  uris: Optional[Sequence[str]] = None,
16064
16755
  user: Optional[str] = None):
16065
16756
  """
16066
- :param str bouncer: Bouncer connection details
16067
- :param str dbname: Primary PostgreSQL database name
16068
- :param str host: PostgreSQL master node host IP or name
16069
- :param int max_connections: Connection limit
16070
- :param Sequence['PgPgParamArgs'] params: PostgreSQL connection parameters
16071
- :param str password: PostgreSQL admin user password
16072
- :param int port: PostgreSQL port
16073
- :param str replica_uri: PostgreSQL replica URI for services with a replica
16074
- :param str sslmode: PostgreSQL sslmode setting (currently always "require")
16075
- :param Sequence[str] standby_uris: PostgreSQL standby connection URIs
16076
- :param Sequence[str] syncing_uris: PostgreSQL syncing connection URIs
16077
- :param str uri: PostgreSQL master connection URI
16078
- :param Sequence[str] uris: PostgreSQL master connection URIs
16079
- :param str user: PostgreSQL admin user name
16757
+ :param str bouncer: PgBouncer connection details for [connection pooling](https://aiven.io/docs/products/postgresql/concepts/pg-connection-pooling).
16758
+ :param str dbname: Primary PostgreSQL database name.
16759
+ :param str host: PostgreSQL primary node host IP or name.
16760
+ :param int max_connections: The [number of allowed connections](https://aiven.io/docs/products/postgresql/reference/pg-connection-limits). Varies based on the service plan.
16761
+ :param Sequence['PgPgParamArgs'] params: PostgreSQL connection parameters.
16762
+ :param str password: PostgreSQL admin user password.
16763
+ :param int port: PostgreSQL port.
16764
+ :param str replica_uri: PostgreSQL replica URI for services with a replica.
16765
+ :param str sslmode: PostgreSQL SSL mode setting.
16766
+ :param Sequence[str] standby_uris: PostgreSQL standby connection URIs.
16767
+ :param Sequence[str] syncing_uris: PostgreSQL syncing connection URIs.
16768
+ :param str uri: PostgreSQL primary connection URI.
16769
+ :param Sequence[str] uris: PostgreSQL primary connection URIs.
16770
+ :param str user: PostgreSQL admin user name.
16080
16771
  """
16081
16772
  if bouncer is not None:
16082
16773
  pulumi.set(__self__, "bouncer", bouncer)
@@ -16111,7 +16802,7 @@ class PgPg(dict):
16111
16802
  @pulumi.getter
16112
16803
  def bouncer(self) -> Optional[str]:
16113
16804
  """
16114
- Bouncer connection details
16805
+ PgBouncer connection details for [connection pooling](https://aiven.io/docs/products/postgresql/concepts/pg-connection-pooling).
16115
16806
  """
16116
16807
  return pulumi.get(self, "bouncer")
16117
16808
 
@@ -16119,7 +16810,7 @@ class PgPg(dict):
16119
16810
  @pulumi.getter
16120
16811
  def dbname(self) -> Optional[str]:
16121
16812
  """
16122
- Primary PostgreSQL database name
16813
+ Primary PostgreSQL database name.
16123
16814
  """
16124
16815
  return pulumi.get(self, "dbname")
16125
16816
 
@@ -16127,7 +16818,7 @@ class PgPg(dict):
16127
16818
  @pulumi.getter
16128
16819
  def host(self) -> Optional[str]:
16129
16820
  """
16130
- PostgreSQL master node host IP or name
16821
+ PostgreSQL primary node host IP or name.
16131
16822
  """
16132
16823
  return pulumi.get(self, "host")
16133
16824
 
@@ -16135,7 +16826,7 @@ class PgPg(dict):
16135
16826
  @pulumi.getter(name="maxConnections")
16136
16827
  def max_connections(self) -> Optional[int]:
16137
16828
  """
16138
- Connection limit
16829
+ The [number of allowed connections](https://aiven.io/docs/products/postgresql/reference/pg-connection-limits). Varies based on the service plan.
16139
16830
  """
16140
16831
  return pulumi.get(self, "max_connections")
16141
16832
 
@@ -16143,7 +16834,7 @@ class PgPg(dict):
16143
16834
  @pulumi.getter
16144
16835
  def params(self) -> Optional[Sequence['outputs.PgPgParam']]:
16145
16836
  """
16146
- PostgreSQL connection parameters
16837
+ PostgreSQL connection parameters.
16147
16838
  """
16148
16839
  return pulumi.get(self, "params")
16149
16840
 
@@ -16151,7 +16842,7 @@ class PgPg(dict):
16151
16842
  @pulumi.getter
16152
16843
  def password(self) -> Optional[str]:
16153
16844
  """
16154
- PostgreSQL admin user password
16845
+ PostgreSQL admin user password.
16155
16846
  """
16156
16847
  return pulumi.get(self, "password")
16157
16848
 
@@ -16159,7 +16850,7 @@ class PgPg(dict):
16159
16850
  @pulumi.getter
16160
16851
  def port(self) -> Optional[int]:
16161
16852
  """
16162
- PostgreSQL port
16853
+ PostgreSQL port.
16163
16854
  """
16164
16855
  return pulumi.get(self, "port")
16165
16856
 
@@ -16167,7 +16858,7 @@ class PgPg(dict):
16167
16858
  @pulumi.getter(name="replicaUri")
16168
16859
  def replica_uri(self) -> Optional[str]:
16169
16860
  """
16170
- PostgreSQL replica URI for services with a replica
16861
+ PostgreSQL replica URI for services with a replica.
16171
16862
  """
16172
16863
  return pulumi.get(self, "replica_uri")
16173
16864
 
@@ -16175,7 +16866,7 @@ class PgPg(dict):
16175
16866
  @pulumi.getter
16176
16867
  def sslmode(self) -> Optional[str]:
16177
16868
  """
16178
- PostgreSQL sslmode setting (currently always "require")
16869
+ PostgreSQL SSL mode setting.
16179
16870
  """
16180
16871
  return pulumi.get(self, "sslmode")
16181
16872
 
@@ -16183,7 +16874,7 @@ class PgPg(dict):
16183
16874
  @pulumi.getter(name="standbyUris")
16184
16875
  def standby_uris(self) -> Optional[Sequence[str]]:
16185
16876
  """
16186
- PostgreSQL standby connection URIs
16877
+ PostgreSQL standby connection URIs.
16187
16878
  """
16188
16879
  return pulumi.get(self, "standby_uris")
16189
16880
 
@@ -16191,7 +16882,7 @@ class PgPg(dict):
16191
16882
  @pulumi.getter(name="syncingUris")
16192
16883
  def syncing_uris(self) -> Optional[Sequence[str]]:
16193
16884
  """
16194
- PostgreSQL syncing connection URIs
16885
+ PostgreSQL syncing connection URIs.
16195
16886
  """
16196
16887
  return pulumi.get(self, "syncing_uris")
16197
16888
 
@@ -16199,7 +16890,7 @@ class PgPg(dict):
16199
16890
  @pulumi.getter
16200
16891
  def uri(self) -> Optional[str]:
16201
16892
  """
16202
- PostgreSQL master connection URI
16893
+ PostgreSQL primary connection URI.
16203
16894
  """
16204
16895
  return pulumi.get(self, "uri")
16205
16896
 
@@ -16207,7 +16898,7 @@ class PgPg(dict):
16207
16898
  @pulumi.getter
16208
16899
  def uris(self) -> Optional[Sequence[str]]:
16209
16900
  """
16210
- PostgreSQL master connection URIs
16901
+ PostgreSQL primary connection URIs.
16211
16902
  """
16212
16903
  return pulumi.get(self, "uris")
16213
16904
 
@@ -16215,7 +16906,7 @@ class PgPg(dict):
16215
16906
  @pulumi.getter
16216
16907
  def user(self) -> Optional[str]:
16217
16908
  """
16218
- PostgreSQL admin user name
16909
+ PostgreSQL admin user name.
16219
16910
  """
16220
16911
  return pulumi.get(self, "user")
16221
16912
 
@@ -16247,12 +16938,12 @@ class PgPgParam(dict):
16247
16938
  sslmode: Optional[str] = None,
16248
16939
  user: Optional[str] = None):
16249
16940
  """
16250
- :param str database_name: Primary PostgreSQL database name
16251
- :param str host: PostgreSQL host IP or name
16252
- :param str password: PostgreSQL admin user password
16253
- :param int port: PostgreSQL port
16254
- :param str sslmode: PostgreSQL sslmode setting (currently always "require")
16255
- :param str user: PostgreSQL admin user name
16941
+ :param str database_name: Primary PostgreSQL database name.
16942
+ :param str host: PostgreSQL host IP or name.
16943
+ :param str password: PostgreSQL admin user password.
16944
+ :param int port: PostgreSQL port.
16945
+ :param str sslmode: PostgreSQL SSL mode setting.
16946
+ :param str user: PostgreSQL admin user name.
16256
16947
  """
16257
16948
  if database_name is not None:
16258
16949
  pulumi.set(__self__, "database_name", database_name)
@@ -16271,7 +16962,7 @@ class PgPgParam(dict):
16271
16962
  @pulumi.getter(name="databaseName")
16272
16963
  def database_name(self) -> Optional[str]:
16273
16964
  """
16274
- Primary PostgreSQL database name
16965
+ Primary PostgreSQL database name.
16275
16966
  """
16276
16967
  return pulumi.get(self, "database_name")
16277
16968
 
@@ -16279,7 +16970,7 @@ class PgPgParam(dict):
16279
16970
  @pulumi.getter
16280
16971
  def host(self) -> Optional[str]:
16281
16972
  """
16282
- PostgreSQL host IP or name
16973
+ PostgreSQL host IP or name.
16283
16974
  """
16284
16975
  return pulumi.get(self, "host")
16285
16976
 
@@ -16287,7 +16978,7 @@ class PgPgParam(dict):
16287
16978
  @pulumi.getter
16288
16979
  def password(self) -> Optional[str]:
16289
16980
  """
16290
- PostgreSQL admin user password
16981
+ PostgreSQL admin user password.
16291
16982
  """
16292
16983
  return pulumi.get(self, "password")
16293
16984
 
@@ -16295,7 +16986,7 @@ class PgPgParam(dict):
16295
16986
  @pulumi.getter
16296
16987
  def port(self) -> Optional[int]:
16297
16988
  """
16298
- PostgreSQL port
16989
+ PostgreSQL port.
16299
16990
  """
16300
16991
  return pulumi.get(self, "port")
16301
16992
 
@@ -16303,7 +16994,7 @@ class PgPgParam(dict):
16303
16994
  @pulumi.getter
16304
16995
  def sslmode(self) -> Optional[str]:
16305
16996
  """
16306
- PostgreSQL sslmode setting (currently always "require")
16997
+ PostgreSQL SSL mode setting.
16307
16998
  """
16308
16999
  return pulumi.get(self, "sslmode")
16309
17000
 
@@ -16311,7 +17002,7 @@ class PgPgParam(dict):
16311
17002
  @pulumi.getter
16312
17003
  def user(self) -> Optional[str]:
16313
17004
  """
16314
- PostgreSQL admin user name
17005
+ PostgreSQL admin user name.
16315
17006
  """
16316
17007
  return pulumi.get(self, "user")
16317
17008
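Note: `PgPg` and `PgPgParam` are read-only connection blocks surfaced on the `aiven.Pg` resource. A short sketch of consuming them; the service below is hypothetical, and the `pg` attribute name is assumed from this provider's naming convention:

```python
import pulumi
import pulumi_aiven as aiven

pg = aiven.Pg(
    "example-pg",
    project="my-project",                 # placeholder
    cloud_name="google-europe-west1",
    plan="startup-4",
    service_name="example-pg",
)

# The `pg` attribute carries the connection block documented above.
pulumi.export("primary_uri", pg.pg.uri)    # primary connection URI
pulumi.export("pgbouncer", pg.pg.bouncer)  # PgBouncer connection details
```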
 
@@ -16812,6 +17503,8 @@ class PgPgUserConfigMigration(dict):
16812
17503
  suggest = None
16813
17504
  if key == "ignoreDbs":
16814
17505
  suggest = "ignore_dbs"
17506
+ elif key == "ignoreRoles":
17507
+ suggest = "ignore_roles"
16815
17508
 
16816
17509
  if suggest:
16817
17510
  pulumi.log.warn(f"Key '{key}' not found in PgPgUserConfigMigration. Access the value via the '{suggest}' property getter instead.")
@@ -16829,6 +17522,7 @@ class PgPgUserConfigMigration(dict):
16829
17522
  port: int,
16830
17523
  dbname: Optional[str] = None,
16831
17524
  ignore_dbs: Optional[str] = None,
17525
+ ignore_roles: Optional[str] = None,
16832
17526
  method: Optional[str] = None,
16833
17527
  password: Optional[str] = None,
16834
17528
  ssl: Optional[bool] = None,
@@ -16838,6 +17532,7 @@ class PgPgUserConfigMigration(dict):
16838
17532
  :param int port: Port number of the server where to migrate data from. Example: `1234`.
16839
17533
  :param str dbname: Database name for bootstrapping the initial connection. Example: `defaultdb`.
16840
17534
  :param str ignore_dbs: Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment). Example: `db1,db2`.
17535
+ :param str ignore_roles: Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment). Example: `role1,role2`.
16841
17536
  :param str method: Enum: `dump`, `replication`. The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
16842
17537
  :param str password: Password for authentication with the server where to migrate data from. Example: `jjKk45Nnd`.
16843
17538
  :param bool ssl: The server where to migrate data from is secured with SSL. Default: `true`.
@@ -16849,6 +17544,8 @@ class PgPgUserConfigMigration(dict):
16849
17544
  pulumi.set(__self__, "dbname", dbname)
16850
17545
  if ignore_dbs is not None:
16851
17546
  pulumi.set(__self__, "ignore_dbs", ignore_dbs)
17547
+ if ignore_roles is not None:
17548
+ pulumi.set(__self__, "ignore_roles", ignore_roles)
16852
17549
  if method is not None:
16853
17550
  pulumi.set(__self__, "method", method)
16854
17551
  if password is not None:
@@ -16890,6 +17587,14 @@ class PgPgUserConfigMigration(dict):
16890
17587
  """
16891
17588
  return pulumi.get(self, "ignore_dbs")
16892
17589
 
17590
+ @property
17591
+ @pulumi.getter(name="ignoreRoles")
17592
+ def ignore_roles(self) -> Optional[str]:
17593
+ """
17594
+ Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment). Example: `role1,role2`.
17595
+ """
17596
+ return pulumi.get(self, "ignore_roles")
17597
+
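Note: `ignore_roles` is new in this release; identical fields are added to the Redis, Valkey, and Dragonfly migration blocks later in this diff. A hedged sketch of setting it on a PostgreSQL migration (host, credentials, and role names are placeholders; the `username` field is assumed from the migration block's other fields):

```python
import pulumi_aiven as aiven

# Sketch only: migrate into an Aiven PostgreSQL service while skipping
# selected databases and roles on the source server.
pg = aiven.Pg(
    "migrated-pg",
    project="my-project",
    cloud_name="google-europe-west1",
    plan="startup-4",
    service_name="migrated-pg",
    pg_user_config=aiven.PgPgUserConfigArgs(
        migration=aiven.PgPgUserConfigMigrationArgs(
            host="source.example.com",
            port=5432,
            username="migrator",         # assumed field; not shown in this hunk
            password="jjKk45Nnd",
            ssl=True,
            method="replication",
            ignore_dbs="db1,db2",        # databases to skip
            ignore_roles="role1,role2",  # new: roles to skip (PostgreSQL only)
        ),
    ),
)
```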
16893
17598
  @property
16894
17599
  @pulumi.getter
16895
17600
  def method(self) -> Optional[str]:
@@ -17965,6 +18670,8 @@ class PgPgUserConfigPgbouncer(dict):
17965
18670
  suggest = "autodb_pool_size"
17966
18671
  elif key == "ignoreStartupParameters":
17967
18672
  suggest = "ignore_startup_parameters"
18673
+ elif key == "maxPreparedStatements":
18674
+ suggest = "max_prepared_statements"
17968
18675
  elif key == "minPoolSize":
17969
18676
  suggest = "min_pool_size"
17970
18677
  elif key == "serverIdleTimeout":
@@ -17991,6 +18698,7 @@ class PgPgUserConfigPgbouncer(dict):
17991
18698
  autodb_pool_mode: Optional[str] = None,
17992
18699
  autodb_pool_size: Optional[int] = None,
17993
18700
  ignore_startup_parameters: Optional[Sequence[str]] = None,
18701
+ max_prepared_statements: Optional[int] = None,
17994
18702
  min_pool_size: Optional[int] = None,
17995
18703
  server_idle_timeout: Optional[int] = None,
17996
18704
  server_lifetime: Optional[int] = None,
@@ -18001,6 +18709,7 @@ class PgPgUserConfigPgbouncer(dict):
18001
18709
  :param str autodb_pool_mode: Enum: `session`, `transaction`, `statement`. PGBouncer pool mode. Default: `transaction`.
18002
18710
  :param int autodb_pool_size: If non-zero then create automatically a pool of that size per user when a pool doesn't exist. Default: `0`.
18003
18711
  :param Sequence[str] ignore_startup_parameters: List of parameters to ignore when given in startup packet.
18712
+ :param int max_prepared_statements: When `max_prepared_statements` is set to a non-zero value, PgBouncer tracks protocol-level named prepared statement commands sent by the client in transaction and statement pooling modes. Setting it to 0 disables prepared statements. `max_prepared_statements` defaults to 100, and its maximum is 3000. Default: `100`.
18004
18713
  :param int min_pool_size: Add more server connections to pool if below this number. Improves behavior when usual load comes suddenly back after period of total inactivity. The value is effectively capped at the pool size. Default: `0`.
18005
18714
  :param int server_idle_timeout: If a server connection has been idle more than this many seconds it will be dropped. If 0 then timeout is disabled. (seconds). Default: `600`.
18006
18715
  :param int server_lifetime: The pooler will close an unused server connection that has been connected longer than this. (seconds). Default: `3600`.
@@ -18016,6 +18725,8 @@ class PgPgUserConfigPgbouncer(dict):
18016
18725
  pulumi.set(__self__, "autodb_pool_size", autodb_pool_size)
18017
18726
  if ignore_startup_parameters is not None:
18018
18727
  pulumi.set(__self__, "ignore_startup_parameters", ignore_startup_parameters)
18728
+ if max_prepared_statements is not None:
18729
+ pulumi.set(__self__, "max_prepared_statements", max_prepared_statements)
18019
18730
  if min_pool_size is not None:
18020
18731
  pulumi.set(__self__, "min_pool_size", min_pool_size)
18021
18732
  if server_idle_timeout is not None:
@@ -18065,6 +18776,14 @@ class PgPgUserConfigPgbouncer(dict):
18065
18776
  """
18066
18777
  return pulumi.get(self, "ignore_startup_parameters")
18067
18778
 
18779
+ @property
18780
+ @pulumi.getter(name="maxPreparedStatements")
18781
+ def max_prepared_statements(self) -> Optional[int]:
18782
+ """
18783
+ When `max_prepared_statements` is set to a non-zero value, PgBouncer tracks protocol-level named prepared statement commands sent by the client in transaction and statement pooling modes. Setting it to 0 disables prepared statements. `max_prepared_statements` defaults to 100, and its maximum is 3000. Default: `100`.
18784
+ """
18785
+ return pulumi.get(self, "max_prepared_statements")
18786
+
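Note: a hedged sketch of the new `max_prepared_statements` PgBouncer knob on a PostgreSQL service (all other values are placeholders):

```python
import pulumi_aiven as aiven

# Sketch only: cap PgBouncer's prepared-statement tracking.
pg = aiven.Pg(
    "pooled-pg",
    project="my-project",
    cloud_name="google-europe-west1",
    plan="startup-4",
    service_name="pooled-pg",
    pg_user_config=aiven.PgPgUserConfigArgs(
        pgbouncer=aiven.PgPgUserConfigPgbouncerArgs(
            autodb_pool_mode="transaction",
            max_prepared_statements=200,  # 0 disables; maximum is 3000
        ),
    ),
)
```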
18068
18787
  @property
18069
18788
  @pulumi.getter(name="minPoolSize")
18070
18789
  def min_pool_size(self) -> Optional[int]:
@@ -19027,6 +19746,8 @@ class RedisRedisUserConfigMigration(dict):
19027
19746
  suggest = None
19028
19747
  if key == "ignoreDbs":
19029
19748
  suggest = "ignore_dbs"
19749
+ elif key == "ignoreRoles":
19750
+ suggest = "ignore_roles"
19030
19751
 
19031
19752
  if suggest:
19032
19753
  pulumi.log.warn(f"Key '{key}' not found in RedisRedisUserConfigMigration. Access the value via the '{suggest}' property getter instead.")
@@ -19044,6 +19765,7 @@ class RedisRedisUserConfigMigration(dict):
19044
19765
  port: int,
19045
19766
  dbname: Optional[str] = None,
19046
19767
  ignore_dbs: Optional[str] = None,
19768
+ ignore_roles: Optional[str] = None,
19047
19769
  method: Optional[str] = None,
19048
19770
  password: Optional[str] = None,
19049
19771
  ssl: Optional[bool] = None,
@@ -19053,6 +19775,7 @@ class RedisRedisUserConfigMigration(dict):
19053
19775
  :param int port: Port number of the server where to migrate data from. Example: `1234`.
19054
19776
  :param str dbname: Database name for bootstrapping the initial connection. Example: `defaultdb`.
19055
19777
  :param str ignore_dbs: Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment). Example: `db1,db2`.
19778
+ :param str ignore_roles: Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment). Example: `role1,role2`.
19056
19779
  :param str method: Enum: `dump`, `replication`. The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
19057
19780
  :param str password: Password for authentication with the server where to migrate data from. Example: `jjKk45Nnd`.
19058
19781
  :param bool ssl: The server where to migrate data from is secured with SSL. Default: `true`.
@@ -19064,6 +19787,8 @@ class RedisRedisUserConfigMigration(dict):
19064
19787
  pulumi.set(__self__, "dbname", dbname)
19065
19788
  if ignore_dbs is not None:
19066
19789
  pulumi.set(__self__, "ignore_dbs", ignore_dbs)
19790
+ if ignore_roles is not None:
19791
+ pulumi.set(__self__, "ignore_roles", ignore_roles)
19067
19792
  if method is not None:
19068
19793
  pulumi.set(__self__, "method", method)
19069
19794
  if password is not None:
@@ -19105,6 +19830,14 @@ class RedisRedisUserConfigMigration(dict):
19105
19830
  """
19106
19831
  return pulumi.get(self, "ignore_dbs")
19107
19832
 
19833
+ @property
19834
+ @pulumi.getter(name="ignoreRoles")
19835
+ def ignore_roles(self) -> Optional[str]:
19836
+ """
19837
+ Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment). Example: `role1,role2`.
19838
+ """
19839
+ return pulumi.get(self, "ignore_roles")
19840
+
19108
19841
  @property
19109
19842
  @pulumi.getter
19110
19843
  def method(self) -> Optional[str]:
@@ -19368,8 +20101,12 @@ class ServiceIntegrationClickhouseKafkaUserConfigTable(dict):
19368
20101
  suggest = "num_consumers"
19369
20102
  elif key == "pollMaxBatchSize":
19370
20103
  suggest = "poll_max_batch_size"
20104
+ elif key == "pollMaxTimeoutMs":
20105
+ suggest = "poll_max_timeout_ms"
19371
20106
  elif key == "skipBrokenMessages":
19372
20107
  suggest = "skip_broken_messages"
20108
+ elif key == "threadPerConsumer":
20109
+ suggest = "thread_per_consumer"
19373
20110
 
19374
20111
  if suggest:
19375
20112
  pulumi.log.warn(f"Key '{key}' not found in ServiceIntegrationClickhouseKafkaUserConfigTable. Access the value via the '{suggest}' property getter instead.")
@@ -19395,7 +20132,9 @@ class ServiceIntegrationClickhouseKafkaUserConfigTable(dict):
19395
20132
  max_rows_per_message: Optional[int] = None,
19396
20133
  num_consumers: Optional[int] = None,
19397
20134
  poll_max_batch_size: Optional[int] = None,
19398
- skip_broken_messages: Optional[int] = None):
20135
+ poll_max_timeout_ms: Optional[int] = None,
20136
+ skip_broken_messages: Optional[int] = None,
20137
+ thread_per_consumer: Optional[bool] = None):
19399
20138
  """
19400
20139
  :param Sequence['ServiceIntegrationClickhouseKafkaUserConfigTableColumnArgs'] columns: Table columns
19401
20140
  :param str data_format: Enum: `Avro`, `CSV`, `JSONAsString`, `JSONCompactEachRow`, `JSONCompactStringsEachRow`, `JSONEachRow`, `JSONStringsEachRow`, `MsgPack`, `TSKV`, `TSV`, `TabSeparated`, `RawBLOB`, `AvroConfluent`, `Parquet`. Message data format. Default: `JSONEachRow`.
@@ -19409,7 +20148,9 @@ class ServiceIntegrationClickhouseKafkaUserConfigTable(dict):
19409
20148
  :param int max_rows_per_message: The maximum number of rows produced in one kafka message for row-based formats. Default: `1`.
19410
20149
  :param int num_consumers: The number of consumers per table per replica. Default: `1`.
19411
20150
  :param int poll_max_batch_size: Maximum amount of messages to be polled in a single Kafka poll. Default: `0`.
20151
+ :param int poll_max_timeout_ms: Timeout in milliseconds for a single poll from Kafka. Takes the value of the `stream_flush_interval_ms` server setting by default (500ms). Default: `0`.
19412
20152
  :param int skip_broken_messages: Skip at least this number of broken messages from Kafka topic per block. Default: `0`.
20153
+ :param bool thread_per_consumer: Provide an independent thread for each consumer. All consumers run in the same thread by default. Default: `false`.
19413
20154
  """
19414
20155
  pulumi.set(__self__, "columns", columns)
19415
20156
  pulumi.set(__self__, "data_format", data_format)
@@ -19430,8 +20171,12 @@ class ServiceIntegrationClickhouseKafkaUserConfigTable(dict):
19430
20171
  pulumi.set(__self__, "num_consumers", num_consumers)
19431
20172
  if poll_max_batch_size is not None:
19432
20173
  pulumi.set(__self__, "poll_max_batch_size", poll_max_batch_size)
20174
+ if poll_max_timeout_ms is not None:
20175
+ pulumi.set(__self__, "poll_max_timeout_ms", poll_max_timeout_ms)
19433
20176
  if skip_broken_messages is not None:
19434
20177
  pulumi.set(__self__, "skip_broken_messages", skip_broken_messages)
20178
+ if thread_per_consumer is not None:
20179
+ pulumi.set(__self__, "thread_per_consumer", thread_per_consumer)
19435
20180
 
19436
20181
  @property
19437
20182
  @pulumi.getter
@@ -19529,6 +20274,14 @@ class ServiceIntegrationClickhouseKafkaUserConfigTable(dict):
19529
20274
  """
19530
20275
  return pulumi.get(self, "poll_max_batch_size")
19531
20276
 
20277
+ @property
20278
+ @pulumi.getter(name="pollMaxTimeoutMs")
20279
+ def poll_max_timeout_ms(self) -> Optional[int]:
20280
+ """
20281
+ Timeout in milliseconds for a single poll from Kafka. Takes the value of the `stream_flush_interval_ms` server setting by default (500ms). Default: `0`.
20282
+ """
20283
+ return pulumi.get(self, "poll_max_timeout_ms")
20284
+
19532
20285
  @property
19533
20286
  @pulumi.getter(name="skipBrokenMessages")
19534
20287
  def skip_broken_messages(self) -> Optional[int]:
@@ -19537,6 +20290,14 @@ class ServiceIntegrationClickhouseKafkaUserConfigTable(dict):
19537
20290
  """
19538
20291
  return pulumi.get(self, "skip_broken_messages")
19539
20292
 
20293
+ @property
20294
+ @pulumi.getter(name="threadPerConsumer")
20295
+ def thread_per_consumer(self) -> Optional[bool]:
20296
+ """
20297
+ Provide an independent thread for each consumer. All consumers run in the same thread by default. Default: `false`.
20298
+ """
20299
+ return pulumi.get(self, "thread_per_consumer")
20300
+
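Note: a hedged sketch of the two new table options on a ClickHouse-Kafka integration. Service names, the topic, and the column layout are placeholders, and the set of required table fields is inferred from the constructor above:

```python
import pulumi_aiven as aiven

# Sketch only: one integration table with the new polling/threading knobs.
integration = aiven.ServiceIntegration(
    "clickhouse-kafka",
    project="my-project",
    integration_type="clickhouse_kafka",
    source_service_name="my-kafka",
    destination_service_name="my-clickhouse",
    clickhouse_kafka_user_config=aiven.ServiceIntegrationClickhouseKafkaUserConfigArgs(
        tables=[
            aiven.ServiceIntegrationClickhouseKafkaUserConfigTableArgs(
                name="events",
                group_name="events_consumers",
                data_format="JSONEachRow",
                columns=[
                    aiven.ServiceIntegrationClickhouseKafkaUserConfigTableColumnArgs(
                        name="payload",
                        type="String",
                    ),
                ],
                topics=[
                    aiven.ServiceIntegrationClickhouseKafkaUserConfigTableTopicArgs(
                        name="events",
                    ),
                ],
                poll_max_timeout_ms=1000,  # new: per-poll timeout in ms
                thread_per_consumer=True,  # new: one thread per consumer
            ),
        ],
    ),
)
```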
19540
20301
 
19541
20302
  @pulumi.output_type
19542
20303
  class ServiceIntegrationClickhouseKafkaUserConfigTableColumn(dict):
@@ -23861,6 +24622,8 @@ class ValkeyValkeyUserConfigMigration(dict):
23861
24622
  suggest = None
23862
24623
  if key == "ignoreDbs":
23863
24624
  suggest = "ignore_dbs"
24625
+ elif key == "ignoreRoles":
24626
+ suggest = "ignore_roles"
23864
24627
 
23865
24628
  if suggest:
23866
24629
  pulumi.log.warn(f"Key '{key}' not found in ValkeyValkeyUserConfigMigration. Access the value via the '{suggest}' property getter instead.")
@@ -23878,6 +24641,7 @@ class ValkeyValkeyUserConfigMigration(dict):
23878
24641
  port: int,
23879
24642
  dbname: Optional[str] = None,
23880
24643
  ignore_dbs: Optional[str] = None,
24644
+ ignore_roles: Optional[str] = None,
23881
24645
  method: Optional[str] = None,
23882
24646
  password: Optional[str] = None,
23883
24647
  ssl: Optional[bool] = None,
@@ -23887,6 +24651,7 @@ class ValkeyValkeyUserConfigMigration(dict):
23887
24651
  :param int port: Port number of the server where to migrate data from. Example: `1234`.
23888
24652
  :param str dbname: Database name for bootstrapping the initial connection. Example: `defaultdb`.
23889
24653
  :param str ignore_dbs: Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment). Example: `db1,db2`.
24654
+ :param str ignore_roles: Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment). Example: `role1,role2`.
23890
24655
  :param str method: Enum: `dump`, `replication`. The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
23891
24656
  :param str password: Password for authentication with the server where to migrate data from. Example: `jjKk45Nnd`.
23892
24657
  :param bool ssl: The server where to migrate data from is secured with SSL. Default: `true`.
@@ -23898,6 +24663,8 @@ class ValkeyValkeyUserConfigMigration(dict):
23898
24663
  pulumi.set(__self__, "dbname", dbname)
23899
24664
  if ignore_dbs is not None:
23900
24665
  pulumi.set(__self__, "ignore_dbs", ignore_dbs)
24666
+ if ignore_roles is not None:
24667
+ pulumi.set(__self__, "ignore_roles", ignore_roles)
23901
24668
  if method is not None:
23902
24669
  pulumi.set(__self__, "method", method)
23903
24670
  if password is not None:
@@ -23939,6 +24706,14 @@ class ValkeyValkeyUserConfigMigration(dict):
23939
24706
  """
23940
24707
  return pulumi.get(self, "ignore_dbs")
23941
24708
 
24709
+ @property
24710
+ @pulumi.getter(name="ignoreRoles")
24711
+ def ignore_roles(self) -> Optional[str]:
24712
+ """
24713
+ Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment). Example: `role1,role2`.
24714
+ """
24715
+ return pulumi.get(self, "ignore_roles")
24716
+
23942
24717
  @property
23943
24718
  @pulumi.getter
23944
24719
  def method(self) -> Optional[str]:
@@ -26078,6 +26853,7 @@ class GetDragonflyDragonflyUserConfigMigrationResult(dict):
26078
26853
  port: int,
26079
26854
  dbname: Optional[str] = None,
26080
26855
  ignore_dbs: Optional[str] = None,
26856
+ ignore_roles: Optional[str] = None,
26081
26857
  method: Optional[str] = None,
26082
26858
  password: Optional[str] = None,
26083
26859
  ssl: Optional[bool] = None,
@@ -26087,6 +26863,7 @@ class GetDragonflyDragonflyUserConfigMigrationResult(dict):
26087
26863
  :param int port: Port number of the server where to migrate data from. Example: `1234`.
26088
26864
  :param str dbname: Database name for bootstrapping the initial connection. Example: `defaultdb`.
26089
26865
  :param str ignore_dbs: Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment). Example: `db1,db2`.
26866
+ :param str ignore_roles: Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment). Example: `role1,role2`.
26090
26867
  :param str method: Enum: `dump`, `replication`. The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
26091
26868
  :param str password: Password for authentication with the server where to migrate data from. Example: `jjKk45Nnd`.
26092
26869
  :param bool ssl: The server where to migrate data from is secured with SSL. Default: `true`.
@@ -26098,6 +26875,8 @@ class GetDragonflyDragonflyUserConfigMigrationResult(dict):
26098
26875
  pulumi.set(__self__, "dbname", dbname)
26099
26876
  if ignore_dbs is not None:
26100
26877
  pulumi.set(__self__, "ignore_dbs", ignore_dbs)
26878
+ if ignore_roles is not None:
26879
+ pulumi.set(__self__, "ignore_roles", ignore_roles)
26101
26880
  if method is not None:
26102
26881
  pulumi.set(__self__, "method", method)
26103
26882
  if password is not None:
@@ -26139,6 +26918,14 @@ class GetDragonflyDragonflyUserConfigMigrationResult(dict):
26139
26918
  """
26140
26919
  return pulumi.get(self, "ignore_dbs")
26141
26920
 
26921
+ @property
26922
+ @pulumi.getter(name="ignoreRoles")
26923
+ def ignore_roles(self) -> Optional[str]:
26924
+ """
26925
+ Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment). Example: `role1,role2`.
26926
+ """
26927
+ return pulumi.get(self, "ignore_roles")
26928
+
26142
26929
  @property
26143
26930
  @pulumi.getter
26144
26931
  def method(self) -> Optional[str]:
@@ -26925,7 +27712,8 @@ class GetGrafanaGrafanaUserConfigResult(dict):
26925
27712
  unified_alerting_enabled: Optional[bool] = None,
26926
27713
  user_auto_assign_org: Optional[bool] = None,
26927
27714
  user_auto_assign_org_role: Optional[str] = None,
26928
- viewers_can_edit: Optional[bool] = None):
27715
+ viewers_can_edit: Optional[bool] = None,
27716
+ wal: Optional[bool] = None):
26929
27717
  """
26930
27718
  :param str additional_backup_regions: Additional Cloud Regions for Backup Replication.
26931
27719
  :param bool alerting_enabled: Enable or disable Grafana legacy alerting functionality. This should not be enabled with unified_alerting_enabled.
@@ -26969,6 +27757,7 @@ class GetGrafanaGrafanaUserConfigResult(dict):
26969
27757
  :param bool user_auto_assign_org: Auto-assign new users on signup to main organization. Defaults to false.
26970
27758
  :param str user_auto_assign_org_role: Enum: `Viewer`, `Admin`, `Editor`. Set role for new signups. Defaults to Viewer.
26971
27759
  :param bool viewers_can_edit: Users with view-only permission can edit but not save dashboards.
27760
+ :param bool wal: Setting to enable/disable Write-Ahead Logging. The default value is false (disabled).
26972
27761
  """
26973
27762
  if additional_backup_regions is not None:
26974
27763
  pulumi.set(__self__, "additional_backup_regions", additional_backup_regions)
@@ -27054,6 +27843,8 @@ class GetGrafanaGrafanaUserConfigResult(dict):
27054
27843
  pulumi.set(__self__, "user_auto_assign_org_role", user_auto_assign_org_role)
27055
27844
  if viewers_can_edit is not None:
27056
27845
  pulumi.set(__self__, "viewers_can_edit", viewers_can_edit)
27846
+ if wal is not None:
27847
+ pulumi.set(__self__, "wal", wal)
27057
27848
 
27058
27849
  @property
27059
27850
  @pulumi.getter(name="additionalBackupRegions")
@@ -27392,6 +28183,14 @@ class GetGrafanaGrafanaUserConfigResult(dict):
27392
28183
  """
27393
28184
  return pulumi.get(self, "viewers_can_edit")
27394
28185
 
28186
+ @property
28187
+ @pulumi.getter
28188
+ def wal(self) -> Optional[bool]:
28189
+ """
28190
+ Setting to enable/disable Write-Ahead Logging. The default value is false (disabled).
28191
+ """
28192
+ return pulumi.get(self, "wal")
28193
+
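Note: the `wal` field on the data-source result above implies a matching flag on the Grafana resource's user config. A hedged sketch, assuming the resource-side args class mirrors this result type:

```python
import pulumi_aiven as aiven

# Sketch only: enable Grafana's write-ahead logging.
grafana = aiven.Grafana(
    "example-grafana",
    project="my-project",
    cloud_name="google-europe-west1",
    plan="startup-1",
    service_name="example-grafana",
    grafana_user_config=aiven.GrafanaGrafanaUserConfigArgs(
        wal=True,  # new in this release; the default is false (disabled)
    ),
)
```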
27395
28194
 
27396
28195
  @pulumi.output_type
27397
28196
  class GetGrafanaGrafanaUserConfigAuthAzureadResult(dict):
@@ -29830,6 +30629,7 @@ class GetKafkaKafkaUserConfigResult(dict):
29830
30629
  additional_backup_regions: Optional[str] = None,
29831
30630
  aiven_kafka_topic_messages: Optional[bool] = None,
29832
30631
  custom_domain: Optional[str] = None,
30632
+ follower_fetching: Optional['outputs.GetKafkaKafkaUserConfigFollowerFetchingResult'] = None,
29833
30633
  ip_filter_objects: Optional[Sequence['outputs.GetKafkaKafkaUserConfigIpFilterObjectResult']] = None,
29834
30634
  ip_filter_strings: Optional[Sequence[str]] = None,
29835
30635
  ip_filters: Optional[Sequence[str]] = None,
@@ -29841,6 +30641,7 @@ class GetKafkaKafkaUserConfigResult(dict):
29841
30641
  kafka_rest: Optional[bool] = None,
29842
30642
  kafka_rest_authorization: Optional[bool] = None,
29843
30643
  kafka_rest_config: Optional['outputs.GetKafkaKafkaUserConfigKafkaRestConfigResult'] = None,
30644
+ kafka_sasl_mechanisms: Optional['outputs.GetKafkaKafkaUserConfigKafkaSaslMechanismsResult'] = None,
29844
30645
  kafka_version: Optional[str] = None,
29845
30646
  letsencrypt_sasl_privatelink: Optional[bool] = None,
29846
30647
  private_access: Optional['outputs.GetKafkaKafkaUserConfigPrivateAccessResult'] = None,
@@ -29855,6 +30656,7 @@ class GetKafkaKafkaUserConfigResult(dict):
29855
30656
  :param str additional_backup_regions: Additional Cloud Regions for Backup Replication.
29856
30657
  :param bool aiven_kafka_topic_messages: Allow access to read Kafka topic messages in the Aiven Console and REST API.
29857
30658
  :param str custom_domain: Serve the web frontend using a custom CNAME pointing to the Aiven DNS name. Example: `grafana.example.org`.
30659
+ :param 'GetKafkaKafkaUserConfigFollowerFetchingArgs' follower_fetching: Enable follower fetching
29858
30660
  :param Sequence['GetKafkaKafkaUserConfigIpFilterObjectArgs'] ip_filter_objects: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`
29859
30661
  :param Sequence[str] ip_filter_strings: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`.
29860
30662
  :param Sequence[str] ip_filters: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`.
@@ -29865,6 +30667,7 @@ class GetKafkaKafkaUserConfigResult(dict):
29865
30667
  :param bool kafka_rest: Enable Kafka-REST service. Default: `false`.
29866
30668
  :param bool kafka_rest_authorization: Enable authorization in Kafka-REST service.
29867
30669
  :param 'GetKafkaKafkaUserConfigKafkaRestConfigArgs' kafka_rest_config: Kafka REST configuration
30670
+ :param 'GetKafkaKafkaUserConfigKafkaSaslMechanismsArgs' kafka_sasl_mechanisms: Kafka SASL mechanisms
29868
30671
  :param str kafka_version: Enum: `3.1`, `3.2`, `3.3`, `3.4`, `3.5`, `3.6`, `3.7`, and newer. Kafka major version.
29869
30672
  :param bool letsencrypt_sasl_privatelink: Use Letsencrypt CA for Kafka SASL via Privatelink.
29870
30673
  :param 'GetKafkaKafkaUserConfigPrivateAccessArgs' private_access: Allow access to selected service ports from private networks
@@ -29882,6 +30685,8 @@ class GetKafkaKafkaUserConfigResult(dict):
29882
30685
  pulumi.set(__self__, "aiven_kafka_topic_messages", aiven_kafka_topic_messages)
29883
30686
  if custom_domain is not None:
29884
30687
  pulumi.set(__self__, "custom_domain", custom_domain)
30688
+ if follower_fetching is not None:
30689
+ pulumi.set(__self__, "follower_fetching", follower_fetching)
29885
30690
  if ip_filter_objects is not None:
29886
30691
  pulumi.set(__self__, "ip_filter_objects", ip_filter_objects)
29887
30692
  if ip_filter_strings is not None:
@@ -29904,6 +30709,8 @@ class GetKafkaKafkaUserConfigResult(dict):
29904
30709
  pulumi.set(__self__, "kafka_rest_authorization", kafka_rest_authorization)
29905
30710
  if kafka_rest_config is not None:
29906
30711
  pulumi.set(__self__, "kafka_rest_config", kafka_rest_config)
30712
+ if kafka_sasl_mechanisms is not None:
30713
+ pulumi.set(__self__, "kafka_sasl_mechanisms", kafka_sasl_mechanisms)
29907
30714
  if kafka_version is not None:
29908
30715
  pulumi.set(__self__, "kafka_version", kafka_version)
29909
30716
  if letsencrypt_sasl_privatelink is not None:
@@ -29950,6 +30757,14 @@ class GetKafkaKafkaUserConfigResult(dict):
29950
30757
  """
29951
30758
  return pulumi.get(self, "custom_domain")
29952
30759
 
30760
+ @property
30761
+ @pulumi.getter(name="followerFetching")
30762
+ def follower_fetching(self) -> Optional['outputs.GetKafkaKafkaUserConfigFollowerFetchingResult']:
30763
+ """
30764
+ Enable follower fetching
30765
+ """
30766
+ return pulumi.get(self, "follower_fetching")
30767
+
29953
30768
  @property
29954
30769
  @pulumi.getter(name="ipFilterObjects")
29955
30770
  def ip_filter_objects(self) -> Optional[Sequence['outputs.GetKafkaKafkaUserConfigIpFilterObjectResult']]:
@@ -30036,6 +30851,14 @@ class GetKafkaKafkaUserConfigResult(dict):
30036
30851
  """
30037
30852
  return pulumi.get(self, "kafka_rest_config")
30038
30853
 
30854
+ @property
30855
+ @pulumi.getter(name="kafkaSaslMechanisms")
30856
+ def kafka_sasl_mechanisms(self) -> Optional['outputs.GetKafkaKafkaUserConfigKafkaSaslMechanismsResult']:
30857
+ """
30858
+ Kafka SASL mechanisms
30859
+ """
30860
+ return pulumi.get(self, "kafka_sasl_mechanisms")
30861
+
30039
30862
  @property
30040
30863
  @pulumi.getter(name="kafkaVersion")
30041
30864
  def kafka_version(self) -> Optional[str]:
@@ -30117,6 +30940,25 @@ class GetKafkaKafkaUserConfigResult(dict):
30117
30940
  return pulumi.get(self, "tiered_storage")
30118
30941
 
30119
30942
 
30943
+ @pulumi.output_type
30944
+ class GetKafkaKafkaUserConfigFollowerFetchingResult(dict):
30945
+ def __init__(__self__, *,
30946
+ enabled: Optional[bool] = None):
30947
+ """
30948
+ :param bool enabled: Whether to enable the follower fetching functionality.
30949
+ """
30950
+ if enabled is not None:
30951
+ pulumi.set(__self__, "enabled", enabled)
30952
+
30953
+ @property
30954
+ @pulumi.getter
30955
+ def enabled(self) -> Optional[bool]:
30956
+ """
30957
+ Whether to enable the follower fetching functionality.
30958
+ """
30959
+ return pulumi.get(self, "enabled")
30960
+
30961
+
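Note: a hedged sketch combining the two new Kafka user-config blocks. The resource-side args classes are assumed to mirror the data-source result types above, and the SASL mechanism field names (`plain`, `scram_sha_256`, `scram_sha_512`) are an assumption based on Aiven's service configuration, not confirmed by this diff:

```python
import pulumi_aiven as aiven

# Sketch only: enable follower fetching and pick the allowed SASL mechanisms.
kafka = aiven.Kafka(
    "example-kafka",
    project="my-project",
    cloud_name="aws-eu-west-1",
    plan="business-4",
    service_name="example-kafka",
    kafka_user_config=aiven.KafkaKafkaUserConfigArgs(
        follower_fetching=aiven.KafkaKafkaUserConfigFollowerFetchingArgs(
            enabled=True,  # let consumers fetch from the nearest replica
        ),
        kafka_sasl_mechanisms=aiven.KafkaKafkaUserConfigKafkaSaslMechanismsArgs(
            plain=True,            # assumption: field names not shown in this diff
            scram_sha_256=True,
            scram_sha_512=True,
        ),
    ),
)
```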
30120
30962
  @pulumi.output_type
30121
30963
  class GetKafkaKafkaUserConfigIpFilterObjectResult(dict):
30122
30964
  def __init__(__self__, *,
@@ -30197,52 +31039,52 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
30197
31039
  transaction_remove_expired_transaction_cleanup_interval_ms: Optional[int] = None,
30198
31040
  transaction_state_log_segment_bytes: Optional[int] = None):
30199
31041
  """
30200
- :param bool auto_create_topics_enable: Enable auto creation of topics.
30201
- :param str compression_type: Enum: `gzip`, `snappy`, `lz4`, `zstd`, `uncompressed`, `producer`. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `uncompressed` which is equivalent to no compression; and `producer` which means retain the original compression codec set by the producer.
30202
- :param int connections_max_idle_ms: Idle connections timeout: the server socket processor threads close the connections that idle for longer than this. Example: `540000`.
30203
- :param int default_replication_factor: Replication factor for autocreated topics.
30204
- :param int group_initial_rebalance_delay_ms: The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. Example: `3000`.
30205
- :param int group_max_session_timeout_ms: The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. Example: `1800000`.
30206
- :param int group_min_session_timeout_ms: The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. Example: `6000`.
30207
- :param int log_cleaner_delete_retention_ms: How long are delete records retained? Example: `86400000`.
30208
- :param int log_cleaner_max_compaction_lag_ms: The maximum amount of time message will remain uncompacted. Only applicable for logs that are being compacted.
30209
- :param float log_cleaner_min_cleanable_ratio: Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option. Example: `0.5`.
30210
- :param int log_cleaner_min_compaction_lag_ms: The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.
30211
- :param str log_cleanup_policy: Enum: `delete`, `compact`, `compact,delete`. The default cleanup policy for segments beyond the retention window.
30212
- :param int log_flush_interval_messages: The number of messages accumulated on a log partition before messages are flushed to disk. Example: `9223372036854775807`.
30213
- :param int log_flush_interval_ms: The maximum time in ms that a message in any topic is kept in memory before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used.
30214
- :param int log_index_interval_bytes: The interval with which Kafka adds an entry to the offset index. Example: `4096`.
30215
- :param int log_index_size_max_bytes: The maximum size in bytes of the offset index. Example: `10485760`.
30216
- :param int log_local_retention_bytes: The maximum size of local log segments that can grow for a partition before it gets eligible for deletion. If set to -2, the value of log.retention.bytes is used. The effective value should always be less than or equal to log.retention.bytes value.
30217
- :param int log_local_retention_ms: The number of milliseconds to keep the local log segments before it gets eligible for deletion. If set to -2, the value of log.retention.ms is used. The effective value should always be less than or equal to log.retention.ms value.
30218
- :param bool log_message_downconversion_enable: This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests.
30219
- :param int log_message_timestamp_difference_max_ms: The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message.
30220
- :param str log_message_timestamp_type: Enum: `CreateTime`, `LogAppendTime`. Define whether the timestamp in the message is message create time or log append time.
30221
- :param bool log_preallocate: Should pre allocate file when create new segment?
30222
- :param int log_retention_bytes: The maximum size of the log before deleting messages.
30223
- :param int log_retention_hours: The number of hours to keep a log file before deleting it.
30224
- :param int log_retention_ms: The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied.
30225
- :param int log_roll_jitter_ms: The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used.
30226
- :param int log_roll_ms: The maximum time before a new log segment is rolled out (in milliseconds).
30227
- :param int log_segment_bytes: The maximum size of a single log file.
30228
- :param int log_segment_delete_delay_ms: The amount of time to wait before deleting a file from the filesystem. Example: `60000`.
30229
- :param int max_connections_per_ip: The maximum number of connections allowed from each ip address (defaults to 2147483647).
30230
- :param int max_incremental_fetch_session_cache_slots: The maximum number of incremental fetch sessions that the broker will maintain. Example: `1000`.
30231
- :param int message_max_bytes: The maximum size of message that the server can receive. Example: `1048588`.
30232
- :param int min_insync_replicas: When a producer sets acks to `all` (or `-1`), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful. Example: `1`.
30233
- :param int num_partitions: Number of partitions for autocreated topics.
30234
- :param int offsets_retention_minutes: Log retention window in minutes for offsets topic. Example: `10080`.
30235
- :param int producer_purgatory_purge_interval_requests: The purge interval (in number of requests) of the producer request purgatory(defaults to 1000).
30236
- :param int replica_fetch_max_bytes: The number of bytes of messages to attempt to fetch for each partition (defaults to 1048576). This is not an absolute maximum, if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made.
30237
- :param int replica_fetch_response_max_bytes: Maximum bytes expected for the entire fetch response (defaults to 10485760). Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum.
30238
- :param str sasl_oauthbearer_expected_audience: The (optional) comma-delimited setting for the broker to use to verify that the JWT was issued for one of the expected audiences.
30239
- :param str sasl_oauthbearer_expected_issuer: Optional setting for the broker to use to verify that the JWT was created by the expected issuer.
30240
- :param str sasl_oauthbearer_jwks_endpoint_url: OIDC JWKS endpoint URL. By setting this the SASL SSL OAuth2/OIDC authentication is enabled. See also other options for SASL OAuth2/OIDC.
30241
- :param str sasl_oauthbearer_sub_claim_name: Name of the scope from which to extract the subject claim from the JWT. Defaults to sub.
30242
- :param int socket_request_max_bytes: The maximum number of bytes in a socket request (defaults to 104857600).
30243
- :param bool transaction_partition_verification_enable: Enable verification that checks that the partition has been added to the transaction before writing transactional records to the partition.
30244
- :param int transaction_remove_expired_transaction_cleanup_interval_ms: The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (defaults to 3600000 (1 hour)).
30245
- :param int transaction_state_log_segment_bytes: The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (defaults to 104857600 (100 mebibytes)).
31042
+ :param bool auto_create_topics_enable: Enable auto-creation of topics. (Default: true).
31043
+ :param str compression_type: Enum: `gzip`, `snappy`, `lz4`, `zstd`, `uncompressed`, `producer`. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `uncompressed` which is equivalent to no compression; and `producer` which means retain the original compression codec set by the producer. (Default: producer).
31044
+ :param int connections_max_idle_ms: Idle connections timeout: the server socket processor threads close the connections that idle for longer than this. (Default: 600000 ms (10 minutes)). Example: `540000`.
31045
+ :param int default_replication_factor: Replication factor for auto-created topics (Default: 3).
31046
+ :param int group_initial_rebalance_delay_ms: The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. (Default: 3000 ms (3 seconds)). Example: `3000`.
31047
+ :param int group_max_session_timeout_ms: The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. Default: 1800000 ms (30 minutes). Example: `1800000`.
31048
+ :param int group_min_session_timeout_ms: The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. (Default: 6000 ms (6 seconds)). Example: `6000`.
31049
+ :param int log_cleaner_delete_retention_ms: How long are delete records retained? (Default: 86400000 (1 day)). Example: `86400000`.
31050
+ :param int log_cleaner_max_compaction_lag_ms: The maximum amount of time a message will remain uncompacted. Only applicable for logs that are being compacted. (Default: 9223372036854775807 ms (Long.MAX_VALUE)).
31051
+ :param float log_cleaner_min_cleanable_ratio: Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option. (Default: 0.5). Example: `0.5`.
31052
+ :param int log_cleaner_min_compaction_lag_ms: The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted. (Default: 0 ms).
31053
+ :param str log_cleanup_policy: Enum: `delete`, `compact`, `compact,delete`. The default cleanup policy for segments beyond the retention window (Default: delete).
31054
+ :param int log_flush_interval_messages: The number of messages accumulated on a log partition before messages are flushed to disk (Default: 9223372036854775807 (Long.MAX_VALUE)). Example: `9223372036854775807`.
31055
+ :param int log_flush_interval_ms: The maximum time in ms that a message in any topic is kept in memory (page-cache) before being flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used (Default: null).
31056
+ :param int log_index_interval_bytes: The interval with which Kafka adds an entry to the offset index (Default: 4096 bytes (4 kibibytes)). Example: `4096`.
31057
+ :param int log_index_size_max_bytes: The maximum size in bytes of the offset index (Default: 10485760 (10 mebibytes)). Example: `10485760`.
31058
+ :param int log_local_retention_bytes: The maximum size of local log segments that can grow for a partition before it gets eligible for deletion. If set to -2, the value of log.retention.bytes is used. The effective value should always be less than or equal to log.retention.bytes value. (Default: -2).
31059
+ :param int log_local_retention_ms: The number of milliseconds to keep the local log segments before it gets eligible for deletion. If set to -2, the value of log.retention.ms is used. The effective value should always be less than or equal to log.retention.ms value. (Default: -2).
31060
+ :param bool log_message_downconversion_enable: This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. (Default: true).
31061
+ :param int log_message_timestamp_difference_max_ms: The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message (Default: 9223372036854775807 (Long.MAX_VALUE)).
31062
+ :param str log_message_timestamp_type: Enum: `CreateTime`, `LogAppendTime`. Define whether the timestamp in the message is message create time or log append time. (Default: CreateTime).
31063
+ :param bool log_preallocate: Should pre-allocate the file when creating a new segment? (Default: false).
31064
+ :param int log_retention_bytes: The maximum size of the log before deleting messages (Default: -1).
31065
+ :param int log_retention_hours: The number of hours to keep a log file before deleting it (Default: 168 hours (1 week)).
31066
+ :param int log_retention_ms: The number of milliseconds to keep a log file before deleting it. If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied. (Default: null, log.retention.hours applies).
31067
+ :param int log_roll_jitter_ms: The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used (Default: null).
31068
+ :param int log_roll_ms: The maximum time before a new log segment is rolled out (in milliseconds). (Default: null, log.roll.hours applies (Default: 168, 7 days)).
31069
+ :param int log_segment_bytes: The maximum size of a single log file (Default: 1073741824 bytes (1 gibibyte)).
31070
+ :param int log_segment_delete_delay_ms: The amount of time to wait before deleting a file from the filesystem (Default: 60000 ms (1 minute)). Example: `60000`.
31071
+ :param int max_connections_per_ip: The maximum number of connections allowed from each ip address (Default: 2147483647).
31072
+ :param int max_incremental_fetch_session_cache_slots: The maximum number of incremental fetch sessions that the broker will maintain. (Default: 1000). Example: `1000`.
31073
+ :param int message_max_bytes: The maximum size of message that the server can receive. (Default: 1048588 bytes (1 mebibyte + 12 bytes)). Example: `1048588`.
31074
+ :param int min_insync_replicas: When a producer sets acks to `all` (or `-1`), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful. (Default: 1). Example: `1`.
31075
+ :param int num_partitions: Number of partitions for auto-created topics (Default: 1).
31076
+ :param int offsets_retention_minutes: Log retention window in minutes for offsets topic (Default: 10080 minutes (7 days)). Example: `10080`.
31077
+ :param int producer_purgatory_purge_interval_requests: The purge interval (in number of requests) of the producer request purgatory (Default: 1000).
31078
+ :param int replica_fetch_max_bytes: The number of bytes of messages to attempt to fetch for each partition. This is not an absolute maximum; if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. (Default: 1048576 bytes (1 mebibyte)).
31079
+ :param int replica_fetch_response_max_bytes: Maximum bytes expected for the entire fetch response. Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum. (Default: 10485760 bytes (10 mebibytes)).
31080
+ :param str sasl_oauthbearer_expected_audience: The (optional) comma-delimited setting for the broker to use to verify that the JWT was issued for one of the expected audiences. (Default: null).
31081
+ :param str sasl_oauthbearer_expected_issuer: Optional setting for the broker to use to verify that the JWT was created by the expected issuer. (Default: null).
31082
+ :param str sasl_oauthbearer_jwks_endpoint_url: OIDC JWKS endpoint URL. By setting this, SASL SSL OAuth2/OIDC authentication is enabled. See also other options for SASL OAuth2/OIDC. (Default: null).
31083
+ :param str sasl_oauthbearer_sub_claim_name: Name of the scope from which to extract the subject claim from the JWT. (Default: sub).
31084
+ :param int socket_request_max_bytes: The maximum number of bytes in a socket request (Default: 104857600 bytes).
31085
+ :param bool transaction_partition_verification_enable: Enable verification that checks that the partition has been added to the transaction before writing transactional records to the partition. (Default: true).
31086
+ :param int transaction_remove_expired_transaction_cleanup_interval_ms: The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (Default: 3600000 ms (1 hour)). Example: `3600000`.
31087
+ :param int transaction_state_log_segment_bytes: The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (Default: 104857600 bytes (100 mebibytes)). Example: `104857600`.
30246
31088
  """
30247
31089
  if auto_create_topics_enable is not None:
30248
31090
  pulumi.set(__self__, "auto_create_topics_enable", auto_create_topics_enable)
@@ -30341,7 +31183,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
  @pulumi.getter(name="autoCreateTopicsEnable")
  def auto_create_topics_enable(self) -> Optional[bool]:
  """
- Enable auto creation of topics.
+ Enable auto-creation of topics. (Default: true).
  """
  return pulumi.get(self, "auto_create_topics_enable")

@@ -30349,7 +31191,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
  @pulumi.getter(name="compressionType")
  def compression_type(self) -> Optional[str]:
  """
- Enum: `gzip`, `snappy`, `lz4`, `zstd`, `uncompressed`, `producer`. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `uncompressed` which is equivalent to no compression; and `producer` which means retain the original compression codec set by the producer.
+ Enum: `gzip`, `snappy`, `lz4`, `zstd`, `uncompressed`, `producer`. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `uncompressed` which is equivalent to no compression; and `producer` which means retain the original compression codec set by the producer. (Default: producer).
  """
  return pulumi.get(self, "compression_type")

@@ -30357,7 +31199,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
  @pulumi.getter(name="connectionsMaxIdleMs")
  def connections_max_idle_ms(self) -> Optional[int]:
  """
- Idle connections timeout: the server socket processor threads close the connections that idle for longer than this. Example: `540000`.
+ Idle connections timeout: the server socket processor threads close the connections that idle for longer than this. (Default: 600000 ms (10 minutes)). Example: `540000`.
  """
  return pulumi.get(self, "connections_max_idle_ms")

@@ -30365,7 +31207,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
  @pulumi.getter(name="defaultReplicationFactor")
  def default_replication_factor(self) -> Optional[int]:
  """
- Replication factor for autocreated topics.
+ Replication factor for auto-created topics (Default: 3).
  """
  return pulumi.get(self, "default_replication_factor")

@@ -30373,7 +31215,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
  @pulumi.getter(name="groupInitialRebalanceDelayMs")
  def group_initial_rebalance_delay_ms(self) -> Optional[int]:
  """
- The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. Example: `3000`.
+ The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. (Default: 3000 ms (3 seconds)). Example: `3000`.
  """
  return pulumi.get(self, "group_initial_rebalance_delay_ms")

@@ -30381,7 +31223,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
  @pulumi.getter(name="groupMaxSessionTimeoutMs")
  def group_max_session_timeout_ms(self) -> Optional[int]:
  """
- The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. Example: `1800000`.
+ The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. (Default: 1800000 ms (30 minutes)). Example: `1800000`.
  """
  return pulumi.get(self, "group_max_session_timeout_ms")

@@ -30389,7 +31231,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
  @pulumi.getter(name="groupMinSessionTimeoutMs")
  def group_min_session_timeout_ms(self) -> Optional[int]:
  """
- The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. Example: `6000`.
+ The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. (Default: 6000 ms (6 seconds)). Example: `6000`.
  """
  return pulumi.get(self, "group_min_session_timeout_ms")

@@ -30397,7 +31239,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
  @pulumi.getter(name="logCleanerDeleteRetentionMs")
  def log_cleaner_delete_retention_ms(self) -> Optional[int]:
  """
- How long are delete records retained? Example: `86400000`.
+ How long are delete records retained? (Default: 86400000 (1 day)). Example: `86400000`.
  """
  return pulumi.get(self, "log_cleaner_delete_retention_ms")

@@ -30405,7 +31247,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
  @pulumi.getter(name="logCleanerMaxCompactionLagMs")
  def log_cleaner_max_compaction_lag_ms(self) -> Optional[int]:
  """
- The maximum amount of time message will remain uncompacted. Only applicable for logs that are being compacted.
+ The maximum amount of time a message will remain uncompacted. Only applicable for logs that are being compacted. (Default: 9223372036854775807 ms (Long.MAX_VALUE)).
  """
  return pulumi.get(self, "log_cleaner_max_compaction_lag_ms")

@@ -30413,7 +31255,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
  @pulumi.getter(name="logCleanerMinCleanableRatio")
  def log_cleaner_min_cleanable_ratio(self) -> Optional[float]:
  """
- Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option. Example: `0.5`.
+ Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option. (Default: 0.5). Example: `0.5`.
  """
  return pulumi.get(self, "log_cleaner_min_cleanable_ratio")

@@ -30421,7 +31263,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
  @pulumi.getter(name="logCleanerMinCompactionLagMs")
  def log_cleaner_min_compaction_lag_ms(self) -> Optional[int]:
  """
- The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.
+ The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted. (Default: 0 ms).
  """
  return pulumi.get(self, "log_cleaner_min_compaction_lag_ms")

@@ -30429,7 +31271,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
  @pulumi.getter(name="logCleanupPolicy")
  def log_cleanup_policy(self) -> Optional[str]:
  """
- Enum: `delete`, `compact`, `compact,delete`. The default cleanup policy for segments beyond the retention window.
+ Enum: `delete`, `compact`, `compact,delete`. The default cleanup policy for segments beyond the retention window (Default: delete).
  """
  return pulumi.get(self, "log_cleanup_policy")

@@ -30437,7 +31279,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
  @pulumi.getter(name="logFlushIntervalMessages")
  def log_flush_interval_messages(self) -> Optional[int]:
  """
- The number of messages accumulated on a log partition before messages are flushed to disk. Example: `9223372036854775807`.
+ The number of messages accumulated on a log partition before messages are flushed to disk (Default: 9223372036854775807 (Long.MAX_VALUE)). Example: `9223372036854775807`.
  """
  return pulumi.get(self, "log_flush_interval_messages")

@@ -30445,7 +31287,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
  @pulumi.getter(name="logFlushIntervalMs")
  def log_flush_interval_ms(self) -> Optional[int]:
  """
- The maximum time in ms that a message in any topic is kept in memory before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used.
+ The maximum time in ms that a message in any topic is kept in memory (page-cache) before being flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used (Default: null).
  """
  return pulumi.get(self, "log_flush_interval_ms")

@@ -30453,7 +31295,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
  @pulumi.getter(name="logIndexIntervalBytes")
  def log_index_interval_bytes(self) -> Optional[int]:
  """
- The interval with which Kafka adds an entry to the offset index. Example: `4096`.
+ The interval with which Kafka adds an entry to the offset index (Default: 4096 bytes (4 kibibytes)). Example: `4096`.
  """
  return pulumi.get(self, "log_index_interval_bytes")

@@ -30461,7 +31303,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
  @pulumi.getter(name="logIndexSizeMaxBytes")
  def log_index_size_max_bytes(self) -> Optional[int]:
  """
- The maximum size in bytes of the offset index. Example: `10485760`.
+ The maximum size in bytes of the offset index (Default: 10485760 (10 mebibytes)). Example: `10485760`.
  """
  return pulumi.get(self, "log_index_size_max_bytes")

@@ -30469,7 +31311,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
  @pulumi.getter(name="logLocalRetentionBytes")
  def log_local_retention_bytes(self) -> Optional[int]:
  """
- The maximum size of local log segments that can grow for a partition before it gets eligible for deletion. If set to -2, the value of log.retention.bytes is used. The effective value should always be less than or equal to log.retention.bytes value.
+ The maximum size of local log segments that can grow for a partition before it gets eligible for deletion. If set to -2, the value of log.retention.bytes is used. The effective value should always be less than or equal to log.retention.bytes value. (Default: -2).
  """
  return pulumi.get(self, "log_local_retention_bytes")

@@ -30477,7 +31319,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
  @pulumi.getter(name="logLocalRetentionMs")
  def log_local_retention_ms(self) -> Optional[int]:
  """
- The number of milliseconds to keep the local log segments before it gets eligible for deletion. If set to -2, the value of log.retention.ms is used. The effective value should always be less than or equal to log.retention.ms value.
+ The number of milliseconds to keep the local log segments before it gets eligible for deletion. If set to -2, the value of log.retention.ms is used. The effective value should always be less than or equal to log.retention.ms value. (Default: -2).
  """
  return pulumi.get(self, "log_local_retention_ms")

@@ -30485,7 +31327,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
  @pulumi.getter(name="logMessageDownconversionEnable")
  def log_message_downconversion_enable(self) -> Optional[bool]:
  """
- This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests.
+ This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. (Default: true).
  """
  return pulumi.get(self, "log_message_downconversion_enable")

@@ -30493,7 +31335,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
  @pulumi.getter(name="logMessageTimestampDifferenceMaxMs")
  def log_message_timestamp_difference_max_ms(self) -> Optional[int]:
  """
- The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message.
+ The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message (Default: 9223372036854775807 (Long.MAX_VALUE)).
  """
  return pulumi.get(self, "log_message_timestamp_difference_max_ms")

@@ -30501,7 +31343,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
  @pulumi.getter(name="logMessageTimestampType")
  def log_message_timestamp_type(self) -> Optional[str]:
  """
- Enum: `CreateTime`, `LogAppendTime`. Define whether the timestamp in the message is message create time or log append time.
+ Enum: `CreateTime`, `LogAppendTime`. Define whether the timestamp in the message is message create time or log append time. (Default: CreateTime).
  """
  return pulumi.get(self, "log_message_timestamp_type")

@@ -30509,7 +31351,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
  @pulumi.getter(name="logPreallocate")
  def log_preallocate(self) -> Optional[bool]:
  """
- Should pre allocate file when create new segment?
+ Should pre-allocate the file when creating a new segment? (Default: false).
  """
  return pulumi.get(self, "log_preallocate")

@@ -30517,7 +31359,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
  @pulumi.getter(name="logRetentionBytes")
  def log_retention_bytes(self) -> Optional[int]:
  """
- The maximum size of the log before deleting messages.
+ The maximum size of the log before deleting messages (Default: -1).
  """
  return pulumi.get(self, "log_retention_bytes")

@@ -30525,7 +31367,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
  @pulumi.getter(name="logRetentionHours")
  def log_retention_hours(self) -> Optional[int]:
  """
- The number of hours to keep a log file before deleting it.
+ The number of hours to keep a log file before deleting it (Default: 168 hours (1 week)).
  """
  return pulumi.get(self, "log_retention_hours")

@@ -30533,7 +31375,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
  @pulumi.getter(name="logRetentionMs")
  def log_retention_ms(self) -> Optional[int]:
  """
- The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied.
+ The number of milliseconds to keep a log file before deleting it. If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied. (Default: null, log.retention.hours applies).
  """
  return pulumi.get(self, "log_retention_ms")

@@ -30541,7 +31383,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
  @pulumi.getter(name="logRollJitterMs")
  def log_roll_jitter_ms(self) -> Optional[int]:
  """
- The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used.
+ The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used (Default: null).
  """
  return pulumi.get(self, "log_roll_jitter_ms")

@@ -30549,7 +31391,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
  @pulumi.getter(name="logRollMs")
  def log_roll_ms(self) -> Optional[int]:
  """
- The maximum time before a new log segment is rolled out (in milliseconds).
+ The maximum time before a new log segment is rolled out (in milliseconds). (Default: null, log.roll.hours applies (Default: 168, 7 days)).
  """
  return pulumi.get(self, "log_roll_ms")

@@ -30557,7 +31399,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
  @pulumi.getter(name="logSegmentBytes")
  def log_segment_bytes(self) -> Optional[int]:
  """
- The maximum size of a single log file.
+ The maximum size of a single log file (Default: 1073741824 bytes (1 gibibyte)).
  """
  return pulumi.get(self, "log_segment_bytes")

@@ -30565,7 +31407,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
  @pulumi.getter(name="logSegmentDeleteDelayMs")
  def log_segment_delete_delay_ms(self) -> Optional[int]:
  """
- The amount of time to wait before deleting a file from the filesystem. Example: `60000`.
+ The amount of time to wait before deleting a file from the filesystem (Default: 60000 ms (1 minute)). Example: `60000`.
  """
  return pulumi.get(self, "log_segment_delete_delay_ms")

@@ -30573,7 +31415,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
  @pulumi.getter(name="maxConnectionsPerIp")
  def max_connections_per_ip(self) -> Optional[int]:
  """
- The maximum number of connections allowed from each ip address (defaults to 2147483647).
+ The maximum number of connections allowed from each IP address (Default: 2147483647).
  """
  return pulumi.get(self, "max_connections_per_ip")

@@ -30581,7 +31423,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
  @pulumi.getter(name="maxIncrementalFetchSessionCacheSlots")
  def max_incremental_fetch_session_cache_slots(self) -> Optional[int]:
  """
- The maximum number of incremental fetch sessions that the broker will maintain. Example: `1000`.
+ The maximum number of incremental fetch sessions that the broker will maintain. (Default: 1000). Example: `1000`.
  """
  return pulumi.get(self, "max_incremental_fetch_session_cache_slots")

@@ -30589,7 +31431,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
  @pulumi.getter(name="messageMaxBytes")
  def message_max_bytes(self) -> Optional[int]:
  """
- The maximum size of message that the server can receive. Example: `1048588`.
+ The maximum size of a message that the server can receive. (Default: 1048588 bytes (1 mebibyte + 12 bytes)). Example: `1048588`.
  """
  return pulumi.get(self, "message_max_bytes")

@@ -30597,7 +31439,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
  @pulumi.getter(name="minInsyncReplicas")
  def min_insync_replicas(self) -> Optional[int]:
  """
- When a producer sets acks to `all` (or `-1`), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful. Example: `1`.
+ When a producer sets acks to `all` (or `-1`), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful. (Default: 1). Example: `1`.
  """
  return pulumi.get(self, "min_insync_replicas")

@@ -30605,7 +31447,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
  @pulumi.getter(name="numPartitions")
  def num_partitions(self) -> Optional[int]:
  """
- Number of partitions for autocreated topics.
+ Number of partitions for auto-created topics (Default: 1).
  """
  return pulumi.get(self, "num_partitions")

@@ -30613,7 +31455,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
  @pulumi.getter(name="offsetsRetentionMinutes")
  def offsets_retention_minutes(self) -> Optional[int]:
  """
- Log retention window in minutes for offsets topic. Example: `10080`.
+ Log retention window in minutes for offsets topic (Default: 10080 minutes (7 days)). Example: `10080`.
  """
  return pulumi.get(self, "offsets_retention_minutes")

@@ -30621,7 +31463,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
  @pulumi.getter(name="producerPurgatoryPurgeIntervalRequests")
  def producer_purgatory_purge_interval_requests(self) -> Optional[int]:
  """
- The purge interval (in number of requests) of the producer request purgatory(defaults to 1000).
+ The purge interval (in number of requests) of the producer request purgatory (Default: 1000).
  """
  return pulumi.get(self, "producer_purgatory_purge_interval_requests")

@@ -30629,7 +31471,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
  @pulumi.getter(name="replicaFetchMaxBytes")
  def replica_fetch_max_bytes(self) -> Optional[int]:
  """
- The number of bytes of messages to attempt to fetch for each partition (defaults to 1048576). This is not an absolute maximum, if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made.
+ The number of bytes of messages to attempt to fetch for each partition. This is not an absolute maximum: if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. (Default: 1048576 bytes (1 mebibyte)).
  """
  return pulumi.get(self, "replica_fetch_max_bytes")

@@ -30637,7 +31479,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
  @pulumi.getter(name="replicaFetchResponseMaxBytes")
  def replica_fetch_response_max_bytes(self) -> Optional[int]:
  """
- Maximum bytes expected for the entire fetch response (defaults to 10485760). Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum.
+ Maximum bytes expected for the entire fetch response. Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum. (Default: 10485760 bytes (10 mebibytes)).
  """
  return pulumi.get(self, "replica_fetch_response_max_bytes")

@@ -30645,7 +31487,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
  @pulumi.getter(name="saslOauthbearerExpectedAudience")
  def sasl_oauthbearer_expected_audience(self) -> Optional[str]:
  """
- The (optional) comma-delimited setting for the broker to use to verify that the JWT was issued for one of the expected audiences.
+ The (optional) comma-delimited setting for the broker to use to verify that the JWT was issued for one of the expected audiences. (Default: null).
  """
  return pulumi.get(self, "sasl_oauthbearer_expected_audience")

@@ -30653,7 +31495,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
  @pulumi.getter(name="saslOauthbearerExpectedIssuer")
  def sasl_oauthbearer_expected_issuer(self) -> Optional[str]:
  """
- Optional setting for the broker to use to verify that the JWT was created by the expected issuer.
+ Optional setting for the broker to use to verify that the JWT was created by the expected issuer. (Default: null).
  """
  return pulumi.get(self, "sasl_oauthbearer_expected_issuer")

@@ -30661,7 +31503,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
  @pulumi.getter(name="saslOauthbearerJwksEndpointUrl")
  def sasl_oauthbearer_jwks_endpoint_url(self) -> Optional[str]:
  """
- OIDC JWKS endpoint URL. By setting this the SASL SSL OAuth2/OIDC authentication is enabled. See also other options for SASL OAuth2/OIDC.
+ OIDC JWKS endpoint URL. Setting this enables SASL SSL OAuth2/OIDC authentication. See also the other options for SASL OAuth2/OIDC. (Default: null).
  """
  return pulumi.get(self, "sasl_oauthbearer_jwks_endpoint_url")

@@ -30669,7 +31511,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
  @pulumi.getter(name="saslOauthbearerSubClaimName")
  def sasl_oauthbearer_sub_claim_name(self) -> Optional[str]:
  """
- Name of the scope from which to extract the subject claim from the JWT. Defaults to sub.
+ Name of the scope from which to extract the subject claim from the JWT. (Default: sub).
  """
  return pulumi.get(self, "sasl_oauthbearer_sub_claim_name")

@@ -30677,7 +31519,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
  @pulumi.getter(name="socketRequestMaxBytes")
  def socket_request_max_bytes(self) -> Optional[int]:
  """
- The maximum number of bytes in a socket request (defaults to 104857600).
+ The maximum number of bytes in a socket request (Default: 104857600 bytes).
  """
  return pulumi.get(self, "socket_request_max_bytes")

@@ -30685,7 +31527,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
  @pulumi.getter(name="transactionPartitionVerificationEnable")
  def transaction_partition_verification_enable(self) -> Optional[bool]:
  """
- Enable verification that checks that the partition has been added to the transaction before writing transactional records to the partition.
+ Enable verification that checks that the partition has been added to the transaction before writing transactional records to the partition. (Default: true).
  """
  return pulumi.get(self, "transaction_partition_verification_enable")

@@ -30693,7 +31535,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
  @pulumi.getter(name="transactionRemoveExpiredTransactionCleanupIntervalMs")
  def transaction_remove_expired_transaction_cleanup_interval_ms(self) -> Optional[int]:
  """
- The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (defaults to 3600000 (1 hour)).
+ The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (Default: 3600000 ms (1 hour)). Example: `3600000`.
  """
  return pulumi.get(self, "transaction_remove_expired_transaction_cleanup_interval_ms")

@@ -30701,7 +31543,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
  @pulumi.getter(name="transactionStateLogSegmentBytes")
  def transaction_state_log_segment_bytes(self) -> Optional[int]:
  """
- The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (defaults to 104857600 (100 mebibytes)).
+ The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (Default: 104857600 bytes (100 mebibytes)). Example: `104857600`.
  """
  return pulumi.get(self, "transaction_state_log_segment_bytes")

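These broker-level options are read back by the `get_kafka` data source; on the resource side they are normally set through the service's `kafka_user_config`. A minimal sketch, assuming the corresponding resource-side input types (`KafkaKafkaUserConfigArgs`, `KafkaKafkaUserConfigKafkaArgs`) mirror the result class above; project and service names are placeholders:

import pulumi_aiven as aiven

# Hypothetical project/service names; values echo the documented defaults.
kafka = aiven.Kafka(
    "example-kafka",
    project="example-project",
    cloud_name="google-europe-west1",
    plan="business-4",
    service_name="example-kafka",
    kafka_user_config=aiven.KafkaKafkaUserConfigArgs(
        kafka=aiven.KafkaKafkaUserConfigKafkaArgs(
            min_insync_replicas=1,
            log_retention_ms=604800000,  # 7 days
            message_max_bytes=1048588,
        ),
    ),
)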
@@ -31211,6 +32053,49 @@ class GetKafkaKafkaUserConfigKafkaRestConfigResult(dict):
  return pulumi.get(self, "simpleconsumer_pool_size_max")


+ @pulumi.output_type
+ class GetKafkaKafkaUserConfigKafkaSaslMechanismsResult(dict):
+ def __init__(__self__, *,
+ plain: Optional[bool] = None,
+ scram_sha256: Optional[bool] = None,
+ scram_sha512: Optional[bool] = None):
+ """
+ :param bool plain: Enable PLAIN mechanism. Default: `true`.
+ :param bool scram_sha256: Enable SCRAM-SHA-256 mechanism. Default: `true`.
+ :param bool scram_sha512: Enable SCRAM-SHA-512 mechanism. Default: `true`.
+ """
+ if plain is not None:
+ pulumi.set(__self__, "plain", plain)
+ if scram_sha256 is not None:
+ pulumi.set(__self__, "scram_sha256", scram_sha256)
+ if scram_sha512 is not None:
+ pulumi.set(__self__, "scram_sha512", scram_sha512)
+
+ @property
+ @pulumi.getter
+ def plain(self) -> Optional[bool]:
+ """
+ Enable PLAIN mechanism. Default: `true`.
+ """
+ return pulumi.get(self, "plain")
+
+ @property
+ @pulumi.getter(name="scramSha256")
+ def scram_sha256(self) -> Optional[bool]:
+ """
+ Enable SCRAM-SHA-256 mechanism. Default: `true`.
+ """
+ return pulumi.get(self, "scram_sha256")
+
+ @property
+ @pulumi.getter(name="scramSha512")
+ def scram_sha512(self) -> Optional[bool]:
+ """
+ Enable SCRAM-SHA-512 mechanism. Default: `true`.
+ """
+ return pulumi.get(self, "scram_sha512")
+
+
  @pulumi.output_type
  class GetKafkaKafkaUserConfigPrivateAccessResult(dict):
  def __init__(__self__, *,
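The diff only shows the data-source result shape for the new SASL mechanism toggles; a resource-side input type (here assumed to be `KafkaKafkaUserConfigKafkaSaslMechanismsArgs` exposed as a `kafka_sasl_mechanisms` field) would presumably mirror it. A hedged sketch restricting SASL to the SCRAM variants:

import pulumi_aiven as aiven

# Sketch under the assumption above; names are placeholders.
kafka = aiven.Kafka(
    "example-kafka-sasl",
    project="example-project",
    cloud_name="google-europe-west1",
    plan="business-4",
    service_name="example-kafka-sasl",
    kafka_user_config=aiven.KafkaKafkaUserConfigArgs(
        kafka_sasl_mechanisms=aiven.KafkaKafkaUserConfigKafkaSaslMechanismsArgs(
            plain=False,        # all three mechanisms default to true
            scram_sha256=True,
            scram_sha512=True,
        ),
    ),
)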
@@ -31728,6 +32613,7 @@ class GetKafkaMirrorMakerKafkaMirrormakerUserConfigIpFilterObjectResult(dict):
  @pulumi.output_type
  class GetKafkaMirrorMakerKafkaMirrormakerUserConfigKafkaMirrormakerResult(dict):
  def __init__(__self__, *,
+ admin_timeout_ms: Optional[int] = None,
  emit_checkpoints_enabled: Optional[bool] = None,
  emit_checkpoints_interval_seconds: Optional[int] = None,
  groups: Optional[str] = None,
@@ -31742,6 +32628,7 @@ class GetKafkaMirrorMakerKafkaMirrormakerUserConfigKafkaMirrormakerResult(dict):
  sync_topic_configs_enabled: Optional[bool] = None,
  tasks_max_per_cpu: Optional[int] = None):
  """
+ :param int admin_timeout_ms: Timeout for administrative tasks, e.g. detecting new topics, loading of consumer group and offsets. Defaults to 60000 milliseconds (1 minute).
  :param bool emit_checkpoints_enabled: Whether to emit consumer group offset checkpoints to target cluster periodically (default: true).
  :param int emit_checkpoints_interval_seconds: Frequency at which consumer group offset checkpoints are emitted (default: 60, every minute). Example: `60`.
  :param str groups: Consumer groups to replicate. Supports comma-separated group IDs and regexes. Example: `.*`.
@@ -31756,6 +32643,8 @@ class GetKafkaMirrorMakerKafkaMirrormakerUserConfigKafkaMirrormakerResult(dict):
  :param bool sync_topic_configs_enabled: Whether to periodically configure remote topics to match their corresponding upstream topics.
  :param int tasks_max_per_cpu: `tasks.max` is set to this multiplied by the number of CPUs in the service. Default: `1`.
  """
+ if admin_timeout_ms is not None:
+ pulumi.set(__self__, "admin_timeout_ms", admin_timeout_ms)
  if emit_checkpoints_enabled is not None:
  pulumi.set(__self__, "emit_checkpoints_enabled", emit_checkpoints_enabled)
  if emit_checkpoints_interval_seconds is not None:
@@ -31783,6 +32672,14 @@ class GetKafkaMirrorMakerKafkaMirrormakerUserConfigKafkaMirrormakerResult(dict):
  if tasks_max_per_cpu is not None:
  pulumi.set(__self__, "tasks_max_per_cpu", tasks_max_per_cpu)

+ @property
+ @pulumi.getter(name="adminTimeoutMs")
+ def admin_timeout_ms(self) -> Optional[int]:
+ """
+ Timeout for administrative tasks, e.g. detecting new topics, loading of consumer group and offsets. Defaults to 60000 milliseconds (1 minute).
+ """
+ return pulumi.get(self, "admin_timeout_ms")
+
  @property
  @pulumi.getter(name="emitCheckpointsEnabled")
  def emit_checkpoints_enabled(self) -> Optional[bool]:
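A hedged sketch of setting the new MirrorMaker admin timeout on the resource side, assuming the input types mirror the data-source result above; project, plan, and names are placeholders:

import pulumi_aiven as aiven

# Raise the admin timeout from the documented 60000 ms default to 2 minutes.
mm = aiven.KafkaMirrorMaker(
    "example-mirrormaker",
    project="example-project",
    cloud_name="google-europe-west1",
    plan="startup-4",
    service_name="example-mm",
    kafka_mirrormaker_user_config=aiven.KafkaMirrorMakerKafkaMirrormakerUserConfigArgs(
        kafka_mirrormaker=aiven.KafkaMirrorMakerKafkaMirrormakerUserConfigKafkaMirrormakerArgs(
            admin_timeout_ms=120000,
            emit_checkpoints_enabled=True,
        ),
    ),
)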
@@ -34259,6 +35156,7 @@ class GetMySqlMysqlUserConfigMigrationResult(dict):
  port: int,
  dbname: Optional[str] = None,
  ignore_dbs: Optional[str] = None,
+ ignore_roles: Optional[str] = None,
  method: Optional[str] = None,
  password: Optional[str] = None,
  ssl: Optional[bool] = None,
@@ -34268,6 +35166,7 @@ class GetMySqlMysqlUserConfigMigrationResult(dict):
  :param int port: Port number of the server where to migrate data from. Example: `1234`.
  :param str dbname: Database name for bootstrapping the initial connection. Example: `defaultdb`.
  :param str ignore_dbs: Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment). Example: `db1,db2`.
+ :param str ignore_roles: Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment). Example: `role1,role2`.
  :param str method: Enum: `dump`, `replication`. The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
  :param str password: Password for authentication with the server where to migrate data from. Example: `jjKk45Nnd`.
  :param bool ssl: The server where to migrate data from is secured with SSL. Default: `true`.
@@ -34279,6 +35178,8 @@ class GetMySqlMysqlUserConfigMigrationResult(dict):
  pulumi.set(__self__, "dbname", dbname)
  if ignore_dbs is not None:
  pulumi.set(__self__, "ignore_dbs", ignore_dbs)
+ if ignore_roles is not None:
+ pulumi.set(__self__, "ignore_roles", ignore_roles)
  if method is not None:
  pulumi.set(__self__, "method", method)
  if password is not None:
@@ -34320,6 +35221,14 @@ class GetMySqlMysqlUserConfigMigrationResult(dict):
  """
  return pulumi.get(self, "ignore_dbs")

+ @property
+ @pulumi.getter(name="ignoreRoles")
+ def ignore_roles(self) -> Optional[str]:
+ """
+ Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment). Example: `role1,role2`.
+ """
+ return pulumi.get(self, "ignore_roles")
+
  @property
  @pulumi.getter
  def method(self) -> Optional[str]:
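A hedged sketch of a migration config using the new `ignore_roles` field, assuming the resource-side `MySqlMysqlUserConfigMigrationArgs` mirrors the result class above; host, credentials, and role names are placeholders (and, per the docstring, `ignore_roles` is honored for PostgreSQL sources only at the moment):

import pulumi_aiven as aiven

mysql = aiven.MySql(
    "example-mysql",
    project="example-project",
    cloud_name="google-europe-west1",
    plan="business-4",
    service_name="example-mysql",
    mysql_user_config=aiven.MySqlMysqlUserConfigArgs(
        migration=aiven.MySqlMysqlUserConfigMigrationArgs(
            host="source-db.example.com",  # placeholder source server
            port=3306,
            username="migrator",
            password="change-me",
            ignore_dbs="tmp_db",
            ignore_roles="role1,role2",
        ),
    ),
)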
@@ -35086,9 +35995,12 @@ class GetOpenSearchOpensearchResult(dict):
  class GetOpenSearchOpensearchUserConfigResult(dict):
  def __init__(__self__, *,
  additional_backup_regions: Optional[str] = None,
+ azure_migration: Optional['outputs.GetOpenSearchOpensearchUserConfigAzureMigrationResult'] = None,
  custom_domain: Optional[str] = None,
  disable_replication_factor_adjustment: Optional[bool] = None,
+ gcs_migration: Optional['outputs.GetOpenSearchOpensearchUserConfigGcsMigrationResult'] = None,
  index_patterns: Optional[Sequence['outputs.GetOpenSearchOpensearchUserConfigIndexPatternResult']] = None,
+ index_rollup: Optional['outputs.GetOpenSearchOpensearchUserConfigIndexRollupResult'] = None,
  index_template: Optional['outputs.GetOpenSearchOpensearchUserConfigIndexTemplateResult'] = None,
  ip_filter_objects: Optional[Sequence['outputs.GetOpenSearchOpensearchUserConfigIpFilterObjectResult']] = None,
  ip_filter_strings: Optional[Sequence[str]] = None,
@@ -35104,6 +36016,7 @@ class GetOpenSearchOpensearchUserConfigResult(dict):
  project_to_fork_from: Optional[str] = None,
  public_access: Optional['outputs.GetOpenSearchOpensearchUserConfigPublicAccessResult'] = None,
  recovery_basebackup_name: Optional[str] = None,
+ s3_migration: Optional['outputs.GetOpenSearchOpensearchUserConfigS3MigrationResult'] = None,
  saml: Optional['outputs.GetOpenSearchOpensearchUserConfigSamlResult'] = None,
  service_log: Optional[bool] = None,
  service_to_fork_from: Optional[str] = None,
@@ -35113,6 +36026,7 @@ class GetOpenSearchOpensearchUserConfigResult(dict):
  :param str custom_domain: Serve the web frontend using a custom CNAME pointing to the Aiven DNS name. Example: `grafana.example.org`.
  :param bool disable_replication_factor_adjustment: Disable automatic replication factor adjustment for multi-node services. By default, Aiven ensures all indexes are replicated at least to two nodes. Note: Due to potential data loss in case of losing a service node, this setting can no longer be activated.
  :param Sequence['GetOpenSearchOpensearchUserConfigIndexPatternArgs'] index_patterns: Index patterns
+ :param 'GetOpenSearchOpensearchUserConfigIndexRollupArgs' index_rollup: Index rollup settings
  :param 'GetOpenSearchOpensearchUserConfigIndexTemplateArgs' index_template: Template settings for all new indexes
  :param Sequence['GetOpenSearchOpensearchUserConfigIpFilterObjectArgs'] ip_filter_objects: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`
  :param Sequence[str] ip_filter_strings: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`.
@@ -35135,12 +36049,18 @@ class GetOpenSearchOpensearchUserConfigResult(dict):
  """
  if additional_backup_regions is not None:
  pulumi.set(__self__, "additional_backup_regions", additional_backup_regions)
+ if azure_migration is not None:
+ pulumi.set(__self__, "azure_migration", azure_migration)
  if custom_domain is not None:
  pulumi.set(__self__, "custom_domain", custom_domain)
  if disable_replication_factor_adjustment is not None:
  pulumi.set(__self__, "disable_replication_factor_adjustment", disable_replication_factor_adjustment)
+ if gcs_migration is not None:
+ pulumi.set(__self__, "gcs_migration", gcs_migration)
  if index_patterns is not None:
  pulumi.set(__self__, "index_patterns", index_patterns)
+ if index_rollup is not None:
+ pulumi.set(__self__, "index_rollup", index_rollup)
  if index_template is not None:
  pulumi.set(__self__, "index_template", index_template)
  if ip_filter_objects is not None:
@@ -35171,6 +36091,8 @@ class GetOpenSearchOpensearchUserConfigResult(dict):
  pulumi.set(__self__, "public_access", public_access)
  if recovery_basebackup_name is not None:
  pulumi.set(__self__, "recovery_basebackup_name", recovery_basebackup_name)
+ if s3_migration is not None:
+ pulumi.set(__self__, "s3_migration", s3_migration)
  if saml is not None:
  pulumi.set(__self__, "saml", saml)
  if service_log is not None:
@@ -35188,6 +36110,11 @@ class GetOpenSearchOpensearchUserConfigResult(dict):
  """
  return pulumi.get(self, "additional_backup_regions")

+ @property
+ @pulumi.getter(name="azureMigration")
+ def azure_migration(self) -> Optional['outputs.GetOpenSearchOpensearchUserConfigAzureMigrationResult']:
+ return pulumi.get(self, "azure_migration")
+
  @property
  @pulumi.getter(name="customDomain")
  def custom_domain(self) -> Optional[str]:
@@ -35204,6 +36131,11 @@ class GetOpenSearchOpensearchUserConfigResult(dict):
  """
  return pulumi.get(self, "disable_replication_factor_adjustment")

+ @property
+ @pulumi.getter(name="gcsMigration")
+ def gcs_migration(self) -> Optional['outputs.GetOpenSearchOpensearchUserConfigGcsMigrationResult']:
+ return pulumi.get(self, "gcs_migration")
+
  @property
  @pulumi.getter(name="indexPatterns")
  def index_patterns(self) -> Optional[Sequence['outputs.GetOpenSearchOpensearchUserConfigIndexPatternResult']]:
@@ -35212,6 +36144,14 @@ class GetOpenSearchOpensearchUserConfigResult(dict):
  """
  return pulumi.get(self, "index_patterns")

+ @property
+ @pulumi.getter(name="indexRollup")
+ def index_rollup(self) -> Optional['outputs.GetOpenSearchOpensearchUserConfigIndexRollupResult']:
+ """
+ Index rollup settings
+ """
+ return pulumi.get(self, "index_rollup")
+
  @property
  @pulumi.getter(name="indexTemplate")
  def index_template(self) -> Optional['outputs.GetOpenSearchOpensearchUserConfigIndexTemplateResult']:
@@ -35333,6 +36273,11 @@ class GetOpenSearchOpensearchUserConfigResult(dict):
  """
  return pulumi.get(self, "recovery_basebackup_name")

+ @property
+ @pulumi.getter(name="s3Migration")
+ def s3_migration(self) -> Optional['outputs.GetOpenSearchOpensearchUserConfigS3MigrationResult']:
+ return pulumi.get(self, "s3_migration")
+
  @property
  @pulumi.getter
  def saml(self) -> Optional['outputs.GetOpenSearchOpensearchUserConfigSamlResult']:
@@ -35366,6 +36311,192 @@ class GetOpenSearchOpensearchUserConfigResult(dict):
  return pulumi.get(self, "static_ips")


+ @pulumi.output_type
+ class GetOpenSearchOpensearchUserConfigAzureMigrationResult(dict):
+ def __init__(__self__, *,
+ account: str,
+ base_path: str,
+ container: str,
+ snapshot_name: str,
+ chunk_size: Optional[str] = None,
+ compress: Optional[bool] = None,
+ endpoint_suffix: Optional[str] = None,
+ key: Optional[str] = None,
+ sas_token: Optional[str] = None):
+ """
+ :param str account: Azure account name.
+ :param str base_path: The path to the repository data within its container. The value of this setting should not start or end with a /.
+ :param str container: Azure container name.
+ :param str snapshot_name: The snapshot name to restore from.
+ :param str chunk_size: Big files can be broken down into chunks during snapshotting if needed. Should be the same as for the 3rd party repository.
+ :param bool compress: When set to true, metadata files are stored in compressed format.
+ :param str endpoint_suffix: Defines the DNS suffix for Azure Storage endpoints.
+ :param str key: Azure account secret key. One of key or sas_token should be specified.
+ :param str sas_token: A shared access signature (SAS) token. One of key or sas_token should be specified.
+ """
+ pulumi.set(__self__, "account", account)
+ pulumi.set(__self__, "base_path", base_path)
+ pulumi.set(__self__, "container", container)
+ pulumi.set(__self__, "snapshot_name", snapshot_name)
+ if chunk_size is not None:
+ pulumi.set(__self__, "chunk_size", chunk_size)
+ if compress is not None:
+ pulumi.set(__self__, "compress", compress)
+ if endpoint_suffix is not None:
+ pulumi.set(__self__, "endpoint_suffix", endpoint_suffix)
+ if key is not None:
+ pulumi.set(__self__, "key", key)
+ if sas_token is not None:
+ pulumi.set(__self__, "sas_token", sas_token)
+
+ @property
+ @pulumi.getter
+ def account(self) -> str:
+ """
+ Azure account name.
+ """
+ return pulumi.get(self, "account")
+
+ @property
+ @pulumi.getter(name="basePath")
+ def base_path(self) -> str:
+ """
+ The path to the repository data within its container. The value of this setting should not start or end with a /.
+ """
+ return pulumi.get(self, "base_path")
+
+ @property
+ @pulumi.getter
+ def container(self) -> str:
+ """
+ Azure container name.
+ """
+ return pulumi.get(self, "container")
+
+ @property
+ @pulumi.getter(name="snapshotName")
+ def snapshot_name(self) -> str:
+ """
+ The snapshot name to restore from.
+ """
+ return pulumi.get(self, "snapshot_name")
+
+ @property
+ @pulumi.getter(name="chunkSize")
+ def chunk_size(self) -> Optional[str]:
+ """
+ Big files can be broken down into chunks during snapshotting if needed. Should be the same as for the 3rd party repository.
+ """
+ return pulumi.get(self, "chunk_size")
+
+ @property
+ @pulumi.getter
+ def compress(self) -> Optional[bool]:
+ """
+ When set to true, metadata files are stored in compressed format.
+ """
+ return pulumi.get(self, "compress")
+
+ @property
+ @pulumi.getter(name="endpointSuffix")
+ def endpoint_suffix(self) -> Optional[str]:
+ """
+ Defines the DNS suffix for Azure Storage endpoints.
+ """
+ return pulumi.get(self, "endpoint_suffix")
+
+ @property
+ @pulumi.getter
+ def key(self) -> Optional[str]:
+ """
+ Azure account secret key. One of key or sas_token should be specified.
+ """
+ return pulumi.get(self, "key")
+
+ @property
+ @pulumi.getter(name="sasToken")
+ def sas_token(self) -> Optional[str]:
+ """
+ A shared access signature (SAS) token. One of key or sas_token should be specified.
+ """
+ return pulumi.get(self, "sas_token")
+
+
+ @pulumi.output_type
+ class GetOpenSearchOpensearchUserConfigGcsMigrationResult(dict):
+ def __init__(__self__, *,
+ base_path: str,
+ bucket: str,
+ credentials: str,
+ snapshot_name: str,
+ chunk_size: Optional[str] = None,
+ compress: Optional[bool] = None):
+ """
+ :param str base_path: The path to the repository data within its container. The value of this setting should not start or end with a /.
+ :param str bucket: Google Cloud Storage bucket name.
+ :param str credentials: Google Cloud Storage credentials file content.
+ :param str snapshot_name: The snapshot name to restore from.
+ :param str chunk_size: Big files can be broken down into chunks during snapshotting if needed. Should be the same as for the 3rd party repository.
+ :param bool compress: When set to true, metadata files are stored in compressed format.
+ """
+ pulumi.set(__self__, "base_path", base_path)
+ pulumi.set(__self__, "bucket", bucket)
+ pulumi.set(__self__, "credentials", credentials)
+ pulumi.set(__self__, "snapshot_name", snapshot_name)
+ if chunk_size is not None:
+ pulumi.set(__self__, "chunk_size", chunk_size)
+ if compress is not None:
+ pulumi.set(__self__, "compress", compress)
+
+ @property
+ @pulumi.getter(name="basePath")
+ def base_path(self) -> str:
+ """
+ The path to the repository data within its container. The value of this setting should not start or end with a /.
+ """
+ return pulumi.get(self, "base_path")
+
+ @property
+ @pulumi.getter
+ def bucket(self) -> str:
+ """
+ Google Cloud Storage bucket name.
+ """
+ return pulumi.get(self, "bucket")
+
+ @property
+ @pulumi.getter
+ def credentials(self) -> str:
+ """
+ Google Cloud Storage credentials file content.
+ """
+ return pulumi.get(self, "credentials")
+
+ @property
+ @pulumi.getter(name="snapshotName")
+ def snapshot_name(self) -> str:
+ """
+ The snapshot name to restore from.
+ """
+ return pulumi.get(self, "snapshot_name")
+
+ @property
+ @pulumi.getter(name="chunkSize")
+ def chunk_size(self) -> Optional[str]:
+ """
+ Big files can be broken down into chunks during snapshotting if needed. Should be the same as for the 3rd party repository.
+ """
+ return pulumi.get(self, "chunk_size")
+
+ @property
+ @pulumi.getter
+ def compress(self) -> Optional[bool]:
+ """
+ When set to true, metadata files are stored in compressed format.
+ """
+ return pulumi.get(self, "compress")
+
+
  @pulumi.output_type
  class GetOpenSearchOpensearchUserConfigIndexPatternResult(dict):
  def __init__(__self__, *,
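A hedged sketch of restoring an OpenSearch snapshot from Azure blob storage at service creation, assuming the resource-side input type (`OpenSearchOpensearchUserConfigAzureMigrationArgs`) mirrors the data-source result class above; account, container, and token values are placeholders:

import pulumi_aiven as aiven

opensearch = aiven.OpenSearch(
    "example-opensearch",
    project="example-project",
    cloud_name="google-europe-west1",
    plan="business-4",
    service_name="example-os",
    opensearch_user_config=aiven.OpenSearchOpensearchUserConfigArgs(
        azure_migration=aiven.OpenSearchOpensearchUserConfigAzureMigrationArgs(
            account="examplestorageaccount",
            container="os-snapshots",
            base_path="snapshots/os",        # must not start or end with "/"
            snapshot_name="snapshot-2024-07-01",
            sas_token="?sv=...",             # one of key or sas_token
        ),
    ),
)

The `gcs_migration` and `s3_migration` fields added in the same hunk would be used analogously, swapping in the bucket/credentials fields of their respective classes.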
@@ -35407,6 +36538,73 @@ class GetOpenSearchOpensearchUserConfigIndexPatternResult(dict):
  return pulumi.get(self, "sorting_algorithm")


+ @pulumi.output_type
+ class GetOpenSearchOpensearchUserConfigIndexRollupResult(dict):
+ def __init__(__self__, *,
+ rollup_dashboards_enabled: Optional[bool] = None,
+ rollup_enabled: Optional[bool] = None,
+ rollup_search_backoff_count: Optional[int] = None,
+ rollup_search_backoff_millis: Optional[int] = None,
+ rollup_search_search_all_jobs: Optional[bool] = None):
+ """
+ :param bool rollup_dashboards_enabled: Whether rollups are enabled in OpenSearch Dashboards. Defaults to true.
+ :param bool rollup_enabled: Whether the rollup plugin is enabled. Defaults to true.
+ :param int rollup_search_backoff_count: How many retries the plugin should attempt for failed rollup jobs. Defaults to 5.
+ :param int rollup_search_backoff_millis: The backoff time between retries for failed rollup jobs. Defaults to 1000ms.
+ :param bool rollup_search_search_all_jobs: Whether OpenSearch should return all jobs that match all specified search terms. If disabled, OpenSearch returns just one, as opposed to all, of the jobs that match the search terms. Defaults to false.
+ """
+ if rollup_dashboards_enabled is not None:
+ pulumi.set(__self__, "rollup_dashboards_enabled", rollup_dashboards_enabled)
+ if rollup_enabled is not None:
+ pulumi.set(__self__, "rollup_enabled", rollup_enabled)
+ if rollup_search_backoff_count is not None:
+ pulumi.set(__self__, "rollup_search_backoff_count", rollup_search_backoff_count)
+ if rollup_search_backoff_millis is not None:
+ pulumi.set(__self__, "rollup_search_backoff_millis", rollup_search_backoff_millis)
+ if rollup_search_search_all_jobs is not None:
+ pulumi.set(__self__, "rollup_search_search_all_jobs", rollup_search_search_all_jobs)
+
+ @property
+ @pulumi.getter(name="rollupDashboardsEnabled")
+ def rollup_dashboards_enabled(self) -> Optional[bool]:
+ """
+ Whether rollups are enabled in OpenSearch Dashboards. Defaults to true.
+ """
+ return pulumi.get(self, "rollup_dashboards_enabled")
+
+ @property
+ @pulumi.getter(name="rollupEnabled")
+ def rollup_enabled(self) -> Optional[bool]:
+ """
+ Whether the rollup plugin is enabled. Defaults to true.
+ """
+ return pulumi.get(self, "rollup_enabled")
+
+ @property
+ @pulumi.getter(name="rollupSearchBackoffCount")
+ def rollup_search_backoff_count(self) -> Optional[int]:
+ """
+ How many retries the plugin should attempt for failed rollup jobs. Defaults to 5.
+ """
+ return pulumi.get(self, "rollup_search_backoff_count")
+
+ @property
+ @pulumi.getter(name="rollupSearchBackoffMillis")
+ def rollup_search_backoff_millis(self) -> Optional[int]:
+ """
+ The backoff time between retries for failed rollup jobs. Defaults to 1000ms.
+ """
+ return pulumi.get(self, "rollup_search_backoff_millis")
+
+ @property
+ @pulumi.getter(name="rollupSearchSearchAllJobs")
+ def rollup_search_search_all_jobs(self) -> Optional[bool]:
+ """
+ Whether OpenSearch should return all jobs that match all specified search terms. If disabled, OpenSearch returns just one, as opposed to all, of the jobs that match the search terms. Defaults to false.
+ """
+ return pulumi.get(self, "rollup_search_search_all_jobs")
+
+
  @pulumi.output_type
  class GetOpenSearchOpensearchUserConfigIndexTemplateResult(dict):
  def __init__(__self__, *,
35496
36694
  scope: Optional[str] = None,
35497
36695
  subject_key: Optional[str] = None):
35498
36696
  """
35499
- :param str client_id: The ID of the OpenID Connect client configured in your IdP. Required. Example: ``.
35500
- :param str client_secret: The client secret of the OpenID Connect client configured in your IdP. Required. Example: ``.
36697
+ :param str client_id: The ID of the OpenID Connect client configured in your IdP. Required.
36698
+ :param str client_secret: The client secret of the OpenID Connect client configured in your IdP. Required.
35501
36699
  :param str connect_url: The URL of your IdP where the Security plugin can find the OpenID Connect metadata/configuration settings. Example: `https://test-account.okta.com/app/exk491jujcVc83LEX697/sso/saml/metadata`.
35502
36700
  :param bool enabled: Enables or disables OpenID Connect authentication for OpenSearch. When enabled, users can authenticate using OpenID Connect with an Identity Provider. Default: `true`.
35503
36701
  :param str header: HTTP header name of the JWT token. Optional. Default is Authorization. Default: `Authorization`.
@@ -35506,7 +36704,7 @@ class GetOpenSearchOpensearchUserConfigOpenidResult(dict):
35506
36704
  :param int refresh_rate_limit_count: The maximum number of unknown key IDs in the time frame. Default is 10. Optional. Default: `10`.
35507
36705
  :param int refresh_rate_limit_time_window_ms: The time frame to use when checking the maximum number of unknown key IDs, in milliseconds. Optional.Default is 10000 (10 seconds). Default: `10000`.
35508
36706
  :param str roles_key: The key in the JSON payload that stores the user’s roles. The value of this key must be a comma-separated list of roles. Required only if you want to use roles in the JWT. Example: `roles`.
35509
- :param str scope: The scope of the identity token issued by the IdP. Optional. Default is openid profile email address phone. Example: ``.
36707
+ :param str scope: The scope of the identity token issued by the IdP. Optional. Default is openid profile email address phone.
35510
36708
  :param str subject_key: The key in the JSON payload that stores the user’s name. If not defined, the subject registered claim is used. Most IdP providers use the preferred_username claim. Optional. Example: `preferred_username`.
35511
36709
  """
35512
36710
  pulumi.set(__self__, "client_id", client_id)
@@ -35534,7 +36732,7 @@ class GetOpenSearchOpensearchUserConfigOpenidResult(dict):
35534
36732
  @pulumi.getter(name="clientId")
35535
36733
  def client_id(self) -> str:
35536
36734
  """
35537
- The ID of the OpenID Connect client configured in your IdP. Required. Example: ``.
36735
+ The ID of the OpenID Connect client configured in your IdP. Required.
35538
36736
  """
35539
36737
  return pulumi.get(self, "client_id")
35540
36738
 
@@ -35542,7 +36740,7 @@ class GetOpenSearchOpensearchUserConfigOpenidResult(dict):
35542
36740
  @pulumi.getter(name="clientSecret")
35543
36741
  def client_secret(self) -> str:
35544
36742
  """
35545
- The client secret of the OpenID Connect client configured in your IdP. Required. Example: ``.
36743
+ The client secret of the OpenID Connect client configured in your IdP. Required.
35546
36744
  """
35547
36745
  return pulumi.get(self, "client_secret")
35548
36746
 
@@ -35614,7 +36812,7 @@ class GetOpenSearchOpensearchUserConfigOpenidResult(dict):
35614
36812
  @pulumi.getter
35615
36813
  def scope(self) -> Optional[str]:
35616
36814
  """
35617
- The scope of the identity token issued by the IdP. Optional. Default is openid profile email address phone. Example: ``.
36815
+ The scope of the identity token issued by the IdP. Optional. Default is openid profile email address phone.
35618
36816
  """
35619
36817
  return pulumi.get(self, "scope")
35620
36818
 
@@ -35683,7 +36881,7 @@ class GetOpenSearchOpensearchUserConfigOpensearchResult(dict):
35683
36881
  :param str email_sender_name: Sender name placeholder to be used in Opensearch Dashboards and Opensearch keystore. Example: `alert-sender`.
35684
36882
  :param str email_sender_password: Sender password for Opensearch alerts to authenticate with SMTP server. Example: `very-secure-mail-password`.
35685
36883
  :param str email_sender_username: Sender username for Opensearch alerts. Example: `jane@example.com`.
35686
- :param bool enable_security_audit: Enable/Disable security audit. Default: `false`.
36884
+ :param bool enable_security_audit: Enable/Disable security audit.
35687
36885
  :param int http_max_content_length: Maximum content length for HTTP requests to the OpenSearch HTTP API, in bytes.
35688
36886
  :param int http_max_header_size: The max size of allowed headers, in bytes. Example: `8192`.
35689
36887
  :param int http_max_initial_line_length: The max length of an HTTP URL, in bytes. Example: `4096`.
@@ -35695,14 +36893,14 @@ class GetOpenSearchOpensearchUserConfigOpensearchResult(dict):
35695
36893
  :param int indices_query_bool_max_clause_count: Maximum number of clauses Lucene BooleanQuery can have. The default value (1024) is relatively high, and increasing it may cause performance issues. Investigate other approaches first before increasing this value.
35696
36894
  :param int indices_recovery_max_bytes_per_sec: Limits total inbound and outbound recovery traffic for each node. Applies to both peer recoveries as well as snapshot recoveries (i.e., restores from a snapshot). Defaults to 40mb.
35697
36895
  :param int indices_recovery_max_concurrent_file_chunks: Number of file chunks sent in parallel for each recovery. Defaults to 2.
35698
- :param bool ism_enabled: Specifies whether ISM is enabled or not. Default: `true`.
35699
- :param bool ism_history_enabled: Specifies whether audit history is enabled or not. The logs from ISM are automatically indexed to a logs document. Default: `true`.
35700
- :param int ism_history_max_age: The maximum age before rolling over the audit history index in hours. Default: `24`.
35701
- :param int ism_history_max_docs: The maximum number of documents before rolling over the audit history index. Default: `2500000`.
35702
- :param int ism_history_rollover_check_period: The time between rollover checks for the audit history index in hours. Default: `8`.
35703
- :param int ism_history_rollover_retention_period: How long audit history indices are kept in days. Default: `30`.
35704
- :param bool knn_memory_circuit_breaker_enabled: Enable or disable KNN memory circuit breaker. Defaults to true. Default: `true`.
35705
- :param int knn_memory_circuit_breaker_limit: Maximum amount of memory that can be used for KNN index. Defaults to 50% of the JVM heap size. Default: `50`.
36896
+ :param bool ism_enabled: Specifies whether ISM is enabled or not.
36897
+ :param bool ism_history_enabled: Specifies whether audit history is enabled or not. The logs from ISM are automatically indexed to a logs document.
36898
+ :param int ism_history_max_age: The maximum age before rolling over the audit history index in hours. Example: `24`.
36899
+ :param int ism_history_max_docs: The maximum number of documents before rolling over the audit history index. Example: `2500000`.
36900
+ :param int ism_history_rollover_check_period: The time between rollover checks for the audit history index in hours. Example: `8`.
36901
+ :param int ism_history_rollover_retention_period: How long audit history indices are kept in days. Example: `30`.
36902
+ :param bool knn_memory_circuit_breaker_enabled: Enable or disable KNN memory circuit breaker. Defaults to true.
36903
+ :param int knn_memory_circuit_breaker_limit: Maximum amount of memory that can be used for KNN index. Defaults to 50% of the JVM heap size.
35706
36904
  :param bool override_main_response_version: Compatibility mode sets OpenSearch to report its version as 7.10 so clients continue to work. Default is false.
35707
36905
  :param bool plugins_alerting_filter_by_backend_roles: Enable or disable filtering of alerting by backend roles. Requires Security plugin. Defaults to false.
35708
36906
  :param Sequence[str] reindex_remote_whitelists: Whitelisted addresses for reindexing. Changing this value will cause all OpenSearch instances to restart.
@@ -35877,7 +37075,7 @@ class GetOpenSearchOpensearchUserConfigOpensearchResult(dict):
35877
37075
  @pulumi.getter(name="enableSecurityAudit")
35878
37076
  def enable_security_audit(self) -> Optional[bool]:
35879
37077
  """
35880
- Enable/Disable security audit. Default: `false`.
37078
+ Enable/Disable security audit.
35881
37079
  """
35882
37080
  return pulumi.get(self, "enable_security_audit")
35883
37081
 
@@ -35973,7 +37171,7 @@ class GetOpenSearchOpensearchUserConfigOpensearchResult(dict):
35973
37171
  @pulumi.getter(name="ismEnabled")
35974
37172
  def ism_enabled(self) -> Optional[bool]:
35975
37173
  """
35976
- Specifies whether ISM is enabled or not. Default: `true`.
37174
+ Specifies whether ISM is enabled or not.
35977
37175
  """
35978
37176
  return pulumi.get(self, "ism_enabled")
35979
37177
 
@@ -35981,7 +37179,7 @@ class GetOpenSearchOpensearchUserConfigOpensearchResult(dict):
35981
37179
  @pulumi.getter(name="ismHistoryEnabled")
35982
37180
  def ism_history_enabled(self) -> Optional[bool]:
35983
37181
  """
35984
- Specifies whether audit history is enabled or not. The logs from ISM are automatically indexed to a logs document. Default: `true`.
37182
+ Specifies whether audit history is enabled or not. The logs from ISM are automatically indexed to a logs document.
35985
37183
  """
35986
37184
  return pulumi.get(self, "ism_history_enabled")
35987
37185
 
@@ -35989,7 +37187,7 @@ class GetOpenSearchOpensearchUserConfigOpensearchResult(dict):
35989
37187
  @pulumi.getter(name="ismHistoryMaxAge")
35990
37188
  def ism_history_max_age(self) -> Optional[int]:
35991
37189
  """
35992
- The maximum age before rolling over the audit history index in hours. Default: `24`.
37190
+ The maximum age before rolling over the audit history index in hours. Example: `24`.
35993
37191
  """
35994
37192
  return pulumi.get(self, "ism_history_max_age")
35995
37193
 
@@ -35997,7 +37195,7 @@ class GetOpenSearchOpensearchUserConfigOpensearchResult(dict):
35997
37195
  @pulumi.getter(name="ismHistoryMaxDocs")
35998
37196
  def ism_history_max_docs(self) -> Optional[int]:
35999
37197
  """
36000
- The maximum number of documents before rolling over the audit history index. Default: `2500000`.
37198
+ The maximum number of documents before rolling over the audit history index. Example: `2500000`.
36001
37199
  """
36002
37200
  return pulumi.get(self, "ism_history_max_docs")
36003
37201
 
@@ -36005,7 +37203,7 @@ class GetOpenSearchOpensearchUserConfigOpensearchResult(dict):
36005
37203
  @pulumi.getter(name="ismHistoryRolloverCheckPeriod")
36006
37204
  def ism_history_rollover_check_period(self) -> Optional[int]:
36007
37205
  """
36008
- The time between rollover checks for the audit history index in hours. Default: `8`.
37206
+ The time between rollover checks for the audit history index in hours. Example: `8`.
36009
37207
  """
36010
37208
  return pulumi.get(self, "ism_history_rollover_check_period")
36011
37209
 
@@ -36013,7 +37211,7 @@ class GetOpenSearchOpensearchUserConfigOpensearchResult(dict):
36013
37211
  @pulumi.getter(name="ismHistoryRolloverRetentionPeriod")
36014
37212
  def ism_history_rollover_retention_period(self) -> Optional[int]:
36015
37213
  """
36016
- How long audit history indices are kept in days. Default: `30`.
37214
+ How long audit history indices are kept in days. Example: `30`.
36017
37215
  """
36018
37216
  return pulumi.get(self, "ism_history_rollover_retention_period")
36019
37217
 
@@ -36021,7 +37219,7 @@ class GetOpenSearchOpensearchUserConfigOpensearchResult(dict):
36021
37219
  @pulumi.getter(name="knnMemoryCircuitBreakerEnabled")
36022
37220
  def knn_memory_circuit_breaker_enabled(self) -> Optional[bool]:
36023
37221
  """
36024
- Enable or disable KNN memory circuit breaker. Defaults to true. Default: `true`.
37222
+ Enable or disable KNN memory circuit breaker. Defaults to true.
36025
37223
  """
36026
37224
  return pulumi.get(self, "knn_memory_circuit_breaker_enabled")
36027
37225
 
@@ -36029,7 +37227,7 @@ class GetOpenSearchOpensearchUserConfigOpensearchResult(dict):
36029
37227
  @pulumi.getter(name="knnMemoryCircuitBreakerLimit")
36030
37228
  def knn_memory_circuit_breaker_limit(self) -> Optional[int]:
36031
37229
  """
36032
- Maximum amount of memory that can be used for KNN index. Defaults to 50% of the JVM heap size. Default: `50`.
37230
+ Maximum amount of memory that can be used for KNN index. Defaults to 50% of the JVM heap size.
36033
37231
  """
36034
37232
  return pulumi.get(self, "knn_memory_circuit_breaker_limit")
36035
37233
 
@@ -36531,6 +37729,127 @@ class GetOpenSearchOpensearchUserConfigPublicAccessResult(dict):
36531
37729
  return pulumi.get(self, "prometheus")
36532
37730
 
36533
37731
 
37732
+ @pulumi.output_type
37733
+ class GetOpenSearchOpensearchUserConfigS3MigrationResult(dict):
37734
+ def __init__(__self__, *,
37735
+ access_key: str,
37736
+ base_path: str,
37737
+ bucket: str,
37738
+ region: str,
37739
+ secret_key: str,
37740
+ snapshot_name: str,
37741
+ chunk_size: Optional[str] = None,
37742
+ compress: Optional[bool] = None,
37743
+ endpoint: Optional[str] = None,
37744
+ server_side_encryption: Optional[bool] = None):
37745
+ """
37746
+ :param str access_key: AWS Access key.
37747
+ :param str base_path: The path to the repository data within its container. The value of this setting should not start or end with a /.
37748
+ :param str bucket: S3 bucket name.
37749
+ :param str region: S3 region.
37750
+ :param str secret_key: AWS secret key.
37751
+ :param str snapshot_name: The snapshot name to restore from.
37752
+ :param str chunk_size: Big files can be broken down into chunks during snapshotting if needed. Should be the same as for the 3rd party repository.
37753
+ :param bool compress: When set to true metadata files are stored in compressed format.
37754
+ :param str endpoint: The S3 service endpoint to connect to. If you are using an S3-compatible service then you should set this to the service’s endpoint.
37755
+ :param bool server_side_encryption: When set to true files are encrypted on server side.
37756
+ """
37757
+ pulumi.set(__self__, "access_key", access_key)
37758
+ pulumi.set(__self__, "base_path", base_path)
37759
+ pulumi.set(__self__, "bucket", bucket)
37760
+ pulumi.set(__self__, "region", region)
37761
+ pulumi.set(__self__, "secret_key", secret_key)
37762
+ pulumi.set(__self__, "snapshot_name", snapshot_name)
37763
+ if chunk_size is not None:
37764
+ pulumi.set(__self__, "chunk_size", chunk_size)
37765
+ if compress is not None:
37766
+ pulumi.set(__self__, "compress", compress)
37767
+ if endpoint is not None:
37768
+ pulumi.set(__self__, "endpoint", endpoint)
37769
+ if server_side_encryption is not None:
37770
+ pulumi.set(__self__, "server_side_encryption", server_side_encryption)
37771
+
37772
+ @property
37773
+ @pulumi.getter(name="accessKey")
37774
+ def access_key(self) -> str:
37775
+ """
37776
+ AWS Access key.
37777
+ """
37778
+ return pulumi.get(self, "access_key")
37779
+
37780
+ @property
37781
+ @pulumi.getter(name="basePath")
37782
+ def base_path(self) -> str:
37783
+ """
37784
+ The path to the repository data within its container. The value of this setting should not start or end with a /.
37785
+ """
37786
+ return pulumi.get(self, "base_path")
37787
+
37788
+ @property
37789
+ @pulumi.getter
37790
+ def bucket(self) -> str:
37791
+ """
37792
+ S3 bucket name.
37793
+ """
37794
+ return pulumi.get(self, "bucket")
37795
+
37796
+ @property
37797
+ @pulumi.getter
37798
+ def region(self) -> str:
37799
+ """
37800
+ S3 region.
37801
+ """
37802
+ return pulumi.get(self, "region")
37803
+
37804
+ @property
37805
+ @pulumi.getter(name="secretKey")
37806
+ def secret_key(self) -> str:
37807
+ """
37808
+ AWS secret key.
37809
+ """
37810
+ return pulumi.get(self, "secret_key")
37811
+
37812
+ @property
37813
+ @pulumi.getter(name="snapshotName")
37814
+ def snapshot_name(self) -> str:
37815
+ """
37816
+ The snapshot name to restore from.
37817
+ """
37818
+ return pulumi.get(self, "snapshot_name")
37819
+
37820
+ @property
37821
+ @pulumi.getter(name="chunkSize")
37822
+ def chunk_size(self) -> Optional[str]:
37823
+ """
37824
+ Big files can be broken down into chunks during snapshotting if needed. Should be the same as for the 3rd party repository.
37825
+ """
37826
+ return pulumi.get(self, "chunk_size")
37827
+
37828
+ @property
37829
+ @pulumi.getter
37830
+ def compress(self) -> Optional[bool]:
37831
+ """
37832
+ When set to true metadata files are stored in compressed format.
37833
+ """
37834
+ return pulumi.get(self, "compress")
37835
+
37836
+ @property
37837
+ @pulumi.getter
37838
+ def endpoint(self) -> Optional[str]:
37839
+ """
37840
+ The S3 service endpoint to connect to. If you are using an S3-compatible service then you should set this to the service’s endpoint.
37841
+ """
37842
+ return pulumi.get(self, "endpoint")
37843
+
37844
+ @property
37845
+ @pulumi.getter(name="serverSideEncryption")
37846
+ def server_side_encryption(self) -> Optional[bool]:
37847
+ """
37848
+ When set to true files are encrypted on server side.
37849
+ """
37850
+ return pulumi.get(self, "server_side_encryption")
37851
+
37852
+
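Note: `s3_migration` is a new OpenSearch user-config block for restoring a service from a snapshot kept in S3. A hedged sketch of the resource-side input; the `OpenSearchOpensearchUserConfigS3MigrationArgs` name is inferred from the provider's naming convention, and the project, plan, and S3 values are placeholders:

import pulumi
import pulumi_aiven as aiven

config = pulumi.Config()

restored = aiven.OpenSearch(
    "restored-opensearch",
    project="my-project",                        # placeholder
    cloud_name="aws-eu-west-1",                  # placeholder
    plan="business-4",                           # placeholder
    service_name="restored-opensearch",
    opensearch_user_config=aiven.OpenSearchOpensearchUserConfigArgs(
        s3_migration=aiven.OpenSearchOpensearchUserConfigS3MigrationArgs(  # assumed name
            # The six required fields, per the output type above.
            access_key=config.require("s3AccessKey"),
            secret_key=config.require_secret("s3SecretKey"),
            bucket="my-snapshot-bucket",
            region="eu-west-1",
            base_path="snapshots/opensearch",    # must not start or end with "/"
            snapshot_name="snapshot-2024-07-01",
            # Optional fields mirror the snapshot repository settings.
            compress=True,
            server_side_encryption=True,
        ),
    ),
)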
 @pulumi.output_type
 class GetOpenSearchOpensearchUserConfigSamlResult(dict):
     def __init__(__self__, *,
@@ -36813,20 +38132,20 @@ class GetPgPgResult(dict):
                  uris: Sequence[str],
                  user: str):
         """
-        :param str bouncer: Bouncer connection details
-        :param str dbname: Primary PostgreSQL database name
-        :param str host: PostgreSQL master node host IP or name
-        :param int max_connections: Connection limit
-        :param Sequence['GetPgPgParamArgs'] params: PostgreSQL connection parameters
-        :param str password: PostgreSQL admin user password
-        :param int port: PostgreSQL port
-        :param str replica_uri: PostgreSQL replica URI for services with a replica
-        :param str sslmode: PostgreSQL sslmode setting (currently always "require")
-        :param Sequence[str] standby_uris: PostgreSQL standby connection URIs
-        :param Sequence[str] syncing_uris: PostgreSQL syncing connection URIs
-        :param str uri: PostgreSQL master connection URI
-        :param Sequence[str] uris: PostgreSQL master connection URIs
-        :param str user: PostgreSQL admin user name
+        :param str bouncer: PgBouncer connection details for [connection pooling](https://aiven.io/docs/products/postgresql/concepts/pg-connection-pooling).
+        :param str dbname: Primary PostgreSQL database name.
+        :param str host: PostgreSQL primary node host IP or name.
+        :param int max_connections: The [number of allowed connections](https://aiven.io/docs/products/postgresql/reference/pg-connection-limits). Varies based on the service plan.
+        :param Sequence['GetPgPgParamArgs'] params: PostgreSQL connection parameters.
+        :param str password: PostgreSQL admin user password.
+        :param int port: PostgreSQL port.
+        :param str replica_uri: PostgreSQL replica URI for services with a replica.
+        :param str sslmode: PostgreSQL SSL mode setting.
+        :param Sequence[str] standby_uris: PostgreSQL standby connection URIs.
+        :param Sequence[str] syncing_uris: PostgreSQL syncing connection URIs.
+        :param str uri: PostgreSQL primary connection URI.
+        :param Sequence[str] uris: PostgreSQL primary connection URIs.
+        :param str user: PostgreSQL admin user name.
         """
         pulumi.set(__self__, "bouncer", bouncer)
         pulumi.set(__self__, "dbname", dbname)
@@ -36847,7 +38166,7 @@ class GetPgPgResult(dict):
     @pulumi.getter
     def bouncer(self) -> str:
         """
-        Bouncer connection details
+        PgBouncer connection details for [connection pooling](https://aiven.io/docs/products/postgresql/concepts/pg-connection-pooling).
         """
         return pulumi.get(self, "bouncer")
 
@@ -36855,7 +38174,7 @@ class GetPgPgResult(dict):
     @pulumi.getter
     def dbname(self) -> str:
         """
-        Primary PostgreSQL database name
+        Primary PostgreSQL database name.
         """
         return pulumi.get(self, "dbname")
 
@@ -36863,7 +38182,7 @@ class GetPgPgResult(dict):
     @pulumi.getter
     def host(self) -> str:
         """
-        PostgreSQL master node host IP or name
+        PostgreSQL primary node host IP or name.
         """
         return pulumi.get(self, "host")
 
@@ -36871,7 +38190,7 @@ class GetPgPgResult(dict):
     @pulumi.getter(name="maxConnections")
     def max_connections(self) -> int:
         """
-        Connection limit
+        The [number of allowed connections](https://aiven.io/docs/products/postgresql/reference/pg-connection-limits). Varies based on the service plan.
         """
         return pulumi.get(self, "max_connections")
 
@@ -36879,7 +38198,7 @@ class GetPgPgResult(dict):
     @pulumi.getter
     def params(self) -> Sequence['outputs.GetPgPgParamResult']:
         """
-        PostgreSQL connection parameters
+        PostgreSQL connection parameters.
         """
         return pulumi.get(self, "params")
 
@@ -36887,7 +38206,7 @@ class GetPgPgResult(dict):
     @pulumi.getter
     def password(self) -> str:
         """
-        PostgreSQL admin user password
+        PostgreSQL admin user password.
         """
         return pulumi.get(self, "password")
 
@@ -36895,7 +38214,7 @@ class GetPgPgResult(dict):
     @pulumi.getter
     def port(self) -> int:
         """
-        PostgreSQL port
+        PostgreSQL port.
         """
         return pulumi.get(self, "port")
 
@@ -36903,7 +38222,7 @@ class GetPgPgResult(dict):
     @pulumi.getter(name="replicaUri")
     def replica_uri(self) -> str:
         """
-        PostgreSQL replica URI for services with a replica
+        PostgreSQL replica URI for services with a replica.
         """
         return pulumi.get(self, "replica_uri")
 
@@ -36911,7 +38230,7 @@ class GetPgPgResult(dict):
     @pulumi.getter
     def sslmode(self) -> str:
         """
-        PostgreSQL sslmode setting (currently always "require")
+        PostgreSQL SSL mode setting.
         """
         return pulumi.get(self, "sslmode")
 
@@ -36919,7 +38238,7 @@ class GetPgPgResult(dict):
     @pulumi.getter(name="standbyUris")
     def standby_uris(self) -> Sequence[str]:
         """
-        PostgreSQL standby connection URIs
+        PostgreSQL standby connection URIs.
         """
         return pulumi.get(self, "standby_uris")
 
@@ -36927,7 +38246,7 @@ class GetPgPgResult(dict):
     @pulumi.getter(name="syncingUris")
     def syncing_uris(self) -> Sequence[str]:
         """
-        PostgreSQL syncing connection URIs
+        PostgreSQL syncing connection URIs.
         """
         return pulumi.get(self, "syncing_uris")
 
@@ -36935,7 +38254,7 @@ class GetPgPgResult(dict):
     @pulumi.getter
     def uri(self) -> str:
         """
-        PostgreSQL master connection URI
+        PostgreSQL primary connection URI.
         """
         return pulumi.get(self, "uri")
 
@@ -36943,7 +38262,7 @@ class GetPgPgResult(dict):
     @pulumi.getter
     def uris(self) -> Sequence[str]:
         """
-        PostgreSQL master connection URIs
+        PostgreSQL primary connection URIs.
         """
         return pulumi.get(self, "uris")
 
@@ -36951,7 +38270,7 @@ class GetPgPgResult(dict):
     @pulumi.getter
     def user(self) -> str:
         """
-        PostgreSQL admin user name
+        PostgreSQL admin user name.
         """
         return pulumi.get(self, "user")
 
@@ -36966,12 +38285,12 @@ class GetPgPgParamResult(dict):
                  sslmode: str,
                  user: str):
         """
-        :param str database_name: Primary PostgreSQL database name
-        :param str host: PostgreSQL host IP or name
-        :param str password: PostgreSQL admin user password
-        :param int port: PostgreSQL port
-        :param str sslmode: PostgreSQL sslmode setting (currently always "require")
-        :param str user: PostgreSQL admin user name
+        :param str database_name: Primary PostgreSQL database name.
+        :param str host: PostgreSQL host IP or name.
+        :param str password: PostgreSQL admin user password.
+        :param int port: PostgreSQL port.
+        :param str sslmode: PostgreSQL SSL mode setting.
+        :param str user: PostgreSQL admin user name.
         """
         pulumi.set(__self__, "database_name", database_name)
         pulumi.set(__self__, "host", host)
@@ -36984,7 +38303,7 @@ class GetPgPgParamResult(dict):
     @pulumi.getter(name="databaseName")
     def database_name(self) -> str:
         """
-        Primary PostgreSQL database name
+        Primary PostgreSQL database name.
         """
         return pulumi.get(self, "database_name")
 
@@ -36992,7 +38311,7 @@ class GetPgPgParamResult(dict):
     @pulumi.getter
     def host(self) -> str:
         """
-        PostgreSQL host IP or name
+        PostgreSQL host IP or name.
         """
         return pulumi.get(self, "host")
 
@@ -37000,7 +38319,7 @@ class GetPgPgParamResult(dict):
     @pulumi.getter
     def password(self) -> str:
         """
-        PostgreSQL admin user password
+        PostgreSQL admin user password.
         """
         return pulumi.get(self, "password")
 
@@ -37008,7 +38327,7 @@ class GetPgPgParamResult(dict):
     @pulumi.getter
     def port(self) -> int:
         """
-        PostgreSQL port
+        PostgreSQL port.
         """
         return pulumi.get(self, "port")
 
@@ -37016,7 +38335,7 @@ class GetPgPgParamResult(dict):
     @pulumi.getter
     def sslmode(self) -> str:
         """
-        PostgreSQL sslmode setting (currently always "require")
+        PostgreSQL SSL mode setting.
         """
         return pulumi.get(self, "sslmode")
 
@@ -37024,7 +38343,7 @@ class GetPgPgParamResult(dict):
     @pulumi.getter
     def user(self) -> str:
         """
-        PostgreSQL admin user name
+        PostgreSQL admin user name.
         """
         return pulumi.get(self, "user")
 
@@ -37460,6 +38779,7 @@ class GetPgPgUserConfigMigrationResult(dict):
                  port: int,
                  dbname: Optional[str] = None,
                  ignore_dbs: Optional[str] = None,
+                 ignore_roles: Optional[str] = None,
                  method: Optional[str] = None,
                  password: Optional[str] = None,
                  ssl: Optional[bool] = None,
@@ -37469,6 +38789,7 @@ class GetPgPgUserConfigMigrationResult(dict):
         :param int port: Port number of the server where to migrate data from. Example: `1234`.
         :param str dbname: Database name for bootstrapping the initial connection. Example: `defaultdb`.
         :param str ignore_dbs: Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment). Example: `db1,db2`.
+        :param str ignore_roles: Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment). Example: `role1,role2`.
         :param str method: Enum: `dump`, `replication`. The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
         :param str password: Password for authentication with the server where to migrate data from. Example: `jjKk45Nnd`.
         :param bool ssl: The server where to migrate data from is secured with SSL. Default: `true`.
@@ -37480,6 +38801,8 @@ class GetPgPgUserConfigMigrationResult(dict):
             pulumi.set(__self__, "dbname", dbname)
         if ignore_dbs is not None:
             pulumi.set(__self__, "ignore_dbs", ignore_dbs)
+        if ignore_roles is not None:
+            pulumi.set(__self__, "ignore_roles", ignore_roles)
         if method is not None:
             pulumi.set(__self__, "method", method)
         if password is not None:
@@ -37521,6 +38844,14 @@ class GetPgPgUserConfigMigrationResult(dict):
         """
         return pulumi.get(self, "ignore_dbs")
 
+    @property
+    @pulumi.getter(name="ignoreRoles")
+    def ignore_roles(self) -> Optional[str]:
+        """
+        Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment). Example: `role1,role2`.
+        """
+        return pulumi.get(self, "ignore_roles")
+
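Note: `ignore_roles` complements `ignore_dbs` when migrating an external database into Aiven for PostgreSQL; the same option is added to the Redis and Valkey migration configs further down. A hedged sketch of the resource-side input — the `PgPgUserConfig*Args` names are inferred from the provider's naming convention, and the source connection values are placeholders:

import pulumi_aiven as aiven

pg = aiven.Pg(
    "migrated-pg",
    project="my-project",                  # placeholder
    cloud_name="google-europe-west1",      # placeholder
    plan="startup-4",                      # placeholder
    service_name="migrated-pg",
    pg_user_config=aiven.PgPgUserConfigArgs(
        migration=aiven.PgPgUserConfigMigrationArgs(
            host="source-db.example.com",  # server to migrate data from
            port=5432,
            username="migrator",           # hypothetical source credentials
            password="jjKk45Nnd",
            ssl=True,
            method="replication",          # or "dump"
            ignore_dbs="db1,db2",
            ignore_roles="role1,role2",    # new in this release
        ),
    ),
)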
     @property
     @pulumi.getter
     def method(self) -> Optional[str]:
@@ -38418,6 +39749,7 @@ class GetPgPgUserConfigPgbouncerResult(dict):
                  autodb_pool_mode: Optional[str] = None,
                  autodb_pool_size: Optional[int] = None,
                  ignore_startup_parameters: Optional[Sequence[str]] = None,
+                 max_prepared_statements: Optional[int] = None,
                  min_pool_size: Optional[int] = None,
                  server_idle_timeout: Optional[int] = None,
                  server_lifetime: Optional[int] = None,
@@ -38428,6 +39760,7 @@ class GetPgPgUserConfigPgbouncerResult(dict):
         :param str autodb_pool_mode: Enum: `session`, `transaction`, `statement`. PGBouncer pool mode. Default: `transaction`.
         :param int autodb_pool_size: If non-zero then create automatically a pool of that size per user when a pool doesn't exist. Default: `0`.
         :param Sequence[str] ignore_startup_parameters: List of parameters to ignore when given in startup packet.
+        :param int max_prepared_statements: PgBouncer tracks protocol-level named prepared statements related commands sent by the client in transaction and statement pooling modes when max_prepared_statements is set to a non-zero value. Setting it to 0 disables prepared statements. max_prepared_statements defaults to 100, and its maximum is 3000. Default: `100`.
         :param int min_pool_size: Add more server connections to pool if below this number. Improves behavior when usual load comes suddenly back after period of total inactivity. The value is effectively capped at the pool size. Default: `0`.
         :param int server_idle_timeout: If a server connection has been idle more than this many seconds it will be dropped. If 0 then timeout is disabled. (seconds). Default: `600`.
         :param int server_lifetime: The pooler will close an unused server connection that has been connected longer than this. (seconds). Default: `3600`.
@@ -38443,6 +39776,8 @@ class GetPgPgUserConfigPgbouncerResult(dict):
             pulumi.set(__self__, "autodb_pool_size", autodb_pool_size)
         if ignore_startup_parameters is not None:
             pulumi.set(__self__, "ignore_startup_parameters", ignore_startup_parameters)
+        if max_prepared_statements is not None:
+            pulumi.set(__self__, "max_prepared_statements", max_prepared_statements)
         if min_pool_size is not None:
             pulumi.set(__self__, "min_pool_size", min_pool_size)
         if server_idle_timeout is not None:
@@ -38492,6 +39827,14 @@ class GetPgPgUserConfigPgbouncerResult(dict):
         """
         return pulumi.get(self, "ignore_startup_parameters")
 
+    @property
+    @pulumi.getter(name="maxPreparedStatements")
+    def max_prepared_statements(self) -> Optional[int]:
+        """
+        PgBouncer tracks protocol-level named prepared statements related commands sent by the client in transaction and statement pooling modes when max_prepared_statements is set to a non-zero value. Setting it to 0 disables prepared statements. max_prepared_statements defaults to 100, and its maximum is 3000. Default: `100`.
+        """
+        return pulumi.get(self, "max_prepared_statements")
+
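Note: `max_prepared_statements` lets PgBouncer track protocol-level named prepared statements in transaction and statement pooling modes. A hedged sketch — `PgPgUserConfigPgbouncerArgs` is inferred from the naming convention, and the service values are placeholders:

import pulumi_aiven as aiven

pg = aiven.Pg(
    "pooled-pg",
    project="my-project",                  # placeholder
    cloud_name="google-europe-west1",      # placeholder
    plan="business-4",                     # placeholder
    service_name="pooled-pg",
    pg_user_config=aiven.PgPgUserConfigArgs(
        pgbouncer=aiven.PgPgUserConfigPgbouncerArgs(
            autodb_pool_mode="transaction",
            max_prepared_statements=500,   # 0 disables tracking; the maximum is 3000
        ),
    ),
)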
     @property
     @pulumi.getter(name="minPoolSize")
     def min_pool_size(self) -> Optional[int]:
@@ -39288,6 +40631,7 @@ class GetRedisRedisUserConfigMigrationResult(dict):
                  port: int,
                  dbname: Optional[str] = None,
                  ignore_dbs: Optional[str] = None,
+                 ignore_roles: Optional[str] = None,
                  method: Optional[str] = None,
                  password: Optional[str] = None,
                  ssl: Optional[bool] = None,
@@ -39297,6 +40641,7 @@ class GetRedisRedisUserConfigMigrationResult(dict):
         :param int port: Port number of the server where to migrate data from. Example: `1234`.
         :param str dbname: Database name for bootstrapping the initial connection. Example: `defaultdb`.
         :param str ignore_dbs: Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment). Example: `db1,db2`.
+        :param str ignore_roles: Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment). Example: `role1,role2`.
         :param str method: Enum: `dump`, `replication`. The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
         :param str password: Password for authentication with the server where to migrate data from. Example: `jjKk45Nnd`.
         :param bool ssl: The server where to migrate data from is secured with SSL. Default: `true`.
@@ -39308,6 +40653,8 @@ class GetRedisRedisUserConfigMigrationResult(dict):
             pulumi.set(__self__, "dbname", dbname)
         if ignore_dbs is not None:
             pulumi.set(__self__, "ignore_dbs", ignore_dbs)
+        if ignore_roles is not None:
+            pulumi.set(__self__, "ignore_roles", ignore_roles)
         if method is not None:
             pulumi.set(__self__, "method", method)
         if password is not None:
@@ -39349,6 +40696,14 @@ class GetRedisRedisUserConfigMigrationResult(dict):
         """
         return pulumi.get(self, "ignore_dbs")
 
+    @property
+    @pulumi.getter(name="ignoreRoles")
+    def ignore_roles(self) -> Optional[str]:
+        """
+        Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment). Example: `role1,role2`.
+        """
+        return pulumi.get(self, "ignore_roles")
+
     @property
     @pulumi.getter
     def method(self) -> Optional[str]:
@@ -39585,7 +40940,9 @@ class GetServiceIntegrationClickhouseKafkaUserConfigTableResult(dict):
                  max_rows_per_message: Optional[int] = None,
                  num_consumers: Optional[int] = None,
                  poll_max_batch_size: Optional[int] = None,
-                 skip_broken_messages: Optional[int] = None):
+                 poll_max_timeout_ms: Optional[int] = None,
+                 skip_broken_messages: Optional[int] = None,
+                 thread_per_consumer: Optional[bool] = None):
         """
         :param Sequence['GetServiceIntegrationClickhouseKafkaUserConfigTableColumnArgs'] columns: Table columns
         :param str data_format: Enum: `Avro`, `CSV`, `JSONAsString`, `JSONCompactEachRow`, `JSONCompactStringsEachRow`, `JSONEachRow`, `JSONStringsEachRow`, `MsgPack`, `TSKV`, `TSV`, `TabSeparated`, `RawBLOB`, `AvroConfluent`, `Parquet`. Message data format. Default: `JSONEachRow`.
@@ -39599,7 +40956,9 @@ class GetServiceIntegrationClickhouseKafkaUserConfigTableResult(dict):
         :param int max_rows_per_message: The maximum number of rows produced in one kafka message for row-based formats. Default: `1`.
         :param int num_consumers: The number of consumers per table per replica. Default: `1`.
         :param int poll_max_batch_size: Maximum amount of messages to be polled in a single Kafka poll. Default: `0`.
+        :param int poll_max_timeout_ms: Timeout in milliseconds for a single poll from Kafka. Takes the value of the stream_flush_interval_ms server setting by default (500ms). Default: `0`.
         :param int skip_broken_messages: Skip at least this number of broken messages from Kafka topic per block. Default: `0`.
+        :param bool thread_per_consumer: Provide an independent thread for each consumer. All consumers run in the same thread by default. Default: `false`.
         """
         pulumi.set(__self__, "columns", columns)
         pulumi.set(__self__, "data_format", data_format)
@@ -39620,8 +40979,12 @@ class GetServiceIntegrationClickhouseKafkaUserConfigTableResult(dict):
             pulumi.set(__self__, "num_consumers", num_consumers)
         if poll_max_batch_size is not None:
             pulumi.set(__self__, "poll_max_batch_size", poll_max_batch_size)
+        if poll_max_timeout_ms is not None:
+            pulumi.set(__self__, "poll_max_timeout_ms", poll_max_timeout_ms)
         if skip_broken_messages is not None:
             pulumi.set(__self__, "skip_broken_messages", skip_broken_messages)
+        if thread_per_consumer is not None:
+            pulumi.set(__self__, "thread_per_consumer", thread_per_consumer)
 
     @property
     @pulumi.getter
@@ -39719,6 +41082,14 @@ class GetServiceIntegrationClickhouseKafkaUserConfigTableResult(dict):
         """
         return pulumi.get(self, "poll_max_batch_size")
 
+    @property
+    @pulumi.getter(name="pollMaxTimeoutMs")
+    def poll_max_timeout_ms(self) -> Optional[int]:
+        """
+        Timeout in milliseconds for a single poll from Kafka. Takes the value of the stream_flush_interval_ms server setting by default (500ms). Default: `0`.
+        """
+        return pulumi.get(self, "poll_max_timeout_ms")
+
     @property
     @pulumi.getter(name="skipBrokenMessages")
     def skip_broken_messages(self) -> Optional[int]:
@@ -39727,6 +41098,14 @@ class GetServiceIntegrationClickhouseKafkaUserConfigTableResult(dict):
         """
         return pulumi.get(self, "skip_broken_messages")
 
+    @property
+    @pulumi.getter(name="threadPerConsumer")
+    def thread_per_consumer(self) -> Optional[bool]:
+        """
+        Provide an independent thread for each consumer. All consumers run in the same thread by default. Default: `false`.
+        """
+        return pulumi.get(self, "thread_per_consumer")
+
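Note: `poll_max_timeout_ms` and `thread_per_consumer` tune how the ClickHouse table engine consumes from Kafka in a `clickhouse_kafka` integration. A hedged sketch — the `ServiceIntegrationClickhouseKafkaUserConfig*Args` names follow the provider's convention, while the project, service names, and table layout are placeholders:

import pulumi_aiven as aiven

integration = aiven.ServiceIntegration(
    "clickhouse-kafka",
    project="my-project",                        # placeholder
    integration_type="clickhouse_kafka",
    source_service_name="my-kafka",              # placeholder services
    destination_service_name="my-clickhouse",
    clickhouse_kafka_user_config=aiven.ServiceIntegrationClickhouseKafkaUserConfigArgs(
        tables=[aiven.ServiceIntegrationClickhouseKafkaUserConfigTableArgs(
            name="events",
            group_name="clickhouse-ingest",
            data_format="JSONEachRow",
            columns=[aiven.ServiceIntegrationClickhouseKafkaUserConfigTableColumnArgs(
                name="payload",
                type="String",
            )],
            topics=[aiven.ServiceIntegrationClickhouseKafkaUserConfigTableTopicArgs(
                name="events",
            )],
            poll_max_timeout_ms=1000,            # new: cap a single Kafka poll at 1s
            thread_per_consumer=True,            # new: one thread per consumer
        )],
    ),
)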
 
 @pulumi.output_type
 class GetServiceIntegrationClickhouseKafkaUserConfigTableColumnResult(dict):
@@ -43059,6 +44438,7 @@ class GetValkeyValkeyUserConfigMigrationResult(dict):
                  port: int,
                  dbname: Optional[str] = None,
                  ignore_dbs: Optional[str] = None,
+                 ignore_roles: Optional[str] = None,
                  method: Optional[str] = None,
                  password: Optional[str] = None,
                  ssl: Optional[bool] = None,
@@ -43068,6 +44448,7 @@ class GetValkeyValkeyUserConfigMigrationResult(dict):
         :param int port: Port number of the server where to migrate data from. Example: `1234`.
         :param str dbname: Database name for bootstrapping the initial connection. Example: `defaultdb`.
         :param str ignore_dbs: Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment). Example: `db1,db2`.
+        :param str ignore_roles: Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment). Example: `role1,role2`.
         :param str method: Enum: `dump`, `replication`. The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
         :param str password: Password for authentication with the server where to migrate data from. Example: `jjKk45Nnd`.
         :param bool ssl: The server where to migrate data from is secured with SSL. Default: `true`.
@@ -43079,6 +44460,8 @@ class GetValkeyValkeyUserConfigMigrationResult(dict):
             pulumi.set(__self__, "dbname", dbname)
         if ignore_dbs is not None:
             pulumi.set(__self__, "ignore_dbs", ignore_dbs)
+        if ignore_roles is not None:
+            pulumi.set(__self__, "ignore_roles", ignore_roles)
         if method is not None:
             pulumi.set(__self__, "method", method)
         if password is not None:
@@ -43120,6 +44503,14 @@ class GetValkeyValkeyUserConfigMigrationResult(dict):
         """
         return pulumi.get(self, "ignore_dbs")
 
+    @property
+    @pulumi.getter(name="ignoreRoles")
+    def ignore_roles(self) -> Optional[str]:
+        """
+        Comma-separated list of database roles, which should be ignored during migration (supported by PostgreSQL only at the moment). Example: `role1,role2`.
+        """
+        return pulumi.get(self, "ignore_roles")
+
     @property
     @pulumi.getter
     def method(self) -> Optional[str]: