pulumi-aiven 6.28.0a1731501186__py3-none-any.whl → 6.28.0a1731670317__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.

Potentially problematic release.


This version of pulumi-aiven might be problematic.

Files changed (54)
  1. pulumi_aiven/_inputs.py +635 -203
  2. pulumi_aiven/account_team_project.py +7 -7
  3. pulumi_aiven/cassandra.py +8 -8
  4. pulumi_aiven/clickhouse.py +8 -8
  5. pulumi_aiven/dragonfly.py +8 -8
  6. pulumi_aiven/flink.py +8 -8
  7. pulumi_aiven/get_account_team_project.py +1 -1
  8. pulumi_aiven/get_cassanda.py +1 -1
  9. pulumi_aiven/get_cassandra.py +1 -1
  10. pulumi_aiven/get_clickhouse.py +1 -1
  11. pulumi_aiven/get_dragonfly.py +1 -1
  12. pulumi_aiven/get_flink.py +1 -1
  13. pulumi_aiven/get_grafana.py +1 -1
  14. pulumi_aiven/get_kafka.py +1 -1
  15. pulumi_aiven/get_kafka_connect.py +1 -1
  16. pulumi_aiven/get_kafka_mirror_maker.py +1 -1
  17. pulumi_aiven/get_m3_aggregator.py +1 -1
  18. pulumi_aiven/get_m3_db.py +1 -1
  19. pulumi_aiven/get_mirror_maker_replication_flow.py +1 -1
  20. pulumi_aiven/get_my_sql.py +1 -1
  21. pulumi_aiven/get_open_search.py +1 -1
  22. pulumi_aiven/get_pg.py +1 -1
  23. pulumi_aiven/get_project_user.py +1 -1
  24. pulumi_aiven/get_redis.py +1 -1
  25. pulumi_aiven/get_service_component.py +9 -9
  26. pulumi_aiven/get_service_integration.py +17 -17
  27. pulumi_aiven/get_service_integration_endpoint.py +59 -31
  28. pulumi_aiven/get_thanos.py +1 -1
  29. pulumi_aiven/get_valkey.py +1 -1
  30. pulumi_aiven/grafana.py +8 -8
  31. pulumi_aiven/influx_db.py +15 -8
  32. pulumi_aiven/kafka.py +8 -8
  33. pulumi_aiven/kafka_connect.py +8 -8
  34. pulumi_aiven/kafka_mirror_maker.py +8 -8
  35. pulumi_aiven/m3_aggregator.py +8 -8
  36. pulumi_aiven/m3_db.py +8 -8
  37. pulumi_aiven/mirror_maker_replication_flow.py +7 -7
  38. pulumi_aiven/my_sql.py +8 -8
  39. pulumi_aiven/open_search.py +8 -8
  40. pulumi_aiven/organization_group_project.py +17 -9
  41. pulumi_aiven/organization_permission.py +49 -49
  42. pulumi_aiven/outputs.py +883 -263
  43. pulumi_aiven/pg.py +8 -8
  44. pulumi_aiven/project_user.py +15 -7
  45. pulumi_aiven/pulumi-plugin.json +1 -1
  46. pulumi_aiven/redis.py +8 -8
  47. pulumi_aiven/service_integration.py +139 -123
  48. pulumi_aiven/service_integration_endpoint.py +302 -140
  49. pulumi_aiven/thanos.py +8 -8
  50. pulumi_aiven/valkey.py +8 -8
  51. {pulumi_aiven-6.28.0a1731501186.dist-info → pulumi_aiven-6.28.0a1731670317.dist-info}/METADATA +1 -1
  52. {pulumi_aiven-6.28.0a1731501186.dist-info → pulumi_aiven-6.28.0a1731670317.dist-info}/RECORD +54 -54
  53. {pulumi_aiven-6.28.0a1731501186.dist-info → pulumi_aiven-6.28.0a1731670317.dist-info}/WHEEL +0 -0
  54. {pulumi_aiven-6.28.0a1731501186.dist-info → pulumi_aiven-6.28.0a1731670317.dist-info}/top_level.txt +0 -0
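Most of the substantive changes add the new autoscaler and external Prometheus service integration endpoint configs and extend the InfluxDB user config with an influxdb_version field and user_backup flags on the private, privatelink and public access blocks (see the outputs.py diff below). A minimal, hypothetical Python sketch of how the new InfluxDB options might be set is shown here; the resource-level argument names and Args classes are assumed from the generated class names in the diff rather than taken from it, so verify them against the installed SDK:

import pulumi_aiven as aiven

# Hypothetical sketch only: argument and Args class names are assumed from the
# generated output classes in this diff (InfluxDbInfluxdbUserConfig*).
influx = aiven.InfluxDb(
    "example-influxdb",
    project="my-project",                      # placeholder project name
    cloud_name="google-europe-west1",          # placeholder cloud/region
    plan="startup-4",                          # placeholder plan
    service_name="example-influxdb",
    influxdb_user_config=aiven.InfluxDbInfluxdbUserConfigArgs(
        influxdb_version="1.8",                # new in this release: InfluxDB major version
        public_access=aiven.InfluxDbInfluxdbUserConfigPublicAccessArgs(
            influxdb=True,
            user_backup=False,                 # new in this release: user_backup access flag
        ),
    ),
)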
pulumi_aiven/outputs.py CHANGED
@@ -251,6 +251,8 @@ __all__ = [
  'ServiceIntegrationDatadogUserConfigDatadogTag',
  'ServiceIntegrationDatadogUserConfigOpensearch',
  'ServiceIntegrationDatadogUserConfigRedis',
+ 'ServiceIntegrationEndpointAutoscalerUserConfig',
+ 'ServiceIntegrationEndpointAutoscalerUserConfigAutoscaling',
  'ServiceIntegrationEndpointDatadogUserConfig',
  'ServiceIntegrationEndpointDatadogUserConfigDatadogTag',
  'ServiceIntegrationEndpointExternalAwsCloudwatchLogsUserConfig',
@@ -264,6 +266,7 @@ __all__ = [
  'ServiceIntegrationEndpointExternalMysqlUserConfig',
  'ServiceIntegrationEndpointExternalOpensearchLogsUserConfig',
  'ServiceIntegrationEndpointExternalPostgresql',
+ 'ServiceIntegrationEndpointExternalPrometheusUserConfig',
  'ServiceIntegrationEndpointExternalSchemaRegistryUserConfig',
  'ServiceIntegrationEndpointJolokiaUserConfig',
  'ServiceIntegrationEndpointPrometheusUserConfig',
@@ -550,6 +553,8 @@ __all__ = [
  'GetServiceIntegrationDatadogUserConfigDatadogTagResult',
  'GetServiceIntegrationDatadogUserConfigOpensearchResult',
  'GetServiceIntegrationDatadogUserConfigRedisResult',
+ 'GetServiceIntegrationEndpointAutoscalerUserConfigResult',
+ 'GetServiceIntegrationEndpointAutoscalerUserConfigAutoscalingResult',
  'GetServiceIntegrationEndpointDatadogUserConfigResult',
  'GetServiceIntegrationEndpointDatadogUserConfigDatadogTagResult',
  'GetServiceIntegrationEndpointExternalAwsCloudwatchLogsUserConfigResult',
@@ -563,6 +568,7 @@ __all__ = [
  'GetServiceIntegrationEndpointExternalMysqlUserConfigResult',
  'GetServiceIntegrationEndpointExternalOpensearchLogsUserConfigResult',
  'GetServiceIntegrationEndpointExternalPostgresqlResult',
+ 'GetServiceIntegrationEndpointExternalPrometheusUserConfigResult',
  'GetServiceIntegrationEndpointExternalSchemaRegistryUserConfigResult',
  'GetServiceIntegrationEndpointJolokiaUserConfigResult',
  'GetServiceIntegrationEndpointPrometheusUserConfigResult',
@@ -834,7 +840,6 @@ class CassandraCassandraUserConfig(dict):
 
  @property
  @pulumi.getter(name="additionalBackupRegions")
- @_utilities.deprecated("""This property is deprecated.""")
  def additional_backup_regions(self) -> Optional[str]:
  """
  Additional Cloud Regions for Backup Replication.
@@ -2379,7 +2384,7 @@ class DragonflyDragonflyUserConfig(dict):
  static_ips: Optional[bool] = None):
  """
  :param bool cache_mode: Evict entries when getting close to maxmemory limit. Default: `false`.
- :param str dragonfly_persistence: Enum: `off`, `rdb`, `dfs`. When persistence is `rdb` or `dfs`, Dragonfly does RDB or DFS dumps every 10 minutes. Dumps are done according to the backup schedule for backup purposes. When persistence is `off`, no RDB/DFS dumps or backups are done, so data can be lost at any moment if the service is restarted for any reason, or if the service is powered off. Also, the service can't be forked.
+ :param str dragonfly_persistence: Enum: `dfs`, `off`, `rdb`. When persistence is `rdb` or `dfs`, Dragonfly does RDB or DFS dumps every 10 minutes. Dumps are done according to the backup schedule for backup purposes. When persistence is `off`, no RDB/DFS dumps or backups are done, so data can be lost at any moment if the service is restarted for any reason, or if the service is powered off. Also, the service can't be forked.
  :param bool dragonfly_ssl: Require SSL to access Dragonfly. Default: `true`.
  :param Sequence['DragonflyDragonflyUserConfigIpFilterObjectArgs'] ip_filter_objects: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`
  :param Sequence[str] ip_filter_strings: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`.
@@ -2437,7 +2442,7 @@ class DragonflyDragonflyUserConfig(dict):
  @pulumi.getter(name="dragonflyPersistence")
  def dragonfly_persistence(self) -> Optional[str]:
  """
- Enum: `off`, `rdb`, `dfs`. When persistence is `rdb` or `dfs`, Dragonfly does RDB or DFS dumps every 10 minutes. Dumps are done according to the backup schedule for backup purposes. When persistence is `off`, no RDB/DFS dumps or backups are done, so data can be lost at any moment if the service is restarted for any reason, or if the service is powered off. Also, the service can't be forked.
+ Enum: `dfs`, `off`, `rdb`. When persistence is `rdb` or `dfs`, Dragonfly does RDB or DFS dumps every 10 minutes. Dumps are done according to the backup schedule for backup purposes. When persistence is `off`, no RDB/DFS dumps or backups are done, so data can be lost at any moment if the service is restarted for any reason, or if the service is powered off. Also, the service can't be forked.
  """
  return pulumi.get(self, "dragonfly_persistence")
 
@@ -3808,10 +3813,10 @@ class GrafanaGrafanaUserConfig(dict):
  wal: Optional[bool] = None):
  """
  :param str additional_backup_regions: Additional Cloud Regions for Backup Replication.
- :param bool alerting_enabled: Enable or disable Grafana legacy alerting functionality. This should not be enabled with unified*alerting*enabled.
+ :param bool alerting_enabled: Setting has no effect with Grafana 11 and onward. Enable or disable Grafana legacy alerting functionality. This should not be enabled with unified*alerting*enabled.
  :param str alerting_error_or_timeout: Enum: `alerting`, `keep_state`. Default error or timeout setting for new alerting rules.
  :param int alerting_max_annotations_to_keep: Max number of alert annotations that Grafana stores. 0 (default) keeps all alert annotations. Example: `0`.
- :param str alerting_nodata_or_nullvalues: Enum: `alerting`, `no_data`, `keep_state`, `ok`. Default value for 'no data or null values' for new alerting rules.
+ :param str alerting_nodata_or_nullvalues: Enum: `alerting`, `keep_state`, `no_data`, `ok`. Default value for 'no data or null values' for new alerting rules.
  :param bool allow_embedding: Allow embedding Grafana dashboards with iframe/frame/object/embed tags. Disabled by default to limit impact of clickjacking.
  :param 'GrafanaGrafanaUserConfigAuthAzureadArgs' auth_azuread: Azure AD OAuth integration
  :param bool auth_basic_enabled: Enable or disable basic authentication form, used by Grafana built-in login.
@@ -3819,9 +3824,9 @@ class GrafanaGrafanaUserConfig(dict):
  :param 'GrafanaGrafanaUserConfigAuthGithubArgs' auth_github: Github Auth integration
  :param 'GrafanaGrafanaUserConfigAuthGitlabArgs' auth_gitlab: GitLab Auth integration
  :param 'GrafanaGrafanaUserConfigAuthGoogleArgs' auth_google: Google Auth integration
- :param str cookie_samesite: Enum: `lax`, `strict`, `none`. Cookie SameSite attribute: `strict` prevents sending cookie for cross-site requests, effectively disabling direct linking from other sites to Grafana. `lax` is the default value.
+ :param str cookie_samesite: Enum: `lax`, `none`, `strict`. Cookie SameSite attribute: `strict` prevents sending cookie for cross-site requests, effectively disabling direct linking from other sites to Grafana. `lax` is the default value.
  :param str custom_domain: Serve the web frontend using a custom CNAME pointing to the Aiven DNS name. Example: `grafana.example.org`.
- :param bool dashboard_previews_enabled: This feature is new in Grafana 9 and is quite resource intensive. It may cause low-end plans to work more slowly while the dashboard previews are rendering.
+ :param bool dashboard_previews_enabled: Enable browsing of dashboards in grid (pictures) mode. This feature is new in Grafana 9 and is quite resource intensive. It may cause low-end plans to work more slowly while the dashboard previews are rendering.
  :param str dashboards_min_refresh_interval: Signed sequence of decimal numbers, followed by a unit suffix (ms, s, m, h, d), e.g. 30s, 1h. Example: `5s`.
  :param int dashboards_versions_to_keep: Dashboard versions to keep per dashboard. Example: `20`.
  :param bool dataproxy_send_user_header: Send `X-Grafana-User` header to data source.
@@ -3834,7 +3839,7 @@ class GrafanaGrafanaUserConfig(dict):
  :param Sequence['GrafanaGrafanaUserConfigIpFilterObjectArgs'] ip_filter_objects: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`
  :param Sequence[str] ip_filter_strings: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`.
  :param Sequence[str] ip_filters: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`.
- :param bool metrics_enabled: Enable Grafana /metrics endpoint.
+ :param bool metrics_enabled: Enable Grafana's /metrics endpoint.
  :param bool oauth_allow_insecure_email_lookup: Enforce user lookup based on email instead of the unique ID provided by the IdP.
  :param 'GrafanaGrafanaUserConfigPrivateAccessArgs' private_access: Allow access to selected service ports from private networks
  :param 'GrafanaGrafanaUserConfigPrivatelinkAccessArgs' privatelink_access: Allow access to selected service components through Privatelink
@@ -3845,9 +3850,9 @@ class GrafanaGrafanaUserConfig(dict):
  :param str service_to_fork_from: Name of another service to fork from. This has effect only when a new service is being created. Example: `anotherservicename`.
  :param 'GrafanaGrafanaUserConfigSmtpServerArgs' smtp_server: SMTP server settings
  :param bool static_ips: Use static public IP addresses.
- :param bool unified_alerting_enabled: Enable or disable Grafana unified alerting functionality. By default this is enabled and any legacy alerts will be migrated on upgrade to Grafana 9+. To stay on legacy alerting, set unified*alerting*enabled to false and alerting_enabled to true. See https://grafana.com/docs/grafana/latest/alerting/set-up/migrating-alerts/ for more details.
+ :param bool unified_alerting_enabled: Enable or disable Grafana unified alerting functionality. By default this is enabled and any legacy alerts will be migrated on upgrade to Grafana 9+. To stay on legacy alerting, set unified*alerting*enabled to false and alerting_enabled to true. See https://grafana.com/docs/grafana/latest/alerting/ for more details.
  :param bool user_auto_assign_org: Auto-assign new users on signup to main organization. Defaults to false.
- :param str user_auto_assign_org_role: Enum: `Viewer`, `Admin`, `Editor`. Set role for new signups. Defaults to Viewer.
+ :param str user_auto_assign_org_role: Enum: `Admin`, `Editor`, `Viewer`. Set role for new signups. Defaults to Viewer.
  :param bool viewers_can_edit: Users with view-only permission can edit but not save dashboards.
  :param bool wal: Setting to enable/disable Write-Ahead Logging. The default value is false (disabled).
  """
@@ -3950,7 +3955,7 @@ class GrafanaGrafanaUserConfig(dict):
  @pulumi.getter(name="alertingEnabled")
  def alerting_enabled(self) -> Optional[bool]:
  """
- Enable or disable Grafana legacy alerting functionality. This should not be enabled with unified*alerting*enabled.
+ Setting has no effect with Grafana 11 and onward. Enable or disable Grafana legacy alerting functionality. This should not be enabled with unified*alerting*enabled.
  """
  return pulumi.get(self, "alerting_enabled")
 
@@ -3974,7 +3979,7 @@ class GrafanaGrafanaUserConfig(dict):
  @pulumi.getter(name="alertingNodataOrNullvalues")
  def alerting_nodata_or_nullvalues(self) -> Optional[str]:
  """
- Enum: `alerting`, `no_data`, `keep_state`, `ok`. Default value for 'no data or null values' for new alerting rules.
+ Enum: `alerting`, `keep_state`, `no_data`, `ok`. Default value for 'no data or null values' for new alerting rules.
  """
  return pulumi.get(self, "alerting_nodata_or_nullvalues")
 
@@ -4038,7 +4043,7 @@ class GrafanaGrafanaUserConfig(dict):
  @pulumi.getter(name="cookieSamesite")
  def cookie_samesite(self) -> Optional[str]:
  """
- Enum: `lax`, `strict`, `none`. Cookie SameSite attribute: `strict` prevents sending cookie for cross-site requests, effectively disabling direct linking from other sites to Grafana. `lax` is the default value.
+ Enum: `lax`, `none`, `strict`. Cookie SameSite attribute: `strict` prevents sending cookie for cross-site requests, effectively disabling direct linking from other sites to Grafana. `lax` is the default value.
  """
  return pulumi.get(self, "cookie_samesite")
 
@@ -4054,7 +4059,7 @@ class GrafanaGrafanaUserConfig(dict):
  @pulumi.getter(name="dashboardPreviewsEnabled")
  def dashboard_previews_enabled(self) -> Optional[bool]:
  """
- This feature is new in Grafana 9 and is quite resource intensive. It may cause low-end plans to work more slowly while the dashboard previews are rendering.
+ Enable browsing of dashboards in grid (pictures) mode. This feature is new in Grafana 9 and is quite resource intensive. It may cause low-end plans to work more slowly while the dashboard previews are rendering.
  """
  return pulumi.get(self, "dashboard_previews_enabled")
 
@@ -4159,7 +4164,7 @@ class GrafanaGrafanaUserConfig(dict):
  @pulumi.getter(name="metricsEnabled")
  def metrics_enabled(self) -> Optional[bool]:
  """
- Enable Grafana /metrics endpoint.
+ Enable Grafana's /metrics endpoint.
  """
  return pulumi.get(self, "metrics_enabled")
 
@@ -4247,7 +4252,7 @@ class GrafanaGrafanaUserConfig(dict):
  @pulumi.getter(name="unifiedAlertingEnabled")
  def unified_alerting_enabled(self) -> Optional[bool]:
  """
- Enable or disable Grafana unified alerting functionality. By default this is enabled and any legacy alerts will be migrated on upgrade to Grafana 9+. To stay on legacy alerting, set unified*alerting*enabled to false and alerting_enabled to true. See https://grafana.com/docs/grafana/latest/alerting/set-up/migrating-alerts/ for more details.
+ Enable or disable Grafana unified alerting functionality. By default this is enabled and any legacy alerts will be migrated on upgrade to Grafana 9+. To stay on legacy alerting, set unified*alerting*enabled to false and alerting_enabled to true. See https://grafana.com/docs/grafana/latest/alerting/ for more details.
  """
  return pulumi.get(self, "unified_alerting_enabled")
 
@@ -4263,7 +4268,7 @@ class GrafanaGrafanaUserConfig(dict):
  @pulumi.getter(name="userAutoAssignOrgRole")
  def user_auto_assign_org_role(self) -> Optional[str]:
  """
- Enum: `Viewer`, `Admin`, `Editor`. Set role for new signups. Defaults to Viewer.
+ Enum: `Admin`, `Editor`, `Viewer`. Set role for new signups. Defaults to Viewer.
  """
  return pulumi.get(self, "user_auto_assign_org_role")
 
@@ -4743,9 +4748,9 @@ class GrafanaGrafanaUserConfigAuthGitlab(dict):
  :param str client_id: Client ID from provider. Example: `b1ba0bf54a4c2c0a1c29`.
  :param str client_secret: Client secret from provider. Example: `bfa6gea4f129076761dcba8ce5e1e406bd83af7b`.
  :param bool allow_sign_up: Automatically sign-up users on successful sign-in.
- :param str api_url: API URL. This only needs to be set when using self hosted GitLab. Example: `https://gitlab.com/api/v4`.
- :param str auth_url: Authorization URL. This only needs to be set when using self hosted GitLab. Example: `https://gitlab.com/oauth/authorize`.
- :param str token_url: Token URL. This only needs to be set when using self hosted GitLab. Example: `https://gitlab.com/oauth/token`.
+ :param str api_url: This only needs to be set when using self hosted GitLab. Example: `https://gitlab.com/api/v4`.
+ :param str auth_url: This only needs to be set when using self hosted GitLab. Example: `https://gitlab.com/oauth/authorize`.
+ :param str token_url: This only needs to be set when using self hosted GitLab. Example: `https://gitlab.com/oauth/token`.
  """
  pulumi.set(__self__, "allowed_groups", allowed_groups)
  pulumi.set(__self__, "client_id", client_id)
@@ -4795,7 +4800,7 @@ class GrafanaGrafanaUserConfigAuthGitlab(dict):
  @pulumi.getter(name="apiUrl")
  def api_url(self) -> Optional[str]:
  """
- API URL. This only needs to be set when using self hosted GitLab. Example: `https://gitlab.com/api/v4`.
+ This only needs to be set when using self hosted GitLab. Example: `https://gitlab.com/api/v4`.
  """
  return pulumi.get(self, "api_url")
 
@@ -4803,7 +4808,7 @@ class GrafanaGrafanaUserConfigAuthGitlab(dict):
  @pulumi.getter(name="authUrl")
  def auth_url(self) -> Optional[str]:
  """
- Authorization URL. This only needs to be set when using self hosted GitLab. Example: `https://gitlab.com/oauth/authorize`.
+ This only needs to be set when using self hosted GitLab. Example: `https://gitlab.com/oauth/authorize`.
  """
  return pulumi.get(self, "auth_url")
 
@@ -4811,7 +4816,7 @@ class GrafanaGrafanaUserConfigAuthGitlab(dict):
  @pulumi.getter(name="tokenUrl")
  def token_url(self) -> Optional[str]:
  """
- Token URL. This only needs to be set when using self hosted GitLab. Example: `https://gitlab.com/oauth/token`.
+ This only needs to be set when using self hosted GitLab. Example: `https://gitlab.com/oauth/token`.
  """
  return pulumi.get(self, "token_url")
 
@@ -5056,7 +5061,7 @@ class GrafanaGrafanaUserConfigExternalImageStorage(dict):
  """
  :param str access_key: S3 access key. Requires permissions to the S3 bucket for the s3:PutObject and s3:PutObjectAcl actions. Example: `AAAAAAAAAAAAAAAAAAA`.
  :param str bucket_url: Bucket URL for S3. Example: `https://grafana.s3-ap-southeast-2.amazonaws.com/`.
- :param str provider: Enum: `s3`. Provider type.
+ :param str provider: Enum: `s3`. External image store provider.
  :param str secret_key: S3 secret key. Example: `AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA`.
  """
  pulumi.set(__self__, "access_key", access_key)
@@ -5084,7 +5089,7 @@ class GrafanaGrafanaUserConfigExternalImageStorage(dict):
  @pulumi.getter
  def provider(self) -> str:
  """
- Enum: `s3`. Provider type.
+ Enum: `s3`. External image store provider.
  """
  return pulumi.get(self, "provider")
 
@@ -5225,7 +5230,7 @@ class GrafanaGrafanaUserConfigSmtpServer(dict):
  :param str from_name: Name used in outgoing emails, defaults to Grafana.
  :param str password: Password for SMTP authentication. Example: `ein0eemeev5eeth3Ahfu`.
  :param bool skip_verify: Skip verifying server certificate. Defaults to false.
- :param str starttls_policy: Enum: `OpportunisticStartTLS`, `MandatoryStartTLS`, `NoStartTLS`. Either OpportunisticStartTLS, MandatoryStartTLS or NoStartTLS. Default is OpportunisticStartTLS.
+ :param str starttls_policy: Enum: `MandatoryStartTLS`, `NoStartTLS`, `OpportunisticStartTLS`. Either OpportunisticStartTLS, MandatoryStartTLS or NoStartTLS. Default is OpportunisticStartTLS.
  :param str username: Username for SMTP authentication. Example: `smtpuser`.
  """
  pulumi.set(__self__, "from_address", from_address)
@@ -5294,7 +5299,7 @@ class GrafanaGrafanaUserConfigSmtpServer(dict):
  @pulumi.getter(name="starttlsPolicy")
  def starttls_policy(self) -> Optional[str]:
  """
- Enum: `OpportunisticStartTLS`, `MandatoryStartTLS`, `NoStartTLS`. Either OpportunisticStartTLS, MandatoryStartTLS or NoStartTLS. Default is OpportunisticStartTLS.
+ Enum: `MandatoryStartTLS`, `NoStartTLS`, `OpportunisticStartTLS`. Either OpportunisticStartTLS, MandatoryStartTLS or NoStartTLS. Default is OpportunisticStartTLS.
  """
  return pulumi.get(self, "starttls_policy")
 
@@ -5605,6 +5610,8 @@ class InfluxDbInfluxdbUserConfig(dict):
  suggest = "additional_backup_regions"
  elif key == "customDomain":
  suggest = "custom_domain"
+ elif key == "influxdbVersion":
+ suggest = "influxdb_version"
  elif key == "ipFilterObjects":
  suggest = "ip_filter_objects"
  elif key == "ipFilterStrings":
@@ -5643,6 +5650,7 @@ class InfluxDbInfluxdbUserConfig(dict):
  additional_backup_regions: Optional[str] = None,
  custom_domain: Optional[str] = None,
  influxdb: Optional['outputs.InfluxDbInfluxdbUserConfigInfluxdb'] = None,
+ influxdb_version: Optional[str] = None,
  ip_filter_objects: Optional[Sequence['outputs.InfluxDbInfluxdbUserConfigIpFilterObject']] = None,
  ip_filter_strings: Optional[Sequence[str]] = None,
  ip_filters: Optional[Sequence[str]] = None,
@@ -5658,6 +5666,7 @@ class InfluxDbInfluxdbUserConfig(dict):
  :param str additional_backup_regions: Additional Cloud Regions for Backup Replication.
  :param str custom_domain: Serve the web frontend using a custom CNAME pointing to the Aiven DNS name. Example: `grafana.example.org`.
  :param 'InfluxDbInfluxdbUserConfigInfluxdbArgs' influxdb: influxdb.conf configuration values
+ :param str influxdb_version: Enum: `1.8`, and newer. InfluxDB major version. Default: `1.8`.
  :param Sequence['InfluxDbInfluxdbUserConfigIpFilterObjectArgs'] ip_filter_objects: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`
  :param Sequence[str] ip_filter_strings: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`.
  :param Sequence[str] ip_filters: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`.
@@ -5676,6 +5685,8 @@ class InfluxDbInfluxdbUserConfig(dict):
  pulumi.set(__self__, "custom_domain", custom_domain)
  if influxdb is not None:
  pulumi.set(__self__, "influxdb", influxdb)
+ if influxdb_version is not None:
+ pulumi.set(__self__, "influxdb_version", influxdb_version)
  if ip_filter_objects is not None:
  pulumi.set(__self__, "ip_filter_objects", ip_filter_objects)
  if ip_filter_strings is not None:
@@ -5724,6 +5735,14 @@ class InfluxDbInfluxdbUserConfig(dict):
  """
  return pulumi.get(self, "influxdb")
 
+ @property
+ @pulumi.getter(name="influxdbVersion")
+ def influxdb_version(self) -> Optional[str]:
+ """
+ Enum: `1.8`, and newer. InfluxDB major version. Default: `1.8`.
+ """
+ return pulumi.get(self, "influxdb_version")
+
  @property
  @pulumi.getter(name="ipFilterObjects")
  def ip_filter_objects(self) -> Optional[Sequence['outputs.InfluxDbInfluxdbUserConfigIpFilterObject']]:
@@ -5966,13 +5985,34 @@ class InfluxDbInfluxdbUserConfigIpFilterObject(dict):
 
  @pulumi.output_type
  class InfluxDbInfluxdbUserConfigPrivateAccess(dict):
+ @staticmethod
+ def __key_warning(key: str):
+ suggest = None
+ if key == "userBackup":
+ suggest = "user_backup"
+
+ if suggest:
+ pulumi.log.warn(f"Key '{key}' not found in InfluxDbInfluxdbUserConfigPrivateAccess. Access the value via the '{suggest}' property getter instead.")
+
+ def __getitem__(self, key: str) -> Any:
+ InfluxDbInfluxdbUserConfigPrivateAccess.__key_warning(key)
+ return super().__getitem__(key)
+
+ def get(self, key: str, default = None) -> Any:
+ InfluxDbInfluxdbUserConfigPrivateAccess.__key_warning(key)
+ return super().get(key, default)
+
  def __init__(__self__, *,
- influxdb: Optional[bool] = None):
+ influxdb: Optional[bool] = None,
+ user_backup: Optional[bool] = None):
  """
  :param bool influxdb: Allow clients to connect to influxdb with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
+ :param bool user_backup: Allow clients to connect to user_backup with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
  """
  if influxdb is not None:
  pulumi.set(__self__, "influxdb", influxdb)
+ if user_backup is not None:
+ pulumi.set(__self__, "user_backup", user_backup)
 
  @property
  @pulumi.getter
@@ -5982,16 +6022,45 @@ class InfluxDbInfluxdbUserConfigPrivateAccess(dict):
  """
  return pulumi.get(self, "influxdb")
 
+ @property
+ @pulumi.getter(name="userBackup")
+ def user_backup(self) -> Optional[bool]:
+ """
+ Allow clients to connect to user_backup with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
+ """
+ return pulumi.get(self, "user_backup")
+
 
  @pulumi.output_type
  class InfluxDbInfluxdbUserConfigPrivatelinkAccess(dict):
+ @staticmethod
+ def __key_warning(key: str):
+ suggest = None
+ if key == "userBackup":
+ suggest = "user_backup"
+
+ if suggest:
+ pulumi.log.warn(f"Key '{key}' not found in InfluxDbInfluxdbUserConfigPrivatelinkAccess. Access the value via the '{suggest}' property getter instead.")
+
+ def __getitem__(self, key: str) -> Any:
+ InfluxDbInfluxdbUserConfigPrivatelinkAccess.__key_warning(key)
+ return super().__getitem__(key)
+
+ def get(self, key: str, default = None) -> Any:
+ InfluxDbInfluxdbUserConfigPrivatelinkAccess.__key_warning(key)
+ return super().get(key, default)
+
  def __init__(__self__, *,
- influxdb: Optional[bool] = None):
+ influxdb: Optional[bool] = None,
+ user_backup: Optional[bool] = None):
  """
  :param bool influxdb: Enable influxdb.
+ :param bool user_backup: Enable user_backup.
  """
  if influxdb is not None:
  pulumi.set(__self__, "influxdb", influxdb)
+ if user_backup is not None:
+ pulumi.set(__self__, "user_backup", user_backup)
 
  @property
  @pulumi.getter
@@ -6001,16 +6070,45 @@ class InfluxDbInfluxdbUserConfigPrivatelinkAccess(dict):
  """
  return pulumi.get(self, "influxdb")
 
+ @property
+ @pulumi.getter(name="userBackup")
+ def user_backup(self) -> Optional[bool]:
+ """
+ Enable user_backup.
+ """
+ return pulumi.get(self, "user_backup")
+
 
  @pulumi.output_type
  class InfluxDbInfluxdbUserConfigPublicAccess(dict):
+ @staticmethod
+ def __key_warning(key: str):
+ suggest = None
+ if key == "userBackup":
+ suggest = "user_backup"
+
+ if suggest:
+ pulumi.log.warn(f"Key '{key}' not found in InfluxDbInfluxdbUserConfigPublicAccess. Access the value via the '{suggest}' property getter instead.")
+
+ def __getitem__(self, key: str) -> Any:
+ InfluxDbInfluxdbUserConfigPublicAccess.__key_warning(key)
+ return super().__getitem__(key)
+
+ def get(self, key: str, default = None) -> Any:
+ InfluxDbInfluxdbUserConfigPublicAccess.__key_warning(key)
+ return super().get(key, default)
+
  def __init__(__self__, *,
- influxdb: Optional[bool] = None):
+ influxdb: Optional[bool] = None,
+ user_backup: Optional[bool] = None):
  """
  :param bool influxdb: Allow clients to connect to influxdb from the public internet for service nodes that are in a project VPC or another type of private network.
+ :param bool user_backup: Allow clients to connect to user_backup from the public internet for service nodes that are in a project VPC or another type of private network.
  """
  if influxdb is not None:
  pulumi.set(__self__, "influxdb", influxdb)
+ if user_backup is not None:
+ pulumi.set(__self__, "user_backup", user_backup)
 
  @property
  @pulumi.getter
@@ -6020,6 +6118,14 @@ class InfluxDbInfluxdbUserConfigPublicAccess(dict):
  """
  return pulumi.get(self, "influxdb")
 
+ @property
+ @pulumi.getter(name="userBackup")
+ def user_backup(self) -> Optional[bool]:
+ """
+ Allow clients to connect to user_backup from the public internet for service nodes that are in a project VPC or another type of private network.
+ """
+ return pulumi.get(self, "user_backup")
+
 
  @pulumi.output_type
  class InfluxDbServiceIntegration(dict):
@@ -6631,10 +6737,10 @@ class KafkaConnectKafkaConnectUserConfigKafkaConnect(dict):
  scheduled_rebalance_max_delay_ms: Optional[int] = None,
  session_timeout_ms: Optional[int] = None):
  """
- :param str connector_client_config_override_policy: Enum: `None`, `All`. Defines what client configurations can be overridden by the connector. Default is None.
+ :param str connector_client_config_override_policy: Enum: `All`, `None`. Defines what client configurations can be overridden by the connector. Default is None.
  :param str consumer_auto_offset_reset: Enum: `earliest`, `latest`. What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
  :param int consumer_fetch_max_bytes: Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not a absolute maximum. Example: `52428800`.
- :param str consumer_isolation_level: Enum: `read_uncommitted`, `read_committed`. Transaction read isolation level. read*uncommitted is the default, but read*committed can be used if consume-exactly-once behavior is desired.
+ :param str consumer_isolation_level: Enum: `read_committed`, `read_uncommitted`. Transaction read isolation level. read*uncommitted is the default, but read*committed can be used if consume-exactly-once behavior is desired.
  :param int consumer_max_partition_fetch_bytes: Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress. Example: `1048576`.
  :param int consumer_max_poll_interval_ms: The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).
  :param int consumer_max_poll_records: The maximum number of records returned in a single call to poll() (defaults to 500).
@@ -6642,7 +6748,7 @@ class KafkaConnectKafkaConnectUserConfigKafkaConnect(dict):
  :param int offset_flush_timeout_ms: Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).
  :param int producer_batch_size: This setting gives the upper bound of the batch size to be sent. If there are fewer than this many bytes accumulated for this partition, the producer will `linger` for the linger.ms time waiting for more records to show up. A batch size of zero will disable batching entirely (defaults to 16384).
  :param int producer_buffer_memory: The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432).
- :param str producer_compression_type: Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
+ :param str producer_compression_type: Enum: `gzip`, `lz4`, `none`, `snappy`, `zstd`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
  :param int producer_linger_ms: This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting, however if there are fewer than this many bytes accumulated for this partition the producer will `linger` for the specified time waiting for more records to show up. Defaults to 0.
  :param int producer_max_request_size: This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests. Example: `1048576`.
  :param int scheduled_rebalance_max_delay_ms: The maximum delay that is scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. Defaults to 5 minutes.
@@ -6685,7 +6791,7 @@ class KafkaConnectKafkaConnectUserConfigKafkaConnect(dict):
  @pulumi.getter(name="connectorClientConfigOverridePolicy")
  def connector_client_config_override_policy(self) -> Optional[str]:
  """
- Enum: `None`, `All`. Defines what client configurations can be overridden by the connector. Default is None.
+ Enum: `All`, `None`. Defines what client configurations can be overridden by the connector. Default is None.
  """
  return pulumi.get(self, "connector_client_config_override_policy")
 
@@ -6709,7 +6815,7 @@ class KafkaConnectKafkaConnectUserConfigKafkaConnect(dict):
  @pulumi.getter(name="consumerIsolationLevel")
  def consumer_isolation_level(self) -> Optional[str]:
  """
- Enum: `read_uncommitted`, `read_committed`. Transaction read isolation level. read*uncommitted is the default, but read*committed can be used if consume-exactly-once behavior is desired.
+ Enum: `read_committed`, `read_uncommitted`. Transaction read isolation level. read*uncommitted is the default, but read*committed can be used if consume-exactly-once behavior is desired.
  """
  return pulumi.get(self, "consumer_isolation_level")
 
@@ -6773,7 +6879,7 @@ class KafkaConnectKafkaConnectUserConfigKafkaConnect(dict):
  @pulumi.getter(name="producerCompressionType")
  def producer_compression_type(self) -> Optional[str]:
  """
- Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
+ Enum: `gzip`, `lz4`, `none`, `snappy`, `zstd`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
  """
  return pulumi.get(self, "producer_compression_type")
 
@@ -8001,7 +8107,7 @@ class KafkaKafkaUserConfigKafka(dict):
  transaction_state_log_segment_bytes: Optional[int] = None):
  """
  :param bool auto_create_topics_enable: Enable auto-creation of topics. (Default: true).
- :param str compression_type: Enum: `gzip`, `snappy`, `lz4`, `zstd`, `uncompressed`, `producer`. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `uncompressed` which is equivalent to no compression; and `producer` which means retain the original compression codec set by the producer.(Default: producer).
+ :param str compression_type: Enum: `gzip`, `lz4`, `producer`, `snappy`, `uncompressed`, `zstd`. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `uncompressed` which is equivalent to no compression; and `producer` which means retain the original compression codec set by the producer.(Default: producer).
  :param int connections_max_idle_ms: Idle connections timeout: the server socket processor threads close the connections that idle for longer than this. (Default: 600000 ms (10 minutes)). Example: `540000`.
  :param int default_replication_factor: Replication factor for auto-created topics (Default: 3).
  :param int group_initial_rebalance_delay_ms: The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. (Default: 3000 ms (3 seconds)). Example: `3000`.
@@ -8011,8 +8117,8 @@ class KafkaKafkaUserConfigKafka(dict):
  :param int log_cleaner_max_compaction_lag_ms: The maximum amount of time message will remain uncompacted. Only applicable for logs that are being compacted. (Default: 9223372036854775807 ms (Long.MAX_VALUE)).
  :param float log_cleaner_min_cleanable_ratio: Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option. (Default: 0.5). Example: `0.5`.
  :param int log_cleaner_min_compaction_lag_ms: The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted. (Default: 0 ms).
- :param str log_cleanup_policy: Enum: `delete`, `compact`, `compact,delete`. The default cleanup policy for segments beyond the retention window (Default: delete).
- :param int log_flush_interval_messages: The number of messages accumulated on a log partition before messages are flushed to disk (Default: 9223372036854775807 (Long.MAX_VALUE)). Example: `9223372036854775807`.
+ :param str log_cleanup_policy: Enum: `compact`, `compact,delete`, `delete`. The default cleanup policy for segments beyond the retention window (Default: delete).
+ :param int log_flush_interval_messages: The number of messages accumulated on a log partition before messages are flushed to disk (Default: 9223372036854775807 (Long.MAX_VALUE)).
  :param int log_flush_interval_ms: The maximum time in ms that a message in any topic is kept in memory (page-cache) before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used (Default: null).
  :param int log_index_interval_bytes: The interval with which Kafka adds an entry to the offset index (Default: 4096 bytes (4 kibibytes)). Example: `4096`.
  :param int log_index_size_max_bytes: The maximum size in bytes of the offset index (Default: 10485760 (10 mebibytes)). Example: `10485760`.
@@ -8152,7 +8258,7 @@ class KafkaKafkaUserConfigKafka(dict):
  @pulumi.getter(name="compressionType")
  def compression_type(self) -> Optional[str]:
  """
- Enum: `gzip`, `snappy`, `lz4`, `zstd`, `uncompressed`, `producer`. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `uncompressed` which is equivalent to no compression; and `producer` which means retain the original compression codec set by the producer.(Default: producer).
+ Enum: `gzip`, `lz4`, `producer`, `snappy`, `uncompressed`, `zstd`. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `uncompressed` which is equivalent to no compression; and `producer` which means retain the original compression codec set by the producer.(Default: producer).
  """
  return pulumi.get(self, "compression_type")
 
@@ -8232,7 +8338,7 @@ class KafkaKafkaUserConfigKafka(dict):
  @pulumi.getter(name="logCleanupPolicy")
  def log_cleanup_policy(self) -> Optional[str]:
  """
- Enum: `delete`, `compact`, `compact,delete`. The default cleanup policy for segments beyond the retention window (Default: delete).
+ Enum: `compact`, `compact,delete`, `delete`. The default cleanup policy for segments beyond the retention window (Default: delete).
  """
  return pulumi.get(self, "log_cleanup_policy")
 
@@ -8240,7 +8346,7 @@ class KafkaKafkaUserConfigKafka(dict):
  @pulumi.getter(name="logFlushIntervalMessages")
  def log_flush_interval_messages(self) -> Optional[int]:
  """
- The number of messages accumulated on a log partition before messages are flushed to disk (Default: 9223372036854775807 (Long.MAX_VALUE)). Example: `9223372036854775807`.
+ The number of messages accumulated on a log partition before messages are flushed to disk (Default: 9223372036854775807 (Long.MAX_VALUE)).
  """
  return pulumi.get(self, "log_flush_interval_messages")
 
@@ -8607,10 +8713,10 @@ class KafkaKafkaUserConfigKafkaConnectConfig(dict):
8607
8713
  scheduled_rebalance_max_delay_ms: Optional[int] = None,
8608
8714
  session_timeout_ms: Optional[int] = None):
8609
8715
  """
8610
- :param str connector_client_config_override_policy: Enum: `None`, `All`. Defines what client configurations can be overridden by the connector. Default is None.
8716
+ :param str connector_client_config_override_policy: Enum: `All`, `None`. Defines what client configurations can be overridden by the connector. Default is None.
8611
8717
  :param str consumer_auto_offset_reset: Enum: `earliest`, `latest`. What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
8612
8718
  :param int consumer_fetch_max_bytes: Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not a absolute maximum. Example: `52428800`.
8613
- :param str consumer_isolation_level: Enum: `read_uncommitted`, `read_committed`. Transaction read isolation level. read*uncommitted is the default, but read*committed can be used if consume-exactly-once behavior is desired.
8719
+ :param str consumer_isolation_level: Enum: `read_committed`, `read_uncommitted`. Transaction read isolation level. read*uncommitted is the default, but read*committed can be used if consume-exactly-once behavior is desired.
8614
8720
  :param int consumer_max_partition_fetch_bytes: Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress. Example: `1048576`.
8615
8721
  :param int consumer_max_poll_interval_ms: The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).
8616
8722
  :param int consumer_max_poll_records: The maximum number of records returned in a single call to poll() (defaults to 500).
@@ -8618,7 +8724,7 @@ class KafkaKafkaUserConfigKafkaConnectConfig(dict):
8618
8724
  :param int offset_flush_timeout_ms: Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).
8619
8725
  :param int producer_batch_size: This setting gives the upper bound of the batch size to be sent. If there are fewer than this many bytes accumulated for this partition, the producer will `linger` for the linger.ms time waiting for more records to show up. A batch size of zero will disable batching entirely (defaults to 16384).
8620
8726
  :param int producer_buffer_memory: The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432).
8621
- :param str producer_compression_type: Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
8727
+ :param str producer_compression_type: Enum: `gzip`, `lz4`, `none`, `snappy`, `zstd`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
8622
8728
  :param int producer_linger_ms: This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting, however if there are fewer than this many bytes accumulated for this partition the producer will `linger` for the specified time waiting for more records to show up. Defaults to 0.
8623
8729
  :param int producer_max_request_size: This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests. Example: `1048576`.
8624
8730
  :param int scheduled_rebalance_max_delay_ms: The maximum delay that is scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. Defaults to 5 minutes.
@@ -8661,7 +8767,7 @@ class KafkaKafkaUserConfigKafkaConnectConfig(dict):
8661
8767
  @pulumi.getter(name="connectorClientConfigOverridePolicy")
8662
8768
  def connector_client_config_override_policy(self) -> Optional[str]:
8663
8769
  """
8664
- Enum: `None`, `All`. Defines what client configurations can be overridden by the connector. Default is None.
8770
+ Enum: `All`, `None`. Defines what client configurations can be overridden by the connector. Default is None.
8665
8771
  """
8666
8772
  return pulumi.get(self, "connector_client_config_override_policy")
8667
8773
 
@@ -8685,7 +8791,7 @@ class KafkaKafkaUserConfigKafkaConnectConfig(dict):
8685
8791
  @pulumi.getter(name="consumerIsolationLevel")
8686
8792
  def consumer_isolation_level(self) -> Optional[str]:
8687
8793
  """
8688
- Enum: `read_uncommitted`, `read_committed`. Transaction read isolation level. read*uncommitted is the default, but read*committed can be used if consume-exactly-once behavior is desired.
8794
+ Enum: `read_committed`, `read_uncommitted`. Transaction read isolation level. read*uncommitted is the default, but read*committed can be used if consume-exactly-once behavior is desired.
8689
8795
  """
8690
8796
  return pulumi.get(self, "consumer_isolation_level")
8691
8797
 
@@ -8749,7 +8855,7 @@ class KafkaKafkaUserConfigKafkaConnectConfig(dict):
8749
8855
  @pulumi.getter(name="producerCompressionType")
8750
8856
  def producer_compression_type(self) -> Optional[str]:
8751
8857
  """
8752
- Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
8858
+ Enum: `gzip`, `lz4`, `none`, `snappy`, `zstd`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
8753
8859
  """
8754
8860
  return pulumi.get(self, "producer_compression_type")
8755
8861
 
@@ -9040,10 +9146,10 @@ class KafkaKafkaUserConfigKafkaRestConfig(dict):
9040
9146
  :param bool consumer_enable_auto_commit: If true the consumer's offset will be periodically committed to Kafka in the background. Default: `true`.
9041
9147
  :param int consumer_request_max_bytes: Maximum number of bytes in unencoded message keys and values by a single request. Default: `67108864`.
9042
9148
  :param int consumer_request_timeout_ms: Enum: `1000`, `15000`, `30000`. The maximum total time to wait for messages for a request if the maximum number of messages has not yet been reached. Default: `1000`.
9043
- :param str name_strategy: Enum: `topic_name`, `record_name`, `topic_record_name`. Name strategy to use when selecting subject for storing schemas. Default: `topic_name`.
9149
+ :param str name_strategy: Enum: `record_name`, `topic_name`, `topic_record_name`. Name strategy to use when selecting subject for storing schemas. Default: `topic_name`.
9044
9150
  :param bool name_strategy_validation: If true, validate that given schema is registered under expected subject name by the used name strategy when producing messages. Default: `true`.
9045
- :param str producer_acks: Enum: `all`, `-1`, `0`, `1`. The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to `all` or `-1`, the leader will wait for the full set of in-sync replicas to acknowledge the record. Default: `1`.
9046
- :param str producer_compression_type: Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
9151
+ :param str producer_acks: Enum: `-1`, `0`, `1`, `all`. The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to `all` or `-1`, the leader will wait for the full set of in-sync replicas to acknowledge the record. Default: `1`.
9152
+ :param str producer_compression_type: Enum: `gzip`, `lz4`, `none`, `snappy`, `zstd`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
9047
9153
  :param int producer_linger_ms: Wait for up to the given delay to allow batching records together. Default: `0`.
9048
9154
  :param int producer_max_request_size: The maximum size of a request in bytes. Note that Kafka broker can also cap the record batch size. Default: `1048576`.
9049
9155
  :param int simpleconsumer_pool_size_max: Maximum number of SimpleConsumers that can be instantiated per broker. Default: `25`.
@@ -9097,7 +9203,7 @@ class KafkaKafkaUserConfigKafkaRestConfig(dict):
9097
9203
  @pulumi.getter(name="nameStrategy")
9098
9204
  def name_strategy(self) -> Optional[str]:
9099
9205
  """
9100
- Enum: `topic_name`, `record_name`, `topic_record_name`. Name strategy to use when selecting subject for storing schemas. Default: `topic_name`.
9206
+ Enum: `record_name`, `topic_name`, `topic_record_name`. Name strategy to use when selecting subject for storing schemas. Default: `topic_name`.
9101
9207
  """
9102
9208
  return pulumi.get(self, "name_strategy")
9103
9209
 
@@ -9113,7 +9219,7 @@ class KafkaKafkaUserConfigKafkaRestConfig(dict):
9113
9219
  @pulumi.getter(name="producerAcks")
9114
9220
  def producer_acks(self) -> Optional[str]:
9115
9221
  """
9116
- Enum: `all`, `-1`, `0`, `1`. The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to `all` or `-1`, the leader will wait for the full set of in-sync replicas to acknowledge the record. Default: `1`.
9222
+ Enum: `-1`, `0`, `1`, `all`. The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to `all` or `-1`, the leader will wait for the full set of in-sync replicas to acknowledge the record. Default: `1`.
9117
9223
  """
9118
9224
  return pulumi.get(self, "producer_acks")
9119
9225
 
@@ -9121,7 +9227,7 @@ class KafkaKafkaUserConfigKafkaRestConfig(dict):
9121
9227
  @pulumi.getter(name="producerCompressionType")
9122
9228
  def producer_compression_type(self) -> Optional[str]:
9123
9229
  """
9124
- Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
9230
+ Enum: `gzip`, `lz4`, `none`, `snappy`, `zstd`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
9125
9231
  """
9126
9232
  return pulumi.get(self, "producer_compression_type")
9127
9233
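The hunks above only reorder the documented enum values for the Kafka REST proxy settings; the accepted values are unchanged. A minimal sketch of setting them through pulumi-aiven (the project, plan and service names below are placeholders, not taken from this diff):

import pulumi_aiven as aiven

kafka = aiven.Kafka(
    "example-kafka",
    project="my-project",                  # placeholder project name
    cloud_name="google-europe-west1",
    plan="business-4",
    service_name="example-kafka",
    kafka_user_config=aiven.KafkaKafkaUserConfigArgs(
        kafka_rest=True,
        kafka_rest_config=aiven.KafkaKafkaUserConfigKafkaRestConfigArgs(
            name_strategy="record_name",         # record_name, topic_name or topic_record_name
            producer_acks="all",                 # wait for the full in-sync replica set
            producer_compression_type="snappy",  # gzip, lz4, none, snappy or zstd
        ),
    ),
)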
 
@@ -10432,8 +10538,8 @@ class KafkaTopicConfig(dict):
10432
10538
  segment_ms: Optional[str] = None,
10433
10539
  unclean_leader_election_enable: Optional[bool] = None):
10434
10540
  """
10435
- :param str cleanup_policy: cleanup.policy value
10436
- :param str compression_type: compression.type value
10541
+ :param str cleanup_policy: cleanup.policy value. The possible values are `delete`, `compact` and `compact,delete`.
10542
+ :param str compression_type: compression.type value. The possible values are `snappy`, `gzip`, `lz4`, `producer`, `uncompressed` and `zstd`.
10437
10543
  :param str delete_retention_ms: delete.retention.ms value
10438
10544
  :param str file_delete_delay_ms: file.delete.delay.ms value
10439
10545
  :param str flush_messages: flush.messages value
@@ -10444,9 +10550,9 @@ class KafkaTopicConfig(dict):
10444
10550
  :param str max_compaction_lag_ms: max.compaction.lag.ms value
10445
10551
  :param str max_message_bytes: max.message.bytes value
10446
10552
  :param bool message_downconversion_enable: message.downconversion.enable value
10447
- :param str message_format_version: message.format.version value
10553
+ :param str message_format_version: message.format.version value. The possible values are `0.8.0`, `0.8.1`, `0.8.2`, `0.9.0`, `0.10.0`, `0.10.0-IV0`, `0.10.0-IV1`, `0.10.1`, `0.10.1-IV0`, `0.10.1-IV1`, `0.10.1-IV2`, `0.10.2`, `0.10.2-IV0`, `0.11.0`, `0.11.0-IV0`, `0.11.0-IV1`, `0.11.0-IV2`, `1.0`, `1.0-IV0`, `1.1`, `1.1-IV0`, `2.0`, `2.0-IV0`, `2.0-IV1`, `2.1`, `2.1-IV0`, `2.1-IV1`, `2.1-IV2`, `2.2`, `2.2-IV0`, `2.2-IV1`, `2.3`, `2.3-IV0`, `2.3-IV1`, `2.4`, `2.4-IV0`, `2.4-IV1`, `2.5`, `2.5-IV0`, `2.6`, `2.6-IV0`, `2.7`, `2.7-IV0`, `2.7-IV1`, `2.7-IV2`, `2.8`, `2.8-IV0`, `2.8-IV1`, `3.0`, `3.0-IV0`, `3.0-IV1`, `3.1`, `3.1-IV0`, `3.2`, `3.2-IV0`, `3.3`, `3.3-IV0`, `3.3-IV1`, `3.3-IV2`, `3.3-IV3`, `3.4`, `3.4-IV0`, `3.5`, `3.5-IV0`, `3.5-IV1`, `3.5-IV2`, `3.6`, `3.6-IV0`, `3.6-IV1`, `3.6-IV2`, `3.7`, `3.7-IV0`, `3.7-IV1`, `3.7-IV2`, `3.7-IV3`, `3.7-IV4`, `3.8`, `3.8-IV0`, `3.9`, `3.9-IV0` and `3.9-IV1`.
10448
10554
  :param str message_timestamp_difference_max_ms: message.timestamp.difference.max.ms value
10449
- :param str message_timestamp_type: message.timestamp.type value
10555
+ :param str message_timestamp_type: message.timestamp.type value. The possible values are `CreateTime` and `LogAppendTime`.
10450
10556
  :param float min_cleanable_dirty_ratio: min.cleanable.dirty.ratio value
10451
10557
  :param str min_compaction_lag_ms: min.compaction.lag.ms value
10452
10558
  :param str min_insync_replicas: min.insync.replicas value
@@ -10519,7 +10625,7 @@ class KafkaTopicConfig(dict):
10519
10625
  @pulumi.getter(name="cleanupPolicy")
10520
10626
  def cleanup_policy(self) -> Optional[str]:
10521
10627
  """
10522
- cleanup.policy value
10628
+ cleanup.policy value. The possible values are `delete`, `compact` and `compact,delete`.
10523
10629
  """
10524
10630
  return pulumi.get(self, "cleanup_policy")
10525
10631
 
@@ -10527,7 +10633,7 @@ class KafkaTopicConfig(dict):
10527
10633
  @pulumi.getter(name="compressionType")
10528
10634
  def compression_type(self) -> Optional[str]:
10529
10635
  """
10530
- compression.type value
10636
+ compression.type value. The possible values are `snappy`, `gzip`, `lz4`, `producer`, `uncompressed` and `zstd`.
10531
10637
  """
10532
10638
  return pulumi.get(self, "compression_type")
10533
10639
 
@@ -10615,7 +10721,7 @@ class KafkaTopicConfig(dict):
10615
10721
  @pulumi.getter(name="messageFormatVersion")
10616
10722
  def message_format_version(self) -> Optional[str]:
10617
10723
  """
10618
- message.format.version value
10724
+ message.format.version value. The possible values are `0.8.0`, `0.8.1`, `0.8.2`, `0.9.0`, `0.10.0`, `0.10.0-IV0`, `0.10.0-IV1`, `0.10.1`, `0.10.1-IV0`, `0.10.1-IV1`, `0.10.1-IV2`, `0.10.2`, `0.10.2-IV0`, `0.11.0`, `0.11.0-IV0`, `0.11.0-IV1`, `0.11.0-IV2`, `1.0`, `1.0-IV0`, `1.1`, `1.1-IV0`, `2.0`, `2.0-IV0`, `2.0-IV1`, `2.1`, `2.1-IV0`, `2.1-IV1`, `2.1-IV2`, `2.2`, `2.2-IV0`, `2.2-IV1`, `2.3`, `2.3-IV0`, `2.3-IV1`, `2.4`, `2.4-IV0`, `2.4-IV1`, `2.5`, `2.5-IV0`, `2.6`, `2.6-IV0`, `2.7`, `2.7-IV0`, `2.7-IV1`, `2.7-IV2`, `2.8`, `2.8-IV0`, `2.8-IV1`, `3.0`, `3.0-IV0`, `3.0-IV1`, `3.1`, `3.1-IV0`, `3.2`, `3.2-IV0`, `3.3`, `3.3-IV0`, `3.3-IV1`, `3.3-IV2`, `3.3-IV3`, `3.4`, `3.4-IV0`, `3.5`, `3.5-IV0`, `3.5-IV1`, `3.5-IV2`, `3.6`, `3.6-IV0`, `3.6-IV1`, `3.6-IV2`, `3.7`, `3.7-IV0`, `3.7-IV1`, `3.7-IV2`, `3.7-IV3`, `3.7-IV4`, `3.8`, `3.8-IV0`, `3.9`, `3.9-IV0` and `3.9-IV1`.
10619
10725
  """
10620
10726
  return pulumi.get(self, "message_format_version")
10621
10727
 
@@ -10631,7 +10737,7 @@ class KafkaTopicConfig(dict):
10631
10737
  @pulumi.getter(name="messageTimestampType")
10632
10738
  def message_timestamp_type(self) -> Optional[str]:
10633
10739
  """
10634
- message.timestamp.type value
10740
+ message.timestamp.type value. The possible values are `CreateTime` and `LogAppendTime`.
10635
10741
  """
10636
10742
  return pulumi.get(self, "message_timestamp_type")
10637
10743
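The KafkaTopicConfig docstrings above now spell out the allowed values for cleanup.policy, compression.type, message.format.version and message.timestamp.type. A short sketch of a topic using a few of them (project and service names are placeholders):

import pulumi_aiven as aiven

topic = aiven.KafkaTopic(
    "example-topic",
    project="my-project",
    service_name="example-kafka",
    topic_name="events",
    partitions=3,
    replication=2,
    config=aiven.KafkaTopicConfigArgs(
        cleanup_policy="compact,delete",        # delete, compact or compact,delete
        compression_type="zstd",                # snappy, gzip, lz4, producer, uncompressed or zstd
        message_timestamp_type="LogAppendTime", # CreateTime or LogAppendTime
    ),
)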
 
@@ -13345,9 +13451,9 @@ class MySqlMysqlUserConfigMysql(dict):
13345
13451
  :param int innodb_thread_concurrency: Defines the maximum number of threads permitted inside of InnoDB. Default is 0 (infinite concurrency - no limit). Example: `10`.
13346
13452
  :param int innodb_write_io_threads: The number of I/O threads for write operations in InnoDB. Default is 4. Changing this parameter will lead to a restart of the MySQL service. Example: `10`.
13347
13453
  :param int interactive_timeout: The number of seconds the server waits for activity on an interactive connection before closing it. Example: `3600`.
13348
- :param str internal_tmp_mem_storage_engine: Enum: `TempTable`, `MEMORY`. The storage engine for in-memory internal temporary tables.
13349
- :param str log_output: Enum: `INSIGHTS`, `NONE`, `TABLE`, `INSIGHTS,TABLE`. The slow log output destination when slow*query*log is ON. To enable MySQL AI Insights, choose INSIGHTS. To use MySQL AI Insights and the mysql.slow*log table at the same time, choose INSIGHTS,TABLE. To only use the mysql.slow*log table, choose TABLE. To silence slow logs, choose NONE.
13350
- :param float long_query_time: The slow*query*logs work as SQL statements that take more than long*query*time seconds to execute. Example: `10`.
13454
+ :param str internal_tmp_mem_storage_engine: Enum: `MEMORY`, `TempTable`. The storage engine for in-memory internal temporary tables.
13455
+ :param str log_output: Enum: `INSIGHTS`, `INSIGHTS,TABLE`, `NONE`, `TABLE`. The slow log output destination when slow*query*log is ON. To enable MySQL AI Insights, choose INSIGHTS. To use MySQL AI Insights and the mysql.slow*log table at the same time, choose INSIGHTS,TABLE. To only use the mysql.slow*log table, choose TABLE. To silence slow logs, choose NONE.
13456
+ :param float long_query_time: The slow*query*logs work as SQL statements that take more than long*query*time seconds to execute. Example: `10.0`.
13351
13457
  :param int max_allowed_packet: Size of the largest message in bytes that can be received by the server. Default is 67108864 (64M). Example: `67108864`.
13352
13458
  :param int max_heap_table_size: Limits the size of internal in-memory tables. Also set tmp*table*size. Default is 16777216 (16M). Example: `16777216`.
13353
13459
  :param int net_buffer_length: Start sizes of connection buffer and result buffer. Default is 16384 (16K). Changing this parameter will lead to a restart of the MySQL service. Example: `16384`.
@@ -13563,7 +13669,7 @@ class MySqlMysqlUserConfigMysql(dict):
13563
13669
  @pulumi.getter(name="internalTmpMemStorageEngine")
13564
13670
  def internal_tmp_mem_storage_engine(self) -> Optional[str]:
13565
13671
  """
13566
- Enum: `TempTable`, `MEMORY`. The storage engine for in-memory internal temporary tables.
13672
+ Enum: `MEMORY`, `TempTable`. The storage engine for in-memory internal temporary tables.
13567
13673
  """
13568
13674
  return pulumi.get(self, "internal_tmp_mem_storage_engine")
13569
13675
 
@@ -13571,7 +13677,7 @@ class MySqlMysqlUserConfigMysql(dict):
13571
13677
  @pulumi.getter(name="logOutput")
13572
13678
  def log_output(self) -> Optional[str]:
13573
13679
  """
13574
- Enum: `INSIGHTS`, `NONE`, `TABLE`, `INSIGHTS,TABLE`. The slow log output destination when slow*query*log is ON. To enable MySQL AI Insights, choose INSIGHTS. To use MySQL AI Insights and the mysql.slow*log table at the same time, choose INSIGHTS,TABLE. To only use the mysql.slow*log table, choose TABLE. To silence slow logs, choose NONE.
13680
+ Enum: `INSIGHTS`, `INSIGHTS,TABLE`, `NONE`, `TABLE`. The slow log output destination when slow*query*log is ON. To enable MySQL AI Insights, choose INSIGHTS. To use MySQL AI Insights and the mysql.slow*log table at the same time, choose INSIGHTS,TABLE. To only use the mysql.slow*log table, choose TABLE. To silence slow logs, choose NONE.
13575
13681
  """
13576
13682
  return pulumi.get(self, "log_output")
13577
13683
 
@@ -13579,7 +13685,7 @@ class MySqlMysqlUserConfigMysql(dict):
13579
13685
  @pulumi.getter(name="longQueryTime")
13580
13686
  def long_query_time(self) -> Optional[float]:
13581
13687
  """
13582
- The slow*query*logs work as SQL statements that take more than long*query*time seconds to execute. Example: `10`.
13688
+ The slow*query*logs work as SQL statements that take more than long*query*time seconds to execute. Example: `10.0`.
13583
13689
  """
13584
13690
  return pulumi.get(self, "long_query_time")
13585
13691
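The MySQL hunks above reorder the enum lists and change the long_query_time example to a float. A hedged sketch of the corresponding user config (service details are placeholders):

import pulumi_aiven as aiven

mysql = aiven.MySql(
    "example-mysql",
    project="my-project",
    cloud_name="google-europe-west1",
    plan="business-4",
    service_name="example-mysql",
    mysql_user_config=aiven.MySqlMysqlUserConfigArgs(
        mysql=aiven.MySqlMysqlUserConfigMysqlArgs(
            log_output="INSIGHTS,TABLE",                  # INSIGHTS, INSIGHTS,TABLE, NONE or TABLE
            long_query_time=10.0,                         # seconds, float-valued
            internal_tmp_mem_storage_engine="TempTable",  # MEMORY or TempTable
        ),
    ),
)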
 
@@ -14202,7 +14308,7 @@ class OpenSearchOpensearchUserConfig(dict):
14202
14308
  :param str additional_backup_regions: Additional Cloud Regions for Backup Replication.
14203
14309
  :param 'OpenSearchOpensearchUserConfigAzureMigrationArgs' azure_migration: Azure migration settings
14204
14310
  :param str custom_domain: Serve the web frontend using a custom CNAME pointing to the Aiven DNS name. Example: `grafana.example.org`.
14205
- :param bool disable_replication_factor_adjustment: Disable automatic replication factor adjustment for multi-node services. By default, Aiven ensures all indexes are replicated at least to two nodes. Note: Due to potential data loss in case of losing a service node, this setting can no longer be activated.
14311
+ :param bool disable_replication_factor_adjustment: Disable automatic replication factor adjustment for multi-node services. By default, Aiven ensures all indexes are replicated at least to two nodes. Note: Due to potential data loss in case of losing a service node, this setting can not be activated unless specifically allowed for the project.
14206
14312
  :param 'OpenSearchOpensearchUserConfigGcsMigrationArgs' gcs_migration: Google Cloud Storage migration settings
14207
14313
  :param Sequence['OpenSearchOpensearchUserConfigIndexPatternArgs'] index_patterns: Index patterns
14208
14314
  :param 'OpenSearchOpensearchUserConfigIndexRollupArgs' index_rollup: Index rollup settings
@@ -14310,7 +14416,7 @@ class OpenSearchOpensearchUserConfig(dict):
14310
14416
  @pulumi.getter(name="disableReplicationFactorAdjustment")
14311
14417
  def disable_replication_factor_adjustment(self) -> Optional[bool]:
14312
14418
  """
14313
- Disable automatic replication factor adjustment for multi-node services. By default, Aiven ensures all indexes are replicated at least to two nodes. Note: Due to potential data loss in case of losing a service node, this setting can no longer be activated.
14419
+ Disable automatic replication factor adjustment for multi-node services. By default, Aiven ensures all indexes are replicated at least to two nodes. Note: Due to potential data loss in case of losing a service node, this setting can not be activated unless specifically allowed for the project.
14314
14420
  """
14315
14421
  return pulumi.get(self, "disable_replication_factor_adjustment")
14316
14422
 
@@ -14513,6 +14619,10 @@ class OpenSearchOpensearchUserConfigAzureMigration(dict):
14513
14619
  suggest = "chunk_size"
14514
14620
  elif key == "endpointSuffix":
14515
14621
  suggest = "endpoint_suffix"
14622
+ elif key == "includeAliases":
14623
+ suggest = "include_aliases"
14624
+ elif key == "restoreGlobalState":
14625
+ suggest = "restore_global_state"
14516
14626
  elif key == "sasToken":
14517
14627
  suggest = "sas_token"
14518
14628
 
@@ -14531,28 +14641,33 @@ class OpenSearchOpensearchUserConfigAzureMigration(dict):
14531
14641
  account: str,
14532
14642
  base_path: str,
14533
14643
  container: str,
14644
+ indices: str,
14534
14645
  snapshot_name: str,
14535
14646
  chunk_size: Optional[str] = None,
14536
14647
  compress: Optional[bool] = None,
14537
14648
  endpoint_suffix: Optional[str] = None,
14538
- indices: Optional[str] = None,
14649
+ include_aliases: Optional[bool] = None,
14539
14650
  key: Optional[str] = None,
14651
+ restore_global_state: Optional[bool] = None,
14540
14652
  sas_token: Optional[str] = None):
14541
14653
  """
14542
14654
  :param str account: Azure account name.
14543
14655
  :param str base_path: The path to the repository data within its container. The value of this setting should not start or end with a /.
14544
14656
  :param str container: Azure container name.
14657
+ :param str indices: A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. Example: `metrics*,logs*,data-20240823`.
14545
14658
  :param str snapshot_name: The snapshot name to restore from.
14546
14659
  :param str chunk_size: Big files can be broken down into chunks during snapshotting if needed. Should be the same as for the 3rd party repository.
14547
14660
  :param bool compress: When set to true metadata files are stored in compressed format.
14548
14661
  :param str endpoint_suffix: Defines the DNS suffix for Azure Storage endpoints.
14549
- :param str indices: A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. By default, a restore operation includes all data streams and indices in the snapshot. If this argument is provided, the restore operation only includes the data streams and indices that you specify. Example: `metrics*,logs*,data-20240823`.
14662
+ :param bool include_aliases: Whether to restore aliases alongside their associated indexes. Default is true.
14550
14663
  :param str key: Azure account secret key. One of key or sas_token should be specified.
14664
+ :param bool restore_global_state: If true, restore the cluster state. Defaults to false.
14551
14665
  :param str sas_token: A shared access signatures (SAS) token. One of key or sas_token should be specified.
14552
14666
  """
14553
14667
  pulumi.set(__self__, "account", account)
14554
14668
  pulumi.set(__self__, "base_path", base_path)
14555
14669
  pulumi.set(__self__, "container", container)
14670
+ pulumi.set(__self__, "indices", indices)
14556
14671
  pulumi.set(__self__, "snapshot_name", snapshot_name)
14557
14672
  if chunk_size is not None:
14558
14673
  pulumi.set(__self__, "chunk_size", chunk_size)
@@ -14560,10 +14675,12 @@ class OpenSearchOpensearchUserConfigAzureMigration(dict):
14560
14675
  pulumi.set(__self__, "compress", compress)
14561
14676
  if endpoint_suffix is not None:
14562
14677
  pulumi.set(__self__, "endpoint_suffix", endpoint_suffix)
14563
- if indices is not None:
14564
- pulumi.set(__self__, "indices", indices)
14678
+ if include_aliases is not None:
14679
+ pulumi.set(__self__, "include_aliases", include_aliases)
14565
14680
  if key is not None:
14566
14681
  pulumi.set(__self__, "key", key)
14682
+ if restore_global_state is not None:
14683
+ pulumi.set(__self__, "restore_global_state", restore_global_state)
14567
14684
  if sas_token is not None:
14568
14685
  pulumi.set(__self__, "sas_token", sas_token)
14569
14686
 
@@ -14591,6 +14708,14 @@ class OpenSearchOpensearchUserConfigAzureMigration(dict):
14591
14708
  """
14592
14709
  return pulumi.get(self, "container")
14593
14710
 
14711
+ @property
14712
+ @pulumi.getter
14713
+ def indices(self) -> str:
14714
+ """
14715
+ A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. Example: `metrics*,logs*,data-20240823`.
14716
+ """
14717
+ return pulumi.get(self, "indices")
14718
+
14594
14719
  @property
14595
14720
  @pulumi.getter(name="snapshotName")
14596
14721
  def snapshot_name(self) -> str:
@@ -14624,12 +14749,12 @@ class OpenSearchOpensearchUserConfigAzureMigration(dict):
14624
14749
  return pulumi.get(self, "endpoint_suffix")
14625
14750
 
14626
14751
  @property
14627
- @pulumi.getter
14628
- def indices(self) -> Optional[str]:
14752
+ @pulumi.getter(name="includeAliases")
14753
+ def include_aliases(self) -> Optional[bool]:
14629
14754
  """
14630
- A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. By default, a restore operation includes all data streams and indices in the snapshot. If this argument is provided, the restore operation only includes the data streams and indices that you specify. Example: `metrics*,logs*,data-20240823`.
14755
+ Whether to restore aliases alongside their associated indexes. Default is true.
14631
14756
  """
14632
- return pulumi.get(self, "indices")
14757
+ return pulumi.get(self, "include_aliases")
14633
14758
 
14634
14759
  @property
14635
14760
  @pulumi.getter
@@ -14639,6 +14764,14 @@ class OpenSearchOpensearchUserConfigAzureMigration(dict):
14639
14764
  """
14640
14765
  return pulumi.get(self, "key")
14641
14766
 
14767
+ @property
14768
+ @pulumi.getter(name="restoreGlobalState")
14769
+ def restore_global_state(self) -> Optional[bool]:
14770
+ """
14771
+ If true, restore the cluster state. Defaults to false.
14772
+ """
14773
+ return pulumi.get(self, "restore_global_state")
14774
+
14642
14775
  @property
14643
14776
  @pulumi.getter(name="sasToken")
14644
14777
  def sas_token(self) -> Optional[str]:
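With this change the Azure migration block requires `indices` and gains the optional include_aliases and restore_global_state flags. A minimal sketch of the new shape (account, container, key and snapshot values are placeholders):

import pulumi_aiven as aiven

opensearch = aiven.OpenSearch(
    "example-opensearch",
    project="my-project",
    cloud_name="google-europe-west1",
    plan="business-4",
    service_name="example-opensearch",
    opensearch_user_config=aiven.OpenSearchOpensearchUserConfigArgs(
        azure_migration=aiven.OpenSearchOpensearchUserConfigAzureMigrationArgs(
            account="mystorageaccount",
            container="snapshots",
            base_path="opensearch/snapshots",
            key="<storage-account-key>",
            snapshot_name="snapshot-2024-08-23",
            indices="metrics*,logs*,data-20240823",  # now required
            include_aliases=True,                    # restore aliases with their indexes
            restore_global_state=False,              # skip cluster state
        ),
    ),
)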
@@ -14659,6 +14792,10 @@ class OpenSearchOpensearchUserConfigGcsMigration(dict):
14659
14792
  suggest = "snapshot_name"
14660
14793
  elif key == "chunkSize":
14661
14794
  suggest = "chunk_size"
14795
+ elif key == "includeAliases":
14796
+ suggest = "include_aliases"
14797
+ elif key == "restoreGlobalState":
14798
+ suggest = "restore_global_state"
14662
14799
 
14663
14800
  if suggest:
14664
14801
  pulumi.log.warn(f"Key '{key}' not found in OpenSearchOpensearchUserConfigGcsMigration. Access the value via the '{suggest}' property getter instead.")
@@ -14675,29 +14812,36 @@ class OpenSearchOpensearchUserConfigGcsMigration(dict):
14675
14812
  base_path: str,
14676
14813
  bucket: str,
14677
14814
  credentials: str,
14815
+ indices: str,
14678
14816
  snapshot_name: str,
14679
14817
  chunk_size: Optional[str] = None,
14680
14818
  compress: Optional[bool] = None,
14681
- indices: Optional[str] = None):
14819
+ include_aliases: Optional[bool] = None,
14820
+ restore_global_state: Optional[bool] = None):
14682
14821
  """
14683
14822
  :param str base_path: The path to the repository data within its container. The value of this setting should not start or end with a /.
14684
14823
  :param str bucket: The path to the repository data within its container.
14685
14824
  :param str credentials: Google Cloud Storage credentials file content.
14825
+ :param str indices: A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. Example: `metrics*,logs*,data-20240823`.
14686
14826
  :param str snapshot_name: The snapshot name to restore from.
14687
14827
  :param str chunk_size: Big files can be broken down into chunks during snapshotting if needed. Should be the same as for the 3rd party repository.
14688
14828
  :param bool compress: When set to true metadata files are stored in compressed format.
14689
- :param str indices: A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. By default, a restore operation includes all data streams and indices in the snapshot. If this argument is provided, the restore operation only includes the data streams and indices that you specify. Example: `metrics*,logs*,data-20240823`.
14829
+ :param bool include_aliases: Whether to restore aliases alongside their associated indexes. Default is true.
14830
+ :param bool restore_global_state: If true, restore the cluster state. Defaults to false.
14690
14831
  """
14691
14832
  pulumi.set(__self__, "base_path", base_path)
14692
14833
  pulumi.set(__self__, "bucket", bucket)
14693
14834
  pulumi.set(__self__, "credentials", credentials)
14835
+ pulumi.set(__self__, "indices", indices)
14694
14836
  pulumi.set(__self__, "snapshot_name", snapshot_name)
14695
14837
  if chunk_size is not None:
14696
14838
  pulumi.set(__self__, "chunk_size", chunk_size)
14697
14839
  if compress is not None:
14698
14840
  pulumi.set(__self__, "compress", compress)
14699
- if indices is not None:
14700
- pulumi.set(__self__, "indices", indices)
14841
+ if include_aliases is not None:
14842
+ pulumi.set(__self__, "include_aliases", include_aliases)
14843
+ if restore_global_state is not None:
14844
+ pulumi.set(__self__, "restore_global_state", restore_global_state)
14701
14845
 
14702
14846
  @property
14703
14847
  @pulumi.getter(name="basePath")
@@ -14723,6 +14867,14 @@ class OpenSearchOpensearchUserConfigGcsMigration(dict):
14723
14867
  """
14724
14868
  return pulumi.get(self, "credentials")
14725
14869
 
14870
+ @property
14871
+ @pulumi.getter
14872
+ def indices(self) -> str:
14873
+ """
14874
+ A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. Example: `metrics*,logs*,data-20240823`.
14875
+ """
14876
+ return pulumi.get(self, "indices")
14877
+
14726
14878
  @property
14727
14879
  @pulumi.getter(name="snapshotName")
14728
14880
  def snapshot_name(self) -> str:
@@ -14748,12 +14900,20 @@ class OpenSearchOpensearchUserConfigGcsMigration(dict):
14748
14900
  return pulumi.get(self, "compress")
14749
14901
 
14750
14902
  @property
14751
- @pulumi.getter
14752
- def indices(self) -> Optional[str]:
14903
+ @pulumi.getter(name="includeAliases")
14904
+ def include_aliases(self) -> Optional[bool]:
14753
14905
  """
14754
- A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. By default, a restore operation includes all data streams and indices in the snapshot. If this argument is provided, the restore operation only includes the data streams and indices that you specify. Example: `metrics*,logs*,data-20240823`.
14906
+ Whether to restore aliases alongside their associated indexes. Default is true.
14755
14907
  """
14756
- return pulumi.get(self, "indices")
14908
+ return pulumi.get(self, "include_aliases")
14909
+
14910
+ @property
14911
+ @pulumi.getter(name="restoreGlobalState")
14912
+ def restore_global_state(self) -> Optional[bool]:
14913
+ """
14914
+ If true, restore the cluster state. Defaults to false.
14915
+ """
14916
+ return pulumi.get(self, "restore_global_state")
14757
14917
 
14758
14918
 
14759
14919
  @pulumi.output_type
@@ -16207,7 +16367,7 @@ class OpenSearchOpensearchUserConfigOpensearchSearchBackpressure(dict):
16207
16367
  search_shard_task: Optional['outputs.OpenSearchOpensearchUserConfigOpensearchSearchBackpressureSearchShardTask'] = None,
16208
16368
  search_task: Optional['outputs.OpenSearchOpensearchUserConfigOpensearchSearchBackpressureSearchTask'] = None):
16209
16369
  """
16210
- :param str mode: Enum: `monitor_only`, `enforced`, `disabled`. The search backpressure mode. Valid values are monitor*only, enforced, or disabled. Default is monitor*only.
16370
+ :param str mode: Enum: `disabled`, `enforced`, `monitor_only`. The search backpressure mode. Valid values are monitor*only, enforced, or disabled. Default is monitor*only.
16211
16371
  :param 'OpenSearchOpensearchUserConfigOpensearchSearchBackpressureNodeDuressArgs' node_duress: Node duress settings
16212
16372
  :param 'OpenSearchOpensearchUserConfigOpensearchSearchBackpressureSearchShardTaskArgs' search_shard_task: Search shard settings
16213
16373
  :param 'OpenSearchOpensearchUserConfigOpensearchSearchBackpressureSearchTaskArgs' search_task: Search task settings
@@ -16225,7 +16385,7 @@ class OpenSearchOpensearchUserConfigOpensearchSearchBackpressure(dict):
16225
16385
  @pulumi.getter
16226
16386
  def mode(self) -> Optional[str]:
16227
16387
  """
16228
- Enum: `monitor_only`, `enforced`, `disabled`. The search backpressure mode. Valid values are monitor*only, enforced, or disabled. Default is monitor*only.
16388
+ Enum: `disabled`, `enforced`, `monitor_only`. The search backpressure mode. Valid values are monitor*only, enforced, or disabled. Default is monitor*only.
16229
16389
  """
16230
16390
  return pulumi.get(self, "mode")
16231
16391
 
@@ -17055,6 +17215,10 @@ class OpenSearchOpensearchUserConfigS3Migration(dict):
17055
17215
  suggest = "snapshot_name"
17056
17216
  elif key == "chunkSize":
17057
17217
  suggest = "chunk_size"
17218
+ elif key == "includeAliases":
17219
+ suggest = "include_aliases"
17220
+ elif key == "restoreGlobalState":
17221
+ suggest = "restore_global_state"
17058
17222
  elif key == "serverSideEncryption":
17059
17223
  suggest = "server_side_encryption"
17060
17224
 
@@ -17073,30 +17237,35 @@ class OpenSearchOpensearchUserConfigS3Migration(dict):
17073
17237
  access_key: str,
17074
17238
  base_path: str,
17075
17239
  bucket: str,
17240
+ indices: str,
17076
17241
  region: str,
17077
17242
  secret_key: str,
17078
17243
  snapshot_name: str,
17079
17244
  chunk_size: Optional[str] = None,
17080
17245
  compress: Optional[bool] = None,
17081
17246
  endpoint: Optional[str] = None,
17082
- indices: Optional[str] = None,
17247
+ include_aliases: Optional[bool] = None,
17248
+ restore_global_state: Optional[bool] = None,
17083
17249
  server_side_encryption: Optional[bool] = None):
17084
17250
  """
17085
17251
  :param str access_key: AWS Access key.
17086
17252
  :param str base_path: The path to the repository data within its container. The value of this setting should not start or end with a /.
17087
17253
  :param str bucket: S3 bucket name.
17254
+ :param str indices: A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. Example: `metrics*,logs*,data-20240823`.
17088
17255
  :param str region: S3 region.
17089
17256
  :param str secret_key: AWS secret key.
17090
17257
  :param str snapshot_name: The snapshot name to restore from.
17091
17258
  :param str chunk_size: Big files can be broken down into chunks during snapshotting if needed. Should be the same as for the 3rd party repository.
17092
17259
  :param bool compress: When set to true metadata files are stored in compressed format.
17093
17260
  :param str endpoint: The S3 service endpoint to connect to. If you are using an S3-compatible service then you should set this to the service’s endpoint.
17094
- :param str indices: A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. By default, a restore operation includes all data streams and indices in the snapshot. If this argument is provided, the restore operation only includes the data streams and indices that you specify. Example: `metrics*,logs*,data-20240823`.
17261
+ :param bool include_aliases: Whether to restore aliases alongside their associated indexes. Default is true.
17262
+ :param bool restore_global_state: If true, restore the cluster state. Defaults to false.
17095
17263
  :param bool server_side_encryption: When set to true files are encrypted on server side.
17096
17264
  """
17097
17265
  pulumi.set(__self__, "access_key", access_key)
17098
17266
  pulumi.set(__self__, "base_path", base_path)
17099
17267
  pulumi.set(__self__, "bucket", bucket)
17268
+ pulumi.set(__self__, "indices", indices)
17100
17269
  pulumi.set(__self__, "region", region)
17101
17270
  pulumi.set(__self__, "secret_key", secret_key)
17102
17271
  pulumi.set(__self__, "snapshot_name", snapshot_name)
@@ -17106,8 +17275,10 @@ class OpenSearchOpensearchUserConfigS3Migration(dict):
17106
17275
  pulumi.set(__self__, "compress", compress)
17107
17276
  if endpoint is not None:
17108
17277
  pulumi.set(__self__, "endpoint", endpoint)
17109
- if indices is not None:
17110
- pulumi.set(__self__, "indices", indices)
17278
+ if include_aliases is not None:
17279
+ pulumi.set(__self__, "include_aliases", include_aliases)
17280
+ if restore_global_state is not None:
17281
+ pulumi.set(__self__, "restore_global_state", restore_global_state)
17111
17282
  if server_side_encryption is not None:
17112
17283
  pulumi.set(__self__, "server_side_encryption", server_side_encryption)
17113
17284
 
@@ -17135,6 +17306,14 @@ class OpenSearchOpensearchUserConfigS3Migration(dict):
17135
17306
  """
17136
17307
  return pulumi.get(self, "bucket")
17137
17308
 
17309
+ @property
17310
+ @pulumi.getter
17311
+ def indices(self) -> str:
17312
+ """
17313
+ A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. Example: `metrics*,logs*,data-20240823`.
17314
+ """
17315
+ return pulumi.get(self, "indices")
17316
+
17138
17317
  @property
17139
17318
  @pulumi.getter
17140
17319
  def region(self) -> str:
@@ -17184,12 +17363,20 @@ class OpenSearchOpensearchUserConfigS3Migration(dict):
17184
17363
  return pulumi.get(self, "endpoint")
17185
17364
 
17186
17365
  @property
17187
- @pulumi.getter
17188
- def indices(self) -> Optional[str]:
17366
+ @pulumi.getter(name="includeAliases")
17367
+ def include_aliases(self) -> Optional[bool]:
17189
17368
  """
17190
- A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. By default, a restore operation includes all data streams and indices in the snapshot. If this argument is provided, the restore operation only includes the data streams and indices that you specify. Example: `metrics*,logs*,data-20240823`.
17369
+ Whether to restore aliases alongside their associated indexes. Default is true.
17191
17370
  """
17192
- return pulumi.get(self, "indices")
17371
+ return pulumi.get(self, "include_aliases")
17372
+
17373
+ @property
17374
+ @pulumi.getter(name="restoreGlobalState")
17375
+ def restore_global_state(self) -> Optional[bool]:
17376
+ """
17377
+ If true, restore the cluster state. Defaults to false.
17378
+ """
17379
+ return pulumi.get(self, "restore_global_state")
17193
17380
 
17194
17381
  @property
17195
17382
  @pulumi.getter(name="serverSideEncryption")
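The S3 repository migration gets the same treatment as the Azure and GCS blocks: `indices` becomes required and include_aliases/restore_global_state are added. A sketch of just the Args object (bucket and credentials are placeholders):

import pulumi_aiven as aiven

s3_migration = aiven.OpenSearchOpensearchUserConfigS3MigrationArgs(
    access_key="AKIA...",                      # placeholder AWS access key
    secret_key="<aws-secret-key>",
    region="eu-west-1",
    bucket="my-snapshot-bucket",
    base_path="opensearch/snapshots",
    snapshot_name="snapshot-2024-08-23",
    indices="metrics*,logs*,data-20240823",    # now required
    include_aliases=True,
    restore_global_state=False,
)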
@@ -17502,8 +17689,8 @@ class OrganizationPermissionPermission(dict):
17502
17689
  create_time: Optional[str] = None,
17503
17690
  update_time: Optional[str] = None):
17504
17691
  """
17505
- :param Sequence[str] permissions: List of permissions. The possible values are `admin`, `developer`, `operator`, `project:permissions:read`, `read_only` and `service:logs:read`.
17506
- :param str principal_id: ID of the user or group.
17692
+ :param Sequence[str] permissions: List of [roles and permissions](https://aiven.io/docs/platform/concepts/permissions) to grant. The possible values are `admin`, `developer`, `operator`, `organization:app_users:write`, `organization:audit_logs:read`, `organization:billing:read`, `organization:billing:write`, `organization:domains:write`, `organization:groups:write`, `organization:idps:write`, `organization:network:read`, `organization:network:write`, `organization:permissions:read`, `organization:permissions:write`, `organization:projects:read`, `organization:projects:write`, `organization:users:write`, `project:audit_logs:read`, `project:integrations:read`, `project:integrations:write`, `project:networking:read`, `project:networking:write`, `project:permissions:read`, `project:services:read`, `project:services:write`, `read_only`, `role:organization:admin`, `role:services:maintenance`, `role:services:recover`, `service:configuration:write`, `service:data:write`, `service:logs:read`, `service:secrets:read` and `service:users:write`.
17693
+ :param str principal_id: ID of the user or group to grant permissions to. Only active users who have accepted an [invite](https://aiven.io/docs/platform/howto/manage-org-users) to join the organization can be granted permissions.
17507
17694
  :param str principal_type: The type of principal. The possible values are `user` and `user_group`.
17508
17695
  :param str create_time: Time created.
17509
17696
  :param str update_time: Time updated.
@@ -17520,7 +17707,7 @@ class OrganizationPermissionPermission(dict):
17520
17707
  @pulumi.getter
17521
17708
  def permissions(self) -> Sequence[str]:
17522
17709
  """
17523
- List of permissions. The possible values are `admin`, `developer`, `operator`, `project:permissions:read`, `read_only` and `service:logs:read`.
17710
+ List of [roles and permissions](https://aiven.io/docs/platform/concepts/permissions) to grant. The possible values are `admin`, `developer`, `operator`, `organization:app_users:write`, `organization:audit_logs:read`, `organization:billing:read`, `organization:billing:write`, `organization:domains:write`, `organization:groups:write`, `organization:idps:write`, `organization:network:read`, `organization:network:write`, `organization:permissions:read`, `organization:permissions:write`, `organization:projects:read`, `organization:projects:write`, `organization:users:write`, `project:audit_logs:read`, `project:integrations:read`, `project:integrations:write`, `project:networking:read`, `project:networking:write`, `project:permissions:read`, `project:services:read`, `project:services:write`, `read_only`, `role:organization:admin`, `role:services:maintenance`, `role:services:recover`, `service:configuration:write`, `service:data:write`, `service:logs:read`, `service:secrets:read` and `service:users:write`.
17524
17711
  """
17525
17712
  return pulumi.get(self, "permissions")
17526
17713
 
@@ -17528,7 +17715,7 @@ class OrganizationPermissionPermission(dict):
17528
17715
  @pulumi.getter(name="principalId")
17529
17716
  def principal_id(self) -> str:
17530
17717
  """
17531
- ID of the user or group.
17718
+ ID of the user or group to grant permissions to. Only active users who have accepted an [invite](https://aiven.io/docs/platform/howto/manage-org-users) to join the organization can be granted permissions.
17532
17719
  """
17533
17720
  return pulumi.get(self, "principal_id")
17534
17721
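The permission list above grows from a handful of roles to the full set of organization-, project- and service-level permission strings. A hedged sketch of granting some of them (organization, project and user IDs are placeholders; the schema shown assumes the resource's documented organization_id/resource_type/resource_id arguments):

import pulumi_aiven as aiven

permission = aiven.OrganizationPermission(
    "example-permission",
    organization_id="org123456789",
    resource_type="project",
    resource_id="my-project",
    permissions=[
        aiven.OrganizationPermissionPermissionArgs(
            principal_type="user",
            principal_id="u123456789a",   # must already be an active organization member
            permissions=["project:services:read", "service:logs:read"],
        ),
    ],
)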
 
@@ -18213,7 +18400,7 @@ class PgPgUserConfig(dict):
18213
18400
  :param str service_to_fork_from: Name of another service to fork from. This has effect only when a new service is being created. Example: `anotherservicename`.
18214
18401
  :param float shared_buffers_percentage: Percentage of total RAM that the database server uses for shared memory buffers. Valid range is 20-60 (float), which corresponds to 20% - 60%. This setting adjusts the shared_buffers configuration value. Example: `41.5`.
18215
18402
  :param bool static_ips: Use static public IP addresses.
18216
- :param str synchronous_replication: Enum: `quorum`, `off`. Synchronous replication type. Note that the service plan also needs to support synchronous replication.
18403
+ :param str synchronous_replication: Enum: `off`, `quorum`. Synchronous replication type. Note that the service plan also needs to support synchronous replication.
18217
18404
  :param 'PgPgUserConfigTimescaledbArgs' timescaledb: System-wide settings for the timescaledb extension
18218
18405
  :param str variant: Enum: `aiven`, `timescale`. Variant of the PostgreSQL service, may affect the features that are exposed by default.
18219
18406
  :param int work_mem: Sets the maximum amount of memory to be used by a query operation (such as a sort or hash table) before writing to temporary disk files, in MB. Default is 1MB + 0.075% of total RAM (up to 32MB). Example: `4`.
@@ -18515,7 +18702,7 @@ class PgPgUserConfig(dict):
18515
18702
  @pulumi.getter(name="synchronousReplication")
18516
18703
  def synchronous_replication(self) -> Optional[str]:
18517
18704
  """
18518
- Enum: `quorum`, `off`. Synchronous replication type. Note that the service plan also needs to support synchronous replication.
18705
+ Enum: `off`, `quorum`. Synchronous replication type. Note that the service plan also needs to support synchronous replication.
18519
18706
  """
18520
18707
  return pulumi.get(self, "synchronous_replication")
18521
18708
 
@@ -18886,8 +19073,8 @@ class PgPgUserConfigPg(dict):
18886
19073
  :param int idle_in_transaction_session_timeout: Time out sessions with open transactions after this number of milliseconds.
18887
19074
  :param bool jit: Controls system-wide use of Just-in-Time Compilation (JIT).
18888
19075
  :param int log_autovacuum_min_duration: Causes each action executed by autovacuum to be logged if it ran for at least the specified number of milliseconds. Setting this to zero logs all autovacuum actions. Minus-one (the default) disables logging autovacuum actions.
18889
- :param str log_error_verbosity: Enum: `TERSE`, `DEFAULT`, `VERBOSE`. Controls the amount of detail written in the server log for each message that is logged.
18890
- :param str log_line_prefix: Enum: `'pid=%p,user=%u,db=%d,app=%a,client=%h '`, `'%t [%p]: [%l-1] user=%u,db=%d,app=%a,client=%h '`, `'%m [%p] %q[user=%u,db=%d,app=%a] '`, `'pid=%p,user=%u,db=%d,app=%a,client=%h,txid=%x,qid=%Q '`. Choose from one of the available log formats.
19076
+ :param str log_error_verbosity: Enum: `DEFAULT`, `TERSE`, `VERBOSE`. Controls the amount of detail written in the server log for each message that is logged.
19077
+ :param str log_line_prefix: Enum: `'%m [%p] %q[user=%u,db=%d,app=%a] '`, `'%t [%p]: [%l-1] user=%u,db=%d,app=%a,client=%h '`, `'pid=%p,user=%u,db=%d,app=%a,client=%h '`, `'pid=%p,user=%u,db=%d,app=%a,client=%h,txid=%x,qid=%Q '`. Choose from one of the available log formats.
18891
19078
  :param int log_min_duration_statement: Log statements that take more than this number of milliseconds to run, -1 disables.
18892
19079
  :param int log_temp_files: Log statements for each temporary file created larger than this number of kilobytes, -1 disables.
18893
19080
  :param int max_files_per_process: PostgreSQL maximum number of files that can be open per process.
@@ -18908,12 +19095,12 @@ class PgPgUserConfigPg(dict):
18908
19095
  :param str pg_partman_bgw_dot_role: Controls which role to use for pg_partman's scheduled background tasks. Example: `myrolename`.
18909
19096
  :param bool pg_stat_monitor_dot_pgsm_enable_query_plan: Enables or disables query plan monitoring.
18910
19097
  :param int pg_stat_monitor_dot_pgsm_max_buckets: Sets the maximum number of buckets. Example: `10`.
18911
- :param str pg_stat_statements_dot_track: Enum: `all`, `top`, `none`. Controls which statements are counted. Specify top to track top-level statements (those issued directly by clients), all to also track nested statements (such as statements invoked within functions), or none to disable statement statistics collection. The default value is top.
19098
+ :param str pg_stat_statements_dot_track: Enum: `all`, `none`, `top`. Controls which statements are counted. Specify top to track top-level statements (those issued directly by clients), all to also track nested statements (such as statements invoked within functions), or none to disable statement statistics collection. The default value is top.
18912
19099
  :param int temp_file_limit: PostgreSQL temporary file limit in KiB, -1 for unlimited. Example: `5000000`.
18913
19100
  :param str timezone: PostgreSQL service timezone. Example: `Europe/Helsinki`.
18914
19101
  :param int track_activity_query_size: Specifies the number of bytes reserved to track the currently executing command for each active session. Example: `1024`.
18915
19102
  :param str track_commit_timestamp: Enum: `off`, `on`. Record commit time of transactions.
18916
- :param str track_functions: Enum: `all`, `pl`, `none`. Enables tracking of function call counts and time used.
19103
+ :param str track_functions: Enum: `all`, `none`, `pl`. Enables tracking of function call counts and time used.
18917
19104
  :param str track_io_timing: Enum: `off`, `on`. Enables timing of database I/O calls. This parameter is off by default, because it will repeatedly query the operating system for the current time, which may cause significant overhead on some platforms.
18918
19105
  :param int wal_sender_timeout: Terminate replication connections that are inactive for longer than this amount of time, in milliseconds. Setting this value to zero disables the timeout. Example: `60000`.
18919
19106
  :param int wal_writer_delay: WAL flush interval in milliseconds. Note that setting this value to lower than the default 200ms may negatively impact performance. Example: `50`.
@@ -19165,7 +19352,7 @@ class PgPgUserConfigPg(dict):
19165
19352
  @pulumi.getter(name="logErrorVerbosity")
19166
19353
  def log_error_verbosity(self) -> Optional[str]:
19167
19354
  """
19168
- Enum: `TERSE`, `DEFAULT`, `VERBOSE`. Controls the amount of detail written in the server log for each message that is logged.
19355
+ Enum: `DEFAULT`, `TERSE`, `VERBOSE`. Controls the amount of detail written in the server log for each message that is logged.
19169
19356
  """
19170
19357
  return pulumi.get(self, "log_error_verbosity")
19171
19358
 
@@ -19173,7 +19360,7 @@ class PgPgUserConfigPg(dict):
19173
19360
  @pulumi.getter(name="logLinePrefix")
19174
19361
  def log_line_prefix(self) -> Optional[str]:
19175
19362
  """
19176
- Enum: `'pid=%p,user=%u,db=%d,app=%a,client=%h '`, `'%t [%p]: [%l-1] user=%u,db=%d,app=%a,client=%h '`, `'%m [%p] %q[user=%u,db=%d,app=%a] '`, `'pid=%p,user=%u,db=%d,app=%a,client=%h,txid=%x,qid=%Q '`. Choose from one of the available log formats.
19363
+ Enum: `'%m [%p] %q[user=%u,db=%d,app=%a] '`, `'%t [%p]: [%l-1] user=%u,db=%d,app=%a,client=%h '`, `'pid=%p,user=%u,db=%d,app=%a,client=%h '`, `'pid=%p,user=%u,db=%d,app=%a,client=%h,txid=%x,qid=%Q '`. Choose from one of the available log formats.
19177
19364
  """
19178
19365
  return pulumi.get(self, "log_line_prefix")
19179
19366
 
@@ -19341,7 +19528,7 @@ class PgPgUserConfigPg(dict):
19341
19528
  @pulumi.getter(name="pgStatStatementsDotTrack")
19342
19529
  def pg_stat_statements_dot_track(self) -> Optional[str]:
19343
19530
  """
19344
- Enum: `all`, `top`, `none`. Controls which statements are counted. Specify top to track top-level statements (those issued directly by clients), all to also track nested statements (such as statements invoked within functions), or none to disable statement statistics collection. The default value is top.
19531
+ Enum: `all`, `none`, `top`. Controls which statements are counted. Specify top to track top-level statements (those issued directly by clients), all to also track nested statements (such as statements invoked within functions), or none to disable statement statistics collection. The default value is top.
19345
19532
  """
19346
19533
  return pulumi.get(self, "pg_stat_statements_dot_track")
19347
19534
 
@@ -19381,7 +19568,7 @@ class PgPgUserConfigPg(dict):
19381
19568
  @pulumi.getter(name="trackFunctions")
19382
19569
  def track_functions(self) -> Optional[str]:
19383
19570
  """
19384
- Enum: `all`, `pl`, `none`. Enables tracking of function call counts and time used.
19571
+ Enum: `all`, `none`, `pl`. Enables tracking of function call counts and time used.
19385
19572
  """
19386
19573
  return pulumi.get(self, "track_functions")
19387
19574
 
@@ -19784,7 +19971,7 @@ class PgPgUserConfigPgbouncer(dict):
19784
19971
  """
19785
19972
  :param int autodb_idle_timeout: If the automatically created database pools have been unused this many seconds, they are freed. If 0 then timeout is disabled. (seconds). Default: `3600`.
19786
19973
  :param int autodb_max_db_connections: Do not allow more than this many server connections per database (regardless of user). Setting it to 0 means unlimited. Example: `0`.
19787
- :param str autodb_pool_mode: Enum: `session`, `transaction`, `statement`. PGBouncer pool mode. Default: `transaction`.
19974
+ :param str autodb_pool_mode: Enum: `session`, `statement`, `transaction`. PGBouncer pool mode. Default: `transaction`.
19788
19975
  :param int autodb_pool_size: If non-zero then create automatically a pool of that size per user when a pool doesn't exist. Default: `0`.
19789
19976
  :param Sequence[str] ignore_startup_parameters: List of parameters to ignore when given in startup packet.
19790
19977
  :param int max_prepared_statements: PgBouncer tracks protocol-level named prepared statements related commands sent by the client in transaction and statement pooling modes when max*prepared*statements is set to a non-zero value. Setting it to 0 disables prepared statements. max*prepared*statements defaults to 100, and its maximum is 3000. Default: `100`.
@@ -19834,7 +20021,7 @@ class PgPgUserConfigPgbouncer(dict):
19834
20021
  @pulumi.getter(name="autodbPoolMode")
19835
20022
  def autodb_pool_mode(self) -> Optional[str]:
19836
20023
  """
19837
- Enum: `session`, `transaction`, `statement`. PGBouncer pool mode. Default: `transaction`.
20024
+ Enum: `session`, `statement`, `transaction`. PGBouncer pool mode. Default: `transaction`.
19838
20025
  """
19839
20026
  return pulumi.get(self, "autodb_pool_mode")
19840
20027
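The PostgreSQL hunks above likewise only reorder enum lists (log verbosity, log line prefix, pg_stat_statements.track, track_functions, PgBouncer pool mode). A combined sketch of those settings (service details are placeholders):

import pulumi_aiven as aiven

pg = aiven.Pg(
    "example-pg",
    project="my-project",
    cloud_name="google-europe-west1",
    plan="business-4",
    service_name="example-pg",
    pg_user_config=aiven.PgPgUserConfigArgs(
        synchronous_replication="quorum",        # off or quorum, plan permitting
        pg=aiven.PgPgUserConfigPgArgs(
            log_error_verbosity="TERSE",         # DEFAULT, TERSE or VERBOSE
            pg_stat_statements_dot_track="top",  # all, none or top
            track_functions="pl",                # all, none or pl
        ),
        pgbouncer=aiven.PgPgUserConfigPgbouncerArgs(
            autodb_pool_mode="transaction",      # session, statement or transaction
        ),
    ),
)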
 
@@ -20530,7 +20717,7 @@ class RedisRedisUserConfig(dict):
20530
20717
  :param int redis_io_threads: Set Redis IO thread count. Changing this will cause a restart of the Redis service. Example: `1`.
20531
20718
  :param int redis_lfu_decay_time: LFU maxmemory-policy counter decay time in minutes. Default: `1`.
20532
20719
  :param int redis_lfu_log_factor: Counter logarithm factor for volatile-lfu and allkeys-lfu maxmemory-policies. Default: `10`.
20533
- :param str redis_maxmemory_policy: Enum: `noeviction`, `allkeys-lru`, `volatile-lru`, `allkeys-random`, `volatile-random`, `volatile-ttl`, `volatile-lfu`, `allkeys-lfu`. Redis maxmemory-policy. Default: `noeviction`.
20720
+ :param str redis_maxmemory_policy: Enum: `allkeys-lfu`, `allkeys-lru`, `allkeys-random`, `noeviction`, `volatile-lfu`, `volatile-lru`, `volatile-random`, `volatile-ttl`. Redis maxmemory-policy. Default: `noeviction`.
20534
20721
  :param str redis_notify_keyspace_events: Set notify-keyspace-events option.
20535
20722
  :param int redis_number_of_databases: Set number of Redis databases. Changing this will cause a restart of the Redis service. Example: `16`.
20536
20723
  :param str redis_persistence: Enum: `off`, `rdb`. When persistence is `rdb`, Redis does RDB dumps each 10 minutes if any key is changed. Also RDB dumps are done according to the backup schedule for backup purposes. When persistence is `off`, no RDB dumps or backups are done, so data can be lost at any moment if the service is restarted for any reason, or if the service is powered off. Also, the service can't be forked.
@@ -20730,7 +20917,7 @@ class RedisRedisUserConfig(dict):
20730
20917
  @pulumi.getter(name="redisMaxmemoryPolicy")
20731
20918
  def redis_maxmemory_policy(self) -> Optional[str]:
20732
20919
  """
20733
- Enum: `noeviction`, `allkeys-lru`, `volatile-lru`, `allkeys-random`, `volatile-random`, `volatile-ttl`, `volatile-lfu`, `allkeys-lfu`. Redis maxmemory-policy. Default: `noeviction`.
20920
+ Enum: `allkeys-lfu`, `allkeys-lru`, `allkeys-random`, `noeviction`, `volatile-lfu`, `volatile-lru`, `volatile-random`, `volatile-ttl`. Redis maxmemory-policy. Default: `noeviction`.
20734
20921
  """
20735
20922
  return pulumi.get(self, "redis_maxmemory_policy")
20736
20923
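A short sketch of choosing one of the maxmemory policies listed above (service details are placeholders):

import pulumi_aiven as aiven

redis = aiven.Redis(
    "example-redis",
    project="my-project",
    cloud_name="google-europe-west1",
    plan="business-4",
    service_name="example-redis",
    redis_user_config=aiven.RedisRedisUserConfigArgs(
        redis_maxmemory_policy="allkeys-lru",  # one of the eight policies documented above
        redis_persistence="rdb",               # rdb enables periodic dumps and backups
    ),
)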
 
@@ -21243,11 +21430,11 @@ class ServiceIntegrationClickhouseKafkaUserConfigTable(dict):
21243
21430
  thread_per_consumer: Optional[bool] = None):
21244
21431
  """
21245
21432
  :param Sequence['ServiceIntegrationClickhouseKafkaUserConfigTableColumnArgs'] columns: Table columns
21246
- :param str data_format: Enum: `Avro`, `CSV`, `JSONAsString`, `JSONCompactEachRow`, `JSONCompactStringsEachRow`, `JSONEachRow`, `JSONStringsEachRow`, `MsgPack`, `TSKV`, `TSV`, `TabSeparated`, `RawBLOB`, `AvroConfluent`, `Parquet`. Message data format. Default: `JSONEachRow`.
21433
+ :param str data_format: Enum: `Avro`, `AvroConfluent`, `CSV`, `JSONAsString`, `JSONCompactEachRow`, `JSONCompactStringsEachRow`, `JSONEachRow`, `JSONStringsEachRow`, `MsgPack`, `Parquet`, `RawBLOB`, `TSKV`, `TSV`, `TabSeparated`. Message data format. Default: `JSONEachRow`.
21247
21434
  :param str group_name: Kafka consumers group. Default: `clickhouse`.
21248
21435
  :param str name: Name of the table. Example: `events`.
21249
21436
  :param Sequence['ServiceIntegrationClickhouseKafkaUserConfigTableTopicArgs'] topics: Kafka topics
21250
- :param str auto_offset_reset: Enum: `smallest`, `earliest`, `beginning`, `largest`, `latest`, `end`. Action to take when there is no initial offset in offset store or the desired offset is out of range. Default: `earliest`.
21437
+ :param str auto_offset_reset: Enum: `beginning`, `earliest`, `end`, `largest`, `latest`, `smallest`. Action to take when there is no initial offset in offset store or the desired offset is out of range. Default: `earliest`.
21251
21438
  :param str date_time_input_format: Enum: `basic`, `best_effort`, `best_effort_us`. Method to read DateTime from text input formats. Default: `basic`.
21252
21439
  :param str handle_error_mode: Enum: `default`, `stream`. How to handle errors for Kafka engine. Default: `default`.
21253
21440
  :param int max_block_size: Number of row collected by poll(s) for flushing data from Kafka. Default: `0`.
@@ -21296,7 +21483,7 @@ class ServiceIntegrationClickhouseKafkaUserConfigTable(dict):
21296
21483
  @pulumi.getter(name="dataFormat")
21297
21484
  def data_format(self) -> str:
21298
21485
  """
21299
- Enum: `Avro`, `CSV`, `JSONAsString`, `JSONCompactEachRow`, `JSONCompactStringsEachRow`, `JSONEachRow`, `JSONStringsEachRow`, `MsgPack`, `TSKV`, `TSV`, `TabSeparated`, `RawBLOB`, `AvroConfluent`, `Parquet`. Message data format. Default: `JSONEachRow`.
21486
+ Enum: `Avro`, `AvroConfluent`, `CSV`, `JSONAsString`, `JSONCompactEachRow`, `JSONCompactStringsEachRow`, `JSONEachRow`, `JSONStringsEachRow`, `MsgPack`, `Parquet`, `RawBLOB`, `TSKV`, `TSV`, `TabSeparated`. Message data format. Default: `JSONEachRow`.
21300
21487
  """
21301
21488
  return pulumi.get(self, "data_format")
21302
21489
 
@@ -21328,7 +21515,7 @@ class ServiceIntegrationClickhouseKafkaUserConfigTable(dict):
21328
21515
  @pulumi.getter(name="autoOffsetReset")
21329
21516
  def auto_offset_reset(self) -> Optional[str]:
21330
21517
  """
21331
- Enum: `smallest`, `earliest`, `beginning`, `largest`, `latest`, `end`. Action to take when there is no initial offset in offset store or the desired offset is out of range. Default: `earliest`.
21518
+ Enum: `beginning`, `earliest`, `end`, `largest`, `latest`, `smallest`. Action to take when there is no initial offset in offset store or the desired offset is out of range. Default: `earliest`.
21332
21519
  """
21333
21520
  return pulumi.get(self, "auto_offset_reset")
21334
21521
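The ClickHouse-Kafka table settings above use the same values as before, just listed in sorted order. A hedged sketch of a clickhouse_kafka integration table using them (service names are placeholders):

import pulumi_aiven as aiven

integration = aiven.ServiceIntegration(
    "clickhouse-kafka",
    project="my-project",
    integration_type="clickhouse_kafka",
    source_service_name="example-kafka",
    destination_service_name="example-clickhouse",
    clickhouse_kafka_user_config=aiven.ServiceIntegrationClickhouseKafkaUserConfigArgs(
        tables=[
            aiven.ServiceIntegrationClickhouseKafkaUserConfigTableArgs(
                name="events",
                group_name="clickhouse",
                data_format="JSONEachRow",      # default message format
                auto_offset_reset="earliest",   # where to start when no offset is stored
                columns=[
                    aiven.ServiceIntegrationClickhouseKafkaUserConfigTableColumnArgs(
                        name="id", type="UInt64"),
                    aiven.ServiceIntegrationClickhouseKafkaUserConfigTableColumnArgs(
                        name="payload", type="String"),
                ],
                topics=[
                    aiven.ServiceIntegrationClickhouseKafkaUserConfigTableTopicArgs(
                        name="events"),
                ],
            ),
        ],
    ),
)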
 
@@ -21832,6 +22019,70 @@ class ServiceIntegrationDatadogUserConfigRedis(dict):
21832
22019
  return pulumi.get(self, "command_stats_enabled")
21833
22020
 
21834
22021
 
22022
+ @pulumi.output_type
22023
+ class ServiceIntegrationEndpointAutoscalerUserConfig(dict):
22024
+ def __init__(__self__, *,
22025
+ autoscalings: Sequence['outputs.ServiceIntegrationEndpointAutoscalerUserConfigAutoscaling']):
22026
+ """
22027
+ :param Sequence['ServiceIntegrationEndpointAutoscalerUserConfigAutoscalingArgs'] autoscalings: Configure autoscaling thresholds for a service
22028
+ """
22029
+ pulumi.set(__self__, "autoscalings", autoscalings)
22030
+
22031
+ @property
22032
+ @pulumi.getter
22033
+ def autoscalings(self) -> Sequence['outputs.ServiceIntegrationEndpointAutoscalerUserConfigAutoscaling']:
22034
+ """
22035
+ Configure autoscaling thresholds for a service
22036
+ """
22037
+ return pulumi.get(self, "autoscalings")
22038
+
22039
+
22040
+ @pulumi.output_type
22041
+ class ServiceIntegrationEndpointAutoscalerUserConfigAutoscaling(dict):
22042
+ @staticmethod
22043
+ def __key_warning(key: str):
22044
+ suggest = None
22045
+ if key == "capGb":
22046
+ suggest = "cap_gb"
22047
+
22048
+ if suggest:
22049
+ pulumi.log.warn(f"Key '{key}' not found in ServiceIntegrationEndpointAutoscalerUserConfigAutoscaling. Access the value via the '{suggest}' property getter instead.")
22050
+
22051
+ def __getitem__(self, key: str) -> Any:
22052
+ ServiceIntegrationEndpointAutoscalerUserConfigAutoscaling.__key_warning(key)
22053
+ return super().__getitem__(key)
22054
+
22055
+ def get(self, key: str, default = None) -> Any:
22056
+ ServiceIntegrationEndpointAutoscalerUserConfigAutoscaling.__key_warning(key)
22057
+ return super().get(key, default)
22058
+
22059
+ def __init__(__self__, *,
22060
+ cap_gb: int,
22061
+ type: str):
22062
+ """
22063
+ :param int cap_gb: The maximum total disk size (in gb) to allow autoscaler to scale up to. Example: `300`.
22064
+ :param str type: Enum: `autoscale_disk`. Type of autoscale event.
22065
+ """
22066
+ pulumi.set(__self__, "cap_gb", cap_gb)
22067
+ pulumi.set(__self__, "type", type)
22068
+
22069
+ @property
22070
+ @pulumi.getter(name="capGb")
22071
+ def cap_gb(self) -> int:
22072
+ """
22073
+ The maximum total disk size (in gb) to allow autoscaler to scale up to. Example: `300`.
22074
+ """
22075
+ return pulumi.get(self, "cap_gb")
22076
+
22077
+ @property
22078
+ @pulumi.getter
22079
+ def type(self) -> str:
22080
+ """
22081
+ Enum: `autoscale_disk`. Type of autoscale event.
22082
+ """
22083
+ return pulumi.get(self, "type")
22084
+
22085
+
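The two classes added above back the new autoscaler endpoint configuration in this release. A minimal usage sketch, assuming the matching autoscaler_user_config input and an "autoscaler" endpoint type (project and names are placeholders); the endpoint would then be attached to a service through a service integration:

import pulumi_aiven as aiven

autoscaler_endpoint = aiven.ServiceIntegrationEndpoint(
    "disk-autoscaler",
    project="my-project",
    endpoint_name="disk-autoscaler-300g",
    endpoint_type="autoscaler",
    autoscaler_user_config=aiven.ServiceIntegrationEndpointAutoscalerUserConfigArgs(
        autoscalings=[
            aiven.ServiceIntegrationEndpointAutoscalerUserConfigAutoscalingArgs(
                type="autoscale_disk",  # only documented autoscale event type
                cap_gb=300,             # upper bound for automatic disk scaling, in GB
            ),
        ],
    ),
)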
21835
22086
  @pulumi.output_type
21836
22087
  class ServiceIntegrationEndpointDatadogUserConfig(dict):
21837
22088
  @staticmethod
@@ -21876,7 +22127,7 @@ class ServiceIntegrationEndpointDatadogUserConfig(dict):
21876
22127
  :param int kafka_consumer_check_instances: Number of separate instances to fetch kafka consumer statistics with. Example: `8`.
21877
22128
  :param int kafka_consumer_stats_timeout: Number of seconds that datadog will wait to get consumer statistics from brokers. Example: `60`.
21878
22129
  :param int max_partition_contexts: Maximum number of partition contexts to send. Example: `32000`.
21879
- :param str site: Enum: `datadoghq.com`, `datadoghq.eu`, `us3.datadoghq.com`, `us5.datadoghq.com`, `ddog-gov.com`, `ap1.datadoghq.com`. Datadog intake site. Defaults to datadoghq.com.
22130
+ :param str site: Enum: `ap1.datadoghq.com`, `datadoghq.com`, `datadoghq.eu`, `ddog-gov.com`, `us3.datadoghq.com`, `us5.datadoghq.com`. Datadog intake site. Defaults to datadoghq.com.
21880
22131
  """
21881
22132
  pulumi.set(__self__, "datadog_api_key", datadog_api_key)
21882
22133
  if datadog_tags is not None:
@@ -21944,7 +22195,7 @@ class ServiceIntegrationEndpointDatadogUserConfig(dict):
21944
22195
  @pulumi.getter
21945
22196
  def site(self) -> Optional[str]:
21946
22197
  """
21947
- Enum: `datadoghq.com`, `datadoghq.eu`, `us3.datadoghq.com`, `us5.datadoghq.com`, `ddog-gov.com`, `ap1.datadoghq.com`. Datadog intake site. Defaults to datadoghq.com.
22198
+ Enum: `ap1.datadoghq.com`, `datadoghq.com`, `datadoghq.eu`, `ddog-gov.com`, `us3.datadoghq.com`, `us5.datadoghq.com`. Datadog intake site. Defaults to datadoghq.com.
21948
22199
  """
21949
22200
  return pulumi.get(self, "site")
21950
22201
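A sketch of a Datadog endpoint pointing at one of the intake sites listed above (the API key and project are placeholders):

import pulumi_aiven as aiven

datadog_endpoint = aiven.ServiceIntegrationEndpoint(
    "datadog",
    project="my-project",
    endpoint_name="datadog-eu",
    endpoint_type="datadog",
    datadog_user_config=aiven.ServiceIntegrationEndpointDatadogUserConfigArgs(
        datadog_api_key="<datadog-api-key>",
        site="datadoghq.eu",            # any of the six intake sites above
        max_partition_contexts=32000,
    ),
)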
 
@@ -22478,7 +22729,7 @@ class ServiceIntegrationEndpointExternalKafkaUserConfig(dict):
22478
22729
  ssl_endpoint_identification_algorithm: Optional[str] = None):
22479
22730
  """
22480
22731
  :param str bootstrap_servers: Bootstrap servers. Example: `10.0.0.1:9092,10.0.0.2:9092`.
22481
- :param str security_protocol: Enum: `PLAINTEXT`, `SSL`, `SASL_PLAINTEXT`, `SASL_SSL`. Security protocol.
22732
+ :param str security_protocol: Enum: `PLAINTEXT`, `SASL_PLAINTEXT`, `SASL_SSL`, `SSL`. Security protocol.
22482
22733
  :param str sasl_mechanism: Enum: `PLAIN`, `SCRAM-SHA-256`, `SCRAM-SHA-512`. SASL mechanism used for connections to the Kafka server.
22483
22734
  :param str sasl_plain_password: Password for SASL PLAIN mechanism in the Kafka server. Example: `admin`.
22484
22735
  :param str sasl_plain_username: Username for SASL PLAIN mechanism in the Kafka server. Example: `admin`.
@@ -22525,7 +22776,7 @@ class ServiceIntegrationEndpointExternalKafkaUserConfig(dict):
22525
22776
  @pulumi.getter(name="securityProtocol")
22526
22777
  def security_protocol(self) -> str:
22527
22778
  """
22528
- Enum: `PLAINTEXT`, `SSL`, `SASL_PLAINTEXT`, `SASL_SSL`. Security protocol.
22779
+ Enum: `PLAINTEXT`, `SASL_PLAINTEXT`, `SASL_SSL`, `SSL`. Security protocol.
22529
22780
  """
22530
22781
  return pulumi.get(self, "security_protocol")
22531
22782
 
@@ -22835,7 +23086,7 @@ class ServiceIntegrationEndpointExternalPostgresql(dict):
22835
23086
  :param str ssl_client_key: Client key. Example: `-----BEGIN PRIVATE KEY-----
22836
23087
  ...
22837
23088
  -----END PRIVATE KEY-----`.
22838
- :param str ssl_mode: Enum: `disable`, `allow`, `prefer`, `require`, `verify-ca`, `verify-full`. SSL mode to use for the connection. Please note that Aiven requires TLS for all connections to external PostgreSQL services. Default: `verify-full`.
23089
+ :param str ssl_mode: Enum: `allow`, `disable`, `prefer`, `require`, `verify-ca`, `verify-full`. SSL mode to use for the connection. Please note that Aiven requires TLS for all connections to external PostgreSQL services. Default: `verify-full`.
22839
23090
  :param str ssl_root_cert: SSL Root Cert. Example: `-----BEGIN CERTIFICATE-----
22840
23091
  ...
22841
23092
  -----END CERTIFICATE-----
@@ -22922,7 +23173,7 @@ class ServiceIntegrationEndpointExternalPostgresql(dict):
22922
23173
  @pulumi.getter(name="sslMode")
22923
23174
  def ssl_mode(self) -> Optional[str]:
22924
23175
  """
22925
- Enum: `disable`, `allow`, `prefer`, `require`, `verify-ca`, `verify-full`. SSL mode to use for the connection. Please note that Aiven requires TLS for all connections to external PostgreSQL services. Default: `verify-full`.
23176
+ Enum: `allow`, `disable`, `prefer`, `require`, `verify-ca`, `verify-full`. SSL mode to use for the connection. Please note that Aiven requires TLS for all connections to external PostgreSQL services. Default: `verify-full`.
22926
23177
  """
22927
23178
  return pulumi.get(self, "ssl_mode")
22928
23179
 
@@ -22938,6 +23189,70 @@ class ServiceIntegrationEndpointExternalPostgresql(dict):
22938
23189
  return pulumi.get(self, "ssl_root_cert")
22939
23190
 
22940
23191
 
23192
+ @pulumi.output_type
23193
+ class ServiceIntegrationEndpointExternalPrometheusUserConfig(dict):
23194
+ @staticmethod
23195
+ def __key_warning(key: str):
23196
+ suggest = None
23197
+ if key == "basicAuthPassword":
23198
+ suggest = "basic_auth_password"
23199
+ elif key == "basicAuthUsername":
23200
+ suggest = "basic_auth_username"
23201
+ elif key == "serviceUri":
23202
+ suggest = "service_uri"
23203
+
23204
+ if suggest:
23205
+ pulumi.log.warn(f"Key '{key}' not found in ServiceIntegrationEndpointExternalPrometheusUserConfig. Access the value via the '{suggest}' property getter instead.")
23206
+
23207
+ def __getitem__(self, key: str) -> Any:
23208
+ ServiceIntegrationEndpointExternalPrometheusUserConfig.__key_warning(key)
23209
+ return super().__getitem__(key)
23210
+
23211
+ def get(self, key: str, default = None) -> Any:
23212
+ ServiceIntegrationEndpointExternalPrometheusUserConfig.__key_warning(key)
23213
+ return super().get(key, default)
23214
+
23215
+ def __init__(__self__, *,
23216
+ basic_auth_password: Optional[str] = None,
23217
+ basic_auth_username: Optional[str] = None,
23218
+ service_uri: Optional[str] = None):
23219
+ """
23220
+ :param str basic_auth_password: Prometheus basic authentication password. Example: `fhyFNBjj3R`.
23221
+ :param str basic_auth_username: Prometheus basic authentication username. Example: `prom4851`.
23222
+ :param str service_uri: Prometheus enabled write endpoint. Example: `https://write.example.com/`.
23223
+ """
23224
+ if basic_auth_password is not None:
23225
+ pulumi.set(__self__, "basic_auth_password", basic_auth_password)
23226
+ if basic_auth_username is not None:
23227
+ pulumi.set(__self__, "basic_auth_username", basic_auth_username)
23228
+ if service_uri is not None:
23229
+ pulumi.set(__self__, "service_uri", service_uri)
23230
+
23231
+ @property
23232
+ @pulumi.getter(name="basicAuthPassword")
23233
+ def basic_auth_password(self) -> Optional[str]:
23234
+ """
23235
+ Prometheus basic authentication password. Example: `fhyFNBjj3R`.
23236
+ """
23237
+ return pulumi.get(self, "basic_auth_password")
23238
+
23239
+ @property
23240
+ @pulumi.getter(name="basicAuthUsername")
23241
+ def basic_auth_username(self) -> Optional[str]:
23242
+ """
23243
+ Prometheus basic authentication username. Example: `prom4851`.
23244
+ """
23245
+ return pulumi.get(self, "basic_auth_username")
23246
+
23247
+ @property
23248
+ @pulumi.getter(name="serviceUri")
23249
+ def service_uri(self) -> Optional[str]:
23250
+ """
23251
+ Prometheus enabled write endpoint. Example: `https://write.example.com/`.
23252
+ """
23253
+ return pulumi.get(self, "service_uri")
23254
+
23255
+
22941
23256
  @pulumi.output_type
22942
23257
  class ServiceIntegrationEndpointExternalSchemaRegistryUserConfig(dict):
22943
23258
  @staticmethod
@@ -22965,7 +23280,7 @@ class ServiceIntegrationEndpointExternalSchemaRegistryUserConfig(dict):
22965
23280
  basic_auth_password: Optional[str] = None,
22966
23281
  basic_auth_username: Optional[str] = None):
22967
23282
  """
22968
- :param str authentication: Enum: `none`, `basic`. Authentication method.
23283
+ :param str authentication: Enum: `basic`, `none`. Authentication method.
22969
23284
  :param str url: Schema Registry URL. Example: `https://schema-registry.kafka.company.com:28419`.
22970
23285
  :param str basic_auth_password: Basic authentication password. Example: `Zm9vYg==`.
22971
23286
  :param str basic_auth_username: Basic authentication user name. Example: `avnadmin`.
@@ -22981,7 +23296,7 @@ class ServiceIntegrationEndpointExternalSchemaRegistryUserConfig(dict):
22981
23296
  @pulumi.getter
22982
23297
  def authentication(self) -> str:
22983
23298
  """
22984
- Enum: `none`, `basic`. Authentication method.
23299
+ Enum: `basic`, `none`. Authentication method.
22985
23300
  """
22986
23301
  return pulumi.get(self, "authentication")
22987
23302
 
@@ -23141,7 +23456,7 @@ class ServiceIntegrationEndpointRsyslogUserConfig(dict):
23141
23456
  max_message_size: Optional[int] = None,
23142
23457
  sd: Optional[str] = None):
23143
23458
  """
23144
- :param str format: Enum: `rfc5424`, `rfc3164`, `custom`. Message format. Default: `rfc5424`.
23459
+ :param str format: Enum: `custom`, `rfc3164`, `rfc5424`. Message format. Default: `rfc5424`.
23145
23460
  :param int port: Rsyslog server port. Default: `514`.
23146
23461
  :param str server: Rsyslog server IP address or hostname. Example: `logs.example.com`.
23147
23462
  :param bool tls: Require TLS. Default: `true`.
@@ -23182,7 +23497,7 @@ class ServiceIntegrationEndpointRsyslogUserConfig(dict):
23182
23497
  @pulumi.getter
23183
23498
  def format(self) -> str:
23184
23499
  """
23185
- Enum: `rfc5424`, `rfc3164`, `custom`. Message format. Default: `rfc5424`.
23500
+ Enum: `custom`, `rfc3164`, `rfc5424`. Message format. Default: `rfc5424`.
23186
23501
  """
23187
23502
  return pulumi.get(self, "format")
23188
23503
 
@@ -23764,7 +24079,7 @@ class ServiceIntegrationKafkaMirrormakerUserConfigKafkaMirrormaker(dict):
23764
24079
  :param int consumer_max_poll_records: Set consumer max.poll.records. The default is 500. Example: `500`.
23765
24080
  :param int producer_batch_size: The batch size in bytes producer will attempt to collect before publishing to broker. Example: `1024`.
23766
24081
  :param int producer_buffer_memory: The amount of bytes producer can use for buffering data before publishing to broker. Example: `8388608`.
23767
- :param str producer_compression_type: Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
24082
+ :param str producer_compression_type: Enum: `gzip`, `lz4`, `none`, `snappy`, `zstd`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
23768
24083
  :param int producer_linger_ms: The linger time (ms) for waiting new data to arrive for publishing. Example: `100`.
23769
24084
  :param int producer_max_request_size: The maximum request size in bytes. Example: `1048576`.
23770
24085
  """
@@ -23829,7 +24144,7 @@ class ServiceIntegrationKafkaMirrormakerUserConfigKafkaMirrormaker(dict):
23829
24144
  @pulumi.getter(name="producerCompressionType")
23830
24145
  def producer_compression_type(self) -> Optional[str]:
23831
24146
  """
23832
- Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
24147
+ Enum: `gzip`, `lz4`, `none`, `snappy`, `zstd`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
23833
24148
  """
23834
24149
  return pulumi.get(self, "producer_compression_type")
23835
24150
 
@@ -24822,6 +25137,10 @@ class ThanosThanosUserConfig(dict):
24822
25137
  suggest = "public_access"
24823
25138
  elif key == "queryFrontend":
24824
25139
  suggest = "query_frontend"
25140
+ elif key == "receiverIngesting":
25141
+ suggest = "receiver_ingesting"
25142
+ elif key == "receiverRouting":
25143
+ suggest = "receiver_routing"
24825
25144
  elif key == "serviceLog":
24826
25145
  suggest = "service_log"
24827
25146
  elif key == "staticIps":
@@ -24848,8 +25167,12 @@ class ThanosThanosUserConfig(dict):
24848
25167
  public_access: Optional['outputs.ThanosThanosUserConfigPublicAccess'] = None,
24849
25168
  query: Optional['outputs.ThanosThanosUserConfigQuery'] = None,
24850
25169
  query_frontend: Optional['outputs.ThanosThanosUserConfigQueryFrontend'] = None,
25170
+ receiver_ingesting: Optional[Mapping[str, str]] = None,
25171
+ receiver_routing: Optional[Mapping[str, str]] = None,
25172
+ ruler: Optional[Mapping[str, str]] = None,
24851
25173
  service_log: Optional[bool] = None,
24852
- static_ips: Optional[bool] = None):
25174
+ static_ips: Optional[bool] = None,
25175
+ store: Optional[Mapping[str, str]] = None):
24853
25176
  """
24854
25177
  :param 'ThanosThanosUserConfigCompactorArgs' compactor: ThanosCompactor
24855
25178
  :param Mapping[str, str] env: Environmental variables.
@@ -24860,8 +25183,12 @@ class ThanosThanosUserConfig(dict):
24860
25183
  :param 'ThanosThanosUserConfigPublicAccessArgs' public_access: Allow access to selected service ports from the public Internet
24861
25184
  :param 'ThanosThanosUserConfigQueryArgs' query: ThanosQuery
24862
25185
  :param 'ThanosThanosUserConfigQueryFrontendArgs' query_frontend: ThanosQueryFrontend
25186
+ :param Mapping[str, str] receiver_ingesting: CommonReceive.
25187
+ :param Mapping[str, str] receiver_routing: ThanosReceiveRouting.
25188
+ :param Mapping[str, str] ruler: ThanosRuler.
24863
25189
  :param bool service_log: Store logs for the service so that they are available in the HTTP API and console.
24864
25190
  :param bool static_ips: Use static public IP addresses.
25191
+ :param Mapping[str, str] store: ThanosStore.
24865
25192
  """
24866
25193
  if compactor is not None:
24867
25194
  pulumi.set(__self__, "compactor", compactor)
@@ -24881,10 +25208,18 @@ class ThanosThanosUserConfig(dict):
24881
25208
  pulumi.set(__self__, "query", query)
24882
25209
  if query_frontend is not None:
24883
25210
  pulumi.set(__self__, "query_frontend", query_frontend)
25211
+ if receiver_ingesting is not None:
25212
+ pulumi.set(__self__, "receiver_ingesting", receiver_ingesting)
25213
+ if receiver_routing is not None:
25214
+ pulumi.set(__self__, "receiver_routing", receiver_routing)
25215
+ if ruler is not None:
25216
+ pulumi.set(__self__, "ruler", ruler)
24884
25217
  if service_log is not None:
24885
25218
  pulumi.set(__self__, "service_log", service_log)
24886
25219
  if static_ips is not None:
24887
25220
  pulumi.set(__self__, "static_ips", static_ips)
25221
+ if store is not None:
25222
+ pulumi.set(__self__, "store", store)
24888
25223
 
24889
25224
  @property
24890
25225
  @pulumi.getter
@@ -24960,6 +25295,30 @@ class ThanosThanosUserConfig(dict):
24960
25295
  """
24961
25296
  return pulumi.get(self, "query_frontend")
24962
25297
 
25298
+ @property
25299
+ @pulumi.getter(name="receiverIngesting")
25300
+ def receiver_ingesting(self) -> Optional[Mapping[str, str]]:
25301
+ """
25302
+ CommonReceive.
25303
+ """
25304
+ return pulumi.get(self, "receiver_ingesting")
25305
+
25306
+ @property
25307
+ @pulumi.getter(name="receiverRouting")
25308
+ def receiver_routing(self) -> Optional[Mapping[str, str]]:
25309
+ """
25310
+ ThanosReceiveRouting.
25311
+ """
25312
+ return pulumi.get(self, "receiver_routing")
25313
+
25314
+ @property
25315
+ @pulumi.getter
25316
+ def ruler(self) -> Optional[Mapping[str, str]]:
25317
+ """
25318
+ ThanosRuler.
25319
+ """
25320
+ return pulumi.get(self, "ruler")
25321
+
24963
25322
  @property
24964
25323
  @pulumi.getter(name="serviceLog")
24965
25324
  def service_log(self) -> Optional[bool]:
@@ -24976,6 +25335,14 @@ class ThanosThanosUserConfig(dict):
24976
25335
  """
24977
25336
  return pulumi.get(self, "static_ips")
24978
25337
 
25338
+ @property
25339
+ @pulumi.getter
25340
+ def store(self) -> Optional[Mapping[str, str]]:
25341
+ """
25342
+ ThanosStore.
25343
+ """
25344
+ return pulumi.get(self, "store")
25345
+
24979
25346
 
24980
25347
  @pulumi.output_type
24981
25348
  class ThanosThanosUserConfigCompactor(dict):
@@ -25702,7 +26069,7 @@ class ValkeyValkeyUserConfig(dict):
25702
26069
  :param int valkey_io_threads: Set Valkey IO thread count. Changing this will cause a restart of the Valkey service. Example: `1`.
25703
26070
  :param int valkey_lfu_decay_time: LFU maxmemory-policy counter decay time in minutes. Default: `1`.
25704
26071
  :param int valkey_lfu_log_factor: Counter logarithm factor for volatile-lfu and allkeys-lfu maxmemory-policies. Default: `10`.
25705
- :param str valkey_maxmemory_policy: Enum: `noeviction`, `allkeys-lru`, `volatile-lru`, `allkeys-random`, `volatile-random`, `volatile-ttl`, `volatile-lfu`, `allkeys-lfu`. Valkey maxmemory-policy. Default: `noeviction`.
26072
+ :param str valkey_maxmemory_policy: Enum: `allkeys-lfu`, `allkeys-lru`, `allkeys-random`, `noeviction`, `volatile-lfu`, `volatile-lru`, `volatile-random`, `volatile-ttl`. Valkey maxmemory-policy. Default: `noeviction`.
25706
26073
  :param str valkey_notify_keyspace_events: Set notify-keyspace-events option.
25707
26074
  :param int valkey_number_of_databases: Set number of Valkey databases. Changing this will cause a restart of the Valkey service. Example: `16`.
25708
26075
  :param str valkey_persistence: Enum: `off`, `rdb`. When persistence is `rdb`, Valkey does RDB dumps each 10 minutes if any key is changed. Also RDB dumps are done according to backup schedule for backup purposes. When persistence is `off`, no RDB dumps and backups are done, so data can be lost at any moment if service is restarted for any reason, or if service is powered off. Also service can't be forked.
@@ -25920,7 +26287,7 @@ class ValkeyValkeyUserConfig(dict):
25920
26287
  @pulumi.getter(name="valkeyMaxmemoryPolicy")
25921
26288
  def valkey_maxmemory_policy(self) -> Optional[str]:
25922
26289
  """
25923
- Enum: `noeviction`, `allkeys-lru`, `volatile-lru`, `allkeys-random`, `volatile-random`, `volatile-ttl`, `volatile-lfu`, `allkeys-lfu`. Valkey maxmemory-policy. Default: `noeviction`.
26290
+ Enum: `allkeys-lfu`, `allkeys-lru`, `allkeys-random`, `noeviction`, `volatile-lfu`, `volatile-lru`, `volatile-random`, `volatile-ttl`. Valkey maxmemory-policy. Default: `noeviction`.
25924
26291
  """
25925
26292
  return pulumi.get(self, "valkey_maxmemory_policy")
25926
26293
 
@@ -26385,7 +26752,6 @@ class GetCassandaCassandraUserConfigResult(dict):
26385
26752
 
26386
26753
  @property
26387
26754
  @pulumi.getter(name="additionalBackupRegions")
26388
- @_utilities.deprecated("""This property is deprecated.""")
26389
26755
  def additional_backup_regions(self) -> Optional[str]:
26390
26756
  """
26391
26757
  Additional Cloud Regions for Backup Replication.
@@ -26910,7 +27276,6 @@ class GetCassandraCassandraUserConfigResult(dict):
26910
27276
 
26911
27277
  @property
26912
27278
  @pulumi.getter(name="additionalBackupRegions")
26913
- @_utilities.deprecated("""This property is deprecated.""")
26914
27279
  def additional_backup_regions(self) -> Optional[str]:
26915
27280
  """
26916
27281
  Additional Cloud Regions for Backup Replication.
@@ -28048,7 +28413,7 @@ class GetDragonflyDragonflyUserConfigResult(dict):
28048
28413
  static_ips: Optional[bool] = None):
28049
28414
  """
28050
28415
  :param bool cache_mode: Evict entries when getting close to maxmemory limit. Default: `false`.
28051
- :param str dragonfly_persistence: Enum: `off`, `rdb`, `dfs`. When persistence is `rdb` or `dfs`, Dragonfly does RDB or DFS dumps every 10 minutes. Dumps are done according to the backup schedule for backup purposes. When persistence is `off`, no RDB/DFS dumps or backups are done, so data can be lost at any moment if the service is restarted for any reason, or if the service is powered off. Also, the service can't be forked.
28416
+ :param str dragonfly_persistence: Enum: `dfs`, `off`, `rdb`. When persistence is `rdb` or `dfs`, Dragonfly does RDB or DFS dumps every 10 minutes. Dumps are done according to the backup schedule for backup purposes. When persistence is `off`, no RDB/DFS dumps or backups are done, so data can be lost at any moment if the service is restarted for any reason, or if the service is powered off. Also, the service can't be forked.
28052
28417
  :param bool dragonfly_ssl: Require SSL to access Dragonfly. Default: `true`.
28053
28418
  :param Sequence['GetDragonflyDragonflyUserConfigIpFilterObjectArgs'] ip_filter_objects: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`
28054
28419
  :param Sequence[str] ip_filter_strings: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`.
@@ -28106,7 +28471,7 @@ class GetDragonflyDragonflyUserConfigResult(dict):
28106
28471
  @pulumi.getter(name="dragonflyPersistence")
28107
28472
  def dragonfly_persistence(self) -> Optional[str]:
28108
28473
  """
28109
- Enum: `off`, `rdb`, `dfs`. When persistence is `rdb` or `dfs`, Dragonfly does RDB or DFS dumps every 10 minutes. Dumps are done according to the backup schedule for backup purposes. When persistence is `off`, no RDB/DFS dumps or backups are done, so data can be lost at any moment if the service is restarted for any reason, or if the service is powered off. Also, the service can't be forked.
28474
+ Enum: `dfs`, `off`, `rdb`. When persistence is `rdb` or `dfs`, Dragonfly does RDB or DFS dumps every 10 minutes. Dumps are done according to the backup schedule for backup purposes. When persistence is `off`, no RDB/DFS dumps or backups are done, so data can be lost at any moment if the service is restarted for any reason, or if the service is powered off. Also, the service can't be forked.
28110
28475
  """
28111
28476
  return pulumi.get(self, "dragonfly_persistence")
28112
28477
 
@@ -29171,10 +29536,10 @@ class GetGrafanaGrafanaUserConfigResult(dict):
29171
29536
  wal: Optional[bool] = None):
29172
29537
  """
29173
29538
  :param str additional_backup_regions: Additional Cloud Regions for Backup Replication.
29174
- :param bool alerting_enabled: Enable or disable Grafana legacy alerting functionality. This should not be enabled with unified_alerting_enabled.
29539
+ :param bool alerting_enabled: Setting has no effect with Grafana 11 and onward. Enable or disable Grafana legacy alerting functionality. This should not be enabled with unified_alerting_enabled.
29175
29540
  :param str alerting_error_or_timeout: Enum: `alerting`, `keep_state`. Default error or timeout setting for new alerting rules.
29176
29541
  :param int alerting_max_annotations_to_keep: Max number of alert annotations that Grafana stores. 0 (default) keeps all alert annotations. Example: `0`.
29177
- :param str alerting_nodata_or_nullvalues: Enum: `alerting`, `no_data`, `keep_state`, `ok`. Default value for 'no data or null values' for new alerting rules.
29542
+ :param str alerting_nodata_or_nullvalues: Enum: `alerting`, `keep_state`, `no_data`, `ok`. Default value for 'no data or null values' for new alerting rules.
29178
29543
  :param bool allow_embedding: Allow embedding Grafana dashboards with iframe/frame/object/embed tags. Disabled by default to limit impact of clickjacking.
29179
29544
  :param 'GetGrafanaGrafanaUserConfigAuthAzureadArgs' auth_azuread: Azure AD OAuth integration
29180
29545
  :param bool auth_basic_enabled: Enable or disable basic authentication form, used by Grafana built-in login.
@@ -29182,9 +29547,9 @@ class GetGrafanaGrafanaUserConfigResult(dict):
29182
29547
  :param 'GetGrafanaGrafanaUserConfigAuthGithubArgs' auth_github: Github Auth integration
29183
29548
  :param 'GetGrafanaGrafanaUserConfigAuthGitlabArgs' auth_gitlab: GitLab Auth integration
29184
29549
  :param 'GetGrafanaGrafanaUserConfigAuthGoogleArgs' auth_google: Google Auth integration
29185
- :param str cookie_samesite: Enum: `lax`, `strict`, `none`. Cookie SameSite attribute: `strict` prevents sending cookie for cross-site requests, effectively disabling direct linking from other sites to Grafana. `lax` is the default value.
29550
+ :param str cookie_samesite: Enum: `lax`, `none`, `strict`. Cookie SameSite attribute: `strict` prevents sending cookie for cross-site requests, effectively disabling direct linking from other sites to Grafana. `lax` is the default value.
29186
29551
  :param str custom_domain: Serve the web frontend using a custom CNAME pointing to the Aiven DNS name. Example: `grafana.example.org`.
29187
- :param bool dashboard_previews_enabled: This feature is new in Grafana 9 and is quite resource intensive. It may cause low-end plans to work more slowly while the dashboard previews are rendering.
29552
+ :param bool dashboard_previews_enabled: Enable browsing of dashboards in grid (pictures) mode. This feature is new in Grafana 9 and is quite resource intensive. It may cause low-end plans to work more slowly while the dashboard previews are rendering.
29188
29553
  :param str dashboards_min_refresh_interval: Signed sequence of decimal numbers, followed by a unit suffix (ms, s, m, h, d), e.g. 30s, 1h. Example: `5s`.
29189
29554
  :param int dashboards_versions_to_keep: Dashboard versions to keep per dashboard. Example: `20`.
29190
29555
  :param bool dataproxy_send_user_header: Send `X-Grafana-User` header to data source.
@@ -29197,7 +29562,7 @@ class GetGrafanaGrafanaUserConfigResult(dict):
29197
29562
  :param Sequence['GetGrafanaGrafanaUserConfigIpFilterObjectArgs'] ip_filter_objects: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`
29198
29563
  :param Sequence[str] ip_filter_strings: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`.
29199
29564
  :param Sequence[str] ip_filters: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`.
29200
- :param bool metrics_enabled: Enable Grafana /metrics endpoint.
29565
+ :param bool metrics_enabled: Enable Grafana's /metrics endpoint.
29201
29566
  :param bool oauth_allow_insecure_email_lookup: Enforce user lookup based on email instead of the unique ID provided by the IdP.
29202
29567
  :param 'GetGrafanaGrafanaUserConfigPrivateAccessArgs' private_access: Allow access to selected service ports from private networks
29203
29568
  :param 'GetGrafanaGrafanaUserConfigPrivatelinkAccessArgs' privatelink_access: Allow access to selected service components through Privatelink
@@ -29208,9 +29573,9 @@ class GetGrafanaGrafanaUserConfigResult(dict):
29208
29573
  :param str service_to_fork_from: Name of another service to fork from. This has effect only when a new service is being created. Example: `anotherservicename`.
29209
29574
  :param 'GetGrafanaGrafanaUserConfigSmtpServerArgs' smtp_server: SMTP server settings
29210
29575
  :param bool static_ips: Use static public IP addresses.
29211
- :param bool unified_alerting_enabled: Enable or disable Grafana unified alerting functionality. By default this is enabled and any legacy alerts will be migrated on upgrade to Grafana 9+. To stay on legacy alerting, set unified_alerting_enabled to false and alerting_enabled to true. See https://grafana.com/docs/grafana/latest/alerting/set-up/migrating-alerts/ for more details.
29576
+ :param bool unified_alerting_enabled: Enable or disable Grafana unified alerting functionality. By default this is enabled and any legacy alerts will be migrated on upgrade to Grafana 9+. To stay on legacy alerting, set unified_alerting_enabled to false and alerting_enabled to true. See https://grafana.com/docs/grafana/latest/alerting/ for more details.
29212
29577
  :param bool user_auto_assign_org: Auto-assign new users on signup to main organization. Defaults to false.
29213
- :param str user_auto_assign_org_role: Enum: `Viewer`, `Admin`, `Editor`. Set role for new signups. Defaults to Viewer.
29578
+ :param str user_auto_assign_org_role: Enum: `Admin`, `Editor`, `Viewer`. Set role for new signups. Defaults to Viewer.
29214
29579
  :param bool viewers_can_edit: Users with view-only permission can edit but not save dashboards.
29215
29580
  :param bool wal: Setting to enable/disable Write-Ahead Logging. The default value is false (disabled).
29216
29581
  """
@@ -29313,7 +29678,7 @@ class GetGrafanaGrafanaUserConfigResult(dict):
29313
29678
  @pulumi.getter(name="alertingEnabled")
29314
29679
  def alerting_enabled(self) -> Optional[bool]:
29315
29680
  """
29316
- Enable or disable Grafana legacy alerting functionality. This should not be enabled with unified_alerting_enabled.
29681
+ Setting has no effect with Grafana 11 and onward. Enable or disable Grafana legacy alerting functionality. This should not be enabled with unified_alerting_enabled.
29317
29682
  """
29318
29683
  return pulumi.get(self, "alerting_enabled")
29319
29684
 
@@ -29337,7 +29702,7 @@ class GetGrafanaGrafanaUserConfigResult(dict):
29337
29702
  @pulumi.getter(name="alertingNodataOrNullvalues")
29338
29703
  def alerting_nodata_or_nullvalues(self) -> Optional[str]:
29339
29704
  """
29340
- Enum: `alerting`, `no_data`, `keep_state`, `ok`. Default value for 'no data or null values' for new alerting rules.
29705
+ Enum: `alerting`, `keep_state`, `no_data`, `ok`. Default value for 'no data or null values' for new alerting rules.
29341
29706
  """
29342
29707
  return pulumi.get(self, "alerting_nodata_or_nullvalues")
29343
29708
 
@@ -29401,7 +29766,7 @@ class GetGrafanaGrafanaUserConfigResult(dict):
29401
29766
  @pulumi.getter(name="cookieSamesite")
29402
29767
  def cookie_samesite(self) -> Optional[str]:
29403
29768
  """
29404
- Enum: `lax`, `strict`, `none`. Cookie SameSite attribute: `strict` prevents sending cookie for cross-site requests, effectively disabling direct linking from other sites to Grafana. `lax` is the default value.
29769
+ Enum: `lax`, `none`, `strict`. Cookie SameSite attribute: `strict` prevents sending cookie for cross-site requests, effectively disabling direct linking from other sites to Grafana. `lax` is the default value.
29405
29770
  """
29406
29771
  return pulumi.get(self, "cookie_samesite")
29407
29772
 
@@ -29417,7 +29782,7 @@ class GetGrafanaGrafanaUserConfigResult(dict):
29417
29782
  @pulumi.getter(name="dashboardPreviewsEnabled")
29418
29783
  def dashboard_previews_enabled(self) -> Optional[bool]:
29419
29784
  """
29420
- This feature is new in Grafana 9 and is quite resource intensive. It may cause low-end plans to work more slowly while the dashboard previews are rendering.
29785
+ Enable browsing of dashboards in grid (pictures) mode. This feature is new in Grafana 9 and is quite resource intensive. It may cause low-end plans to work more slowly while the dashboard previews are rendering.
29421
29786
  """
29422
29787
  return pulumi.get(self, "dashboard_previews_enabled")
29423
29788
 
@@ -29522,7 +29887,7 @@ class GetGrafanaGrafanaUserConfigResult(dict):
29522
29887
  @pulumi.getter(name="metricsEnabled")
29523
29888
  def metrics_enabled(self) -> Optional[bool]:
29524
29889
  """
29525
- Enable Grafana /metrics endpoint.
29890
+ Enable Grafana's /metrics endpoint.
29526
29891
  """
29527
29892
  return pulumi.get(self, "metrics_enabled")
29528
29893
 
@@ -29610,7 +29975,7 @@ class GetGrafanaGrafanaUserConfigResult(dict):
29610
29975
  @pulumi.getter(name="unifiedAlertingEnabled")
29611
29976
  def unified_alerting_enabled(self) -> Optional[bool]:
29612
29977
  """
29613
- Enable or disable Grafana unified alerting functionality. By default this is enabled and any legacy alerts will be migrated on upgrade to Grafana 9+. To stay on legacy alerting, set unified_alerting_enabled to false and alerting_enabled to true. See https://grafana.com/docs/grafana/latest/alerting/set-up/migrating-alerts/ for more details.
29978
+ Enable or disable Grafana unified alerting functionality. By default this is enabled and any legacy alerts will be migrated on upgrade to Grafana 9+. To stay on legacy alerting, set unified_alerting_enabled to false and alerting_enabled to true. See https://grafana.com/docs/grafana/latest/alerting/ for more details.
29614
29979
  """
29615
29980
  return pulumi.get(self, "unified_alerting_enabled")
29616
29981
 
@@ -29626,7 +29991,7 @@ class GetGrafanaGrafanaUserConfigResult(dict):
29626
29991
  @pulumi.getter(name="userAutoAssignOrgRole")
29627
29992
  def user_auto_assign_org_role(self) -> Optional[str]:
29628
29993
  """
29629
- Enum: `Viewer`, `Admin`, `Editor`. Set role for new signups. Defaults to Viewer.
29994
+ Enum: `Admin`, `Editor`, `Viewer`. Set role for new signups. Defaults to Viewer.
29630
29995
  """
29631
29996
  return pulumi.get(self, "user_auto_assign_org_role")
29632
29997
 
@@ -29984,9 +30349,9 @@ class GetGrafanaGrafanaUserConfigAuthGitlabResult(dict):
29984
30349
  :param str client_id: Client ID from provider. Example: `b1ba0bf54a4c2c0a1c29`.
29985
30350
  :param str client_secret: Client secret from provider. Example: `bfa6gea4f129076761dcba8ce5e1e406bd83af7b`.
29986
30351
  :param bool allow_sign_up: Automatically sign-up users on successful sign-in.
29987
- :param str api_url: API URL. This only needs to be set when using self hosted GitLab. Example: `https://gitlab.com/api/v4`.
29988
- :param str auth_url: Authorization URL. This only needs to be set when using self hosted GitLab. Example: `https://gitlab.com/oauth/authorize`.
29989
- :param str token_url: Token URL. This only needs to be set when using self hosted GitLab. Example: `https://gitlab.com/oauth/token`.
30352
+ :param str api_url: This only needs to be set when using self hosted GitLab. Example: `https://gitlab.com/api/v4`.
30353
+ :param str auth_url: This only needs to be set when using self hosted GitLab. Example: `https://gitlab.com/oauth/authorize`.
30354
+ :param str token_url: This only needs to be set when using self hosted GitLab. Example: `https://gitlab.com/oauth/token`.
29990
30355
  """
29991
30356
  pulumi.set(__self__, "allowed_groups", allowed_groups)
29992
30357
  pulumi.set(__self__, "client_id", client_id)
@@ -30036,7 +30401,7 @@ class GetGrafanaGrafanaUserConfigAuthGitlabResult(dict):
30036
30401
  @pulumi.getter(name="apiUrl")
30037
30402
  def api_url(self) -> Optional[str]:
30038
30403
  """
30039
- API URL. This only needs to be set when using self hosted GitLab. Example: `https://gitlab.com/api/v4`.
30404
+ This only needs to be set when using self hosted GitLab. Example: `https://gitlab.com/api/v4`.
30040
30405
  """
30041
30406
  return pulumi.get(self, "api_url")
30042
30407
 
@@ -30044,7 +30409,7 @@ class GetGrafanaGrafanaUserConfigAuthGitlabResult(dict):
30044
30409
  @pulumi.getter(name="authUrl")
30045
30410
  def auth_url(self) -> Optional[str]:
30046
30411
  """
30047
- Authorization URL. This only needs to be set when using self hosted GitLab. Example: `https://gitlab.com/oauth/authorize`.
30412
+ This only needs to be set when using self hosted GitLab. Example: `https://gitlab.com/oauth/authorize`.
30048
30413
  """
30049
30414
  return pulumi.get(self, "auth_url")
30050
30415
 
@@ -30052,7 +30417,7 @@ class GetGrafanaGrafanaUserConfigAuthGitlabResult(dict):
30052
30417
  @pulumi.getter(name="tokenUrl")
30053
30418
  def token_url(self) -> Optional[str]:
30054
30419
  """
30055
- Token URL. This only needs to be set when using self hosted GitLab. Example: `https://gitlab.com/oauth/token`.
30420
+ This only needs to be set when using self hosted GitLab. Example: `https://gitlab.com/oauth/token`.
30056
30421
  """
30057
30422
  return pulumi.get(self, "token_url")
30058
30423
 
@@ -30222,7 +30587,7 @@ class GetGrafanaGrafanaUserConfigExternalImageStorageResult(dict):
30222
30587
  """
30223
30588
  :param str access_key: S3 access key. Requires permissions to the S3 bucket for the s3:PutObject and s3:PutObjectAcl actions. Example: `AAAAAAAAAAAAAAAAAAA`.
30224
30589
  :param str bucket_url: Bucket URL for S3. Example: `https://grafana.s3-ap-southeast-2.amazonaws.com/`.
30225
- :param str provider: Enum: `s3`. Provider type.
30590
+ :param str provider: Enum: `s3`. External image store provider.
30226
30591
  :param str secret_key: S3 secret key. Example: `AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA`.
30227
30592
  """
30228
30593
  pulumi.set(__self__, "access_key", access_key)
@@ -30250,7 +30615,7 @@ class GetGrafanaGrafanaUserConfigExternalImageStorageResult(dict):
30250
30615
  @pulumi.getter
30251
30616
  def provider(self) -> str:
30252
30617
  """
30253
- Enum: `s3`. Provider type.
30618
+ Enum: `s3`. External image store provider.
30254
30619
  """
30255
30620
  return pulumi.get(self, "provider")
30256
30621
 
@@ -30368,7 +30733,7 @@ class GetGrafanaGrafanaUserConfigSmtpServerResult(dict):
30368
30733
  :param str from_name: Name used in outgoing emails, defaults to Grafana.
30369
30734
  :param str password: Password for SMTP authentication. Example: `ein0eemeev5eeth3Ahfu`.
30370
30735
  :param bool skip_verify: Skip verifying server certificate. Defaults to false.
30371
- :param str starttls_policy: Enum: `OpportunisticStartTLS`, `MandatoryStartTLS`, `NoStartTLS`. Either OpportunisticStartTLS, MandatoryStartTLS or NoStartTLS. Default is OpportunisticStartTLS.
30736
+ :param str starttls_policy: Enum: `MandatoryStartTLS`, `NoStartTLS`, `OpportunisticStartTLS`. Either OpportunisticStartTLS, MandatoryStartTLS or NoStartTLS. Default is OpportunisticStartTLS.
30372
30737
  :param str username: Username for SMTP authentication. Example: `smtpuser`.
30373
30738
  """
30374
30739
  pulumi.set(__self__, "from_address", from_address)
@@ -30437,7 +30802,7 @@ class GetGrafanaGrafanaUserConfigSmtpServerResult(dict):
30437
30802
  @pulumi.getter(name="starttlsPolicy")
30438
30803
  def starttls_policy(self) -> Optional[str]:
30439
30804
  """
30440
- Enum: `OpportunisticStartTLS`, `MandatoryStartTLS`, `NoStartTLS`. Either OpportunisticStartTLS, MandatoryStartTLS or NoStartTLS. Default is OpportunisticStartTLS.
30805
+ Enum: `MandatoryStartTLS`, `NoStartTLS`, `OpportunisticStartTLS`. Either OpportunisticStartTLS, MandatoryStartTLS or NoStartTLS. Default is OpportunisticStartTLS.
30441
30806
  """
30442
30807
  return pulumi.get(self, "starttls_policy")
30443
30808
 
@@ -30678,6 +31043,7 @@ class GetInfluxDbInfluxdbUserConfigResult(dict):
30678
31043
  additional_backup_regions: Optional[str] = None,
30679
31044
  custom_domain: Optional[str] = None,
30680
31045
  influxdb: Optional['outputs.GetInfluxDbInfluxdbUserConfigInfluxdbResult'] = None,
31046
+ influxdb_version: Optional[str] = None,
30681
31047
  ip_filter_objects: Optional[Sequence['outputs.GetInfluxDbInfluxdbUserConfigIpFilterObjectResult']] = None,
30682
31048
  ip_filter_strings: Optional[Sequence[str]] = None,
30683
31049
  ip_filters: Optional[Sequence[str]] = None,
@@ -30693,6 +31059,7 @@ class GetInfluxDbInfluxdbUserConfigResult(dict):
30693
31059
  :param str additional_backup_regions: Additional Cloud Regions for Backup Replication.
30694
31060
  :param str custom_domain: Serve the web frontend using a custom CNAME pointing to the Aiven DNS name. Example: `grafana.example.org`.
30695
31061
  :param 'GetInfluxDbInfluxdbUserConfigInfluxdbArgs' influxdb: influxdb.conf configuration values
31062
+ :param str influxdb_version: Enum: `1.8`, and newer. InfluxDB major version. Default: `1.8`.
30696
31063
  :param Sequence['GetInfluxDbInfluxdbUserConfigIpFilterObjectArgs'] ip_filter_objects: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`
30697
31064
  :param Sequence[str] ip_filter_strings: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`.
30698
31065
  :param Sequence[str] ip_filters: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`.
@@ -30711,6 +31078,8 @@ class GetInfluxDbInfluxdbUserConfigResult(dict):
30711
31078
  pulumi.set(__self__, "custom_domain", custom_domain)
30712
31079
  if influxdb is not None:
30713
31080
  pulumi.set(__self__, "influxdb", influxdb)
31081
+ if influxdb_version is not None:
31082
+ pulumi.set(__self__, "influxdb_version", influxdb_version)
30714
31083
  if ip_filter_objects is not None:
30715
31084
  pulumi.set(__self__, "ip_filter_objects", ip_filter_objects)
30716
31085
  if ip_filter_strings is not None:
@@ -30759,6 +31128,14 @@ class GetInfluxDbInfluxdbUserConfigResult(dict):
30759
31128
  """
30760
31129
  return pulumi.get(self, "influxdb")
30761
31130
 
31131
+ @property
31132
+ @pulumi.getter(name="influxdbVersion")
31133
+ def influxdb_version(self) -> Optional[str]:
31134
+ """
31135
+ Enum: `1.8`, and newer. InfluxDB major version. Default: `1.8`.
31136
+ """
31137
+ return pulumi.get(self, "influxdb_version")
31138
+
30762
31139
  @property
30763
31140
  @pulumi.getter(name="ipFilterObjects")
30764
31141
  def ip_filter_objects(self) -> Optional[Sequence['outputs.GetInfluxDbInfluxdbUserConfigIpFilterObjectResult']]:
@@ -30973,12 +31350,16 @@ class GetInfluxDbInfluxdbUserConfigIpFilterObjectResult(dict):
30973
31350
  @pulumi.output_type
30974
31351
  class GetInfluxDbInfluxdbUserConfigPrivateAccessResult(dict):
30975
31352
  def __init__(__self__, *,
30976
- influxdb: Optional[bool] = None):
31353
+ influxdb: Optional[bool] = None,
31354
+ user_backup: Optional[bool] = None):
30977
31355
  """
30978
31356
  :param bool influxdb: Allow clients to connect to influxdb with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
31357
+ :param bool user_backup: Allow clients to connect to user_backup with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
30979
31358
  """
30980
31359
  if influxdb is not None:
30981
31360
  pulumi.set(__self__, "influxdb", influxdb)
31361
+ if user_backup is not None:
31362
+ pulumi.set(__self__, "user_backup", user_backup)
30982
31363
 
30983
31364
  @property
30984
31365
  @pulumi.getter
@@ -30988,16 +31369,28 @@ class GetInfluxDbInfluxdbUserConfigPrivateAccessResult(dict):
30988
31369
  """
30989
31370
  return pulumi.get(self, "influxdb")
30990
31371
 
31372
+ @property
31373
+ @pulumi.getter(name="userBackup")
31374
+ def user_backup(self) -> Optional[bool]:
31375
+ """
31376
+ Allow clients to connect to user_backup with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
31377
+ """
31378
+ return pulumi.get(self, "user_backup")
31379
+
30991
31380
 
30992
31381
  @pulumi.output_type
30993
31382
  class GetInfluxDbInfluxdbUserConfigPrivatelinkAccessResult(dict):
30994
31383
  def __init__(__self__, *,
30995
- influxdb: Optional[bool] = None):
31384
+ influxdb: Optional[bool] = None,
31385
+ user_backup: Optional[bool] = None):
30996
31386
  """
30997
31387
  :param bool influxdb: Enable influxdb.
31388
+ :param bool user_backup: Enable user_backup.
30998
31389
  """
30999
31390
  if influxdb is not None:
31000
31391
  pulumi.set(__self__, "influxdb", influxdb)
31392
+ if user_backup is not None:
31393
+ pulumi.set(__self__, "user_backup", user_backup)
31001
31394
 
31002
31395
  @property
31003
31396
  @pulumi.getter
@@ -31007,16 +31400,28 @@ class GetInfluxDbInfluxdbUserConfigPrivatelinkAccessResult(dict):
31007
31400
  """
31008
31401
  return pulumi.get(self, "influxdb")
31009
31402
 
31403
+ @property
31404
+ @pulumi.getter(name="userBackup")
31405
+ def user_backup(self) -> Optional[bool]:
31406
+ """
31407
+ Enable user_backup.
31408
+ """
31409
+ return pulumi.get(self, "user_backup")
31410
+
31010
31411
 
31011
31412
  @pulumi.output_type
31012
31413
  class GetInfluxDbInfluxdbUserConfigPublicAccessResult(dict):
31013
31414
  def __init__(__self__, *,
31014
- influxdb: Optional[bool] = None):
31415
+ influxdb: Optional[bool] = None,
31416
+ user_backup: Optional[bool] = None):
31015
31417
  """
31016
31418
  :param bool influxdb: Allow clients to connect to influxdb from the public internet for service nodes that are in a project VPC or another type of private network.
31419
+ :param bool user_backup: Allow clients to connect to user_backup from the public internet for service nodes that are in a project VPC or another type of private network.
31017
31420
  """
31018
31421
  if influxdb is not None:
31019
31422
  pulumi.set(__self__, "influxdb", influxdb)
31423
+ if user_backup is not None:
31424
+ pulumi.set(__self__, "user_backup", user_backup)
31020
31425
 
31021
31426
  @property
31022
31427
  @pulumi.getter
@@ -31026,6 +31431,14 @@ class GetInfluxDbInfluxdbUserConfigPublicAccessResult(dict):
31026
31431
  """
31027
31432
  return pulumi.get(self, "influxdb")
31028
31433
 
31434
+ @property
31435
+ @pulumi.getter(name="userBackup")
31436
+ def user_backup(self) -> Optional[bool]:
31437
+ """
31438
+ Allow clients to connect to user_backup from the public internet for service nodes that are in a project VPC or another type of private network.
31439
+ """
31440
+ return pulumi.get(self, "user_backup")
31441
+
31029
31442
 
31030
31443
  @pulumi.output_type
31031
31444
  class GetInfluxDbServiceIntegrationResult(dict):
@@ -31480,10 +31893,10 @@ class GetKafkaConnectKafkaConnectUserConfigKafkaConnectResult(dict):
31480
31893
  scheduled_rebalance_max_delay_ms: Optional[int] = None,
31481
31894
  session_timeout_ms: Optional[int] = None):
31482
31895
  """
31483
- :param str connector_client_config_override_policy: Enum: `None`, `All`. Defines what client configurations can be overridden by the connector. Default is None.
31896
+ :param str connector_client_config_override_policy: Enum: `All`, `None`. Defines what client configurations can be overridden by the connector. Default is None.
31484
31897
  :param str consumer_auto_offset_reset: Enum: `earliest`, `latest`. What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
31485
31898
  :param int consumer_fetch_max_bytes: Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not a absolute maximum. Example: `52428800`.
31486
- :param str consumer_isolation_level: Enum: `read_uncommitted`, `read_committed`. Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
31899
+ :param str consumer_isolation_level: Enum: `read_committed`, `read_uncommitted`. Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
31487
31900
  :param int consumer_max_partition_fetch_bytes: Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress. Example: `1048576`.
31488
31901
  :param int consumer_max_poll_interval_ms: The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).
31489
31902
  :param int consumer_max_poll_records: The maximum number of records returned in a single call to poll() (defaults to 500).
@@ -31491,7 +31904,7 @@ class GetKafkaConnectKafkaConnectUserConfigKafkaConnectResult(dict):
31491
31904
  :param int offset_flush_timeout_ms: Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).
31492
31905
  :param int producer_batch_size: This setting gives the upper bound of the batch size to be sent. If there are fewer than this many bytes accumulated for this partition, the producer will `linger` for the linger.ms time waiting for more records to show up. A batch size of zero will disable batching entirely (defaults to 16384).
31493
31906
  :param int producer_buffer_memory: The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432).
31494
- :param str producer_compression_type: Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
31907
+ :param str producer_compression_type: Enum: `gzip`, `lz4`, `none`, `snappy`, `zstd`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
31495
31908
  :param int producer_linger_ms: This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting, however if there are fewer than this many bytes accumulated for this partition the producer will `linger` for the specified time waiting for more records to show up. Defaults to 0.
31496
31909
  :param int producer_max_request_size: This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests. Example: `1048576`.
31497
31910
  :param int scheduled_rebalance_max_delay_ms: The maximum delay that is scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. Defaults to 5 minutes.
@@ -31534,7 +31947,7 @@ class GetKafkaConnectKafkaConnectUserConfigKafkaConnectResult(dict):
31534
31947
  @pulumi.getter(name="connectorClientConfigOverridePolicy")
31535
31948
  def connector_client_config_override_policy(self) -> Optional[str]:
31536
31949
  """
31537
- Enum: `None`, `All`. Defines what client configurations can be overridden by the connector. Default is None.
31950
+ Enum: `All`, `None`. Defines what client configurations can be overridden by the connector. Default is None.
31538
31951
  """
31539
31952
  return pulumi.get(self, "connector_client_config_override_policy")
31540
31953
 
@@ -31558,7 +31971,7 @@ class GetKafkaConnectKafkaConnectUserConfigKafkaConnectResult(dict):
31558
31971
  @pulumi.getter(name="consumerIsolationLevel")
31559
31972
  def consumer_isolation_level(self) -> Optional[str]:
31560
31973
  """
31561
- Enum: `read_uncommitted`, `read_committed`. Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
31974
+ Enum: `read_committed`, `read_uncommitted`. Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
31562
31975
  """
31563
31976
  return pulumi.get(self, "consumer_isolation_level")
31564
31977
 
@@ -31622,7 +32035,7 @@ class GetKafkaConnectKafkaConnectUserConfigKafkaConnectResult(dict):
31622
32035
  @pulumi.getter(name="producerCompressionType")
31623
32036
  def producer_compression_type(self) -> Optional[str]:
31624
32037
  """
31625
- Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
32038
+ Enum: `gzip`, `lz4`, `none`, `snappy`, `zstd`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
31626
32039
  """
31627
32040
  return pulumi.get(self, "producer_compression_type")
31628
32041
 
@@ -32531,7 +32944,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
32531
32944
  transaction_state_log_segment_bytes: Optional[int] = None):
32532
32945
  """
32533
32946
  :param bool auto_create_topics_enable: Enable auto-creation of topics. (Default: true).
32534
- :param str compression_type: Enum: `gzip`, `snappy`, `lz4`, `zstd`, `uncompressed`, `producer`. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `uncompressed` which is equivalent to no compression; and `producer` which means retain the original compression codec set by the producer.(Default: producer).
32947
+ :param str compression_type: Enum: `gzip`, `lz4`, `producer`, `snappy`, `uncompressed`, `zstd`. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `uncompressed` which is equivalent to no compression; and `producer` which means retain the original compression codec set by the producer.(Default: producer).
32535
32948
  :param int connections_max_idle_ms: Idle connections timeout: the server socket processor threads close the connections that idle for longer than this. (Default: 600000 ms (10 minutes)). Example: `540000`.
32536
32949
  :param int default_replication_factor: Replication factor for auto-created topics (Default: 3).
32537
32950
  :param int group_initial_rebalance_delay_ms: The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. (Default: 3000 ms (3 seconds)). Example: `3000`.
@@ -32541,8 +32954,8 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
32541
32954
  :param int log_cleaner_max_compaction_lag_ms: The maximum amount of time message will remain uncompacted. Only applicable for logs that are being compacted. (Default: 9223372036854775807 ms (Long.MAX_VALUE)).
32542
32955
  :param float log_cleaner_min_cleanable_ratio: Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option. (Default: 0.5). Example: `0.5`.
32543
32956
  :param int log_cleaner_min_compaction_lag_ms: The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted. (Default: 0 ms).
32544
- :param str log_cleanup_policy: Enum: `delete`, `compact`, `compact,delete`. The default cleanup policy for segments beyond the retention window (Default: delete).
32545
- :param int log_flush_interval_messages: The number of messages accumulated on a log partition before messages are flushed to disk (Default: 9223372036854775807 (Long.MAX_VALUE)). Example: `9223372036854775807`.
32957
+ :param str log_cleanup_policy: Enum: `compact`, `compact,delete`, `delete`. The default cleanup policy for segments beyond the retention window (Default: delete).
32958
+ :param int log_flush_interval_messages: The number of messages accumulated on a log partition before messages are flushed to disk (Default: 9223372036854775807 (Long.MAX_VALUE)).
32546
32959
  :param int log_flush_interval_ms: The maximum time in ms that a message in any topic is kept in memory (page-cache) before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used (Default: null).
32547
32960
  :param int log_index_interval_bytes: The interval with which Kafka adds an entry to the offset index (Default: 4096 bytes (4 kibibytes)). Example: `4096`.
32548
32961
  :param int log_index_size_max_bytes: The maximum size in bytes of the offset index (Default: 10485760 (10 mebibytes)). Example: `10485760`.
@@ -32682,7 +33095,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
32682
33095
  @pulumi.getter(name="compressionType")
32683
33096
  def compression_type(self) -> Optional[str]:
32684
33097
  """
32685
- Enum: `gzip`, `snappy`, `lz4`, `zstd`, `uncompressed`, `producer`. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `uncompressed` which is equivalent to no compression; and `producer` which means retain the original compression codec set by the producer.(Default: producer).
33098
+ Enum: `gzip`, `lz4`, `producer`, `snappy`, `uncompressed`, `zstd`. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `uncompressed` which is equivalent to no compression; and `producer` which means retain the original compression codec set by the producer.(Default: producer).
32686
33099
  """
32687
33100
  return pulumi.get(self, "compression_type")
32688
33101
 
@@ -32762,7 +33175,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
32762
33175
  @pulumi.getter(name="logCleanupPolicy")
32763
33176
  def log_cleanup_policy(self) -> Optional[str]:
32764
33177
  """
32765
- Enum: `delete`, `compact`, `compact,delete`. The default cleanup policy for segments beyond the retention window (Default: delete).
33178
+ Enum: `compact`, `compact,delete`, `delete`. The default cleanup policy for segments beyond the retention window (Default: delete).
32766
33179
  """
32767
33180
  return pulumi.get(self, "log_cleanup_policy")
32768
33181
 
@@ -32770,7 +33183,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
32770
33183
  @pulumi.getter(name="logFlushIntervalMessages")
32771
33184
  def log_flush_interval_messages(self) -> Optional[int]:
32772
33185
  """
32773
- The number of messages accumulated on a log partition before messages are flushed to disk (Default: 9223372036854775807 (Long.MAX_VALUE)). Example: `9223372036854775807`.
33186
+ The number of messages accumulated on a log partition before messages are flushed to disk (Default: 9223372036854775807 (Long.MAX_VALUE)).
32774
33187
  """
32775
33188
  return pulumi.get(self, "log_flush_interval_messages")
32776
33189
 
@@ -33090,10 +33503,10 @@ class GetKafkaKafkaUserConfigKafkaConnectConfigResult(dict):
33090
33503
  scheduled_rebalance_max_delay_ms: Optional[int] = None,
33091
33504
  session_timeout_ms: Optional[int] = None):
33092
33505
  """
33093
- :param str connector_client_config_override_policy: Enum: `None`, `All`. Defines what client configurations can be overridden by the connector. Default is None.
33506
+ :param str connector_client_config_override_policy: Enum: `All`, `None`. Defines what client configurations can be overridden by the connector. Default is None.
33094
33507
  :param str consumer_auto_offset_reset: Enum: `earliest`, `latest`. What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
33095
33508
  :param int consumer_fetch_max_bytes: Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not a absolute maximum. Example: `52428800`.
33096
- :param str consumer_isolation_level: Enum: `read_uncommitted`, `read_committed`. Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
33509
+ :param str consumer_isolation_level: Enum: `read_committed`, `read_uncommitted`. Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
33097
33510
  :param int consumer_max_partition_fetch_bytes: Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress. Example: `1048576`.
33098
33511
  :param int consumer_max_poll_interval_ms: The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).
33099
33512
  :param int consumer_max_poll_records: The maximum number of records returned in a single call to poll() (defaults to 500).
@@ -33101,7 +33514,7 @@ class GetKafkaKafkaUserConfigKafkaConnectConfigResult(dict):
33101
33514
  :param int offset_flush_timeout_ms: Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).
33102
33515
  :param int producer_batch_size: This setting gives the upper bound of the batch size to be sent. If there are fewer than this many bytes accumulated for this partition, the producer will `linger` for the linger.ms time waiting for more records to show up. A batch size of zero will disable batching entirely (defaults to 16384).
33103
33516
  :param int producer_buffer_memory: The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432).
33104
- :param str producer_compression_type: Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
33517
+ :param str producer_compression_type: Enum: `gzip`, `lz4`, `none`, `snappy`, `zstd`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
33105
33518
  :param int producer_linger_ms: This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting, however if there are fewer than this many bytes accumulated for this partition the producer will `linger` for the specified time waiting for more records to show up. Defaults to 0.
33106
33519
  :param int producer_max_request_size: This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests. Example: `1048576`.
33107
33520
  :param int scheduled_rebalance_max_delay_ms: The maximum delay that is scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. Defaults to 5 minutes.
@@ -33144,7 +33557,7 @@ class GetKafkaKafkaUserConfigKafkaConnectConfigResult(dict):
33144
33557
  @pulumi.getter(name="connectorClientConfigOverridePolicy")
33145
33558
  def connector_client_config_override_policy(self) -> Optional[str]:
33146
33559
  """
33147
- Enum: `None`, `All`. Defines what client configurations can be overridden by the connector. Default is None.
33560
+ Enum: `All`, `None`. Defines what client configurations can be overridden by the connector. Default is None.
33148
33561
  """
33149
33562
  return pulumi.get(self, "connector_client_config_override_policy")
33150
33563
 
@@ -33168,7 +33581,7 @@ class GetKafkaKafkaUserConfigKafkaConnectConfigResult(dict):
33168
33581
  @pulumi.getter(name="consumerIsolationLevel")
33169
33582
  def consumer_isolation_level(self) -> Optional[str]:
33170
33583
  """
33171
- Enum: `read_uncommitted`, `read_committed`. Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
33584
+ Enum: `read_committed`, `read_uncommitted`. Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
33172
33585
  """
33173
33586
  return pulumi.get(self, "consumer_isolation_level")
33174
33587
 
@@ -33232,7 +33645,7 @@ class GetKafkaKafkaUserConfigKafkaConnectConfigResult(dict):
33232
33645
  @pulumi.getter(name="producerCompressionType")
33233
33646
  def producer_compression_type(self) -> Optional[str]:
33234
33647
  """
33235
- Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
33648
+ Enum: `gzip`, `lz4`, `none`, `snappy`, `zstd`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
33236
33649
  """
33237
33650
  return pulumi.get(self, "producer_compression_type")
33238
33651
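A similarly hedged sketch for the embedded Kafka Connect options documented in this class. The kafka_connect enable flag and the KafkaKafkaUserConfigKafkaConnectConfigArgs class name are assumed from the provider's usual layout; other values are placeholders.

    import pulumi_aiven as aiven

    # Illustrative sketch only; placeholder project/cloud/plan values.
    kafka = aiven.Kafka(
        "example-kafka-connect",
        project="my-project",
        cloud_name="google-europe-west1",
        plan="business-4",
        service_name="example-kafka-connect",
        kafka_user_config=aiven.KafkaKafkaUserConfigArgs(
            kafka_connect=True,  # run Kafka Connect alongside the Kafka service
            kafka_connect_config=aiven.KafkaKafkaUserConfigKafkaConnectConfigArgs(
                connector_client_config_override_policy="All",  # default is None
                consumer_isolation_level="read_committed",
                producer_compression_type="lz4",
            ),
        ),
    )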
 
@@ -33446,10 +33859,10 @@ class GetKafkaKafkaUserConfigKafkaRestConfigResult(dict):
33446
33859
  :param bool consumer_enable_auto_commit: If true the consumer's offset will be periodically committed to Kafka in the background. Default: `true`.
33447
33860
  :param int consumer_request_max_bytes: Maximum number of bytes in unencoded message keys and values by a single request. Default: `67108864`.
33448
33861
  :param int consumer_request_timeout_ms: Enum: `1000`, `15000`, `30000`. The maximum total time to wait for messages for a request if the maximum number of messages has not yet been reached. Default: `1000`.
33449
- :param str name_strategy: Enum: `topic_name`, `record_name`, `topic_record_name`. Name strategy to use when selecting subject for storing schemas. Default: `topic_name`.
33862
+ :param str name_strategy: Enum: `record_name`, `topic_name`, `topic_record_name`. Name strategy to use when selecting subject for storing schemas. Default: `topic_name`.
33450
33863
  :param bool name_strategy_validation: If true, validate that given schema is registered under expected subject name by the used name strategy when producing messages. Default: `true`.
33451
- :param str producer_acks: Enum: `all`, `-1`, `0`, `1`. The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to `all` or `-1`, the leader will wait for the full set of in-sync replicas to acknowledge the record. Default: `1`.
33452
- :param str producer_compression_type: Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
33864
+ :param str producer_acks: Enum: `-1`, `0`, `1`, `all`. The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to `all` or `-1`, the leader will wait for the full set of in-sync replicas to acknowledge the record. Default: `1`.
33865
+ :param str producer_compression_type: Enum: `gzip`, `lz4`, `none`, `snappy`, `zstd`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
33453
33866
  :param int producer_linger_ms: Wait for up to the given delay to allow batching records together. Default: `0`.
33454
33867
  :param int producer_max_request_size: The maximum size of a request in bytes. Note that Kafka broker can also cap the record batch size. Default: `1048576`.
33455
33868
  :param int simpleconsumer_pool_size_max: Maximum number of SimpleConsumers that can be instantiated per broker. Default: `25`.
@@ -33503,7 +33916,7 @@ class GetKafkaKafkaUserConfigKafkaRestConfigResult(dict):
33503
33916
  @pulumi.getter(name="nameStrategy")
33504
33917
  def name_strategy(self) -> Optional[str]:
33505
33918
  """
33506
- Enum: `topic_name`, `record_name`, `topic_record_name`. Name strategy to use when selecting subject for storing schemas. Default: `topic_name`.
33919
+ Enum: `record_name`, `topic_name`, `topic_record_name`. Name strategy to use when selecting subject for storing schemas. Default: `topic_name`.
33507
33920
  """
33508
33921
  return pulumi.get(self, "name_strategy")
33509
33922
 
@@ -33519,7 +33932,7 @@ class GetKafkaKafkaUserConfigKafkaRestConfigResult(dict):
33519
33932
  @pulumi.getter(name="producerAcks")
33520
33933
  def producer_acks(self) -> Optional[str]:
33521
33934
  """
33522
- Enum: `all`, `-1`, `0`, `1`. The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to `all` or `-1`, the leader will wait for the full set of in-sync replicas to acknowledge the record. Default: `1`.
33935
+ Enum: `-1`, `0`, `1`, `all`. The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to `all` or `-1`, the leader will wait for the full set of in-sync replicas to acknowledge the record. Default: `1`.
33523
33936
  """
33524
33937
  return pulumi.get(self, "producer_acks")
33525
33938
 
@@ -33527,7 +33940,7 @@ class GetKafkaKafkaUserConfigKafkaRestConfigResult(dict):
33527
33940
  @pulumi.getter(name="producerCompressionType")
33528
33941
  def producer_compression_type(self) -> Optional[str]:
33529
33942
  """
33530
- Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
33943
+ Enum: `gzip`, `lz4`, `none`, `snappy`, `zstd`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
33531
33944
  """
33532
33945
  return pulumi.get(self, "producer_compression_type")
33533
33946
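For the Kafka REST proxy options above, a hedged sketch along the same lines. The kafka_rest enable flag and the KafkaKafkaUserConfigKafkaRestConfigArgs class name are assumed; the enum values are taken from the docstrings in this class.

    import pulumi_aiven as aiven

    # Illustrative sketch only; placeholder project/cloud/plan values.
    kafka = aiven.Kafka(
        "example-kafka-rest",
        project="my-project",
        cloud_name="google-europe-west1",
        plan="business-4",
        service_name="example-kafka-rest",
        kafka_user_config=aiven.KafkaKafkaUserConfigArgs(
            kafka_rest=True,  # enable the Kafka REST proxy
            kafka_rest_config=aiven.KafkaKafkaUserConfigKafkaRestConfigArgs(
                name_strategy="topic_name",       # default subject name strategy
                producer_acks="all",              # wait for the full in-sync replica set
                producer_compression_type="gzip",
            ),
        ),
    )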
 
@@ -34514,8 +34927,8 @@ class GetKafkaTopicConfigResult(dict):
34514
34927
  segment_ms: Optional[str] = None,
34515
34928
  unclean_leader_election_enable: Optional[bool] = None):
34516
34929
  """
34517
- :param str cleanup_policy: cleanup.policy value
34518
- :param str compression_type: compression.type value
34930
+ :param str cleanup_policy: cleanup.policy value. The possible values are `delete`, `compact` and `compact,delete`.
34931
+ :param str compression_type: compression.type value. The possible values are `snappy`, `gzip`, `lz4`, `producer`, `uncompressed` and `zstd`.
34519
34932
  :param str delete_retention_ms: delete.retention.ms value
34520
34933
  :param str file_delete_delay_ms: file.delete.delay.ms value
34521
34934
  :param str flush_messages: flush.messages value
@@ -34526,9 +34939,9 @@ class GetKafkaTopicConfigResult(dict):
34526
34939
  :param str max_compaction_lag_ms: max.compaction.lag.ms value
34527
34940
  :param str max_message_bytes: max.message.bytes value
34528
34941
  :param bool message_downconversion_enable: message.downconversion.enable value
34529
- :param str message_format_version: message.format.version value
34942
+ :param str message_format_version: message.format.version value. The possible values are `0.8.0`, `0.8.1`, `0.8.2`, `0.9.0`, `0.10.0`, `0.10.0-IV0`, `0.10.0-IV1`, `0.10.1`, `0.10.1-IV0`, `0.10.1-IV1`, `0.10.1-IV2`, `0.10.2`, `0.10.2-IV0`, `0.11.0`, `0.11.0-IV0`, `0.11.0-IV1`, `0.11.0-IV2`, `1.0`, `1.0-IV0`, `1.1`, `1.1-IV0`, `2.0`, `2.0-IV0`, `2.0-IV1`, `2.1`, `2.1-IV0`, `2.1-IV1`, `2.1-IV2`, `2.2`, `2.2-IV0`, `2.2-IV1`, `2.3`, `2.3-IV0`, `2.3-IV1`, `2.4`, `2.4-IV0`, `2.4-IV1`, `2.5`, `2.5-IV0`, `2.6`, `2.6-IV0`, `2.7`, `2.7-IV0`, `2.7-IV1`, `2.7-IV2`, `2.8`, `2.8-IV0`, `2.8-IV1`, `3.0`, `3.0-IV0`, `3.0-IV1`, `3.1`, `3.1-IV0`, `3.2`, `3.2-IV0`, `3.3`, `3.3-IV0`, `3.3-IV1`, `3.3-IV2`, `3.3-IV3`, `3.4`, `3.4-IV0`, `3.5`, `3.5-IV0`, `3.5-IV1`, `3.5-IV2`, `3.6`, `3.6-IV0`, `3.6-IV1`, `3.6-IV2`, `3.7`, `3.7-IV0`, `3.7-IV1`, `3.7-IV2`, `3.7-IV3`, `3.7-IV4`, `3.8`, `3.8-IV0`, `3.9`, `3.9-IV0` and `3.9-IV1`.
34530
34943
  :param str message_timestamp_difference_max_ms: message.timestamp.difference.max.ms value
34531
- :param str message_timestamp_type: message.timestamp.type value
34944
+ :param str message_timestamp_type: message.timestamp.type value. The possible values are `CreateTime` and `LogAppendTime`.
34532
34945
  :param float min_cleanable_dirty_ratio: min.cleanable.dirty.ratio value
34533
34946
  :param str min_compaction_lag_ms: min.compaction.lag.ms value
34534
34947
  :param str min_insync_replicas: min.insync.replicas value
@@ -34601,7 +35014,7 @@ class GetKafkaTopicConfigResult(dict):
34601
35014
  @pulumi.getter(name="cleanupPolicy")
34602
35015
  def cleanup_policy(self) -> Optional[str]:
34603
35016
  """
34604
- cleanup.policy value
35017
+ cleanup.policy value. The possible values are `delete`, `compact` and `compact,delete`.
34605
35018
  """
34606
35019
  return pulumi.get(self, "cleanup_policy")
34607
35020
 
@@ -34609,7 +35022,7 @@ class GetKafkaTopicConfigResult(dict):
34609
35022
  @pulumi.getter(name="compressionType")
34610
35023
  def compression_type(self) -> Optional[str]:
34611
35024
  """
34612
- compression.type value
35025
+ compression.type value. The possible values are `snappy`, `gzip`, `lz4`, `producer`, `uncompressed` and `zstd`.
34613
35026
  """
34614
35027
  return pulumi.get(self, "compression_type")
34615
35028
 
@@ -34697,7 +35110,7 @@ class GetKafkaTopicConfigResult(dict):
34697
35110
  @pulumi.getter(name="messageFormatVersion")
34698
35111
  def message_format_version(self) -> Optional[str]:
34699
35112
  """
34700
- message.format.version value
35113
+ message.format.version value. The possible values are `0.8.0`, `0.8.1`, `0.8.2`, `0.9.0`, `0.10.0`, `0.10.0-IV0`, `0.10.0-IV1`, `0.10.1`, `0.10.1-IV0`, `0.10.1-IV1`, `0.10.1-IV2`, `0.10.2`, `0.10.2-IV0`, `0.11.0`, `0.11.0-IV0`, `0.11.0-IV1`, `0.11.0-IV2`, `1.0`, `1.0-IV0`, `1.1`, `1.1-IV0`, `2.0`, `2.0-IV0`, `2.0-IV1`, `2.1`, `2.1-IV0`, `2.1-IV1`, `2.1-IV2`, `2.2`, `2.2-IV0`, `2.2-IV1`, `2.3`, `2.3-IV0`, `2.3-IV1`, `2.4`, `2.4-IV0`, `2.4-IV1`, `2.5`, `2.5-IV0`, `2.6`, `2.6-IV0`, `2.7`, `2.7-IV0`, `2.7-IV1`, `2.7-IV2`, `2.8`, `2.8-IV0`, `2.8-IV1`, `3.0`, `3.0-IV0`, `3.0-IV1`, `3.1`, `3.1-IV0`, `3.2`, `3.2-IV0`, `3.3`, `3.3-IV0`, `3.3-IV1`, `3.3-IV2`, `3.3-IV3`, `3.4`, `3.4-IV0`, `3.5`, `3.5-IV0`, `3.5-IV1`, `3.5-IV2`, `3.6`, `3.6-IV0`, `3.6-IV1`, `3.6-IV2`, `3.7`, `3.7-IV0`, `3.7-IV1`, `3.7-IV2`, `3.7-IV3`, `3.7-IV4`, `3.8`, `3.8-IV0`, `3.9`, `3.9-IV0` and `3.9-IV1`.
34701
35114
  """
34702
35115
  return pulumi.get(self, "message_format_version")
34703
35116
 
@@ -34713,7 +35126,7 @@ class GetKafkaTopicConfigResult(dict):
34713
35126
  @pulumi.getter(name="messageTimestampType")
34714
35127
  def message_timestamp_type(self) -> Optional[str]:
34715
35128
  """
34716
- message.timestamp.type value
35129
+ message.timestamp.type value. The possible values are `CreateTime` and `LogAppendTime`.
34717
35130
  """
34718
35131
  return pulumi.get(self, "message_timestamp_type")
34719
35132
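The topic-level values documented here map onto the KafkaTopic resource's config block. A hedged sketch, with KafkaTopicConfigArgs assumed to be the input counterpart of the result class above and placeholder project/service names:

    import pulumi_aiven as aiven

    # Illustrative sketch only.
    topic = aiven.KafkaTopic(
        "example-topic",
        project="my-project",
        service_name="example-kafka",
        topic_name="events",
        partitions=3,
        replication=2,
        config=aiven.KafkaTopicConfigArgs(
            cleanup_policy="compact,delete",         # `delete`, `compact` or `compact,delete`
            compression_type="zstd",
            message_timestamp_type="LogAppendTime",  # or `CreateTime`
        ),
    )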
 
@@ -36860,9 +37273,9 @@ class GetMySqlMysqlUserConfigMysqlResult(dict):
36860
37273
  :param int innodb_thread_concurrency: Defines the maximum number of threads permitted inside of InnoDB. Default is 0 (infinite concurrency - no limit). Example: `10`.
36861
37274
  :param int innodb_write_io_threads: The number of I/O threads for write operations in InnoDB. Default is 4. Changing this parameter will lead to a restart of the MySQL service. Example: `10`.
36862
37275
  :param int interactive_timeout: The number of seconds the server waits for activity on an interactive connection before closing it. Example: `3600`.
36863
- :param str internal_tmp_mem_storage_engine: Enum: `TempTable`, `MEMORY`. The storage engine for in-memory internal temporary tables.
36864
- :param str log_output: Enum: `INSIGHTS`, `NONE`, `TABLE`, `INSIGHTS,TABLE`. The slow log output destination when slow_query_log is ON. To enable MySQL AI Insights, choose INSIGHTS. To use MySQL AI Insights and the mysql.slow_log table at the same time, choose INSIGHTS,TABLE. To only use the mysql.slow_log table, choose TABLE. To silence slow logs, choose NONE.
36865
- :param float long_query_time: The slow_query_logs work as SQL statements that take more than long_query_time seconds to execute. Example: `10`.
37276
+ :param str internal_tmp_mem_storage_engine: Enum: `MEMORY`, `TempTable`. The storage engine for in-memory internal temporary tables.
37277
+ :param str log_output: Enum: `INSIGHTS`, `INSIGHTS,TABLE`, `NONE`, `TABLE`. The slow log output destination when slow_query_log is ON. To enable MySQL AI Insights, choose INSIGHTS. To use MySQL AI Insights and the mysql.slow_log table at the same time, choose INSIGHTS,TABLE. To only use the mysql.slow_log table, choose TABLE. To silence slow logs, choose NONE.
37278
+ :param float long_query_time: The slow_query_logs work as SQL statements that take more than long_query_time seconds to execute. Example: `10.0`.
36866
37279
  :param int max_allowed_packet: Size of the largest message in bytes that can be received by the server. Default is 67108864 (64M). Example: `67108864`.
36867
37280
  :param int max_heap_table_size: Limits the size of internal in-memory tables. Also set tmp_table_size. Default is 16777216 (16M). Example: `16777216`.
36868
37281
  :param int net_buffer_length: Start sizes of connection buffer and result buffer. Default is 16384 (16K). Changing this parameter will lead to a restart of the MySQL service. Example: `16384`.
@@ -37078,7 +37491,7 @@ class GetMySqlMysqlUserConfigMysqlResult(dict):
37078
37491
  @pulumi.getter(name="internalTmpMemStorageEngine")
37079
37492
  def internal_tmp_mem_storage_engine(self) -> Optional[str]:
37080
37493
  """
37081
- Enum: `TempTable`, `MEMORY`. The storage engine for in-memory internal temporary tables.
37494
+ Enum: `MEMORY`, `TempTable`. The storage engine for in-memory internal temporary tables.
37082
37495
  """
37083
37496
  return pulumi.get(self, "internal_tmp_mem_storage_engine")
37084
37497
 
@@ -37086,7 +37499,7 @@ class GetMySqlMysqlUserConfigMysqlResult(dict):
37086
37499
  @pulumi.getter(name="logOutput")
37087
37500
  def log_output(self) -> Optional[str]:
37088
37501
  """
37089
- Enum: `INSIGHTS`, `NONE`, `TABLE`, `INSIGHTS,TABLE`. The slow log output destination when slow_query_log is ON. To enable MySQL AI Insights, choose INSIGHTS. To use MySQL AI Insights and the mysql.slow_log table at the same time, choose INSIGHTS,TABLE. To only use the mysql.slow_log table, choose TABLE. To silence slow logs, choose NONE.
37502
+ Enum: `INSIGHTS`, `INSIGHTS,TABLE`, `NONE`, `TABLE`. The slow log output destination when slow_query_log is ON. To enable MySQL AI Insights, choose INSIGHTS. To use MySQL AI Insights and the mysql.slow_log table at the same time, choose INSIGHTS,TABLE. To only use the mysql.slow_log table, choose TABLE. To silence slow logs, choose NONE.
37090
37503
  """
37091
37504
  return pulumi.get(self, "log_output")
37092
37505
 
@@ -37094,7 +37507,7 @@ class GetMySqlMysqlUserConfigMysqlResult(dict):
37094
37507
  @pulumi.getter(name="longQueryTime")
37095
37508
  def long_query_time(self) -> Optional[float]:
37096
37509
  """
37097
- The slow_query_logs work as SQL statements that take more than long_query_time seconds to execute. Example: `10`.
37510
+ The slow_query_logs work as SQL statements that take more than long_query_time seconds to execute. Example: `10.0`.
37098
37511
  """
37099
37512
  return pulumi.get(self, "long_query_time")
37100
37513
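A hedged sketch of the MySQL settings above on the resource side. The MySqlMysqlUserConfig*Args class names are assumed from the usual codegen pattern; project, cloud and plan values are placeholders.

    import pulumi_aiven as aiven

    # Illustrative sketch only.
    mysql = aiven.MySql(
        "example-mysql",
        project="my-project",
        cloud_name="google-europe-west1",
        plan="business-4",
        service_name="example-mysql",
        mysql_user_config=aiven.MySqlMysqlUserConfigArgs(
            mysql=aiven.MySqlMysqlUserConfigMysqlArgs(
                log_output="INSIGHTS,TABLE",                  # slow log destination
                long_query_time=10.0,                         # seconds, as a float
                internal_tmp_mem_storage_engine="TempTable",
            ),
        ),
    )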
 
@@ -37584,7 +37997,7 @@ class GetOpenSearchOpensearchUserConfigResult(dict):
37584
37997
  :param str additional_backup_regions: Additional Cloud Regions for Backup Replication.
37585
37998
  :param 'GetOpenSearchOpensearchUserConfigAzureMigrationArgs' azure_migration: Azure migration settings
37586
37999
  :param str custom_domain: Serve the web frontend using a custom CNAME pointing to the Aiven DNS name. Example: `grafana.example.org`.
37587
- :param bool disable_replication_factor_adjustment: Disable automatic replication factor adjustment for multi-node services. By default, Aiven ensures all indexes are replicated at least to two nodes. Note: Due to potential data loss in case of losing a service node, this setting can no longer be activated.
38000
+ :param bool disable_replication_factor_adjustment: Disable automatic replication factor adjustment for multi-node services. By default, Aiven ensures all indexes are replicated at least to two nodes. Note: Due to potential data loss in case of losing a service node, this setting can not be activated unless specifically allowed for the project.
37588
38001
  :param 'GetOpenSearchOpensearchUserConfigGcsMigrationArgs' gcs_migration: Google Cloud Storage migration settings
37589
38002
  :param Sequence['GetOpenSearchOpensearchUserConfigIndexPatternArgs'] index_patterns: Index patterns
37590
38003
  :param 'GetOpenSearchOpensearchUserConfigIndexRollupArgs' index_rollup: Index rollup settings
@@ -37692,7 +38105,7 @@ class GetOpenSearchOpensearchUserConfigResult(dict):
37692
38105
  @pulumi.getter(name="disableReplicationFactorAdjustment")
37693
38106
  def disable_replication_factor_adjustment(self) -> Optional[bool]:
37694
38107
  """
37695
- Disable automatic replication factor adjustment for multi-node services. By default, Aiven ensures all indexes are replicated at least to two nodes. Note: Due to potential data loss in case of losing a service node, this setting can no longer be activated.
38108
+ Disable automatic replication factor adjustment for multi-node services. By default, Aiven ensures all indexes are replicated at least to two nodes. Note: Due to potential data loss in case of losing a service node, this setting can not be activated unless specifically allowed for the project.
37696
38109
  """
37697
38110
  return pulumi.get(self, "disable_replication_factor_adjustment")
37698
38111
 
@@ -37888,28 +38301,33 @@ class GetOpenSearchOpensearchUserConfigAzureMigrationResult(dict):
37888
38301
  account: str,
37889
38302
  base_path: str,
37890
38303
  container: str,
38304
+ indices: str,
37891
38305
  snapshot_name: str,
37892
38306
  chunk_size: Optional[str] = None,
37893
38307
  compress: Optional[bool] = None,
37894
38308
  endpoint_suffix: Optional[str] = None,
37895
- indices: Optional[str] = None,
38309
+ include_aliases: Optional[bool] = None,
37896
38310
  key: Optional[str] = None,
38311
+ restore_global_state: Optional[bool] = None,
37897
38312
  sas_token: Optional[str] = None):
37898
38313
  """
37899
38314
  :param str account: Azure account name.
37900
38315
  :param str base_path: The path to the repository data within its container. The value of this setting should not start or end with a /.
37901
38316
  :param str container: Azure container name.
38317
+ :param str indices: A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. Example: `metrics*,logs*,data-20240823`.
37902
38318
  :param str snapshot_name: The snapshot name to restore from.
37903
38319
  :param str chunk_size: Big files can be broken down into chunks during snapshotting if needed. Should be the same as for the 3rd party repository.
37904
38320
  :param bool compress: When set to true metadata files are stored in compressed format.
37905
38321
  :param str endpoint_suffix: Defines the DNS suffix for Azure Storage endpoints.
37906
- :param str indices: A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. By default, a restore operation includes all data streams and indices in the snapshot. If this argument is provided, the restore operation only includes the data streams and indices that you specify. Example: `metrics*,logs*,data-20240823`.
38322
+ :param bool include_aliases: Whether to restore aliases alongside their associated indexes. Default is true.
37907
38323
  :param str key: Azure account secret key. One of key or sas_token should be specified.
38324
+ :param bool restore_global_state: If true, restore the cluster state. Defaults to false.
37908
38325
  :param str sas_token: A shared access signatures (SAS) token. One of key or sas_token should be specified.
37909
38326
  """
37910
38327
  pulumi.set(__self__, "account", account)
37911
38328
  pulumi.set(__self__, "base_path", base_path)
37912
38329
  pulumi.set(__self__, "container", container)
38330
+ pulumi.set(__self__, "indices", indices)
37913
38331
  pulumi.set(__self__, "snapshot_name", snapshot_name)
37914
38332
  if chunk_size is not None:
37915
38333
  pulumi.set(__self__, "chunk_size", chunk_size)
@@ -37917,10 +38335,12 @@ class GetOpenSearchOpensearchUserConfigAzureMigrationResult(dict):
37917
38335
  pulumi.set(__self__, "compress", compress)
37918
38336
  if endpoint_suffix is not None:
37919
38337
  pulumi.set(__self__, "endpoint_suffix", endpoint_suffix)
37920
- if indices is not None:
37921
- pulumi.set(__self__, "indices", indices)
38338
+ if include_aliases is not None:
38339
+ pulumi.set(__self__, "include_aliases", include_aliases)
37922
38340
  if key is not None:
37923
38341
  pulumi.set(__self__, "key", key)
38342
+ if restore_global_state is not None:
38343
+ pulumi.set(__self__, "restore_global_state", restore_global_state)
37924
38344
  if sas_token is not None:
37925
38345
  pulumi.set(__self__, "sas_token", sas_token)
37926
38346
 
@@ -37948,6 +38368,14 @@ class GetOpenSearchOpensearchUserConfigAzureMigrationResult(dict):
37948
38368
  """
37949
38369
  return pulumi.get(self, "container")
37950
38370
 
38371
+ @property
38372
+ @pulumi.getter
38373
+ def indices(self) -> str:
38374
+ """
38375
+ A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. Example: `metrics*,logs*,data-20240823`.
38376
+ """
38377
+ return pulumi.get(self, "indices")
38378
+
37951
38379
  @property
37952
38380
  @pulumi.getter(name="snapshotName")
37953
38381
  def snapshot_name(self) -> str:
@@ -37981,12 +38409,12 @@ class GetOpenSearchOpensearchUserConfigAzureMigrationResult(dict):
37981
38409
  return pulumi.get(self, "endpoint_suffix")
37982
38410
 
37983
38411
  @property
37984
- @pulumi.getter
37985
- def indices(self) -> Optional[str]:
38412
+ @pulumi.getter(name="includeAliases")
38413
+ def include_aliases(self) -> Optional[bool]:
37986
38414
  """
37987
- A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. By default, a restore operation includes all data streams and indices in the snapshot. If this argument is provided, the restore operation only includes the data streams and indices that you specify. Example: `metrics*,logs*,data-20240823`.
38415
+ Whether to restore aliases alongside their associated indexes. Default is true.
37988
38416
  """
37989
- return pulumi.get(self, "indices")
38417
+ return pulumi.get(self, "include_aliases")
37990
38418
 
37991
38419
  @property
37992
38420
  @pulumi.getter
@@ -37996,6 +38424,14 @@ class GetOpenSearchOpensearchUserConfigAzureMigrationResult(dict):
37996
38424
  """
37997
38425
  return pulumi.get(self, "key")
37998
38426
 
38427
+ @property
38428
+ @pulumi.getter(name="restoreGlobalState")
38429
+ def restore_global_state(self) -> Optional[bool]:
38430
+ """
38431
+ If true, restore the cluster state. Defaults to false.
38432
+ """
38433
+ return pulumi.get(self, "restore_global_state")
38434
+
37999
38435
  @property
38000
38436
  @pulumi.getter(name="sasToken")
38001
38437
  def sas_token(self) -> Optional[str]:
@@ -38011,29 +38447,36 @@ class GetOpenSearchOpensearchUserConfigGcsMigrationResult(dict):
38011
38447
  base_path: str,
38012
38448
  bucket: str,
38013
38449
  credentials: str,
38450
+ indices: str,
38014
38451
  snapshot_name: str,
38015
38452
  chunk_size: Optional[str] = None,
38016
38453
  compress: Optional[bool] = None,
38017
- indices: Optional[str] = None):
38454
+ include_aliases: Optional[bool] = None,
38455
+ restore_global_state: Optional[bool] = None):
38018
38456
  """
38019
38457
  :param str base_path: The path to the repository data within its container. The value of this setting should not start or end with a /.
38020
38458
  :param str bucket: The path to the repository data within its container.
38021
38459
  :param str credentials: Google Cloud Storage credentials file content.
38460
+ :param str indices: A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. Example: `metrics*,logs*,data-20240823`.
38022
38461
  :param str snapshot_name: The snapshot name to restore from.
38023
38462
  :param str chunk_size: Big files can be broken down into chunks during snapshotting if needed. Should be the same as for the 3rd party repository.
38024
38463
  :param bool compress: When set to true metadata files are stored in compressed format.
38025
- :param str indices: A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. By default, a restore operation includes all data streams and indices in the snapshot. If this argument is provided, the restore operation only includes the data streams and indices that you specify. Example: `metrics*,logs*,data-20240823`.
38464
+ :param bool include_aliases: Whether to restore aliases alongside their associated indexes. Default is true.
38465
+ :param bool restore_global_state: If true, restore the cluster state. Defaults to false.
38026
38466
  """
38027
38467
  pulumi.set(__self__, "base_path", base_path)
38028
38468
  pulumi.set(__self__, "bucket", bucket)
38029
38469
  pulumi.set(__self__, "credentials", credentials)
38470
+ pulumi.set(__self__, "indices", indices)
38030
38471
  pulumi.set(__self__, "snapshot_name", snapshot_name)
38031
38472
  if chunk_size is not None:
38032
38473
  pulumi.set(__self__, "chunk_size", chunk_size)
38033
38474
  if compress is not None:
38034
38475
  pulumi.set(__self__, "compress", compress)
38035
- if indices is not None:
38036
- pulumi.set(__self__, "indices", indices)
38476
+ if include_aliases is not None:
38477
+ pulumi.set(__self__, "include_aliases", include_aliases)
38478
+ if restore_global_state is not None:
38479
+ pulumi.set(__self__, "restore_global_state", restore_global_state)
38037
38480
 
38038
38481
  @property
38039
38482
  @pulumi.getter(name="basePath")
@@ -38059,6 +38502,14 @@ class GetOpenSearchOpensearchUserConfigGcsMigrationResult(dict):
38059
38502
  """
38060
38503
  return pulumi.get(self, "credentials")
38061
38504
 
38505
+ @property
38506
+ @pulumi.getter
38507
+ def indices(self) -> str:
38508
+ """
38509
+ A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. Example: `metrics*,logs*,data-20240823`.
38510
+ """
38511
+ return pulumi.get(self, "indices")
38512
+
38062
38513
  @property
38063
38514
  @pulumi.getter(name="snapshotName")
38064
38515
  def snapshot_name(self) -> str:
@@ -38084,12 +38535,20 @@ class GetOpenSearchOpensearchUserConfigGcsMigrationResult(dict):
38084
38535
  return pulumi.get(self, "compress")
38085
38536
 
38086
38537
  @property
38087
- @pulumi.getter
38088
- def indices(self) -> Optional[str]:
38538
+ @pulumi.getter(name="includeAliases")
38539
+ def include_aliases(self) -> Optional[bool]:
38089
38540
  """
38090
- A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. By default, a restore operation includes all data streams and indices in the snapshot. If this argument is provided, the restore operation only includes the data streams and indices that you specify. Example: `metrics*,logs*,data-20240823`.
38541
+ Whether to restore aliases alongside their associated indexes. Default is true.
38091
38542
  """
38092
- return pulumi.get(self, "indices")
38543
+ return pulumi.get(self, "include_aliases")
38544
+
38545
+ @property
38546
+ @pulumi.getter(name="restoreGlobalState")
38547
+ def restore_global_state(self) -> Optional[bool]:
38548
+ """
38549
+ If true, restore the cluster state. Defaults to false.
38550
+ """
38551
+ return pulumi.get(self, "restore_global_state")
38093
38552
 
38094
38553
 
38095
38554
  @pulumi.output_type
@@ -39227,7 +39686,7 @@ class GetOpenSearchOpensearchUserConfigOpensearchSearchBackpressureResult(dict):
39227
39686
  search_shard_task: Optional['outputs.GetOpenSearchOpensearchUserConfigOpensearchSearchBackpressureSearchShardTaskResult'] = None,
39228
39687
  search_task: Optional['outputs.GetOpenSearchOpensearchUserConfigOpensearchSearchBackpressureSearchTaskResult'] = None):
39229
39688
  """
39230
- :param str mode: Enum: `monitor_only`, `enforced`, `disabled`. The search backpressure mode. Valid values are monitor_only, enforced, or disabled. Default is monitor_only.
39689
+ :param str mode: Enum: `disabled`, `enforced`, `monitor_only`. The search backpressure mode. Valid values are monitor_only, enforced, or disabled. Default is monitor_only.
39231
39690
  :param 'GetOpenSearchOpensearchUserConfigOpensearchSearchBackpressureNodeDuressArgs' node_duress: Node duress settings
39232
39691
  :param 'GetOpenSearchOpensearchUserConfigOpensearchSearchBackpressureSearchShardTaskArgs' search_shard_task: Search shard settings
39233
39692
  :param 'GetOpenSearchOpensearchUserConfigOpensearchSearchBackpressureSearchTaskArgs' search_task: Search task settings
@@ -39245,7 +39704,7 @@ class GetOpenSearchOpensearchUserConfigOpensearchSearchBackpressureResult(dict):
39245
39704
  @pulumi.getter
39246
39705
  def mode(self) -> Optional[str]:
39247
39706
  """
39248
- Enum: `monitor_only`, `enforced`, `disabled`. The search backpressure mode. Valid values are monitor_only, enforced, or disabled. Default is monitor_only.
39707
+ Enum: `disabled`, `enforced`, `monitor_only`. The search backpressure mode. Valid values are monitor_only, enforced, or disabled. Default is monitor_only.
39249
39708
  """
39250
39709
  return pulumi.get(self, "mode")
39251
39710
 
@@ -39875,30 +40334,35 @@ class GetOpenSearchOpensearchUserConfigS3MigrationResult(dict):
39875
40334
  access_key: str,
39876
40335
  base_path: str,
39877
40336
  bucket: str,
40337
+ indices: str,
39878
40338
  region: str,
39879
40339
  secret_key: str,
39880
40340
  snapshot_name: str,
39881
40341
  chunk_size: Optional[str] = None,
39882
40342
  compress: Optional[bool] = None,
39883
40343
  endpoint: Optional[str] = None,
39884
- indices: Optional[str] = None,
40344
+ include_aliases: Optional[bool] = None,
40345
+ restore_global_state: Optional[bool] = None,
39885
40346
  server_side_encryption: Optional[bool] = None):
39886
40347
  """
39887
40348
  :param str access_key: AWS Access key.
39888
40349
  :param str base_path: The path to the repository data within its container. The value of this setting should not start or end with a /.
39889
40350
  :param str bucket: S3 bucket name.
40351
+ :param str indices: A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. Example: `metrics*,logs*,data-20240823`.
39890
40352
  :param str region: S3 region.
39891
40353
  :param str secret_key: AWS secret key.
39892
40354
  :param str snapshot_name: The snapshot name to restore from.
39893
40355
  :param str chunk_size: Big files can be broken down into chunks during snapshotting if needed. Should be the same as for the 3rd party repository.
39894
40356
  :param bool compress: When set to true metadata files are stored in compressed format.
39895
40357
  :param str endpoint: The S3 service endpoint to connect to. If you are using an S3-compatible service then you should set this to the service’s endpoint.
39896
- :param str indices: A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. By default, a restore operation includes all data streams and indices in the snapshot. If this argument is provided, the restore operation only includes the data streams and indices that you specify. Example: `metrics*,logs*,data-20240823`.
40358
+ :param bool include_aliases: Whether to restore aliases alongside their associated indexes. Default is true.
40359
+ :param bool restore_global_state: If true, restore the cluster state. Defaults to false.
39897
40360
  :param bool server_side_encryption: When set to true files are encrypted on server side.
39898
40361
  """
39899
40362
  pulumi.set(__self__, "access_key", access_key)
39900
40363
  pulumi.set(__self__, "base_path", base_path)
39901
40364
  pulumi.set(__self__, "bucket", bucket)
40365
+ pulumi.set(__self__, "indices", indices)
39902
40366
  pulumi.set(__self__, "region", region)
39903
40367
  pulumi.set(__self__, "secret_key", secret_key)
39904
40368
  pulumi.set(__self__, "snapshot_name", snapshot_name)
@@ -39908,8 +40372,10 @@ class GetOpenSearchOpensearchUserConfigS3MigrationResult(dict):
39908
40372
  pulumi.set(__self__, "compress", compress)
39909
40373
  if endpoint is not None:
39910
40374
  pulumi.set(__self__, "endpoint", endpoint)
39911
- if indices is not None:
39912
- pulumi.set(__self__, "indices", indices)
40375
+ if include_aliases is not None:
40376
+ pulumi.set(__self__, "include_aliases", include_aliases)
40377
+ if restore_global_state is not None:
40378
+ pulumi.set(__self__, "restore_global_state", restore_global_state)
39913
40379
  if server_side_encryption is not None:
39914
40380
  pulumi.set(__self__, "server_side_encryption", server_side_encryption)
39915
40381
 
@@ -39937,6 +40403,14 @@ class GetOpenSearchOpensearchUserConfigS3MigrationResult(dict):
39937
40403
  """
39938
40404
  return pulumi.get(self, "bucket")
39939
40405
 
40406
+ @property
40407
+ @pulumi.getter
40408
+ def indices(self) -> str:
40409
+ """
40410
+ A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. Example: `metrics*,logs*,data-20240823`.
40411
+ """
40412
+ return pulumi.get(self, "indices")
40413
+
39940
40414
  @property
39941
40415
  @pulumi.getter
39942
40416
  def region(self) -> str:
@@ -39986,12 +40460,20 @@ class GetOpenSearchOpensearchUserConfigS3MigrationResult(dict):
39986
40460
  return pulumi.get(self, "endpoint")
39987
40461
 
39988
40462
  @property
39989
- @pulumi.getter
39990
- def indices(self) -> Optional[str]:
40463
+ @pulumi.getter(name="includeAliases")
40464
+ def include_aliases(self) -> Optional[bool]:
39991
40465
  """
39992
- A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. By default, a restore operation includes all data streams and indices in the snapshot. If this argument is provided, the restore operation only includes the data streams and indices that you specify. Example: `metrics*,logs*,data-20240823`.
40466
+ Whether to restore aliases alongside their associated indexes. Default is true.
39993
40467
  """
39994
- return pulumi.get(self, "indices")
40468
+ return pulumi.get(self, "include_aliases")
40469
+
40470
+ @property
40471
+ @pulumi.getter(name="restoreGlobalState")
40472
+ def restore_global_state(self) -> Optional[bool]:
40473
+ """
40474
+ If true, restore the cluster state. Defaults to false.
40475
+ """
40476
+ return pulumi.get(self, "restore_global_state")
39995
40477
 
39996
40478
  @property
39997
40479
  @pulumi.getter(name="serverSideEncryption")
@@ -40751,7 +41233,7 @@ class GetPgPgUserConfigResult(dict):
40751
41233
  :param str service_to_fork_from: Name of another service to fork from. This has effect only when a new service is being created. Example: `anotherservicename`.
40752
41234
  :param float shared_buffers_percentage: Percentage of total RAM that the database server uses for shared memory buffers. Valid range is 20-60 (float), which corresponds to 20% - 60%. This setting adjusts the shared_buffers configuration value. Example: `41.5`.
40753
41235
  :param bool static_ips: Use static public IP addresses.
40754
- :param str synchronous_replication: Enum: `quorum`, `off`. Synchronous replication type. Note that the service plan also needs to support synchronous replication.
41236
+ :param str synchronous_replication: Enum: `off`, `quorum`. Synchronous replication type. Note that the service plan also needs to support synchronous replication.
40755
41237
  :param 'GetPgPgUserConfigTimescaledbArgs' timescaledb: System-wide settings for the timescaledb extension
40756
41238
  :param str variant: Enum: `aiven`, `timescale`. Variant of the PostgreSQL service, may affect the features that are exposed by default.
40757
41239
  :param int work_mem: Sets the maximum amount of memory to be used by a query operation (such as a sort or hash table) before writing to temporary disk files, in MB. Default is 1MB + 0.075% of total RAM (up to 32MB). Example: `4`.
@@ -41053,7 +41535,7 @@ class GetPgPgUserConfigResult(dict):
41053
41535
  @pulumi.getter(name="synchronousReplication")
41054
41536
  def synchronous_replication(self) -> Optional[str]:
41055
41537
  """
41056
- Enum: `quorum`, `off`. Synchronous replication type. Note that the service plan also needs to support synchronous replication.
41538
+ Enum: `off`, `quorum`. Synchronous replication type. Note that the service plan also needs to support synchronous replication.
41057
41539
  """
41058
41540
  return pulumi.get(self, "synchronous_replication")
41059
41541
 
@@ -41296,8 +41778,8 @@ class GetPgPgUserConfigPgResult(dict):
41296
41778
  :param int idle_in_transaction_session_timeout: Time out sessions with open transactions after this number of milliseconds.
41297
41779
  :param bool jit: Controls system-wide use of Just-in-Time Compilation (JIT).
41298
41780
  :param int log_autovacuum_min_duration: Causes each action executed by autovacuum to be logged if it ran for at least the specified number of milliseconds. Setting this to zero logs all autovacuum actions. Minus-one (the default) disables logging autovacuum actions.
41299
- :param str log_error_verbosity: Enum: `TERSE`, `DEFAULT`, `VERBOSE`. Controls the amount of detail written in the server log for each message that is logged.
41300
- :param str log_line_prefix: Enum: `'pid=%p,user=%u,db=%d,app=%a,client=%h '`, `'%t [%p]: [%l-1] user=%u,db=%d,app=%a,client=%h '`, `'%m [%p] %q[user=%u,db=%d,app=%a] '`, `'pid=%p,user=%u,db=%d,app=%a,client=%h,txid=%x,qid=%Q '`. Choose from one of the available log formats.
41781
+ :param str log_error_verbosity: Enum: `DEFAULT`, `TERSE`, `VERBOSE`. Controls the amount of detail written in the server log for each message that is logged.
41782
+ :param str log_line_prefix: Enum: `'%m [%p] %q[user=%u,db=%d,app=%a] '`, `'%t [%p]: [%l-1] user=%u,db=%d,app=%a,client=%h '`, `'pid=%p,user=%u,db=%d,app=%a,client=%h '`, `'pid=%p,user=%u,db=%d,app=%a,client=%h,txid=%x,qid=%Q '`. Choose from one of the available log formats.
41301
41783
  :param int log_min_duration_statement: Log statements that take more than this number of milliseconds to run, -1 disables.
41302
41784
  :param int log_temp_files: Log statements for each temporary file created larger than this number of kilobytes, -1 disables.
41303
41785
  :param int max_files_per_process: PostgreSQL maximum number of files that can be open per process.
@@ -41318,12 +41800,12 @@ class GetPgPgUserConfigPgResult(dict):
41318
41800
  :param str pg_partman_bgw_dot_role: Controls which role to use for pg_partman's scheduled background tasks. Example: `myrolename`.
41319
41801
  :param bool pg_stat_monitor_dot_pgsm_enable_query_plan: Enables or disables query plan monitoring.
41320
41802
  :param int pg_stat_monitor_dot_pgsm_max_buckets: Sets the maximum number of buckets. Example: `10`.
41321
- :param str pg_stat_statements_dot_track: Enum: `all`, `top`, `none`. Controls which statements are counted. Specify top to track top-level statements (those issued directly by clients), all to also track nested statements (such as statements invoked within functions), or none to disable statement statistics collection. The default value is top.
41803
+ :param str pg_stat_statements_dot_track: Enum: `all`, `none`, `top`. Controls which statements are counted. Specify top to track top-level statements (those issued directly by clients), all to also track nested statements (such as statements invoked within functions), or none to disable statement statistics collection. The default value is top.
41322
41804
  :param int temp_file_limit: PostgreSQL temporary file limit in KiB, -1 for unlimited. Example: `5000000`.
41323
41805
  :param str timezone: PostgreSQL service timezone. Example: `Europe/Helsinki`.
41324
41806
  :param int track_activity_query_size: Specifies the number of bytes reserved to track the currently executing command for each active session. Example: `1024`.
41325
41807
  :param str track_commit_timestamp: Enum: `off`, `on`. Record commit time of transactions.
41326
- :param str track_functions: Enum: `all`, `pl`, `none`. Enables tracking of function call counts and time used.
41808
+ :param str track_functions: Enum: `all`, `none`, `pl`. Enables tracking of function call counts and time used.
41327
41809
  :param str track_io_timing: Enum: `off`, `on`. Enables timing of database I/O calls. This parameter is off by default, because it will repeatedly query the operating system for the current time, which may cause significant overhead on some platforms.
41328
41810
  :param int wal_sender_timeout: Terminate replication connections that are inactive for longer than this amount of time, in milliseconds. Setting this value to zero disables the timeout. Example: `60000`.
41329
41811
  :param int wal_writer_delay: WAL flush interval in milliseconds. Note that setting this value to lower than the default 200ms may negatively impact performance. Example: `50`.
@@ -41575,7 +42057,7 @@ class GetPgPgUserConfigPgResult(dict):
41575
42057
  @pulumi.getter(name="logErrorVerbosity")
41576
42058
  def log_error_verbosity(self) -> Optional[str]:
41577
42059
  """
41578
- Enum: `TERSE`, `DEFAULT`, `VERBOSE`. Controls the amount of detail written in the server log for each message that is logged.
42060
+ Enum: `DEFAULT`, `TERSE`, `VERBOSE`. Controls the amount of detail written in the server log for each message that is logged.
41579
42061
  """
41580
42062
  return pulumi.get(self, "log_error_verbosity")
41581
42063
 
@@ -41583,7 +42065,7 @@ class GetPgPgUserConfigPgResult(dict):
41583
42065
  @pulumi.getter(name="logLinePrefix")
41584
42066
  def log_line_prefix(self) -> Optional[str]:
41585
42067
  """
41586
- Enum: `'pid=%p,user=%u,db=%d,app=%a,client=%h '`, `'%t [%p]: [%l-1] user=%u,db=%d,app=%a,client=%h '`, `'%m [%p] %q[user=%u,db=%d,app=%a] '`, `'pid=%p,user=%u,db=%d,app=%a,client=%h,txid=%x,qid=%Q '`. Choose from one of the available log formats.
42068
+ Enum: `'%m [%p] %q[user=%u,db=%d,app=%a] '`, `'%t [%p]: [%l-1] user=%u,db=%d,app=%a,client=%h '`, `'pid=%p,user=%u,db=%d,app=%a,client=%h '`, `'pid=%p,user=%u,db=%d,app=%a,client=%h,txid=%x,qid=%Q '`. Choose from one of the available log formats.
41587
42069
  """
41588
42070
  return pulumi.get(self, "log_line_prefix")
41589
42071
 
@@ -41751,7 +42233,7 @@ class GetPgPgUserConfigPgResult(dict):
41751
42233
  @pulumi.getter(name="pgStatStatementsDotTrack")
41752
42234
  def pg_stat_statements_dot_track(self) -> Optional[str]:
41753
42235
  """
41754
- Enum: `all`, `top`, `none`. Controls which statements are counted. Specify top to track top-level statements (those issued directly by clients), all to also track nested statements (such as statements invoked within functions), or none to disable statement statistics collection. The default value is top.
42236
+ Enum: `all`, `none`, `top`. Controls which statements are counted. Specify top to track top-level statements (those issued directly by clients), all to also track nested statements (such as statements invoked within functions), or none to disable statement statistics collection. The default value is top.
41755
42237
  """
41756
42238
  return pulumi.get(self, "pg_stat_statements_dot_track")
41757
42239
 
@@ -41791,7 +42273,7 @@ class GetPgPgUserConfigPgResult(dict):
41791
42273
  @pulumi.getter(name="trackFunctions")
41792
42274
  def track_functions(self) -> Optional[str]:
41793
42275
  """
41794
- Enum: `all`, `pl`, `none`. Enables tracking of function call counts and time used.
42276
+ Enum: `all`, `none`, `pl`. Enables tracking of function call counts and time used.
41795
42277
  """
41796
42278
  return pulumi.get(self, "track_functions")
41797
42279
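A hedged sketch of the PostgreSQL options above. The PgPgUserConfigArgs / PgPgUserConfigPgArgs names are assumed from the usual codegen pattern, the plan is a placeholder, and synchronous replication additionally requires a plan that supports it.

    import pulumi_aiven as aiven

    # Illustrative sketch only.
    pg = aiven.Pg(
        "example-pg",
        project="my-project",
        cloud_name="google-europe-west1",
        plan="business-8",
        service_name="example-pg",
        pg_user_config=aiven.PgPgUserConfigArgs(
            synchronous_replication="quorum",  # `off` or `quorum`
            pg=aiven.PgPgUserConfigPgArgs(
                log_error_verbosity="DEFAULT",
                log_line_prefix="'pid=%p,user=%u,db=%d,app=%a,client=%h '",
                pg_stat_statements_dot_track="top",
                track_functions="pl",
            ),
        ),
    )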
 
@@ -42097,7 +42579,7 @@ class GetPgPgUserConfigPgbouncerResult(dict):
42097
42579
  """
42098
42580
  :param int autodb_idle_timeout: If the automatically created database pools have been unused this many seconds, they are freed. If 0 then timeout is disabled. (seconds). Default: `3600`.
42099
42581
  :param int autodb_max_db_connections: Do not allow more than this many server connections per database (regardless of user). Setting it to 0 means unlimited. Example: `0`.
42100
- :param str autodb_pool_mode: Enum: `session`, `transaction`, `statement`. PGBouncer pool mode. Default: `transaction`.
42582
+ :param str autodb_pool_mode: Enum: `session`, `statement`, `transaction`. PGBouncer pool mode. Default: `transaction`.
42101
42583
  :param int autodb_pool_size: If non-zero then create automatically a pool of that size per user when a pool doesn't exist. Default: `0`.
42102
42584
  :param Sequence[str] ignore_startup_parameters: List of parameters to ignore when given in startup packet.
42103
42585
  :param int max_prepared_statements: PgBouncer tracks protocol-level named prepared statements related commands sent by the client in transaction and statement pooling modes when max_prepared_statements is set to a non-zero value. Setting it to 0 disables prepared statements. max_prepared_statements defaults to 100, and its maximum is 3000. Default: `100`.
@@ -42147,7 +42629,7 @@ class GetPgPgUserConfigPgbouncerResult(dict):
42147
42629
  @pulumi.getter(name="autodbPoolMode")
42148
42630
  def autodb_pool_mode(self) -> Optional[str]:
42149
42631
  """
42150
- Enum: `session`, `transaction`, `statement`. PGBouncer pool mode. Default: `transaction`.
42632
+ Enum: `session`, `statement`, `transaction`. PGBouncer pool mode. Default: `transaction`.
42151
42633
  """
42152
42634
  return pulumi.get(self, "autodb_pool_mode")
42153
42635
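The PgBouncer block above lives in the same pg_user_config; a hedged sketch showing only that block (the PgPgUserConfigPgbouncerArgs name is assumed):

    import pulumi_aiven as aiven

    # Illustrative sketch only; just the connection pooler settings are populated.
    pg = aiven.Pg(
        "example-pg-pgbouncer",
        project="my-project",
        cloud_name="google-europe-west1",
        plan="business-4",
        service_name="example-pg-pgbouncer",
        pg_user_config=aiven.PgPgUserConfigArgs(
            pgbouncer=aiven.PgPgUserConfigPgbouncerArgs(
                autodb_pool_mode="transaction",  # default pool mode
                autodb_idle_timeout=3600,        # seconds; 0 disables the timeout
            ),
        ),
    )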
 
@@ -42673,7 +43155,7 @@ class GetRedisRedisUserConfigResult(dict):
42673
43155
  :param int redis_io_threads: Set Redis IO thread count. Changing this will cause a restart of the Redis service. Example: `1`.
42674
43156
  :param int redis_lfu_decay_time: LFU maxmemory-policy counter decay time in minutes. Default: `1`.
42675
43157
  :param int redis_lfu_log_factor: Counter logarithm factor for volatile-lfu and allkeys-lfu maxmemory-policies. Default: `10`.
42676
- :param str redis_maxmemory_policy: Enum: `noeviction`, `allkeys-lru`, `volatile-lru`, `allkeys-random`, `volatile-random`, `volatile-ttl`, `volatile-lfu`, `allkeys-lfu`. Redis maxmemory-policy. Default: `noeviction`.
43158
+ :param str redis_maxmemory_policy: Enum: `allkeys-lfu`, `allkeys-lru`, `allkeys-random`, `noeviction`, `volatile-lfu`, `volatile-lru`, `volatile-random`, `volatile-ttl`. Redis maxmemory-policy. Default: `noeviction`.
42677
43159
  :param str redis_notify_keyspace_events: Set notify-keyspace-events option.
42678
43160
  :param int redis_number_of_databases: Set number of Redis databases. Changing this will cause a restart of the Redis service. Example: `16`.
42679
43161
  :param str redis_persistence: Enum: `off`, `rdb`. When persistence is `rdb`, Redis does RDB dumps each 10 minutes if any key is changed. Also RDB dumps are done according to the backup schedule for backup purposes. When persistence is `off`, no RDB dumps or backups are done, so data can be lost at any moment if the service is restarted for any reason, or if the service is powered off. Also, the service can't be forked.
@@ -42873,7 +43355,7 @@ class GetRedisRedisUserConfigResult(dict):
42873
43355
  @pulumi.getter(name="redisMaxmemoryPolicy")
42874
43356
  def redis_maxmemory_policy(self) -> Optional[str]:
42875
43357
  """
42876
- Enum: `noeviction`, `allkeys-lru`, `volatile-lru`, `allkeys-random`, `volatile-random`, `volatile-ttl`, `volatile-lfu`, `allkeys-lfu`. Redis maxmemory-policy. Default: `noeviction`.
43358
+ Enum: `allkeys-lfu`, `allkeys-lru`, `allkeys-random`, `noeviction`, `volatile-lfu`, `volatile-lru`, `volatile-random`, `volatile-ttl`. Redis maxmemory-policy. Default: `noeviction`.
42877
43359
  """
42878
43360
  return pulumi.get(self, "redis_maxmemory_policy")
42879
43361
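A hedged sketch for the Redis eviction policy documented above (the RedisRedisUserConfigArgs name is assumed; other values are placeholders):

    import pulumi_aiven as aiven

    # Illustrative sketch only.
    redis = aiven.Redis(
        "example-redis",
        project="my-project",
        cloud_name="google-europe-west1",
        plan="hobbyist",
        service_name="example-redis",
        redis_user_config=aiven.RedisRedisUserConfigArgs(
            redis_maxmemory_policy="allkeys-lru",  # default is `noeviction`
            redis_lfu_log_factor=10,
            redis_lfu_decay_time=1,
        ),
    )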
 
@@ -43309,11 +43791,11 @@ class GetServiceIntegrationClickhouseKafkaUserConfigTableResult(dict):
  thread_per_consumer: Optional[bool] = None):
  """
  :param Sequence['GetServiceIntegrationClickhouseKafkaUserConfigTableColumnArgs'] columns: Table columns
- :param str data_format: Enum: `Avro`, `CSV`, `JSONAsString`, `JSONCompactEachRow`, `JSONCompactStringsEachRow`, `JSONEachRow`, `JSONStringsEachRow`, `MsgPack`, `TSKV`, `TSV`, `TabSeparated`, `RawBLOB`, `AvroConfluent`, `Parquet`. Message data format. Default: `JSONEachRow`.
+ :param str data_format: Enum: `Avro`, `AvroConfluent`, `CSV`, `JSONAsString`, `JSONCompactEachRow`, `JSONCompactStringsEachRow`, `JSONEachRow`, `JSONStringsEachRow`, `MsgPack`, `Parquet`, `RawBLOB`, `TSKV`, `TSV`, `TabSeparated`. Message data format. Default: `JSONEachRow`.
  :param str group_name: Kafka consumers group. Default: `clickhouse`.
  :param str name: Name of the table. Example: `events`.
  :param Sequence['GetServiceIntegrationClickhouseKafkaUserConfigTableTopicArgs'] topics: Kafka topics
- :param str auto_offset_reset: Enum: `smallest`, `earliest`, `beginning`, `largest`, `latest`, `end`. Action to take when there is no initial offset in offset store or the desired offset is out of range. Default: `earliest`.
+ :param str auto_offset_reset: Enum: `beginning`, `earliest`, `end`, `largest`, `latest`, `smallest`. Action to take when there is no initial offset in offset store or the desired offset is out of range. Default: `earliest`.
  :param str date_time_input_format: Enum: `basic`, `best_effort`, `best_effort_us`. Method to read DateTime from text input formats. Default: `basic`.
  :param str handle_error_mode: Enum: `default`, `stream`. How to handle errors for Kafka engine. Default: `default`.
  :param int max_block_size: Number of row collected by poll(s) for flushing data from Kafka. Default: `0`.
@@ -43362,7 +43844,7 @@ class GetServiceIntegrationClickhouseKafkaUserConfigTableResult(dict):
  @pulumi.getter(name="dataFormat")
  def data_format(self) -> str:
  """
- Enum: `Avro`, `CSV`, `JSONAsString`, `JSONCompactEachRow`, `JSONCompactStringsEachRow`, `JSONEachRow`, `JSONStringsEachRow`, `MsgPack`, `TSKV`, `TSV`, `TabSeparated`, `RawBLOB`, `AvroConfluent`, `Parquet`. Message data format. Default: `JSONEachRow`.
+ Enum: `Avro`, `AvroConfluent`, `CSV`, `JSONAsString`, `JSONCompactEachRow`, `JSONCompactStringsEachRow`, `JSONEachRow`, `JSONStringsEachRow`, `MsgPack`, `Parquet`, `RawBLOB`, `TSKV`, `TSV`, `TabSeparated`. Message data format. Default: `JSONEachRow`.
  """
  return pulumi.get(self, "data_format")

@@ -43394,7 +43876,7 @@ class GetServiceIntegrationClickhouseKafkaUserConfigTableResult(dict):
  @pulumi.getter(name="autoOffsetReset")
  def auto_offset_reset(self) -> Optional[str]:
  """
- Enum: `smallest`, `earliest`, `beginning`, `largest`, `latest`, `end`. Action to take when there is no initial offset in offset store or the desired offset is out of range. Default: `earliest`.
+ Enum: `beginning`, `earliest`, `end`, `largest`, `latest`, `smallest`. Action to take when there is no initial offset in offset store or the desired offset is out of range. Default: `earliest`.
  """
  return pulumi.get(self, "auto_offset_reset")

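These table options belong to the ClickHouse-Kafka service integration. A minimal sketch, assuming the input-side `ServiceIntegrationClickhouseKafkaUserConfig*Args` classes mirror the result types above; project names, service names, and the column definition are placeholders:

```python
import pulumi_aiven as aiven

# Placeholder project and service names.
clickhouse_kafka = aiven.ServiceIntegration(
    "clickhouse-kafka",
    project="my-project",
    integration_type="clickhouse_kafka",
    source_service_name="my-kafka",
    destination_service_name="my-clickhouse",
    clickhouse_kafka_user_config=aiven.ServiceIntegrationClickhouseKafkaUserConfigArgs(
        tables=[aiven.ServiceIntegrationClickhouseKafkaUserConfigTableArgs(
            name="events",
            group_name="clickhouse",
            data_format="JSONEachRow",     # any value from the documented enum
            auto_offset_reset="earliest",
            columns=[aiven.ServiceIntegrationClickhouseKafkaUserConfigTableColumnArgs(
                name="id",
                type="UInt64",
            )],
            topics=[aiven.ServiceIntegrationClickhouseKafkaUserConfigTableTopicArgs(name="events")],
        )],
    ),
)
```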
@@ -43823,6 +44305,53 @@ class GetServiceIntegrationDatadogUserConfigRedisResult(dict):
  return pulumi.get(self, "command_stats_enabled")


+ @pulumi.output_type
+ class GetServiceIntegrationEndpointAutoscalerUserConfigResult(dict):
+ def __init__(__self__, *,
+ autoscalings: Sequence['outputs.GetServiceIntegrationEndpointAutoscalerUserConfigAutoscalingResult']):
+ """
+ :param Sequence['GetServiceIntegrationEndpointAutoscalerUserConfigAutoscalingArgs'] autoscalings: Configure autoscaling thresholds for a service
+ """
+ pulumi.set(__self__, "autoscalings", autoscalings)
+
+ @property
+ @pulumi.getter
+ def autoscalings(self) -> Sequence['outputs.GetServiceIntegrationEndpointAutoscalerUserConfigAutoscalingResult']:
+ """
+ Configure autoscaling thresholds for a service
+ """
+ return pulumi.get(self, "autoscalings")
+
+
+ @pulumi.output_type
+ class GetServiceIntegrationEndpointAutoscalerUserConfigAutoscalingResult(dict):
+ def __init__(__self__, *,
+ cap_gb: int,
+ type: str):
+ """
+ :param int cap_gb: The maximum total disk size (in gb) to allow autoscaler to scale up to. Example: `300`.
+ :param str type: Enum: `autoscale_disk`. Type of autoscale event.
+ """
+ pulumi.set(__self__, "cap_gb", cap_gb)
+ pulumi.set(__self__, "type", type)
+
+ @property
+ @pulumi.getter(name="capGb")
+ def cap_gb(self) -> int:
+ """
+ The maximum total disk size (in gb) to allow autoscaler to scale up to. Example: `300`.
+ """
+ return pulumi.get(self, "cap_gb")
+
+ @property
+ @pulumi.getter
+ def type(self) -> str:
+ """
+ Enum: `autoscale_disk`. Type of autoscale event.
+ """
+ return pulumi.get(self, "type")
+
+
  @pulumi.output_type
  class GetServiceIntegrationEndpointDatadogUserConfigResult(dict):
  def __init__(__self__, *,
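The new autoscaler result types correspond to an autoscaler integration endpoint. A minimal sketch of declaring one, assuming the endpoint type string is `autoscaler` and the input-side `ServiceIntegrationEndpointAutoscalerUserConfig*Args` classes mirror the output types added above; project and resource names are placeholders:

```python
import pulumi_aiven as aiven

# Placeholder project and endpoint names; the `autoscaler` endpoint type and
# the input-side Args classes are assumed to mirror the output types in this diff.
disk_autoscaler = aiven.ServiceIntegrationEndpoint(
    "disk-autoscaler",
    project="my-project",
    endpoint_name="disk-autoscaler-300gb",
    endpoint_type="autoscaler",
    autoscaler_user_config=aiven.ServiceIntegrationEndpointAutoscalerUserConfigArgs(
        autoscalings=[aiven.ServiceIntegrationEndpointAutoscalerUserConfigAutoscalingArgs(
            cap_gb=300,               # scale the total disk up to at most 300 GB
            type="autoscale_disk",    # only documented autoscale event type
        )],
    ),
)
```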
@@ -43840,7 +44369,7 @@ class GetServiceIntegrationEndpointDatadogUserConfigResult(dict):
  :param int kafka_consumer_check_instances: Number of separate instances to fetch kafka consumer statistics with. Example: `8`.
  :param int kafka_consumer_stats_timeout: Number of seconds that datadog will wait to get consumer statistics from brokers. Example: `60`.
  :param int max_partition_contexts: Maximum number of partition contexts to send. Example: `32000`.
- :param str site: Enum: `datadoghq.com`, `datadoghq.eu`, `us3.datadoghq.com`, `us5.datadoghq.com`, `ddog-gov.com`, `ap1.datadoghq.com`. Datadog intake site. Defaults to datadoghq.com.
+ :param str site: Enum: `ap1.datadoghq.com`, `datadoghq.com`, `datadoghq.eu`, `ddog-gov.com`, `us3.datadoghq.com`, `us5.datadoghq.com`. Datadog intake site. Defaults to datadoghq.com.
  """
  pulumi.set(__self__, "datadog_api_key", datadog_api_key)
  if datadog_tags is not None:
@@ -43908,7 +44437,7 @@ class GetServiceIntegrationEndpointDatadogUserConfigResult(dict):
  @pulumi.getter
  def site(self) -> Optional[str]:
  """
- Enum: `datadoghq.com`, `datadoghq.eu`, `us3.datadoghq.com`, `us5.datadoghq.com`, `ddog-gov.com`, `ap1.datadoghq.com`. Datadog intake site. Defaults to datadoghq.com.
+ Enum: `ap1.datadoghq.com`, `datadoghq.com`, `datadoghq.eu`, `ddog-gov.com`, `us3.datadoghq.com`, `us5.datadoghq.com`. Datadog intake site. Defaults to datadoghq.com.
  """
  return pulumi.get(self, "site")

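Only the documented ordering of the `site` enum changes; all intake sites remain valid. A minimal sketch of a Datadog integration endpoint, assuming the input-side `ServiceIntegrationEndpointDatadogUserConfigArgs` class mirrors the result type above; the project name and API key are placeholders:

```python
import pulumi_aiven as aiven

# Placeholder project name and API key.
datadog_endpoint = aiven.ServiceIntegrationEndpoint(
    "datadog",
    project="my-project",
    endpoint_name="datadog-ap1",
    endpoint_type="datadog",
    datadog_user_config=aiven.ServiceIntegrationEndpointDatadogUserConfigArgs(
        datadog_api_key="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
        site="ap1.datadoghq.com",   # any value from the documented enum
    ),
)
```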
@@ -44291,7 +44820,7 @@ class GetServiceIntegrationEndpointExternalKafkaUserConfigResult(dict):
  ssl_endpoint_identification_algorithm: Optional[str] = None):
  """
  :param str bootstrap_servers: Bootstrap servers. Example: `10.0.0.1:9092,10.0.0.2:9092`.
- :param str security_protocol: Enum: `PLAINTEXT`, `SSL`, `SASL_PLAINTEXT`, `SASL_SSL`. Security protocol.
+ :param str security_protocol: Enum: `PLAINTEXT`, `SASL_PLAINTEXT`, `SASL_SSL`, `SSL`. Security protocol.
  :param str sasl_mechanism: Enum: `PLAIN`, `SCRAM-SHA-256`, `SCRAM-SHA-512`. SASL mechanism used for connections to the Kafka server.
  :param str sasl_plain_password: Password for SASL PLAIN mechanism in the Kafka server. Example: `admin`.
  :param str sasl_plain_username: Username for SASL PLAIN mechanism in the Kafka server. Example: `admin`.
@@ -44338,7 +44867,7 @@ class GetServiceIntegrationEndpointExternalKafkaUserConfigResult(dict):
  @pulumi.getter(name="securityProtocol")
  def security_protocol(self) -> str:
  """
- Enum: `PLAINTEXT`, `SSL`, `SASL_PLAINTEXT`, `SASL_SSL`. Security protocol.
+ Enum: `PLAINTEXT`, `SASL_PLAINTEXT`, `SASL_SSL`, `SSL`. Security protocol.
  """
  return pulumi.get(self, "security_protocol")

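A minimal sketch of an external Kafka endpoint using these fields, assuming the `external_kafka` endpoint type and a matching `ServiceIntegrationEndpointExternalKafkaUserConfigArgs` input class; broker addresses and credentials are placeholders:

```python
import pulumi_aiven as aiven

# Placeholder project, endpoint name, broker addresses, and credentials.
external_kafka = aiven.ServiceIntegrationEndpoint(
    "external-kafka",
    project="my-project",
    endpoint_name="upstream-kafka",
    endpoint_type="external_kafka",
    external_kafka_user_config=aiven.ServiceIntegrationEndpointExternalKafkaUserConfigArgs(
        bootstrap_servers="10.0.0.1:9092,10.0.0.2:9092",
        security_protocol="SASL_SSL",     # any value from the documented enum
        sasl_mechanism="SCRAM-SHA-256",
        sasl_plain_username="admin",
        sasl_plain_password="admin",
    ),
)
```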
@@ -44585,7 +45114,7 @@ class GetServiceIntegrationEndpointExternalPostgresqlResult(dict):
  :param str ssl_client_key: Client key. Example: `-----BEGIN PRIVATE KEY-----
  ...
  -----END PRIVATE KEY-----`.
- :param str ssl_mode: Enum: `disable`, `allow`, `prefer`, `require`, `verify-ca`, `verify-full`. SSL mode to use for the connection. Please note that Aiven requires TLS for all connections to external PostgreSQL services. Default: `verify-full`.
+ :param str ssl_mode: Enum: `allow`, `disable`, `prefer`, `require`, `verify-ca`, `verify-full`. SSL mode to use for the connection. Please note that Aiven requires TLS for all connections to external PostgreSQL services. Default: `verify-full`.
  :param str ssl_root_cert: SSL Root Cert. Example: `-----BEGIN CERTIFICATE-----
  ...
  -----END CERTIFICATE-----
@@ -44672,7 +45201,7 @@ class GetServiceIntegrationEndpointExternalPostgresqlResult(dict):
  @pulumi.getter(name="sslMode")
  def ssl_mode(self) -> Optional[str]:
  """
- Enum: `disable`, `allow`, `prefer`, `require`, `verify-ca`, `verify-full`. SSL mode to use for the connection. Please note that Aiven requires TLS for all connections to external PostgreSQL services. Default: `verify-full`.
+ Enum: `allow`, `disable`, `prefer`, `require`, `verify-ca`, `verify-full`. SSL mode to use for the connection. Please note that Aiven requires TLS for all connections to external PostgreSQL services. Default: `verify-full`.
  """
  return pulumi.get(self, "ssl_mode")

@@ -44688,6 +45217,49 @@ class GetServiceIntegrationEndpointExternalPostgresqlResult(dict):
  return pulumi.get(self, "ssl_root_cert")


+ @pulumi.output_type
+ class GetServiceIntegrationEndpointExternalPrometheusUserConfigResult(dict):
+ def __init__(__self__, *,
+ basic_auth_password: Optional[str] = None,
+ basic_auth_username: Optional[str] = None,
+ service_uri: Optional[str] = None):
+ """
+ :param str basic_auth_password: Prometheus basic authentication password. Example: `fhyFNBjj3R`.
+ :param str basic_auth_username: Prometheus basic authentication username. Example: `prom4851`.
+ :param str service_uri: Prometheus enabled write endpoint. Example: `https://write.example.com/`.
+ """
+ if basic_auth_password is not None:
+ pulumi.set(__self__, "basic_auth_password", basic_auth_password)
+ if basic_auth_username is not None:
+ pulumi.set(__self__, "basic_auth_username", basic_auth_username)
+ if service_uri is not None:
+ pulumi.set(__self__, "service_uri", service_uri)
+
+ @property
+ @pulumi.getter(name="basicAuthPassword")
+ def basic_auth_password(self) -> Optional[str]:
+ """
+ Prometheus basic authentication password. Example: `fhyFNBjj3R`.
+ """
+ return pulumi.get(self, "basic_auth_password")
+
+ @property
+ @pulumi.getter(name="basicAuthUsername")
+ def basic_auth_username(self) -> Optional[str]:
+ """
+ Prometheus basic authentication username. Example: `prom4851`.
+ """
+ return pulumi.get(self, "basic_auth_username")
+
+ @property
+ @pulumi.getter(name="serviceUri")
+ def service_uri(self) -> Optional[str]:
+ """
+ Prometheus enabled write endpoint. Example: `https://write.example.com/`.
+ """
+ return pulumi.get(self, "service_uri")
+
+
  @pulumi.output_type
  class GetServiceIntegrationEndpointExternalSchemaRegistryUserConfigResult(dict):
  def __init__(__self__, *,
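The new external Prometheus result type maps to a remote-write style endpoint. A minimal sketch, assuming the endpoint type string is `external_prometheus` and a matching `ServiceIntegrationEndpointExternalPrometheusUserConfigArgs` input class exists alongside the output type added above; the URL and credentials reuse the documented examples and the project name is a placeholder:

```python
import pulumi_aiven as aiven

# Placeholder project and endpoint names; the `external_prometheus` endpoint type
# and input-side Args class are assumed, not confirmed by this diff.
external_prometheus = aiven.ServiceIntegrationEndpoint(
    "external-prometheus",
    project="my-project",
    endpoint_name="remote-write",
    endpoint_type="external_prometheus",
    external_prometheus_user_config=aiven.ServiceIntegrationEndpointExternalPrometheusUserConfigArgs(
        service_uri="https://write.example.com/",
        basic_auth_username="prom4851",
        basic_auth_password="fhyFNBjj3R",
    ),
)
```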
@@ -44696,7 +45268,7 @@ class GetServiceIntegrationEndpointExternalSchemaRegistryUserConfigResult(dict):
  basic_auth_password: Optional[str] = None,
  basic_auth_username: Optional[str] = None):
  """
- :param str authentication: Enum: `none`, `basic`. Authentication method.
+ :param str authentication: Enum: `basic`, `none`. Authentication method.
  :param str url: Schema Registry URL. Example: `https://schema-registry.kafka.company.com:28419`.
  :param str basic_auth_password: Basic authentication password. Example: `Zm9vYg==`.
  :param str basic_auth_username: Basic authentication user name. Example: `avnadmin`.
@@ -44712,7 +45284,7 @@ class GetServiceIntegrationEndpointExternalSchemaRegistryUserConfigResult(dict):
  @pulumi.getter
  def authentication(self) -> str:
  """
- Enum: `none`, `basic`. Authentication method.
+ Enum: `basic`, `none`. Authentication method.
  """
  return pulumi.get(self, "authentication")

@@ -44817,7 +45389,7 @@ class GetServiceIntegrationEndpointRsyslogUserConfigResult(dict):
  max_message_size: Optional[int] = None,
  sd: Optional[str] = None):
  """
- :param str format: Enum: `rfc5424`, `rfc3164`, `custom`. Message format. Default: `rfc5424`.
+ :param str format: Enum: `custom`, `rfc3164`, `rfc5424`. Message format. Default: `rfc5424`.
  :param int port: Rsyslog server port. Default: `514`.
  :param str server: Rsyslog server IP address or hostname. Example: `logs.example.com`.
  :param bool tls: Require TLS. Default: `true`.
@@ -44858,7 +45430,7 @@ class GetServiceIntegrationEndpointRsyslogUserConfigResult(dict):
  @pulumi.getter
  def format(self) -> str:
  """
- Enum: `rfc5424`, `rfc3164`, `custom`. Message format. Default: `rfc5424`.
+ Enum: `custom`, `rfc3164`, `rfc5424`. Message format. Default: `rfc5424`.
  """
  return pulumi.get(self, "format")

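A minimal sketch of an rsyslog endpoint using these fields, assuming the `rsyslog` endpoint type and a matching `ServiceIntegrationEndpointRsyslogUserConfigArgs` input class; the project name and server details are placeholders:

```python
import pulumi_aiven as aiven

# Placeholder project and server details.
rsyslog_endpoint = aiven.ServiceIntegrationEndpoint(
    "rsyslog",
    project="my-project",
    endpoint_name="central-rsyslog",
    endpoint_type="rsyslog",
    rsyslog_user_config=aiven.ServiceIntegrationEndpointRsyslogUserConfigArgs(
        server="logs.example.com",
        port=514,
        tls=True,
        format="rfc5424",   # any value from the documented enum
    ),
)
```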
@@ -45261,7 +45833,7 @@ class GetServiceIntegrationKafkaMirrormakerUserConfigKafkaMirrormakerResult(dict
  :param int consumer_max_poll_records: Set consumer max.poll.records. The default is 500. Example: `500`.
  :param int producer_batch_size: The batch size in bytes producer will attempt to collect before publishing to broker. Example: `1024`.
  :param int producer_buffer_memory: The amount of bytes producer can use for buffering data before publishing to broker. Example: `8388608`.
- :param str producer_compression_type: Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
+ :param str producer_compression_type: Enum: `gzip`, `lz4`, `none`, `snappy`, `zstd`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
  :param int producer_linger_ms: The linger time (ms) for waiting new data to arrive for publishing. Example: `100`.
  :param int producer_max_request_size: The maximum request size in bytes. Example: `1048576`.
  """
@@ -45326,7 +45898,7 @@ class GetServiceIntegrationKafkaMirrormakerUserConfigKafkaMirrormakerResult(dict
  @pulumi.getter(name="producerCompressionType")
  def producer_compression_type(self) -> Optional[str]:
  """
- Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
+ Enum: `gzip`, `lz4`, `none`, `snappy`, `zstd`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
  """
  return pulumi.get(self, "producer_compression_type")

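These MirrorMaker producer settings are applied through the `kafka_mirrormaker` service integration. A minimal sketch, assuming the input-side `ServiceIntegrationKafkaMirrormakerUserConfig*Args` classes mirror the result types above; project, service names, and the `cluster_alias` value are placeholders:

```python
import pulumi_aiven as aiven

# Placeholder project and service names; `cluster_alias` is illustrative.
mirrormaker_integration = aiven.ServiceIntegration(
    "kafka-to-mirrormaker",
    project="my-project",
    integration_type="kafka_mirrormaker",
    source_service_name="source-kafka",
    destination_service_name="mirrormaker",
    kafka_mirrormaker_user_config=aiven.ServiceIntegrationKafkaMirrormakerUserConfigArgs(
        cluster_alias="source",
        kafka_mirrormaker=aiven.ServiceIntegrationKafkaMirrormakerUserConfigKafkaMirrormakerArgs(
            producer_compression_type="zstd",   # any documented codec, or `none`
            producer_linger_ms=100,
        ),
    ),
)
```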
@@ -46098,8 +46670,12 @@ class GetThanosThanosUserConfigResult(dict):
  public_access: Optional['outputs.GetThanosThanosUserConfigPublicAccessResult'] = None,
  query: Optional['outputs.GetThanosThanosUserConfigQueryResult'] = None,
  query_frontend: Optional['outputs.GetThanosThanosUserConfigQueryFrontendResult'] = None,
+ receiver_ingesting: Optional[Mapping[str, str]] = None,
+ receiver_routing: Optional[Mapping[str, str]] = None,
+ ruler: Optional[Mapping[str, str]] = None,
  service_log: Optional[bool] = None,
- static_ips: Optional[bool] = None):
+ static_ips: Optional[bool] = None,
+ store: Optional[Mapping[str, str]] = None):
  """
  :param 'GetThanosThanosUserConfigCompactorArgs' compactor: ThanosCompactor
  :param Mapping[str, str] env: Environmental variables.
@@ -46110,8 +46686,12 @@ class GetThanosThanosUserConfigResult(dict):
  :param 'GetThanosThanosUserConfigPublicAccessArgs' public_access: Allow access to selected service ports from the public Internet
  :param 'GetThanosThanosUserConfigQueryArgs' query: ThanosQuery
  :param 'GetThanosThanosUserConfigQueryFrontendArgs' query_frontend: ThanosQueryFrontend
+ :param Mapping[str, str] receiver_ingesting: CommonReceive.
+ :param Mapping[str, str] receiver_routing: ThanosReceiveRouting.
+ :param Mapping[str, str] ruler: ThanosRuler.
  :param bool service_log: Store logs for the service so that they are available in the HTTP API and console.
  :param bool static_ips: Use static public IP addresses.
+ :param Mapping[str, str] store: ThanosStore.
  """
  if compactor is not None:
  pulumi.set(__self__, "compactor", compactor)
@@ -46131,10 +46711,18 @@ class GetThanosThanosUserConfigResult(dict):
  pulumi.set(__self__, "query", query)
  if query_frontend is not None:
  pulumi.set(__self__, "query_frontend", query_frontend)
+ if receiver_ingesting is not None:
+ pulumi.set(__self__, "receiver_ingesting", receiver_ingesting)
+ if receiver_routing is not None:
+ pulumi.set(__self__, "receiver_routing", receiver_routing)
+ if ruler is not None:
+ pulumi.set(__self__, "ruler", ruler)
  if service_log is not None:
  pulumi.set(__self__, "service_log", service_log)
  if static_ips is not None:
  pulumi.set(__self__, "static_ips", static_ips)
+ if store is not None:
+ pulumi.set(__self__, "store", store)

  @property
  @pulumi.getter
@@ -46210,6 +46798,30 @@ class GetThanosThanosUserConfigResult(dict):
  """
  return pulumi.get(self, "query_frontend")

+ @property
+ @pulumi.getter(name="receiverIngesting")
+ def receiver_ingesting(self) -> Optional[Mapping[str, str]]:
+ """
+ CommonReceive.
+ """
+ return pulumi.get(self, "receiver_ingesting")
+
+ @property
+ @pulumi.getter(name="receiverRouting")
+ def receiver_routing(self) -> Optional[Mapping[str, str]]:
+ """
+ ThanosReceiveRouting.
+ """
+ return pulumi.get(self, "receiver_routing")
+
+ @property
+ @pulumi.getter
+ def ruler(self) -> Optional[Mapping[str, str]]:
+ """
+ ThanosRuler.
+ """
+ return pulumi.get(self, "ruler")
+
  @property
  @pulumi.getter(name="serviceLog")
  def service_log(self) -> Optional[bool]:
@@ -46226,6 +46838,14 @@ class GetThanosThanosUserConfigResult(dict):
  """
  return pulumi.get(self, "static_ips")

+ @property
+ @pulumi.getter
+ def store(self) -> Optional[Mapping[str, str]]:
+ """
+ ThanosStore.
+ """
+ return pulumi.get(self, "store")
+

  @pulumi.output_type
  class GetThanosThanosUserConfigCompactorResult(dict):
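The new map-typed fields (`receiver_ingesting`, `receiver_routing`, `ruler`, `store`) surface on the Thanos user config returned by the data source. A minimal sketch of reading them, assuming the `get_thanos` data source follows the usual pulumi-aiven result shape with `thanos_user_configs` as a list; project and service names are placeholders:

```python
import pulumi
import pulumi_aiven as aiven

# Placeholder project and service names; assumes an existing Thanos service
# with at least one user config block.
thanos = aiven.get_thanos(project="my-project", service_name="my-thanos")

# The new map-typed blocks sit alongside the existing user config fields.
user_config = thanos.thanos_user_configs[0]
pulumi.export("thanos_ruler", user_config.ruler)
pulumi.export("thanos_store", user_config.store)
pulumi.export("thanos_receiver_routing", user_config.receiver_routing)
```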
@@ -46736,7 +47356,7 @@ class GetValkeyValkeyUserConfigResult(dict):
  :param int valkey_io_threads: Set Valkey IO thread count. Changing this will cause a restart of the Valkey service. Example: `1`.
  :param int valkey_lfu_decay_time: LFU maxmemory-policy counter decay time in minutes. Default: `1`.
  :param int valkey_lfu_log_factor: Counter logarithm factor for volatile-lfu and allkeys-lfu maxmemory-policies. Default: `10`.
- :param str valkey_maxmemory_policy: Enum: `noeviction`, `allkeys-lru`, `volatile-lru`, `allkeys-random`, `volatile-random`, `volatile-ttl`, `volatile-lfu`, `allkeys-lfu`. Valkey maxmemory-policy. Default: `noeviction`.
+ :param str valkey_maxmemory_policy: Enum: `allkeys-lfu`, `allkeys-lru`, `allkeys-random`, `noeviction`, `volatile-lfu`, `volatile-lru`, `volatile-random`, `volatile-ttl`. Valkey maxmemory-policy. Default: `noeviction`.
  :param str valkey_notify_keyspace_events: Set notify-keyspace-events option.
  :param int valkey_number_of_databases: Set number of Valkey databases. Changing this will cause a restart of the Valkey service. Example: `16`.
  :param str valkey_persistence: Enum: `off`, `rdb`. When persistence is `rdb`, Valkey does RDB dumps each 10 minutes if any key is changed. Also RDB dumps are done according to backup schedule for backup purposes. When persistence is `off`, no RDB dumps and backups are done, so data can be lost at any moment if service is restarted for any reason, or if service is powered off. Also service can't be forked.
@@ -46954,7 +47574,7 @@ class GetValkeyValkeyUserConfigResult(dict):
  @pulumi.getter(name="valkeyMaxmemoryPolicy")
  def valkey_maxmemory_policy(self) -> Optional[str]:
  """
- Enum: `noeviction`, `allkeys-lru`, `volatile-lru`, `allkeys-random`, `volatile-random`, `volatile-ttl`, `volatile-lfu`, `allkeys-lfu`. Valkey maxmemory-policy. Default: `noeviction`.
+ Enum: `allkeys-lfu`, `allkeys-lru`, `allkeys-random`, `noeviction`, `volatile-lfu`, `volatile-lru`, `volatile-random`, `volatile-ttl`. Valkey maxmemory-policy. Default: `noeviction`.
  """
  return pulumi.get(self, "valkey_maxmemory_policy")
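As with Redis, only the documented ordering of the Valkey enum changes. A minimal sketch of setting the policy on a Valkey service, assuming the input-side `ValkeyValkeyUserConfigArgs` class mirrors the result type above; project, cloud, and plan names are placeholders:

```python
import pulumi_aiven as aiven

# Placeholder project, cloud, and plan names.
valkey = aiven.Valkey(
    "example-valkey",
    project="my-project",
    cloud_name="google-europe-west1",
    plan="startup-4",
    service_name="example-valkey",
    valkey_user_config=aiven.ValkeyValkeyUserConfigArgs(
        valkey_maxmemory_policy="allkeys-lru",   # any value from the documented enum
    ),
)
```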