pulumi-aiven 6.28.0a1731647682 → 6.29.0 (py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of pulumi-aiven might be problematic.

Files changed (60)
  1. pulumi_aiven/__init__.py +9 -0
  2. pulumi_aiven/_inputs.py +1136 -227
  3. pulumi_aiven/account_team_project.py +7 -7
  4. pulumi_aiven/billing_group.py +7 -7
  5. pulumi_aiven/cassandra.py +8 -8
  6. pulumi_aiven/clickhouse.py +8 -8
  7. pulumi_aiven/dragonfly.py +8 -8
  8. pulumi_aiven/flink.py +8 -8
  9. pulumi_aiven/get_account_team_project.py +1 -1
  10. pulumi_aiven/get_billing_group.py +1 -1
  11. pulumi_aiven/get_cassanda.py +1 -1
  12. pulumi_aiven/get_cassandra.py +1 -1
  13. pulumi_aiven/get_clickhouse.py +1 -1
  14. pulumi_aiven/get_dragonfly.py +1 -1
  15. pulumi_aiven/get_flink.py +1 -1
  16. pulumi_aiven/get_grafana.py +1 -1
  17. pulumi_aiven/get_kafka.py +1 -1
  18. pulumi_aiven/get_kafka_connect.py +1 -1
  19. pulumi_aiven/get_kafka_mirror_maker.py +1 -1
  20. pulumi_aiven/get_m3_aggregator.py +1 -1
  21. pulumi_aiven/get_m3_db.py +1 -1
  22. pulumi_aiven/get_mirror_maker_replication_flow.py +16 -2
  23. pulumi_aiven/get_my_sql.py +1 -1
  24. pulumi_aiven/get_open_search.py +1 -1
  25. pulumi_aiven/get_pg.py +1 -1
  26. pulumi_aiven/get_project_user.py +1 -1
  27. pulumi_aiven/get_redis.py +1 -1
  28. pulumi_aiven/get_service_component.py +9 -9
  29. pulumi_aiven/get_service_integration.py +17 -17
  30. pulumi_aiven/get_service_integration_endpoint.py +59 -31
  31. pulumi_aiven/get_thanos.py +1 -1
  32. pulumi_aiven/get_valkey.py +1 -1
  33. pulumi_aiven/get_valkey_user.py +9 -9
  34. pulumi_aiven/grafana.py +8 -8
  35. pulumi_aiven/influx_db.py +15 -8
  36. pulumi_aiven/kafka.py +8 -8
  37. pulumi_aiven/kafka_connect.py +8 -8
  38. pulumi_aiven/kafka_mirror_maker.py +8 -8
  39. pulumi_aiven/kafka_native_acl.py +534 -0 (new module; see the usage sketch after this list)
  40. pulumi_aiven/m3_aggregator.py +8 -8
  41. pulumi_aiven/m3_db.py +8 -8
  42. pulumi_aiven/mirror_maker_replication_flow.py +54 -7
  43. pulumi_aiven/my_sql.py +8 -8
  44. pulumi_aiven/open_search.py +8 -8
  45. pulumi_aiven/organization_group_project.py +17 -9
  46. pulumi_aiven/organization_permission.py +49 -49
  47. pulumi_aiven/outputs.py +1586 -295
  48. pulumi_aiven/pg.py +8 -8
  49. pulumi_aiven/project_user.py +15 -7
  50. pulumi_aiven/pulumi-plugin.json +1 -1
  51. pulumi_aiven/redis.py +8 -8
  52. pulumi_aiven/service_integration.py +139 -123
  53. pulumi_aiven/service_integration_endpoint.py +302 -140
  54. pulumi_aiven/thanos.py +8 -8
  55. pulumi_aiven/valkey.py +10 -10
  56. pulumi_aiven/valkey_user.py +142 -48
  57. {pulumi_aiven-6.28.0a1731647682.dist-info → pulumi_aiven-6.29.0.dist-info}/METADATA +5 -5
  58. {pulumi_aiven-6.28.0a1731647682.dist-info → pulumi_aiven-6.29.0.dist-info}/RECORD +60 -59
  59. {pulumi_aiven-6.28.0a1731647682.dist-info → pulumi_aiven-6.29.0.dist-info}/WHEEL +1 -1
  60. {pulumi_aiven-6.28.0a1731647682.dist-info → pulumi_aiven-6.29.0.dist-info}/top_level.txt +0 -0
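
Of the files above, pulumi_aiven/kafka_native_acl.py is entirely new in 6.29.0 and adds a KafkaNativeAcl resource. The sketch below is only an illustration of how such a resource is typically declared; the argument names and values are assumptions based on Kafka's native ACL model, not taken from this diff, so check the generated module for the actual schema.

import pulumi_aiven as aiven

# Hypothetical sketch of the new KafkaNativeAcl resource (added in 6.29.0).
# All argument names below are assumptions; consult pulumi_aiven/kafka_native_acl.py
# in the 6.29.0 wheel for the real signature.
orders_acl = aiven.KafkaNativeAcl(
    "orders-producer-acl",
    project="my-project",              # Aiven project name (placeholder)
    service_name="my-kafka",           # Kafka service name (placeholder)
    resource_type="Topic",             # assumed: Kafka resource type
    resource_name="orders",            # assumed: topic the ACL applies to
    pattern_type="LITERAL",            # assumed: LITERAL or PREFIXED
    principal="User:orders-producer",  # assumed: Kafka principal
    operation="Write",                 # assumed: Kafka operation
    permission_type="ALLOW",           # assumed: ALLOW or DENY
)
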
pulumi_aiven/outputs.py CHANGED
@@ -194,6 +194,10 @@ __all__ = [
  'OpenSearchOpensearchUserConfigOpensearchSearchBackpressureNodeDuress',
  'OpenSearchOpensearchUserConfigOpensearchSearchBackpressureSearchShardTask',
  'OpenSearchOpensearchUserConfigOpensearchSearchBackpressureSearchTask',
+ 'OpenSearchOpensearchUserConfigOpensearchSearchInsightsTopQueries',
+ 'OpenSearchOpensearchUserConfigOpensearchSearchInsightsTopQueriesCpu',
+ 'OpenSearchOpensearchUserConfigOpensearchSearchInsightsTopQueriesLatency',
+ 'OpenSearchOpensearchUserConfigOpensearchSearchInsightsTopQueriesMemory',
  'OpenSearchOpensearchUserConfigOpensearchShardIndexingPressure',
  'OpenSearchOpensearchUserConfigOpensearchShardIndexingPressureOperatingFactor',
  'OpenSearchOpensearchUserConfigOpensearchShardIndexingPressurePrimaryParameter',
@@ -251,6 +255,8 @@ __all__ = [
  'ServiceIntegrationDatadogUserConfigDatadogTag',
  'ServiceIntegrationDatadogUserConfigOpensearch',
  'ServiceIntegrationDatadogUserConfigRedis',
+ 'ServiceIntegrationEndpointAutoscalerUserConfig',
+ 'ServiceIntegrationEndpointAutoscalerUserConfigAutoscaling',
  'ServiceIntegrationEndpointDatadogUserConfig',
  'ServiceIntegrationEndpointDatadogUserConfigDatadogTag',
  'ServiceIntegrationEndpointExternalAwsCloudwatchLogsUserConfig',
@@ -264,6 +270,7 @@ __all__ = [
  'ServiceIntegrationEndpointExternalMysqlUserConfig',
  'ServiceIntegrationEndpointExternalOpensearchLogsUserConfig',
  'ServiceIntegrationEndpointExternalPostgresql',
+ 'ServiceIntegrationEndpointExternalPrometheusUserConfig',
  'ServiceIntegrationEndpointExternalSchemaRegistryUserConfig',
  'ServiceIntegrationEndpointJolokiaUserConfig',
  'ServiceIntegrationEndpointPrometheusUserConfig',
@@ -295,6 +302,8 @@ __all__ = [
  'ThanosThanosUserConfig',
  'ThanosThanosUserConfigCompactor',
  'ThanosThanosUserConfigIpFilterObject',
+ 'ThanosThanosUserConfigPrivateAccess',
+ 'ThanosThanosUserConfigPrivatelinkAccess',
  'ThanosThanosUserConfigPublicAccess',
  'ThanosThanosUserConfigQuery',
  'ThanosThanosUserConfigQueryFrontend',
@@ -495,6 +504,10 @@ __all__ = [
  'GetOpenSearchOpensearchUserConfigOpensearchSearchBackpressureNodeDuressResult',
  'GetOpenSearchOpensearchUserConfigOpensearchSearchBackpressureSearchShardTaskResult',
  'GetOpenSearchOpensearchUserConfigOpensearchSearchBackpressureSearchTaskResult',
+ 'GetOpenSearchOpensearchUserConfigOpensearchSearchInsightsTopQueriesResult',
+ 'GetOpenSearchOpensearchUserConfigOpensearchSearchInsightsTopQueriesCpuResult',
+ 'GetOpenSearchOpensearchUserConfigOpensearchSearchInsightsTopQueriesLatencyResult',
+ 'GetOpenSearchOpensearchUserConfigOpensearchSearchInsightsTopQueriesMemoryResult',
  'GetOpenSearchOpensearchUserConfigOpensearchShardIndexingPressureResult',
  'GetOpenSearchOpensearchUserConfigOpensearchShardIndexingPressureOperatingFactorResult',
  'GetOpenSearchOpensearchUserConfigOpensearchShardIndexingPressurePrimaryParameterResult',
@@ -550,6 +563,8 @@ __all__ = [
  'GetServiceIntegrationDatadogUserConfigDatadogTagResult',
  'GetServiceIntegrationDatadogUserConfigOpensearchResult',
  'GetServiceIntegrationDatadogUserConfigRedisResult',
+ 'GetServiceIntegrationEndpointAutoscalerUserConfigResult',
+ 'GetServiceIntegrationEndpointAutoscalerUserConfigAutoscalingResult',
  'GetServiceIntegrationEndpointDatadogUserConfigResult',
  'GetServiceIntegrationEndpointDatadogUserConfigDatadogTagResult',
  'GetServiceIntegrationEndpointExternalAwsCloudwatchLogsUserConfigResult',
@@ -563,6 +578,7 @@ __all__ = [
  'GetServiceIntegrationEndpointExternalMysqlUserConfigResult',
  'GetServiceIntegrationEndpointExternalOpensearchLogsUserConfigResult',
  'GetServiceIntegrationEndpointExternalPostgresqlResult',
+ 'GetServiceIntegrationEndpointExternalPrometheusUserConfigResult',
  'GetServiceIntegrationEndpointExternalSchemaRegistryUserConfigResult',
  'GetServiceIntegrationEndpointJolokiaUserConfigResult',
  'GetServiceIntegrationEndpointPrometheusUserConfigResult',
@@ -594,6 +610,8 @@ __all__ = [
  'GetThanosThanosUserConfigResult',
  'GetThanosThanosUserConfigCompactorResult',
  'GetThanosThanosUserConfigIpFilterObjectResult',
+ 'GetThanosThanosUserConfigPrivateAccessResult',
+ 'GetThanosThanosUserConfigPrivatelinkAccessResult',
  'GetThanosThanosUserConfigPublicAccessResult',
  'GetThanosThanosUserConfigQueryResult',
  'GetThanosThanosUserConfigQueryFrontendResult',
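
The additions to __all__ above register new output types for the OpenSearch search-insights top-queries settings, the autoscaler and external_prometheus service integration endpoint configs, and private/privatelink access blocks for Thanos. As a rough, unverified illustration, an external Prometheus endpoint might be wired up as below; the endpoint_type string, the user-config argument name, and the basic-auth field names are assumptions rather than facts taken from this diff.

import pulumi_aiven as aiven

# Hypothetical sketch: registering an external Prometheus integration endpoint.
# The endpoint_type value and the user-config field names are assumptions; the
# authoritative schema is the ServiceIntegrationEndpointExternalPrometheusUserConfig
# type introduced in this release.
metrics_endpoint = aiven.ServiceIntegrationEndpoint(
    "metrics-sink",
    project="my-project",                       # placeholder project name
    endpoint_name="external-prometheus-sink",
    endpoint_type="external_prometheus",        # assumed endpoint type string
    external_prometheus_user_config={           # assumed argument name
        "basic_auth_username": "prometheus",    # assumed field
        "basic_auth_password": "example-pass",  # assumed field
    },
)
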
@@ -834,7 +852,6 @@ class CassandraCassandraUserConfig(dict):
 
  @property
  @pulumi.getter(name="additionalBackupRegions")
- @_utilities.deprecated("""This property is deprecated.""")
  def additional_backup_regions(self) -> Optional[str]:
  """
  Additional Cloud Regions for Backup Replication.
@@ -2379,7 +2396,7 @@ class DragonflyDragonflyUserConfig(dict):
  static_ips: Optional[bool] = None):
  """
  :param bool cache_mode: Evict entries when getting close to maxmemory limit. Default: `false`.
- :param str dragonfly_persistence: Enum: `off`, `rdb`, `dfs`. When persistence is `rdb` or `dfs`, Dragonfly does RDB or DFS dumps every 10 minutes. Dumps are done according to the backup schedule for backup purposes. When persistence is `off`, no RDB/DFS dumps or backups are done, so data can be lost at any moment if the service is restarted for any reason, or if the service is powered off. Also, the service can't be forked.
+ :param str dragonfly_persistence: Enum: `dfs`, `off`, `rdb`. When persistence is `rdb` or `dfs`, Dragonfly does RDB or DFS dumps every 10 minutes. Dumps are done according to the backup schedule for backup purposes. When persistence is `off`, no RDB/DFS dumps or backups are done, so data can be lost at any moment if the service is restarted for any reason, or if the service is powered off. Also, the service can't be forked.
  :param bool dragonfly_ssl: Require SSL to access Dragonfly. Default: `true`.
  :param Sequence['DragonflyDragonflyUserConfigIpFilterObjectArgs'] ip_filter_objects: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`
  :param Sequence[str] ip_filter_strings: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`.
@@ -2437,7 +2454,7 @@ class DragonflyDragonflyUserConfig(dict):
  @pulumi.getter(name="dragonflyPersistence")
  def dragonfly_persistence(self) -> Optional[str]:
  """
- Enum: `off`, `rdb`, `dfs`. When persistence is `rdb` or `dfs`, Dragonfly does RDB or DFS dumps every 10 minutes. Dumps are done according to the backup schedule for backup purposes. When persistence is `off`, no RDB/DFS dumps or backups are done, so data can be lost at any moment if the service is restarted for any reason, or if the service is powered off. Also, the service can't be forked.
+ Enum: `dfs`, `off`, `rdb`. When persistence is `rdb` or `dfs`, Dragonfly does RDB or DFS dumps every 10 minutes. Dumps are done according to the backup schedule for backup purposes. When persistence is `off`, no RDB/DFS dumps or backups are done, so data can be lost at any moment if the service is restarted for any reason, or if the service is powered off. Also, the service can't be forked.
  """
  return pulumi.get(self, "dragonfly_persistence")
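
The two Dragonfly hunks only re-sort the documented enum values for dragonfly_persistence (now `dfs`, `off`, `rdb`); the option itself is unchanged. For context, a minimal sketch of setting it with this provider's usual user-config pattern (the plan and cloud values are placeholders):

import pulumi_aiven as aiven

# Minimal sketch: a Dragonfly service with RDB persistence enabled.
# Plain dicts are accepted for user-config inputs; the typed equivalent is the
# DragonflyDragonflyUserConfig type shown in the hunk above.
cache = aiven.Dragonfly(
    "cache",
    project="my-project",                 # placeholder
    cloud_name="google-europe-west1",     # placeholder
    plan="startup-4",                     # placeholder plan name
    service_name="my-dragonfly",
    dragonfly_user_config={
        "dragonfly_persistence": "rdb",   # one of: dfs, off, rdb
        "dragonfly_ssl": True,
    },
)
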
 
@@ -3808,10 +3825,10 @@ class GrafanaGrafanaUserConfig(dict):
  wal: Optional[bool] = None):
  """
  :param str additional_backup_regions: Additional Cloud Regions for Backup Replication.
- :param bool alerting_enabled: Enable or disable Grafana legacy alerting functionality. This should not be enabled with unified*alerting*enabled.
+ :param bool alerting_enabled: Setting has no effect with Grafana 11 and onward. Enable or disable Grafana legacy alerting functionality. This should not be enabled with unified*alerting*enabled.
  :param str alerting_error_or_timeout: Enum: `alerting`, `keep_state`. Default error or timeout setting for new alerting rules.
  :param int alerting_max_annotations_to_keep: Max number of alert annotations that Grafana stores. 0 (default) keeps all alert annotations. Example: `0`.
- :param str alerting_nodata_or_nullvalues: Enum: `alerting`, `no_data`, `keep_state`, `ok`. Default value for 'no data or null values' for new alerting rules.
+ :param str alerting_nodata_or_nullvalues: Enum: `alerting`, `keep_state`, `no_data`, `ok`. Default value for 'no data or null values' for new alerting rules.
  :param bool allow_embedding: Allow embedding Grafana dashboards with iframe/frame/object/embed tags. Disabled by default to limit impact of clickjacking.
  :param 'GrafanaGrafanaUserConfigAuthAzureadArgs' auth_azuread: Azure AD OAuth integration
  :param bool auth_basic_enabled: Enable or disable basic authentication form, used by Grafana built-in login.
@@ -3819,9 +3836,9 @@ class GrafanaGrafanaUserConfig(dict):
  :param 'GrafanaGrafanaUserConfigAuthGithubArgs' auth_github: Github Auth integration
  :param 'GrafanaGrafanaUserConfigAuthGitlabArgs' auth_gitlab: GitLab Auth integration
  :param 'GrafanaGrafanaUserConfigAuthGoogleArgs' auth_google: Google Auth integration
- :param str cookie_samesite: Enum: `lax`, `strict`, `none`. Cookie SameSite attribute: `strict` prevents sending cookie for cross-site requests, effectively disabling direct linking from other sites to Grafana. `lax` is the default value.
+ :param str cookie_samesite: Enum: `lax`, `none`, `strict`. Cookie SameSite attribute: `strict` prevents sending cookie for cross-site requests, effectively disabling direct linking from other sites to Grafana. `lax` is the default value.
  :param str custom_domain: Serve the web frontend using a custom CNAME pointing to the Aiven DNS name. Example: `grafana.example.org`.
- :param bool dashboard_previews_enabled: This feature is new in Grafana 9 and is quite resource intensive. It may cause low-end plans to work more slowly while the dashboard previews are rendering.
+ :param bool dashboard_previews_enabled: Enable browsing of dashboards in grid (pictures) mode. This feature is new in Grafana 9 and is quite resource intensive. It may cause low-end plans to work more slowly while the dashboard previews are rendering.
  :param str dashboards_min_refresh_interval: Signed sequence of decimal numbers, followed by a unit suffix (ms, s, m, h, d), e.g. 30s, 1h. Example: `5s`.
  :param int dashboards_versions_to_keep: Dashboard versions to keep per dashboard. Example: `20`.
  :param bool dataproxy_send_user_header: Send `X-Grafana-User` header to data source.
@@ -3834,7 +3851,7 @@ class GrafanaGrafanaUserConfig(dict):
  :param Sequence['GrafanaGrafanaUserConfigIpFilterObjectArgs'] ip_filter_objects: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`
  :param Sequence[str] ip_filter_strings: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`.
  :param Sequence[str] ip_filters: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`.
- :param bool metrics_enabled: Enable Grafana /metrics endpoint.
+ :param bool metrics_enabled: Enable Grafana's /metrics endpoint.
  :param bool oauth_allow_insecure_email_lookup: Enforce user lookup based on email instead of the unique ID provided by the IdP.
  :param 'GrafanaGrafanaUserConfigPrivateAccessArgs' private_access: Allow access to selected service ports from private networks
  :param 'GrafanaGrafanaUserConfigPrivatelinkAccessArgs' privatelink_access: Allow access to selected service components through Privatelink
@@ -3845,9 +3862,9 @@ class GrafanaGrafanaUserConfig(dict):
  :param str service_to_fork_from: Name of another service to fork from. This has effect only when a new service is being created. Example: `anotherservicename`.
  :param 'GrafanaGrafanaUserConfigSmtpServerArgs' smtp_server: SMTP server settings
  :param bool static_ips: Use static public IP addresses.
- :param bool unified_alerting_enabled: Enable or disable Grafana unified alerting functionality. By default this is enabled and any legacy alerts will be migrated on upgrade to Grafana 9+. To stay on legacy alerting, set unified*alerting*enabled to false and alerting_enabled to true. See https://grafana.com/docs/grafana/latest/alerting/set-up/migrating-alerts/ for more details.
+ :param bool unified_alerting_enabled: Enable or disable Grafana unified alerting functionality. By default this is enabled and any legacy alerts will be migrated on upgrade to Grafana 9+. To stay on legacy alerting, set unified*alerting*enabled to false and alerting_enabled to true. See https://grafana.com/docs/grafana/latest/alerting/ for more details.
  :param bool user_auto_assign_org: Auto-assign new users on signup to main organization. Defaults to false.
- :param str user_auto_assign_org_role: Enum: `Viewer`, `Admin`, `Editor`. Set role for new signups. Defaults to Viewer.
+ :param str user_auto_assign_org_role: Enum: `Admin`, `Editor`, `Viewer`. Set role for new signups. Defaults to Viewer.
  :param bool viewers_can_edit: Users with view-only permission can edit but not save dashboards.
  :param bool wal: Setting to enable/disable Write-Ahead Logging. The default value is false (disabled).
  """
@@ -3950,7 +3967,7 @@ class GrafanaGrafanaUserConfig(dict):
  @pulumi.getter(name="alertingEnabled")
  def alerting_enabled(self) -> Optional[bool]:
  """
- Enable or disable Grafana legacy alerting functionality. This should not be enabled with unified*alerting*enabled.
+ Setting has no effect with Grafana 11 and onward. Enable or disable Grafana legacy alerting functionality. This should not be enabled with unified*alerting*enabled.
  """
  return pulumi.get(self, "alerting_enabled")
 
@@ -3974,7 +3991,7 @@ class GrafanaGrafanaUserConfig(dict):
  @pulumi.getter(name="alertingNodataOrNullvalues")
  def alerting_nodata_or_nullvalues(self) -> Optional[str]:
  """
- Enum: `alerting`, `no_data`, `keep_state`, `ok`. Default value for 'no data or null values' for new alerting rules.
+ Enum: `alerting`, `keep_state`, `no_data`, `ok`. Default value for 'no data or null values' for new alerting rules.
  """
  return pulumi.get(self, "alerting_nodata_or_nullvalues")
 
@@ -4038,7 +4055,7 @@ class GrafanaGrafanaUserConfig(dict):
  @pulumi.getter(name="cookieSamesite")
  def cookie_samesite(self) -> Optional[str]:
  """
- Enum: `lax`, `strict`, `none`. Cookie SameSite attribute: `strict` prevents sending cookie for cross-site requests, effectively disabling direct linking from other sites to Grafana. `lax` is the default value.
+ Enum: `lax`, `none`, `strict`. Cookie SameSite attribute: `strict` prevents sending cookie for cross-site requests, effectively disabling direct linking from other sites to Grafana. `lax` is the default value.
  """
  return pulumi.get(self, "cookie_samesite")
 
@@ -4054,7 +4071,7 @@ class GrafanaGrafanaUserConfig(dict):
  @pulumi.getter(name="dashboardPreviewsEnabled")
  def dashboard_previews_enabled(self) -> Optional[bool]:
  """
- This feature is new in Grafana 9 and is quite resource intensive. It may cause low-end plans to work more slowly while the dashboard previews are rendering.
+ Enable browsing of dashboards in grid (pictures) mode. This feature is new in Grafana 9 and is quite resource intensive. It may cause low-end plans to work more slowly while the dashboard previews are rendering.
  """
  return pulumi.get(self, "dashboard_previews_enabled")
 
@@ -4159,7 +4176,7 @@ class GrafanaGrafanaUserConfig(dict):
  @pulumi.getter(name="metricsEnabled")
  def metrics_enabled(self) -> Optional[bool]:
  """
- Enable Grafana /metrics endpoint.
+ Enable Grafana's /metrics endpoint.
  """
  return pulumi.get(self, "metrics_enabled")
 
@@ -4247,7 +4264,7 @@ class GrafanaGrafanaUserConfig(dict):
  @pulumi.getter(name="unifiedAlertingEnabled")
  def unified_alerting_enabled(self) -> Optional[bool]:
  """
- Enable or disable Grafana unified alerting functionality. By default this is enabled and any legacy alerts will be migrated on upgrade to Grafana 9+. To stay on legacy alerting, set unified*alerting*enabled to false and alerting_enabled to true. See https://grafana.com/docs/grafana/latest/alerting/set-up/migrating-alerts/ for more details.
+ Enable or disable Grafana unified alerting functionality. By default this is enabled and any legacy alerts will be migrated on upgrade to Grafana 9+. To stay on legacy alerting, set unified*alerting*enabled to false and alerting_enabled to true. See https://grafana.com/docs/grafana/latest/alerting/ for more details.
  """
  return pulumi.get(self, "unified_alerting_enabled")
 
@@ -4263,7 +4280,7 @@ class GrafanaGrafanaUserConfig(dict):
  @pulumi.getter(name="userAutoAssignOrgRole")
  def user_auto_assign_org_role(self) -> Optional[str]:
  """
- Enum: `Viewer`, `Admin`, `Editor`. Set role for new signups. Defaults to Viewer.
+ Enum: `Admin`, `Editor`, `Viewer`. Set role for new signups. Defaults to Viewer.
  """
  return pulumi.get(self, "user_auto_assign_org_role")
 
@@ -4743,9 +4760,9 @@ class GrafanaGrafanaUserConfigAuthGitlab(dict):
  :param str client_id: Client ID from provider. Example: `b1ba0bf54a4c2c0a1c29`.
  :param str client_secret: Client secret from provider. Example: `bfa6gea4f129076761dcba8ce5e1e406bd83af7b`.
  :param bool allow_sign_up: Automatically sign-up users on successful sign-in.
- :param str api_url: API URL. This only needs to be set when using self hosted GitLab. Example: `https://gitlab.com/api/v4`.
- :param str auth_url: Authorization URL. This only needs to be set when using self hosted GitLab. Example: `https://gitlab.com/oauth/authorize`.
- :param str token_url: Token URL. This only needs to be set when using self hosted GitLab. Example: `https://gitlab.com/oauth/token`.
+ :param str api_url: This only needs to be set when using self hosted GitLab. Example: `https://gitlab.com/api/v4`.
+ :param str auth_url: This only needs to be set when using self hosted GitLab. Example: `https://gitlab.com/oauth/authorize`.
+ :param str token_url: This only needs to be set when using self hosted GitLab. Example: `https://gitlab.com/oauth/token`.
  """
  pulumi.set(__self__, "allowed_groups", allowed_groups)
  pulumi.set(__self__, "client_id", client_id)
@@ -4795,7 +4812,7 @@ class GrafanaGrafanaUserConfigAuthGitlab(dict):
  @pulumi.getter(name="apiUrl")
  def api_url(self) -> Optional[str]:
  """
- API URL. This only needs to be set when using self hosted GitLab. Example: `https://gitlab.com/api/v4`.
+ This only needs to be set when using self hosted GitLab. Example: `https://gitlab.com/api/v4`.
  """
  return pulumi.get(self, "api_url")
 
@@ -4803,7 +4820,7 @@ class GrafanaGrafanaUserConfigAuthGitlab(dict):
  @pulumi.getter(name="authUrl")
  def auth_url(self) -> Optional[str]:
  """
- Authorization URL. This only needs to be set when using self hosted GitLab. Example: `https://gitlab.com/oauth/authorize`.
+ This only needs to be set when using self hosted GitLab. Example: `https://gitlab.com/oauth/authorize`.
  """
  return pulumi.get(self, "auth_url")
 
@@ -4811,7 +4828,7 @@ class GrafanaGrafanaUserConfigAuthGitlab(dict):
  @pulumi.getter(name="tokenUrl")
  def token_url(self) -> Optional[str]:
  """
- Token URL. This only needs to be set when using self hosted GitLab. Example: `https://gitlab.com/oauth/token`.
+ This only needs to be set when using self hosted GitLab. Example: `https://gitlab.com/oauth/token`.
  """
  return pulumi.get(self, "token_url")
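
The Grafana edits above are docstring updates: alerting_enabled is now documented as having no effect from Grafana 11 onward, dashboard_previews_enabled gains a description of what it actually enables, and the unified-alerting link points at the current docs. A minimal sketch of leaning on unified alerting (resource arguments other than the user config are placeholders):

import pulumi_aiven as aiven

# Minimal sketch: keep unified alerting on and leave legacy alerting_enabled unset,
# since the latter has no effect with Grafana 11 and onward (per the docstring above).
dashboards = aiven.Grafana(
    "dashboards",
    project="my-project",                 # placeholder
    cloud_name="google-europe-west1",     # placeholder
    plan="startup-1",                     # placeholder plan name
    service_name="my-grafana",
    grafana_user_config={
        "unified_alerting_enabled": True,
        "user_auto_assign_org_role": "Viewer",  # one of: Admin, Editor, Viewer
    },
)
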
 
@@ -5056,7 +5073,7 @@ class GrafanaGrafanaUserConfigExternalImageStorage(dict):
  """
  :param str access_key: S3 access key. Requires permissions to the S3 bucket for the s3:PutObject and s3:PutObjectAcl actions. Example: `AAAAAAAAAAAAAAAAAAA`.
  :param str bucket_url: Bucket URL for S3. Example: `https://grafana.s3-ap-southeast-2.amazonaws.com/`.
- :param str provider: Enum: `s3`. Provider type.
+ :param str provider: Enum: `s3`. External image store provider.
  :param str secret_key: S3 secret key. Example: `AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA`.
  """
  pulumi.set(__self__, "access_key", access_key)
@@ -5084,7 +5101,7 @@ class GrafanaGrafanaUserConfigExternalImageStorage(dict):
  @pulumi.getter
  def provider(self) -> str:
  """
- Enum: `s3`. Provider type.
+ Enum: `s3`. External image store provider.
  """
  return pulumi.get(self, "provider")
 
@@ -5225,7 +5242,7 @@ class GrafanaGrafanaUserConfigSmtpServer(dict):
  :param str from_name: Name used in outgoing emails, defaults to Grafana.
  :param str password: Password for SMTP authentication. Example: `ein0eemeev5eeth3Ahfu`.
  :param bool skip_verify: Skip verifying server certificate. Defaults to false.
- :param str starttls_policy: Enum: `OpportunisticStartTLS`, `MandatoryStartTLS`, `NoStartTLS`. Either OpportunisticStartTLS, MandatoryStartTLS or NoStartTLS. Default is OpportunisticStartTLS.
+ :param str starttls_policy: Enum: `MandatoryStartTLS`, `NoStartTLS`, `OpportunisticStartTLS`. Either OpportunisticStartTLS, MandatoryStartTLS or NoStartTLS. Default is OpportunisticStartTLS.
  :param str username: Username for SMTP authentication. Example: `smtpuser`.
  """
  pulumi.set(__self__, "from_address", from_address)
@@ -5294,7 +5311,7 @@ class GrafanaGrafanaUserConfigSmtpServer(dict):
  @pulumi.getter(name="starttlsPolicy")
  def starttls_policy(self) -> Optional[str]:
  """
- Enum: `OpportunisticStartTLS`, `MandatoryStartTLS`, `NoStartTLS`. Either OpportunisticStartTLS, MandatoryStartTLS or NoStartTLS. Default is OpportunisticStartTLS.
+ Enum: `MandatoryStartTLS`, `NoStartTLS`, `OpportunisticStartTLS`. Either OpportunisticStartTLS, MandatoryStartTLS or NoStartTLS. Default is OpportunisticStartTLS.
  """
  return pulumi.get(self, "starttls_policy")
 
@@ -5605,6 +5622,8 @@ class InfluxDbInfluxdbUserConfig(dict):
  suggest = "additional_backup_regions"
  elif key == "customDomain":
  suggest = "custom_domain"
+ elif key == "influxdbVersion":
+ suggest = "influxdb_version"
  elif key == "ipFilterObjects":
  suggest = "ip_filter_objects"
  elif key == "ipFilterStrings":
@@ -5643,6 +5662,7 @@ class InfluxDbInfluxdbUserConfig(dict):
  additional_backup_regions: Optional[str] = None,
  custom_domain: Optional[str] = None,
  influxdb: Optional['outputs.InfluxDbInfluxdbUserConfigInfluxdb'] = None,
+ influxdb_version: Optional[str] = None,
  ip_filter_objects: Optional[Sequence['outputs.InfluxDbInfluxdbUserConfigIpFilterObject']] = None,
  ip_filter_strings: Optional[Sequence[str]] = None,
  ip_filters: Optional[Sequence[str]] = None,
@@ -5658,6 +5678,7 @@ class InfluxDbInfluxdbUserConfig(dict):
  :param str additional_backup_regions: Additional Cloud Regions for Backup Replication.
  :param str custom_domain: Serve the web frontend using a custom CNAME pointing to the Aiven DNS name. Example: `grafana.example.org`.
  :param 'InfluxDbInfluxdbUserConfigInfluxdbArgs' influxdb: influxdb.conf configuration values
+ :param str influxdb_version: Enum: `1.8`, and newer. InfluxDB major version. Default: `1.8`.
  :param Sequence['InfluxDbInfluxdbUserConfigIpFilterObjectArgs'] ip_filter_objects: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`
  :param Sequence[str] ip_filter_strings: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`.
  :param Sequence[str] ip_filters: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`.
@@ -5676,6 +5697,8 @@ class InfluxDbInfluxdbUserConfig(dict):
  pulumi.set(__self__, "custom_domain", custom_domain)
  if influxdb is not None:
  pulumi.set(__self__, "influxdb", influxdb)
+ if influxdb_version is not None:
+ pulumi.set(__self__, "influxdb_version", influxdb_version)
  if ip_filter_objects is not None:
  pulumi.set(__self__, "ip_filter_objects", ip_filter_objects)
  if ip_filter_strings is not None:
@@ -5724,6 +5747,14 @@ class InfluxDbInfluxdbUserConfig(dict):
  """
  return pulumi.get(self, "influxdb")
 
+ @property
+ @pulumi.getter(name="influxdbVersion")
+ def influxdb_version(self) -> Optional[str]:
+ """
+ Enum: `1.8`, and newer. InfluxDB major version. Default: `1.8`.
+ """
+ return pulumi.get(self, "influxdb_version")
+
  @property
  @pulumi.getter(name="ipFilterObjects")
  def ip_filter_objects(self) -> Optional[Sequence['outputs.InfluxDbInfluxdbUserConfigIpFilterObject']]:
@@ -5966,13 +5997,34 @@ class InfluxDbInfluxdbUserConfigIpFilterObject(dict):
 
  @pulumi.output_type
  class InfluxDbInfluxdbUserConfigPrivateAccess(dict):
+ @staticmethod
+ def __key_warning(key: str):
+ suggest = None
+ if key == "userBackup":
+ suggest = "user_backup"
+
+ if suggest:
+ pulumi.log.warn(f"Key '{key}' not found in InfluxDbInfluxdbUserConfigPrivateAccess. Access the value via the '{suggest}' property getter instead.")
+
+ def __getitem__(self, key: str) -> Any:
+ InfluxDbInfluxdbUserConfigPrivateAccess.__key_warning(key)
+ return super().__getitem__(key)
+
+ def get(self, key: str, default = None) -> Any:
+ InfluxDbInfluxdbUserConfigPrivateAccess.__key_warning(key)
+ return super().get(key, default)
+
  def __init__(__self__, *,
- influxdb: Optional[bool] = None):
+ influxdb: Optional[bool] = None,
+ user_backup: Optional[bool] = None):
  """
  :param bool influxdb: Allow clients to connect to influxdb with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
+ :param bool user_backup: Allow clients to connect to user_backup with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
  """
  if influxdb is not None:
  pulumi.set(__self__, "influxdb", influxdb)
+ if user_backup is not None:
+ pulumi.set(__self__, "user_backup", user_backup)
 
  @property
  @pulumi.getter
@@ -5982,16 +6034,45 @@ class InfluxDbInfluxdbUserConfigPrivateAccess(dict):
  """
  return pulumi.get(self, "influxdb")
 
+ @property
+ @pulumi.getter(name="userBackup")
+ def user_backup(self) -> Optional[bool]:
+ """
+ Allow clients to connect to user_backup with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
+ """
+ return pulumi.get(self, "user_backup")
+
 
  @pulumi.output_type
  class InfluxDbInfluxdbUserConfigPrivatelinkAccess(dict):
+ @staticmethod
+ def __key_warning(key: str):
+ suggest = None
+ if key == "userBackup":
+ suggest = "user_backup"
+
+ if suggest:
+ pulumi.log.warn(f"Key '{key}' not found in InfluxDbInfluxdbUserConfigPrivatelinkAccess. Access the value via the '{suggest}' property getter instead.")
+
+ def __getitem__(self, key: str) -> Any:
+ InfluxDbInfluxdbUserConfigPrivatelinkAccess.__key_warning(key)
+ return super().__getitem__(key)
+
+ def get(self, key: str, default = None) -> Any:
+ InfluxDbInfluxdbUserConfigPrivatelinkAccess.__key_warning(key)
+ return super().get(key, default)
+
  def __init__(__self__, *,
- influxdb: Optional[bool] = None):
+ influxdb: Optional[bool] = None,
+ user_backup: Optional[bool] = None):
  """
  :param bool influxdb: Enable influxdb.
+ :param bool user_backup: Enable user_backup.
  """
  if influxdb is not None:
  pulumi.set(__self__, "influxdb", influxdb)
+ if user_backup is not None:
+ pulumi.set(__self__, "user_backup", user_backup)
 
  @property
  @pulumi.getter
@@ -6001,16 +6082,45 @@ class InfluxDbInfluxdbUserConfigPrivatelinkAccess(dict):
  """
  return pulumi.get(self, "influxdb")
 
+ @property
+ @pulumi.getter(name="userBackup")
+ def user_backup(self) -> Optional[bool]:
+ """
+ Enable user_backup.
+ """
+ return pulumi.get(self, "user_backup")
+
 
  @pulumi.output_type
  class InfluxDbInfluxdbUserConfigPublicAccess(dict):
+ @staticmethod
+ def __key_warning(key: str):
+ suggest = None
+ if key == "userBackup":
+ suggest = "user_backup"
+
+ if suggest:
+ pulumi.log.warn(f"Key '{key}' not found in InfluxDbInfluxdbUserConfigPublicAccess. Access the value via the '{suggest}' property getter instead.")
+
+ def __getitem__(self, key: str) -> Any:
+ InfluxDbInfluxdbUserConfigPublicAccess.__key_warning(key)
+ return super().__getitem__(key)
+
+ def get(self, key: str, default = None) -> Any:
+ InfluxDbInfluxdbUserConfigPublicAccess.__key_warning(key)
+ return super().get(key, default)
+
  def __init__(__self__, *,
- influxdb: Optional[bool] = None):
+ influxdb: Optional[bool] = None,
+ user_backup: Optional[bool] = None):
  """
  :param bool influxdb: Allow clients to connect to influxdb from the public internet for service nodes that are in a project VPC or another type of private network.
+ :param bool user_backup: Allow clients to connect to user_backup from the public internet for service nodes that are in a project VPC or another type of private network.
  """
  if influxdb is not None:
  pulumi.set(__self__, "influxdb", influxdb)
+ if user_backup is not None:
+ pulumi.set(__self__, "user_backup", user_backup)
 
  @property
  @pulumi.getter
@@ -6020,6 +6130,14 @@ class InfluxDbInfluxdbUserConfigPublicAccess(dict):
  """
  return pulumi.get(self, "influxdb")
 
+ @property
+ @pulumi.getter(name="userBackup")
+ def user_backup(self) -> Optional[bool]:
+ """
+ Allow clients to connect to user_backup from the public internet for service nodes that are in a project VPC or another type of private network.
+ """
+ return pulumi.get(self, "user_backup")
+
 
  @pulumi.output_type
  class InfluxDbServiceIntegration(dict):
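
The InfluxDB hunks introduce an influxdb_version field and a user_backup flag on the private, privatelink, and public access blocks. A minimal sketch of using them together (everything outside influxdb_user_config is a placeholder):

import pulumi_aiven as aiven

# Minimal sketch: pin the InfluxDB major version and expose the new user_backup
# component publicly; both fields are added to the user config in this release.
metrics = aiven.InfluxDb(
    "metrics",
    project="my-project",                 # placeholder
    cloud_name="google-europe-west1",     # placeholder
    plan="startup-4",                     # placeholder plan name
    service_name="my-influxdb",
    influxdb_user_config={
        "influxdb_version": "1.8",
        "public_access": {
            "influxdb": True,
            "user_backup": True,
        },
    },
)
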
@@ -6631,10 +6749,10 @@ class KafkaConnectKafkaConnectUserConfigKafkaConnect(dict):
  scheduled_rebalance_max_delay_ms: Optional[int] = None,
  session_timeout_ms: Optional[int] = None):
  """
- :param str connector_client_config_override_policy: Enum: `None`, `All`. Defines what client configurations can be overridden by the connector. Default is None.
+ :param str connector_client_config_override_policy: Enum: `All`, `None`. Defines what client configurations can be overridden by the connector. Default is None.
  :param str consumer_auto_offset_reset: Enum: `earliest`, `latest`. What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
  :param int consumer_fetch_max_bytes: Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not a absolute maximum. Example: `52428800`.
- :param str consumer_isolation_level: Enum: `read_uncommitted`, `read_committed`. Transaction read isolation level. read*uncommitted is the default, but read*committed can be used if consume-exactly-once behavior is desired.
+ :param str consumer_isolation_level: Enum: `read_committed`, `read_uncommitted`. Transaction read isolation level. read*uncommitted is the default, but read*committed can be used if consume-exactly-once behavior is desired.
  :param int consumer_max_partition_fetch_bytes: Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress. Example: `1048576`.
  :param int consumer_max_poll_interval_ms: The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).
  :param int consumer_max_poll_records: The maximum number of records returned in a single call to poll() (defaults to 500).
@@ -6642,7 +6760,7 @@ class KafkaConnectKafkaConnectUserConfigKafkaConnect(dict):
  :param int offset_flush_timeout_ms: Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).
  :param int producer_batch_size: This setting gives the upper bound of the batch size to be sent. If there are fewer than this many bytes accumulated for this partition, the producer will `linger` for the linger.ms time waiting for more records to show up. A batch size of zero will disable batching entirely (defaults to 16384).
  :param int producer_buffer_memory: The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432).
- :param str producer_compression_type: Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
+ :param str producer_compression_type: Enum: `gzip`, `lz4`, `none`, `snappy`, `zstd`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
  :param int producer_linger_ms: This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting, however if there are fewer than this many bytes accumulated for this partition the producer will `linger` for the specified time waiting for more records to show up. Defaults to 0.
  :param int producer_max_request_size: This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests. Example: `1048576`.
  :param int scheduled_rebalance_max_delay_ms: The maximum delay that is scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. Defaults to 5 minutes.
@@ -6685,7 +6803,7 @@ class KafkaConnectKafkaConnectUserConfigKafkaConnect(dict):
  @pulumi.getter(name="connectorClientConfigOverridePolicy")
  def connector_client_config_override_policy(self) -> Optional[str]:
  """
- Enum: `None`, `All`. Defines what client configurations can be overridden by the connector. Default is None.
+ Enum: `All`, `None`. Defines what client configurations can be overridden by the connector. Default is None.
  """
  return pulumi.get(self, "connector_client_config_override_policy")
 
@@ -6709,7 +6827,7 @@ class KafkaConnectKafkaConnectUserConfigKafkaConnect(dict):
  @pulumi.getter(name="consumerIsolationLevel")
  def consumer_isolation_level(self) -> Optional[str]:
  """
- Enum: `read_uncommitted`, `read_committed`. Transaction read isolation level. read*uncommitted is the default, but read*committed can be used if consume-exactly-once behavior is desired.
+ Enum: `read_committed`, `read_uncommitted`. Transaction read isolation level. read*uncommitted is the default, but read*committed can be used if consume-exactly-once behavior is desired.
  """
  return pulumi.get(self, "consumer_isolation_level")
 
@@ -6773,7 +6891,7 @@ class KafkaConnectKafkaConnectUserConfigKafkaConnect(dict):
  @pulumi.getter(name="producerCompressionType")
  def producer_compression_type(self) -> Optional[str]:
  """
- Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
+ Enum: `gzip`, `lz4`, `none`, `snappy`, `zstd`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
  """
  return pulumi.get(self, "producer_compression_type")
 
@@ -6974,8 +7092,8 @@ class KafkaConnectKafkaConnectUserConfigSecretProvider(dict):
  vault: Optional['outputs.KafkaConnectKafkaConnectUserConfigSecretProviderVault'] = None):
  """
  :param str name: Name of the secret provider. Used to reference secrets in connector config.
- :param 'KafkaConnectKafkaConnectUserConfigSecretProviderAwsArgs' aws: AWS config for Secret Provider
- :param 'KafkaConnectKafkaConnectUserConfigSecretProviderVaultArgs' vault: Vault Config for Secret Provider
+ :param 'KafkaConnectKafkaConnectUserConfigSecretProviderAwsArgs' aws: AWS secret provider configuration
+ :param 'KafkaConnectKafkaConnectUserConfigSecretProviderVaultArgs' vault: Vault secret provider configuration
  """
  pulumi.set(__self__, "name", name)
  if aws is not None:
@@ -6995,7 +7113,7 @@ class KafkaConnectKafkaConnectUserConfigSecretProvider(dict):
  @pulumi.getter
  def aws(self) -> Optional['outputs.KafkaConnectKafkaConnectUserConfigSecretProviderAws']:
  """
- AWS config for Secret Provider
+ AWS secret provider configuration
  """
  return pulumi.get(self, "aws")
 
@@ -7003,7 +7121,7 @@ class KafkaConnectKafkaConnectUserConfigSecretProvider(dict):
  @pulumi.getter
  def vault(self) -> Optional['outputs.KafkaConnectKafkaConnectUserConfigSecretProviderVault']:
  """
- Vault Config for Secret Provider
+ Vault secret provider configuration
  """
  return pulumi.get(self, "vault")
 
@@ -8001,7 +8119,7 @@ class KafkaKafkaUserConfigKafka(dict):
8001
8119
  transaction_state_log_segment_bytes: Optional[int] = None):
8002
8120
  """
8003
8121
  :param bool auto_create_topics_enable: Enable auto-creation of topics. (Default: true).
8004
- :param str compression_type: Enum: `gzip`, `snappy`, `lz4`, `zstd`, `uncompressed`, `producer`. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `uncompressed` which is equivalent to no compression; and `producer` which means retain the original compression codec set by the producer.(Default: producer).
8122
+ :param str compression_type: Enum: `gzip`, `lz4`, `producer`, `snappy`, `uncompressed`, `zstd`. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `uncompressed` which is equivalent to no compression; and `producer` which means retain the original compression codec set by the producer.(Default: producer).
8005
8123
  :param int connections_max_idle_ms: Idle connections timeout: the server socket processor threads close the connections that idle for longer than this. (Default: 600000 ms (10 minutes)). Example: `540000`.
8006
8124
  :param int default_replication_factor: Replication factor for auto-created topics (Default: 3).
8007
8125
  :param int group_initial_rebalance_delay_ms: The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. (Default: 3000 ms (3 seconds)). Example: `3000`.
@@ -8011,8 +8129,8 @@ class KafkaKafkaUserConfigKafka(dict):
8011
8129
  :param int log_cleaner_max_compaction_lag_ms: The maximum amount of time message will remain uncompacted. Only applicable for logs that are being compacted. (Default: 9223372036854775807 ms (Long.MAX_VALUE)).
8012
8130
  :param float log_cleaner_min_cleanable_ratio: Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option. (Default: 0.5). Example: `0.5`.
8013
8131
  :param int log_cleaner_min_compaction_lag_ms: The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted. (Default: 0 ms).
8014
- :param str log_cleanup_policy: Enum: `delete`, `compact`, `compact,delete`. The default cleanup policy for segments beyond the retention window (Default: delete).
8015
- :param int log_flush_interval_messages: The number of messages accumulated on a log partition before messages are flushed to disk (Default: 9223372036854775807 (Long.MAX_VALUE)). Example: `9223372036854775807`.
8132
+ :param str log_cleanup_policy: Enum: `compact`, `compact,delete`, `delete`. The default cleanup policy for segments beyond the retention window (Default: delete).
8133
+ :param int log_flush_interval_messages: The number of messages accumulated on a log partition before messages are flushed to disk (Default: 9223372036854775807 (Long.MAX_VALUE)).
8016
8134
  :param int log_flush_interval_ms: The maximum time in ms that a message in any topic is kept in memory (page-cache) before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used (Default: null).
8017
8135
  :param int log_index_interval_bytes: The interval with which Kafka adds an entry to the offset index (Default: 4096 bytes (4 kibibytes)). Example: `4096`.
8018
8136
  :param int log_index_size_max_bytes: The maximum size in bytes of the offset index (Default: 10485760 (10 mebibytes)). Example: `10485760`.
@@ -8152,7 +8270,7 @@ class KafkaKafkaUserConfigKafka(dict):
8152
8270
  @pulumi.getter(name="compressionType")
8153
8271
  def compression_type(self) -> Optional[str]:
8154
8272
  """
8155
- Enum: `gzip`, `snappy`, `lz4`, `zstd`, `uncompressed`, `producer`. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `uncompressed` which is equivalent to no compression; and `producer` which means retain the original compression codec set by the producer.(Default: producer).
8273
+ Enum: `gzip`, `lz4`, `producer`, `snappy`, `uncompressed`, `zstd`. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `uncompressed` which is equivalent to no compression; and `producer` which means retain the original compression codec set by the producer.(Default: producer).
8156
8274
  """
8157
8275
  return pulumi.get(self, "compression_type")
8158
8276
 
@@ -8232,7 +8350,7 @@ class KafkaKafkaUserConfigKafka(dict):
8232
8350
  @pulumi.getter(name="logCleanupPolicy")
8233
8351
  def log_cleanup_policy(self) -> Optional[str]:
8234
8352
  """
8235
- Enum: `delete`, `compact`, `compact,delete`. The default cleanup policy for segments beyond the retention window (Default: delete).
8353
+ Enum: `compact`, `compact,delete`, `delete`. The default cleanup policy for segments beyond the retention window (Default: delete).
8236
8354
  """
8237
8355
  return pulumi.get(self, "log_cleanup_policy")
8238
8356
 
@@ -8240,7 +8358,7 @@ class KafkaKafkaUserConfigKafka(dict):
8240
8358
  @pulumi.getter(name="logFlushIntervalMessages")
8241
8359
  def log_flush_interval_messages(self) -> Optional[int]:
8242
8360
  """
8243
- The number of messages accumulated on a log partition before messages are flushed to disk (Default: 9223372036854775807 (Long.MAX_VALUE)). Example: `9223372036854775807`.
8361
+ The number of messages accumulated on a log partition before messages are flushed to disk (Default: 9223372036854775807 (Long.MAX_VALUE)).
8244
8362
  """
8245
8363
  return pulumi.get(self, "log_flush_interval_messages")
8246
8364
 
@@ -8607,10 +8725,10 @@ class KafkaKafkaUserConfigKafkaConnectConfig(dict):
8607
8725
  scheduled_rebalance_max_delay_ms: Optional[int] = None,
8608
8726
  session_timeout_ms: Optional[int] = None):
8609
8727
  """
8610
- :param str connector_client_config_override_policy: Enum: `None`, `All`. Defines what client configurations can be overridden by the connector. Default is None.
8728
+ :param str connector_client_config_override_policy: Enum: `All`, `None`. Defines what client configurations can be overridden by the connector. Default is None.
8611
8729
  :param str consumer_auto_offset_reset: Enum: `earliest`, `latest`. What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
8612
8730
  :param int consumer_fetch_max_bytes: Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not a absolute maximum. Example: `52428800`.
8613
- :param str consumer_isolation_level: Enum: `read_uncommitted`, `read_committed`. Transaction read isolation level. read*uncommitted is the default, but read*committed can be used if consume-exactly-once behavior is desired.
8731
+ :param str consumer_isolation_level: Enum: `read_committed`, `read_uncommitted`. Transaction read isolation level. read*uncommitted is the default, but read*committed can be used if consume-exactly-once behavior is desired.
8614
8732
  :param int consumer_max_partition_fetch_bytes: Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress. Example: `1048576`.
8615
8733
  :param int consumer_max_poll_interval_ms: The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).
8616
8734
  :param int consumer_max_poll_records: The maximum number of records returned in a single call to poll() (defaults to 500).
@@ -8618,7 +8736,7 @@ class KafkaKafkaUserConfigKafkaConnectConfig(dict):
8618
8736
  :param int offset_flush_timeout_ms: Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).
8619
8737
  :param int producer_batch_size: This setting gives the upper bound of the batch size to be sent. If there are fewer than this many bytes accumulated for this partition, the producer will `linger` for the linger.ms time waiting for more records to show up. A batch size of zero will disable batching entirely (defaults to 16384).
8620
8738
  :param int producer_buffer_memory: The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432).
8621
- :param str producer_compression_type: Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
8739
+ :param str producer_compression_type: Enum: `gzip`, `lz4`, `none`, `snappy`, `zstd`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
8622
8740
  :param int producer_linger_ms: This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting, however if there are fewer than this many bytes accumulated for this partition the producer will `linger` for the specified time waiting for more records to show up. Defaults to 0.
8623
8741
  :param int producer_max_request_size: This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests. Example: `1048576`.
8624
8742
  :param int scheduled_rebalance_max_delay_ms: The maximum delay that is scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. Defaults to 5 minutes.
@@ -8661,7 +8779,7 @@ class KafkaKafkaUserConfigKafkaConnectConfig(dict):
8661
8779
  @pulumi.getter(name="connectorClientConfigOverridePolicy")
8662
8780
  def connector_client_config_override_policy(self) -> Optional[str]:
8663
8781
  """
8664
- Enum: `None`, `All`. Defines what client configurations can be overridden by the connector. Default is None.
8782
+ Enum: `All`, `None`. Defines what client configurations can be overridden by the connector. Default is None.
8665
8783
  """
8666
8784
  return pulumi.get(self, "connector_client_config_override_policy")
8667
8785
 
@@ -8685,7 +8803,7 @@ class KafkaKafkaUserConfigKafkaConnectConfig(dict):
8685
8803
  @pulumi.getter(name="consumerIsolationLevel")
8686
8804
  def consumer_isolation_level(self) -> Optional[str]:
8687
8805
  """
8688
- Enum: `read_uncommitted`, `read_committed`. Transaction read isolation level. read*uncommitted is the default, but read*committed can be used if consume-exactly-once behavior is desired.
8806
+ Enum: `read_committed`, `read_uncommitted`. Transaction read isolation level. read*uncommitted is the default, but read*committed can be used if consume-exactly-once behavior is desired.
8689
8807
  """
8690
8808
  return pulumi.get(self, "consumer_isolation_level")
8691
8809
 
@@ -8749,7 +8867,7 @@ class KafkaKafkaUserConfigKafkaConnectConfig(dict):
8749
8867
  @pulumi.getter(name="producerCompressionType")
8750
8868
  def producer_compression_type(self) -> Optional[str]:
8751
8869
  """
8752
- Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
8870
+ Enum: `gzip`, `lz4`, `none`, `snappy`, `zstd`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
8753
8871
  """
8754
8872
  return pulumi.get(self, "producer_compression_type")
8755
8873
 
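For orientation, the Kafka Connect settings documented in the hunks above are supplied through the service-level user config. A minimal, hedged sketch, assuming the matching `...Args` input classes generated in `_inputs.py` and placeholder project, cloud and plan names:

```python
import pulumi_aiven as aiven

# Sketch: tune Kafka Connect producer batching and compression on an Aiven
# for Apache Kafka service. kafka_connect=True is assumed to be the
# user-config flag that enables Connect on the service.
kafka = aiven.Kafka(
    "example-kafka",
    project="my-project",                  # placeholder
    cloud_name="google-europe-west1",      # placeholder
    plan="business-4",                     # placeholder
    service_name="example-kafka",
    kafka_user_config=aiven.KafkaKafkaUserConfigArgs(
        kafka_connect=True,
        kafka_connect_config=aiven.KafkaKafkaUserConfigKafkaConnectConfigArgs(
            connector_client_config_override_policy="All",  # Enum: All, None
            consumer_isolation_level="read_committed",
            producer_compression_type="zstd",  # Enum: gzip, lz4, none, snappy, zstd
            producer_linger_ms=100,
            producer_batch_size=65536,
        ),
    ),
)
```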
@@ -8794,8 +8912,8 @@ class KafkaKafkaUserConfigKafkaConnectSecretProvider(dict):
8794
8912
  vault: Optional['outputs.KafkaKafkaUserConfigKafkaConnectSecretProviderVault'] = None):
8795
8913
  """
8796
8914
  :param str name: Name of the secret provider. Used to reference secrets in connector config.
8797
- :param 'KafkaKafkaUserConfigKafkaConnectSecretProviderAwsArgs' aws: AWS config for Secret Provider
8798
- :param 'KafkaKafkaUserConfigKafkaConnectSecretProviderVaultArgs' vault: Vault Config for Secret Provider
8915
+ :param 'KafkaKafkaUserConfigKafkaConnectSecretProviderAwsArgs' aws: AWS secret provider configuration
8916
+ :param 'KafkaKafkaUserConfigKafkaConnectSecretProviderVaultArgs' vault: Vault secret provider configuration
8799
8917
  """
8800
8918
  pulumi.set(__self__, "name", name)
8801
8919
  if aws is not None:
@@ -8815,7 +8933,7 @@ class KafkaKafkaUserConfigKafkaConnectSecretProvider(dict):
8815
8933
  @pulumi.getter
8816
8934
  def aws(self) -> Optional['outputs.KafkaKafkaUserConfigKafkaConnectSecretProviderAws']:
8817
8935
  """
8818
- AWS config for Secret Provider
8936
+ AWS secret provider configuration
8819
8937
  """
8820
8938
  return pulumi.get(self, "aws")
8821
8939
 
@@ -8823,7 +8941,7 @@ class KafkaKafkaUserConfigKafkaConnectSecretProvider(dict):
8823
8941
  @pulumi.getter
8824
8942
  def vault(self) -> Optional['outputs.KafkaKafkaUserConfigKafkaConnectSecretProviderVault']:
8825
8943
  """
8826
- Vault Config for Secret Provider
8944
+ Vault secret provider configuration
8827
8945
  """
8828
8946
  return pulumi.get(self, "vault")
8829
8947
 
@@ -9040,10 +9158,10 @@ class KafkaKafkaUserConfigKafkaRestConfig(dict):
9040
9158
  :param bool consumer_enable_auto_commit: If true the consumer's offset will be periodically committed to Kafka in the background. Default: `true`.
9041
9159
  :param int consumer_request_max_bytes: Maximum number of bytes in unencoded message keys and values by a single request. Default: `67108864`.
9042
9160
  :param int consumer_request_timeout_ms: Enum: `1000`, `15000`, `30000`. The maximum total time to wait for messages for a request if the maximum number of messages has not yet been reached. Default: `1000`.
9043
- :param str name_strategy: Enum: `topic_name`, `record_name`, `topic_record_name`. Name strategy to use when selecting subject for storing schemas. Default: `topic_name`.
9161
+ :param str name_strategy: Enum: `record_name`, `topic_name`, `topic_record_name`. Name strategy to use when selecting subject for storing schemas. Default: `topic_name`.
9044
9162
  :param bool name_strategy_validation: If true, validate that given schema is registered under expected subject name by the used name strategy when producing messages. Default: `true`.
9045
- :param str producer_acks: Enum: `all`, `-1`, `0`, `1`. The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to `all` or `-1`, the leader will wait for the full set of in-sync replicas to acknowledge the record. Default: `1`.
9046
- :param str producer_compression_type: Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
9163
+ :param str producer_acks: Enum: `-1`, `0`, `1`, `all`. The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to `all` or `-1`, the leader will wait for the full set of in-sync replicas to acknowledge the record. Default: `1`.
9164
+ :param str producer_compression_type: Enum: `gzip`, `lz4`, `none`, `snappy`, `zstd`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
9047
9165
  :param int producer_linger_ms: Wait for up to the given delay to allow batching records together. Default: `0`.
9048
9166
  :param int producer_max_request_size: The maximum size of a request in bytes. Note that Kafka broker can also cap the record batch size. Default: `1048576`.
9049
9167
  :param int simpleconsumer_pool_size_max: Maximum number of SimpleConsumers that can be instantiated per broker. Default: `25`.
@@ -9097,7 +9215,7 @@ class KafkaKafkaUserConfigKafkaRestConfig(dict):
9097
9215
  @pulumi.getter(name="nameStrategy")
9098
9216
  def name_strategy(self) -> Optional[str]:
9099
9217
  """
9100
- Enum: `topic_name`, `record_name`, `topic_record_name`. Name strategy to use when selecting subject for storing schemas. Default: `topic_name`.
9218
+ Enum: `record_name`, `topic_name`, `topic_record_name`. Name strategy to use when selecting subject for storing schemas. Default: `topic_name`.
9101
9219
  """
9102
9220
  return pulumi.get(self, "name_strategy")
9103
9221
 
@@ -9113,7 +9231,7 @@ class KafkaKafkaUserConfigKafkaRestConfig(dict):
9113
9231
  @pulumi.getter(name="producerAcks")
9114
9232
  def producer_acks(self) -> Optional[str]:
9115
9233
  """
9116
- Enum: `all`, `-1`, `0`, `1`. The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to `all` or `-1`, the leader will wait for the full set of in-sync replicas to acknowledge the record. Default: `1`.
9234
+ Enum: `-1`, `0`, `1`, `all`. The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to `all` or `-1`, the leader will wait for the full set of in-sync replicas to acknowledge the record. Default: `1`.
9117
9235
  """
9118
9236
  return pulumi.get(self, "producer_acks")
9119
9237
 
@@ -9121,7 +9239,7 @@ class KafkaKafkaUserConfigKafkaRestConfig(dict):
9121
9239
  @pulumi.getter(name="producerCompressionType")
9122
9240
  def producer_compression_type(self) -> Optional[str]:
9123
9241
  """
9124
- Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
9242
+ Enum: `gzip`, `lz4`, `none`, `snappy`, `zstd`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
9125
9243
  """
9126
9244
  return pulumi.get(self, "producer_compression_type")
9127
9245
 
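The Karapace REST proxy settings above follow the same pattern. A hedged sketch, again assuming the generated `...Args` input classes and that `kafka_rest=True` is the user-config flag that enables the REST proxy:

```python
import pulumi_aiven as aiven

# Sketch: enable the REST proxy and pin producer acks, compression and the
# subject name strategy documented above. Names and plan are placeholders.
kafka_rest = aiven.Kafka(
    "example-kafka-rest",
    project="my-project",
    cloud_name="google-europe-west1",
    plan="business-4",
    service_name="example-kafka-rest",
    kafka_user_config=aiven.KafkaKafkaUserConfigArgs(
        kafka_rest=True,  # assumed user-config toggle for the REST proxy
        kafka_rest_config=aiven.KafkaKafkaUserConfigKafkaRestConfigArgs(
            producer_acks="all",                 # Enum: -1, 0, 1, all
            producer_compression_type="snappy",  # Enum: gzip, lz4, none, snappy, zstd
            name_strategy="topic_record_name",   # Enum: record_name, topic_name, topic_record_name
            name_strategy_validation=True,
        ),
    ),
)
```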
@@ -10432,8 +10550,8 @@ class KafkaTopicConfig(dict):
10432
10550
  segment_ms: Optional[str] = None,
10433
10551
  unclean_leader_election_enable: Optional[bool] = None):
10434
10552
  """
10435
- :param str cleanup_policy: cleanup.policy value
10436
- :param str compression_type: compression.type value
10553
+ :param str cleanup_policy: cleanup.policy value. The possible values are `delete`, `compact` and `compact,delete`.
10554
+ :param str compression_type: compression.type value. The possible values are `snappy`, `gzip`, `lz4`, `producer`, `uncompressed` and `zstd`.
10437
10555
  :param str delete_retention_ms: delete.retention.ms value
10438
10556
  :param str file_delete_delay_ms: file.delete.delay.ms value
10439
10557
  :param str flush_messages: flush.messages value
@@ -10444,9 +10562,9 @@ class KafkaTopicConfig(dict):
10444
10562
  :param str max_compaction_lag_ms: max.compaction.lag.ms value
10445
10563
  :param str max_message_bytes: max.message.bytes value
10446
10564
  :param bool message_downconversion_enable: message.downconversion.enable value
10447
- :param str message_format_version: message.format.version value
10565
+ :param str message_format_version: message.format.version value. The possible values are `0.8.0`, `0.8.1`, `0.8.2`, `0.9.0`, `0.10.0`, `0.10.0-IV0`, `0.10.0-IV1`, `0.10.1`, `0.10.1-IV0`, `0.10.1-IV1`, `0.10.1-IV2`, `0.10.2`, `0.10.2-IV0`, `0.11.0`, `0.11.0-IV0`, `0.11.0-IV1`, `0.11.0-IV2`, `1.0`, `1.0-IV0`, `1.1`, `1.1-IV0`, `2.0`, `2.0-IV0`, `2.0-IV1`, `2.1`, `2.1-IV0`, `2.1-IV1`, `2.1-IV2`, `2.2`, `2.2-IV0`, `2.2-IV1`, `2.3`, `2.3-IV0`, `2.3-IV1`, `2.4`, `2.4-IV0`, `2.4-IV1`, `2.5`, `2.5-IV0`, `2.6`, `2.6-IV0`, `2.7`, `2.7-IV0`, `2.7-IV1`, `2.7-IV2`, `2.8`, `2.8-IV0`, `2.8-IV1`, `3.0`, `3.0-IV0`, `3.0-IV1`, `3.1`, `3.1-IV0`, `3.2`, `3.2-IV0`, `3.3`, `3.3-IV0`, `3.3-IV1`, `3.3-IV2`, `3.3-IV3`, `3.4`, `3.4-IV0`, `3.5`, `3.5-IV0`, `3.5-IV1`, `3.5-IV2`, `3.6`, `3.6-IV0`, `3.6-IV1`, `3.6-IV2`, `3.7`, `3.7-IV0`, `3.7-IV1`, `3.7-IV2`, `3.7-IV3`, `3.7-IV4`, `3.8`, `3.8-IV0`, `3.9`, `3.9-IV0` and `3.9-IV1`.
10448
10566
  :param str message_timestamp_difference_max_ms: message.timestamp.difference.max.ms value
10449
- :param str message_timestamp_type: message.timestamp.type value
10567
+ :param str message_timestamp_type: message.timestamp.type value. The possible values are `CreateTime` and `LogAppendTime`.
10450
10568
  :param float min_cleanable_dirty_ratio: min.cleanable.dirty.ratio value
10451
10569
  :param str min_compaction_lag_ms: min.compaction.lag.ms value
10452
10570
  :param str min_insync_replicas: min.insync.replicas value
@@ -10519,7 +10637,7 @@ class KafkaTopicConfig(dict):
10519
10637
  @pulumi.getter(name="cleanupPolicy")
10520
10638
  def cleanup_policy(self) -> Optional[str]:
10521
10639
  """
10522
- cleanup.policy value
10640
+ cleanup.policy value. The possible values are `delete`, `compact` and `compact,delete`.
10523
10641
  """
10524
10642
  return pulumi.get(self, "cleanup_policy")
10525
10643
 
@@ -10527,7 +10645,7 @@ class KafkaTopicConfig(dict):
10527
10645
  @pulumi.getter(name="compressionType")
10528
10646
  def compression_type(self) -> Optional[str]:
10529
10647
  """
10530
- compression.type value
10648
+ compression.type value. The possible values are `snappy`, `gzip`, `lz4`, `producer`, `uncompressed` and `zstd`.
10531
10649
  """
10532
10650
  return pulumi.get(self, "compression_type")
10533
10651
 
@@ -10615,7 +10733,7 @@ class KafkaTopicConfig(dict):
10615
10733
  @pulumi.getter(name="messageFormatVersion")
10616
10734
  def message_format_version(self) -> Optional[str]:
10617
10735
  """
10618
- message.format.version value
10736
+ message.format.version value. The possible values are `0.8.0`, `0.8.1`, `0.8.2`, `0.9.0`, `0.10.0`, `0.10.0-IV0`, `0.10.0-IV1`, `0.10.1`, `0.10.1-IV0`, `0.10.1-IV1`, `0.10.1-IV2`, `0.10.2`, `0.10.2-IV0`, `0.11.0`, `0.11.0-IV0`, `0.11.0-IV1`, `0.11.0-IV2`, `1.0`, `1.0-IV0`, `1.1`, `1.1-IV0`, `2.0`, `2.0-IV0`, `2.0-IV1`, `2.1`, `2.1-IV0`, `2.1-IV1`, `2.1-IV2`, `2.2`, `2.2-IV0`, `2.2-IV1`, `2.3`, `2.3-IV0`, `2.3-IV1`, `2.4`, `2.4-IV0`, `2.4-IV1`, `2.5`, `2.5-IV0`, `2.6`, `2.6-IV0`, `2.7`, `2.7-IV0`, `2.7-IV1`, `2.7-IV2`, `2.8`, `2.8-IV0`, `2.8-IV1`, `3.0`, `3.0-IV0`, `3.0-IV1`, `3.1`, `3.1-IV0`, `3.2`, `3.2-IV0`, `3.3`, `3.3-IV0`, `3.3-IV1`, `3.3-IV2`, `3.3-IV3`, `3.4`, `3.4-IV0`, `3.5`, `3.5-IV0`, `3.5-IV1`, `3.5-IV2`, `3.6`, `3.6-IV0`, `3.6-IV1`, `3.6-IV2`, `3.7`, `3.7-IV0`, `3.7-IV1`, `3.7-IV2`, `3.7-IV3`, `3.7-IV4`, `3.8`, `3.8-IV0`, `3.9`, `3.9-IV0` and `3.9-IV1`.
10619
10737
  """
10620
10738
  return pulumi.get(self, "message_format_version")
10621
10739
 
@@ -10631,7 +10749,7 @@ class KafkaTopicConfig(dict):
10631
10749
  @pulumi.getter(name="messageTimestampType")
10632
10750
  def message_timestamp_type(self) -> Optional[str]:
10633
10751
  """
10634
- message.timestamp.type value
10752
+ message.timestamp.type value. The possible values are `CreateTime` and `LogAppendTime`.
10635
10753
  """
10636
10754
  return pulumi.get(self, "message_timestamp_type")
10637
10755
 
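The topic-level `cleanup.policy`, `compression.type` and `message.timestamp.type` values now documented above map directly onto `KafkaTopicConfig`. A short sketch with placeholder project and service names:

```python
import pulumi_aiven as aiven

# Sketch: a compacted, zstd-compressed topic using the documented value sets.
topic = aiven.KafkaTopic(
    "example-topic",
    project="my-project",
    service_name="example-kafka",
    topic_name="events",
    partitions=6,
    replication=3,
    config=aiven.KafkaTopicConfigArgs(
        cleanup_policy="compact,delete",         # delete, compact or compact,delete
        compression_type="zstd",                 # snappy, gzip, lz4, producer, uncompressed or zstd
        message_timestamp_type="LogAppendTime",  # CreateTime or LogAppendTime
        min_insync_replicas="2",                 # string-typed in this schema
    ),
)
```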
@@ -13345,9 +13463,9 @@ class MySqlMysqlUserConfigMysql(dict):
13345
13463
  :param int innodb_thread_concurrency: Defines the maximum number of threads permitted inside of InnoDB. Default is 0 (infinite concurrency - no limit). Example: `10`.
13346
13464
  :param int innodb_write_io_threads: The number of I/O threads for write operations in InnoDB. Default is 4. Changing this parameter will lead to a restart of the MySQL service. Example: `10`.
13347
13465
  :param int interactive_timeout: The number of seconds the server waits for activity on an interactive connection before closing it. Example: `3600`.
13348
- :param str internal_tmp_mem_storage_engine: Enum: `TempTable`, `MEMORY`. The storage engine for in-memory internal temporary tables.
13349
- :param str log_output: Enum: `INSIGHTS`, `NONE`, `TABLE`, `INSIGHTS,TABLE`. The slow log output destination when slow*query*log is ON. To enable MySQL AI Insights, choose INSIGHTS. To use MySQL AI Insights and the mysql.slow*log table at the same time, choose INSIGHTS,TABLE. To only use the mysql.slow*log table, choose TABLE. To silence slow logs, choose NONE.
13350
- :param float long_query_time: The slow*query*logs work as SQL statements that take more than long*query*time seconds to execute. Example: `10`.
13466
+ :param str internal_tmp_mem_storage_engine: Enum: `MEMORY`, `TempTable`. The storage engine for in-memory internal temporary tables.
13467
+ :param str log_output: Enum: `INSIGHTS`, `INSIGHTS,TABLE`, `NONE`, `TABLE`. The slow log output destination when slow*query*log is ON. To enable MySQL AI Insights, choose INSIGHTS. To use MySQL AI Insights and the mysql.slow*log table at the same time, choose INSIGHTS,TABLE. To only use the mysql.slow*log table, choose TABLE. To silence slow logs, choose NONE.
13468
+ :param float long_query_time: The slow*query*logs work as SQL statements that take more than long*query*time seconds to execute. Example: `10.0`.
13351
13469
  :param int max_allowed_packet: Size of the largest message in bytes that can be received by the server. Default is 67108864 (64M). Example: `67108864`.
13352
13470
  :param int max_heap_table_size: Limits the size of internal in-memory tables. Also set tmp*table*size. Default is 16777216 (16M). Example: `16777216`.
13353
13471
  :param int net_buffer_length: Start sizes of connection buffer and result buffer. Default is 16384 (16K). Changing this parameter will lead to a restart of the MySQL service. Example: `16384`.
@@ -13563,7 +13681,7 @@ class MySqlMysqlUserConfigMysql(dict):
13563
13681
  @pulumi.getter(name="internalTmpMemStorageEngine")
13564
13682
  def internal_tmp_mem_storage_engine(self) -> Optional[str]:
13565
13683
  """
13566
- Enum: `TempTable`, `MEMORY`. The storage engine for in-memory internal temporary tables.
13684
+ Enum: `MEMORY`, `TempTable`. The storage engine for in-memory internal temporary tables.
13567
13685
  """
13568
13686
  return pulumi.get(self, "internal_tmp_mem_storage_engine")
13569
13687
 
@@ -13571,7 +13689,7 @@ class MySqlMysqlUserConfigMysql(dict):
13571
13689
  @pulumi.getter(name="logOutput")
13572
13690
  def log_output(self) -> Optional[str]:
13573
13691
  """
13574
- Enum: `INSIGHTS`, `NONE`, `TABLE`, `INSIGHTS,TABLE`. The slow log output destination when slow*query*log is ON. To enable MySQL AI Insights, choose INSIGHTS. To use MySQL AI Insights and the mysql.slow*log table at the same time, choose INSIGHTS,TABLE. To only use the mysql.slow*log table, choose TABLE. To silence slow logs, choose NONE.
13692
+ Enum: `INSIGHTS`, `INSIGHTS,TABLE`, `NONE`, `TABLE`. The slow log output destination when slow*query*log is ON. To enable MySQL AI Insights, choose INSIGHTS. To use MySQL AI Insights and the mysql.slow*log table at the same time, choose INSIGHTS,TABLE. To only use the mysql.slow*log table, choose TABLE. To silence slow logs, choose NONE.
13575
13693
  """
13576
13694
  return pulumi.get(self, "log_output")
13577
13695
 
@@ -13579,7 +13697,7 @@ class MySqlMysqlUserConfigMysql(dict):
13579
13697
  @pulumi.getter(name="longQueryTime")
13580
13698
  def long_query_time(self) -> Optional[float]:
13581
13699
  """
13582
- The slow*query*logs work as SQL statements that take more than long*query*time seconds to execute. Example: `10`.
13700
+ The slow*query*logs work as SQL statements that take more than long*query*time seconds to execute. Example: `10.0`.
13583
13701
  """
13584
13702
  return pulumi.get(self, "long_query_time")
13585
13703
 
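The MySQL slow-log settings above (note that `long_query_time` is a float and the `log_output` enum now lists `INSIGHTS,TABLE`) sit under `mysql_user_config.mysql`. A hedged sketch with placeholder names:

```python
import pulumi_aiven as aiven

# Sketch: send slow query logs to AI Insights and the mysql.slow_log table,
# with a 10 second threshold. Project, cloud and plan are placeholders.
mysql = aiven.MySql(
    "example-mysql",
    project="my-project",
    cloud_name="google-europe-west1",
    plan="business-4",
    service_name="example-mysql",
    mysql_user_config=aiven.MySqlMysqlUserConfigArgs(
        mysql=aiven.MySqlMysqlUserConfigMysqlArgs(
            log_output="INSIGHTS,TABLE",                  # Enum: INSIGHTS, INSIGHTS,TABLE, NONE, TABLE
            long_query_time=10.0,                         # float, per the docstring above
            internal_tmp_mem_storage_engine="TempTable",  # Enum: MEMORY, TempTable
        ),
    ),
)
```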
@@ -14202,7 +14320,7 @@ class OpenSearchOpensearchUserConfig(dict):
14202
14320
  :param str additional_backup_regions: Additional Cloud Regions for Backup Replication.
14203
14321
  :param 'OpenSearchOpensearchUserConfigAzureMigrationArgs' azure_migration: Azure migration settings
14204
14322
  :param str custom_domain: Serve the web frontend using a custom CNAME pointing to the Aiven DNS name. Example: `grafana.example.org`.
14205
- :param bool disable_replication_factor_adjustment: Disable automatic replication factor adjustment for multi-node services. By default, Aiven ensures all indexes are replicated at least to two nodes. Note: Due to potential data loss in case of losing a service node, this setting can no longer be activated.
14323
+ :param bool disable_replication_factor_adjustment: Disable automatic replication factor adjustment for multi-node services. By default, Aiven ensures all indexes are replicated at least to two nodes. Note: Due to potential data loss in case of losing a service node, this setting can not be activated unless specifically allowed for the project.
14206
14324
  :param 'OpenSearchOpensearchUserConfigGcsMigrationArgs' gcs_migration: Google Cloud Storage migration settings
14207
14325
  :param Sequence['OpenSearchOpensearchUserConfigIndexPatternArgs'] index_patterns: Index patterns
14208
14326
  :param 'OpenSearchOpensearchUserConfigIndexRollupArgs' index_rollup: Index rollup settings
@@ -14310,7 +14428,7 @@ class OpenSearchOpensearchUserConfig(dict):
14310
14428
  @pulumi.getter(name="disableReplicationFactorAdjustment")
14311
14429
  def disable_replication_factor_adjustment(self) -> Optional[bool]:
14312
14430
  """
14313
- Disable automatic replication factor adjustment for multi-node services. By default, Aiven ensures all indexes are replicated at least to two nodes. Note: Due to potential data loss in case of losing a service node, this setting can no longer be activated.
14431
+ Disable automatic replication factor adjustment for multi-node services. By default, Aiven ensures all indexes are replicated at least to two nodes. Note: Due to potential data loss in case of losing a service node, this setting can not be activated unless specifically allowed for the project.
14314
14432
  """
14315
14433
  return pulumi.get(self, "disable_replication_factor_adjustment")
14316
14434
 
@@ -14513,6 +14631,10 @@ class OpenSearchOpensearchUserConfigAzureMigration(dict):
14513
14631
  suggest = "chunk_size"
14514
14632
  elif key == "endpointSuffix":
14515
14633
  suggest = "endpoint_suffix"
14634
+ elif key == "includeAliases":
14635
+ suggest = "include_aliases"
14636
+ elif key == "restoreGlobalState":
14637
+ suggest = "restore_global_state"
14516
14638
  elif key == "sasToken":
14517
14639
  suggest = "sas_token"
14518
14640
 
@@ -14531,28 +14653,33 @@ class OpenSearchOpensearchUserConfigAzureMigration(dict):
14531
14653
  account: str,
14532
14654
  base_path: str,
14533
14655
  container: str,
14656
+ indices: str,
14534
14657
  snapshot_name: str,
14535
14658
  chunk_size: Optional[str] = None,
14536
14659
  compress: Optional[bool] = None,
14537
14660
  endpoint_suffix: Optional[str] = None,
14538
- indices: Optional[str] = None,
14661
+ include_aliases: Optional[bool] = None,
14539
14662
  key: Optional[str] = None,
14663
+ restore_global_state: Optional[bool] = None,
14540
14664
  sas_token: Optional[str] = None):
14541
14665
  """
14542
- :param str account: Azure account name.
14666
+ :param str account: Account name.
14543
14667
  :param str base_path: The path to the repository data within its container. The value of this setting should not start or end with a /.
14544
14668
  :param str container: Azure container name.
14669
+ :param str indices: A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. Example: `metrics*,logs*,data-20240823`.
14545
14670
  :param str snapshot_name: The snapshot name to restore from.
14546
14671
  :param str chunk_size: Big files can be broken down into chunks during snapshotting if needed. Should be the same as for the 3rd party repository.
14547
14672
  :param bool compress: When set to true metadata files are stored in compressed format.
14548
14673
  :param str endpoint_suffix: Defines the DNS suffix for Azure Storage endpoints.
14549
- :param str indices: A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. By default, a restore operation includes all data streams and indices in the snapshot. If this argument is provided, the restore operation only includes the data streams and indices that you specify. Example: `metrics*,logs*,data-20240823`.
14674
+ :param bool include_aliases: Whether to restore aliases alongside their associated indexes. Default is true.
14550
14675
  :param str key: Azure account secret key. One of key or sas_token should be specified.
14676
+ :param bool restore_global_state: If true, restore the cluster state. Defaults to false.
14551
14677
  :param str sas_token: A shared access signatures (SAS) token. One of key or sas_token should be specified.
14552
14678
  """
14553
14679
  pulumi.set(__self__, "account", account)
14554
14680
  pulumi.set(__self__, "base_path", base_path)
14555
14681
  pulumi.set(__self__, "container", container)
14682
+ pulumi.set(__self__, "indices", indices)
14556
14683
  pulumi.set(__self__, "snapshot_name", snapshot_name)
14557
14684
  if chunk_size is not None:
14558
14685
  pulumi.set(__self__, "chunk_size", chunk_size)
@@ -14560,10 +14687,12 @@ class OpenSearchOpensearchUserConfigAzureMigration(dict):
14560
14687
  pulumi.set(__self__, "compress", compress)
14561
14688
  if endpoint_suffix is not None:
14562
14689
  pulumi.set(__self__, "endpoint_suffix", endpoint_suffix)
14563
- if indices is not None:
14564
- pulumi.set(__self__, "indices", indices)
14690
+ if include_aliases is not None:
14691
+ pulumi.set(__self__, "include_aliases", include_aliases)
14565
14692
  if key is not None:
14566
14693
  pulumi.set(__self__, "key", key)
14694
+ if restore_global_state is not None:
14695
+ pulumi.set(__self__, "restore_global_state", restore_global_state)
14567
14696
  if sas_token is not None:
14568
14697
  pulumi.set(__self__, "sas_token", sas_token)
14569
14698
 
@@ -14571,7 +14700,7 @@ class OpenSearchOpensearchUserConfigAzureMigration(dict):
14571
14700
  @pulumi.getter
14572
14701
  def account(self) -> str:
14573
14702
  """
14574
- Azure account name.
14703
+ Account name.
14575
14704
  """
14576
14705
  return pulumi.get(self, "account")
14577
14706
 
@@ -14591,6 +14720,14 @@ class OpenSearchOpensearchUserConfigAzureMigration(dict):
14591
14720
  """
14592
14721
  return pulumi.get(self, "container")
14593
14722
 
14723
+ @property
14724
+ @pulumi.getter
14725
+ def indices(self) -> str:
14726
+ """
14727
+ A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. Example: `metrics*,logs*,data-20240823`.
14728
+ """
14729
+ return pulumi.get(self, "indices")
14730
+
14594
14731
  @property
14595
14732
  @pulumi.getter(name="snapshotName")
14596
14733
  def snapshot_name(self) -> str:
@@ -14624,12 +14761,12 @@ class OpenSearchOpensearchUserConfigAzureMigration(dict):
14624
14761
  return pulumi.get(self, "endpoint_suffix")
14625
14762
 
14626
14763
  @property
14627
- @pulumi.getter
14628
- def indices(self) -> Optional[str]:
14764
+ @pulumi.getter(name="includeAliases")
14765
+ def include_aliases(self) -> Optional[bool]:
14629
14766
  """
14630
- A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. By default, a restore operation includes all data streams and indices in the snapshot. If this argument is provided, the restore operation only includes the data streams and indices that you specify. Example: `metrics*,logs*,data-20240823`.
14767
+ Whether to restore aliases alongside their associated indexes. Default is true.
14631
14768
  """
14632
- return pulumi.get(self, "indices")
14769
+ return pulumi.get(self, "include_aliases")
14633
14770
 
14634
14771
  @property
14635
14772
  @pulumi.getter
@@ -14639,6 +14776,14 @@ class OpenSearchOpensearchUserConfigAzureMigration(dict):
14639
14776
  """
14640
14777
  return pulumi.get(self, "key")
14641
14778
 
14779
+ @property
14780
+ @pulumi.getter(name="restoreGlobalState")
14781
+ def restore_global_state(self) -> Optional[bool]:
14782
+ """
14783
+ If true, restore the cluster state. Defaults to false.
14784
+ """
14785
+ return pulumi.get(self, "restore_global_state")
14786
+
14642
14787
  @property
14643
14788
  @pulumi.getter(name="sasToken")
14644
14789
  def sas_token(self) -> Optional[str]:
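The Azure snapshot-restore block changed shape in this release: `indices` moved from optional to required, and `include_aliases` / `restore_global_state` were added. A sketch using the `azure_migration` field and the Args classes referenced in the docstrings above; the storage account, container and SAS token are placeholders, and the GCS and S3 blocks later in this file follow the same pattern:

```python
import pulumi_aiven as aiven

# Sketch: restore selected indices from an Azure blob snapshot repository.
opensearch = aiven.OpenSearch(
    "example-opensearch",
    project="my-project",
    cloud_name="google-europe-west1",
    plan="business-4",
    service_name="example-opensearch",
    opensearch_user_config=aiven.OpenSearchOpensearchUserConfigArgs(
        azure_migration=aiven.OpenSearchOpensearchUserConfigAzureMigrationArgs(
            account="mystorageaccount",
            container="snapshots",
            base_path="opensearch/snapshots",
            snapshot_name="snapshot-2024-08-23",
            indices="metrics*,logs*,data-20240823",  # now a required argument
            sas_token="<sas-token>",                 # one of key or sas_token
            include_aliases=True,                    # new optional flag
            restore_global_state=False,              # new optional flag
        ),
    ),
)
```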
@@ -14659,6 +14804,10 @@ class OpenSearchOpensearchUserConfigGcsMigration(dict):
14659
14804
  suggest = "snapshot_name"
14660
14805
  elif key == "chunkSize":
14661
14806
  suggest = "chunk_size"
14807
+ elif key == "includeAliases":
14808
+ suggest = "include_aliases"
14809
+ elif key == "restoreGlobalState":
14810
+ suggest = "restore_global_state"
14662
14811
 
14663
14812
  if suggest:
14664
14813
  pulumi.log.warn(f"Key '{key}' not found in OpenSearchOpensearchUserConfigGcsMigration. Access the value via the '{suggest}' property getter instead.")
@@ -14675,29 +14824,36 @@ class OpenSearchOpensearchUserConfigGcsMigration(dict):
14675
14824
  base_path: str,
14676
14825
  bucket: str,
14677
14826
  credentials: str,
14827
+ indices: str,
14678
14828
  snapshot_name: str,
14679
14829
  chunk_size: Optional[str] = None,
14680
14830
  compress: Optional[bool] = None,
14681
- indices: Optional[str] = None):
14831
+ include_aliases: Optional[bool] = None,
14832
+ restore_global_state: Optional[bool] = None):
14682
14833
  """
14683
14834
  :param str base_path: The path to the repository data within its container. The value of this setting should not start or end with a /.
14684
14835
  :param str bucket: The path to the repository data within its container.
14685
14836
  :param str credentials: Google Cloud Storage credentials file content.
14837
+ :param str indices: A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. Example: `metrics*,logs*,data-20240823`.
14686
14838
  :param str snapshot_name: The snapshot name to restore from.
14687
14839
  :param str chunk_size: Big files can be broken down into chunks during snapshotting if needed. Should be the same as for the 3rd party repository.
14688
14840
  :param bool compress: When set to true metadata files are stored in compressed format.
14689
- :param str indices: A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. By default, a restore operation includes all data streams and indices in the snapshot. If this argument is provided, the restore operation only includes the data streams and indices that you specify. Example: `metrics*,logs*,data-20240823`.
14841
+ :param bool include_aliases: Whether to restore aliases alongside their associated indexes. Default is true.
14842
+ :param bool restore_global_state: If true, restore the cluster state. Defaults to false.
14690
14843
  """
14691
14844
  pulumi.set(__self__, "base_path", base_path)
14692
14845
  pulumi.set(__self__, "bucket", bucket)
14693
14846
  pulumi.set(__self__, "credentials", credentials)
14847
+ pulumi.set(__self__, "indices", indices)
14694
14848
  pulumi.set(__self__, "snapshot_name", snapshot_name)
14695
14849
  if chunk_size is not None:
14696
14850
  pulumi.set(__self__, "chunk_size", chunk_size)
14697
14851
  if compress is not None:
14698
14852
  pulumi.set(__self__, "compress", compress)
14699
- if indices is not None:
14700
- pulumi.set(__self__, "indices", indices)
14853
+ if include_aliases is not None:
14854
+ pulumi.set(__self__, "include_aliases", include_aliases)
14855
+ if restore_global_state is not None:
14856
+ pulumi.set(__self__, "restore_global_state", restore_global_state)
14701
14857
 
14702
14858
  @property
14703
14859
  @pulumi.getter(name="basePath")
@@ -14723,6 +14879,14 @@ class OpenSearchOpensearchUserConfigGcsMigration(dict):
14723
14879
  """
14724
14880
  return pulumi.get(self, "credentials")
14725
14881
 
14882
+ @property
14883
+ @pulumi.getter
14884
+ def indices(self) -> str:
14885
+ """
14886
+ A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. Example: `metrics*,logs*,data-20240823`.
14887
+ """
14888
+ return pulumi.get(self, "indices")
14889
+
14726
14890
  @property
14727
14891
  @pulumi.getter(name="snapshotName")
14728
14892
  def snapshot_name(self) -> str:
@@ -14748,12 +14912,20 @@ class OpenSearchOpensearchUserConfigGcsMigration(dict):
14748
14912
  return pulumi.get(self, "compress")
14749
14913
 
14750
14914
  @property
14751
- @pulumi.getter
14752
- def indices(self) -> Optional[str]:
14915
+ @pulumi.getter(name="includeAliases")
14916
+ def include_aliases(self) -> Optional[bool]:
14753
14917
  """
14754
- A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. By default, a restore operation includes all data streams and indices in the snapshot. If this argument is provided, the restore operation only includes the data streams and indices that you specify. Example: `metrics*,logs*,data-20240823`.
14918
+ Whether to restore aliases alongside their associated indexes. Default is true.
14755
14919
  """
14756
- return pulumi.get(self, "indices")
14920
+ return pulumi.get(self, "include_aliases")
14921
+
14922
+ @property
14923
+ @pulumi.getter(name="restoreGlobalState")
14924
+ def restore_global_state(self) -> Optional[bool]:
14925
+ """
14926
+ If true, restore the cluster state. Defaults to false.
14927
+ """
14928
+ return pulumi.get(self, "restore_global_state")
14757
14929
 
14758
14930
 
14759
14931
  @pulumi.output_type
@@ -15253,6 +15425,8 @@ class OpenSearchOpensearchUserConfigOpensearch(dict):
15253
15425
  suggest = "script_max_compilations_rate"
15254
15426
  elif key == "searchBackpressure":
15255
15427
  suggest = "search_backpressure"
15428
+ elif key == "searchInsightsTopQueries":
15429
+ suggest = "search_insights_top_queries"
15256
15430
  elif key == "searchMaxBuckets":
15257
15431
  suggest = "search_max_buckets"
15258
15432
  elif key == "shardIndexingPressure":
@@ -15325,6 +15499,7 @@ class OpenSearchOpensearchUserConfigOpensearch(dict):
15325
15499
  reindex_remote_whitelists: Optional[Sequence[str]] = None,
15326
15500
  script_max_compilations_rate: Optional[str] = None,
15327
15501
  search_backpressure: Optional['outputs.OpenSearchOpensearchUserConfigOpensearchSearchBackpressure'] = None,
15502
+ search_insights_top_queries: Optional['outputs.OpenSearchOpensearchUserConfigOpensearchSearchInsightsTopQueries'] = None,
15328
15503
  search_max_buckets: Optional[int] = None,
15329
15504
  shard_indexing_pressure: Optional['outputs.OpenSearchOpensearchUserConfigOpensearchShardIndexingPressure'] = None,
15330
15505
  thread_pool_analyze_queue_size: Optional[int] = None,
@@ -15452,6 +15627,8 @@ class OpenSearchOpensearchUserConfigOpensearch(dict):
15452
15627
  pulumi.set(__self__, "script_max_compilations_rate", script_max_compilations_rate)
15453
15628
  if search_backpressure is not None:
15454
15629
  pulumi.set(__self__, "search_backpressure", search_backpressure)
15630
+ if search_insights_top_queries is not None:
15631
+ pulumi.set(__self__, "search_insights_top_queries", search_insights_top_queries)
15455
15632
  if search_max_buckets is not None:
15456
15633
  pulumi.set(__self__, "search_max_buckets", search_max_buckets)
15457
15634
  if shard_indexing_pressure is not None:
@@ -15743,6 +15920,11 @@ class OpenSearchOpensearchUserConfigOpensearch(dict):
15743
15920
  """
15744
15921
  return pulumi.get(self, "search_backpressure")
15745
15922
 
15923
+ @property
15924
+ @pulumi.getter(name="searchInsightsTopQueries")
15925
+ def search_insights_top_queries(self) -> Optional['outputs.OpenSearchOpensearchUserConfigOpensearchSearchInsightsTopQueries']:
15926
+ return pulumi.get(self, "search_insights_top_queries")
15927
+
15746
15928
  @property
15747
15929
  @pulumi.getter(name="searchMaxBuckets")
15748
15930
  def search_max_buckets(self) -> Optional[int]:
@@ -16207,7 +16389,7 @@ class OpenSearchOpensearchUserConfigOpensearchSearchBackpressure(dict):
16207
16389
  search_shard_task: Optional['outputs.OpenSearchOpensearchUserConfigOpensearchSearchBackpressureSearchShardTask'] = None,
16208
16390
  search_task: Optional['outputs.OpenSearchOpensearchUserConfigOpensearchSearchBackpressureSearchTask'] = None):
16209
16391
  """
16210
- :param str mode: Enum: `monitor_only`, `enforced`, `disabled`. The search backpressure mode. Valid values are monitor*only, enforced, or disabled. Default is monitor*only.
16392
+ :param str mode: Enum: `disabled`, `enforced`, `monitor_only`. The search backpressure mode. Valid values are monitor*only, enforced, or disabled. Default is monitor*only.
16211
16393
  :param 'OpenSearchOpensearchUserConfigOpensearchSearchBackpressureNodeDuressArgs' node_duress: Node duress settings
16212
16394
  :param 'OpenSearchOpensearchUserConfigOpensearchSearchBackpressureSearchShardTaskArgs' search_shard_task: Search shard settings
16213
16395
  :param 'OpenSearchOpensearchUserConfigOpensearchSearchBackpressureSearchTaskArgs' search_task: Search task settings
@@ -16225,7 +16407,7 @@ class OpenSearchOpensearchUserConfigOpensearchSearchBackpressure(dict):
16225
16407
  @pulumi.getter
16226
16408
  def mode(self) -> Optional[str]:
16227
16409
  """
16228
- Enum: `monitor_only`, `enforced`, `disabled`. The search backpressure mode. Valid values are monitor*only, enforced, or disabled. Default is monitor*only.
16410
+ Enum: `disabled`, `enforced`, `monitor_only`. The search backpressure mode. Valid values are monitor*only, enforced, or disabled. Default is monitor*only.
16229
16411
  """
16230
16412
  return pulumi.get(self, "mode")
16231
16413
 
@@ -16614,6 +16796,235 @@ class OpenSearchOpensearchUserConfigOpensearchSearchBackpressureSearchTask(dict)
16614
16796
  return pulumi.get(self, "total_heap_percent_threshold")
16615
16797
 
16616
16798
 
16799
+ @pulumi.output_type
16800
+ class OpenSearchOpensearchUserConfigOpensearchSearchInsightsTopQueries(dict):
16801
+ def __init__(__self__, *,
16802
+ cpu: Optional['outputs.OpenSearchOpensearchUserConfigOpensearchSearchInsightsTopQueriesCpu'] = None,
16803
+ latency: Optional['outputs.OpenSearchOpensearchUserConfigOpensearchSearchInsightsTopQueriesLatency'] = None,
16804
+ memory: Optional['outputs.OpenSearchOpensearchUserConfigOpensearchSearchInsightsTopQueriesMemory'] = None):
16805
+ """
16806
+ :param 'OpenSearchOpensearchUserConfigOpensearchSearchInsightsTopQueriesCpuArgs' cpu: Top N queries monitoring by CPU
16807
+ :param 'OpenSearchOpensearchUserConfigOpensearchSearchInsightsTopQueriesLatencyArgs' latency: Top N queries monitoring by latency
16808
+ :param 'OpenSearchOpensearchUserConfigOpensearchSearchInsightsTopQueriesMemoryArgs' memory: Top N queries monitoring by memory
16809
+ """
16810
+ if cpu is not None:
16811
+ pulumi.set(__self__, "cpu", cpu)
16812
+ if latency is not None:
16813
+ pulumi.set(__self__, "latency", latency)
16814
+ if memory is not None:
16815
+ pulumi.set(__self__, "memory", memory)
16816
+
16817
+ @property
16818
+ @pulumi.getter
16819
+ def cpu(self) -> Optional['outputs.OpenSearchOpensearchUserConfigOpensearchSearchInsightsTopQueriesCpu']:
16820
+ """
16821
+ Top N queries monitoring by CPU
16822
+ """
16823
+ return pulumi.get(self, "cpu")
16824
+
16825
+ @property
16826
+ @pulumi.getter
16827
+ def latency(self) -> Optional['outputs.OpenSearchOpensearchUserConfigOpensearchSearchInsightsTopQueriesLatency']:
16828
+ """
16829
+ Top N queries monitoring by latency
16830
+ """
16831
+ return pulumi.get(self, "latency")
16832
+
16833
+ @property
16834
+ @pulumi.getter
16835
+ def memory(self) -> Optional['outputs.OpenSearchOpensearchUserConfigOpensearchSearchInsightsTopQueriesMemory']:
16836
+ """
16837
+ Top N queries monitoring by memory
16838
+ """
16839
+ return pulumi.get(self, "memory")
16840
+
16841
+
16842
+ @pulumi.output_type
16843
+ class OpenSearchOpensearchUserConfigOpensearchSearchInsightsTopQueriesCpu(dict):
16844
+ @staticmethod
16845
+ def __key_warning(key: str):
16846
+ suggest = None
16847
+ if key == "topNSize":
16848
+ suggest = "top_n_size"
16849
+ elif key == "windowSize":
16850
+ suggest = "window_size"
16851
+
16852
+ if suggest:
16853
+ pulumi.log.warn(f"Key '{key}' not found in OpenSearchOpensearchUserConfigOpensearchSearchInsightsTopQueriesCpu. Access the value via the '{suggest}' property getter instead.")
16854
+
16855
+ def __getitem__(self, key: str) -> Any:
16856
+ OpenSearchOpensearchUserConfigOpensearchSearchInsightsTopQueriesCpu.__key_warning(key)
16857
+ return super().__getitem__(key)
16858
+
16859
+ def get(self, key: str, default = None) -> Any:
16860
+ OpenSearchOpensearchUserConfigOpensearchSearchInsightsTopQueriesCpu.__key_warning(key)
16861
+ return super().get(key, default)
16862
+
16863
+ def __init__(__self__, *,
16864
+ enabled: Optional[bool] = None,
16865
+ top_n_size: Optional[int] = None,
16866
+ window_size: Optional[str] = None):
16867
+ """
16868
+ :param bool enabled: Enable or disable top N query monitoring by the metric. Default: `false`.
16869
+ :param int top_n_size: Specify the value of N for the top N queries by the metric.
16870
+ :param str window_size: The window size of the top N queries by the metric.
16871
+ """
16872
+ if enabled is not None:
16873
+ pulumi.set(__self__, "enabled", enabled)
16874
+ if top_n_size is not None:
16875
+ pulumi.set(__self__, "top_n_size", top_n_size)
16876
+ if window_size is not None:
16877
+ pulumi.set(__self__, "window_size", window_size)
16878
+
16879
+ @property
16880
+ @pulumi.getter
16881
+ def enabled(self) -> Optional[bool]:
16882
+ """
16883
+ Enable or disable top N query monitoring by the metric. Default: `false`.
16884
+ """
16885
+ return pulumi.get(self, "enabled")
16886
+
16887
+ @property
16888
+ @pulumi.getter(name="topNSize")
16889
+ def top_n_size(self) -> Optional[int]:
16890
+ """
16891
+ Specify the value of N for the top N queries by the metric.
16892
+ """
16893
+ return pulumi.get(self, "top_n_size")
16894
+
16895
+ @property
16896
+ @pulumi.getter(name="windowSize")
16897
+ def window_size(self) -> Optional[str]:
16898
+ """
16899
+ The window size of the top N queries by the metric.
16900
+ """
16901
+ return pulumi.get(self, "window_size")
16902
+
16903
+
16904
+ @pulumi.output_type
16905
+ class OpenSearchOpensearchUserConfigOpensearchSearchInsightsTopQueriesLatency(dict):
16906
+ @staticmethod
16907
+ def __key_warning(key: str):
16908
+ suggest = None
16909
+ if key == "topNSize":
16910
+ suggest = "top_n_size"
16911
+ elif key == "windowSize":
16912
+ suggest = "window_size"
16913
+
16914
+ if suggest:
16915
+ pulumi.log.warn(f"Key '{key}' not found in OpenSearchOpensearchUserConfigOpensearchSearchInsightsTopQueriesLatency. Access the value via the '{suggest}' property getter instead.")
16916
+
16917
+ def __getitem__(self, key: str) -> Any:
16918
+ OpenSearchOpensearchUserConfigOpensearchSearchInsightsTopQueriesLatency.__key_warning(key)
16919
+ return super().__getitem__(key)
16920
+
16921
+ def get(self, key: str, default = None) -> Any:
16922
+ OpenSearchOpensearchUserConfigOpensearchSearchInsightsTopQueriesLatency.__key_warning(key)
16923
+ return super().get(key, default)
16924
+
16925
+ def __init__(__self__, *,
16926
+ enabled: Optional[bool] = None,
16927
+ top_n_size: Optional[int] = None,
16928
+ window_size: Optional[str] = None):
16929
+ """
16930
+ :param bool enabled: Enable or disable top N query monitoring by the metric. Default: `false`.
16931
+ :param int top_n_size: Specify the value of N for the top N queries by the metric.
16932
+ :param str window_size: The window size of the top N queries by the metric.
16933
+ """
16934
+ if enabled is not None:
16935
+ pulumi.set(__self__, "enabled", enabled)
16936
+ if top_n_size is not None:
16937
+ pulumi.set(__self__, "top_n_size", top_n_size)
16938
+ if window_size is not None:
16939
+ pulumi.set(__self__, "window_size", window_size)
16940
+
16941
+ @property
16942
+ @pulumi.getter
16943
+ def enabled(self) -> Optional[bool]:
16944
+ """
16945
+ Enable or disable top N query monitoring by the metric. Default: `false`.
16946
+ """
16947
+ return pulumi.get(self, "enabled")
16948
+
16949
+ @property
16950
+ @pulumi.getter(name="topNSize")
16951
+ def top_n_size(self) -> Optional[int]:
16952
+ """
16953
+ Specify the value of N for the top N queries by the metric.
16954
+ """
16955
+ return pulumi.get(self, "top_n_size")
16956
+
16957
+ @property
16958
+ @pulumi.getter(name="windowSize")
16959
+ def window_size(self) -> Optional[str]:
16960
+ """
16961
+ The window size of the top N queries by the metric.
16962
+ """
16963
+ return pulumi.get(self, "window_size")
16964
+
16965
+
16966
+ @pulumi.output_type
16967
+ class OpenSearchOpensearchUserConfigOpensearchSearchInsightsTopQueriesMemory(dict):
16968
+ @staticmethod
16969
+ def __key_warning(key: str):
16970
+ suggest = None
16971
+ if key == "topNSize":
16972
+ suggest = "top_n_size"
16973
+ elif key == "windowSize":
16974
+ suggest = "window_size"
16975
+
16976
+ if suggest:
16977
+ pulumi.log.warn(f"Key '{key}' not found in OpenSearchOpensearchUserConfigOpensearchSearchInsightsTopQueriesMemory. Access the value via the '{suggest}' property getter instead.")
16978
+
16979
+ def __getitem__(self, key: str) -> Any:
16980
+ OpenSearchOpensearchUserConfigOpensearchSearchInsightsTopQueriesMemory.__key_warning(key)
16981
+ return super().__getitem__(key)
16982
+
16983
+ def get(self, key: str, default = None) -> Any:
16984
+ OpenSearchOpensearchUserConfigOpensearchSearchInsightsTopQueriesMemory.__key_warning(key)
16985
+ return super().get(key, default)
16986
+
16987
+ def __init__(__self__, *,
16988
+ enabled: Optional[bool] = None,
16989
+ top_n_size: Optional[int] = None,
16990
+ window_size: Optional[str] = None):
16991
+ """
16992
+ :param bool enabled: Enable or disable top N query monitoring by the metric. Default: `false`.
16993
+ :param int top_n_size: Specify the value of N for the top N queries by the metric.
16994
+ :param str window_size: The window size of the top N queries by the metric.
16995
+ """
16996
+ if enabled is not None:
16997
+ pulumi.set(__self__, "enabled", enabled)
16998
+ if top_n_size is not None:
16999
+ pulumi.set(__self__, "top_n_size", top_n_size)
17000
+ if window_size is not None:
17001
+ pulumi.set(__self__, "window_size", window_size)
17002
+
17003
+ @property
17004
+ @pulumi.getter
17005
+ def enabled(self) -> Optional[bool]:
17006
+ """
17007
+ Enable or disable top N query monitoring by the metric. Default: `false`.
17008
+ """
17009
+ return pulumi.get(self, "enabled")
17010
+
17011
+ @property
17012
+ @pulumi.getter(name="topNSize")
17013
+ def top_n_size(self) -> Optional[int]:
17014
+ """
17015
+ Specify the value of N for the top N queries by the metric.
17016
+ """
17017
+ return pulumi.get(self, "top_n_size")
17018
+
17019
+ @property
17020
+ @pulumi.getter(name="windowSize")
17021
+ def window_size(self) -> Optional[str]:
17022
+ """
17023
+ The window size of the top N queries by the metric.
17024
+ """
17025
+ return pulumi.get(self, "window_size")
17026
+
17027
+
16617
17028
  @pulumi.output_type
16618
17029
  class OpenSearchOpensearchUserConfigOpensearchShardIndexingPressure(dict):
16619
17030
  @staticmethod
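The new `search_insights_top_queries` block added above nests under the `opensearch` settings. A hedged sketch; the `window_size` value ("10m") is assumed to be a duration string accepted by OpenSearch query insights:

```python
import pulumi_aiven as aiven

# Sketch: enable top-N query monitoring by latency and CPU.
insights = aiven.OpenSearch(
    "example-opensearch-insights",
    project="my-project",
    cloud_name="google-europe-west1",
    plan="business-4",
    service_name="example-opensearch-insights",
    opensearch_user_config=aiven.OpenSearchOpensearchUserConfigArgs(
        opensearch=aiven.OpenSearchOpensearchUserConfigOpensearchArgs(
            search_insights_top_queries=aiven.OpenSearchOpensearchUserConfigOpensearchSearchInsightsTopQueriesArgs(
                latency=aiven.OpenSearchOpensearchUserConfigOpensearchSearchInsightsTopQueriesLatencyArgs(
                    enabled=True, top_n_size=10, window_size="10m",
                ),
                cpu=aiven.OpenSearchOpensearchUserConfigOpensearchSearchInsightsTopQueriesCpuArgs(
                    enabled=True, top_n_size=10, window_size="10m",
                ),
            ),
        ),
    ),
)
```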
@@ -17055,6 +17466,10 @@ class OpenSearchOpensearchUserConfigS3Migration(dict):
17055
17466
  suggest = "snapshot_name"
17056
17467
  elif key == "chunkSize":
17057
17468
  suggest = "chunk_size"
17469
+ elif key == "includeAliases":
17470
+ suggest = "include_aliases"
17471
+ elif key == "restoreGlobalState":
17472
+ suggest = "restore_global_state"
17058
17473
  elif key == "serverSideEncryption":
17059
17474
  suggest = "server_side_encryption"
17060
17475
 
@@ -17073,30 +17488,35 @@ class OpenSearchOpensearchUserConfigS3Migration(dict):
17073
17488
  access_key: str,
17074
17489
  base_path: str,
17075
17490
  bucket: str,
17491
+ indices: str,
17076
17492
  region: str,
17077
17493
  secret_key: str,
17078
17494
  snapshot_name: str,
17079
17495
  chunk_size: Optional[str] = None,
17080
17496
  compress: Optional[bool] = None,
17081
17497
  endpoint: Optional[str] = None,
17082
- indices: Optional[str] = None,
17498
+ include_aliases: Optional[bool] = None,
17499
+ restore_global_state: Optional[bool] = None,
17083
17500
  server_side_encryption: Optional[bool] = None):
17084
17501
  """
17085
17502
  :param str access_key: AWS Access key.
17086
17503
  :param str base_path: The path to the repository data within its container. The value of this setting should not start or end with a /.
17087
17504
  :param str bucket: S3 bucket name.
17505
+ :param str indices: A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. Example: `metrics*,logs*,data-20240823`.
17088
17506
  :param str region: S3 region.
17089
17507
  :param str secret_key: AWS secret key.
17090
17508
  :param str snapshot_name: The snapshot name to restore from.
17091
17509
  :param str chunk_size: Big files can be broken down into chunks during snapshotting if needed. Should be the same as for the 3rd party repository.
17092
17510
  :param bool compress: When set to true metadata files are stored in compressed format.
17093
17511
  :param str endpoint: The S3 service endpoint to connect to. If you are using an S3-compatible service then you should set this to the service’s endpoint.
17094
- :param str indices: A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. By default, a restore operation includes all data streams and indices in the snapshot. If this argument is provided, the restore operation only includes the data streams and indices that you specify. Example: `metrics*,logs*,data-20240823`.
17512
+ :param bool include_aliases: Whether to restore aliases alongside their associated indexes. Default is true.
17513
+ :param bool restore_global_state: If true, restore the cluster state. Defaults to false.
17095
17514
  :param bool server_side_encryption: When set to true files are encrypted on server side.
17096
17515
  """
17097
17516
  pulumi.set(__self__, "access_key", access_key)
17098
17517
  pulumi.set(__self__, "base_path", base_path)
17099
17518
  pulumi.set(__self__, "bucket", bucket)
17519
+ pulumi.set(__self__, "indices", indices)
17100
17520
  pulumi.set(__self__, "region", region)
17101
17521
  pulumi.set(__self__, "secret_key", secret_key)
17102
17522
  pulumi.set(__self__, "snapshot_name", snapshot_name)
@@ -17106,8 +17526,10 @@ class OpenSearchOpensearchUserConfigS3Migration(dict):
17106
17526
  pulumi.set(__self__, "compress", compress)
17107
17527
  if endpoint is not None:
17108
17528
  pulumi.set(__self__, "endpoint", endpoint)
17109
- if indices is not None:
17110
- pulumi.set(__self__, "indices", indices)
17529
+ if include_aliases is not None:
17530
+ pulumi.set(__self__, "include_aliases", include_aliases)
17531
+ if restore_global_state is not None:
17532
+ pulumi.set(__self__, "restore_global_state", restore_global_state)
17111
17533
  if server_side_encryption is not None:
17112
17534
  pulumi.set(__self__, "server_side_encryption", server_side_encryption)
17113
17535
 
@@ -17135,6 +17557,14 @@ class OpenSearchOpensearchUserConfigS3Migration(dict):
17135
17557
  """
17136
17558
  return pulumi.get(self, "bucket")
17137
17559
 
17560
+ @property
17561
+ @pulumi.getter
17562
+ def indices(self) -> str:
17563
+ """
17564
+ A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. Example: `metrics*,logs*,data-20240823`.
17565
+ """
17566
+ return pulumi.get(self, "indices")
17567
+
17138
17568
  @property
17139
17569
  @pulumi.getter
17140
17570
  def region(self) -> str:
@@ -17184,12 +17614,20 @@ class OpenSearchOpensearchUserConfigS3Migration(dict):
17184
17614
  return pulumi.get(self, "endpoint")
17185
17615
 
17186
17616
  @property
17187
- @pulumi.getter
17188
- def indices(self) -> Optional[str]:
17617
+ @pulumi.getter(name="includeAliases")
17618
+ def include_aliases(self) -> Optional[bool]:
17189
17619
  """
17190
- A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. By default, a restore operation includes all data streams and indices in the snapshot. If this argument is provided, the restore operation only includes the data streams and indices that you specify. Example: `metrics*,logs*,data-20240823`.
17620
+ Whether to restore aliases alongside their associated indexes. Default is true.
17191
17621
  """
17192
- return pulumi.get(self, "indices")
17622
+ return pulumi.get(self, "include_aliases")
17623
+
17624
+ @property
17625
+ @pulumi.getter(name="restoreGlobalState")
17626
+ def restore_global_state(self) -> Optional[bool]:
17627
+ """
17628
+ If true, restore the cluster state. Defaults to false.
17629
+ """
17630
+ return pulumi.get(self, "restore_global_state")
17193
17631
 
17194
17632
  @property
17195
17633
  @pulumi.getter(name="serverSideEncryption")
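The S3 restore block mirrors the Azure and GCS ones: `indices` is now required here as well, and the same two flags were added. A brief sketch with placeholder credentials, bucket and region:

```python
import pulumi_aiven as aiven

# Sketch: restore from an S3 snapshot repository with server-side encryption.
s3_restore = aiven.OpenSearch(
    "example-opensearch-s3",
    project="my-project",
    cloud_name="aws-eu-west-1",
    plan="business-4",
    service_name="example-opensearch-s3",
    opensearch_user_config=aiven.OpenSearchOpensearchUserConfigArgs(
        s3_migration=aiven.OpenSearchOpensearchUserConfigS3MigrationArgs(
            access_key="<aws-access-key>",
            secret_key="<aws-secret-key>",
            bucket="my-snapshot-bucket",
            region="eu-west-1",
            base_path="opensearch/snapshots",
            snapshot_name="snapshot-2024-08-23",
            indices="metrics*,logs*",      # required as of this version
            restore_global_state=False,
            server_side_encryption=True,
        ),
    ),
)
```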
@@ -17502,8 +17940,8 @@ class OrganizationPermissionPermission(dict):
17502
17940
  create_time: Optional[str] = None,
17503
17941
  update_time: Optional[str] = None):
17504
17942
  """
17505
- :param Sequence[str] permissions: List of permissions. The possible values are `admin`, `developer`, `operator`, `project:permissions:read`, `read_only` and `service:logs:read`.
17506
- :param str principal_id: ID of the user or group.
17943
+ :param Sequence[str] permissions: List of [roles and permissions](https://aiven.io/docs/platform/concepts/permissions) to grant. The possible values are `admin`, `developer`, `operator`, `organization:app_users:write`, `organization:audit_logs:read`, `organization:billing:read`, `organization:billing:write`, `organization:domains:write`, `organization:groups:write`, `organization:idps:write`, `organization:network:read`, `organization:network:write`, `organization:permissions:read`, `organization:permissions:write`, `organization:projects:read`, `organization:projects:write`, `organization:users:write`, `project:audit_logs:read`, `project:integrations:read`, `project:integrations:write`, `project:networking:read`, `project:networking:write`, `project:permissions:read`, `project:services:read`, `project:services:write`, `read_only`, `role:organization:admin`, `role:services:maintenance`, `role:services:recover`, `service:configuration:write`, `service:data:write`, `service:logs:read`, `service:secrets:read` and `service:users:write`.
17944
+ :param str principal_id: ID of the user or group to grant permissions to. Only active users who have accepted an [invite](https://aiven.io/docs/platform/howto/manage-org-users) to join the organization can be granted permissions.
17507
17945
  :param str principal_type: The type of principal. The possible values are `user` and `user_group`.
17508
17946
  :param str create_time: Time created.
17509
17947
  :param str update_time: Time updated.
@@ -17520,7 +17958,7 @@ class OrganizationPermissionPermission(dict):
17520
17958
  @pulumi.getter
17521
17959
  def permissions(self) -> Sequence[str]:
17522
17960
  """
17523
- List of permissions. The possible values are `admin`, `developer`, `operator`, `project:permissions:read`, `read_only` and `service:logs:read`.
17961
+ List of [roles and permissions](https://aiven.io/docs/platform/concepts/permissions) to grant. The possible values are `admin`, `developer`, `operator`, `organization:app_users:write`, `organization:audit_logs:read`, `organization:billing:read`, `organization:billing:write`, `organization:domains:write`, `organization:groups:write`, `organization:idps:write`, `organization:network:read`, `organization:network:write`, `organization:permissions:read`, `organization:permissions:write`, `organization:projects:read`, `organization:projects:write`, `organization:users:write`, `project:audit_logs:read`, `project:integrations:read`, `project:integrations:write`, `project:networking:read`, `project:networking:write`, `project:permissions:read`, `project:services:read`, `project:services:write`, `read_only`, `role:organization:admin`, `role:services:maintenance`, `role:services:recover`, `service:configuration:write`, `service:data:write`, `service:logs:read`, `service:secrets:read` and `service:users:write`.
17524
17962
  """
17525
17963
  return pulumi.get(self, "permissions")
17526
17964
 
@@ -17528,7 +17966,7 @@ class OrganizationPermissionPermission(dict):
17528
17966
  @pulumi.getter(name="principalId")
17529
17967
  def principal_id(self) -> str:
17530
17968
  """
17531
- ID of the user or group.
17969
+ ID of the user or group to grant permissions to. Only active users who have accepted an [invite](https://aiven.io/docs/platform/howto/manage-org-users) to join the organization can be granted permissions.
17532
17970
  """
17533
17971
  return pulumi.get(self, "principal_id")
17534
17972
 
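The permissions list above grew from a handful of roles to the full set of organization-, project- and service-scoped permissions. A hedged sketch of granting two of them to a user group; the resource-level arguments (`organization_id`, `resource_id`, `resource_type`) and their values are assumed from the provider's organization permission schema and are placeholders:

```python
import pulumi_aiven as aiven

# Sketch: give a user group read access to a project's services and logs.
project_access = aiven.OrganizationPermission(
    "example-project-access",
    organization_id="org1a2b3c4d5e6",  # placeholder
    resource_id="my-project",          # placeholder project name
    resource_type="project",           # assumed resource type value
    permissions=[
        aiven.OrganizationPermissionPermissionArgs(
            principal_id="ug1a2b3c4d5e6",  # placeholder user group ID
            principal_type="user_group",   # Enum: user, user_group
            permissions=[
                "project:services:read",
                "service:logs:read",
            ],
        ),
    ],
)
```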
@@ -18213,7 +18651,7 @@ class PgPgUserConfig(dict):
18213
18651
  :param str service_to_fork_from: Name of another service to fork from. This has effect only when a new service is being created. Example: `anotherservicename`.
18214
18652
  :param float shared_buffers_percentage: Percentage of total RAM that the database server uses for shared memory buffers. Valid range is 20-60 (float), which corresponds to 20% - 60%. This setting adjusts the shared_buffers configuration value. Example: `41.5`.
18215
18653
  :param bool static_ips: Use static public IP addresses.
18216
- :param str synchronous_replication: Enum: `quorum`, `off`. Synchronous replication type. Note that the service plan also needs to support synchronous replication.
18654
+ :param str synchronous_replication: Enum: `off`, `quorum`. Synchronous replication type. Note that the service plan also needs to support synchronous replication.
18217
18655
  :param 'PgPgUserConfigTimescaledbArgs' timescaledb: System-wide settings for the timescaledb extension
18218
18656
  :param str variant: Enum: `aiven`, `timescale`. Variant of the PostgreSQL service, may affect the features that are exposed by default.
18219
18657
  :param int work_mem: Sets the maximum amount of memory to be used by a query operation (such as a sort or hash table) before writing to temporary disk files, in MB. Default is 1MB + 0.075% of total RAM (up to 32MB). Example: `4`.
@@ -18515,7 +18953,7 @@ class PgPgUserConfig(dict):
18515
18953
  @pulumi.getter(name="synchronousReplication")
18516
18954
  def synchronous_replication(self) -> Optional[str]:
18517
18955
  """
18518
- Enum: `quorum`, `off`. Synchronous replication type. Note that the service plan also needs to support synchronous replication.
18956
+ Enum: `off`, `quorum`. Synchronous replication type. Note that the service plan also needs to support synchronous replication.
18519
18957
  """
18520
18958
  return pulumi.get(self, "synchronous_replication")
18521
18959
 
@@ -18886,8 +19324,8 @@ class PgPgUserConfigPg(dict):
18886
19324
  :param int idle_in_transaction_session_timeout: Time out sessions with open transactions after this number of milliseconds.
18887
19325
  :param bool jit: Controls system-wide use of Just-in-Time Compilation (JIT).
18888
19326
  :param int log_autovacuum_min_duration: Causes each action executed by autovacuum to be logged if it ran for at least the specified number of milliseconds. Setting this to zero logs all autovacuum actions. Minus-one (the default) disables logging autovacuum actions.
18889
- :param str log_error_verbosity: Enum: `TERSE`, `DEFAULT`, `VERBOSE`. Controls the amount of detail written in the server log for each message that is logged.
18890
- :param str log_line_prefix: Enum: `'pid=%p,user=%u,db=%d,app=%a,client=%h '`, `'%t [%p]: [%l-1] user=%u,db=%d,app=%a,client=%h '`, `'%m [%p] %q[user=%u,db=%d,app=%a] '`, `'pid=%p,user=%u,db=%d,app=%a,client=%h,txid=%x,qid=%Q '`. Choose from one of the available log formats.
19327
+ :param str log_error_verbosity: Enum: `DEFAULT`, `TERSE`, `VERBOSE`. Controls the amount of detail written in the server log for each message that is logged.
19328
+ :param str log_line_prefix: Enum: `'%m [%p] %q[user=%u,db=%d,app=%a] '`, `'%t [%p]: [%l-1] user=%u,db=%d,app=%a,client=%h '`, `'pid=%p,user=%u,db=%d,app=%a,client=%h '`, `'pid=%p,user=%u,db=%d,app=%a,client=%h,txid=%x,qid=%Q '`. Choose from one of the available log formats.
18891
19329
  :param int log_min_duration_statement: Log statements that take more than this number of milliseconds to run, -1 disables.
18892
19330
  :param int log_temp_files: Log statements for each temporary file created larger than this number of kilobytes, -1 disables.
18893
19331
  :param int max_files_per_process: PostgreSQL maximum number of files that can be open per process.
@@ -18908,12 +19346,12 @@ class PgPgUserConfigPg(dict):
18908
19346
  :param str pg_partman_bgw_dot_role: Controls which role to use for pg_partman's scheduled background tasks. Example: `myrolename`.
18909
19347
  :param bool pg_stat_monitor_dot_pgsm_enable_query_plan: Enables or disables query plan monitoring.
18910
19348
  :param int pg_stat_monitor_dot_pgsm_max_buckets: Sets the maximum number of buckets. Example: `10`.
18911
- :param str pg_stat_statements_dot_track: Enum: `all`, `top`, `none`. Controls which statements are counted. Specify top to track top-level statements (those issued directly by clients), all to also track nested statements (such as statements invoked within functions), or none to disable statement statistics collection. The default value is top.
19349
+ :param str pg_stat_statements_dot_track: Enum: `all`, `none`, `top`. Controls which statements are counted. Specify top to track top-level statements (those issued directly by clients), all to also track nested statements (such as statements invoked within functions), or none to disable statement statistics collection. The default value is top.
18912
19350
  :param int temp_file_limit: PostgreSQL temporary file limit in KiB, -1 for unlimited. Example: `5000000`.
18913
19351
  :param str timezone: PostgreSQL service timezone. Example: `Europe/Helsinki`.
18914
19352
  :param int track_activity_query_size: Specifies the number of bytes reserved to track the currently executing command for each active session. Example: `1024`.
18915
19353
  :param str track_commit_timestamp: Enum: `off`, `on`. Record commit time of transactions.
18916
- :param str track_functions: Enum: `all`, `pl`, `none`. Enables tracking of function call counts and time used.
19354
+ :param str track_functions: Enum: `all`, `none`, `pl`. Enables tracking of function call counts and time used.
18917
19355
  :param str track_io_timing: Enum: `off`, `on`. Enables timing of database I/O calls. This parameter is off by default, because it will repeatedly query the operating system for the current time, which may cause significant overhead on some platforms.
18918
19356
  :param int wal_sender_timeout: Terminate replication connections that are inactive for longer than this amount of time, in milliseconds. Setting this value to zero disables the timeout. Example: `60000`.
18919
19357
  :param int wal_writer_delay: WAL flush interval in milliseconds. Note that setting this value to lower than the default 200ms may negatively impact performance. Example: `50`.
@@ -19165,7 +19603,7 @@ class PgPgUserConfigPg(dict):
19165
19603
  @pulumi.getter(name="logErrorVerbosity")
19166
19604
  def log_error_verbosity(self) -> Optional[str]:
19167
19605
  """
19168
- Enum: `TERSE`, `DEFAULT`, `VERBOSE`. Controls the amount of detail written in the server log for each message that is logged.
19606
+ Enum: `DEFAULT`, `TERSE`, `VERBOSE`. Controls the amount of detail written in the server log for each message that is logged.
19169
19607
  """
19170
19608
  return pulumi.get(self, "log_error_verbosity")
19171
19609
 
@@ -19173,7 +19611,7 @@ class PgPgUserConfigPg(dict):
19173
19611
  @pulumi.getter(name="logLinePrefix")
19174
19612
  def log_line_prefix(self) -> Optional[str]:
19175
19613
  """
19176
- Enum: `'pid=%p,user=%u,db=%d,app=%a,client=%h '`, `'%t [%p]: [%l-1] user=%u,db=%d,app=%a,client=%h '`, `'%m [%p] %q[user=%u,db=%d,app=%a] '`, `'pid=%p,user=%u,db=%d,app=%a,client=%h,txid=%x,qid=%Q '`. Choose from one of the available log formats.
19614
+ Enum: `'%m [%p] %q[user=%u,db=%d,app=%a] '`, `'%t [%p]: [%l-1] user=%u,db=%d,app=%a,client=%h '`, `'pid=%p,user=%u,db=%d,app=%a,client=%h '`, `'pid=%p,user=%u,db=%d,app=%a,client=%h,txid=%x,qid=%Q '`. Choose from one of the available log formats.
19177
19615
  """
19178
19616
  return pulumi.get(self, "log_line_prefix")
19179
19617
 
@@ -19341,7 +19779,7 @@ class PgPgUserConfigPg(dict):
19341
19779
  @pulumi.getter(name="pgStatStatementsDotTrack")
19342
19780
  def pg_stat_statements_dot_track(self) -> Optional[str]:
19343
19781
  """
19344
- Enum: `all`, `top`, `none`. Controls which statements are counted. Specify top to track top-level statements (those issued directly by clients), all to also track nested statements (such as statements invoked within functions), or none to disable statement statistics collection. The default value is top.
19782
+ Enum: `all`, `none`, `top`. Controls which statements are counted. Specify top to track top-level statements (those issued directly by clients), all to also track nested statements (such as statements invoked within functions), or none to disable statement statistics collection. The default value is top.
19345
19783
  """
19346
19784
  return pulumi.get(self, "pg_stat_statements_dot_track")
19347
19785
 
@@ -19381,7 +19819,7 @@ class PgPgUserConfigPg(dict):
19381
19819
  @pulumi.getter(name="trackFunctions")
19382
19820
  def track_functions(self) -> Optional[str]:
19383
19821
  """
19384
- Enum: `all`, `pl`, `none`. Enables tracking of function call counts and time used.
19822
+ Enum: `all`, `none`, `pl`. Enables tracking of function call counts and time used.
19385
19823
  """
19386
19824
  return pulumi.get(self, "track_functions")
19387
19825
 
@@ -19784,7 +20222,7 @@ class PgPgUserConfigPgbouncer(dict):
19784
20222
  """
19785
20223
  :param int autodb_idle_timeout: If the automatically created database pools have been unused this many seconds, they are freed. If 0 then timeout is disabled. (seconds). Default: `3600`.
19786
20224
  :param int autodb_max_db_connections: Do not allow more than this many server connections per database (regardless of user). Setting it to 0 means unlimited. Example: `0`.
19787
- :param str autodb_pool_mode: Enum: `session`, `transaction`, `statement`. PGBouncer pool mode. Default: `transaction`.
20225
+ :param str autodb_pool_mode: Enum: `session`, `statement`, `transaction`. PGBouncer pool mode. Default: `transaction`.
19788
20226
  :param int autodb_pool_size: If non-zero then create automatically a pool of that size per user when a pool doesn't exist. Default: `0`.
19789
20227
  :param Sequence[str] ignore_startup_parameters: List of parameters to ignore when given in startup packet.
19790
20228
  :param int max_prepared_statements: PgBouncer tracks protocol-level named prepared statements related commands sent by the client in transaction and statement pooling modes when max*prepared*statements is set to a non-zero value. Setting it to 0 disables prepared statements. max*prepared*statements defaults to 100, and its maximum is 3000. Default: `100`.
@@ -19834,7 +20272,7 @@ class PgPgUserConfigPgbouncer(dict):
19834
20272
  @pulumi.getter(name="autodbPoolMode")
19835
20273
  def autodb_pool_mode(self) -> Optional[str]:
19836
20274
  """
19837
- Enum: `session`, `transaction`, `statement`. PGBouncer pool mode. Default: `transaction`.
20275
+ Enum: `session`, `statement`, `transaction`. PGBouncer pool mode. Default: `transaction`.
19838
20276
  """
19839
20277
  return pulumi.get(self, "autodb_pool_mode")
19840
20278
 
@@ -20530,7 +20968,7 @@ class RedisRedisUserConfig(dict):
20530
20968
  :param int redis_io_threads: Set Redis IO thread count. Changing this will cause a restart of the Redis service. Example: `1`.
20531
20969
  :param int redis_lfu_decay_time: LFU maxmemory-policy counter decay time in minutes. Default: `1`.
20532
20970
  :param int redis_lfu_log_factor: Counter logarithm factor for volatile-lfu and allkeys-lfu maxmemory-policies. Default: `10`.
20533
- :param str redis_maxmemory_policy: Enum: `noeviction`, `allkeys-lru`, `volatile-lru`, `allkeys-random`, `volatile-random`, `volatile-ttl`, `volatile-lfu`, `allkeys-lfu`. Redis maxmemory-policy. Default: `noeviction`.
20971
+ :param str redis_maxmemory_policy: Enum: `allkeys-lfu`, `allkeys-lru`, `allkeys-random`, `noeviction`, `volatile-lfu`, `volatile-lru`, `volatile-random`, `volatile-ttl`. Redis maxmemory-policy. Default: `noeviction`.
20534
20972
  :param str redis_notify_keyspace_events: Set notify-keyspace-events option.
20535
20973
  :param int redis_number_of_databases: Set number of Redis databases. Changing this will cause a restart of the Redis service. Example: `16`.
20536
20974
  :param str redis_persistence: Enum: `off`, `rdb`. When persistence is `rdb`, Redis does RDB dumps each 10 minutes if any key is changed. Also RDB dumps are done according to the backup schedule for backup purposes. When persistence is `off`, no RDB dumps or backups are done, so data can be lost at any moment if the service is restarted for any reason, or if the service is powered off. Also, the service can't be forked.
@@ -20730,7 +21168,7 @@ class RedisRedisUserConfig(dict):
20730
21168
  @pulumi.getter(name="redisMaxmemoryPolicy")
20731
21169
  def redis_maxmemory_policy(self) -> Optional[str]:
20732
21170
  """
20733
- Enum: `noeviction`, `allkeys-lru`, `volatile-lru`, `allkeys-random`, `volatile-random`, `volatile-ttl`, `volatile-lfu`, `allkeys-lfu`. Redis maxmemory-policy. Default: `noeviction`.
21171
+ Enum: `allkeys-lfu`, `allkeys-lru`, `allkeys-random`, `noeviction`, `volatile-lfu`, `volatile-lru`, `volatile-random`, `volatile-ttl`. Redis maxmemory-policy. Default: `noeviction`.
20734
21172
  """
20735
21173
  return pulumi.get(self, "redis_maxmemory_policy")
20736
21174
 
@@ -21243,11 +21681,11 @@ class ServiceIntegrationClickhouseKafkaUserConfigTable(dict):
21243
21681
  thread_per_consumer: Optional[bool] = None):
21244
21682
  """
21245
21683
  :param Sequence['ServiceIntegrationClickhouseKafkaUserConfigTableColumnArgs'] columns: Table columns
21246
- :param str data_format: Enum: `Avro`, `CSV`, `JSONAsString`, `JSONCompactEachRow`, `JSONCompactStringsEachRow`, `JSONEachRow`, `JSONStringsEachRow`, `MsgPack`, `TSKV`, `TSV`, `TabSeparated`, `RawBLOB`, `AvroConfluent`, `Parquet`. Message data format. Default: `JSONEachRow`.
21684
+ :param str data_format: Enum: `Avro`, `AvroConfluent`, `CSV`, `JSONAsString`, `JSONCompactEachRow`, `JSONCompactStringsEachRow`, `JSONEachRow`, `JSONStringsEachRow`, `MsgPack`, `Parquet`, `RawBLOB`, `TSKV`, `TSV`, `TabSeparated`. Message data format. Default: `JSONEachRow`.
21247
21685
  :param str group_name: Kafka consumers group. Default: `clickhouse`.
21248
21686
  :param str name: Name of the table. Example: `events`.
21249
21687
  :param Sequence['ServiceIntegrationClickhouseKafkaUserConfigTableTopicArgs'] topics: Kafka topics
21250
- :param str auto_offset_reset: Enum: `smallest`, `earliest`, `beginning`, `largest`, `latest`, `end`. Action to take when there is no initial offset in offset store or the desired offset is out of range. Default: `earliest`.
21688
+ :param str auto_offset_reset: Enum: `beginning`, `earliest`, `end`, `largest`, `latest`, `smallest`. Action to take when there is no initial offset in offset store or the desired offset is out of range. Default: `earliest`.
21251
21689
  :param str date_time_input_format: Enum: `basic`, `best_effort`, `best_effort_us`. Method to read DateTime from text input formats. Default: `basic`.
21252
21690
  :param str handle_error_mode: Enum: `default`, `stream`. How to handle errors for Kafka engine. Default: `default`.
21253
21691
  :param int max_block_size: Number of row collected by poll(s) for flushing data from Kafka. Default: `0`.
@@ -21296,7 +21734,7 @@ class ServiceIntegrationClickhouseKafkaUserConfigTable(dict):
21296
21734
  @pulumi.getter(name="dataFormat")
21297
21735
  def data_format(self) -> str:
21298
21736
  """
21299
- Enum: `Avro`, `CSV`, `JSONAsString`, `JSONCompactEachRow`, `JSONCompactStringsEachRow`, `JSONEachRow`, `JSONStringsEachRow`, `MsgPack`, `TSKV`, `TSV`, `TabSeparated`, `RawBLOB`, `AvroConfluent`, `Parquet`. Message data format. Default: `JSONEachRow`.
21737
+ Enum: `Avro`, `AvroConfluent`, `CSV`, `JSONAsString`, `JSONCompactEachRow`, `JSONCompactStringsEachRow`, `JSONEachRow`, `JSONStringsEachRow`, `MsgPack`, `Parquet`, `RawBLOB`, `TSKV`, `TSV`, `TabSeparated`. Message data format. Default: `JSONEachRow`.
21300
21738
  """
21301
21739
  return pulumi.get(self, "data_format")
21302
21740
 
@@ -21328,7 +21766,7 @@ class ServiceIntegrationClickhouseKafkaUserConfigTable(dict):
21328
21766
  @pulumi.getter(name="autoOffsetReset")
21329
21767
  def auto_offset_reset(self) -> Optional[str]:
21330
21768
  """
21331
- Enum: `smallest`, `earliest`, `beginning`, `largest`, `latest`, `end`. Action to take when there is no initial offset in offset store or the desired offset is out of range. Default: `earliest`.
21769
+ Enum: `beginning`, `earliest`, `end`, `largest`, `latest`, `smallest`. Action to take when there is no initial offset in offset store or the desired offset is out of range. Default: `earliest`.
21332
21770
  """
21333
21771
  return pulumi.get(self, "auto_offset_reset")
21334
21772
 
@@ -21832,6 +22270,70 @@ class ServiceIntegrationDatadogUserConfigRedis(dict):
21832
22270
  return pulumi.get(self, "command_stats_enabled")
21833
22271
 
21834
22272
 
22273
+ @pulumi.output_type
22274
+ class ServiceIntegrationEndpointAutoscalerUserConfig(dict):
22275
+ def __init__(__self__, *,
22276
+ autoscalings: Sequence['outputs.ServiceIntegrationEndpointAutoscalerUserConfigAutoscaling']):
22277
+ """
22278
+ :param Sequence['ServiceIntegrationEndpointAutoscalerUserConfigAutoscalingArgs'] autoscalings: Configure autoscaling thresholds for a service
22279
+ """
22280
+ pulumi.set(__self__, "autoscalings", autoscalings)
22281
+
22282
+ @property
22283
+ @pulumi.getter
22284
+ def autoscalings(self) -> Sequence['outputs.ServiceIntegrationEndpointAutoscalerUserConfigAutoscaling']:
22285
+ """
22286
+ Configure autoscaling thresholds for a service
22287
+ """
22288
+ return pulumi.get(self, "autoscalings")
22289
+
22290
+
22291
+ @pulumi.output_type
22292
+ class ServiceIntegrationEndpointAutoscalerUserConfigAutoscaling(dict):
22293
+ @staticmethod
22294
+ def __key_warning(key: str):
22295
+ suggest = None
22296
+ if key == "capGb":
22297
+ suggest = "cap_gb"
22298
+
22299
+ if suggest:
22300
+ pulumi.log.warn(f"Key '{key}' not found in ServiceIntegrationEndpointAutoscalerUserConfigAutoscaling. Access the value via the '{suggest}' property getter instead.")
22301
+
22302
+ def __getitem__(self, key: str) -> Any:
22303
+ ServiceIntegrationEndpointAutoscalerUserConfigAutoscaling.__key_warning(key)
22304
+ return super().__getitem__(key)
22305
+
22306
+ def get(self, key: str, default = None) -> Any:
22307
+ ServiceIntegrationEndpointAutoscalerUserConfigAutoscaling.__key_warning(key)
22308
+ return super().get(key, default)
22309
+
22310
+ def __init__(__self__, *,
22311
+ cap_gb: int,
22312
+ type: str):
22313
+ """
22314
+ :param int cap_gb: The maximum total disk size (in gb) to allow autoscaler to scale up to. Example: `300`.
22315
+ :param str type: Enum: `autoscale_disk`. Type of autoscale event.
22316
+ """
22317
+ pulumi.set(__self__, "cap_gb", cap_gb)
22318
+ pulumi.set(__self__, "type", type)
22319
+
22320
+ @property
22321
+ @pulumi.getter(name="capGb")
22322
+ def cap_gb(self) -> int:
22323
+ """
22324
+ The maximum total disk size (in gb) to allow autoscaler to scale up to. Example: `300`.
22325
+ """
22326
+ return pulumi.get(self, "cap_gb")
22327
+
22328
+ @property
22329
+ @pulumi.getter
22330
+ def type(self) -> str:
22331
+ """
22332
+ Enum: `autoscale_disk`. Type of autoscale event.
22333
+ """
22334
+ return pulumi.get(self, "type")
22335
+
22336
+
21835
22337
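The two classes above back the new autoscaler integration endpoint. A minimal sketch of creating one; the endpoint_type value and the autoscaler_user_config argument name are assumptions, while autoscalings, type and cap_gb come from the classes shown here:

import pulumi_aiven as aiven

aiven.ServiceIntegrationEndpoint(
    "disk-autoscaler",
    project="my-project",                # placeholder
    endpoint_name="disk-autoscaler",
    endpoint_type="autoscaler",          # assumed endpoint type value
    autoscaler_user_config={
        "autoscalings": [{
            "type": "autoscale_disk",    # only enum value listed above
            "cap_gb": 300,               # upper bound for autoscaled disk, in GB
        }],
    })

To take effect, the endpoint still needs to be attached to a service with an aiven.ServiceIntegration resource.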
  @pulumi.output_type
21836
22338
  class ServiceIntegrationEndpointDatadogUserConfig(dict):
21837
22339
  @staticmethod
@@ -21843,6 +22345,8 @@ class ServiceIntegrationEndpointDatadogUserConfig(dict):
21843
22345
  suggest = "datadog_tags"
21844
22346
  elif key == "disableConsumerStats":
21845
22347
  suggest = "disable_consumer_stats"
22348
+ elif key == "extraTagsPrefix":
22349
+ suggest = "extra_tags_prefix"
21846
22350
  elif key == "kafkaConsumerCheckInstances":
21847
22351
  suggest = "kafka_consumer_check_instances"
21848
22352
  elif key == "kafkaConsumerStatsTimeout":
@@ -21865,6 +22369,7 @@ class ServiceIntegrationEndpointDatadogUserConfig(dict):
21865
22369
  datadog_api_key: str,
21866
22370
  datadog_tags: Optional[Sequence['outputs.ServiceIntegrationEndpointDatadogUserConfigDatadogTag']] = None,
21867
22371
  disable_consumer_stats: Optional[bool] = None,
22372
+ extra_tags_prefix: Optional[str] = None,
21868
22373
  kafka_consumer_check_instances: Optional[int] = None,
21869
22374
  kafka_consumer_stats_timeout: Optional[int] = None,
21870
22375
  max_partition_contexts: Optional[int] = None,
@@ -21873,16 +22378,19 @@ class ServiceIntegrationEndpointDatadogUserConfig(dict):
21873
22378
  :param str datadog_api_key: Datadog API key. Example: `848f30907c15c55d601fe45487cce9b6`.
21874
22379
  :param Sequence['ServiceIntegrationEndpointDatadogUserConfigDatadogTagArgs'] datadog_tags: Custom tags provided by user
21875
22380
  :param bool disable_consumer_stats: Disable consumer group metrics.
22381
+ :param str extra_tags_prefix: Extra tags prefix. Defaults to aiven.
21876
22382
  :param int kafka_consumer_check_instances: Number of separate instances to fetch kafka consumer statistics with. Example: `8`.
21877
22383
  :param int kafka_consumer_stats_timeout: Number of seconds that datadog will wait to get consumer statistics from brokers. Example: `60`.
21878
22384
  :param int max_partition_contexts: Maximum number of partition contexts to send. Example: `32000`.
21879
- :param str site: Enum: `datadoghq.com`, `datadoghq.eu`, `us3.datadoghq.com`, `us5.datadoghq.com`, `ddog-gov.com`, `ap1.datadoghq.com`. Datadog intake site. Defaults to datadoghq.com.
22385
+ :param str site: Enum: `ap1.datadoghq.com`, `datadoghq.com`, `datadoghq.eu`, `ddog-gov.com`, `us3.datadoghq.com`, `us5.datadoghq.com`. Datadog intake site. Defaults to datadoghq.com.
21880
22386
  """
21881
22387
  pulumi.set(__self__, "datadog_api_key", datadog_api_key)
21882
22388
  if datadog_tags is not None:
21883
22389
  pulumi.set(__self__, "datadog_tags", datadog_tags)
21884
22390
  if disable_consumer_stats is not None:
21885
22391
  pulumi.set(__self__, "disable_consumer_stats", disable_consumer_stats)
22392
+ if extra_tags_prefix is not None:
22393
+ pulumi.set(__self__, "extra_tags_prefix", extra_tags_prefix)
21886
22394
  if kafka_consumer_check_instances is not None:
21887
22395
  pulumi.set(__self__, "kafka_consumer_check_instances", kafka_consumer_check_instances)
21888
22396
  if kafka_consumer_stats_timeout is not None:
@@ -21916,6 +22424,14 @@ class ServiceIntegrationEndpointDatadogUserConfig(dict):
21916
22424
  """
21917
22425
  return pulumi.get(self, "disable_consumer_stats")
21918
22426
 
22427
+ @property
22428
+ @pulumi.getter(name="extraTagsPrefix")
22429
+ def extra_tags_prefix(self) -> Optional[str]:
22430
+ """
22431
+ Extra tags prefix. Defaults to aiven.
22432
+ """
22433
+ return pulumi.get(self, "extra_tags_prefix")
22434
+
21919
22435
  @property
21920
22436
  @pulumi.getter(name="kafkaConsumerCheckInstances")
21921
22437
  def kafka_consumer_check_instances(self) -> Optional[int]:
@@ -21944,7 +22460,7 @@ class ServiceIntegrationEndpointDatadogUserConfig(dict):
21944
22460
  @pulumi.getter
21945
22461
  def site(self) -> Optional[str]:
21946
22462
  """
21947
- Enum: `datadoghq.com`, `datadoghq.eu`, `us3.datadoghq.com`, `us5.datadoghq.com`, `ddog-gov.com`, `ap1.datadoghq.com`. Datadog intake site. Defaults to datadoghq.com.
22463
+ Enum: `ap1.datadoghq.com`, `datadoghq.com`, `datadoghq.eu`, `ddog-gov.com`, `us3.datadoghq.com`, `us5.datadoghq.com`. Datadog intake site. Defaults to datadoghq.com.
21948
22464
  """
21949
22465
  return pulumi.get(self, "site")
21950
22466
 
@@ -22478,7 +22994,7 @@ class ServiceIntegrationEndpointExternalKafkaUserConfig(dict):
22478
22994
  ssl_endpoint_identification_algorithm: Optional[str] = None):
22479
22995
  """
22480
22996
  :param str bootstrap_servers: Bootstrap servers. Example: `10.0.0.1:9092,10.0.0.2:9092`.
22481
- :param str security_protocol: Enum: `PLAINTEXT`, `SSL`, `SASL_PLAINTEXT`, `SASL_SSL`. Security protocol.
22997
+ :param str security_protocol: Enum: `PLAINTEXT`, `SASL_PLAINTEXT`, `SASL_SSL`, `SSL`. Security protocol.
22482
22998
  :param str sasl_mechanism: Enum: `PLAIN`, `SCRAM-SHA-256`, `SCRAM-SHA-512`. SASL mechanism used for connections to the Kafka server.
22483
22999
  :param str sasl_plain_password: Password for SASL PLAIN mechanism in the Kafka server. Example: `admin`.
22484
23000
  :param str sasl_plain_username: Username for SASL PLAIN mechanism in the Kafka server. Example: `admin`.
@@ -22525,7 +23041,7 @@ class ServiceIntegrationEndpointExternalKafkaUserConfig(dict):
22525
23041
  @pulumi.getter(name="securityProtocol")
22526
23042
  def security_protocol(self) -> str:
22527
23043
  """
22528
- Enum: `PLAINTEXT`, `SSL`, `SASL_PLAINTEXT`, `SASL_SSL`. Security protocol.
23044
+ Enum: `PLAINTEXT`, `SASL_PLAINTEXT`, `SASL_SSL`, `SSL`. Security protocol.
22529
23045
  """
22530
23046
  return pulumi.get(self, "security_protocol")
22531
23047
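A hedged sketch of an external Kafka endpoint using SASL over TLS; the bootstrap servers and credentials are the example values from the descriptions above:

import pulumi_aiven as aiven

aiven.ServiceIntegrationEndpoint(
    "external-kafka",
    project="my-project",                # placeholder
    endpoint_name="external-kafka",
    endpoint_type="external_kafka",
    external_kafka_user_config={
        "bootstrap_servers": "10.0.0.1:9092,10.0.0.2:9092",
        "security_protocol": "SASL_SSL",
        "sasl_mechanism": "SCRAM-SHA-256",
        "sasl_plain_username": "admin",  # example credentials only
        "sasl_plain_password": "admin",
    })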
 
@@ -22835,7 +23351,7 @@ class ServiceIntegrationEndpointExternalPostgresql(dict):
22835
23351
  :param str ssl_client_key: Client key. Example: `-----BEGIN PRIVATE KEY-----
22836
23352
  ...
22837
23353
  -----END PRIVATE KEY-----`.
22838
- :param str ssl_mode: Enum: `disable`, `allow`, `prefer`, `require`, `verify-ca`, `verify-full`. SSL mode to use for the connection. Please note that Aiven requires TLS for all connections to external PostgreSQL services. Default: `verify-full`.
23354
+ :param str ssl_mode: Enum: `allow`, `disable`, `prefer`, `require`, `verify-ca`, `verify-full`. SSL mode to use for the connection. Please note that Aiven requires TLS for all connections to external PostgreSQL services. Default: `verify-full`.
22839
23355
  :param str ssl_root_cert: SSL Root Cert. Example: `-----BEGIN CERTIFICATE-----
22840
23356
  ...
22841
23357
  -----END CERTIFICATE-----
@@ -22922,7 +23438,7 @@ class ServiceIntegrationEndpointExternalPostgresql(dict):
22922
23438
  @pulumi.getter(name="sslMode")
22923
23439
  def ssl_mode(self) -> Optional[str]:
22924
23440
  """
22925
- Enum: `disable`, `allow`, `prefer`, `require`, `verify-ca`, `verify-full`. SSL mode to use for the connection. Please note that Aiven requires TLS for all connections to external PostgreSQL services. Default: `verify-full`.
23441
+ Enum: `allow`, `disable`, `prefer`, `require`, `verify-ca`, `verify-full`. SSL mode to use for the connection. Please note that Aiven requires TLS for all connections to external PostgreSQL services. Default: `verify-full`.
22926
23442
  """
22927
23443
  return pulumi.get(self, "ssl_mode")
22928
23444
 
@@ -22938,6 +23454,70 @@ class ServiceIntegrationEndpointExternalPostgresql(dict):
22938
23454
  return pulumi.get(self, "ssl_root_cert")
22939
23455
 
22940
23456
 
23457
+ @pulumi.output_type
23458
+ class ServiceIntegrationEndpointExternalPrometheusUserConfig(dict):
23459
+ @staticmethod
23460
+ def __key_warning(key: str):
23461
+ suggest = None
23462
+ if key == "basicAuthPassword":
23463
+ suggest = "basic_auth_password"
23464
+ elif key == "basicAuthUsername":
23465
+ suggest = "basic_auth_username"
23466
+ elif key == "serviceUri":
23467
+ suggest = "service_uri"
23468
+
23469
+ if suggest:
23470
+ pulumi.log.warn(f"Key '{key}' not found in ServiceIntegrationEndpointExternalPrometheusUserConfig. Access the value via the '{suggest}' property getter instead.")
23471
+
23472
+ def __getitem__(self, key: str) -> Any:
23473
+ ServiceIntegrationEndpointExternalPrometheusUserConfig.__key_warning(key)
23474
+ return super().__getitem__(key)
23475
+
23476
+ def get(self, key: str, default = None) -> Any:
23477
+ ServiceIntegrationEndpointExternalPrometheusUserConfig.__key_warning(key)
23478
+ return super().get(key, default)
23479
+
23480
+ def __init__(__self__, *,
23481
+ basic_auth_password: Optional[str] = None,
23482
+ basic_auth_username: Optional[str] = None,
23483
+ service_uri: Optional[str] = None):
23484
+ """
23485
+ :param str basic_auth_password: Prometheus basic authentication password. Example: `fhyFNBjj3R`.
23486
+ :param str basic_auth_username: Prometheus basic authentication username. Example: `prom4851`.
23487
+ :param str service_uri: Prometheus enabled write endpoint. Example: `https://write.example.com/`.
23488
+ """
23489
+ if basic_auth_password is not None:
23490
+ pulumi.set(__self__, "basic_auth_password", basic_auth_password)
23491
+ if basic_auth_username is not None:
23492
+ pulumi.set(__self__, "basic_auth_username", basic_auth_username)
23493
+ if service_uri is not None:
23494
+ pulumi.set(__self__, "service_uri", service_uri)
23495
+
23496
+ @property
23497
+ @pulumi.getter(name="basicAuthPassword")
23498
+ def basic_auth_password(self) -> Optional[str]:
23499
+ """
23500
+ Prometheus basic authentication password. Example: `fhyFNBjj3R`.
23501
+ """
23502
+ return pulumi.get(self, "basic_auth_password")
23503
+
23504
+ @property
23505
+ @pulumi.getter(name="basicAuthUsername")
23506
+ def basic_auth_username(self) -> Optional[str]:
23507
+ """
23508
+ Prometheus basic authentication username. Example: `prom4851`.
23509
+ """
23510
+ return pulumi.get(self, "basic_auth_username")
23511
+
23512
+ @property
23513
+ @pulumi.getter(name="serviceUri")
23514
+ def service_uri(self) -> Optional[str]:
23515
+ """
23516
+ Prometheus enabled write endpoint. Example: `https://write.example.com/`.
23517
+ """
23518
+ return pulumi.get(self, "service_uri")
23519
+
23520
+
22941
23521
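The class above introduces the external Prometheus (remote write) endpoint. A minimal sketch, where the endpoint_type value and the external_prometheus_user_config argument name are assumptions and the field values reuse the examples above:

import pulumi_aiven as aiven

aiven.ServiceIntegrationEndpoint(
    "external-prometheus",
    project="my-project",                     # placeholder
    endpoint_name="remote-write",
    endpoint_type="external_prometheus",      # assumed endpoint type value
    external_prometheus_user_config={
        "service_uri": "https://write.example.com/",
        "basic_auth_username": "prom4851",
        "basic_auth_password": "fhyFNBjj3R",
    })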
  @pulumi.output_type
22942
23522
  class ServiceIntegrationEndpointExternalSchemaRegistryUserConfig(dict):
22943
23523
  @staticmethod
@@ -22965,7 +23545,7 @@ class ServiceIntegrationEndpointExternalSchemaRegistryUserConfig(dict):
22965
23545
  basic_auth_password: Optional[str] = None,
22966
23546
  basic_auth_username: Optional[str] = None):
22967
23547
  """
22968
- :param str authentication: Enum: `none`, `basic`. Authentication method.
23548
+ :param str authentication: Enum: `basic`, `none`. Authentication method.
22969
23549
  :param str url: Schema Registry URL. Example: `https://schema-registry.kafka.company.com:28419`.
22970
23550
  :param str basic_auth_password: Basic authentication password. Example: `Zm9vYg==`.
22971
23551
  :param str basic_auth_username: Basic authentication user name. Example: `avnadmin`.
@@ -22981,7 +23561,7 @@ class ServiceIntegrationEndpointExternalSchemaRegistryUserConfig(dict):
22981
23561
  @pulumi.getter
22982
23562
  def authentication(self) -> str:
22983
23563
  """
22984
- Enum: `none`, `basic`. Authentication method.
23564
+ Enum: `basic`, `none`. Authentication method.
22985
23565
  """
22986
23566
  return pulumi.get(self, "authentication")
22987
23567
 
@@ -23141,7 +23721,7 @@ class ServiceIntegrationEndpointRsyslogUserConfig(dict):
23141
23721
  max_message_size: Optional[int] = None,
23142
23722
  sd: Optional[str] = None):
23143
23723
  """
23144
- :param str format: Enum: `rfc5424`, `rfc3164`, `custom`. Message format. Default: `rfc5424`.
23724
+ :param str format: Enum: `custom`, `rfc3164`, `rfc5424`. Message format. Default: `rfc5424`.
23145
23725
  :param int port: Rsyslog server port. Default: `514`.
23146
23726
  :param str server: Rsyslog server IP address or hostname. Example: `logs.example.com`.
23147
23727
  :param bool tls: Require TLS. Default: `true`.
@@ -23182,7 +23762,7 @@ class ServiceIntegrationEndpointRsyslogUserConfig(dict):
23182
23762
  @pulumi.getter
23183
23763
  def format(self) -> str:
23184
23764
  """
23185
- Enum: `rfc5424`, `rfc3164`, `custom`. Message format. Default: `rfc5424`.
23765
+ Enum: `custom`, `rfc3164`, `rfc5424`. Message format. Default: `rfc5424`.
23186
23766
  """
23187
23767
  return pulumi.get(self, "format")
23188
23768
 
@@ -23764,7 +24344,7 @@ class ServiceIntegrationKafkaMirrormakerUserConfigKafkaMirrormaker(dict):
23764
24344
  :param int consumer_max_poll_records: Set consumer max.poll.records. The default is 500. Example: `500`.
23765
24345
  :param int producer_batch_size: The batch size in bytes producer will attempt to collect before publishing to broker. Example: `1024`.
23766
24346
  :param int producer_buffer_memory: The amount of bytes producer can use for buffering data before publishing to broker. Example: `8388608`.
23767
- :param str producer_compression_type: Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
24347
+ :param str producer_compression_type: Enum: `gzip`, `lz4`, `none`, `snappy`, `zstd`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
23768
24348
  :param int producer_linger_ms: The linger time (ms) for waiting new data to arrive for publishing. Example: `100`.
23769
24349
  :param int producer_max_request_size: The maximum request size in bytes. Example: `1048576`.
23770
24350
  """
@@ -23829,7 +24409,7 @@ class ServiceIntegrationKafkaMirrormakerUserConfigKafkaMirrormaker(dict):
23829
24409
  @pulumi.getter(name="producerCompressionType")
23830
24410
  def producer_compression_type(self) -> Optional[str]:
23831
24411
  """
23832
- Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
24412
+ Enum: `gzip`, `lz4`, `none`, `snappy`, `zstd`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
23833
24413
  """
23834
24414
  return pulumi.get(self, "producer_compression_type")
23835
24415
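These producer settings are applied per replication flow through the kafka_mirrormaker integration; a rough sketch under the assumption that the services named below already exist:

import pulumi_aiven as aiven

aiven.ServiceIntegration(
    "mirror-source",
    project="my-project",                     # placeholder
    integration_type="kafka_mirrormaker",
    source_service_name="source-kafka",       # assumed existing Kafka service
    destination_service_name="mirrormaker",   # assumed existing MirrorMaker service
    kafka_mirrormaker_user_config={
        "cluster_alias": "source",
        "kafka_mirrormaker": {
            "producer_compression_type": "zstd",  # enum shown above; none disables compression
            "producer_linger_ms": 100,
        },
    })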
 
@@ -24818,10 +25398,18 @@ class ThanosThanosUserConfig(dict):
24818
25398
  suggest = "ip_filters"
24819
25399
  elif key == "objectStorageUsageAlertThresholdGb":
24820
25400
  suggest = "object_storage_usage_alert_threshold_gb"
25401
+ elif key == "privateAccess":
25402
+ suggest = "private_access"
25403
+ elif key == "privatelinkAccess":
25404
+ suggest = "privatelink_access"
24821
25405
  elif key == "publicAccess":
24822
25406
  suggest = "public_access"
24823
25407
  elif key == "queryFrontend":
24824
25408
  suggest = "query_frontend"
25409
+ elif key == "receiverIngesting":
25410
+ suggest = "receiver_ingesting"
25411
+ elif key == "receiverRouting":
25412
+ suggest = "receiver_routing"
24825
25413
  elif key == "serviceLog":
24826
25414
  suggest = "service_log"
24827
25415
  elif key == "staticIps":
@@ -24845,23 +25433,35 @@ class ThanosThanosUserConfig(dict):
24845
25433
  ip_filter_strings: Optional[Sequence[str]] = None,
24846
25434
  ip_filters: Optional[Sequence[str]] = None,
24847
25435
  object_storage_usage_alert_threshold_gb: Optional[int] = None,
25436
+ private_access: Optional['outputs.ThanosThanosUserConfigPrivateAccess'] = None,
25437
+ privatelink_access: Optional['outputs.ThanosThanosUserConfigPrivatelinkAccess'] = None,
24848
25438
  public_access: Optional['outputs.ThanosThanosUserConfigPublicAccess'] = None,
24849
25439
  query: Optional['outputs.ThanosThanosUserConfigQuery'] = None,
24850
25440
  query_frontend: Optional['outputs.ThanosThanosUserConfigQueryFrontend'] = None,
25441
+ receiver_ingesting: Optional[Mapping[str, str]] = None,
25442
+ receiver_routing: Optional[Mapping[str, str]] = None,
25443
+ ruler: Optional[Mapping[str, str]] = None,
24851
25444
  service_log: Optional[bool] = None,
24852
- static_ips: Optional[bool] = None):
25445
+ static_ips: Optional[bool] = None,
25446
+ store: Optional[Mapping[str, str]] = None):
24853
25447
  """
24854
- :param 'ThanosThanosUserConfigCompactorArgs' compactor: ThanosCompactor
25448
+ :param 'ThanosThanosUserConfigCompactorArgs' compactor: Configuration options for Thanos Compactor
24855
25449
  :param Mapping[str, str] env: Environmental variables.
24856
25450
  :param Sequence['ThanosThanosUserConfigIpFilterObjectArgs'] ip_filter_objects: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`
24857
25451
  :param Sequence[str] ip_filter_strings: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`.
24858
25452
  :param Sequence[str] ip_filters: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`.
24859
25453
  :param int object_storage_usage_alert_threshold_gb: After exceeding the limit a service alert is going to be raised (0 means not set).
25454
+ :param 'ThanosThanosUserConfigPrivateAccessArgs' private_access: Allow access to selected service ports from private networks
25455
+ :param 'ThanosThanosUserConfigPrivatelinkAccessArgs' privatelink_access: Allow access to selected service components through Privatelink
24860
25456
  :param 'ThanosThanosUserConfigPublicAccessArgs' public_access: Allow access to selected service ports from the public Internet
24861
- :param 'ThanosThanosUserConfigQueryArgs' query: ThanosQuery
24862
- :param 'ThanosThanosUserConfigQueryFrontendArgs' query_frontend: ThanosQueryFrontend
25457
+ :param 'ThanosThanosUserConfigQueryArgs' query: Configuration options for Thanos Query
25458
+ :param 'ThanosThanosUserConfigQueryFrontendArgs' query_frontend: Configuration options for Thanos Query Frontend
25459
+ :param Mapping[str, str] receiver_ingesting: Common configuration options for Thanos Receive.
25460
+ :param Mapping[str, str] receiver_routing: Configuration options for Thanos Receive Routing.
25461
+ :param Mapping[str, str] ruler: Configuration options for Thanos Ruler.
24863
25462
  :param bool service_log: Store logs for the service so that they are available in the HTTP API and console.
24864
25463
  :param bool static_ips: Use static public IP addresses.
25464
+ :param Mapping[str, str] store: Configuration options for Thanos Store.
24865
25465
  """
24866
25466
  if compactor is not None:
24867
25467
  pulumi.set(__self__, "compactor", compactor)
@@ -24875,22 +25475,34 @@ class ThanosThanosUserConfig(dict):
24875
25475
  pulumi.set(__self__, "ip_filters", ip_filters)
24876
25476
  if object_storage_usage_alert_threshold_gb is not None:
24877
25477
  pulumi.set(__self__, "object_storage_usage_alert_threshold_gb", object_storage_usage_alert_threshold_gb)
25478
+ if private_access is not None:
25479
+ pulumi.set(__self__, "private_access", private_access)
25480
+ if privatelink_access is not None:
25481
+ pulumi.set(__self__, "privatelink_access", privatelink_access)
24878
25482
  if public_access is not None:
24879
25483
  pulumi.set(__self__, "public_access", public_access)
24880
25484
  if query is not None:
24881
25485
  pulumi.set(__self__, "query", query)
24882
25486
  if query_frontend is not None:
24883
25487
  pulumi.set(__self__, "query_frontend", query_frontend)
25488
+ if receiver_ingesting is not None:
25489
+ pulumi.set(__self__, "receiver_ingesting", receiver_ingesting)
25490
+ if receiver_routing is not None:
25491
+ pulumi.set(__self__, "receiver_routing", receiver_routing)
25492
+ if ruler is not None:
25493
+ pulumi.set(__self__, "ruler", ruler)
24884
25494
  if service_log is not None:
24885
25495
  pulumi.set(__self__, "service_log", service_log)
24886
25496
  if static_ips is not None:
24887
25497
  pulumi.set(__self__, "static_ips", static_ips)
25498
+ if store is not None:
25499
+ pulumi.set(__self__, "store", store)
24888
25500
 
24889
25501
  @property
24890
25502
  @pulumi.getter
24891
25503
  def compactor(self) -> Optional['outputs.ThanosThanosUserConfigCompactor']:
24892
25504
  """
24893
- ThanosCompactor
25505
+ Configuration options for Thanos Compactor
24894
25506
  """
24895
25507
  return pulumi.get(self, "compactor")
24896
25508
 
@@ -24936,6 +25548,22 @@ class ThanosThanosUserConfig(dict):
24936
25548
  """
24937
25549
  return pulumi.get(self, "object_storage_usage_alert_threshold_gb")
24938
25550
 
25551
+ @property
25552
+ @pulumi.getter(name="privateAccess")
25553
+ def private_access(self) -> Optional['outputs.ThanosThanosUserConfigPrivateAccess']:
25554
+ """
25555
+ Allow access to selected service ports from private networks
25556
+ """
25557
+ return pulumi.get(self, "private_access")
25558
+
25559
+ @property
25560
+ @pulumi.getter(name="privatelinkAccess")
25561
+ def privatelink_access(self) -> Optional['outputs.ThanosThanosUserConfigPrivatelinkAccess']:
25562
+ """
25563
+ Allow access to selected service components through Privatelink
25564
+ """
25565
+ return pulumi.get(self, "privatelink_access")
25566
+
24939
25567
  @property
24940
25568
  @pulumi.getter(name="publicAccess")
24941
25569
  def public_access(self) -> Optional['outputs.ThanosThanosUserConfigPublicAccess']:
@@ -24948,7 +25576,7 @@ class ThanosThanosUserConfig(dict):
24948
25576
  @pulumi.getter
24949
25577
  def query(self) -> Optional['outputs.ThanosThanosUserConfigQuery']:
24950
25578
  """
24951
- ThanosQuery
25579
+ Configuration options for Thanos Query
24952
25580
  """
24953
25581
  return pulumi.get(self, "query")
24954
25582
 
@@ -24956,10 +25584,34 @@ class ThanosThanosUserConfig(dict):
24956
25584
  @pulumi.getter(name="queryFrontend")
24957
25585
  def query_frontend(self) -> Optional['outputs.ThanosThanosUserConfigQueryFrontend']:
24958
25586
  """
24959
- ThanosQueryFrontend
25587
+ Configuration options for Thanos Query Frontend
24960
25588
  """
24961
25589
  return pulumi.get(self, "query_frontend")
24962
25590
 
25591
+ @property
25592
+ @pulumi.getter(name="receiverIngesting")
25593
+ def receiver_ingesting(self) -> Optional[Mapping[str, str]]:
25594
+ """
25595
+ Common configuration options for Thanos Receive.
25596
+ """
25597
+ return pulumi.get(self, "receiver_ingesting")
25598
+
25599
+ @property
25600
+ @pulumi.getter(name="receiverRouting")
25601
+ def receiver_routing(self) -> Optional[Mapping[str, str]]:
25602
+ """
25603
+ Configuration options for Thanos Receive Routing.
25604
+ """
25605
+ return pulumi.get(self, "receiver_routing")
25606
+
25607
+ @property
25608
+ @pulumi.getter
25609
+ def ruler(self) -> Optional[Mapping[str, str]]:
25610
+ """
25611
+ Configuration options for Thanos Ruler.
25612
+ """
25613
+ return pulumi.get(self, "ruler")
25614
+
24963
25615
  @property
24964
25616
  @pulumi.getter(name="serviceLog")
24965
25617
  def service_log(self) -> Optional[bool]:
@@ -24976,6 +25628,14 @@ class ThanosThanosUserConfig(dict):
24976
25628
  """
24977
25629
  return pulumi.get(self, "static_ips")
24978
25630
 
25631
+ @property
25632
+ @pulumi.getter
25633
+ def store(self) -> Optional[Mapping[str, str]]:
25634
+ """
25635
+ Configuration options for Thanos Store.
25636
+ """
25637
+ return pulumi.get(self, "store")
25638
+
24979
25639
 
24980
25640
  @pulumi.output_type
24981
25641
  class ThanosThanosUserConfigCompactor(dict):
@@ -25043,6 +25703,106 @@ class ThanosThanosUserConfigIpFilterObject(dict):
25043
25703
  return pulumi.get(self, "description")
25044
25704
 
25045
25705
 
25706
+ @pulumi.output_type
25707
+ class ThanosThanosUserConfigPrivateAccess(dict):
25708
+ @staticmethod
25709
+ def __key_warning(key: str):
25710
+ suggest = None
25711
+ if key == "queryFrontend":
25712
+ suggest = "query_frontend"
25713
+ elif key == "receiverRouting":
25714
+ suggest = "receiver_routing"
25715
+
25716
+ if suggest:
25717
+ pulumi.log.warn(f"Key '{key}' not found in ThanosThanosUserConfigPrivateAccess. Access the value via the '{suggest}' property getter instead.")
25718
+
25719
+ def __getitem__(self, key: str) -> Any:
25720
+ ThanosThanosUserConfigPrivateAccess.__key_warning(key)
25721
+ return super().__getitem__(key)
25722
+
25723
+ def get(self, key: str, default = None) -> Any:
25724
+ ThanosThanosUserConfigPrivateAccess.__key_warning(key)
25725
+ return super().get(key, default)
25726
+
25727
+ def __init__(__self__, *,
25728
+ query_frontend: Optional[bool] = None,
25729
+ receiver_routing: Optional[bool] = None):
25730
+ """
25731
+ :param bool query_frontend: Allow clients to connect to query_frontend with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
25732
+ :param bool receiver_routing: Allow clients to connect to receiver_routing with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
25733
+ """
25734
+ if query_frontend is not None:
25735
+ pulumi.set(__self__, "query_frontend", query_frontend)
25736
+ if receiver_routing is not None:
25737
+ pulumi.set(__self__, "receiver_routing", receiver_routing)
25738
+
25739
+ @property
25740
+ @pulumi.getter(name="queryFrontend")
25741
+ def query_frontend(self) -> Optional[bool]:
25742
+ """
25743
+ Allow clients to connect to query_frontend with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
25744
+ """
25745
+ return pulumi.get(self, "query_frontend")
25746
+
25747
+ @property
25748
+ @pulumi.getter(name="receiverRouting")
25749
+ def receiver_routing(self) -> Optional[bool]:
25750
+ """
25751
+ Allow clients to connect to receiver_routing with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
25752
+ """
25753
+ return pulumi.get(self, "receiver_routing")
25754
+
25755
+
25756
+ @pulumi.output_type
25757
+ class ThanosThanosUserConfigPrivatelinkAccess(dict):
25758
+ @staticmethod
25759
+ def __key_warning(key: str):
25760
+ suggest = None
25761
+ if key == "queryFrontend":
25762
+ suggest = "query_frontend"
25763
+ elif key == "receiverRouting":
25764
+ suggest = "receiver_routing"
25765
+
25766
+ if suggest:
25767
+ pulumi.log.warn(f"Key '{key}' not found in ThanosThanosUserConfigPrivatelinkAccess. Access the value via the '{suggest}' property getter instead.")
25768
+
25769
+ def __getitem__(self, key: str) -> Any:
25770
+ ThanosThanosUserConfigPrivatelinkAccess.__key_warning(key)
25771
+ return super().__getitem__(key)
25772
+
25773
+ def get(self, key: str, default = None) -> Any:
25774
+ ThanosThanosUserConfigPrivatelinkAccess.__key_warning(key)
25775
+ return super().get(key, default)
25776
+
25777
+ def __init__(__self__, *,
25778
+ query_frontend: Optional[bool] = None,
25779
+ receiver_routing: Optional[bool] = None):
25780
+ """
25781
+ :param bool query_frontend: Enable query_frontend.
25782
+ :param bool receiver_routing: Enable receiver_routing.
25783
+ """
25784
+ if query_frontend is not None:
25785
+ pulumi.set(__self__, "query_frontend", query_frontend)
25786
+ if receiver_routing is not None:
25787
+ pulumi.set(__self__, "receiver_routing", receiver_routing)
25788
+
25789
+ @property
25790
+ @pulumi.getter(name="queryFrontend")
25791
+ def query_frontend(self) -> Optional[bool]:
25792
+ """
25793
+ Enable query_frontend.
25794
+ """
25795
+ return pulumi.get(self, "query_frontend")
25796
+
25797
+ @property
25798
+ @pulumi.getter(name="receiverRouting")
25799
+ def receiver_routing(self) -> Optional[bool]:
25800
+ """
25801
+ Enable receiver_routing.
25802
+ """
25803
+ return pulumi.get(self, "receiver_routing")
25804
+
25805
+
25046
25806
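The new private_access and privatelink_access blocks documented above are set through thanos_user_config on the aiven.Thanos resource; a brief sketch with placeholder project, cloud and plan values:

import pulumi_aiven as aiven

aiven.Thanos(
    "metrics",
    project="my-project",                 # placeholder
    cloud_name="google-europe-west1",
    plan="startup-4",                     # placeholder plan
    service_name="my-thanos",
    thanos_user_config={
        "private_access": {
            "query_frontend": True,
            "receiver_routing": True,
        },
        "privatelink_access": {
            "query_frontend": True,
            "receiver_routing": False,
        },
    })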
  @pulumi.output_type
25047
25807
  class ThanosThanosUserConfigPublicAccess(dict):
25048
25808
  @staticmethod
@@ -25702,7 +26462,7 @@ class ValkeyValkeyUserConfig(dict):
25702
26462
  :param int valkey_io_threads: Set Valkey IO thread count. Changing this will cause a restart of the Valkey service. Example: `1`.
25703
26463
  :param int valkey_lfu_decay_time: LFU maxmemory-policy counter decay time in minutes. Default: `1`.
25704
26464
  :param int valkey_lfu_log_factor: Counter logarithm factor for volatile-lfu and allkeys-lfu maxmemory-policies. Default: `10`.
25705
- :param str valkey_maxmemory_policy: Enum: `noeviction`, `allkeys-lru`, `volatile-lru`, `allkeys-random`, `volatile-random`, `volatile-ttl`, `volatile-lfu`, `allkeys-lfu`. Valkey maxmemory-policy. Default: `noeviction`.
26465
+ :param str valkey_maxmemory_policy: Enum: `allkeys-lfu`, `allkeys-lru`, `allkeys-random`, `noeviction`, `volatile-lfu`, `volatile-lru`, `volatile-random`, `volatile-ttl`. Valkey maxmemory-policy. Default: `noeviction`.
25706
26466
  :param str valkey_notify_keyspace_events: Set notify-keyspace-events option.
25707
26467
  :param int valkey_number_of_databases: Set number of Valkey databases. Changing this will cause a restart of the Valkey service. Example: `16`.
25708
26468
  :param str valkey_persistence: Enum: `off`, `rdb`. When persistence is `rdb`, Valkey does RDB dumps each 10 minutes if any key is changed. Also RDB dumps are done according to backup schedule for backup purposes. When persistence is `off`, no RDB dumps and backups are done, so data can be lost at any moment if service is restarted for any reason, or if service is powered off. Also service can't be forked.
@@ -25920,7 +26680,7 @@ class ValkeyValkeyUserConfig(dict):
25920
26680
  @pulumi.getter(name="valkeyMaxmemoryPolicy")
25921
26681
  def valkey_maxmemory_policy(self) -> Optional[str]:
25922
26682
  """
25923
- Enum: `noeviction`, `allkeys-lru`, `volatile-lru`, `allkeys-random`, `volatile-random`, `volatile-ttl`, `volatile-lfu`, `allkeys-lfu`. Valkey maxmemory-policy. Default: `noeviction`.
26683
+ Enum: `allkeys-lfu`, `allkeys-lru`, `allkeys-random`, `noeviction`, `volatile-lfu`, `volatile-lru`, `volatile-random`, `volatile-ttl`. Valkey maxmemory-policy. Default: `noeviction`.
25924
26684
  """
25925
26685
  return pulumi.get(self, "valkey_maxmemory_policy")
25926
26686
 
@@ -26385,7 +27145,6 @@ class GetCassandaCassandraUserConfigResult(dict):
26385
27145
 
26386
27146
  @property
26387
27147
  @pulumi.getter(name="additionalBackupRegions")
26388
- @_utilities.deprecated("""This property is deprecated.""")
26389
27148
  def additional_backup_regions(self) -> Optional[str]:
26390
27149
  """
26391
27150
  Additional Cloud Regions for Backup Replication.
@@ -26910,7 +27669,6 @@ class GetCassandraCassandraUserConfigResult(dict):
26910
27669
 
26911
27670
  @property
26912
27671
  @pulumi.getter(name="additionalBackupRegions")
26913
- @_utilities.deprecated("""This property is deprecated.""")
26914
27672
  def additional_backup_regions(self) -> Optional[str]:
26915
27673
  """
26916
27674
  Additional Cloud Regions for Backup Replication.
@@ -28048,7 +28806,7 @@ class GetDragonflyDragonflyUserConfigResult(dict):
28048
28806
  static_ips: Optional[bool] = None):
28049
28807
  """
28050
28808
  :param bool cache_mode: Evict entries when getting close to maxmemory limit. Default: `false`.
28051
- :param str dragonfly_persistence: Enum: `off`, `rdb`, `dfs`. When persistence is `rdb` or `dfs`, Dragonfly does RDB or DFS dumps every 10 minutes. Dumps are done according to the backup schedule for backup purposes. When persistence is `off`, no RDB/DFS dumps or backups are done, so data can be lost at any moment if the service is restarted for any reason, or if the service is powered off. Also, the service can't be forked.
28809
+ :param str dragonfly_persistence: Enum: `dfs`, `off`, `rdb`. When persistence is `rdb` or `dfs`, Dragonfly does RDB or DFS dumps every 10 minutes. Dumps are done according to the backup schedule for backup purposes. When persistence is `off`, no RDB/DFS dumps or backups are done, so data can be lost at any moment if the service is restarted for any reason, or if the service is powered off. Also, the service can't be forked.
28052
28810
  :param bool dragonfly_ssl: Require SSL to access Dragonfly. Default: `true`.
28053
28811
  :param Sequence['GetDragonflyDragonflyUserConfigIpFilterObjectArgs'] ip_filter_objects: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`
28054
28812
  :param Sequence[str] ip_filter_strings: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`.
@@ -28106,7 +28864,7 @@ class GetDragonflyDragonflyUserConfigResult(dict):
28106
28864
  @pulumi.getter(name="dragonflyPersistence")
28107
28865
  def dragonfly_persistence(self) -> Optional[str]:
28108
28866
  """
28109
- Enum: `off`, `rdb`, `dfs`. When persistence is `rdb` or `dfs`, Dragonfly does RDB or DFS dumps every 10 minutes. Dumps are done according to the backup schedule for backup purposes. When persistence is `off`, no RDB/DFS dumps or backups are done, so data can be lost at any moment if the service is restarted for any reason, or if the service is powered off. Also, the service can't be forked.
28867
+ Enum: `dfs`, `off`, `rdb`. When persistence is `rdb` or `dfs`, Dragonfly does RDB or DFS dumps every 10 minutes. Dumps are done according to the backup schedule for backup purposes. When persistence is `off`, no RDB/DFS dumps or backups are done, so data can be lost at any moment if the service is restarted for any reason, or if the service is powered off. Also, the service can't be forked.
28110
28868
  """
28111
28869
  return pulumi.get(self, "dragonfly_persistence")
28112
28870
 
@@ -29171,10 +29929,10 @@ class GetGrafanaGrafanaUserConfigResult(dict):
29171
29929
  wal: Optional[bool] = None):
29172
29930
  """
29173
29931
  :param str additional_backup_regions: Additional Cloud Regions for Backup Replication.
29174
- :param bool alerting_enabled: Enable or disable Grafana legacy alerting functionality. This should not be enabled with unified_alerting_enabled.
29932
+ :param bool alerting_enabled: Setting has no effect with Grafana 11 and onward. Enable or disable Grafana legacy alerting functionality. This should not be enabled with unified_alerting_enabled.
29175
29933
  :param str alerting_error_or_timeout: Enum: `alerting`, `keep_state`. Default error or timeout setting for new alerting rules.
29176
29934
  :param int alerting_max_annotations_to_keep: Max number of alert annotations that Grafana stores. 0 (default) keeps all alert annotations. Example: `0`.
29177
- :param str alerting_nodata_or_nullvalues: Enum: `alerting`, `no_data`, `keep_state`, `ok`. Default value for 'no data or null values' for new alerting rules.
29935
+ :param str alerting_nodata_or_nullvalues: Enum: `alerting`, `keep_state`, `no_data`, `ok`. Default value for 'no data or null values' for new alerting rules.
29178
29936
  :param bool allow_embedding: Allow embedding Grafana dashboards with iframe/frame/object/embed tags. Disabled by default to limit impact of clickjacking.
29179
29937
  :param 'GetGrafanaGrafanaUserConfigAuthAzureadArgs' auth_azuread: Azure AD OAuth integration
29180
29938
  :param bool auth_basic_enabled: Enable or disable basic authentication form, used by Grafana built-in login.
@@ -29182,9 +29940,9 @@ class GetGrafanaGrafanaUserConfigResult(dict):
29182
29940
  :param 'GetGrafanaGrafanaUserConfigAuthGithubArgs' auth_github: Github Auth integration
29183
29941
  :param 'GetGrafanaGrafanaUserConfigAuthGitlabArgs' auth_gitlab: GitLab Auth integration
29184
29942
  :param 'GetGrafanaGrafanaUserConfigAuthGoogleArgs' auth_google: Google Auth integration
29185
- :param str cookie_samesite: Enum: `lax`, `strict`, `none`. Cookie SameSite attribute: `strict` prevents sending cookie for cross-site requests, effectively disabling direct linking from other sites to Grafana. `lax` is the default value.
29943
+ :param str cookie_samesite: Enum: `lax`, `none`, `strict`. Cookie SameSite attribute: `strict` prevents sending cookie for cross-site requests, effectively disabling direct linking from other sites to Grafana. `lax` is the default value.
29186
29944
  :param str custom_domain: Serve the web frontend using a custom CNAME pointing to the Aiven DNS name. Example: `grafana.example.org`.
29187
- :param bool dashboard_previews_enabled: This feature is new in Grafana 9 and is quite resource intensive. It may cause low-end plans to work more slowly while the dashboard previews are rendering.
29945
+ :param bool dashboard_previews_enabled: Enable browsing of dashboards in grid (pictures) mode. This feature is new in Grafana 9 and is quite resource intensive. It may cause low-end plans to work more slowly while the dashboard previews are rendering.
29188
29946
  :param str dashboards_min_refresh_interval: Signed sequence of decimal numbers, followed by a unit suffix (ms, s, m, h, d), e.g. 30s, 1h. Example: `5s`.
29189
29947
  :param int dashboards_versions_to_keep: Dashboard versions to keep per dashboard. Example: `20`.
29190
29948
  :param bool dataproxy_send_user_header: Send `X-Grafana-User` header to data source.
@@ -29197,7 +29955,7 @@ class GetGrafanaGrafanaUserConfigResult(dict):
29197
29955
  :param Sequence['GetGrafanaGrafanaUserConfigIpFilterObjectArgs'] ip_filter_objects: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`
29198
29956
  :param Sequence[str] ip_filter_strings: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`.
29199
29957
  :param Sequence[str] ip_filters: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`.
29200
- :param bool metrics_enabled: Enable Grafana /metrics endpoint.
29958
+ :param bool metrics_enabled: Enable Grafana's /metrics endpoint.
29201
29959
  :param bool oauth_allow_insecure_email_lookup: Enforce user lookup based on email instead of the unique ID provided by the IdP.
29202
29960
  :param 'GetGrafanaGrafanaUserConfigPrivateAccessArgs' private_access: Allow access to selected service ports from private networks
29203
29961
  :param 'GetGrafanaGrafanaUserConfigPrivatelinkAccessArgs' privatelink_access: Allow access to selected service components through Privatelink
@@ -29208,9 +29966,9 @@ class GetGrafanaGrafanaUserConfigResult(dict):
29208
29966
  :param str service_to_fork_from: Name of another service to fork from. This has effect only when a new service is being created. Example: `anotherservicename`.
29209
29967
  :param 'GetGrafanaGrafanaUserConfigSmtpServerArgs' smtp_server: SMTP server settings
29210
29968
  :param bool static_ips: Use static public IP addresses.
29211
- :param bool unified_alerting_enabled: Enable or disable Grafana unified alerting functionality. By default this is enabled and any legacy alerts will be migrated on upgrade to Grafana 9+. To stay on legacy alerting, set unified_alerting_enabled to false and alerting_enabled to true. See https://grafana.com/docs/grafana/latest/alerting/set-up/migrating-alerts/ for more details.
29969
+ :param bool unified_alerting_enabled: Enable or disable Grafana unified alerting functionality. By default this is enabled and any legacy alerts will be migrated on upgrade to Grafana 9+. To stay on legacy alerting, set unified_alerting_enabled to false and alerting_enabled to true. See https://grafana.com/docs/grafana/latest/alerting/ for more details.
29212
29970
  :param bool user_auto_assign_org: Auto-assign new users on signup to main organization. Defaults to false.
29213
- :param str user_auto_assign_org_role: Enum: `Viewer`, `Admin`, `Editor`. Set role for new signups. Defaults to Viewer.
29971
+ :param str user_auto_assign_org_role: Enum: `Admin`, `Editor`, `Viewer`. Set role for new signups. Defaults to Viewer.
29214
29972
  :param bool viewers_can_edit: Users with view-only permission can edit but not save dashboards.
29215
29973
  :param bool wal: Setting to enable/disable Write-Ahead Logging. The default value is false (disabled).
29216
29974
  """
@@ -29313,7 +30071,7 @@ class GetGrafanaGrafanaUserConfigResult(dict):
29313
30071
  @pulumi.getter(name="alertingEnabled")
29314
30072
  def alerting_enabled(self) -> Optional[bool]:
29315
30073
  """
29316
- Enable or disable Grafana legacy alerting functionality. This should not be enabled with unified_alerting_enabled.
30074
+ Setting has no effect with Grafana 11 and onward. Enable or disable Grafana legacy alerting functionality. This should not be enabled with unified_alerting_enabled.
29317
30075
  """
29318
30076
  return pulumi.get(self, "alerting_enabled")
29319
30077
 
@@ -29337,7 +30095,7 @@ class GetGrafanaGrafanaUserConfigResult(dict):
29337
30095
  @pulumi.getter(name="alertingNodataOrNullvalues")
29338
30096
  def alerting_nodata_or_nullvalues(self) -> Optional[str]:
29339
30097
  """
29340
- Enum: `alerting`, `no_data`, `keep_state`, `ok`. Default value for 'no data or null values' for new alerting rules.
30098
+ Enum: `alerting`, `keep_state`, `no_data`, `ok`. Default value for 'no data or null values' for new alerting rules.
29341
30099
  """
29342
30100
  return pulumi.get(self, "alerting_nodata_or_nullvalues")
29343
30101
 
@@ -29401,7 +30159,7 @@ class GetGrafanaGrafanaUserConfigResult(dict):
29401
30159
  @pulumi.getter(name="cookieSamesite")
29402
30160
  def cookie_samesite(self) -> Optional[str]:
29403
30161
  """
29404
- Enum: `lax`, `strict`, `none`. Cookie SameSite attribute: `strict` prevents sending cookie for cross-site requests, effectively disabling direct linking from other sites to Grafana. `lax` is the default value.
30162
+ Enum: `lax`, `none`, `strict`. Cookie SameSite attribute: `strict` prevents sending cookie for cross-site requests, effectively disabling direct linking from other sites to Grafana. `lax` is the default value.
29405
30163
  """
29406
30164
  return pulumi.get(self, "cookie_samesite")
29407
30165
 
@@ -29417,7 +30175,7 @@ class GetGrafanaGrafanaUserConfigResult(dict):
29417
30175
  @pulumi.getter(name="dashboardPreviewsEnabled")
29418
30176
  def dashboard_previews_enabled(self) -> Optional[bool]:
29419
30177
  """
29420
- This feature is new in Grafana 9 and is quite resource intensive. It may cause low-end plans to work more slowly while the dashboard previews are rendering.
30178
+ Enable browsing of dashboards in grid (pictures) mode. This feature is new in Grafana 9 and is quite resource intensive. It may cause low-end plans to work more slowly while the dashboard previews are rendering.
29421
30179
  """
29422
30180
  return pulumi.get(self, "dashboard_previews_enabled")
29423
30181
 
@@ -29522,7 +30280,7 @@ class GetGrafanaGrafanaUserConfigResult(dict):
29522
30280
  @pulumi.getter(name="metricsEnabled")
29523
30281
  def metrics_enabled(self) -> Optional[bool]:
29524
30282
  """
29525
- Enable Grafana /metrics endpoint.
30283
+ Enable Grafana's /metrics endpoint.
29526
30284
  """
29527
30285
  return pulumi.get(self, "metrics_enabled")
29528
30286
 
@@ -29610,7 +30368,7 @@ class GetGrafanaGrafanaUserConfigResult(dict):
29610
30368
  @pulumi.getter(name="unifiedAlertingEnabled")
29611
30369
  def unified_alerting_enabled(self) -> Optional[bool]:
29612
30370
  """
29613
- Enable or disable Grafana unified alerting functionality. By default this is enabled and any legacy alerts will be migrated on upgrade to Grafana 9+. To stay on legacy alerting, set unified_alerting_enabled to false and alerting_enabled to true. See https://grafana.com/docs/grafana/latest/alerting/set-up/migrating-alerts/ for more details.
30371
+ Enable or disable Grafana unified alerting functionality. By default this is enabled and any legacy alerts will be migrated on upgrade to Grafana 9+. To stay on legacy alerting, set unified_alerting_enabled to false and alerting_enabled to true. See https://grafana.com/docs/grafana/latest/alerting/ for more details.
29614
30372
  """
29615
30373
  return pulumi.get(self, "unified_alerting_enabled")
29616
30374
 
@@ -29626,7 +30384,7 @@ class GetGrafanaGrafanaUserConfigResult(dict):
29626
30384
  @pulumi.getter(name="userAutoAssignOrgRole")
29627
30385
  def user_auto_assign_org_role(self) -> Optional[str]:
29628
30386
  """
29629
- Enum: `Viewer`, `Admin`, `Editor`. Set role for new signups. Defaults to Viewer.
30387
+ Enum: `Admin`, `Editor`, `Viewer`. Set role for new signups. Defaults to Viewer.
29630
30388
  """
29631
30389
  return pulumi.get(self, "user_auto_assign_org_role")
29632
30390
 
@@ -29984,9 +30742,9 @@ class GetGrafanaGrafanaUserConfigAuthGitlabResult(dict):
29984
30742
  :param str client_id: Client ID from provider. Example: `b1ba0bf54a4c2c0a1c29`.
29985
30743
  :param str client_secret: Client secret from provider. Example: `bfa6gea4f129076761dcba8ce5e1e406bd83af7b`.
29986
30744
  :param bool allow_sign_up: Automatically sign-up users on successful sign-in.
29987
- :param str api_url: API URL. This only needs to be set when using self hosted GitLab. Example: `https://gitlab.com/api/v4`.
29988
- :param str auth_url: Authorization URL. This only needs to be set when using self hosted GitLab. Example: `https://gitlab.com/oauth/authorize`.
29989
- :param str token_url: Token URL. This only needs to be set when using self hosted GitLab. Example: `https://gitlab.com/oauth/token`.
30745
+ :param str api_url: This only needs to be set when using self hosted GitLab. Example: `https://gitlab.com/api/v4`.
30746
+ :param str auth_url: This only needs to be set when using self hosted GitLab. Example: `https://gitlab.com/oauth/authorize`.
30747
+ :param str token_url: This only needs to be set when using self hosted GitLab. Example: `https://gitlab.com/oauth/token`.
29990
30748
  """
29991
30749
  pulumi.set(__self__, "allowed_groups", allowed_groups)
29992
30750
  pulumi.set(__self__, "client_id", client_id)
@@ -30036,7 +30794,7 @@ class GetGrafanaGrafanaUserConfigAuthGitlabResult(dict):
30036
30794
  @pulumi.getter(name="apiUrl")
30037
30795
  def api_url(self) -> Optional[str]:
30038
30796
  """
30039
- API URL. This only needs to be set when using self hosted GitLab. Example: `https://gitlab.com/api/v4`.
30797
+ This only needs to be set when using self hosted GitLab. Example: `https://gitlab.com/api/v4`.
30040
30798
  """
30041
30799
  return pulumi.get(self, "api_url")
30042
30800
 
@@ -30044,7 +30802,7 @@ class GetGrafanaGrafanaUserConfigAuthGitlabResult(dict):
30044
30802
  @pulumi.getter(name="authUrl")
30045
30803
  def auth_url(self) -> Optional[str]:
30046
30804
  """
30047
- Authorization URL. This only needs to be set when using self hosted GitLab. Example: `https://gitlab.com/oauth/authorize`.
30805
+ This only needs to be set when using self hosted GitLab. Example: `https://gitlab.com/oauth/authorize`.
30048
30806
  """
30049
30807
  return pulumi.get(self, "auth_url")
30050
30808
 
@@ -30052,7 +30810,7 @@ class GetGrafanaGrafanaUserConfigAuthGitlabResult(dict):
30052
30810
  @pulumi.getter(name="tokenUrl")
30053
30811
  def token_url(self) -> Optional[str]:
30054
30812
  """
30055
- Token URL. This only needs to be set when using self hosted GitLab. Example: `https://gitlab.com/oauth/token`.
30813
+ This only needs to be set when using self hosted GitLab. Example: `https://gitlab.com/oauth/token`.
30056
30814
  """
30057
30815
  return pulumi.get(self, "token_url")
30058
30816
 
@@ -30222,7 +30980,7 @@ class GetGrafanaGrafanaUserConfigExternalImageStorageResult(dict):
30222
30980
  """
30223
30981
  :param str access_key: S3 access key. Requires permissions to the S3 bucket for the s3:PutObject and s3:PutObjectAcl actions. Example: `AAAAAAAAAAAAAAAAAAA`.
30224
30982
  :param str bucket_url: Bucket URL for S3. Example: `https://grafana.s3-ap-southeast-2.amazonaws.com/`.
30225
- :param str provider: Enum: `s3`. Provider type.
30983
+ :param str provider: Enum: `s3`. External image store provider.
30226
30984
  :param str secret_key: S3 secret key. Example: `AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA`.
30227
30985
  """
30228
30986
  pulumi.set(__self__, "access_key", access_key)
@@ -30250,7 +31008,7 @@ class GetGrafanaGrafanaUserConfigExternalImageStorageResult(dict):
30250
31008
  @pulumi.getter
30251
31009
  def provider(self) -> str:
30252
31010
  """
30253
- Enum: `s3`. Provider type.
31011
+ Enum: `s3`. External image store provider.
30254
31012
  """
30255
31013
  return pulumi.get(self, "provider")
30256
31014
 
@@ -30368,7 +31126,7 @@ class GetGrafanaGrafanaUserConfigSmtpServerResult(dict):
30368
31126
  :param str from_name: Name used in outgoing emails, defaults to Grafana.
30369
31127
  :param str password: Password for SMTP authentication. Example: `ein0eemeev5eeth3Ahfu`.
30370
31128
  :param bool skip_verify: Skip verifying server certificate. Defaults to false.
30371
- :param str starttls_policy: Enum: `OpportunisticStartTLS`, `MandatoryStartTLS`, `NoStartTLS`. Either OpportunisticStartTLS, MandatoryStartTLS or NoStartTLS. Default is OpportunisticStartTLS.
31129
+ :param str starttls_policy: Enum: `MandatoryStartTLS`, `NoStartTLS`, `OpportunisticStartTLS`. Either OpportunisticStartTLS, MandatoryStartTLS or NoStartTLS. Default is OpportunisticStartTLS.
30372
31130
  :param str username: Username for SMTP authentication. Example: `smtpuser`.
30373
31131
  """
30374
31132
  pulumi.set(__self__, "from_address", from_address)
@@ -30437,7 +31195,7 @@ class GetGrafanaGrafanaUserConfigSmtpServerResult(dict):
30437
31195
  @pulumi.getter(name="starttlsPolicy")
30438
31196
  def starttls_policy(self) -> Optional[str]:
30439
31197
  """
30440
- Enum: `OpportunisticStartTLS`, `MandatoryStartTLS`, `NoStartTLS`. Either OpportunisticStartTLS, MandatoryStartTLS or NoStartTLS. Default is OpportunisticStartTLS.
31198
+ Enum: `MandatoryStartTLS`, `NoStartTLS`, `OpportunisticStartTLS`. Either OpportunisticStartTLS, MandatoryStartTLS or NoStartTLS. Default is OpportunisticStartTLS.
30441
31199
  """
30442
31200
  return pulumi.get(self, "starttls_policy")
30443
31201
 
@@ -30678,6 +31436,7 @@ class GetInfluxDbInfluxdbUserConfigResult(dict):
30678
31436
  additional_backup_regions: Optional[str] = None,
30679
31437
  custom_domain: Optional[str] = None,
30680
31438
  influxdb: Optional['outputs.GetInfluxDbInfluxdbUserConfigInfluxdbResult'] = None,
31439
+ influxdb_version: Optional[str] = None,
30681
31440
  ip_filter_objects: Optional[Sequence['outputs.GetInfluxDbInfluxdbUserConfigIpFilterObjectResult']] = None,
30682
31441
  ip_filter_strings: Optional[Sequence[str]] = None,
30683
31442
  ip_filters: Optional[Sequence[str]] = None,
@@ -30693,6 +31452,7 @@ class GetInfluxDbInfluxdbUserConfigResult(dict):
30693
31452
  :param str additional_backup_regions: Additional Cloud Regions for Backup Replication.
30694
31453
  :param str custom_domain: Serve the web frontend using a custom CNAME pointing to the Aiven DNS name. Example: `grafana.example.org`.
30695
31454
  :param 'GetInfluxDbInfluxdbUserConfigInfluxdbArgs' influxdb: influxdb.conf configuration values
31455
+ :param str influxdb_version: Enum: `1.8`, and newer. InfluxDB major version. Default: `1.8`.
30696
31456
  :param Sequence['GetInfluxDbInfluxdbUserConfigIpFilterObjectArgs'] ip_filter_objects: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`
30697
31457
  :param Sequence[str] ip_filter_strings: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`.
30698
31458
  :param Sequence[str] ip_filters: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`.
@@ -30711,6 +31471,8 @@ class GetInfluxDbInfluxdbUserConfigResult(dict):
30711
31471
  pulumi.set(__self__, "custom_domain", custom_domain)
30712
31472
  if influxdb is not None:
30713
31473
  pulumi.set(__self__, "influxdb", influxdb)
31474
+ if influxdb_version is not None:
31475
+ pulumi.set(__self__, "influxdb_version", influxdb_version)
30714
31476
  if ip_filter_objects is not None:
30715
31477
  pulumi.set(__self__, "ip_filter_objects", ip_filter_objects)
30716
31478
  if ip_filter_strings is not None:
@@ -30759,6 +31521,14 @@ class GetInfluxDbInfluxdbUserConfigResult(dict):
30759
31521
  """
30760
31522
  return pulumi.get(self, "influxdb")
30761
31523
 
31524
+ @property
31525
+ @pulumi.getter(name="influxdbVersion")
31526
+ def influxdb_version(self) -> Optional[str]:
31527
+ """
31528
+ Enum: `1.8`, and newer. InfluxDB major version. Default: `1.8`.
31529
+ """
31530
+ return pulumi.get(self, "influxdb_version")
31531
+
30762
31532
  @property
30763
31533
  @pulumi.getter(name="ipFilterObjects")
30764
31534
  def ip_filter_objects(self) -> Optional[Sequence['outputs.GetInfluxDbInfluxdbUserConfigIpFilterObjectResult']]:
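The new `influxdb_version` field surfaces the InfluxDB major version in the user config. A minimal sketch of pinning it on the resource side, assuming `influxdb_user_config` mirrors this shape; every name below is a placeholder.

import pulumi_aiven as aiven

# Hypothetical: pin the InfluxDB major version explicitly.
influx = aiven.InfluxDb(
    "example-influxdb",
    project="my-project",                 # placeholder
    cloud_name="google-europe-west1",     # placeholder
    plan="startup-4",                     # placeholder
    service_name="example-influxdb",
    influxdb_user_config={
        "influxdb_version": "1.8",        # default per the docstring above
    },
)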
@@ -30973,12 +31743,16 @@ class GetInfluxDbInfluxdbUserConfigIpFilterObjectResult(dict):
30973
31743
  @pulumi.output_type
30974
31744
  class GetInfluxDbInfluxdbUserConfigPrivateAccessResult(dict):
30975
31745
  def __init__(__self__, *,
30976
- influxdb: Optional[bool] = None):
31746
+ influxdb: Optional[bool] = None,
31747
+ user_backup: Optional[bool] = None):
30977
31748
  """
30978
31749
  :param bool influxdb: Allow clients to connect to influxdb with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
31750
+ :param bool user_backup: Allow clients to connect to user_backup with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
30979
31751
  """
30980
31752
  if influxdb is not None:
30981
31753
  pulumi.set(__self__, "influxdb", influxdb)
31754
+ if user_backup is not None:
31755
+ pulumi.set(__self__, "user_backup", user_backup)
30982
31756
 
30983
31757
  @property
30984
31758
  @pulumi.getter
@@ -30988,16 +31762,28 @@ class GetInfluxDbInfluxdbUserConfigPrivateAccessResult(dict):
30988
31762
  """
30989
31763
  return pulumi.get(self, "influxdb")
30990
31764
 
31765
+ @property
31766
+ @pulumi.getter(name="userBackup")
31767
+ def user_backup(self) -> Optional[bool]:
31768
+ """
31769
+ Allow clients to connect to user_backup with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
31770
+ """
31771
+ return pulumi.get(self, "user_backup")
31772
+
30991
31773
 
30992
31774
  @pulumi.output_type
30993
31775
  class GetInfluxDbInfluxdbUserConfigPrivatelinkAccessResult(dict):
30994
31776
  def __init__(__self__, *,
30995
- influxdb: Optional[bool] = None):
31777
+ influxdb: Optional[bool] = None,
31778
+ user_backup: Optional[bool] = None):
30996
31779
  """
30997
31780
  :param bool influxdb: Enable influxdb.
31781
+ :param bool user_backup: Enable user_backup.
30998
31782
  """
30999
31783
  if influxdb is not None:
31000
31784
  pulumi.set(__self__, "influxdb", influxdb)
31785
+ if user_backup is not None:
31786
+ pulumi.set(__self__, "user_backup", user_backup)
31001
31787
 
31002
31788
  @property
31003
31789
  @pulumi.getter
@@ -31007,16 +31793,28 @@ class GetInfluxDbInfluxdbUserConfigPrivatelinkAccessResult(dict):
31007
31793
  """
31008
31794
  return pulumi.get(self, "influxdb")
31009
31795
 
31796
+ @property
31797
+ @pulumi.getter(name="userBackup")
31798
+ def user_backup(self) -> Optional[bool]:
31799
+ """
31800
+ Enable user_backup.
31801
+ """
31802
+ return pulumi.get(self, "user_backup")
31803
+
31010
31804
 
31011
31805
  @pulumi.output_type
31012
31806
  class GetInfluxDbInfluxdbUserConfigPublicAccessResult(dict):
31013
31807
  def __init__(__self__, *,
31014
- influxdb: Optional[bool] = None):
31808
+ influxdb: Optional[bool] = None,
31809
+ user_backup: Optional[bool] = None):
31015
31810
  """
31016
31811
  :param bool influxdb: Allow clients to connect to influxdb from the public internet for service nodes that are in a project VPC or another type of private network.
31812
+ :param bool user_backup: Allow clients to connect to user_backup from the public internet for service nodes that are in a project VPC or another type of private network.
31017
31813
  """
31018
31814
  if influxdb is not None:
31019
31815
  pulumi.set(__self__, "influxdb", influxdb)
31816
+ if user_backup is not None:
31817
+ pulumi.set(__self__, "user_backup", user_backup)
31020
31818
 
31021
31819
  @property
31022
31820
  @pulumi.getter
@@ -31026,6 +31824,14 @@ class GetInfluxDbInfluxdbUserConfigPublicAccessResult(dict):
31026
31824
  """
31027
31825
  return pulumi.get(self, "influxdb")
31028
31826
 
31827
+ @property
31828
+ @pulumi.getter(name="userBackup")
31829
+ def user_backup(self) -> Optional[bool]:
31830
+ """
31831
+ Allow clients to connect to user_backup from the public internet for service nodes that are in a project VPC or another type of private network.
31832
+ """
31833
+ return pulumi.get(self, "user_backup")
31834
+
31029
31835
 
31030
31836
  @pulumi.output_type
31031
31837
  class GetInfluxDbServiceIntegrationResult(dict):
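The new `user_backup` toggle sits next to the existing `influxdb` toggle in all three network-access blocks. A sketch of how the three blocks could combine on the resource side, with the names assumed to mirror the data-source shape shown above.

# Illustrative network-access section of an influxdb_user_config dict.
network_access = {
    "private_access": {"influxdb": True, "user_backup": True},
    "privatelink_access": {"influxdb": True, "user_backup": True},
    "public_access": {"influxdb": False, "user_backup": False},
}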
@@ -31480,10 +32286,10 @@ class GetKafkaConnectKafkaConnectUserConfigKafkaConnectResult(dict):
31480
32286
  scheduled_rebalance_max_delay_ms: Optional[int] = None,
31481
32287
  session_timeout_ms: Optional[int] = None):
31482
32288
  """
31483
- :param str connector_client_config_override_policy: Enum: `None`, `All`. Defines what client configurations can be overridden by the connector. Default is None.
32289
+ :param str connector_client_config_override_policy: Enum: `All`, `None`. Defines what client configurations can be overridden by the connector. Default is None.
31484
32290
  :param str consumer_auto_offset_reset: Enum: `earliest`, `latest`. What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
31485
32291
  :param int consumer_fetch_max_bytes: Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not an absolute maximum. Example: `52428800`.
31486
- :param str consumer_isolation_level: Enum: `read_uncommitted`, `read_committed`. Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
32292
+ :param str consumer_isolation_level: Enum: `read_committed`, `read_uncommitted`. Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
31487
32293
  :param int consumer_max_partition_fetch_bytes: Records are fetched in batches by the consumer. If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress. Example: `1048576`.
31488
32294
  :param int consumer_max_poll_interval_ms: The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).
31489
32295
  :param int consumer_max_poll_records: The maximum number of records returned in a single call to poll() (defaults to 500).
@@ -31491,7 +32297,7 @@ class GetKafkaConnectKafkaConnectUserConfigKafkaConnectResult(dict):
31491
32297
  :param int offset_flush_timeout_ms: Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).
31492
32298
  :param int producer_batch_size: This setting gives the upper bound of the batch size to be sent. If there are fewer than this many bytes accumulated for this partition, the producer will `linger` for the linger.ms time waiting for more records to show up. A batch size of zero will disable batching entirely (defaults to 16384).
31493
32299
  :param int producer_buffer_memory: The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432).
31494
- :param str producer_compression_type: Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
32300
+ :param str producer_compression_type: Enum: `gzip`, `lz4`, `none`, `snappy`, `zstd`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
31495
32301
  :param int producer_linger_ms: This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting, however if there are fewer than this many bytes accumulated for this partition the producer will `linger` for the specified time waiting for more records to show up. Defaults to 0.
31496
32302
  :param int producer_max_request_size: This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests. Example: `1048576`.
31497
32303
  :param int scheduled_rebalance_max_delay_ms: The maximum delay that is scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. Defaults to 5 minutes.
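A hedged sketch of the worker-level `kafka_connect` block using the enums above. The chosen values are illustrative, the resource-side `kafka_connect_user_config` is assumed to mirror these field names, and the project, cloud and plan names are placeholders.

import pulumi_aiven as aiven

connect = aiven.KafkaConnect(
    "example-kafka-connect",
    project="my-project",                 # placeholder
    cloud_name="google-europe-west1",     # placeholder
    plan="startup-4",                     # placeholder
    service_name="example-kafka-connect",
    kafka_connect_user_config={
        "kafka_connect": {
            "connector_client_config_override_policy": "All",  # All or None
            "consumer_isolation_level": "read_committed",      # consume-exactly-once reads
            "producer_compression_type": "zstd",               # gzip/lz4/none/snappy/zstd
        },
    },
)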
@@ -31534,7 +32340,7 @@ class GetKafkaConnectKafkaConnectUserConfigKafkaConnectResult(dict):
31534
32340
  @pulumi.getter(name="connectorClientConfigOverridePolicy")
31535
32341
  def connector_client_config_override_policy(self) -> Optional[str]:
31536
32342
  """
31537
- Enum: `None`, `All`. Defines what client configurations can be overridden by the connector. Default is None.
32343
+ Enum: `All`, `None`. Defines what client configurations can be overridden by the connector. Default is None.
31538
32344
  """
31539
32345
  return pulumi.get(self, "connector_client_config_override_policy")
31540
32346
 
@@ -31558,7 +32364,7 @@ class GetKafkaConnectKafkaConnectUserConfigKafkaConnectResult(dict):
31558
32364
  @pulumi.getter(name="consumerIsolationLevel")
31559
32365
  def consumer_isolation_level(self) -> Optional[str]:
31560
32366
  """
31561
- Enum: `read_uncommitted`, `read_committed`. Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
32367
+ Enum: `read_committed`, `read_uncommitted`. Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
31562
32368
  """
31563
32369
  return pulumi.get(self, "consumer_isolation_level")
31564
32370
 
@@ -31622,7 +32428,7 @@ class GetKafkaConnectKafkaConnectUserConfigKafkaConnectResult(dict):
31622
32428
  @pulumi.getter(name="producerCompressionType")
31623
32429
  def producer_compression_type(self) -> Optional[str]:
31624
32430
  """
31625
- Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
32431
+ Enum: `gzip`, `lz4`, `none`, `snappy`, `zstd`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
31626
32432
  """
31627
32433
  return pulumi.get(self, "producer_compression_type")
31628
32434
 
@@ -31772,8 +32578,8 @@ class GetKafkaConnectKafkaConnectUserConfigSecretProviderResult(dict):
31772
32578
  vault: Optional['outputs.GetKafkaConnectKafkaConnectUserConfigSecretProviderVaultResult'] = None):
31773
32579
  """
31774
32580
  :param str name: Name of the secret provider. Used to reference secrets in connector config.
31775
- :param 'GetKafkaConnectKafkaConnectUserConfigSecretProviderAwsArgs' aws: AWS config for Secret Provider
31776
- :param 'GetKafkaConnectKafkaConnectUserConfigSecretProviderVaultArgs' vault: Vault Config for Secret Provider
32581
+ :param 'GetKafkaConnectKafkaConnectUserConfigSecretProviderAwsArgs' aws: AWS secret provider configuration
32582
+ :param 'GetKafkaConnectKafkaConnectUserConfigSecretProviderVaultArgs' vault: Vault secret provider configuration
31777
32583
  """
31778
32584
  pulumi.set(__self__, "name", name)
31779
32585
  if aws is not None:
@@ -31793,7 +32599,7 @@ class GetKafkaConnectKafkaConnectUserConfigSecretProviderResult(dict):
31793
32599
  @pulumi.getter
31794
32600
  def aws(self) -> Optional['outputs.GetKafkaConnectKafkaConnectUserConfigSecretProviderAwsResult']:
31795
32601
  """
31796
- AWS config for Secret Provider
32602
+ AWS secret provider configuration
31797
32603
  """
31798
32604
  return pulumi.get(self, "aws")
31799
32605
 
@@ -31801,7 +32607,7 @@ class GetKafkaConnectKafkaConnectUserConfigSecretProviderResult(dict):
31801
32607
  @pulumi.getter
31802
32608
  def vault(self) -> Optional['outputs.GetKafkaConnectKafkaConnectUserConfigSecretProviderVaultResult']:
31803
32609
  """
31804
- Vault Config for Secret Provider
32610
+ Vault secret provider configuration
31805
32611
  """
31806
32612
  return pulumi.get(self, "vault")
31807
32613
 
@@ -32531,7 +33337,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
32531
33337
  transaction_state_log_segment_bytes: Optional[int] = None):
32532
33338
  """
32533
33339
  :param bool auto_create_topics_enable: Enable auto-creation of topics. (Default: true).
32534
- :param str compression_type: Enum: `gzip`, `snappy`, `lz4`, `zstd`, `uncompressed`, `producer`. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `uncompressed` which is equivalent to no compression; and `producer` which means retain the original compression codec set by the producer.(Default: producer).
33340
+ :param str compression_type: Enum: `gzip`, `lz4`, `producer`, `snappy`, `uncompressed`, `zstd`. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `uncompressed` which is equivalent to no compression; and `producer` which means retain the original compression codec set by the producer.(Default: producer).
32535
33341
  :param int connections_max_idle_ms: Idle connections timeout: the server socket processor threads close the connections that idle for longer than this. (Default: 600000 ms (10 minutes)). Example: `540000`.
32536
33342
  :param int default_replication_factor: Replication factor for auto-created topics (Default: 3).
32537
33343
  :param int group_initial_rebalance_delay_ms: The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. (Default: 3000 ms (3 seconds)). Example: `3000`.
@@ -32541,8 +33347,8 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
32541
33347
  :param int log_cleaner_max_compaction_lag_ms: The maximum amount of time message will remain uncompacted. Only applicable for logs that are being compacted. (Default: 9223372036854775807 ms (Long.MAX_VALUE)).
32542
33348
  :param float log_cleaner_min_cleanable_ratio: Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option. (Default: 0.5). Example: `0.5`.
32543
33349
  :param int log_cleaner_min_compaction_lag_ms: The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted. (Default: 0 ms).
32544
- :param str log_cleanup_policy: Enum: `delete`, `compact`, `compact,delete`. The default cleanup policy for segments beyond the retention window (Default: delete).
32545
- :param int log_flush_interval_messages: The number of messages accumulated on a log partition before messages are flushed to disk (Default: 9223372036854775807 (Long.MAX_VALUE)). Example: `9223372036854775807`.
33350
+ :param str log_cleanup_policy: Enum: `compact`, `compact,delete`, `delete`. The default cleanup policy for segments beyond the retention window (Default: delete).
33351
+ :param int log_flush_interval_messages: The number of messages accumulated on a log partition before messages are flushed to disk (Default: 9223372036854775807 (Long.MAX_VALUE)).
32546
33352
  :param int log_flush_interval_ms: The maximum time in ms that a message in any topic is kept in memory (page-cache) before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used (Default: null).
32547
33353
  :param int log_index_interval_bytes: The interval with which Kafka adds an entry to the offset index (Default: 4096 bytes (4 kibibytes)). Example: `4096`.
32548
33354
  :param int log_index_size_max_bytes: The maximum size in bytes of the offset index (Default: 10485760 (10 mebibytes)). Example: `10485760`.
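A hedged sketch of the broker-level `kafka` block using a few of the fields above. Values are illustrative, the resource-side `kafka_user_config` is assumed to mirror these names, and the project, cloud and plan names are placeholders.

import pulumi_aiven as aiven

kafka = aiven.Kafka(
    "example-kafka",
    project="my-project",                 # placeholder
    cloud_name="google-europe-west1",     # placeholder
    plan="business-4",                    # placeholder
    service_name="example-kafka",
    kafka_user_config={
        "kafka": {
            "compression_type": "zstd",             # or producer/uncompressed/gzip/...
            "log_cleanup_policy": "compact,delete", # compact, delete or compact,delete
            "log_flush_interval_messages": 9223372036854775807,  # the documented default
        },
    },
)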
@@ -32682,7 +33488,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
32682
33488
  @pulumi.getter(name="compressionType")
32683
33489
  def compression_type(self) -> Optional[str]:
32684
33490
  """
32685
- Enum: `gzip`, `snappy`, `lz4`, `zstd`, `uncompressed`, `producer`. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `uncompressed` which is equivalent to no compression; and `producer` which means retain the original compression codec set by the producer.(Default: producer).
33491
+ Enum: `gzip`, `lz4`, `producer`, `snappy`, `uncompressed`, `zstd`. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `uncompressed` which is equivalent to no compression; and `producer` which means retain the original compression codec set by the producer.(Default: producer).
32686
33492
  """
32687
33493
  return pulumi.get(self, "compression_type")
32688
33494
 
@@ -32762,7 +33568,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
32762
33568
  @pulumi.getter(name="logCleanupPolicy")
32763
33569
  def log_cleanup_policy(self) -> Optional[str]:
32764
33570
  """
32765
- Enum: `delete`, `compact`, `compact,delete`. The default cleanup policy for segments beyond the retention window (Default: delete).
33571
+ Enum: `compact`, `compact,delete`, `delete`. The default cleanup policy for segments beyond the retention window (Default: delete).
32766
33572
  """
32767
33573
  return pulumi.get(self, "log_cleanup_policy")
32768
33574
 
@@ -32770,7 +33576,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
32770
33576
  @pulumi.getter(name="logFlushIntervalMessages")
32771
33577
  def log_flush_interval_messages(self) -> Optional[int]:
32772
33578
  """
32773
- The number of messages accumulated on a log partition before messages are flushed to disk (Default: 9223372036854775807 (Long.MAX_VALUE)). Example: `9223372036854775807`.
33579
+ The number of messages accumulated on a log partition before messages are flushed to disk (Default: 9223372036854775807 (Long.MAX_VALUE)).
32774
33580
  """
32775
33581
  return pulumi.get(self, "log_flush_interval_messages")
32776
33582
 
@@ -33090,10 +33896,10 @@ class GetKafkaKafkaUserConfigKafkaConnectConfigResult(dict):
33090
33896
  scheduled_rebalance_max_delay_ms: Optional[int] = None,
33091
33897
  session_timeout_ms: Optional[int] = None):
33092
33898
  """
33093
- :param str connector_client_config_override_policy: Enum: `None`, `All`. Defines what client configurations can be overridden by the connector. Default is None.
33899
+ :param str connector_client_config_override_policy: Enum: `All`, `None`. Defines what client configurations can be overridden by the connector. Default is None.
33094
33900
  :param str consumer_auto_offset_reset: Enum: `earliest`, `latest`. What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
33095
33901
  :param int consumer_fetch_max_bytes: Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not an absolute maximum. Example: `52428800`.
33096
- :param str consumer_isolation_level: Enum: `read_uncommitted`, `read_committed`. Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
33902
+ :param str consumer_isolation_level: Enum: `read_committed`, `read_uncommitted`. Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
33097
33903
  :param int consumer_max_partition_fetch_bytes: Records are fetched in batches by the consumer. If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress. Example: `1048576`.
33098
33904
  :param int consumer_max_poll_interval_ms: The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).
33099
33905
  :param int consumer_max_poll_records: The maximum number of records returned in a single call to poll() (defaults to 500).
@@ -33101,7 +33907,7 @@ class GetKafkaKafkaUserConfigKafkaConnectConfigResult(dict):
33101
33907
  :param int offset_flush_timeout_ms: Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).
33102
33908
  :param int producer_batch_size: This setting gives the upper bound of the batch size to be sent. If there are fewer than this many bytes accumulated for this partition, the producer will `linger` for the linger.ms time waiting for more records to show up. A batch size of zero will disable batching entirely (defaults to 16384).
33103
33909
  :param int producer_buffer_memory: The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432).
33104
- :param str producer_compression_type: Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
33910
+ :param str producer_compression_type: Enum: `gzip`, `lz4`, `none`, `snappy`, `zstd`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
33105
33911
  :param int producer_linger_ms: This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting, however if there are fewer than this many bytes accumulated for this partition the producer will `linger` for the specified time waiting for more records to show up. Defaults to 0.
33106
33912
  :param int producer_max_request_size: This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests. Example: `1048576`.
33107
33913
  :param int scheduled_rebalance_max_delay_ms: The maximum delay that is scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. Defaults to 5 minutes.
@@ -33144,7 +33950,7 @@ class GetKafkaKafkaUserConfigKafkaConnectConfigResult(dict):
33144
33950
  @pulumi.getter(name="connectorClientConfigOverridePolicy")
33145
33951
  def connector_client_config_override_policy(self) -> Optional[str]:
33146
33952
  """
33147
- Enum: `None`, `All`. Defines what client configurations can be overridden by the connector. Default is None.
33953
+ Enum: `All`, `None`. Defines what client configurations can be overridden by the connector. Default is None.
33148
33954
  """
33149
33955
  return pulumi.get(self, "connector_client_config_override_policy")
33150
33956
 
@@ -33168,7 +33974,7 @@ class GetKafkaKafkaUserConfigKafkaConnectConfigResult(dict):
33168
33974
  @pulumi.getter(name="consumerIsolationLevel")
33169
33975
  def consumer_isolation_level(self) -> Optional[str]:
33170
33976
  """
33171
- Enum: `read_uncommitted`, `read_committed`. Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
33977
+ Enum: `read_committed`, `read_uncommitted`. Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
33172
33978
  """
33173
33979
  return pulumi.get(self, "consumer_isolation_level")
33174
33980
 
@@ -33232,7 +34038,7 @@ class GetKafkaKafkaUserConfigKafkaConnectConfigResult(dict):
33232
34038
  @pulumi.getter(name="producerCompressionType")
33233
34039
  def producer_compression_type(self) -> Optional[str]:
33234
34040
  """
33235
- Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
34041
+ Enum: `gzip`, `lz4`, `none`, `snappy`, `zstd`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
33236
34042
  """
33237
34043
  return pulumi.get(self, "producer_compression_type")
33238
34044
 
@@ -33277,8 +34083,8 @@ class GetKafkaKafkaUserConfigKafkaConnectSecretProviderResult(dict):
33277
34083
  vault: Optional['outputs.GetKafkaKafkaUserConfigKafkaConnectSecretProviderVaultResult'] = None):
33278
34084
  """
33279
34085
  :param str name: Name of the secret provider. Used to reference secrets in connector config.
33280
- :param 'GetKafkaKafkaUserConfigKafkaConnectSecretProviderAwsArgs' aws: AWS config for Secret Provider
33281
- :param 'GetKafkaKafkaUserConfigKafkaConnectSecretProviderVaultArgs' vault: Vault Config for Secret Provider
34086
+ :param 'GetKafkaKafkaUserConfigKafkaConnectSecretProviderAwsArgs' aws: AWS secret provider configuration
34087
+ :param 'GetKafkaKafkaUserConfigKafkaConnectSecretProviderVaultArgs' vault: Vault secret provider configuration
33282
34088
  """
33283
34089
  pulumi.set(__self__, "name", name)
33284
34090
  if aws is not None:
@@ -33298,7 +34104,7 @@ class GetKafkaKafkaUserConfigKafkaConnectSecretProviderResult(dict):
33298
34104
  @pulumi.getter
33299
34105
  def aws(self) -> Optional['outputs.GetKafkaKafkaUserConfigKafkaConnectSecretProviderAwsResult']:
33300
34106
  """
33301
- AWS config for Secret Provider
34107
+ AWS secret provider configuration
33302
34108
  """
33303
34109
  return pulumi.get(self, "aws")
33304
34110
 
@@ -33306,7 +34112,7 @@ class GetKafkaKafkaUserConfigKafkaConnectSecretProviderResult(dict):
33306
34112
  @pulumi.getter
33307
34113
  def vault(self) -> Optional['outputs.GetKafkaKafkaUserConfigKafkaConnectSecretProviderVaultResult']:
33308
34114
  """
33309
- Vault Config for Secret Provider
34115
+ Vault secret provider configuration
33310
34116
  """
33311
34117
  return pulumi.get(self, "vault")
33312
34118
 
@@ -33446,10 +34252,10 @@ class GetKafkaKafkaUserConfigKafkaRestConfigResult(dict):
33446
34252
  :param bool consumer_enable_auto_commit: If true the consumer's offset will be periodically committed to Kafka in the background. Default: `true`.
33447
34253
  :param int consumer_request_max_bytes: Maximum number of bytes in unencoded message keys and values by a single request. Default: `67108864`.
33448
34254
  :param int consumer_request_timeout_ms: Enum: `1000`, `15000`, `30000`. The maximum total time to wait for messages for a request if the maximum number of messages has not yet been reached. Default: `1000`.
33449
- :param str name_strategy: Enum: `topic_name`, `record_name`, `topic_record_name`. Name strategy to use when selecting subject for storing schemas. Default: `topic_name`.
34255
+ :param str name_strategy: Enum: `record_name`, `topic_name`, `topic_record_name`. Name strategy to use when selecting subject for storing schemas. Default: `topic_name`.
33450
34256
  :param bool name_strategy_validation: If true, validate that given schema is registered under expected subject name by the used name strategy when producing messages. Default: `true`.
33451
- :param str producer_acks: Enum: `all`, `-1`, `0`, `1`. The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to `all` or `-1`, the leader will wait for the full set of in-sync replicas to acknowledge the record. Default: `1`.
33452
- :param str producer_compression_type: Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
34257
+ :param str producer_acks: Enum: `-1`, `0`, `1`, `all`. The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to `all` or `-1`, the leader will wait for the full set of in-sync replicas to acknowledge the record. Default: `1`.
34258
+ :param str producer_compression_type: Enum: `gzip`, `lz4`, `none`, `snappy`, `zstd`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
33453
34259
  :param int producer_linger_ms: Wait for up to the given delay to allow batching records together. Default: `0`.
33454
34260
  :param int producer_max_request_size: The maximum size of a request in bytes. Note that Kafka broker can also cap the record batch size. Default: `1048576`.
33455
34261
  :param int simpleconsumer_pool_size_max: Maximum number of SimpleConsumers that can be instantiated per broker. Default: `25`.
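A sketch of the `kafka_rest_config` block with the reordered enums above; values are illustrative and the resource-side input is assumed to mirror these names.

# Illustrative kafka_rest_config section of a kafka_user_config dict.
kafka_rest_config = {
    "producer_acks": "all",                   # -1, 0, 1 or all
    "producer_compression_type": "gzip",
    "name_strategy": "topic_record_name",     # record_name/topic_name/topic_record_name
    "name_strategy_validation": True,
}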
@@ -33503,7 +34309,7 @@ class GetKafkaKafkaUserConfigKafkaRestConfigResult(dict):
33503
34309
  @pulumi.getter(name="nameStrategy")
33504
34310
  def name_strategy(self) -> Optional[str]:
33505
34311
  """
33506
- Enum: `topic_name`, `record_name`, `topic_record_name`. Name strategy to use when selecting subject for storing schemas. Default: `topic_name`.
34312
+ Enum: `record_name`, `topic_name`, `topic_record_name`. Name strategy to use when selecting subject for storing schemas. Default: `topic_name`.
33507
34313
  """
33508
34314
  return pulumi.get(self, "name_strategy")
33509
34315
 
@@ -33519,7 +34325,7 @@ class GetKafkaKafkaUserConfigKafkaRestConfigResult(dict):
33519
34325
  @pulumi.getter(name="producerAcks")
33520
34326
  def producer_acks(self) -> Optional[str]:
33521
34327
  """
33522
- Enum: `all`, `-1`, `0`, `1`. The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to `all` or `-1`, the leader will wait for the full set of in-sync replicas to acknowledge the record. Default: `1`.
34328
+ Enum: `-1`, `0`, `1`, `all`. The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to `all` or `-1`, the leader will wait for the full set of in-sync replicas to acknowledge the record. Default: `1`.
33523
34329
  """
33524
34330
  return pulumi.get(self, "producer_acks")
33525
34331
 
@@ -33527,7 +34333,7 @@ class GetKafkaKafkaUserConfigKafkaRestConfigResult(dict):
33527
34333
  @pulumi.getter(name="producerCompressionType")
33528
34334
  def producer_compression_type(self) -> Optional[str]:
33529
34335
  """
33530
- Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
34336
+ Enum: `gzip`, `lz4`, `none`, `snappy`, `zstd`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
33531
34337
  """
33532
34338
  return pulumi.get(self, "producer_compression_type")
33533
34339
 
@@ -34514,8 +35320,8 @@ class GetKafkaTopicConfigResult(dict):
34514
35320
  segment_ms: Optional[str] = None,
34515
35321
  unclean_leader_election_enable: Optional[bool] = None):
34516
35322
  """
34517
- :param str cleanup_policy: cleanup.policy value
34518
- :param str compression_type: compression.type value
35323
+ :param str cleanup_policy: cleanup.policy value. The possible values are `delete`, `compact` and `compact,delete`.
35324
+ :param str compression_type: compression.type value. The possible values are `snappy`, `gzip`, `lz4`, `producer`, `uncompressed` and `zstd`.
34519
35325
  :param str delete_retention_ms: delete.retention.ms value
34520
35326
  :param str file_delete_delay_ms: file.delete.delay.ms value
34521
35327
  :param str flush_messages: flush.messages value
@@ -34526,9 +35332,9 @@ class GetKafkaTopicConfigResult(dict):
34526
35332
  :param str max_compaction_lag_ms: max.compaction.lag.ms value
34527
35333
  :param str max_message_bytes: max.message.bytes value
34528
35334
  :param bool message_downconversion_enable: message.downconversion.enable value
34529
- :param str message_format_version: message.format.version value
35335
+ :param str message_format_version: message.format.version value. The possible values are `0.8.0`, `0.8.1`, `0.8.2`, `0.9.0`, `0.10.0`, `0.10.0-IV0`, `0.10.0-IV1`, `0.10.1`, `0.10.1-IV0`, `0.10.1-IV1`, `0.10.1-IV2`, `0.10.2`, `0.10.2-IV0`, `0.11.0`, `0.11.0-IV0`, `0.11.0-IV1`, `0.11.0-IV2`, `1.0`, `1.0-IV0`, `1.1`, `1.1-IV0`, `2.0`, `2.0-IV0`, `2.0-IV1`, `2.1`, `2.1-IV0`, `2.1-IV1`, `2.1-IV2`, `2.2`, `2.2-IV0`, `2.2-IV1`, `2.3`, `2.3-IV0`, `2.3-IV1`, `2.4`, `2.4-IV0`, `2.4-IV1`, `2.5`, `2.5-IV0`, `2.6`, `2.6-IV0`, `2.7`, `2.7-IV0`, `2.7-IV1`, `2.7-IV2`, `2.8`, `2.8-IV0`, `2.8-IV1`, `3.0`, `3.0-IV0`, `3.0-IV1`, `3.1`, `3.1-IV0`, `3.2`, `3.2-IV0`, `3.3`, `3.3-IV0`, `3.3-IV1`, `3.3-IV2`, `3.3-IV3`, `3.4`, `3.4-IV0`, `3.5`, `3.5-IV0`, `3.5-IV1`, `3.5-IV2`, `3.6`, `3.6-IV0`, `3.6-IV1`, `3.6-IV2`, `3.7`, `3.7-IV0`, `3.7-IV1`, `3.7-IV2`, `3.7-IV3`, `3.7-IV4`, `3.8`, `3.8-IV0`, `3.9`, `3.9-IV0` and `3.9-IV1`.
34530
35336
  :param str message_timestamp_difference_max_ms: message.timestamp.difference.max.ms value
34531
- :param str message_timestamp_type: message.timestamp.type value
35337
+ :param str message_timestamp_type: message.timestamp.type value. The possible values are `CreateTime` and `LogAppendTime`.
34532
35338
  :param float min_cleanable_dirty_ratio: min.cleanable.dirty.ratio value
34533
35339
  :param str min_compaction_lag_ms: min.compaction.lag.ms value
34534
35340
  :param str min_insync_replicas: min.insync.replicas value
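The topic-level keys documented here for the data source also exist on the `aiven.KafkaTopic` resource's `config` block; a hypothetical sketch follows. The topic, project and service names are placeholders, and the assumption is that the resource-side keys match the data-source keys above.

import pulumi_aiven as aiven

topic = aiven.KafkaTopic(
    "example-topic",
    project="my-project",                 # placeholder
    service_name="example-kafka",         # placeholder
    topic_name="example-topic",
    partitions=3,
    replication=2,
    config={
        "cleanup_policy": "compact,delete",         # delete, compact or compact,delete
        "compression_type": "zstd",
        "message_timestamp_type": "LogAppendTime",  # or CreateTime
    },
)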
@@ -34601,7 +35407,7 @@ class GetKafkaTopicConfigResult(dict):
34601
35407
  @pulumi.getter(name="cleanupPolicy")
34602
35408
  def cleanup_policy(self) -> Optional[str]:
34603
35409
  """
34604
- cleanup.policy value
35410
+ cleanup.policy value. The possible values are `delete`, `compact` and `compact,delete`.
34605
35411
  """
34606
35412
  return pulumi.get(self, "cleanup_policy")
34607
35413
 
@@ -34609,7 +35415,7 @@ class GetKafkaTopicConfigResult(dict):
34609
35415
  @pulumi.getter(name="compressionType")
34610
35416
  def compression_type(self) -> Optional[str]:
34611
35417
  """
34612
- compression.type value
35418
+ compression.type value. The possible values are `snappy`, `gzip`, `lz4`, `producer`, `uncompressed` and `zstd`.
34613
35419
  """
34614
35420
  return pulumi.get(self, "compression_type")
34615
35421
 
@@ -34697,7 +35503,7 @@ class GetKafkaTopicConfigResult(dict):
34697
35503
  @pulumi.getter(name="messageFormatVersion")
34698
35504
  def message_format_version(self) -> Optional[str]:
34699
35505
  """
34700
- message.format.version value
35506
+ message.format.version value. The possible values are `0.8.0`, `0.8.1`, `0.8.2`, `0.9.0`, `0.10.0`, `0.10.0-IV0`, `0.10.0-IV1`, `0.10.1`, `0.10.1-IV0`, `0.10.1-IV1`, `0.10.1-IV2`, `0.10.2`, `0.10.2-IV0`, `0.11.0`, `0.11.0-IV0`, `0.11.0-IV1`, `0.11.0-IV2`, `1.0`, `1.0-IV0`, `1.1`, `1.1-IV0`, `2.0`, `2.0-IV0`, `2.0-IV1`, `2.1`, `2.1-IV0`, `2.1-IV1`, `2.1-IV2`, `2.2`, `2.2-IV0`, `2.2-IV1`, `2.3`, `2.3-IV0`, `2.3-IV1`, `2.4`, `2.4-IV0`, `2.4-IV1`, `2.5`, `2.5-IV0`, `2.6`, `2.6-IV0`, `2.7`, `2.7-IV0`, `2.7-IV1`, `2.7-IV2`, `2.8`, `2.8-IV0`, `2.8-IV1`, `3.0`, `3.0-IV0`, `3.0-IV1`, `3.1`, `3.1-IV0`, `3.2`, `3.2-IV0`, `3.3`, `3.3-IV0`, `3.3-IV1`, `3.3-IV2`, `3.3-IV3`, `3.4`, `3.4-IV0`, `3.5`, `3.5-IV0`, `3.5-IV1`, `3.5-IV2`, `3.6`, `3.6-IV0`, `3.6-IV1`, `3.6-IV2`, `3.7`, `3.7-IV0`, `3.7-IV1`, `3.7-IV2`, `3.7-IV3`, `3.7-IV4`, `3.8`, `3.8-IV0`, `3.9`, `3.9-IV0` and `3.9-IV1`.
34701
35507
  """
34702
35508
  return pulumi.get(self, "message_format_version")
34703
35509
 
@@ -34713,7 +35519,7 @@ class GetKafkaTopicConfigResult(dict):
34713
35519
  @pulumi.getter(name="messageTimestampType")
34714
35520
  def message_timestamp_type(self) -> Optional[str]:
34715
35521
  """
34716
- message.timestamp.type value
35522
+ message.timestamp.type value. The possible values are `CreateTime` and `LogAppendTime`.
34717
35523
  """
34718
35524
  return pulumi.get(self, "message_timestamp_type")
34719
35525
 
@@ -36860,9 +37666,9 @@ class GetMySqlMysqlUserConfigMysqlResult(dict):
36860
37666
  :param int innodb_thread_concurrency: Defines the maximum number of threads permitted inside of InnoDB. Default is 0 (infinite concurrency - no limit). Example: `10`.
36861
37667
  :param int innodb_write_io_threads: The number of I/O threads for write operations in InnoDB. Default is 4. Changing this parameter will lead to a restart of the MySQL service. Example: `10`.
36862
37668
  :param int interactive_timeout: The number of seconds the server waits for activity on an interactive connection before closing it. Example: `3600`.
36863
- :param str internal_tmp_mem_storage_engine: Enum: `TempTable`, `MEMORY`. The storage engine for in-memory internal temporary tables.
36864
- :param str log_output: Enum: `INSIGHTS`, `NONE`, `TABLE`, `INSIGHTS,TABLE`. The slow log output destination when slow_query_log is ON. To enable MySQL AI Insights, choose INSIGHTS. To use MySQL AI Insights and the mysql.slow_log table at the same time, choose INSIGHTS,TABLE. To only use the mysql.slow_log table, choose TABLE. To silence slow logs, choose NONE.
36865
- :param float long_query_time: The slow_query_logs work as SQL statements that take more than long_query_time seconds to execute. Example: `10`.
37669
+ :param str internal_tmp_mem_storage_engine: Enum: `MEMORY`, `TempTable`. The storage engine for in-memory internal temporary tables.
37670
+ :param str log_output: Enum: `INSIGHTS`, `INSIGHTS,TABLE`, `NONE`, `TABLE`. The slow log output destination when slow_query_log is ON. To enable MySQL AI Insights, choose INSIGHTS. To use MySQL AI Insights and the mysql.slow_log table at the same time, choose INSIGHTS,TABLE. To only use the mysql.slow_log table, choose TABLE. To silence slow logs, choose NONE.
37671
+ :param float long_query_time: The slow_query_logs work as SQL statements that take more than long_query_time seconds to execute. Example: `10.0`.
36866
37672
  :param int max_allowed_packet: Size of the largest message in bytes that can be received by the server. Default is 67108864 (64M). Example: `67108864`.
36867
37673
  :param int max_heap_table_size: Limits the size of internal in-memory tables. Also set tmp_table_size. Default is 16777216 (16M). Example: `16777216`.
36868
37674
  :param int net_buffer_length: Start sizes of connection buffer and result buffer. Default is 16384 (16K). Changing this parameter will lead to a restart of the MySQL service. Example: `16384`.
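A sketch of the `mysql` block exercising the slow-log fields above. Values are illustrative; `slow_query_log` is an assumed companion flag referenced in the `log_output` description but not shown in this hunk.

# Illustrative mysql section of a mysql_user_config dict.
mysql = {
    "internal_tmp_mem_storage_engine": "TempTable",  # or MEMORY
    "log_output": "INSIGHTS,TABLE",                  # slow logs to Insights and mysql.slow_log
    "long_query_time": 10.0,                         # seconds before a statement counts as slow
    "slow_query_log": True,                          # assumed flag, not part of this hunk
}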
@@ -37078,7 +37884,7 @@ class GetMySqlMysqlUserConfigMysqlResult(dict):
37078
37884
  @pulumi.getter(name="internalTmpMemStorageEngine")
37079
37885
  def internal_tmp_mem_storage_engine(self) -> Optional[str]:
37080
37886
  """
37081
- Enum: `TempTable`, `MEMORY`. The storage engine for in-memory internal temporary tables.
37887
+ Enum: `MEMORY`, `TempTable`. The storage engine for in-memory internal temporary tables.
37082
37888
  """
37083
37889
  return pulumi.get(self, "internal_tmp_mem_storage_engine")
37084
37890
 
@@ -37086,7 +37892,7 @@ class GetMySqlMysqlUserConfigMysqlResult(dict):
37086
37892
  @pulumi.getter(name="logOutput")
37087
37893
  def log_output(self) -> Optional[str]:
37088
37894
  """
37089
- Enum: `INSIGHTS`, `NONE`, `TABLE`, `INSIGHTS,TABLE`. The slow log output destination when slow_query_log is ON. To enable MySQL AI Insights, choose INSIGHTS. To use MySQL AI Insights and the mysql.slow_log table at the same time, choose INSIGHTS,TABLE. To only use the mysql.slow_log table, choose TABLE. To silence slow logs, choose NONE.
37895
+ Enum: `INSIGHTS`, `INSIGHTS,TABLE`, `NONE`, `TABLE`. The slow log output destination when slow_query_log is ON. To enable MySQL AI Insights, choose INSIGHTS. To use MySQL AI Insights and the mysql.slow_log table at the same time, choose INSIGHTS,TABLE. To only use the mysql.slow_log table, choose TABLE. To silence slow logs, choose NONE.
37090
37896
  """
37091
37897
  return pulumi.get(self, "log_output")
37092
37898
 
@@ -37094,7 +37900,7 @@ class GetMySqlMysqlUserConfigMysqlResult(dict):
37094
37900
  @pulumi.getter(name="longQueryTime")
37095
37901
  def long_query_time(self) -> Optional[float]:
37096
37902
  """
37097
- The slow_query_logs work as SQL statements that take more than long_query_time seconds to execute. Example: `10`.
37903
+ The slow_query_logs work as SQL statements that take more than long_query_time seconds to execute. Example: `10.0`.
37098
37904
  """
37099
37905
  return pulumi.get(self, "long_query_time")
37100
37906
 
@@ -37584,7 +38390,7 @@ class GetOpenSearchOpensearchUserConfigResult(dict):
37584
38390
  :param str additional_backup_regions: Additional Cloud Regions for Backup Replication.
37585
38391
  :param 'GetOpenSearchOpensearchUserConfigAzureMigrationArgs' azure_migration: Azure migration settings
37586
38392
  :param str custom_domain: Serve the web frontend using a custom CNAME pointing to the Aiven DNS name. Example: `grafana.example.org`.
37587
- :param bool disable_replication_factor_adjustment: Disable automatic replication factor adjustment for multi-node services. By default, Aiven ensures all indexes are replicated at least to two nodes. Note: Due to potential data loss in case of losing a service node, this setting can no longer be activated.
38393
+ :param bool disable_replication_factor_adjustment: Disable automatic replication factor adjustment for multi-node services. By default, Aiven ensures all indexes are replicated at least to two nodes. Note: Due to potential data loss in case of losing a service node, this setting can not be activated unless specifically allowed for the project.
37588
38394
  :param 'GetOpenSearchOpensearchUserConfigGcsMigrationArgs' gcs_migration: Google Cloud Storage migration settings
37589
38395
  :param Sequence['GetOpenSearchOpensearchUserConfigIndexPatternArgs'] index_patterns: Index patterns
37590
38396
  :param 'GetOpenSearchOpensearchUserConfigIndexRollupArgs' index_rollup: Index rollup settings
@@ -37692,7 +38498,7 @@ class GetOpenSearchOpensearchUserConfigResult(dict):
37692
38498
  @pulumi.getter(name="disableReplicationFactorAdjustment")
37693
38499
  def disable_replication_factor_adjustment(self) -> Optional[bool]:
37694
38500
  """
37695
- Disable automatic replication factor adjustment for multi-node services. By default, Aiven ensures all indexes are replicated at least to two nodes. Note: Due to potential data loss in case of losing a service node, this setting can no longer be activated.
38501
+ Disable automatic replication factor adjustment for multi-node services. By default, Aiven ensures all indexes are replicated at least to two nodes. Note: Due to potential data loss in case of losing a service node, this setting can not be activated unless specifically allowed for the project.
37696
38502
  """
37697
38503
  return pulumi.get(self, "disable_replication_factor_adjustment")
37698
38504
 
@@ -37888,28 +38694,33 @@ class GetOpenSearchOpensearchUserConfigAzureMigrationResult(dict):
37888
38694
  account: str,
37889
38695
  base_path: str,
37890
38696
  container: str,
38697
+ indices: str,
37891
38698
  snapshot_name: str,
37892
38699
  chunk_size: Optional[str] = None,
37893
38700
  compress: Optional[bool] = None,
37894
38701
  endpoint_suffix: Optional[str] = None,
37895
- indices: Optional[str] = None,
38702
+ include_aliases: Optional[bool] = None,
37896
38703
  key: Optional[str] = None,
38704
+ restore_global_state: Optional[bool] = None,
37897
38705
  sas_token: Optional[str] = None):
37898
38706
  """
37899
- :param str account: Azure account name.
38707
+ :param str account: Account name.
37900
38708
  :param str base_path: The path to the repository data within its container. The value of this setting should not start or end with a /.
37901
38709
  :param str container: Azure container name.
38710
+ :param str indices: A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. Example: `metrics*,logs*,data-20240823`.
37902
38711
  :param str snapshot_name: The snapshot name to restore from.
37903
38712
  :param str chunk_size: Big files can be broken down into chunks during snapshotting if needed. Should be the same as for the 3rd party repository.
37904
38713
  :param bool compress: When set to true metadata files are stored in compressed format.
37905
38714
  :param str endpoint_suffix: Defines the DNS suffix for Azure Storage endpoints.
37906
- :param str indices: A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. By default, a restore operation includes all data streams and indices in the snapshot. If this argument is provided, the restore operation only includes the data streams and indices that you specify. Example: `metrics*,logs*,data-20240823`.
38715
+ :param bool include_aliases: Whether to restore aliases alongside their associated indexes. Default is true.
37907
38716
  :param str key: Azure account secret key. One of key or sas_token should be specified.
38717
+ :param bool restore_global_state: If true, restore the cluster state. Defaults to false.
37908
38718
  :param str sas_token: A shared access signatures (SAS) token. One of key or sas_token should be specified.
37909
38719
  """
37910
38720
  pulumi.set(__self__, "account", account)
37911
38721
  pulumi.set(__self__, "base_path", base_path)
37912
38722
  pulumi.set(__self__, "container", container)
38723
+ pulumi.set(__self__, "indices", indices)
37913
38724
  pulumi.set(__self__, "snapshot_name", snapshot_name)
37914
38725
  if chunk_size is not None:
37915
38726
  pulumi.set(__self__, "chunk_size", chunk_size)
@@ -37917,10 +38728,12 @@ class GetOpenSearchOpensearchUserConfigAzureMigrationResult(dict):
37917
38728
  pulumi.set(__self__, "compress", compress)
37918
38729
  if endpoint_suffix is not None:
37919
38730
  pulumi.set(__self__, "endpoint_suffix", endpoint_suffix)
37920
- if indices is not None:
37921
- pulumi.set(__self__, "indices", indices)
38731
+ if include_aliases is not None:
38732
+ pulumi.set(__self__, "include_aliases", include_aliases)
37922
38733
  if key is not None:
37923
38734
  pulumi.set(__self__, "key", key)
38735
+ if restore_global_state is not None:
38736
+ pulumi.set(__self__, "restore_global_state", restore_global_state)
37924
38737
  if sas_token is not None:
37925
38738
  pulumi.set(__self__, "sas_token", sas_token)
37926
38739
 
@@ -37928,7 +38741,7 @@ class GetOpenSearchOpensearchUserConfigAzureMigrationResult(dict):
37928
38741
  @pulumi.getter
37929
38742
  def account(self) -> str:
37930
38743
  """
37931
- Azure account name.
38744
+ Account name.
37932
38745
  """
37933
38746
  return pulumi.get(self, "account")
37934
38747
 
@@ -37948,6 +38761,14 @@ class GetOpenSearchOpensearchUserConfigAzureMigrationResult(dict):
37948
38761
  """
37949
38762
  return pulumi.get(self, "container")
37950
38763
 
38764
+ @property
38765
+ @pulumi.getter
38766
+ def indices(self) -> str:
38767
+ """
38768
+ A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. Example: `metrics*,logs*,data-20240823`.
38769
+ """
38770
+ return pulumi.get(self, "indices")
38771
+
37951
38772
  @property
37952
38773
  @pulumi.getter(name="snapshotName")
37953
38774
  def snapshot_name(self) -> str:
@@ -37981,12 +38802,12 @@ class GetOpenSearchOpensearchUserConfigAzureMigrationResult(dict):
37981
38802
  return pulumi.get(self, "endpoint_suffix")
37982
38803
 
37983
38804
  @property
37984
- @pulumi.getter
37985
- def indices(self) -> Optional[str]:
38805
+ @pulumi.getter(name="includeAliases")
38806
+ def include_aliases(self) -> Optional[bool]:
37986
38807
  """
37987
- A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. By default, a restore operation includes all data streams and indices in the snapshot. If this argument is provided, the restore operation only includes the data streams and indices that you specify. Example: `metrics*,logs*,data-20240823`.
38808
+ Whether to restore aliases alongside their associated indexes. Default is true.
37988
38809
  """
37989
- return pulumi.get(self, "indices")
38810
+ return pulumi.get(self, "include_aliases")
37990
38811
 
37991
38812
  @property
37992
38813
  @pulumi.getter
@@ -37996,6 +38817,14 @@ class GetOpenSearchOpensearchUserConfigAzureMigrationResult(dict):
37996
38817
  """
37997
38818
  return pulumi.get(self, "key")
37998
38819
 
38820
+ @property
38821
+ @pulumi.getter(name="restoreGlobalState")
38822
+ def restore_global_state(self) -> Optional[bool]:
38823
+ """
38824
+ If true, restore the cluster state. Defaults to false.
38825
+ """
38826
+ return pulumi.get(self, "restore_global_state")
38827
+
37999
38828
  @property
38000
38829
  @pulumi.getter(name="sasToken")
38001
38830
  def sas_token(self) -> Optional[str]:
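
In this release `indices` becomes a required field of the Azure snapshot migration block, and `include_aliases` and `restore_global_state` are added as optional toggles; the GCS and S3 migration blocks further down gain the same three fields. A hedged sketch of the resource-side shape, assuming `aiven.OpenSearch` exposes a matching `azure_migration` block under `opensearch_user_config`; the storage account, container, snapshot and key values are placeholders:

import pulumi_aiven as aiven

opensearch = aiven.OpenSearch(
    "example-opensearch",
    project="my-project",                   # placeholder project
    cloud_name="azure-westeurope",
    plan="business-4",                      # placeholder plan
    service_name="example-opensearch",
    opensearch_user_config={
        "azure_migration": {
            "account": "mystorageaccount",              # placeholder Azure storage account
            "container": "snapshots",
            "base_path": "opensearch/snapshots",
            "snapshot_name": "snapshot-20240823",
            "indices": "metrics*,logs*,data-20240823",  # now required
            "include_aliases": True,                    # new optional field
            "restore_global_state": False,              # new optional field
            "key": "<storage-account-key>",             # placeholder secret
        },
    },
)
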
@@ -38011,29 +38840,36 @@ class GetOpenSearchOpensearchUserConfigGcsMigrationResult(dict):
38011
38840
  base_path: str,
38012
38841
  bucket: str,
38013
38842
  credentials: str,
38843
+ indices: str,
38014
38844
  snapshot_name: str,
38015
38845
  chunk_size: Optional[str] = None,
38016
38846
  compress: Optional[bool] = None,
38017
- indices: Optional[str] = None):
38847
+ include_aliases: Optional[bool] = None,
38848
+ restore_global_state: Optional[bool] = None):
38018
38849
  """
38019
38850
  :param str base_path: The path to the repository data within its container. The value of this setting should not start or end with a /.
38020
38851
  :param str bucket: The path to the repository data within its container.
38021
38852
  :param str credentials: Google Cloud Storage credentials file content.
38853
+ :param str indices: A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. Example: `metrics*,logs*,data-20240823`.
38022
38854
  :param str snapshot_name: The snapshot name to restore from.
38023
38855
  :param str chunk_size: Big files can be broken down into chunks during snapshotting if needed. Should be the same as for the 3rd party repository.
38024
38856
  :param bool compress: When set to true metadata files are stored in compressed format.
38025
- :param str indices: A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. By default, a restore operation includes all data streams and indices in the snapshot. If this argument is provided, the restore operation only includes the data streams and indices that you specify. Example: `metrics*,logs*,data-20240823`.
38857
+ :param bool include_aliases: Whether to restore aliases alongside their associated indexes. Default is true.
38858
+ :param bool restore_global_state: If true, restore the cluster state. Defaults to false.
38026
38859
  """
38027
38860
  pulumi.set(__self__, "base_path", base_path)
38028
38861
  pulumi.set(__self__, "bucket", bucket)
38029
38862
  pulumi.set(__self__, "credentials", credentials)
38863
+ pulumi.set(__self__, "indices", indices)
38030
38864
  pulumi.set(__self__, "snapshot_name", snapshot_name)
38031
38865
  if chunk_size is not None:
38032
38866
  pulumi.set(__self__, "chunk_size", chunk_size)
38033
38867
  if compress is not None:
38034
38868
  pulumi.set(__self__, "compress", compress)
38035
- if indices is not None:
38036
- pulumi.set(__self__, "indices", indices)
38869
+ if include_aliases is not None:
38870
+ pulumi.set(__self__, "include_aliases", include_aliases)
38871
+ if restore_global_state is not None:
38872
+ pulumi.set(__self__, "restore_global_state", restore_global_state)
38037
38873
 
38038
38874
  @property
38039
38875
  @pulumi.getter(name="basePath")
@@ -38059,6 +38895,14 @@ class GetOpenSearchOpensearchUserConfigGcsMigrationResult(dict):
38059
38895
  """
38060
38896
  return pulumi.get(self, "credentials")
38061
38897
 
38898
+ @property
38899
+ @pulumi.getter
38900
+ def indices(self) -> str:
38901
+ """
38902
+ A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. Example: `metrics*,logs*,data-20240823`.
38903
+ """
38904
+ return pulumi.get(self, "indices")
38905
+
38062
38906
  @property
38063
38907
  @pulumi.getter(name="snapshotName")
38064
38908
  def snapshot_name(self) -> str:
@@ -38084,12 +38928,20 @@ class GetOpenSearchOpensearchUserConfigGcsMigrationResult(dict):
38084
38928
  return pulumi.get(self, "compress")
38085
38929
 
38086
38930
  @property
38087
- @pulumi.getter
38088
- def indices(self) -> Optional[str]:
38931
+ @pulumi.getter(name="includeAliases")
38932
+ def include_aliases(self) -> Optional[bool]:
38089
38933
  """
38090
- A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. By default, a restore operation includes all data streams and indices in the snapshot. If this argument is provided, the restore operation only includes the data streams and indices that you specify. Example: `metrics*,logs*,data-20240823`.
38934
+ Whether to restore aliases alongside their associated indexes. Default is true.
38091
38935
  """
38092
- return pulumi.get(self, "indices")
38936
+ return pulumi.get(self, "include_aliases")
38937
+
38938
+ @property
38939
+ @pulumi.getter(name="restoreGlobalState")
38940
+ def restore_global_state(self) -> Optional[bool]:
38941
+ """
38942
+ If true, restore the cluster state. Defaults to false.
38943
+ """
38944
+ return pulumi.get(self, "restore_global_state")
38093
38945
 
38094
38946
 
38095
38947
  @pulumi.output_type
@@ -38456,6 +39308,7 @@ class GetOpenSearchOpensearchUserConfigOpensearchResult(dict):
38456
39308
  reindex_remote_whitelists: Optional[Sequence[str]] = None,
38457
39309
  script_max_compilations_rate: Optional[str] = None,
38458
39310
  search_backpressure: Optional['outputs.GetOpenSearchOpensearchUserConfigOpensearchSearchBackpressureResult'] = None,
39311
+ search_insights_top_queries: Optional['outputs.GetOpenSearchOpensearchUserConfigOpensearchSearchInsightsTopQueriesResult'] = None,
38459
39312
  search_max_buckets: Optional[int] = None,
38460
39313
  shard_indexing_pressure: Optional['outputs.GetOpenSearchOpensearchUserConfigOpensearchShardIndexingPressureResult'] = None,
38461
39314
  thread_pool_analyze_queue_size: Optional[int] = None,
@@ -38583,6 +39436,8 @@ class GetOpenSearchOpensearchUserConfigOpensearchResult(dict):
38583
39436
  pulumi.set(__self__, "script_max_compilations_rate", script_max_compilations_rate)
38584
39437
  if search_backpressure is not None:
38585
39438
  pulumi.set(__self__, "search_backpressure", search_backpressure)
39439
+ if search_insights_top_queries is not None:
39440
+ pulumi.set(__self__, "search_insights_top_queries", search_insights_top_queries)
38586
39441
  if search_max_buckets is not None:
38587
39442
  pulumi.set(__self__, "search_max_buckets", search_max_buckets)
38588
39443
  if shard_indexing_pressure is not None:
@@ -38874,6 +39729,11 @@ class GetOpenSearchOpensearchUserConfigOpensearchResult(dict):
38874
39729
  """
38875
39730
  return pulumi.get(self, "search_backpressure")
38876
39731
 
39732
+ @property
39733
+ @pulumi.getter(name="searchInsightsTopQueries")
39734
+ def search_insights_top_queries(self) -> Optional['outputs.GetOpenSearchOpensearchUserConfigOpensearchSearchInsightsTopQueriesResult']:
39735
+ return pulumi.get(self, "search_insights_top_queries")
39736
+
38877
39737
  @property
38878
39738
  @pulumi.getter(name="searchMaxBuckets")
38879
39739
  def search_max_buckets(self) -> Optional[int]:
@@ -39227,7 +40087,7 @@ class GetOpenSearchOpensearchUserConfigOpensearchSearchBackpressureResult(dict):
39227
40087
  search_shard_task: Optional['outputs.GetOpenSearchOpensearchUserConfigOpensearchSearchBackpressureSearchShardTaskResult'] = None,
39228
40088
  search_task: Optional['outputs.GetOpenSearchOpensearchUserConfigOpensearchSearchBackpressureSearchTaskResult'] = None):
39229
40089
  """
39230
- :param str mode: Enum: `monitor_only`, `enforced`, `disabled`. The search backpressure mode. Valid values are monitor_only, enforced, or disabled. Default is monitor_only.
40090
+ :param str mode: Enum: `disabled`, `enforced`, `monitor_only`. The search backpressure mode. Valid values are monitor_only, enforced, or disabled. Default is monitor_only.
39231
40091
  :param 'GetOpenSearchOpensearchUserConfigOpensearchSearchBackpressureNodeDuressArgs' node_duress: Node duress settings
39232
40092
  :param 'GetOpenSearchOpensearchUserConfigOpensearchSearchBackpressureSearchShardTaskArgs' search_shard_task: Search shard settings
39233
40093
  :param 'GetOpenSearchOpensearchUserConfigOpensearchSearchBackpressureSearchTaskArgs' search_task: Search task settings
@@ -39245,7 +40105,7 @@ class GetOpenSearchOpensearchUserConfigOpensearchSearchBackpressureResult(dict):
39245
40105
  @pulumi.getter
39246
40106
  def mode(self) -> Optional[str]:
39247
40107
  """
39248
- Enum: `monitor_only`, `enforced`, `disabled`. The search backpressure mode. Valid values are monitor_only, enforced, or disabled. Default is monitor_only.
40108
+ Enum: `disabled`, `enforced`, `monitor_only`. The search backpressure mode. Valid values are monitor_only, enforced, or disabled. Default is monitor_only.
39249
40109
  """
39250
40110
  return pulumi.get(self, "mode")
39251
40111
 
@@ -39547,6 +40407,178 @@ class GetOpenSearchOpensearchUserConfigOpensearchSearchBackpressureSearchTaskRes
39547
40407
  return pulumi.get(self, "total_heap_percent_threshold")
39548
40408
 
39549
40409
 
40410
+ @pulumi.output_type
40411
+ class GetOpenSearchOpensearchUserConfigOpensearchSearchInsightsTopQueriesResult(dict):
40412
+ def __init__(__self__, *,
40413
+ cpu: Optional['outputs.GetOpenSearchOpensearchUserConfigOpensearchSearchInsightsTopQueriesCpuResult'] = None,
40414
+ latency: Optional['outputs.GetOpenSearchOpensearchUserConfigOpensearchSearchInsightsTopQueriesLatencyResult'] = None,
40415
+ memory: Optional['outputs.GetOpenSearchOpensearchUserConfigOpensearchSearchInsightsTopQueriesMemoryResult'] = None):
40416
+ """
40417
+ :param 'GetOpenSearchOpensearchUserConfigOpensearchSearchInsightsTopQueriesCpuArgs' cpu: Top N queries monitoring by CPU
40418
+ :param 'GetOpenSearchOpensearchUserConfigOpensearchSearchInsightsTopQueriesLatencyArgs' latency: Top N queries monitoring by latency
40419
+ :param 'GetOpenSearchOpensearchUserConfigOpensearchSearchInsightsTopQueriesMemoryArgs' memory: Top N queries monitoring by memory
40420
+ """
40421
+ if cpu is not None:
40422
+ pulumi.set(__self__, "cpu", cpu)
40423
+ if latency is not None:
40424
+ pulumi.set(__self__, "latency", latency)
40425
+ if memory is not None:
40426
+ pulumi.set(__self__, "memory", memory)
40427
+
40428
+ @property
40429
+ @pulumi.getter
40430
+ def cpu(self) -> Optional['outputs.GetOpenSearchOpensearchUserConfigOpensearchSearchInsightsTopQueriesCpuResult']:
40431
+ """
40432
+ Top N queries monitoring by CPU
40433
+ """
40434
+ return pulumi.get(self, "cpu")
40435
+
40436
+ @property
40437
+ @pulumi.getter
40438
+ def latency(self) -> Optional['outputs.GetOpenSearchOpensearchUserConfigOpensearchSearchInsightsTopQueriesLatencyResult']:
40439
+ """
40440
+ Top N queries monitoring by latency
40441
+ """
40442
+ return pulumi.get(self, "latency")
40443
+
40444
+ @property
40445
+ @pulumi.getter
40446
+ def memory(self) -> Optional['outputs.GetOpenSearchOpensearchUserConfigOpensearchSearchInsightsTopQueriesMemoryResult']:
40447
+ """
40448
+ Top N queries monitoring by memory
40449
+ """
40450
+ return pulumi.get(self, "memory")
40451
+
40452
+
40453
+ @pulumi.output_type
40454
+ class GetOpenSearchOpensearchUserConfigOpensearchSearchInsightsTopQueriesCpuResult(dict):
40455
+ def __init__(__self__, *,
40456
+ enabled: Optional[bool] = None,
40457
+ top_n_size: Optional[int] = None,
40458
+ window_size: Optional[str] = None):
40459
+ """
40460
+ :param bool enabled: Enable or disable top N query monitoring by the metric. Default: `false`.
40461
+ :param int top_n_size: Specify the value of N for the top N queries by the metric.
40462
+ :param str window_size: The window size of the top N queries by the metric.
40463
+ """
40464
+ if enabled is not None:
40465
+ pulumi.set(__self__, "enabled", enabled)
40466
+ if top_n_size is not None:
40467
+ pulumi.set(__self__, "top_n_size", top_n_size)
40468
+ if window_size is not None:
40469
+ pulumi.set(__self__, "window_size", window_size)
40470
+
40471
+ @property
40472
+ @pulumi.getter
40473
+ def enabled(self) -> Optional[bool]:
40474
+ """
40475
+ Enable or disable top N query monitoring by the metric. Default: `false`.
40476
+ """
40477
+ return pulumi.get(self, "enabled")
40478
+
40479
+ @property
40480
+ @pulumi.getter(name="topNSize")
40481
+ def top_n_size(self) -> Optional[int]:
40482
+ """
40483
+ Specify the value of N for the top N queries by the metric.
40484
+ """
40485
+ return pulumi.get(self, "top_n_size")
40486
+
40487
+ @property
40488
+ @pulumi.getter(name="windowSize")
40489
+ def window_size(self) -> Optional[str]:
40490
+ """
40491
+ The window size of the top N queries by the metric.
40492
+ """
40493
+ return pulumi.get(self, "window_size")
40494
+
40495
+
40496
+ @pulumi.output_type
40497
+ class GetOpenSearchOpensearchUserConfigOpensearchSearchInsightsTopQueriesLatencyResult(dict):
40498
+ def __init__(__self__, *,
40499
+ enabled: Optional[bool] = None,
40500
+ top_n_size: Optional[int] = None,
40501
+ window_size: Optional[str] = None):
40502
+ """
40503
+ :param bool enabled: Enable or disable top N query monitoring by the metric. Default: `false`.
40504
+ :param int top_n_size: Specify the value of N for the top N queries by the metric.
40505
+ :param str window_size: The window size of the top N queries by the metric.
40506
+ """
40507
+ if enabled is not None:
40508
+ pulumi.set(__self__, "enabled", enabled)
40509
+ if top_n_size is not None:
40510
+ pulumi.set(__self__, "top_n_size", top_n_size)
40511
+ if window_size is not None:
40512
+ pulumi.set(__self__, "window_size", window_size)
40513
+
40514
+ @property
40515
+ @pulumi.getter
40516
+ def enabled(self) -> Optional[bool]:
40517
+ """
40518
+ Enable or disable top N query monitoring by the metric. Default: `false`.
40519
+ """
40520
+ return pulumi.get(self, "enabled")
40521
+
40522
+ @property
40523
+ @pulumi.getter(name="topNSize")
40524
+ def top_n_size(self) -> Optional[int]:
40525
+ """
40526
+ Specify the value of N for the top N queries by the metric.
40527
+ """
40528
+ return pulumi.get(self, "top_n_size")
40529
+
40530
+ @property
40531
+ @pulumi.getter(name="windowSize")
40532
+ def window_size(self) -> Optional[str]:
40533
+ """
40534
+ The window size of the top N queries by the metric.
40535
+ """
40536
+ return pulumi.get(self, "window_size")
40537
+
40538
+
40539
+ @pulumi.output_type
40540
+ class GetOpenSearchOpensearchUserConfigOpensearchSearchInsightsTopQueriesMemoryResult(dict):
40541
+ def __init__(__self__, *,
40542
+ enabled: Optional[bool] = None,
40543
+ top_n_size: Optional[int] = None,
40544
+ window_size: Optional[str] = None):
40545
+ """
40546
+ :param bool enabled: Enable or disable top N query monitoring by the metric. Default: `false`.
40547
+ :param int top_n_size: Specify the value of N for the top N queries by the metric.
40548
+ :param str window_size: The window size of the top N queries by the metric.
40549
+ """
40550
+ if enabled is not None:
40551
+ pulumi.set(__self__, "enabled", enabled)
40552
+ if top_n_size is not None:
40553
+ pulumi.set(__self__, "top_n_size", top_n_size)
40554
+ if window_size is not None:
40555
+ pulumi.set(__self__, "window_size", window_size)
40556
+
40557
+ @property
40558
+ @pulumi.getter
40559
+ def enabled(self) -> Optional[bool]:
40560
+ """
40561
+ Enable or disable top N query monitoring by the metric. Default: `false`.
40562
+ """
40563
+ return pulumi.get(self, "enabled")
40564
+
40565
+ @property
40566
+ @pulumi.getter(name="topNSize")
40567
+ def top_n_size(self) -> Optional[int]:
40568
+ """
40569
+ Specify the value of N for the top N queries by the metric.
40570
+ """
40571
+ return pulumi.get(self, "top_n_size")
40572
+
40573
+ @property
40574
+ @pulumi.getter(name="windowSize")
40575
+ def window_size(self) -> Optional[str]:
40576
+ """
40577
+ The window size of the top N queries by the metric.
40578
+ """
40579
+ return pulumi.get(self, "window_size")
40580
+
40581
+
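
The new `search_insights_top_queries` block enables OpenSearch top-N query monitoring per metric (CPU, latency, memory), each accepting `enabled`, `top_n_size` and `window_size`. A minimal sketch, assuming the resource-side `opensearch` user config accepts the same nested structure; the window size value is illustrative:

import pulumi_aiven as aiven

opensearch = aiven.OpenSearch(
    "insights-opensearch",
    project="my-project",               # placeholder project
    cloud_name="aws-eu-west-1",
    plan="startup-4",                   # placeholder plan
    service_name="insights-opensearch",
    opensearch_user_config={
        "opensearch": {
            "search_insights_top_queries": {
                "latency": {"enabled": True, "top_n_size": 10, "window_size": "1h"},
                "cpu": {"enabled": True, "top_n_size": 10, "window_size": "1h"},
                "memory": {"enabled": False},
            },
        },
    },
)
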
39550
40582
  @pulumi.output_type
39551
40583
  class GetOpenSearchOpensearchUserConfigOpensearchShardIndexingPressureResult(dict):
39552
40584
  def __init__(__self__, *,
@@ -39875,30 +40907,35 @@ class GetOpenSearchOpensearchUserConfigS3MigrationResult(dict):
39875
40907
  access_key: str,
39876
40908
  base_path: str,
39877
40909
  bucket: str,
40910
+ indices: str,
39878
40911
  region: str,
39879
40912
  secret_key: str,
39880
40913
  snapshot_name: str,
39881
40914
  chunk_size: Optional[str] = None,
39882
40915
  compress: Optional[bool] = None,
39883
40916
  endpoint: Optional[str] = None,
39884
- indices: Optional[str] = None,
40917
+ include_aliases: Optional[bool] = None,
40918
+ restore_global_state: Optional[bool] = None,
39885
40919
  server_side_encryption: Optional[bool] = None):
39886
40920
  """
39887
40921
  :param str access_key: AWS Access key.
39888
40922
  :param str base_path: The path to the repository data within its container. The value of this setting should not start or end with a /.
39889
40923
  :param str bucket: S3 bucket name.
40924
+ :param str indices: A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. Example: `metrics*,logs*,data-20240823`.
39890
40925
  :param str region: S3 region.
39891
40926
  :param str secret_key: AWS secret key.
39892
40927
  :param str snapshot_name: The snapshot name to restore from.
39893
40928
  :param str chunk_size: Big files can be broken down into chunks during snapshotting if needed. Should be the same as for the 3rd party repository.
39894
40929
  :param bool compress: When set to true metadata files are stored in compressed format.
39895
40930
  :param str endpoint: The S3 service endpoint to connect to. If you are using an S3-compatible service then you should set this to the service’s endpoint.
39896
- :param str indices: A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. By default, a restore operation includes all data streams and indices in the snapshot. If this argument is provided, the restore operation only includes the data streams and indices that you specify. Example: `metrics*,logs*,data-20240823`.
40931
+ :param bool include_aliases: Whether to restore aliases alongside their associated indexes. Default is true.
40932
+ :param bool restore_global_state: If true, restore the cluster state. Defaults to false.
39897
40933
  :param bool server_side_encryption: When set to true files are encrypted on server side.
39898
40934
  """
39899
40935
  pulumi.set(__self__, "access_key", access_key)
39900
40936
  pulumi.set(__self__, "base_path", base_path)
39901
40937
  pulumi.set(__self__, "bucket", bucket)
40938
+ pulumi.set(__self__, "indices", indices)
39902
40939
  pulumi.set(__self__, "region", region)
39903
40940
  pulumi.set(__self__, "secret_key", secret_key)
39904
40941
  pulumi.set(__self__, "snapshot_name", snapshot_name)
@@ -39908,8 +40945,10 @@ class GetOpenSearchOpensearchUserConfigS3MigrationResult(dict):
39908
40945
  pulumi.set(__self__, "compress", compress)
39909
40946
  if endpoint is not None:
39910
40947
  pulumi.set(__self__, "endpoint", endpoint)
39911
- if indices is not None:
39912
- pulumi.set(__self__, "indices", indices)
40948
+ if include_aliases is not None:
40949
+ pulumi.set(__self__, "include_aliases", include_aliases)
40950
+ if restore_global_state is not None:
40951
+ pulumi.set(__self__, "restore_global_state", restore_global_state)
39913
40952
  if server_side_encryption is not None:
39914
40953
  pulumi.set(__self__, "server_side_encryption", server_side_encryption)
39915
40954
 
@@ -39937,6 +40976,14 @@ class GetOpenSearchOpensearchUserConfigS3MigrationResult(dict):
39937
40976
  """
39938
40977
  return pulumi.get(self, "bucket")
39939
40978
 
40979
+ @property
40980
+ @pulumi.getter
40981
+ def indices(self) -> str:
40982
+ """
40983
+ A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. Example: `metrics*,logs*,data-20240823`.
40984
+ """
40985
+ return pulumi.get(self, "indices")
40986
+
39940
40987
  @property
39941
40988
  @pulumi.getter
39942
40989
  def region(self) -> str:
@@ -39986,12 +41033,20 @@ class GetOpenSearchOpensearchUserConfigS3MigrationResult(dict):
39986
41033
  return pulumi.get(self, "endpoint")
39987
41034
 
39988
41035
  @property
39989
- @pulumi.getter
39990
- def indices(self) -> Optional[str]:
41036
+ @pulumi.getter(name="includeAliases")
41037
+ def include_aliases(self) -> Optional[bool]:
39991
41038
  """
39992
- A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. By default, a restore operation includes all data streams and indices in the snapshot. If this argument is provided, the restore operation only includes the data streams and indices that you specify. Example: `metrics*,logs*,data-20240823`.
41039
+ Whether to restore aliases alongside their associated indexes. Default is true.
39993
41040
  """
39994
- return pulumi.get(self, "indices")
41041
+ return pulumi.get(self, "include_aliases")
41042
+
41043
+ @property
41044
+ @pulumi.getter(name="restoreGlobalState")
41045
+ def restore_global_state(self) -> Optional[bool]:
41046
+ """
41047
+ If true, restore the cluster state. Defaults to false.
41048
+ """
41049
+ return pulumi.get(self, "restore_global_state")
39995
41050
 
39996
41051
  @property
39997
41052
  @pulumi.getter(name="serverSideEncryption")
@@ -40751,7 +41806,7 @@ class GetPgPgUserConfigResult(dict):
40751
41806
  :param str service_to_fork_from: Name of another service to fork from. This has effect only when a new service is being created. Example: `anotherservicename`.
40752
41807
  :param float shared_buffers_percentage: Percentage of total RAM that the database server uses for shared memory buffers. Valid range is 20-60 (float), which corresponds to 20% - 60%. This setting adjusts the shared_buffers configuration value. Example: `41.5`.
40753
41808
  :param bool static_ips: Use static public IP addresses.
40754
- :param str synchronous_replication: Enum: `quorum`, `off`. Synchronous replication type. Note that the service plan also needs to support synchronous replication.
41809
+ :param str synchronous_replication: Enum: `off`, `quorum`. Synchronous replication type. Note that the service plan also needs to support synchronous replication.
40755
41810
  :param 'GetPgPgUserConfigTimescaledbArgs' timescaledb: System-wide settings for the timescaledb extension
40756
41811
  :param str variant: Enum: `aiven`, `timescale`. Variant of the PostgreSQL service, may affect the features that are exposed by default.
40757
41812
  :param int work_mem: Sets the maximum amount of memory to be used by a query operation (such as a sort or hash table) before writing to temporary disk files, in MB. Default is 1MB + 0.075% of total RAM (up to 32MB). Example: `4`.
@@ -41053,7 +42108,7 @@ class GetPgPgUserConfigResult(dict):
41053
42108
  @pulumi.getter(name="synchronousReplication")
41054
42109
  def synchronous_replication(self) -> Optional[str]:
41055
42110
  """
41056
- Enum: `quorum`, `off`. Synchronous replication type. Note that the service plan also needs to support synchronous replication.
42111
+ Enum: `off`, `quorum`. Synchronous replication type. Note that the service plan also needs to support synchronous replication.
41057
42112
  """
41058
42113
  return pulumi.get(self, "synchronous_replication")
41059
42114
 
@@ -41296,8 +42351,8 @@ class GetPgPgUserConfigPgResult(dict):
41296
42351
  :param int idle_in_transaction_session_timeout: Time out sessions with open transactions after this number of milliseconds.
41297
42352
  :param bool jit: Controls system-wide use of Just-in-Time Compilation (JIT).
41298
42353
  :param int log_autovacuum_min_duration: Causes each action executed by autovacuum to be logged if it ran for at least the specified number of milliseconds. Setting this to zero logs all autovacuum actions. Minus-one (the default) disables logging autovacuum actions.
41299
- :param str log_error_verbosity: Enum: `TERSE`, `DEFAULT`, `VERBOSE`. Controls the amount of detail written in the server log for each message that is logged.
41300
- :param str log_line_prefix: Enum: `'pid=%p,user=%u,db=%d,app=%a,client=%h '`, `'%t [%p]: [%l-1] user=%u,db=%d,app=%a,client=%h '`, `'%m [%p] %q[user=%u,db=%d,app=%a] '`, `'pid=%p,user=%u,db=%d,app=%a,client=%h,txid=%x,qid=%Q '`. Choose from one of the available log formats.
42354
+ :param str log_error_verbosity: Enum: `DEFAULT`, `TERSE`, `VERBOSE`. Controls the amount of detail written in the server log for each message that is logged.
42355
+ :param str log_line_prefix: Enum: `'%m [%p] %q[user=%u,db=%d,app=%a] '`, `'%t [%p]: [%l-1] user=%u,db=%d,app=%a,client=%h '`, `'pid=%p,user=%u,db=%d,app=%a,client=%h '`, `'pid=%p,user=%u,db=%d,app=%a,client=%h,txid=%x,qid=%Q '`. Choose from one of the available log formats.
41301
42356
  :param int log_min_duration_statement: Log statements that take more than this number of milliseconds to run, -1 disables.
41302
42357
  :param int log_temp_files: Log statements for each temporary file created larger than this number of kilobytes, -1 disables.
41303
42358
  :param int max_files_per_process: PostgreSQL maximum number of files that can be open per process.
@@ -41318,12 +42373,12 @@ class GetPgPgUserConfigPgResult(dict):
41318
42373
  :param str pg_partman_bgw_dot_role: Controls which role to use for pg_partman's scheduled background tasks. Example: `myrolename`.
41319
42374
  :param bool pg_stat_monitor_dot_pgsm_enable_query_plan: Enables or disables query plan monitoring.
41320
42375
  :param int pg_stat_monitor_dot_pgsm_max_buckets: Sets the maximum number of buckets. Example: `10`.
41321
- :param str pg_stat_statements_dot_track: Enum: `all`, `top`, `none`. Controls which statements are counted. Specify top to track top-level statements (those issued directly by clients), all to also track nested statements (such as statements invoked within functions), or none to disable statement statistics collection. The default value is top.
42376
+ :param str pg_stat_statements_dot_track: Enum: `all`, `none`, `top`. Controls which statements are counted. Specify top to track top-level statements (those issued directly by clients), all to also track nested statements (such as statements invoked within functions), or none to disable statement statistics collection. The default value is top.
41322
42377
  :param int temp_file_limit: PostgreSQL temporary file limit in KiB, -1 for unlimited. Example: `5000000`.
41323
42378
  :param str timezone: PostgreSQL service timezone. Example: `Europe/Helsinki`.
41324
42379
  :param int track_activity_query_size: Specifies the number of bytes reserved to track the currently executing command for each active session. Example: `1024`.
41325
42380
  :param str track_commit_timestamp: Enum: `off`, `on`. Record commit time of transactions.
41326
- :param str track_functions: Enum: `all`, `pl`, `none`. Enables tracking of function call counts and time used.
42381
+ :param str track_functions: Enum: `all`, `none`, `pl`. Enables tracking of function call counts and time used.
41327
42382
  :param str track_io_timing: Enum: `off`, `on`. Enables timing of database I/O calls. This parameter is off by default, because it will repeatedly query the operating system for the current time, which may cause significant overhead on some platforms.
41328
42383
  :param int wal_sender_timeout: Terminate replication connections that are inactive for longer than this amount of time, in milliseconds. Setting this value to zero disables the timeout. Example: `60000`.
41329
42384
  :param int wal_writer_delay: WAL flush interval in milliseconds. Note that setting this value to lower than the default 200ms may negatively impact performance. Example: `50`.
@@ -41575,7 +42630,7 @@ class GetPgPgUserConfigPgResult(dict):
41575
42630
  @pulumi.getter(name="logErrorVerbosity")
41576
42631
  def log_error_verbosity(self) -> Optional[str]:
41577
42632
  """
41578
- Enum: `TERSE`, `DEFAULT`, `VERBOSE`. Controls the amount of detail written in the server log for each message that is logged.
42633
+ Enum: `DEFAULT`, `TERSE`, `VERBOSE`. Controls the amount of detail written in the server log for each message that is logged.
41579
42634
  """
41580
42635
  return pulumi.get(self, "log_error_verbosity")
41581
42636
 
@@ -41583,7 +42638,7 @@ class GetPgPgUserConfigPgResult(dict):
41583
42638
  @pulumi.getter(name="logLinePrefix")
41584
42639
  def log_line_prefix(self) -> Optional[str]:
41585
42640
  """
41586
- Enum: `'pid=%p,user=%u,db=%d,app=%a,client=%h '`, `'%t [%p]: [%l-1] user=%u,db=%d,app=%a,client=%h '`, `'%m [%p] %q[user=%u,db=%d,app=%a] '`, `'pid=%p,user=%u,db=%d,app=%a,client=%h,txid=%x,qid=%Q '`. Choose from one of the available log formats.
42641
+ Enum: `'%m [%p] %q[user=%u,db=%d,app=%a] '`, `'%t [%p]: [%l-1] user=%u,db=%d,app=%a,client=%h '`, `'pid=%p,user=%u,db=%d,app=%a,client=%h '`, `'pid=%p,user=%u,db=%d,app=%a,client=%h,txid=%x,qid=%Q '`. Choose from one of the available log formats.
41587
42642
  """
41588
42643
  return pulumi.get(self, "log_line_prefix")
41589
42644
 
@@ -41751,7 +42806,7 @@ class GetPgPgUserConfigPgResult(dict):
41751
42806
  @pulumi.getter(name="pgStatStatementsDotTrack")
41752
42807
  def pg_stat_statements_dot_track(self) -> Optional[str]:
41753
42808
  """
41754
- Enum: `all`, `top`, `none`. Controls which statements are counted. Specify top to track top-level statements (those issued directly by clients), all to also track nested statements (such as statements invoked within functions), or none to disable statement statistics collection. The default value is top.
42809
+ Enum: `all`, `none`, `top`. Controls which statements are counted. Specify top to track top-level statements (those issued directly by clients), all to also track nested statements (such as statements invoked within functions), or none to disable statement statistics collection. The default value is top.
41755
42810
  """
41756
42811
  return pulumi.get(self, "pg_stat_statements_dot_track")
41757
42812
 
@@ -41791,7 +42846,7 @@ class GetPgPgUserConfigPgResult(dict):
41791
42846
  @pulumi.getter(name="trackFunctions")
41792
42847
  def track_functions(self) -> Optional[str]:
41793
42848
  """
41794
- Enum: `all`, `pl`, `none`. Enables tracking of function call counts and time used.
42849
+ Enum: `all`, `none`, `pl`. Enables tracking of function call counts and time used.
41795
42850
  """
41796
42851
  return pulumi.get(self, "track_functions")
41797
42852
 
@@ -42097,7 +43152,7 @@ class GetPgPgUserConfigPgbouncerResult(dict):
42097
43152
  """
42098
43153
  :param int autodb_idle_timeout: If the automatically created database pools have been unused this many seconds, they are freed. If 0 then timeout is disabled. (seconds). Default: `3600`.
42099
43154
  :param int autodb_max_db_connections: Do not allow more than this many server connections per database (regardless of user). Setting it to 0 means unlimited. Example: `0`.
42100
- :param str autodb_pool_mode: Enum: `session`, `transaction`, `statement`. PGBouncer pool mode. Default: `transaction`.
43155
+ :param str autodb_pool_mode: Enum: `session`, `statement`, `transaction`. PGBouncer pool mode. Default: `transaction`.
42101
43156
  :param int autodb_pool_size: If non-zero then create automatically a pool of that size per user when a pool doesn't exist. Default: `0`.
42102
43157
  :param Sequence[str] ignore_startup_parameters: List of parameters to ignore when given in startup packet.
42103
43158
  :param int max_prepared_statements: PgBouncer tracks protocol-level named prepared statements related commands sent by the client in transaction and statement pooling modes when max_prepared_statements is set to a non-zero value. Setting it to 0 disables prepared statements. max_prepared_statements defaults to 100, and its maximum is 3000. Default: `100`.
@@ -42147,7 +43202,7 @@ class GetPgPgUserConfigPgbouncerResult(dict):
42147
43202
  @pulumi.getter(name="autodbPoolMode")
42148
43203
  def autodb_pool_mode(self) -> Optional[str]:
42149
43204
  """
42150
- Enum: `session`, `transaction`, `statement`. PGBouncer pool mode. Default: `transaction`.
43205
+ Enum: `session`, `statement`, `transaction`. PGBouncer pool mode. Default: `transaction`.
42151
43206
  """
42152
43207
  return pulumi.get(self, "autodb_pool_mode")
42153
43208
 
@@ -42673,7 +43728,7 @@ class GetRedisRedisUserConfigResult(dict):
42673
43728
  :param int redis_io_threads: Set Redis IO thread count. Changing this will cause a restart of the Redis service. Example: `1`.
42674
43729
  :param int redis_lfu_decay_time: LFU maxmemory-policy counter decay time in minutes. Default: `1`.
42675
43730
  :param int redis_lfu_log_factor: Counter logarithm factor for volatile-lfu and allkeys-lfu maxmemory-policies. Default: `10`.
42676
- :param str redis_maxmemory_policy: Enum: `noeviction`, `allkeys-lru`, `volatile-lru`, `allkeys-random`, `volatile-random`, `volatile-ttl`, `volatile-lfu`, `allkeys-lfu`. Redis maxmemory-policy. Default: `noeviction`.
43731
+ :param str redis_maxmemory_policy: Enum: `allkeys-lfu`, `allkeys-lru`, `allkeys-random`, `noeviction`, `volatile-lfu`, `volatile-lru`, `volatile-random`, `volatile-ttl`. Redis maxmemory-policy. Default: `noeviction`.
42677
43732
  :param str redis_notify_keyspace_events: Set notify-keyspace-events option.
42678
43733
  :param int redis_number_of_databases: Set number of Redis databases. Changing this will cause a restart of the Redis service. Example: `16`.
42679
43734
  :param str redis_persistence: Enum: `off`, `rdb`. When persistence is `rdb`, Redis does RDB dumps each 10 minutes if any key is changed. Also RDB dumps are done according to the backup schedule for backup purposes. When persistence is `off`, no RDB dumps or backups are done, so data can be lost at any moment if the service is restarted for any reason, or if the service is powered off. Also, the service can't be forked.
@@ -42873,7 +43928,7 @@ class GetRedisRedisUserConfigResult(dict):
42873
43928
  @pulumi.getter(name="redisMaxmemoryPolicy")
42874
43929
  def redis_maxmemory_policy(self) -> Optional[str]:
42875
43930
  """
42876
- Enum: `noeviction`, `allkeys-lru`, `volatile-lru`, `allkeys-random`, `volatile-random`, `volatile-ttl`, `volatile-lfu`, `allkeys-lfu`. Redis maxmemory-policy. Default: `noeviction`.
43931
+ Enum: `allkeys-lfu`, `allkeys-lru`, `allkeys-random`, `noeviction`, `volatile-lfu`, `volatile-lru`, `volatile-random`, `volatile-ttl`. Redis maxmemory-policy. Default: `noeviction`.
42877
43932
  """
42878
43933
  return pulumi.get(self, "redis_maxmemory_policy")
42879
43934
 
@@ -43309,11 +44364,11 @@ class GetServiceIntegrationClickhouseKafkaUserConfigTableResult(dict):
43309
44364
  thread_per_consumer: Optional[bool] = None):
43310
44365
  """
43311
44366
  :param Sequence['GetServiceIntegrationClickhouseKafkaUserConfigTableColumnArgs'] columns: Table columns
43312
- :param str data_format: Enum: `Avro`, `CSV`, `JSONAsString`, `JSONCompactEachRow`, `JSONCompactStringsEachRow`, `JSONEachRow`, `JSONStringsEachRow`, `MsgPack`, `TSKV`, `TSV`, `TabSeparated`, `RawBLOB`, `AvroConfluent`, `Parquet`. Message data format. Default: `JSONEachRow`.
44367
+ :param str data_format: Enum: `Avro`, `AvroConfluent`, `CSV`, `JSONAsString`, `JSONCompactEachRow`, `JSONCompactStringsEachRow`, `JSONEachRow`, `JSONStringsEachRow`, `MsgPack`, `Parquet`, `RawBLOB`, `TSKV`, `TSV`, `TabSeparated`. Message data format. Default: `JSONEachRow`.
43313
44368
  :param str group_name: Kafka consumers group. Default: `clickhouse`.
43314
44369
  :param str name: Name of the table. Example: `events`.
43315
44370
  :param Sequence['GetServiceIntegrationClickhouseKafkaUserConfigTableTopicArgs'] topics: Kafka topics
43316
- :param str auto_offset_reset: Enum: `smallest`, `earliest`, `beginning`, `largest`, `latest`, `end`. Action to take when there is no initial offset in offset store or the desired offset is out of range. Default: `earliest`.
44371
+ :param str auto_offset_reset: Enum: `beginning`, `earliest`, `end`, `largest`, `latest`, `smallest`. Action to take when there is no initial offset in offset store or the desired offset is out of range. Default: `earliest`.
43317
44372
  :param str date_time_input_format: Enum: `basic`, `best_effort`, `best_effort_us`. Method to read DateTime from text input formats. Default: `basic`.
43318
44373
  :param str handle_error_mode: Enum: `default`, `stream`. How to handle errors for Kafka engine. Default: `default`.
43319
44374
  :param int max_block_size: Number of row collected by poll(s) for flushing data from Kafka. Default: `0`.
@@ -43362,7 +44417,7 @@ class GetServiceIntegrationClickhouseKafkaUserConfigTableResult(dict):
43362
44417
  @pulumi.getter(name="dataFormat")
43363
44418
  def data_format(self) -> str:
43364
44419
  """
43365
- Enum: `Avro`, `CSV`, `JSONAsString`, `JSONCompactEachRow`, `JSONCompactStringsEachRow`, `JSONEachRow`, `JSONStringsEachRow`, `MsgPack`, `TSKV`, `TSV`, `TabSeparated`, `RawBLOB`, `AvroConfluent`, `Parquet`. Message data format. Default: `JSONEachRow`.
44420
+ Enum: `Avro`, `AvroConfluent`, `CSV`, `JSONAsString`, `JSONCompactEachRow`, `JSONCompactStringsEachRow`, `JSONEachRow`, `JSONStringsEachRow`, `MsgPack`, `Parquet`, `RawBLOB`, `TSKV`, `TSV`, `TabSeparated`. Message data format. Default: `JSONEachRow`.
43366
44421
  """
43367
44422
  return pulumi.get(self, "data_format")
43368
44423
 
@@ -43394,7 +44449,7 @@ class GetServiceIntegrationClickhouseKafkaUserConfigTableResult(dict):
43394
44449
  @pulumi.getter(name="autoOffsetReset")
43395
44450
  def auto_offset_reset(self) -> Optional[str]:
43396
44451
  """
43397
- Enum: `smallest`, `earliest`, `beginning`, `largest`, `latest`, `end`. Action to take when there is no initial offset in offset store or the desired offset is out of range. Default: `earliest`.
44452
+ Enum: `beginning`, `earliest`, `end`, `largest`, `latest`, `smallest`. Action to take when there is no initial offset in offset store or the desired offset is out of range. Default: `earliest`.
43398
44453
  """
43399
44454
  return pulumi.get(self, "auto_offset_reset")
43400
44455
 
@@ -43823,12 +44878,60 @@ class GetServiceIntegrationDatadogUserConfigRedisResult(dict):
43823
44878
  return pulumi.get(self, "command_stats_enabled")
43824
44879
 
43825
44880
 
44881
+ @pulumi.output_type
44882
+ class GetServiceIntegrationEndpointAutoscalerUserConfigResult(dict):
44883
+ def __init__(__self__, *,
44884
+ autoscalings: Sequence['outputs.GetServiceIntegrationEndpointAutoscalerUserConfigAutoscalingResult']):
44885
+ """
44886
+ :param Sequence['GetServiceIntegrationEndpointAutoscalerUserConfigAutoscalingArgs'] autoscalings: Configure autoscaling thresholds for a service
44887
+ """
44888
+ pulumi.set(__self__, "autoscalings", autoscalings)
44889
+
44890
+ @property
44891
+ @pulumi.getter
44892
+ def autoscalings(self) -> Sequence['outputs.GetServiceIntegrationEndpointAutoscalerUserConfigAutoscalingResult']:
44893
+ """
44894
+ Configure autoscaling thresholds for a service
44895
+ """
44896
+ return pulumi.get(self, "autoscalings")
44897
+
44898
+
44899
+ @pulumi.output_type
44900
+ class GetServiceIntegrationEndpointAutoscalerUserConfigAutoscalingResult(dict):
44901
+ def __init__(__self__, *,
44902
+ cap_gb: int,
44903
+ type: str):
44904
+ """
44905
+ :param int cap_gb: The maximum total disk size (in gb) to allow autoscaler to scale up to. Example: `300`.
44906
+ :param str type: Enum: `autoscale_disk`. Type of autoscale event.
44907
+ """
44908
+ pulumi.set(__self__, "cap_gb", cap_gb)
44909
+ pulumi.set(__self__, "type", type)
44910
+
44911
+ @property
44912
+ @pulumi.getter(name="capGb")
44913
+ def cap_gb(self) -> int:
44914
+ """
44915
+ The maximum total disk size (in gb) to allow autoscaler to scale up to. Example: `300`.
44916
+ """
44917
+ return pulumi.get(self, "cap_gb")
44918
+
44919
+ @property
44920
+ @pulumi.getter
44921
+ def type(self) -> str:
44922
+ """
44923
+ Enum: `autoscale_disk`. Type of autoscale event.
44924
+ """
44925
+ return pulumi.get(self, "type")
44926
+
44927
+
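
The new autoscaler endpoint config is a list of autoscaling rules, each with a `cap_gb` ceiling and a `type` (currently only `autoscale_disk`). A hedged sketch, assuming the 6.29.0 resource side exposes a matching `autoscaler_user_config` on `aiven.ServiceIntegrationEndpoint` and accepts `autoscaler` as the endpoint type; the project name is a placeholder:

import pulumi_aiven as aiven

autoscaler = aiven.ServiceIntegrationEndpoint(
    "disk-autoscaler",
    project="my-project",                # placeholder project
    endpoint_name="disk-autoscaler",
    endpoint_type="autoscaler",          # assumed endpoint type string
    autoscaler_user_config={
        "autoscalings": [{
            "type": "autoscale_disk",
            "cap_gb": 300,               # let the autoscaler grow disk to at most 300 GB
        }],
    },
)
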
43826
44928
  @pulumi.output_type
43827
44929
  class GetServiceIntegrationEndpointDatadogUserConfigResult(dict):
43828
44930
  def __init__(__self__, *,
43829
44931
  datadog_api_key: str,
43830
44932
  datadog_tags: Optional[Sequence['outputs.GetServiceIntegrationEndpointDatadogUserConfigDatadogTagResult']] = None,
43831
44933
  disable_consumer_stats: Optional[bool] = None,
44934
+ extra_tags_prefix: Optional[str] = None,
43832
44935
  kafka_consumer_check_instances: Optional[int] = None,
43833
44936
  kafka_consumer_stats_timeout: Optional[int] = None,
43834
44937
  max_partition_contexts: Optional[int] = None,
@@ -43837,16 +44940,19 @@ class GetServiceIntegrationEndpointDatadogUserConfigResult(dict):
43837
44940
  :param str datadog_api_key: Datadog API key. Example: `848f30907c15c55d601fe45487cce9b6`.
43838
44941
  :param Sequence['GetServiceIntegrationEndpointDatadogUserConfigDatadogTagArgs'] datadog_tags: Custom tags provided by user
43839
44942
  :param bool disable_consumer_stats: Disable consumer group metrics.
44943
+ :param str extra_tags_prefix: Extra tags prefix. Defaults to aiven.
43840
44944
  :param int kafka_consumer_check_instances: Number of separate instances to fetch kafka consumer statistics with. Example: `8`.
43841
44945
  :param int kafka_consumer_stats_timeout: Number of seconds that datadog will wait to get consumer statistics from brokers. Example: `60`.
43842
44946
  :param int max_partition_contexts: Maximum number of partition contexts to send. Example: `32000`.
43843
- :param str site: Enum: `datadoghq.com`, `datadoghq.eu`, `us3.datadoghq.com`, `us5.datadoghq.com`, `ddog-gov.com`, `ap1.datadoghq.com`. Datadog intake site. Defaults to datadoghq.com.
44947
+ :param str site: Enum: `ap1.datadoghq.com`, `datadoghq.com`, `datadoghq.eu`, `ddog-gov.com`, `us3.datadoghq.com`, `us5.datadoghq.com`. Datadog intake site. Defaults to datadoghq.com.
43844
44948
  """
43845
44949
  pulumi.set(__self__, "datadog_api_key", datadog_api_key)
43846
44950
  if datadog_tags is not None:
43847
44951
  pulumi.set(__self__, "datadog_tags", datadog_tags)
43848
44952
  if disable_consumer_stats is not None:
43849
44953
  pulumi.set(__self__, "disable_consumer_stats", disable_consumer_stats)
44954
+ if extra_tags_prefix is not None:
44955
+ pulumi.set(__self__, "extra_tags_prefix", extra_tags_prefix)
43850
44956
  if kafka_consumer_check_instances is not None:
43851
44957
  pulumi.set(__self__, "kafka_consumer_check_instances", kafka_consumer_check_instances)
43852
44958
  if kafka_consumer_stats_timeout is not None:
@@ -43880,6 +44986,14 @@ class GetServiceIntegrationEndpointDatadogUserConfigResult(dict):
43880
44986
  """
43881
44987
  return pulumi.get(self, "disable_consumer_stats")
43882
44988
 
44989
+ @property
44990
+ @pulumi.getter(name="extraTagsPrefix")
44991
+ def extra_tags_prefix(self) -> Optional[str]:
44992
+ """
44993
+ Extra tags prefix. Defaults to aiven.
44994
+ """
44995
+ return pulumi.get(self, "extra_tags_prefix")
44996
+
43883
44997
  @property
43884
44998
  @pulumi.getter(name="kafkaConsumerCheckInstances")
43885
44999
  def kafka_consumer_check_instances(self) -> Optional[int]:
@@ -43908,7 +45022,7 @@ class GetServiceIntegrationEndpointDatadogUserConfigResult(dict):
43908
45022
  @pulumi.getter
43909
45023
  def site(self) -> Optional[str]:
43910
45024
  """
43911
- Enum: `datadoghq.com`, `datadoghq.eu`, `us3.datadoghq.com`, `us5.datadoghq.com`, `ddog-gov.com`, `ap1.datadoghq.com`. Datadog intake site. Defaults to datadoghq.com.
45025
+ Enum: `ap1.datadoghq.com`, `datadoghq.com`, `datadoghq.eu`, `ddog-gov.com`, `us3.datadoghq.com`, `us5.datadoghq.com`. Datadog intake site. Defaults to datadoghq.com.
43912
45026
  """
43913
45027
  return pulumi.get(self, "site")
43914
45028
 
@@ -44291,7 +45405,7 @@ class GetServiceIntegrationEndpointExternalKafkaUserConfigResult(dict):
44291
45405
  ssl_endpoint_identification_algorithm: Optional[str] = None):
44292
45406
  """
44293
45407
  :param str bootstrap_servers: Bootstrap servers. Example: `10.0.0.1:9092,10.0.0.2:9092`.
44294
- :param str security_protocol: Enum: `PLAINTEXT`, `SSL`, `SASL_PLAINTEXT`, `SASL_SSL`. Security protocol.
45408
+ :param str security_protocol: Enum: `PLAINTEXT`, `SASL_PLAINTEXT`, `SASL_SSL`, `SSL`. Security protocol.
44295
45409
  :param str sasl_mechanism: Enum: `PLAIN`, `SCRAM-SHA-256`, `SCRAM-SHA-512`. SASL mechanism used for connections to the Kafka server.
44296
45410
  :param str sasl_plain_password: Password for SASL PLAIN mechanism in the Kafka server. Example: `admin`.
44297
45411
  :param str sasl_plain_username: Username for SASL PLAIN mechanism in the Kafka server. Example: `admin`.
@@ -44338,7 +45452,7 @@ class GetServiceIntegrationEndpointExternalKafkaUserConfigResult(dict):
44338
45452
  @pulumi.getter(name="securityProtocol")
44339
45453
  def security_protocol(self) -> str:
44340
45454
  """
44341
- Enum: `PLAINTEXT`, `SSL`, `SASL_PLAINTEXT`, `SASL_SSL`. Security protocol.
45455
+ Enum: `PLAINTEXT`, `SASL_PLAINTEXT`, `SASL_SSL`, `SSL`. Security protocol.
44342
45456
  """
44343
45457
  return pulumi.get(self, "security_protocol")
44344
45458
 
@@ -44585,7 +45699,7 @@ class GetServiceIntegrationEndpointExternalPostgresqlResult(dict):
44585
45699
  :param str ssl_client_key: Client key. Example: `-----BEGIN PRIVATE KEY-----
44586
45700
  ...
44587
45701
  -----END PRIVATE KEY-----`.
44588
- :param str ssl_mode: Enum: `disable`, `allow`, `prefer`, `require`, `verify-ca`, `verify-full`. SSL mode to use for the connection. Please note that Aiven requires TLS for all connections to external PostgreSQL services. Default: `verify-full`.
45702
+ :param str ssl_mode: Enum: `allow`, `disable`, `prefer`, `require`, `verify-ca`, `verify-full`. SSL mode to use for the connection. Please note that Aiven requires TLS for all connections to external PostgreSQL services. Default: `verify-full`.
44589
45703
  :param str ssl_root_cert: SSL Root Cert. Example: `-----BEGIN CERTIFICATE-----
44590
45704
  ...
44591
45705
  -----END CERTIFICATE-----
@@ -44672,7 +45786,7 @@ class GetServiceIntegrationEndpointExternalPostgresqlResult(dict):
44672
45786
  @pulumi.getter(name="sslMode")
44673
45787
  def ssl_mode(self) -> Optional[str]:
44674
45788
  """
44675
- Enum: `disable`, `allow`, `prefer`, `require`, `verify-ca`, `verify-full`. SSL mode to use for the connection. Please note that Aiven requires TLS for all connections to external PostgreSQL services. Default: `verify-full`.
45789
+ Enum: `allow`, `disable`, `prefer`, `require`, `verify-ca`, `verify-full`. SSL mode to use for the connection. Please note that Aiven requires TLS for all connections to external PostgreSQL services. Default: `verify-full`.
44676
45790
  """
44677
45791
  return pulumi.get(self, "ssl_mode")
44678
45792
 
@@ -44688,6 +45802,49 @@ class GetServiceIntegrationEndpointExternalPostgresqlResult(dict):
44688
45802
  return pulumi.get(self, "ssl_root_cert")
44689
45803
 
44690
45804
 
45805
+ @pulumi.output_type
45806
+ class GetServiceIntegrationEndpointExternalPrometheusUserConfigResult(dict):
45807
+ def __init__(__self__, *,
45808
+ basic_auth_password: Optional[str] = None,
45809
+ basic_auth_username: Optional[str] = None,
45810
+ service_uri: Optional[str] = None):
45811
+ """
45812
+ :param str basic_auth_password: Prometheus basic authentication password. Example: `fhyFNBjj3R`.
45813
+ :param str basic_auth_username: Prometheus basic authentication username. Example: `prom4851`.
45814
+ :param str service_uri: Prometheus enabled write endpoint. Example: `https://write.example.com/`.
45815
+ """
45816
+ if basic_auth_password is not None:
45817
+ pulumi.set(__self__, "basic_auth_password", basic_auth_password)
45818
+ if basic_auth_username is not None:
45819
+ pulumi.set(__self__, "basic_auth_username", basic_auth_username)
45820
+ if service_uri is not None:
45821
+ pulumi.set(__self__, "service_uri", service_uri)
45822
+
45823
+ @property
45824
+ @pulumi.getter(name="basicAuthPassword")
45825
+ def basic_auth_password(self) -> Optional[str]:
45826
+ """
45827
+ Prometheus basic authentication password. Example: `fhyFNBjj3R`.
45828
+ """
45829
+ return pulumi.get(self, "basic_auth_password")
45830
+
45831
+ @property
45832
+ @pulumi.getter(name="basicAuthUsername")
45833
+ def basic_auth_username(self) -> Optional[str]:
45834
+ """
45835
+ Prometheus basic authentication username. Example: `prom4851`.
45836
+ """
45837
+ return pulumi.get(self, "basic_auth_username")
45838
+
45839
+ @property
45840
+ @pulumi.getter(name="serviceUri")
45841
+ def service_uri(self) -> Optional[str]:
45842
+ """
45843
+ Prometheus enabled write endpoint. Example: `https://write.example.com/`.
45844
+ """
45845
+ return pulumi.get(self, "service_uri")
45846
+
45847
+
44691
45848
  @pulumi.output_type
44692
45849
  class GetServiceIntegrationEndpointExternalSchemaRegistryUserConfigResult(dict):
44693
45850
  def __init__(__self__, *,
@@ -44696,7 +45853,7 @@ class GetServiceIntegrationEndpointExternalSchemaRegistryUserConfigResult(dict):
44696
45853
  basic_auth_password: Optional[str] = None,
44697
45854
  basic_auth_username: Optional[str] = None):
44698
45855
  """
44699
- :param str authentication: Enum: `none`, `basic`. Authentication method.
45856
+ :param str authentication: Enum: `basic`, `none`. Authentication method.
44700
45857
  :param str url: Schema Registry URL. Example: `https://schema-registry.kafka.company.com:28419`.
44701
45858
  :param str basic_auth_password: Basic authentication password. Example: `Zm9vYg==`.
44702
45859
  :param str basic_auth_username: Basic authentication user name. Example: `avnadmin`.
@@ -44712,7 +45869,7 @@ class GetServiceIntegrationEndpointExternalSchemaRegistryUserConfigResult(dict):
44712
45869
  @pulumi.getter
44713
45870
  def authentication(self) -> str:
44714
45871
  """
44715
- Enum: `none`, `basic`. Authentication method.
45872
+ Enum: `basic`, `none`. Authentication method.
44716
45873
  """
44717
45874
  return pulumi.get(self, "authentication")
44718
45875
 
@@ -44817,7 +45974,7 @@ class GetServiceIntegrationEndpointRsyslogUserConfigResult(dict):
                  max_message_size: Optional[int] = None,
                  sd: Optional[str] = None):
         """
-        :param str format: Enum: `rfc5424`, `rfc3164`, `custom`. Message format. Default: `rfc5424`.
+        :param str format: Enum: `custom`, `rfc3164`, `rfc5424`. Message format. Default: `rfc5424`.
         :param int port: Rsyslog server port. Default: `514`.
         :param str server: Rsyslog server IP address or hostname. Example: `logs.example.com`.
         :param bool tls: Require TLS. Default: `true`.
@@ -44858,7 +46015,7 @@ class GetServiceIntegrationEndpointRsyslogUserConfigResult(dict):
     @pulumi.getter
     def format(self) -> str:
         """
-        Enum: `rfc5424`, `rfc3164`, `custom`. Message format. Default: `rfc5424`.
+        Enum: `custom`, `rfc3164`, `rfc5424`. Message format. Default: `rfc5424`.
         """
         return pulumi.get(self, "format")
 
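Likewise, the rsyslog hunks above only re-sort the documented `format` enum (`custom`, `rfc3164`, `rfc5424`; default `rfc5424`). A hedged sketch of an rsyslog endpoint using these fields (the `*Args` class name and `endpoint_type` value are assumed, the server host is the docstring's example):

```python
import pulumi_aiven as aiven

# Sketch only: class and endpoint_type names are assumed; project is a placeholder.
rsyslog_endpoint = aiven.ServiceIntegrationEndpoint(
    "rsyslog",
    project="my-project",
    endpoint_name="rsyslog-logs",
    endpoint_type="rsyslog",
    rsyslog_user_config=aiven.ServiceIntegrationEndpointRsyslogUserConfigArgs(
        server="logs.example.com",
        port=514,
        format="rfc5424",
        tls=True,
    ),
)
```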
@@ -45261,7 +46418,7 @@ class GetServiceIntegrationKafkaMirrormakerUserConfigKafkaMirrormakerResult(dict
         :param int consumer_max_poll_records: Set consumer max.poll.records. The default is 500. Example: `500`.
         :param int producer_batch_size: The batch size in bytes producer will attempt to collect before publishing to broker. Example: `1024`.
         :param int producer_buffer_memory: The amount of bytes producer can use for buffering data before publishing to broker. Example: `8388608`.
-        :param str producer_compression_type: Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
+        :param str producer_compression_type: Enum: `gzip`, `lz4`, `none`, `snappy`, `zstd`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
         :param int producer_linger_ms: The linger time (ms) for waiting new data to arrive for publishing. Example: `100`.
         :param int producer_max_request_size: The maximum request size in bytes. Example: `1048576`.
         """
@@ -45326,7 +46483,7 @@ class GetServiceIntegrationKafkaMirrormakerUserConfigKafkaMirrormakerResult(dict
     @pulumi.getter(name="producerCompressionType")
     def producer_compression_type(self) -> Optional[str]:
         """
-        Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
+        Enum: `gzip`, `lz4`, `none`, `snappy`, `zstd`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
         """
         return pulumi.get(self, "producer_compression_type")
 
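The MirrorMaker hunks above also only re-sort the `producer_compression_type` enum (`gzip`, `lz4`, `none`, `snappy`, `zstd`; `none` remains the default). A hedged sketch of setting it on a `kafka_mirrormaker` service integration, with the nested `*Args` class names inferred from the Result names in this diff and the service names used as placeholders:

```python
import pulumi_aiven as aiven

# Sketch only: integration field names and *Args classes are assumed.
mm_integration = aiven.ServiceIntegration(
    "kafka-to-mirrormaker",
    project="my-project",
    integration_type="kafka_mirrormaker",
    source_service_name="kafka-source",
    destination_service_name="mirrormaker",
    kafka_mirrormaker_user_config=aiven.ServiceIntegrationKafkaMirrormakerUserConfigArgs(
        kafka_mirrormaker=aiven.ServiceIntegrationKafkaMirrormakerUserConfigKafkaMirrormakerArgs(
            producer_compression_type="zstd",
            producer_linger_ms=100,
            producer_max_request_size=1048576,
        ),
    ),
)
```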
@@ -46095,23 +47252,35 @@ class GetThanosThanosUserConfigResult(dict):
                  ip_filter_strings: Optional[Sequence[str]] = None,
                  ip_filters: Optional[Sequence[str]] = None,
                  object_storage_usage_alert_threshold_gb: Optional[int] = None,
+                 private_access: Optional['outputs.GetThanosThanosUserConfigPrivateAccessResult'] = None,
+                 privatelink_access: Optional['outputs.GetThanosThanosUserConfigPrivatelinkAccessResult'] = None,
                  public_access: Optional['outputs.GetThanosThanosUserConfigPublicAccessResult'] = None,
                  query: Optional['outputs.GetThanosThanosUserConfigQueryResult'] = None,
                  query_frontend: Optional['outputs.GetThanosThanosUserConfigQueryFrontendResult'] = None,
+                 receiver_ingesting: Optional[Mapping[str, str]] = None,
+                 receiver_routing: Optional[Mapping[str, str]] = None,
+                 ruler: Optional[Mapping[str, str]] = None,
                  service_log: Optional[bool] = None,
-                 static_ips: Optional[bool] = None):
+                 static_ips: Optional[bool] = None,
+                 store: Optional[Mapping[str, str]] = None):
         """
-        :param 'GetThanosThanosUserConfigCompactorArgs' compactor: ThanosCompactor
+        :param 'GetThanosThanosUserConfigCompactorArgs' compactor: Configuration options for Thanos Compactor
         :param Mapping[str, str] env: Environmental variables.
         :param Sequence['GetThanosThanosUserConfigIpFilterObjectArgs'] ip_filter_objects: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`
         :param Sequence[str] ip_filter_strings: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`.
         :param Sequence[str] ip_filters: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`.
         :param int object_storage_usage_alert_threshold_gb: After exceeding the limit a service alert is going to be raised (0 means not set).
+        :param 'GetThanosThanosUserConfigPrivateAccessArgs' private_access: Allow access to selected service ports from private networks
+        :param 'GetThanosThanosUserConfigPrivatelinkAccessArgs' privatelink_access: Allow access to selected service components through Privatelink
         :param 'GetThanosThanosUserConfigPublicAccessArgs' public_access: Allow access to selected service ports from the public Internet
-        :param 'GetThanosThanosUserConfigQueryArgs' query: ThanosQuery
-        :param 'GetThanosThanosUserConfigQueryFrontendArgs' query_frontend: ThanosQueryFrontend
+        :param 'GetThanosThanosUserConfigQueryArgs' query: Configuration options for Thanos Query
+        :param 'GetThanosThanosUserConfigQueryFrontendArgs' query_frontend: Configuration options for Thanos Query Frontend
+        :param Mapping[str, str] receiver_ingesting: Common configuration options for Thanos Receive.
+        :param Mapping[str, str] receiver_routing: Configuration options for Thanos Receive Routing.
+        :param Mapping[str, str] ruler: Configuration options for Thanos Ruler.
         :param bool service_log: Store logs for the service so that they are available in the HTTP API and console.
         :param bool static_ips: Use static public IP addresses.
+        :param Mapping[str, str] store: Configuration options for Thanos Store.
         """
         if compactor is not None:
             pulumi.set(__self__, "compactor", compactor)
@@ -46125,22 +47294,34 @@ class GetThanosThanosUserConfigResult(dict):
             pulumi.set(__self__, "ip_filters", ip_filters)
         if object_storage_usage_alert_threshold_gb is not None:
             pulumi.set(__self__, "object_storage_usage_alert_threshold_gb", object_storage_usage_alert_threshold_gb)
+        if private_access is not None:
+            pulumi.set(__self__, "private_access", private_access)
+        if privatelink_access is not None:
+            pulumi.set(__self__, "privatelink_access", privatelink_access)
         if public_access is not None:
             pulumi.set(__self__, "public_access", public_access)
         if query is not None:
             pulumi.set(__self__, "query", query)
         if query_frontend is not None:
             pulumi.set(__self__, "query_frontend", query_frontend)
+        if receiver_ingesting is not None:
+            pulumi.set(__self__, "receiver_ingesting", receiver_ingesting)
+        if receiver_routing is not None:
+            pulumi.set(__self__, "receiver_routing", receiver_routing)
+        if ruler is not None:
+            pulumi.set(__self__, "ruler", ruler)
         if service_log is not None:
             pulumi.set(__self__, "service_log", service_log)
         if static_ips is not None:
             pulumi.set(__self__, "static_ips", static_ips)
+        if store is not None:
+            pulumi.set(__self__, "store", store)
 
     @property
     @pulumi.getter
     def compactor(self) -> Optional['outputs.GetThanosThanosUserConfigCompactorResult']:
         """
-        ThanosCompactor
+        Configuration options for Thanos Compactor
         """
         return pulumi.get(self, "compactor")
 
@@ -46186,6 +47367,22 @@ class GetThanosThanosUserConfigResult(dict):
         """
         return pulumi.get(self, "object_storage_usage_alert_threshold_gb")
 
+    @property
+    @pulumi.getter(name="privateAccess")
+    def private_access(self) -> Optional['outputs.GetThanosThanosUserConfigPrivateAccessResult']:
+        """
+        Allow access to selected service ports from private networks
+        """
+        return pulumi.get(self, "private_access")
+
+    @property
+    @pulumi.getter(name="privatelinkAccess")
+    def privatelink_access(self) -> Optional['outputs.GetThanosThanosUserConfigPrivatelinkAccessResult']:
+        """
+        Allow access to selected service components through Privatelink
+        """
+        return pulumi.get(self, "privatelink_access")
+
     @property
     @pulumi.getter(name="publicAccess")
     def public_access(self) -> Optional['outputs.GetThanosThanosUserConfigPublicAccessResult']:
@@ -46198,7 +47395,7 @@ class GetThanosThanosUserConfigResult(dict):
     @pulumi.getter
     def query(self) -> Optional['outputs.GetThanosThanosUserConfigQueryResult']:
         """
-        ThanosQuery
+        Configuration options for Thanos Query
         """
         return pulumi.get(self, "query")
 
@@ -46206,10 +47403,34 @@ class GetThanosThanosUserConfigResult(dict):
     @pulumi.getter(name="queryFrontend")
     def query_frontend(self) -> Optional['outputs.GetThanosThanosUserConfigQueryFrontendResult']:
         """
-        ThanosQueryFrontend
+        Configuration options for Thanos Query Frontend
         """
         return pulumi.get(self, "query_frontend")
 
+    @property
+    @pulumi.getter(name="receiverIngesting")
+    def receiver_ingesting(self) -> Optional[Mapping[str, str]]:
+        """
+        Common configuration options for Thanos Receive.
+        """
+        return pulumi.get(self, "receiver_ingesting")
+
+    @property
+    @pulumi.getter(name="receiverRouting")
+    def receiver_routing(self) -> Optional[Mapping[str, str]]:
+        """
+        Configuration options for Thanos Receive Routing.
+        """
+        return pulumi.get(self, "receiver_routing")
+
+    @property
+    @pulumi.getter
+    def ruler(self) -> Optional[Mapping[str, str]]:
+        """
+        Configuration options for Thanos Ruler.
+        """
+        return pulumi.get(self, "ruler")
+
     @property
     @pulumi.getter(name="serviceLog")
     def service_log(self) -> Optional[bool]:
@@ -46226,6 +47447,14 @@ class GetThanosThanosUserConfigResult(dict):
         """
         return pulumi.get(self, "static_ips")
 
+    @property
+    @pulumi.getter
+    def store(self) -> Optional[Mapping[str, str]]:
+        """
+        Configuration options for Thanos Store.
+        """
+        return pulumi.get(self, "store")
+
 
 @pulumi.output_type
 class GetThanosThanosUserConfigCompactorResult(dict):
@@ -46276,6 +47505,68 @@ class GetThanosThanosUserConfigIpFilterObjectResult(dict):
         return pulumi.get(self, "description")
 
 
+@pulumi.output_type
+class GetThanosThanosUserConfigPrivateAccessResult(dict):
+    def __init__(__self__, *,
+                 query_frontend: Optional[bool] = None,
+                 receiver_routing: Optional[bool] = None):
+        """
+        :param bool query_frontend: Allow clients to connect to query_frontend with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
+        :param bool receiver_routing: Allow clients to connect to receiver_routing with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
+        """
+        if query_frontend is not None:
+            pulumi.set(__self__, "query_frontend", query_frontend)
+        if receiver_routing is not None:
+            pulumi.set(__self__, "receiver_routing", receiver_routing)
+
+    @property
+    @pulumi.getter(name="queryFrontend")
+    def query_frontend(self) -> Optional[bool]:
+        """
+        Allow clients to connect to query_frontend with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
+        """
+        return pulumi.get(self, "query_frontend")
+
+    @property
+    @pulumi.getter(name="receiverRouting")
+    def receiver_routing(self) -> Optional[bool]:
+        """
+        Allow clients to connect to receiver_routing with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
+        """
+        return pulumi.get(self, "receiver_routing")
+
+
+@pulumi.output_type
+class GetThanosThanosUserConfigPrivatelinkAccessResult(dict):
+    def __init__(__self__, *,
+                 query_frontend: Optional[bool] = None,
+                 receiver_routing: Optional[bool] = None):
+        """
+        :param bool query_frontend: Enable query_frontend.
+        :param bool receiver_routing: Enable receiver_routing.
+        """
+        if query_frontend is not None:
+            pulumi.set(__self__, "query_frontend", query_frontend)
+        if receiver_routing is not None:
+            pulumi.set(__self__, "receiver_routing", receiver_routing)
+
+    @property
+    @pulumi.getter(name="queryFrontend")
+    def query_frontend(self) -> Optional[bool]:
+        """
+        Enable query_frontend.
+        """
+        return pulumi.get(self, "query_frontend")
+
+    @property
+    @pulumi.getter(name="receiverRouting")
+    def receiver_routing(self) -> Optional[bool]:
+        """
+        Enable receiver_routing.
+        """
+        return pulumi.get(self, "receiver_routing")
+
+
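The two classes added above expose, on the data-source side, the new Thanos `private_access` and `privatelink_access` blocks (each toggling `query_frontend` and `receiver_routing`), matching the new fields added to `GetThanosThanosUserConfigResult` earlier in this diff. A hedged sketch of the write-side counterpart, assuming the matching input classes were generated alongside these outputs and using placeholder project, cloud and plan values:

```python
import pulumi_aiven as aiven

# Sketch only: the *Args class names mirror the Result names in this diff;
# project, cloud_name and plan are placeholders.
thanos = aiven.Thanos(
    "metrics",
    project="my-project",
    cloud_name="google-europe-west1",
    plan="startup-4",
    service_name="thanos-metrics",
    thanos_user_config=aiven.ThanosThanosUserConfigArgs(
        private_access=aiven.ThanosThanosUserConfigPrivateAccessArgs(
            query_frontend=True,
            receiver_routing=True,
        ),
        privatelink_access=aiven.ThanosThanosUserConfigPrivatelinkAccessArgs(
            query_frontend=True,
            receiver_routing=True,
        ),
    ),
)
```

The free-form `receiver_ingesting`, `receiver_routing`, `ruler` and `store` maps added to the user config take string-to-string options whose keys are not enumerated in this diff, so they are omitted from the sketch.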
 @pulumi.output_type
 class GetThanosThanosUserConfigPublicAccessResult(dict):
     def __init__(__self__, *,
@@ -46736,7 +48027,7 @@ class GetValkeyValkeyUserConfigResult(dict):
         :param int valkey_io_threads: Set Valkey IO thread count. Changing this will cause a restart of the Valkey service. Example: `1`.
         :param int valkey_lfu_decay_time: LFU maxmemory-policy counter decay time in minutes. Default: `1`.
         :param int valkey_lfu_log_factor: Counter logarithm factor for volatile-lfu and allkeys-lfu maxmemory-policies. Default: `10`.
-        :param str valkey_maxmemory_policy: Enum: `noeviction`, `allkeys-lru`, `volatile-lru`, `allkeys-random`, `volatile-random`, `volatile-ttl`, `volatile-lfu`, `allkeys-lfu`. Valkey maxmemory-policy. Default: `noeviction`.
+        :param str valkey_maxmemory_policy: Enum: `allkeys-lfu`, `allkeys-lru`, `allkeys-random`, `noeviction`, `volatile-lfu`, `volatile-lru`, `volatile-random`, `volatile-ttl`. Valkey maxmemory-policy. Default: `noeviction`.
         :param str valkey_notify_keyspace_events: Set notify-keyspace-events option.
         :param int valkey_number_of_databases: Set number of Valkey databases. Changing this will cause a restart of the Valkey service. Example: `16`.
         :param str valkey_persistence: Enum: `off`, `rdb`. When persistence is `rdb`, Valkey does RDB dumps each 10 minutes if any key is changed. Also RDB dumps are done according to backup schedule for backup purposes. When persistence is `off`, no RDB dumps and backups are done, so data can be lost at any moment if service is restarted for any reason, or if service is powered off. Also service can't be forked.
@@ -46954,7 +48245,7 @@ class GetValkeyValkeyUserConfigResult(dict):
     @pulumi.getter(name="valkeyMaxmemoryPolicy")
     def valkey_maxmemory_policy(self) -> Optional[str]:
         """
-        Enum: `noeviction`, `allkeys-lru`, `volatile-lru`, `allkeys-random`, `volatile-random`, `volatile-ttl`, `volatile-lfu`, `allkeys-lfu`. Valkey maxmemory-policy. Default: `noeviction`.
+        Enum: `allkeys-lfu`, `allkeys-lru`, `allkeys-random`, `noeviction`, `volatile-lfu`, `volatile-lru`, `volatile-random`, `volatile-ttl`. Valkey maxmemory-policy. Default: `noeviction`.
         """
         return pulumi.get(self, "valkey_maxmemory_policy")
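The Valkey hunks above only re-sort the documented `valkey_maxmemory_policy` enum; the default is still `noeviction`. A hedged sketch of setting a non-default policy on a Valkey service (the `ValkeyValkeyUserConfigArgs` class name is assumed from the provider's naming pattern, and the project, cloud and plan values are placeholders):

```python
import pulumi_aiven as aiven

# Sketch only: class name assumed; project, cloud_name and plan are placeholders.
valkey = aiven.Valkey(
    "cache",
    project="my-project",
    cloud_name="google-europe-west1",
    plan="startup-4",
    service_name="valkey-cache",
    valkey_user_config=aiven.ValkeyValkeyUserConfigArgs(
        valkey_maxmemory_policy="allkeys-lru",
        valkey_persistence="rdb",
    ),
)
```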