pulumi-aiven 6.40.0a1750223346__py3-none-any.whl → 6.41.0a1752729091__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of pulumi-aiven has been flagged as potentially problematic; consult the registry's advisory page for details.

Files changed (47):
  1. pulumi_aiven/_inputs.py +322 -28
  2. pulumi_aiven/alloydbomni.py +8 -8
  3. pulumi_aiven/cassandra.py +8 -8
  4. pulumi_aiven/clickhouse.py +8 -8
  5. pulumi_aiven/clickhouse_database.py +0 -10
  6. pulumi_aiven/clickhouse_grant.py +0 -16
  7. pulumi_aiven/dragonfly.py +8 -8
  8. pulumi_aiven/flink.py +8 -8
  9. pulumi_aiven/get_alloydbomni.py +1 -1
  10. pulumi_aiven/get_cassanda.py +1 -1
  11. pulumi_aiven/get_cassandra.py +1 -1
  12. pulumi_aiven/get_clickhouse.py +1 -1
  13. pulumi_aiven/get_dragonfly.py +1 -1
  14. pulumi_aiven/get_flink.py +1 -1
  15. pulumi_aiven/get_grafana.py +1 -1
  16. pulumi_aiven/get_kafka.py +1 -1
  17. pulumi_aiven/get_kafka_connect.py +1 -1
  18. pulumi_aiven/get_kafka_mirror_maker.py +1 -1
  19. pulumi_aiven/get_m3_aggregator.py +1 -1
  20. pulumi_aiven/get_m3_db.py +1 -1
  21. pulumi_aiven/get_my_sql.py +1 -1
  22. pulumi_aiven/get_open_search.py +1 -1
  23. pulumi_aiven/get_organization_billing_group.py +15 -15
  24. pulumi_aiven/get_pg.py +1 -1
  25. pulumi_aiven/get_redis.py +1 -1
  26. pulumi_aiven/get_thanos.py +1 -1
  27. pulumi_aiven/get_valkey.py +1 -1
  28. pulumi_aiven/grafana.py +8 -8
  29. pulumi_aiven/influx_db.py +36 -29
  30. pulumi_aiven/kafka.py +8 -8
  31. pulumi_aiven/kafka_connect.py +8 -8
  32. pulumi_aiven/kafka_mirror_maker.py +8 -8
  33. pulumi_aiven/m3_aggregator.py +8 -8
  34. pulumi_aiven/m3_db.py +8 -8
  35. pulumi_aiven/my_sql.py +8 -8
  36. pulumi_aiven/open_search.py +8 -8
  37. pulumi_aiven/organization_billing_group.py +41 -41
  38. pulumi_aiven/outputs.py +433 -23
  39. pulumi_aiven/pg.py +8 -8
  40. pulumi_aiven/pulumi-plugin.json +1 -1
  41. pulumi_aiven/redis.py +8 -8
  42. pulumi_aiven/thanos.py +8 -8
  43. pulumi_aiven/valkey.py +8 -8
  44. {pulumi_aiven-6.40.0a1750223346.dist-info → pulumi_aiven-6.41.0a1752729091.dist-info}/METADATA +1 -1
  45. {pulumi_aiven-6.40.0a1750223346.dist-info → pulumi_aiven-6.41.0a1752729091.dist-info}/RECORD +47 -47
  46. {pulumi_aiven-6.40.0a1750223346.dist-info → pulumi_aiven-6.41.0a1752729091.dist-info}/WHEEL +0 -0
  47. {pulumi_aiven-6.40.0a1750223346.dist-info → pulumi_aiven-6.41.0a1752729091.dist-info}/top_level.txt +0 -0
pulumi_aiven/outputs.py CHANGED
@@ -214,10 +214,12 @@ __all__ = [
214
214
  'OpenSearchOpensearchUserConfigOpensearchAuthFailureListeners',
215
215
  'OpenSearchOpensearchUserConfigOpensearchAuthFailureListenersInternalAuthenticationBackendLimiting',
216
216
  'OpenSearchOpensearchUserConfigOpensearchAuthFailureListenersIpRateLimiting',
217
+ 'OpenSearchOpensearchUserConfigOpensearchClusterRemoteStore',
217
218
  'OpenSearchOpensearchUserConfigOpensearchClusterSearchRequestSlowlog',
218
219
  'OpenSearchOpensearchUserConfigOpensearchClusterSearchRequestSlowlogThreshold',
219
220
  'OpenSearchOpensearchUserConfigOpensearchDashboards',
220
221
  'OpenSearchOpensearchUserConfigOpensearchDiskWatermarks',
222
+ 'OpenSearchOpensearchUserConfigOpensearchRemoteStore',
221
223
  'OpenSearchOpensearchUserConfigOpensearchSearchBackpressure',
222
224
  'OpenSearchOpensearchUserConfigOpensearchSearchBackpressureNodeDuress',
223
225
  'OpenSearchOpensearchUserConfigOpensearchSearchBackpressureSearchShardTask',
@@ -550,10 +552,12 @@ __all__ = [
550
552
  'GetOpenSearchOpensearchUserConfigOpensearchAuthFailureListenersResult',
551
553
  'GetOpenSearchOpensearchUserConfigOpensearchAuthFailureListenersInternalAuthenticationBackendLimitingResult',
552
554
  'GetOpenSearchOpensearchUserConfigOpensearchAuthFailureListenersIpRateLimitingResult',
555
+ 'GetOpenSearchOpensearchUserConfigOpensearchClusterRemoteStoreResult',
553
556
  'GetOpenSearchOpensearchUserConfigOpensearchClusterSearchRequestSlowlogResult',
554
557
  'GetOpenSearchOpensearchUserConfigOpensearchClusterSearchRequestSlowlogThresholdResult',
555
558
  'GetOpenSearchOpensearchUserConfigOpensearchDashboardsResult',
556
559
  'GetOpenSearchOpensearchUserConfigOpensearchDiskWatermarksResult',
560
+ 'GetOpenSearchOpensearchUserConfigOpensearchRemoteStoreResult',
557
561
  'GetOpenSearchOpensearchUserConfigOpensearchSearchBackpressureResult',
558
562
  'GetOpenSearchOpensearchUserConfigOpensearchSearchBackpressureNodeDuressResult',
559
563
  'GetOpenSearchOpensearchUserConfigOpensearchSearchBackpressureSearchShardTaskResult',
@@ -3696,6 +3700,8 @@ class ClickhouseClickhouseUserConfig(dict):
3696
3700
  suggest = "backup_hour"
3697
3701
  elif key == "backupMinute":
3698
3702
  suggest = "backup_minute"
3703
+ elif key == "enableIpv6":
3704
+ suggest = "enable_ipv6"
3699
3705
  elif key == "ipFilterObjects":
3700
3706
  suggest = "ip_filter_objects"
3701
3707
  elif key == "ipFilterStrings":
@@ -3734,6 +3740,7 @@ class ClickhouseClickhouseUserConfig(dict):
3734
3740
  additional_backup_regions: Optional[builtins.str] = None,
3735
3741
  backup_hour: Optional[builtins.int] = None,
3736
3742
  backup_minute: Optional[builtins.int] = None,
3743
+ enable_ipv6: Optional[builtins.bool] = None,
3737
3744
  ip_filter_objects: Optional[Sequence['outputs.ClickhouseClickhouseUserConfigIpFilterObject']] = None,
3738
3745
  ip_filter_strings: Optional[Sequence[builtins.str]] = None,
3739
3746
  ip_filters: Optional[Sequence[builtins.str]] = None,
@@ -3749,6 +3756,7 @@ class ClickhouseClickhouseUserConfig(dict):
3749
3756
  :param builtins.str additional_backup_regions: Additional Cloud Regions for Backup Replication.
3750
3757
  :param builtins.int backup_hour: The hour of day (in UTC) when backup for the service is started. New backup is only started if previous backup has already completed. Example: `3`.
3751
3758
  :param builtins.int backup_minute: The minute of an hour when backup for the service is started. New backup is only started if previous backup has already completed. Example: `30`.
3759
+ :param builtins.bool enable_ipv6: Register AAAA DNS records for the service, and allow IPv6 packets to service ports.
3752
3760
  :param Sequence['ClickhouseClickhouseUserConfigIpFilterObjectArgs'] ip_filter_objects: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`
3753
3761
  :param Sequence[builtins.str] ip_filter_strings: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`.
3754
3762
  :param Sequence[builtins.str] ip_filters: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`.
@@ -3767,6 +3775,8 @@ class ClickhouseClickhouseUserConfig(dict):
3767
3775
  pulumi.set(__self__, "backup_hour", backup_hour)
3768
3776
  if backup_minute is not None:
3769
3777
  pulumi.set(__self__, "backup_minute", backup_minute)
3778
+ if enable_ipv6 is not None:
3779
+ pulumi.set(__self__, "enable_ipv6", enable_ipv6)
3770
3780
  if ip_filter_objects is not None:
3771
3781
  pulumi.set(__self__, "ip_filter_objects", ip_filter_objects)
3772
3782
  if ip_filter_strings is not None:
@@ -3815,6 +3825,14 @@ class ClickhouseClickhouseUserConfig(dict):
3815
3825
  """
3816
3826
  return pulumi.get(self, "backup_minute")
3817
3827
 
3828
+ @property
3829
+ @pulumi.getter(name="enableIpv6")
3830
+ def enable_ipv6(self) -> Optional[builtins.bool]:
3831
+ """
3832
+ Register AAAA DNS records for the service, and allow IPv6 packets to service ports.
3833
+ """
3834
+ return pulumi.get(self, "enable_ipv6")
3835
+
3818
3836
  @property
3819
3837
  @pulumi.getter(name="ipFilterObjects")
3820
3838
  def ip_filter_objects(self) -> Optional[Sequence['outputs.ClickhouseClickhouseUserConfigIpFilterObject']]:
@@ -6992,7 +7010,7 @@ class GrafanaGrafanaUserConfig(dict):
6992
7010
  :param Sequence[builtins.str] ip_filter_strings: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`.
6993
7011
  :param Sequence[builtins.str] ip_filters: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`.
6994
7012
  :param builtins.bool metrics_enabled: Enable Grafana's /metrics endpoint.
6995
- :param builtins.bool oauth_allow_insecure_email_lookup: Enforce user lookup based on email instead of the unique ID provided by the IdP.
7013
+ :param builtins.bool oauth_allow_insecure_email_lookup: Enforce user lookup based on email instead of the unique ID provided by the IdP. This setup introduces significant security risks, such as potential phishing, spoofing, and other data breaches.
6996
7014
  :param 'GrafanaGrafanaUserConfigPrivateAccessArgs' private_access: Allow access to selected service ports from private networks
6997
7015
  :param 'GrafanaGrafanaUserConfigPrivatelinkAccessArgs' privatelink_access: Allow access to selected service components through Privatelink
6998
7016
  :param builtins.str project_to_fork_from: Name of another project to fork a service from. This has effect only when a new service is being created. Example: `anotherprojectname`.
@@ -7334,7 +7352,7 @@ class GrafanaGrafanaUserConfig(dict):
7334
7352
  @pulumi.getter(name="oauthAllowInsecureEmailLookup")
7335
7353
  def oauth_allow_insecure_email_lookup(self) -> Optional[builtins.bool]:
7336
7354
  """
7337
- Enforce user lookup based on email instead of the unique ID provided by the IdP.
7355
+ Enforce user lookup based on email instead of the unique ID provided by the IdP. This setup introduces significant security risks, such as potential phishing, spoofing, and other data breaches.
7338
7356
  """
7339
7357
  return pulumi.get(self, "oauth_allow_insecure_email_lookup")
7340
7358
 
@@ -13811,6 +13829,8 @@ class KafkaTopicConfig(dict):
13811
13829
  suggest = "flush_ms"
13812
13830
  elif key == "indexIntervalBytes":
13813
13831
  suggest = "index_interval_bytes"
13832
+ elif key == "inklessEnable":
13833
+ suggest = "inkless_enable"
13814
13834
  elif key == "localRetentionBytes":
13815
13835
  suggest = "local_retention_bytes"
13816
13836
  elif key == "localRetentionMs":
@@ -13869,6 +13889,7 @@ class KafkaTopicConfig(dict):
13869
13889
  flush_messages: Optional[builtins.str] = None,
13870
13890
  flush_ms: Optional[builtins.str] = None,
13871
13891
  index_interval_bytes: Optional[builtins.str] = None,
13892
+ inkless_enable: Optional[builtins.bool] = None,
13872
13893
  local_retention_bytes: Optional[builtins.str] = None,
13873
13894
  local_retention_ms: Optional[builtins.str] = None,
13874
13895
  max_compaction_lag_ms: Optional[builtins.str] = None,
@@ -13897,6 +13918,7 @@ class KafkaTopicConfig(dict):
13897
13918
  :param builtins.str flush_messages: This setting allows specifying an interval at which we will force an fsync of data written to the log. For example if this was set to 1 we would fsync after every message; if it were 5 we would fsync after every five messages. In general we recommend you not set this and use replication for durability and allow the operating system's background flush capabilities as it is more efficient.
13898
13919
  :param builtins.str flush_ms: This setting allows specifying a time interval at which we will force an fsync of data written to the log. For example if this was set to 1000 we would fsync after 1000 ms had passed. In general we recommend you not set this and use replication for durability and allow the operating system's background flush capabilities as it is more efficient.
13899
13920
  :param builtins.str index_interval_bytes: This setting controls how frequently Kafka adds an index entry to its offset index. The default setting ensures that we index a message roughly every 4096 bytes. More indexing allows reads to jump closer to the exact position in the log but makes the index larger. You probably don't need to change this.
13921
+ :param builtins.bool inkless_enable: Indicates whether inkless should be enabled. This is only available for BYOC services with Inkless feature enabled.
13900
13922
  :param builtins.str local_retention_bytes: This configuration controls the maximum bytes tiered storage will retain segment files locally before it will discard old log segments to free up space. If set to -2, the limit is equal to overall retention time. If set to -1, no limit is applied but it's possible only if overall retention is also -1.
13901
13923
  :param builtins.str local_retention_ms: This configuration controls the maximum time tiered storage will retain segment files locally before it will discard old log segments to free up space. If set to -2, the time limit is equal to overall retention time. If set to -1, no time limit is applied but it's possible only if overall retention is also -1.
13902
13924
  :param builtins.str max_compaction_lag_ms: The maximum time a message will remain ineligible for compaction in the log. Only applicable for logs that are being compacted.
@@ -13932,6 +13954,8 @@ class KafkaTopicConfig(dict):
13932
13954
  pulumi.set(__self__, "flush_ms", flush_ms)
13933
13955
  if index_interval_bytes is not None:
13934
13956
  pulumi.set(__self__, "index_interval_bytes", index_interval_bytes)
13957
+ if inkless_enable is not None:
13958
+ pulumi.set(__self__, "inkless_enable", inkless_enable)
13935
13959
  if local_retention_bytes is not None:
13936
13960
  pulumi.set(__self__, "local_retention_bytes", local_retention_bytes)
13937
13961
  if local_retention_ms is not None:
@@ -14029,6 +14053,14 @@ class KafkaTopicConfig(dict):
14029
14053
  """
14030
14054
  return pulumi.get(self, "index_interval_bytes")
14031
14055
 
14056
+ @property
14057
+ @pulumi.getter(name="inklessEnable")
14058
+ def inkless_enable(self) -> Optional[builtins.bool]:
14059
+ """
14060
+ Indicates whether inkless should be enabled. This is only available for BYOC services with Inkless feature enabled.
14061
+ """
14062
+ return pulumi.get(self, "inkless_enable")
14063
+
14032
14064
  @property
14033
14065
  @pulumi.getter(name="localRetentionBytes")
14034
14066
  def local_retention_bytes(self) -> Optional[builtins.str]:
@@ -18784,8 +18816,12 @@ class OpenSearchOpensearchUserConfigOpensearch(dict):
18784
18816
  suggest = "action_destructive_requires_name"
18785
18817
  elif key == "authFailureListeners":
18786
18818
  suggest = "auth_failure_listeners"
18819
+ elif key == "clusterFilecacheRemoteDataRatio":
18820
+ suggest = "cluster_filecache_remote_data_ratio"
18787
18821
  elif key == "clusterMaxShardsPerNode":
18788
18822
  suggest = "cluster_max_shards_per_node"
18823
+ elif key == "clusterRemoteStore":
18824
+ suggest = "cluster_remote_store"
18789
18825
  elif key == "clusterRoutingAllocationBalancePreferPrimary":
18790
18826
  suggest = "cluster_routing_allocation_balance_prefer_primary"
18791
18827
  elif key == "clusterRoutingAllocationNodeConcurrentRecoveries":
@@ -18846,12 +18882,16 @@ class OpenSearchOpensearchUserConfigOpensearch(dict):
18846
18882
  suggest = "knn_memory_circuit_breaker_enabled"
18847
18883
  elif key == "knnMemoryCircuitBreakerLimit":
18848
18884
  suggest = "knn_memory_circuit_breaker_limit"
18885
+ elif key == "nodeSearchCacheSize":
18886
+ suggest = "node_search_cache_size"
18849
18887
  elif key == "overrideMainResponseVersion":
18850
18888
  suggest = "override_main_response_version"
18851
18889
  elif key == "pluginsAlertingFilterByBackendRoles":
18852
18890
  suggest = "plugins_alerting_filter_by_backend_roles"
18853
18891
  elif key == "reindexRemoteWhitelists":
18854
18892
  suggest = "reindex_remote_whitelists"
18893
+ elif key == "remoteStore":
18894
+ suggest = "remote_store"
18855
18895
  elif key == "scriptMaxCompilationsRate":
18856
18896
  suggest = "script_max_compilations_rate"
18857
18897
  elif key == "searchBackpressure":
@@ -18900,7 +18940,9 @@ class OpenSearchOpensearchUserConfigOpensearch(dict):
18900
18940
  action_auto_create_index_enabled: Optional[builtins.bool] = None,
18901
18941
  action_destructive_requires_name: Optional[builtins.bool] = None,
18902
18942
  auth_failure_listeners: Optional['outputs.OpenSearchOpensearchUserConfigOpensearchAuthFailureListeners'] = None,
18943
+ cluster_filecache_remote_data_ratio: Optional[builtins.float] = None,
18903
18944
  cluster_max_shards_per_node: Optional[builtins.int] = None,
18945
+ cluster_remote_store: Optional['outputs.OpenSearchOpensearchUserConfigOpensearchClusterRemoteStore'] = None,
18904
18946
  cluster_routing_allocation_balance_prefer_primary: Optional[builtins.bool] = None,
18905
18947
  cluster_routing_allocation_node_concurrent_recoveries: Optional[builtins.int] = None,
18906
18948
  cluster_search_request_slowlog: Optional['outputs.OpenSearchOpensearchUserConfigOpensearchClusterSearchRequestSlowlog'] = None,
@@ -18931,9 +18973,11 @@ class OpenSearchOpensearchUserConfigOpensearch(dict):
18931
18973
  ism_history_rollover_retention_period: Optional[builtins.int] = None,
18932
18974
  knn_memory_circuit_breaker_enabled: Optional[builtins.bool] = None,
18933
18975
  knn_memory_circuit_breaker_limit: Optional[builtins.int] = None,
18976
+ node_search_cache_size: Optional[builtins.str] = None,
18934
18977
  override_main_response_version: Optional[builtins.bool] = None,
18935
18978
  plugins_alerting_filter_by_backend_roles: Optional[builtins.bool] = None,
18936
18979
  reindex_remote_whitelists: Optional[Sequence[builtins.str]] = None,
18980
+ remote_store: Optional['outputs.OpenSearchOpensearchUserConfigOpensearchRemoteStore'] = None,
18937
18981
  script_max_compilations_rate: Optional[builtins.str] = None,
18938
18982
  search_backpressure: Optional['outputs.OpenSearchOpensearchUserConfigOpensearchSearchBackpressure'] = None,
18939
18983
  search_insights_top_queries: Optional['outputs.OpenSearchOpensearchUserConfigOpensearchSearchInsightsTopQueries'] = None,
@@ -18955,8 +18999,9 @@ class OpenSearchOpensearchUserConfigOpensearch(dict):
18955
18999
  :param builtins.bool action_auto_create_index_enabled: Explicitly allow or block automatic creation of indices. Defaults to true.
18956
19000
  :param builtins.bool action_destructive_requires_name: Require explicit index names when deleting.
18957
19001
  :param 'OpenSearchOpensearchUserConfigOpensearchAuthFailureListenersArgs' auth_failure_listeners: Opensearch Security Plugin Settings
19002
+ :param builtins.float cluster_filecache_remote_data_ratio: Defines a limit of how much total remote data can be referenced as a ratio of the size of the disk reserved for the file cache. This is designed to be a safeguard to prevent oversubscribing a cluster. Defaults to 0.
18958
19003
  :param builtins.int cluster_max_shards_per_node: Controls the number of shards allowed in the cluster per data node. Example: `1000`.
18959
- :param builtins.bool cluster_routing_allocation_balance_prefer_primary: When set to true, OpenSearch attempts to evenly distribute the primary shards between the cluster nodes. Enabling this setting does not always guarantee an equal number of primary shards on each node, especially in the event of a failover. Changing this setting to false after it was set to true does not invoke redistribution of primary shards. Default is false. Default: `false`.
19004
+ :param builtins.bool cluster_routing_allocation_balance_prefer_primary: When set to true, OpenSearch attempts to evenly distribute the primary shards between the cluster nodes. Enabling this setting does not always guarantee an equal number of primary shards on each node, especially in the event of a failover. Changing this setting to false after it was set to true does not invoke redistribution of primary shards. Default is false.
18960
19005
  :param builtins.int cluster_routing_allocation_node_concurrent_recoveries: How many concurrent incoming/outgoing shard recoveries (normally replicas) are allowed to happen on a node. Defaults to node cpu count * 2.
18961
19006
  :param 'OpenSearchOpensearchUserConfigOpensearchDiskWatermarksArgs' disk_watermarks: Watermark settings
18962
19007
  :param builtins.str email_sender_name: Sender name placeholder to be used in Opensearch Dashboards and Opensearch keystore. Example: `alert-sender`.
@@ -18985,6 +19030,7 @@ class OpenSearchOpensearchUserConfigOpensearch(dict):
18985
19030
  :param builtins.int ism_history_rollover_retention_period: How long audit history indices are kept in days. Example: `30`.
18986
19031
  :param builtins.bool knn_memory_circuit_breaker_enabled: Enable or disable KNN memory circuit breaker. Defaults to true.
18987
19032
  :param builtins.int knn_memory_circuit_breaker_limit: Maximum amount of memory that can be used for KNN index. Defaults to 50% of the JVM heap size.
19033
+ :param builtins.str node_search_cache_size: Defines a limit of how much total remote data can be referenced as a ratio of the size of the disk reserved for the file cache. This is designed to be a safeguard to prevent oversubscribing a cluster. Defaults to 5gb. Requires restarting all OpenSearch nodes.
18988
19034
  :param builtins.bool override_main_response_version: Compatibility mode sets OpenSearch to report its version as 7.10 so clients continue to work. Default is false.
18989
19035
  :param builtins.bool plugins_alerting_filter_by_backend_roles: Enable or disable filtering of alerting by backend roles. Requires Security plugin. Defaults to false.
18990
19036
  :param Sequence[builtins.str] reindex_remote_whitelists: Whitelisted addresses for reindexing. Changing this value will cause all OpenSearch instances to restart.
@@ -19011,8 +19057,12 @@ class OpenSearchOpensearchUserConfigOpensearch(dict):
19011
19057
  pulumi.set(__self__, "action_destructive_requires_name", action_destructive_requires_name)
19012
19058
  if auth_failure_listeners is not None:
19013
19059
  pulumi.set(__self__, "auth_failure_listeners", auth_failure_listeners)
19060
+ if cluster_filecache_remote_data_ratio is not None:
19061
+ pulumi.set(__self__, "cluster_filecache_remote_data_ratio", cluster_filecache_remote_data_ratio)
19014
19062
  if cluster_max_shards_per_node is not None:
19015
19063
  pulumi.set(__self__, "cluster_max_shards_per_node", cluster_max_shards_per_node)
19064
+ if cluster_remote_store is not None:
19065
+ pulumi.set(__self__, "cluster_remote_store", cluster_remote_store)
19016
19066
  if cluster_routing_allocation_balance_prefer_primary is not None:
19017
19067
  pulumi.set(__self__, "cluster_routing_allocation_balance_prefer_primary", cluster_routing_allocation_balance_prefer_primary)
19018
19068
  if cluster_routing_allocation_node_concurrent_recoveries is not None:
@@ -19073,12 +19123,16 @@ class OpenSearchOpensearchUserConfigOpensearch(dict):
19073
19123
  pulumi.set(__self__, "knn_memory_circuit_breaker_enabled", knn_memory_circuit_breaker_enabled)
19074
19124
  if knn_memory_circuit_breaker_limit is not None:
19075
19125
  pulumi.set(__self__, "knn_memory_circuit_breaker_limit", knn_memory_circuit_breaker_limit)
19126
+ if node_search_cache_size is not None:
19127
+ pulumi.set(__self__, "node_search_cache_size", node_search_cache_size)
19076
19128
  if override_main_response_version is not None:
19077
19129
  pulumi.set(__self__, "override_main_response_version", override_main_response_version)
19078
19130
  if plugins_alerting_filter_by_backend_roles is not None:
19079
19131
  pulumi.set(__self__, "plugins_alerting_filter_by_backend_roles", plugins_alerting_filter_by_backend_roles)
19080
19132
  if reindex_remote_whitelists is not None:
19081
19133
  pulumi.set(__self__, "reindex_remote_whitelists", reindex_remote_whitelists)
19134
+ if remote_store is not None:
19135
+ pulumi.set(__self__, "remote_store", remote_store)
19082
19136
  if script_max_compilations_rate is not None:
19083
19137
  pulumi.set(__self__, "script_max_compilations_rate", script_max_compilations_rate)
19084
19138
  if search_backpressure is not None:
@@ -19138,6 +19192,14 @@ class OpenSearchOpensearchUserConfigOpensearch(dict):
19138
19192
  """
19139
19193
  return pulumi.get(self, "auth_failure_listeners")
19140
19194
 
19195
+ @property
19196
+ @pulumi.getter(name="clusterFilecacheRemoteDataRatio")
19197
+ def cluster_filecache_remote_data_ratio(self) -> Optional[builtins.float]:
19198
+ """
19199
+ Defines a limit of how much total remote data can be referenced as a ratio of the size of the disk reserved for the file cache. This is designed to be a safeguard to prevent oversubscribing a cluster. Defaults to 0.
19200
+ """
19201
+ return pulumi.get(self, "cluster_filecache_remote_data_ratio")
19202
+
19141
19203
  @property
19142
19204
  @pulumi.getter(name="clusterMaxShardsPerNode")
19143
19205
  def cluster_max_shards_per_node(self) -> Optional[builtins.int]:
@@ -19146,11 +19208,16 @@ class OpenSearchOpensearchUserConfigOpensearch(dict):
19146
19208
  """
19147
19209
  return pulumi.get(self, "cluster_max_shards_per_node")
19148
19210
 
19211
+ @property
19212
+ @pulumi.getter(name="clusterRemoteStore")
19213
+ def cluster_remote_store(self) -> Optional['outputs.OpenSearchOpensearchUserConfigOpensearchClusterRemoteStore']:
19214
+ return pulumi.get(self, "cluster_remote_store")
19215
+
19149
19216
  @property
19150
19217
  @pulumi.getter(name="clusterRoutingAllocationBalancePreferPrimary")
19151
19218
  def cluster_routing_allocation_balance_prefer_primary(self) -> Optional[builtins.bool]:
19152
19219
  """
19153
- When set to true, OpenSearch attempts to evenly distribute the primary shards between the cluster nodes. Enabling this setting does not always guarantee an equal number of primary shards on each node, especially in the event of a failover. Changing this setting to false after it was set to true does not invoke redistribution of primary shards. Default is false. Default: `false`.
19220
+ When set to true, OpenSearch attempts to evenly distribute the primary shards between the cluster nodes. Enabling this setting does not always guarantee an equal number of primary shards on each node, especially in the event of a failover. Changing this setting to false after it was set to true does not invoke redistribution of primary shards. Default is false.
19154
19221
  """
19155
19222
  return pulumi.get(self, "cluster_routing_allocation_balance_prefer_primary")
19156
19223
 
@@ -19383,6 +19450,14 @@ class OpenSearchOpensearchUserConfigOpensearch(dict):
19383
19450
  """
19384
19451
  return pulumi.get(self, "knn_memory_circuit_breaker_limit")
19385
19452
 
19453
+ @property
19454
+ @pulumi.getter(name="nodeSearchCacheSize")
19455
+ def node_search_cache_size(self) -> Optional[builtins.str]:
19456
+ """
19457
+ Defines a limit of how much total remote data can be referenced as a ratio of the size of the disk reserved for the file cache. This is designed to be a safeguard to prevent oversubscribing a cluster. Defaults to 5gb. Requires restarting all OpenSearch nodes.
19458
+ """
19459
+ return pulumi.get(self, "node_search_cache_size")
19460
+
19386
19461
  @property
19387
19462
  @pulumi.getter(name="overrideMainResponseVersion")
19388
19463
  def override_main_response_version(self) -> Optional[builtins.bool]:
@@ -19407,6 +19482,11 @@ class OpenSearchOpensearchUserConfigOpensearch(dict):
19407
19482
  """
19408
19483
  return pulumi.get(self, "reindex_remote_whitelists")
19409
19484
 
19485
+ @property
19486
+ @pulumi.getter(name="remoteStore")
19487
+ def remote_store(self) -> Optional['outputs.OpenSearchOpensearchUserConfigOpensearchRemoteStore']:
19488
+ return pulumi.get(self, "remote_store")
19489
+
19410
19490
  @property
19411
19491
  @pulumi.getter(name="scriptMaxCompilationsRate")
19412
19492
  def script_max_compilations_rate(self) -> Optional[builtins.str]:
@@ -19810,6 +19890,84 @@ class OpenSearchOpensearchUserConfigOpensearchAuthFailureListenersIpRateLimiting
19810
19890
  return pulumi.get(self, "type")
19811
19891
 
19812
19892
 
19893
+ @pulumi.output_type
19894
+ class OpenSearchOpensearchUserConfigOpensearchClusterRemoteStore(dict):
19895
+ @staticmethod
19896
+ def __key_warning(key: str):
19897
+ suggest = None
19898
+ if key == "stateGlobalMetadataUploadTimeout":
19899
+ suggest = "state_global_metadata_upload_timeout"
19900
+ elif key == "stateMetadataManifestUploadTimeout":
19901
+ suggest = "state_metadata_manifest_upload_timeout"
19902
+ elif key == "translogBufferInterval":
19903
+ suggest = "translog_buffer_interval"
19904
+ elif key == "translogMaxReaders":
19905
+ suggest = "translog_max_readers"
19906
+
19907
+ if suggest:
19908
+ pulumi.log.warn(f"Key '{key}' not found in OpenSearchOpensearchUserConfigOpensearchClusterRemoteStore. Access the value via the '{suggest}' property getter instead.")
19909
+
19910
+ def __getitem__(self, key: str) -> Any:
19911
+ OpenSearchOpensearchUserConfigOpensearchClusterRemoteStore.__key_warning(key)
19912
+ return super().__getitem__(key)
19913
+
19914
+ def get(self, key: str, default = None) -> Any:
19915
+ OpenSearchOpensearchUserConfigOpensearchClusterRemoteStore.__key_warning(key)
19916
+ return super().get(key, default)
19917
+
19918
+ def __init__(__self__, *,
19919
+ state_global_metadata_upload_timeout: Optional[builtins.str] = None,
19920
+ state_metadata_manifest_upload_timeout: Optional[builtins.str] = None,
19921
+ translog_buffer_interval: Optional[builtins.str] = None,
19922
+ translog_max_readers: Optional[builtins.int] = None):
19923
+ """
19924
+ :param builtins.str state_global_metadata_upload_timeout: The amount of time to wait for the cluster state upload to complete. Defaults to 20s.
19925
+ :param builtins.str state_metadata_manifest_upload_timeout: The amount of time to wait for the manifest file upload to complete. The manifest file contains the details of each of the files uploaded for a single cluster state, both index metadata files and global metadata files. Defaults to 20s.
19926
+ :param builtins.str translog_buffer_interval: The default value of the translog buffer interval used when performing periodic translog updates. This setting is only effective when the index setting `index.remote_store.translog.buffer_interval` is not present. Defaults to 650ms.
19927
+ :param builtins.int translog_max_readers: Sets the maximum number of open translog files for remote-backed indexes. This limits the total number of translog files per shard. After reaching this limit, the remote store flushes the translog files. Default is 1000. The minimum required is 100. Example: `1000`.
19928
+ """
19929
+ if state_global_metadata_upload_timeout is not None:
19930
+ pulumi.set(__self__, "state_global_metadata_upload_timeout", state_global_metadata_upload_timeout)
19931
+ if state_metadata_manifest_upload_timeout is not None:
19932
+ pulumi.set(__self__, "state_metadata_manifest_upload_timeout", state_metadata_manifest_upload_timeout)
19933
+ if translog_buffer_interval is not None:
19934
+ pulumi.set(__self__, "translog_buffer_interval", translog_buffer_interval)
19935
+ if translog_max_readers is not None:
19936
+ pulumi.set(__self__, "translog_max_readers", translog_max_readers)
19937
+
19938
+ @property
19939
+ @pulumi.getter(name="stateGlobalMetadataUploadTimeout")
19940
+ def state_global_metadata_upload_timeout(self) -> Optional[builtins.str]:
19941
+ """
19942
+ The amount of time to wait for the cluster state upload to complete. Defaults to 20s.
19943
+ """
19944
+ return pulumi.get(self, "state_global_metadata_upload_timeout")
19945
+
19946
+ @property
19947
+ @pulumi.getter(name="stateMetadataManifestUploadTimeout")
19948
+ def state_metadata_manifest_upload_timeout(self) -> Optional[builtins.str]:
19949
+ """
19950
+ The amount of time to wait for the manifest file upload to complete. The manifest file contains the details of each of the files uploaded for a single cluster state, both index metadata files and global metadata files. Defaults to 20s.
19951
+ """
19952
+ return pulumi.get(self, "state_metadata_manifest_upload_timeout")
19953
+
19954
+ @property
19955
+ @pulumi.getter(name="translogBufferInterval")
19956
+ def translog_buffer_interval(self) -> Optional[builtins.str]:
19957
+ """
19958
+ The default value of the translog buffer interval used when performing periodic translog updates. This setting is only effective when the index setting `index.remote_store.translog.buffer_interval` is not present. Defaults to 650ms.
19959
+ """
19960
+ return pulumi.get(self, "translog_buffer_interval")
19961
+
19962
+ @property
19963
+ @pulumi.getter(name="translogMaxReaders")
19964
+ def translog_max_readers(self) -> Optional[builtins.int]:
19965
+ """
19966
+ Sets the maximum number of open translog files for remote-backed indexes. This limits the total number of translog files per shard. After reaching this limit, the remote store flushes the translog files. Default is 1000. The minimum required is 100. Example: `1000`.
19967
+ """
19968
+ return pulumi.get(self, "translog_max_readers")
19969
+
19970
+
19813
19971
  @pulumi.output_type
19814
19972
  class OpenSearchOpensearchUserConfigOpensearchClusterSearchRequestSlowlog(dict):
19815
19973
  def __init__(__self__, *,
@@ -20025,6 +20183,84 @@ class OpenSearchOpensearchUserConfigOpensearchDiskWatermarks(dict):
20025
20183
  return pulumi.get(self, "low")
20026
20184
 
20027
20185
 
20186
@pulumi.output_type
class OpenSearchOpensearchUserConfigOpensearchRemoteStore(dict):
    """Remote segment-store backpressure settings for the OpenSearch user config.

    Instances behave as plain dicts; indexing with a camelCase wire key
    (e.g. ``segmentPressureEnabled``) still works but emits a warning that
    points the caller at the snake_case property getter instead.
    """

    @staticmethod
    def __key_warning(key: str):
        # Map each camelCase wire key to the snake_case property it should be
        # read through; unknown keys produce no suggestion and no warning.
        suggest = {
            "segmentPressureBytesLagVarianceFactor": "segment_pressure_bytes_lag_variance_factor",
            "segmentPressureConsecutiveFailuresLimit": "segment_pressure_consecutive_failures_limit",
            "segmentPressureEnabled": "segment_pressure_enabled",
            "segmentPressureTimeLagVarianceFactor": "segment_pressure_time_lag_variance_factor",
        }.get(key)

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in OpenSearchOpensearchUserConfigOpensearchRemoteStore. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        # Warn on camelCase access before delegating to the underlying dict.
        self.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        # Same camelCase warning applies to dict.get-style access.
        self.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 segment_pressure_bytes_lag_variance_factor: Optional[builtins.float] = None,
                 segment_pressure_consecutive_failures_limit: Optional[builtins.int] = None,
                 segment_pressure_enabled: Optional[builtins.bool] = None,
                 segment_pressure_time_lag_variance_factor: Optional[builtins.float] = None):
        """
        :param builtins.float segment_pressure_bytes_lag_variance_factor: The variance factor that is used together with the moving average to calculate the dynamic bytes lag threshold for activating remote segment backpressure. Defaults to 10.
        :param builtins.int segment_pressure_consecutive_failures_limit: The minimum consecutive failure count for activating remote segment backpressure. Defaults to 5.
        :param builtins.bool segment_pressure_enabled: Enables remote segment backpressure. Default is `true`.
        :param builtins.float segment_pressure_time_lag_variance_factor: The variance factor that is used together with the moving average to calculate the dynamic time lag threshold for activating remote segment backpressure. Defaults to 10.
        """
        # Store only the arguments that were actually supplied; pulumi.set
        # records each one under its snake_case name.
        for attr_name, attr_value in (
            ("segment_pressure_bytes_lag_variance_factor", segment_pressure_bytes_lag_variance_factor),
            ("segment_pressure_consecutive_failures_limit", segment_pressure_consecutive_failures_limit),
            ("segment_pressure_enabled", segment_pressure_enabled),
            ("segment_pressure_time_lag_variance_factor", segment_pressure_time_lag_variance_factor),
        ):
            if attr_value is not None:
                pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter(name="segmentPressureBytesLagVarianceFactor")
    def segment_pressure_bytes_lag_variance_factor(self) -> Optional[builtins.float]:
        """
        The variance factor that is used together with the moving average to calculate the dynamic bytes lag threshold for activating remote segment backpressure. Defaults to 10.
        """
        return pulumi.get(self, "segment_pressure_bytes_lag_variance_factor")

    @property
    @pulumi.getter(name="segmentPressureConsecutiveFailuresLimit")
    def segment_pressure_consecutive_failures_limit(self) -> Optional[builtins.int]:
        """
        The minimum consecutive failure count for activating remote segment backpressure. Defaults to 5.
        """
        return pulumi.get(self, "segment_pressure_consecutive_failures_limit")

    @property
    @pulumi.getter(name="segmentPressureEnabled")
    def segment_pressure_enabled(self) -> Optional[builtins.bool]:
        """
        Enables remote segment backpressure. Default is `true`.
        """
        return pulumi.get(self, "segment_pressure_enabled")

    @property
    @pulumi.getter(name="segmentPressureTimeLagVarianceFactor")
    def segment_pressure_time_lag_variance_factor(self) -> Optional[builtins.float]:
        """
        The variance factor that is used together with the moving average to calculate the dynamic time lag threshold for activating remote segment backpressure. Defaults to 10.
        """
        return pulumi.get(self, "segment_pressure_time_lag_variance_factor")
+
20028
20264
  @pulumi.output_type
20029
20265
  class OpenSearchOpensearchUserConfigOpensearchSearchBackpressure(dict):
20030
20266
  @staticmethod
@@ -24481,7 +24717,7 @@ class PgPgUserConfigTimescaledb(dict):
24481
24717
  def __init__(__self__, *,
24482
24718
  max_background_workers: Optional[builtins.int] = None):
24483
24719
  """
24484
- :param builtins.int max_background_workers: The number of background workers for timescaledb operations. You should configure this setting to the sum of your number of databases and the total number of concurrent background workers you want running at any given point in time. Default: `16`.
24720
+ :param builtins.int max_background_workers: The number of background workers for timescaledb operations. You should configure this setting to the sum of your number of databases and the total number of concurrent background workers you want running at any given point in time. Changing this parameter causes a service restart. Default: `16`.
24485
24721
  """
24486
24722
  if max_background_workers is not None:
24487
24723
  pulumi.set(__self__, "max_background_workers", max_background_workers)
@@ -24490,7 +24726,7 @@ class PgPgUserConfigTimescaledb(dict):
24490
24726
  @pulumi.getter(name="maxBackgroundWorkers")
24491
24727
  def max_background_workers(self) -> Optional[builtins.int]:
24492
24728
  """
24493
- The number of background workers for timescaledb operations. You should configure this setting to the sum of your number of databases and the total number of concurrent background workers you want running at any given point in time. Default: `16`.
24729
+ The number of background workers for timescaledb operations. You should configure this setting to the sum of your number of databases and the total number of concurrent background workers you want running at any given point in time. Changing this parameter causes a service restart. Default: `16`.
24494
24730
  """
24495
24731
  return pulumi.get(self, "max_background_workers")
24496
24732
 
@@ -34114,6 +34350,7 @@ class GetClickhouseClickhouseUserConfigResult(dict):
34114
34350
  additional_backup_regions: Optional[builtins.str] = None,
34115
34351
  backup_hour: Optional[builtins.int] = None,
34116
34352
  backup_minute: Optional[builtins.int] = None,
34353
+ enable_ipv6: Optional[builtins.bool] = None,
34117
34354
  ip_filter_objects: Optional[Sequence['outputs.GetClickhouseClickhouseUserConfigIpFilterObjectResult']] = None,
34118
34355
  ip_filter_strings: Optional[Sequence[builtins.str]] = None,
34119
34356
  ip_filters: Optional[Sequence[builtins.str]] = None,
@@ -34129,6 +34366,7 @@ class GetClickhouseClickhouseUserConfigResult(dict):
34129
34366
  :param builtins.str additional_backup_regions: Additional Cloud Regions for Backup Replication.
34130
34367
  :param builtins.int backup_hour: The hour of day (in UTC) when backup for the service is started. New backup is only started if previous backup has already completed. Example: `3`.
34131
34368
  :param builtins.int backup_minute: The minute of an hour when backup for the service is started. New backup is only started if previous backup has already completed. Example: `30`.
34369
+ :param builtins.bool enable_ipv6: Register AAAA DNS records for the service, and allow IPv6 packets to service ports.
34132
34370
  :param Sequence['GetClickhouseClickhouseUserConfigIpFilterObjectArgs'] ip_filter_objects: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`
34133
34371
  :param Sequence[builtins.str] ip_filter_strings: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`.
34134
34372
  :param Sequence[builtins.str] ip_filters: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`.
@@ -34147,6 +34385,8 @@ class GetClickhouseClickhouseUserConfigResult(dict):
34147
34385
  pulumi.set(__self__, "backup_hour", backup_hour)
34148
34386
  if backup_minute is not None:
34149
34387
  pulumi.set(__self__, "backup_minute", backup_minute)
34388
+ if enable_ipv6 is not None:
34389
+ pulumi.set(__self__, "enable_ipv6", enable_ipv6)
34150
34390
  if ip_filter_objects is not None:
34151
34391
  pulumi.set(__self__, "ip_filter_objects", ip_filter_objects)
34152
34392
  if ip_filter_strings is not None:
@@ -34195,6 +34435,14 @@ class GetClickhouseClickhouseUserConfigResult(dict):
34195
34435
  """
34196
34436
  return pulumi.get(self, "backup_minute")
34197
34437
 
34438
+ @property
34439
+ @pulumi.getter(name="enableIpv6")
34440
+ def enable_ipv6(self) -> Optional[builtins.bool]:
34441
+ """
34442
+ Register AAAA DNS records for the service, and allow IPv6 packets to service ports.
34443
+ """
34444
+ return pulumi.get(self, "enable_ipv6")
34445
+
34198
34446
  @property
34199
34447
  @pulumi.getter(name="ipFilterObjects")
34200
34448
  def ip_filter_objects(self) -> Optional[Sequence['outputs.GetClickhouseClickhouseUserConfigIpFilterObjectResult']]:
@@ -36025,7 +36273,7 @@ class GetGrafanaGrafanaUserConfigResult(dict):
36025
36273
  :param Sequence[builtins.str] ip_filter_strings: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`.
36026
36274
  :param Sequence[builtins.str] ip_filters: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`.
36027
36275
  :param builtins.bool metrics_enabled: Enable Grafana's /metrics endpoint.
36028
- :param builtins.bool oauth_allow_insecure_email_lookup: Enforce user lookup based on email instead of the unique ID provided by the IdP.
36276
+ :param builtins.bool oauth_allow_insecure_email_lookup: Enforce user lookup based on email instead of the unique ID provided by the IdP. This setup introduces significant security risks, such as potential phishing, spoofing, and other data breaches.
36029
36277
  :param 'GetGrafanaGrafanaUserConfigPrivateAccessArgs' private_access: Allow access to selected service ports from private networks
36030
36278
  :param 'GetGrafanaGrafanaUserConfigPrivatelinkAccessArgs' privatelink_access: Allow access to selected service components through Privatelink
36031
36279
  :param builtins.str project_to_fork_from: Name of another project to fork a service from. This has effect only when a new service is being created. Example: `anotherprojectname`.
@@ -36367,7 +36615,7 @@ class GetGrafanaGrafanaUserConfigResult(dict):
36367
36615
  @pulumi.getter(name="oauthAllowInsecureEmailLookup")
36368
36616
  def oauth_allow_insecure_email_lookup(self) -> Optional[builtins.bool]:
36369
36617
  """
36370
- Enforce user lookup based on email instead of the unique ID provided by the IdP.
36618
+ Enforce user lookup based on email instead of the unique ID provided by the IdP. This setup introduces significant security risks, such as potential phishing, spoofing, and other data breaches.
36371
36619
  """
36372
36620
  return pulumi.get(self, "oauth_allow_insecure_email_lookup")
36373
36621
 
@@ -41516,6 +41764,7 @@ class GetKafkaTopicConfigResult(dict):
41516
41764
  flush_messages: Optional[builtins.str] = None,
41517
41765
  flush_ms: Optional[builtins.str] = None,
41518
41766
  index_interval_bytes: Optional[builtins.str] = None,
41767
+ inkless_enable: Optional[builtins.bool] = None,
41519
41768
  local_retention_bytes: Optional[builtins.str] = None,
41520
41769
  local_retention_ms: Optional[builtins.str] = None,
41521
41770
  max_compaction_lag_ms: Optional[builtins.str] = None,
@@ -41544,6 +41793,7 @@ class GetKafkaTopicConfigResult(dict):
41544
41793
  :param builtins.str flush_messages: This setting allows specifying an interval at which we will force an fsync of data written to the log. For example if this was set to 1 we would fsync after every message; if it were 5 we would fsync after every five messages. In general we recommend you not set this and use replication for durability and allow the operating system's background flush capabilities as it is more efficient.
41545
41794
  :param builtins.str flush_ms: This setting allows specifying a time interval at which we will force an fsync of data written to the log. For example if this was set to 1000 we would fsync after 1000 ms had passed. In general we recommend you not set this and use replication for durability and allow the operating system's background flush capabilities as it is more efficient.
41546
41795
  :param builtins.str index_interval_bytes: This setting controls how frequently Kafka adds an index entry to its offset index. The default setting ensures that we index a message roughly every 4096 bytes. More indexing allows reads to jump closer to the exact position in the log but makes the index larger. You probably don't need to change this.
41796
+ :param builtins.bool inkless_enable: Indicates whether inkless should be enabled. This is only available for BYOC services with Inkless feature enabled.
41547
41797
  :param builtins.str local_retention_bytes: This configuration controls the maximum bytes tiered storage will retain segment files locally before it will discard old log segments to free up space. If set to -2, the limit is equal to overall retention time. If set to -1, no limit is applied but it's possible only if overall retention is also -1.
41548
41798
  :param builtins.str local_retention_ms: This configuration controls the maximum time tiered storage will retain segment files locally before it will discard old log segments to free up space. If set to -2, the time limit is equal to overall retention time. If set to -1, no time limit is applied but it's possible only if overall retention is also -1.
41549
41799
  :param builtins.str max_compaction_lag_ms: The maximum time a message will remain ineligible for compaction in the log. Only applicable for logs that are being compacted.
@@ -41579,6 +41829,8 @@ class GetKafkaTopicConfigResult(dict):
41579
41829
  pulumi.set(__self__, "flush_ms", flush_ms)
41580
41830
  if index_interval_bytes is not None:
41581
41831
  pulumi.set(__self__, "index_interval_bytes", index_interval_bytes)
41832
+ if inkless_enable is not None:
41833
+ pulumi.set(__self__, "inkless_enable", inkless_enable)
41582
41834
  if local_retention_bytes is not None:
41583
41835
  pulumi.set(__self__, "local_retention_bytes", local_retention_bytes)
41584
41836
  if local_retention_ms is not None:
@@ -41676,6 +41928,14 @@ class GetKafkaTopicConfigResult(dict):
41676
41928
  """
41677
41929
  return pulumi.get(self, "index_interval_bytes")
41678
41930
 
41931
+ @property
41932
+ @pulumi.getter(name="inklessEnable")
41933
+ def inkless_enable(self) -> Optional[builtins.bool]:
41934
+ """
41935
+ Indicates whether inkless should be enabled. This is only available for BYOC services with Inkless feature enabled.
41936
+ """
41937
+ return pulumi.get(self, "inkless_enable")
41938
+
41679
41939
  @property
41680
41940
  @pulumi.getter(name="localRetentionBytes")
41681
41941
  def local_retention_bytes(self) -> Optional[builtins.str]:
@@ -45562,7 +45822,9 @@ class GetOpenSearchOpensearchUserConfigOpensearchResult(dict):
45562
45822
  action_auto_create_index_enabled: Optional[builtins.bool] = None,
45563
45823
  action_destructive_requires_name: Optional[builtins.bool] = None,
45564
45824
  auth_failure_listeners: Optional['outputs.GetOpenSearchOpensearchUserConfigOpensearchAuthFailureListenersResult'] = None,
45825
+ cluster_filecache_remote_data_ratio: Optional[builtins.float] = None,
45565
45826
  cluster_max_shards_per_node: Optional[builtins.int] = None,
45827
+ cluster_remote_store: Optional['outputs.GetOpenSearchOpensearchUserConfigOpensearchClusterRemoteStoreResult'] = None,
45566
45828
  cluster_routing_allocation_balance_prefer_primary: Optional[builtins.bool] = None,
45567
45829
  cluster_routing_allocation_node_concurrent_recoveries: Optional[builtins.int] = None,
45568
45830
  cluster_search_request_slowlog: Optional['outputs.GetOpenSearchOpensearchUserConfigOpensearchClusterSearchRequestSlowlogResult'] = None,
@@ -45593,9 +45855,11 @@ class GetOpenSearchOpensearchUserConfigOpensearchResult(dict):
45593
45855
  ism_history_rollover_retention_period: Optional[builtins.int] = None,
45594
45856
  knn_memory_circuit_breaker_enabled: Optional[builtins.bool] = None,
45595
45857
  knn_memory_circuit_breaker_limit: Optional[builtins.int] = None,
45858
+ node_search_cache_size: Optional[builtins.str] = None,
45596
45859
  override_main_response_version: Optional[builtins.bool] = None,
45597
45860
  plugins_alerting_filter_by_backend_roles: Optional[builtins.bool] = None,
45598
45861
  reindex_remote_whitelists: Optional[Sequence[builtins.str]] = None,
45862
+ remote_store: Optional['outputs.GetOpenSearchOpensearchUserConfigOpensearchRemoteStoreResult'] = None,
45599
45863
  script_max_compilations_rate: Optional[builtins.str] = None,
45600
45864
  search_backpressure: Optional['outputs.GetOpenSearchOpensearchUserConfigOpensearchSearchBackpressureResult'] = None,
45601
45865
  search_insights_top_queries: Optional['outputs.GetOpenSearchOpensearchUserConfigOpensearchSearchInsightsTopQueriesResult'] = None,
@@ -45617,8 +45881,9 @@ class GetOpenSearchOpensearchUserConfigOpensearchResult(dict):
45617
45881
  :param builtins.bool action_auto_create_index_enabled: Explicitly allow or block automatic creation of indices. Defaults to true.
45618
45882
  :param builtins.bool action_destructive_requires_name: Require explicit index names when deleting.
45619
45883
  :param 'GetOpenSearchOpensearchUserConfigOpensearchAuthFailureListenersArgs' auth_failure_listeners: Opensearch Security Plugin Settings
45884
+ :param builtins.float cluster_filecache_remote_data_ratio: Defines a limit of how much total remote data can be referenced as a ratio of the size of the disk reserved for the file cache. This is designed to be a safeguard to prevent oversubscribing a cluster. Defaults to 0.
45620
45885
  :param builtins.int cluster_max_shards_per_node: Controls the number of shards allowed in the cluster per data node. Example: `1000`.
45621
- :param builtins.bool cluster_routing_allocation_balance_prefer_primary: When set to true, OpenSearch attempts to evenly distribute the primary shards between the cluster nodes. Enabling this setting does not always guarantee an equal number of primary shards on each node, especially in the event of a failover. Changing this setting to false after it was set to true does not invoke redistribution of primary shards. Default is false. Default: `false`.
45886
+ :param builtins.bool cluster_routing_allocation_balance_prefer_primary: When set to true, OpenSearch attempts to evenly distribute the primary shards between the cluster nodes. Enabling this setting does not always guarantee an equal number of primary shards on each node, especially in the event of a failover. Changing this setting to false after it was set to true does not invoke redistribution of primary shards. Default is false.
45622
45887
  :param builtins.int cluster_routing_allocation_node_concurrent_recoveries: How many concurrent incoming/outgoing shard recoveries (normally replicas) are allowed to happen on a node. Defaults to node cpu count * 2.
45623
45888
  :param 'GetOpenSearchOpensearchUserConfigOpensearchDiskWatermarksArgs' disk_watermarks: Watermark settings
45624
45889
  :param builtins.str email_sender_name: Sender name placeholder to be used in Opensearch Dashboards and Opensearch keystore. Example: `alert-sender`.
@@ -45647,6 +45912,7 @@ class GetOpenSearchOpensearchUserConfigOpensearchResult(dict):
45647
45912
  :param builtins.int ism_history_rollover_retention_period: How long audit history indices are kept in days. Example: `30`.
45648
45913
  :param builtins.bool knn_memory_circuit_breaker_enabled: Enable or disable KNN memory circuit breaker. Defaults to true.
45649
45914
  :param builtins.int knn_memory_circuit_breaker_limit: Maximum amount of memory that can be used for KNN index. Defaults to 50% of the JVM heap size.
45915
+ :param builtins.str node_search_cache_size: Defines a limit of how much total remote data can be referenced as a ratio of the size of the disk reserved for the file cache. This is designed to be a safeguard to prevent oversubscribing a cluster. Defaults to 5gb. Requires restarting all OpenSearch nodes.
45650
45916
  :param builtins.bool override_main_response_version: Compatibility mode sets OpenSearch to report its version as 7.10 so clients continue to work. Default is false.
45651
45917
  :param builtins.bool plugins_alerting_filter_by_backend_roles: Enable or disable filtering of alerting by backend roles. Requires Security plugin. Defaults to false.
45652
45918
  :param Sequence[builtins.str] reindex_remote_whitelists: Whitelisted addresses for reindexing. Changing this value will cause all OpenSearch instances to restart.
@@ -45673,8 +45939,12 @@ class GetOpenSearchOpensearchUserConfigOpensearchResult(dict):
45673
45939
  pulumi.set(__self__, "action_destructive_requires_name", action_destructive_requires_name)
45674
45940
  if auth_failure_listeners is not None:
45675
45941
  pulumi.set(__self__, "auth_failure_listeners", auth_failure_listeners)
45942
+ if cluster_filecache_remote_data_ratio is not None:
45943
+ pulumi.set(__self__, "cluster_filecache_remote_data_ratio", cluster_filecache_remote_data_ratio)
45676
45944
  if cluster_max_shards_per_node is not None:
45677
45945
  pulumi.set(__self__, "cluster_max_shards_per_node", cluster_max_shards_per_node)
45946
+ if cluster_remote_store is not None:
45947
+ pulumi.set(__self__, "cluster_remote_store", cluster_remote_store)
45678
45948
  if cluster_routing_allocation_balance_prefer_primary is not None:
45679
45949
  pulumi.set(__self__, "cluster_routing_allocation_balance_prefer_primary", cluster_routing_allocation_balance_prefer_primary)
45680
45950
  if cluster_routing_allocation_node_concurrent_recoveries is not None:
@@ -45735,12 +46005,16 @@ class GetOpenSearchOpensearchUserConfigOpensearchResult(dict):
45735
46005
  pulumi.set(__self__, "knn_memory_circuit_breaker_enabled", knn_memory_circuit_breaker_enabled)
45736
46006
  if knn_memory_circuit_breaker_limit is not None:
45737
46007
  pulumi.set(__self__, "knn_memory_circuit_breaker_limit", knn_memory_circuit_breaker_limit)
46008
+ if node_search_cache_size is not None:
46009
+ pulumi.set(__self__, "node_search_cache_size", node_search_cache_size)
45738
46010
  if override_main_response_version is not None:
45739
46011
  pulumi.set(__self__, "override_main_response_version", override_main_response_version)
45740
46012
  if plugins_alerting_filter_by_backend_roles is not None:
45741
46013
  pulumi.set(__self__, "plugins_alerting_filter_by_backend_roles", plugins_alerting_filter_by_backend_roles)
45742
46014
  if reindex_remote_whitelists is not None:
45743
46015
  pulumi.set(__self__, "reindex_remote_whitelists", reindex_remote_whitelists)
46016
+ if remote_store is not None:
46017
+ pulumi.set(__self__, "remote_store", remote_store)
45744
46018
  if script_max_compilations_rate is not None:
45745
46019
  pulumi.set(__self__, "script_max_compilations_rate", script_max_compilations_rate)
45746
46020
  if search_backpressure is not None:
@@ -45800,6 +46074,14 @@ class GetOpenSearchOpensearchUserConfigOpensearchResult(dict):
45800
46074
  """
45801
46075
  return pulumi.get(self, "auth_failure_listeners")
45802
46076
 
46077
+ @property
46078
+ @pulumi.getter(name="clusterFilecacheRemoteDataRatio")
46079
+ def cluster_filecache_remote_data_ratio(self) -> Optional[builtins.float]:
46080
+ """
46081
+ Defines a limit of how much total remote data can be referenced as a ratio of the size of the disk reserved for the file cache. This is designed to be a safeguard to prevent oversubscribing a cluster. Defaults to 0.
46082
+ """
46083
+ return pulumi.get(self, "cluster_filecache_remote_data_ratio")
46084
+
45803
46085
  @property
45804
46086
  @pulumi.getter(name="clusterMaxShardsPerNode")
45805
46087
  def cluster_max_shards_per_node(self) -> Optional[builtins.int]:
@@ -45808,11 +46090,16 @@ class GetOpenSearchOpensearchUserConfigOpensearchResult(dict):
45808
46090
  """
45809
46091
  return pulumi.get(self, "cluster_max_shards_per_node")
45810
46092
 
46093
+ @property
46094
+ @pulumi.getter(name="clusterRemoteStore")
46095
+ def cluster_remote_store(self) -> Optional['outputs.GetOpenSearchOpensearchUserConfigOpensearchClusterRemoteStoreResult']:
46096
+ return pulumi.get(self, "cluster_remote_store")
46097
+
45811
46098
  @property
45812
46099
  @pulumi.getter(name="clusterRoutingAllocationBalancePreferPrimary")
45813
46100
  def cluster_routing_allocation_balance_prefer_primary(self) -> Optional[builtins.bool]:
45814
46101
  """
45815
- When set to true, OpenSearch attempts to evenly distribute the primary shards between the cluster nodes. Enabling this setting does not always guarantee an equal number of primary shards on each node, especially in the event of a failover. Changing this setting to false after it was set to true does not invoke redistribution of primary shards. Default is false. Default: `false`.
46102
+ When set to true, OpenSearch attempts to evenly distribute the primary shards between the cluster nodes. Enabling this setting does not always guarantee an equal number of primary shards on each node, especially in the event of a failover. Changing this setting to false after it was set to true does not invoke redistribution of primary shards. Default is false.
45816
46103
  """
45817
46104
  return pulumi.get(self, "cluster_routing_allocation_balance_prefer_primary")
45818
46105
 
@@ -46045,6 +46332,14 @@ class GetOpenSearchOpensearchUserConfigOpensearchResult(dict):
46045
46332
  """
46046
46333
  return pulumi.get(self, "knn_memory_circuit_breaker_limit")
46047
46334
 
46335
+ @property
46336
+ @pulumi.getter(name="nodeSearchCacheSize")
46337
+ def node_search_cache_size(self) -> Optional[builtins.str]:
46338
+ """
46339
+ Defines a limit of how much total remote data can be referenced as a ratio of the size of the disk reserved for the file cache. This is designed to be a safeguard to prevent oversubscribing a cluster. Defaults to 5gb. Requires restarting all OpenSearch nodes.
46340
+ """
46341
+ return pulumi.get(self, "node_search_cache_size")
46342
+
46048
46343
  @property
46049
46344
  @pulumi.getter(name="overrideMainResponseVersion")
46050
46345
  def override_main_response_version(self) -> Optional[builtins.bool]:
@@ -46069,6 +46364,11 @@ class GetOpenSearchOpensearchUserConfigOpensearchResult(dict):
46069
46364
  """
46070
46365
  return pulumi.get(self, "reindex_remote_whitelists")
46071
46366
 
46367
+ @property
46368
+ @pulumi.getter(name="remoteStore")
46369
+ def remote_store(self) -> Optional['outputs.GetOpenSearchOpensearchUserConfigOpensearchRemoteStoreResult']:
46370
+ return pulumi.get(self, "remote_store")
46371
+
46072
46372
  @property
46073
46373
  @pulumi.getter(name="scriptMaxCompilationsRate")
46074
46374
  def script_max_compilations_rate(self) -> Optional[builtins.str]:
@@ -46401,6 +46701,61 @@ class GetOpenSearchOpensearchUserConfigOpensearchAuthFailureListenersIpRateLimit
46401
46701
  return pulumi.get(self, "type")
46402
46702
 
46403
46703
 
46704
@pulumi.output_type
class GetOpenSearchOpensearchUserConfigOpensearchClusterRemoteStoreResult(dict):
    """Data-source result type for the OpenSearch cluster remote-store settings."""

    def __init__(__self__, *,
                 state_global_metadata_upload_timeout: Optional[builtins.str] = None,
                 state_metadata_manifest_upload_timeout: Optional[builtins.str] = None,
                 translog_buffer_interval: Optional[builtins.str] = None,
                 translog_max_readers: Optional[builtins.int] = None):
        """
        :param builtins.str state_global_metadata_upload_timeout: The amount of time to wait for the cluster state upload to complete. Defaults to 20s.
        :param builtins.str state_metadata_manifest_upload_timeout: The amount of time to wait for the manifest file upload to complete. The manifest file contains the details of each of the files uploaded for a single cluster state, both index metadata files and global metadata files. Defaults to 20s.
        :param builtins.str translog_buffer_interval: The default value of the translog buffer interval used when performing periodic translog updates. This setting is only effective when the index setting `index.remote_store.translog.buffer_interval` is not present. Defaults to 650ms.
        :param builtins.int translog_max_readers: Sets the maximum number of open translog files for remote-backed indexes. This limits the total number of translog files per shard. After reaching this limit, the remote store flushes the translog files. Default is 1000. The minimum required is 100. Example: `1000`.
        """
        # Record only the fields that were provided; each is stored under its
        # snake_case name via pulumi.set.
        for field_name, field_value in (
            ("state_global_metadata_upload_timeout", state_global_metadata_upload_timeout),
            ("state_metadata_manifest_upload_timeout", state_metadata_manifest_upload_timeout),
            ("translog_buffer_interval", translog_buffer_interval),
            ("translog_max_readers", translog_max_readers),
        ):
            if field_value is not None:
                pulumi.set(__self__, field_name, field_value)

    @property
    @pulumi.getter(name="stateGlobalMetadataUploadTimeout")
    def state_global_metadata_upload_timeout(self) -> Optional[builtins.str]:
        """
        The amount of time to wait for the cluster state upload to complete. Defaults to 20s.
        """
        return pulumi.get(self, "state_global_metadata_upload_timeout")

    @property
    @pulumi.getter(name="stateMetadataManifestUploadTimeout")
    def state_metadata_manifest_upload_timeout(self) -> Optional[builtins.str]:
        """
        The amount of time to wait for the manifest file upload to complete. The manifest file contains the details of each of the files uploaded for a single cluster state, both index metadata files and global metadata files. Defaults to 20s.
        """
        return pulumi.get(self, "state_metadata_manifest_upload_timeout")

    @property
    @pulumi.getter(name="translogBufferInterval")
    def translog_buffer_interval(self) -> Optional[builtins.str]:
        """
        The default value of the translog buffer interval used when performing periodic translog updates. This setting is only effective when the index setting `index.remote_store.translog.buffer_interval` is not present. Defaults to 650ms.
        """
        return pulumi.get(self, "translog_buffer_interval")

    @property
    @pulumi.getter(name="translogMaxReaders")
    def translog_max_readers(self) -> Optional[builtins.int]:
        """
        Sets the maximum number of open translog files for remote-backed indexes. This limits the total number of translog files per shard. After reaching this limit, the remote store flushes the translog files. Default is 1000. The minimum required is 100. Example: `1000`.
        """
        return pulumi.get(self, "translog_max_readers")
46758
+
46404
46759
  @pulumi.output_type
46405
46760
  class GetOpenSearchOpensearchUserConfigOpensearchClusterSearchRequestSlowlogResult(dict):
46406
46761
  def __init__(__self__, *,
@@ -46578,6 +46933,61 @@ class GetOpenSearchOpensearchUserConfigOpensearchDiskWatermarksResult(dict):
46578
46933
  return pulumi.get(self, "low")
46579
46934
 
46580
46935
 
46936
@pulumi.output_type
class GetOpenSearchOpensearchUserConfigOpensearchRemoteStoreResult(dict):
    """Remote-store backpressure settings read back from an OpenSearch service's user config."""

    def __init__(__self__, *,
                 segment_pressure_bytes_lag_variance_factor: Optional[builtins.float] = None,
                 segment_pressure_consecutive_failures_limit: Optional[builtins.int] = None,
                 segment_pressure_enabled: Optional[builtins.bool] = None,
                 segment_pressure_time_lag_variance_factor: Optional[builtins.float] = None):
        """
        :param builtins.float segment_pressure_bytes_lag_variance_factor: The variance factor that is used together with the moving average to calculate the dynamic bytes lag threshold for activating remote segment backpressure. Defaults to 10.
        :param builtins.int segment_pressure_consecutive_failures_limit: The minimum consecutive failure count for activating remote segment backpressure. Defaults to 5.
        :param builtins.bool segment_pressure_enabled: Enables remote segment backpressure. Default is `true`.
        :param builtins.float segment_pressure_time_lag_variance_factor: The variance factor that is used together with the moving average to calculate the dynamic time lag threshold for activating remote segment backpressure. Defaults to 10.
        """
        # Record only the values the caller actually supplied; pulumi.set
        # handles the output type's snake_case <-> camelCase key translation.
        _supplied = {
            "segment_pressure_bytes_lag_variance_factor": segment_pressure_bytes_lag_variance_factor,
            "segment_pressure_consecutive_failures_limit": segment_pressure_consecutive_failures_limit,
            "segment_pressure_enabled": segment_pressure_enabled,
            "segment_pressure_time_lag_variance_factor": segment_pressure_time_lag_variance_factor,
        }
        for _field, _value in _supplied.items():
            if _value is not None:
                pulumi.set(__self__, _field, _value)

    @property
    @pulumi.getter(name="segmentPressureBytesLagVarianceFactor")
    def segment_pressure_bytes_lag_variance_factor(self) -> Optional[builtins.float]:
        """
        The variance factor that is used together with the moving average to calculate the dynamic bytes lag threshold for activating remote segment backpressure. Defaults to 10.
        """
        return pulumi.get(self, "segment_pressure_bytes_lag_variance_factor")

    @property
    @pulumi.getter(name="segmentPressureConsecutiveFailuresLimit")
    def segment_pressure_consecutive_failures_limit(self) -> Optional[builtins.int]:
        """
        The minimum consecutive failure count for activating remote segment backpressure. Defaults to 5.
        """
        return pulumi.get(self, "segment_pressure_consecutive_failures_limit")

    @property
    @pulumi.getter(name="segmentPressureEnabled")
    def segment_pressure_enabled(self) -> Optional[builtins.bool]:
        """
        Enables remote segment backpressure. Default is `true`.
        """
        return pulumi.get(self, "segment_pressure_enabled")

    @property
    @pulumi.getter(name="segmentPressureTimeLagVarianceFactor")
    def segment_pressure_time_lag_variance_factor(self) -> Optional[builtins.float]:
        """
        The variance factor that is used together with the moving average to calculate the dynamic time lag threshold for activating remote segment backpressure. Defaults to 10.
        """
        return pulumi.get(self, "segment_pressure_time_lag_variance_factor")

46990
+
46581
46991
  @pulumi.output_type
46582
46992
  class GetOpenSearchOpensearchUserConfigOpensearchSearchBackpressureResult(dict):
46583
46993
  def __init__(__self__, *,
@@ -47816,10 +48226,10 @@ class GetOrganizationBillingGroupListBillingGroupResult(dict):
47816
48226
  def __init__(__self__, *,
47817
48227
  billing_address_id: builtins.str,
47818
48228
  billing_contact_emails: Sequence[builtins.str],
47819
- billing_currency: builtins.str,
47820
48229
  billing_emails: Sequence[builtins.str],
47821
48230
  billing_group_id: builtins.str,
47822
48231
  billing_group_name: builtins.str,
48232
+ currency: builtins.str,
47823
48233
  custom_invoice_text: builtins.str,
47824
48234
  organization_id: builtins.str,
47825
48235
  payment_method_id: builtins.str,
@@ -47828,10 +48238,10 @@ class GetOrganizationBillingGroupListBillingGroupResult(dict):
47828
48238
  """
47829
48239
  :param builtins.str billing_address_id: Billing address ID.
47830
48240
  :param Sequence[builtins.str] billing_contact_emails: List of billing contact emails.
47831
- :param builtins.str billing_currency: Acceptable currencies for a billing group. The possible values are `AUD`, `CAD`, `CHF`, `DKK`, `EUR`, `GBP`, `JPY`, `NOK`, `NZD`, `SEK`, `SGD` and `USD`.
47832
48241
  :param Sequence[builtins.str] billing_emails: List of billing contact emails.
47833
48242
  :param builtins.str billing_group_id: Billing group ID.
47834
48243
  :param builtins.str billing_group_name: Billing Group Name.
48244
+ :param builtins.str currency: Acceptable currencies for a billing group. The possible values are `AUD`, `CAD`, `CHF`, `DKK`, `EUR`, `GBP`, `JPY`, `NOK`, `NZD`, `SEK`, `SGD` and `USD`.
47835
48245
  :param builtins.str custom_invoice_text: Extra billing text.
47836
48246
  :param builtins.str organization_id: Organization ID.
47837
48247
  :param builtins.str payment_method_id: Payment method ID.
@@ -47840,10 +48250,10 @@ class GetOrganizationBillingGroupListBillingGroupResult(dict):
47840
48250
  """
47841
48251
  pulumi.set(__self__, "billing_address_id", billing_address_id)
47842
48252
  pulumi.set(__self__, "billing_contact_emails", billing_contact_emails)
47843
- pulumi.set(__self__, "billing_currency", billing_currency)
47844
48253
  pulumi.set(__self__, "billing_emails", billing_emails)
47845
48254
  pulumi.set(__self__, "billing_group_id", billing_group_id)
47846
48255
  pulumi.set(__self__, "billing_group_name", billing_group_name)
48256
+ pulumi.set(__self__, "currency", currency)
47847
48257
  pulumi.set(__self__, "custom_invoice_text", custom_invoice_text)
47848
48258
  pulumi.set(__self__, "organization_id", organization_id)
47849
48259
  pulumi.set(__self__, "payment_method_id", payment_method_id)
@@ -47866,14 +48276,6 @@ class GetOrganizationBillingGroupListBillingGroupResult(dict):
47866
48276
  """
47867
48277
  return pulumi.get(self, "billing_contact_emails")
47868
48278
 
47869
- @property
47870
- @pulumi.getter(name="billingCurrency")
47871
- def billing_currency(self) -> builtins.str:
47872
- """
47873
- Acceptable currencies for a billing group. The possible values are `AUD`, `CAD`, `CHF`, `DKK`, `EUR`, `GBP`, `JPY`, `NOK`, `NZD`, `SEK`, `SGD` and `USD`.
47874
- """
47875
- return pulumi.get(self, "billing_currency")
47876
-
47877
48279
  @property
47878
48280
  @pulumi.getter(name="billingEmails")
47879
48281
  def billing_emails(self) -> Sequence[builtins.str]:
@@ -47898,6 +48300,14 @@ class GetOrganizationBillingGroupListBillingGroupResult(dict):
47898
48300
  """
47899
48301
  return pulumi.get(self, "billing_group_name")
47900
48302
 
48303
+ @property
48304
+ @pulumi.getter
48305
+ def currency(self) -> builtins.str:
48306
+ """
48307
+ Acceptable currencies for a billing group. The possible values are `AUD`, `CAD`, `CHF`, `DKK`, `EUR`, `GBP`, `JPY`, `NOK`, `NZD`, `SEK`, `SGD` and `USD`.
48308
+ """
48309
+ return pulumi.get(self, "currency")
48310
+
47901
48311
  @property
47902
48312
  @pulumi.getter(name="customInvoiceText")
47903
48313
  def custom_invoice_text(self) -> builtins.str:
@@ -50241,7 +50651,7 @@ class GetPgPgUserConfigTimescaledbResult(dict):
50241
50651
  def __init__(__self__, *,
50242
50652
  max_background_workers: Optional[builtins.int] = None):
50243
50653
  """
50244
- :param builtins.int max_background_workers: The number of background workers for timescaledb operations. You should configure this setting to the sum of your number of databases and the total number of concurrent background workers you want running at any given point in time. Default: `16`.
50654
+ :param builtins.int max_background_workers: The number of background workers for timescaledb operations. You should configure this setting to the sum of your number of databases and the total number of concurrent background workers you want running at any given point in time. Changing this parameter causes a service restart. Default: `16`.
50245
50655
  """
50246
50656
  if max_background_workers is not None:
50247
50657
  pulumi.set(__self__, "max_background_workers", max_background_workers)
@@ -50250,7 +50660,7 @@ class GetPgPgUserConfigTimescaledbResult(dict):
50250
50660
  @pulumi.getter(name="maxBackgroundWorkers")
50251
50661
  def max_background_workers(self) -> Optional[builtins.int]:
50252
50662
  """
50253
- The number of background workers for timescaledb operations. You should configure this setting to the sum of your number of databases and the total number of concurrent background workers you want running at any given point in time. Default: `16`.
50663
+ The number of background workers for timescaledb operations. You should configure this setting to the sum of your number of databases and the total number of concurrent background workers you want running at any given point in time. Changing this parameter causes a service restart. Default: `16`.
50254
50664
  """
50255
50665
  return pulumi.get(self, "max_background_workers")
50256
50666