pulumi-aiven 6.28.0__py3-none-any.whl → 6.28.0a1730384431__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of pulumi-aiven might be problematic.
- pulumi_aiven/_inputs.py +203 -635
- pulumi_aiven/account_team_project.py +7 -7
- pulumi_aiven/cassandra.py +8 -8
- pulumi_aiven/clickhouse.py +8 -8
- pulumi_aiven/dragonfly.py +8 -8
- pulumi_aiven/flink.py +8 -8
- pulumi_aiven/get_account_team_project.py +1 -1
- pulumi_aiven/get_cassanda.py +1 -1
- pulumi_aiven/get_cassandra.py +1 -1
- pulumi_aiven/get_clickhouse.py +1 -1
- pulumi_aiven/get_dragonfly.py +1 -1
- pulumi_aiven/get_flink.py +1 -1
- pulumi_aiven/get_grafana.py +1 -1
- pulumi_aiven/get_kafka.py +1 -1
- pulumi_aiven/get_kafka_connect.py +1 -1
- pulumi_aiven/get_kafka_mirror_maker.py +1 -1
- pulumi_aiven/get_m3_aggregator.py +1 -1
- pulumi_aiven/get_m3_db.py +1 -1
- pulumi_aiven/get_mirror_maker_replication_flow.py +1 -1
- pulumi_aiven/get_my_sql.py +1 -1
- pulumi_aiven/get_open_search.py +1 -1
- pulumi_aiven/get_pg.py +1 -1
- pulumi_aiven/get_project_user.py +1 -1
- pulumi_aiven/get_redis.py +1 -1
- pulumi_aiven/get_service_component.py +9 -9
- pulumi_aiven/get_service_integration.py +17 -17
- pulumi_aiven/get_service_integration_endpoint.py +31 -59
- pulumi_aiven/get_thanos.py +1 -1
- pulumi_aiven/get_valkey.py +1 -1
- pulumi_aiven/grafana.py +8 -8
- pulumi_aiven/influx_db.py +8 -15
- pulumi_aiven/kafka.py +8 -8
- pulumi_aiven/kafka_connect.py +8 -8
- pulumi_aiven/kafka_mirror_maker.py +8 -8
- pulumi_aiven/m3_aggregator.py +8 -8
- pulumi_aiven/m3_db.py +8 -8
- pulumi_aiven/mirror_maker_replication_flow.py +7 -7
- pulumi_aiven/my_sql.py +8 -8
- pulumi_aiven/open_search.py +8 -8
- pulumi_aiven/organization_group_project.py +9 -17
- pulumi_aiven/organization_permission.py +49 -49
- pulumi_aiven/outputs.py +263 -883
- pulumi_aiven/pg.py +8 -8
- pulumi_aiven/project_user.py +7 -15
- pulumi_aiven/pulumi-plugin.json +1 -1
- pulumi_aiven/redis.py +8 -8
- pulumi_aiven/service_integration.py +123 -139
- pulumi_aiven/service_integration_endpoint.py +140 -302
- pulumi_aiven/thanos.py +8 -8
- pulumi_aiven/valkey.py +8 -8
- {pulumi_aiven-6.28.0.dist-info → pulumi_aiven-6.28.0a1730384431.dist-info}/METADATA +1 -1
- {pulumi_aiven-6.28.0.dist-info → pulumi_aiven-6.28.0a1730384431.dist-info}/RECORD +54 -54
- {pulumi_aiven-6.28.0.dist-info → pulumi_aiven-6.28.0a1730384431.dist-info}/WHEEL +1 -1
- {pulumi_aiven-6.28.0.dist-info → pulumi_aiven-6.28.0a1730384431.dist-info}/top_level.txt +0 -0
pulumi_aiven/outputs.py
CHANGED
```diff
@@ -251,8 +251,6 @@ __all__ = [
     'ServiceIntegrationDatadogUserConfigDatadogTag',
     'ServiceIntegrationDatadogUserConfigOpensearch',
     'ServiceIntegrationDatadogUserConfigRedis',
-    'ServiceIntegrationEndpointAutoscalerUserConfig',
-    'ServiceIntegrationEndpointAutoscalerUserConfigAutoscaling',
     'ServiceIntegrationEndpointDatadogUserConfig',
     'ServiceIntegrationEndpointDatadogUserConfigDatadogTag',
     'ServiceIntegrationEndpointExternalAwsCloudwatchLogsUserConfig',
@@ -266,7 +264,6 @@ __all__ = [
     'ServiceIntegrationEndpointExternalMysqlUserConfig',
     'ServiceIntegrationEndpointExternalOpensearchLogsUserConfig',
     'ServiceIntegrationEndpointExternalPostgresql',
-    'ServiceIntegrationEndpointExternalPrometheusUserConfig',
     'ServiceIntegrationEndpointExternalSchemaRegistryUserConfig',
     'ServiceIntegrationEndpointJolokiaUserConfig',
     'ServiceIntegrationEndpointPrometheusUserConfig',
@@ -553,8 +550,6 @@ __all__ = [
     'GetServiceIntegrationDatadogUserConfigDatadogTagResult',
     'GetServiceIntegrationDatadogUserConfigOpensearchResult',
     'GetServiceIntegrationDatadogUserConfigRedisResult',
-    'GetServiceIntegrationEndpointAutoscalerUserConfigResult',
-    'GetServiceIntegrationEndpointAutoscalerUserConfigAutoscalingResult',
     'GetServiceIntegrationEndpointDatadogUserConfigResult',
     'GetServiceIntegrationEndpointDatadogUserConfigDatadogTagResult',
     'GetServiceIntegrationEndpointExternalAwsCloudwatchLogsUserConfigResult',
@@ -568,7 +563,6 @@ __all__ = [
     'GetServiceIntegrationEndpointExternalMysqlUserConfigResult',
     'GetServiceIntegrationEndpointExternalOpensearchLogsUserConfigResult',
     'GetServiceIntegrationEndpointExternalPostgresqlResult',
-    'GetServiceIntegrationEndpointExternalPrometheusUserConfigResult',
     'GetServiceIntegrationEndpointExternalSchemaRegistryUserConfigResult',
     'GetServiceIntegrationEndpointJolokiaUserConfigResult',
     'GetServiceIntegrationEndpointPrometheusUserConfigResult',
@@ -840,6 +834,7 @@ class CassandraCassandraUserConfig(dict):
 
     @property
     @pulumi.getter(name="additionalBackupRegions")
+    @_utilities.deprecated("""This property is deprecated.""")
     def additional_backup_regions(self) -> Optional[str]:
         """
         Additional Cloud Regions for Backup Replication.
```
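The Cassandra hunk above only stacks a deprecation marker onto the existing getter: reading `additional_backup_regions` still works but now emits a warning. As a rough illustration (not pulumi's actual `_utilities` code), a decorator of this shape produces the same behavior:

```python
import warnings
from functools import wraps

def deprecated(message: str):
    """Illustrative stand-in for pulumi's internal deprecation helper:
    warn whenever the wrapped getter is accessed, then delegate to it."""
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            warnings.warn(message, DeprecationWarning, stacklevel=2)
            return func(*args, **kwargs)
        return wrapper
    return decorator

class Config:
    @property
    @deprecated("additional_backup_regions is deprecated.")
    def additional_backup_regions(self):
        return None
```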
```diff
@@ -2384,7 +2379,7 @@ class DragonflyDragonflyUserConfig(dict):
                  static_ips: Optional[bool] = None):
         """
         :param bool cache_mode: Evict entries when getting close to maxmemory limit. Default: `false`.
-        :param str dragonfly_persistence: Enum: `
+        :param str dragonfly_persistence: Enum: `off`, `rdb`, `dfs`. When persistence is `rdb` or `dfs`, Dragonfly does RDB or DFS dumps every 10 minutes. Dumps are done according to the backup schedule for backup purposes. When persistence is `off`, no RDB/DFS dumps or backups are done, so data can be lost at any moment if the service is restarted for any reason, or if the service is powered off. Also, the service can't be forked.
         :param bool dragonfly_ssl: Require SSL to access Dragonfly. Default: `true`.
         :param Sequence['DragonflyDragonflyUserConfigIpFilterObjectArgs'] ip_filter_objects: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`
         :param Sequence[str] ip_filter_strings: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`.
@@ -2442,7 +2437,7 @@ class DragonflyDragonflyUserConfig(dict):
     @pulumi.getter(name="dragonflyPersistence")
     def dragonfly_persistence(self) -> Optional[str]:
         """
-        Enum: `
+        Enum: `off`, `rdb`, `dfs`. When persistence is `rdb` or `dfs`, Dragonfly does RDB or DFS dumps every 10 minutes. Dumps are done according to the backup schedule for backup purposes. When persistence is `off`, no RDB/DFS dumps or backups are done, so data can be lost at any moment if the service is restarted for any reason, or if the service is powered off. Also, the service can't be forked.
         """
         return pulumi.get(self, "dragonfly_persistence")
 
```
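With the `dragonfly_persistence` enum now fully documented, a minimal sketch of a Dragonfly service follows; the project, cloud, and plan names are placeholders, not values from this diff:

```python
import pulumi_aiven as aiven

# Minimal sketch using the newly documented persistence enum.
# "my-project", the cloud, and the plan are placeholders.
dragonfly = aiven.Dragonfly(
    "example-dragonfly",
    project="my-project",
    cloud_name="google-europe-west1",
    plan="startup-4",
    service_name="example-dragonfly",
    dragonfly_user_config=aiven.DragonflyDragonflyUserConfigArgs(
        dragonfly_persistence="rdb",  # one of: off, rdb, dfs
        dragonfly_ssl=True,
    ),
)
```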
```diff
@@ -3813,10 +3808,10 @@ class GrafanaGrafanaUserConfig(dict):
                  wal: Optional[bool] = None):
         """
         :param str additional_backup_regions: Additional Cloud Regions for Backup Replication.
-        :param bool alerting_enabled:
+        :param bool alerting_enabled: Enable or disable Grafana legacy alerting functionality. This should not be enabled with unified*alerting*enabled.
         :param str alerting_error_or_timeout: Enum: `alerting`, `keep_state`. Default error or timeout setting for new alerting rules.
         :param int alerting_max_annotations_to_keep: Max number of alert annotations that Grafana stores. 0 (default) keeps all alert annotations. Example: `0`.
-        :param str alerting_nodata_or_nullvalues: Enum: `alerting`, `
+        :param str alerting_nodata_or_nullvalues: Enum: `alerting`, `no_data`, `keep_state`, `ok`. Default value for 'no data or null values' for new alerting rules.
         :param bool allow_embedding: Allow embedding Grafana dashboards with iframe/frame/object/embed tags. Disabled by default to limit impact of clickjacking.
         :param 'GrafanaGrafanaUserConfigAuthAzureadArgs' auth_azuread: Azure AD OAuth integration
         :param bool auth_basic_enabled: Enable or disable basic authentication form, used by Grafana built-in login.
@@ -3824,9 +3819,9 @@ class GrafanaGrafanaUserConfig(dict):
         :param 'GrafanaGrafanaUserConfigAuthGithubArgs' auth_github: Github Auth integration
         :param 'GrafanaGrafanaUserConfigAuthGitlabArgs' auth_gitlab: GitLab Auth integration
         :param 'GrafanaGrafanaUserConfigAuthGoogleArgs' auth_google: Google Auth integration
-        :param str cookie_samesite: Enum: `lax`, `
+        :param str cookie_samesite: Enum: `lax`, `strict`, `none`. Cookie SameSite attribute: `strict` prevents sending cookie for cross-site requests, effectively disabling direct linking from other sites to Grafana. `lax` is the default value.
         :param str custom_domain: Serve the web frontend using a custom CNAME pointing to the Aiven DNS name. Example: `grafana.example.org`.
-        :param bool dashboard_previews_enabled:
+        :param bool dashboard_previews_enabled: This feature is new in Grafana 9 and is quite resource intensive. It may cause low-end plans to work more slowly while the dashboard previews are rendering.
         :param str dashboards_min_refresh_interval: Signed sequence of decimal numbers, followed by a unit suffix (ms, s, m, h, d), e.g. 30s, 1h. Example: `5s`.
         :param int dashboards_versions_to_keep: Dashboard versions to keep per dashboard. Example: `20`.
         :param bool dataproxy_send_user_header: Send `X-Grafana-User` header to data source.
@@ -3839,7 +3834,7 @@ class GrafanaGrafanaUserConfig(dict):
         :param Sequence['GrafanaGrafanaUserConfigIpFilterObjectArgs'] ip_filter_objects: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`
         :param Sequence[str] ip_filter_strings: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`.
         :param Sequence[str] ip_filters: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`.
-        :param bool metrics_enabled: Enable Grafana
+        :param bool metrics_enabled: Enable Grafana /metrics endpoint.
         :param bool oauth_allow_insecure_email_lookup: Enforce user lookup based on email instead of the unique ID provided by the IdP.
         :param 'GrafanaGrafanaUserConfigPrivateAccessArgs' private_access: Allow access to selected service ports from private networks
         :param 'GrafanaGrafanaUserConfigPrivatelinkAccessArgs' privatelink_access: Allow access to selected service components through Privatelink
@@ -3850,9 +3845,9 @@ class GrafanaGrafanaUserConfig(dict):
         :param str service_to_fork_from: Name of another service to fork from. This has effect only when a new service is being created. Example: `anotherservicename`.
         :param 'GrafanaGrafanaUserConfigSmtpServerArgs' smtp_server: SMTP server settings
         :param bool static_ips: Use static public IP addresses.
-        :param bool unified_alerting_enabled: Enable or disable Grafana unified alerting functionality. By default this is enabled and any legacy alerts will be migrated on upgrade to Grafana 9+. To stay on legacy alerting, set unified*alerting*enabled to false and alerting_enabled to true. See https://grafana.com/docs/grafana/latest/alerting/ for more details.
+        :param bool unified_alerting_enabled: Enable or disable Grafana unified alerting functionality. By default this is enabled and any legacy alerts will be migrated on upgrade to Grafana 9+. To stay on legacy alerting, set unified*alerting*enabled to false and alerting_enabled to true. See https://grafana.com/docs/grafana/latest/alerting/set-up/migrating-alerts/ for more details.
         :param bool user_auto_assign_org: Auto-assign new users on signup to main organization. Defaults to false.
-        :param str user_auto_assign_org_role: Enum: `
+        :param str user_auto_assign_org_role: Enum: `Viewer`, `Admin`, `Editor`. Set role for new signups. Defaults to Viewer.
         :param bool viewers_can_edit: Users with view-only permission can edit but not save dashboards.
         :param bool wal: Setting to enable/disable Write-Ahead Logging. The default value is false (disabled).
         """
@@ -3955,7 +3950,7 @@ class GrafanaGrafanaUserConfig(dict):
     @pulumi.getter(name="alertingEnabled")
     def alerting_enabled(self) -> Optional[bool]:
         """
-
+        Enable or disable Grafana legacy alerting functionality. This should not be enabled with unified*alerting*enabled.
         """
         return pulumi.get(self, "alerting_enabled")
 
@@ -3979,7 +3974,7 @@ class GrafanaGrafanaUserConfig(dict):
     @pulumi.getter(name="alertingNodataOrNullvalues")
     def alerting_nodata_or_nullvalues(self) -> Optional[str]:
         """
-        Enum: `alerting`, `
+        Enum: `alerting`, `no_data`, `keep_state`, `ok`. Default value for 'no data or null values' for new alerting rules.
         """
         return pulumi.get(self, "alerting_nodata_or_nullvalues")
 
@@ -4043,7 +4038,7 @@ class GrafanaGrafanaUserConfig(dict):
     @pulumi.getter(name="cookieSamesite")
     def cookie_samesite(self) -> Optional[str]:
         """
-        Enum: `lax`, `
+        Enum: `lax`, `strict`, `none`. Cookie SameSite attribute: `strict` prevents sending cookie for cross-site requests, effectively disabling direct linking from other sites to Grafana. `lax` is the default value.
         """
         return pulumi.get(self, "cookie_samesite")
 
@@ -4059,7 +4054,7 @@ class GrafanaGrafanaUserConfig(dict):
     @pulumi.getter(name="dashboardPreviewsEnabled")
     def dashboard_previews_enabled(self) -> Optional[bool]:
         """
-
+        This feature is new in Grafana 9 and is quite resource intensive. It may cause low-end plans to work more slowly while the dashboard previews are rendering.
         """
         return pulumi.get(self, "dashboard_previews_enabled")
 
@@ -4164,7 +4159,7 @@ class GrafanaGrafanaUserConfig(dict):
     @pulumi.getter(name="metricsEnabled")
     def metrics_enabled(self) -> Optional[bool]:
         """
-        Enable Grafana
+        Enable Grafana /metrics endpoint.
         """
         return pulumi.get(self, "metrics_enabled")
 
@@ -4252,7 +4247,7 @@ class GrafanaGrafanaUserConfig(dict):
     @pulumi.getter(name="unifiedAlertingEnabled")
     def unified_alerting_enabled(self) -> Optional[bool]:
         """
-        Enable or disable Grafana unified alerting functionality. By default this is enabled and any legacy alerts will be migrated on upgrade to Grafana 9+. To stay on legacy alerting, set unified*alerting*enabled to false and alerting_enabled to true. See https://grafana.com/docs/grafana/latest/alerting/ for more details.
+        Enable or disable Grafana unified alerting functionality. By default this is enabled and any legacy alerts will be migrated on upgrade to Grafana 9+. To stay on legacy alerting, set unified*alerting*enabled to false and alerting_enabled to true. See https://grafana.com/docs/grafana/latest/alerting/set-up/migrating-alerts/ for more details.
         """
         return pulumi.get(self, "unified_alerting_enabled")
 
@@ -4268,7 +4263,7 @@ class GrafanaGrafanaUserConfig(dict):
     @pulumi.getter(name="userAutoAssignOrgRole")
     def user_auto_assign_org_role(self) -> Optional[str]:
         """
-        Enum: `
+        Enum: `Viewer`, `Admin`, `Editor`. Set role for new signups. Defaults to Viewer.
         """
         return pulumi.get(self, "user_auto_assign_org_role")
 
@@ -4748,9 +4743,9 @@ class GrafanaGrafanaUserConfigAuthGitlab(dict):
         :param str client_id: Client ID from provider. Example: `b1ba0bf54a4c2c0a1c29`.
         :param str client_secret: Client secret from provider. Example: `bfa6gea4f129076761dcba8ce5e1e406bd83af7b`.
         :param bool allow_sign_up: Automatically sign-up users on successful sign-in.
-        :param str api_url: This only needs to be set when using self hosted GitLab. Example: `https://gitlab.com/api/v4`.
-        :param str auth_url: This only needs to be set when using self hosted GitLab. Example: `https://gitlab.com/oauth/authorize`.
-        :param str token_url: This only needs to be set when using self hosted GitLab. Example: `https://gitlab.com/oauth/token`.
+        :param str api_url: API URL. This only needs to be set when using self hosted GitLab. Example: `https://gitlab.com/api/v4`.
+        :param str auth_url: Authorization URL. This only needs to be set when using self hosted GitLab. Example: `https://gitlab.com/oauth/authorize`.
+        :param str token_url: Token URL. This only needs to be set when using self hosted GitLab. Example: `https://gitlab.com/oauth/token`.
         """
         pulumi.set(__self__, "allowed_groups", allowed_groups)
         pulumi.set(__self__, "client_id", client_id)
@@ -4800,7 +4795,7 @@ class GrafanaGrafanaUserConfigAuthGitlab(dict):
     @pulumi.getter(name="apiUrl")
     def api_url(self) -> Optional[str]:
         """
-        This only needs to be set when using self hosted GitLab. Example: `https://gitlab.com/api/v4`.
+        API URL. This only needs to be set when using self hosted GitLab. Example: `https://gitlab.com/api/v4`.
         """
         return pulumi.get(self, "api_url")
 
@@ -4808,7 +4803,7 @@ class GrafanaGrafanaUserConfigAuthGitlab(dict):
     @pulumi.getter(name="authUrl")
     def auth_url(self) -> Optional[str]:
         """
-        This only needs to be set when using self hosted GitLab. Example: `https://gitlab.com/oauth/authorize`.
+        Authorization URL. This only needs to be set when using self hosted GitLab. Example: `https://gitlab.com/oauth/authorize`.
         """
         return pulumi.get(self, "auth_url")
 
@@ -4816,7 +4811,7 @@ class GrafanaGrafanaUserConfigAuthGitlab(dict):
     @pulumi.getter(name="tokenUrl")
     def token_url(self) -> Optional[str]:
         """
-        This only needs to be set when using self hosted GitLab. Example: `https://gitlab.com/oauth/token`.
+        Token URL. This only needs to be set when using self hosted GitLab. Example: `https://gitlab.com/oauth/token`.
         """
         return pulumi.get(self, "token_url")
 
```
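The Grafana docstrings above spell out the alerting, cookie, and role enums. A minimal sketch wiring several of them together; the project and plan are placeholders, and the flags follow the documented rule that legacy and unified alerting should not both be enabled:

```python
import pulumi_aiven as aiven

# Sketch of the Grafana options documented above; assumes the usual
# *Args input types that mirror these output classes.
grafana = aiven.Grafana(
    "example-grafana",
    project="my-project",
    cloud_name="google-europe-west1",
    plan="startup-4",
    service_name="example-grafana",
    grafana_user_config=aiven.GrafanaGrafanaUserConfigArgs(
        alerting_enabled=False,                   # legacy alerting off...
        unified_alerting_enabled=True,            # ...because unified alerting is on
        alerting_nodata_or_nullvalues="no_data",  # alerting, no_data, keep_state, ok
        cookie_samesite="lax",                    # lax, strict, none
        user_auto_assign_org_role="Viewer",       # Viewer, Admin, Editor
        metrics_enabled=True,                     # exposes the /metrics endpoint
    ),
)
```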
```diff
@@ -5061,7 +5056,7 @@ class GrafanaGrafanaUserConfigExternalImageStorage(dict):
         """
         :param str access_key: S3 access key. Requires permissions to the S3 bucket for the s3:PutObject and s3:PutObjectAcl actions. Example: `AAAAAAAAAAAAAAAAAAA`.
         :param str bucket_url: Bucket URL for S3. Example: `https://grafana.s3-ap-southeast-2.amazonaws.com/`.
-        :param str provider: Enum: `s3`.
+        :param str provider: Enum: `s3`. Provider type.
         :param str secret_key: S3 secret key. Example: `AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA`.
         """
         pulumi.set(__self__, "access_key", access_key)
@@ -5089,7 +5084,7 @@ class GrafanaGrafanaUserConfigExternalImageStorage(dict):
     @pulumi.getter
     def provider(self) -> str:
         """
-        Enum: `s3`.
+        Enum: `s3`. Provider type.
         """
         return pulumi.get(self, "provider")
 
@@ -5230,7 +5225,7 @@ class GrafanaGrafanaUserConfigSmtpServer(dict):
         :param str from_name: Name used in outgoing emails, defaults to Grafana.
         :param str password: Password for SMTP authentication. Example: `ein0eemeev5eeth3Ahfu`.
         :param bool skip_verify: Skip verifying server certificate. Defaults to false.
-        :param str starttls_policy: Enum: `
+        :param str starttls_policy: Enum: `OpportunisticStartTLS`, `MandatoryStartTLS`, `NoStartTLS`. Either OpportunisticStartTLS, MandatoryStartTLS or NoStartTLS. Default is OpportunisticStartTLS.
         :param str username: Username for SMTP authentication. Example: `smtpuser`.
         """
         pulumi.set(__self__, "from_address", from_address)
@@ -5299,7 +5294,7 @@ class GrafanaGrafanaUserConfigSmtpServer(dict):
     @pulumi.getter(name="starttlsPolicy")
     def starttls_policy(self) -> Optional[str]:
         """
-        Enum: `
+        Enum: `OpportunisticStartTLS`, `MandatoryStartTLS`, `NoStartTLS`. Either OpportunisticStartTLS, MandatoryStartTLS or NoStartTLS. Default is OpportunisticStartTLS.
         """
         return pulumi.get(self, "starttls_policy")
 
```
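A sketch of the SMTP block using the now-complete `starttls_policy` enum; the host, port, and credentials are placeholders, and the `Args` class is assumed to mirror the output type above:

```python
import pulumi_aiven as aiven

# Illustrative SMTP settings; plug into GrafanaGrafanaUserConfigArgs(smtp_server=...).
smtp = aiven.GrafanaGrafanaUserConfigSmtpServerArgs(
    host="smtp.example.com",                 # placeholder
    port=587,                                # placeholder
    from_address="grafana@example.com",      # placeholder
    starttls_policy="MandatoryStartTLS",     # OpportunisticStartTLS, MandatoryStartTLS, NoStartTLS
    username="smtpuser",
    password="example-password",
)
```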
```diff
@@ -5610,8 +5605,6 @@ class InfluxDbInfluxdbUserConfig(dict):
             suggest = "additional_backup_regions"
         elif key == "customDomain":
             suggest = "custom_domain"
-        elif key == "influxdbVersion":
-            suggest = "influxdb_version"
         elif key == "ipFilterObjects":
             suggest = "ip_filter_objects"
         elif key == "ipFilterStrings":
@@ -5650,7 +5643,6 @@ class InfluxDbInfluxdbUserConfig(dict):
                  additional_backup_regions: Optional[str] = None,
                  custom_domain: Optional[str] = None,
                  influxdb: Optional['outputs.InfluxDbInfluxdbUserConfigInfluxdb'] = None,
-                 influxdb_version: Optional[str] = None,
                  ip_filter_objects: Optional[Sequence['outputs.InfluxDbInfluxdbUserConfigIpFilterObject']] = None,
                  ip_filter_strings: Optional[Sequence[str]] = None,
                  ip_filters: Optional[Sequence[str]] = None,
@@ -5666,7 +5658,6 @@ class InfluxDbInfluxdbUserConfig(dict):
         :param str additional_backup_regions: Additional Cloud Regions for Backup Replication.
         :param str custom_domain: Serve the web frontend using a custom CNAME pointing to the Aiven DNS name. Example: `grafana.example.org`.
         :param 'InfluxDbInfluxdbUserConfigInfluxdbArgs' influxdb: influxdb.conf configuration values
-        :param str influxdb_version: Enum: `1.8`, and newer. InfluxDB major version. Default: `1.8`.
         :param Sequence['InfluxDbInfluxdbUserConfigIpFilterObjectArgs'] ip_filter_objects: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`
         :param Sequence[str] ip_filter_strings: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`.
         :param Sequence[str] ip_filters: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`.
@@ -5685,8 +5676,6 @@ class InfluxDbInfluxdbUserConfig(dict):
             pulumi.set(__self__, "custom_domain", custom_domain)
         if influxdb is not None:
             pulumi.set(__self__, "influxdb", influxdb)
-        if influxdb_version is not None:
-            pulumi.set(__self__, "influxdb_version", influxdb_version)
         if ip_filter_objects is not None:
             pulumi.set(__self__, "ip_filter_objects", ip_filter_objects)
         if ip_filter_strings is not None:
@@ -5735,14 +5724,6 @@ class InfluxDbInfluxdbUserConfig(dict):
         """
         return pulumi.get(self, "influxdb")
 
-    @property
-    @pulumi.getter(name="influxdbVersion")
-    def influxdb_version(self) -> Optional[str]:
-        """
-        Enum: `1.8`, and newer. InfluxDB major version. Default: `1.8`.
-        """
-        return pulumi.get(self, "influxdb_version")
-
     @property
     @pulumi.getter(name="ipFilterObjects")
     def ip_filter_objects(self) -> Optional[Sequence['outputs.InfluxDbInfluxdbUserConfigIpFilterObject']]:
@@ -5985,34 +5966,13 @@ class InfluxDbInfluxdbUserConfigIpFilterObject(dict):
 
 @pulumi.output_type
 class InfluxDbInfluxdbUserConfigPrivateAccess(dict):
-    @staticmethod
-    def __key_warning(key: str):
-        suggest = None
-        if key == "userBackup":
-            suggest = "user_backup"
-
-        if suggest:
-            pulumi.log.warn(f"Key '{key}' not found in InfluxDbInfluxdbUserConfigPrivateAccess. Access the value via the '{suggest}' property getter instead.")
-
-    def __getitem__(self, key: str) -> Any:
-        InfluxDbInfluxdbUserConfigPrivateAccess.__key_warning(key)
-        return super().__getitem__(key)
-
-    def get(self, key: str, default = None) -> Any:
-        InfluxDbInfluxdbUserConfigPrivateAccess.__key_warning(key)
-        return super().get(key, default)
-
     def __init__(__self__, *,
-                 influxdb: Optional[bool] = None,
-                 user_backup: Optional[bool] = None):
+                 influxdb: Optional[bool] = None):
         """
         :param bool influxdb: Allow clients to connect to influxdb with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
-        :param bool user_backup: Allow clients to connect to user_backup with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
         """
         if influxdb is not None:
             pulumi.set(__self__, "influxdb", influxdb)
-        if user_backup is not None:
-            pulumi.set(__self__, "user_backup", user_backup)
 
     @property
     @pulumi.getter
@@ -6022,45 +5982,16 @@ class InfluxDbInfluxdbUserConfigPrivateAccess(dict):
         """
         return pulumi.get(self, "influxdb")
 
-    @property
-    @pulumi.getter(name="userBackup")
-    def user_backup(self) -> Optional[bool]:
-        """
-        Allow clients to connect to user_backup with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
-        """
-        return pulumi.get(self, "user_backup")
-
 
 @pulumi.output_type
 class InfluxDbInfluxdbUserConfigPrivatelinkAccess(dict):
-    @staticmethod
-    def __key_warning(key: str):
-        suggest = None
-        if key == "userBackup":
-            suggest = "user_backup"
-
-        if suggest:
-            pulumi.log.warn(f"Key '{key}' not found in InfluxDbInfluxdbUserConfigPrivatelinkAccess. Access the value via the '{suggest}' property getter instead.")
-
-    def __getitem__(self, key: str) -> Any:
-        InfluxDbInfluxdbUserConfigPrivatelinkAccess.__key_warning(key)
-        return super().__getitem__(key)
-
-    def get(self, key: str, default = None) -> Any:
-        InfluxDbInfluxdbUserConfigPrivatelinkAccess.__key_warning(key)
-        return super().get(key, default)
-
     def __init__(__self__, *,
-                 influxdb: Optional[bool] = None,
-                 user_backup: Optional[bool] = None):
+                 influxdb: Optional[bool] = None):
         """
         :param bool influxdb: Enable influxdb.
-        :param bool user_backup: Enable user_backup.
         """
         if influxdb is not None:
             pulumi.set(__self__, "influxdb", influxdb)
-        if user_backup is not None:
-            pulumi.set(__self__, "user_backup", user_backup)
 
     @property
     @pulumi.getter
@@ -6070,45 +6001,16 @@ class InfluxDbInfluxdbUserConfigPrivatelinkAccess(dict):
         """
         return pulumi.get(self, "influxdb")
 
-    @property
-    @pulumi.getter(name="userBackup")
-    def user_backup(self) -> Optional[bool]:
-        """
-        Enable user_backup.
-        """
-        return pulumi.get(self, "user_backup")
-
 
 @pulumi.output_type
 class InfluxDbInfluxdbUserConfigPublicAccess(dict):
-    @staticmethod
-    def __key_warning(key: str):
-        suggest = None
-        if key == "userBackup":
-            suggest = "user_backup"
-
-        if suggest:
-            pulumi.log.warn(f"Key '{key}' not found in InfluxDbInfluxdbUserConfigPublicAccess. Access the value via the '{suggest}' property getter instead.")
-
-    def __getitem__(self, key: str) -> Any:
-        InfluxDbInfluxdbUserConfigPublicAccess.__key_warning(key)
-        return super().__getitem__(key)
-
-    def get(self, key: str, default = None) -> Any:
-        InfluxDbInfluxdbUserConfigPublicAccess.__key_warning(key)
-        return super().get(key, default)
-
     def __init__(__self__, *,
-                 influxdb: Optional[bool] = None,
-                 user_backup: Optional[bool] = None):
+                 influxdb: Optional[bool] = None):
         """
         :param bool influxdb: Allow clients to connect to influxdb from the public internet for service nodes that are in a project VPC or another type of private network.
-        :param bool user_backup: Allow clients to connect to user_backup from the public internet for service nodes that are in a project VPC or another type of private network.
         """
         if influxdb is not None:
             pulumi.set(__self__, "influxdb", influxdb)
-        if user_backup is not None:
-            pulumi.set(__self__, "user_backup", user_backup)
 
     @property
     @pulumi.getter
@@ -6118,14 +6020,6 @@ class InfluxDbInfluxdbUserConfigPublicAccess(dict):
         """
         return pulumi.get(self, "influxdb")
 
-    @property
-    @pulumi.getter(name="userBackup")
-    def user_backup(self) -> Optional[bool]:
-        """
-        Allow clients to connect to user_backup from the public internet for service nodes that are in a project VPC or another type of private network.
-        """
-        return pulumi.get(self, "user_backup")
-
 
 @pulumi.output_type
 class InfluxDbServiceIntegration(dict):
```
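These hunks remove `influxdb_version` and every `user_backup` toggle from the InfluxDB user config, so configurations that still set them will no longer match this build's types. A sketch that stays within the new shape (placeholder project, cloud, and plan names):

```python
import pulumi_aiven as aiven

# Sketch of an InfluxDB service config that avoids the removed
# influxdb_version and user_backup options; names are placeholders.
influx = aiven.InfluxDb(
    "example-influxdb",
    project="my-project",
    cloud_name="google-europe-west1",
    plan="startup-4",
    service_name="example-influxdb",
    influxdb_user_config=aiven.InfluxDbInfluxdbUserConfigArgs(
        private_access=aiven.InfluxDbInfluxdbUserConfigPrivateAccessArgs(
            influxdb=True,  # user_backup is no longer accepted here
        ),
    ),
)
```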
```diff
@@ -6737,10 +6631,10 @@ class KafkaConnectKafkaConnectUserConfigKafkaConnect(dict):
                  scheduled_rebalance_max_delay_ms: Optional[int] = None,
                  session_timeout_ms: Optional[int] = None):
         """
-        :param str connector_client_config_override_policy: Enum: `
+        :param str connector_client_config_override_policy: Enum: `None`, `All`. Defines what client configurations can be overridden by the connector. Default is None.
         :param str consumer_auto_offset_reset: Enum: `earliest`, `latest`. What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
         :param int consumer_fetch_max_bytes: Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not a absolute maximum. Example: `52428800`.
-        :param str consumer_isolation_level: Enum: `
+        :param str consumer_isolation_level: Enum: `read_uncommitted`, `read_committed`. Transaction read isolation level. read*uncommitted is the default, but read*committed can be used if consume-exactly-once behavior is desired.
         :param int consumer_max_partition_fetch_bytes: Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress. Example: `1048576`.
         :param int consumer_max_poll_interval_ms: The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).
         :param int consumer_max_poll_records: The maximum number of records returned in a single call to poll() (defaults to 500).
@@ -6748,7 +6642,7 @@ class KafkaConnectKafkaConnectUserConfigKafkaConnect(dict):
         :param int offset_flush_timeout_ms: Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).
         :param int producer_batch_size: This setting gives the upper bound of the batch size to be sent. If there are fewer than this many bytes accumulated for this partition, the producer will `linger` for the linger.ms time waiting for more records to show up. A batch size of zero will disable batching entirely (defaults to 16384).
         :param int producer_buffer_memory: The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432).
-        :param str producer_compression_type: Enum: `gzip`, `
+        :param str producer_compression_type: Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
         :param int producer_linger_ms: This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting, however if there are fewer than this many bytes accumulated for this partition the producer will `linger` for the specified time waiting for more records to show up. Defaults to 0.
         :param int producer_max_request_size: This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests. Example: `1048576`.
         :param int scheduled_rebalance_max_delay_ms: The maximum delay that is scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. Defaults to 5 minutes.
@@ -6791,7 +6685,7 @@ class KafkaConnectKafkaConnectUserConfigKafkaConnect(dict):
     @pulumi.getter(name="connectorClientConfigOverridePolicy")
     def connector_client_config_override_policy(self) -> Optional[str]:
         """
-        Enum: `
+        Enum: `None`, `All`. Defines what client configurations can be overridden by the connector. Default is None.
         """
         return pulumi.get(self, "connector_client_config_override_policy")
 
@@ -6815,7 +6709,7 @@ class KafkaConnectKafkaConnectUserConfigKafkaConnect(dict):
     @pulumi.getter(name="consumerIsolationLevel")
     def consumer_isolation_level(self) -> Optional[str]:
         """
-        Enum: `
+        Enum: `read_uncommitted`, `read_committed`. Transaction read isolation level. read*uncommitted is the default, but read*committed can be used if consume-exactly-once behavior is desired.
         """
         return pulumi.get(self, "consumer_isolation_level")
 
@@ -6879,7 +6773,7 @@ class KafkaConnectKafkaConnectUserConfigKafkaConnect(dict):
     @pulumi.getter(name="producerCompressionType")
     def producer_compression_type(self) -> Optional[str]:
         """
-        Enum: `gzip`, `
+        Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
         """
         return pulumi.get(self, "producer_compression_type")
 
```
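A sketch of a Kafka Connect service using the worker options whose enum lists were completed above; the names are placeholders and the chosen values are examples, not defaults:

```python
import pulumi_aiven as aiven

# Illustrative Kafka Connect worker settings; assumes the usual *Args
# input classes mirroring the output types in this diff.
connect = aiven.KafkaConnect(
    "example-kafka-connect",
    project="my-project",
    cloud_name="google-europe-west1",
    plan="startup-4",
    service_name="example-kafka-connect",
    kafka_connect_user_config=aiven.KafkaConnectKafkaConnectUserConfigArgs(
        kafka_connect=aiven.KafkaConnectKafkaConnectUserConfigKafkaConnectArgs(
            connector_client_config_override_policy="All",  # None or All
            consumer_isolation_level="read_committed",      # read_uncommitted or read_committed
            producer_compression_type="zstd",               # gzip, snappy, lz4, zstd, none
        ),
    ),
)
```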
```diff
@@ -8107,7 +8001,7 @@ class KafkaKafkaUserConfigKafka(dict):
                  transaction_state_log_segment_bytes: Optional[int] = None):
         """
         :param bool auto_create_topics_enable: Enable auto-creation of topics. (Default: true).
-        :param str compression_type: Enum: `gzip`, `
+        :param str compression_type: Enum: `gzip`, `snappy`, `lz4`, `zstd`, `uncompressed`, `producer`. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `uncompressed` which is equivalent to no compression; and `producer` which means retain the original compression codec set by the producer.(Default: producer).
         :param int connections_max_idle_ms: Idle connections timeout: the server socket processor threads close the connections that idle for longer than this. (Default: 600000 ms (10 minutes)). Example: `540000`.
         :param int default_replication_factor: Replication factor for auto-created topics (Default: 3).
         :param int group_initial_rebalance_delay_ms: The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. (Default: 3000 ms (3 seconds)). Example: `3000`.
@@ -8117,8 +8011,8 @@ class KafkaKafkaUserConfigKafka(dict):
         :param int log_cleaner_max_compaction_lag_ms: The maximum amount of time message will remain uncompacted. Only applicable for logs that are being compacted. (Default: 9223372036854775807 ms (Long.MAX_VALUE)).
         :param float log_cleaner_min_cleanable_ratio: Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option. (Default: 0.5). Example: `0.5`.
         :param int log_cleaner_min_compaction_lag_ms: The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted. (Default: 0 ms).
-        :param str log_cleanup_policy: Enum: `
-        :param int log_flush_interval_messages: The number of messages accumulated on a log partition before messages are flushed to disk (Default: 9223372036854775807 (Long.MAX_VALUE)).
+        :param str log_cleanup_policy: Enum: `delete`, `compact`, `compact,delete`. The default cleanup policy for segments beyond the retention window (Default: delete).
+        :param int log_flush_interval_messages: The number of messages accumulated on a log partition before messages are flushed to disk (Default: 9223372036854775807 (Long.MAX_VALUE)). Example: `9223372036854775807`.
         :param int log_flush_interval_ms: The maximum time in ms that a message in any topic is kept in memory (page-cache) before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used (Default: null).
         :param int log_index_interval_bytes: The interval with which Kafka adds an entry to the offset index (Default: 4096 bytes (4 kibibytes)). Example: `4096`.
         :param int log_index_size_max_bytes: The maximum size in bytes of the offset index (Default: 10485760 (10 mebibytes)). Example: `10485760`.
@@ -8258,7 +8152,7 @@ class KafkaKafkaUserConfigKafka(dict):
     @pulumi.getter(name="compressionType")
     def compression_type(self) -> Optional[str]:
         """
-        Enum: `gzip`, `
+        Enum: `gzip`, `snappy`, `lz4`, `zstd`, `uncompressed`, `producer`. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `uncompressed` which is equivalent to no compression; and `producer` which means retain the original compression codec set by the producer.(Default: producer).
         """
         return pulumi.get(self, "compression_type")
 
@@ -8338,7 +8232,7 @@ class KafkaKafkaUserConfigKafka(dict):
     @pulumi.getter(name="logCleanupPolicy")
     def log_cleanup_policy(self) -> Optional[str]:
         """
-        Enum: `
+        Enum: `delete`, `compact`, `compact,delete`. The default cleanup policy for segments beyond the retention window (Default: delete).
         """
         return pulumi.get(self, "log_cleanup_policy")
 
@@ -8346,7 +8240,7 @@ class KafkaKafkaUserConfigKafka(dict):
     @pulumi.getter(name="logFlushIntervalMessages")
     def log_flush_interval_messages(self) -> Optional[int]:
         """
-        The number of messages accumulated on a log partition before messages are flushed to disk (Default: 9223372036854775807 (Long.MAX_VALUE)).
+        The number of messages accumulated on a log partition before messages are flushed to disk (Default: 9223372036854775807 (Long.MAX_VALUE)). Example: `9223372036854775807`.
         """
         return pulumi.get(self, "log_flush_interval_messages")
 
```
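A sketch of an `aiven.Kafka` service applying the broker-level options documented above (placeholder names; values are illustrative choices):

```python
import pulumi_aiven as aiven

# Illustrative broker settings using the completed enums.
kafka = aiven.Kafka(
    "example-kafka",
    project="my-project",
    cloud_name="google-europe-west1",
    plan="business-4",
    service_name="example-kafka",
    kafka_user_config=aiven.KafkaKafkaUserConfigArgs(
        kafka=aiven.KafkaKafkaUserConfigKafkaArgs(
            compression_type="producer",          # gzip, snappy, lz4, zstd, uncompressed, producer
            log_cleanup_policy="compact,delete",  # delete, compact, compact,delete
        ),
    ),
)
```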
```diff
@@ -8713,10 +8607,10 @@ class KafkaKafkaUserConfigKafkaConnectConfig(dict):
                  scheduled_rebalance_max_delay_ms: Optional[int] = None,
                  session_timeout_ms: Optional[int] = None):
         """
-        :param str connector_client_config_override_policy: Enum: `
+        :param str connector_client_config_override_policy: Enum: `None`, `All`. Defines what client configurations can be overridden by the connector. Default is None.
         :param str consumer_auto_offset_reset: Enum: `earliest`, `latest`. What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
         :param int consumer_fetch_max_bytes: Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not a absolute maximum. Example: `52428800`.
-        :param str consumer_isolation_level: Enum: `
+        :param str consumer_isolation_level: Enum: `read_uncommitted`, `read_committed`. Transaction read isolation level. read*uncommitted is the default, but read*committed can be used if consume-exactly-once behavior is desired.
         :param int consumer_max_partition_fetch_bytes: Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress. Example: `1048576`.
         :param int consumer_max_poll_interval_ms: The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).
         :param int consumer_max_poll_records: The maximum number of records returned in a single call to poll() (defaults to 500).
@@ -8724,7 +8618,7 @@ class KafkaKafkaUserConfigKafkaConnectConfig(dict):
         :param int offset_flush_timeout_ms: Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).
         :param int producer_batch_size: This setting gives the upper bound of the batch size to be sent. If there are fewer than this many bytes accumulated for this partition, the producer will `linger` for the linger.ms time waiting for more records to show up. A batch size of zero will disable batching entirely (defaults to 16384).
         :param int producer_buffer_memory: The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432).
-        :param str producer_compression_type: Enum: `gzip`, `
+        :param str producer_compression_type: Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
         :param int producer_linger_ms: This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting, however if there are fewer than this many bytes accumulated for this partition the producer will `linger` for the specified time waiting for more records to show up. Defaults to 0.
         :param int producer_max_request_size: This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests. Example: `1048576`.
         :param int scheduled_rebalance_max_delay_ms: The maximum delay that is scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. Defaults to 5 minutes.
@@ -8767,7 +8661,7 @@ class KafkaKafkaUserConfigKafkaConnectConfig(dict):
     @pulumi.getter(name="connectorClientConfigOverridePolicy")
     def connector_client_config_override_policy(self) -> Optional[str]:
         """
-        Enum: `
+        Enum: `None`, `All`. Defines what client configurations can be overridden by the connector. Default is None.
         """
         return pulumi.get(self, "connector_client_config_override_policy")
 
@@ -8791,7 +8685,7 @@ class KafkaKafkaUserConfigKafkaConnectConfig(dict):
     @pulumi.getter(name="consumerIsolationLevel")
     def consumer_isolation_level(self) -> Optional[str]:
         """
-        Enum: `
+        Enum: `read_uncommitted`, `read_committed`. Transaction read isolation level. read*uncommitted is the default, but read*committed can be used if consume-exactly-once behavior is desired.
         """
         return pulumi.get(self, "consumer_isolation_level")
 
@@ -8855,7 +8749,7 @@ class KafkaKafkaUserConfigKafkaConnectConfig(dict):
     @pulumi.getter(name="producerCompressionType")
     def producer_compression_type(self) -> Optional[str]:
         """
-        Enum: `gzip`, `
+        Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
         """
         return pulumi.get(self, "producer_compression_type")
 
@@ -9146,10 +9040,10 @@ class KafkaKafkaUserConfigKafkaRestConfig(dict):
         :param bool consumer_enable_auto_commit: If true the consumer's offset will be periodically committed to Kafka in the background. Default: `true`.
         :param int consumer_request_max_bytes: Maximum number of bytes in unencoded message keys and values by a single request. Default: `67108864`.
         :param int consumer_request_timeout_ms: Enum: `1000`, `15000`, `30000`. The maximum total time to wait for messages for a request if the maximum number of messages has not yet been reached. Default: `1000`.
-        :param str name_strategy: Enum: `
+        :param str name_strategy: Enum: `topic_name`, `record_name`, `topic_record_name`. Name strategy to use when selecting subject for storing schemas. Default: `topic_name`.
         :param bool name_strategy_validation: If true, validate that given schema is registered under expected subject name by the used name strategy when producing messages. Default: `true`.
-        :param str producer_acks: Enum: `-1`, `0`, `1
-        :param str producer_compression_type: Enum: `gzip`, `
+        :param str producer_acks: Enum: `all`, `-1`, `0`, `1`. The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to `all` or `-1`, the leader will wait for the full set of in-sync replicas to acknowledge the record. Default: `1`.
+        :param str producer_compression_type: Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
         :param int producer_linger_ms: Wait for up to the given delay to allow batching records together. Default: `0`.
         :param int producer_max_request_size: The maximum size of a request in bytes. Note that Kafka broker can also cap the record batch size. Default: `1048576`.
         :param int simpleconsumer_pool_size_max: Maximum number of SimpleConsumers that can be instantiated per broker. Default: `25`.
@@ -9203,7 +9097,7 @@ class KafkaKafkaUserConfigKafkaRestConfig(dict):
     @pulumi.getter(name="nameStrategy")
     def name_strategy(self) -> Optional[str]:
         """
-        Enum: `
+        Enum: `topic_name`, `record_name`, `topic_record_name`. Name strategy to use when selecting subject for storing schemas. Default: `topic_name`.
         """
         return pulumi.get(self, "name_strategy")
 
@@ -9219,7 +9113,7 @@ class KafkaKafkaUserConfigKafkaRestConfig(dict):
     @pulumi.getter(name="producerAcks")
     def producer_acks(self) -> Optional[str]:
         """
-        Enum: `-1`, `0`, `1
+        Enum: `all`, `-1`, `0`, `1`. The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to `all` or `-1`, the leader will wait for the full set of in-sync replicas to acknowledge the record. Default: `1`.
         """
         return pulumi.get(self, "producer_acks")
 
@@ -9227,7 +9121,7 @@ class KafkaKafkaUserConfigKafkaRestConfig(dict):
     @pulumi.getter(name="producerCompressionType")
    def producer_compression_type(self) -> Optional[str]:
         """
-        Enum: `gzip`, `
+        Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
         """
         return pulumi.get(self, "producer_compression_type")
 
```
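The same enum completions repeat for the Connect and REST configs embedded in the Kafka service. A sketch of the REST options, assuming the `KafkaKafkaUserConfigKafkaRestConfigArgs` input class mirrors this output type as pulumi's codegen normally provides:

```python
import pulumi_aiven as aiven

# Illustrative Kafka REST settings; attach via
# kafka_user_config=aiven.KafkaKafkaUserConfigArgs(kafka_rest_config=rest_config).
rest_config = aiven.KafkaKafkaUserConfigKafkaRestConfigArgs(
    name_strategy="topic_name",        # topic_name, record_name, topic_record_name
    producer_acks="all",               # all, -1, 0, 1
    producer_compression_type="zstd",  # gzip, snappy, lz4, zstd, none
)
```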
```diff
@@ -10538,8 +10432,8 @@ class KafkaTopicConfig(dict):
                  segment_ms: Optional[str] = None,
                  unclean_leader_election_enable: Optional[bool] = None):
         """
-        :param str cleanup_policy: cleanup.policy value
-        :param str compression_type: compression.type value
+        :param str cleanup_policy: cleanup.policy value
+        :param str compression_type: compression.type value
         :param str delete_retention_ms: delete.retention.ms value
         :param str file_delete_delay_ms: file.delete.delay.ms value
         :param str flush_messages: flush.messages value
@@ -10550,9 +10444,9 @@ class KafkaTopicConfig(dict):
         :param str max_compaction_lag_ms: max.compaction.lag.ms value
         :param str max_message_bytes: max.message.bytes value
         :param bool message_downconversion_enable: message.downconversion.enable value
-        :param str message_format_version: message.format.version value
+        :param str message_format_version: message.format.version value
         :param str message_timestamp_difference_max_ms: message.timestamp.difference.max.ms value
-        :param str message_timestamp_type: message.timestamp.type value
+        :param str message_timestamp_type: message.timestamp.type value
         :param float min_cleanable_dirty_ratio: min.cleanable.dirty.ratio value
         :param str min_compaction_lag_ms: min.compaction.lag.ms value
         :param str min_insync_replicas: min.insync.replicas value
@@ -10625,7 +10519,7 @@ class KafkaTopicConfig(dict):
     @pulumi.getter(name="cleanupPolicy")
     def cleanup_policy(self) -> Optional[str]:
         """
-        cleanup.policy value
+        cleanup.policy value
         """
         return pulumi.get(self, "cleanup_policy")
 
@@ -10633,7 +10527,7 @@ class KafkaTopicConfig(dict):
     @pulumi.getter(name="compressionType")
     def compression_type(self) -> Optional[str]:
         """
-        compression.type value
+        compression.type value
         """
         return pulumi.get(self, "compression_type")
 
@@ -10721,7 +10615,7 @@ class KafkaTopicConfig(dict):
     @pulumi.getter(name="messageFormatVersion")
     def message_format_version(self) -> Optional[str]:
         """
-        message.format.version value
+        message.format.version value
         """
         return pulumi.get(self, "message_format_version")
 
@@ -10737,7 +10631,7 @@ class KafkaTopicConfig(dict):
     @pulumi.getter(name="messageTimestampType")
     def message_timestamp_type(self) -> Optional[str]:
         """
-        message.timestamp.type value
+        message.timestamp.type value
         """
         return pulumi.get(self, "message_timestamp_type")
 
```
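For reference, a sketch of a topic setting the `cleanup_policy` and `compression_type` fields touched above; the service name refers back to the earlier Kafka sketch and all names are placeholders:

```python
import pulumi_aiven as aiven

# Illustrative topic config; partitions/replication values are examples.
topic = aiven.KafkaTopic(
    "example-topic",
    project="my-project",
    service_name="example-kafka",
    topic_name="example-topic",
    partitions=3,
    replication=2,
    config=aiven.KafkaTopicConfigArgs(
        cleanup_policy="compact",
        compression_type="zstd",
    ),
)
```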
@@ -13451,9 +13345,9 @@ class MySqlMysqlUserConfigMysql(dict):
         :param int innodb_thread_concurrency: Defines the maximum number of threads permitted inside of InnoDB. Default is 0 (infinite concurrency - no limit). Example: `10`.
         :param int innodb_write_io_threads: The number of I/O threads for write operations in InnoDB. Default is 4. Changing this parameter will lead to a restart of the MySQL service. Example: `10`.
         :param int interactive_timeout: The number of seconds the server waits for activity on an interactive connection before closing it. Example: `3600`.
-        :param str internal_tmp_mem_storage_engine: Enum: `
-        :param str log_output: Enum: `INSIGHTS`, `
-        :param float long_query_time: The slow*query*logs work as SQL statements that take more than long*query*time seconds to execute. Example: `10
+        :param str internal_tmp_mem_storage_engine: Enum: `TempTable`, `MEMORY`. The storage engine for in-memory internal temporary tables.
+        :param str log_output: Enum: `INSIGHTS`, `NONE`, `TABLE`, `INSIGHTS,TABLE`. The slow log output destination when slow*query*log is ON. To enable MySQL AI Insights, choose INSIGHTS. To use MySQL AI Insights and the mysql.slow*log table at the same time, choose INSIGHTS,TABLE. To only use the mysql.slow*log table, choose TABLE. To silence slow logs, choose NONE.
+        :param float long_query_time: The slow*query*logs work as SQL statements that take more than long*query*time seconds to execute. Example: `10`.
         :param int max_allowed_packet: Size of the largest message in bytes that can be received by the server. Default is 67108864 (64M). Example: `67108864`.
         :param int max_heap_table_size: Limits the size of internal in-memory tables. Also set tmp*table*size. Default is 16777216 (16M). Example: `16777216`.
         :param int net_buffer_length: Start sizes of connection buffer and result buffer. Default is 16384 (16K). Changing this parameter will lead to a restart of the MySQL service. Example: `16384`.
@@ -13669,7 +13563,7 @@ class MySqlMysqlUserConfigMysql(dict):
     @pulumi.getter(name="internalTmpMemStorageEngine")
     def internal_tmp_mem_storage_engine(self) -> Optional[str]:
         """
-        Enum: `
+        Enum: `TempTable`, `MEMORY`. The storage engine for in-memory internal temporary tables.
         """
         return pulumi.get(self, "internal_tmp_mem_storage_engine")

@@ -13677,7 +13571,7 @@ class MySqlMysqlUserConfigMysql(dict):
     @pulumi.getter(name="logOutput")
     def log_output(self) -> Optional[str]:
         """
-        Enum: `INSIGHTS`, `
+        Enum: `INSIGHTS`, `NONE`, `TABLE`, `INSIGHTS,TABLE`. The slow log output destination when slow*query*log is ON. To enable MySQL AI Insights, choose INSIGHTS. To use MySQL AI Insights and the mysql.slow*log table at the same time, choose INSIGHTS,TABLE. To only use the mysql.slow*log table, choose TABLE. To silence slow logs, choose NONE.
         """
         return pulumi.get(self, "log_output")

@@ -13685,7 +13579,7 @@ class MySqlMysqlUserConfigMysql(dict):
     @pulumi.getter(name="longQueryTime")
     def long_query_time(self) -> Optional[float]:
         """
-        The slow*query*logs work as SQL statements that take more than long*query*time seconds to execute. Example: `10
+        The slow*query*logs work as SQL statements that take more than long*query*time seconds to execute. Example: `10`.
         """
         return pulumi.get(self, "long_query_time")

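A hedged sketch of how the MySQL settings documented above are applied through the user config; all names below are placeholders, not values from this package:

```python
# Sketch only: applying the MySQL enum settings documented above.
# "my-project" and the service name are assumed placeholders.
import pulumi_aiven as aiven

mysql = aiven.MySql(
    "example-mysql",
    project="my-project",
    cloud_name="google-europe-west1",
    plan="business-4",
    service_name="example-mysql",
    mysql_user_config=aiven.MySqlMysqlUserConfigArgs(
        mysql=aiven.MySqlMysqlUserConfigMysqlArgs(
            log_output="INSIGHTS",        # INSIGHTS, NONE, TABLE, or INSIGHTS,TABLE
            long_query_time=10,           # seconds before a query counts as slow
            internal_tmp_mem_storage_engine="TempTable",
        ),
    ),
)
```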
@@ -14308,7 +14202,7 @@ class OpenSearchOpensearchUserConfig(dict):
         :param str additional_backup_regions: Additional Cloud Regions for Backup Replication.
         :param 'OpenSearchOpensearchUserConfigAzureMigrationArgs' azure_migration: Azure migration settings
         :param str custom_domain: Serve the web frontend using a custom CNAME pointing to the Aiven DNS name. Example: `grafana.example.org`.
-        :param bool disable_replication_factor_adjustment: Disable automatic replication factor adjustment for multi-node services. By default, Aiven ensures all indexes are replicated at least to two nodes. Note: Due to potential data loss in case of losing a service node, this setting can
+        :param bool disable_replication_factor_adjustment: Disable automatic replication factor adjustment for multi-node services. By default, Aiven ensures all indexes are replicated at least to two nodes. Note: Due to potential data loss in case of losing a service node, this setting can no longer be activated.
         :param 'OpenSearchOpensearchUserConfigGcsMigrationArgs' gcs_migration: Google Cloud Storage migration settings
         :param Sequence['OpenSearchOpensearchUserConfigIndexPatternArgs'] index_patterns: Index patterns
         :param 'OpenSearchOpensearchUserConfigIndexRollupArgs' index_rollup: Index rollup settings
@@ -14416,7 +14310,7 @@ class OpenSearchOpensearchUserConfig(dict):
     @pulumi.getter(name="disableReplicationFactorAdjustment")
     def disable_replication_factor_adjustment(self) -> Optional[bool]:
         """
-        Disable automatic replication factor adjustment for multi-node services. By default, Aiven ensures all indexes are replicated at least to two nodes. Note: Due to potential data loss in case of losing a service node, this setting can
+        Disable automatic replication factor adjustment for multi-node services. By default, Aiven ensures all indexes are replicated at least to two nodes. Note: Due to potential data loss in case of losing a service node, this setting can no longer be activated.
         """
         return pulumi.get(self, "disable_replication_factor_adjustment")

@@ -14619,10 +14513,6 @@ class OpenSearchOpensearchUserConfigAzureMigration(dict):
             suggest = "chunk_size"
         elif key == "endpointSuffix":
             suggest = "endpoint_suffix"
-        elif key == "includeAliases":
-            suggest = "include_aliases"
-        elif key == "restoreGlobalState":
-            suggest = "restore_global_state"
         elif key == "sasToken":
             suggest = "sas_token"

@@ -14641,33 +14531,28 @@ class OpenSearchOpensearchUserConfigAzureMigration(dict):
                  account: str,
                  base_path: str,
                  container: str,
-                 indices: str,
                  snapshot_name: str,
                  chunk_size: Optional[str] = None,
                  compress: Optional[bool] = None,
                  endpoint_suffix: Optional[str] = None,
-
+                 indices: Optional[str] = None,
                  key: Optional[str] = None,
-                 restore_global_state: Optional[bool] = None,
                  sas_token: Optional[str] = None):
         """
         :param str account: Azure account name.
         :param str base_path: The path to the repository data within its container. The value of this setting should not start or end with a /.
         :param str container: Azure container name.
-        :param str indices: A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. Example: `metrics*,logs*,data-20240823`.
         :param str snapshot_name: The snapshot name to restore from.
         :param str chunk_size: Big files can be broken down into chunks during snapshotting if needed. Should be the same as for the 3rd party repository.
         :param bool compress: When set to true metadata files are stored in compressed format.
         :param str endpoint_suffix: Defines the DNS suffix for Azure Storage endpoints.
-        :param
+        :param str indices: A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. By default, a restore operation includes all data streams and indices in the snapshot. If this argument is provided, the restore operation only includes the data streams and indices that you specify. Example: `metrics*,logs*,data-20240823`.
         :param str key: Azure account secret key. One of key or sas_token should be specified.
-        :param bool restore_global_state: If true, restore the cluster state. Defaults to false.
         :param str sas_token: A shared access signatures (SAS) token. One of key or sas_token should be specified.
         """
         pulumi.set(__self__, "account", account)
         pulumi.set(__self__, "base_path", base_path)
         pulumi.set(__self__, "container", container)
-        pulumi.set(__self__, "indices", indices)
         pulumi.set(__self__, "snapshot_name", snapshot_name)
         if chunk_size is not None:
             pulumi.set(__self__, "chunk_size", chunk_size)
@@ -14675,12 +14560,10 @@ class OpenSearchOpensearchUserConfigAzureMigration(dict):
             pulumi.set(__self__, "compress", compress)
         if endpoint_suffix is not None:
             pulumi.set(__self__, "endpoint_suffix", endpoint_suffix)
-        if
-            pulumi.set(__self__, "
+        if indices is not None:
+            pulumi.set(__self__, "indices", indices)
         if key is not None:
             pulumi.set(__self__, "key", key)
-        if restore_global_state is not None:
-            pulumi.set(__self__, "restore_global_state", restore_global_state)
         if sas_token is not None:
             pulumi.set(__self__, "sas_token", sas_token)

@@ -14708,14 +14591,6 @@ class OpenSearchOpensearchUserConfigAzureMigration(dict):
         """
         return pulumi.get(self, "container")

-    @property
-    @pulumi.getter
-    def indices(self) -> str:
-        """
-        A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. Example: `metrics*,logs*,data-20240823`.
-        """
-        return pulumi.get(self, "indices")
-
     @property
     @pulumi.getter(name="snapshotName")
     def snapshot_name(self) -> str:
@@ -14749,12 +14624,12 @@ class OpenSearchOpensearchUserConfigAzureMigration(dict):
         return pulumi.get(self, "endpoint_suffix")

     @property
-    @pulumi.getter
-    def
+    @pulumi.getter
+    def indices(self) -> Optional[str]:
         """
-
+        A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. By default, a restore operation includes all data streams and indices in the snapshot. If this argument is provided, the restore operation only includes the data streams and indices that you specify. Example: `metrics*,logs*,data-20240823`.
         """
-        return pulumi.get(self, "
+        return pulumi.get(self, "indices")

     @property
     @pulumi.getter
@@ -14764,14 +14639,6 @@ class OpenSearchOpensearchUserConfigAzureMigration(dict):
         """
         return pulumi.get(self, "key")

-    @property
-    @pulumi.getter(name="restoreGlobalState")
-    def restore_global_state(self) -> Optional[bool]:
-        """
-        If true, restore the cluster state. Defaults to false.
-        """
-        return pulumi.get(self, "restore_global_state")
-
     @property
     @pulumi.getter(name="sasToken")
     def sas_token(self) -> Optional[str]:
@@ -14792,10 +14659,6 @@ class OpenSearchOpensearchUserConfigGcsMigration(dict):
             suggest = "snapshot_name"
         elif key == "chunkSize":
             suggest = "chunk_size"
-        elif key == "includeAliases":
-            suggest = "include_aliases"
-        elif key == "restoreGlobalState":
-            suggest = "restore_global_state"

         if suggest:
             pulumi.log.warn(f"Key '{key}' not found in OpenSearchOpensearchUserConfigGcsMigration. Access the value via the '{suggest}' property getter instead.")
@@ -14812,36 +14675,29 @@ class OpenSearchOpensearchUserConfigGcsMigration(dict):
                  base_path: str,
                  bucket: str,
                  credentials: str,
-                 indices: str,
                  snapshot_name: str,
                  chunk_size: Optional[str] = None,
                  compress: Optional[bool] = None,
-
-                 restore_global_state: Optional[bool] = None):
+                 indices: Optional[str] = None):
         """
         :param str base_path: The path to the repository data within its container. The value of this setting should not start or end with a /.
         :param str bucket: The path to the repository data within its container.
         :param str credentials: Google Cloud Storage credentials file content.
-        :param str indices: A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. Example: `metrics*,logs*,data-20240823`.
         :param str snapshot_name: The snapshot name to restore from.
         :param str chunk_size: Big files can be broken down into chunks during snapshotting if needed. Should be the same as for the 3rd party repository.
         :param bool compress: When set to true metadata files are stored in compressed format.
-        :param
-        :param bool restore_global_state: If true, restore the cluster state. Defaults to false.
+        :param str indices: A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. By default, a restore operation includes all data streams and indices in the snapshot. If this argument is provided, the restore operation only includes the data streams and indices that you specify. Example: `metrics*,logs*,data-20240823`.
         """
         pulumi.set(__self__, "base_path", base_path)
         pulumi.set(__self__, "bucket", bucket)
         pulumi.set(__self__, "credentials", credentials)
-        pulumi.set(__self__, "indices", indices)
         pulumi.set(__self__, "snapshot_name", snapshot_name)
         if chunk_size is not None:
             pulumi.set(__self__, "chunk_size", chunk_size)
         if compress is not None:
             pulumi.set(__self__, "compress", compress)
-        if
-            pulumi.set(__self__, "
-        if restore_global_state is not None:
-            pulumi.set(__self__, "restore_global_state", restore_global_state)
+        if indices is not None:
+            pulumi.set(__self__, "indices", indices)

     @property
     @pulumi.getter(name="basePath")
@@ -14867,14 +14723,6 @@ class OpenSearchOpensearchUserConfigGcsMigration(dict):
         """
         return pulumi.get(self, "credentials")

-    @property
-    @pulumi.getter
-    def indices(self) -> str:
-        """
-        A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. Example: `metrics*,logs*,data-20240823`.
-        """
-        return pulumi.get(self, "indices")
-
     @property
     @pulumi.getter(name="snapshotName")
     def snapshot_name(self) -> str:
@@ -14900,20 +14748,12 @@ class OpenSearchOpensearchUserConfigGcsMigration(dict):
         return pulumi.get(self, "compress")

     @property
-    @pulumi.getter
-    def
-        """
-        Whether to restore aliases alongside their associated indexes. Default is true.
-        """
-        return pulumi.get(self, "include_aliases")
-
-    @property
-    @pulumi.getter(name="restoreGlobalState")
-    def restore_global_state(self) -> Optional[bool]:
+    @pulumi.getter
+    def indices(self) -> Optional[str]:
         """
-        If
+        A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. By default, a restore operation includes all data streams and indices in the snapshot. If this argument is provided, the restore operation only includes the data streams and indices that you specify. Example: `metrics*,logs*,data-20240823`.
         """
-        return pulumi.get(self, "
+        return pulumi.get(self, "indices")


 @pulumi.output_type
@@ -16367,7 +16207,7 @@ class OpenSearchOpensearchUserConfigOpensearchSearchBackpressure(dict):
                  search_shard_task: Optional['outputs.OpenSearchOpensearchUserConfigOpensearchSearchBackpressureSearchShardTask'] = None,
                  search_task: Optional['outputs.OpenSearchOpensearchUserConfigOpensearchSearchBackpressureSearchTask'] = None):
         """
-        :param str mode: Enum: `
+        :param str mode: Enum: `monitor_only`, `enforced`, `disabled`. The search backpressure mode. Valid values are monitor*only, enforced, or disabled. Default is monitor*only.
         :param 'OpenSearchOpensearchUserConfigOpensearchSearchBackpressureNodeDuressArgs' node_duress: Node duress settings
         :param 'OpenSearchOpensearchUserConfigOpensearchSearchBackpressureSearchShardTaskArgs' search_shard_task: Search shard settings
         :param 'OpenSearchOpensearchUserConfigOpensearchSearchBackpressureSearchTaskArgs' search_task: Search task settings
@@ -16385,7 +16225,7 @@ class OpenSearchOpensearchUserConfigOpensearchSearchBackpressure(dict):
     @pulumi.getter
     def mode(self) -> Optional[str]:
         """
-        Enum: `
+        Enum: `monitor_only`, `enforced`, `disabled`. The search backpressure mode. Valid values are monitor*only, enforced, or disabled. Default is monitor*only.
         """
         return pulumi.get(self, "mode")

@@ -17215,10 +17055,6 @@ class OpenSearchOpensearchUserConfigS3Migration(dict):
             suggest = "snapshot_name"
         elif key == "chunkSize":
             suggest = "chunk_size"
-        elif key == "includeAliases":
-            suggest = "include_aliases"
-        elif key == "restoreGlobalState":
-            suggest = "restore_global_state"
         elif key == "serverSideEncryption":
             suggest = "server_side_encryption"

@@ -17237,35 +17073,30 @@ class OpenSearchOpensearchUserConfigS3Migration(dict):
                  access_key: str,
                  base_path: str,
                  bucket: str,
-                 indices: str,
                  region: str,
                  secret_key: str,
                  snapshot_name: str,
                  chunk_size: Optional[str] = None,
                  compress: Optional[bool] = None,
                  endpoint: Optional[str] = None,
-
-                 restore_global_state: Optional[bool] = None,
+                 indices: Optional[str] = None,
                  server_side_encryption: Optional[bool] = None):
         """
         :param str access_key: AWS Access key.
         :param str base_path: The path to the repository data within its container. The value of this setting should not start or end with a /.
         :param str bucket: S3 bucket name.
-        :param str indices: A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. Example: `metrics*,logs*,data-20240823`.
         :param str region: S3 region.
         :param str secret_key: AWS secret key.
         :param str snapshot_name: The snapshot name to restore from.
         :param str chunk_size: Big files can be broken down into chunks during snapshotting if needed. Should be the same as for the 3rd party repository.
         :param bool compress: When set to true metadata files are stored in compressed format.
         :param str endpoint: The S3 service endpoint to connect to. If you are using an S3-compatible service then you should set this to the service’s endpoint.
-        :param
-        :param bool restore_global_state: If true, restore the cluster state. Defaults to false.
+        :param str indices: A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. By default, a restore operation includes all data streams and indices in the snapshot. If this argument is provided, the restore operation only includes the data streams and indices that you specify. Example: `metrics*,logs*,data-20240823`.
         :param bool server_side_encryption: When set to true files are encrypted on server side.
         """
         pulumi.set(__self__, "access_key", access_key)
         pulumi.set(__self__, "base_path", base_path)
         pulumi.set(__self__, "bucket", bucket)
-        pulumi.set(__self__, "indices", indices)
         pulumi.set(__self__, "region", region)
         pulumi.set(__self__, "secret_key", secret_key)
         pulumi.set(__self__, "snapshot_name", snapshot_name)
@@ -17275,10 +17106,8 @@ class OpenSearchOpensearchUserConfigS3Migration(dict):
             pulumi.set(__self__, "compress", compress)
         if endpoint is not None:
             pulumi.set(__self__, "endpoint", endpoint)
-        if
-            pulumi.set(__self__, "
-        if restore_global_state is not None:
-            pulumi.set(__self__, "restore_global_state", restore_global_state)
+        if indices is not None:
+            pulumi.set(__self__, "indices", indices)
         if server_side_encryption is not None:
             pulumi.set(__self__, "server_side_encryption", server_side_encryption)

@@ -17306,14 +17135,6 @@ class OpenSearchOpensearchUserConfigS3Migration(dict):
         """
         return pulumi.get(self, "bucket")

-    @property
-    @pulumi.getter
-    def indices(self) -> str:
-        """
-        A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. Example: `metrics*,logs*,data-20240823`.
-        """
-        return pulumi.get(self, "indices")
-
     @property
     @pulumi.getter
     def region(self) -> str:
@@ -17363,20 +17184,12 @@ class OpenSearchOpensearchUserConfigS3Migration(dict):
         return pulumi.get(self, "endpoint")

     @property
-    @pulumi.getter
-    def
-        """
-        Whether to restore aliases alongside their associated indexes. Default is true.
-        """
-        return pulumi.get(self, "include_aliases")
-
-    @property
-    @pulumi.getter(name="restoreGlobalState")
-    def restore_global_state(self) -> Optional[bool]:
+    @pulumi.getter
+    def indices(self) -> Optional[str]:
         """
-        If
+        A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. By default, a restore operation includes all data streams and indices in the snapshot. If this argument is provided, the restore operation only includes the data streams and indices that you specify. Example: `metrics*,logs*,data-20240823`.
         """
-        return pulumi.get(self, "
+        return pulumi.get(self, "indices")

     @property
     @pulumi.getter(name="serverSideEncryption")
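Across the Azure, GCS, and S3 migration configs above, `indices` changed from required to optional; omitting it now restores every data stream and index in the snapshot. A hedged sketch using the S3 variant (all credentials and names are placeholders):

```python
# Sketch only: S3 snapshot migration after this change. Omitting
# `indices` restores everything in the snapshot; all values below are
# hypothetical placeholders.
import pulumi_aiven as aiven

opensearch = aiven.OpenSearch(
    "example-opensearch",
    project="my-project",
    cloud_name="aws-eu-west-1",
    plan="business-4",
    service_name="example-opensearch",
    opensearch_user_config=aiven.OpenSearchOpensearchUserConfigArgs(
        s3_migration=aiven.OpenSearchOpensearchUserConfigS3MigrationArgs(
            access_key="AKIA-placeholder",
            secret_key="secret-placeholder",
            region="eu-west-1",
            bucket="my-snapshots",
            base_path="snapshots/prod",
            snapshot_name="snap-2024-08-23",
            indices="metrics*,logs*",  # optional; omit to restore all indices
        ),
    ),
)
```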
@@ -17689,8 +17502,8 @@ class OrganizationPermissionPermission(dict):
                  create_time: Optional[str] = None,
                  update_time: Optional[str] = None):
         """
-        :param Sequence[str] permissions: List of
-        :param str principal_id: ID of the user or group
+        :param Sequence[str] permissions: List of permissions. The possible values are `admin`, `developer`, `operator`, `project:permissions:read`, `read_only` and `service:logs:read`.
+        :param str principal_id: ID of the user or group.
         :param str principal_type: The type of principal. The possible values are `user` and `user_group`.
         :param str create_time: Time created.
         :param str update_time: Time updated.
@@ -17707,7 +17520,7 @@ class OrganizationPermissionPermission(dict):
     @pulumi.getter
     def permissions(self) -> Sequence[str]:
         """
-        List of
+        List of permissions. The possible values are `admin`, `developer`, `operator`, `project:permissions:read`, `read_only` and `service:logs:read`.
         """
         return pulumi.get(self, "permissions")

@@ -17715,7 +17528,7 @@ class OrganizationPermissionPermission(dict):
     @pulumi.getter(name="principalId")
     def principal_id(self) -> str:
         """
-        ID of the user or group
+        ID of the user or group.
         """
         return pulumi.get(self, "principal_id")

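A hedged sketch of granting the permission values enumerated above; the organization, project, and principal IDs are hypothetical placeholders:

```python
# Sketch only: granting project-level permissions to a user. The
# organization_id, resource_id, and principal_id values are assumed
# placeholders, not real identifiers.
import pulumi_aiven as aiven

grant = aiven.OrganizationPermission(
    "example-permission",
    organization_id="org1a2b3c4d5e",
    resource_id="my-project",
    resource_type="project",
    permissions=[
        aiven.OrganizationPermissionPermissionArgs(
            principal_id="u1a2b3c4d5e6",
            principal_type="user",
            permissions=["read_only", "service:logs:read"],
        ),
    ],
)
```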
@@ -18400,7 +18213,7 @@ class PgPgUserConfig(dict):
         :param str service_to_fork_from: Name of another service to fork from. This has effect only when a new service is being created. Example: `anotherservicename`.
         :param float shared_buffers_percentage: Percentage of total RAM that the database server uses for shared memory buffers. Valid range is 20-60 (float), which corresponds to 20% - 60%. This setting adjusts the shared_buffers configuration value. Example: `41.5`.
         :param bool static_ips: Use static public IP addresses.
-        :param str synchronous_replication: Enum: `
+        :param str synchronous_replication: Enum: `quorum`, `off`. Synchronous replication type. Note that the service plan also needs to support synchronous replication.
         :param 'PgPgUserConfigTimescaledbArgs' timescaledb: System-wide settings for the timescaledb extension
         :param str variant: Enum: `aiven`, `timescale`. Variant of the PostgreSQL service, may affect the features that are exposed by default.
         :param int work_mem: Sets the maximum amount of memory to be used by a query operation (such as a sort or hash table) before writing to temporary disk files, in MB. Default is 1MB + 0.075% of total RAM (up to 32MB). Example: `4`.
@@ -18702,7 +18515,7 @@ class PgPgUserConfig(dict):
     @pulumi.getter(name="synchronousReplication")
     def synchronous_replication(self) -> Optional[str]:
         """
-        Enum: `
+        Enum: `quorum`, `off`. Synchronous replication type. Note that the service plan also needs to support synchronous replication.
         """
         return pulumi.get(self, "synchronous_replication")

@@ -19073,8 +18886,8 @@ class PgPgUserConfigPg(dict):
         :param int idle_in_transaction_session_timeout: Time out sessions with open transactions after this number of milliseconds.
         :param bool jit: Controls system-wide use of Just-in-Time Compilation (JIT).
         :param int log_autovacuum_min_duration: Causes each action executed by autovacuum to be logged if it ran for at least the specified number of milliseconds. Setting this to zero logs all autovacuum actions. Minus-one (the default) disables logging autovacuum actions.
-        :param str log_error_verbosity: Enum: `
-        :param str log_line_prefix: Enum: `'
+        :param str log_error_verbosity: Enum: `TERSE`, `DEFAULT`, `VERBOSE`. Controls the amount of detail written in the server log for each message that is logged.
+        :param str log_line_prefix: Enum: `'pid=%p,user=%u,db=%d,app=%a,client=%h '`, `'%t [%p]: [%l-1] user=%u,db=%d,app=%a,client=%h '`, `'%m [%p] %q[user=%u,db=%d,app=%a] '`, `'pid=%p,user=%u,db=%d,app=%a,client=%h,txid=%x,qid=%Q '`. Choose from one of the available log formats.
         :param int log_min_duration_statement: Log statements that take more than this number of milliseconds to run, -1 disables.
         :param int log_temp_files: Log statements for each temporary file created larger than this number of kilobytes, -1 disables.
         :param int max_files_per_process: PostgreSQL maximum number of files that can be open per process.
@@ -19095,12 +18908,12 @@ class PgPgUserConfigPg(dict):
         :param str pg_partman_bgw_dot_role: Controls which role to use for pg_partman's scheduled background tasks. Example: `myrolename`.
         :param bool pg_stat_monitor_dot_pgsm_enable_query_plan: Enables or disables query plan monitoring.
         :param int pg_stat_monitor_dot_pgsm_max_buckets: Sets the maximum number of buckets. Example: `10`.
-        :param str pg_stat_statements_dot_track: Enum: `all`, `
+        :param str pg_stat_statements_dot_track: Enum: `all`, `top`, `none`. Controls which statements are counted. Specify top to track top-level statements (those issued directly by clients), all to also track nested statements (such as statements invoked within functions), or none to disable statement statistics collection. The default value is top.
         :param int temp_file_limit: PostgreSQL temporary file limit in KiB, -1 for unlimited. Example: `5000000`.
         :param str timezone: PostgreSQL service timezone. Example: `Europe/Helsinki`.
         :param int track_activity_query_size: Specifies the number of bytes reserved to track the currently executing command for each active session. Example: `1024`.
         :param str track_commit_timestamp: Enum: `off`, `on`. Record commit time of transactions.
-        :param str track_functions: Enum: `all`, `
+        :param str track_functions: Enum: `all`, `pl`, `none`. Enables tracking of function call counts and time used.
         :param str track_io_timing: Enum: `off`, `on`. Enables timing of database I/O calls. This parameter is off by default, because it will repeatedly query the operating system for the current time, which may cause significant overhead on some platforms.
         :param int wal_sender_timeout: Terminate replication connections that are inactive for longer than this amount of time, in milliseconds. Setting this value to zero disables the timeout. Example: `60000`.
         :param int wal_writer_delay: WAL flush interval in milliseconds. Note that setting this value to lower than the default 200ms may negatively impact performance. Example: `50`.
@@ -19352,7 +19165,7 @@ class PgPgUserConfigPg(dict):
     @pulumi.getter(name="logErrorVerbosity")
     def log_error_verbosity(self) -> Optional[str]:
         """
-        Enum: `
+        Enum: `TERSE`, `DEFAULT`, `VERBOSE`. Controls the amount of detail written in the server log for each message that is logged.
         """
         return pulumi.get(self, "log_error_verbosity")

@@ -19360,7 +19173,7 @@ class PgPgUserConfigPg(dict):
     @pulumi.getter(name="logLinePrefix")
     def log_line_prefix(self) -> Optional[str]:
         """
-        Enum: `'
+        Enum: `'pid=%p,user=%u,db=%d,app=%a,client=%h '`, `'%t [%p]: [%l-1] user=%u,db=%d,app=%a,client=%h '`, `'%m [%p] %q[user=%u,db=%d,app=%a] '`, `'pid=%p,user=%u,db=%d,app=%a,client=%h,txid=%x,qid=%Q '`. Choose from one of the available log formats.
         """
         return pulumi.get(self, "log_line_prefix")

@@ -19528,7 +19341,7 @@ class PgPgUserConfigPg(dict):
     @pulumi.getter(name="pgStatStatementsDotTrack")
     def pg_stat_statements_dot_track(self) -> Optional[str]:
         """
-        Enum: `all`, `
+        Enum: `all`, `top`, `none`. Controls which statements are counted. Specify top to track top-level statements (those issued directly by clients), all to also track nested statements (such as statements invoked within functions), or none to disable statement statistics collection. The default value is top.
         """
         return pulumi.get(self, "pg_stat_statements_dot_track")

@@ -19568,7 +19381,7 @@ class PgPgUserConfigPg(dict):
     @pulumi.getter(name="trackFunctions")
     def track_functions(self) -> Optional[str]:
         """
-        Enum: `all`, `
+        Enum: `all`, `pl`, `none`. Enables tracking of function call counts and time used.
         """
         return pulumi.get(self, "track_functions")

@@ -19971,7 +19784,7 @@ class PgPgUserConfigPgbouncer(dict):
         """
         :param int autodb_idle_timeout: If the automatically created database pools have been unused this many seconds, they are freed. If 0 then timeout is disabled. (seconds). Default: `3600`.
         :param int autodb_max_db_connections: Do not allow more than this many server connections per database (regardless of user). Setting it to 0 means unlimited. Example: `0`.
-        :param str autodb_pool_mode: Enum: `session`, `
+        :param str autodb_pool_mode: Enum: `session`, `transaction`, `statement`. PGBouncer pool mode. Default: `transaction`.
         :param int autodb_pool_size: If non-zero then create automatically a pool of that size per user when a pool doesn't exist. Default: `0`.
         :param Sequence[str] ignore_startup_parameters: List of parameters to ignore when given in startup packet.
         :param int max_prepared_statements: PgBouncer tracks protocol-level named prepared statements related commands sent by the client in transaction and statement pooling modes when max*prepared*statements is set to a non-zero value. Setting it to 0 disables prepared statements. max*prepared*statements defaults to 100, and its maximum is 3000. Default: `100`.
@@ -20021,7 +19834,7 @@ class PgPgUserConfigPgbouncer(dict):
     @pulumi.getter(name="autodbPoolMode")
     def autodb_pool_mode(self) -> Optional[str]:
         """
-        Enum: `session`, `
+        Enum: `session`, `transaction`, `statement`. PGBouncer pool mode. Default: `transaction`.
         """
         return pulumi.get(self, "autodb_pool_mode")

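A hedged sketch tying together the PostgreSQL enums restored above (synchronous replication, log verbosity, statement/function tracking, and the PgBouncer pool mode); all names are placeholders, and `synchronous_replication="quorum"` assumes a plan that supports it:

```python
# Sketch only: PostgreSQL user config using the enums documented above.
# "my-project" and the service name are assumed placeholders.
import pulumi_aiven as aiven

pg = aiven.Pg(
    "example-pg",
    project="my-project",
    cloud_name="google-europe-west1",
    plan="business-8",
    service_name="example-pg",
    pg_user_config=aiven.PgPgUserConfigArgs(
        synchronous_replication="quorum",   # quorum or off
        pg=aiven.PgPgUserConfigPgArgs(
            log_error_verbosity="VERBOSE",          # TERSE, DEFAULT, VERBOSE
            pg_stat_statements_dot_track="top",     # all, top, none
            track_functions="pl",                   # all, pl, none
        ),
        pgbouncer=aiven.PgPgUserConfigPgbouncerArgs(
            autodb_pool_mode="transaction",         # session, transaction, statement
        ),
    ),
)
```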
@@ -20717,7 +20530,7 @@ class RedisRedisUserConfig(dict):
         :param int redis_io_threads: Set Redis IO thread count. Changing this will cause a restart of the Redis service. Example: `1`.
         :param int redis_lfu_decay_time: LFU maxmemory-policy counter decay time in minutes. Default: `1`.
         :param int redis_lfu_log_factor: Counter logarithm factor for volatile-lfu and allkeys-lfu maxmemory-policies. Default: `10`.
-        :param str redis_maxmemory_policy: Enum: `allkeys-
+        :param str redis_maxmemory_policy: Enum: `noeviction`, `allkeys-lru`, `volatile-lru`, `allkeys-random`, `volatile-random`, `volatile-ttl`, `volatile-lfu`, `allkeys-lfu`. Redis maxmemory-policy. Default: `noeviction`.
         :param str redis_notify_keyspace_events: Set notify-keyspace-events option.
         :param int redis_number_of_databases: Set number of Redis databases. Changing this will cause a restart of the Redis service. Example: `16`.
         :param str redis_persistence: Enum: `off`, `rdb`. When persistence is `rdb`, Redis does RDB dumps each 10 minutes if any key is changed. Also RDB dumps are done according to the backup schedule for backup purposes. When persistence is `off`, no RDB dumps or backups are done, so data can be lost at any moment if the service is restarted for any reason, or if the service is powered off. Also, the service can't be forked.
@@ -20917,7 +20730,7 @@ class RedisRedisUserConfig(dict):
     @pulumi.getter(name="redisMaxmemoryPolicy")
     def redis_maxmemory_policy(self) -> Optional[str]:
         """
-        Enum: `allkeys-
+        Enum: `noeviction`, `allkeys-lru`, `volatile-lru`, `allkeys-random`, `volatile-random`, `volatile-ttl`, `volatile-lfu`, `allkeys-lfu`. Redis maxmemory-policy. Default: `noeviction`.
         """
         return pulumi.get(self, "redis_maxmemory_policy")

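A minimal sketch of selecting one of the eviction policies listed above; the project and service names are placeholders:

```python
# Sketch only: choosing a Redis maxmemory-policy from the enum above.
# "my-project" is an assumed placeholder project name.
import pulumi_aiven as aiven

redis = aiven.Redis(
    "example-redis",
    project="my-project",
    cloud_name="google-europe-west1",
    plan="business-4",
    service_name="example-redis",
    redis_user_config=aiven.RedisRedisUserConfigArgs(
        redis_maxmemory_policy="allkeys-lru",  # default is noeviction
    ),
)
```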
@@ -21430,11 +21243,11 @@ class ServiceIntegrationClickhouseKafkaUserConfigTable(dict):
                  thread_per_consumer: Optional[bool] = None):
         """
         :param Sequence['ServiceIntegrationClickhouseKafkaUserConfigTableColumnArgs'] columns: Table columns
-        :param str data_format: Enum: `Avro`, `
+        :param str data_format: Enum: `Avro`, `CSV`, `JSONAsString`, `JSONCompactEachRow`, `JSONCompactStringsEachRow`, `JSONEachRow`, `JSONStringsEachRow`, `MsgPack`, `TSKV`, `TSV`, `TabSeparated`, `RawBLOB`, `AvroConfluent`, `Parquet`. Message data format. Default: `JSONEachRow`.
         :param str group_name: Kafka consumers group. Default: `clickhouse`.
         :param str name: Name of the table. Example: `events`.
         :param Sequence['ServiceIntegrationClickhouseKafkaUserConfigTableTopicArgs'] topics: Kafka topics
-        :param str auto_offset_reset: Enum: `
+        :param str auto_offset_reset: Enum: `smallest`, `earliest`, `beginning`, `largest`, `latest`, `end`. Action to take when there is no initial offset in offset store or the desired offset is out of range. Default: `earliest`.
         :param str date_time_input_format: Enum: `basic`, `best_effort`, `best_effort_us`. Method to read DateTime from text input formats. Default: `basic`.
         :param str handle_error_mode: Enum: `default`, `stream`. How to handle errors for Kafka engine. Default: `default`.
         :param int max_block_size: Number of row collected by poll(s) for flushing data from Kafka. Default: `0`.
@@ -21483,7 +21296,7 @@ class ServiceIntegrationClickhouseKafkaUserConfigTable(dict):
     @pulumi.getter(name="dataFormat")
     def data_format(self) -> str:
         """
-        Enum: `Avro`, `
+        Enum: `Avro`, `CSV`, `JSONAsString`, `JSONCompactEachRow`, `JSONCompactStringsEachRow`, `JSONEachRow`, `JSONStringsEachRow`, `MsgPack`, `TSKV`, `TSV`, `TabSeparated`, `RawBLOB`, `AvroConfluent`, `Parquet`. Message data format. Default: `JSONEachRow`.
         """
         return pulumi.get(self, "data_format")

@@ -21515,7 +21328,7 @@ class ServiceIntegrationClickhouseKafkaUserConfigTable(dict):
     @pulumi.getter(name="autoOffsetReset")
     def auto_offset_reset(self) -> Optional[str]:
         """
-        Enum: `
+        Enum: `smallest`, `earliest`, `beginning`, `largest`, `latest`, `end`. Action to take when there is no initial offset in offset store or the desired offset is out of range. Default: `earliest`.
         """
         return pulumi.get(self, "auto_offset_reset")

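A hedged sketch of a ClickHouse-Kafka integration table using the `data_format` and `auto_offset_reset` enums above; the service names and the single-column layout are hypothetical:

```python
# Sketch only: a ClickHouse-Kafka integration table. Service names and
# the column definition are assumed placeholders.
import pulumi_aiven as aiven

integration = aiven.ServiceIntegration(
    "clickhouse-kafka",
    project="my-project",
    integration_type="clickhouse_kafka",
    source_service_name="my-kafka",
    destination_service_name="my-clickhouse",
    clickhouse_kafka_user_config=aiven.ServiceIntegrationClickhouseKafkaUserConfigArgs(
        tables=[aiven.ServiceIntegrationClickhouseKafkaUserConfigTableArgs(
            name="events",
            group_name="clickhouse",
            data_format="JSONEachRow",     # the default message data format
            auto_offset_reset="earliest",  # the default offset behaviour
            columns=[aiven.ServiceIntegrationClickhouseKafkaUserConfigTableColumnArgs(
                name="id",
                type="UInt64",
            )],
            topics=[aiven.ServiceIntegrationClickhouseKafkaUserConfigTableTopicArgs(
                name="events",
            )],
        )],
    ),
)
```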
@@ -22019,70 +21832,6 @@ class ServiceIntegrationDatadogUserConfigRedis(dict):
         return pulumi.get(self, "command_stats_enabled")


-@pulumi.output_type
-class ServiceIntegrationEndpointAutoscalerUserConfig(dict):
-    def __init__(__self__, *,
-                 autoscalings: Sequence['outputs.ServiceIntegrationEndpointAutoscalerUserConfigAutoscaling']):
-        """
-        :param Sequence['ServiceIntegrationEndpointAutoscalerUserConfigAutoscalingArgs'] autoscalings: Configure autoscaling thresholds for a service
-        """
-        pulumi.set(__self__, "autoscalings", autoscalings)
-
-    @property
-    @pulumi.getter
-    def autoscalings(self) -> Sequence['outputs.ServiceIntegrationEndpointAutoscalerUserConfigAutoscaling']:
-        """
-        Configure autoscaling thresholds for a service
-        """
-        return pulumi.get(self, "autoscalings")
-
-
-@pulumi.output_type
-class ServiceIntegrationEndpointAutoscalerUserConfigAutoscaling(dict):
-    @staticmethod
-    def __key_warning(key: str):
-        suggest = None
-        if key == "capGb":
-            suggest = "cap_gb"
-
-        if suggest:
-            pulumi.log.warn(f"Key '{key}' not found in ServiceIntegrationEndpointAutoscalerUserConfigAutoscaling. Access the value via the '{suggest}' property getter instead.")
-
-    def __getitem__(self, key: str) -> Any:
-        ServiceIntegrationEndpointAutoscalerUserConfigAutoscaling.__key_warning(key)
-        return super().__getitem__(key)
-
-    def get(self, key: str, default = None) -> Any:
-        ServiceIntegrationEndpointAutoscalerUserConfigAutoscaling.__key_warning(key)
-        return super().get(key, default)
-
-    def __init__(__self__, *,
-                 cap_gb: int,
-                 type: str):
-        """
-        :param int cap_gb: The maximum total disk size (in gb) to allow autoscaler to scale up to. Example: `300`.
-        :param str type: Enum: `autoscale_disk`. Type of autoscale event.
-        """
-        pulumi.set(__self__, "cap_gb", cap_gb)
-        pulumi.set(__self__, "type", type)
-
-    @property
-    @pulumi.getter(name="capGb")
-    def cap_gb(self) -> int:
-        """
-        The maximum total disk size (in gb) to allow autoscaler to scale up to. Example: `300`.
-        """
-        return pulumi.get(self, "cap_gb")
-
-    @property
-    @pulumi.getter
-    def type(self) -> str:
-        """
-        Enum: `autoscale_disk`. Type of autoscale event.
-        """
-        return pulumi.get(self, "type")
-
-
 @pulumi.output_type
 class ServiceIntegrationEndpointDatadogUserConfig(dict):
     @staticmethod
@@ -22127,7 +21876,7 @@ class ServiceIntegrationEndpointDatadogUserConfig(dict):
         :param int kafka_consumer_check_instances: Number of separate instances to fetch kafka consumer statistics with. Example: `8`.
         :param int kafka_consumer_stats_timeout: Number of seconds that datadog will wait to get consumer statistics from brokers. Example: `60`.
         :param int max_partition_contexts: Maximum number of partition contexts to send. Example: `32000`.
-        :param str site: Enum: `
+        :param str site: Enum: `datadoghq.com`, `datadoghq.eu`, `us3.datadoghq.com`, `us5.datadoghq.com`, `ddog-gov.com`, `ap1.datadoghq.com`. Datadog intake site. Defaults to datadoghq.com.
         """
         pulumi.set(__self__, "datadog_api_key", datadog_api_key)
         if datadog_tags is not None:
@@ -22195,7 +21944,7 @@ class ServiceIntegrationEndpointDatadogUserConfig(dict):
     @pulumi.getter
     def site(self) -> Optional[str]:
         """
-        Enum: `
+        Enum: `datadoghq.com`, `datadoghq.eu`, `us3.datadoghq.com`, `us5.datadoghq.com`, `ddog-gov.com`, `ap1.datadoghq.com`. Datadog intake site. Defaults to datadoghq.com.
         """
         return pulumi.get(self, "site")

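A hedged sketch of selecting a Datadog intake site from the enum above; the API key is a placeholder and in practice should come from a Pulumi secret:

```python
# Sketch only: a Datadog endpoint pinned to the EU intake site.
# The API key and project name are assumed placeholders.
import pulumi_aiven as aiven

datadog = aiven.ServiceIntegrationEndpoint(
    "datadog-endpoint",
    project="my-project",
    endpoint_name="datadog-eu",
    endpoint_type="datadog",
    datadog_user_config=aiven.ServiceIntegrationEndpointDatadogUserConfigArgs(
        datadog_api_key="dd-api-key-placeholder",
        site="datadoghq.eu",  # defaults to datadoghq.com when omitted
    ),
)
```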
@@ -22729,7 +22478,7 @@ class ServiceIntegrationEndpointExternalKafkaUserConfig(dict):
                  ssl_endpoint_identification_algorithm: Optional[str] = None):
         """
         :param str bootstrap_servers: Bootstrap servers. Example: `10.0.0.1:9092,10.0.0.2:9092`.
-        :param str security_protocol: Enum: `PLAINTEXT`, `
+        :param str security_protocol: Enum: `PLAINTEXT`, `SSL`, `SASL_PLAINTEXT`, `SASL_SSL`. Security protocol.
         :param str sasl_mechanism: Enum: `PLAIN`, `SCRAM-SHA-256`, `SCRAM-SHA-512`. SASL mechanism used for connections to the Kafka server.
         :param str sasl_plain_password: Password for SASL PLAIN mechanism in the Kafka server. Example: `admin`.
         :param str sasl_plain_username: Username for SASL PLAIN mechanism in the Kafka server. Example: `admin`.
@@ -22776,7 +22525,7 @@ class ServiceIntegrationEndpointExternalKafkaUserConfig(dict):
     @pulumi.getter(name="securityProtocol")
     def security_protocol(self) -> str:
         """
-        Enum: `PLAINTEXT`, `
+        Enum: `PLAINTEXT`, `SSL`, `SASL_PLAINTEXT`, `SASL_SSL`. Security protocol.
         """
         return pulumi.get(self, "security_protocol")

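A hedged sketch of an external Kafka endpoint using the `security_protocol` enum above; addresses and credentials are placeholders:

```python
# Sketch only: registering an external Kafka cluster as an endpoint.
# Broker addresses and credentials below are assumed placeholders.
import pulumi_aiven as aiven

ext_kafka = aiven.ServiceIntegrationEndpoint(
    "external-kafka",
    project="my-project",
    endpoint_name="external-kafka",
    endpoint_type="external_kafka",
    external_kafka_user_config=aiven.ServiceIntegrationEndpointExternalKafkaUserConfigArgs(
        bootstrap_servers="10.0.0.1:9092,10.0.0.2:9092",
        security_protocol="SASL_SSL",          # PLAINTEXT, SSL, SASL_PLAINTEXT, SASL_SSL
        sasl_mechanism="SCRAM-SHA-256",
        sasl_plain_username="admin",
        sasl_plain_password="placeholder-password",
    ),
)
```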
@@ -23086,7 +22835,7 @@ class ServiceIntegrationEndpointExternalPostgresql(dict):
         :param str ssl_client_key: Client key. Example: `-----BEGIN PRIVATE KEY-----
         ...
         -----END PRIVATE KEY-----`.
-        :param str ssl_mode: Enum: `
+        :param str ssl_mode: Enum: `disable`, `allow`, `prefer`, `require`, `verify-ca`, `verify-full`. SSL mode to use for the connection. Please note that Aiven requires TLS for all connections to external PostgreSQL services. Default: `verify-full`.
         :param str ssl_root_cert: SSL Root Cert. Example: `-----BEGIN CERTIFICATE-----
         ...
         -----END CERTIFICATE-----
@@ -23173,7 +22922,7 @@ class ServiceIntegrationEndpointExternalPostgresql(dict):
     @pulumi.getter(name="sslMode")
     def ssl_mode(self) -> Optional[str]:
         """
-        Enum: `
+        Enum: `disable`, `allow`, `prefer`, `require`, `verify-ca`, `verify-full`. SSL mode to use for the connection. Please note that Aiven requires TLS for all connections to external PostgreSQL services. Default: `verify-full`.
         """
         return pulumi.get(self, "ssl_mode")

@@ -23189,70 +22938,6 @@ class ServiceIntegrationEndpointExternalPostgresql(dict):
         return pulumi.get(self, "ssl_root_cert")


-@pulumi.output_type
-class ServiceIntegrationEndpointExternalPrometheusUserConfig(dict):
-    @staticmethod
-    def __key_warning(key: str):
-        suggest = None
-        if key == "basicAuthPassword":
-            suggest = "basic_auth_password"
-        elif key == "basicAuthUsername":
-            suggest = "basic_auth_username"
-        elif key == "serviceUri":
-            suggest = "service_uri"
-
-        if suggest:
-            pulumi.log.warn(f"Key '{key}' not found in ServiceIntegrationEndpointExternalPrometheusUserConfig. Access the value via the '{suggest}' property getter instead.")
-
-    def __getitem__(self, key: str) -> Any:
-        ServiceIntegrationEndpointExternalPrometheusUserConfig.__key_warning(key)
-        return super().__getitem__(key)
-
-    def get(self, key: str, default = None) -> Any:
-        ServiceIntegrationEndpointExternalPrometheusUserConfig.__key_warning(key)
-        return super().get(key, default)
-
-    def __init__(__self__, *,
-                 basic_auth_password: Optional[str] = None,
-                 basic_auth_username: Optional[str] = None,
-                 service_uri: Optional[str] = None):
-        """
-        :param str basic_auth_password: Prometheus basic authentication password. Example: `fhyFNBjj3R`.
-        :param str basic_auth_username: Prometheus basic authentication username. Example: `prom4851`.
-        :param str service_uri: Prometheus enabled write endpoint. Example: `https://write.example.com/`.
-        """
-        if basic_auth_password is not None:
-            pulumi.set(__self__, "basic_auth_password", basic_auth_password)
-        if basic_auth_username is not None:
-            pulumi.set(__self__, "basic_auth_username", basic_auth_username)
-        if service_uri is not None:
-            pulumi.set(__self__, "service_uri", service_uri)
-
-    @property
-    @pulumi.getter(name="basicAuthPassword")
-    def basic_auth_password(self) -> Optional[str]:
-        """
-        Prometheus basic authentication password. Example: `fhyFNBjj3R`.
-        """
-        return pulumi.get(self, "basic_auth_password")
-
-    @property
-    @pulumi.getter(name="basicAuthUsername")
-    def basic_auth_username(self) -> Optional[str]:
-        """
-        Prometheus basic authentication username. Example: `prom4851`.
-        """
-        return pulumi.get(self, "basic_auth_username")
-
-    @property
-    @pulumi.getter(name="serviceUri")
-    def service_uri(self) -> Optional[str]:
-        """
-        Prometheus enabled write endpoint. Example: `https://write.example.com/`.
-        """
-        return pulumi.get(self, "service_uri")
-
-
 @pulumi.output_type
 class ServiceIntegrationEndpointExternalSchemaRegistryUserConfig(dict):
     @staticmethod
@@ -23280,7 +22965,7 @@ class ServiceIntegrationEndpointExternalSchemaRegistryUserConfig(dict):
                  basic_auth_password: Optional[str] = None,
                  basic_auth_username: Optional[str] = None):
         """
-        :param str authentication: Enum: `
+        :param str authentication: Enum: `none`, `basic`. Authentication method.
         :param str url: Schema Registry URL. Example: `https://schema-registry.kafka.company.com:28419`.
         :param str basic_auth_password: Basic authentication password. Example: `Zm9vYg==`.
         :param str basic_auth_username: Basic authentication user name. Example: `avnadmin`.
@@ -23296,7 +22981,7 @@ class ServiceIntegrationEndpointExternalSchemaRegistryUserConfig(dict):
     @pulumi.getter
     def authentication(self) -> str:
         """
-        Enum: `
+        Enum: `none`, `basic`. Authentication method.
         """
         return pulumi.get(self, "authentication")

@@ -23456,7 +23141,7 @@ class ServiceIntegrationEndpointRsyslogUserConfig(dict):
                  max_message_size: Optional[int] = None,
                  sd: Optional[str] = None):
         """
-        :param str format: Enum: `
+        :param str format: Enum: `rfc5424`, `rfc3164`, `custom`. Message format. Default: `rfc5424`.
         :param int port: Rsyslog server port. Default: `514`.
         :param str server: Rsyslog server IP address or hostname. Example: `logs.example.com`.
         :param bool tls: Require TLS. Default: `true`.
@@ -23497,7 +23182,7 @@ class ServiceIntegrationEndpointRsyslogUserConfig(dict):
     @pulumi.getter
     def format(self) -> str:
         """
-        Enum: `
+        Enum: `rfc5424`, `rfc3164`, `custom`. Message format. Default: `rfc5424`.
         """
         return pulumi.get(self, "format")

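A hedged sketch of an rsyslog endpoint using the `format` enum above; the server hostname and project name are placeholders:

```python
# Sketch only: an rsyslog log-shipping endpoint. The hostname and
# project name are assumed placeholders.
import pulumi_aiven as aiven

rsyslog = aiven.ServiceIntegrationEndpoint(
    "rsyslog-endpoint",
    project="my-project",
    endpoint_name="central-rsyslog",
    endpoint_type="rsyslog",
    rsyslog_user_config=aiven.ServiceIntegrationEndpointRsyslogUserConfigArgs(
        server="logs.example.com",
        port=514,            # the default rsyslog port
        format="rfc5424",    # rfc5424, rfc3164, or custom
        tls=True,            # TLS is required by default
    ),
)
```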
@@ -24079,7 +23764,7 @@ class ServiceIntegrationKafkaMirrormakerUserConfigKafkaMirrormaker(dict):
         :param int consumer_max_poll_records: Set consumer max.poll.records. The default is 500. Example: `500`.
         :param int producer_batch_size: The batch size in bytes producer will attempt to collect before publishing to broker. Example: `1024`.
         :param int producer_buffer_memory: The amount of bytes producer can use for buffering data before publishing to broker. Example: `8388608`.
-        :param str producer_compression_type: Enum: `gzip`, `
+        :param str producer_compression_type: Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
         :param int producer_linger_ms: The linger time (ms) for waiting new data to arrive for publishing. Example: `100`.
         :param int producer_max_request_size: The maximum request size in bytes. Example: `1048576`.
         """
@@ -24144,7 +23829,7 @@ class ServiceIntegrationKafkaMirrormakerUserConfigKafkaMirrormaker(dict):
     @pulumi.getter(name="producerCompressionType")
     def producer_compression_type(self) -> Optional[str]:
         """
-        Enum: `gzip`, `
+        Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
         """
         return pulumi.get(self, "producer_compression_type")

@@ -25137,10 +24822,6 @@ class ThanosThanosUserConfig(dict):
             suggest = "public_access"
         elif key == "queryFrontend":
             suggest = "query_frontend"
-        elif key == "receiverIngesting":
-            suggest = "receiver_ingesting"
-        elif key == "receiverRouting":
-            suggest = "receiver_routing"
         elif key == "serviceLog":
             suggest = "service_log"
         elif key == "staticIps":
@@ -25167,12 +24848,8 @@ class ThanosThanosUserConfig(dict):
                  public_access: Optional['outputs.ThanosThanosUserConfigPublicAccess'] = None,
                  query: Optional['outputs.ThanosThanosUserConfigQuery'] = None,
                  query_frontend: Optional['outputs.ThanosThanosUserConfigQueryFrontend'] = None,
-                 receiver_ingesting: Optional[Mapping[str, str]] = None,
-                 receiver_routing: Optional[Mapping[str, str]] = None,
-                 ruler: Optional[Mapping[str, str]] = None,
                  service_log: Optional[bool] = None,
-                 static_ips: Optional[bool] = None,
-                 store: Optional[Mapping[str, str]] = None):
+                 static_ips: Optional[bool] = None):
         """
         :param 'ThanosThanosUserConfigCompactorArgs' compactor: ThanosCompactor
         :param Mapping[str, str] env: Environmental variables.
@@ -25183,12 +24860,8 @@ class ThanosThanosUserConfig(dict):
         :param 'ThanosThanosUserConfigPublicAccessArgs' public_access: Allow access to selected service ports from the public Internet
         :param 'ThanosThanosUserConfigQueryArgs' query: ThanosQuery
         :param 'ThanosThanosUserConfigQueryFrontendArgs' query_frontend: ThanosQueryFrontend
-        :param Mapping[str, str] receiver_ingesting: CommonReceive.
-        :param Mapping[str, str] receiver_routing: ThanosReceiveRouting.
-        :param Mapping[str, str] ruler: ThanosRuler.
         :param bool service_log: Store logs for the service so that they are available in the HTTP API and console.
         :param bool static_ips: Use static public IP addresses.
-        :param Mapping[str, str] store: ThanosStore.
         """
         if compactor is not None:
             pulumi.set(__self__, "compactor", compactor)
@@ -25208,18 +24881,10 @@ class ThanosThanosUserConfig(dict):
             pulumi.set(__self__, "query", query)
         if query_frontend is not None:
             pulumi.set(__self__, "query_frontend", query_frontend)
-        if receiver_ingesting is not None:
-            pulumi.set(__self__, "receiver_ingesting", receiver_ingesting)
-        if receiver_routing is not None:
-            pulumi.set(__self__, "receiver_routing", receiver_routing)
-        if ruler is not None:
-            pulumi.set(__self__, "ruler", ruler)
         if service_log is not None:
             pulumi.set(__self__, "service_log", service_log)
         if static_ips is not None:
             pulumi.set(__self__, "static_ips", static_ips)
-        if store is not None:
-            pulumi.set(__self__, "store", store)
 
     @property
     @pulumi.getter
@@ -25295,30 +24960,6 @@ class ThanosThanosUserConfig(dict):
         """
         return pulumi.get(self, "query_frontend")
 
-    @property
-    @pulumi.getter(name="receiverIngesting")
-    def receiver_ingesting(self) -> Optional[Mapping[str, str]]:
-        """
-        CommonReceive.
-        """
-        return pulumi.get(self, "receiver_ingesting")
-
-    @property
-    @pulumi.getter(name="receiverRouting")
-    def receiver_routing(self) -> Optional[Mapping[str, str]]:
-        """
-        ThanosReceiveRouting.
-        """
-        return pulumi.get(self, "receiver_routing")
-
-    @property
-    @pulumi.getter
-    def ruler(self) -> Optional[Mapping[str, str]]:
-        """
-        ThanosRuler.
-        """
-        return pulumi.get(self, "ruler")
-
     @property
     @pulumi.getter(name="serviceLog")
     def service_log(self) -> Optional[bool]:
@@ -25335,14 +24976,6 @@ class ThanosThanosUserConfig(dict):
         """
         return pulumi.get(self, "static_ips")
 
-    @property
-    @pulumi.getter
-    def store(self) -> Optional[Mapping[str, str]]:
-        """
-        ThanosStore.
-        """
-        return pulumi.get(self, "store")
-
 
 @pulumi.output_type
 class ThanosThanosUserConfigCompactor(dict):
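Note: the hunks above drop `receiver_ingesting`, `receiver_routing`, `ruler`, and `store` from `ThanosThanosUserConfig`. A sketch of the practical effect of such a field removal in a Pulumi-generated SDK (assumed upgrade scenario, not part of the diff):

```python
import pulumi_aiven as aiven

# After this release, passing any of the removed keys raises a TypeError
# ("unexpected keyword argument"); remove them from existing programs.
thanos_user_config = aiven.ThanosThanosUserConfigArgs(
    service_log=True,
    static_ips=False,
    # receiver_ingesting={...},  # no longer accepted
    # receiver_routing={...},    # no longer accepted
    # ruler={...},               # no longer accepted
    # store={...},               # no longer accepted
)
```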
@@ -26069,7 +25702,7 @@ class ValkeyValkeyUserConfig(dict):
         :param int valkey_io_threads: Set Valkey IO thread count. Changing this will cause a restart of the Valkey service. Example: `1`.
         :param int valkey_lfu_decay_time: LFU maxmemory-policy counter decay time in minutes. Default: `1`.
         :param int valkey_lfu_log_factor: Counter logarithm factor for volatile-lfu and allkeys-lfu maxmemory-policies. Default: `10`.
-        :param str valkey_maxmemory_policy: Enum: `allkeys-
+        :param str valkey_maxmemory_policy: Enum: `noeviction`, `allkeys-lru`, `volatile-lru`, `allkeys-random`, `volatile-random`, `volatile-ttl`, `volatile-lfu`, `allkeys-lfu`. Valkey maxmemory-policy. Default: `noeviction`.
         :param str valkey_notify_keyspace_events: Set notify-keyspace-events option.
         :param int valkey_number_of_databases: Set number of Valkey databases. Changing this will cause a restart of the Valkey service. Example: `16`.
         :param str valkey_persistence: Enum: `off`, `rdb`. When persistence is `rdb`, Valkey does RDB dumps each 10 minutes if any key is changed. Also RDB dumps are done according to backup schedule for backup purposes. When persistence is `off`, no RDB dumps and backups are done, so data can be lost at any moment if service is restarted for any reason, or if service is powered off. Also service can't be forked.
@@ -26287,7 +25920,7 @@ class ValkeyValkeyUserConfig(dict):
     @pulumi.getter(name="valkeyMaxmemoryPolicy")
     def valkey_maxmemory_policy(self) -> Optional[str]:
         """
-        Enum: `allkeys-
+        Enum: `noeviction`, `allkeys-lru`, `volatile-lru`, `allkeys-random`, `volatile-random`, `volatile-ttl`, `volatile-lfu`, `allkeys-lfu`. Valkey maxmemory-policy. Default: `noeviction`.
         """
         return pulumi.get(self, "valkey_maxmemory_policy")
 
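Note: a minimal sketch of the now fully documented `valkey_maxmemory_policy` enum; the resource name, project, plan, and region are placeholders:

```python
import pulumi_aiven as aiven

valkey = aiven.Valkey(
    "cache",
    project="my-project",
    cloud_name="google-europe-west1",
    plan="startup-4",
    service_name="my-valkey",
    valkey_user_config=aiven.ValkeyValkeyUserConfigArgs(
        # Any of: noeviction (default), allkeys-lru, volatile-lru, allkeys-random,
        # volatile-random, volatile-ttl, volatile-lfu, allkeys-lfu.
        valkey_maxmemory_policy="allkeys-lru",
    ),
)
```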
@@ -26752,6 +26385,7 @@ class GetCassandaCassandraUserConfigResult(dict):
 
     @property
     @pulumi.getter(name="additionalBackupRegions")
+    @_utilities.deprecated("""This property is deprecated.""")
     def additional_backup_regions(self) -> Optional[str]:
         """
         Additional Cloud Regions for Backup Replication.
@@ -27276,6 +26910,7 @@ class GetCassandraCassandraUserConfigResult(dict):
 
     @property
     @pulumi.getter(name="additionalBackupRegions")
+    @_utilities.deprecated("""This property is deprecated.""")
     def additional_backup_regions(self) -> Optional[str]:
         """
         Additional Cloud Regions for Backup Replication.
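Note: `@_utilities.deprecated` now wraps the `additional_backup_regions` getters. The shipped helper lives in `pulumi_aiven/_utilities.py`; as an assumption-based sketch (not the vendored code), a decorator of this shape typically looks like:

```python
import functools
import warnings

def deprecated(message: str):
    """Emit a DeprecationWarning whenever the wrapped getter is called."""
    def decorator(fn):
        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            warnings.warn(message, DeprecationWarning, stacklevel=2)
            return fn(*args, **kwargs)
        return wrapper
    return decorator
```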
@@ -28413,7 +28048,7 @@ class GetDragonflyDragonflyUserConfigResult(dict):
                  static_ips: Optional[bool] = None):
         """
         :param bool cache_mode: Evict entries when getting close to maxmemory limit. Default: `false`.
-        :param str dragonfly_persistence: Enum: `
+        :param str dragonfly_persistence: Enum: `off`, `rdb`, `dfs`. When persistence is `rdb` or `dfs`, Dragonfly does RDB or DFS dumps every 10 minutes. Dumps are done according to the backup schedule for backup purposes. When persistence is `off`, no RDB/DFS dumps or backups are done, so data can be lost at any moment if the service is restarted for any reason, or if the service is powered off. Also, the service can't be forked.
         :param bool dragonfly_ssl: Require SSL to access Dragonfly. Default: `true`.
         :param Sequence['GetDragonflyDragonflyUserConfigIpFilterObjectArgs'] ip_filter_objects: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`
         :param Sequence[str] ip_filter_strings: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`.
@@ -28471,7 +28106,7 @@ class GetDragonflyDragonflyUserConfigResult(dict):
     @pulumi.getter(name="dragonflyPersistence")
     def dragonfly_persistence(self) -> Optional[str]:
         """
-        Enum: `
+        Enum: `off`, `rdb`, `dfs`. When persistence is `rdb` or `dfs`, Dragonfly does RDB or DFS dumps every 10 minutes. Dumps are done according to the backup schedule for backup purposes. When persistence is `off`, no RDB/DFS dumps or backups are done, so data can be lost at any moment if the service is restarted for any reason, or if the service is powered off. Also, the service can't be forked.
         """
         return pulumi.get(self, "dragonfly_persistence")
 
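Note: a short sketch of the `dragonfly_persistence` modes documented above (all names are placeholders):

```python
import pulumi_aiven as aiven

dragonfly = aiven.Dragonfly(
    "df",
    project="my-project",
    cloud_name="google-europe-west1",
    plan="startup-4",
    service_name="my-dragonfly",
    dragonfly_user_config=aiven.DragonflyDragonflyUserConfigArgs(
        dragonfly_persistence="rdb",  # "off" disables dumps and backups entirely
    ),
)
```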
@@ -29536,10 +29171,10 @@ class GetGrafanaGrafanaUserConfigResult(dict):
                  wal: Optional[bool] = None):
         """
         :param str additional_backup_regions: Additional Cloud Regions for Backup Replication.
-        :param bool alerting_enabled:
+        :param bool alerting_enabled: Enable or disable Grafana legacy alerting functionality. This should not be enabled with unified_alerting_enabled.
         :param str alerting_error_or_timeout: Enum: `alerting`, `keep_state`. Default error or timeout setting for new alerting rules.
         :param int alerting_max_annotations_to_keep: Max number of alert annotations that Grafana stores. 0 (default) keeps all alert annotations. Example: `0`.
-        :param str alerting_nodata_or_nullvalues: Enum: `alerting`, `
+        :param str alerting_nodata_or_nullvalues: Enum: `alerting`, `no_data`, `keep_state`, `ok`. Default value for 'no data or null values' for new alerting rules.
         :param bool allow_embedding: Allow embedding Grafana dashboards with iframe/frame/object/embed tags. Disabled by default to limit impact of clickjacking.
         :param 'GetGrafanaGrafanaUserConfigAuthAzureadArgs' auth_azuread: Azure AD OAuth integration
         :param bool auth_basic_enabled: Enable or disable basic authentication form, used by Grafana built-in login.
@@ -29547,9 +29182,9 @@ class GetGrafanaGrafanaUserConfigResult(dict):
         :param 'GetGrafanaGrafanaUserConfigAuthGithubArgs' auth_github: Github Auth integration
         :param 'GetGrafanaGrafanaUserConfigAuthGitlabArgs' auth_gitlab: GitLab Auth integration
         :param 'GetGrafanaGrafanaUserConfigAuthGoogleArgs' auth_google: Google Auth integration
-        :param str cookie_samesite: Enum: `lax`, `
+        :param str cookie_samesite: Enum: `lax`, `strict`, `none`. Cookie SameSite attribute: `strict` prevents sending cookie for cross-site requests, effectively disabling direct linking from other sites to Grafana. `lax` is the default value.
         :param str custom_domain: Serve the web frontend using a custom CNAME pointing to the Aiven DNS name. Example: `grafana.example.org`.
-        :param bool dashboard_previews_enabled:
+        :param bool dashboard_previews_enabled: This feature is new in Grafana 9 and is quite resource intensive. It may cause low-end plans to work more slowly while the dashboard previews are rendering.
         :param str dashboards_min_refresh_interval: Signed sequence of decimal numbers, followed by a unit suffix (ms, s, m, h, d), e.g. 30s, 1h. Example: `5s`.
         :param int dashboards_versions_to_keep: Dashboard versions to keep per dashboard. Example: `20`.
         :param bool dataproxy_send_user_header: Send `X-Grafana-User` header to data source.
@@ -29562,7 +29197,7 @@ class GetGrafanaGrafanaUserConfigResult(dict):
         :param Sequence['GetGrafanaGrafanaUserConfigIpFilterObjectArgs'] ip_filter_objects: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`
         :param Sequence[str] ip_filter_strings: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`.
         :param Sequence[str] ip_filters: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`.
-        :param bool metrics_enabled: Enable Grafana
+        :param bool metrics_enabled: Enable Grafana /metrics endpoint.
         :param bool oauth_allow_insecure_email_lookup: Enforce user lookup based on email instead of the unique ID provided by the IdP.
         :param 'GetGrafanaGrafanaUserConfigPrivateAccessArgs' private_access: Allow access to selected service ports from private networks
         :param 'GetGrafanaGrafanaUserConfigPrivatelinkAccessArgs' privatelink_access: Allow access to selected service components through Privatelink
@@ -29573,9 +29208,9 @@ class GetGrafanaGrafanaUserConfigResult(dict):
         :param str service_to_fork_from: Name of another service to fork from. This has effect only when a new service is being created. Example: `anotherservicename`.
         :param 'GetGrafanaGrafanaUserConfigSmtpServerArgs' smtp_server: SMTP server settings
         :param bool static_ips: Use static public IP addresses.
-        :param bool unified_alerting_enabled: Enable or disable Grafana unified alerting functionality. By default this is enabled and any legacy alerts will be migrated on upgrade to Grafana 9+. To stay on legacy alerting, set unified_alerting_enabled to false and alerting_enabled to true. See https://grafana.com/docs/grafana/latest/alerting/ for more details.
+        :param bool unified_alerting_enabled: Enable or disable Grafana unified alerting functionality. By default this is enabled and any legacy alerts will be migrated on upgrade to Grafana 9+. To stay on legacy alerting, set unified_alerting_enabled to false and alerting_enabled to true. See https://grafana.com/docs/grafana/latest/alerting/set-up/migrating-alerts/ for more details.
         :param bool user_auto_assign_org: Auto-assign new users on signup to main organization. Defaults to false.
-        :param str user_auto_assign_org_role: Enum: `
+        :param str user_auto_assign_org_role: Enum: `Viewer`, `Admin`, `Editor`. Set role for new signups. Defaults to Viewer.
         :param bool viewers_can_edit: Users with view-only permission can edit but not save dashboards.
         :param bool wal: Setting to enable/disable Write-Ahead Logging. The default value is false (disabled).
         """
@@ -29678,7 +29313,7 @@ class GetGrafanaGrafanaUserConfigResult(dict):
     @pulumi.getter(name="alertingEnabled")
     def alerting_enabled(self) -> Optional[bool]:
         """
-
+        Enable or disable Grafana legacy alerting functionality. This should not be enabled with unified_alerting_enabled.
         """
         return pulumi.get(self, "alerting_enabled")
 
@@ -29702,7 +29337,7 @@ class GetGrafanaGrafanaUserConfigResult(dict):
     @pulumi.getter(name="alertingNodataOrNullvalues")
     def alerting_nodata_or_nullvalues(self) -> Optional[str]:
         """
-        Enum: `alerting`, `
+        Enum: `alerting`, `no_data`, `keep_state`, `ok`. Default value for 'no data or null values' for new alerting rules.
         """
         return pulumi.get(self, "alerting_nodata_or_nullvalues")
 
@@ -29766,7 +29401,7 @@ class GetGrafanaGrafanaUserConfigResult(dict):
     @pulumi.getter(name="cookieSamesite")
     def cookie_samesite(self) -> Optional[str]:
         """
-        Enum: `lax`, `
+        Enum: `lax`, `strict`, `none`. Cookie SameSite attribute: `strict` prevents sending cookie for cross-site requests, effectively disabling direct linking from other sites to Grafana. `lax` is the default value.
         """
         return pulumi.get(self, "cookie_samesite")
 
@@ -29782,7 +29417,7 @@ class GetGrafanaGrafanaUserConfigResult(dict):
     @pulumi.getter(name="dashboardPreviewsEnabled")
     def dashboard_previews_enabled(self) -> Optional[bool]:
         """
-
+        This feature is new in Grafana 9 and is quite resource intensive. It may cause low-end plans to work more slowly while the dashboard previews are rendering.
         """
         return pulumi.get(self, "dashboard_previews_enabled")
 
@@ -29887,7 +29522,7 @@ class GetGrafanaGrafanaUserConfigResult(dict):
     @pulumi.getter(name="metricsEnabled")
     def metrics_enabled(self) -> Optional[bool]:
         """
-        Enable Grafana
+        Enable Grafana /metrics endpoint.
         """
         return pulumi.get(self, "metrics_enabled")
 
@@ -29975,7 +29610,7 @@ class GetGrafanaGrafanaUserConfigResult(dict):
     @pulumi.getter(name="unifiedAlertingEnabled")
     def unified_alerting_enabled(self) -> Optional[bool]:
         """
-        Enable or disable Grafana unified alerting functionality. By default this is enabled and any legacy alerts will be migrated on upgrade to Grafana 9+. To stay on legacy alerting, set unified_alerting_enabled to false and alerting_enabled to true. See https://grafana.com/docs/grafana/latest/alerting/ for more details.
+        Enable or disable Grafana unified alerting functionality. By default this is enabled and any legacy alerts will be migrated on upgrade to Grafana 9+. To stay on legacy alerting, set unified_alerting_enabled to false and alerting_enabled to true. See https://grafana.com/docs/grafana/latest/alerting/set-up/migrating-alerts/ for more details.
         """
         return pulumi.get(self, "unified_alerting_enabled")
 
@@ -29991,7 +29626,7 @@ class GetGrafanaGrafanaUserConfigResult(dict):
     @pulumi.getter(name="userAutoAssignOrgRole")
     def user_auto_assign_org_role(self) -> Optional[str]:
         """
-        Enum: `
+        Enum: `Viewer`, `Admin`, `Editor`. Set role for new signups. Defaults to Viewer.
         """
         return pulumi.get(self, "user_auto_assign_org_role")
 
@@ -30349,9 +29984,9 @@ class GetGrafanaGrafanaUserConfigAuthGitlabResult(dict):
         :param str client_id: Client ID from provider. Example: `b1ba0bf54a4c2c0a1c29`.
         :param str client_secret: Client secret from provider. Example: `bfa6gea4f129076761dcba8ce5e1e406bd83af7b`.
         :param bool allow_sign_up: Automatically sign-up users on successful sign-in.
-        :param str api_url: This only needs to be set when using self hosted GitLab. Example: `https://gitlab.com/api/v4`.
-        :param str auth_url: This only needs to be set when using self hosted GitLab. Example: `https://gitlab.com/oauth/authorize`.
-        :param str token_url: This only needs to be set when using self hosted GitLab. Example: `https://gitlab.com/oauth/token`.
+        :param str api_url: API URL. This only needs to be set when using self hosted GitLab. Example: `https://gitlab.com/api/v4`.
+        :param str auth_url: Authorization URL. This only needs to be set when using self hosted GitLab. Example: `https://gitlab.com/oauth/authorize`.
+        :param str token_url: Token URL. This only needs to be set when using self hosted GitLab. Example: `https://gitlab.com/oauth/token`.
         """
         pulumi.set(__self__, "allowed_groups", allowed_groups)
         pulumi.set(__self__, "client_id", client_id)
@@ -30401,7 +30036,7 @@ class GetGrafanaGrafanaUserConfigAuthGitlabResult(dict):
     @pulumi.getter(name="apiUrl")
     def api_url(self) -> Optional[str]:
         """
-        This only needs to be set when using self hosted GitLab. Example: `https://gitlab.com/api/v4`.
+        API URL. This only needs to be set when using self hosted GitLab. Example: `https://gitlab.com/api/v4`.
         """
         return pulumi.get(self, "api_url")
 
@@ -30409,7 +30044,7 @@ class GetGrafanaGrafanaUserConfigAuthGitlabResult(dict):
     @pulumi.getter(name="authUrl")
     def auth_url(self) -> Optional[str]:
         """
-        This only needs to be set when using self hosted GitLab. Example: `https://gitlab.com/oauth/authorize`.
+        Authorization URL. This only needs to be set when using self hosted GitLab. Example: `https://gitlab.com/oauth/authorize`.
         """
         return pulumi.get(self, "auth_url")
 
@@ -30417,7 +30052,7 @@ class GetGrafanaGrafanaUserConfigAuthGitlabResult(dict):
     @pulumi.getter(name="tokenUrl")
     def token_url(self) -> Optional[str]:
         """
-        This only needs to be set when using self hosted GitLab. Example: `https://gitlab.com/oauth/token`.
+        Token URL. This only needs to be set when using self hosted GitLab. Example: `https://gitlab.com/oauth/token`.
         """
         return pulumi.get(self, "token_url")
 
@@ -30587,7 +30222,7 @@ class GetGrafanaGrafanaUserConfigExternalImageStorageResult(dict):
         """
         :param str access_key: S3 access key. Requires permissions to the S3 bucket for the s3:PutObject and s3:PutObjectAcl actions. Example: `AAAAAAAAAAAAAAAAAAA`.
         :param str bucket_url: Bucket URL for S3. Example: `https://grafana.s3-ap-southeast-2.amazonaws.com/`.
-        :param str provider: Enum: `s3`.
+        :param str provider: Enum: `s3`. Provider type.
         :param str secret_key: S3 secret key. Example: `AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA`.
         """
         pulumi.set(__self__, "access_key", access_key)
@@ -30615,7 +30250,7 @@ class GetGrafanaGrafanaUserConfigExternalImageStorageResult(dict):
     @pulumi.getter
     def provider(self) -> str:
         """
-        Enum: `s3`.
+        Enum: `s3`. Provider type.
         """
         return pulumi.get(self, "provider")
 
@@ -30733,7 +30368,7 @@ class GetGrafanaGrafanaUserConfigSmtpServerResult(dict):
         :param str from_name: Name used in outgoing emails, defaults to Grafana.
         :param str password: Password for SMTP authentication. Example: `ein0eemeev5eeth3Ahfu`.
         :param bool skip_verify: Skip verifying server certificate. Defaults to false.
-        :param str starttls_policy: Enum: `
+        :param str starttls_policy: Enum: `OpportunisticStartTLS`, `MandatoryStartTLS`, `NoStartTLS`. Either OpportunisticStartTLS, MandatoryStartTLS or NoStartTLS. Default is OpportunisticStartTLS.
         :param str username: Username for SMTP authentication. Example: `smtpuser`.
         """
         pulumi.set(__self__, "from_address", from_address)
@@ -30802,7 +30437,7 @@ class GetGrafanaGrafanaUserConfigSmtpServerResult(dict):
     @pulumi.getter(name="starttlsPolicy")
     def starttls_policy(self) -> Optional[str]:
         """
-        Enum: `
+        Enum: `OpportunisticStartTLS`, `MandatoryStartTLS`, `NoStartTLS`. Either OpportunisticStartTLS, MandatoryStartTLS or NoStartTLS. Default is OpportunisticStartTLS.
         """
         return pulumi.get(self, "starttls_policy")
 
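Note: several Grafana options gain real descriptions above (legacy vs. unified alerting, cookie SameSite, signup role). A combined sketch with hypothetical names:

```python
import pulumi_aiven as aiven

grafana = aiven.Grafana(
    "grafana",
    project="my-project",
    cloud_name="google-europe-west1",
    plan="startup-1",
    service_name="my-grafana",
    grafana_user_config=aiven.GrafanaGrafanaUserConfigArgs(
        alerting_enabled=False,          # legacy alerting; don't combine with the next flag
        unified_alerting_enabled=True,
        cookie_samesite="strict",        # lax (default), strict, or none
        user_auto_assign_org_role="Viewer",
    ),
)
```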
@@ -31043,7 +30678,6 @@ class GetInfluxDbInfluxdbUserConfigResult(dict):
                  additional_backup_regions: Optional[str] = None,
                  custom_domain: Optional[str] = None,
                  influxdb: Optional['outputs.GetInfluxDbInfluxdbUserConfigInfluxdbResult'] = None,
-                 influxdb_version: Optional[str] = None,
                  ip_filter_objects: Optional[Sequence['outputs.GetInfluxDbInfluxdbUserConfigIpFilterObjectResult']] = None,
                  ip_filter_strings: Optional[Sequence[str]] = None,
                  ip_filters: Optional[Sequence[str]] = None,
@@ -31059,7 +30693,6 @@ class GetInfluxDbInfluxdbUserConfigResult(dict):
         :param str additional_backup_regions: Additional Cloud Regions for Backup Replication.
         :param str custom_domain: Serve the web frontend using a custom CNAME pointing to the Aiven DNS name. Example: `grafana.example.org`.
         :param 'GetInfluxDbInfluxdbUserConfigInfluxdbArgs' influxdb: influxdb.conf configuration values
-        :param str influxdb_version: Enum: `1.8`, and newer. InfluxDB major version. Default: `1.8`.
         :param Sequence['GetInfluxDbInfluxdbUserConfigIpFilterObjectArgs'] ip_filter_objects: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`
         :param Sequence[str] ip_filter_strings: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`.
         :param Sequence[str] ip_filters: Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`.
@@ -31078,8 +30711,6 @@ class GetInfluxDbInfluxdbUserConfigResult(dict):
             pulumi.set(__self__, "custom_domain", custom_domain)
         if influxdb is not None:
             pulumi.set(__self__, "influxdb", influxdb)
-        if influxdb_version is not None:
-            pulumi.set(__self__, "influxdb_version", influxdb_version)
         if ip_filter_objects is not None:
             pulumi.set(__self__, "ip_filter_objects", ip_filter_objects)
         if ip_filter_strings is not None:
@@ -31128,14 +30759,6 @@ class GetInfluxDbInfluxdbUserConfigResult(dict):
         """
         return pulumi.get(self, "influxdb")
 
-    @property
-    @pulumi.getter(name="influxdbVersion")
-    def influxdb_version(self) -> Optional[str]:
-        """
-        Enum: `1.8`, and newer. InfluxDB major version. Default: `1.8`.
-        """
-        return pulumi.get(self, "influxdb_version")
-
     @property
     @pulumi.getter(name="ipFilterObjects")
     def ip_filter_objects(self) -> Optional[Sequence['outputs.GetInfluxDbInfluxdbUserConfigIpFilterObjectResult']]:
@@ -31350,16 +30973,12 @@ class GetInfluxDbInfluxdbUserConfigIpFilterObjectResult(dict):
 @pulumi.output_type
 class GetInfluxDbInfluxdbUserConfigPrivateAccessResult(dict):
     def __init__(__self__, *,
-                 influxdb: Optional[bool] = None,
-                 user_backup: Optional[bool] = None):
+                 influxdb: Optional[bool] = None):
         """
         :param bool influxdb: Allow clients to connect to influxdb with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
-        :param bool user_backup: Allow clients to connect to user_backup with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
         """
         if influxdb is not None:
             pulumi.set(__self__, "influxdb", influxdb)
-        if user_backup is not None:
-            pulumi.set(__self__, "user_backup", user_backup)
 
     @property
     @pulumi.getter
@@ -31369,28 +30988,16 @@ class GetInfluxDbInfluxdbUserConfigPrivateAccessResult(dict):
         """
         return pulumi.get(self, "influxdb")
 
-    @property
-    @pulumi.getter(name="userBackup")
-    def user_backup(self) -> Optional[bool]:
-        """
-        Allow clients to connect to user_backup with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
-        """
-        return pulumi.get(self, "user_backup")
-
 
 @pulumi.output_type
 class GetInfluxDbInfluxdbUserConfigPrivatelinkAccessResult(dict):
     def __init__(__self__, *,
-                 influxdb: Optional[bool] = None,
-                 user_backup: Optional[bool] = None):
+                 influxdb: Optional[bool] = None):
         """
         :param bool influxdb: Enable influxdb.
-        :param bool user_backup: Enable user_backup.
         """
         if influxdb is not None:
             pulumi.set(__self__, "influxdb", influxdb)
-        if user_backup is not None:
-            pulumi.set(__self__, "user_backup", user_backup)
 
     @property
     @pulumi.getter
@@ -31400,28 +31007,16 @@ class GetInfluxDbInfluxdbUserConfigPrivatelinkAccessResult(dict):
         """
         return pulumi.get(self, "influxdb")
 
-    @property
-    @pulumi.getter(name="userBackup")
-    def user_backup(self) -> Optional[bool]:
-        """
-        Enable user_backup.
-        """
-        return pulumi.get(self, "user_backup")
-
 
 @pulumi.output_type
 class GetInfluxDbInfluxdbUserConfigPublicAccessResult(dict):
     def __init__(__self__, *,
-                 influxdb: Optional[bool] = None,
-                 user_backup: Optional[bool] = None):
+                 influxdb: Optional[bool] = None):
         """
         :param bool influxdb: Allow clients to connect to influxdb from the public internet for service nodes that are in a project VPC or another type of private network.
-        :param bool user_backup: Allow clients to connect to user_backup from the public internet for service nodes that are in a project VPC or another type of private network.
         """
         if influxdb is not None:
             pulumi.set(__self__, "influxdb", influxdb)
-        if user_backup is not None:
-            pulumi.set(__self__, "user_backup", user_backup)
 
     @property
     @pulumi.getter
@@ -31431,14 +31026,6 @@ class GetInfluxDbInfluxdbUserConfigPublicAccessResult(dict):
         """
         return pulumi.get(self, "influxdb")
 
-    @property
-    @pulumi.getter(name="userBackup")
-    def user_backup(self) -> Optional[bool]:
-        """
-        Allow clients to connect to user_backup from the public internet for service nodes that are in a project VPC or another type of private network.
-        """
-        return pulumi.get(self, "user_backup")
-
 
 @pulumi.output_type
 class GetInfluxDbServiceIntegrationResult(dict):
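Note: the InfluxDB hunks above remove `influxdb_version` and every `user_backup` access toggle. A sketch of the upgrade impact, assuming the input-side argument classes mirror the result classes shown in this diff:

```python
import pulumi_aiven as aiven

# Only the influxdb toggle remains on the access blocks after this release.
private_access = aiven.InfluxDbInfluxdbUserConfigPrivateAccessArgs(
    influxdb=True,
    # user_backup=True,  # removed; now raises TypeError if passed
)
```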
@@ -31893,10 +31480,10 @@ class GetKafkaConnectKafkaConnectUserConfigKafkaConnectResult(dict):
                  scheduled_rebalance_max_delay_ms: Optional[int] = None,
                  session_timeout_ms: Optional[int] = None):
         """
-        :param str connector_client_config_override_policy: Enum: `
+        :param str connector_client_config_override_policy: Enum: `None`, `All`. Defines what client configurations can be overridden by the connector. Default is None.
         :param str consumer_auto_offset_reset: Enum: `earliest`, `latest`. What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
         :param int consumer_fetch_max_bytes: Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not a absolute maximum. Example: `52428800`.
-        :param str consumer_isolation_level: Enum: `
+        :param str consumer_isolation_level: Enum: `read_uncommitted`, `read_committed`. Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
         :param int consumer_max_partition_fetch_bytes: Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress. Example: `1048576`.
         :param int consumer_max_poll_interval_ms: The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).
         :param int consumer_max_poll_records: The maximum number of records returned in a single call to poll() (defaults to 500).
@@ -31904,7 +31491,7 @@ class GetKafkaConnectKafkaConnectUserConfigKafkaConnectResult(dict):
         :param int offset_flush_timeout_ms: Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).
         :param int producer_batch_size: This setting gives the upper bound of the batch size to be sent. If there are fewer than this many bytes accumulated for this partition, the producer will `linger` for the linger.ms time waiting for more records to show up. A batch size of zero will disable batching entirely (defaults to 16384).
         :param int producer_buffer_memory: The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432).
-        :param str producer_compression_type: Enum: `gzip`, `
+        :param str producer_compression_type: Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
         :param int producer_linger_ms: This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting, however if there are fewer than this many bytes accumulated for this partition the producer will `linger` for the specified time waiting for more records to show up. Defaults to 0.
         :param int producer_max_request_size: This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests. Example: `1048576`.
         :param int scheduled_rebalance_max_delay_ms: The maximum delay that is scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. Defaults to 5 minutes.
@@ -31947,7 +31534,7 @@ class GetKafkaConnectKafkaConnectUserConfigKafkaConnectResult(dict):
     @pulumi.getter(name="connectorClientConfigOverridePolicy")
     def connector_client_config_override_policy(self) -> Optional[str]:
         """
-        Enum: `
+        Enum: `None`, `All`. Defines what client configurations can be overridden by the connector. Default is None.
         """
         return pulumi.get(self, "connector_client_config_override_policy")
 
@@ -31971,7 +31558,7 @@ class GetKafkaConnectKafkaConnectUserConfigKafkaConnectResult(dict):
     @pulumi.getter(name="consumerIsolationLevel")
     def consumer_isolation_level(self) -> Optional[str]:
         """
-        Enum: `
+        Enum: `read_uncommitted`, `read_committed`. Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
         """
         return pulumi.get(self, "consumer_isolation_level")
 
@@ -32035,7 +31622,7 @@ class GetKafkaConnectKafkaConnectUserConfigKafkaConnectResult(dict):
     @pulumi.getter(name="producerCompressionType")
     def producer_compression_type(self) -> Optional[str]:
         """
-        Enum: `gzip`, `
+        Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
         """
         return pulumi.get(self, "producer_compression_type")
 
@@ -32944,7 +32531,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
                  transaction_state_log_segment_bytes: Optional[int] = None):
         """
         :param bool auto_create_topics_enable: Enable auto-creation of topics. (Default: true).
-        :param str compression_type: Enum: `gzip`, `
+        :param str compression_type: Enum: `gzip`, `snappy`, `lz4`, `zstd`, `uncompressed`, `producer`. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `uncompressed` which is equivalent to no compression; and `producer` which means retain the original compression codec set by the producer.(Default: producer).
         :param int connections_max_idle_ms: Idle connections timeout: the server socket processor threads close the connections that idle for longer than this. (Default: 600000 ms (10 minutes)). Example: `540000`.
         :param int default_replication_factor: Replication factor for auto-created topics (Default: 3).
         :param int group_initial_rebalance_delay_ms: The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. (Default: 3000 ms (3 seconds)). Example: `3000`.
@@ -32954,8 +32541,8 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
         :param int log_cleaner_max_compaction_lag_ms: The maximum amount of time message will remain uncompacted. Only applicable for logs that are being compacted. (Default: 9223372036854775807 ms (Long.MAX_VALUE)).
         :param float log_cleaner_min_cleanable_ratio: Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option. (Default: 0.5). Example: `0.5`.
         :param int log_cleaner_min_compaction_lag_ms: The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted. (Default: 0 ms).
-        :param str log_cleanup_policy: Enum: `
-        :param int log_flush_interval_messages: The number of messages accumulated on a log partition before messages are flushed to disk (Default: 9223372036854775807 (Long.MAX_VALUE)).
+        :param str log_cleanup_policy: Enum: `delete`, `compact`, `compact,delete`. The default cleanup policy for segments beyond the retention window (Default: delete).
+        :param int log_flush_interval_messages: The number of messages accumulated on a log partition before messages are flushed to disk (Default: 9223372036854775807 (Long.MAX_VALUE)). Example: `9223372036854775807`.
         :param int log_flush_interval_ms: The maximum time in ms that a message in any topic is kept in memory (page-cache) before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used (Default: null).
         :param int log_index_interval_bytes: The interval with which Kafka adds an entry to the offset index (Default: 4096 bytes (4 kibibytes)). Example: `4096`.
         :param int log_index_size_max_bytes: The maximum size in bytes of the offset index (Default: 10485760 (10 mebibytes)). Example: `10485760`.
@@ -33095,7 +32682,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
     @pulumi.getter(name="compressionType")
     def compression_type(self) -> Optional[str]:
         """
-        Enum: `gzip`, `
+        Enum: `gzip`, `snappy`, `lz4`, `zstd`, `uncompressed`, `producer`. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `uncompressed` which is equivalent to no compression; and `producer` which means retain the original compression codec set by the producer.(Default: producer).
         """
         return pulumi.get(self, "compression_type")
 
@@ -33175,7 +32762,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
     @pulumi.getter(name="logCleanupPolicy")
     def log_cleanup_policy(self) -> Optional[str]:
         """
-        Enum: `
+        Enum: `delete`, `compact`, `compact,delete`. The default cleanup policy for segments beyond the retention window (Default: delete).
         """
         return pulumi.get(self, "log_cleanup_policy")
 
@@ -33183,7 +32770,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
     @pulumi.getter(name="logFlushIntervalMessages")
     def log_flush_interval_messages(self) -> Optional[int]:
         """
-        The number of messages accumulated on a log partition before messages are flushed to disk (Default: 9223372036854775807 (Long.MAX_VALUE)).
+        The number of messages accumulated on a log partition before messages are flushed to disk (Default: 9223372036854775807 (Long.MAX_VALUE)). Example: `9223372036854775807`.
         """
         return pulumi.get(self, "log_flush_interval_messages")
 
@@ -33503,10 +33090,10 @@ class GetKafkaKafkaUserConfigKafkaConnectConfigResult(dict):
                  scheduled_rebalance_max_delay_ms: Optional[int] = None,
                  session_timeout_ms: Optional[int] = None):
         """
-        :param str connector_client_config_override_policy: Enum: `
+        :param str connector_client_config_override_policy: Enum: `None`, `All`. Defines what client configurations can be overridden by the connector. Default is None.
         :param str consumer_auto_offset_reset: Enum: `earliest`, `latest`. What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
         :param int consumer_fetch_max_bytes: Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not a absolute maximum. Example: `52428800`.
-        :param str consumer_isolation_level: Enum: `
+        :param str consumer_isolation_level: Enum: `read_uncommitted`, `read_committed`. Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
         :param int consumer_max_partition_fetch_bytes: Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress. Example: `1048576`.
         :param int consumer_max_poll_interval_ms: The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).
         :param int consumer_max_poll_records: The maximum number of records returned in a single call to poll() (defaults to 500).
@@ -33514,7 +33101,7 @@ class GetKafkaKafkaUserConfigKafkaConnectConfigResult(dict):
         :param int offset_flush_timeout_ms: Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).
         :param int producer_batch_size: This setting gives the upper bound of the batch size to be sent. If there are fewer than this many bytes accumulated for this partition, the producer will `linger` for the linger.ms time waiting for more records to show up. A batch size of zero will disable batching entirely (defaults to 16384).
         :param int producer_buffer_memory: The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432).
-        :param str producer_compression_type: Enum: `gzip`, `
+        :param str producer_compression_type: Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
         :param int producer_linger_ms: This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting, however if there are fewer than this many bytes accumulated for this partition the producer will `linger` for the specified time waiting for more records to show up. Defaults to 0.
         :param int producer_max_request_size: This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests. Example: `1048576`.
         :param int scheduled_rebalance_max_delay_ms: The maximum delay that is scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. Defaults to 5 minutes.
@@ -33557,7 +33144,7 @@ class GetKafkaKafkaUserConfigKafkaConnectConfigResult(dict):
     @pulumi.getter(name="connectorClientConfigOverridePolicy")
     def connector_client_config_override_policy(self) -> Optional[str]:
         """
-        Enum: `
+        Enum: `None`, `All`. Defines what client configurations can be overridden by the connector. Default is None.
         """
         return pulumi.get(self, "connector_client_config_override_policy")
 
@@ -33581,7 +33168,7 @@ class GetKafkaKafkaUserConfigKafkaConnectConfigResult(dict):
     @pulumi.getter(name="consumerIsolationLevel")
     def consumer_isolation_level(self) -> Optional[str]:
         """
-        Enum: `
+        Enum: `read_uncommitted`, `read_committed`. Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
         """
         return pulumi.get(self, "consumer_isolation_level")
 
@@ -33645,7 +33232,7 @@ class GetKafkaKafkaUserConfigKafkaConnectConfigResult(dict):
     @pulumi.getter(name="producerCompressionType")
     def producer_compression_type(self) -> Optional[str]:
         """
-        Enum: `gzip`, `
+        Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
         """
         return pulumi.get(self, "producer_compression_type")
 
@@ -33859,10 +33446,10 @@ class GetKafkaKafkaUserConfigKafkaRestConfigResult(dict):
         :param bool consumer_enable_auto_commit: If true the consumer's offset will be periodically committed to Kafka in the background. Default: `true`.
         :param int consumer_request_max_bytes: Maximum number of bytes in unencoded message keys and values by a single request. Default: `67108864`.
         :param int consumer_request_timeout_ms: Enum: `1000`, `15000`, `30000`. The maximum total time to wait for messages for a request if the maximum number of messages has not yet been reached. Default: `1000`.
-        :param str name_strategy: Enum: `
+        :param str name_strategy: Enum: `topic_name`, `record_name`, `topic_record_name`. Name strategy to use when selecting subject for storing schemas. Default: `topic_name`.
         :param bool name_strategy_validation: If true, validate that given schema is registered under expected subject name by the used name strategy when producing messages. Default: `true`.
-        :param str producer_acks: Enum: `-1`, `0`, `1
-        :param str producer_compression_type: Enum: `gzip`, `
+        :param str producer_acks: Enum: `all`, `-1`, `0`, `1`. The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to `all` or `-1`, the leader will wait for the full set of in-sync replicas to acknowledge the record. Default: `1`.
+        :param str producer_compression_type: Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
         :param int producer_linger_ms: Wait for up to the given delay to allow batching records together. Default: `0`.
         :param int producer_max_request_size: The maximum size of a request in bytes. Note that Kafka broker can also cap the record batch size. Default: `1048576`.
         :param int simpleconsumer_pool_size_max: Maximum number of SimpleConsumers that can be instantiated per broker. Default: `25`.
@@ -33916,7 +33503,7 @@ class GetKafkaKafkaUserConfigKafkaRestConfigResult(dict):
     @pulumi.getter(name="nameStrategy")
     def name_strategy(self) -> Optional[str]:
         """
-        Enum: `
+        Enum: `topic_name`, `record_name`, `topic_record_name`. Name strategy to use when selecting subject for storing schemas. Default: `topic_name`.
         """
         return pulumi.get(self, "name_strategy")
 
@@ -33932,7 +33519,7 @@ class GetKafkaKafkaUserConfigKafkaRestConfigResult(dict):
     @pulumi.getter(name="producerAcks")
     def producer_acks(self) -> Optional[str]:
         """
-        Enum: `-1`, `0`, `1
+        Enum: `all`, `-1`, `0`, `1`. The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to `all` or `-1`, the leader will wait for the full set of in-sync replicas to acknowledge the record. Default: `1`.
         """
         return pulumi.get(self, "producer_acks")
 
@@ -33940,7 +33527,7 @@ class GetKafkaKafkaUserConfigKafkaRestConfigResult(dict):
     @pulumi.getter(name="producerCompressionType")
     def producer_compression_type(self) -> Optional[str]:
         """
-        Enum: `gzip`, `
+        Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
         """
         return pulumi.get(self, "producer_compression_type")
 
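Note: `producer_acks` in the Kafka REST config is now documented as accepting `all` in addition to `-1`, `0`, `1`. A sketch with placeholder names:

```python
import pulumi_aiven as aiven

kafka = aiven.Kafka(
    "kafka",
    project="my-project",
    cloud_name="google-europe-west1",
    plan="business-4",
    service_name="my-kafka",
    kafka_user_config=aiven.KafkaKafkaUserConfigArgs(
        kafka_rest=True,
        kafka_rest_config=aiven.KafkaKafkaUserConfigKafkaRestConfigArgs(
            producer_acks="all",               # all/-1 waits for the full in-sync replica set
            producer_compression_type="gzip",  # gzip, snappy, lz4, zstd, or none
        ),
    ),
)
```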
@@ -34927,8 +34514,8 @@ class GetKafkaTopicConfigResult(dict):
                  segment_ms: Optional[str] = None,
                  unclean_leader_election_enable: Optional[bool] = None):
         """
-        :param str cleanup_policy: cleanup.policy value
-        :param str compression_type: compression.type value
+        :param str cleanup_policy: cleanup.policy value
+        :param str compression_type: compression.type value
         :param str delete_retention_ms: delete.retention.ms value
         :param str file_delete_delay_ms: file.delete.delay.ms value
         :param str flush_messages: flush.messages value
@@ -34939,9 +34526,9 @@ class GetKafkaTopicConfigResult(dict):
         :param str max_compaction_lag_ms: max.compaction.lag.ms value
         :param str max_message_bytes: max.message.bytes value
         :param bool message_downconversion_enable: message.downconversion.enable value
-        :param str message_format_version: message.format.version value
+        :param str message_format_version: message.format.version value
         :param str message_timestamp_difference_max_ms: message.timestamp.difference.max.ms value
-        :param str message_timestamp_type: message.timestamp.type value
+        :param str message_timestamp_type: message.timestamp.type value
         :param float min_cleanable_dirty_ratio: min.cleanable.dirty.ratio value
         :param str min_compaction_lag_ms: min.compaction.lag.ms value
         :param str min_insync_replicas: min.insync.replicas value
@@ -35014,7 +34601,7 @@ class GetKafkaTopicConfigResult(dict):
     @pulumi.getter(name="cleanupPolicy")
     def cleanup_policy(self) -> Optional[str]:
         """
-        cleanup.policy value
+        cleanup.policy value
         """
         return pulumi.get(self, "cleanup_policy")
 
@@ -35022,7 +34609,7 @@ class GetKafkaTopicConfigResult(dict):
     @pulumi.getter(name="compressionType")
     def compression_type(self) -> Optional[str]:
         """
-        compression.type value
+        compression.type value
         """
         return pulumi.get(self, "compression_type")
 
@@ -35110,7 +34697,7 @@ class GetKafkaTopicConfigResult(dict):
     @pulumi.getter(name="messageFormatVersion")
     def message_format_version(self) -> Optional[str]:
         """
-        message.format.version value
+        message.format.version value
         """
         return pulumi.get(self, "message_format_version")
 
@@ -35126,7 +34713,7 @@ class GetKafkaTopicConfigResult(dict):
     @pulumi.getter(name="messageTimestampType")
     def message_timestamp_type(self) -> Optional[str]:
         """
-        message.timestamp.type value
+        message.timestamp.type value
         """
         return pulumi.get(self, "message_timestamp_type")
 
@@ -37273,9 +36860,9 @@ class GetMySqlMysqlUserConfigMysqlResult(dict):
         :param int innodb_thread_concurrency: Defines the maximum number of threads permitted inside of InnoDB. Default is 0 (infinite concurrency - no limit). Example: `10`.
         :param int innodb_write_io_threads: The number of I/O threads for write operations in InnoDB. Default is 4. Changing this parameter will lead to a restart of the MySQL service. Example: `10`.
         :param int interactive_timeout: The number of seconds the server waits for activity on an interactive connection before closing it. Example: `3600`.
-        :param str internal_tmp_mem_storage_engine: Enum: `
-        :param str log_output: Enum: `INSIGHTS`, `
-        :param float long_query_time: The slow_query_logs work as SQL statements that take more than long_query_time seconds to execute. Example: `10
+        :param str internal_tmp_mem_storage_engine: Enum: `TempTable`, `MEMORY`. The storage engine for in-memory internal temporary tables.
+        :param str log_output: Enum: `INSIGHTS`, `NONE`, `TABLE`, `INSIGHTS,TABLE`. The slow log output destination when slow_query_log is ON. To enable MySQL AI Insights, choose INSIGHTS. To use MySQL AI Insights and the mysql.slow_log table at the same time, choose INSIGHTS,TABLE. To only use the mysql.slow_log table, choose TABLE. To silence slow logs, choose NONE.
+        :param float long_query_time: The slow_query_logs work as SQL statements that take more than long_query_time seconds to execute. Example: `10`.
         :param int max_allowed_packet: Size of the largest message in bytes that can be received by the server. Default is 67108864 (64M). Example: `67108864`.
         :param int max_heap_table_size: Limits the size of internal in-memory tables. Also set tmp_table_size. Default is 16777216 (16M). Example: `16777216`.
         :param int net_buffer_length: Start sizes of connection buffer and result buffer. Default is 16384 (16K). Changing this parameter will lead to a restart of the MySQL service. Example: `16384`.
@@ -37491,7 +37078,7 @@ class GetMySqlMysqlUserConfigMysqlResult(dict):
     @pulumi.getter(name="internalTmpMemStorageEngine")
     def internal_tmp_mem_storage_engine(self) -> Optional[str]:
         """
-        Enum: `
+        Enum: `TempTable`, `MEMORY`. The storage engine for in-memory internal temporary tables.
         """
         return pulumi.get(self, "internal_tmp_mem_storage_engine")
 
@@ -37499,7 +37086,7 @@ class GetMySqlMysqlUserConfigMysqlResult(dict):
     @pulumi.getter(name="logOutput")
     def log_output(self) -> Optional[str]:
         """
-        Enum: `INSIGHTS`, `
+        Enum: `INSIGHTS`, `NONE`, `TABLE`, `INSIGHTS,TABLE`. The slow log output destination when slow_query_log is ON. To enable MySQL AI Insights, choose INSIGHTS. To use MySQL AI Insights and the mysql.slow_log table at the same time, choose INSIGHTS,TABLE. To only use the mysql.slow_log table, choose TABLE. To silence slow logs, choose NONE.
         """
         return pulumi.get(self, "log_output")
 
@@ -37507,7 +37094,7 @@ class GetMySqlMysqlUserConfigMysqlResult(dict):
     @pulumi.getter(name="longQueryTime")
     def long_query_time(self) -> Optional[float]:
         """
-        The slow_query_logs work as SQL statements that take more than long_query_time seconds to execute. Example: `10
+        The slow_query_logs work as SQL statements that take more than long_query_time seconds to execute. Example: `10`.
         """
         return pulumi.get(self, "long_query_time")
 
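The restored `log_output` and `long_query_time` descriptions above document fields of the MySQL user config. A minimal resource-side sketch, assuming the `aiven.MySql` resource args mirror these data-source result classes (project, plan, and service names are placeholders):

import pulumi_aiven as aiven

# Hypothetical names; enum values taken from the docstrings above.
mysql = aiven.MySql("example-mysql",
    project="my-project",
    cloud_name="google-europe-west1",
    plan="business-4",
    service_name="example-mysql",
    mysql_user_config=aiven.MySqlMysqlUserConfigArgs(
        mysql=aiven.MySqlMysqlUserConfigMysqlArgs(
            log_output="INSIGHTS",  # INSIGHTS, NONE, TABLE, or "INSIGHTS,TABLE"
            long_query_time=10,     # seconds before a statement counts as slow
        ),
    ))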
@@ -37997,7 +37584,7 @@ class GetOpenSearchOpensearchUserConfigResult(dict):
         :param str additional_backup_regions: Additional Cloud Regions for Backup Replication.
         :param 'GetOpenSearchOpensearchUserConfigAzureMigrationArgs' azure_migration: Azure migration settings
         :param str custom_domain: Serve the web frontend using a custom CNAME pointing to the Aiven DNS name. Example: `grafana.example.org`.
-        :param bool disable_replication_factor_adjustment: Disable automatic replication factor adjustment for multi-node services. By default, Aiven ensures all indexes are replicated at least to two nodes. Note: Due to potential data loss in case of losing a service node, this setting can
+        :param bool disable_replication_factor_adjustment: Disable automatic replication factor adjustment for multi-node services. By default, Aiven ensures all indexes are replicated at least to two nodes. Note: Due to potential data loss in case of losing a service node, this setting can no longer be activated.
         :param 'GetOpenSearchOpensearchUserConfigGcsMigrationArgs' gcs_migration: Google Cloud Storage migration settings
         :param Sequence['GetOpenSearchOpensearchUserConfigIndexPatternArgs'] index_patterns: Index patterns
         :param 'GetOpenSearchOpensearchUserConfigIndexRollupArgs' index_rollup: Index rollup settings
@@ -38105,7 +37692,7 @@ class GetOpenSearchOpensearchUserConfigResult(dict):
     @pulumi.getter(name="disableReplicationFactorAdjustment")
     def disable_replication_factor_adjustment(self) -> Optional[bool]:
         """
-        Disable automatic replication factor adjustment for multi-node services. By default, Aiven ensures all indexes are replicated at least to two nodes. Note: Due to potential data loss in case of losing a service node, this setting can
+        Disable automatic replication factor adjustment for multi-node services. By default, Aiven ensures all indexes are replicated at least to two nodes. Note: Due to potential data loss in case of losing a service node, this setting can no longer be activated.
         """
         return pulumi.get(self, "disable_replication_factor_adjustment")
 
@@ -38301,33 +37888,28 @@ class GetOpenSearchOpensearchUserConfigAzureMigrationResult(dict):
                  account: str,
                  base_path: str,
                  container: str,
-                 indices: str,
                  snapshot_name: str,
                  chunk_size: Optional[str] = None,
                  compress: Optional[bool] = None,
                  endpoint_suffix: Optional[str] = None,
-
+                 indices: Optional[str] = None,
                  key: Optional[str] = None,
-                 restore_global_state: Optional[bool] = None,
                  sas_token: Optional[str] = None):
         """
         :param str account: Azure account name.
         :param str base_path: The path to the repository data within its container. The value of this setting should not start or end with a /.
         :param str container: Azure container name.
-        :param str indices: A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. Example: `metrics*,logs*,data-20240823`.
         :param str snapshot_name: The snapshot name to restore from.
         :param str chunk_size: Big files can be broken down into chunks during snapshotting if needed. Should be the same as for the 3rd party repository.
         :param bool compress: When set to true metadata files are stored in compressed format.
         :param str endpoint_suffix: Defines the DNS suffix for Azure Storage endpoints.
-        :param
+        :param str indices: A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. By default, a restore operation includes all data streams and indices in the snapshot. If this argument is provided, the restore operation only includes the data streams and indices that you specify. Example: `metrics*,logs*,data-20240823`.
         :param str key: Azure account secret key. One of key or sas_token should be specified.
-        :param bool restore_global_state: If true, restore the cluster state. Defaults to false.
         :param str sas_token: A shared access signatures (SAS) token. One of key or sas_token should be specified.
         """
         pulumi.set(__self__, "account", account)
         pulumi.set(__self__, "base_path", base_path)
         pulumi.set(__self__, "container", container)
-        pulumi.set(__self__, "indices", indices)
         pulumi.set(__self__, "snapshot_name", snapshot_name)
         if chunk_size is not None:
             pulumi.set(__self__, "chunk_size", chunk_size)
@@ -38335,12 +37917,10 @@ class GetOpenSearchOpensearchUserConfigAzureMigrationResult(dict):
             pulumi.set(__self__, "compress", compress)
         if endpoint_suffix is not None:
             pulumi.set(__self__, "endpoint_suffix", endpoint_suffix)
-        if
-            pulumi.set(__self__, "
+        if indices is not None:
+            pulumi.set(__self__, "indices", indices)
         if key is not None:
             pulumi.set(__self__, "key", key)
-        if restore_global_state is not None:
-            pulumi.set(__self__, "restore_global_state", restore_global_state)
         if sas_token is not None:
             pulumi.set(__self__, "sas_token", sas_token)
 
@@ -38368,14 +37948,6 @@ class GetOpenSearchOpensearchUserConfigAzureMigrationResult(dict):
         """
         return pulumi.get(self, "container")
 
-    @property
-    @pulumi.getter
-    def indices(self) -> str:
-        """
-        A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. Example: `metrics*,logs*,data-20240823`.
-        """
-        return pulumi.get(self, "indices")
-
     @property
     @pulumi.getter(name="snapshotName")
     def snapshot_name(self) -> str:
@@ -38409,12 +37981,12 @@ class GetOpenSearchOpensearchUserConfigAzureMigrationResult(dict):
         return pulumi.get(self, "endpoint_suffix")
 
     @property
-    @pulumi.getter
-    def
+    @pulumi.getter
+    def indices(self) -> Optional[str]:
         """
-
+        A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. By default, a restore operation includes all data streams and indices in the snapshot. If this argument is provided, the restore operation only includes the data streams and indices that you specify. Example: `metrics*,logs*,data-20240823`.
         """
-        return pulumi.get(self, "
+        return pulumi.get(self, "indices")
 
     @property
     @pulumi.getter
@@ -38424,14 +37996,6 @@ class GetOpenSearchOpensearchUserConfigAzureMigrationResult(dict):
         """
         return pulumi.get(self, "key")
 
-    @property
-    @pulumi.getter(name="restoreGlobalState")
-    def restore_global_state(self) -> Optional[bool]:
-        """
-        If true, restore the cluster state. Defaults to false.
-        """
-        return pulumi.get(self, "restore_global_state")
-
     @property
     @pulumi.getter(name="sasToken")
     def sas_token(self) -> Optional[str]:
@@ -38447,36 +38011,29 @@ class GetOpenSearchOpensearchUserConfigGcsMigrationResult(dict):
                  base_path: str,
                  bucket: str,
                  credentials: str,
-                 indices: str,
                  snapshot_name: str,
                  chunk_size: Optional[str] = None,
                  compress: Optional[bool] = None,
-
-                 restore_global_state: Optional[bool] = None):
+                 indices: Optional[str] = None):
         """
         :param str base_path: The path to the repository data within its container. The value of this setting should not start or end with a /.
         :param str bucket: The path to the repository data within its container.
         :param str credentials: Google Cloud Storage credentials file content.
-        :param str indices: A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. Example: `metrics*,logs*,data-20240823`.
         :param str snapshot_name: The snapshot name to restore from.
         :param str chunk_size: Big files can be broken down into chunks during snapshotting if needed. Should be the same as for the 3rd party repository.
         :param bool compress: When set to true metadata files are stored in compressed format.
-        :param
-        :param bool restore_global_state: If true, restore the cluster state. Defaults to false.
+        :param str indices: A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. By default, a restore operation includes all data streams and indices in the snapshot. If this argument is provided, the restore operation only includes the data streams and indices that you specify. Example: `metrics*,logs*,data-20240823`.
         """
         pulumi.set(__self__, "base_path", base_path)
         pulumi.set(__self__, "bucket", bucket)
         pulumi.set(__self__, "credentials", credentials)
-        pulumi.set(__self__, "indices", indices)
         pulumi.set(__self__, "snapshot_name", snapshot_name)
         if chunk_size is not None:
             pulumi.set(__self__, "chunk_size", chunk_size)
         if compress is not None:
             pulumi.set(__self__, "compress", compress)
-        if
-            pulumi.set(__self__, "
-        if restore_global_state is not None:
-            pulumi.set(__self__, "restore_global_state", restore_global_state)
+        if indices is not None:
+            pulumi.set(__self__, "indices", indices)
 
     @property
     @pulumi.getter(name="basePath")
@@ -38502,14 +38059,6 @@ class GetOpenSearchOpensearchUserConfigGcsMigrationResult(dict):
         """
         return pulumi.get(self, "credentials")
 
-    @property
-    @pulumi.getter
-    def indices(self) -> str:
-        """
-        A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. Example: `metrics*,logs*,data-20240823`.
-        """
-        return pulumi.get(self, "indices")
-
     @property
     @pulumi.getter(name="snapshotName")
     def snapshot_name(self) -> str:
@@ -38535,20 +38084,12 @@ class GetOpenSearchOpensearchUserConfigGcsMigrationResult(dict):
         return pulumi.get(self, "compress")
 
     @property
-    @pulumi.getter
-    def
-        """
-        Whether to restore aliases alongside their associated indexes. Default is true.
-        """
-        return pulumi.get(self, "include_aliases")
-
-    @property
-    @pulumi.getter(name="restoreGlobalState")
-    def restore_global_state(self) -> Optional[bool]:
+    @pulumi.getter
+    def indices(self) -> Optional[str]:
         """
-        If
+        A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. By default, a restore operation includes all data streams and indices in the snapshot. If this argument is provided, the restore operation only includes the data streams and indices that you specify. Example: `metrics*,logs*,data-20240823`.
         """
-        return pulumi.get(self, "
+        return pulumi.get(self, "indices")
 
 
 @pulumi.output_type
@@ -39686,7 +39227,7 @@ class GetOpenSearchOpensearchUserConfigOpensearchSearchBackpressureResult(dict):
                  search_shard_task: Optional['outputs.GetOpenSearchOpensearchUserConfigOpensearchSearchBackpressureSearchShardTaskResult'] = None,
                  search_task: Optional['outputs.GetOpenSearchOpensearchUserConfigOpensearchSearchBackpressureSearchTaskResult'] = None):
         """
-        :param str mode: Enum: `
+        :param str mode: Enum: `monitor_only`, `enforced`, `disabled`. The search backpressure mode. Valid values are monitor_only, enforced, or disabled. Default is monitor_only.
         :param 'GetOpenSearchOpensearchUserConfigOpensearchSearchBackpressureNodeDuressArgs' node_duress: Node duress settings
         :param 'GetOpenSearchOpensearchUserConfigOpensearchSearchBackpressureSearchShardTaskArgs' search_shard_task: Search shard settings
         :param 'GetOpenSearchOpensearchUserConfigOpensearchSearchBackpressureSearchTaskArgs' search_task: Search task settings
@@ -39704,7 +39245,7 @@ class GetOpenSearchOpensearchUserConfigOpensearchSearchBackpressureResult(dict):
     @pulumi.getter
     def mode(self) -> Optional[str]:
         """
-        Enum: `
+        Enum: `monitor_only`, `enforced`, `disabled`. The search backpressure mode. Valid values are monitor_only, enforced, or disabled. Default is monitor_only.
         """
         return pulumi.get(self, "mode")
 
@@ -40334,35 +39875,30 @@ class GetOpenSearchOpensearchUserConfigS3MigrationResult(dict):
                  access_key: str,
                  base_path: str,
                  bucket: str,
-                 indices: str,
                  region: str,
                  secret_key: str,
                  snapshot_name: str,
                  chunk_size: Optional[str] = None,
                  compress: Optional[bool] = None,
                  endpoint: Optional[str] = None,
-
-                 restore_global_state: Optional[bool] = None,
+                 indices: Optional[str] = None,
                  server_side_encryption: Optional[bool] = None):
         """
         :param str access_key: AWS Access key.
         :param str base_path: The path to the repository data within its container. The value of this setting should not start or end with a /.
         :param str bucket: S3 bucket name.
-        :param str indices: A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. Example: `metrics*,logs*,data-20240823`.
         :param str region: S3 region.
         :param str secret_key: AWS secret key.
         :param str snapshot_name: The snapshot name to restore from.
         :param str chunk_size: Big files can be broken down into chunks during snapshotting if needed. Should be the same as for the 3rd party repository.
         :param bool compress: When set to true metadata files are stored in compressed format.
         :param str endpoint: The S3 service endpoint to connect to. If you are using an S3-compatible service then you should set this to the service’s endpoint.
-        :param
-        :param bool restore_global_state: If true, restore the cluster state. Defaults to false.
+        :param str indices: A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. By default, a restore operation includes all data streams and indices in the snapshot. If this argument is provided, the restore operation only includes the data streams and indices that you specify. Example: `metrics*,logs*,data-20240823`.
         :param bool server_side_encryption: When set to true files are encrypted on server side.
         """
         pulumi.set(__self__, "access_key", access_key)
         pulumi.set(__self__, "base_path", base_path)
         pulumi.set(__self__, "bucket", bucket)
-        pulumi.set(__self__, "indices", indices)
         pulumi.set(__self__, "region", region)
         pulumi.set(__self__, "secret_key", secret_key)
         pulumi.set(__self__, "snapshot_name", snapshot_name)
@@ -40372,10 +39908,8 @@ class GetOpenSearchOpensearchUserConfigS3MigrationResult(dict):
             pulumi.set(__self__, "compress", compress)
         if endpoint is not None:
             pulumi.set(__self__, "endpoint", endpoint)
-        if
-            pulumi.set(__self__, "
-        if restore_global_state is not None:
-            pulumi.set(__self__, "restore_global_state", restore_global_state)
+        if indices is not None:
+            pulumi.set(__self__, "indices", indices)
         if server_side_encryption is not None:
             pulumi.set(__self__, "server_side_encryption", server_side_encryption)
 
@@ -40403,14 +39937,6 @@ class GetOpenSearchOpensearchUserConfigS3MigrationResult(dict):
         """
         return pulumi.get(self, "bucket")
 
-    @property
-    @pulumi.getter
-    def indices(self) -> str:
-        """
-        A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. Example: `metrics*,logs*,data-20240823`.
-        """
-        return pulumi.get(self, "indices")
-
     @property
     @pulumi.getter
     def region(self) -> str:
@@ -40460,20 +39986,12 @@ class GetOpenSearchOpensearchUserConfigS3MigrationResult(dict):
         return pulumi.get(self, "endpoint")
 
     @property
-    @pulumi.getter
-    def
-        """
-        Whether to restore aliases alongside their associated indexes. Default is true.
-        """
-        return pulumi.get(self, "include_aliases")
-
-    @property
-    @pulumi.getter(name="restoreGlobalState")
-    def restore_global_state(self) -> Optional[bool]:
+    @pulumi.getter
+    def indices(self) -> Optional[str]:
         """
-        If
+        A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. By default, a restore operation includes all data streams and indices in the snapshot. If this argument is provided, the restore operation only includes the data streams and indices that you specify. Example: `metrics*,logs*,data-20240823`.
         """
-        return pulumi.get(self, "
+        return pulumi.get(self, "indices")
 
     @property
     @pulumi.getter(name="serverSideEncryption")
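Across the Azure, GCS, and S3 migration configs above, `indices` changes from a required string to an optional one, and the `restore_global_state` and alias-restore options are dropped. A sketch of the S3 variant, assuming the resource-side `aiven.OpenSearch` args mirror these result classes (all credentials and names below are placeholders):

import pulumi_aiven as aiven

opensearch = aiven.OpenSearch("example-os",
    project="my-project",
    cloud_name="google-europe-west1",
    plan="business-4",
    service_name="example-os",
    opensearch_user_config=aiven.OpenSearchOpensearchUserConfigArgs(
        s3_migration=aiven.OpenSearchOpensearchUserConfigS3MigrationArgs(
            access_key="AKIA...",          # placeholder AWS access key
            secret_key="...",              # placeholder AWS secret key
            region="eu-west-1",
            bucket="my-snapshots",
            base_path="snapshots",
            snapshot_name="snap-2024-08-23",
            indices="metrics*,logs*",      # now optional; omit to restore everything
        ),
    ))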
@@ -41233,7 +40751,7 @@ class GetPgPgUserConfigResult(dict):
         :param str service_to_fork_from: Name of another service to fork from. This has effect only when a new service is being created. Example: `anotherservicename`.
         :param float shared_buffers_percentage: Percentage of total RAM that the database server uses for shared memory buffers. Valid range is 20-60 (float), which corresponds to 20% - 60%. This setting adjusts the shared_buffers configuration value. Example: `41.5`.
         :param bool static_ips: Use static public IP addresses.
-        :param str synchronous_replication: Enum: `
+        :param str synchronous_replication: Enum: `quorum`, `off`. Synchronous replication type. Note that the service plan also needs to support synchronous replication.
         :param 'GetPgPgUserConfigTimescaledbArgs' timescaledb: System-wide settings for the timescaledb extension
         :param str variant: Enum: `aiven`, `timescale`. Variant of the PostgreSQL service, may affect the features that are exposed by default.
         :param int work_mem: Sets the maximum amount of memory to be used by a query operation (such as a sort or hash table) before writing to temporary disk files, in MB. Default is 1MB + 0.075% of total RAM (up to 32MB). Example: `4`.
@@ -41535,7 +41053,7 @@ class GetPgPgUserConfigResult(dict):
     @pulumi.getter(name="synchronousReplication")
     def synchronous_replication(self) -> Optional[str]:
         """
-        Enum: `
+        Enum: `quorum`, `off`. Synchronous replication type. Note that the service plan also needs to support synchronous replication.
         """
         return pulumi.get(self, "synchronous_replication")
 
@@ -41778,8 +41296,8 @@ class GetPgPgUserConfigPgResult(dict):
         :param int idle_in_transaction_session_timeout: Time out sessions with open transactions after this number of milliseconds.
         :param bool jit: Controls system-wide use of Just-in-Time Compilation (JIT).
         :param int log_autovacuum_min_duration: Causes each action executed by autovacuum to be logged if it ran for at least the specified number of milliseconds. Setting this to zero logs all autovacuum actions. Minus-one (the default) disables logging autovacuum actions.
-        :param str log_error_verbosity: Enum: `
-        :param str log_line_prefix: Enum: `'
+        :param str log_error_verbosity: Enum: `TERSE`, `DEFAULT`, `VERBOSE`. Controls the amount of detail written in the server log for each message that is logged.
+        :param str log_line_prefix: Enum: `'pid=%p,user=%u,db=%d,app=%a,client=%h '`, `'%t [%p]: [%l-1] user=%u,db=%d,app=%a,client=%h '`, `'%m [%p] %q[user=%u,db=%d,app=%a] '`, `'pid=%p,user=%u,db=%d,app=%a,client=%h,txid=%x,qid=%Q '`. Choose from one of the available log formats.
         :param int log_min_duration_statement: Log statements that take more than this number of milliseconds to run, -1 disables.
         :param int log_temp_files: Log statements for each temporary file created larger than this number of kilobytes, -1 disables.
         :param int max_files_per_process: PostgreSQL maximum number of files that can be open per process.
@@ -41800,12 +41318,12 @@ class GetPgPgUserConfigPgResult(dict):
         :param str pg_partman_bgw_dot_role: Controls which role to use for pg_partman's scheduled background tasks. Example: `myrolename`.
         :param bool pg_stat_monitor_dot_pgsm_enable_query_plan: Enables or disables query plan monitoring.
         :param int pg_stat_monitor_dot_pgsm_max_buckets: Sets the maximum number of buckets. Example: `10`.
-        :param str pg_stat_statements_dot_track: Enum: `all`, `
+        :param str pg_stat_statements_dot_track: Enum: `all`, `top`, `none`. Controls which statements are counted. Specify top to track top-level statements (those issued directly by clients), all to also track nested statements (such as statements invoked within functions), or none to disable statement statistics collection. The default value is top.
         :param int temp_file_limit: PostgreSQL temporary file limit in KiB, -1 for unlimited. Example: `5000000`.
         :param str timezone: PostgreSQL service timezone. Example: `Europe/Helsinki`.
         :param int track_activity_query_size: Specifies the number of bytes reserved to track the currently executing command for each active session. Example: `1024`.
         :param str track_commit_timestamp: Enum: `off`, `on`. Record commit time of transactions.
-        :param str track_functions: Enum: `all`, `
+        :param str track_functions: Enum: `all`, `pl`, `none`. Enables tracking of function call counts and time used.
         :param str track_io_timing: Enum: `off`, `on`. Enables timing of database I/O calls. This parameter is off by default, because it will repeatedly query the operating system for the current time, which may cause significant overhead on some platforms.
         :param int wal_sender_timeout: Terminate replication connections that are inactive for longer than this amount of time, in milliseconds. Setting this value to zero disables the timeout. Example: `60000`.
         :param int wal_writer_delay: WAL flush interval in milliseconds. Note that setting this value to lower than the default 200ms may negatively impact performance. Example: `50`.
@@ -42057,7 +41575,7 @@ class GetPgPgUserConfigPgResult(dict):
     @pulumi.getter(name="logErrorVerbosity")
     def log_error_verbosity(self) -> Optional[str]:
         """
-        Enum: `
+        Enum: `TERSE`, `DEFAULT`, `VERBOSE`. Controls the amount of detail written in the server log for each message that is logged.
         """
         return pulumi.get(self, "log_error_verbosity")
 
@@ -42065,7 +41583,7 @@ class GetPgPgUserConfigPgResult(dict):
     @pulumi.getter(name="logLinePrefix")
     def log_line_prefix(self) -> Optional[str]:
         """
-        Enum: `'
+        Enum: `'pid=%p,user=%u,db=%d,app=%a,client=%h '`, `'%t [%p]: [%l-1] user=%u,db=%d,app=%a,client=%h '`, `'%m [%p] %q[user=%u,db=%d,app=%a] '`, `'pid=%p,user=%u,db=%d,app=%a,client=%h,txid=%x,qid=%Q '`. Choose from one of the available log formats.
         """
         return pulumi.get(self, "log_line_prefix")
 
@@ -42233,7 +41751,7 @@ class GetPgPgUserConfigPgResult(dict):
     @pulumi.getter(name="pgStatStatementsDotTrack")
     def pg_stat_statements_dot_track(self) -> Optional[str]:
         """
-        Enum: `all`, `
+        Enum: `all`, `top`, `none`. Controls which statements are counted. Specify top to track top-level statements (those issued directly by clients), all to also track nested statements (such as statements invoked within functions), or none to disable statement statistics collection. The default value is top.
         """
         return pulumi.get(self, "pg_stat_statements_dot_track")
 
@@ -42273,7 +41791,7 @@ class GetPgPgUserConfigPgResult(dict):
     @pulumi.getter(name="trackFunctions")
     def track_functions(self) -> Optional[str]:
         """
-        Enum: `all`, `
+        Enum: `all`, `pl`, `none`. Enables tracking of function call counts and time used.
         """
         return pulumi.get(self, "track_functions")
 
@@ -42579,7 +42097,7 @@ class GetPgPgUserConfigPgbouncerResult(dict):
         """
         :param int autodb_idle_timeout: If the automatically created database pools have been unused this many seconds, they are freed. If 0 then timeout is disabled. (seconds). Default: `3600`.
         :param int autodb_max_db_connections: Do not allow more than this many server connections per database (regardless of user). Setting it to 0 means unlimited. Example: `0`.
-        :param str autodb_pool_mode: Enum: `session`, `
+        :param str autodb_pool_mode: Enum: `session`, `transaction`, `statement`. PGBouncer pool mode. Default: `transaction`.
         :param int autodb_pool_size: If non-zero then create automatically a pool of that size per user when a pool doesn't exist. Default: `0`.
         :param Sequence[str] ignore_startup_parameters: List of parameters to ignore when given in startup packet.
         :param int max_prepared_statements: PgBouncer tracks protocol-level named prepared statements related commands sent by the client in transaction and statement pooling modes when max_prepared_statements is set to a non-zero value. Setting it to 0 disables prepared statements. max_prepared_statements defaults to 100, and its maximum is 3000. Default: `100`.
@@ -42629,7 +42147,7 @@ class GetPgPgUserConfigPgbouncerResult(dict):
     @pulumi.getter(name="autodbPoolMode")
     def autodb_pool_mode(self) -> Optional[str]:
         """
-        Enum: `session`, `
+        Enum: `session`, `transaction`, `statement`. PGBouncer pool mode. Default: `transaction`.
         """
         return pulumi.get(self, "autodb_pool_mode")
 
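The `autodb_pool_mode` enum is now spelled out in full. A minimal sketch of setting it on the resource side, assuming `aiven.Pg` and `PgPgUserConfigPgbouncerArgs` mirror the result classes above (names are placeholders):

import pulumi_aiven as aiven

pg = aiven.Pg("example-pg",
    project="my-project",
    cloud_name="google-europe-west1",
    plan="startup-4",
    service_name="example-pg",
    pg_user_config=aiven.PgPgUserConfigArgs(
        pgbouncer=aiven.PgPgUserConfigPgbouncerArgs(
            autodb_pool_mode="transaction",  # session, transaction, or statement
            autodb_pool_size=10,             # per-user pool created on demand
        ),
    ))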
@@ -43155,7 +42673,7 @@ class GetRedisRedisUserConfigResult(dict):
         :param int redis_io_threads: Set Redis IO thread count. Changing this will cause a restart of the Redis service. Example: `1`.
         :param int redis_lfu_decay_time: LFU maxmemory-policy counter decay time in minutes. Default: `1`.
         :param int redis_lfu_log_factor: Counter logarithm factor for volatile-lfu and allkeys-lfu maxmemory-policies. Default: `10`.
-        :param str redis_maxmemory_policy: Enum: `allkeys-
+        :param str redis_maxmemory_policy: Enum: `noeviction`, `allkeys-lru`, `volatile-lru`, `allkeys-random`, `volatile-random`, `volatile-ttl`, `volatile-lfu`, `allkeys-lfu`. Redis maxmemory-policy. Default: `noeviction`.
         :param str redis_notify_keyspace_events: Set notify-keyspace-events option.
         :param int redis_number_of_databases: Set number of Redis databases. Changing this will cause a restart of the Redis service. Example: `16`.
         :param str redis_persistence: Enum: `off`, `rdb`. When persistence is `rdb`, Redis does RDB dumps each 10 minutes if any key is changed. Also RDB dumps are done according to the backup schedule for backup purposes. When persistence is `off`, no RDB dumps or backups are done, so data can be lost at any moment if the service is restarted for any reason, or if the service is powered off. Also, the service can't be forked.
@@ -43355,7 +42873,7 @@ class GetRedisRedisUserConfigResult(dict):
     @pulumi.getter(name="redisMaxmemoryPolicy")
     def redis_maxmemory_policy(self) -> Optional[str]:
         """
-        Enum: `allkeys-
+        Enum: `noeviction`, `allkeys-lru`, `volatile-lru`, `allkeys-random`, `volatile-random`, `volatile-ttl`, `volatile-lfu`, `allkeys-lfu`. Redis maxmemory-policy. Default: `noeviction`.
         """
         return pulumi.get(self, "redis_maxmemory_policy")
 
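The eviction-policy enum above now lists all eight values. As a usage sketch, assuming the `aiven.Redis` resource args mirror this result class (names are placeholders):

import pulumi_aiven as aiven

redis = aiven.Redis("example-redis",
    project="my-project",
    cloud_name="google-europe-west1",
    plan="business-4",
    service_name="example-redis",
    redis_user_config=aiven.RedisRedisUserConfigArgs(
        redis_maxmemory_policy="allkeys-lru",  # any of the policies listed above
    ))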
@@ -43791,11 +43309,11 @@ class GetServiceIntegrationClickhouseKafkaUserConfigTableResult(dict):
                  thread_per_consumer: Optional[bool] = None):
         """
         :param Sequence['GetServiceIntegrationClickhouseKafkaUserConfigTableColumnArgs'] columns: Table columns
-        :param str data_format: Enum: `Avro`, `
+        :param str data_format: Enum: `Avro`, `CSV`, `JSONAsString`, `JSONCompactEachRow`, `JSONCompactStringsEachRow`, `JSONEachRow`, `JSONStringsEachRow`, `MsgPack`, `TSKV`, `TSV`, `TabSeparated`, `RawBLOB`, `AvroConfluent`, `Parquet`. Message data format. Default: `JSONEachRow`.
         :param str group_name: Kafka consumers group. Default: `clickhouse`.
         :param str name: Name of the table. Example: `events`.
         :param Sequence['GetServiceIntegrationClickhouseKafkaUserConfigTableTopicArgs'] topics: Kafka topics
-        :param str auto_offset_reset: Enum: `
+        :param str auto_offset_reset: Enum: `smallest`, `earliest`, `beginning`, `largest`, `latest`, `end`. Action to take when there is no initial offset in offset store or the desired offset is out of range. Default: `earliest`.
         :param str date_time_input_format: Enum: `basic`, `best_effort`, `best_effort_us`. Method to read DateTime from text input formats. Default: `basic`.
         :param str handle_error_mode: Enum: `default`, `stream`. How to handle errors for Kafka engine. Default: `default`.
         :param int max_block_size: Number of row collected by poll(s) for flushing data from Kafka. Default: `0`.
@@ -43844,7 +43362,7 @@ class GetServiceIntegrationClickhouseKafkaUserConfigTableResult(dict):
     @pulumi.getter(name="dataFormat")
     def data_format(self) -> str:
         """
-        Enum: `Avro`, `
+        Enum: `Avro`, `CSV`, `JSONAsString`, `JSONCompactEachRow`, `JSONCompactStringsEachRow`, `JSONEachRow`, `JSONStringsEachRow`, `MsgPack`, `TSKV`, `TSV`, `TabSeparated`, `RawBLOB`, `AvroConfluent`, `Parquet`. Message data format. Default: `JSONEachRow`.
         """
         return pulumi.get(self, "data_format")
 
@@ -43876,7 +43394,7 @@ class GetServiceIntegrationClickhouseKafkaUserConfigTableResult(dict):
     @pulumi.getter(name="autoOffsetReset")
     def auto_offset_reset(self) -> Optional[str]:
         """
-        Enum: `
+        Enum: `smallest`, `earliest`, `beginning`, `largest`, `latest`, `end`. Action to take when there is no initial offset in offset store or the desired offset is out of range. Default: `earliest`.
         """
         return pulumi.get(self, "auto_offset_reset")
 
@@ -44305,53 +43823,6 @@ class GetServiceIntegrationDatadogUserConfigRedisResult(dict):
         return pulumi.get(self, "command_stats_enabled")
 
 
-@pulumi.output_type
-class GetServiceIntegrationEndpointAutoscalerUserConfigResult(dict):
-    def __init__(__self__, *,
-                 autoscalings: Sequence['outputs.GetServiceIntegrationEndpointAutoscalerUserConfigAutoscalingResult']):
-        """
-        :param Sequence['GetServiceIntegrationEndpointAutoscalerUserConfigAutoscalingArgs'] autoscalings: Configure autoscaling thresholds for a service
-        """
-        pulumi.set(__self__, "autoscalings", autoscalings)
-
-    @property
-    @pulumi.getter
-    def autoscalings(self) -> Sequence['outputs.GetServiceIntegrationEndpointAutoscalerUserConfigAutoscalingResult']:
-        """
-        Configure autoscaling thresholds for a service
-        """
-        return pulumi.get(self, "autoscalings")
-
-
-@pulumi.output_type
-class GetServiceIntegrationEndpointAutoscalerUserConfigAutoscalingResult(dict):
-    def __init__(__self__, *,
-                 cap_gb: int,
-                 type: str):
-        """
-        :param int cap_gb: The maximum total disk size (in gb) to allow autoscaler to scale up to. Example: `300`.
-        :param str type: Enum: `autoscale_disk`. Type of autoscale event.
-        """
-        pulumi.set(__self__, "cap_gb", cap_gb)
-        pulumi.set(__self__, "type", type)
-
-    @property
-    @pulumi.getter(name="capGb")
-    def cap_gb(self) -> int:
-        """
-        The maximum total disk size (in gb) to allow autoscaler to scale up to. Example: `300`.
-        """
-        return pulumi.get(self, "cap_gb")
-
-    @property
-    @pulumi.getter
-    def type(self) -> str:
-        """
-        Enum: `autoscale_disk`. Type of autoscale event.
-        """
-        return pulumi.get(self, "type")
-
-
 @pulumi.output_type
 class GetServiceIntegrationEndpointDatadogUserConfigResult(dict):
     def __init__(__self__, *,
@@ -44369,7 +43840,7 @@ class GetServiceIntegrationEndpointDatadogUserConfigResult(dict):
         :param int kafka_consumer_check_instances: Number of separate instances to fetch kafka consumer statistics with. Example: `8`.
         :param int kafka_consumer_stats_timeout: Number of seconds that datadog will wait to get consumer statistics from brokers. Example: `60`.
         :param int max_partition_contexts: Maximum number of partition contexts to send. Example: `32000`.
-        :param str site: Enum: `
+        :param str site: Enum: `datadoghq.com`, `datadoghq.eu`, `us3.datadoghq.com`, `us5.datadoghq.com`, `ddog-gov.com`, `ap1.datadoghq.com`. Datadog intake site. Defaults to datadoghq.com.
         """
         pulumi.set(__self__, "datadog_api_key", datadog_api_key)
         if datadog_tags is not None:
@@ -44437,7 +43908,7 @@ class GetServiceIntegrationEndpointDatadogUserConfigResult(dict):
     @pulumi.getter
     def site(self) -> Optional[str]:
         """
-        Enum: `
+        Enum: `datadoghq.com`, `datadoghq.eu`, `us3.datadoghq.com`, `us5.datadoghq.com`, `ddog-gov.com`, `ap1.datadoghq.com`. Datadog intake site. Defaults to datadoghq.com.
         """
         return pulumi.get(self, "site")
 
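The `site` docstring above now enumerates the Datadog intake sites. A minimal sketch of configuring one on the endpoint resource, assuming `aiven.ServiceIntegrationEndpoint` takes a `datadog_user_config` mirroring this result class (project name and API key are placeholders):

import pulumi_aiven as aiven

datadog_endpoint = aiven.ServiceIntegrationEndpoint("datadog-endpoint",
    project="my-project",
    endpoint_name="datadog",
    endpoint_type="datadog",
    datadog_user_config=aiven.ServiceIntegrationEndpointDatadogUserConfigArgs(
        datadog_api_key="<api-key>",  # placeholder secret
        site="datadoghq.eu",          # one of the intake sites listed above
    ))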
@@ -44820,7 +44291,7 @@ class GetServiceIntegrationEndpointExternalKafkaUserConfigResult(dict):
                  ssl_endpoint_identification_algorithm: Optional[str] = None):
         """
         :param str bootstrap_servers: Bootstrap servers. Example: `10.0.0.1:9092,10.0.0.2:9092`.
-        :param str security_protocol: Enum: `PLAINTEXT`, `
+        :param str security_protocol: Enum: `PLAINTEXT`, `SSL`, `SASL_PLAINTEXT`, `SASL_SSL`. Security protocol.
         :param str sasl_mechanism: Enum: `PLAIN`, `SCRAM-SHA-256`, `SCRAM-SHA-512`. SASL mechanism used for connections to the Kafka server.
         :param str sasl_plain_password: Password for SASL PLAIN mechanism in the Kafka server. Example: `admin`.
         :param str sasl_plain_username: Username for SASL PLAIN mechanism in the Kafka server. Example: `admin`.
@@ -44867,7 +44338,7 @@ class GetServiceIntegrationEndpointExternalKafkaUserConfigResult(dict):
     @pulumi.getter(name="securityProtocol")
     def security_protocol(self) -> str:
         """
-        Enum: `PLAINTEXT`, `
+        Enum: `PLAINTEXT`, `SSL`, `SASL_PLAINTEXT`, `SASL_SSL`. Security protocol.
         """
         return pulumi.get(self, "security_protocol")
 
@@ -45114,7 +44585,7 @@ class GetServiceIntegrationEndpointExternalPostgresqlResult(dict):
         :param str ssl_client_key: Client key. Example: `-----BEGIN PRIVATE KEY-----
         ...
         -----END PRIVATE KEY-----`.
-        :param str ssl_mode: Enum: `
+        :param str ssl_mode: Enum: `disable`, `allow`, `prefer`, `require`, `verify-ca`, `verify-full`. SSL mode to use for the connection. Please note that Aiven requires TLS for all connections to external PostgreSQL services. Default: `verify-full`.
         :param str ssl_root_cert: SSL Root Cert. Example: `-----BEGIN CERTIFICATE-----
         ...
         -----END CERTIFICATE-----
@@ -45201,7 +44672,7 @@ class GetServiceIntegrationEndpointExternalPostgresqlResult(dict):
     @pulumi.getter(name="sslMode")
     def ssl_mode(self) -> Optional[str]:
         """
-        Enum: `
+        Enum: `disable`, `allow`, `prefer`, `require`, `verify-ca`, `verify-full`. SSL mode to use for the connection. Please note that Aiven requires TLS for all connections to external PostgreSQL services. Default: `verify-full`.
        """
         return pulumi.get(self, "ssl_mode")
 
@@ -45217,49 +44688,6 @@ class GetServiceIntegrationEndpointExternalPostgresqlResult(dict):
         return pulumi.get(self, "ssl_root_cert")
 
 
-@pulumi.output_type
-class GetServiceIntegrationEndpointExternalPrometheusUserConfigResult(dict):
-    def __init__(__self__, *,
-                 basic_auth_password: Optional[str] = None,
-                 basic_auth_username: Optional[str] = None,
-                 service_uri: Optional[str] = None):
-        """
-        :param str basic_auth_password: Prometheus basic authentication password. Example: `fhyFNBjj3R`.
-        :param str basic_auth_username: Prometheus basic authentication username. Example: `prom4851`.
-        :param str service_uri: Prometheus enabled write endpoint. Example: `https://write.example.com/`.
-        """
-        if basic_auth_password is not None:
-            pulumi.set(__self__, "basic_auth_password", basic_auth_password)
-        if basic_auth_username is not None:
-            pulumi.set(__self__, "basic_auth_username", basic_auth_username)
-        if service_uri is not None:
-            pulumi.set(__self__, "service_uri", service_uri)
-
-    @property
-    @pulumi.getter(name="basicAuthPassword")
-    def basic_auth_password(self) -> Optional[str]:
-        """
-        Prometheus basic authentication password. Example: `fhyFNBjj3R`.
-        """
-        return pulumi.get(self, "basic_auth_password")
-
-    @property
-    @pulumi.getter(name="basicAuthUsername")
-    def basic_auth_username(self) -> Optional[str]:
-        """
-        Prometheus basic authentication username. Example: `prom4851`.
-        """
-        return pulumi.get(self, "basic_auth_username")
-
-    @property
-    @pulumi.getter(name="serviceUri")
-    def service_uri(self) -> Optional[str]:
-        """
-        Prometheus enabled write endpoint. Example: `https://write.example.com/`.
-        """
-        return pulumi.get(self, "service_uri")
-
-
 @pulumi.output_type
 class GetServiceIntegrationEndpointExternalSchemaRegistryUserConfigResult(dict):
     def __init__(__self__, *,
@@ -45268,7 +44696,7 @@ class GetServiceIntegrationEndpointExternalSchemaRegistryUserConfigResult(dict):
                  basic_auth_password: Optional[str] = None,
                  basic_auth_username: Optional[str] = None):
         """
-        :param str authentication: Enum: `
+        :param str authentication: Enum: `none`, `basic`. Authentication method.
         :param str url: Schema Registry URL. Example: `https://schema-registry.kafka.company.com:28419`.
         :param str basic_auth_password: Basic authentication password. Example: `Zm9vYg==`.
         :param str basic_auth_username: Basic authentication user name. Example: `avnadmin`.
@@ -45284,7 +44712,7 @@ class GetServiceIntegrationEndpointExternalSchemaRegistryUserConfigResult(dict):
     @pulumi.getter
     def authentication(self) -> str:
         """
-        Enum: `
+        Enum: `none`, `basic`. Authentication method.
         """
         return pulumi.get(self, "authentication")
 
@@ -45389,7 +44817,7 @@ class GetServiceIntegrationEndpointRsyslogUserConfigResult(dict):
                  max_message_size: Optional[int] = None,
                  sd: Optional[str] = None):
         """
-        :param str format: Enum: `
+        :param str format: Enum: `rfc5424`, `rfc3164`, `custom`. Message format. Default: `rfc5424`.
         :param int port: Rsyslog server port. Default: `514`.
         :param str server: Rsyslog server IP address or hostname. Example: `logs.example.com`.
         :param bool tls: Require TLS. Default: `true`.
@@ -45430,7 +44858,7 @@ class GetServiceIntegrationEndpointRsyslogUserConfigResult(dict):
     @pulumi.getter
     def format(self) -> str:
         """
-        Enum: `
+        Enum: `rfc5424`, `rfc3164`, `custom`. Message format. Default: `rfc5424`.
         """
         return pulumi.get(self, "format")
 
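The rsyslog `format` enum is now complete. A sketch of the corresponding endpoint resource, assuming `aiven.ServiceIntegrationEndpoint` accepts an `rsyslog_user_config` mirroring this result class (server and project names are placeholders):

import pulumi_aiven as aiven

rsyslog_endpoint = aiven.ServiceIntegrationEndpoint("rsyslog-endpoint",
    project="my-project",
    endpoint_name="rsyslog",
    endpoint_type="rsyslog",
    rsyslog_user_config=aiven.ServiceIntegrationEndpointRsyslogUserConfigArgs(
        server="logs.example.com",  # placeholder rsyslog server
        port=514,
        format="rfc5424",           # rfc5424, rfc3164, or custom
        tls=True,
    ))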
@@ -45833,7 +45261,7 @@ class GetServiceIntegrationKafkaMirrormakerUserConfigKafkaMirrormakerResult(dict
         :param int consumer_max_poll_records: Set consumer max.poll.records. The default is 500. Example: `500`.
         :param int producer_batch_size: The batch size in bytes producer will attempt to collect before publishing to broker. Example: `1024`.
         :param int producer_buffer_memory: The amount of bytes producer can use for buffering data before publishing to broker. Example: `8388608`.
-        :param str producer_compression_type: Enum: `gzip`, `
+        :param str producer_compression_type: Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
         :param int producer_linger_ms: The linger time (ms) for waiting new data to arrive for publishing. Example: `100`.
         :param int producer_max_request_size: The maximum request size in bytes. Example: `1048576`.
         """
@@ -45898,7 +45326,7 @@ class GetServiceIntegrationKafkaMirrormakerUserConfigKafkaMirrormakerResult(dict
     @pulumi.getter(name="producerCompressionType")
     def producer_compression_type(self) -> Optional[str]:
         """
-        Enum: `gzip`, `
+        Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.
         """
         return pulumi.get(self, "producer_compression_type")
 
@@ -46670,12 +46098,8 @@ class GetThanosThanosUserConfigResult(dict):
                  public_access: Optional['outputs.GetThanosThanosUserConfigPublicAccessResult'] = None,
                  query: Optional['outputs.GetThanosThanosUserConfigQueryResult'] = None,
                  query_frontend: Optional['outputs.GetThanosThanosUserConfigQueryFrontendResult'] = None,
-                 receiver_ingesting: Optional[Mapping[str, str]] = None,
-                 receiver_routing: Optional[Mapping[str, str]] = None,
-                 ruler: Optional[Mapping[str, str]] = None,
                  service_log: Optional[bool] = None,
-                 static_ips: Optional[bool] = None
-                 store: Optional[Mapping[str, str]] = None):
+                 static_ips: Optional[bool] = None):
         """
         :param 'GetThanosThanosUserConfigCompactorArgs' compactor: ThanosCompactor
         :param Mapping[str, str] env: Environmental variables.
@@ -46686,12 +46110,8 @@ class GetThanosThanosUserConfigResult(dict):
         :param 'GetThanosThanosUserConfigPublicAccessArgs' public_access: Allow access to selected service ports from the public Internet
         :param 'GetThanosThanosUserConfigQueryArgs' query: ThanosQuery
         :param 'GetThanosThanosUserConfigQueryFrontendArgs' query_frontend: ThanosQueryFrontend
-        :param Mapping[str, str] receiver_ingesting: CommonReceive.
-        :param Mapping[str, str] receiver_routing: ThanosReceiveRouting.
-        :param Mapping[str, str] ruler: ThanosRuler.
         :param bool service_log: Store logs for the service so that they are available in the HTTP API and console.
         :param bool static_ips: Use static public IP addresses.
-        :param Mapping[str, str] store: ThanosStore.
         """
         if compactor is not None:
             pulumi.set(__self__, "compactor", compactor)
@@ -46711,18 +46131,10 @@ class GetThanosThanosUserConfigResult(dict):
             pulumi.set(__self__, "query", query)
         if query_frontend is not None:
             pulumi.set(__self__, "query_frontend", query_frontend)
-        if receiver_ingesting is not None:
-            pulumi.set(__self__, "receiver_ingesting", receiver_ingesting)
-        if receiver_routing is not None:
-            pulumi.set(__self__, "receiver_routing", receiver_routing)
-        if ruler is not None:
-            pulumi.set(__self__, "ruler", ruler)
         if service_log is not None:
             pulumi.set(__self__, "service_log", service_log)
         if static_ips is not None:
             pulumi.set(__self__, "static_ips", static_ips)
-        if store is not None:
-            pulumi.set(__self__, "store", store)
 
     @property
     @pulumi.getter
@@ -46798,30 +46210,6 @@ class GetThanosThanosUserConfigResult(dict):
         """
         return pulumi.get(self, "query_frontend")
 
-    @property
-    @pulumi.getter(name="receiverIngesting")
-    def receiver_ingesting(self) -> Optional[Mapping[str, str]]:
-        """
-        CommonReceive.
-        """
-        return pulumi.get(self, "receiver_ingesting")
-
-    @property
-    @pulumi.getter(name="receiverRouting")
-    def receiver_routing(self) -> Optional[Mapping[str, str]]:
-        """
-        ThanosReceiveRouting.
-        """
-        return pulumi.get(self, "receiver_routing")
-
-    @property
-    @pulumi.getter
-    def ruler(self) -> Optional[Mapping[str, str]]:
-        """
-        ThanosRuler.
-        """
-        return pulumi.get(self, "ruler")
-
     @property
     @pulumi.getter(name="serviceLog")
     def service_log(self) -> Optional[bool]:
@@ -46838,14 +46226,6 @@ class GetThanosThanosUserConfigResult(dict):
         """
         return pulumi.get(self, "static_ips")
 
-    @property
-    @pulumi.getter
-    def store(self) -> Optional[Mapping[str, str]]:
-        """
-        ThanosStore.
-        """
-        return pulumi.get(self, "store")
-
 
 @pulumi.output_type
 class GetThanosThanosUserConfigCompactorResult(dict):
@@ -47356,7 +46736,7 @@ class GetValkeyValkeyUserConfigResult(dict):
         :param int valkey_io_threads: Set Valkey IO thread count. Changing this will cause a restart of the Valkey service. Example: `1`.
         :param int valkey_lfu_decay_time: LFU maxmemory-policy counter decay time in minutes. Default: `1`.
         :param int valkey_lfu_log_factor: Counter logarithm factor for volatile-lfu and allkeys-lfu maxmemory-policies. Default: `10`.
-        :param str valkey_maxmemory_policy: Enum: `allkeys-
+        :param str valkey_maxmemory_policy: Enum: `noeviction`, `allkeys-lru`, `volatile-lru`, `allkeys-random`, `volatile-random`, `volatile-ttl`, `volatile-lfu`, `allkeys-lfu`. Valkey maxmemory-policy. Default: `noeviction`.
         :param str valkey_notify_keyspace_events: Set notify-keyspace-events option.
         :param int valkey_number_of_databases: Set number of Valkey databases. Changing this will cause a restart of the Valkey service. Example: `16`.
         :param str valkey_persistence: Enum: `off`, `rdb`. When persistence is `rdb`, Valkey does RDB dumps each 10 minutes if any key is changed. Also RDB dumps are done according to backup schedule for backup purposes. When persistence is `off`, no RDB dumps and backups are done, so data can be lost at any moment if service is restarted for any reason, or if service is powered off. Also service can't be forked.
@@ -47574,7 +46954,7 @@ class GetValkeyValkeyUserConfigResult(dict):
     @pulumi.getter(name="valkeyMaxmemoryPolicy")
     def valkey_maxmemory_policy(self) -> Optional[str]:
         """
-        Enum: `allkeys-
+        Enum: `noeviction`, `allkeys-lru`, `volatile-lru`, `allkeys-random`, `volatile-random`, `volatile-ttl`, `volatile-lfu`, `allkeys-lfu`. Valkey maxmemory-policy. Default: `noeviction`.
         """
         return pulumi.get(self, "valkey_maxmemory_policy")
 