pulumi-aiven 6.15.0a1715356672-py3-none-any.whl → 6.16.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of pulumi-aiven has been flagged as a potentially problematic release.

Files changed (39)
  1. pulumi_aiven/_inputs.py +1096 -216
  2. pulumi_aiven/cassandra.py +48 -1
  3. pulumi_aiven/clickhouse.py +48 -1
  4. pulumi_aiven/dragonfly.py +48 -1
  5. pulumi_aiven/flink.py +2 -2
  6. pulumi_aiven/flink_application_deployment.py +56 -30
  7. pulumi_aiven/gcp_privatelink.py +52 -30
  8. pulumi_aiven/gcp_privatelink_connection_approval.py +54 -30
  9. pulumi_aiven/get_cassanda.py +14 -1
  10. pulumi_aiven/get_cassandra.py +14 -1
  11. pulumi_aiven/get_clickhouse.py +14 -1
  12. pulumi_aiven/get_dragonfly.py +14 -1
  13. pulumi_aiven/get_gcp_privatelink.py +45 -2
  14. pulumi_aiven/get_grafana.py +14 -1
  15. pulumi_aiven/get_m3_aggregator.py +14 -1
  16. pulumi_aiven/get_m3_db.py +14 -1
  17. pulumi_aiven/get_mirror_maker_replication_flow.py +1 -1
  18. pulumi_aiven/get_my_sql.py +14 -1
  19. pulumi_aiven/get_redis.py +14 -1
  20. pulumi_aiven/get_service_integration.py +3 -3
  21. pulumi_aiven/get_service_integration_endpoint.py +1 -1
  22. pulumi_aiven/grafana.py +48 -1
  23. pulumi_aiven/influx_db.py +21 -2
  24. pulumi_aiven/kafka.py +20 -1
  25. pulumi_aiven/m3_aggregator.py +48 -1
  26. pulumi_aiven/m3_db.py +48 -1
  27. pulumi_aiven/mirror_maker_replication_flow.py +7 -7
  28. pulumi_aiven/my_sql.py +48 -1
  29. pulumi_aiven/open_search.py +21 -2
  30. pulumi_aiven/outputs.py +1758 -285
  31. pulumi_aiven/pg.py +2 -2
  32. pulumi_aiven/pulumi-plugin.json +2 -1
  33. pulumi_aiven/redis.py +48 -1
  34. pulumi_aiven/service_integration.py +7 -7
  35. pulumi_aiven/service_integration_endpoint.py +7 -7
  36. {pulumi_aiven-6.15.0a1715356672.dist-info → pulumi_aiven-6.16.0.dist-info}/METADATA +1 -1
  37. {pulumi_aiven-6.15.0a1715356672.dist-info → pulumi_aiven-6.16.0.dist-info}/RECORD +39 -39
  38. {pulumi_aiven-6.15.0a1715356672.dist-info → pulumi_aiven-6.16.0.dist-info}/WHEEL +0 -0
  39. {pulumi_aiven-6.15.0a1715356672.dist-info → pulumi_aiven-6.16.0.dist-info}/top_level.txt +0 -0
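
Most of the changes in this release add connection-info output types to pulumi_aiven/outputs.py (for example CassandraCassandra, ClickhouseClickhouse, DragonflyDragonfly, GrafanaGrafana and M3AggregatorM3aggregator), extend existing ones such as InfluxDbInfluxdb and KafkaKafka with `uris` and credential fields, and prefix user-config docstrings with their allowed `Enum:` values. A minimal sketch of how the new outputs might be consumed from a Pulumi program follows; the `cassandras` property path and the project, cloud and plan values are illustrative assumptions, not taken from this diff:

import pulumi
import pulumi_aiven as aiven

# Hypothetical service definition; project, cloud_name and plan are placeholder values.
cassandra = aiven.Cassandra(
    "example-cassandra",
    project="my-project",
    cloud_name="google-europe-west1",
    plan="startup-4",
    service_name="example-cassandra",
)

# 6.16.0 adds a `uris` list to the service connection-info output type,
# so the server URIs can be exported once the service is provisioned.
pulumi.export(
    "cassandra_uris",
    cassandra.cassandras.apply(lambda blocks: blocks[0].uris if blocks else []),
)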
pulumi_aiven/outputs.py CHANGED
@@ -12,6 +12,7 @@ from . import outputs

 __all__ = [
     'AccountAuthenticationSamlFieldMapping',
+    'CassandraCassandra',
     'CassandraCassandraUserConfig',
     'CassandraCassandraUserConfigCassandra',
     'CassandraCassandraUserConfigIpFilterObject',
@@ -21,6 +22,7 @@ __all__ = [
     'CassandraServiceIntegration',
     'CassandraTag',
     'CassandraTechEmail',
+    'ClickhouseClickhouse',
     'ClickhouseClickhouseUserConfig',
     'ClickhouseClickhouseUserConfigIpFilterObject',
     'ClickhouseClickhouseUserConfigPrivateAccess',
@@ -33,6 +35,7 @@ __all__ = [
     'ClickhouseTag',
     'ClickhouseTechEmail',
     'DragonflyComponent',
+    'DragonflyDragonfly',
     'DragonflyDragonflyUserConfig',
     'DragonflyDragonflyUserConfigIpFilterObject',
     'DragonflyDragonflyUserConfigMigration',
@@ -53,6 +56,7 @@ __all__ = [
     'FlinkTag',
     'FlinkTechEmail',
     'GrafanaComponent',
+    'GrafanaGrafana',
     'GrafanaGrafanaUserConfig',
     'GrafanaGrafanaUserConfigAuthAzuread',
     'GrafanaGrafanaUserConfigAuthGenericOauth',
@@ -118,12 +122,14 @@ __all__ = [
     'KafkaTopicConfig',
     'KafkaTopicTag',
     'M3AggregatorComponent',
+    'M3AggregatorM3aggregator',
     'M3AggregatorM3aggregatorUserConfig',
     'M3AggregatorM3aggregatorUserConfigIpFilterObject',
     'M3AggregatorServiceIntegration',
     'M3AggregatorTag',
     'M3AggregatorTechEmail',
     'M3DbComponent',
+    'M3DbM3db',
     'M3DbM3dbUserConfig',
     'M3DbM3dbUserConfigIpFilterObject',
     'M3DbM3dbUserConfigLimits',
@@ -142,6 +148,8 @@ __all__ = [
     'M3DbTag',
     'M3DbTechEmail',
     'MySqlComponent',
+    'MySqlMysql',
+    'MySqlMysqlParam',
     'MySqlMysqlUserConfig',
     'MySqlMysqlUserConfigIpFilterObject',
     'MySqlMysqlUserConfigMigration',
@@ -178,6 +186,7 @@ __all__ = [
     'OrganizationUserGroupMemberTimeouts',
     'PgComponent',
     'PgPg',
+    'PgPgParam',
     'PgPgUserConfig',
     'PgPgUserConfigIpFilterObject',
     'PgPgUserConfigMigration',
@@ -195,6 +204,7 @@ __all__ = [
     'PgTechEmail',
     'ProjectTag',
     'RedisComponent',
+    'RedisRedis',
     'RedisRedisUserConfig',
     'RedisRedisUserConfigIpFilterObject',
     'RedisRedisUserConfigMigration',
@@ -247,6 +257,7 @@ __all__ = [
     'ServiceIntegrationPrometheusUserConfigSourceMysql',
     'ServiceIntegrationPrometheusUserConfigSourceMysqlTelegraf',
     'GetAccountAuthenticationSamlFieldMappingResult',
+    'GetCassandaCassandraResult',
     'GetCassandaCassandraUserConfigResult',
     'GetCassandaCassandraUserConfigCassandraResult',
     'GetCassandaCassandraUserConfigIpFilterObjectResult',
@@ -256,6 +267,7 @@ __all__ = [
     'GetCassandaServiceIntegrationResult',
     'GetCassandaTagResult',
     'GetCassandaTechEmailResult',
+    'GetCassandraCassandraResult',
     'GetCassandraCassandraUserConfigResult',
     'GetCassandraCassandraUserConfigCassandraResult',
     'GetCassandraCassandraUserConfigIpFilterObjectResult',
@@ -265,6 +277,7 @@ __all__ = [
     'GetCassandraServiceIntegrationResult',
     'GetCassandraTagResult',
     'GetCassandraTechEmailResult',
+    'GetClickhouseClickhouseResult',
     'GetClickhouseClickhouseUserConfigResult',
     'GetClickhouseClickhouseUserConfigIpFilterObjectResult',
     'GetClickhouseClickhouseUserConfigPrivateAccessResult',
@@ -275,6 +288,7 @@ __all__ = [
     'GetClickhouseTagResult',
     'GetClickhouseTechEmailResult',
     'GetDragonflyComponentResult',
+    'GetDragonflyDragonflyResult',
     'GetDragonflyDragonflyUserConfigResult',
     'GetDragonflyDragonflyUserConfigIpFilterObjectResult',
     'GetDragonflyDragonflyUserConfigMigrationResult',
@@ -295,6 +309,7 @@ __all__ = [
     'GetFlinkTagResult',
     'GetFlinkTechEmailResult',
     'GetGrafanaComponentResult',
+    'GetGrafanaGrafanaResult',
     'GetGrafanaGrafanaUserConfigResult',
     'GetGrafanaGrafanaUserConfigAuthAzureadResult',
     'GetGrafanaGrafanaUserConfigAuthGenericOauthResult',
@@ -360,12 +375,14 @@ __all__ = [
     'GetKafkaTopicConfigResult',
     'GetKafkaTopicTagResult',
     'GetM3AggregatorComponentResult',
+    'GetM3AggregatorM3aggregatorResult',
     'GetM3AggregatorM3aggregatorUserConfigResult',
     'GetM3AggregatorM3aggregatorUserConfigIpFilterObjectResult',
     'GetM3AggregatorServiceIntegrationResult',
     'GetM3AggregatorTagResult',
     'GetM3AggregatorTechEmailResult',
     'GetM3DbComponentResult',
+    'GetM3DbM3dbResult',
     'GetM3DbM3dbUserConfigResult',
     'GetM3DbM3dbUserConfigIpFilterObjectResult',
     'GetM3DbM3dbUserConfigLimitsResult',
@@ -384,6 +401,8 @@ __all__ = [
     'GetM3DbTagResult',
     'GetM3DbTechEmailResult',
     'GetMySqlComponentResult',
+    'GetMySqlMysqlResult',
+    'GetMySqlMysqlParamResult',
     'GetMySqlMysqlUserConfigResult',
     'GetMySqlMysqlUserConfigIpFilterObjectResult',
     'GetMySqlMysqlUserConfigMigrationResult',
@@ -415,6 +434,7 @@ __all__ = [
     'GetOpenSearchTechEmailResult',
     'GetPgComponentResult',
     'GetPgPgResult',
+    'GetPgPgParamResult',
     'GetPgPgUserConfigResult',
     'GetPgPgUserConfigIpFilterObjectResult',
     'GetPgPgUserConfigMigrationResult',
@@ -432,6 +452,7 @@ __all__ = [
     'GetPgTechEmailResult',
     'GetProjectTagResult',
     'GetRedisComponentResult',
+    'GetRedisRediResult',
     'GetRedisRedisUserConfigResult',
     'GetRedisRedisUserConfigIpFilterObjectResult',
     'GetRedisRedisUserConfigMigrationResult',
@@ -573,6 +594,25 @@ class AccountAuthenticationSamlFieldMapping(dict):
         return pulumi.get(self, "real_name")


+@pulumi.output_type
+class CassandraCassandra(dict):
+    def __init__(__self__, *,
+                 uris: Optional[Sequence[str]] = None):
+        """
+        :param Sequence[str] uris: Cassandra server URIs.
+        """
+        if uris is not None:
+            pulumi.set(__self__, "uris", uris)
+
+    @property
+    @pulumi.getter
+    def uris(self) -> Optional[Sequence[str]]:
+        """
+        Cassandra server URIs.
+        """
+        return pulumi.get(self, "uris")
+
+
 @pulumi.output_type
 class CassandraCassandraUserConfig(dict):
     @staticmethod
@@ -642,7 +682,7 @@ class CassandraCassandraUserConfig(dict):
         :param int backup_hour: The hour of day (in UTC) when backup for the service is started. New backup is only started if previous backup has already completed.
         :param int backup_minute: The minute of an hour when backup for the service is started. New backup is only started if previous backup has already completed.
         :param 'CassandraCassandraUserConfigCassandraArgs' cassandra: Cassandra configuration values
-        :param str cassandra_version: Cassandra version.
+        :param str cassandra_version: Enum: `3`, `4`, `4.1`. Cassandra version.
         :param Sequence['CassandraCassandraUserConfigIpFilterObjectArgs'] ip_filter_objects: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'
         :param Sequence[str] ip_filter_strings: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
         :param Sequence[str] ip_filters: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
@@ -727,7 +767,7 @@ class CassandraCassandraUserConfig(dict):
     @pulumi.getter(name="cassandraVersion")
     def cassandra_version(self) -> Optional[str]:
         """
-        Cassandra version.
+        Enum: `3`, `4`, `4.1`. Cassandra version.
         """
         return pulumi.get(self, "cassandra_version")

@@ -1170,6 +1210,25 @@ class CassandraTechEmail(dict):
         return pulumi.get(self, "email")


+@pulumi.output_type
+class ClickhouseClickhouse(dict):
+    def __init__(__self__, *,
+                 uris: Optional[Sequence[str]] = None):
+        """
+        :param Sequence[str] uris: Clickhouse server URIs.
+        """
+        if uris is not None:
+            pulumi.set(__self__, "uris", uris)
+
+    @property
+    @pulumi.getter
+    def uris(self) -> Optional[Sequence[str]]:
+        """
+        Clickhouse server URIs.
+        """
+        return pulumi.get(self, "uris")
+
+
 @pulumi.output_type
 class ClickhouseClickhouseUserConfig(dict):
     @staticmethod
@@ -2045,6 +2104,80 @@ class DragonflyComponent(dict):
         return pulumi.get(self, "usage")


+@pulumi.output_type
+class DragonflyDragonfly(dict):
+    @staticmethod
+    def __key_warning(key: str):
+        suggest = None
+        if key == "replicaUri":
+            suggest = "replica_uri"
+        elif key == "slaveUris":
+            suggest = "slave_uris"
+
+        if suggest:
+            pulumi.log.warn(f"Key '{key}' not found in DragonflyDragonfly. Access the value via the '{suggest}' property getter instead.")
+
+    def __getitem__(self, key: str) -> Any:
+        DragonflyDragonfly.__key_warning(key)
+        return super().__getitem__(key)
+
+    def get(self, key: str, default = None) -> Any:
+        DragonflyDragonfly.__key_warning(key)
+        return super().get(key, default)
+
+    def __init__(__self__, *,
+                 password: Optional[str] = None,
+                 replica_uri: Optional[str] = None,
+                 slave_uris: Optional[Sequence[str]] = None,
+                 uris: Optional[Sequence[str]] = None):
+        """
+        :param str password: Dragonfly password.
+        :param str replica_uri: Dragonfly replica server URI.
+        :param Sequence[str] slave_uris: Dragonfly slave server URIs.
+        :param Sequence[str] uris: Dragonfly server URIs.
+        """
+        if password is not None:
+            pulumi.set(__self__, "password", password)
+        if replica_uri is not None:
+            pulumi.set(__self__, "replica_uri", replica_uri)
+        if slave_uris is not None:
+            pulumi.set(__self__, "slave_uris", slave_uris)
+        if uris is not None:
+            pulumi.set(__self__, "uris", uris)
+
+    @property
+    @pulumi.getter
+    def password(self) -> Optional[str]:
+        """
+        Dragonfly password.
+        """
+        return pulumi.get(self, "password")
+
+    @property
+    @pulumi.getter(name="replicaUri")
+    def replica_uri(self) -> Optional[str]:
+        """
+        Dragonfly replica server URI.
+        """
+        return pulumi.get(self, "replica_uri")
+
+    @property
+    @pulumi.getter(name="slaveUris")
+    def slave_uris(self) -> Optional[Sequence[str]]:
+        """
+        Dragonfly slave server URIs.
+        """
+        return pulumi.get(self, "slave_uris")
+
+    @property
+    @pulumi.getter
+    def uris(self) -> Optional[Sequence[str]]:
+        """
+        Dragonfly server URIs.
+        """
+        return pulumi.get(self, "uris")
+
+
 @pulumi.output_type
 class DragonflyDragonflyUserConfig(dict):
     @staticmethod
@@ -2108,7 +2241,7 @@ class DragonflyDragonflyUserConfig(dict):
                  static_ips: Optional[bool] = None):
         """
         :param bool cache_mode: Evict entries when getting close to maxmemory limit. The default value is `false`.
-        :param str dragonfly_persistence: When persistence is 'rdb', Dragonfly does RDB dumps each 10 minutes. Also RDB dumps are done according to backup schedule for backup purposes. When persistence is 'off', no RDB dumps and backups are done, so data can be lost at any moment if service is restarted for any reason, or if service is powered off. Also service can't be forked.
+        :param str dragonfly_persistence: Enum: `off`, `rdb`. When persistence is 'rdb', Dragonfly does RDB dumps each 10 minutes. Also RDB dumps are done according to backup schedule for backup purposes. When persistence is 'off', no RDB dumps and backups are done, so data can be lost at any moment if service is restarted for any reason, or if service is powered off. Also service can't be forked.
         :param bool dragonfly_ssl: Require SSL to access Dragonfly. The default value is `true`.
         :param Sequence['DragonflyDragonflyUserConfigIpFilterObjectArgs'] ip_filter_objects: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'
         :param Sequence[str] ip_filter_strings: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
@@ -2166,7 +2299,7 @@ class DragonflyDragonflyUserConfig(dict):
     @pulumi.getter(name="dragonflyPersistence")
     def dragonfly_persistence(self) -> Optional[str]:
         """
-        When persistence is 'rdb', Dragonfly does RDB dumps each 10 minutes. Also RDB dumps are done according to backup schedule for backup purposes. When persistence is 'off', no RDB dumps and backups are done, so data can be lost at any moment if service is restarted for any reason, or if service is powered off. Also service can't be forked.
+        Enum: `off`, `rdb`. When persistence is 'rdb', Dragonfly does RDB dumps each 10 minutes. Also RDB dumps are done according to backup schedule for backup purposes. When persistence is 'off', no RDB dumps and backups are done, so data can be lost at any moment if service is restarted for any reason, or if service is powered off. Also service can't be forked.
         """
         return pulumi.get(self, "dragonfly_persistence")

@@ -2341,7 +2474,7 @@ class DragonflyDragonflyUserConfigMigration(dict):
         :param int port: Port number of the server where to migrate data from.
         :param str dbname: Database name for bootstrapping the initial connection.
         :param str ignore_dbs: Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment).
-        :param str method: The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
+        :param str method: Enum: `dump`, `replication`. The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
         :param str password: Password for authentication with the server where to migrate data from.
         :param bool ssl: The server where to migrate data from is secured with SSL. The default value is `true`.
         :param str username: User name for authentication with the server where to migrate data from.
@@ -2397,7 +2530,7 @@ class DragonflyDragonflyUserConfigMigration(dict):
     @pulumi.getter
     def method(self) -> Optional[str]:
         """
-        The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
+        Enum: `dump`, `replication`. The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
         """
         return pulumi.get(self, "method")

@@ -2917,7 +3050,7 @@ class FlinkFlinkUserConfig(dict):
                  static_ips: Optional[bool] = None):
         """
         :param str additional_backup_regions: Additional Cloud Regions for Backup Replication.
-        :param str flink_version: Flink major version.
+        :param str flink_version: Enum: `1.16`. Flink major version.
         :param Sequence['FlinkFlinkUserConfigIpFilterObjectArgs'] ip_filter_objects: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'
         :param Sequence[str] ip_filter_strings: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
         :param Sequence[str] ip_filters: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
@@ -2960,7 +3093,7 @@ class FlinkFlinkUserConfig(dict):
     @pulumi.getter(name="flinkVersion")
     def flink_version(self) -> Optional[str]:
         """
-        Flink major version.
+        Enum: `1.16`. Flink major version.
         """
         return pulumi.get(self, "flink_version")

@@ -3302,6 +3435,25 @@ class GrafanaComponent(dict):
         return pulumi.get(self, "usage")


+@pulumi.output_type
+class GrafanaGrafana(dict):
+    def __init__(__self__, *,
+                 uris: Optional[Sequence[str]] = None):
+        """
+        :param Sequence[str] uris: Grafana server URIs.
+        """
+        if uris is not None:
+            pulumi.set(__self__, "uris", uris)
+
+    @property
+    @pulumi.getter
+    def uris(self) -> Optional[Sequence[str]]:
+        """
+        Grafana server URIs.
+        """
+        return pulumi.get(self, "uris")
+
+
 @pulumi.output_type
 class GrafanaGrafanaUserConfig(dict):
     @staticmethod
@@ -3449,9 +3601,9 @@ class GrafanaGrafanaUserConfig(dict):
         """
         :param str additional_backup_regions: Additional Cloud Regions for Backup Replication.
         :param bool alerting_enabled: Enable or disable Grafana legacy alerting functionality. This should not be enabled with unified*alerting*enabled.
-        :param str alerting_error_or_timeout: Default error or timeout setting for new alerting rules.
+        :param str alerting_error_or_timeout: Enum: `alerting`, `keep_state`. Default error or timeout setting for new alerting rules.
         :param int alerting_max_annotations_to_keep: Max number of alert annotations that Grafana stores. 0 (default) keeps all alert annotations.
-        :param str alerting_nodata_or_nullvalues: Default value for 'no data or null values' for new alerting rules.
+        :param str alerting_nodata_or_nullvalues: Enum: `alerting`, `no_data`, `keep_state`, `ok`. Default value for 'no data or null values' for new alerting rules.
         :param bool allow_embedding: Allow embedding Grafana dashboards with iframe/frame/object/embed tags. Disabled by default to limit impact of clickjacking.
         :param 'GrafanaGrafanaUserConfigAuthAzureadArgs' auth_azuread: Azure AD OAuth integration
         :param bool auth_basic_enabled: Enable or disable basic authentication form, used by Grafana built-in login.
@@ -3459,7 +3611,7 @@ class GrafanaGrafanaUserConfig(dict):
         :param 'GrafanaGrafanaUserConfigAuthGithubArgs' auth_github: Github Auth integration
         :param 'GrafanaGrafanaUserConfigAuthGitlabArgs' auth_gitlab: GitLab Auth integration
         :param 'GrafanaGrafanaUserConfigAuthGoogleArgs' auth_google: Google Auth integration
-        :param str cookie_samesite: Cookie SameSite attribute: 'strict' prevents sending cookie for cross-site requests, effectively disabling direct linking from other sites to Grafana. 'lax' is the default value.
+        :param str cookie_samesite: Enum: `lax`, `strict`, `none`. Cookie SameSite attribute: 'strict' prevents sending cookie for cross-site requests, effectively disabling direct linking from other sites to Grafana. 'lax' is the default value.
         :param str custom_domain: Serve the web frontend using a custom CNAME pointing to the Aiven DNS name.
         :param bool dashboard_previews_enabled: This feature is new in Grafana 9 and is quite resource intensive. It may cause low-end plans to work more slowly while the dashboard previews are rendering.
         :param str dashboards_min_refresh_interval: Signed sequence of decimal numbers, followed by a unit suffix (ms, s, m, h, d), e.g. 30s, 1h.
@@ -3487,7 +3639,7 @@ class GrafanaGrafanaUserConfig(dict):
         :param bool static_ips: Use static public IP addresses.
         :param bool unified_alerting_enabled: Enable or disable Grafana unified alerting functionality. By default this is enabled and any legacy alerts will be migrated on upgrade to Grafana 9+. To stay on legacy alerting, set unified*alerting*enabled to false and alerting_enabled to true. See https://grafana.com/docs/grafana/latest/alerting/set-up/migrating-alerts/ for more details.
         :param bool user_auto_assign_org: Auto-assign new users on signup to main organization. Defaults to false.
-        :param str user_auto_assign_org_role: Set role for new signups. Defaults to Viewer.
+        :param str user_auto_assign_org_role: Enum: `Viewer`, `Admin`, `Editor`. Set role for new signups. Defaults to Viewer.
         :param bool viewers_can_edit: Users with view-only permission can edit but not save dashboards.
         """
         if additional_backup_regions is not None:
@@ -3595,7 +3747,7 @@ class GrafanaGrafanaUserConfig(dict):
     @pulumi.getter(name="alertingErrorOrTimeout")
     def alerting_error_or_timeout(self) -> Optional[str]:
         """
-        Default error or timeout setting for new alerting rules.
+        Enum: `alerting`, `keep_state`. Default error or timeout setting for new alerting rules.
         """
         return pulumi.get(self, "alerting_error_or_timeout")

@@ -3611,7 +3763,7 @@ class GrafanaGrafanaUserConfig(dict):
     @pulumi.getter(name="alertingNodataOrNullvalues")
     def alerting_nodata_or_nullvalues(self) -> Optional[str]:
         """
-        Default value for 'no data or null values' for new alerting rules.
+        Enum: `alerting`, `no_data`, `keep_state`, `ok`. Default value for 'no data or null values' for new alerting rules.
         """
         return pulumi.get(self, "alerting_nodata_or_nullvalues")

@@ -3675,7 +3827,7 @@ class GrafanaGrafanaUserConfig(dict):
     @pulumi.getter(name="cookieSamesite")
     def cookie_samesite(self) -> Optional[str]:
         """
-        Cookie SameSite attribute: 'strict' prevents sending cookie for cross-site requests, effectively disabling direct linking from other sites to Grafana. 'lax' is the default value.
+        Enum: `lax`, `strict`, `none`. Cookie SameSite attribute: 'strict' prevents sending cookie for cross-site requests, effectively disabling direct linking from other sites to Grafana. 'lax' is the default value.
         """
         return pulumi.get(self, "cookie_samesite")

@@ -3902,7 +4054,7 @@ class GrafanaGrafanaUserConfig(dict):
     @pulumi.getter(name="userAutoAssignOrgRole")
     def user_auto_assign_org_role(self) -> Optional[str]:
         """
-        Set role for new signups. Defaults to Viewer.
+        Enum: `Viewer`, `Admin`, `Editor`. Set role for new signups. Defaults to Viewer.
         """
         return pulumi.get(self, "user_auto_assign_org_role")

@@ -4673,7 +4825,7 @@ class GrafanaGrafanaUserConfigExternalImageStorage(dict):
         """
         :param str access_key: S3 access key. Requires permissions to the S3 bucket for the s3:PutObject and s3:PutObjectAcl actions.
         :param str bucket_url: Bucket URL for S3.
-        :param str provider: Provider type.
+        :param str provider: Enum: `s3`. Provider type.
         :param str secret_key: S3 secret key.
         """
         pulumi.set(__self__, "access_key", access_key)
@@ -4701,7 +4853,7 @@ class GrafanaGrafanaUserConfigExternalImageStorage(dict):
     @pulumi.getter
     def provider(self) -> str:
         """
-        Provider type.
+        Enum: `s3`. Provider type.
         """
         return pulumi.get(self, "provider")

@@ -4842,7 +4994,7 @@ class GrafanaGrafanaUserConfigSmtpServer(dict):
         :param str from_name: Name used in outgoing emails, defaults to Grafana.
         :param str password: Password for SMTP authentication.
         :param bool skip_verify: Skip verifying server certificate. Defaults to false.
-        :param str starttls_policy: Either OpportunisticStartTLS, MandatoryStartTLS or NoStartTLS. Default is OpportunisticStartTLS.
+        :param str starttls_policy: Enum: `OpportunisticStartTLS`, `MandatoryStartTLS`, `NoStartTLS`. Either OpportunisticStartTLS, MandatoryStartTLS or NoStartTLS. Default is OpportunisticStartTLS.
         :param str username: Username for SMTP authentication.
         """
         pulumi.set(__self__, "from_address", from_address)
@@ -4911,7 +5063,7 @@ class GrafanaGrafanaUserConfigSmtpServer(dict):
     @pulumi.getter(name="starttlsPolicy")
     def starttls_policy(self) -> Optional[str]:
         """
-        Either OpportunisticStartTLS, MandatoryStartTLS or NoStartTLS. Default is OpportunisticStartTLS.
+        Enum: `OpportunisticStartTLS`, `MandatoryStartTLS`, `NoStartTLS`. Either OpportunisticStartTLS, MandatoryStartTLS or NoStartTLS. Default is OpportunisticStartTLS.
         """
         return pulumi.get(self, "starttls_policy")

@@ -5161,12 +5313,24 @@ class InfluxDbInfluxdb(dict):
         return super().get(key, default)

     def __init__(__self__, *,
-                 database_name: Optional[str] = None):
+                 database_name: Optional[str] = None,
+                 password: Optional[str] = None,
+                 uris: Optional[Sequence[str]] = None,
+                 username: Optional[str] = None):
         """
         :param str database_name: Name of the default InfluxDB database
+        :param str password: InfluxDB password
+        :param Sequence[str] uris: InfluxDB server URIs.
+        :param str username: InfluxDB username
         """
         if database_name is not None:
             pulumi.set(__self__, "database_name", database_name)
+        if password is not None:
+            pulumi.set(__self__, "password", password)
+        if uris is not None:
+            pulumi.set(__self__, "uris", uris)
+        if username is not None:
+            pulumi.set(__self__, "username", username)

     @property
     @pulumi.getter(name="databaseName")
@@ -5176,6 +5340,30 @@ class InfluxDbInfluxdb(dict):
         """
         return pulumi.get(self, "database_name")

+    @property
+    @pulumi.getter
+    def password(self) -> Optional[str]:
+        """
+        InfluxDB password
+        """
+        return pulumi.get(self, "password")
+
+    @property
+    @pulumi.getter
+    def uris(self) -> Optional[Sequence[str]]:
+        """
+        InfluxDB server URIs.
+        """
+        return pulumi.get(self, "uris")
+
+    @property
+    @pulumi.getter
+    def username(self) -> Optional[str]:
+        """
+        InfluxDB username
+        """
+        return pulumi.get(self, "username")
+

 @pulumi.output_type
 class InfluxDbInfluxdbUserConfig(dict):
@@ -6210,10 +6398,10 @@ class KafkaConnectKafkaConnectUserConfigKafkaConnect(dict):
                  scheduled_rebalance_max_delay_ms: Optional[int] = None,
                  session_timeout_ms: Optional[int] = None):
         """
-        :param str connector_client_config_override_policy: Defines what client configurations can be overridden by the connector. Default is None.
-        :param str consumer_auto_offset_reset: What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
+        :param str connector_client_config_override_policy: Enum: `None`, `All`. Defines what client configurations can be overridden by the connector. Default is None.
+        :param str consumer_auto_offset_reset: Enum: `earliest`, `latest`. What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
         :param int consumer_fetch_max_bytes: Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not a absolute maximum.
-        :param str consumer_isolation_level: Transaction read isolation level. read*uncommitted is the default, but read*committed can be used if consume-exactly-once behavior is desired.
+        :param str consumer_isolation_level: Enum: `read_uncommitted`, `read_committed`. Transaction read isolation level. read*uncommitted is the default, but read*committed can be used if consume-exactly-once behavior is desired.
         :param int consumer_max_partition_fetch_bytes: Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress.
         :param int consumer_max_poll_interval_ms: The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).
         :param int consumer_max_poll_records: The maximum number of records returned in a single call to poll() (defaults to 500).
@@ -6221,7 +6409,7 @@ class KafkaConnectKafkaConnectUserConfigKafkaConnect(dict):
         :param int offset_flush_timeout_ms: Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).
         :param int producer_batch_size: This setting gives the upper bound of the batch size to be sent. If there are fewer than this many bytes accumulated for this partition, the producer will 'linger' for the linger.ms time waiting for more records to show up. A batch size of zero will disable batching entirely (defaults to 16384).
         :param int producer_buffer_memory: The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432).
-        :param str producer_compression_type: Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
+        :param str producer_compression_type: Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
         :param int producer_linger_ms: This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting, however if there are fewer than this many bytes accumulated for this partition the producer will 'linger' for the specified time waiting for more records to show up. Defaults to 0.
         :param int producer_max_request_size: This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests.
         :param int scheduled_rebalance_max_delay_ms: The maximum delay that is scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. Defaults to 5 minutes.
@@ -6264,7 +6452,7 @@ class KafkaConnectKafkaConnectUserConfigKafkaConnect(dict):
     @pulumi.getter(name="connectorClientConfigOverridePolicy")
     def connector_client_config_override_policy(self) -> Optional[str]:
         """
-        Defines what client configurations can be overridden by the connector. Default is None.
+        Enum: `None`, `All`. Defines what client configurations can be overridden by the connector. Default is None.
         """
         return pulumi.get(self, "connector_client_config_override_policy")

@@ -6272,7 +6460,7 @@ class KafkaConnectKafkaConnectUserConfigKafkaConnect(dict):
     @pulumi.getter(name="consumerAutoOffsetReset")
     def consumer_auto_offset_reset(self) -> Optional[str]:
         """
-        What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
+        Enum: `earliest`, `latest`. What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
         """
         return pulumi.get(self, "consumer_auto_offset_reset")

@@ -6288,7 +6476,7 @@ class KafkaConnectKafkaConnectUserConfigKafkaConnect(dict):
     @pulumi.getter(name="consumerIsolationLevel")
     def consumer_isolation_level(self) -> Optional[str]:
         """
-        Transaction read isolation level. read*uncommitted is the default, but read*committed can be used if consume-exactly-once behavior is desired.
+        Enum: `read_uncommitted`, `read_committed`. Transaction read isolation level. read*uncommitted is the default, but read*committed can be used if consume-exactly-once behavior is desired.
         """
         return pulumi.get(self, "consumer_isolation_level")

@@ -6352,7 +6540,7 @@ class KafkaConnectKafkaConnectUserConfigKafkaConnect(dict):
     @pulumi.getter(name="producerCompressionType")
     def producer_compression_type(self) -> Optional[str]:
         """
-        Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
+        Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
         """
         return pulumi.get(self, "producer_compression_type")

@@ -6703,13 +6891,15 @@ class KafkaKafka(dict):
                  access_key: Optional[str] = None,
                  connect_uri: Optional[str] = None,
                  rest_uri: Optional[str] = None,
-                 schema_registry_uri: Optional[str] = None):
+                 schema_registry_uri: Optional[str] = None,
+                 uris: Optional[Sequence[str]] = None):
         """
         :param str access_cert: The Kafka client certificate.
         :param str access_key: The Kafka client certificate key.
         :param str connect_uri: The Kafka Connect URI.
         :param str rest_uri: The Kafka REST URI.
         :param str schema_registry_uri: The Schema Registry URI.
+        :param Sequence[str] uris: Kafka server URIs.
         """
         if access_cert is not None:
             pulumi.set(__self__, "access_cert", access_cert)
@@ -6721,6 +6911,8 @@ class KafkaKafka(dict):
             pulumi.set(__self__, "rest_uri", rest_uri)
         if schema_registry_uri is not None:
             pulumi.set(__self__, "schema_registry_uri", schema_registry_uri)
+        if uris is not None:
+            pulumi.set(__self__, "uris", uris)

     @property
     @pulumi.getter(name="accessCert")
@@ -6762,6 +6954,14 @@ class KafkaKafka(dict):
         """
         return pulumi.get(self, "schema_registry_uri")

+    @property
+    @pulumi.getter
+    def uris(self) -> Optional[Sequence[str]]:
+        """
+        Kafka server URIs.
+        """
+        return pulumi.get(self, "uris")
+

 @pulumi.output_type
 class KafkaKafkaUserConfig(dict):
@@ -6859,7 +7059,7 @@ class KafkaKafkaUserConfig(dict):
         :param bool kafka_rest: Enable Kafka-REST service. The default value is `false`.
         :param bool kafka_rest_authorization: Enable authorization in Kafka-REST service.
         :param 'KafkaKafkaUserConfigKafkaRestConfigArgs' kafka_rest_config: Kafka REST configuration
-        :param str kafka_version: Kafka major version.
+        :param str kafka_version: Enum: `3.1`, `3.2`, `3.3`, `3.4`, `3.5`, `3.6`, `3.7`. Kafka major version.
         :param 'KafkaKafkaUserConfigPrivateAccessArgs' private_access: Allow access to selected service ports from private networks
         :param 'KafkaKafkaUserConfigPrivatelinkAccessArgs' privatelink_access: Allow access to selected service components through Privatelink
         :param 'KafkaKafkaUserConfigPublicAccessArgs' public_access: Allow access to selected service ports from the public Internet
@@ -7028,7 +7228,7 @@ class KafkaKafkaUserConfig(dict):
     @pulumi.getter(name="kafkaVersion")
     def kafka_version(self) -> Optional[str]:
         """
-        Kafka major version.
+        Enum: `3.1`, `3.2`, `3.3`, `3.4`, `3.5`, `3.6`, `3.7`. Kafka major version.
         """
         return pulumi.get(self, "kafka_version")

@@ -7285,7 +7485,7 @@ class KafkaKafkaUserConfigKafka(dict):
                  transaction_state_log_segment_bytes: Optional[int] = None):
         """
         :param bool auto_create_topics_enable: Enable auto creation of topics.
-        :param str compression_type: Specify the final compression type for a given topic. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'uncompressed' which is equivalent to no compression; and 'producer' which means retain the original compression codec set by the producer.
+        :param str compression_type: Enum: `gzip`, `snappy`, `lz4`, `zstd`, `uncompressed`, `producer`. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'uncompressed' which is equivalent to no compression; and 'producer' which means retain the original compression codec set by the producer.
         :param int connections_max_idle_ms: Idle connections timeout: the server socket processor threads close the connections that idle for longer than this.
         :param int default_replication_factor: Replication factor for autocreated topics.
         :param int group_initial_rebalance_delay_ms: The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time.
@@ -7295,7 +7495,7 @@ class KafkaKafkaUserConfigKafka(dict):
         :param int log_cleaner_max_compaction_lag_ms: The maximum amount of time message will remain uncompacted. Only applicable for logs that are being compacted.
         :param float log_cleaner_min_cleanable_ratio: Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option.
         :param int log_cleaner_min_compaction_lag_ms: The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.
-        :param str log_cleanup_policy: The default cleanup policy for segments beyond the retention window.
+        :param str log_cleanup_policy: Enum: `delete`, `compact`, `compact,delete`. The default cleanup policy for segments beyond the retention window.
         :param int log_flush_interval_messages: The number of messages accumulated on a log partition before messages are flushed to disk.
         :param int log_flush_interval_ms: The maximum time in ms that a message in any topic is kept in memory before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used.
         :param int log_index_interval_bytes: The interval with which Kafka adds an entry to the offset index.
@@ -7304,7 +7504,7 @@ class KafkaKafkaUserConfigKafka(dict):
         :param int log_local_retention_ms: The number of milliseconds to keep the local log segments before it gets eligible for deletion. If set to -2, the value of log.retention.ms is used. The effective value should always be less than or equal to log.retention.ms value.
         :param bool log_message_downconversion_enable: This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests.
         :param int log_message_timestamp_difference_max_ms: The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message.
-        :param str log_message_timestamp_type: Define whether the timestamp in the message is message create time or log append time.
+        :param str log_message_timestamp_type: Enum: `CreateTime`, `LogAppendTime`. Define whether the timestamp in the message is message create time or log append time.
         :param bool log_preallocate: Should pre allocate file when create new segment?
         :param int log_retention_bytes: The maximum size of the log before deleting messages.
         :param int log_retention_hours: The number of hours to keep a log file before deleting it.
@@ -7436,7 +7636,7 @@ class KafkaKafkaUserConfigKafka(dict):
     @pulumi.getter(name="compressionType")
     def compression_type(self) -> Optional[str]:
         """
-        Specify the final compression type for a given topic. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'uncompressed' which is equivalent to no compression; and 'producer' which means retain the original compression codec set by the producer.
+        Enum: `gzip`, `snappy`, `lz4`, `zstd`, `uncompressed`, `producer`. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'uncompressed' which is equivalent to no compression; and 'producer' which means retain the original compression codec set by the producer.
         """
         return pulumi.get(self, "compression_type")

@@ -7516,7 +7716,7 @@ class KafkaKafkaUserConfigKafka(dict):
     @pulumi.getter(name="logCleanupPolicy")
     def log_cleanup_policy(self) -> Optional[str]:
         """
-        The default cleanup policy for segments beyond the retention window.
+        Enum: `delete`, `compact`, `compact,delete`. The default cleanup policy for segments beyond the retention window.
         """
         return pulumi.get(self, "log_cleanup_policy")

@@ -7588,7 +7788,7 @@ class KafkaKafkaUserConfigKafka(dict):
     @pulumi.getter(name="logMessageTimestampType")
     def log_message_timestamp_type(self) -> Optional[str]:
         """
-        Define whether the timestamp in the message is message create time or log append time.
+        Enum: `CreateTime`, `LogAppendTime`. Define whether the timestamp in the message is message create time or log append time.
        """
         return pulumi.get(self, "log_message_timestamp_type")

@@ -7891,10 +8091,10 @@ class KafkaKafkaUserConfigKafkaConnectConfig(dict):
                  scheduled_rebalance_max_delay_ms: Optional[int] = None,
                  session_timeout_ms: Optional[int] = None):
         """
-        :param str connector_client_config_override_policy: Defines what client configurations can be overridden by the connector. Default is None.
-        :param str consumer_auto_offset_reset: What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
+        :param str connector_client_config_override_policy: Enum: `None`, `All`. Defines what client configurations can be overridden by the connector. Default is None.
+        :param str consumer_auto_offset_reset: Enum: `earliest`, `latest`. What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
         :param int consumer_fetch_max_bytes: Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not a absolute maximum.
-        :param str consumer_isolation_level: Transaction read isolation level. read*uncommitted is the default, but read*committed can be used if consume-exactly-once behavior is desired.
+        :param str consumer_isolation_level: Enum: `read_uncommitted`, `read_committed`. Transaction read isolation level. read*uncommitted is the default, but read*committed can be used if consume-exactly-once behavior is desired.
         :param int consumer_max_partition_fetch_bytes: Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress.
         :param int consumer_max_poll_interval_ms: The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).
         :param int consumer_max_poll_records: The maximum number of records returned in a single call to poll() (defaults to 500).
@@ -7902,7 +8102,7 @@ class KafkaKafkaUserConfigKafkaConnectConfig(dict):
         :param int offset_flush_timeout_ms: Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).
         :param int producer_batch_size: This setting gives the upper bound of the batch size to be sent. If there are fewer than this many bytes accumulated for this partition, the producer will 'linger' for the linger.ms time waiting for more records to show up. A batch size of zero will disable batching entirely (defaults to 16384).
         :param int producer_buffer_memory: The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432).
-        :param str producer_compression_type: Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
+        :param str producer_compression_type: Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
         :param int producer_linger_ms: This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting, however if there are fewer than this many bytes accumulated for this partition the producer will 'linger' for the specified time waiting for more records to show up. Defaults to 0.
         :param int producer_max_request_size: This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests.
         :param int scheduled_rebalance_max_delay_ms: The maximum delay that is scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. Defaults to 5 minutes.
@@ -7945,7 +8145,7 @@ class KafkaKafkaUserConfigKafkaConnectConfig(dict):
     @pulumi.getter(name="connectorClientConfigOverridePolicy")
     def connector_client_config_override_policy(self) -> Optional[str]:
         """
-        Defines what client configurations can be overridden by the connector. Default is None.
+        Enum: `None`, `All`. Defines what client configurations can be overridden by the connector. Default is None.
         """
         return pulumi.get(self, "connector_client_config_override_policy")

@@ -7953,7 +8153,7 @@ class KafkaKafkaUserConfigKafkaConnectConfig(dict):
     @pulumi.getter(name="consumerAutoOffsetReset")
     def consumer_auto_offset_reset(self) -> Optional[str]:
         """
-        What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
+        Enum: `earliest`, `latest`. What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
         """
         return pulumi.get(self, "consumer_auto_offset_reset")

@@ -7969,7 +8169,7 @@ class KafkaKafkaUserConfigKafkaConnectConfig(dict):
     @pulumi.getter(name="consumerIsolationLevel")
     def consumer_isolation_level(self) -> Optional[str]:
         """
-        Transaction read isolation level. read*uncommitted is the default, but read*committed can be used if consume-exactly-once behavior is desired.
+        Enum: `read_uncommitted`, `read_committed`. Transaction read isolation level. read*uncommitted is the default, but read*committed can be used if consume-exactly-once behavior is desired.
         """
         return pulumi.get(self, "consumer_isolation_level")

@@ -8033,7 +8233,7 @@ class KafkaKafkaUserConfigKafkaConnectConfig(dict):
     @pulumi.getter(name="producerCompressionType")
     def producer_compression_type(self) -> Optional[str]:
         """
-        Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
+        Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
         """
         return pulumi.get(self, "producer_compression_type")

@@ -8121,11 +8321,11 @@ class KafkaKafkaUserConfigKafkaRestConfig(dict):
8121
8321
  """
8122
8322
  :param bool consumer_enable_auto_commit: If true the consumer's offset will be periodically committed to Kafka in the background. The default value is `true`.
8123
8323
  :param int consumer_request_max_bytes: Maximum number of bytes in unencoded message keys and values by a single request. The default value is `67108864`.
8124
- :param int consumer_request_timeout_ms: The maximum total time to wait for messages for a request if the maximum number of messages has not yet been reached. The default value is `1000`.
8125
- :param str name_strategy: Name strategy to use when selecting subject for storing schemas. The default value is `topic_name`.
8324
+ :param int consumer_request_timeout_ms: Enum: `1000`, `15000`, `30000`. The maximum total time to wait for messages for a request if the maximum number of messages has not yet been reached. The default value is `1000`.
8325
+ :param str name_strategy: Enum: `topic_name`, `record_name`, `topic_record_name`. Name strategy to use when selecting subject for storing schemas. The default value is `topic_name`.
8126
8326
  :param bool name_strategy_validation: If true, validate that given schema is registered under expected subject name by the used name strategy when producing messages. The default value is `true`.
8127
- :param str producer_acks: The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to 'all' or '-1', the leader will wait for the full set of in-sync replicas to acknowledge the record. The default value is `1`.
8128
- :param str producer_compression_type: Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
8327
+ :param str producer_acks: Enum: `all`, `-1`, `0`, `1`. The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to 'all' or '-1', the leader will wait for the full set of in-sync replicas to acknowledge the record. The default value is `1`.
8328
+ :param str producer_compression_type: Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
8129
8329
  :param int producer_linger_ms: Wait for up to the given delay to allow batching records together. The default value is `0`.
8130
8330
  :param int producer_max_request_size: The maximum size of a request in bytes. Note that Kafka broker can also cap the record batch size. The default value is `1048576`.
8131
8331
  :param int simpleconsumer_pool_size_max: Maximum number of SimpleConsumers that can be instantiated per broker. The default value is `25`.
@@ -8171,7 +8371,7 @@ class KafkaKafkaUserConfigKafkaRestConfig(dict):
8171
8371
  @pulumi.getter(name="consumerRequestTimeoutMs")
8172
8372
  def consumer_request_timeout_ms(self) -> Optional[int]:
8173
8373
  """
8174
- The maximum total time to wait for messages for a request if the maximum number of messages has not yet been reached. The default value is `1000`.
8374
+ Enum: `1000`, `15000`, `30000`. The maximum total time to wait for messages for a request if the maximum number of messages has not yet been reached. The default value is `1000`.
8175
8375
  """
8176
8376
  return pulumi.get(self, "consumer_request_timeout_ms")
8177
8377
 
@@ -8179,7 +8379,7 @@ class KafkaKafkaUserConfigKafkaRestConfig(dict):
8179
8379
  @pulumi.getter(name="nameStrategy")
8180
8380
  def name_strategy(self) -> Optional[str]:
8181
8381
  """
8182
- Name strategy to use when selecting subject for storing schemas. The default value is `topic_name`.
8382
+ Enum: `topic_name`, `record_name`, `topic_record_name`. Name strategy to use when selecting subject for storing schemas. The default value is `topic_name`.
8183
8383
  """
8184
8384
  return pulumi.get(self, "name_strategy")
8185
8385
 
@@ -8195,7 +8395,7 @@ class KafkaKafkaUserConfigKafkaRestConfig(dict):
8195
8395
  @pulumi.getter(name="producerAcks")
8196
8396
  def producer_acks(self) -> Optional[str]:
8197
8397
  """
8198
- The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to 'all' or '-1', the leader will wait for the full set of in-sync replicas to acknowledge the record. The default value is `1`.
8398
+ Enum: `all`, `-1`, `0`, `1`. The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to 'all' or '-1', the leader will wait for the full set of in-sync replicas to acknowledge the record. The default value is `1`.
8199
8399
  """
8200
8400
  return pulumi.get(self, "producer_acks")
8201
8401
 
@@ -8203,7 +8403,7 @@ class KafkaKafkaUserConfigKafkaRestConfig(dict):
8203
8403
  @pulumi.getter(name="producerCompressionType")
8204
8404
  def producer_compression_type(self) -> Optional[str]:
8205
8405
  """
8206
- Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
8406
+ Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
8207
8407
  """
8208
8408
  return pulumi.get(self, "producer_compression_type")
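The hunks above tighten the Kafka REST documentation to explicit enums. As a minimal sketch of how those values map onto the input side of the SDK, the snippet below sets a few of them when declaring a Kafka service; the `KafkaKafkaUserConfigKafkaRestConfigArgs` class name, the `kafka_rest` flag, and the project/plan values are assumptions based on the provider's usual conventions, not taken from this diff.

```python
import pulumi_aiven as aiven

kafka = aiven.Kafka(
    "example-kafka",
    project="my-project",                # placeholder
    cloud_name="google-europe-west1",    # placeholder
    plan="business-4",                   # placeholder
    service_name="example-kafka",
    kafka_user_config=aiven.KafkaKafkaUserConfigArgs(
        kafka_rest=True,
        kafka_rest_config=aiven.KafkaKafkaUserConfigKafkaRestConfigArgs(
            producer_acks="all",                # Enum: all, -1, 0, 1
            producer_compression_type="snappy", # Enum: gzip, snappy, lz4, zstd, none
            consumer_request_timeout_ms=30000,  # Enum: 1000, 15000, 30000
            name_strategy="record_name",        # Enum: topic_name, record_name, topic_record_name
        ),
    ),
)
```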
8209
8409
 
@@ -9854,6 +10054,54 @@ class M3AggregatorComponent(dict):
9854
10054
  return pulumi.get(self, "usage")
9855
10055
 
9856
10056
 
10057
+ @pulumi.output_type
10058
+ class M3AggregatorM3aggregator(dict):
10059
+ @staticmethod
10060
+ def __key_warning(key: str):
10061
+ suggest = None
10062
+ if key == "aggregatorHttpUri":
10063
+ suggest = "aggregator_http_uri"
10064
+
10065
+ if suggest:
10066
+ pulumi.log.warn(f"Key '{key}' not found in M3AggregatorM3aggregator. Access the value via the '{suggest}' property getter instead.")
10067
+
10068
+ def __getitem__(self, key: str) -> Any:
10069
+ M3AggregatorM3aggregator.__key_warning(key)
10070
+ return super().__getitem__(key)
10071
+
10072
+ def get(self, key: str, default = None) -> Any:
10073
+ M3AggregatorM3aggregator.__key_warning(key)
10074
+ return super().get(key, default)
10075
+
10076
+ def __init__(__self__, *,
10077
+ aggregator_http_uri: Optional[str] = None,
10078
+ uris: Optional[Sequence[str]] = None):
10079
+ """
10080
+ :param str aggregator_http_uri: M3 Aggregator HTTP URI.
10081
+ :param Sequence[str] uris: M3 Aggregator server URIs.
10082
+ """
10083
+ if aggregator_http_uri is not None:
10084
+ pulumi.set(__self__, "aggregator_http_uri", aggregator_http_uri)
10085
+ if uris is not None:
10086
+ pulumi.set(__self__, "uris", uris)
10087
+
10088
+ @property
10089
+ @pulumi.getter(name="aggregatorHttpUri")
10090
+ def aggregator_http_uri(self) -> Optional[str]:
10091
+ """
10092
+ M3 Aggregator HTTP URI.
10093
+ """
10094
+ return pulumi.get(self, "aggregator_http_uri")
10095
+
10096
+ @property
10097
+ @pulumi.getter
10098
+ def uris(self) -> Optional[Sequence[str]]:
10099
+ """
10100
+ M3 Aggregator server URIs.
10101
+ """
10102
+ return pulumi.get(self, "uris")
10103
+
10104
+
9857
10105
  @pulumi.output_type
9858
10106
  class M3AggregatorM3aggregatorUserConfig(dict):
9859
10107
  @staticmethod
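The new `M3AggregatorM3aggregator` output type surfaces the aggregator's connection details on the resource. A hypothetical stack snippet follows; the `m3aggregator` property name on the resource and the import-style ID format are assumptions.

```python
import pulumi
import pulumi_aiven as aiven

# Look up an existing service by an import-style ID (project/service_name, both placeholders)
# and export the new connection block.
m3a = aiven.M3Aggregator.get("m3a", "my-project/example-m3aggregator")
pulumi.export("m3agg_http_uri", m3a.m3aggregator.aggregator_http_uri)
pulumi.export("m3agg_uris", m3a.m3aggregator.uris)
```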
@@ -9901,8 +10149,8 @@ class M3AggregatorM3aggregatorUserConfig(dict):
9901
10149
  :param Sequence['M3AggregatorM3aggregatorUserConfigIpFilterObjectArgs'] ip_filter_objects: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'
9902
10150
  :param Sequence[str] ip_filter_strings: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
9903
10151
  :param Sequence[str] ip_filters: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
9904
- :param str m3_version: M3 major version (deprecated, use m3aggregator_version).
9905
- :param str m3aggregator_version: M3 major version (the minimum compatible version).
10152
+ :param str m3_version: Enum: `1.1`, `1.2`, `1.5`. M3 major version (deprecated, use m3aggregator_version).
10153
+ :param str m3aggregator_version: Enum: `1.1`, `1.2`, `1.5`. M3 major version (the minimum compatible version).
9906
10154
  :param bool service_log: Store logs for the service so that they are available in the HTTP API and console.
9907
10155
  :param bool static_ips: Use static public IP addresses.
9908
10156
  """
@@ -9962,7 +10210,7 @@ class M3AggregatorM3aggregatorUserConfig(dict):
9962
10210
  @pulumi.getter(name="m3Version")
9963
10211
  def m3_version(self) -> Optional[str]:
9964
10212
  """
9965
- M3 major version (deprecated, use m3aggregator_version).
10213
+ Enum: `1.1`, `1.2`, `1.5`. M3 major version (deprecated, use m3aggregator_version).
9966
10214
  """
9967
10215
  return pulumi.get(self, "m3_version")
9968
10216
 
@@ -9970,7 +10218,7 @@ class M3AggregatorM3aggregatorUserConfig(dict):
9970
10218
  @pulumi.getter(name="m3aggregatorVersion")
9971
10219
  def m3aggregator_version(self) -> Optional[str]:
9972
10220
  """
9973
- M3 major version (the minimum compatible version).
10221
+ Enum: `1.1`, `1.2`, `1.5`. M3 major version (the minimum compatible version).
9974
10222
  """
9975
10223
  return pulumi.get(self, "m3aggregator_version")
9976
10224
 
@@ -10238,6 +10486,110 @@ class M3DbComponent(dict):
10238
10486
  return pulumi.get(self, "usage")
10239
10487
 
10240
10488
 
10489
+ @pulumi.output_type
10490
+ class M3DbM3db(dict):
10491
+ @staticmethod
10492
+ def __key_warning(key: str):
10493
+ suggest = None
10494
+ if key == "httpClusterUri":
10495
+ suggest = "http_cluster_uri"
10496
+ elif key == "httpNodeUri":
10497
+ suggest = "http_node_uri"
10498
+ elif key == "influxdbUri":
10499
+ suggest = "influxdb_uri"
10500
+ elif key == "prometheusRemoteReadUri":
10501
+ suggest = "prometheus_remote_read_uri"
10502
+ elif key == "prometheusRemoteWriteUri":
10503
+ suggest = "prometheus_remote_write_uri"
10504
+
10505
+ if suggest:
10506
+ pulumi.log.warn(f"Key '{key}' not found in M3DbM3db. Access the value via the '{suggest}' property getter instead.")
10507
+
10508
+ def __getitem__(self, key: str) -> Any:
10509
+ M3DbM3db.__key_warning(key)
10510
+ return super().__getitem__(key)
10511
+
10512
+ def get(self, key: str, default = None) -> Any:
10513
+ M3DbM3db.__key_warning(key)
10514
+ return super().get(key, default)
10515
+
10516
+ def __init__(__self__, *,
10517
+ http_cluster_uri: Optional[str] = None,
10518
+ http_node_uri: Optional[str] = None,
10519
+ influxdb_uri: Optional[str] = None,
10520
+ prometheus_remote_read_uri: Optional[str] = None,
10521
+ prometheus_remote_write_uri: Optional[str] = None,
10522
+ uris: Optional[Sequence[str]] = None):
10523
+ """
10524
+ :param str http_cluster_uri: M3DB cluster URI.
10525
+ :param str http_node_uri: M3DB node URI.
10526
+ :param str influxdb_uri: InfluxDB URI.
10527
+ :param str prometheus_remote_read_uri: Prometheus remote read URI.
10528
+ :param str prometheus_remote_write_uri: Prometheus remote write URI.
10529
+ :param Sequence[str] uris: M3DB server URIs.
10530
+ """
10531
+ if http_cluster_uri is not None:
10532
+ pulumi.set(__self__, "http_cluster_uri", http_cluster_uri)
10533
+ if http_node_uri is not None:
10534
+ pulumi.set(__self__, "http_node_uri", http_node_uri)
10535
+ if influxdb_uri is not None:
10536
+ pulumi.set(__self__, "influxdb_uri", influxdb_uri)
10537
+ if prometheus_remote_read_uri is not None:
10538
+ pulumi.set(__self__, "prometheus_remote_read_uri", prometheus_remote_read_uri)
10539
+ if prometheus_remote_write_uri is not None:
10540
+ pulumi.set(__self__, "prometheus_remote_write_uri", prometheus_remote_write_uri)
10541
+ if uris is not None:
10542
+ pulumi.set(__self__, "uris", uris)
10543
+
10544
+ @property
10545
+ @pulumi.getter(name="httpClusterUri")
10546
+ def http_cluster_uri(self) -> Optional[str]:
10547
+ """
10548
+ M3DB cluster URI.
10549
+ """
10550
+ return pulumi.get(self, "http_cluster_uri")
10551
+
10552
+ @property
10553
+ @pulumi.getter(name="httpNodeUri")
10554
+ def http_node_uri(self) -> Optional[str]:
10555
+ """
10556
+ M3DB node URI.
10557
+ """
10558
+ return pulumi.get(self, "http_node_uri")
10559
+
10560
+ @property
10561
+ @pulumi.getter(name="influxdbUri")
10562
+ def influxdb_uri(self) -> Optional[str]:
10563
+ """
10564
+ InfluxDB URI.
10565
+ """
10566
+ return pulumi.get(self, "influxdb_uri")
10567
+
10568
+ @property
10569
+ @pulumi.getter(name="prometheusRemoteReadUri")
10570
+ def prometheus_remote_read_uri(self) -> Optional[str]:
10571
+ """
10572
+ Prometheus remote read URI.
10573
+ """
10574
+ return pulumi.get(self, "prometheus_remote_read_uri")
10575
+
10576
+ @property
10577
+ @pulumi.getter(name="prometheusRemoteWriteUri")
10578
+ def prometheus_remote_write_uri(self) -> Optional[str]:
10579
+ """
10580
+ Prometheus remote write URI.
10581
+ """
10582
+ return pulumi.get(self, "prometheus_remote_write_uri")
10583
+
10584
+ @property
10585
+ @pulumi.getter
10586
+ def uris(self) -> Optional[Sequence[str]]:
10587
+ """
10588
+ M3DB server URIs.
10589
+ """
10590
+ return pulumi.get(self, "uris")
10591
+
10592
+
10241
10593
  @pulumi.output_type
10242
10594
  class M3DbM3dbUserConfig(dict):
10243
10595
  @staticmethod
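`M3DbM3db` does the same for M3DB, adding cluster, node, InfluxDB and Prometheus endpoints. A short sketch exporting two of them, assuming the resource exposes the block as a singular `m3db` output property and using placeholder project/plan values:

```python
import pulumi
import pulumi_aiven as aiven

m3db = aiven.M3Db(
    "example-m3db",
    project="my-project",                # placeholder
    cloud_name="google-europe-west1",    # placeholder
    plan="business-8",                   # placeholder
    service_name="example-m3db",
)

# The endpoints are computed, so they resolve only after the service is running.
pulumi.export("m3db_prom_remote_write_uri", m3db.m3db.prometheus_remote_write_uri)
pulumi.export("m3db_influxdb_uri", m3db.m3db.influxdb_uri)
```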
@@ -10310,9 +10662,9 @@ class M3DbM3dbUserConfig(dict):
10310
10662
  :param Sequence[str] ip_filters: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
10311
10663
  :param 'M3DbM3dbUserConfigLimitsArgs' limits: M3 limits
10312
10664
  :param 'M3DbM3dbUserConfigM3Args' m3: M3 specific configuration options
10313
- :param str m3_version: M3 major version (deprecated, use m3db_version).
10665
+ :param str m3_version: Enum: `1.1`, `1.2`, `1.5`. M3 major version (deprecated, use m3db_version).
10314
10666
  :param bool m3coordinator_enable_graphite_carbon_ingest: Enables access to Graphite Carbon plaintext metrics ingestion. It can be enabled only for services inside VPCs. The metrics are written to aggregated namespaces only.
10315
- :param str m3db_version: M3 major version (the minimum compatible version).
10667
+ :param str m3db_version: Enum: `1.1`, `1.2`, `1.5`. M3 major version (the minimum compatible version).
10316
10668
  :param Sequence['M3DbM3dbUserConfigNamespaceArgs'] namespaces: List of M3 namespaces
10317
10669
  :param 'M3DbM3dbUserConfigPrivateAccessArgs' private_access: Allow access to selected service ports from private networks
10318
10670
  :param str project_to_fork_from: Name of another project to fork a service from. This has effect only when a new service is being created.
@@ -10422,7 +10774,7 @@ class M3DbM3dbUserConfig(dict):
10422
10774
  @pulumi.getter(name="m3Version")
10423
10775
  def m3_version(self) -> Optional[str]:
10424
10776
  """
10425
- M3 major version (deprecated, use m3db_version).
10777
+ Enum: `1.1`, `1.2`, `1.5`. M3 major version (deprecated, use m3db_version).
10426
10778
  """
10427
10779
  return pulumi.get(self, "m3_version")
10428
10780
 
@@ -10438,7 +10790,7 @@ class M3DbM3dbUserConfig(dict):
10438
10790
  @pulumi.getter(name="m3dbVersion")
10439
10791
  def m3db_version(self) -> Optional[str]:
10440
10792
  """
10441
- M3 major version (the minimum compatible version).
10793
+ Enum: `1.1`, `1.2`, `1.5`. M3 major version (the minimum compatible version).
10442
10794
  """
10443
10795
  return pulumi.get(self, "m3db_version")
10444
10796
 
@@ -10738,7 +11090,7 @@ class M3DbM3dbUserConfigNamespace(dict):
10738
11090
  resolution: Optional[str] = None):
10739
11091
  """
10740
11092
  :param str name: The name of the namespace.
10741
- :param str type: The type of aggregation (aggregated/unaggregated).
11093
+ :param str type: Enum: `aggregated`, `unaggregated`. The type of aggregation (aggregated/unaggregated).
10742
11094
  :param 'M3DbM3dbUserConfigNamespaceOptionsArgs' options: Namespace options
10743
11095
  :param str resolution: The resolution for an aggregated namespace.
10744
11096
  """
@@ -10761,7 +11113,7 @@ class M3DbM3dbUserConfigNamespace(dict):
10761
11113
  @pulumi.getter
10762
11114
  def type(self) -> str:
10763
11115
  """
10764
- The type of aggregation (aggregated/unaggregated).
11116
+ Enum: `aggregated`, `unaggregated`. The type of aggregation (aggregated/unaggregated).
10765
11117
  """
10766
11118
  return pulumi.get(self, "type")
10767
11119
 
@@ -11394,6 +11746,190 @@ class MySqlComponent(dict):
11394
11746
  return pulumi.get(self, "usage")
11395
11747
 
11396
11748
 
11749
+ @pulumi.output_type
11750
+ class MySqlMysql(dict):
11751
+ @staticmethod
11752
+ def __key_warning(key: str):
11753
+ suggest = None
11754
+ if key == "replicaUri":
11755
+ suggest = "replica_uri"
11756
+ elif key == "standbyUris":
11757
+ suggest = "standby_uris"
11758
+ elif key == "syncingUris":
11759
+ suggest = "syncing_uris"
11760
+
11761
+ if suggest:
11762
+ pulumi.log.warn(f"Key '{key}' not found in MySqlMysql. Access the value via the '{suggest}' property getter instead.")
11763
+
11764
+ def __getitem__(self, key: str) -> Any:
11765
+ MySqlMysql.__key_warning(key)
11766
+ return super().__getitem__(key)
11767
+
11768
+ def get(self, key: str, default = None) -> Any:
11769
+ MySqlMysql.__key_warning(key)
11770
+ return super().get(key, default)
11771
+
11772
+ def __init__(__self__, *,
11773
+ params: Optional[Sequence['outputs.MySqlMysqlParam']] = None,
11774
+ replica_uri: Optional[str] = None,
11775
+ standby_uris: Optional[Sequence[str]] = None,
11776
+ syncing_uris: Optional[Sequence[str]] = None,
11777
+ uris: Optional[Sequence[str]] = None):
11778
+ """
11779
+ :param Sequence['MySqlMysqlParamArgs'] params: MySQL connection parameters
11780
+ :param str replica_uri: MySQL replica URI for services with a replica
11781
+ :param Sequence[str] standby_uris: MySQL standby connection URIs
11782
+ :param Sequence[str] syncing_uris: MySQL syncing connection URIs
11783
+ :param Sequence[str] uris: MySQL master connection URIs
11784
+ """
11785
+ if params is not None:
11786
+ pulumi.set(__self__, "params", params)
11787
+ if replica_uri is not None:
11788
+ pulumi.set(__self__, "replica_uri", replica_uri)
11789
+ if standby_uris is not None:
11790
+ pulumi.set(__self__, "standby_uris", standby_uris)
11791
+ if syncing_uris is not None:
11792
+ pulumi.set(__self__, "syncing_uris", syncing_uris)
11793
+ if uris is not None:
11794
+ pulumi.set(__self__, "uris", uris)
11795
+
11796
+ @property
11797
+ @pulumi.getter
11798
+ def params(self) -> Optional[Sequence['outputs.MySqlMysqlParam']]:
11799
+ """
11800
+ MySQL connection parameters
11801
+ """
11802
+ return pulumi.get(self, "params")
11803
+
11804
+ @property
11805
+ @pulumi.getter(name="replicaUri")
11806
+ def replica_uri(self) -> Optional[str]:
11807
+ """
11808
+ MySQL replica URI for services with a replica
11809
+ """
11810
+ return pulumi.get(self, "replica_uri")
11811
+
11812
+ @property
11813
+ @pulumi.getter(name="standbyUris")
11814
+ def standby_uris(self) -> Optional[Sequence[str]]:
11815
+ """
11816
+ MySQL standby connection URIs
11817
+ """
11818
+ return pulumi.get(self, "standby_uris")
11819
+
11820
+ @property
11821
+ @pulumi.getter(name="syncingUris")
11822
+ def syncing_uris(self) -> Optional[Sequence[str]]:
11823
+ """
11824
+ MySQL syncing connection URIs
11825
+ """
11826
+ return pulumi.get(self, "syncing_uris")
11827
+
11828
+ @property
11829
+ @pulumi.getter
11830
+ def uris(self) -> Optional[Sequence[str]]:
11831
+ """
11832
+ MySQL master connection URIs
11833
+ """
11834
+ return pulumi.get(self, "uris")
11835
+
11836
+
11837
+ @pulumi.output_type
11838
+ class MySqlMysqlParam(dict):
11839
+ @staticmethod
11840
+ def __key_warning(key: str):
11841
+ suggest = None
11842
+ if key == "databaseName":
11843
+ suggest = "database_name"
11844
+
11845
+ if suggest:
11846
+ pulumi.log.warn(f"Key '{key}' not found in MySqlMysqlParam. Access the value via the '{suggest}' property getter instead.")
11847
+
11848
+ def __getitem__(self, key: str) -> Any:
11849
+ MySqlMysqlParam.__key_warning(key)
11850
+ return super().__getitem__(key)
11851
+
11852
+ def get(self, key: str, default = None) -> Any:
11853
+ MySqlMysqlParam.__key_warning(key)
11854
+ return super().get(key, default)
11855
+
11856
+ def __init__(__self__, *,
11857
+ database_name: Optional[str] = None,
11858
+ host: Optional[str] = None,
11859
+ password: Optional[str] = None,
11860
+ port: Optional[int] = None,
11861
+ sslmode: Optional[str] = None,
11862
+ user: Optional[str] = None):
11863
+ """
11864
+ :param str database_name: Primary MySQL database name
11865
+ :param str host: MySQL host IP or name
11866
+ :param str password: MySQL admin user password
11867
+ :param int port: MySQL port
11868
+ :param str sslmode: MySQL sslmode setting (currently always "require")
11869
+ :param str user: MySQL admin user name
11870
+ """
11871
+ if database_name is not None:
11872
+ pulumi.set(__self__, "database_name", database_name)
11873
+ if host is not None:
11874
+ pulumi.set(__self__, "host", host)
11875
+ if password is not None:
11876
+ pulumi.set(__self__, "password", password)
11877
+ if port is not None:
11878
+ pulumi.set(__self__, "port", port)
11879
+ if sslmode is not None:
11880
+ pulumi.set(__self__, "sslmode", sslmode)
11881
+ if user is not None:
11882
+ pulumi.set(__self__, "user", user)
11883
+
11884
+ @property
11885
+ @pulumi.getter(name="databaseName")
11886
+ def database_name(self) -> Optional[str]:
11887
+ """
11888
+ Primary MySQL database name
11889
+ """
11890
+ return pulumi.get(self, "database_name")
11891
+
11892
+ @property
11893
+ @pulumi.getter
11894
+ def host(self) -> Optional[str]:
11895
+ """
11896
+ MySQL host IP or name
11897
+ """
11898
+ return pulumi.get(self, "host")
11899
+
11900
+ @property
11901
+ @pulumi.getter
11902
+ def password(self) -> Optional[str]:
11903
+ """
11904
+ MySQL admin user password
11905
+ """
11906
+ return pulumi.get(self, "password")
11907
+
11908
+ @property
11909
+ @pulumi.getter
11910
+ def port(self) -> Optional[int]:
11911
+ """
11912
+ MySQL port
11913
+ """
11914
+ return pulumi.get(self, "port")
11915
+
11916
+ @property
11917
+ @pulumi.getter
11918
+ def sslmode(self) -> Optional[str]:
11919
+ """
11920
+ MySQL sslmode setting (currently always "require")
11921
+ """
11922
+ return pulumi.get(self, "sslmode")
11923
+
11924
+ @property
11925
+ @pulumi.getter
11926
+ def user(self) -> Optional[str]:
11927
+ """
11928
+ MySQL admin user name
11929
+ """
11930
+ return pulumi.get(self, "user")
11931
+
11932
+
11397
11933
  @pulumi.output_type
11398
11934
  class MySqlMysqlUserConfig(dict):
11399
11935
  @staticmethod
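With `MySqlMysql` and `MySqlMysqlParam`, a program can assemble a connection string from structured fields instead of parsing URIs. An illustrative example, assuming the block is exposed as the `mysql` output property and reports at least one `params` entry:

```python
import pulumi
import pulumi_aiven as aiven

# Look up an existing service by an import-style ID (placeholder).
mysql = aiven.MySql.get("mysql", "my-project/example-mysql")


def to_dsn(m):
    # Assumes the service reports at least one connection-parameter entry.
    p = m.params[0]
    return f"mysql://{p.user}:{p.password}@{p.host}:{p.port}/{p.database_name}"


# Wrap the DSN so it is not shown in plain text in stack outputs.
pulumi.export("mysql_dsn", pulumi.Output.secret(mysql.mysql.apply(to_dsn)))
```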
@@ -11480,7 +12016,7 @@ class MySqlMysqlUserConfig(dict):
11480
12016
  :param Sequence[str] ip_filters: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
11481
12017
  :param 'MySqlMysqlUserConfigMigrationArgs' migration: Migrate data from existing server
11482
12018
  :param 'MySqlMysqlUserConfigMysqlArgs' mysql: mysql.conf configuration values
11483
- :param str mysql_version: MySQL major version.
12019
+ :param str mysql_version: Enum: `8`. MySQL major version.
11484
12020
  :param 'MySqlMysqlUserConfigPrivateAccessArgs' private_access: Allow access to selected service ports from private networks
11485
12021
  :param 'MySqlMysqlUserConfigPrivatelinkAccessArgs' privatelink_access: Allow access to selected service components through Privatelink
11486
12022
  :param str project_to_fork_from: Name of another project to fork a service from. This has effect only when a new service is being created.
@@ -11626,7 +12162,7 @@ class MySqlMysqlUserConfig(dict):
11626
12162
  @pulumi.getter(name="mysqlVersion")
11627
12163
  def mysql_version(self) -> Optional[str]:
11628
12164
  """
11629
- MySQL major version.
12165
+ Enum: `8`. MySQL major version.
11630
12166
  """
11631
12167
  return pulumi.get(self, "mysql_version")
11632
12168
 
@@ -11758,7 +12294,7 @@ class MySqlMysqlUserConfigMigration(dict):
11758
12294
  :param int port: Port number of the server where to migrate data from.
11759
12295
  :param str dbname: Database name for bootstrapping the initial connection.
11760
12296
  :param str ignore_dbs: Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment).
11761
- :param str method: The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
12297
+ :param str method: Enum: `dump`, `replication`. The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
11762
12298
  :param str password: Password for authentication with the server where to migrate data from.
11763
12299
  :param bool ssl: The server where to migrate data from is secured with SSL. The default value is `true`.
11764
12300
  :param str username: User name for authentication with the server where to migrate data from.
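The migration block now documents `method` as an enum (`dump` or `replication`). Below is a sketch of a MySQL service bootstrapped from an external server, using the `MySqlMysqlUserConfigMigrationArgs` input referenced above; the host, credentials, plan and chosen method are placeholders.

```python
import pulumi_aiven as aiven

mysql = aiven.MySql(
    "migrated-mysql",
    project="my-project",                # placeholder
    cloud_name="google-europe-west1",    # placeholder
    plan="business-4",                   # placeholder
    service_name="migrated-mysql",
    mysql_user_config=aiven.MySqlMysqlUserConfigArgs(
        migration=aiven.MySqlMysqlUserConfigMigrationArgs(
            host="old-db.example.com",   # placeholder source server
            port=3306,
            username="replicator",       # placeholder credentials
            password="change-me",
            ssl=True,
            method="replication",        # Enum: dump, replication
            ignore_dbs="mysql,sys",
        ),
    ),
)
```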
@@ -11814,7 +12350,7 @@ class MySqlMysqlUserConfigMigration(dict):
11814
12350
  @pulumi.getter
11815
12351
  def method(self) -> Optional[str]:
11816
12352
  """
11817
- The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
12353
+ Enum: `dump`, `replication`. The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
11818
12354
  """
11819
12355
  return pulumi.get(self, "method")
11820
12356
 
@@ -11969,7 +12505,7 @@ class MySqlMysqlUserConfigMysql(dict):
11969
12505
  :param int innodb_thread_concurrency: Defines the maximum number of threads permitted inside of InnoDB. Default is 0 (infinite concurrency - no limit).
11970
12506
  :param int innodb_write_io_threads: The number of I/O threads for write operations in InnoDB. Default is 4. Changing this parameter will lead to a restart of the MySQL service.
11971
12507
  :param int interactive_timeout: The number of seconds the server waits for activity on an interactive connection before closing it.
11972
- :param str internal_tmp_mem_storage_engine: The storage engine for in-memory internal temporary tables.
12508
+ :param str internal_tmp_mem_storage_engine: Enum: `TempTable`, `MEMORY`. The storage engine for in-memory internal temporary tables.
11973
12509
  :param float long_query_time: The slow*query*logs work as SQL statements that take more than long*query*time seconds to execute. Default is 10s.
11974
12510
  :param int max_allowed_packet: Size of the largest message in bytes that can be received by the server. Default is 67108864 (64M).
11975
12511
  :param int max_heap_table_size: Limits the size of internal in-memory tables. Also set tmp*table*size. Default is 16777216 (16M).
@@ -12184,7 +12720,7 @@ class MySqlMysqlUserConfigMysql(dict):
12184
12720
  @pulumi.getter(name="internalTmpMemStorageEngine")
12185
12721
  def internal_tmp_mem_storage_engine(self) -> Optional[str]:
12186
12722
  """
12187
- The storage engine for in-memory internal temporary tables.
12723
+ Enum: `TempTable`, `MEMORY`. The storage engine for in-memory internal temporary tables.
12188
12724
  """
12189
12725
  return pulumi.get(self, "internal_tmp_mem_storage_engine")
12190
12726
 
@@ -12636,7 +13172,9 @@ class OpenSearchOpensearch(dict):
12636
13172
  @staticmethod
12637
13173
  def __key_warning(key: str):
12638
13174
  suggest = None
12639
- if key == "opensearchDashboardsUri":
13175
+ if key == "kibanaUri":
13176
+ suggest = "kibana_uri"
13177
+ elif key == "opensearchDashboardsUri":
12640
13178
  suggest = "opensearch_dashboards_uri"
12641
13179
 
12642
13180
  if suggest:
@@ -12651,12 +13189,36 @@ class OpenSearchOpensearch(dict):
12651
13189
  return super().get(key, default)
12652
13190
 
12653
13191
  def __init__(__self__, *,
12654
- opensearch_dashboards_uri: Optional[str] = None):
13192
+ kibana_uri: Optional[str] = None,
13193
+ opensearch_dashboards_uri: Optional[str] = None,
13194
+ password: Optional[str] = None,
13195
+ uris: Optional[Sequence[str]] = None,
13196
+ username: Optional[str] = None):
12655
13197
  """
13198
+ :param str kibana_uri: URI for Kibana dashboard frontend
12656
13199
  :param str opensearch_dashboards_uri: URI for OpenSearch dashboard frontend
13200
+ :param str password: OpenSearch password
13201
+ :param Sequence[str] uris: OpenSearch server URIs.
13202
+ :param str username: OpenSearch username
12657
13203
  """
13204
+ if kibana_uri is not None:
13205
+ pulumi.set(__self__, "kibana_uri", kibana_uri)
12658
13206
  if opensearch_dashboards_uri is not None:
12659
13207
  pulumi.set(__self__, "opensearch_dashboards_uri", opensearch_dashboards_uri)
13208
+ if password is not None:
13209
+ pulumi.set(__self__, "password", password)
13210
+ if uris is not None:
13211
+ pulumi.set(__self__, "uris", uris)
13212
+ if username is not None:
13213
+ pulumi.set(__self__, "username", username)
13214
+
13215
+ @property
13216
+ @pulumi.getter(name="kibanaUri")
13217
+ def kibana_uri(self) -> Optional[str]:
13218
+ """
13219
+ URI for Kibana dashboard frontend
13220
+ """
13221
+ return pulumi.get(self, "kibana_uri")
12660
13222
 
12661
13223
  @property
12662
13224
  @pulumi.getter(name="opensearchDashboardsUri")
@@ -12666,6 +13228,30 @@ class OpenSearchOpensearch(dict):
12666
13228
  """
12667
13229
  return pulumi.get(self, "opensearch_dashboards_uri")
12668
13230
 
13231
+ @property
13232
+ @pulumi.getter
13233
+ def password(self) -> Optional[str]:
13234
+ """
13235
+ OpenSearch password
13236
+ """
13237
+ return pulumi.get(self, "password")
13238
+
13239
+ @property
13240
+ @pulumi.getter
13241
+ def uris(self) -> Optional[Sequence[str]]:
13242
+ """
13243
+ OpenSearch server URIs.
13244
+ """
13245
+ return pulumi.get(self, "uris")
13246
+
13247
+ @property
13248
+ @pulumi.getter
13249
+ def username(self) -> Optional[str]:
13250
+ """
13251
+ OpenSearch username
13252
+ """
13253
+ return pulumi.get(self, "username")
13254
+
12669
13255
 
12670
13256
  @pulumi.output_type
12671
13257
  class OpenSearchOpensearchUserConfig(dict):
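The `OpenSearchOpensearch` block now carries credentials and server URIs in addition to the dashboard endpoints, and `opensearch_version` is documented as an enum. A rough sketch; the `opensearch` output property name and the project/plan values are assumptions, and the password is kept secret when exported.

```python
import pulumi
import pulumi_aiven as aiven

os_service = aiven.OpenSearch(
    "example-os",
    project="my-project",                # placeholder
    cloud_name="google-europe-west1",    # placeholder
    plan="startup-4",                    # placeholder
    service_name="example-os",
    opensearch_user_config=aiven.OpenSearchOpensearchUserConfigArgs(
        opensearch_version="2",          # Enum: 1, 2
    ),
)

pulumi.export("os_uris", os_service.opensearch.uris)
pulumi.export("os_user", os_service.opensearch.username)
pulumi.export("os_password", pulumi.Output.secret(os_service.opensearch.password))
```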
@@ -12762,7 +13348,7 @@ class OpenSearchOpensearchUserConfig(dict):
12762
13348
  :param 'OpenSearchOpensearchUserConfigOpenidArgs' openid: OpenSearch OpenID Connect Configuration
12763
13349
  :param 'OpenSearchOpensearchUserConfigOpensearchArgs' opensearch: OpenSearch settings
12764
13350
  :param 'OpenSearchOpensearchUserConfigOpensearchDashboardsArgs' opensearch_dashboards: OpenSearch Dashboards settings
12765
- :param str opensearch_version: OpenSearch major version.
13351
+ :param str opensearch_version: Enum: `1`, `2`. OpenSearch major version.
12766
13352
  :param 'OpenSearchOpensearchUserConfigPrivateAccessArgs' private_access: Allow access to selected service ports from private networks
12767
13353
  :param 'OpenSearchOpensearchUserConfigPrivatelinkAccessArgs' privatelink_access: Allow access to selected service components through Privatelink
12768
13354
  :param str project_to_fork_from: Name of another project to fork a service from. This has effect only when a new service is being created.
@@ -12931,7 +13517,7 @@ class OpenSearchOpensearchUserConfig(dict):
12931
13517
  @pulumi.getter(name="opensearchVersion")
12932
13518
  def opensearch_version(self) -> Optional[str]:
12933
13519
  """
12934
- OpenSearch major version.
13520
+ Enum: `1`, `2`. OpenSearch major version.
12935
13521
  """
12936
13522
  return pulumi.get(self, "opensearch_version")
12937
13523
 
@@ -13036,7 +13622,7 @@ class OpenSearchOpensearchUserConfigIndexPattern(dict):
13036
13622
  """
13037
13623
  :param int max_index_count: Maximum number of indexes to keep.
13038
13624
  :param str pattern: fnmatch pattern.
13039
- :param str sorting_algorithm: Deletion sorting algorithm. The default value is `creation_date`.
13625
+ :param str sorting_algorithm: Enum: `alphabetical`, `creation_date`. Deletion sorting algorithm. The default value is `creation_date`.
13040
13626
  """
13041
13627
  pulumi.set(__self__, "max_index_count", max_index_count)
13042
13628
  pulumi.set(__self__, "pattern", pattern)
@@ -13063,7 +13649,7 @@ class OpenSearchOpensearchUserConfigIndexPattern(dict):
13063
13649
  @pulumi.getter(name="sortingAlgorithm")
13064
13650
  def sorting_algorithm(self) -> Optional[str]:
13065
13651
  """
13066
- Deletion sorting algorithm. The default value is `creation_date`.
13652
+ Enum: `alphabetical`, `creation_date`. Deletion sorting algorithm. The default value is `creation_date`.
13067
13653
  """
13068
13654
  return pulumi.get(self, "sorting_algorithm")
13069
13655
 
@@ -14037,12 +14623,12 @@ class OpenSearchOpensearchUserConfigOpensearchAuthFailureListenersInternalAuthen
14037
14623
  type: Optional[str] = None):
14038
14624
  """
14039
14625
  :param int allowed_tries: The number of login attempts allowed before login is blocked.
14040
- :param str authentication_backend: internal*authentication*backend*limiting.authentication*backend.
14626
+ :param str authentication_backend: Enum: `internal`. internal*authentication*backend*limiting.authentication*backend.
14041
14627
  :param int block_expiry_seconds: The duration of time that login remains blocked after a failed login.
14042
14628
  :param int max_blocked_clients: internal*authentication*backend*limiting.max*blocked_clients.
14043
14629
  :param int max_tracked_clients: The maximum number of tracked IP addresses that have failed login.
14044
14630
  :param int time_window_seconds: The window of time in which the value for `allowed_tries` is enforced.
14045
- :param str type: internal*authentication*backend_limiting.type.
14631
+ :param str type: Enum: `username`. internal*authentication*backend_limiting.type.
14046
14632
  """
14047
14633
  if allowed_tries is not None:
14048
14634
  pulumi.set(__self__, "allowed_tries", allowed_tries)
@@ -14071,7 +14657,7 @@ class OpenSearchOpensearchUserConfigOpensearchAuthFailureListenersInternalAuthen
14071
14657
  @pulumi.getter(name="authenticationBackend")
14072
14658
  def authentication_backend(self) -> Optional[str]:
14073
14659
  """
14074
- internal*authentication*backend*limiting.authentication*backend.
14660
+ Enum: `internal`. internal*authentication*backend*limiting.authentication*backend.
14075
14661
  """
14076
14662
  return pulumi.get(self, "authentication_backend")
14077
14663
 
@@ -14111,7 +14697,7 @@ class OpenSearchOpensearchUserConfigOpensearchAuthFailureListenersInternalAuthen
14111
14697
  @pulumi.getter
14112
14698
  def type(self) -> Optional[str]:
14113
14699
  """
14114
- internal*authentication*backend_limiting.type.
14700
+ Enum: `username`. internal*authentication*backend_limiting.type.
14115
14701
  """
14116
14702
  return pulumi.get(self, "type")
14117
14703
 
@@ -14156,7 +14742,7 @@ class OpenSearchOpensearchUserConfigOpensearchAuthFailureListenersIpRateLimiting
14156
14742
  :param int max_blocked_clients: The maximum number of blocked IP addresses.
14157
14743
  :param int max_tracked_clients: The maximum number of tracked IP addresses that have failed login.
14158
14744
  :param int time_window_seconds: The window of time in which the value for `allowed_tries` is enforced.
14159
- :param str type: The type of rate limiting.
14745
+ :param str type: Enum: `ip`. The type of rate limiting.
14160
14746
  """
14161
14747
  if allowed_tries is not None:
14162
14748
  pulumi.set(__self__, "allowed_tries", allowed_tries)
@@ -14215,7 +14801,7 @@ class OpenSearchOpensearchUserConfigOpensearchAuthFailureListenersIpRateLimiting
14215
14801
  @pulumi.getter
14216
14802
  def type(self) -> Optional[str]:
14217
14803
  """
14218
- The type of rate limiting.
14804
+ Enum: `ip`. The type of rate limiting.
14219
14805
  """
14220
14806
  return pulumi.get(self, "type")
14221
14807
 
@@ -15077,6 +15663,10 @@ class PgPg(dict):
15077
15663
  suggest = "max_connections"
15078
15664
  elif key == "replicaUri":
15079
15665
  suggest = "replica_uri"
15666
+ elif key == "standbyUris":
15667
+ suggest = "standby_uris"
15668
+ elif key == "syncingUris":
15669
+ suggest = "syncing_uris"
15080
15670
 
15081
15671
  if suggest:
15082
15672
  pulumi.log.warn(f"Key '{key}' not found in PgPg. Access the value via the '{suggest}' property getter instead.")
@@ -15090,32 +15680,46 @@ class PgPg(dict):
15090
15680
  return super().get(key, default)
15091
15681
 
15092
15682
  def __init__(__self__, *,
15683
+ bouncer: Optional[str] = None,
15093
15684
  dbname: Optional[str] = None,
15094
15685
  host: Optional[str] = None,
15095
15686
  max_connections: Optional[int] = None,
15687
+ params: Optional[Sequence['outputs.PgPgParam']] = None,
15096
15688
  password: Optional[str] = None,
15097
15689
  port: Optional[int] = None,
15098
15690
  replica_uri: Optional[str] = None,
15099
15691
  sslmode: Optional[str] = None,
15692
+ standby_uris: Optional[Sequence[str]] = None,
15693
+ syncing_uris: Optional[Sequence[str]] = None,
15100
15694
  uri: Optional[str] = None,
15695
+ uris: Optional[Sequence[str]] = None,
15101
15696
  user: Optional[str] = None):
15102
15697
  """
15698
+ :param str bouncer: Bouncer connection details
15103
15699
  :param str dbname: Primary PostgreSQL database name
15104
15700
  :param str host: PostgreSQL master node host IP or name
15105
15701
  :param int max_connections: Connection limit
15702
+ :param Sequence['PgPgParamArgs'] params: PostgreSQL connection parameters
15106
15703
  :param str password: PostgreSQL admin user password
15107
15704
  :param int port: PostgreSQL port
15108
15705
  :param str replica_uri: PostgreSQL replica URI for services with a replica
15109
15706
  :param str sslmode: PostgreSQL sslmode setting (currently always "require")
15707
+ :param Sequence[str] standby_uris: PostgreSQL standby connection URIs
15708
+ :param Sequence[str] syncing_uris: PostgreSQL syncing connection URIs
15110
15709
  :param str uri: PostgreSQL master connection URI
15710
+ :param Sequence[str] uris: PostgreSQL master connection URIs
15111
15711
  :param str user: PostgreSQL admin user name
15112
15712
  """
15713
+ if bouncer is not None:
15714
+ pulumi.set(__self__, "bouncer", bouncer)
15113
15715
  if dbname is not None:
15114
15716
  pulumi.set(__self__, "dbname", dbname)
15115
15717
  if host is not None:
15116
15718
  pulumi.set(__self__, "host", host)
15117
15719
  if max_connections is not None:
15118
15720
  pulumi.set(__self__, "max_connections", max_connections)
15721
+ if params is not None:
15722
+ pulumi.set(__self__, "params", params)
15119
15723
  if password is not None:
15120
15724
  pulumi.set(__self__, "password", password)
15121
15725
  if port is not None:
@@ -15124,11 +15728,25 @@ class PgPg(dict):
15124
15728
  pulumi.set(__self__, "replica_uri", replica_uri)
15125
15729
  if sslmode is not None:
15126
15730
  pulumi.set(__self__, "sslmode", sslmode)
15731
+ if standby_uris is not None:
15732
+ pulumi.set(__self__, "standby_uris", standby_uris)
15733
+ if syncing_uris is not None:
15734
+ pulumi.set(__self__, "syncing_uris", syncing_uris)
15127
15735
  if uri is not None:
15128
15736
  pulumi.set(__self__, "uri", uri)
15737
+ if uris is not None:
15738
+ pulumi.set(__self__, "uris", uris)
15129
15739
  if user is not None:
15130
15740
  pulumi.set(__self__, "user", user)
15131
15741
 
15742
+ @property
15743
+ @pulumi.getter
15744
+ def bouncer(self) -> Optional[str]:
15745
+ """
15746
+ Bouncer connection details
15747
+ """
15748
+ return pulumi.get(self, "bouncer")
15749
+
15132
15750
  @property
15133
15751
  @pulumi.getter
15134
15752
  def dbname(self) -> Optional[str]:
@@ -15153,6 +15771,14 @@ class PgPg(dict):
15153
15771
  """
15154
15772
  return pulumi.get(self, "max_connections")
15155
15773
 
15774
+ @property
15775
+ @pulumi.getter
15776
+ def params(self) -> Optional[Sequence['outputs.PgPgParam']]:
15777
+ """
15778
+ PostgreSQL connection parameters
15779
+ """
15780
+ return pulumi.get(self, "params")
15781
+
15156
15782
  @property
15157
15783
  @pulumi.getter
15158
15784
  def password(self) -> Optional[str]:
@@ -15185,6 +15811,22 @@ class PgPg(dict):
15185
15811
  """
15186
15812
  return pulumi.get(self, "sslmode")
15187
15813
 
15814
+ @property
15815
+ @pulumi.getter(name="standbyUris")
15816
+ def standby_uris(self) -> Optional[Sequence[str]]:
15817
+ """
15818
+ PostgreSQL standby connection URIs
15819
+ """
15820
+ return pulumi.get(self, "standby_uris")
15821
+
15822
+ @property
15823
+ @pulumi.getter(name="syncingUris")
15824
+ def syncing_uris(self) -> Optional[Sequence[str]]:
15825
+ """
15826
+ PostgreSQL syncing connection URIs
15827
+ """
15828
+ return pulumi.get(self, "syncing_uris")
15829
+
15188
15830
  @property
15189
15831
  @pulumi.getter
15190
15832
  def uri(self) -> Optional[str]:
@@ -15193,6 +15835,110 @@ class PgPg(dict):
15193
15835
  """
15194
15836
  return pulumi.get(self, "uri")
15195
15837
 
15838
+ @property
15839
+ @pulumi.getter
15840
+ def uris(self) -> Optional[Sequence[str]]:
15841
+ """
15842
+ PostgreSQL master connection URIs
15843
+ """
15844
+ return pulumi.get(self, "uris")
15845
+
15846
+ @property
15847
+ @pulumi.getter
15848
+ def user(self) -> Optional[str]:
15849
+ """
15850
+ PostgreSQL admin user name
15851
+ """
15852
+ return pulumi.get(self, "user")
15853
+
15854
+
15855
+ @pulumi.output_type
15856
+ class PgPgParam(dict):
15857
+ @staticmethod
15858
+ def __key_warning(key: str):
15859
+ suggest = None
15860
+ if key == "databaseName":
15861
+ suggest = "database_name"
15862
+
15863
+ if suggest:
15864
+ pulumi.log.warn(f"Key '{key}' not found in PgPgParam. Access the value via the '{suggest}' property getter instead.")
15865
+
15866
+ def __getitem__(self, key: str) -> Any:
15867
+ PgPgParam.__key_warning(key)
15868
+ return super().__getitem__(key)
15869
+
15870
+ def get(self, key: str, default = None) -> Any:
15871
+ PgPgParam.__key_warning(key)
15872
+ return super().get(key, default)
15873
+
15874
+ def __init__(__self__, *,
15875
+ database_name: Optional[str] = None,
15876
+ host: Optional[str] = None,
15877
+ password: Optional[str] = None,
15878
+ port: Optional[int] = None,
15879
+ sslmode: Optional[str] = None,
15880
+ user: Optional[str] = None):
15881
+ """
15882
+ :param str database_name: Primary PostgreSQL database name
15883
+ :param str host: PostgreSQL host IP or name
15884
+ :param str password: PostgreSQL admin user password
15885
+ :param int port: PostgreSQL port
15886
+ :param str sslmode: PostgreSQL sslmode setting (currently always "require")
15887
+ :param str user: PostgreSQL admin user name
15888
+ """
15889
+ if database_name is not None:
15890
+ pulumi.set(__self__, "database_name", database_name)
15891
+ if host is not None:
15892
+ pulumi.set(__self__, "host", host)
15893
+ if password is not None:
15894
+ pulumi.set(__self__, "password", password)
15895
+ if port is not None:
15896
+ pulumi.set(__self__, "port", port)
15897
+ if sslmode is not None:
15898
+ pulumi.set(__self__, "sslmode", sslmode)
15899
+ if user is not None:
15900
+ pulumi.set(__self__, "user", user)
15901
+
15902
+ @property
15903
+ @pulumi.getter(name="databaseName")
15904
+ def database_name(self) -> Optional[str]:
15905
+ """
15906
+ Primary PostgreSQL database name
15907
+ """
15908
+ return pulumi.get(self, "database_name")
15909
+
15910
+ @property
15911
+ @pulumi.getter
15912
+ def host(self) -> Optional[str]:
15913
+ """
15914
+ PostgreSQL host IP or name
15915
+ """
15916
+ return pulumi.get(self, "host")
15917
+
15918
+ @property
15919
+ @pulumi.getter
15920
+ def password(self) -> Optional[str]:
15921
+ """
15922
+ PostgreSQL admin user password
15923
+ """
15924
+ return pulumi.get(self, "password")
15925
+
15926
+ @property
15927
+ @pulumi.getter
15928
+ def port(self) -> Optional[int]:
15929
+ """
15930
+ PostgreSQL port
15931
+ """
15932
+ return pulumi.get(self, "port")
15933
+
15934
+ @property
15935
+ @pulumi.getter
15936
+ def sslmode(self) -> Optional[str]:
15937
+ """
15938
+ PostgreSQL sslmode setting (currently always "require")
15939
+ """
15940
+ return pulumi.get(self, "sslmode")
15941
+
15196
15942
  @property
15197
15943
  @pulumi.getter
15198
15944
  def user(self) -> Optional[str]:
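Taken together, the `PgPg` additions (bouncer, params, standby and syncing URIs) and the enum-documented `PgPgUserConfig` fields look roughly like this in a program. A sketch only: the singular `pg` output property name and the project/plan values are assumptions, and synchronous replication needs a plan that supports it, as noted above.

```python
import pulumi
import pulumi_aiven as aiven

pg = aiven.Pg(
    "example-pg",
    project="my-project",                  # placeholder
    cloud_name="google-europe-west1",      # placeholder
    plan="business-8",                     # placeholder; must support sync replication
    service_name="example-pg",
    pg_user_config=aiven.PgPgUserConfigArgs(
        pg_version="16",                   # Enum: 10, 11, 12, 13, 14, 15, 16
        variant="aiven",                   # Enum: aiven, timescale
        synchronous_replication="quorum",  # Enum: quorum, off
    ),
)

# Export the newly surfaced connection details.
pulumi.export("pg_bouncer", pg.pg.bouncer)
pulumi.export("pg_standby_uris", pg.pg.standby_uris)
```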
@@ -15318,7 +16064,7 @@ class PgPgUserConfig(dict):
15318
16064
  :param bool pg_read_replica: Should the service which is being forked be a read replica (deprecated, use read_replica service integration instead).
15319
16065
  :param str pg_service_to_fork_from: Name of the PG Service from which to fork (deprecated, use service*to*fork_from). This has effect only when a new service is being created.
15320
16066
  :param bool pg_stat_monitor_enable: Enable the pg*stat*monitor extension. Enabling this extension will cause the cluster to be restarted.When this extension is enabled, pg*stat*statements results for utility commands are unreliable. The default value is `false`.
15321
- :param str pg_version: PostgreSQL major version.
16067
+ :param str pg_version: Enum: `10`, `11`, `12`, `13`, `14`, `15`, `16`. PostgreSQL major version.
15322
16068
  :param 'PgPgUserConfigPgauditArgs' pgaudit: System-wide settings for the pgaudit extension
15323
16069
  :param 'PgPgUserConfigPgbouncerArgs' pgbouncer: PGBouncer connection pooling settings
15324
16070
  :param 'PgPgUserConfigPglookoutArgs' pglookout: System-wide settings for pglookout
@@ -15331,9 +16077,9 @@ class PgPgUserConfig(dict):
15331
16077
  :param str service_to_fork_from: Name of another service to fork from. This has effect only when a new service is being created.
15332
16078
  :param float shared_buffers_percentage: Percentage of total RAM that the database server uses for shared memory buffers. Valid range is 20-60 (float), which corresponds to 20% - 60%. This setting adjusts the shared_buffers configuration value.
15333
16079
  :param bool static_ips: Use static public IP addresses.
15334
- :param str synchronous_replication: Synchronous replication type. Note that the service plan also needs to support synchronous replication.
16080
+ :param str synchronous_replication: Enum: `quorum`, `off`. Synchronous replication type. Note that the service plan also needs to support synchronous replication.
15335
16081
  :param 'PgPgUserConfigTimescaledbArgs' timescaledb: System-wide settings for the timescaledb extension
15336
- :param str variant: Variant of the PostgreSQL service, may affect the features that are exposed by default.
16082
+ :param str variant: Enum: `aiven`, `timescale`. Variant of the PostgreSQL service, may affect the features that are exposed by default.
15337
16083
  :param int work_mem: Sets the maximum amount of memory to be used by a query operation (such as a sort or hash table) before writing to temporary disk files, in MB. Default is 1MB + 0.075% of total RAM (up to 32MB).
15338
16084
  """
15339
16085
  if additional_backup_regions is not None:
@@ -15531,7 +16277,7 @@ class PgPgUserConfig(dict):
15531
16277
  @pulumi.getter(name="pgVersion")
15532
16278
  def pg_version(self) -> Optional[str]:
15533
16279
  """
15534
- PostgreSQL major version.
16280
+ Enum: `10`, `11`, `12`, `13`, `14`, `15`, `16`. PostgreSQL major version.
15535
16281
  """
15536
16282
  return pulumi.get(self, "pg_version")
15537
16283
 
@@ -15635,7 +16381,7 @@ class PgPgUserConfig(dict):
15635
16381
  @pulumi.getter(name="synchronousReplication")
15636
16382
  def synchronous_replication(self) -> Optional[str]:
15637
16383
  """
15638
- Synchronous replication type. Note that the service plan also needs to support synchronous replication.
16384
+ Enum: `quorum`, `off`. Synchronous replication type. Note that the service plan also needs to support synchronous replication.
15639
16385
  """
15640
16386
  return pulumi.get(self, "synchronous_replication")
15641
16387
 
@@ -15651,7 +16397,7 @@ class PgPgUserConfig(dict):
15651
16397
  @pulumi.getter
15652
16398
  def variant(self) -> Optional[str]:
15653
16399
  """
15654
- Variant of the PostgreSQL service, may affect the features that are exposed by default.
16400
+ Enum: `aiven`, `timescale`. Variant of the PostgreSQL service, may affect the features that are exposed by default.
15655
16401
  """
15656
16402
  return pulumi.get(self, "variant")
15657
16403
 
@@ -15727,7 +16473,7 @@ class PgPgUserConfigMigration(dict):
15727
16473
  :param int port: Port number of the server where to migrate data from.
15728
16474
  :param str dbname: Database name for bootstrapping the initial connection.
15729
16475
  :param str ignore_dbs: Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment).
15730
- :param str method: The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
16476
+ :param str method: Enum: `dump`, `replication`. The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
15731
16477
  :param str password: Password for authentication with the server where to migrate data from.
15732
16478
  :param bool ssl: The server where to migrate data from is secured with SSL. The default value is `true`.
15733
16479
  :param str username: User name for authentication with the server where to migrate data from.
@@ -15783,7 +16529,7 @@ class PgPgUserConfigMigration(dict):
15783
16529
  @pulumi.getter
15784
16530
  def method(self) -> Optional[str]:
15785
16531
  """
15786
- The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
16532
+ Enum: `dump`, `replication`. The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
15787
16533
  """
15788
16534
  return pulumi.get(self, "method")
15789
16535
 
@@ -15988,12 +16734,12 @@ class PgPgUserConfigPg(dict):
15988
16734
  :param int bgwriter_lru_maxpages: In each round, no more than this many buffers will be written by the background writer. Setting this to zero disables background writing. Default is 100.
15989
16735
  :param float bgwriter_lru_multiplier: The average recent need for new buffers is multiplied by bgwriter*lru*multiplier to arrive at an estimate of the number that will be needed during the next round, (up to bgwriter*lru*maxpages). 1.0 represents a “just in time” policy of writing exactly the number of buffers predicted to be needed. Larger values provide some cushion against spikes in demand, while smaller values intentionally leave writes to be done by server processes. The default is 2.0.
15990
16736
  :param int deadlock_timeout: This is the amount of time, in milliseconds, to wait on a lock before checking to see if there is a deadlock condition.
15991
- :param str default_toast_compression: Specifies the default TOAST compression method for values of compressible columns (the default is lz4).
16737
+ :param str default_toast_compression: Enum: `lz4`, `pglz`. Specifies the default TOAST compression method for values of compressible columns (the default is lz4).
15992
16738
  :param int idle_in_transaction_session_timeout: Time out sessions with open transactions after this number of milliseconds.
15993
16739
  :param bool jit: Controls system-wide use of Just-in-Time Compilation (JIT).
15994
16740
  :param int log_autovacuum_min_duration: Causes each action executed by autovacuum to be logged if it ran for at least the specified number of milliseconds. Setting this to zero logs all autovacuum actions. Minus-one (the default) disables logging autovacuum actions.
15995
- :param str log_error_verbosity: Controls the amount of detail written in the server log for each message that is logged.
15996
- :param str log_line_prefix: Choose from one of the available log-formats. These can support popular log analyzers like pgbadger, pganalyze etc.
16741
+ :param str log_error_verbosity: Enum: `TERSE`, `DEFAULT`, `VERBOSE`. Controls the amount of detail written in the server log for each message that is logged.
16742
+ :param str log_line_prefix: Enum: `'pid=%p,user=%u,db=%d,app=%a,client=%h '`, `'%t [%p]: [%l-1] user=%u,db=%d,app=%a,client=%h '`, `'%m [%p] %q[user=%u,db=%d,app=%a] '`. Choose from one of the available log-formats. These can support popular log analyzers like pgbadger, pganalyze etc.
15997
16743
  :param int log_min_duration_statement: Log statements that take more than this number of milliseconds to run, -1 disables.
15998
16744
  :param int log_temp_files: Log statements for each temporary file created larger than this number of kilobytes, -1 disables.
15999
16745
  :param int max_files_per_process: PostgreSQL maximum number of files that can be open per process.
@@ -16014,13 +16760,13 @@ class PgPgUserConfigPg(dict):
16014
16760
  :param str pg_partman_bgw_dot_role: Controls which role to use for pg_partman's scheduled background tasks.
16015
16761
  :param bool pg_stat_monitor_dot_pgsm_enable_query_plan: Enables or disables query plan monitoring.
16016
16762
  :param int pg_stat_monitor_dot_pgsm_max_buckets: Sets the maximum number of buckets.
16017
- :param str pg_stat_statements_dot_track: Controls which statements are counted. Specify top to track top-level statements (those issued directly by clients), all to also track nested statements (such as statements invoked within functions), or none to disable statement statistics collection. The default value is top.
16763
+ :param str pg_stat_statements_dot_track: Enum: `all`, `top`, `none`. Controls which statements are counted. Specify top to track top-level statements (those issued directly by clients), all to also track nested statements (such as statements invoked within functions), or none to disable statement statistics collection. The default value is top.
16018
16764
  :param int temp_file_limit: PostgreSQL temporary file limit in KiB, -1 for unlimited.
16019
16765
  :param str timezone: PostgreSQL service timezone.
16020
16766
  :param int track_activity_query_size: Specifies the number of bytes reserved to track the currently executing command for each active session.
16021
- :param str track_commit_timestamp: Record commit time of transactions.
16022
- :param str track_functions: Enables tracking of function call counts and time used.
16023
- :param str track_io_timing: Enables timing of database I/O calls. This parameter is off by default, because it will repeatedly query the operating system for the current time, which may cause significant overhead on some platforms.
16767
+ :param str track_commit_timestamp: Enum: `off`, `on`. Record commit time of transactions.
16768
+ :param str track_functions: Enum: `all`, `pl`, `none`. Enables tracking of function call counts and time used.
16769
+ :param str track_io_timing: Enum: `off`, `on`. Enables timing of database I/O calls. This parameter is off by default, because it will repeatedly query the operating system for the current time, which may cause significant overhead on some platforms.
16024
16770
  :param int wal_sender_timeout: Terminate replication connections that are inactive for longer than this amount of time, in milliseconds. Setting this value to zero disables the timeout.
16025
16771
  :param int wal_writer_delay: WAL flush interval in milliseconds. Note that setting this value to lower than the default 200ms may negatively impact performance.
16026
16772
  """
@@ -16239,7 +16985,7 @@ class PgPgUserConfigPg(dict):
16239
16985
  @pulumi.getter(name="defaultToastCompression")
16240
16986
  def default_toast_compression(self) -> Optional[str]:
16241
16987
  """
16242
- Specifies the default TOAST compression method for values of compressible columns (the default is lz4).
16988
+ Enum: `lz4`, `pglz`. Specifies the default TOAST compression method for values of compressible columns (the default is lz4).
16243
16989
  """
16244
16990
  return pulumi.get(self, "default_toast_compression")
16245
16991
 
@@ -16271,7 +17017,7 @@ class PgPgUserConfigPg(dict):
16271
17017
  @pulumi.getter(name="logErrorVerbosity")
16272
17018
  def log_error_verbosity(self) -> Optional[str]:
16273
17019
  """
16274
- Controls the amount of detail written in the server log for each message that is logged.
17020
+ Enum: `TERSE`, `DEFAULT`, `VERBOSE`. Controls the amount of detail written in the server log for each message that is logged.
16275
17021
  """
16276
17022
  return pulumi.get(self, "log_error_verbosity")
16277
17023
 
@@ -16279,7 +17025,7 @@ class PgPgUserConfigPg(dict):
16279
17025
  @pulumi.getter(name="logLinePrefix")
16280
17026
  def log_line_prefix(self) -> Optional[str]:
16281
17027
  """
16282
- Choose from one of the available log-formats. These can support popular log analyzers like pgbadger, pganalyze etc.
17028
+ Enum: `'pid=%p,user=%u,db=%d,app=%a,client=%h '`, `'%t [%p]: [%l-1] user=%u,db=%d,app=%a,client=%h '`, `'%m [%p] %q[user=%u,db=%d,app=%a] '`. Choose from one of the available log-formats. These can support popular log analyzers like pgbadger, pganalyze etc.
16283
17029
  """
16284
17030
  return pulumi.get(self, "log_line_prefix")
16285
17031
 
@@ -16447,7 +17193,7 @@ class PgPgUserConfigPg(dict):
16447
17193
  @pulumi.getter(name="pgStatStatementsDotTrack")
16448
17194
  def pg_stat_statements_dot_track(self) -> Optional[str]:
16449
17195
  """
16450
- Controls which statements are counted. Specify top to track top-level statements (those issued directly by clients), all to also track nested statements (such as statements invoked within functions), or none to disable statement statistics collection. The default value is top.
17196
+ Enum: `all`, `top`, `none`. Controls which statements are counted. Specify top to track top-level statements (those issued directly by clients), all to also track nested statements (such as statements invoked within functions), or none to disable statement statistics collection. The default value is top.
16451
17197
  """
16452
17198
  return pulumi.get(self, "pg_stat_statements_dot_track")
16453
17199
 
@@ -16479,7 +17225,7 @@ class PgPgUserConfigPg(dict):
16479
17225
  @pulumi.getter(name="trackCommitTimestamp")
16480
17226
  def track_commit_timestamp(self) -> Optional[str]:
16481
17227
  """
16482
- Record commit time of transactions.
17228
+ Enum: `off`, `on`. Record commit time of transactions.
16483
17229
  """
16484
17230
  return pulumi.get(self, "track_commit_timestamp")
16485
17231
 
@@ -16487,7 +17233,7 @@ class PgPgUserConfigPg(dict):
16487
17233
  @pulumi.getter(name="trackFunctions")
16488
17234
  def track_functions(self) -> Optional[str]:
16489
17235
  """
16490
- Enables tracking of function call counts and time used.
17236
+ Enum: `all`, `pl`, `none`. Enables tracking of function call counts and time used.
16491
17237
  """
16492
17238
  return pulumi.get(self, "track_functions")
16493
17239
 
@@ -16495,7 +17241,7 @@ class PgPgUserConfigPg(dict):
16495
17241
  @pulumi.getter(name="trackIoTiming")
16496
17242
  def track_io_timing(self) -> Optional[str]:
16497
17243
  """
16498
- Enables timing of database I/O calls. This parameter is off by default, because it will repeatedly query the operating system for the current time, which may cause significant overhead on some platforms.
17244
+ Enum: `off`, `on`. Enables timing of database I/O calls. This parameter is off by default, because it will repeatedly query the operating system for the current time, which may cause significant overhead on some platforms.
16499
17245
  """
16500
17246
  return pulumi.get(self, "track_io_timing")
16501
17247
 
@@ -16681,7 +17427,7 @@ class PgPgUserConfigPgaudit(dict):
16681
17427
  :param bool feature_enabled: Enable pgaudit extension. When enabled, pgaudit extension will be automatically installed.Otherwise, extension will be uninstalled but auditing configurations will be preserved. The default value is `false`.
16682
17428
  :param bool log_catalog: Specifies that session logging should be enabled in the casewhere all relations in a statement are in pg_catalog. The default value is `true`.
16683
17429
  :param bool log_client: Specifies whether log messages will be visible to a client process such as psql. The default value is `false`.
16684
- :param str log_level: Specifies the log level that will be used for log entries. The default value is `log`.
17430
+ :param str log_level: Enum: `debug1`, `debug2`, `debug3`, `debug4`, `debug5`, `info`, `notice`, `warning`, `log`. Specifies the log level that will be used for log entries. The default value is `log`.
16685
17431
  :param int log_max_string_length: Crop parameter representations and whole statements if they exceed this threshold. A (default) value of -1 disables the truncation. The default value is `-1`.
16686
17432
  :param bool log_nested_statements: This GUC allows turning off logging of nested statements, that is, statements that are executed as part of another ExecutorRun. The default value is `true`.
16687
17433
  :param bool log_parameter: Specifies that audit logging should include the parameters that were passed with the statement. The default value is `false`.
@@ -16750,7 +17496,7 @@ class PgPgUserConfigPgaudit(dict):
16750
17496
  @pulumi.getter(name="logLevel")
16751
17497
  def log_level(self) -> Optional[str]:
16752
17498
  """
16753
- Specifies the log level that will be used for log entries. The default value is `log`.
17499
+ Enum: `debug1`, `debug2`, `debug3`, `debug4`, `debug5`, `info`, `notice`, `warning`, `log`. Specifies the log level that will be used for log entries. The default value is `log`.
16754
17500
  """
16755
17501
  return pulumi.get(self, "log_level")
16756
17502
 
@@ -16883,7 +17629,7 @@ class PgPgUserConfigPgbouncer(dict):
16883
17629
  """
16884
17630
  :param int autodb_idle_timeout: If the automatically created database pools have been unused this many seconds, they are freed. If 0 then timeout is disabled. (seconds). The default value is `3600`.
16885
17631
  :param int autodb_max_db_connections: Do not allow more than this many server connections per database (regardless of user). Setting it to 0 means unlimited.
16886
- :param str autodb_pool_mode: PGBouncer pool mode. The default value is `transaction`.
17632
+ :param str autodb_pool_mode: Enum: `session`, `transaction`, `statement`. PGBouncer pool mode. The default value is `transaction`.
16887
17633
  :param int autodb_pool_size: If non-zero then create automatically a pool of that size per user when a pool doesn't exist. The default value is `0`.
16888
17634
  :param Sequence[str] ignore_startup_parameters: List of parameters to ignore when given in startup packet.
16889
17635
  :param int min_pool_size: Add more server connections to pool if below this number. Improves behavior when usual load comes suddenly back after period of total inactivity. The value is effectively capped at the pool size. The default value is `0`.
@@ -16930,7 +17676,7 @@ class PgPgUserConfigPgbouncer(dict):
16930
17676
  @pulumi.getter(name="autodbPoolMode")
16931
17677
  def autodb_pool_mode(self) -> Optional[str]:
16932
17678
  """
16933
- PGBouncer pool mode. The default value is `transaction`.
17679
+ Enum: `session`, `transaction`, `statement`. PGBouncer pool mode. The default value is `transaction`.
16934
17680
  """
16935
17681
  return pulumi.get(self, "autodb_pool_mode")
16936
17682
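The Enum annotations added above spell out the accepted values for the PostgreSQL (`pg`) and PgBouncer settings surfaced through `PgPgUserConfig`. A minimal sketch of wiring a few of them into a `Pg` resource, assuming the matching `*Args` input classes generated in `_inputs.py` and illustrative project, cloud and plan names:

    import pulumi_aiven as aiven

    pg = aiven.Pg(
        "example-pg",
        project="my-project",                        # assumed project name
        cloud_name="google-europe-west1",            # illustrative cloud/plan
        plan="startup-4",
        service_name="example-pg",
        pg_user_config=aiven.PgPgUserConfigArgs(
            pg=aiven.PgPgUserConfigPgArgs(
                pg_stat_statements_dot_track="top",  # Enum: all, top, none
                track_functions="pl",                # Enum: all, pl, none
                track_io_timing="on",                # Enum: off, on
            ),
            pgbouncer=aiven.PgPgUserConfigPgbouncerArgs(
                autodb_pool_mode="transaction",      # Enum: session, transaction, statement
            ),
        ),
    )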
 
@@ -17430,6 +18176,80 @@ class RedisComponent(dict):
17430
18176
  return pulumi.get(self, "usage")
17431
18177
 
17432
18178
 
18179
+ @pulumi.output_type
18180
+ class RedisRedis(dict):
18181
+ @staticmethod
18182
+ def __key_warning(key: str):
18183
+ suggest = None
18184
+ if key == "replicaUri":
18185
+ suggest = "replica_uri"
18186
+ elif key == "slaveUris":
18187
+ suggest = "slave_uris"
18188
+
18189
+ if suggest:
18190
+ pulumi.log.warn(f"Key '{key}' not found in RedisRedis. Access the value via the '{suggest}' property getter instead.")
18191
+
18192
+ def __getitem__(self, key: str) -> Any:
18193
+ RedisRedis.__key_warning(key)
18194
+ return super().__getitem__(key)
18195
+
18196
+ def get(self, key: str, default = None) -> Any:
18197
+ RedisRedis.__key_warning(key)
18198
+ return super().get(key, default)
18199
+
18200
+ def __init__(__self__, *,
18201
+ password: Optional[str] = None,
18202
+ replica_uri: Optional[str] = None,
18203
+ slave_uris: Optional[Sequence[str]] = None,
18204
+ uris: Optional[Sequence[str]] = None):
18205
+ """
18206
+ :param str password: Redis password.
18207
+ :param str replica_uri: Redis replica server URI.
18208
+ :param Sequence[str] slave_uris: Redis slave server URIs.
18209
+ :param Sequence[str] uris: Redis server URIs.
18210
+ """
18211
+ if password is not None:
18212
+ pulumi.set(__self__, "password", password)
18213
+ if replica_uri is not None:
18214
+ pulumi.set(__self__, "replica_uri", replica_uri)
18215
+ if slave_uris is not None:
18216
+ pulumi.set(__self__, "slave_uris", slave_uris)
18217
+ if uris is not None:
18218
+ pulumi.set(__self__, "uris", uris)
18219
+
18220
+ @property
18221
+ @pulumi.getter
18222
+ def password(self) -> Optional[str]:
18223
+ """
18224
+ Redis password.
18225
+ """
18226
+ return pulumi.get(self, "password")
18227
+
18228
+ @property
18229
+ @pulumi.getter(name="replicaUri")
18230
+ def replica_uri(self) -> Optional[str]:
18231
+ """
18232
+ Redis replica server URI.
18233
+ """
18234
+ return pulumi.get(self, "replica_uri")
18235
+
18236
+ @property
18237
+ @pulumi.getter(name="slaveUris")
18238
+ def slave_uris(self) -> Optional[Sequence[str]]:
18239
+ """
18240
+ Redis slave server URIs.
18241
+ """
18242
+ return pulumi.get(self, "slave_uris")
18243
+
18244
+ @property
18245
+ @pulumi.getter
18246
+ def uris(self) -> Optional[Sequence[str]]:
18247
+ """
18248
+ Redis server URIs.
18249
+ """
18250
+ return pulumi.get(self, "uris")
18251
+
18252
+
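`RedisRedis` is the connection-information block added to the Redis service in this release (see `redis.py` in the file list). A hedged sketch of exporting it, assuming the block is exposed on the resource as a `redis` output attribute named after the nested type:

    import pulumi
    import pulumi_aiven as aiven

    redis = aiven.Redis(
        "example-redis",
        project="my-project",              # assumed project name
        cloud_name="google-europe-west1",  # illustrative cloud/plan
        plan="startup-4",
        service_name="example-redis",
    )

    # Attribute name assumed from the nested type RedisRedis; the block carries
    # password, replica_uri, slave_uris and uris.
    pulumi.export("redis_uris", redis.redis.apply(lambda r: r.uris if r else None))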
17433
18253
  @pulumi.output_type
17434
18254
  class RedisRedisUserConfig(dict):
17435
18255
  @staticmethod
@@ -17532,18 +18352,18 @@ class RedisRedisUserConfig(dict):
17532
18352
  :param str project_to_fork_from: Name of another project to fork a service from. This has effect only when a new service is being created.
17533
18353
  :param 'RedisRedisUserConfigPublicAccessArgs' public_access: Allow access to selected service ports from the public Internet
17534
18354
  :param str recovery_basebackup_name: Name of the basebackup to restore in forked service.
17535
- :param str redis_acl_channels_default: Determines default pub/sub channels' ACL for new users if ACL is not supplied. When this option is not defined, all_channels is assumed to keep backward compatibility. This option doesn't affect Redis configuration acl-pubsub-default.
18355
+ :param str redis_acl_channels_default: Enum: `allchannels`, `resetchannels`. Determines default pub/sub channels' ACL for new users if ACL is not supplied. When this option is not defined, all_channels is assumed to keep backward compatibility. This option doesn't affect Redis configuration acl-pubsub-default.
17536
18356
  :param int redis_io_threads: Set Redis IO thread count. Changing this will cause a restart of the Redis service.
17537
18357
  :param int redis_lfu_decay_time: LFU maxmemory-policy counter decay time in minutes. The default value is `1`.
17538
18358
  :param int redis_lfu_log_factor: Counter logarithm factor for volatile-lfu and allkeys-lfu maxmemory-policies. The default value is `10`.
17539
- :param str redis_maxmemory_policy: Redis maxmemory-policy. The default value is `noeviction`.
18359
+ :param str redis_maxmemory_policy: Enum: `noeviction`, `allkeys-lru`, `volatile-lru`, `allkeys-random`, `volatile-random`, `volatile-ttl`, `volatile-lfu`, `allkeys-lfu`. Redis maxmemory-policy. The default value is `noeviction`.
17540
18360
  :param str redis_notify_keyspace_events: Set notify-keyspace-events option.
17541
18361
  :param int redis_number_of_databases: Set number of Redis databases. Changing this will cause a restart of the Redis service.
17542
- :param str redis_persistence: When persistence is 'rdb', Redis does RDB dumps each 10 minutes if any key is changed. Also RDB dumps are done according to backup schedule for backup purposes. When persistence is 'off', no RDB dumps and backups are done, so data can be lost at any moment if service is restarted for any reason, or if service is powered off. Also service can't be forked.
18362
+ :param str redis_persistence: Enum: `off`, `rdb`. When persistence is 'rdb', Redis does RDB dumps each 10 minutes if any key is changed. Also RDB dumps are done according to backup schedule for backup purposes. When persistence is 'off', no RDB dumps and backups are done, so data can be lost at any moment if service is restarted for any reason, or if service is powered off. Also service can't be forked.
17543
18363
  :param int redis_pubsub_client_output_buffer_limit: Set output buffer limit for pub / sub clients in MB. The value is the hard limit, the soft limit is 1/4 of the hard limit. When setting the limit, be mindful of the available memory in the selected service plan.
17544
18364
  :param bool redis_ssl: Require SSL to access Redis. The default value is `true`.
17545
18365
  :param int redis_timeout: Redis idle connection timeout in seconds. The default value is `300`.
17546
- :param str redis_version: Redis major version.
18366
+ :param str redis_version: Enum: `7.0`. Redis major version.
17547
18367
  :param bool service_log: Store logs for the service so that they are available in the HTTP API and console.
17548
18368
  :param str service_to_fork_from: Name of another service to fork from. This has effect only when a new service is being created.
17549
18369
  :param bool static_ips: Use static public IP addresses.
@@ -17686,7 +18506,7 @@ class RedisRedisUserConfig(dict):
17686
18506
  @pulumi.getter(name="redisAclChannelsDefault")
17687
18507
  def redis_acl_channels_default(self) -> Optional[str]:
17688
18508
  """
17689
- Determines default pub/sub channels' ACL for new users if ACL is not supplied. When this option is not defined, all_channels is assumed to keep backward compatibility. This option doesn't affect Redis configuration acl-pubsub-default.
18509
+ Enum: `allchannels`, `resetchannels`. Determines default pub/sub channels' ACL for new users if ACL is not supplied. When this option is not defined, all_channels is assumed to keep backward compatibility. This option doesn't affect Redis configuration acl-pubsub-default.
17690
18510
  """
17691
18511
  return pulumi.get(self, "redis_acl_channels_default")
17692
18512
 
@@ -17718,7 +18538,7 @@ class RedisRedisUserConfig(dict):
17718
18538
  @pulumi.getter(name="redisMaxmemoryPolicy")
17719
18539
  def redis_maxmemory_policy(self) -> Optional[str]:
17720
18540
  """
17721
- Redis maxmemory-policy. The default value is `noeviction`.
18541
+ Enum: `noeviction`, `allkeys-lru`, `volatile-lru`, `allkeys-random`, `volatile-random`, `volatile-ttl`, `volatile-lfu`, `allkeys-lfu`. Redis maxmemory-policy. The default value is `noeviction`.
17722
18542
  """
17723
18543
  return pulumi.get(self, "redis_maxmemory_policy")
17724
18544
 
@@ -17742,7 +18562,7 @@ class RedisRedisUserConfig(dict):
17742
18562
  @pulumi.getter(name="redisPersistence")
17743
18563
  def redis_persistence(self) -> Optional[str]:
17744
18564
  """
17745
- When persistence is 'rdb', Redis does RDB dumps each 10 minutes if any key is changed. Also RDB dumps are done according to backup schedule for backup purposes. When persistence is 'off', no RDB dumps and backups are done, so data can be lost at any moment if service is restarted for any reason, or if service is powered off. Also service can't be forked.
18565
+ Enum: `off`, `rdb`. When persistence is 'rdb', Redis does RDB dumps each 10 minutes if any key is changed. Also RDB dumps are done according to backup schedule for backup purposes. When persistence is 'off', no RDB dumps and backups are done, so data can be lost at any moment if service is restarted for any reason, or if service is powered off. Also service can't be forked.
17746
18566
  """
17747
18567
  return pulumi.get(self, "redis_persistence")
17748
18568
 
@@ -17774,7 +18594,7 @@ class RedisRedisUserConfig(dict):
17774
18594
  @pulumi.getter(name="redisVersion")
17775
18595
  def redis_version(self) -> Optional[str]:
17776
18596
  """
17777
- Redis major version.
18597
+ Enum: `7.0`. Redis major version.
17778
18598
  """
17779
18599
  return pulumi.get(self, "redis_version")
17780
18600
 
@@ -17866,7 +18686,7 @@ class RedisRedisUserConfigMigration(dict):
17866
18686
  :param int port: Port number of the server where to migrate data from.
17867
18687
  :param str dbname: Database name for bootstrapping the initial connection.
17868
18688
  :param str ignore_dbs: Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment).
17869
- :param str method: The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
18689
+ :param str method: Enum: `dump`, `replication`. The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
17870
18690
  :param str password: Password for authentication with the server where to migrate data from.
17871
18691
  :param bool ssl: The server where to migrate data from is secured with SSL. The default value is `true`.
17872
18692
  :param str username: User name for authentication with the server where to migrate data from.
@@ -17922,7 +18742,7 @@ class RedisRedisUserConfigMigration(dict):
17922
18742
  @pulumi.getter
17923
18743
  def method(self) -> Optional[str]:
17924
18744
  """
17925
- The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
18745
+ Enum: `dump`, `replication`. The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
17926
18746
  """
17927
18747
  return pulumi.get(self, "method")
17928
18748
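The `RedisRedisUserConfig` and `RedisRedisUserConfigMigration` docstrings above now enumerate the accepted values for the eviction policy, persistence mode and migration method. A sketch of a Redis service that uses them, assuming the matching `*Args` input classes and a hypothetical source server for the migration block:

    import pulumi_aiven as aiven

    redis = aiven.Redis(
        "migrated-redis",
        project="my-project",                          # assumed project name
        cloud_name="google-europe-west1",              # illustrative cloud/plan
        plan="startup-4",
        service_name="migrated-redis",
        redis_user_config=aiven.RedisRedisUserConfigArgs(
            redis_maxmemory_policy="allkeys-lru",      # one of the Enum values above
            redis_persistence="rdb",                   # Enum: off, rdb
            redis_acl_channels_default="resetchannels",
            migration=aiven.RedisRedisUserConfigMigrationArgs(
                host="legacy-redis.example.com",       # hypothetical source host
                port=6379,
                password="source-password",            # placeholder credential
                method="replication",                  # Enum: dump, replication
                ssl=True,
            ),
        ),
    )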
 
@@ -18211,13 +19031,13 @@ class ServiceIntegrationClickhouseKafkaUserConfigTable(dict):
18211
19031
  skip_broken_messages: Optional[int] = None):
18212
19032
  """
18213
19033
  :param Sequence['ServiceIntegrationClickhouseKafkaUserConfigTableColumnArgs'] columns: Table columns
18214
- :param str data_format: Message data format. The default value is `JSONEachRow`.
19034
+ :param str data_format: Enum: `Avro`, `CSV`, `JSONAsString`, `JSONCompactEachRow`, `JSONCompactStringsEachRow`, `JSONEachRow`, `JSONStringsEachRow`, `MsgPack`, `TSKV`, `TSV`, `TabSeparated`, `RawBLOB`, `AvroConfluent`. Message data format. The default value is `JSONEachRow`.
18215
19035
  :param str group_name: Kafka consumers group. The default value is `clickhouse`.
18216
19036
  :param str name: Name of the table.
18217
19037
  :param Sequence['ServiceIntegrationClickhouseKafkaUserConfigTableTopicArgs'] topics: Kafka topics
18218
- :param str auto_offset_reset: Action to take when there is no initial offset in offset store or the desired offset is out of range. The default value is `earliest`.
18219
- :param str date_time_input_format: Method to read DateTime from text input formats. The default value is `basic`.
18220
- :param str handle_error_mode: How to handle errors for Kafka engine. The default value is `default`.
19038
+ :param str auto_offset_reset: Enum: `smallest`, `earliest`, `beginning`, `largest`, `latest`, `end`. Action to take when there is no initial offset in offset store or the desired offset is out of range. The default value is `earliest`.
19039
+ :param str date_time_input_format: Enum: `basic`, `best_effort`, `best_effort_us`. Method to read DateTime from text input formats. The default value is `basic`.
19040
+ :param str handle_error_mode: Enum: `default`, `stream`. How to handle errors for Kafka engine. The default value is `default`.
18221
19041
  :param int max_block_size: Number of rows collected by poll(s) for flushing data from Kafka. The default value is `0`.
18222
19042
  :param int max_rows_per_message: The maximum number of rows produced in one kafka message for row-based formats. The default value is `1`.
18223
19043
  :param int num_consumers: The number of consumers per table per replica. The default value is `1`.
@@ -18258,7 +19078,7 @@ class ServiceIntegrationClickhouseKafkaUserConfigTable(dict):
18258
19078
  @pulumi.getter(name="dataFormat")
18259
19079
  def data_format(self) -> str:
18260
19080
  """
18261
- Message data format. The default value is `JSONEachRow`.
19081
+ Enum: `Avro`, `CSV`, `JSONAsString`, `JSONCompactEachRow`, `JSONCompactStringsEachRow`, `JSONEachRow`, `JSONStringsEachRow`, `MsgPack`, `TSKV`, `TSV`, `TabSeparated`, `RawBLOB`, `AvroConfluent`. Message data format. The default value is `JSONEachRow`.
18262
19082
  """
18263
19083
  return pulumi.get(self, "data_format")
18264
19084
 
@@ -18290,7 +19110,7 @@ class ServiceIntegrationClickhouseKafkaUserConfigTable(dict):
18290
19110
  @pulumi.getter(name="autoOffsetReset")
18291
19111
  def auto_offset_reset(self) -> Optional[str]:
18292
19112
  """
18293
- Action to take when there is no initial offset in offset store or the desired offset is out of range. The default value is `earliest`.
19113
+ Enum: `smallest`, `earliest`, `beginning`, `largest`, `latest`, `end`. Action to take when there is no initial offset in offset store or the desired offset is out of range. The default value is `earliest`.
18294
19114
  """
18295
19115
  return pulumi.get(self, "auto_offset_reset")
18296
19116
 
@@ -18298,7 +19118,7 @@ class ServiceIntegrationClickhouseKafkaUserConfigTable(dict):
18298
19118
  @pulumi.getter(name="dateTimeInputFormat")
18299
19119
  def date_time_input_format(self) -> Optional[str]:
18300
19120
  """
18301
- Method to read DateTime from text input formats. The default value is `basic`.
19121
+ Enum: `basic`, `best_effort`, `best_effort_us`. Method to read DateTime from text input formats. The default value is `basic`.
18302
19122
  """
18303
19123
  return pulumi.get(self, "date_time_input_format")
18304
19124
 
@@ -18306,7 +19126,7 @@ class ServiceIntegrationClickhouseKafkaUserConfigTable(dict):
18306
19126
  @pulumi.getter(name="handleErrorMode")
18307
19127
  def handle_error_mode(self) -> Optional[str]:
18308
19128
  """
18309
- How to handle errors for Kafka engine. The default value is `default`.
19129
+ Enum: `default`, `stream`. How to handle errors for Kafka engine. The default value is `default`.
18310
19130
  """
18311
19131
  return pulumi.get(self, "handle_error_mode")
18312
19132
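The table options above configure the ClickHouse-to-Kafka integration: `data_format`, `auto_offset_reset`, `date_time_input_format` and `handle_error_mode` are now documented with their accepted values. A sketch of a `clickhouse_kafka` service integration using them, assuming the generated `*Args` input classes and existing Kafka and ClickHouse services with illustrative names:

    import pulumi_aiven as aiven

    clickhouse_kafka = aiven.ServiceIntegration(
        "clickhouse-kafka",
        project="my-project",                           # assumed project name
        integration_type="clickhouse_kafka",
        source_service_name="example-kafka",            # assumed existing services
        destination_service_name="example-clickhouse",
        clickhouse_kafka_user_config=aiven.ServiceIntegrationClickhouseKafkaUserConfigArgs(
            tables=[aiven.ServiceIntegrationClickhouseKafkaUserConfigTableArgs(
                name="events",
                group_name="clickhouse",
                data_format="JSONEachRow",              # Enum listed above
                auto_offset_reset="earliest",           # Enum listed above
                handle_error_mode="stream",             # Enum: default, stream
                columns=[aiven.ServiceIntegrationClickhouseKafkaUserConfigTableColumnArgs(
                    name="payload",
                    type="String",                      # column/topic field names as generated in _inputs.py
                )],
                topics=[aiven.ServiceIntegrationClickhouseKafkaUserConfigTableTopicArgs(
                    name="events",
                )],
            )],
        ),
    )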
 
@@ -18455,6 +19275,8 @@ class ServiceIntegrationDatadogUserConfig(dict):
18455
19275
  suggest = None
18456
19276
  if key == "datadogDbmEnabled":
18457
19277
  suggest = "datadog_dbm_enabled"
19278
+ elif key == "datadogPgbouncerEnabled":
19279
+ suggest = "datadog_pgbouncer_enabled"
18458
19280
  elif key == "datadogTags":
18459
19281
  suggest = "datadog_tags"
18460
19282
  elif key == "excludeConsumerGroups":
@@ -18483,6 +19305,7 @@ class ServiceIntegrationDatadogUserConfig(dict):
18483
19305
 
18484
19306
  def __init__(__self__, *,
18485
19307
  datadog_dbm_enabled: Optional[bool] = None,
19308
+ datadog_pgbouncer_enabled: Optional[bool] = None,
18486
19309
  datadog_tags: Optional[Sequence['outputs.ServiceIntegrationDatadogUserConfigDatadogTag']] = None,
18487
19310
  exclude_consumer_groups: Optional[Sequence[str]] = None,
18488
19311
  exclude_topics: Optional[Sequence[str]] = None,
@@ -18494,6 +19317,7 @@ class ServiceIntegrationDatadogUserConfig(dict):
18494
19317
  redis: Optional['outputs.ServiceIntegrationDatadogUserConfigRedis'] = None):
18495
19318
  """
18496
19319
  :param bool datadog_dbm_enabled: Enable Datadog Database Monitoring.
19320
+ :param bool datadog_pgbouncer_enabled: Enable Datadog PgBouncer Metric Tracking.
18497
19321
  :param Sequence['ServiceIntegrationDatadogUserConfigDatadogTagArgs'] datadog_tags: Custom tags provided by user
18498
19322
  :param Sequence[str] exclude_consumer_groups: List of custom metrics.
18499
19323
  :param Sequence[str] exclude_topics: List of topics to exclude.
@@ -18506,6 +19330,8 @@ class ServiceIntegrationDatadogUserConfig(dict):
18506
19330
  """
18507
19331
  if datadog_dbm_enabled is not None:
18508
19332
  pulumi.set(__self__, "datadog_dbm_enabled", datadog_dbm_enabled)
19333
+ if datadog_pgbouncer_enabled is not None:
19334
+ pulumi.set(__self__, "datadog_pgbouncer_enabled", datadog_pgbouncer_enabled)
18509
19335
  if datadog_tags is not None:
18510
19336
  pulumi.set(__self__, "datadog_tags", datadog_tags)
18511
19337
  if exclude_consumer_groups is not None:
@@ -18533,6 +19359,14 @@ class ServiceIntegrationDatadogUserConfig(dict):
18533
19359
  """
18534
19360
  return pulumi.get(self, "datadog_dbm_enabled")
18535
19361
 
19362
+ @property
19363
+ @pulumi.getter(name="datadogPgbouncerEnabled")
19364
+ def datadog_pgbouncer_enabled(self) -> Optional[bool]:
19365
+ """
19366
+ Enable Datadog PgBouncer Metric Tracking.
19367
+ """
19368
+ return pulumi.get(self, "datadog_pgbouncer_enabled")
19369
+
18536
19370
  @property
18537
19371
  @pulumi.getter(name="datadogTags")
18538
19372
  def datadog_tags(self) -> Optional[Sequence['outputs.ServiceIntegrationDatadogUserConfigDatadogTag']]:
@@ -18794,7 +19628,7 @@ class ServiceIntegrationEndpointDatadogUserConfig(dict):
18794
19628
  :param int kafka_consumer_check_instances: Number of separate instances to fetch kafka consumer statistics with.
18795
19629
  :param int kafka_consumer_stats_timeout: Number of seconds that datadog will wait to get consumer statistics from brokers.
18796
19630
  :param int max_partition_contexts: Maximum number of partition contexts to send.
18797
- :param str site: Datadog intake site. Defaults to datadoghq.com.
19631
+ :param str site: Enum: `datadoghq.com`, `datadoghq.eu`, `us3.datadoghq.com`, `us5.datadoghq.com`, `ddog-gov.com`, `ap1.datadoghq.com`. Datadog intake site. Defaults to datadoghq.com.
18798
19632
  """
18799
19633
  pulumi.set(__self__, "datadog_api_key", datadog_api_key)
18800
19634
  if datadog_tags is not None:
@@ -18862,7 +19696,7 @@ class ServiceIntegrationEndpointDatadogUserConfig(dict):
18862
19696
  @pulumi.getter
18863
19697
  def site(self) -> Optional[str]:
18864
19698
  """
18865
- Datadog intake site. Defaults to datadoghq.com.
19699
+ Enum: `datadoghq.com`, `datadoghq.eu`, `us3.datadoghq.com`, `us5.datadoghq.com`, `ddog-gov.com`, `ap1.datadoghq.com`. Datadog intake site. Defaults to datadoghq.com.
18866
19700
  """
18867
19701
  return pulumi.get(self, "site")
18868
19702
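Two Datadog-related changes meet here: the service-side integration gains `datadog_pgbouncer_enabled`, and the endpoint-side `site` option now lists the accepted intake sites. A sketch that combines both, assuming the generated `*Args` input classes, a placeholder API key and an existing PostgreSQL service:

    import pulumi_aiven as aiven

    datadog_endpoint = aiven.ServiceIntegrationEndpoint(
        "datadog-endpoint",
        project="my-project",                       # assumed project name
        endpoint_name="datadog",
        endpoint_type="datadog",
        datadog_user_config=aiven.ServiceIntegrationEndpointDatadogUserConfigArgs(
            datadog_api_key="<datadog-api-key>",    # placeholder
            site="datadoghq.eu",                    # Enum listed above
        ),
    )

    datadog_metrics = aiven.ServiceIntegration(
        "pg-datadog-metrics",
        project="my-project",
        integration_type="datadog",
        source_service_name="example-pg",           # assumed PostgreSQL service
        destination_endpoint_id=datadog_endpoint.id,
        datadog_user_config=aiven.ServiceIntegrationDatadogUserConfigArgs(
            datadog_dbm_enabled=True,
            datadog_pgbouncer_enabled=True,         # new option in this release
        ),
    )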
 
@@ -19280,14 +20114,14 @@ class ServiceIntegrationEndpointExternalKafkaUserConfig(dict):
19280
20114
  ssl_endpoint_identification_algorithm: Optional[str] = None):
19281
20115
  """
19282
20116
  :param str bootstrap_servers: Bootstrap servers.
19283
- :param str security_protocol: Security protocol.
19284
- :param str sasl_mechanism: SASL mechanism used for connections to the Kafka server.
20117
+ :param str security_protocol: Enum: `PLAINTEXT`, `SSL`, `SASL_PLAINTEXT`, `SASL_SSL`. Security protocol.
20118
+ :param str sasl_mechanism: Enum: `PLAIN`, `SCRAM-SHA-256`, `SCRAM-SHA-512`. SASL mechanism used for connections to the Kafka server.
19285
20119
  :param str sasl_plain_password: Password for SASL PLAIN mechanism in the Kafka server.
19286
20120
  :param str sasl_plain_username: Username for SASL PLAIN mechanism in the Kafka server.
19287
20121
  :param str ssl_ca_cert: PEM-encoded CA certificate.
19288
20122
  :param str ssl_client_cert: PEM-encoded client certificate.
19289
20123
  :param str ssl_client_key: PEM-encoded client key.
19290
- :param str ssl_endpoint_identification_algorithm: The endpoint identification algorithm to validate server hostname using server certificate.
20124
+ :param str ssl_endpoint_identification_algorithm: Enum: `https`, ``. The endpoint identification algorithm to validate server hostname using server certificate.
19291
20125
  """
19292
20126
  pulumi.set(__self__, "bootstrap_servers", bootstrap_servers)
19293
20127
  pulumi.set(__self__, "security_protocol", security_protocol)
@@ -19318,7 +20152,7 @@ class ServiceIntegrationEndpointExternalKafkaUserConfig(dict):
19318
20152
  @pulumi.getter(name="securityProtocol")
19319
20153
  def security_protocol(self) -> str:
19320
20154
  """
19321
- Security protocol.
20155
+ Enum: `PLAINTEXT`, `SSL`, `SASL_PLAINTEXT`, `SASL_SSL`. Security protocol.
19322
20156
  """
19323
20157
  return pulumi.get(self, "security_protocol")
19324
20158
 
@@ -19326,7 +20160,7 @@ class ServiceIntegrationEndpointExternalKafkaUserConfig(dict):
19326
20160
  @pulumi.getter(name="saslMechanism")
19327
20161
  def sasl_mechanism(self) -> Optional[str]:
19328
20162
  """
19329
- SASL mechanism used for connections to the Kafka server.
20163
+ Enum: `PLAIN`, `SCRAM-SHA-256`, `SCRAM-SHA-512`. SASL mechanism used for connections to the Kafka server.
19330
20164
  """
19331
20165
  return pulumi.get(self, "sasl_mechanism")
19332
20166
 
@@ -19374,7 +20208,7 @@ class ServiceIntegrationEndpointExternalKafkaUserConfig(dict):
19374
20208
  @pulumi.getter(name="sslEndpointIdentificationAlgorithm")
19375
20209
  def ssl_endpoint_identification_algorithm(self) -> Optional[str]:
19376
20210
  """
19377
- The endpoint identification algorithm to validate server hostname using server certificate.
20211
+ Enum: `https`, ``. The endpoint identification algorithm to validate server hostname using server certificate.
19378
20212
  """
19379
20213
  return pulumi.get(self, "ssl_endpoint_identification_algorithm")
19380
20214
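The external Kafka endpoint options above now document the accepted `security_protocol`, `sasl_mechanism` and `ssl_endpoint_identification_algorithm` values. A sketch of registering such an endpoint, assuming the generated `*Args` input class and hypothetical brokers and credentials:

    import pulumi_aiven as aiven

    external_kafka = aiven.ServiceIntegrationEndpoint(
        "external-kafka",
        project="my-project",                               # assumed project name
        endpoint_name="external-kafka",
        endpoint_type="external_kafka",
        external_kafka_user_config=aiven.ServiceIntegrationEndpointExternalKafkaUserConfigArgs(
            bootstrap_servers="broker-1.example.com:9092",  # hypothetical brokers
            security_protocol="SASL_SSL",                   # Enum listed above
            sasl_mechanism="SCRAM-SHA-256",                 # Enum listed above
            sasl_plain_username="svc-user",                 # placeholder credentials
            sasl_plain_password="svc-password",
            ssl_endpoint_identification_algorithm="https",  # Enum: https or empty string
        ),
    )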
 
@@ -19508,7 +20342,7 @@ class ServiceIntegrationEndpointExternalPostgresql(dict):
19508
20342
  :param str password: Password.
19509
20343
  :param str ssl_client_certificate: Client certificate.
19510
20344
  :param str ssl_client_key: Client key.
19511
- :param str ssl_mode: SSL Mode. The default value is `verify-full`.
20345
+ :param str ssl_mode: Enum: `disable`, `allow`, `prefer`, `require`, `verify-ca`, `verify-full`. SSL Mode. The default value is `verify-full`.
19512
20346
  :param str ssl_root_cert: SSL Root Cert.
19513
20347
  """
19514
20348
  pulumi.set(__self__, "host", host)
@@ -19587,7 +20421,7 @@ class ServiceIntegrationEndpointExternalPostgresql(dict):
19587
20421
  @pulumi.getter(name="sslMode")
19588
20422
  def ssl_mode(self) -> Optional[str]:
19589
20423
  """
19590
- SSL Mode. The default value is `verify-full`.
20424
+ Enum: `disable`, `allow`, `prefer`, `require`, `verify-ca`, `verify-full`. SSL Mode. The default value is `verify-full`.
19591
20425
  """
19592
20426
  return pulumi.get(self, "ssl_mode")
19593
20427
 
@@ -19627,7 +20461,7 @@ class ServiceIntegrationEndpointExternalSchemaRegistryUserConfig(dict):
19627
20461
  basic_auth_password: Optional[str] = None,
19628
20462
  basic_auth_username: Optional[str] = None):
19629
20463
  """
19630
- :param str authentication: Authentication method.
20464
+ :param str authentication: Enum: `none`, `basic`. Authentication method.
19631
20465
  :param str url: Schema Registry URL.
19632
20466
  :param str basic_auth_password: Basic authentication password.
19633
20467
  :param str basic_auth_username: Basic authentication user name.
@@ -19643,7 +20477,7 @@ class ServiceIntegrationEndpointExternalSchemaRegistryUserConfig(dict):
19643
20477
  @pulumi.getter
19644
20478
  def authentication(self) -> str:
19645
20479
  """
19646
- Authentication method.
20480
+ Enum: `none`, `basic`. Authentication method.
19647
20481
  """
19648
20482
  return pulumi.get(self, "authentication")
19649
20483
 
@@ -19803,7 +20637,7 @@ class ServiceIntegrationEndpointRsyslogUserConfig(dict):
19803
20637
  max_message_size: Optional[int] = None,
19804
20638
  sd: Optional[str] = None):
19805
20639
  """
19806
- :param str format: Message format. The default value is `rfc5424`.
20640
+ :param str format: Enum: `rfc5424`, `rfc3164`, `custom`. Message format. The default value is `rfc5424`.
19807
20641
  :param int port: Rsyslog server port. The default value is `514`.
19808
20642
  :param str server: Rsyslog server IP address or hostname.
19809
20643
  :param bool tls: Require TLS. The default value is `true`.
@@ -19835,7 +20669,7 @@ class ServiceIntegrationEndpointRsyslogUserConfig(dict):
19835
20669
  @pulumi.getter
19836
20670
  def format(self) -> str:
19837
20671
  """
19838
- Message format. The default value is `rfc5424`.
20672
+ Enum: `rfc5424`, `rfc3164`, `custom`. Message format. The default value is `rfc5424`.
19839
20673
  """
19840
20674
  return pulumi.get(self, "format")
19841
20675
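The rsyslog endpoint's `format` option is now documented with its accepted values. A sketch of registering an external rsyslog target, assuming the generated `*Args` input class and a hypothetical log host:

    import pulumi_aiven as aiven

    rsyslog = aiven.ServiceIntegrationEndpoint(
        "central-rsyslog",
        project="my-project",               # assumed project name
        endpoint_name="central-rsyslog",
        endpoint_type="rsyslog",
        rsyslog_user_config=aiven.ServiceIntegrationEndpointRsyslogUserConfigArgs(
            server="logs.example.com",      # hypothetical rsyslog host
            port=6514,
            format="rfc5424",               # Enum: rfc5424, rfc3164, custom
            tls=True,
        ),
    )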
 
@@ -20381,7 +21215,7 @@ class ServiceIntegrationKafkaMirrormakerUserConfigKafkaMirrormaker(dict):
20381
21215
  :param int consumer_fetch_min_bytes: The minimum amount of data the server should return for a fetch request.
20382
21216
  :param int producer_batch_size: The batch size in bytes producer will attempt to collect before publishing to broker.
20383
21217
  :param int producer_buffer_memory: The amount of bytes producer can use for buffering data before publishing to broker.
20384
- :param str producer_compression_type: Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
21218
+ :param str producer_compression_type: Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
20385
21219
  :param int producer_linger_ms: The linger time (ms) for waiting new data to arrive for publishing.
20386
21220
  :param int producer_max_request_size: The maximum request size in bytes.
20387
21221
  """
@@ -20426,7 +21260,7 @@ class ServiceIntegrationKafkaMirrormakerUserConfigKafkaMirrormaker(dict):
20426
21260
  @pulumi.getter(name="producerCompressionType")
20427
21261
  def producer_compression_type(self) -> Optional[str]:
20428
21262
  """
20429
- Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
21263
+ Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
20430
21264
  """
20431
21265
  return pulumi.get(self, "producer_compression_type")
20432
21266
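`producer_compression_type` for the MirrorMaker integration now lists its accepted codecs. A sketch of a `kafka_mirrormaker` integration tuning the producer, assuming the generated `*Args` input classes and existing source and destination services with illustrative names:

    import pulumi_aiven as aiven

    mirrormaker = aiven.ServiceIntegration(
        "kafka-to-mirrormaker",
        project="my-project",                     # assumed project name
        integration_type="kafka_mirrormaker",
        source_service_name="example-kafka",      # assumed existing services
        destination_service_name="example-mirrormaker",
        kafka_mirrormaker_user_config=aiven.ServiceIntegrationKafkaMirrormakerUserConfigArgs(
            kafka_mirrormaker=aiven.ServiceIntegrationKafkaMirrormakerUserConfigKafkaMirrormakerArgs(
                producer_compression_type="zstd",     # Enum: gzip, snappy, lz4, zstd, none
                producer_linger_ms=100,
                producer_batch_size=65536,
            ),
        ),
    )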
 
@@ -21176,6 +22010,24 @@ class GetAccountAuthenticationSamlFieldMappingResult(dict):
21176
22010
  return pulumi.get(self, "real_name")
21177
22011
 
21178
22012
 
22013
+ @pulumi.output_type
22014
+ class GetCassandaCassandraResult(dict):
22015
+ def __init__(__self__, *,
22016
+ uris: Sequence[str]):
22017
+ """
22018
+ :param Sequence[str] uris: Cassandra server URIs.
22019
+ """
22020
+ pulumi.set(__self__, "uris", uris)
22021
+
22022
+ @property
22023
+ @pulumi.getter
22024
+ def uris(self) -> Sequence[str]:
22025
+ """
22026
+ Cassandra server URIs.
22027
+ """
22028
+ return pulumi.get(self, "uris")
22029
+
22030
+
21179
22031
  @pulumi.output_type
21180
22032
  class GetCassandaCassandraUserConfigResult(dict):
21181
22033
  def __init__(__self__, *,
@@ -21200,7 +22052,7 @@ class GetCassandaCassandraUserConfigResult(dict):
21200
22052
  :param int backup_hour: The hour of day (in UTC) when backup for the service is started. New backup is only started if previous backup has already completed.
21201
22053
  :param int backup_minute: The minute of an hour when backup for the service is started. New backup is only started if previous backup has already completed.
21202
22054
  :param 'GetCassandaCassandraUserConfigCassandraArgs' cassandra: Cassandra configuration values
21203
- :param str cassandra_version: Cassandra version.
22055
+ :param str cassandra_version: Enum: `3`, `4`, `4.1`. Cassandra version.
21204
22056
  :param Sequence['GetCassandaCassandraUserConfigIpFilterObjectArgs'] ip_filter_objects: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'
21205
22057
  :param Sequence[str] ip_filter_strings: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
21206
22058
  :param Sequence[str] ip_filters: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
@@ -21285,7 +22137,7 @@ class GetCassandaCassandraUserConfigResult(dict):
21285
22137
  @pulumi.getter(name="cassandraVersion")
21286
22138
  def cassandra_version(self) -> Optional[str]:
21287
22139
  """
21288
- Cassandra version.
22140
+ Enum: `3`, `4`, `4.1`. Cassandra version.
21289
22141
  """
21290
22142
  return pulumi.get(self, "cassandra_version")
21291
22143
 
@@ -21663,6 +22515,24 @@ class GetCassandaTechEmailResult(dict):
21663
22515
  return pulumi.get(self, "email")
21664
22516
 
21665
22517
 
22518
+ @pulumi.output_type
22519
+ class GetCassandraCassandraResult(dict):
22520
+ def __init__(__self__, *,
22521
+ uris: Sequence[str]):
22522
+ """
22523
+ :param Sequence[str] uris: Cassandra server URIs.
22524
+ """
22525
+ pulumi.set(__self__, "uris", uris)
22526
+
22527
+ @property
22528
+ @pulumi.getter
22529
+ def uris(self) -> Sequence[str]:
22530
+ """
22531
+ Cassandra server URIs.
22532
+ """
22533
+ return pulumi.get(self, "uris")
22534
+
22535
+
21666
22536
  @pulumi.output_type
21667
22537
  class GetCassandraCassandraUserConfigResult(dict):
21668
22538
  def __init__(__self__, *,
@@ -21687,7 +22557,7 @@ class GetCassandraCassandraUserConfigResult(dict):
21687
22557
  :param int backup_hour: The hour of day (in UTC) when backup for the service is started. New backup is only started if previous backup has already completed.
21688
22558
  :param int backup_minute: The minute of an hour when backup for the service is started. New backup is only started if previous backup has already completed.
21689
22559
  :param 'GetCassandraCassandraUserConfigCassandraArgs' cassandra: Cassandra configuration values
21690
- :param str cassandra_version: Cassandra version.
22560
+ :param str cassandra_version: Enum: `3`, `4`, `4.1`. Cassandra version.
21691
22561
  :param Sequence['GetCassandraCassandraUserConfigIpFilterObjectArgs'] ip_filter_objects: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'
21692
22562
  :param Sequence[str] ip_filter_strings: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
21693
22563
  :param Sequence[str] ip_filters: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
@@ -21772,7 +22642,7 @@ class GetCassandraCassandraUserConfigResult(dict):
21772
22642
  @pulumi.getter(name="cassandraVersion")
21773
22643
  def cassandra_version(self) -> Optional[str]:
21774
22644
  """
21775
- Cassandra version.
22645
+ Enum: `3`, `4`, `4.1`. Cassandra version.
21776
22646
  """
21777
22647
  return pulumi.get(self, "cassandra_version")
21778
22648
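Both Cassandra data sources (the legacy `get_cassanda` spelling and `get_cassandra`) gain a connection-information block exposing the server URIs. A hedged sketch of reading it, assuming the block surfaces on the lookup result as a `cassandra` attribute named after `GetCassandraCassandraResult`:

    import pulumi
    import pulumi_aiven as aiven

    cassandra = aiven.get_cassandra(
        project="my-project",              # assumed project name
        service_name="example-cassandra",  # assumed existing service
    )

    # Attribute name assumed from the nested result type GetCassandraCassandraResult.
    pulumi.export("cassandra_uris", cassandra.cassandra.uris)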
 
@@ -22150,6 +23020,24 @@ class GetCassandraTechEmailResult(dict):
22150
23020
  return pulumi.get(self, "email")
22151
23021
 
22152
23022
 
23023
+ @pulumi.output_type
23024
+ class GetClickhouseClickhouseResult(dict):
23025
+ def __init__(__self__, *,
23026
+ uris: Sequence[str]):
23027
+ """
23028
+ :param Sequence[str] uris: Clickhouse server URIs.
23029
+ """
23030
+ pulumi.set(__self__, "uris", uris)
23031
+
23032
+ @property
23033
+ @pulumi.getter
23034
+ def uris(self) -> Sequence[str]:
23035
+ """
23036
+ Clickhouse server URIs.
23037
+ """
23038
+ return pulumi.get(self, "uris")
23039
+
23040
+
22153
23041
  @pulumi.output_type
22154
23042
  class GetClickhouseClickhouseUserConfigResult(dict):
22155
23043
  def __init__(__self__, *,
@@ -22756,6 +23644,57 @@ class GetDragonflyComponentResult(dict):
22756
23644
  return pulumi.get(self, "usage")
22757
23645
 
22758
23646
 
23647
+ @pulumi.output_type
23648
+ class GetDragonflyDragonflyResult(dict):
23649
+ def __init__(__self__, *,
23650
+ password: str,
23651
+ replica_uri: str,
23652
+ slave_uris: Sequence[str],
23653
+ uris: Sequence[str]):
23654
+ """
23655
+ :param str password: Dragonfly password.
23656
+ :param str replica_uri: Dragonfly replica server URI.
23657
+ :param Sequence[str] slave_uris: Dragonfly slave server URIs.
23658
+ :param Sequence[str] uris: Dragonfly server URIs.
23659
+ """
23660
+ pulumi.set(__self__, "password", password)
23661
+ pulumi.set(__self__, "replica_uri", replica_uri)
23662
+ pulumi.set(__self__, "slave_uris", slave_uris)
23663
+ pulumi.set(__self__, "uris", uris)
23664
+
23665
+ @property
23666
+ @pulumi.getter
23667
+ def password(self) -> str:
23668
+ """
23669
+ Dragonfly password.
23670
+ """
23671
+ return pulumi.get(self, "password")
23672
+
23673
+ @property
23674
+ @pulumi.getter(name="replicaUri")
23675
+ def replica_uri(self) -> str:
23676
+ """
23677
+ Dragonfly replica server URI.
23678
+ """
23679
+ return pulumi.get(self, "replica_uri")
23680
+
23681
+ @property
23682
+ @pulumi.getter(name="slaveUris")
23683
+ def slave_uris(self) -> Sequence[str]:
23684
+ """
23685
+ Dragonfly slave server URIs.
23686
+ """
23687
+ return pulumi.get(self, "slave_uris")
23688
+
23689
+ @property
23690
+ @pulumi.getter
23691
+ def uris(self) -> Sequence[str]:
23692
+ """
23693
+ Dragonfly server URIs.
23694
+ """
23695
+ return pulumi.get(self, "uris")
23696
+
23697
+
22759
23698
  @pulumi.output_type
22760
23699
  class GetDragonflyDragonflyUserConfigResult(dict):
22761
23700
  def __init__(__self__, *,
@@ -22776,7 +23715,7 @@ class GetDragonflyDragonflyUserConfigResult(dict):
22776
23715
  static_ips: Optional[bool] = None):
22777
23716
  """
22778
23717
  :param bool cache_mode: Evict entries when getting close to maxmemory limit. The default value is `false`.
22779
- :param str dragonfly_persistence: When persistence is 'rdb', Dragonfly does RDB dumps each 10 minutes. Also RDB dumps are done according to backup schedule for backup purposes. When persistence is 'off', no RDB dumps and backups are done, so data can be lost at any moment if service is restarted for any reason, or if service is powered off. Also service can't be forked.
23718
+ :param str dragonfly_persistence: Enum: `off`, `rdb`. When persistence is 'rdb', Dragonfly does RDB dumps each 10 minutes. Also RDB dumps are done according to backup schedule for backup purposes. When persistence is 'off', no RDB dumps and backups are done, so data can be lost at any moment if service is restarted for any reason, or if service is powered off. Also service can't be forked.
22780
23719
  :param bool dragonfly_ssl: Require SSL to access Dragonfly. The default value is `true`.
22781
23720
  :param Sequence['GetDragonflyDragonflyUserConfigIpFilterObjectArgs'] ip_filter_objects: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'
22782
23721
  :param Sequence[str] ip_filter_strings: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
@@ -22834,7 +23773,7 @@ class GetDragonflyDragonflyUserConfigResult(dict):
22834
23773
  @pulumi.getter(name="dragonflyPersistence")
22835
23774
  def dragonfly_persistence(self) -> Optional[str]:
22836
23775
  """
22837
- When persistence is 'rdb', Dragonfly does RDB dumps each 10 minutes. Also RDB dumps are done according to backup schedule for backup purposes. When persistence is 'off', no RDB dumps and backups are done, so data can be lost at any moment if service is restarted for any reason, or if service is powered off. Also service can't be forked.
23776
+ Enum: `off`, `rdb`. When persistence is 'rdb', Dragonfly does RDB dumps each 10 minutes. Also RDB dumps are done according to backup schedule for backup purposes. When persistence is 'off', no RDB dumps and backups are done, so data can be lost at any moment if service is restarted for any reason, or if service is powered off. Also service can't be forked.
22838
23777
  """
22839
23778
  return pulumi.get(self, "dragonfly_persistence")
22840
23779
 
@@ -22992,7 +23931,7 @@ class GetDragonflyDragonflyUserConfigMigrationResult(dict):
22992
23931
  :param int port: Port number of the server where to migrate data from.
22993
23932
  :param str dbname: Database name for bootstrapping the initial connection.
22994
23933
  :param str ignore_dbs: Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment).
22995
- :param str method: The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
23934
+ :param str method: Enum: `dump`, `replication`. The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
22996
23935
  :param str password: Password for authentication with the server where to migrate data from.
22997
23936
  :param bool ssl: The server where to migrate data from is secured with SSL. The default value is `true`.
22998
23937
  :param str username: User name for authentication with the server where to migrate data from.
@@ -23048,7 +23987,7 @@ class GetDragonflyDragonflyUserConfigMigrationResult(dict):
23048
23987
  @pulumi.getter
23049
23988
  def method(self) -> Optional[str]:
23050
23989
  """
23051
- The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
23990
+ Enum: `dump`, `replication`. The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
23052
23991
  """
23053
23992
  return pulumi.get(self, "method")
23054
23993
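The Dragonfly lookup gains the same kind of connection-information block (`GetDragonflyDragonflyResult`), and the persistence and migration options above now carry their accepted values. A hedged sketch of reading the new block, assuming it surfaces on the result as a `dragonfly` attribute:

    import pulumi
    import pulumi_aiven as aiven

    dragonfly = aiven.get_dragonfly(
        project="my-project",              # assumed project name
        service_name="example-dragonfly",  # assumed existing service
    )

    # Attribute name assumed from the nested result type GetDragonflyDragonflyResult.
    pulumi.export("dragonfly_uris", dragonfly.dragonfly.uris)
    pulumi.export("dragonfly_replica_uri", dragonfly.dragonfly.replica_uri)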
 
@@ -23433,7 +24372,7 @@ class GetFlinkFlinkUserConfigResult(dict):
23433
24372
  static_ips: Optional[bool] = None):
23434
24373
  """
23435
24374
  :param str additional_backup_regions: Additional Cloud Regions for Backup Replication.
23436
- :param str flink_version: Flink major version.
24375
+ :param str flink_version: Enum: `1.16`. Flink major version.
23437
24376
  :param Sequence['GetFlinkFlinkUserConfigIpFilterObjectArgs'] ip_filter_objects: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'
23438
24377
  :param Sequence[str] ip_filter_strings: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
23439
24378
  :param Sequence[str] ip_filters: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
@@ -23476,7 +24415,7 @@ class GetFlinkFlinkUserConfigResult(dict):
23476
24415
  @pulumi.getter(name="flinkVersion")
23477
24416
  def flink_version(self) -> Optional[str]:
23478
24417
  """
23479
- Flink major version.
24418
+ Enum: `1.16`. Flink major version.
23480
24419
  """
23481
24420
  return pulumi.get(self, "flink_version")
23482
24421
 
@@ -23772,6 +24711,24 @@ class GetGrafanaComponentResult(dict):
23772
24711
  return pulumi.get(self, "usage")
23773
24712
 
23774
24713
 
24714
+ @pulumi.output_type
24715
+ class GetGrafanaGrafanaResult(dict):
24716
+ def __init__(__self__, *,
24717
+ uris: Sequence[str]):
24718
+ """
24719
+ :param Sequence[str] uris: Grafana server URIs.
24720
+ """
24721
+ pulumi.set(__self__, "uris", uris)
24722
+
24723
+ @property
24724
+ @pulumi.getter
24725
+ def uris(self) -> Sequence[str]:
24726
+ """
24727
+ Grafana server URIs.
24728
+ """
24729
+ return pulumi.get(self, "uris")
24730
+
24731
+
23775
24732
  @pulumi.output_type
23776
24733
  class GetGrafanaGrafanaUserConfigResult(dict):
23777
24734
  def __init__(__self__, *,
@@ -23820,9 +24777,9 @@ class GetGrafanaGrafanaUserConfigResult(dict):
23820
24777
  """
23821
24778
  :param str additional_backup_regions: Additional Cloud Regions for Backup Replication.
23822
24779
  :param bool alerting_enabled: Enable or disable Grafana legacy alerting functionality. This should not be enabled with unified_alerting_enabled.
23823
- :param str alerting_error_or_timeout: Default error or timeout setting for new alerting rules.
24780
+ :param str alerting_error_or_timeout: Enum: `alerting`, `keep_state`. Default error or timeout setting for new alerting rules.
23824
24781
  :param int alerting_max_annotations_to_keep: Max number of alert annotations that Grafana stores. 0 (default) keeps all alert annotations.
23825
- :param str alerting_nodata_or_nullvalues: Default value for 'no data or null values' for new alerting rules.
24782
+ :param str alerting_nodata_or_nullvalues: Enum: `alerting`, `no_data`, `keep_state`, `ok`. Default value for 'no data or null values' for new alerting rules.
23826
24783
  :param bool allow_embedding: Allow embedding Grafana dashboards with iframe/frame/object/embed tags. Disabled by default to limit impact of clickjacking.
23827
24784
  :param 'GetGrafanaGrafanaUserConfigAuthAzureadArgs' auth_azuread: Azure AD OAuth integration
23828
24785
  :param bool auth_basic_enabled: Enable or disable basic authentication form, used by Grafana built-in login.
@@ -23830,7 +24787,7 @@ class GetGrafanaGrafanaUserConfigResult(dict):
23830
24787
  :param 'GetGrafanaGrafanaUserConfigAuthGithubArgs' auth_github: Github Auth integration
23831
24788
  :param 'GetGrafanaGrafanaUserConfigAuthGitlabArgs' auth_gitlab: GitLab Auth integration
23832
24789
  :param 'GetGrafanaGrafanaUserConfigAuthGoogleArgs' auth_google: Google Auth integration
23833
- :param str cookie_samesite: Cookie SameSite attribute: 'strict' prevents sending cookie for cross-site requests, effectively disabling direct linking from other sites to Grafana. 'lax' is the default value.
24790
+ :param str cookie_samesite: Enum: `lax`, `strict`, `none`. Cookie SameSite attribute: 'strict' prevents sending cookie for cross-site requests, effectively disabling direct linking from other sites to Grafana. 'lax' is the default value.
23834
24791
  :param str custom_domain: Serve the web frontend using a custom CNAME pointing to the Aiven DNS name.
23835
24792
  :param bool dashboard_previews_enabled: This feature is new in Grafana 9 and is quite resource intensive. It may cause low-end plans to work more slowly while the dashboard previews are rendering.
23836
24793
  :param str dashboards_min_refresh_interval: Signed sequence of decimal numbers, followed by a unit suffix (ms, s, m, h, d), e.g. 30s, 1h.
@@ -23858,7 +24815,7 @@ class GetGrafanaGrafanaUserConfigResult(dict):
23858
24815
  :param bool static_ips: Use static public IP addresses.
23859
24816
  :param bool unified_alerting_enabled: Enable or disable Grafana unified alerting functionality. By default this is enabled and any legacy alerts will be migrated on upgrade to Grafana 9+. To stay on legacy alerting, set unified_alerting_enabled to false and alerting_enabled to true. See https://grafana.com/docs/grafana/latest/alerting/set-up/migrating-alerts/ for more details.
23860
24817
  :param bool user_auto_assign_org: Auto-assign new users on signup to main organization. Defaults to false.
23861
- :param str user_auto_assign_org_role: Set role for new signups. Defaults to Viewer.
24818
+ :param str user_auto_assign_org_role: Enum: `Viewer`, `Admin`, `Editor`. Set role for new signups. Defaults to Viewer.
23862
24819
  :param bool viewers_can_edit: Users with view-only permission can edit but not save dashboards.
23863
24820
  """
23864
24821
  if additional_backup_regions is not None:
@@ -23966,7 +24923,7 @@ class GetGrafanaGrafanaUserConfigResult(dict):
23966
24923
  @pulumi.getter(name="alertingErrorOrTimeout")
23967
24924
  def alerting_error_or_timeout(self) -> Optional[str]:
23968
24925
  """
23969
- Default error or timeout setting for new alerting rules.
24926
+ Enum: `alerting`, `keep_state`. Default error or timeout setting for new alerting rules.
23970
24927
  """
23971
24928
  return pulumi.get(self, "alerting_error_or_timeout")
23972
24929
 
@@ -23982,7 +24939,7 @@ class GetGrafanaGrafanaUserConfigResult(dict):
23982
24939
  @pulumi.getter(name="alertingNodataOrNullvalues")
23983
24940
  def alerting_nodata_or_nullvalues(self) -> Optional[str]:
23984
24941
  """
23985
- Default value for 'no data or null values' for new alerting rules.
24942
+ Enum: `alerting`, `no_data`, `keep_state`, `ok`. Default value for 'no data or null values' for new alerting rules.
23986
24943
  """
23987
24944
  return pulumi.get(self, "alerting_nodata_or_nullvalues")
23988
24945
 
@@ -24046,7 +25003,7 @@ class GetGrafanaGrafanaUserConfigResult(dict):
24046
25003
  @pulumi.getter(name="cookieSamesite")
24047
25004
  def cookie_samesite(self) -> Optional[str]:
24048
25005
  """
24049
- Cookie SameSite attribute: 'strict' prevents sending cookie for cross-site requests, effectively disabling direct linking from other sites to Grafana. 'lax' is the default value.
25006
+ Enum: `lax`, `strict`, `none`. Cookie SameSite attribute: 'strict' prevents sending cookie for cross-site requests, effectively disabling direct linking from other sites to Grafana. 'lax' is the default value.
24050
25007
  """
24051
25008
  return pulumi.get(self, "cookie_samesite")
24052
25009
 
@@ -24273,7 +25230,7 @@ class GetGrafanaGrafanaUserConfigResult(dict):
24273
25230
  @pulumi.getter(name="userAutoAssignOrgRole")
24274
25231
  def user_auto_assign_org_role(self) -> Optional[str]:
24275
25232
  """
24276
- Set role for new signups. Defaults to Viewer.
25233
+ Enum: `Viewer`, `Admin`, `Editor`. Set role for new signups. Defaults to Viewer.
24277
25234
  """
24278
25235
  return pulumi.get(self, "user_auto_assign_org_role")
24279
25236
 
@@ -24849,7 +25806,7 @@ class GetGrafanaGrafanaUserConfigExternalImageStorageResult(dict):
24849
25806
  """
24850
25807
  :param str access_key: S3 access key. Requires permissions to the S3 bucket for the s3:PutObject and s3:PutObjectAcl actions.
24851
25808
  :param str bucket_url: Bucket URL for S3.
24852
- :param str provider: Provider type.
25809
+ :param str provider: Enum: `s3`. Provider type.
24853
25810
  :param str secret_key: S3 secret key.
24854
25811
  """
24855
25812
  pulumi.set(__self__, "access_key", access_key)
@@ -24877,7 +25834,7 @@ class GetGrafanaGrafanaUserConfigExternalImageStorageResult(dict):
24877
25834
  @pulumi.getter
24878
25835
  def provider(self) -> str:
24879
25836
  """
24880
- Provider type.
25837
+ Enum: `s3`. Provider type.
24881
25838
  """
24882
25839
  return pulumi.get(self, "provider")
24883
25840
 
@@ -24995,7 +25952,7 @@ class GetGrafanaGrafanaUserConfigSmtpServerResult(dict):
24995
25952
  :param str from_name: Name used in outgoing emails, defaults to Grafana.
24996
25953
  :param str password: Password for SMTP authentication.
24997
25954
  :param bool skip_verify: Skip verifying server certificate. Defaults to false.
24998
- :param str starttls_policy: Either OpportunisticStartTLS, MandatoryStartTLS or NoStartTLS. Default is OpportunisticStartTLS.
25955
+ :param str starttls_policy: Enum: `OpportunisticStartTLS`, `MandatoryStartTLS`, `NoStartTLS`. Either OpportunisticStartTLS, MandatoryStartTLS or NoStartTLS. Default is OpportunisticStartTLS.
24999
25956
  :param str username: Username for SMTP authentication.
25000
25957
  """
25001
25958
  pulumi.set(__self__, "from_address", from_address)
@@ -25064,7 +26021,7 @@ class GetGrafanaGrafanaUserConfigSmtpServerResult(dict):
25064
26021
  @pulumi.getter(name="starttlsPolicy")
25065
26022
  def starttls_policy(self) -> Optional[str]:
25066
26023
  """
25067
- Either OpportunisticStartTLS, MandatoryStartTLS or NoStartTLS. Default is OpportunisticStartTLS.
26024
+ Enum: `OpportunisticStartTLS`, `MandatoryStartTLS`, `NoStartTLS`. Either OpportunisticStartTLS, MandatoryStartTLS or NoStartTLS. Default is OpportunisticStartTLS.
25068
26025
  """
25069
26026
  return pulumi.get(self, "starttls_policy")
25070
26027
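The Grafana user-config docstrings above now enumerate the accepted values for the alerting defaults, cookie policy, default signup role and SMTP STARTTLS policy. A sketch of the corresponding resource-side configuration, assuming the matching `Grafana*Args` input classes and a hypothetical SMTP relay:

    import pulumi_aiven as aiven

    grafana = aiven.Grafana(
        "example-grafana",
        project="my-project",                          # assumed project name
        cloud_name="google-europe-west1",              # illustrative cloud/plan
        plan="startup-1",
        service_name="example-grafana",
        grafana_user_config=aiven.GrafanaGrafanaUserConfigArgs(
            alerting_error_or_timeout="keep_state",    # Enum: alerting, keep_state
            cookie_samesite="strict",                  # Enum: lax, strict, none
            user_auto_assign_org=True,
            user_auto_assign_org_role="Editor",        # Enum: Viewer, Admin, Editor
            smtp_server=aiven.GrafanaGrafanaUserConfigSmtpServerArgs(
                host="smtp.example.com",               # hypothetical SMTP relay
                port=587,
                from_address="grafana@example.com",
                starttls_policy="MandatoryStartTLS",   # Enum listed above
            ),
        ),
    )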
 
@@ -25251,11 +26208,20 @@ class GetInfluxDbComponentResult(dict):
25251
26208
  @pulumi.output_type
25252
26209
  class GetInfluxDbInfluxdbResult(dict):
25253
26210
  def __init__(__self__, *,
25254
- database_name: str):
26211
+ database_name: str,
26212
+ password: str,
26213
+ uris: Sequence[str],
26214
+ username: str):
25255
26215
  """
25256
26216
  :param str database_name: Name of the default InfluxDB database
26217
+ :param str password: InfluxDB password
26218
+ :param Sequence[str] uris: InfluxDB server URIs.
26219
+ :param str username: InfluxDB username
25257
26220
  """
25258
26221
  pulumi.set(__self__, "database_name", database_name)
26222
+ pulumi.set(__self__, "password", password)
26223
+ pulumi.set(__self__, "uris", uris)
26224
+ pulumi.set(__self__, "username", username)
25259
26225
 
25260
26226
  @property
25261
26227
  @pulumi.getter(name="databaseName")
@@ -25265,6 +26231,30 @@ class GetInfluxDbInfluxdbResult(dict):
25265
26231
  """
25266
26232
  return pulumi.get(self, "database_name")
25267
26233
 
26234
+ @property
26235
+ @pulumi.getter
26236
+ def password(self) -> str:
26237
+ """
26238
+ InfluxDB password
26239
+ """
26240
+ return pulumi.get(self, "password")
26241
+
26242
+ @property
26243
+ @pulumi.getter
26244
+ def uris(self) -> Sequence[str]:
26245
+ """
26246
+ InfluxDB server URIs.
26247
+ """
26248
+ return pulumi.get(self, "uris")
26249
+
26250
+ @property
26251
+ @pulumi.getter
26252
+ def username(self) -> str:
26253
+ """
26254
+ InfluxDB username
26255
+ """
26256
+ return pulumi.get(self, "username")
26257
+
25268
26258
 
25269
26259
  @pulumi.output_type
25270
26260
  class GetInfluxDbInfluxdbUserConfigResult(dict):
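The InfluxDB lookup's `influxdb` block now carries credentials and server URIs in addition to the default database name. A hedged sketch of reading it, assuming the attribute is named `influxdb` after `GetInfluxDbInfluxdbResult`:

    import pulumi
    import pulumi_aiven as aiven

    influx = aiven.get_influx_db(
        project="my-project",           # assumed project name
        service_name="example-influx",  # assumed existing service
    )

    # Attribute name assumed from the nested result type GetInfluxDbInfluxdbResult.
    pulumi.export("influxdb_database", influx.influxdb.database_name)
    pulumi.export("influxdb_uris", influx.influxdb.uris)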
@@ -26074,10 +27064,10 @@ class GetKafkaConnectKafkaConnectUserConfigKafkaConnectResult(dict):
26074
27064
  scheduled_rebalance_max_delay_ms: Optional[int] = None,
26075
27065
  session_timeout_ms: Optional[int] = None):
26076
27066
  """
26077
- :param str connector_client_config_override_policy: Defines what client configurations can be overridden by the connector. Default is None.
26078
- :param str consumer_auto_offset_reset: What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
27067
+ :param str connector_client_config_override_policy: Enum: `None`, `All`. Defines what client configurations can be overridden by the connector. Default is None.
27068
+ :param str consumer_auto_offset_reset: Enum: `earliest`, `latest`. What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
26079
27069
  :param int consumer_fetch_max_bytes: Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not an absolute maximum.
26080
- :param str consumer_isolation_level: Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
27070
+ :param str consumer_isolation_level: Enum: `read_uncommitted`, `read_committed`. Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
26081
27071
  :param int consumer_max_partition_fetch_bytes: Records are fetched in batches by the consumer. If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress.
26082
27072
  :param int consumer_max_poll_interval_ms: The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).
26083
27073
  :param int consumer_max_poll_records: The maximum number of records returned in a single call to poll() (defaults to 500).
@@ -26085,7 +27075,7 @@ class GetKafkaConnectKafkaConnectUserConfigKafkaConnectResult(dict):
26085
27075
  :param int offset_flush_timeout_ms: Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).
26086
27076
  :param int producer_batch_size: This setting gives the upper bound of the batch size to be sent. If there are fewer than this many bytes accumulated for this partition, the producer will 'linger' for the linger.ms time waiting for more records to show up. A batch size of zero will disable batching entirely (defaults to 16384).
26087
27077
  :param int producer_buffer_memory: The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432).
26088
- :param str producer_compression_type: Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
27078
+ :param str producer_compression_type: Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
26089
27079
  :param int producer_linger_ms: This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting, however if there are fewer than this many bytes accumulated for this partition the producer will 'linger' for the specified time waiting for more records to show up. Defaults to 0.
26090
27080
  :param int producer_max_request_size: This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests.
26091
27081
  :param int scheduled_rebalance_max_delay_ms: The maximum delay that is scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. Defaults to 5 minutes.
@@ -26128,7 +27118,7 @@ class GetKafkaConnectKafkaConnectUserConfigKafkaConnectResult(dict):
26128
27118
  @pulumi.getter(name="connectorClientConfigOverridePolicy")
26129
27119
  def connector_client_config_override_policy(self) -> Optional[str]:
26130
27120
  """
26131
- Defines what client configurations can be overridden by the connector. Default is None.
27121
+ Enum: `None`, `All`. Defines what client configurations can be overridden by the connector. Default is None.
26132
27122
  """
26133
27123
  return pulumi.get(self, "connector_client_config_override_policy")
26134
27124
 
@@ -26136,7 +27126,7 @@ class GetKafkaConnectKafkaConnectUserConfigKafkaConnectResult(dict):
26136
27126
  @pulumi.getter(name="consumerAutoOffsetReset")
26137
27127
  def consumer_auto_offset_reset(self) -> Optional[str]:
26138
27128
  """
26139
- What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
27129
+ Enum: `earliest`, `latest`. What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
26140
27130
  """
26141
27131
  return pulumi.get(self, "consumer_auto_offset_reset")
26142
27132
 
@@ -26152,7 +27142,7 @@ class GetKafkaConnectKafkaConnectUserConfigKafkaConnectResult(dict):
26152
27142
  @pulumi.getter(name="consumerIsolationLevel")
26153
27143
  def consumer_isolation_level(self) -> Optional[str]:
26154
27144
  """
26155
- Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
27145
+ Enum: `read_uncommitted`, `read_committed`. Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
26156
27146
  """
26157
27147
  return pulumi.get(self, "consumer_isolation_level")
26158
27148
 
@@ -26216,7 +27206,7 @@ class GetKafkaConnectKafkaConnectUserConfigKafkaConnectResult(dict):
26216
27206
  @pulumi.getter(name="producerCompressionType")
26217
27207
  def producer_compression_type(self) -> Optional[str]:
26218
27208
  """
26219
- Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
27209
+ Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
26220
27210
  """
26221
27211
  return pulumi.get(self, "producer_compression_type")
26222
27212
 
@@ -26470,19 +27460,22 @@ class GetKafkaKafkaResult(dict):
26470
27460
  access_key: str,
26471
27461
  connect_uri: str,
26472
27462
  rest_uri: str,
26473
- schema_registry_uri: str):
27463
+ schema_registry_uri: str,
27464
+ uris: Sequence[str]):
26474
27465
  """
26475
27466
  :param str access_cert: The Kafka client certificate.
26476
27467
  :param str access_key: The Kafka client certificate key.
26477
27468
  :param str connect_uri: The Kafka Connect URI.
26478
27469
  :param str rest_uri: The Kafka REST URI.
26479
27470
  :param str schema_registry_uri: The Schema Registry URI.
27471
+ :param Sequence[str] uris: Kafka server URIs.
26480
27472
  """
26481
27473
  pulumi.set(__self__, "access_cert", access_cert)
26482
27474
  pulumi.set(__self__, "access_key", access_key)
26483
27475
  pulumi.set(__self__, "connect_uri", connect_uri)
26484
27476
  pulumi.set(__self__, "rest_uri", rest_uri)
26485
27477
  pulumi.set(__self__, "schema_registry_uri", schema_registry_uri)
27478
+ pulumi.set(__self__, "uris", uris)
26486
27479
 
26487
27480
  @property
26488
27481
  @pulumi.getter(name="accessCert")
@@ -26524,6 +27517,14 @@ class GetKafkaKafkaResult(dict):
26524
27517
  """
26525
27518
  return pulumi.get(self, "schema_registry_uri")
26526
27519
 
27520
+ @property
27521
+ @pulumi.getter
27522
+ def uris(self) -> Sequence[str]:
27523
+ """
27524
+ Kafka server URIs.
27525
+ """
27526
+ return pulumi.get(self, "uris")
27527
+
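The new `uris` field complements the existing single-value connection attributes on the Kafka data source. A sketch of reading it; the list-typed `kafkas` accessor on the result is an assumption inferred from the GetKafkaKafkaResult type above, and the project and service names are placeholders:

import pulumi
import pulumi_aiven as aiven

kafka = aiven.get_kafka(project="my-project", service_name="example-kafka")  # placeholders

# service_uri is the long-standing single URI; the new field lists every broker URI.
pulumi.export("kafka_service_uri", kafka.service_uri)
pulumi.export("kafka_broker_uris", kafka.kafkas[0].uris)  # accessor name is an assumption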
26527
27528
 
26528
27529
  @pulumi.output_type
26529
27530
  class GetKafkaKafkaUserConfigResult(dict):
@@ -26564,7 +27565,7 @@ class GetKafkaKafkaUserConfigResult(dict):
26564
27565
  :param bool kafka_rest: Enable Kafka-REST service. The default value is `false`.
26565
27566
  :param bool kafka_rest_authorization: Enable authorization in Kafka-REST service.
26566
27567
  :param 'GetKafkaKafkaUserConfigKafkaRestConfigArgs' kafka_rest_config: Kafka REST configuration
26567
- :param str kafka_version: Kafka major version.
27568
+ :param str kafka_version: Enum: `3.1`, `3.2`, `3.3`, `3.4`, `3.5`, `3.6`, `3.7`. Kafka major version.
26568
27569
  :param 'GetKafkaKafkaUserConfigPrivateAccessArgs' private_access: Allow access to selected service ports from private networks
26569
27570
  :param 'GetKafkaKafkaUserConfigPrivatelinkAccessArgs' privatelink_access: Allow access to selected service components through Privatelink
26570
27571
  :param 'GetKafkaKafkaUserConfigPublicAccessArgs' public_access: Allow access to selected service ports from the public Internet
@@ -26733,7 +27734,7 @@ class GetKafkaKafkaUserConfigResult(dict):
26733
27734
  @pulumi.getter(name="kafkaVersion")
26734
27735
  def kafka_version(self) -> Optional[str]:
26735
27736
  """
26736
- Kafka major version.
27737
+ Enum: `3.1`, `3.2`, `3.3`, `3.4`, `3.5`, `3.6`, `3.7`. Kafka major version.
26737
27738
  """
26738
27739
  return pulumi.get(self, "kafka_version")
26739
27740
 
@@ -26883,7 +27884,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
26883
27884
  transaction_state_log_segment_bytes: Optional[int] = None):
26884
27885
  """
26885
27886
  :param bool auto_create_topics_enable: Enable auto creation of topics.
26886
- :param str compression_type: Specify the final compression type for a given topic. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'uncompressed' which is equivalent to no compression; and 'producer' which means retain the original compression codec set by the producer.
27887
+ :param str compression_type: Enum: `gzip`, `snappy`, `lz4`, `zstd`, `uncompressed`, `producer`. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'uncompressed' which is equivalent to no compression; and 'producer' which means retain the original compression codec set by the producer.
26887
27888
  :param int connections_max_idle_ms: Idle connections timeout: the server socket processor threads close the connections that idle for longer than this.
26888
27889
  :param int default_replication_factor: Replication factor for autocreated topics.
26889
27890
  :param int group_initial_rebalance_delay_ms: The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time.
@@ -26893,7 +27894,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
26893
27894
  :param int log_cleaner_max_compaction_lag_ms: The maximum amount of time message will remain uncompacted. Only applicable for logs that are being compacted.
26894
27895
  :param float log_cleaner_min_cleanable_ratio: Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option.
26895
27896
  :param int log_cleaner_min_compaction_lag_ms: The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.
26896
- :param str log_cleanup_policy: The default cleanup policy for segments beyond the retention window.
27897
+ :param str log_cleanup_policy: Enum: `delete`, `compact`, `compact,delete`. The default cleanup policy for segments beyond the retention window.
26897
27898
  :param int log_flush_interval_messages: The number of messages accumulated on a log partition before messages are flushed to disk.
26898
27899
  :param int log_flush_interval_ms: The maximum time in ms that a message in any topic is kept in memory before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used.
26899
27900
  :param int log_index_interval_bytes: The interval with which Kafka adds an entry to the offset index.
@@ -26902,7 +27903,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
26902
27903
  :param int log_local_retention_ms: The number of milliseconds to keep the local log segments before it gets eligible for deletion. If set to -2, the value of log.retention.ms is used. The effective value should always be less than or equal to log.retention.ms value.
26903
27904
  :param bool log_message_downconversion_enable: This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests.
26904
27905
  :param int log_message_timestamp_difference_max_ms: The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message.
26905
- :param str log_message_timestamp_type: Define whether the timestamp in the message is message create time or log append time.
27906
+ :param str log_message_timestamp_type: Enum: `CreateTime`, `LogAppendTime`. Define whether the timestamp in the message is message create time or log append time.
26906
27907
  :param bool log_preallocate: Should pre allocate file when create new segment?
26907
27908
  :param int log_retention_bytes: The maximum size of the log before deleting messages.
26908
27909
  :param int log_retention_hours: The number of hours to keep a log file before deleting it.
@@ -27034,7 +28035,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
27034
28035
  @pulumi.getter(name="compressionType")
27035
28036
  def compression_type(self) -> Optional[str]:
27036
28037
  """
27037
- Specify the final compression type for a given topic. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'uncompressed' which is equivalent to no compression; and 'producer' which means retain the original compression codec set by the producer.
28038
+ Enum: `gzip`, `snappy`, `lz4`, `zstd`, `uncompressed`, `producer`. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'uncompressed' which is equivalent to no compression; and 'producer' which means retain the original compression codec set by the producer.
27038
28039
  """
27039
28040
  return pulumi.get(self, "compression_type")
27040
28041
 
@@ -27114,7 +28115,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
27114
28115
  @pulumi.getter(name="logCleanupPolicy")
27115
28116
  def log_cleanup_policy(self) -> Optional[str]:
27116
28117
  """
27117
- The default cleanup policy for segments beyond the retention window.
28118
+ Enum: `delete`, `compact`, `compact,delete`. The default cleanup policy for segments beyond the retention window.
27118
28119
  """
27119
28120
  return pulumi.get(self, "log_cleanup_policy")
27120
28121
 
@@ -27186,7 +28187,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
27186
28187
  @pulumi.getter(name="logMessageTimestampType")
27187
28188
  def log_message_timestamp_type(self) -> Optional[str]:
27188
28189
  """
27189
- Define whether the timestamp in the message is message create time or log append time.
28190
+ Enum: `CreateTime`, `LogAppendTime`. Define whether the timestamp in the message is message create time or log append time.
27190
28191
  """
27191
28192
  return pulumi.get(self, "log_message_timestamp_type")
27192
28193
 
@@ -27442,10 +28443,10 @@ class GetKafkaKafkaUserConfigKafkaConnectConfigResult(dict):
27442
28443
  scheduled_rebalance_max_delay_ms: Optional[int] = None,
27443
28444
  session_timeout_ms: Optional[int] = None):
27444
28445
  """
27445
- :param str connector_client_config_override_policy: Defines what client configurations can be overridden by the connector. Default is None.
27446
- :param str consumer_auto_offset_reset: What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
28446
+ :param str connector_client_config_override_policy: Enum: `None`, `All`. Defines what client configurations can be overridden by the connector. Default is None.
28447
+ :param str consumer_auto_offset_reset: Enum: `earliest`, `latest`. What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
27447
28448
  :param int consumer_fetch_max_bytes: Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not a absolute maximum.
27448
- :param str consumer_isolation_level: Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
28449
+ :param str consumer_isolation_level: Enum: `read_uncommitted`, `read_committed`. Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
27449
28450
  :param int consumer_max_partition_fetch_bytes: Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress.
27450
28451
  :param int consumer_max_poll_interval_ms: The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).
27451
28452
  :param int consumer_max_poll_records: The maximum number of records returned in a single call to poll() (defaults to 500).
@@ -27453,7 +28454,7 @@ class GetKafkaKafkaUserConfigKafkaConnectConfigResult(dict):
27453
28454
  :param int offset_flush_timeout_ms: Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).
27454
28455
  :param int producer_batch_size: This setting gives the upper bound of the batch size to be sent. If there are fewer than this many bytes accumulated for this partition, the producer will 'linger' for the linger.ms time waiting for more records to show up. A batch size of zero will disable batching entirely (defaults to 16384).
27455
28456
  :param int producer_buffer_memory: The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432).
27456
- :param str producer_compression_type: Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
28457
+ :param str producer_compression_type: Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
27457
28458
  :param int producer_linger_ms: This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting, however if there are fewer than this many bytes accumulated for this partition the producer will 'linger' for the specified time waiting for more records to show up. Defaults to 0.
27458
28459
  :param int producer_max_request_size: This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests.
27459
28460
  :param int scheduled_rebalance_max_delay_ms: The maximum delay that is scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. Defaults to 5 minutes.
@@ -27496,7 +28497,7 @@ class GetKafkaKafkaUserConfigKafkaConnectConfigResult(dict):
27496
28497
  @pulumi.getter(name="connectorClientConfigOverridePolicy")
27497
28498
  def connector_client_config_override_policy(self) -> Optional[str]:
27498
28499
  """
27499
- Defines what client configurations can be overridden by the connector. Default is None.
28500
+ Enum: `None`, `All`. Defines what client configurations can be overridden by the connector. Default is None.
27500
28501
  """
27501
28502
  return pulumi.get(self, "connector_client_config_override_policy")
27502
28503
 
@@ -27504,7 +28505,7 @@ class GetKafkaKafkaUserConfigKafkaConnectConfigResult(dict):
27504
28505
  @pulumi.getter(name="consumerAutoOffsetReset")
27505
28506
  def consumer_auto_offset_reset(self) -> Optional[str]:
27506
28507
  """
27507
- What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
28508
+ Enum: `earliest`, `latest`. What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
27508
28509
  """
27509
28510
  return pulumi.get(self, "consumer_auto_offset_reset")
27510
28511
 
@@ -27520,7 +28521,7 @@ class GetKafkaKafkaUserConfigKafkaConnectConfigResult(dict):
27520
28521
  @pulumi.getter(name="consumerIsolationLevel")
27521
28522
  def consumer_isolation_level(self) -> Optional[str]:
27522
28523
  """
27523
- Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
28524
+ Enum: `read_uncommitted`, `read_committed`. Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
27524
28525
  """
27525
28526
  return pulumi.get(self, "consumer_isolation_level")
27526
28527
 
@@ -27584,7 +28585,7 @@ class GetKafkaKafkaUserConfigKafkaConnectConfigResult(dict):
27584
28585
  @pulumi.getter(name="producerCompressionType")
27585
28586
  def producer_compression_type(self) -> Optional[str]:
27586
28587
  """
27587
- Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
28588
+ Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
27588
28589
  """
27589
28590
  return pulumi.get(self, "producer_compression_type")
27590
28591
 
@@ -27637,11 +28638,11 @@ class GetKafkaKafkaUserConfigKafkaRestConfigResult(dict):
27637
28638
  """
27638
28639
  :param bool consumer_enable_auto_commit: If true the consumer's offset will be periodically committed to Kafka in the background. The default value is `true`.
27639
28640
  :param int consumer_request_max_bytes: Maximum number of bytes in unencoded message keys and values by a single request. The default value is `67108864`.
27640
- :param int consumer_request_timeout_ms: The maximum total time to wait for messages for a request if the maximum number of messages has not yet been reached. The default value is `1000`.
27641
- :param str name_strategy: Name strategy to use when selecting subject for storing schemas. The default value is `topic_name`.
28641
+ :param int consumer_request_timeout_ms: Enum: `1000`, `15000`, `30000`. The maximum total time to wait for messages for a request if the maximum number of messages has not yet been reached. The default value is `1000`.
28642
+ :param str name_strategy: Enum: `topic_name`, `record_name`, `topic_record_name`. Name strategy to use when selecting subject for storing schemas. The default value is `topic_name`.
27642
28643
  :param bool name_strategy_validation: If true, validate that given schema is registered under expected subject name by the used name strategy when producing messages. The default value is `true`.
27643
- :param str producer_acks: The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to 'all' or '-1', the leader will wait for the full set of in-sync replicas to acknowledge the record. The default value is `1`.
27644
- :param str producer_compression_type: Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
28644
+ :param str producer_acks: Enum: `all`, `-1`, `0`, `1`. The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to 'all' or '-1', the leader will wait for the full set of in-sync replicas to acknowledge the record. The default value is `1`.
28645
+ :param str producer_compression_type: Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
27645
28646
  :param int producer_linger_ms: Wait for up to the given delay to allow batching records together. The default value is `0`.
27646
28647
  :param int producer_max_request_size: The maximum size of a request in bytes. Note that Kafka broker can also cap the record batch size. The default value is `1048576`.
27647
28648
  :param int simpleconsumer_pool_size_max: Maximum number of SimpleConsumers that can be instantiated per broker. The default value is `25`.
@@ -27687,7 +28688,7 @@ class GetKafkaKafkaUserConfigKafkaRestConfigResult(dict):
27687
28688
  @pulumi.getter(name="consumerRequestTimeoutMs")
27688
28689
  def consumer_request_timeout_ms(self) -> Optional[int]:
27689
28690
  """
27690
- The maximum total time to wait for messages for a request if the maximum number of messages has not yet been reached. The default value is `1000`.
28691
+ Enum: `1000`, `15000`, `30000`. The maximum total time to wait for messages for a request if the maximum number of messages has not yet been reached. The default value is `1000`.
27691
28692
  """
27692
28693
  return pulumi.get(self, "consumer_request_timeout_ms")
27693
28694
 
@@ -27695,7 +28696,7 @@ class GetKafkaKafkaUserConfigKafkaRestConfigResult(dict):
27695
28696
  @pulumi.getter(name="nameStrategy")
27696
28697
  def name_strategy(self) -> Optional[str]:
27697
28698
  """
27698
- Name strategy to use when selecting subject for storing schemas. The default value is `topic_name`.
28699
+ Enum: `topic_name`, `record_name`, `topic_record_name`. Name strategy to use when selecting subject for storing schemas. The default value is `topic_name`.
27699
28700
  """
27700
28701
  return pulumi.get(self, "name_strategy")
27701
28702
 
@@ -27711,7 +28712,7 @@ class GetKafkaKafkaUserConfigKafkaRestConfigResult(dict):
27711
28712
  @pulumi.getter(name="producerAcks")
27712
28713
  def producer_acks(self) -> Optional[str]:
27713
28714
  """
27714
- The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to 'all' or '-1', the leader will wait for the full set of in-sync replicas to acknowledge the record. The default value is `1`.
28715
+ Enum: `all`, `-1`, `0`, `1`. The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to 'all' or '-1', the leader will wait for the full set of in-sync replicas to acknowledge the record. The default value is `1`.
27715
28716
  """
27716
28717
  return pulumi.get(self, "producer_acks")
27717
28718
 
@@ -27719,7 +28720,7 @@ class GetKafkaKafkaUserConfigKafkaRestConfigResult(dict):
27719
28720
  @pulumi.getter(name="producerCompressionType")
27720
28721
  def producer_compression_type(self) -> Optional[str]:
27721
28722
  """
27722
- Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
28723
+ Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
27723
28724
  """
27724
28725
  return pulumi.get(self, "producer_compression_type")
27725
28726
 
@@ -29044,6 +30045,35 @@ class GetM3AggregatorComponentResult(dict):
29044
30045
  return pulumi.get(self, "usage")
29045
30046
 
29046
30047
 
30048
+ @pulumi.output_type
30049
+ class GetM3AggregatorM3aggregatorResult(dict):
30050
+ def __init__(__self__, *,
30051
+ aggregator_http_uri: str,
30052
+ uris: Sequence[str]):
30053
+ """
30054
+ :param str aggregator_http_uri: M3 Aggregator HTTP URI.
30055
+ :param Sequence[str] uris: M3 Aggregator server URIs.
30056
+ """
30057
+ pulumi.set(__self__, "aggregator_http_uri", aggregator_http_uri)
30058
+ pulumi.set(__self__, "uris", uris)
30059
+
30060
+ @property
30061
+ @pulumi.getter(name="aggregatorHttpUri")
30062
+ def aggregator_http_uri(self) -> str:
30063
+ """
30064
+ M3 Aggregator HTTP URI.
30065
+ """
30066
+ return pulumi.get(self, "aggregator_http_uri")
30067
+
30068
+ @property
30069
+ @pulumi.getter
30070
+ def uris(self) -> Sequence[str]:
30071
+ """
30072
+ M3 Aggregator server URIs.
30073
+ """
30074
+ return pulumi.get(self, "uris")
30075
+
30076
+
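The new GetM3AggregatorM3aggregatorResult block surfaces the aggregator endpoints directly on the data source. A sketch of consuming it, assuming the nested block is exposed as a list-typed `m3aggregators` property and using placeholder project and service names:

import pulumi
import pulumi_aiven as aiven

agg = aiven.get_m3_aggregator(project="my-project", service_name="example-m3a")  # placeholders

# The `m3aggregators` property name is an assumption based on the result type above.
pulumi.export("m3a_http_uri", agg.m3aggregators[0].aggregator_http_uri)
pulumi.export("m3a_uris", agg.m3aggregators[0].uris)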
29047
30077
  @pulumi.output_type
29048
30078
  class GetM3AggregatorM3aggregatorUserConfigResult(dict):
29049
30079
  def __init__(__self__, *,
@@ -29060,8 +30090,8 @@ class GetM3AggregatorM3aggregatorUserConfigResult(dict):
29060
30090
  :param Sequence['GetM3AggregatorM3aggregatorUserConfigIpFilterObjectArgs'] ip_filter_objects: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'
29061
30091
  :param Sequence[str] ip_filter_strings: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
29062
30092
  :param Sequence[str] ip_filters: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
29063
- :param str m3_version: M3 major version (deprecated, use m3aggregator_version).
29064
- :param str m3aggregator_version: M3 major version (the minimum compatible version).
30093
+ :param str m3_version: Enum: `1.1`, `1.2`, `1.5`. M3 major version (deprecated, use m3aggregator_version).
30094
+ :param str m3aggregator_version: Enum: `1.1`, `1.2`, `1.5`. M3 major version (the minimum compatible version).
29065
30095
  :param bool service_log: Store logs for the service so that they are available in the HTTP API and console.
29066
30096
  :param bool static_ips: Use static public IP addresses.
29067
30097
  """
@@ -29121,7 +30151,7 @@ class GetM3AggregatorM3aggregatorUserConfigResult(dict):
29121
30151
  @pulumi.getter(name="m3Version")
29122
30152
  def m3_version(self) -> Optional[str]:
29123
30153
  """
29124
- M3 major version (deprecated, use m3aggregator_version).
30154
+ Enum: `1.1`, `1.2`, `1.5`. M3 major version (deprecated, use m3aggregator_version).
29125
30155
  """
29126
30156
  return pulumi.get(self, "m3_version")
29127
30157
 
@@ -29129,7 +30159,7 @@ class GetM3AggregatorM3aggregatorUserConfigResult(dict):
29129
30159
  @pulumi.getter(name="m3aggregatorVersion")
29130
30160
  def m3aggregator_version(self) -> Optional[str]:
29131
30161
  """
29132
- M3 major version (the minimum compatible version).
30162
+ Enum: `1.1`, `1.2`, `1.5`. M3 major version (the minimum compatible version).
29133
30163
  """
29134
30164
  return pulumi.get(self, "m3aggregator_version")
29135
30165
 
@@ -29351,6 +30381,79 @@ class GetM3DbComponentResult(dict):
29351
30381
  return pulumi.get(self, "usage")
29352
30382
 
29353
30383
 
30384
+ @pulumi.output_type
30385
+ class GetM3DbM3dbResult(dict):
30386
+ def __init__(__self__, *,
30387
+ http_cluster_uri: str,
30388
+ http_node_uri: str,
30389
+ influxdb_uri: str,
30390
+ prometheus_remote_read_uri: str,
30391
+ prometheus_remote_write_uri: str,
30392
+ uris: Sequence[str]):
30393
+ """
30394
+ :param str http_cluster_uri: M3DB cluster URI.
30395
+ :param str http_node_uri: M3DB node URI.
30396
+ :param str influxdb_uri: InfluxDB URI.
30397
+ :param str prometheus_remote_read_uri: Prometheus remote read URI.
30398
+ :param str prometheus_remote_write_uri: Prometheus remote write URI.
30399
+ :param Sequence[str] uris: M3DB server URIs.
30400
+ """
30401
+ pulumi.set(__self__, "http_cluster_uri", http_cluster_uri)
30402
+ pulumi.set(__self__, "http_node_uri", http_node_uri)
30403
+ pulumi.set(__self__, "influxdb_uri", influxdb_uri)
30404
+ pulumi.set(__self__, "prometheus_remote_read_uri", prometheus_remote_read_uri)
30405
+ pulumi.set(__self__, "prometheus_remote_write_uri", prometheus_remote_write_uri)
30406
+ pulumi.set(__self__, "uris", uris)
30407
+
30408
+ @property
30409
+ @pulumi.getter(name="httpClusterUri")
30410
+ def http_cluster_uri(self) -> str:
30411
+ """
30412
+ M3DB cluster URI.
30413
+ """
30414
+ return pulumi.get(self, "http_cluster_uri")
30415
+
30416
+ @property
30417
+ @pulumi.getter(name="httpNodeUri")
30418
+ def http_node_uri(self) -> str:
30419
+ """
30420
+ M3DB node URI.
30421
+ """
30422
+ return pulumi.get(self, "http_node_uri")
30423
+
30424
+ @property
30425
+ @pulumi.getter(name="influxdbUri")
30426
+ def influxdb_uri(self) -> str:
30427
+ """
30428
+ InfluxDB URI.
30429
+ """
30430
+ return pulumi.get(self, "influxdb_uri")
30431
+
30432
+ @property
30433
+ @pulumi.getter(name="prometheusRemoteReadUri")
30434
+ def prometheus_remote_read_uri(self) -> str:
30435
+ """
30436
+ Prometheus remote read URI.
30437
+ """
30438
+ return pulumi.get(self, "prometheus_remote_read_uri")
30439
+
30440
+ @property
30441
+ @pulumi.getter(name="prometheusRemoteWriteUri")
30442
+ def prometheus_remote_write_uri(self) -> str:
30443
+ """
30444
+ Prometheus remote write URI.
30445
+ """
30446
+ return pulumi.get(self, "prometheus_remote_write_uri")
30447
+
30448
+ @property
30449
+ @pulumi.getter
30450
+ def uris(self) -> Sequence[str]:
30451
+ """
30452
+ M3DB server URIs.
30453
+ """
30454
+ return pulumi.get(self, "uris")
30455
+
30456
+
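Likewise for M3DB: the new block exposes the cluster, node, InfluxDB-compatible and Prometheus remote read/write endpoints. A sketch, assuming a list-typed `m3dbs` property on the get_m3_db result and placeholder names:

import pulumi
import pulumi_aiven as aiven

m3 = aiven.get_m3_db(project="my-project", service_name="example-m3db")  # placeholders

m3db = m3.m3dbs[0]  # accessor name is an assumption
pulumi.export("m3db_prometheus_write", m3db.prometheus_remote_write_uri)
pulumi.export("m3db_influxdb_uri", m3db.influxdb_uri)
pulumi.export("m3db_uris", m3db.uris)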
29354
30457
  @pulumi.output_type
29355
30458
  class GetM3DbM3dbUserConfigResult(dict):
29356
30459
  def __init__(__self__, *,
@@ -29380,9 +30483,9 @@ class GetM3DbM3dbUserConfigResult(dict):
29380
30483
  :param Sequence[str] ip_filters: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
29381
30484
  :param 'GetM3DbM3dbUserConfigLimitsArgs' limits: M3 limits
29382
30485
  :param 'GetM3DbM3dbUserConfigM3Args' m3: M3 specific configuration options
29383
- :param str m3_version: M3 major version (deprecated, use m3db_version).
30486
+ :param str m3_version: Enum: `1.1`, `1.2`, `1.5`. M3 major version (deprecated, use m3db_version).
29384
30487
  :param bool m3coordinator_enable_graphite_carbon_ingest: Enables access to Graphite Carbon plaintext metrics ingestion. It can be enabled only for services inside VPCs. The metrics are written to aggregated namespaces only.
29385
- :param str m3db_version: M3 major version (the minimum compatible version).
30488
+ :param str m3db_version: Enum: `1.1`, `1.2`, `1.5`. M3 major version (the minimum compatible version).
29386
30489
  :param Sequence['GetM3DbM3dbUserConfigNamespaceArgs'] namespaces: List of M3 namespaces
29387
30490
  :param 'GetM3DbM3dbUserConfigPrivateAccessArgs' private_access: Allow access to selected service ports from private networks
29388
30491
  :param str project_to_fork_from: Name of another project to fork a service from. This has effect only when a new service is being created.
@@ -29492,7 +30595,7 @@ class GetM3DbM3dbUserConfigResult(dict):
29492
30595
  @pulumi.getter(name="m3Version")
29493
30596
  def m3_version(self) -> Optional[str]:
29494
30597
  """
29495
- M3 major version (deprecated, use m3db_version).
30598
+ Enum: `1.1`, `1.2`, `1.5`. M3 major version (deprecated, use m3db_version).
29496
30599
  """
29497
30600
  return pulumi.get(self, "m3_version")
29498
30601
 
@@ -29508,7 +30611,7 @@ class GetM3DbM3dbUserConfigResult(dict):
29508
30611
  @pulumi.getter(name="m3dbVersion")
29509
30612
  def m3db_version(self) -> Optional[str]:
29510
30613
  """
29511
- M3 major version (the minimum compatible version).
30614
+ Enum: `1.1`, `1.2`, `1.5`. M3 major version (the minimum compatible version).
29512
30615
  """
29513
30616
  return pulumi.get(self, "m3db_version")
29514
30617
 
@@ -29745,7 +30848,7 @@ class GetM3DbM3dbUserConfigNamespaceResult(dict):
29745
30848
  resolution: Optional[str] = None):
29746
30849
  """
29747
30850
  :param str name: The name of the namespace.
29748
- :param str type: The type of aggregation (aggregated/unaggregated).
30851
+ :param str type: Enum: `aggregated`, `unaggregated`. The type of aggregation (aggregated/unaggregated).
29749
30852
  :param 'GetM3DbM3dbUserConfigNamespaceOptionsArgs' options: Namespace options
29750
30853
  :param str resolution: The resolution for an aggregated namespace.
29751
30854
  """
@@ -29768,7 +30871,7 @@ class GetM3DbM3dbUserConfigNamespaceResult(dict):
29768
30871
  @pulumi.getter
29769
30872
  def type(self) -> str:
29770
30873
  """
29771
- The type of aggregation (aggregated/unaggregated).
30874
+ Enum: `aggregated`, `unaggregated`. The type of aggregation (aggregated/unaggregated).
29772
30875
  """
29773
30876
  return pulumi.get(self, "type")
29774
30877
 
@@ -30290,6 +31393,141 @@ class GetMySqlComponentResult(dict):
30290
31393
  return pulumi.get(self, "usage")
30291
31394
 
30292
31395
 
31396
+ @pulumi.output_type
31397
+ class GetMySqlMysqlResult(dict):
31398
+ def __init__(__self__, *,
31399
+ params: Sequence['outputs.GetMySqlMysqlParamResult'],
31400
+ replica_uri: str,
31401
+ standby_uris: Sequence[str],
31402
+ syncing_uris: Sequence[str],
31403
+ uris: Sequence[str]):
31404
+ """
31405
+ :param Sequence['GetMySqlMysqlParamArgs'] params: MySQL connection parameters
31406
+ :param str replica_uri: MySQL replica URI for services with a replica
31407
+ :param Sequence[str] standby_uris: MySQL standby connection URIs
31408
+ :param Sequence[str] syncing_uris: MySQL syncing connection URIs
31409
+ :param Sequence[str] uris: MySQL master connection URIs
31410
+ """
31411
+ pulumi.set(__self__, "params", params)
31412
+ pulumi.set(__self__, "replica_uri", replica_uri)
31413
+ pulumi.set(__self__, "standby_uris", standby_uris)
31414
+ pulumi.set(__self__, "syncing_uris", syncing_uris)
31415
+ pulumi.set(__self__, "uris", uris)
31416
+
31417
+ @property
31418
+ @pulumi.getter
31419
+ def params(self) -> Sequence['outputs.GetMySqlMysqlParamResult']:
31420
+ """
31421
+ MySQL connection parameters
31422
+ """
31423
+ return pulumi.get(self, "params")
31424
+
31425
+ @property
31426
+ @pulumi.getter(name="replicaUri")
31427
+ def replica_uri(self) -> str:
31428
+ """
31429
+ MySQL replica URI for services with a replica
31430
+ """
31431
+ return pulumi.get(self, "replica_uri")
31432
+
31433
+ @property
31434
+ @pulumi.getter(name="standbyUris")
31435
+ def standby_uris(self) -> Sequence[str]:
31436
+ """
31437
+ MySQL standby connection URIs
31438
+ """
31439
+ return pulumi.get(self, "standby_uris")
31440
+
31441
+ @property
31442
+ @pulumi.getter(name="syncingUris")
31443
+ def syncing_uris(self) -> Sequence[str]:
31444
+ """
31445
+ MySQL syncing connection URIs
31446
+ """
31447
+ return pulumi.get(self, "syncing_uris")
31448
+
31449
+ @property
31450
+ @pulumi.getter
31451
+ def uris(self) -> Sequence[str]:
31452
+ """
31453
+ MySQL master connection URIs
31454
+ """
31455
+ return pulumi.get(self, "uris")
31456
+
31457
+
31458
+ @pulumi.output_type
31459
+ class GetMySqlMysqlParamResult(dict):
31460
+ def __init__(__self__, *,
31461
+ database_name: str,
31462
+ host: str,
31463
+ password: str,
31464
+ port: int,
31465
+ sslmode: str,
31466
+ user: str):
31467
+ """
31468
+ :param str database_name: Primary MySQL database name
31469
+ :param str host: MySQL host IP or name
31470
+ :param str password: MySQL admin user password
31471
+ :param int port: MySQL port
31472
+ :param str sslmode: MySQL sslmode setting (currently always "require")
31473
+ :param str user: MySQL admin user name
31474
+ """
31475
+ pulumi.set(__self__, "database_name", database_name)
31476
+ pulumi.set(__self__, "host", host)
31477
+ pulumi.set(__self__, "password", password)
31478
+ pulumi.set(__self__, "port", port)
31479
+ pulumi.set(__self__, "sslmode", sslmode)
31480
+ pulumi.set(__self__, "user", user)
31481
+
31482
+ @property
31483
+ @pulumi.getter(name="databaseName")
31484
+ def database_name(self) -> str:
31485
+ """
31486
+ Primary MySQL database name
31487
+ """
31488
+ return pulumi.get(self, "database_name")
31489
+
31490
+ @property
31491
+ @pulumi.getter
31492
+ def host(self) -> str:
31493
+ """
31494
+ MySQL host IP or name
31495
+ """
31496
+ return pulumi.get(self, "host")
31497
+
31498
+ @property
31499
+ @pulumi.getter
31500
+ def password(self) -> str:
31501
+ """
31502
+ MySQL admin user password
31503
+ """
31504
+ return pulumi.get(self, "password")
31505
+
31506
+ @property
31507
+ @pulumi.getter
31508
+ def port(self) -> int:
31509
+ """
31510
+ MySQL port
31511
+ """
31512
+ return pulumi.get(self, "port")
31513
+
31514
+ @property
31515
+ @pulumi.getter
31516
+ def sslmode(self) -> str:
31517
+ """
31518
+ MySQL sslmode setting (currently always "require")
31519
+ """
31520
+ return pulumi.get(self, "sslmode")
31521
+
31522
+ @property
31523
+ @pulumi.getter
31524
+ def user(self) -> str:
31525
+ """
31526
+ MySQL admin user name
31527
+ """
31528
+ return pulumi.get(self, "user")
31529
+
31530
+
30293
31531
  @pulumi.output_type
30294
31532
  class GetMySqlMysqlUserConfigResult(dict):
30295
31533
  def __init__(__self__, *,
@@ -30325,7 +31563,7 @@ class GetMySqlMysqlUserConfigResult(dict):
30325
31563
  :param Sequence[str] ip_filters: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
30326
31564
  :param 'GetMySqlMysqlUserConfigMigrationArgs' migration: Migrate data from existing server
30327
31565
  :param 'GetMySqlMysqlUserConfigMysqlArgs' mysql: mysql.conf configuration values
30328
- :param str mysql_version: MySQL major version.
31566
+ :param str mysql_version: Enum: `8`. MySQL major version.
30329
31567
  :param 'GetMySqlMysqlUserConfigPrivateAccessArgs' private_access: Allow access to selected service ports from private networks
30330
31568
  :param 'GetMySqlMysqlUserConfigPrivatelinkAccessArgs' privatelink_access: Allow access to selected service components through Privatelink
30331
31569
  :param str project_to_fork_from: Name of another project to fork a service from. This has effect only when a new service is being created.
@@ -30471,7 +31709,7 @@ class GetMySqlMysqlUserConfigResult(dict):
30471
31709
  @pulumi.getter(name="mysqlVersion")
30472
31710
  def mysql_version(self) -> Optional[str]:
30473
31711
  """
30474
- MySQL major version.
31712
+ Enum: `8`. MySQL major version.
30475
31713
  """
30476
31714
  return pulumi.get(self, "mysql_version")
30477
31715
 
@@ -30586,7 +31824,7 @@ class GetMySqlMysqlUserConfigMigrationResult(dict):
30586
31824
  :param int port: Port number of the server where to migrate data from.
30587
31825
  :param str dbname: Database name for bootstrapping the initial connection.
30588
31826
  :param str ignore_dbs: Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment).
30589
- :param str method: The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
31827
+ :param str method: Enum: `dump`, `replication`. The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
30590
31828
  :param str password: Password for authentication with the server where to migrate data from.
30591
31829
  :param bool ssl: The server where to migrate data from is secured with SSL. The default value is `true`.
30592
31830
  :param str username: User name for authentication with the server where to migrate data from.
@@ -30642,7 +31880,7 @@ class GetMySqlMysqlUserConfigMigrationResult(dict):
30642
31880
  @pulumi.getter
30643
31881
  def method(self) -> Optional[str]:
30644
31882
  """
30645
- The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
31883
+ Enum: `dump`, `replication`. The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
30646
31884
  """
30647
31885
  return pulumi.get(self, "method")
30648
31886
 
@@ -30722,7 +31960,7 @@ class GetMySqlMysqlUserConfigMysqlResult(dict):
30722
31960
  :param int innodb_thread_concurrency: Defines the maximum number of threads permitted inside of InnoDB. Default is 0 (infinite concurrency - no limit).
30723
31961
  :param int innodb_write_io_threads: The number of I/O threads for write operations in InnoDB. Default is 4. Changing this parameter will lead to a restart of the MySQL service.
30724
31962
  :param int interactive_timeout: The number of seconds the server waits for activity on an interactive connection before closing it.
30725
- :param str internal_tmp_mem_storage_engine: The storage engine for in-memory internal temporary tables.
31963
+ :param str internal_tmp_mem_storage_engine: Enum: `TempTable`, `MEMORY`. The storage engine for in-memory internal temporary tables.
30726
31964
  :param float long_query_time: The slow_query_logs work as SQL statements that take more than long_query_time seconds to execute. Default is 10s.
30727
31965
  :param int max_allowed_packet: Size of the largest message in bytes that can be received by the server. Default is 67108864 (64M).
30728
31966
  :param int max_heap_table_size: Limits the size of internal in-memory tables. Also set tmp_table_size. Default is 16777216 (16M).
@@ -30937,7 +32175,7 @@ class GetMySqlMysqlUserConfigMysqlResult(dict):
30937
32175
  @pulumi.getter(name="internalTmpMemStorageEngine")
30938
32176
  def internal_tmp_mem_storage_engine(self) -> Optional[str]:
30939
32177
  """
30940
- The storage engine for in-memory internal temporary tables.
32178
+ Enum: `TempTable`, `MEMORY`. The storage engine for in-memory internal temporary tables.
30941
32179
  """
30942
32180
  return pulumi.get(self, "internal_tmp_mem_storage_engine")
30943
32181
 
@@ -31341,11 +32579,31 @@ class GetOpenSearchComponentResult(dict):
31341
32579
  @pulumi.output_type
31342
32580
  class GetOpenSearchOpensearchResult(dict):
31343
32581
  def __init__(__self__, *,
31344
- opensearch_dashboards_uri: str):
32582
+ kibana_uri: str,
32583
+ opensearch_dashboards_uri: str,
32584
+ password: str,
32585
+ uris: Sequence[str],
32586
+ username: str):
31345
32587
  """
32588
+ :param str kibana_uri: URI for Kibana dashboard frontend
31346
32589
  :param str opensearch_dashboards_uri: URI for OpenSearch dashboard frontend
32590
+ :param str password: OpenSearch password
32591
+ :param Sequence[str] uris: OpenSearch server URIs.
32592
+ :param str username: OpenSearch username
31347
32593
  """
32594
+ pulumi.set(__self__, "kibana_uri", kibana_uri)
31348
32595
  pulumi.set(__self__, "opensearch_dashboards_uri", opensearch_dashboards_uri)
32596
+ pulumi.set(__self__, "password", password)
32597
+ pulumi.set(__self__, "uris", uris)
32598
+ pulumi.set(__self__, "username", username)
32599
+
32600
+ @property
32601
+ @pulumi.getter(name="kibanaUri")
32602
+ def kibana_uri(self) -> str:
32603
+ """
32604
+ URI for Kibana dashboard frontend
32605
+ """
32606
+ return pulumi.get(self, "kibana_uri")
31349
32607
 
31350
32608
  @property
31351
32609
  @pulumi.getter(name="opensearchDashboardsUri")
@@ -31355,6 +32613,30 @@ class GetOpenSearchOpensearchResult(dict):
31355
32613
  """
31356
32614
  return pulumi.get(self, "opensearch_dashboards_uri")
31357
32615
 
32616
+ @property
32617
+ @pulumi.getter
32618
+ def password(self) -> str:
32619
+ """
32620
+ OpenSearch password
32621
+ """
32622
+ return pulumi.get(self, "password")
32623
+
32624
+ @property
32625
+ @pulumi.getter
32626
+ def uris(self) -> Sequence[str]:
32627
+ """
32628
+ OpenSearch server URIs.
32629
+ """
32630
+ return pulumi.get(self, "uris")
32631
+
32632
+ @property
32633
+ @pulumi.getter
32634
+ def username(self) -> str:
32635
+ """
32636
+ OpenSearch username
32637
+ """
32638
+ return pulumi.get(self, "username")
32639
+
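The OpenSearch connection block now carries credentials and the full URI list alongside the dashboard URIs. A sketch of reading them, assuming the get_open_search result exposes the block as a list-typed `opensearches` property and using placeholder names:

import pulumi
import pulumi_aiven as aiven

os_svc = aiven.get_open_search(project="my-project", service_name="example-os")  # placeholders

os_conn = os_svc.opensearches[0]  # accessor name is an assumption
pulumi.export("os_uris", os_conn.uris)
pulumi.export("os_username", os_conn.username)
pulumi.export("os_password", pulumi.Output.secret(os_conn.password))  # keep the password out of plain state output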
31358
32640
 
31359
32641
  @pulumi.output_type
31360
32642
  class GetOpenSearchOpensearchUserConfigResult(dict):
@@ -31396,7 +32678,7 @@ class GetOpenSearchOpensearchUserConfigResult(dict):
31396
32678
  :param 'GetOpenSearchOpensearchUserConfigOpenidArgs' openid: OpenSearch OpenID Connect Configuration
31397
32679
  :param 'GetOpenSearchOpensearchUserConfigOpensearchArgs' opensearch: OpenSearch settings
31398
32680
  :param 'GetOpenSearchOpensearchUserConfigOpensearchDashboardsArgs' opensearch_dashboards: OpenSearch Dashboards settings
31399
- :param str opensearch_version: OpenSearch major version.
32681
+ :param str opensearch_version: Enum: `1`, `2`. OpenSearch major version.
31400
32682
  :param 'GetOpenSearchOpensearchUserConfigPrivateAccessArgs' private_access: Allow access to selected service ports from private networks
31401
32683
  :param 'GetOpenSearchOpensearchUserConfigPrivatelinkAccessArgs' privatelink_access: Allow access to selected service components through Privatelink
31402
32684
  :param str project_to_fork_from: Name of another project to fork a service from. This has effect only when a new service is being created.
@@ -31565,7 +32847,7 @@ class GetOpenSearchOpensearchUserConfigResult(dict):
31565
32847
  @pulumi.getter(name="opensearchVersion")
31566
32848
  def opensearch_version(self) -> Optional[str]:
31567
32849
  """
31568
- OpenSearch major version.
32850
+ Enum: `1`, `2`. OpenSearch major version.
31569
32851
  """
31570
32852
  return pulumi.get(self, "opensearch_version")
31571
32853
 
@@ -31651,7 +32933,7 @@ class GetOpenSearchOpensearchUserConfigIndexPatternResult(dict):
31651
32933
  """
31652
32934
  :param int max_index_count: Maximum number of indexes to keep.
31653
32935
  :param str pattern: fnmatch pattern.
31654
- :param str sorting_algorithm: Deletion sorting algorithm. The default value is `creation_date`.
32936
+ :param str sorting_algorithm: Enum: `alphabetical`, `creation_date`. Deletion sorting algorithm. The default value is `creation_date`.
31655
32937
  """
31656
32938
  pulumi.set(__self__, "max_index_count", max_index_count)
31657
32939
  pulumi.set(__self__, "pattern", pattern)
@@ -31678,7 +32960,7 @@ class GetOpenSearchOpensearchUserConfigIndexPatternResult(dict):
31678
32960
  @pulumi.getter(name="sortingAlgorithm")
31679
32961
  def sorting_algorithm(self) -> Optional[str]:
31680
32962
  """
31681
- Deletion sorting algorithm. The default value is `creation_date`.
32963
+ Enum: `alphabetical`, `creation_date`. Deletion sorting algorithm. The default value is `creation_date`.
31682
32964
  """
31683
32965
  return pulumi.get(self, "sorting_algorithm")
31684
32966
 
@@ -32453,12 +33735,12 @@ class GetOpenSearchOpensearchUserConfigOpensearchAuthFailureListenersInternalAut
32453
33735
  type: Optional[str] = None):
32454
33736
  """
32455
33737
  :param int allowed_tries: The number of login attempts allowed before login is blocked.
32456
- :param str authentication_backend: internal_authentication_backend_limiting.authentication_backend.
33738
+ :param str authentication_backend: Enum: `internal`. internal_authentication_backend_limiting.authentication_backend.
32457
33739
  :param int block_expiry_seconds: The duration of time that login remains blocked after a failed login.
32458
33740
  :param int max_blocked_clients: internal_authentication_backend_limiting.max_blocked_clients.
32459
33741
  :param int max_tracked_clients: The maximum number of tracked IP addresses that have failed login.
32460
33742
  :param int time_window_seconds: The window of time in which the value for `allowed_tries` is enforced.
32461
- :param str type: internal_authentication_backend_limiting.type.
33743
+ :param str type: Enum: `username`. internal_authentication_backend_limiting.type.
32462
33744
  """
32463
33745
  if allowed_tries is not None:
32464
33746
  pulumi.set(__self__, "allowed_tries", allowed_tries)
@@ -32487,7 +33769,7 @@ class GetOpenSearchOpensearchUserConfigOpensearchAuthFailureListenersInternalAut
32487
33769
  @pulumi.getter(name="authenticationBackend")
32488
33770
  def authentication_backend(self) -> Optional[str]:
32489
33771
  """
32490
- internal_authentication_backend_limiting.authentication_backend.
33772
+ Enum: `internal`. internal_authentication_backend_limiting.authentication_backend.
32491
33773
  """
32492
33774
  return pulumi.get(self, "authentication_backend")
32493
33775
 
@@ -32527,7 +33809,7 @@ class GetOpenSearchOpensearchUserConfigOpensearchAuthFailureListenersInternalAut
32527
33809
  @pulumi.getter
32528
33810
  def type(self) -> Optional[str]:
32529
33811
  """
32530
- internal_authentication_backend_limiting.type.
33812
+ Enum: `username`. internal_authentication_backend_limiting.type.
32531
33813
  """
32532
33814
  return pulumi.get(self, "type")
32533
33815
 
@@ -32547,7 +33829,7 @@ class GetOpenSearchOpensearchUserConfigOpensearchAuthFailureListenersIpRateLimit
32547
33829
  :param int max_blocked_clients: The maximum number of blocked IP addresses.
32548
33830
  :param int max_tracked_clients: The maximum number of tracked IP addresses that have failed login.
32549
33831
  :param int time_window_seconds: The window of time in which the value for `allowed_tries` is enforced.
32550
- :param str type: The type of rate limiting.
33832
+ :param str type: Enum: `ip`. The type of rate limiting.
32551
33833
  """
32552
33834
  if allowed_tries is not None:
32553
33835
  pulumi.set(__self__, "allowed_tries", allowed_tries)
@@ -32606,7 +33888,7 @@ class GetOpenSearchOpensearchUserConfigOpensearchAuthFailureListenersIpRateLimit
32606
33888
  @pulumi.getter
32607
33889
  def type(self) -> Optional[str]:
32608
33890
  """
32609
- The type of rate limiting.
33891
+ Enum: `ip`. The type of rate limiting.
32610
33892
  """
32611
33893
  return pulumi.get(self, "type")
32612
33894
 
@@ -33044,36 +34326,59 @@ class GetPgComponentResult(dict):
33044
34326
  @pulumi.output_type
33045
34327
  class GetPgPgResult(dict):
33046
34328
  def __init__(__self__, *,
34329
+ bouncer: str,
33047
34330
  dbname: str,
33048
34331
  host: str,
33049
34332
  max_connections: int,
34333
+ params: Sequence['outputs.GetPgPgParamResult'],
33050
34334
  password: str,
33051
34335
  port: int,
33052
34336
  replica_uri: str,
33053
34337
  sslmode: str,
34338
+ standby_uris: Sequence[str],
34339
+ syncing_uris: Sequence[str],
33054
34340
  uri: str,
34341
+ uris: Sequence[str],
33055
34342
  user: str):
33056
34343
  """
34344
+ :param str bouncer: Bouncer connection details
33057
34345
  :param str dbname: Primary PostgreSQL database name
33058
34346
  :param str host: PostgreSQL master node host IP or name
33059
34347
  :param int max_connections: Connection limit
34348
+ :param Sequence['GetPgPgParamArgs'] params: PostgreSQL connection parameters
33060
34349
  :param str password: PostgreSQL admin user password
33061
34350
  :param int port: PostgreSQL port
33062
34351
  :param str replica_uri: PostgreSQL replica URI for services with a replica
33063
34352
  :param str sslmode: PostgreSQL sslmode setting (currently always "require")
34353
+ :param Sequence[str] standby_uris: PostgreSQL standby connection URIs
34354
+ :param Sequence[str] syncing_uris: PostgreSQL syncing connection URIs
33064
34355
  :param str uri: PostgreSQL master connection URI
34356
+ :param Sequence[str] uris: PostgreSQL master connection URIs
33065
34357
  :param str user: PostgreSQL admin user name
33066
34358
  """
34359
+ pulumi.set(__self__, "bouncer", bouncer)
33067
34360
  pulumi.set(__self__, "dbname", dbname)
33068
34361
  pulumi.set(__self__, "host", host)
33069
34362
  pulumi.set(__self__, "max_connections", max_connections)
34363
+ pulumi.set(__self__, "params", params)
33070
34364
  pulumi.set(__self__, "password", password)
33071
34365
  pulumi.set(__self__, "port", port)
33072
34366
  pulumi.set(__self__, "replica_uri", replica_uri)
33073
34367
  pulumi.set(__self__, "sslmode", sslmode)
34368
+ pulumi.set(__self__, "standby_uris", standby_uris)
34369
+ pulumi.set(__self__, "syncing_uris", syncing_uris)
33074
34370
  pulumi.set(__self__, "uri", uri)
34371
+ pulumi.set(__self__, "uris", uris)
33075
34372
  pulumi.set(__self__, "user", user)
33076
34373
 
34374
+ @property
34375
+ @pulumi.getter
34376
+ def bouncer(self) -> str:
34377
+ """
34378
+ Bouncer connection details
34379
+ """
34380
+ return pulumi.get(self, "bouncer")
34381
+
33077
34382
  @property
33078
34383
  @pulumi.getter
33079
34384
  def dbname(self) -> str:
@@ -33098,6 +34403,14 @@ class GetPgPgResult(dict):
33098
34403
  """
33099
34404
  return pulumi.get(self, "max_connections")
33100
34405
 
34406
+ @property
34407
+ @pulumi.getter
34408
+ def params(self) -> Sequence['outputs.GetPgPgParamResult']:
34409
+ """
34410
+ PostgreSQL connection parameters
34411
+ """
34412
+ return pulumi.get(self, "params")
34413
+
33101
34414
  @property
33102
34415
  @pulumi.getter
33103
34416
  def password(self) -> str:
@@ -33130,6 +34443,22 @@ class GetPgPgResult(dict):
33130
34443
  """
33131
34444
  return pulumi.get(self, "sslmode")
33132
34445
 
34446
+ @property
34447
+ @pulumi.getter(name="standbyUris")
34448
+ def standby_uris(self) -> Sequence[str]:
34449
+ """
34450
+ PostgreSQL standby connection URIs
34451
+ """
34452
+ return pulumi.get(self, "standby_uris")
34453
+
34454
+ @property
34455
+ @pulumi.getter(name="syncingUris")
34456
+ def syncing_uris(self) -> Sequence[str]:
34457
+ """
34458
+ PostgreSQL syncing connection URIs
34459
+ """
34460
+ return pulumi.get(self, "syncing_uris")
34461
+
33133
34462
  @property
33134
34463
  @pulumi.getter
33135
34464
  def uri(self) -> str:
@@ -33138,6 +34467,87 @@ class GetPgPgResult(dict):
  """
  return pulumi.get(self, "uri")

+ @property
+ @pulumi.getter
+ def uris(self) -> Sequence[str]:
+ """
+ PostgreSQL master connection URIs
+ """
+ return pulumi.get(self, "uris")
+
+ @property
+ @pulumi.getter
+ def user(self) -> str:
+ """
+ PostgreSQL admin user name
+ """
+ return pulumi.get(self, "user")
+
+
+ @pulumi.output_type
+ class GetPgPgParamResult(dict):
+ def __init__(__self__, *,
+ database_name: str,
+ host: str,
+ password: str,
+ port: int,
+ sslmode: str,
+ user: str):
+ """
+ :param str database_name: Primary PostgreSQL database name
+ :param str host: PostgreSQL host IP or name
+ :param str password: PostgreSQL admin user password
+ :param int port: PostgreSQL port
+ :param str sslmode: PostgreSQL sslmode setting (currently always "require")
+ :param str user: PostgreSQL admin user name
+ """
+ pulumi.set(__self__, "database_name", database_name)
+ pulumi.set(__self__, "host", host)
+ pulumi.set(__self__, "password", password)
+ pulumi.set(__self__, "port", port)
+ pulumi.set(__self__, "sslmode", sslmode)
+ pulumi.set(__self__, "user", user)
+
+ @property
+ @pulumi.getter(name="databaseName")
+ def database_name(self) -> str:
+ """
+ Primary PostgreSQL database name
+ """
+ return pulumi.get(self, "database_name")
+
+ @property
+ @pulumi.getter
+ def host(self) -> str:
+ """
+ PostgreSQL host IP or name
+ """
+ return pulumi.get(self, "host")
+
+ @property
+ @pulumi.getter
+ def password(self) -> str:
+ """
+ PostgreSQL admin user password
+ """
+ return pulumi.get(self, "password")
+
+ @property
+ @pulumi.getter
+ def port(self) -> int:
+ """
+ PostgreSQL port
+ """
+ return pulumi.get(self, "port")
+
+ @property
+ @pulumi.getter
+ def sslmode(self) -> str:
+ """
+ PostgreSQL sslmode setting (currently always "require")
+ """
+ return pulumi.get(self, "sslmode")
+
  @property
  @pulumi.getter
  def user(self) -> str:
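The additions above give the PostgreSQL connection-info output a `bouncer` field (PgBouncer connection details), master URI list (`uris`), standby and syncing URI lists, and a per-host `params` list backed by the new `GetPgPgParamResult` type. As a rough illustration only (the helper name is made up and not part of the SDK; only the attribute names shown in this hunk are assumed), the `params` entries could be folded into libpq-style connection strings:

from typing import List, Sequence

def dsns_from_params(pg_params: Sequence) -> List[str]:
    # One GetPgPgParamResult-like entry per PostgreSQL host; each exposes
    # database_name, host, port, user, password and sslmode (see the class above).
    return [
        f"host={p.host} port={p.port} dbname={p.database_name} "
        f"user={p.user} password={p.password} sslmode={p.sslmode}"
        for p in pg_params
    ]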
@@ -33198,7 +34608,7 @@ class GetPgPgUserConfigResult(dict):
33198
34608
  :param bool pg_read_replica: Should the service which is being forked be a read replica (deprecated, use read_replica service integration instead).
33199
34609
  :param str pg_service_to_fork_from: Name of the PG Service from which to fork (deprecated, use service_to_fork_from). This has effect only when a new service is being created.
33200
34610
  :param bool pg_stat_monitor_enable: Enable the pg_stat_monitor extension. Enabling this extension will cause the cluster to be restarted.When this extension is enabled, pg_stat_statements results for utility commands are unreliable. The default value is `false`.
33201
- :param str pg_version: PostgreSQL major version.
34611
+ :param str pg_version: Enum: `10`, `11`, `12`, `13`, `14`, `15`, `16`. PostgreSQL major version.
33202
34612
  :param 'GetPgPgUserConfigPgauditArgs' pgaudit: System-wide settings for the pgaudit extension
33203
34613
  :param 'GetPgPgUserConfigPgbouncerArgs' pgbouncer: PGBouncer connection pooling settings
33204
34614
  :param 'GetPgPgUserConfigPglookoutArgs' pglookout: System-wide settings for pglookout
@@ -33211,9 +34621,9 @@ class GetPgPgUserConfigResult(dict):
33211
34621
  :param str service_to_fork_from: Name of another service to fork from. This has effect only when a new service is being created.
33212
34622
  :param float shared_buffers_percentage: Percentage of total RAM that the database server uses for shared memory buffers. Valid range is 20-60 (float), which corresponds to 20% - 60%. This setting adjusts the shared_buffers configuration value.
33213
34623
  :param bool static_ips: Use static public IP addresses.
33214
- :param str synchronous_replication: Synchronous replication type. Note that the service plan also needs to support synchronous replication.
34624
+ :param str synchronous_replication: Enum: `quorum`, `off`. Synchronous replication type. Note that the service plan also needs to support synchronous replication.
33215
34625
  :param 'GetPgPgUserConfigTimescaledbArgs' timescaledb: System-wide settings for the timescaledb extension
33216
- :param str variant: Variant of the PostgreSQL service, may affect the features that are exposed by default.
34626
+ :param str variant: Enum: `aiven`, `timescale`. Variant of the PostgreSQL service, may affect the features that are exposed by default.
33217
34627
  :param int work_mem: Sets the maximum amount of memory to be used by a query operation (such as a sort or hash table) before writing to temporary disk files, in MB. Default is 1MB + 0.075% of total RAM (up to 32MB).
33218
34628
  """
33219
34629
  if additional_backup_regions is not None:
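This release prefixes many user-config docstrings with an explicit `Enum:` list of accepted values (here `pg_version`, `synchronous_replication`, and `variant`). The fields remain plain strings, so nothing is enforced by the type checker; below is a minimal sketch of a pre-flight guard a program could run before building the config. The value sets are copied from the docstrings above, and the helper itself is illustrative, not part of the SDK.

_PG_ENUM_FIELDS = {
    "pg_version": {"10", "11", "12", "13", "14", "15", "16"},
    "synchronous_replication": {"quorum", "off"},
    "variant": {"aiven", "timescale"},
}

def check_pg_user_config(config: dict) -> None:
    # Raise early if an enum-documented field carries an unsupported value.
    for field, allowed in _PG_ENUM_FIELDS.items():
        value = config.get(field)
        if value is not None and value not in allowed:
            raise ValueError(f"{field}={value!r} is not one of {sorted(allowed)}")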
@@ -33411,7 +34821,7 @@ class GetPgPgUserConfigResult(dict):
33411
34821
  @pulumi.getter(name="pgVersion")
33412
34822
  def pg_version(self) -> Optional[str]:
33413
34823
  """
33414
- PostgreSQL major version.
34824
+ Enum: `10`, `11`, `12`, `13`, `14`, `15`, `16`. PostgreSQL major version.
33415
34825
  """
33416
34826
  return pulumi.get(self, "pg_version")
33417
34827
 
@@ -33515,7 +34925,7 @@ class GetPgPgUserConfigResult(dict):
33515
34925
  @pulumi.getter(name="synchronousReplication")
33516
34926
  def synchronous_replication(self) -> Optional[str]:
33517
34927
  """
33518
- Synchronous replication type. Note that the service plan also needs to support synchronous replication.
34928
+ Enum: `quorum`, `off`. Synchronous replication type. Note that the service plan also needs to support synchronous replication.
33519
34929
  """
33520
34930
  return pulumi.get(self, "synchronous_replication")
33521
34931
 
@@ -33531,7 +34941,7 @@ class GetPgPgUserConfigResult(dict):
33531
34941
  @pulumi.getter
33532
34942
  def variant(self) -> Optional[str]:
33533
34943
  """
33534
- Variant of the PostgreSQL service, may affect the features that are exposed by default.
34944
+ Enum: `aiven`, `timescale`. Variant of the PostgreSQL service, may affect the features that are exposed by default.
33535
34945
  """
33536
34946
  return pulumi.get(self, "variant")
33537
34947
 
@@ -33590,7 +35000,7 @@ class GetPgPgUserConfigMigrationResult(dict):
33590
35000
  :param int port: Port number of the server where to migrate data from.
33591
35001
  :param str dbname: Database name for bootstrapping the initial connection.
33592
35002
  :param str ignore_dbs: Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment).
33593
- :param str method: The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
35003
+ :param str method: Enum: `dump`, `replication`. The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
33594
35004
  :param str password: Password for authentication with the server where to migrate data from.
33595
35005
  :param bool ssl: The server where to migrate data from is secured with SSL. The default value is `true`.
33596
35006
  :param str username: User name for authentication with the server where to migrate data from.
@@ -33646,7 +35056,7 @@ class GetPgPgUserConfigMigrationResult(dict):
33646
35056
  @pulumi.getter
33647
35057
  def method(self) -> Optional[str]:
33648
35058
  """
33649
- The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
35059
+ Enum: `dump`, `replication`. The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
33650
35060
  """
33651
35061
  return pulumi.get(self, "method")
33652
35062
 
@@ -33742,12 +35152,12 @@ class GetPgPgUserConfigPgResult(dict):
33742
35152
  :param int bgwriter_lru_maxpages: In each round, no more than this many buffers will be written by the background writer. Setting this to zero disables background writing. Default is 100.
33743
35153
  :param float bgwriter_lru_multiplier: The average recent need for new buffers is multiplied by bgwriter_lru_multiplier to arrive at an estimate of the number that will be needed during the next round, (up to bgwriter_lru_maxpages). 1.0 represents a “just in time” policy of writing exactly the number of buffers predicted to be needed. Larger values provide some cushion against spikes in demand, while smaller values intentionally leave writes to be done by server processes. The default is 2.0.
33744
35154
  :param int deadlock_timeout: This is the amount of time, in milliseconds, to wait on a lock before checking to see if there is a deadlock condition.
33745
- :param str default_toast_compression: Specifies the default TOAST compression method for values of compressible columns (the default is lz4).
35155
+ :param str default_toast_compression: Enum: `lz4`, `pglz`. Specifies the default TOAST compression method for values of compressible columns (the default is lz4).
33746
35156
  :param int idle_in_transaction_session_timeout: Time out sessions with open transactions after this number of milliseconds.
33747
35157
  :param bool jit: Controls system-wide use of Just-in-Time Compilation (JIT).
33748
35158
  :param int log_autovacuum_min_duration: Causes each action executed by autovacuum to be logged if it ran for at least the specified number of milliseconds. Setting this to zero logs all autovacuum actions. Minus-one (the default) disables logging autovacuum actions.
33749
- :param str log_error_verbosity: Controls the amount of detail written in the server log for each message that is logged.
33750
- :param str log_line_prefix: Choose from one of the available log-formats. These can support popular log analyzers like pgbadger, pganalyze etc.
35159
+ :param str log_error_verbosity: Enum: `TERSE`, `DEFAULT`, `VERBOSE`. Controls the amount of detail written in the server log for each message that is logged.
35160
+ :param str log_line_prefix: Enum: `'pid=%p,user=%u,db=%d,app=%a,client=%h '`, `'%t [%p]: [%l-1] user=%u,db=%d,app=%a,client=%h '`, `'%m [%p] %q[user=%u,db=%d,app=%a] '`. Choose from one of the available log-formats. These can support popular log analyzers like pgbadger, pganalyze etc.
33751
35161
  :param int log_min_duration_statement: Log statements that take more than this number of milliseconds to run, -1 disables.
33752
35162
  :param int log_temp_files: Log statements for each temporary file created larger than this number of kilobytes, -1 disables.
33753
35163
  :param int max_files_per_process: PostgreSQL maximum number of files that can be open per process.
@@ -33768,13 +35178,13 @@ class GetPgPgUserConfigPgResult(dict):
33768
35178
  :param str pg_partman_bgw_dot_role: Controls which role to use for pg_partman's scheduled background tasks.
33769
35179
  :param bool pg_stat_monitor_dot_pgsm_enable_query_plan: Enables or disables query plan monitoring.
33770
35180
  :param int pg_stat_monitor_dot_pgsm_max_buckets: Sets the maximum number of buckets.
33771
- :param str pg_stat_statements_dot_track: Controls which statements are counted. Specify top to track top-level statements (those issued directly by clients), all to also track nested statements (such as statements invoked within functions), or none to disable statement statistics collection. The default value is top.
35181
+ :param str pg_stat_statements_dot_track: Enum: `all`, `top`, `none`. Controls which statements are counted. Specify top to track top-level statements (those issued directly by clients), all to also track nested statements (such as statements invoked within functions), or none to disable statement statistics collection. The default value is top.
33772
35182
  :param int temp_file_limit: PostgreSQL temporary file limit in KiB, -1 for unlimited.
33773
35183
  :param str timezone: PostgreSQL service timezone.
33774
35184
  :param int track_activity_query_size: Specifies the number of bytes reserved to track the currently executing command for each active session.
33775
- :param str track_commit_timestamp: Record commit time of transactions.
33776
- :param str track_functions: Enables tracking of function call counts and time used.
33777
- :param str track_io_timing: Enables timing of database I/O calls. This parameter is off by default, because it will repeatedly query the operating system for the current time, which may cause significant overhead on some platforms.
35185
+ :param str track_commit_timestamp: Enum: `off`, `on`. Record commit time of transactions.
35186
+ :param str track_functions: Enum: `all`, `pl`, `none`. Enables tracking of function call counts and time used.
35187
+ :param str track_io_timing: Enum: `off`, `on`. Enables timing of database I/O calls. This parameter is off by default, because it will repeatedly query the operating system for the current time, which may cause significant overhead on some platforms.
33778
35188
  :param int wal_sender_timeout: Terminate replication connections that are inactive for longer than this amount of time, in milliseconds. Setting this value to zero disables the timeout.
33779
35189
  :param int wal_writer_delay: WAL flush interval in milliseconds. Note that setting this value to lower than the default 200ms may negatively impact performance.
33780
35190
  """
@@ -33993,7 +35403,7 @@ class GetPgPgUserConfigPgResult(dict):
33993
35403
  @pulumi.getter(name="defaultToastCompression")
33994
35404
  def default_toast_compression(self) -> Optional[str]:
33995
35405
  """
33996
- Specifies the default TOAST compression method for values of compressible columns (the default is lz4).
35406
+ Enum: `lz4`, `pglz`. Specifies the default TOAST compression method for values of compressible columns (the default is lz4).
33997
35407
  """
33998
35408
  return pulumi.get(self, "default_toast_compression")
33999
35409
 
@@ -34025,7 +35435,7 @@ class GetPgPgUserConfigPgResult(dict):
34025
35435
  @pulumi.getter(name="logErrorVerbosity")
34026
35436
  def log_error_verbosity(self) -> Optional[str]:
34027
35437
  """
34028
- Controls the amount of detail written in the server log for each message that is logged.
35438
+ Enum: `TERSE`, `DEFAULT`, `VERBOSE`. Controls the amount of detail written in the server log for each message that is logged.
34029
35439
  """
34030
35440
  return pulumi.get(self, "log_error_verbosity")
34031
35441
 
@@ -34033,7 +35443,7 @@ class GetPgPgUserConfigPgResult(dict):
34033
35443
  @pulumi.getter(name="logLinePrefix")
34034
35444
  def log_line_prefix(self) -> Optional[str]:
34035
35445
  """
34036
- Choose from one of the available log-formats. These can support popular log analyzers like pgbadger, pganalyze etc.
35446
+ Enum: `'pid=%p,user=%u,db=%d,app=%a,client=%h '`, `'%t [%p]: [%l-1] user=%u,db=%d,app=%a,client=%h '`, `'%m [%p] %q[user=%u,db=%d,app=%a] '`. Choose from one of the available log-formats. These can support popular log analyzers like pgbadger, pganalyze etc.
34037
35447
  """
34038
35448
  return pulumi.get(self, "log_line_prefix")
34039
35449
 
@@ -34201,7 +35611,7 @@ class GetPgPgUserConfigPgResult(dict):
34201
35611
  @pulumi.getter(name="pgStatStatementsDotTrack")
34202
35612
  def pg_stat_statements_dot_track(self) -> Optional[str]:
34203
35613
  """
34204
- Controls which statements are counted. Specify top to track top-level statements (those issued directly by clients), all to also track nested statements (such as statements invoked within functions), or none to disable statement statistics collection. The default value is top.
35614
+ Enum: `all`, `top`, `none`. Controls which statements are counted. Specify top to track top-level statements (those issued directly by clients), all to also track nested statements (such as statements invoked within functions), or none to disable statement statistics collection. The default value is top.
34205
35615
  """
34206
35616
  return pulumi.get(self, "pg_stat_statements_dot_track")
34207
35617
 
@@ -34233,7 +35643,7 @@ class GetPgPgUserConfigPgResult(dict):
34233
35643
  @pulumi.getter(name="trackCommitTimestamp")
34234
35644
  def track_commit_timestamp(self) -> Optional[str]:
34235
35645
  """
34236
- Record commit time of transactions.
35646
+ Enum: `off`, `on`. Record commit time of transactions.
34237
35647
  """
34238
35648
  return pulumi.get(self, "track_commit_timestamp")
34239
35649
 
@@ -34241,7 +35651,7 @@ class GetPgPgUserConfigPgResult(dict):
34241
35651
  @pulumi.getter(name="trackFunctions")
34242
35652
  def track_functions(self) -> Optional[str]:
34243
35653
  """
34244
- Enables tracking of function call counts and time used.
35654
+ Enum: `all`, `pl`, `none`. Enables tracking of function call counts and time used.
34245
35655
  """
34246
35656
  return pulumi.get(self, "track_functions")
34247
35657
 
@@ -34249,7 +35659,7 @@ class GetPgPgUserConfigPgResult(dict):
34249
35659
  @pulumi.getter(name="trackIoTiming")
34250
35660
  def track_io_timing(self) -> Optional[str]:
34251
35661
  """
34252
- Enables timing of database I/O calls. This parameter is off by default, because it will repeatedly query the operating system for the current time, which may cause significant overhead on some platforms.
35662
+ Enum: `off`, `on`. Enables timing of database I/O calls. This parameter is off by default, because it will repeatedly query the operating system for the current time, which may cause significant overhead on some platforms.
34253
35663
  """
34254
35664
  return pulumi.get(self, "track_io_timing")
34255
35665
 
@@ -34373,7 +35783,7 @@ class GetPgPgUserConfigPgauditResult(dict):
34373
35783
  :param bool feature_enabled: Enable pgaudit extension. When enabled, pgaudit extension will be automatically installed.Otherwise, extension will be uninstalled but auditing configurations will be preserved. The default value is `false`.
34374
35784
  :param bool log_catalog: Specifies that session logging should be enabled in the casewhere all relations in a statement are in pg_catalog. The default value is `true`.
34375
35785
  :param bool log_client: Specifies whether log messages will be visible to a client process such as psql. The default value is `false`.
34376
- :param str log_level: Specifies the log level that will be used for log entries. The default value is `log`.
35786
+ :param str log_level: Enum: `debug1`, `debug2`, `debug3`, `debug4`, `debug5`, `info`, `notice`, `warning`, `log`. Specifies the log level that will be used for log entries. The default value is `log`.
34377
35787
  :param int log_max_string_length: Crop parameters representation and whole statements if they exceed this threshold. A (default) value of -1 disable the truncation. The default value is `-1`.
34378
35788
  :param bool log_nested_statements: This GUC allows to turn off logging nested statements, that is, statements that are executed as part of another ExecutorRun. The default value is `true`.
34379
35789
  :param bool log_parameter: Specifies that audit logging should include the parameters that were passed with the statement. The default value is `false`.
@@ -34442,7 +35852,7 @@ class GetPgPgUserConfigPgauditResult(dict):
34442
35852
  @pulumi.getter(name="logLevel")
34443
35853
  def log_level(self) -> Optional[str]:
34444
35854
  """
34445
- Specifies the log level that will be used for log entries. The default value is `log`.
35855
+ Enum: `debug1`, `debug2`, `debug3`, `debug4`, `debug5`, `info`, `notice`, `warning`, `log`. Specifies the log level that will be used for log entries. The default value is `log`.
34446
35856
  """
34447
35857
  return pulumi.get(self, "log_level")
34448
35858
 
@@ -34542,7 +35952,7 @@ class GetPgPgUserConfigPgbouncerResult(dict):
34542
35952
  """
34543
35953
  :param int autodb_idle_timeout: If the automatically created database pools have been unused this many seconds, they are freed. If 0 then timeout is disabled. (seconds). The default value is `3600`.
34544
35954
  :param int autodb_max_db_connections: Do not allow more than this many server connections per database (regardless of user). Setting it to 0 means unlimited.
34545
- :param str autodb_pool_mode: PGBouncer pool mode. The default value is `transaction`.
35955
+ :param str autodb_pool_mode: Enum: `session`, `transaction`, `statement`. PGBouncer pool mode. The default value is `transaction`.
34546
35956
  :param int autodb_pool_size: If non-zero then create automatically a pool of that size per user when a pool doesn't exist. The default value is `0`.
34547
35957
  :param Sequence[str] ignore_startup_parameters: List of parameters to ignore when given in startup packet.
34548
35958
  :param int min_pool_size: Add more server connections to pool if below this number. Improves behavior when usual load comes suddenly back after period of total inactivity. The value is effectively capped at the pool size. The default value is `0`.
@@ -34589,7 +35999,7 @@ class GetPgPgUserConfigPgbouncerResult(dict):
34589
35999
  @pulumi.getter(name="autodbPoolMode")
34590
36000
  def autodb_pool_mode(self) -> Optional[str]:
34591
36001
  """
34592
- PGBouncer pool mode. The default value is `transaction`.
36002
+ Enum: `session`, `transaction`, `statement`. PGBouncer pool mode. The default value is `transaction`.
34593
36003
  """
34594
36004
  return pulumi.get(self, "autodb_pool_mode")
34595
36005
 
@@ -35009,6 +36419,57 @@ class GetRedisComponentResult(dict):
  return pulumi.get(self, "usage")


+ @pulumi.output_type
+ class GetRedisRediResult(dict):
+ def __init__(__self__, *,
+ password: str,
+ replica_uri: str,
+ slave_uris: Sequence[str],
+ uris: Sequence[str]):
+ """
+ :param str password: Redis password.
+ :param str replica_uri: Redis replica server URI.
+ :param Sequence[str] slave_uris: Redis slave server URIs.
+ :param Sequence[str] uris: Redis server URIs.
+ """
+ pulumi.set(__self__, "password", password)
+ pulumi.set(__self__, "replica_uri", replica_uri)
+ pulumi.set(__self__, "slave_uris", slave_uris)
+ pulumi.set(__self__, "uris", uris)
+
+ @property
+ @pulumi.getter
+ def password(self) -> str:
+ """
+ Redis password.
+ """
+ return pulumi.get(self, "password")
+
+ @property
+ @pulumi.getter(name="replicaUri")
+ def replica_uri(self) -> str:
+ """
+ Redis replica server URI.
+ """
+ return pulumi.get(self, "replica_uri")
+
+ @property
+ @pulumi.getter(name="slaveUris")
+ def slave_uris(self) -> Sequence[str]:
+ """
+ Redis slave server URIs.
+ """
+ return pulumi.get(self, "slave_uris")
+
+ @property
+ @pulumi.getter
+ def uris(self) -> Sequence[str]:
+ """
+ Redis server URIs.
+ """
+ return pulumi.get(self, "uris")
+
+
  @pulumi.output_type
  class GetRedisRedisUserConfigResult(dict):
  def __init__(__self__, *,
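The new `GetRedisRediResult` output bundles the Redis connection details (`uris`, `slave_uris`, `replica_uri`, `password`). A minimal sketch, assuming only the attributes shown above, of how a consumer might prefer the replica endpoint for read traffic and fall back to the first server URI (the function is illustrative, not part of the SDK):

def redis_read_endpoint(redis_info) -> str:
    # redis_info stands in for a GetRedisRediResult-like object.
    # Prefer the dedicated replica URI when the service has one.
    if redis_info.replica_uri:
        return redis_info.replica_uri
    return redis_info.uris[0]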
@@ -35048,18 +36509,18 @@ class GetRedisRedisUserConfigResult(dict):
35048
36509
  :param str project_to_fork_from: Name of another project to fork a service from. This has effect only when a new service is being created.
35049
36510
  :param 'GetRedisRedisUserConfigPublicAccessArgs' public_access: Allow access to selected service ports from the public Internet
35050
36511
  :param str recovery_basebackup_name: Name of the basebackup to restore in forked service.
35051
- :param str redis_acl_channels_default: Determines default pub/sub channels' ACL for new users if ACL is not supplied. When this option is not defined, all_channels is assumed to keep backward compatibility. This option doesn't affect Redis configuration acl-pubsub-default.
36512
+ :param str redis_acl_channels_default: Enum: `allchannels`, `resetchannels`. Determines default pub/sub channels' ACL for new users if ACL is not supplied. When this option is not defined, all_channels is assumed to keep backward compatibility. This option doesn't affect Redis configuration acl-pubsub-default.
35052
36513
  :param int redis_io_threads: Set Redis IO thread count. Changing this will cause a restart of the Redis service.
35053
36514
  :param int redis_lfu_decay_time: LFU maxmemory-policy counter decay time in minutes. The default value is `1`.
35054
36515
  :param int redis_lfu_log_factor: Counter logarithm factor for volatile-lfu and allkeys-lfu maxmemory-policies. The default value is `10`.
35055
- :param str redis_maxmemory_policy: Redis maxmemory-policy. The default value is `noeviction`.
36516
+ :param str redis_maxmemory_policy: Enum: `noeviction`, `allkeys-lru`, `volatile-lru`, `allkeys-random`, `volatile-random`, `volatile-ttl`, `volatile-lfu`, `allkeys-lfu`. Redis maxmemory-policy. The default value is `noeviction`.
35056
36517
  :param str redis_notify_keyspace_events: Set notify-keyspace-events option.
35057
36518
  :param int redis_number_of_databases: Set number of Redis databases. Changing this will cause a restart of the Redis service.
35058
- :param str redis_persistence: When persistence is 'rdb', Redis does RDB dumps each 10 minutes if any key is changed. Also RDB dumps are done according to backup schedule for backup purposes. When persistence is 'off', no RDB dumps and backups are done, so data can be lost at any moment if service is restarted for any reason, or if service is powered off. Also service can't be forked.
36519
+ :param str redis_persistence: Enum: `off`, `rdb`. When persistence is 'rdb', Redis does RDB dumps each 10 minutes if any key is changed. Also RDB dumps are done according to backup schedule for backup purposes. When persistence is 'off', no RDB dumps and backups are done, so data can be lost at any moment if service is restarted for any reason, or if service is powered off. Also service can't be forked.
35059
36520
  :param int redis_pubsub_client_output_buffer_limit: Set output buffer limit for pub / sub clients in MB. The value is the hard limit, the soft limit is 1/4 of the hard limit. When setting the limit, be mindful of the available memory in the selected service plan.
35060
36521
  :param bool redis_ssl: Require SSL to access Redis. The default value is `true`.
35061
36522
  :param int redis_timeout: Redis idle connection timeout in seconds. The default value is `300`.
35062
- :param str redis_version: Redis major version.
36523
+ :param str redis_version: Enum: `7.0`. Redis major version.
35063
36524
  :param bool service_log: Store logs for the service so that they are available in the HTTP API and console.
35064
36525
  :param str service_to_fork_from: Name of another service to fork from. This has effect only when a new service is being created.
35065
36526
  :param bool static_ips: Use static public IP addresses.
@@ -35202,7 +36663,7 @@ class GetRedisRedisUserConfigResult(dict):
35202
36663
  @pulumi.getter(name="redisAclChannelsDefault")
35203
36664
  def redis_acl_channels_default(self) -> Optional[str]:
35204
36665
  """
35205
- Determines default pub/sub channels' ACL for new users if ACL is not supplied. When this option is not defined, all_channels is assumed to keep backward compatibility. This option doesn't affect Redis configuration acl-pubsub-default.
36666
+ Enum: `allchannels`, `resetchannels`. Determines default pub/sub channels' ACL for new users if ACL is not supplied. When this option is not defined, all_channels is assumed to keep backward compatibility. This option doesn't affect Redis configuration acl-pubsub-default.
35206
36667
  """
35207
36668
  return pulumi.get(self, "redis_acl_channels_default")
35208
36669
 
@@ -35234,7 +36695,7 @@ class GetRedisRedisUserConfigResult(dict):
35234
36695
  @pulumi.getter(name="redisMaxmemoryPolicy")
35235
36696
  def redis_maxmemory_policy(self) -> Optional[str]:
35236
36697
  """
35237
- Redis maxmemory-policy. The default value is `noeviction`.
36698
+ Enum: `noeviction`, `allkeys-lru`, `volatile-lru`, `allkeys-random`, `volatile-random`, `volatile-ttl`, `volatile-lfu`, `allkeys-lfu`. Redis maxmemory-policy. The default value is `noeviction`.
35238
36699
  """
35239
36700
  return pulumi.get(self, "redis_maxmemory_policy")
35240
36701
 
@@ -35258,7 +36719,7 @@ class GetRedisRedisUserConfigResult(dict):
35258
36719
  @pulumi.getter(name="redisPersistence")
35259
36720
  def redis_persistence(self) -> Optional[str]:
35260
36721
  """
35261
- When persistence is 'rdb', Redis does RDB dumps each 10 minutes if any key is changed. Also RDB dumps are done according to backup schedule for backup purposes. When persistence is 'off', no RDB dumps and backups are done, so data can be lost at any moment if service is restarted for any reason, or if service is powered off. Also service can't be forked.
36722
+ Enum: `off`, `rdb`. When persistence is 'rdb', Redis does RDB dumps each 10 minutes if any key is changed. Also RDB dumps are done according to backup schedule for backup purposes. When persistence is 'off', no RDB dumps and backups are done, so data can be lost at any moment if service is restarted for any reason, or if service is powered off. Also service can't be forked.
35262
36723
  """
35263
36724
  return pulumi.get(self, "redis_persistence")
35264
36725
 
@@ -35290,7 +36751,7 @@ class GetRedisRedisUserConfigResult(dict):
35290
36751
  @pulumi.getter(name="redisVersion")
35291
36752
  def redis_version(self) -> Optional[str]:
35292
36753
  """
35293
- Redis major version.
36754
+ Enum: `7.0`. Redis major version.
35294
36755
  """
35295
36756
  return pulumi.get(self, "redis_version")
35296
36757
 
@@ -35365,7 +36826,7 @@ class GetRedisRedisUserConfigMigrationResult(dict):
35365
36826
  :param int port: Port number of the server where to migrate data from.
35366
36827
  :param str dbname: Database name for bootstrapping the initial connection.
35367
36828
  :param str ignore_dbs: Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment).
35368
- :param str method: The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
36829
+ :param str method: Enum: `dump`, `replication`. The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
35369
36830
  :param str password: Password for authentication with the server where to migrate data from.
35370
36831
  :param bool ssl: The server where to migrate data from is secured with SSL. The default value is `true`.
35371
36832
  :param str username: User name for authentication with the server where to migrate data from.
@@ -35421,7 +36882,7 @@ class GetRedisRedisUserConfigMigrationResult(dict):
35421
36882
  @pulumi.getter
35422
36883
  def method(self) -> Optional[str]:
35423
36884
  """
35424
- The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
36885
+ Enum: `dump`, `replication`. The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
35425
36886
  """
35426
36887
  return pulumi.get(self, "method")
35427
36888
 
@@ -35656,13 +37117,13 @@ class GetServiceIntegrationClickhouseKafkaUserConfigTableResult(dict):
35656
37117
  skip_broken_messages: Optional[int] = None):
35657
37118
  """
35658
37119
  :param Sequence['GetServiceIntegrationClickhouseKafkaUserConfigTableColumnArgs'] columns: Table columns
35659
- :param str data_format: Message data format. The default value is `JSONEachRow`.
37120
+ :param str data_format: Enum: `Avro`, `CSV`, `JSONAsString`, `JSONCompactEachRow`, `JSONCompactStringsEachRow`, `JSONEachRow`, `JSONStringsEachRow`, `MsgPack`, `TSKV`, `TSV`, `TabSeparated`, `RawBLOB`, `AvroConfluent`. Message data format. The default value is `JSONEachRow`.
35660
37121
  :param str group_name: Kafka consumers group. The default value is `clickhouse`.
35661
37122
  :param str name: Name of the table.
35662
37123
  :param Sequence['GetServiceIntegrationClickhouseKafkaUserConfigTableTopicArgs'] topics: Kafka topics
35663
- :param str auto_offset_reset: Action to take when there is no initial offset in offset store or the desired offset is out of range. The default value is `earliest`.
35664
- :param str date_time_input_format: Method to read DateTime from text input formats. The default value is `basic`.
35665
- :param str handle_error_mode: How to handle errors for Kafka engine. The default value is `default`.
37124
+ :param str auto_offset_reset: Enum: `smallest`, `earliest`, `beginning`, `largest`, `latest`, `end`. Action to take when there is no initial offset in offset store or the desired offset is out of range. The default value is `earliest`.
37125
+ :param str date_time_input_format: Enum: `basic`, `best_effort`, `best_effort_us`. Method to read DateTime from text input formats. The default value is `basic`.
37126
+ :param str handle_error_mode: Enum: `default`, `stream`. How to handle errors for Kafka engine. The default value is `default`.
35666
37127
  :param int max_block_size: Number of row collected by poll(s) for flushing data from Kafka. The default value is `0`.
35667
37128
  :param int max_rows_per_message: The maximum number of rows produced in one kafka message for row-based formats. The default value is `1`.
35668
37129
  :param int num_consumers: The number of consumers per table per replica. The default value is `1`.
@@ -35703,7 +37164,7 @@ class GetServiceIntegrationClickhouseKafkaUserConfigTableResult(dict):
35703
37164
  @pulumi.getter(name="dataFormat")
35704
37165
  def data_format(self) -> str:
35705
37166
  """
35706
- Message data format. The default value is `JSONEachRow`.
37167
+ Enum: `Avro`, `CSV`, `JSONAsString`, `JSONCompactEachRow`, `JSONCompactStringsEachRow`, `JSONEachRow`, `JSONStringsEachRow`, `MsgPack`, `TSKV`, `TSV`, `TabSeparated`, `RawBLOB`, `AvroConfluent`. Message data format. The default value is `JSONEachRow`.
35707
37168
  """
35708
37169
  return pulumi.get(self, "data_format")
35709
37170
 
@@ -35735,7 +37196,7 @@ class GetServiceIntegrationClickhouseKafkaUserConfigTableResult(dict):
35735
37196
  @pulumi.getter(name="autoOffsetReset")
35736
37197
  def auto_offset_reset(self) -> Optional[str]:
35737
37198
  """
35738
- Action to take when there is no initial offset in offset store or the desired offset is out of range. The default value is `earliest`.
37199
+ Enum: `smallest`, `earliest`, `beginning`, `largest`, `latest`, `end`. Action to take when there is no initial offset in offset store or the desired offset is out of range. The default value is `earliest`.
35739
37200
  """
35740
37201
  return pulumi.get(self, "auto_offset_reset")
35741
37202
 
@@ -35743,7 +37204,7 @@ class GetServiceIntegrationClickhouseKafkaUserConfigTableResult(dict):
35743
37204
  @pulumi.getter(name="dateTimeInputFormat")
35744
37205
  def date_time_input_format(self) -> Optional[str]:
35745
37206
  """
35746
- Method to read DateTime from text input formats. The default value is `basic`.
37207
+ Enum: `basic`, `best_effort`, `best_effort_us`. Method to read DateTime from text input formats. The default value is `basic`.
35747
37208
  """
35748
37209
  return pulumi.get(self, "date_time_input_format")
35749
37210
 
@@ -35751,7 +37212,7 @@ class GetServiceIntegrationClickhouseKafkaUserConfigTableResult(dict):
35751
37212
  @pulumi.getter(name="handleErrorMode")
35752
37213
  def handle_error_mode(self) -> Optional[str]:
35753
37214
  """
35754
- How to handle errors for Kafka engine. The default value is `default`.
37215
+ Enum: `default`, `stream`. How to handle errors for Kafka engine. The default value is `default`.
35755
37216
  """
35756
37217
  return pulumi.get(self, "handle_error_mode")
35757
37218
 
@@ -35897,6 +37358,7 @@ class GetServiceIntegrationClickhousePostgresqlUserConfigDatabaseResult(dict):
  class GetServiceIntegrationDatadogUserConfigResult(dict):
  def __init__(__self__, *,
  datadog_dbm_enabled: Optional[bool] = None,
+ datadog_pgbouncer_enabled: Optional[bool] = None,
  datadog_tags: Optional[Sequence['outputs.GetServiceIntegrationDatadogUserConfigDatadogTagResult']] = None,
  exclude_consumer_groups: Optional[Sequence[str]] = None,
  exclude_topics: Optional[Sequence[str]] = None,
@@ -35908,6 +37370,7 @@ class GetServiceIntegrationDatadogUserConfigResult(dict):
  redis: Optional['outputs.GetServiceIntegrationDatadogUserConfigRedisResult'] = None):
  """
  :param bool datadog_dbm_enabled: Enable Datadog Database Monitoring.
+ :param bool datadog_pgbouncer_enabled: Enable Datadog PgBouncer Metric Tracking.
  :param Sequence['GetServiceIntegrationDatadogUserConfigDatadogTagArgs'] datadog_tags: Custom tags provided by user
  :param Sequence[str] exclude_consumer_groups: List of custom metrics.
  :param Sequence[str] exclude_topics: List of topics to exclude.
@@ -35920,6 +37383,8 @@ class GetServiceIntegrationDatadogUserConfigResult(dict):
  """
  if datadog_dbm_enabled is not None:
  pulumi.set(__self__, "datadog_dbm_enabled", datadog_dbm_enabled)
+ if datadog_pgbouncer_enabled is not None:
+ pulumi.set(__self__, "datadog_pgbouncer_enabled", datadog_pgbouncer_enabled)
  if datadog_tags is not None:
  pulumi.set(__self__, "datadog_tags", datadog_tags)
  if exclude_consumer_groups is not None:
@@ -35947,6 +37412,14 @@ class GetServiceIntegrationDatadogUserConfigResult(dict):
  """
  return pulumi.get(self, "datadog_dbm_enabled")

+ @property
+ @pulumi.getter(name="datadogPgbouncerEnabled")
+ def datadog_pgbouncer_enabled(self) -> Optional[bool]:
+ """
+ Enable Datadog PgBouncer Metric Tracking.
+ """
+ return pulumi.get(self, "datadog_pgbouncer_enabled")
+
  @property
  @pulumi.getter(name="datadogTags")
  def datadog_tags(self) -> Optional[Sequence['outputs.GetServiceIntegrationDatadogUserConfigDatadogTagResult']]:
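`datadog_pgbouncer_enabled` is a new optional flag in the Datadog integration user config. Below is a hedged sketch of switching it on for a PostgreSQL service via `aiven.ServiceIntegration`; the resource and argument names follow the provider's usual shape but are assumptions here (only the output type is shown in this diff), and the project, service, and endpoint values are placeholders.

import pulumi_aiven as aiven

datadog_integration = aiven.ServiceIntegration(
    "pg-datadog",
    project="my-project",                              # placeholder
    integration_type="datadog",
    source_service_name="my-pg",                       # placeholder PostgreSQL service
    destination_endpoint_id="<datadog-endpoint-id>",   # placeholder endpoint ID
    datadog_user_config=aiven.ServiceIntegrationDatadogUserConfigArgs(
        datadog_dbm_enabled=True,
        datadog_pgbouncer_enabled=True,                # new in 6.16.0
    ),
)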
@@ -36141,7 +37614,7 @@ class GetServiceIntegrationEndpointDatadogUserConfigResult(dict):
36141
37614
  :param int kafka_consumer_check_instances: Number of separate instances to fetch kafka consumer statistics with.
36142
37615
  :param int kafka_consumer_stats_timeout: Number of seconds that datadog will wait to get consumer statistics from brokers.
36143
37616
  :param int max_partition_contexts: Maximum number of partition contexts to send.
36144
- :param str site: Datadog intake site. Defaults to datadoghq.com.
37617
+ :param str site: Enum: `datadoghq.com`, `datadoghq.eu`, `us3.datadoghq.com`, `us5.datadoghq.com`, `ddog-gov.com`, `ap1.datadoghq.com`. Datadog intake site. Defaults to datadoghq.com.
36145
37618
  """
36146
37619
  pulumi.set(__self__, "datadog_api_key", datadog_api_key)
36147
37620
  if datadog_tags is not None:
@@ -36209,7 +37682,7 @@ class GetServiceIntegrationEndpointDatadogUserConfigResult(dict):
36209
37682
  @pulumi.getter
36210
37683
  def site(self) -> Optional[str]:
36211
37684
  """
36212
- Datadog intake site. Defaults to datadoghq.com.
37685
+ Enum: `datadoghq.com`, `datadoghq.eu`, `us3.datadoghq.com`, `us5.datadoghq.com`, `ddog-gov.com`, `ap1.datadoghq.com`. Datadog intake site. Defaults to datadoghq.com.
36213
37686
  """
36214
37687
  return pulumi.get(self, "site")
36215
37688
 
@@ -36495,14 +37968,14 @@ class GetServiceIntegrationEndpointExternalKafkaUserConfigResult(dict):
36495
37968
  ssl_endpoint_identification_algorithm: Optional[str] = None):
36496
37969
  """
36497
37970
  :param str bootstrap_servers: Bootstrap servers.
36498
- :param str security_protocol: Security protocol.
36499
- :param str sasl_mechanism: SASL mechanism used for connections to the Kafka server.
37971
+ :param str security_protocol: Enum: `PLAINTEXT`, `SSL`, `SASL_PLAINTEXT`, `SASL_SSL`. Security protocol.
37972
+ :param str sasl_mechanism: Enum: `PLAIN`, `SCRAM-SHA-256`, `SCRAM-SHA-512`. SASL mechanism used for connections to the Kafka server.
36500
37973
  :param str sasl_plain_password: Password for SASL PLAIN mechanism in the Kafka server.
36501
37974
  :param str sasl_plain_username: Username for SASL PLAIN mechanism in the Kafka server.
36502
37975
  :param str ssl_ca_cert: PEM-encoded CA certificate.
36503
37976
  :param str ssl_client_cert: PEM-encoded client certificate.
36504
37977
  :param str ssl_client_key: PEM-encoded client key.
36505
- :param str ssl_endpoint_identification_algorithm: The endpoint identification algorithm to validate server hostname using server certificate.
37978
+ :param str ssl_endpoint_identification_algorithm: Enum: `https`, ``. The endpoint identification algorithm to validate server hostname using server certificate.
36506
37979
  """
36507
37980
  pulumi.set(__self__, "bootstrap_servers", bootstrap_servers)
36508
37981
  pulumi.set(__self__, "security_protocol", security_protocol)
@@ -36533,7 +38006,7 @@ class GetServiceIntegrationEndpointExternalKafkaUserConfigResult(dict):
36533
38006
  @pulumi.getter(name="securityProtocol")
36534
38007
  def security_protocol(self) -> str:
36535
38008
  """
36536
- Security protocol.
38009
+ Enum: `PLAINTEXT`, `SSL`, `SASL_PLAINTEXT`, `SASL_SSL`. Security protocol.
36537
38010
  """
36538
38011
  return pulumi.get(self, "security_protocol")
36539
38012
 
@@ -36541,7 +38014,7 @@ class GetServiceIntegrationEndpointExternalKafkaUserConfigResult(dict):
36541
38014
  @pulumi.getter(name="saslMechanism")
36542
38015
  def sasl_mechanism(self) -> Optional[str]:
36543
38016
  """
36544
- SASL mechanism used for connections to the Kafka server.
38017
+ Enum: `PLAIN`, `SCRAM-SHA-256`, `SCRAM-SHA-512`. SASL mechanism used for connections to the Kafka server.
36545
38018
  """
36546
38019
  return pulumi.get(self, "sasl_mechanism")
36547
38020
 
@@ -36589,7 +38062,7 @@ class GetServiceIntegrationEndpointExternalKafkaUserConfigResult(dict):
36589
38062
  @pulumi.getter(name="sslEndpointIdentificationAlgorithm")
36590
38063
  def ssl_endpoint_identification_algorithm(self) -> Optional[str]:
36591
38064
  """
36592
- The endpoint identification algorithm to validate server hostname using server certificate.
38065
+ Enum: `https`, ``. The endpoint identification algorithm to validate server hostname using server certificate.
36593
38066
  """
36594
38067
  return pulumi.get(self, "ssl_endpoint_identification_algorithm")
36595
38068
 
@@ -36679,7 +38152,7 @@ class GetServiceIntegrationEndpointExternalPostgresqlResult(dict):
36679
38152
  :param str password: Password.
36680
38153
  :param str ssl_client_certificate: Client certificate.
36681
38154
  :param str ssl_client_key: Client key.
36682
- :param str ssl_mode: SSL Mode. The default value is `verify-full`.
38155
+ :param str ssl_mode: Enum: `disable`, `allow`, `prefer`, `require`, `verify-ca`, `verify-full`. SSL Mode. The default value is `verify-full`.
36683
38156
  :param str ssl_root_cert: SSL Root Cert.
36684
38157
  """
36685
38158
  pulumi.set(__self__, "host", host)
@@ -36758,7 +38231,7 @@ class GetServiceIntegrationEndpointExternalPostgresqlResult(dict):
36758
38231
  @pulumi.getter(name="sslMode")
36759
38232
  def ssl_mode(self) -> Optional[str]:
36760
38233
  """
36761
- SSL Mode. The default value is `verify-full`.
38234
+ Enum: `disable`, `allow`, `prefer`, `require`, `verify-ca`, `verify-full`. SSL Mode. The default value is `verify-full`.
36762
38235
  """
36763
38236
  return pulumi.get(self, "ssl_mode")
36764
38237
 
@@ -36779,7 +38252,7 @@ class GetServiceIntegrationEndpointExternalSchemaRegistryUserConfigResult(dict):
36779
38252
  basic_auth_password: Optional[str] = None,
36780
38253
  basic_auth_username: Optional[str] = None):
36781
38254
  """
36782
- :param str authentication: Authentication method.
38255
+ :param str authentication: Enum: `none`, `basic`. Authentication method.
36783
38256
  :param str url: Schema Registry URL.
36784
38257
  :param str basic_auth_password: Basic authentication password.
36785
38258
  :param str basic_auth_username: Basic authentication user name.
@@ -36795,7 +38268,7 @@ class GetServiceIntegrationEndpointExternalSchemaRegistryUserConfigResult(dict):
36795
38268
  @pulumi.getter
36796
38269
  def authentication(self) -> str:
36797
38270
  """
36798
- Authentication method.
38271
+ Enum: `none`, `basic`. Authentication method.
36799
38272
  """
36800
38273
  return pulumi.get(self, "authentication")
36801
38274
 
@@ -36900,7 +38373,7 @@ class GetServiceIntegrationEndpointRsyslogUserConfigResult(dict):
36900
38373
  max_message_size: Optional[int] = None,
36901
38374
  sd: Optional[str] = None):
36902
38375
  """
36903
- :param str format: Message format. The default value is `rfc5424`.
38376
+ :param str format: Enum: `rfc5424`, `rfc3164`, `custom`. Message format. The default value is `rfc5424`.
36904
38377
  :param int port: Rsyslog server port. The default value is `514`.
36905
38378
  :param str server: Rsyslog server IP address or hostname.
36906
38379
  :param bool tls: Require TLS. The default value is `true`.
@@ -36932,7 +38405,7 @@ class GetServiceIntegrationEndpointRsyslogUserConfigResult(dict):
36932
38405
  @pulumi.getter
36933
38406
  def format(self) -> str:
36934
38407
  """
36935
- Message format. The default value is `rfc5424`.
38408
+ Enum: `rfc5424`, `rfc3164`, `custom`. Message format. The default value is `rfc5424`.
36936
38409
  """
36937
38410
  return pulumi.get(self, "format")
36938
38411
 
@@ -37303,7 +38776,7 @@ class GetServiceIntegrationKafkaMirrormakerUserConfigKafkaMirrormakerResult(dict
37303
38776
  :param int consumer_fetch_min_bytes: The minimum amount of data the server should return for a fetch request.
37304
38777
  :param int producer_batch_size: The batch size in bytes producer will attempt to collect before publishing to broker.
37305
38778
  :param int producer_buffer_memory: The amount of bytes producer can use for buffering data before publishing to broker.
37306
- :param str producer_compression_type: Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
38779
+ :param str producer_compression_type: Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
37307
38780
  :param int producer_linger_ms: The linger time (ms) for waiting new data to arrive for publishing.
37308
38781
  :param int producer_max_request_size: The maximum request size in bytes.
37309
38782
  """
@@ -37348,7 +38821,7 @@ class GetServiceIntegrationKafkaMirrormakerUserConfigKafkaMirrormakerResult(dict
37348
38821
  @pulumi.getter(name="producerCompressionType")
37349
38822
  def producer_compression_type(self) -> Optional[str]:
37350
38823
  """
37351
- Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
38824
+ Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
37352
38825
  """
37353
38826
  return pulumi.get(self, "producer_compression_type")
37354
38827