pulumi-aiven 6.16.0a1716504562-py3-none-any.whl → 6.17.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of pulumi-aiven might be problematic.

Files changed (42)
  1. pulumi_aiven/_inputs.py +1133 -397
  2. pulumi_aiven/cassandra.py +48 -1
  3. pulumi_aiven/clickhouse.py +48 -1
  4. pulumi_aiven/dragonfly.py +48 -1
  5. pulumi_aiven/flink.py +2 -2
  6. pulumi_aiven/flink_application_deployment.py +56 -30
  7. pulumi_aiven/gcp_privatelink.py +52 -30
  8. pulumi_aiven/gcp_privatelink_connection_approval.py +54 -30
  9. pulumi_aiven/get_cassanda.py +14 -1
  10. pulumi_aiven/get_cassandra.py +14 -1
  11. pulumi_aiven/get_clickhouse.py +14 -1
  12. pulumi_aiven/get_dragonfly.py +14 -1
  13. pulumi_aiven/get_gcp_privatelink.py +45 -2
  14. pulumi_aiven/get_grafana.py +14 -1
  15. pulumi_aiven/get_m3_aggregator.py +14 -1
  16. pulumi_aiven/get_m3_db.py +14 -1
  17. pulumi_aiven/get_mirror_maker_replication_flow.py +15 -2
  18. pulumi_aiven/get_my_sql.py +14 -1
  19. pulumi_aiven/get_organization_application_user.py +15 -8
  20. pulumi_aiven/get_redis.py +14 -1
  21. pulumi_aiven/get_service_integration.py +3 -3
  22. pulumi_aiven/get_service_integration_endpoint.py +1 -1
  23. pulumi_aiven/grafana.py +48 -1
  24. pulumi_aiven/influx_db.py +21 -2
  25. pulumi_aiven/kafka.py +20 -1
  26. pulumi_aiven/m3_aggregator.py +48 -1
  27. pulumi_aiven/m3_db.py +48 -1
  28. pulumi_aiven/mirror_maker_replication_flow.py +54 -7
  29. pulumi_aiven/my_sql.py +48 -1
  30. pulumi_aiven/open_search.py +21 -2
  31. pulumi_aiven/organization_application_user.py +52 -45
  32. pulumi_aiven/organization_application_user_token.py +15 -51
  33. pulumi_aiven/outputs.py +1758 -397
  34. pulumi_aiven/pg.py +2 -2
  35. pulumi_aiven/pulumi-plugin.json +1 -1
  36. pulumi_aiven/redis.py +48 -1
  37. pulumi_aiven/service_integration.py +7 -7
  38. pulumi_aiven/service_integration_endpoint.py +7 -7
  39. {pulumi_aiven-6.16.0a1716504562.dist-info → pulumi_aiven-6.17.0.dist-info}/METADATA +1 -1
  40. {pulumi_aiven-6.16.0a1716504562.dist-info → pulumi_aiven-6.17.0.dist-info}/RECORD +42 -42
  41. {pulumi_aiven-6.16.0a1716504562.dist-info → pulumi_aiven-6.17.0.dist-info}/WHEEL +0 -0
  42. {pulumi_aiven-6.16.0a1716504562.dist-info → pulumi_aiven-6.17.0.dist-info}/top_level.txt +0 -0
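The bulk of this release lands in `pulumi_aiven/_inputs.py` and `pulumi_aiven/outputs.py`, which add connection-info output types (for example `CassandraCassandra`, `ClickhouseClickhouse`, `DragonflyDragonfly`, `GrafanaGrafana`) and new `uris` fields on existing blocks such as `KafkaKafka` and `InfluxDbInfluxdb`, alongside enum-value documentation for many user-config fields. Below is a minimal sketch of how the new `uris` output could be consumed after upgrading to 6.17.0; the project, cloud, plan, and service names are placeholders and not taken from this diff.

```python
import pulumi
import pulumi_aiven as aiven

# Hypothetical Kafka service; project, cloud_name, plan and service_name
# are placeholder values for illustration only.
kafka = aiven.Kafka(
    "example-kafka",
    project="my-aiven-project",
    cloud_name="google-europe-west1",
    plan="business-4",
    service_name="example-kafka",
)

# 6.17.0 adds a `uris` list to the KafkaKafka output block (see the
# outputs.py diff below); unwrap the nested output with `apply` and export it.
pulumi.export("kafka_uris", kafka.kafka.apply(lambda k: k.uris))
```

The same pattern should apply to the other services that gain a `uris` (or richer connection-info) block in this release, such as Cassandra, Clickhouse, Dragonfly, Grafana, M3, MySQL, and Redis.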
pulumi_aiven/outputs.py CHANGED
@@ -12,6 +12,7 @@ from . import outputs
12
12
 
13
13
  __all__ = [
14
14
  'AccountAuthenticationSamlFieldMapping',
15
+ 'CassandraCassandra',
15
16
  'CassandraCassandraUserConfig',
16
17
  'CassandraCassandraUserConfigCassandra',
17
18
  'CassandraCassandraUserConfigIpFilterObject',
@@ -21,6 +22,7 @@ __all__ = [
21
22
  'CassandraServiceIntegration',
22
23
  'CassandraTag',
23
24
  'CassandraTechEmail',
25
+ 'ClickhouseClickhouse',
24
26
  'ClickhouseClickhouseUserConfig',
25
27
  'ClickhouseClickhouseUserConfigIpFilterObject',
26
28
  'ClickhouseClickhouseUserConfigPrivateAccess',
@@ -33,6 +35,7 @@ __all__ = [
33
35
  'ClickhouseTag',
34
36
  'ClickhouseTechEmail',
35
37
  'DragonflyComponent',
38
+ 'DragonflyDragonfly',
36
39
  'DragonflyDragonflyUserConfig',
37
40
  'DragonflyDragonflyUserConfigIpFilterObject',
38
41
  'DragonflyDragonflyUserConfigMigration',
@@ -53,6 +56,7 @@ __all__ = [
53
56
  'FlinkTag',
54
57
  'FlinkTechEmail',
55
58
  'GrafanaComponent',
59
+ 'GrafanaGrafana',
56
60
  'GrafanaGrafanaUserConfig',
57
61
  'GrafanaGrafanaUserConfigAuthAzuread',
58
62
  'GrafanaGrafanaUserConfigAuthGenericOauth',
@@ -118,12 +122,14 @@ __all__ = [
118
122
  'KafkaTopicConfig',
119
123
  'KafkaTopicTag',
120
124
  'M3AggregatorComponent',
125
+ 'M3AggregatorM3aggregator',
121
126
  'M3AggregatorM3aggregatorUserConfig',
122
127
  'M3AggregatorM3aggregatorUserConfigIpFilterObject',
123
128
  'M3AggregatorServiceIntegration',
124
129
  'M3AggregatorTag',
125
130
  'M3AggregatorTechEmail',
126
131
  'M3DbComponent',
132
+ 'M3DbM3db',
127
133
  'M3DbM3dbUserConfig',
128
134
  'M3DbM3dbUserConfigIpFilterObject',
129
135
  'M3DbM3dbUserConfigLimits',
@@ -142,6 +148,8 @@ __all__ = [
142
148
  'M3DbTag',
143
149
  'M3DbTechEmail',
144
150
  'MySqlComponent',
151
+ 'MySqlMysql',
152
+ 'MySqlMysqlParam',
145
153
  'MySqlMysqlUserConfig',
146
154
  'MySqlMysqlUserConfigIpFilterObject',
147
155
  'MySqlMysqlUserConfigMigration',
@@ -171,13 +179,12 @@ __all__ = [
171
179
  'OpenSearchServiceIntegration',
172
180
  'OpenSearchTag',
173
181
  'OpenSearchTechEmail',
174
- 'OrganizationApplicationUserTimeouts',
175
- 'OrganizationApplicationUserTokenTimeouts',
176
182
  'OrganizationGroupProjectTimeouts',
177
183
  'OrganizationTimeouts',
178
184
  'OrganizationUserGroupMemberTimeouts',
179
185
  'PgComponent',
180
186
  'PgPg',
187
+ 'PgPgParam',
181
188
  'PgPgUserConfig',
182
189
  'PgPgUserConfigIpFilterObject',
183
190
  'PgPgUserConfigMigration',
@@ -195,6 +202,7 @@ __all__ = [
195
202
  'PgTechEmail',
196
203
  'ProjectTag',
197
204
  'RedisComponent',
205
+ 'RedisRedis',
198
206
  'RedisRedisUserConfig',
199
207
  'RedisRedisUserConfigIpFilterObject',
200
208
  'RedisRedisUserConfigMigration',
@@ -247,6 +255,7 @@ __all__ = [
247
255
  'ServiceIntegrationPrometheusUserConfigSourceMysql',
248
256
  'ServiceIntegrationPrometheusUserConfigSourceMysqlTelegraf',
249
257
  'GetAccountAuthenticationSamlFieldMappingResult',
258
+ 'GetCassandaCassandraResult',
250
259
  'GetCassandaCassandraUserConfigResult',
251
260
  'GetCassandaCassandraUserConfigCassandraResult',
252
261
  'GetCassandaCassandraUserConfigIpFilterObjectResult',
@@ -256,6 +265,7 @@ __all__ = [
256
265
  'GetCassandaServiceIntegrationResult',
257
266
  'GetCassandaTagResult',
258
267
  'GetCassandaTechEmailResult',
268
+ 'GetCassandraCassandraResult',
259
269
  'GetCassandraCassandraUserConfigResult',
260
270
  'GetCassandraCassandraUserConfigCassandraResult',
261
271
  'GetCassandraCassandraUserConfigIpFilterObjectResult',
@@ -265,6 +275,7 @@ __all__ = [
265
275
  'GetCassandraServiceIntegrationResult',
266
276
  'GetCassandraTagResult',
267
277
  'GetCassandraTechEmailResult',
278
+ 'GetClickhouseClickhouseResult',
268
279
  'GetClickhouseClickhouseUserConfigResult',
269
280
  'GetClickhouseClickhouseUserConfigIpFilterObjectResult',
270
281
  'GetClickhouseClickhouseUserConfigPrivateAccessResult',
@@ -275,6 +286,7 @@ __all__ = [
275
286
  'GetClickhouseTagResult',
276
287
  'GetClickhouseTechEmailResult',
277
288
  'GetDragonflyComponentResult',
289
+ 'GetDragonflyDragonflyResult',
278
290
  'GetDragonflyDragonflyUserConfigResult',
279
291
  'GetDragonflyDragonflyUserConfigIpFilterObjectResult',
280
292
  'GetDragonflyDragonflyUserConfigMigrationResult',
@@ -295,6 +307,7 @@ __all__ = [
295
307
  'GetFlinkTagResult',
296
308
  'GetFlinkTechEmailResult',
297
309
  'GetGrafanaComponentResult',
310
+ 'GetGrafanaGrafanaResult',
298
311
  'GetGrafanaGrafanaUserConfigResult',
299
312
  'GetGrafanaGrafanaUserConfigAuthAzureadResult',
300
313
  'GetGrafanaGrafanaUserConfigAuthGenericOauthResult',
@@ -360,12 +373,14 @@ __all__ = [
360
373
  'GetKafkaTopicConfigResult',
361
374
  'GetKafkaTopicTagResult',
362
375
  'GetM3AggregatorComponentResult',
376
+ 'GetM3AggregatorM3aggregatorResult',
363
377
  'GetM3AggregatorM3aggregatorUserConfigResult',
364
378
  'GetM3AggregatorM3aggregatorUserConfigIpFilterObjectResult',
365
379
  'GetM3AggregatorServiceIntegrationResult',
366
380
  'GetM3AggregatorTagResult',
367
381
  'GetM3AggregatorTechEmailResult',
368
382
  'GetM3DbComponentResult',
383
+ 'GetM3DbM3dbResult',
369
384
  'GetM3DbM3dbUserConfigResult',
370
385
  'GetM3DbM3dbUserConfigIpFilterObjectResult',
371
386
  'GetM3DbM3dbUserConfigLimitsResult',
@@ -384,6 +399,8 @@ __all__ = [
384
399
  'GetM3DbTagResult',
385
400
  'GetM3DbTechEmailResult',
386
401
  'GetMySqlComponentResult',
402
+ 'GetMySqlMysqlResult',
403
+ 'GetMySqlMysqlParamResult',
387
404
  'GetMySqlMysqlUserConfigResult',
388
405
  'GetMySqlMysqlUserConfigIpFilterObjectResult',
389
406
  'GetMySqlMysqlUserConfigMigrationResult',
@@ -415,6 +432,7 @@ __all__ = [
415
432
  'GetOpenSearchTechEmailResult',
416
433
  'GetPgComponentResult',
417
434
  'GetPgPgResult',
435
+ 'GetPgPgParamResult',
418
436
  'GetPgPgUserConfigResult',
419
437
  'GetPgPgUserConfigIpFilterObjectResult',
420
438
  'GetPgPgUserConfigMigrationResult',
@@ -432,6 +450,7 @@ __all__ = [
432
450
  'GetPgTechEmailResult',
433
451
  'GetProjectTagResult',
434
452
  'GetRedisComponentResult',
453
+ 'GetRedisRediResult',
435
454
  'GetRedisRedisUserConfigResult',
436
455
  'GetRedisRedisUserConfigIpFilterObjectResult',
437
456
  'GetRedisRedisUserConfigMigrationResult',
@@ -573,6 +592,25 @@ class AccountAuthenticationSamlFieldMapping(dict):
573
592
  return pulumi.get(self, "real_name")
574
593
 
575
594
 
595
+ @pulumi.output_type
596
+ class CassandraCassandra(dict):
597
+ def __init__(__self__, *,
598
+ uris: Optional[Sequence[str]] = None):
599
+ """
600
+ :param Sequence[str] uris: Cassandra server URIs.
601
+ """
602
+ if uris is not None:
603
+ pulumi.set(__self__, "uris", uris)
604
+
605
+ @property
606
+ @pulumi.getter
607
+ def uris(self) -> Optional[Sequence[str]]:
608
+ """
609
+ Cassandra server URIs.
610
+ """
611
+ return pulumi.get(self, "uris")
612
+
613
+
576
614
  @pulumi.output_type
577
615
  class CassandraCassandraUserConfig(dict):
578
616
  @staticmethod
@@ -642,7 +680,7 @@ class CassandraCassandraUserConfig(dict):
642
680
  :param int backup_hour: The hour of day (in UTC) when backup for the service is started. New backup is only started if previous backup has already completed.
643
681
  :param int backup_minute: The minute of an hour when backup for the service is started. New backup is only started if previous backup has already completed.
644
682
  :param 'CassandraCassandraUserConfigCassandraArgs' cassandra: Cassandra configuration values
645
- :param str cassandra_version: Cassandra version.
683
+ :param str cassandra_version: Enum: `3`, `4`, `4.1`, and newer. Cassandra version.
646
684
  :param Sequence['CassandraCassandraUserConfigIpFilterObjectArgs'] ip_filter_objects: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'
647
685
  :param Sequence[str] ip_filter_strings: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
648
686
  :param Sequence[str] ip_filters: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
@@ -727,7 +765,7 @@ class CassandraCassandraUserConfig(dict):
727
765
  @pulumi.getter(name="cassandraVersion")
728
766
  def cassandra_version(self) -> Optional[str]:
729
767
  """
730
- Cassandra version.
768
+ Enum: `3`, `4`, `4.1`, and newer. Cassandra version.
731
769
  """
732
770
  return pulumi.get(self, "cassandra_version")
733
771
 
@@ -1170,6 +1208,25 @@ class CassandraTechEmail(dict):
1170
1208
  return pulumi.get(self, "email")
1171
1209
 
1172
1210
 
1211
+ @pulumi.output_type
1212
+ class ClickhouseClickhouse(dict):
1213
+ def __init__(__self__, *,
1214
+ uris: Optional[Sequence[str]] = None):
1215
+ """
1216
+ :param Sequence[str] uris: Clickhouse server URIs.
1217
+ """
1218
+ if uris is not None:
1219
+ pulumi.set(__self__, "uris", uris)
1220
+
1221
+ @property
1222
+ @pulumi.getter
1223
+ def uris(self) -> Optional[Sequence[str]]:
1224
+ """
1225
+ Clickhouse server URIs.
1226
+ """
1227
+ return pulumi.get(self, "uris")
1228
+
1229
+
1173
1230
  @pulumi.output_type
1174
1231
  class ClickhouseClickhouseUserConfig(dict):
1175
1232
  @staticmethod
@@ -2045,6 +2102,80 @@ class DragonflyComponent(dict):
2045
2102
  return pulumi.get(self, "usage")
2046
2103
 
2047
2104
 
2105
+ @pulumi.output_type
2106
+ class DragonflyDragonfly(dict):
2107
+ @staticmethod
2108
+ def __key_warning(key: str):
2109
+ suggest = None
2110
+ if key == "replicaUri":
2111
+ suggest = "replica_uri"
2112
+ elif key == "slaveUris":
2113
+ suggest = "slave_uris"
2114
+
2115
+ if suggest:
2116
+ pulumi.log.warn(f"Key '{key}' not found in DragonflyDragonfly. Access the value via the '{suggest}' property getter instead.")
2117
+
2118
+ def __getitem__(self, key: str) -> Any:
2119
+ DragonflyDragonfly.__key_warning(key)
2120
+ return super().__getitem__(key)
2121
+
2122
+ def get(self, key: str, default = None) -> Any:
2123
+ DragonflyDragonfly.__key_warning(key)
2124
+ return super().get(key, default)
2125
+
2126
+ def __init__(__self__, *,
2127
+ password: Optional[str] = None,
2128
+ replica_uri: Optional[str] = None,
2129
+ slave_uris: Optional[Sequence[str]] = None,
2130
+ uris: Optional[Sequence[str]] = None):
2131
+ """
2132
+ :param str password: Dragonfly password.
2133
+ :param str replica_uri: Dragonfly replica server URI.
2134
+ :param Sequence[str] slave_uris: Dragonfly slave server URIs.
2135
+ :param Sequence[str] uris: Dragonfly server URIs.
2136
+ """
2137
+ if password is not None:
2138
+ pulumi.set(__self__, "password", password)
2139
+ if replica_uri is not None:
2140
+ pulumi.set(__self__, "replica_uri", replica_uri)
2141
+ if slave_uris is not None:
2142
+ pulumi.set(__self__, "slave_uris", slave_uris)
2143
+ if uris is not None:
2144
+ pulumi.set(__self__, "uris", uris)
2145
+
2146
+ @property
2147
+ @pulumi.getter
2148
+ def password(self) -> Optional[str]:
2149
+ """
2150
+ Dragonfly password.
2151
+ """
2152
+ return pulumi.get(self, "password")
2153
+
2154
+ @property
2155
+ @pulumi.getter(name="replicaUri")
2156
+ def replica_uri(self) -> Optional[str]:
2157
+ """
2158
+ Dragonfly replica server URI.
2159
+ """
2160
+ return pulumi.get(self, "replica_uri")
2161
+
2162
+ @property
2163
+ @pulumi.getter(name="slaveUris")
2164
+ def slave_uris(self) -> Optional[Sequence[str]]:
2165
+ """
2166
+ Dragonfly slave server URIs.
2167
+ """
2168
+ return pulumi.get(self, "slave_uris")
2169
+
2170
+ @property
2171
+ @pulumi.getter
2172
+ def uris(self) -> Optional[Sequence[str]]:
2173
+ """
2174
+ Dragonfly server URIs.
2175
+ """
2176
+ return pulumi.get(self, "uris")
2177
+
2178
+
2048
2179
  @pulumi.output_type
2049
2180
  class DragonflyDragonflyUserConfig(dict):
2050
2181
  @staticmethod
@@ -2108,7 +2239,7 @@ class DragonflyDragonflyUserConfig(dict):
2108
2239
  static_ips: Optional[bool] = None):
2109
2240
  """
2110
2241
  :param bool cache_mode: Evict entries when getting close to maxmemory limit. The default value is `false`.
2111
- :param str dragonfly_persistence: When persistence is 'rdb', Dragonfly does RDB dumps each 10 minutes. Also RDB dumps are done according to backup schedule for backup purposes. When persistence is 'off', no RDB dumps and backups are done, so data can be lost at any moment if service is restarted for any reason, or if service is powered off. Also service can't be forked.
2242
+ :param str dragonfly_persistence: Enum: `off`, `rdb`, `dfs`. When persistence is 'rdb' or 'dfs', Dragonfly does RDB or DFS dumps every 10 minutes. Dumps are done according to the backup schedule for backup purposes. When persistence is 'off', no RDB/DFS dumps or backups are done, so data can be lost at any moment if the service is restarted for any reason, or if the service is powered off. Also, the service can't be forked.
2112
2243
  :param bool dragonfly_ssl: Require SSL to access Dragonfly. The default value is `true`.
2113
2244
  :param Sequence['DragonflyDragonflyUserConfigIpFilterObjectArgs'] ip_filter_objects: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'
2114
2245
  :param Sequence[str] ip_filter_strings: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
@@ -2166,7 +2297,7 @@ class DragonflyDragonflyUserConfig(dict):
2166
2297
  @pulumi.getter(name="dragonflyPersistence")
2167
2298
  def dragonfly_persistence(self) -> Optional[str]:
2168
2299
  """
2169
- When persistence is 'rdb', Dragonfly does RDB dumps each 10 minutes. Also RDB dumps are done according to backup schedule for backup purposes. When persistence is 'off', no RDB dumps and backups are done, so data can be lost at any moment if service is restarted for any reason, or if service is powered off. Also service can't be forked.
2300
+ Enum: `off`, `rdb`, `dfs`. When persistence is 'rdb' or 'dfs', Dragonfly does RDB or DFS dumps every 10 minutes. Dumps are done according to the backup schedule for backup purposes. When persistence is 'off', no RDB/DFS dumps or backups are done, so data can be lost at any moment if the service is restarted for any reason, or if the service is powered off. Also, the service can't be forked.
2170
2301
  """
2171
2302
  return pulumi.get(self, "dragonfly_persistence")
2172
2303
 
@@ -2341,7 +2472,7 @@ class DragonflyDragonflyUserConfigMigration(dict):
2341
2472
  :param int port: Port number of the server where to migrate data from.
2342
2473
  :param str dbname: Database name for bootstrapping the initial connection.
2343
2474
  :param str ignore_dbs: Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment).
2344
- :param str method: The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
2475
+ :param str method: Enum: `dump`, `replication`. The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
2345
2476
  :param str password: Password for authentication with the server where to migrate data from.
2346
2477
  :param bool ssl: The server where to migrate data from is secured with SSL. The default value is `true`.
2347
2478
  :param str username: User name for authentication with the server where to migrate data from.
@@ -2397,7 +2528,7 @@ class DragonflyDragonflyUserConfigMigration(dict):
2397
2528
  @pulumi.getter
2398
2529
  def method(self) -> Optional[str]:
2399
2530
  """
2400
- The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
2531
+ Enum: `dump`, `replication`. The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
2401
2532
  """
2402
2533
  return pulumi.get(self, "method")
2403
2534
 
@@ -2917,7 +3048,7 @@ class FlinkFlinkUserConfig(dict):
2917
3048
  static_ips: Optional[bool] = None):
2918
3049
  """
2919
3050
  :param str additional_backup_regions: Additional Cloud Regions for Backup Replication.
2920
- :param str flink_version: Flink major version.
3051
+ :param str flink_version: Enum: `1.16`, `1.19`, and newer. Flink major version.
2921
3052
  :param Sequence['FlinkFlinkUserConfigIpFilterObjectArgs'] ip_filter_objects: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'
2922
3053
  :param Sequence[str] ip_filter_strings: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
2923
3054
  :param Sequence[str] ip_filters: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
@@ -2960,7 +3091,7 @@ class FlinkFlinkUserConfig(dict):
2960
3091
  @pulumi.getter(name="flinkVersion")
2961
3092
  def flink_version(self) -> Optional[str]:
2962
3093
  """
2963
- Flink major version.
3094
+ Enum: `1.16`, `1.19`, and newer. Flink major version.
2964
3095
  """
2965
3096
  return pulumi.get(self, "flink_version")
2966
3097
 
@@ -3302,6 +3433,25 @@ class GrafanaComponent(dict):
3302
3433
  return pulumi.get(self, "usage")
3303
3434
 
3304
3435
 
3436
+ @pulumi.output_type
3437
+ class GrafanaGrafana(dict):
3438
+ def __init__(__self__, *,
3439
+ uris: Optional[Sequence[str]] = None):
3440
+ """
3441
+ :param Sequence[str] uris: Grafana server URIs.
3442
+ """
3443
+ if uris is not None:
3444
+ pulumi.set(__self__, "uris", uris)
3445
+
3446
+ @property
3447
+ @pulumi.getter
3448
+ def uris(self) -> Optional[Sequence[str]]:
3449
+ """
3450
+ Grafana server URIs.
3451
+ """
3452
+ return pulumi.get(self, "uris")
3453
+
3454
+
3305
3455
  @pulumi.output_type
3306
3456
  class GrafanaGrafanaUserConfig(dict):
3307
3457
  @staticmethod
@@ -3449,9 +3599,9 @@ class GrafanaGrafanaUserConfig(dict):
3449
3599
  """
3450
3600
  :param str additional_backup_regions: Additional Cloud Regions for Backup Replication.
3451
3601
  :param bool alerting_enabled: Enable or disable Grafana legacy alerting functionality. This should not be enabled with unified*alerting*enabled.
3452
- :param str alerting_error_or_timeout: Default error or timeout setting for new alerting rules.
3602
+ :param str alerting_error_or_timeout: Enum: `alerting`, `keep_state`. Default error or timeout setting for new alerting rules.
3453
3603
  :param int alerting_max_annotations_to_keep: Max number of alert annotations that Grafana stores. 0 (default) keeps all alert annotations.
3454
- :param str alerting_nodata_or_nullvalues: Default value for 'no data or null values' for new alerting rules.
3604
+ :param str alerting_nodata_or_nullvalues: Enum: `alerting`, `no_data`, `keep_state`, `ok`. Default value for 'no data or null values' for new alerting rules.
3455
3605
  :param bool allow_embedding: Allow embedding Grafana dashboards with iframe/frame/object/embed tags. Disabled by default to limit impact of clickjacking.
3456
3606
  :param 'GrafanaGrafanaUserConfigAuthAzureadArgs' auth_azuread: Azure AD OAuth integration
3457
3607
  :param bool auth_basic_enabled: Enable or disable basic authentication form, used by Grafana built-in login.
@@ -3459,7 +3609,7 @@ class GrafanaGrafanaUserConfig(dict):
3459
3609
  :param 'GrafanaGrafanaUserConfigAuthGithubArgs' auth_github: Github Auth integration
3460
3610
  :param 'GrafanaGrafanaUserConfigAuthGitlabArgs' auth_gitlab: GitLab Auth integration
3461
3611
  :param 'GrafanaGrafanaUserConfigAuthGoogleArgs' auth_google: Google Auth integration
3462
- :param str cookie_samesite: Cookie SameSite attribute: 'strict' prevents sending cookie for cross-site requests, effectively disabling direct linking from other sites to Grafana. 'lax' is the default value.
3612
+ :param str cookie_samesite: Enum: `lax`, `strict`, `none`. Cookie SameSite attribute: 'strict' prevents sending cookie for cross-site requests, effectively disabling direct linking from other sites to Grafana. 'lax' is the default value.
3463
3613
  :param str custom_domain: Serve the web frontend using a custom CNAME pointing to the Aiven DNS name.
3464
3614
  :param bool dashboard_previews_enabled: This feature is new in Grafana 9 and is quite resource intensive. It may cause low-end plans to work more slowly while the dashboard previews are rendering.
3465
3615
  :param str dashboards_min_refresh_interval: Signed sequence of decimal numbers, followed by a unit suffix (ms, s, m, h, d), e.g. 30s, 1h.
@@ -3487,7 +3637,7 @@ class GrafanaGrafanaUserConfig(dict):
3487
3637
  :param bool static_ips: Use static public IP addresses.
3488
3638
  :param bool unified_alerting_enabled: Enable or disable Grafana unified alerting functionality. By default this is enabled and any legacy alerts will be migrated on upgrade to Grafana 9+. To stay on legacy alerting, set unified*alerting*enabled to false and alerting_enabled to true. See https://grafana.com/docs/grafana/latest/alerting/set-up/migrating-alerts/ for more details.
3489
3639
  :param bool user_auto_assign_org: Auto-assign new users on signup to main organization. Defaults to false.
3490
- :param str user_auto_assign_org_role: Set role for new signups. Defaults to Viewer.
3640
+ :param str user_auto_assign_org_role: Enum: `Viewer`, `Admin`, `Editor`. Set role for new signups. Defaults to Viewer.
3491
3641
  :param bool viewers_can_edit: Users with view-only permission can edit but not save dashboards.
3492
3642
  """
3493
3643
  if additional_backup_regions is not None:
@@ -3595,7 +3745,7 @@ class GrafanaGrafanaUserConfig(dict):
3595
3745
  @pulumi.getter(name="alertingErrorOrTimeout")
3596
3746
  def alerting_error_or_timeout(self) -> Optional[str]:
3597
3747
  """
3598
- Default error or timeout setting for new alerting rules.
3748
+ Enum: `alerting`, `keep_state`. Default error or timeout setting for new alerting rules.
3599
3749
  """
3600
3750
  return pulumi.get(self, "alerting_error_or_timeout")
3601
3751
 
@@ -3611,7 +3761,7 @@ class GrafanaGrafanaUserConfig(dict):
3611
3761
  @pulumi.getter(name="alertingNodataOrNullvalues")
3612
3762
  def alerting_nodata_or_nullvalues(self) -> Optional[str]:
3613
3763
  """
3614
- Default value for 'no data or null values' for new alerting rules.
3764
+ Enum: `alerting`, `no_data`, `keep_state`, `ok`. Default value for 'no data or null values' for new alerting rules.
3615
3765
  """
3616
3766
  return pulumi.get(self, "alerting_nodata_or_nullvalues")
3617
3767
 
@@ -3675,7 +3825,7 @@ class GrafanaGrafanaUserConfig(dict):
3675
3825
  @pulumi.getter(name="cookieSamesite")
3676
3826
  def cookie_samesite(self) -> Optional[str]:
3677
3827
  """
3678
- Cookie SameSite attribute: 'strict' prevents sending cookie for cross-site requests, effectively disabling direct linking from other sites to Grafana. 'lax' is the default value.
3828
+ Enum: `lax`, `strict`, `none`. Cookie SameSite attribute: 'strict' prevents sending cookie for cross-site requests, effectively disabling direct linking from other sites to Grafana. 'lax' is the default value.
3679
3829
  """
3680
3830
  return pulumi.get(self, "cookie_samesite")
3681
3831
 
@@ -3902,7 +4052,7 @@ class GrafanaGrafanaUserConfig(dict):
3902
4052
  @pulumi.getter(name="userAutoAssignOrgRole")
3903
4053
  def user_auto_assign_org_role(self) -> Optional[str]:
3904
4054
  """
3905
- Set role for new signups. Defaults to Viewer.
4055
+ Enum: `Viewer`, `Admin`, `Editor`. Set role for new signups. Defaults to Viewer.
3906
4056
  """
3907
4057
  return pulumi.get(self, "user_auto_assign_org_role")
3908
4058
 
@@ -4673,7 +4823,7 @@ class GrafanaGrafanaUserConfigExternalImageStorage(dict):
4673
4823
  """
4674
4824
  :param str access_key: S3 access key. Requires permissions to the S3 bucket for the s3:PutObject and s3:PutObjectAcl actions.
4675
4825
  :param str bucket_url: Bucket URL for S3.
4676
- :param str provider: Provider type.
4826
+ :param str provider: Enum: `s3`. Provider type.
4677
4827
  :param str secret_key: S3 secret key.
4678
4828
  """
4679
4829
  pulumi.set(__self__, "access_key", access_key)
@@ -4701,7 +4851,7 @@ class GrafanaGrafanaUserConfigExternalImageStorage(dict):
4701
4851
  @pulumi.getter
4702
4852
  def provider(self) -> str:
4703
4853
  """
4704
- Provider type.
4854
+ Enum: `s3`. Provider type.
4705
4855
  """
4706
4856
  return pulumi.get(self, "provider")
4707
4857
 
@@ -4842,7 +4992,7 @@ class GrafanaGrafanaUserConfigSmtpServer(dict):
4842
4992
  :param str from_name: Name used in outgoing emails, defaults to Grafana.
4843
4993
  :param str password: Password for SMTP authentication.
4844
4994
  :param bool skip_verify: Skip verifying server certificate. Defaults to false.
4845
- :param str starttls_policy: Either OpportunisticStartTLS, MandatoryStartTLS or NoStartTLS. Default is OpportunisticStartTLS.
4995
+ :param str starttls_policy: Enum: `OpportunisticStartTLS`, `MandatoryStartTLS`, `NoStartTLS`. Either OpportunisticStartTLS, MandatoryStartTLS or NoStartTLS. Default is OpportunisticStartTLS.
4846
4996
  :param str username: Username for SMTP authentication.
4847
4997
  """
4848
4998
  pulumi.set(__self__, "from_address", from_address)
@@ -4911,7 +5061,7 @@ class GrafanaGrafanaUserConfigSmtpServer(dict):
4911
5061
  @pulumi.getter(name="starttlsPolicy")
4912
5062
  def starttls_policy(self) -> Optional[str]:
4913
5063
  """
4914
- Either OpportunisticStartTLS, MandatoryStartTLS or NoStartTLS. Default is OpportunisticStartTLS.
5064
+ Enum: `OpportunisticStartTLS`, `MandatoryStartTLS`, `NoStartTLS`. Either OpportunisticStartTLS, MandatoryStartTLS or NoStartTLS. Default is OpportunisticStartTLS.
4915
5065
  """
4916
5066
  return pulumi.get(self, "starttls_policy")
4917
5067
 
@@ -5161,12 +5311,24 @@ class InfluxDbInfluxdb(dict):
5161
5311
  return super().get(key, default)
5162
5312
 
5163
5313
  def __init__(__self__, *,
5164
- database_name: Optional[str] = None):
5314
+ database_name: Optional[str] = None,
5315
+ password: Optional[str] = None,
5316
+ uris: Optional[Sequence[str]] = None,
5317
+ username: Optional[str] = None):
5165
5318
  """
5166
5319
  :param str database_name: Name of the default InfluxDB database
5320
+ :param str password: InfluxDB password
5321
+ :param Sequence[str] uris: InfluxDB server URIs.
5322
+ :param str username: InfluxDB username
5167
5323
  """
5168
5324
  if database_name is not None:
5169
5325
  pulumi.set(__self__, "database_name", database_name)
5326
+ if password is not None:
5327
+ pulumi.set(__self__, "password", password)
5328
+ if uris is not None:
5329
+ pulumi.set(__self__, "uris", uris)
5330
+ if username is not None:
5331
+ pulumi.set(__self__, "username", username)
5170
5332
 
5171
5333
  @property
5172
5334
  @pulumi.getter(name="databaseName")
@@ -5176,6 +5338,30 @@ class InfluxDbInfluxdb(dict):
5176
5338
  """
5177
5339
  return pulumi.get(self, "database_name")
5178
5340
 
5341
+ @property
5342
+ @pulumi.getter
5343
+ def password(self) -> Optional[str]:
5344
+ """
5345
+ InfluxDB password
5346
+ """
5347
+ return pulumi.get(self, "password")
5348
+
5349
+ @property
5350
+ @pulumi.getter
5351
+ def uris(self) -> Optional[Sequence[str]]:
5352
+ """
5353
+ InfluxDB server URIs.
5354
+ """
5355
+ return pulumi.get(self, "uris")
5356
+
5357
+ @property
5358
+ @pulumi.getter
5359
+ def username(self) -> Optional[str]:
5360
+ """
5361
+ InfluxDB username
5362
+ """
5363
+ return pulumi.get(self, "username")
5364
+
5179
5365
 
5180
5366
  @pulumi.output_type
5181
5367
  class InfluxDbInfluxdbUserConfig(dict):
@@ -6210,10 +6396,10 @@ class KafkaConnectKafkaConnectUserConfigKafkaConnect(dict):
6210
6396
  scheduled_rebalance_max_delay_ms: Optional[int] = None,
6211
6397
  session_timeout_ms: Optional[int] = None):
6212
6398
  """
6213
- :param str connector_client_config_override_policy: Defines what client configurations can be overridden by the connector. Default is None.
6214
- :param str consumer_auto_offset_reset: What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
6399
+ :param str connector_client_config_override_policy: Enum: `None`, `All`. Defines what client configurations can be overridden by the connector. Default is None.
6400
+ :param str consumer_auto_offset_reset: Enum: `earliest`, `latest`. What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
6215
6401
  :param int consumer_fetch_max_bytes: Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not a absolute maximum.
6216
- :param str consumer_isolation_level: Transaction read isolation level. read*uncommitted is the default, but read*committed can be used if consume-exactly-once behavior is desired.
6402
+ :param str consumer_isolation_level: Enum: `read_uncommitted`, `read_committed`. Transaction read isolation level. read*uncommitted is the default, but read*committed can be used if consume-exactly-once behavior is desired.
6217
6403
  :param int consumer_max_partition_fetch_bytes: Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress.
6218
6404
  :param int consumer_max_poll_interval_ms: The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).
6219
6405
  :param int consumer_max_poll_records: The maximum number of records returned in a single call to poll() (defaults to 500).
@@ -6221,7 +6407,7 @@ class KafkaConnectKafkaConnectUserConfigKafkaConnect(dict):
6221
6407
  :param int offset_flush_timeout_ms: Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).
6222
6408
  :param int producer_batch_size: This setting gives the upper bound of the batch size to be sent. If there are fewer than this many bytes accumulated for this partition, the producer will 'linger' for the linger.ms time waiting for more records to show up. A batch size of zero will disable batching entirely (defaults to 16384).
6223
6409
  :param int producer_buffer_memory: The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432).
6224
- :param str producer_compression_type: Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
6410
+ :param str producer_compression_type: Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
6225
6411
  :param int producer_linger_ms: This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting, however if there are fewer than this many bytes accumulated for this partition the producer will 'linger' for the specified time waiting for more records to show up. Defaults to 0.
6226
6412
  :param int producer_max_request_size: This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests.
6227
6413
  :param int scheduled_rebalance_max_delay_ms: The maximum delay that is scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. Defaults to 5 minutes.
@@ -6264,7 +6450,7 @@ class KafkaConnectKafkaConnectUserConfigKafkaConnect(dict):
6264
6450
  @pulumi.getter(name="connectorClientConfigOverridePolicy")
6265
6451
  def connector_client_config_override_policy(self) -> Optional[str]:
6266
6452
  """
6267
- Defines what client configurations can be overridden by the connector. Default is None.
6453
+ Enum: `None`, `All`. Defines what client configurations can be overridden by the connector. Default is None.
6268
6454
  """
6269
6455
  return pulumi.get(self, "connector_client_config_override_policy")
6270
6456
 
@@ -6272,7 +6458,7 @@ class KafkaConnectKafkaConnectUserConfigKafkaConnect(dict):
6272
6458
  @pulumi.getter(name="consumerAutoOffsetReset")
6273
6459
  def consumer_auto_offset_reset(self) -> Optional[str]:
6274
6460
  """
6275
- What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
6461
+ Enum: `earliest`, `latest`. What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
6276
6462
  """
6277
6463
  return pulumi.get(self, "consumer_auto_offset_reset")
6278
6464
 
@@ -6288,7 +6474,7 @@ class KafkaConnectKafkaConnectUserConfigKafkaConnect(dict):
6288
6474
  @pulumi.getter(name="consumerIsolationLevel")
6289
6475
  def consumer_isolation_level(self) -> Optional[str]:
6290
6476
  """
6291
- Transaction read isolation level. read*uncommitted is the default, but read*committed can be used if consume-exactly-once behavior is desired.
6477
+ Enum: `read_uncommitted`, `read_committed`. Transaction read isolation level. read*uncommitted is the default, but read*committed can be used if consume-exactly-once behavior is desired.
6292
6478
  """
6293
6479
  return pulumi.get(self, "consumer_isolation_level")
6294
6480
 
@@ -6352,7 +6538,7 @@ class KafkaConnectKafkaConnectUserConfigKafkaConnect(dict):
6352
6538
  @pulumi.getter(name="producerCompressionType")
6353
6539
  def producer_compression_type(self) -> Optional[str]:
6354
6540
  """
6355
- Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
6541
+ Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
6356
6542
  """
6357
6543
  return pulumi.get(self, "producer_compression_type")
6358
6544
 
@@ -6703,13 +6889,15 @@ class KafkaKafka(dict):
6703
6889
  access_key: Optional[str] = None,
6704
6890
  connect_uri: Optional[str] = None,
6705
6891
  rest_uri: Optional[str] = None,
6706
- schema_registry_uri: Optional[str] = None):
6892
+ schema_registry_uri: Optional[str] = None,
6893
+ uris: Optional[Sequence[str]] = None):
6707
6894
  """
6708
6895
  :param str access_cert: The Kafka client certificate.
6709
6896
  :param str access_key: The Kafka client certificate key.
6710
6897
  :param str connect_uri: The Kafka Connect URI.
6711
6898
  :param str rest_uri: The Kafka REST URI.
6712
6899
  :param str schema_registry_uri: The Schema Registry URI.
6900
+ :param Sequence[str] uris: Kafka server URIs.
6713
6901
  """
6714
6902
  if access_cert is not None:
6715
6903
  pulumi.set(__self__, "access_cert", access_cert)
@@ -6721,6 +6909,8 @@ class KafkaKafka(dict):
6721
6909
  pulumi.set(__self__, "rest_uri", rest_uri)
6722
6910
  if schema_registry_uri is not None:
6723
6911
  pulumi.set(__self__, "schema_registry_uri", schema_registry_uri)
6912
+ if uris is not None:
6913
+ pulumi.set(__self__, "uris", uris)
6724
6914
 
6725
6915
  @property
6726
6916
  @pulumi.getter(name="accessCert")
@@ -6762,6 +6952,14 @@ class KafkaKafka(dict):
6762
6952
  """
6763
6953
  return pulumi.get(self, "schema_registry_uri")
6764
6954
 
6955
+ @property
6956
+ @pulumi.getter
6957
+ def uris(self) -> Optional[Sequence[str]]:
6958
+ """
6959
+ Kafka server URIs.
6960
+ """
6961
+ return pulumi.get(self, "uris")
6962
+
6765
6963
 
6766
6964
  @pulumi.output_type
6767
6965
  class KafkaKafkaUserConfig(dict):
@@ -6859,7 +7057,7 @@ class KafkaKafkaUserConfig(dict):
6859
7057
  :param bool kafka_rest: Enable Kafka-REST service. The default value is `false`.
6860
7058
  :param bool kafka_rest_authorization: Enable authorization in Kafka-REST service.
6861
7059
  :param 'KafkaKafkaUserConfigKafkaRestConfigArgs' kafka_rest_config: Kafka REST configuration
6862
- :param str kafka_version: Kafka major version.
7060
+ :param str kafka_version: Enum: `3.1`, `3.2`, `3.3`, `3.4`, `3.5`, `3.6`, `3.7`, and newer. Kafka major version.
6863
7061
  :param 'KafkaKafkaUserConfigPrivateAccessArgs' private_access: Allow access to selected service ports from private networks
6864
7062
  :param 'KafkaKafkaUserConfigPrivatelinkAccessArgs' privatelink_access: Allow access to selected service components through Privatelink
6865
7063
  :param 'KafkaKafkaUserConfigPublicAccessArgs' public_access: Allow access to selected service ports from the public Internet
@@ -7028,7 +7226,7 @@ class KafkaKafkaUserConfig(dict):
7028
7226
  @pulumi.getter(name="kafkaVersion")
7029
7227
  def kafka_version(self) -> Optional[str]:
7030
7228
  """
7031
- Kafka major version.
7229
+ Enum: `3.1`, `3.2`, `3.3`, `3.4`, `3.5`, `3.6`, `3.7`, and newer. Kafka major version.
7032
7230
  """
7033
7231
  return pulumi.get(self, "kafka_version")
7034
7232
 
@@ -7285,7 +7483,7 @@ class KafkaKafkaUserConfigKafka(dict):
7285
7483
  transaction_state_log_segment_bytes: Optional[int] = None):
7286
7484
  """
7287
7485
  :param bool auto_create_topics_enable: Enable auto creation of topics.
7288
- :param str compression_type: Specify the final compression type for a given topic. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'uncompressed' which is equivalent to no compression; and 'producer' which means retain the original compression codec set by the producer.
7486
+ :param str compression_type: Enum: `gzip`, `snappy`, `lz4`, `zstd`, `uncompressed`, `producer`. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'uncompressed' which is equivalent to no compression; and 'producer' which means retain the original compression codec set by the producer.
7289
7487
  :param int connections_max_idle_ms: Idle connections timeout: the server socket processor threads close the connections that idle for longer than this.
7290
7488
  :param int default_replication_factor: Replication factor for autocreated topics.
7291
7489
  :param int group_initial_rebalance_delay_ms: The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time.
@@ -7295,7 +7493,7 @@ class KafkaKafkaUserConfigKafka(dict):
7295
7493
  :param int log_cleaner_max_compaction_lag_ms: The maximum amount of time message will remain uncompacted. Only applicable for logs that are being compacted.
7296
7494
  :param float log_cleaner_min_cleanable_ratio: Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option.
7297
7495
  :param int log_cleaner_min_compaction_lag_ms: The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.
7298
- :param str log_cleanup_policy: The default cleanup policy for segments beyond the retention window.
7496
+ :param str log_cleanup_policy: Enum: `delete`, `compact`, `compact,delete`. The default cleanup policy for segments beyond the retention window.
7299
7497
  :param int log_flush_interval_messages: The number of messages accumulated on a log partition before messages are flushed to disk.
7300
7498
  :param int log_flush_interval_ms: The maximum time in ms that a message in any topic is kept in memory before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used.
7301
7499
  :param int log_index_interval_bytes: The interval with which Kafka adds an entry to the offset index.
@@ -7304,7 +7502,7 @@ class KafkaKafkaUserConfigKafka(dict):
7304
7502
  :param int log_local_retention_ms: The number of milliseconds to keep the local log segments before it gets eligible for deletion. If set to -2, the value of log.retention.ms is used. The effective value should always be less than or equal to log.retention.ms value.
7305
7503
  :param bool log_message_downconversion_enable: This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests.
7306
7504
  :param int log_message_timestamp_difference_max_ms: The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message.
7307
- :param str log_message_timestamp_type: Define whether the timestamp in the message is message create time or log append time.
7505
+ :param str log_message_timestamp_type: Enum: `CreateTime`, `LogAppendTime`. Define whether the timestamp in the message is message create time or log append time.
7308
7506
  :param bool log_preallocate: Should pre allocate file when create new segment?
7309
7507
  :param int log_retention_bytes: The maximum size of the log before deleting messages.
7310
7508
  :param int log_retention_hours: The number of hours to keep a log file before deleting it.
@@ -7436,7 +7634,7 @@ class KafkaKafkaUserConfigKafka(dict):
7436
7634
  @pulumi.getter(name="compressionType")
7437
7635
  def compression_type(self) -> Optional[str]:
7438
7636
  """
7439
- Specify the final compression type for a given topic. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'uncompressed' which is equivalent to no compression; and 'producer' which means retain the original compression codec set by the producer.
7637
+ Enum: `gzip`, `snappy`, `lz4`, `zstd`, `uncompressed`, `producer`. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'uncompressed' which is equivalent to no compression; and 'producer' which means retain the original compression codec set by the producer.
7440
7638
  """
7441
7639
  return pulumi.get(self, "compression_type")
7442
7640
 
@@ -7516,7 +7714,7 @@ class KafkaKafkaUserConfigKafka(dict):
7516
7714
  @pulumi.getter(name="logCleanupPolicy")
7517
7715
  def log_cleanup_policy(self) -> Optional[str]:
7518
7716
  """
7519
- The default cleanup policy for segments beyond the retention window.
7717
+ Enum: `delete`, `compact`, `compact,delete`. The default cleanup policy for segments beyond the retention window.
7520
7718
  """
7521
7719
  return pulumi.get(self, "log_cleanup_policy")
7522
7720
 
@@ -7588,7 +7786,7 @@ class KafkaKafkaUserConfigKafka(dict):
7588
7786
  @pulumi.getter(name="logMessageTimestampType")
7589
7787
  def log_message_timestamp_type(self) -> Optional[str]:
7590
7788
  """
7591
- Define whether the timestamp in the message is message create time or log append time.
7789
+ Enum: `CreateTime`, `LogAppendTime`. Define whether the timestamp in the message is message create time or log append time.
7592
7790
  """
7593
7791
  return pulumi.get(self, "log_message_timestamp_type")
7594
7792
 
@@ -7891,10 +8089,10 @@ class KafkaKafkaUserConfigKafkaConnectConfig(dict):
7891
8089
  scheduled_rebalance_max_delay_ms: Optional[int] = None,
7892
8090
  session_timeout_ms: Optional[int] = None):
7893
8091
  """
7894
- :param str connector_client_config_override_policy: Defines what client configurations can be overridden by the connector. Default is None.
7895
- :param str consumer_auto_offset_reset: What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
8092
+ :param str connector_client_config_override_policy: Enum: `None`, `All`. Defines what client configurations can be overridden by the connector. Default is None.
8093
+ :param str consumer_auto_offset_reset: Enum: `earliest`, `latest`. What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
7896
8094
  :param int consumer_fetch_max_bytes: Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not a absolute maximum.
7897
- :param str consumer_isolation_level: Transaction read isolation level. read*uncommitted is the default, but read*committed can be used if consume-exactly-once behavior is desired.
8095
+ :param str consumer_isolation_level: Enum: `read_uncommitted`, `read_committed`. Transaction read isolation level. read*uncommitted is the default, but read*committed can be used if consume-exactly-once behavior is desired.
7898
8096
  :param int consumer_max_partition_fetch_bytes: Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress.
7899
8097
  :param int consumer_max_poll_interval_ms: The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).
7900
8098
  :param int consumer_max_poll_records: The maximum number of records returned in a single call to poll() (defaults to 500).
@@ -7902,7 +8100,7 @@ class KafkaKafkaUserConfigKafkaConnectConfig(dict):
7902
8100
  :param int offset_flush_timeout_ms: Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).
7903
8101
  :param int producer_batch_size: This setting gives the upper bound of the batch size to be sent. If there are fewer than this many bytes accumulated for this partition, the producer will 'linger' for the linger.ms time waiting for more records to show up. A batch size of zero will disable batching entirely (defaults to 16384).
7904
8102
  :param int producer_buffer_memory: The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432).
7905
- :param str producer_compression_type: Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
8103
+ :param str producer_compression_type: Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
7906
8104
  :param int producer_linger_ms: This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting, however if there are fewer than this many bytes accumulated for this partition the producer will 'linger' for the specified time waiting for more records to show up. Defaults to 0.
7907
8105
  :param int producer_max_request_size: This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests.
7908
8106
  :param int scheduled_rebalance_max_delay_ms: The maximum delay that is scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. Defaults to 5 minutes.
@@ -7945,7 +8143,7 @@ class KafkaKafkaUserConfigKafkaConnectConfig(dict):
7945
8143
  @pulumi.getter(name="connectorClientConfigOverridePolicy")
7946
8144
  def connector_client_config_override_policy(self) -> Optional[str]:
7947
8145
  """
7948
- Defines what client configurations can be overridden by the connector. Default is None.
8146
+ Enum: `None`, `All`. Defines what client configurations can be overridden by the connector. Default is None.
7949
8147
  """
7950
8148
  return pulumi.get(self, "connector_client_config_override_policy")
7951
8149
 
@@ -7953,7 +8151,7 @@ class KafkaKafkaUserConfigKafkaConnectConfig(dict):
7953
8151
  @pulumi.getter(name="consumerAutoOffsetReset")
7954
8152
  def consumer_auto_offset_reset(self) -> Optional[str]:
7955
8153
  """
7956
- What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
8154
+ Enum: `earliest`, `latest`. What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
7957
8155
  """
7958
8156
  return pulumi.get(self, "consumer_auto_offset_reset")
7959
8157
 
@@ -7969,7 +8167,7 @@ class KafkaKafkaUserConfigKafkaConnectConfig(dict):
7969
8167
  @pulumi.getter(name="consumerIsolationLevel")
7970
8168
  def consumer_isolation_level(self) -> Optional[str]:
7971
8169
  """
7972
- Transaction read isolation level. read*uncommitted is the default, but read*committed can be used if consume-exactly-once behavior is desired.
8170
+ Enum: `read_uncommitted`, `read_committed`. Transaction read isolation level. read*uncommitted is the default, but read*committed can be used if consume-exactly-once behavior is desired.
7973
8171
  """
7974
8172
  return pulumi.get(self, "consumer_isolation_level")
7975
8173
 
@@ -8033,7 +8231,7 @@ class KafkaKafkaUserConfigKafkaConnectConfig(dict):
8033
8231
  @pulumi.getter(name="producerCompressionType")
8034
8232
  def producer_compression_type(self) -> Optional[str]:
8035
8233
  """
8036
- Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
8234
+ Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
8037
8235
  """
8038
8236
  return pulumi.get(self, "producer_compression_type")
8039
8237
 
@@ -8121,11 +8319,11 @@ class KafkaKafkaUserConfigKafkaRestConfig(dict):
8121
8319
  """
8122
8320
  :param bool consumer_enable_auto_commit: If true the consumer's offset will be periodically committed to Kafka in the background. The default value is `true`.
8123
8321
  :param int consumer_request_max_bytes: Maximum number of bytes in unencoded message keys and values by a single request. The default value is `67108864`.
8124
- :param int consumer_request_timeout_ms: The maximum total time to wait for messages for a request if the maximum number of messages has not yet been reached. The default value is `1000`.
8125
- :param str name_strategy: Name strategy to use when selecting subject for storing schemas. The default value is `topic_name`.
8322
+ :param int consumer_request_timeout_ms: Enum: `1000`, `15000`, `30000`. The maximum total time to wait for messages for a request if the maximum number of messages has not yet been reached. The default value is `1000`.
8323
+ :param str name_strategy: Enum: `topic_name`, `record_name`, `topic_record_name`. Name strategy to use when selecting subject for storing schemas. The default value is `topic_name`.
8126
8324
  :param bool name_strategy_validation: If true, validate that given schema is registered under expected subject name by the used name strategy when producing messages. The default value is `true`.
8127
- :param str producer_acks: The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to 'all' or '-1', the leader will wait for the full set of in-sync replicas to acknowledge the record. The default value is `1`.
8128
- :param str producer_compression_type: Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
8325
+ :param str producer_acks: Enum: `all`, `-1`, `0`, `1`. The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to 'all' or '-1', the leader will wait for the full set of in-sync replicas to acknowledge the record. The default value is `1`.
8326
+ :param str producer_compression_type: Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
8129
8327
  :param int producer_linger_ms: Wait for up to the given delay to allow batching records together. The default value is `0`.
8130
8328
  :param int producer_max_request_size: The maximum size of a request in bytes. Note that Kafka broker can also cap the record batch size. The default value is `1048576`.
8131
8329
  :param int simpleconsumer_pool_size_max: Maximum number of SimpleConsumers that can be instantiated per broker. The default value is `25`.
@@ -8171,7 +8369,7 @@ class KafkaKafkaUserConfigKafkaRestConfig(dict):
8171
8369
  @pulumi.getter(name="consumerRequestTimeoutMs")
8172
8370
  def consumer_request_timeout_ms(self) -> Optional[int]:
8173
8371
  """
8174
- The maximum total time to wait for messages for a request if the maximum number of messages has not yet been reached. The default value is `1000`.
8372
+ Enum: `1000`, `15000`, `30000`. The maximum total time to wait for messages for a request if the maximum number of messages has not yet been reached. The default value is `1000`.
8175
8373
  """
8176
8374
  return pulumi.get(self, "consumer_request_timeout_ms")
8177
8375
 
@@ -8179,7 +8377,7 @@ class KafkaKafkaUserConfigKafkaRestConfig(dict):
8179
8377
  @pulumi.getter(name="nameStrategy")
8180
8378
  def name_strategy(self) -> Optional[str]:
8181
8379
  """
8182
- Name strategy to use when selecting subject for storing schemas. The default value is `topic_name`.
8380
+ Enum: `topic_name`, `record_name`, `topic_record_name`. Name strategy to use when selecting subject for storing schemas. The default value is `topic_name`.
8183
8381
  """
8184
8382
  return pulumi.get(self, "name_strategy")
8185
8383
 
@@ -8195,7 +8393,7 @@ class KafkaKafkaUserConfigKafkaRestConfig(dict):
8195
8393
  @pulumi.getter(name="producerAcks")
8196
8394
  def producer_acks(self) -> Optional[str]:
8197
8395
  """
8198
- The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to 'all' or '-1', the leader will wait for the full set of in-sync replicas to acknowledge the record. The default value is `1`.
8396
+ Enum: `all`, `-1`, `0`, `1`. The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to 'all' or '-1', the leader will wait for the full set of in-sync replicas to acknowledge the record. The default value is `1`.
8199
8397
  """
8200
8398
  return pulumi.get(self, "producer_acks")
8201
8399
 
@@ -8203,7 +8401,7 @@ class KafkaKafkaUserConfigKafkaRestConfig(dict):
8203
8401
  @pulumi.getter(name="producerCompressionType")
8204
8402
  def producer_compression_type(self) -> Optional[str]:
8205
8403
  """
8206
- Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
8404
+ Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
8207
8405
  """
8208
8406
  return pulumi.get(self, "producer_compression_type")
8209
8407
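The enum annotations added in this hunk correspond to the `kafka_rest_config` block of the Kafka user config. A minimal sketch of setting them from the Python SDK is shown below; the project, cloud, and plan values are placeholders, and the class names follow the `Args` input types generated alongside these output types.

```python
import pulumi
import pulumi_aiven as aiven

kafka = aiven.Kafka(
    "example-kafka",
    project="my-project",                # placeholder project name
    cloud_name="google-europe-west1",
    plan="business-4",
    service_name="example-kafka",
    kafka_user_config=aiven.KafkaKafkaUserConfigArgs(
        kafka_rest=True,  # enable the REST proxy so kafka_rest_config takes effect
        kafka_rest_config=aiven.KafkaKafkaUserConfigKafkaRestConfigArgs(
            producer_acks="all",                # Enum: all, -1, 0, 1
            producer_compression_type="zstd",   # Enum: gzip, snappy, lz4, zstd, none
            consumer_request_timeout_ms=30000,  # Enum: 1000, 15000, 30000
            name_strategy="topic_name",         # Enum: topic_name, record_name, topic_record_name
        ),
    ),
)

pulumi.export("kafka_service_uri", kafka.service_uri)
```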
 
@@ -9854,6 +10052,54 @@ class M3AggregatorComponent(dict):
9854
10052
  return pulumi.get(self, "usage")
9855
10053
 
9856
10054
 
10055
+ @pulumi.output_type
10056
+ class M3AggregatorM3aggregator(dict):
10057
+ @staticmethod
10058
+ def __key_warning(key: str):
10059
+ suggest = None
10060
+ if key == "aggregatorHttpUri":
10061
+ suggest = "aggregator_http_uri"
10062
+
10063
+ if suggest:
10064
+ pulumi.log.warn(f"Key '{key}' not found in M3AggregatorM3aggregator. Access the value via the '{suggest}' property getter instead.")
10065
+
10066
+ def __getitem__(self, key: str) -> Any:
10067
+ M3AggregatorM3aggregator.__key_warning(key)
10068
+ return super().__getitem__(key)
10069
+
10070
+ def get(self, key: str, default = None) -> Any:
10071
+ M3AggregatorM3aggregator.__key_warning(key)
10072
+ return super().get(key, default)
10073
+
10074
+ def __init__(__self__, *,
10075
+ aggregator_http_uri: Optional[str] = None,
10076
+ uris: Optional[Sequence[str]] = None):
10077
+ """
10078
+ :param str aggregator_http_uri: M3 Aggregator HTTP URI.
10079
+ :param Sequence[str] uris: M3 Aggregator server URIs.
10080
+ """
10081
+ if aggregator_http_uri is not None:
10082
+ pulumi.set(__self__, "aggregator_http_uri", aggregator_http_uri)
10083
+ if uris is not None:
10084
+ pulumi.set(__self__, "uris", uris)
10085
+
10086
+ @property
10087
+ @pulumi.getter(name="aggregatorHttpUri")
10088
+ def aggregator_http_uri(self) -> Optional[str]:
10089
+ """
10090
+ M3 Aggregator HTTP URI.
10091
+ """
10092
+ return pulumi.get(self, "aggregator_http_uri")
10093
+
10094
+ @property
10095
+ @pulumi.getter
10096
+ def uris(self) -> Optional[Sequence[str]]:
10097
+ """
10098
+ M3 Aggregator server URIs.
10099
+ """
10100
+ return pulumi.get(self, "uris")
10101
+
10102
+
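The new `M3AggregatorM3aggregator` output mirrors the connection-info block added to the `M3Aggregator` resource. A sketch of reading it is below; the singular `m3aggregator` property name and its shape are assumptions here, and nested outputs are unwrapped with `.apply()` because Pulumi's Python SDK does not lift attributes on `Output` values.

```python
import pulumi
import pulumi_aiven as aiven

m3a = aiven.M3Aggregator(
    "example-m3a",
    project="my-project",                # placeholder
    cloud_name="google-europe-west1",
    plan="business-8",
    service_name="example-m3a",
    m3aggregator_user_config=aiven.M3AggregatorM3aggregatorUserConfigArgs(
        # Enum: 1.1, 1.2, 1.5, and newer; prefer this over the deprecated m3_version.
        m3aggregator_version="1.5",
    ),
)

pulumi.export("m3a_service_uri", m3a.service_uri)
pulumi.export(
    "m3a_aggregator_http_uri",
    # Property name assumed; adjust if the SDK exposes the block differently.
    m3a.m3aggregator.apply(lambda agg: agg.aggregator_http_uri if agg else None),
)
```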
9857
10103
  @pulumi.output_type
9858
10104
  class M3AggregatorM3aggregatorUserConfig(dict):
9859
10105
  @staticmethod
@@ -9901,8 +10147,8 @@ class M3AggregatorM3aggregatorUserConfig(dict):
9901
10147
  :param Sequence['M3AggregatorM3aggregatorUserConfigIpFilterObjectArgs'] ip_filter_objects: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'
9902
10148
  :param Sequence[str] ip_filter_strings: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
9903
10149
  :param Sequence[str] ip_filters: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
9904
- :param str m3_version: M3 major version (deprecated, use m3aggregator_version).
9905
- :param str m3aggregator_version: M3 major version (the minimum compatible version).
10150
+ :param str m3_version: Enum: `1.1`, `1.2`, `1.5`, and newer. M3 major version (deprecated, use m3aggregator_version).
10151
+ :param str m3aggregator_version: Enum: `1.1`, `1.2`, `1.5`, and newer. M3 major version (the minimum compatible version).
9906
10152
  :param bool service_log: Store logs for the service so that they are available in the HTTP API and console.
9907
10153
  :param bool static_ips: Use static public IP addresses.
9908
10154
  """
@@ -9962,7 +10208,7 @@ class M3AggregatorM3aggregatorUserConfig(dict):
9962
10208
  @pulumi.getter(name="m3Version")
9963
10209
  def m3_version(self) -> Optional[str]:
9964
10210
  """
9965
- M3 major version (deprecated, use m3aggregator_version).
10211
+ Enum: `1.1`, `1.2`, `1.5`, and newer. M3 major version (deprecated, use m3aggregator_version).
9966
10212
  """
9967
10213
  return pulumi.get(self, "m3_version")
9968
10214
 
@@ -9970,7 +10216,7 @@ class M3AggregatorM3aggregatorUserConfig(dict):
9970
10216
  @pulumi.getter(name="m3aggregatorVersion")
9971
10217
  def m3aggregator_version(self) -> Optional[str]:
9972
10218
  """
9973
- M3 major version (the minimum compatible version).
10219
+ Enum: `1.1`, `1.2`, `1.5`, and newer. M3 major version (the minimum compatible version).
9974
10220
  """
9975
10221
  return pulumi.get(self, "m3aggregator_version")
9976
10222
 
@@ -10238,6 +10484,110 @@ class M3DbComponent(dict):
10238
10484
  return pulumi.get(self, "usage")
10239
10485
 
10240
10486
 
10487
+ @pulumi.output_type
10488
+ class M3DbM3db(dict):
10489
+ @staticmethod
10490
+ def __key_warning(key: str):
10491
+ suggest = None
10492
+ if key == "httpClusterUri":
10493
+ suggest = "http_cluster_uri"
10494
+ elif key == "httpNodeUri":
10495
+ suggest = "http_node_uri"
10496
+ elif key == "influxdbUri":
10497
+ suggest = "influxdb_uri"
10498
+ elif key == "prometheusRemoteReadUri":
10499
+ suggest = "prometheus_remote_read_uri"
10500
+ elif key == "prometheusRemoteWriteUri":
10501
+ suggest = "prometheus_remote_write_uri"
10502
+
10503
+ if suggest:
10504
+ pulumi.log.warn(f"Key '{key}' not found in M3DbM3db. Access the value via the '{suggest}' property getter instead.")
10505
+
10506
+ def __getitem__(self, key: str) -> Any:
10507
+ M3DbM3db.__key_warning(key)
10508
+ return super().__getitem__(key)
10509
+
10510
+ def get(self, key: str, default = None) -> Any:
10511
+ M3DbM3db.__key_warning(key)
10512
+ return super().get(key, default)
10513
+
10514
+ def __init__(__self__, *,
10515
+ http_cluster_uri: Optional[str] = None,
10516
+ http_node_uri: Optional[str] = None,
10517
+ influxdb_uri: Optional[str] = None,
10518
+ prometheus_remote_read_uri: Optional[str] = None,
10519
+ prometheus_remote_write_uri: Optional[str] = None,
10520
+ uris: Optional[Sequence[str]] = None):
10521
+ """
10522
+ :param str http_cluster_uri: M3DB cluster URI.
10523
+ :param str http_node_uri: M3DB node URI.
10524
+ :param str influxdb_uri: InfluxDB URI.
10525
+ :param str prometheus_remote_read_uri: Prometheus remote read URI.
10526
+ :param str prometheus_remote_write_uri: Prometheus remote write URI.
10527
+ :param Sequence[str] uris: M3DB server URIs.
10528
+ """
10529
+ if http_cluster_uri is not None:
10530
+ pulumi.set(__self__, "http_cluster_uri", http_cluster_uri)
10531
+ if http_node_uri is not None:
10532
+ pulumi.set(__self__, "http_node_uri", http_node_uri)
10533
+ if influxdb_uri is not None:
10534
+ pulumi.set(__self__, "influxdb_uri", influxdb_uri)
10535
+ if prometheus_remote_read_uri is not None:
10536
+ pulumi.set(__self__, "prometheus_remote_read_uri", prometheus_remote_read_uri)
10537
+ if prometheus_remote_write_uri is not None:
10538
+ pulumi.set(__self__, "prometheus_remote_write_uri", prometheus_remote_write_uri)
10539
+ if uris is not None:
10540
+ pulumi.set(__self__, "uris", uris)
10541
+
10542
+ @property
10543
+ @pulumi.getter(name="httpClusterUri")
10544
+ def http_cluster_uri(self) -> Optional[str]:
10545
+ """
10546
+ M3DB cluster URI.
10547
+ """
10548
+ return pulumi.get(self, "http_cluster_uri")
10549
+
10550
+ @property
10551
+ @pulumi.getter(name="httpNodeUri")
10552
+ def http_node_uri(self) -> Optional[str]:
10553
+ """
10554
+ M3DB node URI.
10555
+ """
10556
+ return pulumi.get(self, "http_node_uri")
10557
+
10558
+ @property
10559
+ @pulumi.getter(name="influxdbUri")
10560
+ def influxdb_uri(self) -> Optional[str]:
10561
+ """
10562
+ InfluxDB URI.
10563
+ """
10564
+ return pulumi.get(self, "influxdb_uri")
10565
+
10566
+ @property
10567
+ @pulumi.getter(name="prometheusRemoteReadUri")
10568
+ def prometheus_remote_read_uri(self) -> Optional[str]:
10569
+ """
10570
+ Prometheus remote read URI.
10571
+ """
10572
+ return pulumi.get(self, "prometheus_remote_read_uri")
10573
+
10574
+ @property
10575
+ @pulumi.getter(name="prometheusRemoteWriteUri")
10576
+ def prometheus_remote_write_uri(self) -> Optional[str]:
10577
+ """
10578
+ Prometheus remote write URI.
10579
+ """
10580
+ return pulumi.get(self, "prometheus_remote_write_uri")
10581
+
10582
+ @property
10583
+ @pulumi.getter
10584
+ def uris(self) -> Optional[Sequence[str]]:
10585
+ """
10586
+ M3DB server URIs.
10587
+ """
10588
+ return pulumi.get(self, "uris")
10589
+
10590
+
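The `M3DbM3db` block added here carries the service's connection endpoints, including the Prometheus remote read/write URIs. A sketch of exporting one of them follows; the singular `m3db` property name is an assumption, and the service settings are placeholders.

```python
import pulumi
import pulumi_aiven as aiven

m3db = aiven.M3Db(
    "example-m3db",
    project="my-project",                # placeholder
    cloud_name="google-europe-west1",
    plan="business-8",
    service_name="example-m3db",
)

pulumi.export(
    "prometheus_remote_write_uri",
    # Property name assumed; unwrap the nested block with .apply().
    m3db.m3db.apply(lambda block: block.prometheus_remote_write_uri if block else None),
)
```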
10241
10591
  @pulumi.output_type
10242
10592
  class M3DbM3dbUserConfig(dict):
10243
10593
  @staticmethod
@@ -10310,9 +10660,9 @@ class M3DbM3dbUserConfig(dict):
10310
10660
  :param Sequence[str] ip_filters: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
10311
10661
  :param 'M3DbM3dbUserConfigLimitsArgs' limits: M3 limits
10312
10662
  :param 'M3DbM3dbUserConfigM3Args' m3: M3 specific configuration options
10313
- :param str m3_version: M3 major version (deprecated, use m3db_version).
10663
+ :param str m3_version: Enum: `1.1`, `1.2`, `1.5`, and newer. M3 major version (deprecated, use m3db_version).
10314
10664
  :param bool m3coordinator_enable_graphite_carbon_ingest: Enables access to Graphite Carbon plaintext metrics ingestion. It can be enabled only for services inside VPCs. The metrics are written to aggregated namespaces only.
10315
- :param str m3db_version: M3 major version (the minimum compatible version).
10665
+ :param str m3db_version: Enum: `1.1`, `1.2`, `1.5`, and newer. M3 major version (the minimum compatible version).
10316
10666
  :param Sequence['M3DbM3dbUserConfigNamespaceArgs'] namespaces: List of M3 namespaces
10317
10667
  :param 'M3DbM3dbUserConfigPrivateAccessArgs' private_access: Allow access to selected service ports from private networks
10318
10668
  :param str project_to_fork_from: Name of another project to fork a service from. This has effect only when a new service is being created.
@@ -10422,7 +10772,7 @@ class M3DbM3dbUserConfig(dict):
10422
10772
  @pulumi.getter(name="m3Version")
10423
10773
  def m3_version(self) -> Optional[str]:
10424
10774
  """
10425
- M3 major version (deprecated, use m3db_version).
10775
+ Enum: `1.1`, `1.2`, `1.5`, and newer. M3 major version (deprecated, use m3db_version).
10426
10776
  """
10427
10777
  return pulumi.get(self, "m3_version")
10428
10778
 
@@ -10438,7 +10788,7 @@ class M3DbM3dbUserConfig(dict):
10438
10788
  @pulumi.getter(name="m3dbVersion")
10439
10789
  def m3db_version(self) -> Optional[str]:
10440
10790
  """
10441
- M3 major version (the minimum compatible version).
10791
+ Enum: `1.1`, `1.2`, `1.5`, and newer. M3 major version (the minimum compatible version).
10442
10792
  """
10443
10793
  return pulumi.get(self, "m3db_version")
10444
10794
 
@@ -10738,7 +11088,7 @@ class M3DbM3dbUserConfigNamespace(dict):
10738
11088
  resolution: Optional[str] = None):
10739
11089
  """
10740
11090
  :param str name: The name of the namespace.
10741
- :param str type: The type of aggregation (aggregated/unaggregated).
11091
+ :param str type: Enum: `aggregated`, `unaggregated`. The type of aggregation (aggregated/unaggregated).
10742
11092
  :param 'M3DbM3dbUserConfigNamespaceOptionsArgs' options: Namespace options
10743
11093
  :param str resolution: The resolution for an aggregated namespace.
10744
11094
  """
@@ -10761,7 +11111,7 @@ class M3DbM3dbUserConfigNamespace(dict):
10761
11111
  @pulumi.getter
10762
11112
  def type(self) -> str:
10763
11113
  """
10764
- The type of aggregation (aggregated/unaggregated).
11114
+ Enum: `aggregated`, `unaggregated`. The type of aggregation (aggregated/unaggregated).
10765
11115
  """
10766
11116
  return pulumi.get(self, "type")
10767
11117
 
@@ -11394,6 +11744,190 @@ class MySqlComponent(dict):
11394
11744
  return pulumi.get(self, "usage")
11395
11745
 
11396
11746
 
11747
+ @pulumi.output_type
11748
+ class MySqlMysql(dict):
11749
+ @staticmethod
11750
+ def __key_warning(key: str):
11751
+ suggest = None
11752
+ if key == "replicaUri":
11753
+ suggest = "replica_uri"
11754
+ elif key == "standbyUris":
11755
+ suggest = "standby_uris"
11756
+ elif key == "syncingUris":
11757
+ suggest = "syncing_uris"
11758
+
11759
+ if suggest:
11760
+ pulumi.log.warn(f"Key '{key}' not found in MySqlMysql. Access the value via the '{suggest}' property getter instead.")
11761
+
11762
+ def __getitem__(self, key: str) -> Any:
11763
+ MySqlMysql.__key_warning(key)
11764
+ return super().__getitem__(key)
11765
+
11766
+ def get(self, key: str, default = None) -> Any:
11767
+ MySqlMysql.__key_warning(key)
11768
+ return super().get(key, default)
11769
+
11770
+ def __init__(__self__, *,
11771
+ params: Optional[Sequence['outputs.MySqlMysqlParam']] = None,
11772
+ replica_uri: Optional[str] = None,
11773
+ standby_uris: Optional[Sequence[str]] = None,
11774
+ syncing_uris: Optional[Sequence[str]] = None,
11775
+ uris: Optional[Sequence[str]] = None):
11776
+ """
11777
+ :param Sequence['MySqlMysqlParamArgs'] params: MySQL connection parameters
11778
+ :param str replica_uri: MySQL replica URI for services with a replica
11779
+ :param Sequence[str] standby_uris: MySQL standby connection URIs
11780
+ :param Sequence[str] syncing_uris: MySQL syncing connection URIs
11781
+ :param Sequence[str] uris: MySQL master connection URIs
11782
+ """
11783
+ if params is not None:
11784
+ pulumi.set(__self__, "params", params)
11785
+ if replica_uri is not None:
11786
+ pulumi.set(__self__, "replica_uri", replica_uri)
11787
+ if standby_uris is not None:
11788
+ pulumi.set(__self__, "standby_uris", standby_uris)
11789
+ if syncing_uris is not None:
11790
+ pulumi.set(__self__, "syncing_uris", syncing_uris)
11791
+ if uris is not None:
11792
+ pulumi.set(__self__, "uris", uris)
11793
+
11794
+ @property
11795
+ @pulumi.getter
11796
+ def params(self) -> Optional[Sequence['outputs.MySqlMysqlParam']]:
11797
+ """
11798
+ MySQL connection parameters
11799
+ """
11800
+ return pulumi.get(self, "params")
11801
+
11802
+ @property
11803
+ @pulumi.getter(name="replicaUri")
11804
+ def replica_uri(self) -> Optional[str]:
11805
+ """
11806
+ MySQL replica URI for services with a replica
11807
+ """
11808
+ return pulumi.get(self, "replica_uri")
11809
+
11810
+ @property
11811
+ @pulumi.getter(name="standbyUris")
11812
+ def standby_uris(self) -> Optional[Sequence[str]]:
11813
+ """
11814
+ MySQL standby connection URIs
11815
+ """
11816
+ return pulumi.get(self, "standby_uris")
11817
+
11818
+ @property
11819
+ @pulumi.getter(name="syncingUris")
11820
+ def syncing_uris(self) -> Optional[Sequence[str]]:
11821
+ """
11822
+ MySQL syncing connection URIs
11823
+ """
11824
+ return pulumi.get(self, "syncing_uris")
11825
+
11826
+ @property
11827
+ @pulumi.getter
11828
+ def uris(self) -> Optional[Sequence[str]]:
11829
+ """
11830
+ MySQL master connection URIs
11831
+ """
11832
+ return pulumi.get(self, "uris")
11833
+
11834
+
11835
+ @pulumi.output_type
11836
+ class MySqlMysqlParam(dict):
11837
+ @staticmethod
11838
+ def __key_warning(key: str):
11839
+ suggest = None
11840
+ if key == "databaseName":
11841
+ suggest = "database_name"
11842
+
11843
+ if suggest:
11844
+ pulumi.log.warn(f"Key '{key}' not found in MySqlMysqlParam. Access the value via the '{suggest}' property getter instead.")
11845
+
11846
+ def __getitem__(self, key: str) -> Any:
11847
+ MySqlMysqlParam.__key_warning(key)
11848
+ return super().__getitem__(key)
11849
+
11850
+ def get(self, key: str, default = None) -> Any:
11851
+ MySqlMysqlParam.__key_warning(key)
11852
+ return super().get(key, default)
11853
+
11854
+ def __init__(__self__, *,
11855
+ database_name: Optional[str] = None,
11856
+ host: Optional[str] = None,
11857
+ password: Optional[str] = None,
11858
+ port: Optional[int] = None,
11859
+ sslmode: Optional[str] = None,
11860
+ user: Optional[str] = None):
11861
+ """
11862
+ :param str database_name: Primary MySQL database name
11863
+ :param str host: MySQL host IP or name
11864
+ :param str password: MySQL admin user password
11865
+ :param int port: MySQL port
11866
+ :param str sslmode: MySQL sslmode setting (currently always "require")
11867
+ :param str user: MySQL admin user name
11868
+ """
11869
+ if database_name is not None:
11870
+ pulumi.set(__self__, "database_name", database_name)
11871
+ if host is not None:
11872
+ pulumi.set(__self__, "host", host)
11873
+ if password is not None:
11874
+ pulumi.set(__self__, "password", password)
11875
+ if port is not None:
11876
+ pulumi.set(__self__, "port", port)
11877
+ if sslmode is not None:
11878
+ pulumi.set(__self__, "sslmode", sslmode)
11879
+ if user is not None:
11880
+ pulumi.set(__self__, "user", user)
11881
+
11882
+ @property
11883
+ @pulumi.getter(name="databaseName")
11884
+ def database_name(self) -> Optional[str]:
11885
+ """
11886
+ Primary MySQL database name
11887
+ """
11888
+ return pulumi.get(self, "database_name")
11889
+
11890
+ @property
11891
+ @pulumi.getter
11892
+ def host(self) -> Optional[str]:
11893
+ """
11894
+ MySQL host IP or name
11895
+ """
11896
+ return pulumi.get(self, "host")
11897
+
11898
+ @property
11899
+ @pulumi.getter
11900
+ def password(self) -> Optional[str]:
11901
+ """
11902
+ MySQL admin user password
11903
+ """
11904
+ return pulumi.get(self, "password")
11905
+
11906
+ @property
11907
+ @pulumi.getter
11908
+ def port(self) -> Optional[int]:
11909
+ """
11910
+ MySQL port
11911
+ """
11912
+ return pulumi.get(self, "port")
11913
+
11914
+ @property
11915
+ @pulumi.getter
11916
+ def sslmode(self) -> Optional[str]:
11917
+ """
11918
+ MySQL sslmode setting (currently always "require")
11919
+ """
11920
+ return pulumi.get(self, "sslmode")
11921
+
11922
+ @property
11923
+ @pulumi.getter
11924
+ def user(self) -> Optional[str]:
11925
+ """
11926
+ MySQL admin user name
11927
+ """
11928
+ return pulumi.get(self, "user")
11929
+
11930
+
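`MySqlMysql` and `MySqlMysqlParam` describe the connection details now exported by the `MySql` resource. A sketch of consuming them is below; the singular `mysql` property name is an assumption, and the password export is wrapped as a secret.

```python
import pulumi
import pulumi_aiven as aiven

mysql = aiven.MySql(
    "example-mysql",
    project="my-project",                # placeholder
    cloud_name="google-europe-west1",
    plan="business-4",
    service_name="example-mysql",
)

# Property name assumed; pick the first connection-parameter entry if present.
params = mysql.mysql.apply(lambda block: block.params[0] if block and block.params else None)

pulumi.export("mysql_host", params.apply(lambda p: p.host if p else None))
pulumi.export("mysql_port", params.apply(lambda p: p.port if p else None))
pulumi.export(
    "mysql_admin_password",
    pulumi.Output.secret(params.apply(lambda p: p.password if p else None)),
)
```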
11397
11931
  @pulumi.output_type
11398
11932
  class MySqlMysqlUserConfig(dict):
11399
11933
  @staticmethod
@@ -11480,7 +12014,7 @@ class MySqlMysqlUserConfig(dict):
11480
12014
  :param Sequence[str] ip_filters: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
11481
12015
  :param 'MySqlMysqlUserConfigMigrationArgs' migration: Migrate data from existing server
11482
12016
  :param 'MySqlMysqlUserConfigMysqlArgs' mysql: mysql.conf configuration values
11483
- :param str mysql_version: MySQL major version.
12017
+ :param str mysql_version: Enum: `8`, and newer. MySQL major version.
11484
12018
  :param 'MySqlMysqlUserConfigPrivateAccessArgs' private_access: Allow access to selected service ports from private networks
11485
12019
  :param 'MySqlMysqlUserConfigPrivatelinkAccessArgs' privatelink_access: Allow access to selected service components through Privatelink
11486
12020
  :param str project_to_fork_from: Name of another project to fork a service from. This has effect only when a new service is being created.
@@ -11626,7 +12160,7 @@ class MySqlMysqlUserConfig(dict):
11626
12160
  @pulumi.getter(name="mysqlVersion")
11627
12161
  def mysql_version(self) -> Optional[str]:
11628
12162
  """
11629
- MySQL major version.
12163
+ Enum: `8`, and newer. MySQL major version.
11630
12164
  """
11631
12165
  return pulumi.get(self, "mysql_version")
11632
12166
 
@@ -11758,7 +12292,7 @@ class MySqlMysqlUserConfigMigration(dict):
11758
12292
  :param int port: Port number of the server where to migrate data from.
11759
12293
  :param str dbname: Database name for bootstrapping the initial connection.
11760
12294
  :param str ignore_dbs: Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment).
11761
- :param str method: The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
12295
+ :param str method: Enum: `dump`, `replication`. The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
11762
12296
  :param str password: Password for authentication with the server where to migrate data from.
11763
12297
  :param bool ssl: The server where to migrate data from is secured with SSL. The default value is `true`.
11764
12298
  :param str username: User name for authentication with the server where to migrate data from.
@@ -11814,7 +12348,7 @@ class MySqlMysqlUserConfigMigration(dict):
11814
12348
  @pulumi.getter
11815
12349
  def method(self) -> Optional[str]:
11816
12350
  """
11817
- The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
12351
+ Enum: `dump`, `replication`. The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
11818
12352
  """
11819
12353
  return pulumi.get(self, "method")
11820
12354
 
@@ -11969,7 +12503,7 @@ class MySqlMysqlUserConfigMysql(dict):
11969
12503
  :param int innodb_thread_concurrency: Defines the maximum number of threads permitted inside of InnoDB. Default is 0 (infinite concurrency - no limit).
11970
12504
  :param int innodb_write_io_threads: The number of I/O threads for write operations in InnoDB. Default is 4. Changing this parameter will lead to a restart of the MySQL service.
11971
12505
  :param int interactive_timeout: The number of seconds the server waits for activity on an interactive connection before closing it.
11972
- :param str internal_tmp_mem_storage_engine: The storage engine for in-memory internal temporary tables.
12506
+ :param str internal_tmp_mem_storage_engine: Enum: `TempTable`, `MEMORY`. The storage engine for in-memory internal temporary tables.
11973
12507
  :param float long_query_time: The slow*query*logs work as SQL statements that take more than long*query*time seconds to execute. Default is 10s.
11974
12508
  :param int max_allowed_packet: Size of the largest message in bytes that can be received by the server. Default is 67108864 (64M).
11975
12509
  :param int max_heap_table_size: Limits the size of internal in-memory tables. Also set tmp*table*size. Default is 16777216 (16M).
@@ -12184,7 +12718,7 @@ class MySqlMysqlUserConfigMysql(dict):
12184
12718
  @pulumi.getter(name="internalTmpMemStorageEngine")
12185
12719
  def internal_tmp_mem_storage_engine(self) -> Optional[str]:
12186
12720
  """
12187
- The storage engine for in-memory internal temporary tables.
12721
+ Enum: `TempTable`, `MEMORY`. The storage engine for in-memory internal temporary tables.
12188
12722
  """
12189
12723
  return pulumi.get(self, "internal_tmp_mem_storage_engine")
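Taken together, the MySQL enums documented in these hunks (major version, migration method, and the in-memory temp-table engine) are set through the user-config args. A minimal sketch follows; the source-server details and password are placeholders only.

```python
import pulumi_aiven as aiven

mysql_user_config = aiven.MySqlMysqlUserConfigArgs(
    mysql_version="8",                          # Enum: 8, and newer
    migration=aiven.MySqlMysqlUserConfigMigrationArgs(
        host="old-mysql.example.com",           # placeholder source server
        port=3306,
        ssl=True,
        username="migrator",
        password="example-password",            # placeholder; use a secret in real code
        method="replication",                   # Enum: dump, replication
    ),
    mysql=aiven.MySqlMysqlUserConfigMysqlArgs(
        internal_tmp_mem_storage_engine="MEMORY",  # Enum: TempTable, MEMORY
    ),
)
```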
12190
12724
 
@@ -12636,7 +13170,9 @@ class OpenSearchOpensearch(dict):
12636
13170
  @staticmethod
12637
13171
  def __key_warning(key: str):
12638
13172
  suggest = None
12639
- if key == "opensearchDashboardsUri":
13173
+ if key == "kibanaUri":
13174
+ suggest = "kibana_uri"
13175
+ elif key == "opensearchDashboardsUri":
12640
13176
  suggest = "opensearch_dashboards_uri"
12641
13177
 
12642
13178
  if suggest:
@@ -12651,12 +13187,36 @@ class OpenSearchOpensearch(dict):
12651
13187
  return super().get(key, default)
12652
13188
 
12653
13189
  def __init__(__self__, *,
12654
- opensearch_dashboards_uri: Optional[str] = None):
13190
+ kibana_uri: Optional[str] = None,
13191
+ opensearch_dashboards_uri: Optional[str] = None,
13192
+ password: Optional[str] = None,
13193
+ uris: Optional[Sequence[str]] = None,
13194
+ username: Optional[str] = None):
12655
13195
  """
13196
+ :param str kibana_uri: URI for Kibana dashboard frontend
12656
13197
  :param str opensearch_dashboards_uri: URI for OpenSearch dashboard frontend
13198
+ :param str password: OpenSearch password
13199
+ :param Sequence[str] uris: OpenSearch server URIs.
13200
+ :param str username: OpenSearch username
12657
13201
  """
13202
+ if kibana_uri is not None:
13203
+ pulumi.set(__self__, "kibana_uri", kibana_uri)
12658
13204
  if opensearch_dashboards_uri is not None:
12659
13205
  pulumi.set(__self__, "opensearch_dashboards_uri", opensearch_dashboards_uri)
13206
+ if password is not None:
13207
+ pulumi.set(__self__, "password", password)
13208
+ if uris is not None:
13209
+ pulumi.set(__self__, "uris", uris)
13210
+ if username is not None:
13211
+ pulumi.set(__self__, "username", username)
13212
+
13213
+ @property
13214
+ @pulumi.getter(name="kibanaUri")
13215
+ def kibana_uri(self) -> Optional[str]:
13216
+ """
13217
+ URI for Kibana dashboard frontend
13218
+ """
13219
+ return pulumi.get(self, "kibana_uri")
12660
13220
 
12661
13221
  @property
12662
13222
  @pulumi.getter(name="opensearchDashboardsUri")
@@ -12666,6 +13226,30 @@ class OpenSearchOpensearch(dict):
12666
13226
  """
12667
13227
  return pulumi.get(self, "opensearch_dashboards_uri")
12668
13228
 
13229
+ @property
13230
+ @pulumi.getter
13231
+ def password(self) -> Optional[str]:
13232
+ """
13233
+ OpenSearch password
13234
+ """
13235
+ return pulumi.get(self, "password")
13236
+
13237
+ @property
13238
+ @pulumi.getter
13239
+ def uris(self) -> Optional[Sequence[str]]:
13240
+ """
13241
+ OpenSearch server URIs.
13242
+ """
13243
+ return pulumi.get(self, "uris")
13244
+
13245
+ @property
13246
+ @pulumi.getter
13247
+ def username(self) -> Optional[str]:
13248
+ """
13249
+ OpenSearch username
13250
+ """
13251
+ return pulumi.get(self, "username")
13252
+
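With `kibana_uri`, `uris`, `username`, and `password` added to `OpenSearchOpensearch`, the connection block now exposes the full login details. A sketch of exporting them is below; the singular `opensearch` property name is an assumption, and the password is exported as a secret.

```python
import pulumi
import pulumi_aiven as aiven

os_service = aiven.OpenSearch(
    "example-opensearch",
    project="my-project",                # placeholder
    cloud_name="google-europe-west1",
    plan="business-4",
    service_name="example-opensearch",
)

# Property name assumed; unwrap the nested block with .apply().
pulumi.export("opensearch_uris", os_service.opensearch.apply(lambda b: b.uris if b else None))
pulumi.export("opensearch_username", os_service.opensearch.apply(lambda b: b.username if b else None))
pulumi.export(
    "opensearch_password",
    pulumi.Output.secret(os_service.opensearch.apply(lambda b: b.password if b else None)),
)
```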
12669
13253
 
12670
13254
  @pulumi.output_type
12671
13255
  class OpenSearchOpensearchUserConfig(dict):
@@ -12762,7 +13346,7 @@ class OpenSearchOpensearchUserConfig(dict):
12762
13346
  :param 'OpenSearchOpensearchUserConfigOpenidArgs' openid: OpenSearch OpenID Connect Configuration
12763
13347
  :param 'OpenSearchOpensearchUserConfigOpensearchArgs' opensearch: OpenSearch settings
12764
13348
  :param 'OpenSearchOpensearchUserConfigOpensearchDashboardsArgs' opensearch_dashboards: OpenSearch Dashboards settings
12765
- :param str opensearch_version: OpenSearch major version.
13349
+ :param str opensearch_version: Enum: `1`, `2`, and newer. OpenSearch major version.
12766
13350
  :param 'OpenSearchOpensearchUserConfigPrivateAccessArgs' private_access: Allow access to selected service ports from private networks
12767
13351
  :param 'OpenSearchOpensearchUserConfigPrivatelinkAccessArgs' privatelink_access: Allow access to selected service components through Privatelink
12768
13352
  :param str project_to_fork_from: Name of another project to fork a service from. This has effect only when a new service is being created.
@@ -12931,7 +13515,7 @@ class OpenSearchOpensearchUserConfig(dict):
12931
13515
  @pulumi.getter(name="opensearchVersion")
12932
13516
  def opensearch_version(self) -> Optional[str]:
12933
13517
  """
12934
- OpenSearch major version.
13518
+ Enum: `1`, `2`, and newer. OpenSearch major version.
12935
13519
  """
12936
13520
  return pulumi.get(self, "opensearch_version")
12937
13521
 
@@ -13036,7 +13620,7 @@ class OpenSearchOpensearchUserConfigIndexPattern(dict):
13036
13620
  """
13037
13621
  :param int max_index_count: Maximum number of indexes to keep.
13038
13622
  :param str pattern: fnmatch pattern.
13039
- :param str sorting_algorithm: Deletion sorting algorithm. The default value is `creation_date`.
13623
+ :param str sorting_algorithm: Enum: `alphabetical`, `creation_date`. Deletion sorting algorithm. The default value is `creation_date`.
13040
13624
  """
13041
13625
  pulumi.set(__self__, "max_index_count", max_index_count)
13042
13626
  pulumi.set(__self__, "pattern", pattern)
@@ -13063,7 +13647,7 @@ class OpenSearchOpensearchUserConfigIndexPattern(dict):
13063
13647
  @pulumi.getter(name="sortingAlgorithm")
13064
13648
  def sorting_algorithm(self) -> Optional[str]:
13065
13649
  """
13066
- Deletion sorting algorithm. The default value is `creation_date`.
13650
+ Enum: `alphabetical`, `creation_date`. Deletion sorting algorithm. The default value is `creation_date`.
13067
13651
  """
13068
13652
  return pulumi.get(self, "sorting_algorithm")
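The `opensearch_version` and index-pattern `sorting_algorithm` enums above are configured through the OpenSearch user-config args. A minimal sketch with illustrative values is shown below; the result is passed as `opensearch_user_config` on the `OpenSearch` resource.

```python
import pulumi_aiven as aiven

opensearch_user_config = aiven.OpenSearchOpensearchUserConfigArgs(
    opensearch_version="2",  # Enum: 1, 2, and newer
    index_patterns=[
        aiven.OpenSearchOpensearchUserConfigIndexPatternArgs(
            pattern="logs-*",                   # fnmatch pattern
            max_index_count=14,                 # keep at most 14 matching indexes
            sorting_algorithm="creation_date",  # Enum: alphabetical, creation_date (default)
        ),
    ],
)
```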
13069
13653
 
@@ -14037,12 +14621,12 @@ class OpenSearchOpensearchUserConfigOpensearchAuthFailureListenersInternalAuthen
14037
14621
  type: Optional[str] = None):
14038
14622
  """
14039
14623
  :param int allowed_tries: The number of login attempts allowed before login is blocked.
14040
- :param str authentication_backend: internal*authentication*backend*limiting.authentication*backend.
14624
+ :param str authentication_backend: Enum: `internal`. internal*authentication*backend*limiting.authentication*backend.
14041
14625
  :param int block_expiry_seconds: The duration of time that login remains blocked after a failed login.
14042
14626
  :param int max_blocked_clients: internal*authentication*backend*limiting.max*blocked_clients.
14043
14627
  :param int max_tracked_clients: The maximum number of tracked IP addresses that have failed login.
14044
14628
  :param int time_window_seconds: The window of time in which the value for `allowed_tries` is enforced.
14045
- :param str type: internal*authentication*backend_limiting.type.
14629
+ :param str type: Enum: `username`. internal*authentication*backend_limiting.type.
14046
14630
  """
14047
14631
  if allowed_tries is not None:
14048
14632
  pulumi.set(__self__, "allowed_tries", allowed_tries)
@@ -14071,7 +14655,7 @@ class OpenSearchOpensearchUserConfigOpensearchAuthFailureListenersInternalAuthen
14071
14655
  @pulumi.getter(name="authenticationBackend")
14072
14656
  def authentication_backend(self) -> Optional[str]:
14073
14657
  """
14074
- internal*authentication*backend*limiting.authentication*backend.
14658
+ Enum: `internal`. internal*authentication*backend*limiting.authentication*backend.
14075
14659
  """
14076
14660
  return pulumi.get(self, "authentication_backend")
14077
14661
 
@@ -14111,7 +14695,7 @@ class OpenSearchOpensearchUserConfigOpensearchAuthFailureListenersInternalAuthen
14111
14695
  @pulumi.getter
14112
14696
  def type(self) -> Optional[str]:
14113
14697
  """
14114
- internal*authentication*backend_limiting.type.
14698
+ Enum: `username`. internal*authentication*backend_limiting.type.
14115
14699
  """
14116
14700
  return pulumi.get(self, "type")
14117
14701
 
@@ -14156,7 +14740,7 @@ class OpenSearchOpensearchUserConfigOpensearchAuthFailureListenersIpRateLimiting
14156
14740
  :param int max_blocked_clients: The maximum number of blocked IP addresses.
14157
14741
  :param int max_tracked_clients: The maximum number of tracked IP addresses that have failed login.
14158
14742
  :param int time_window_seconds: The window of time in which the value for `allowed_tries` is enforced.
14159
- :param str type: The type of rate limiting.
14743
+ :param str type: Enum: `ip`. The type of rate limiting.
14160
14744
  """
14161
14745
  if allowed_tries is not None:
14162
14746
  pulumi.set(__self__, "allowed_tries", allowed_tries)
@@ -14215,7 +14799,7 @@ class OpenSearchOpensearchUserConfigOpensearchAuthFailureListenersIpRateLimiting
14215
14799
  @pulumi.getter
14216
14800
  def type(self) -> Optional[str]:
14217
14801
  """
14218
- The type of rate limiting.
14802
+ Enum: `ip`. The type of rate limiting.
14219
14803
  """
14220
14804
  return pulumi.get(self, "type")
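The auth-failure-listener enums above (`type`, `authentication_backend`) are fixed single-value enums, so the main choice is the thresholds around them. A sketch is below; the parent field names (`ip_rate_limiting`, `internal_authentication_backend_limiting`, and `auth_failure_listeners` on the `opensearch` settings block) are inferred from the output class names in this file and should be treated as assumptions.

```python
import pulumi_aiven as aiven

# Field names inferred from the output classes in this hunk.
auth_failure_listeners = aiven.OpenSearchOpensearchUserConfigOpensearchAuthFailureListenersArgs(
    ip_rate_limiting=aiven.OpenSearchOpensearchUserConfigOpensearchAuthFailureListenersIpRateLimitingArgs(
        type="ip",                  # Enum: ip
        allowed_tries=10,
        max_blocked_clients=100000,
        max_tracked_clients=100000,
        time_window_seconds=3600,
    ),
    internal_authentication_backend_limiting=aiven.OpenSearchOpensearchUserConfigOpensearchAuthFailureListenersInternalAuthenticationBackendLimitingArgs(
        type="username",                     # Enum: username
        authentication_backend="internal",   # Enum: internal
        allowed_tries=10,
        block_expiry_seconds=600,
        time_window_seconds=3600,
    ),
)
```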
14221
14805
 
@@ -14671,116 +15255,6 @@ class OpenSearchTechEmail(dict):
14671
15255
  return pulumi.get(self, "email")
14672
15256
 
14673
15257
 
14674
- @pulumi.output_type
14675
- class OrganizationApplicationUserTimeouts(dict):
14676
- def __init__(__self__, *,
14677
- create: Optional[str] = None,
14678
- delete: Optional[str] = None,
14679
- read: Optional[str] = None,
14680
- update: Optional[str] = None):
14681
- """
14682
- :param str create: A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours).
14683
- :param str delete: A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). Setting a timeout for a Delete operation is only applicable if changes are saved into state before the destroy operation occurs.
14684
- :param str read: A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). Read operations occur during any refresh or planning operation when refresh is enabled.
14685
- :param str update: A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours).
14686
- """
14687
- if create is not None:
14688
- pulumi.set(__self__, "create", create)
14689
- if delete is not None:
14690
- pulumi.set(__self__, "delete", delete)
14691
- if read is not None:
14692
- pulumi.set(__self__, "read", read)
14693
- if update is not None:
14694
- pulumi.set(__self__, "update", update)
14695
-
14696
- @property
14697
- @pulumi.getter
14698
- def create(self) -> Optional[str]:
14699
- """
14700
- A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours).
14701
- """
14702
- return pulumi.get(self, "create")
14703
-
14704
- @property
14705
- @pulumi.getter
14706
- def delete(self) -> Optional[str]:
14707
- """
14708
- A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). Setting a timeout for a Delete operation is only applicable if changes are saved into state before the destroy operation occurs.
14709
- """
14710
- return pulumi.get(self, "delete")
14711
-
14712
- @property
14713
- @pulumi.getter
14714
- def read(self) -> Optional[str]:
14715
- """
14716
- A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). Read operations occur during any refresh or planning operation when refresh is enabled.
14717
- """
14718
- return pulumi.get(self, "read")
14719
-
14720
- @property
14721
- @pulumi.getter
14722
- def update(self) -> Optional[str]:
14723
- """
14724
- A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours).
14725
- """
14726
- return pulumi.get(self, "update")
14727
-
14728
-
14729
- @pulumi.output_type
14730
- class OrganizationApplicationUserTokenTimeouts(dict):
14731
- def __init__(__self__, *,
14732
- create: Optional[str] = None,
14733
- delete: Optional[str] = None,
14734
- read: Optional[str] = None,
14735
- update: Optional[str] = None):
14736
- """
14737
- :param str create: A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours).
14738
- :param str delete: A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). Setting a timeout for a Delete operation is only applicable if changes are saved into state before the destroy operation occurs.
14739
- :param str read: A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). Read operations occur during any refresh or planning operation when refresh is enabled.
14740
- :param str update: A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours).
14741
- """
14742
- if create is not None:
14743
- pulumi.set(__self__, "create", create)
14744
- if delete is not None:
14745
- pulumi.set(__self__, "delete", delete)
14746
- if read is not None:
14747
- pulumi.set(__self__, "read", read)
14748
- if update is not None:
14749
- pulumi.set(__self__, "update", update)
14750
-
14751
- @property
14752
- @pulumi.getter
14753
- def create(self) -> Optional[str]:
14754
- """
14755
- A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours).
14756
- """
14757
- return pulumi.get(self, "create")
14758
-
14759
- @property
14760
- @pulumi.getter
14761
- def delete(self) -> Optional[str]:
14762
- """
14763
- A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). Setting a timeout for a Delete operation is only applicable if changes are saved into state before the destroy operation occurs.
14764
- """
14765
- return pulumi.get(self, "delete")
14766
-
14767
- @property
14768
- @pulumi.getter
14769
- def read(self) -> Optional[str]:
14770
- """
14771
- A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). Read operations occur during any refresh or planning operation when refresh is enabled.
14772
- """
14773
- return pulumi.get(self, "read")
14774
-
14775
- @property
14776
- @pulumi.getter
14777
- def update(self) -> Optional[str]:
14778
- """
14779
- A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours).
14780
- """
14781
- return pulumi.get(self, "update")
14782
-
14783
-
14784
15258
  @pulumi.output_type
14785
15259
  class OrganizationGroupProjectTimeouts(dict):
14786
15260
  def __init__(__self__, *,
@@ -15077,6 +15551,10 @@ class PgPg(dict):
15077
15551
  suggest = "max_connections"
15078
15552
  elif key == "replicaUri":
15079
15553
  suggest = "replica_uri"
15554
+ elif key == "standbyUris":
15555
+ suggest = "standby_uris"
15556
+ elif key == "syncingUris":
15557
+ suggest = "syncing_uris"
15080
15558
 
15081
15559
  if suggest:
15082
15560
  pulumi.log.warn(f"Key '{key}' not found in PgPg. Access the value via the '{suggest}' property getter instead.")
@@ -15090,32 +15568,46 @@ class PgPg(dict):
15090
15568
  return super().get(key, default)
15091
15569
 
15092
15570
  def __init__(__self__, *,
15571
+ bouncer: Optional[str] = None,
15093
15572
  dbname: Optional[str] = None,
15094
15573
  host: Optional[str] = None,
15095
15574
  max_connections: Optional[int] = None,
15575
+ params: Optional[Sequence['outputs.PgPgParam']] = None,
15096
15576
  password: Optional[str] = None,
15097
15577
  port: Optional[int] = None,
15098
15578
  replica_uri: Optional[str] = None,
15099
15579
  sslmode: Optional[str] = None,
15580
+ standby_uris: Optional[Sequence[str]] = None,
15581
+ syncing_uris: Optional[Sequence[str]] = None,
15100
15582
  uri: Optional[str] = None,
15583
+ uris: Optional[Sequence[str]] = None,
15101
15584
  user: Optional[str] = None):
15102
15585
  """
15586
+ :param str bouncer: Bouncer connection details
15103
15587
  :param str dbname: Primary PostgreSQL database name
15104
15588
  :param str host: PostgreSQL master node host IP or name
15105
15589
  :param int max_connections: Connection limit
15590
+ :param Sequence['PgPgParamArgs'] params: PostgreSQL connection parameters
15106
15591
  :param str password: PostgreSQL admin user password
15107
15592
  :param int port: PostgreSQL port
15108
15593
  :param str replica_uri: PostgreSQL replica URI for services with a replica
15109
15594
  :param str sslmode: PostgreSQL sslmode setting (currently always "require")
15595
+ :param Sequence[str] standby_uris: PostgreSQL standby connection URIs
15596
+ :param Sequence[str] syncing_uris: PostgreSQL syncing connection URIs
15110
15597
  :param str uri: PostgreSQL master connection URI
15598
+ :param Sequence[str] uris: PostgreSQL master connection URIs
15111
15599
  :param str user: PostgreSQL admin user name
15112
15600
  """
15601
+ if bouncer is not None:
15602
+ pulumi.set(__self__, "bouncer", bouncer)
15113
15603
  if dbname is not None:
15114
15604
  pulumi.set(__self__, "dbname", dbname)
15115
15605
  if host is not None:
15116
15606
  pulumi.set(__self__, "host", host)
15117
15607
  if max_connections is not None:
15118
15608
  pulumi.set(__self__, "max_connections", max_connections)
15609
+ if params is not None:
15610
+ pulumi.set(__self__, "params", params)
15119
15611
  if password is not None:
15120
15612
  pulumi.set(__self__, "password", password)
15121
15613
  if port is not None:
@@ -15124,11 +15616,25 @@ class PgPg(dict):
15124
15616
  pulumi.set(__self__, "replica_uri", replica_uri)
15125
15617
  if sslmode is not None:
15126
15618
  pulumi.set(__self__, "sslmode", sslmode)
15619
+ if standby_uris is not None:
15620
+ pulumi.set(__self__, "standby_uris", standby_uris)
15621
+ if syncing_uris is not None:
15622
+ pulumi.set(__self__, "syncing_uris", syncing_uris)
15127
15623
  if uri is not None:
15128
15624
  pulumi.set(__self__, "uri", uri)
15625
+ if uris is not None:
15626
+ pulumi.set(__self__, "uris", uris)
15129
15627
  if user is not None:
15130
15628
  pulumi.set(__self__, "user", user)
15131
15629
 
15630
+ @property
15631
+ @pulumi.getter
15632
+ def bouncer(self) -> Optional[str]:
15633
+ """
15634
+ Bouncer connection details
15635
+ """
15636
+ return pulumi.get(self, "bouncer")
15637
+
15132
15638
  @property
15133
15639
  @pulumi.getter
15134
15640
  def dbname(self) -> Optional[str]:
@@ -15153,6 +15659,14 @@ class PgPg(dict):
15153
15659
  """
15154
15660
  return pulumi.get(self, "max_connections")
15155
15661
 
15662
+ @property
15663
+ @pulumi.getter
15664
+ def params(self) -> Optional[Sequence['outputs.PgPgParam']]:
15665
+ """
15666
+ PostgreSQL connection parameters
15667
+ """
15668
+ return pulumi.get(self, "params")
15669
+
15156
15670
  @property
15157
15671
  @pulumi.getter
15158
15672
  def password(self) -> Optional[str]:
@@ -15185,6 +15699,22 @@ class PgPg(dict):
15185
15699
  """
15186
15700
  return pulumi.get(self, "sslmode")
15187
15701
 
15702
+ @property
15703
+ @pulumi.getter(name="standbyUris")
15704
+ def standby_uris(self) -> Optional[Sequence[str]]:
15705
+ """
15706
+ PostgreSQL standby connection URIs
15707
+ """
15708
+ return pulumi.get(self, "standby_uris")
15709
+
15710
+ @property
15711
+ @pulumi.getter(name="syncingUris")
15712
+ def syncing_uris(self) -> Optional[Sequence[str]]:
15713
+ """
15714
+ PostgreSQL syncing connection URIs
15715
+ """
15716
+ return pulumi.get(self, "syncing_uris")
15717
+
15188
15718
  @property
15189
15719
  @pulumi.getter
15190
15720
  def uri(self) -> Optional[str]:
@@ -15193,6 +15723,110 @@ class PgPg(dict):
15193
15723
  """
15194
15724
  return pulumi.get(self, "uri")
15195
15725
 
15726
+ @property
15727
+ @pulumi.getter
15728
+ def uris(self) -> Optional[Sequence[str]]:
15729
+ """
15730
+ PostgreSQL master connection URIs
15731
+ """
15732
+ return pulumi.get(self, "uris")
15733
+
15734
+ @property
15735
+ @pulumi.getter
15736
+ def user(self) -> Optional[str]:
15737
+ """
15738
+ PostgreSQL admin user name
15739
+ """
15740
+ return pulumi.get(self, "user")
15741
+
15742
+
15743
+ @pulumi.output_type
15744
+ class PgPgParam(dict):
15745
+ @staticmethod
15746
+ def __key_warning(key: str):
15747
+ suggest = None
15748
+ if key == "databaseName":
15749
+ suggest = "database_name"
15750
+
15751
+ if suggest:
15752
+ pulumi.log.warn(f"Key '{key}' not found in PgPgParam. Access the value via the '{suggest}' property getter instead.")
15753
+
15754
+ def __getitem__(self, key: str) -> Any:
15755
+ PgPgParam.__key_warning(key)
15756
+ return super().__getitem__(key)
15757
+
15758
+ def get(self, key: str, default = None) -> Any:
15759
+ PgPgParam.__key_warning(key)
15760
+ return super().get(key, default)
15761
+
15762
+ def __init__(__self__, *,
15763
+ database_name: Optional[str] = None,
15764
+ host: Optional[str] = None,
15765
+ password: Optional[str] = None,
15766
+ port: Optional[int] = None,
15767
+ sslmode: Optional[str] = None,
15768
+ user: Optional[str] = None):
15769
+ """
15770
+ :param str database_name: Primary PostgreSQL database name
15771
+ :param str host: PostgreSQL host IP or name
15772
+ :param str password: PostgreSQL admin user password
15773
+ :param int port: PostgreSQL port
15774
+ :param str sslmode: PostgreSQL sslmode setting (currently always "require")
15775
+ :param str user: PostgreSQL admin user name
15776
+ """
15777
+ if database_name is not None:
15778
+ pulumi.set(__self__, "database_name", database_name)
15779
+ if host is not None:
15780
+ pulumi.set(__self__, "host", host)
15781
+ if password is not None:
15782
+ pulumi.set(__self__, "password", password)
15783
+ if port is not None:
15784
+ pulumi.set(__self__, "port", port)
15785
+ if sslmode is not None:
15786
+ pulumi.set(__self__, "sslmode", sslmode)
15787
+ if user is not None:
15788
+ pulumi.set(__self__, "user", user)
15789
+
15790
+ @property
15791
+ @pulumi.getter(name="databaseName")
15792
+ def database_name(self) -> Optional[str]:
15793
+ """
15794
+ Primary PostgreSQL database name
15795
+ """
15796
+ return pulumi.get(self, "database_name")
15797
+
15798
+ @property
15799
+ @pulumi.getter
15800
+ def host(self) -> Optional[str]:
15801
+ """
15802
+ PostgreSQL host IP or name
15803
+ """
15804
+ return pulumi.get(self, "host")
15805
+
15806
+ @property
15807
+ @pulumi.getter
15808
+ def password(self) -> Optional[str]:
15809
+ """
15810
+ PostgreSQL admin user password
15811
+ """
15812
+ return pulumi.get(self, "password")
15813
+
15814
+ @property
15815
+ @pulumi.getter
15816
+ def port(self) -> Optional[int]:
15817
+ """
15818
+ PostgreSQL port
15819
+ """
15820
+ return pulumi.get(self, "port")
15821
+
15822
+ @property
15823
+ @pulumi.getter
15824
+ def sslmode(self) -> Optional[str]:
15825
+ """
15826
+ PostgreSQL sslmode setting (currently always "require")
15827
+ """
15828
+ return pulumi.get(self, "sslmode")
15829
+
15196
15830
  @property
15197
15831
  @pulumi.getter
15198
15832
  def user(self) -> Optional[str]:
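With the additions above, the `PgPg` block now carries PgBouncer details, standby and syncing URIs, and per-connection parameters. A sketch of exporting a few of them follows; the singular `pg` property name matches earlier provider versions but should still be treated as an assumption, and the admin password is exported as a secret.

```python
import pulumi
import pulumi_aiven as aiven

pg = aiven.Pg(
    "example-pg",
    project="my-project",                # placeholder
    cloud_name="google-europe-west1",
    plan="business-4",
    service_name="example-pg",
)

pulumi.export("pg_bouncer", pg.pg.apply(lambda block: block.bouncer if block else None))
pulumi.export("pg_standby_uris", pg.pg.apply(lambda block: block.standby_uris if block else None))
pulumi.export(
    "pg_admin_password",
    pulumi.Output.secret(
        pg.pg.apply(lambda block: block.params[0].password if block and block.params else None)
    ),
)
```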
@@ -15318,7 +15952,7 @@ class PgPgUserConfig(dict):
15318
15952
  :param bool pg_read_replica: Should the service which is being forked be a read replica (deprecated, use read_replica service integration instead).
15319
15953
  :param str pg_service_to_fork_from: Name of the PG Service from which to fork (deprecated, use service*to*fork_from). This has effect only when a new service is being created.
15320
15954
  :param bool pg_stat_monitor_enable: Enable the pg*stat*monitor extension. Enabling this extension will cause the cluster to be restarted.When this extension is enabled, pg*stat*statements results for utility commands are unreliable. The default value is `false`.
15321
- :param str pg_version: PostgreSQL major version.
15955
+ :param str pg_version: Enum: `10`, `11`, `12`, `13`, `14`, `15`, `16`, and newer. PostgreSQL major version.
15322
15956
  :param 'PgPgUserConfigPgauditArgs' pgaudit: System-wide settings for the pgaudit extension
15323
15957
  :param 'PgPgUserConfigPgbouncerArgs' pgbouncer: PGBouncer connection pooling settings
15324
15958
  :param 'PgPgUserConfigPglookoutArgs' pglookout: System-wide settings for pglookout
@@ -15331,9 +15965,9 @@ class PgPgUserConfig(dict):
15331
15965
  :param str service_to_fork_from: Name of another service to fork from. This has effect only when a new service is being created.
15332
15966
  :param float shared_buffers_percentage: Percentage of total RAM that the database server uses for shared memory buffers. Valid range is 20-60 (float), which corresponds to 20% - 60%. This setting adjusts the shared_buffers configuration value.
15333
15967
  :param bool static_ips: Use static public IP addresses.
15334
- :param str synchronous_replication: Synchronous replication type. Note that the service plan also needs to support synchronous replication.
15968
+ :param str synchronous_replication: Enum: `quorum`, `off`. Synchronous replication type. Note that the service plan also needs to support synchronous replication.
15335
15969
  :param 'PgPgUserConfigTimescaledbArgs' timescaledb: System-wide settings for the timescaledb extension
15336
- :param str variant: Variant of the PostgreSQL service, may affect the features that are exposed by default.
15970
+ :param str variant: Enum: `aiven`, `timescale`. Variant of the PostgreSQL service, may affect the features that are exposed by default.
15337
15971
  :param int work_mem: Sets the maximum amount of memory to be used by a query operation (such as a sort or hash table) before writing to temporary disk files, in MB. Default is 1MB + 0.075% of total RAM (up to 32MB).
15338
15972
  """
15339
15973
  if additional_backup_regions is not None:
@@ -15531,7 +16165,7 @@ class PgPgUserConfig(dict):
15531
16165
  @pulumi.getter(name="pgVersion")
15532
16166
  def pg_version(self) -> Optional[str]:
15533
16167
  """
15534
- PostgreSQL major version.
16168
+ Enum: `10`, `11`, `12`, `13`, `14`, `15`, `16`, and newer. PostgreSQL major version.
15535
16169
  """
15536
16170
  return pulumi.get(self, "pg_version")
15537
16171
 
@@ -15635,7 +16269,7 @@ class PgPgUserConfig(dict):
15635
16269
  @pulumi.getter(name="synchronousReplication")
15636
16270
  def synchronous_replication(self) -> Optional[str]:
15637
16271
  """
15638
- Synchronous replication type. Note that the service plan also needs to support synchronous replication.
16272
+ Enum: `quorum`, `off`. Synchronous replication type. Note that the service plan also needs to support synchronous replication.
15639
16273
  """
15640
16274
  return pulumi.get(self, "synchronous_replication")
15641
16275
 
@@ -15651,7 +16285,7 @@ class PgPgUserConfig(dict):
15651
16285
  @pulumi.getter
15652
16286
  def variant(self) -> Optional[str]:
15653
16287
  """
15654
- Variant of the PostgreSQL service, may affect the features that are exposed by default.
16288
+ Enum: `aiven`, `timescale`. Variant of the PostgreSQL service, may affect the features that are exposed by default.
15655
16289
  """
15656
16290
  return pulumi.get(self, "variant")
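The `pg_version`, `synchronous_replication`, and `variant` enums documented above are all top-level fields of the PostgreSQL user config. A minimal sketch with illustrative values:

```python
import pulumi_aiven as aiven

# Passed as pg_user_config on the Pg resource.
pg_user_config = aiven.PgPgUserConfigArgs(
    pg_version="16",                   # Enum: 10, 11, 12, 13, 14, 15, 16, and newer
    variant="timescale",               # Enum: aiven, timescale
    synchronous_replication="quorum",  # Enum: quorum, off; the plan must support it
)
```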
15657
16291
 
@@ -15727,7 +16361,7 @@ class PgPgUserConfigMigration(dict):
15727
16361
  :param int port: Port number of the server where to migrate data from.
15728
16362
  :param str dbname: Database name for bootstrapping the initial connection.
15729
16363
  :param str ignore_dbs: Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment).
15730
- :param str method: The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
16364
+ :param str method: Enum: `dump`, `replication`. The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
15731
16365
  :param str password: Password for authentication with the server where to migrate data from.
15732
16366
  :param bool ssl: The server where to migrate data from is secured with SSL. The default value is `true`.
15733
16367
  :param str username: User name for authentication with the server where to migrate data from.
@@ -15783,7 +16417,7 @@ class PgPgUserConfigMigration(dict):
15783
16417
  @pulumi.getter
15784
16418
  def method(self) -> Optional[str]:
15785
16419
  """
15786
- The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
16420
+ Enum: `dump`, `replication`. The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
15787
16421
  """
15788
16422
  return pulumi.get(self, "method")
15789
16423
 
@@ -15988,12 +16622,12 @@ class PgPgUserConfigPg(dict):
15988
16622
  :param int bgwriter_lru_maxpages: In each round, no more than this many buffers will be written by the background writer. Setting this to zero disables background writing. Default is 100.
15989
16623
  :param float bgwriter_lru_multiplier: The average recent need for new buffers is multiplied by bgwriter*lru*multiplier to arrive at an estimate of the number that will be needed during the next round, (up to bgwriter*lru*maxpages). 1.0 represents a “just in time” policy of writing exactly the number of buffers predicted to be needed. Larger values provide some cushion against spikes in demand, while smaller values intentionally leave writes to be done by server processes. The default is 2.0.
15990
16624
  :param int deadlock_timeout: This is the amount of time, in milliseconds, to wait on a lock before checking to see if there is a deadlock condition.
15991
- :param str default_toast_compression: Specifies the default TOAST compression method for values of compressible columns (the default is lz4).
16625
+ :param str default_toast_compression: Enum: `lz4`, `pglz`. Specifies the default TOAST compression method for values of compressible columns (the default is lz4).
15992
16626
  :param int idle_in_transaction_session_timeout: Time out sessions with open transactions after this number of milliseconds.
15993
16627
  :param bool jit: Controls system-wide use of Just-in-Time Compilation (JIT).
15994
16628
  :param int log_autovacuum_min_duration: Causes each action executed by autovacuum to be logged if it ran for at least the specified number of milliseconds. Setting this to zero logs all autovacuum actions. Minus-one (the default) disables logging autovacuum actions.
15995
- :param str log_error_verbosity: Controls the amount of detail written in the server log for each message that is logged.
15996
- :param str log_line_prefix: Choose from one of the available log-formats. These can support popular log analyzers like pgbadger, pganalyze etc.
16629
+ :param str log_error_verbosity: Enum: `TERSE`, `DEFAULT`, `VERBOSE`. Controls the amount of detail written in the server log for each message that is logged.
16630
+ :param str log_line_prefix: Enum: `'pid=%p,user=%u,db=%d,app=%a,client=%h '`, `'%t [%p]: [%l-1] user=%u,db=%d,app=%a,client=%h '`, `'%m [%p] %q[user=%u,db=%d,app=%a] '`. Choose from one of the available log formats.
15997
16631
  :param int log_min_duration_statement: Log statements that take more than this number of milliseconds to run, -1 disables.
15998
16632
  :param int log_temp_files: Log statements for each temporary file created larger than this number of kilobytes, -1 disables.
15999
16633
  :param int max_files_per_process: PostgreSQL maximum number of files that can be open per process.
@@ -16014,13 +16648,13 @@ class PgPgUserConfigPg(dict):
16014
16648
  :param str pg_partman_bgw_dot_role: Controls which role to use for pg_partman's scheduled background tasks.
16015
16649
  :param bool pg_stat_monitor_dot_pgsm_enable_query_plan: Enables or disables query plan monitoring.
16016
16650
  :param int pg_stat_monitor_dot_pgsm_max_buckets: Sets the maximum number of buckets.
16017
- :param str pg_stat_statements_dot_track: Controls which statements are counted. Specify top to track top-level statements (those issued directly by clients), all to also track nested statements (such as statements invoked within functions), or none to disable statement statistics collection. The default value is top.
16651
+ :param str pg_stat_statements_dot_track: Enum: `all`, `top`, `none`. Controls which statements are counted. Specify top to track top-level statements (those issued directly by clients), all to also track nested statements (such as statements invoked within functions), or none to disable statement statistics collection. The default value is top.
16018
16652
  :param int temp_file_limit: PostgreSQL temporary file limit in KiB, -1 for unlimited.
16019
16653
  :param str timezone: PostgreSQL service timezone.
16020
16654
  :param int track_activity_query_size: Specifies the number of bytes reserved to track the currently executing command for each active session.
16021
- :param str track_commit_timestamp: Record commit time of transactions.
16022
- :param str track_functions: Enables tracking of function call counts and time used.
16023
- :param str track_io_timing: Enables timing of database I/O calls. This parameter is off by default, because it will repeatedly query the operating system for the current time, which may cause significant overhead on some platforms.
16655
+ :param str track_commit_timestamp: Enum: `off`, `on`. Record commit time of transactions.
16656
+ :param str track_functions: Enum: `all`, `pl`, `none`. Enables tracking of function call counts and time used.
16657
+ :param str track_io_timing: Enum: `off`, `on`. Enables timing of database I/O calls. This parameter is off by default, because it will repeatedly query the operating system for the current time, which may cause significant overhead on some platforms.
16024
16658
  :param int wal_sender_timeout: Terminate replication connections that are inactive for longer than this amount of time, in milliseconds. Setting this value to zero disables the timeout.
16025
16659
  :param int wal_writer_delay: WAL flush interval in milliseconds. Note that setting this value to lower than the default 200ms may negatively impact performance.
16026
16660
  """
@@ -16239,7 +16873,7 @@ class PgPgUserConfigPg(dict):
16239
16873
  @pulumi.getter(name="defaultToastCompression")
16240
16874
  def default_toast_compression(self) -> Optional[str]:
16241
16875
  """
16242
- Specifies the default TOAST compression method for values of compressible columns (the default is lz4).
16876
+ Enum: `lz4`, `pglz`. Specifies the default TOAST compression method for values of compressible columns (the default is lz4).
16243
16877
  """
16244
16878
  return pulumi.get(self, "default_toast_compression")
16245
16879
 
@@ -16271,7 +16905,7 @@ class PgPgUserConfigPg(dict):
16271
16905
  @pulumi.getter(name="logErrorVerbosity")
16272
16906
  def log_error_verbosity(self) -> Optional[str]:
16273
16907
  """
16274
- Controls the amount of detail written in the server log for each message that is logged.
16908
+ Enum: `TERSE`, `DEFAULT`, `VERBOSE`. Controls the amount of detail written in the server log for each message that is logged.
16275
16909
  """
16276
16910
  return pulumi.get(self, "log_error_verbosity")
16277
16911
 
@@ -16279,7 +16913,7 @@ class PgPgUserConfigPg(dict):
16279
16913
  @pulumi.getter(name="logLinePrefix")
16280
16914
  def log_line_prefix(self) -> Optional[str]:
16281
16915
  """
16282
- Choose from one of the available log-formats. These can support popular log analyzers like pgbadger, pganalyze etc.
16916
+ Enum: `'pid=%p,user=%u,db=%d,app=%a,client=%h '`, `'%t [%p]: [%l-1] user=%u,db=%d,app=%a,client=%h '`, `'%m [%p] %q[user=%u,db=%d,app=%a] '`. Choose from one of the available log formats.
16283
16917
  """
16284
16918
  return pulumi.get(self, "log_line_prefix")
16285
16919
 
@@ -16447,7 +17081,7 @@ class PgPgUserConfigPg(dict):
16447
17081
  @pulumi.getter(name="pgStatStatementsDotTrack")
16448
17082
  def pg_stat_statements_dot_track(self) -> Optional[str]:
16449
17083
  """
16450
- Controls which statements are counted. Specify top to track top-level statements (those issued directly by clients), all to also track nested statements (such as statements invoked within functions), or none to disable statement statistics collection. The default value is top.
17084
+ Enum: `all`, `top`, `none`. Controls which statements are counted. Specify top to track top-level statements (those issued directly by clients), all to also track nested statements (such as statements invoked within functions), or none to disable statement statistics collection. The default value is top.
16451
17085
  """
16452
17086
  return pulumi.get(self, "pg_stat_statements_dot_track")
16453
17087
 
@@ -16479,7 +17113,7 @@ class PgPgUserConfigPg(dict):
16479
17113
  @pulumi.getter(name="trackCommitTimestamp")
16480
17114
  def track_commit_timestamp(self) -> Optional[str]:
16481
17115
  """
16482
- Record commit time of transactions.
17116
+ Enum: `off`, `on`. Record commit time of transactions.
16483
17117
  """
16484
17118
  return pulumi.get(self, "track_commit_timestamp")
16485
17119
 
@@ -16487,7 +17121,7 @@ class PgPgUserConfigPg(dict):
16487
17121
  @pulumi.getter(name="trackFunctions")
16488
17122
  def track_functions(self) -> Optional[str]:
16489
17123
  """
16490
- Enables tracking of function call counts and time used.
17124
+ Enum: `all`, `pl`, `none`. Enables tracking of function call counts and time used.
16491
17125
  """
16492
17126
  return pulumi.get(self, "track_functions")
16493
17127
 
@@ -16495,7 +17129,7 @@ class PgPgUserConfigPg(dict):
16495
17129
  @pulumi.getter(name="trackIoTiming")
16496
17130
  def track_io_timing(self) -> Optional[str]:
16497
17131
  """
16498
- Enables timing of database I/O calls. This parameter is off by default, because it will repeatedly query the operating system for the current time, which may cause significant overhead on some platforms.
17132
+ Enum: `off`, `on`. Enables timing of database I/O calls. This parameter is off by default, because it will repeatedly query the operating system for the current time, which may cause significant overhead on some platforms.
16499
17133
  """
16500
17134
  return pulumi.get(self, "track_io_timing")
16501
17135
 
@@ -16681,7 +17315,7 @@ class PgPgUserConfigPgaudit(dict):
16681
17315
  :param bool feature_enabled: Enable pgaudit extension. When enabled, pgaudit extension will be automatically installed.Otherwise, extension will be uninstalled but auditing configurations will be preserved. The default value is `false`.
16682
17316
  :param bool log_catalog: Specifies that session logging should be enabled in the casewhere all relations in a statement are in pg_catalog. The default value is `true`.
16683
17317
  :param bool log_client: Specifies whether log messages will be visible to a client process such as psql. The default value is `false`.
16684
- :param str log_level: Specifies the log level that will be used for log entries. The default value is `log`.
17318
+ :param str log_level: Enum: `debug1`, `debug2`, `debug3`, `debug4`, `debug5`, `info`, `notice`, `warning`, `log`. Specifies the log level that will be used for log entries. The default value is `log`.
16685
17319
  :param int log_max_string_length: Crop parameters representation and whole statements if they exceed this threshold. A (default) value of -1 disable the truncation. The default value is `-1`.
16686
17320
  :param bool log_nested_statements: This GUC allows to turn off logging nested statements, that is, statements that are executed as part of another ExecutorRun. The default value is `true`.
16687
17321
  :param bool log_parameter: Specifies that audit logging should include the parameters that were passed with the statement. The default value is `false`.
@@ -16750,7 +17384,7 @@ class PgPgUserConfigPgaudit(dict):
16750
17384
  @pulumi.getter(name="logLevel")
16751
17385
  def log_level(self) -> Optional[str]:
16752
17386
  """
16753
- Specifies the log level that will be used for log entries. The default value is `log`.
17387
+ Enum: `debug1`, `debug2`, `debug3`, `debug4`, `debug5`, `info`, `notice`, `warning`, `log`. Specifies the log level that will be used for log entries. The default value is `log`.
16754
17388
  """
16755
17389
  return pulumi.get(self, "log_level")
16756
17390
 
@@ -16883,7 +17517,7 @@ class PgPgUserConfigPgbouncer(dict):
16883
17517
  """
16884
17518
  :param int autodb_idle_timeout: If the automatically created database pools have been unused this many seconds, they are freed. If 0 then timeout is disabled. (seconds). The default value is `3600`.
16885
17519
  :param int autodb_max_db_connections: Do not allow more than this many server connections per database (regardless of user). Setting it to 0 means unlimited.
16886
- :param str autodb_pool_mode: PGBouncer pool mode. The default value is `transaction`.
17520
+ :param str autodb_pool_mode: Enum: `session`, `transaction`, `statement`. PGBouncer pool mode. The default value is `transaction`.
16887
17521
  :param int autodb_pool_size: If non-zero then create automatically a pool of that size per user when a pool doesn't exist. The default value is `0`.
16888
17522
  :param Sequence[str] ignore_startup_parameters: List of parameters to ignore when given in startup packet.
16889
17523
  :param int min_pool_size: Add more server connections to pool if below this number. Improves behavior when usual load comes suddenly back after period of total inactivity. The value is effectively capped at the pool size. The default value is `0`.
@@ -16930,7 +17564,7 @@ class PgPgUserConfigPgbouncer(dict):
16930
17564
  @pulumi.getter(name="autodbPoolMode")
16931
17565
  def autodb_pool_mode(self) -> Optional[str]:
16932
17566
  """
16933
- PGBouncer pool mode. The default value is `transaction`.
17567
+ Enum: `session`, `transaction`, `statement`. PGBouncer pool mode. The default value is `transaction`.
16934
17568
  """
16935
17569
  return pulumi.get(self, "autodb_pool_mode")
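Several of the enums annotated above (`default_toast_compression`, `log_error_verbosity`, `track_io_timing`, `pg_stat_statements_dot_track`, PgBouncer's `autodb_pool_mode`) sit side by side in the PostgreSQL user config. A minimal sketch with placeholder names, assuming the nested `PgPgUserConfigPgArgs` and `PgPgUserConfigPgbouncerArgs` input classes mirror the output shapes documented here:

```python
import pulumi_aiven as aiven

# Placeholder project/plan; field names match the getters documented above.
pg = aiven.Pg(
    "tuned-pg",
    project="my-project",
    cloud_name="google-europe-west1",
    plan="business-4",
    service_name="tuned-pg",
    pg_user_config=aiven.PgPgUserConfigArgs(
        pg=aiven.PgPgUserConfigPgArgs(
            default_toast_compression="lz4",      # lz4 | pglz
            log_error_verbosity="DEFAULT",        # TERSE | DEFAULT | VERBOSE
            track_io_timing="on",                 # off | on
            pg_stat_statements_dot_track="top",   # all | top | none
        ),
        pgbouncer=aiven.PgPgUserConfigPgbouncerArgs(
            autodb_pool_mode="transaction",       # session | transaction | statement
            autodb_pool_size=10,
        ),
    ),
)
```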
16936
17570
 
@@ -17430,6 +18064,80 @@ class RedisComponent(dict):
  return pulumi.get(self, "usage")


+ @pulumi.output_type
+ class RedisRedis(dict):
+ @staticmethod
+ def __key_warning(key: str):
+ suggest = None
+ if key == "replicaUri":
+ suggest = "replica_uri"
+ elif key == "slaveUris":
+ suggest = "slave_uris"
+
+ if suggest:
+ pulumi.log.warn(f"Key '{key}' not found in RedisRedis. Access the value via the '{suggest}' property getter instead.")
+
+ def __getitem__(self, key: str) -> Any:
+ RedisRedis.__key_warning(key)
+ return super().__getitem__(key)
+
+ def get(self, key: str, default = None) -> Any:
+ RedisRedis.__key_warning(key)
+ return super().get(key, default)
+
+ def __init__(__self__, *,
+ password: Optional[str] = None,
+ replica_uri: Optional[str] = None,
+ slave_uris: Optional[Sequence[str]] = None,
+ uris: Optional[Sequence[str]] = None):
+ """
+ :param str password: Redis password.
+ :param str replica_uri: Redis replica server URI.
+ :param Sequence[str] slave_uris: Redis slave server URIs.
+ :param Sequence[str] uris: Redis server URIs.
+ """
+ if password is not None:
+ pulumi.set(__self__, "password", password)
+ if replica_uri is not None:
+ pulumi.set(__self__, "replica_uri", replica_uri)
+ if slave_uris is not None:
+ pulumi.set(__self__, "slave_uris", slave_uris)
+ if uris is not None:
+ pulumi.set(__self__, "uris", uris)
+
+ @property
+ @pulumi.getter
+ def password(self) -> Optional[str]:
+ """
+ Redis password.
+ """
+ return pulumi.get(self, "password")
+
+ @property
+ @pulumi.getter(name="replicaUri")
+ def replica_uri(self) -> Optional[str]:
+ """
+ Redis replica server URI.
+ """
+ return pulumi.get(self, "replica_uri")
+
+ @property
+ @pulumi.getter(name="slaveUris")
+ def slave_uris(self) -> Optional[Sequence[str]]:
+ """
+ Redis slave server URIs.
+ """
+ return pulumi.get(self, "slave_uris")
+
+ @property
+ @pulumi.getter
+ def uris(self) -> Optional[Sequence[str]]:
+ """
+ Redis server URIs.
+ """
+ return pulumi.get(self, "uris")
+
+
  @pulumi.output_type
  class RedisRedisUserConfig(dict):
  @staticmethod
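The new `RedisRedis` output type added above groups the connection details the service reports back (`uris`, `replica_uri`, `slave_uris`, `password`). A hedged sketch of reading them from an `aiven.Redis` resource follows; `service_uri` is the long-standing connection output, while the name of the generated attribute for the new typed block (shown here as `redises`) is an assumption to verify against `redis.py` in this release.

```python
import pulumi
import pulumi_aiven as aiven

redis = aiven.Redis(
    "example-redis",
    project="my-project",          # placeholder project name
    cloud_name="google-europe-west1",
    plan="startup-4",
    service_name="example-redis",
)

pulumi.export("redis_service_uri", redis.service_uri)
# Assumed attribute name for the RedisRedis block; check the generated resource.
pulumi.export("redis_uris", redis.redises.apply(lambda r: r[0].uris if r else []))
```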
@@ -17532,18 +18240,18 @@ class RedisRedisUserConfig(dict):
17532
18240
  :param str project_to_fork_from: Name of another project to fork a service from. This has effect only when a new service is being created.
17533
18241
  :param 'RedisRedisUserConfigPublicAccessArgs' public_access: Allow access to selected service ports from the public Internet
17534
18242
  :param str recovery_basebackup_name: Name of the basebackup to restore in forked service.
17535
- :param str redis_acl_channels_default: Determines default pub/sub channels' ACL for new users if ACL is not supplied. When this option is not defined, all_channels is assumed to keep backward compatibility. This option doesn't affect Redis configuration acl-pubsub-default.
18243
+ :param str redis_acl_channels_default: Enum: `allchannels`, `resetchannels`. Determines default pub/sub channels' ACL for new users if ACL is not supplied. When this option is not defined, all_channels is assumed to keep backward compatibility. This option doesn't affect Redis configuration acl-pubsub-default.
17536
18244
  :param int redis_io_threads: Set Redis IO thread count. Changing this will cause a restart of the Redis service.
17537
18245
  :param int redis_lfu_decay_time: LFU maxmemory-policy counter decay time in minutes. The default value is `1`.
17538
18246
  :param int redis_lfu_log_factor: Counter logarithm factor for volatile-lfu and allkeys-lfu maxmemory-policies. The default value is `10`.
17539
- :param str redis_maxmemory_policy: Redis maxmemory-policy. The default value is `noeviction`.
18247
+ :param str redis_maxmemory_policy: Enum: `noeviction`, `allkeys-lru`, `volatile-lru`, `allkeys-random`, `volatile-random`, `volatile-ttl`, `volatile-lfu`, `allkeys-lfu`. Redis maxmemory-policy. The default value is `noeviction`.
17540
18248
  :param str redis_notify_keyspace_events: Set notify-keyspace-events option.
17541
18249
  :param int redis_number_of_databases: Set number of Redis databases. Changing this will cause a restart of the Redis service.
17542
- :param str redis_persistence: When persistence is 'rdb', Redis does RDB dumps each 10 minutes if any key is changed. Also RDB dumps are done according to backup schedule for backup purposes. When persistence is 'off', no RDB dumps and backups are done, so data can be lost at any moment if service is restarted for any reason, or if service is powered off. Also service can't be forked.
18250
+ :param str redis_persistence: Enum: `off`, `rdb`. When persistence is 'rdb', Redis does RDB dumps each 10 minutes if any key is changed. Also RDB dumps are done according to the backup schedule for backup purposes. When persistence is 'off', no RDB dumps or backups are done, so data can be lost at any moment if the service is restarted for any reason, or if the service is powered off. Also, the service can't be forked.
17543
18251
  :param int redis_pubsub_client_output_buffer_limit: Set output buffer limit for pub / sub clients in MB. The value is the hard limit, the soft limit is 1/4 of the hard limit. When setting the limit, be mindful of the available memory in the selected service plan.
17544
18252
  :param bool redis_ssl: Require SSL to access Redis. The default value is `true`.
17545
18253
  :param int redis_timeout: Redis idle connection timeout in seconds. The default value is `300`.
17546
- :param str redis_version: Redis major version.
18254
+ :param str redis_version: Enum: `7.0`, and newer. Redis major version.
17547
18255
  :param bool service_log: Store logs for the service so that they are available in the HTTP API and console.
17548
18256
  :param str service_to_fork_from: Name of another service to fork from. This has effect only when a new service is being created.
17549
18257
  :param bool static_ips: Use static public IP addresses.
@@ -17686,7 +18394,7 @@ class RedisRedisUserConfig(dict):
17686
18394
  @pulumi.getter(name="redisAclChannelsDefault")
17687
18395
  def redis_acl_channels_default(self) -> Optional[str]:
17688
18396
  """
17689
- Determines default pub/sub channels' ACL for new users if ACL is not supplied. When this option is not defined, all_channels is assumed to keep backward compatibility. This option doesn't affect Redis configuration acl-pubsub-default.
18397
+ Enum: `allchannels`, `resetchannels`. Determines default pub/sub channels' ACL for new users if ACL is not supplied. When this option is not defined, all_channels is assumed to keep backward compatibility. This option doesn't affect Redis configuration acl-pubsub-default.
17690
18398
  """
17691
18399
  return pulumi.get(self, "redis_acl_channels_default")
17692
18400
 
@@ -17718,7 +18426,7 @@ class RedisRedisUserConfig(dict):
17718
18426
  @pulumi.getter(name="redisMaxmemoryPolicy")
17719
18427
  def redis_maxmemory_policy(self) -> Optional[str]:
17720
18428
  """
17721
- Redis maxmemory-policy. The default value is `noeviction`.
18429
+ Enum: `noeviction`, `allkeys-lru`, `volatile-lru`, `allkeys-random`, `volatile-random`, `volatile-ttl`, `volatile-lfu`, `allkeys-lfu`. Redis maxmemory-policy. The default value is `noeviction`.
17722
18430
  """
17723
18431
  return pulumi.get(self, "redis_maxmemory_policy")
17724
18432
 
@@ -17742,7 +18450,7 @@ class RedisRedisUserConfig(dict):
17742
18450
  @pulumi.getter(name="redisPersistence")
17743
18451
  def redis_persistence(self) -> Optional[str]:
17744
18452
  """
17745
- When persistence is 'rdb', Redis does RDB dumps each 10 minutes if any key is changed. Also RDB dumps are done according to backup schedule for backup purposes. When persistence is 'off', no RDB dumps and backups are done, so data can be lost at any moment if service is restarted for any reason, or if service is powered off. Also service can't be forked.
18453
+ Enum: `off`, `rdb`. When persistence is 'rdb', Redis does RDB dumps each 10 minutes if any key is changed. Also RDB dumps are done according to the backup schedule for backup purposes. When persistence is 'off', no RDB dumps or backups are done, so data can be lost at any moment if the service is restarted for any reason, or if the service is powered off. Also, the service can't be forked.
17746
18454
  """
17747
18455
  return pulumi.get(self, "redis_persistence")
17748
18456
 
@@ -17774,7 +18482,7 @@ class RedisRedisUserConfig(dict):
17774
18482
  @pulumi.getter(name="redisVersion")
17775
18483
  def redis_version(self) -> Optional[str]:
17776
18484
  """
17777
- Redis major version.
18485
+ Enum: `7.0`, and newer. Redis major version.
17778
18486
  """
17779
18487
  return pulumi.get(self, "redis_version")
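The enum annotations above (`redis_acl_channels_default`, `redis_maxmemory_policy`, `redis_persistence`, `redis_version`) all live in the service-level user config. A minimal sketch with placeholder project and plan names, using the `RedisRedisUserConfigArgs` input shape that mirrors this output class:

```python
import pulumi_aiven as aiven

# Placeholder names; field values come from the enums documented above.
cache = aiven.Redis(
    "session-cache",
    project="my-project",
    cloud_name="google-europe-west1",
    plan="startup-4",
    service_name="session-cache",
    redis_user_config=aiven.RedisRedisUserConfigArgs(
        redis_version="7.0",
        redis_maxmemory_policy="allkeys-lru",       # eviction suits a pure cache
        redis_persistence="off",                    # "rdb" keeps 10-minute dumps and backups
        redis_acl_channels_default="resetchannels",
        redis_ssl=True,
        redis_timeout=300,
    ),
)
```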
17780
18488
 
@@ -17866,7 +18574,7 @@ class RedisRedisUserConfigMigration(dict):
17866
18574
  :param int port: Port number of the server where to migrate data from.
17867
18575
  :param str dbname: Database name for bootstrapping the initial connection.
17868
18576
  :param str ignore_dbs: Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment).
17869
- :param str method: The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
18577
+ :param str method: Enum: `dump`, `replication`. The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
17870
18578
  :param str password: Password for authentication with the server where to migrate data from.
17871
18579
  :param bool ssl: The server where to migrate data from is secured with SSL. The default value is `true`.
17872
18580
  :param str username: User name for authentication with the server where to migrate data from.
@@ -17922,7 +18630,7 @@ class RedisRedisUserConfigMigration(dict):
17922
18630
  @pulumi.getter
17923
18631
  def method(self) -> Optional[str]:
17924
18632
  """
17925
- The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
18633
+ Enum: `dump`, `replication`. The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
17926
18634
  """
17927
18635
  return pulumi.get(self, "method")
17928
18636
 
@@ -18211,13 +18919,13 @@ class ServiceIntegrationClickhouseKafkaUserConfigTable(dict):
18211
18919
  skip_broken_messages: Optional[int] = None):
18212
18920
  """
18213
18921
  :param Sequence['ServiceIntegrationClickhouseKafkaUserConfigTableColumnArgs'] columns: Table columns
18214
- :param str data_format: Message data format. The default value is `JSONEachRow`.
18922
+ :param str data_format: Enum: `Avro`, `CSV`, `JSONAsString`, `JSONCompactEachRow`, `JSONCompactStringsEachRow`, `JSONEachRow`, `JSONStringsEachRow`, `MsgPack`, `TSKV`, `TSV`, `TabSeparated`, `RawBLOB`, `AvroConfluent`. Message data format. The default value is `JSONEachRow`.
18215
18923
  :param str group_name: Kafka consumers group. The default value is `clickhouse`.
18216
18924
  :param str name: Name of the table.
18217
18925
  :param Sequence['ServiceIntegrationClickhouseKafkaUserConfigTableTopicArgs'] topics: Kafka topics
18218
- :param str auto_offset_reset: Action to take when there is no initial offset in offset store or the desired offset is out of range. The default value is `earliest`.
18219
- :param str date_time_input_format: Method to read DateTime from text input formats. The default value is `basic`.
18220
- :param str handle_error_mode: How to handle errors for Kafka engine. The default value is `default`.
18926
+ :param str auto_offset_reset: Enum: `smallest`, `earliest`, `beginning`, `largest`, `latest`, `end`. Action to take when there is no initial offset in offset store or the desired offset is out of range. The default value is `earliest`.
18927
+ :param str date_time_input_format: Enum: `basic`, `best_effort`, `best_effort_us`. Method to read DateTime from text input formats. The default value is `basic`.
18928
+ :param str handle_error_mode: Enum: `default`, `stream`. How to handle errors for Kafka engine. The default value is `default`.
18221
18929
  :param int max_block_size: Number of row collected by poll(s) for flushing data from Kafka. The default value is `0`.
18222
18930
  :param int max_rows_per_message: The maximum number of rows produced in one kafka message for row-based formats. The default value is `1`.
18223
18931
  :param int num_consumers: The number of consumers per table per replica. The default value is `1`.
@@ -18258,7 +18966,7 @@ class ServiceIntegrationClickhouseKafkaUserConfigTable(dict):
18258
18966
  @pulumi.getter(name="dataFormat")
18259
18967
  def data_format(self) -> str:
18260
18968
  """
18261
- Message data format. The default value is `JSONEachRow`.
18969
+ Enum: `Avro`, `CSV`, `JSONAsString`, `JSONCompactEachRow`, `JSONCompactStringsEachRow`, `JSONEachRow`, `JSONStringsEachRow`, `MsgPack`, `TSKV`, `TSV`, `TabSeparated`, `RawBLOB`, `AvroConfluent`. Message data format. The default value is `JSONEachRow`.
18262
18970
  """
18263
18971
  return pulumi.get(self, "data_format")
18264
18972
 
@@ -18290,7 +18998,7 @@ class ServiceIntegrationClickhouseKafkaUserConfigTable(dict):
18290
18998
  @pulumi.getter(name="autoOffsetReset")
18291
18999
  def auto_offset_reset(self) -> Optional[str]:
18292
19000
  """
18293
- Action to take when there is no initial offset in offset store or the desired offset is out of range. The default value is `earliest`.
19001
+ Enum: `smallest`, `earliest`, `beginning`, `largest`, `latest`, `end`. Action to take when there is no initial offset in offset store or the desired offset is out of range. The default value is `earliest`.
18294
19002
  """
18295
19003
  return pulumi.get(self, "auto_offset_reset")
18296
19004
 
@@ -18298,7 +19006,7 @@ class ServiceIntegrationClickhouseKafkaUserConfigTable(dict):
18298
19006
  @pulumi.getter(name="dateTimeInputFormat")
18299
19007
  def date_time_input_format(self) -> Optional[str]:
18300
19008
  """
18301
- Method to read DateTime from text input formats. The default value is `basic`.
19009
+ Enum: `basic`, `best_effort`, `best_effort_us`. Method to read DateTime from text input formats. The default value is `basic`.
18302
19010
  """
18303
19011
  return pulumi.get(self, "date_time_input_format")
18304
19012
 
@@ -18306,7 +19014,7 @@ class ServiceIntegrationClickhouseKafkaUserConfigTable(dict):
18306
19014
  @pulumi.getter(name="handleErrorMode")
18307
19015
  def handle_error_mode(self) -> Optional[str]:
18308
19016
  """
18309
- How to handle errors for Kafka engine. The default value is `default`.
19017
+ Enum: `default`, `stream`. How to handle errors for Kafka engine. The default value is `default`.
18310
19018
  """
18311
19019
  return pulumi.get(self, "handle_error_mode")
18312
19020
 
@@ -18455,6 +19163,8 @@ class ServiceIntegrationDatadogUserConfig(dict):
  suggest = None
  if key == "datadogDbmEnabled":
  suggest = "datadog_dbm_enabled"
+ elif key == "datadogPgbouncerEnabled":
+ suggest = "datadog_pgbouncer_enabled"
  elif key == "datadogTags":
  suggest = "datadog_tags"
  elif key == "excludeConsumerGroups":
@@ -18483,6 +19193,7 @@ class ServiceIntegrationDatadogUserConfig(dict):

  def __init__(__self__, *,
  datadog_dbm_enabled: Optional[bool] = None,
+ datadog_pgbouncer_enabled: Optional[bool] = None,
  datadog_tags: Optional[Sequence['outputs.ServiceIntegrationDatadogUserConfigDatadogTag']] = None,
  exclude_consumer_groups: Optional[Sequence[str]] = None,
  exclude_topics: Optional[Sequence[str]] = None,
@@ -18494,6 +19205,7 @@ class ServiceIntegrationDatadogUserConfig(dict):
  redis: Optional['outputs.ServiceIntegrationDatadogUserConfigRedis'] = None):
  """
  :param bool datadog_dbm_enabled: Enable Datadog Database Monitoring.
+ :param bool datadog_pgbouncer_enabled: Enable Datadog PgBouncer Metric Tracking.
  :param Sequence['ServiceIntegrationDatadogUserConfigDatadogTagArgs'] datadog_tags: Custom tags provided by user
  :param Sequence[str] exclude_consumer_groups: List of custom metrics.
  :param Sequence[str] exclude_topics: List of topics to exclude.
@@ -18506,6 +19218,8 @@ class ServiceIntegrationDatadogUserConfig(dict):
  """
  if datadog_dbm_enabled is not None:
  pulumi.set(__self__, "datadog_dbm_enabled", datadog_dbm_enabled)
+ if datadog_pgbouncer_enabled is not None:
+ pulumi.set(__self__, "datadog_pgbouncer_enabled", datadog_pgbouncer_enabled)
  if datadog_tags is not None:
  pulumi.set(__self__, "datadog_tags", datadog_tags)
  if exclude_consumer_groups is not None:
@@ -18533,6 +19247,14 @@ class ServiceIntegrationDatadogUserConfig(dict):
  """
  return pulumi.get(self, "datadog_dbm_enabled")

+ @property
+ @pulumi.getter(name="datadogPgbouncerEnabled")
+ def datadog_pgbouncer_enabled(self) -> Optional[bool]:
+ """
+ Enable Datadog PgBouncer Metric Tracking.
+ """
+ return pulumi.get(self, "datadog_pgbouncer_enabled")
+
  @property
  @pulumi.getter(name="datadogTags")
  def datadog_tags(self) -> Optional[Sequence['outputs.ServiceIntegrationDatadogUserConfigDatadogTag']]:
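`datadog_pgbouncer_enabled` is the one new field in this user config. A sketch of switching it on for an existing PostgreSQL service's Datadog metrics integration; the project, service and endpoint ID are placeholders, and the resource arguments follow the usual `aiven.ServiceIntegration` shape:

```python
import pulumi_aiven as aiven

datadog_metrics = aiven.ServiceIntegration(
    "pg-datadog-metrics",
    project="my-project",
    integration_type="datadog",
    source_service_name="example-pg",
    # Placeholder ID of an existing Datadog service integration endpoint.
    destination_endpoint_id="my-project/00000000-0000-0000-0000-000000000000",
    datadog_user_config=aiven.ServiceIntegrationDatadogUserConfigArgs(
        datadog_dbm_enabled=True,
        datadog_pgbouncer_enabled=True,  # new field in this version
    ),
)
```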
@@ -18794,7 +19516,7 @@ class ServiceIntegrationEndpointDatadogUserConfig(dict):
18794
19516
  :param int kafka_consumer_check_instances: Number of separate instances to fetch kafka consumer statistics with.
18795
19517
  :param int kafka_consumer_stats_timeout: Number of seconds that datadog will wait to get consumer statistics from brokers.
18796
19518
  :param int max_partition_contexts: Maximum number of partition contexts to send.
18797
- :param str site: Datadog intake site. Defaults to datadoghq.com.
19519
+ :param str site: Enum: `datadoghq.com`, `datadoghq.eu`, `us3.datadoghq.com`, `us5.datadoghq.com`, `ddog-gov.com`, `ap1.datadoghq.com`. Datadog intake site. Defaults to datadoghq.com.
18798
19520
  """
18799
19521
  pulumi.set(__self__, "datadog_api_key", datadog_api_key)
18800
19522
  if datadog_tags is not None:
@@ -18862,7 +19584,7 @@ class ServiceIntegrationEndpointDatadogUserConfig(dict):
18862
19584
  @pulumi.getter
18863
19585
  def site(self) -> Optional[str]:
18864
19586
  """
18865
- Datadog intake site. Defaults to datadoghq.com.
19587
+ Enum: `datadoghq.com`, `datadoghq.eu`, `us3.datadoghq.com`, `us5.datadoghq.com`, `ddog-gov.com`, `ap1.datadoghq.com`. Datadog intake site. Defaults to datadoghq.com.
18866
19588
  """
18867
19589
  return pulumi.get(self, "site")
18868
19590
 
@@ -19280,14 +20002,14 @@ class ServiceIntegrationEndpointExternalKafkaUserConfig(dict):
19280
20002
  ssl_endpoint_identification_algorithm: Optional[str] = None):
19281
20003
  """
19282
20004
  :param str bootstrap_servers: Bootstrap servers.
19283
- :param str security_protocol: Security protocol.
19284
- :param str sasl_mechanism: SASL mechanism used for connections to the Kafka server.
20005
+ :param str security_protocol: Enum: `PLAINTEXT`, `SSL`, `SASL_PLAINTEXT`, `SASL_SSL`. Security protocol.
20006
+ :param str sasl_mechanism: Enum: `PLAIN`, `SCRAM-SHA-256`, `SCRAM-SHA-512`. SASL mechanism used for connections to the Kafka server.
19285
20007
  :param str sasl_plain_password: Password for SASL PLAIN mechanism in the Kafka server.
19286
20008
  :param str sasl_plain_username: Username for SASL PLAIN mechanism in the Kafka server.
19287
20009
  :param str ssl_ca_cert: PEM-encoded CA certificate.
19288
20010
  :param str ssl_client_cert: PEM-encoded client certificate.
19289
20011
  :param str ssl_client_key: PEM-encoded client key.
19290
- :param str ssl_endpoint_identification_algorithm: The endpoint identification algorithm to validate server hostname using server certificate.
20012
+ :param str ssl_endpoint_identification_algorithm: Enum: `https`. The endpoint identification algorithm to validate server hostname using server certificate.
19291
20013
  """
19292
20014
  pulumi.set(__self__, "bootstrap_servers", bootstrap_servers)
19293
20015
  pulumi.set(__self__, "security_protocol", security_protocol)
@@ -19318,7 +20040,7 @@ class ServiceIntegrationEndpointExternalKafkaUserConfig(dict):
19318
20040
  @pulumi.getter(name="securityProtocol")
19319
20041
  def security_protocol(self) -> str:
19320
20042
  """
19321
- Security protocol.
20043
+ Enum: `PLAINTEXT`, `SSL`, `SASL_PLAINTEXT`, `SASL_SSL`. Security protocol.
19322
20044
  """
19323
20045
  return pulumi.get(self, "security_protocol")
19324
20046
 
@@ -19326,7 +20048,7 @@ class ServiceIntegrationEndpointExternalKafkaUserConfig(dict):
19326
20048
  @pulumi.getter(name="saslMechanism")
19327
20049
  def sasl_mechanism(self) -> Optional[str]:
19328
20050
  """
19329
- SASL mechanism used for connections to the Kafka server.
20051
+ Enum: `PLAIN`, `SCRAM-SHA-256`, `SCRAM-SHA-512`. SASL mechanism used for connections to the Kafka server.
19330
20052
  """
19331
20053
  return pulumi.get(self, "sasl_mechanism")
19332
20054
 
@@ -19374,7 +20096,7 @@ class ServiceIntegrationEndpointExternalKafkaUserConfig(dict):
19374
20096
  @pulumi.getter(name="sslEndpointIdentificationAlgorithm")
19375
20097
  def ssl_endpoint_identification_algorithm(self) -> Optional[str]:
19376
20098
  """
19377
- The endpoint identification algorithm to validate server hostname using server certificate.
20099
+ Enum: `https`. The endpoint identification algorithm to validate server hostname using server certificate.
19378
20100
  """
19379
20101
  return pulumi.get(self, "ssl_endpoint_identification_algorithm")
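The enum lists above belong to the `external_kafka` integration endpoint config. A hedged sketch of registering such an endpoint (the project name, broker addresses and SASL credentials are placeholders; the nested argument names follow the `ServiceIntegrationEndpointExternalKafkaUserConfig` fields documented here):

```python
import pulumi
import pulumi_aiven as aiven

cfg = pulumi.Config()

external_kafka = aiven.ServiceIntegrationEndpoint(
    "on-prem-kafka",
    project="my-project",                 # placeholder project name
    endpoint_name="on-prem-kafka",
    endpoint_type="external_kafka",
    external_kafka_user_config=aiven.ServiceIntegrationEndpointExternalKafkaUserConfigArgs(
        bootstrap_servers="kafka-1.internal.example.com:9092,kafka-2.internal.example.com:9092",
        security_protocol="SASL_SSL",       # PLAINTEXT | SSL | SASL_PLAINTEXT | SASL_SSL
        sasl_mechanism="SCRAM-SHA-256",     # PLAIN | SCRAM-SHA-256 | SCRAM-SHA-512
        sasl_plain_username="replicator",
        sasl_plain_password=cfg.require_secret("externalKafkaPassword"),
        ssl_endpoint_identification_algorithm="https",
    ),
)
```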
19380
20102
 
@@ -19508,7 +20230,7 @@ class ServiceIntegrationEndpointExternalPostgresql(dict):
19508
20230
  :param str password: Password.
19509
20231
  :param str ssl_client_certificate: Client certificate.
19510
20232
  :param str ssl_client_key: Client key.
19511
- :param str ssl_mode: SSL Mode. The default value is `verify-full`.
20233
+ :param str ssl_mode: Enum: `disable`, `allow`, `prefer`, `require`, `verify-ca`, `verify-full`. SSL Mode. The default value is `verify-full`.
19512
20234
  :param str ssl_root_cert: SSL Root Cert.
19513
20235
  """
19514
20236
  pulumi.set(__self__, "host", host)
@@ -19587,7 +20309,7 @@ class ServiceIntegrationEndpointExternalPostgresql(dict):
19587
20309
  @pulumi.getter(name="sslMode")
19588
20310
  def ssl_mode(self) -> Optional[str]:
19589
20311
  """
19590
- SSL Mode. The default value is `verify-full`.
20312
+ Enum: `disable`, `allow`, `prefer`, `require`, `verify-ca`, `verify-full`. SSL Mode. The default value is `verify-full`.
19591
20313
  """
19592
20314
  return pulumi.get(self, "ssl_mode")
19593
20315
 
@@ -19627,7 +20349,7 @@ class ServiceIntegrationEndpointExternalSchemaRegistryUserConfig(dict):
19627
20349
  basic_auth_password: Optional[str] = None,
19628
20350
  basic_auth_username: Optional[str] = None):
19629
20351
  """
19630
- :param str authentication: Authentication method.
20352
+ :param str authentication: Enum: `none`, `basic`. Authentication method.
19631
20353
  :param str url: Schema Registry URL.
19632
20354
  :param str basic_auth_password: Basic authentication password.
19633
20355
  :param str basic_auth_username: Basic authentication user name.
@@ -19643,7 +20365,7 @@ class ServiceIntegrationEndpointExternalSchemaRegistryUserConfig(dict):
19643
20365
  @pulumi.getter
19644
20366
  def authentication(self) -> str:
19645
20367
  """
19646
- Authentication method.
20368
+ Enum: `none`, `basic`. Authentication method.
19647
20369
  """
19648
20370
  return pulumi.get(self, "authentication")
19649
20371
 
@@ -19803,7 +20525,7 @@ class ServiceIntegrationEndpointRsyslogUserConfig(dict):
19803
20525
  max_message_size: Optional[int] = None,
19804
20526
  sd: Optional[str] = None):
19805
20527
  """
19806
- :param str format: Message format. The default value is `rfc5424`.
20528
+ :param str format: Enum: `rfc5424`, `rfc3164`, `custom`. Message format. The default value is `rfc5424`.
19807
20529
  :param int port: Rsyslog server port. The default value is `514`.
19808
20530
  :param str server: Rsyslog server IP address or hostname.
19809
20531
  :param bool tls: Require TLS. The default value is `true`.
@@ -19835,7 +20557,7 @@ class ServiceIntegrationEndpointRsyslogUserConfig(dict):
19835
20557
  @pulumi.getter
19836
20558
  def format(self) -> str:
19837
20559
  """
19838
- Message format. The default value is `rfc5424`.
20560
+ Enum: `rfc5424`, `rfc3164`, `custom`. Message format. The default value is `rfc5424`.
19839
20561
  """
19840
20562
  return pulumi.get(self, "format")
19841
20563
 
@@ -20381,7 +21103,7 @@ class ServiceIntegrationKafkaMirrormakerUserConfigKafkaMirrormaker(dict):
20381
21103
  :param int consumer_fetch_min_bytes: The minimum amount of data the server should return for a fetch request.
20382
21104
  :param int producer_batch_size: The batch size in bytes producer will attempt to collect before publishing to broker.
20383
21105
  :param int producer_buffer_memory: The amount of bytes producer can use for buffering data before publishing to broker.
20384
- :param str producer_compression_type: Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
21106
+ :param str producer_compression_type: Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
20385
21107
  :param int producer_linger_ms: The linger time (ms) for waiting new data to arrive for publishing.
20386
21108
  :param int producer_max_request_size: The maximum request size in bytes.
20387
21109
  """
@@ -20426,7 +21148,7 @@ class ServiceIntegrationKafkaMirrormakerUserConfigKafkaMirrormaker(dict):
20426
21148
  @pulumi.getter(name="producerCompressionType")
20427
21149
  def producer_compression_type(self) -> Optional[str]:
20428
21150
  """
20429
- Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
21151
+ Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
20430
21152
  """
20431
21153
  return pulumi.get(self, "producer_compression_type")
20432
21154
 
@@ -21176,6 +21898,24 @@ class GetAccountAuthenticationSamlFieldMappingResult(dict):
  return pulumi.get(self, "real_name")


+ @pulumi.output_type
+ class GetCassandaCassandraResult(dict):
+ def __init__(__self__, *,
+ uris: Sequence[str]):
+ """
+ :param Sequence[str] uris: Cassandra server URIs.
+ """
+ pulumi.set(__self__, "uris", uris)
+
+ @property
+ @pulumi.getter
+ def uris(self) -> Sequence[str]:
+ """
+ Cassandra server URIs.
+ """
+ return pulumi.get(self, "uris")
+
+
  @pulumi.output_type
  class GetCassandaCassandraUserConfigResult(dict):
  def __init__(__self__, *,
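The new `GetCassandaCassandraResult` type above (and its correctly spelled `GetCassandraCassandraResult` twin further down) adds a typed connection block with the server URIs to the Cassandra data sources. A small sketch of looking up an existing service; project and service names are placeholders, and the attribute name for the new block on the result is an assumption to verify against `get_cassandra.py`:

```python
import pulumi
import pulumi_aiven as aiven

# Look up a Cassandra service managed outside this stack.
cassandra = aiven.get_cassandra(
    project="my-project",
    service_name="example-cassandra",
)

pulumi.export("cassandra_service_uri", cassandra.service_uri)
# Assumed attribute name for the typed connection block added in this release.
pulumi.export("cassandra_uris", cassandra.cassandras[0].uris)
```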
@@ -21200,7 +21940,7 @@ class GetCassandaCassandraUserConfigResult(dict):
21200
21940
  :param int backup_hour: The hour of day (in UTC) when backup for the service is started. New backup is only started if previous backup has already completed.
21201
21941
  :param int backup_minute: The minute of an hour when backup for the service is started. New backup is only started if previous backup has already completed.
21202
21942
  :param 'GetCassandaCassandraUserConfigCassandraArgs' cassandra: Cassandra configuration values
21203
- :param str cassandra_version: Cassandra version.
21943
+ :param str cassandra_version: Enum: `3`, `4`, `4.1`, and newer. Cassandra version.
21204
21944
  :param Sequence['GetCassandaCassandraUserConfigIpFilterObjectArgs'] ip_filter_objects: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'
21205
21945
  :param Sequence[str] ip_filter_strings: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
21206
21946
  :param Sequence[str] ip_filters: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
@@ -21285,7 +22025,7 @@ class GetCassandaCassandraUserConfigResult(dict):
21285
22025
  @pulumi.getter(name="cassandraVersion")
21286
22026
  def cassandra_version(self) -> Optional[str]:
21287
22027
  """
21288
- Cassandra version.
22028
+ Enum: `3`, `4`, `4.1`, and newer. Cassandra version.
21289
22029
  """
21290
22030
  return pulumi.get(self, "cassandra_version")
21291
22031
 
@@ -21663,6 +22403,24 @@ class GetCassandaTechEmailResult(dict):
21663
22403
  return pulumi.get(self, "email")
21664
22404
 
21665
22405
 
22406
+ @pulumi.output_type
22407
+ class GetCassandraCassandraResult(dict):
22408
+ def __init__(__self__, *,
22409
+ uris: Sequence[str]):
22410
+ """
22411
+ :param Sequence[str] uris: Cassandra server URIs.
22412
+ """
22413
+ pulumi.set(__self__, "uris", uris)
22414
+
22415
+ @property
22416
+ @pulumi.getter
22417
+ def uris(self) -> Sequence[str]:
22418
+ """
22419
+ Cassandra server URIs.
22420
+ """
22421
+ return pulumi.get(self, "uris")
22422
+
22423
+
21666
22424
  @pulumi.output_type
21667
22425
  class GetCassandraCassandraUserConfigResult(dict):
21668
22426
  def __init__(__self__, *,
@@ -21687,7 +22445,7 @@ class GetCassandraCassandraUserConfigResult(dict):
21687
22445
  :param int backup_hour: The hour of day (in UTC) when backup for the service is started. New backup is only started if previous backup has already completed.
21688
22446
  :param int backup_minute: The minute of an hour when backup for the service is started. New backup is only started if previous backup has already completed.
21689
22447
  :param 'GetCassandraCassandraUserConfigCassandraArgs' cassandra: Cassandra configuration values
21690
- :param str cassandra_version: Cassandra version.
22448
+ :param str cassandra_version: Enum: `3`, `4`, `4.1`, and newer. Cassandra version.
21691
22449
  :param Sequence['GetCassandraCassandraUserConfigIpFilterObjectArgs'] ip_filter_objects: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'
21692
22450
  :param Sequence[str] ip_filter_strings: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
21693
22451
  :param Sequence[str] ip_filters: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
@@ -21772,7 +22530,7 @@ class GetCassandraCassandraUserConfigResult(dict):
21772
22530
  @pulumi.getter(name="cassandraVersion")
21773
22531
  def cassandra_version(self) -> Optional[str]:
21774
22532
  """
21775
- Cassandra version.
22533
+ Enum: `3`, `4`, `4.1`, and newer. Cassandra version.
21776
22534
  """
21777
22535
  return pulumi.get(self, "cassandra_version")
21778
22536
 
@@ -22150,6 +22908,24 @@ class GetCassandraTechEmailResult(dict):
22150
22908
  return pulumi.get(self, "email")
22151
22909
 
22152
22910
 
22911
+ @pulumi.output_type
22912
+ class GetClickhouseClickhouseResult(dict):
22913
+ def __init__(__self__, *,
22914
+ uris: Sequence[str]):
22915
+ """
22916
+ :param Sequence[str] uris: Clickhouse server URIs.
22917
+ """
22918
+ pulumi.set(__self__, "uris", uris)
22919
+
22920
+ @property
22921
+ @pulumi.getter
22922
+ def uris(self) -> Sequence[str]:
22923
+ """
22924
+ Clickhouse server URIs.
22925
+ """
22926
+ return pulumi.get(self, "uris")
22927
+
22928
+
22153
22929
  @pulumi.output_type
22154
22930
  class GetClickhouseClickhouseUserConfigResult(dict):
22155
22931
  def __init__(__self__, *,
@@ -22756,6 +23532,57 @@ class GetDragonflyComponentResult(dict):
22756
23532
  return pulumi.get(self, "usage")
22757
23533
 
22758
23534
 
23535
+ @pulumi.output_type
23536
+ class GetDragonflyDragonflyResult(dict):
23537
+ def __init__(__self__, *,
23538
+ password: str,
23539
+ replica_uri: str,
23540
+ slave_uris: Sequence[str],
23541
+ uris: Sequence[str]):
23542
+ """
23543
+ :param str password: Dragonfly password.
23544
+ :param str replica_uri: Dragonfly replica server URI.
23545
+ :param Sequence[str] slave_uris: Dragonfly slave server URIs.
23546
+ :param Sequence[str] uris: Dragonfly server URIs.
23547
+ """
23548
+ pulumi.set(__self__, "password", password)
23549
+ pulumi.set(__self__, "replica_uri", replica_uri)
23550
+ pulumi.set(__self__, "slave_uris", slave_uris)
23551
+ pulumi.set(__self__, "uris", uris)
23552
+
23553
+ @property
23554
+ @pulumi.getter
23555
+ def password(self) -> str:
23556
+ """
23557
+ Dragonfly password.
23558
+ """
23559
+ return pulumi.get(self, "password")
23560
+
23561
+ @property
23562
+ @pulumi.getter(name="replicaUri")
23563
+ def replica_uri(self) -> str:
23564
+ """
23565
+ Dragonfly replica server URI.
23566
+ """
23567
+ return pulumi.get(self, "replica_uri")
23568
+
23569
+ @property
23570
+ @pulumi.getter(name="slaveUris")
23571
+ def slave_uris(self) -> Sequence[str]:
23572
+ """
23573
+ Dragonfly slave server URIs.
23574
+ """
23575
+ return pulumi.get(self, "slave_uris")
23576
+
23577
+ @property
23578
+ @pulumi.getter
23579
+ def uris(self) -> Sequence[str]:
23580
+ """
23581
+ Dragonfly server URIs.
23582
+ """
23583
+ return pulumi.get(self, "uris")
23584
+
23585
+
22759
23586
  @pulumi.output_type
22760
23587
  class GetDragonflyDragonflyUserConfigResult(dict):
22761
23588
  def __init__(__self__, *,
@@ -22776,7 +23603,7 @@ class GetDragonflyDragonflyUserConfigResult(dict):
22776
23603
  static_ips: Optional[bool] = None):
22777
23604
  """
22778
23605
  :param bool cache_mode: Evict entries when getting close to maxmemory limit. The default value is `false`.
22779
- :param str dragonfly_persistence: When persistence is 'rdb', Dragonfly does RDB dumps each 10 minutes. Also RDB dumps are done according to backup schedule for backup purposes. When persistence is 'off', no RDB dumps and backups are done, so data can be lost at any moment if service is restarted for any reason, or if service is powered off. Also service can't be forked.
23606
+ :param str dragonfly_persistence: Enum: `off`, `rdb`, `dfs`. When persistence is 'rdb' or 'dfs', Dragonfly does RDB or DFS dumps every 10 minutes. Dumps are done according to the backup schedule for backup purposes. When persistence is 'off', no RDB/DFS dumps or backups are done, so data can be lost at any moment if the service is restarted for any reason, or if the service is powered off. Also, the service can't be forked.
22780
23607
  :param bool dragonfly_ssl: Require SSL to access Dragonfly. The default value is `true`.
22781
23608
  :param Sequence['GetDragonflyDragonflyUserConfigIpFilterObjectArgs'] ip_filter_objects: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'
22782
23609
  :param Sequence[str] ip_filter_strings: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
@@ -22834,7 +23661,7 @@ class GetDragonflyDragonflyUserConfigResult(dict):
22834
23661
  @pulumi.getter(name="dragonflyPersistence")
22835
23662
  def dragonfly_persistence(self) -> Optional[str]:
22836
23663
  """
22837
- When persistence is 'rdb', Dragonfly does RDB dumps each 10 minutes. Also RDB dumps are done according to backup schedule for backup purposes. When persistence is 'off', no RDB dumps and backups are done, so data can be lost at any moment if service is restarted for any reason, or if service is powered off. Also service can't be forked.
23664
+ Enum: `off`, `rdb`, `dfs`. When persistence is 'rdb' or 'dfs', Dragonfly does RDB or DFS dumps every 10 minutes. Dumps are done according to the backup schedule for backup purposes. When persistence is 'off', no RDB/DFS dumps or backups are done, so data can be lost at any moment if the service is restarted for any reason, or if the service is powered off. Also, the service can't be forked.
22838
23665
  """
22839
23666
  return pulumi.get(self, "dragonfly_persistence")
22840
23667
 
@@ -22992,7 +23819,7 @@ class GetDragonflyDragonflyUserConfigMigrationResult(dict):
22992
23819
  :param int port: Port number of the server where to migrate data from.
22993
23820
  :param str dbname: Database name for bootstrapping the initial connection.
22994
23821
  :param str ignore_dbs: Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment).
22995
- :param str method: The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
23822
+ :param str method: Enum: `dump`, `replication`. The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
22996
23823
  :param str password: Password for authentication with the server where to migrate data from.
22997
23824
  :param bool ssl: The server where to migrate data from is secured with SSL. The default value is `true`.
22998
23825
  :param str username: User name for authentication with the server where to migrate data from.
@@ -23048,7 +23875,7 @@ class GetDragonflyDragonflyUserConfigMigrationResult(dict):
23048
23875
  @pulumi.getter
23049
23876
  def method(self) -> Optional[str]:
23050
23877
  """
23051
- The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
23878
+ Enum: `dump`, `replication`. The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
23052
23879
  """
23053
23880
  return pulumi.get(self, "method")
23054
23881
 
@@ -23433,7 +24260,7 @@ class GetFlinkFlinkUserConfigResult(dict):
23433
24260
  static_ips: Optional[bool] = None):
23434
24261
  """
23435
24262
  :param str additional_backup_regions: Additional Cloud Regions for Backup Replication.
23436
- :param str flink_version: Flink major version.
24263
+ :param str flink_version: Enum: `1.16`, `1.19`, and newer. Flink major version.
23437
24264
  :param Sequence['GetFlinkFlinkUserConfigIpFilterObjectArgs'] ip_filter_objects: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'
23438
24265
  :param Sequence[str] ip_filter_strings: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
23439
24266
  :param Sequence[str] ip_filters: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
@@ -23476,7 +24303,7 @@ class GetFlinkFlinkUserConfigResult(dict):
23476
24303
  @pulumi.getter(name="flinkVersion")
23477
24304
  def flink_version(self) -> Optional[str]:
23478
24305
  """
23479
- Flink major version.
24306
+ Enum: `1.16`, `1.19`, and newer. Flink major version.
23480
24307
  """
23481
24308
  return pulumi.get(self, "flink_version")
23482
24309
 
@@ -23772,6 +24599,24 @@ class GetGrafanaComponentResult(dict):
  return pulumi.get(self, "usage")


+ @pulumi.output_type
+ class GetGrafanaGrafanaResult(dict):
+ def __init__(__self__, *,
+ uris: Sequence[str]):
+ """
+ :param Sequence[str] uris: Grafana server URIs.
+ """
+ pulumi.set(__self__, "uris", uris)
+
+ @property
+ @pulumi.getter
+ def uris(self) -> Sequence[str]:
+ """
+ Grafana server URIs.
+ """
+ return pulumi.get(self, "uris")
+
+
  @pulumi.output_type
  class GetGrafanaGrafanaUserConfigResult(dict):
  def __init__(__self__, *,
@@ -23820,9 +24665,9 @@ class GetGrafanaGrafanaUserConfigResult(dict):
23820
24665
  """
23821
24666
  :param str additional_backup_regions: Additional Cloud Regions for Backup Replication.
23822
24667
  :param bool alerting_enabled: Enable or disable Grafana legacy alerting functionality. This should not be enabled with unified_alerting_enabled.
23823
- :param str alerting_error_or_timeout: Default error or timeout setting for new alerting rules.
24668
+ :param str alerting_error_or_timeout: Enum: `alerting`, `keep_state`. Default error or timeout setting for new alerting rules.
23824
24669
  :param int alerting_max_annotations_to_keep: Max number of alert annotations that Grafana stores. 0 (default) keeps all alert annotations.
23825
- :param str alerting_nodata_or_nullvalues: Default value for 'no data or null values' for new alerting rules.
24670
+ :param str alerting_nodata_or_nullvalues: Enum: `alerting`, `no_data`, `keep_state`, `ok`. Default value for 'no data or null values' for new alerting rules.
23826
24671
  :param bool allow_embedding: Allow embedding Grafana dashboards with iframe/frame/object/embed tags. Disabled by default to limit impact of clickjacking.
23827
24672
  :param 'GetGrafanaGrafanaUserConfigAuthAzureadArgs' auth_azuread: Azure AD OAuth integration
23828
24673
  :param bool auth_basic_enabled: Enable or disable basic authentication form, used by Grafana built-in login.
@@ -23830,7 +24675,7 @@ class GetGrafanaGrafanaUserConfigResult(dict):
23830
24675
  :param 'GetGrafanaGrafanaUserConfigAuthGithubArgs' auth_github: Github Auth integration
23831
24676
  :param 'GetGrafanaGrafanaUserConfigAuthGitlabArgs' auth_gitlab: GitLab Auth integration
23832
24677
  :param 'GetGrafanaGrafanaUserConfigAuthGoogleArgs' auth_google: Google Auth integration
23833
- :param str cookie_samesite: Cookie SameSite attribute: 'strict' prevents sending cookie for cross-site requests, effectively disabling direct linking from other sites to Grafana. 'lax' is the default value.
24678
+ :param str cookie_samesite: Enum: `lax`, `strict`, `none`. Cookie SameSite attribute: 'strict' prevents sending cookie for cross-site requests, effectively disabling direct linking from other sites to Grafana. 'lax' is the default value.
23834
24679
  :param str custom_domain: Serve the web frontend using a custom CNAME pointing to the Aiven DNS name.
23835
24680
  :param bool dashboard_previews_enabled: This feature is new in Grafana 9 and is quite resource intensive. It may cause low-end plans to work more slowly while the dashboard previews are rendering.
23836
24681
  :param str dashboards_min_refresh_interval: Signed sequence of decimal numbers, followed by a unit suffix (ms, s, m, h, d), e.g. 30s, 1h.
@@ -23858,7 +24703,7 @@ class GetGrafanaGrafanaUserConfigResult(dict):
23858
24703
  :param bool static_ips: Use static public IP addresses.
23859
24704
  :param bool unified_alerting_enabled: Enable or disable Grafana unified alerting functionality. By default this is enabled and any legacy alerts will be migrated on upgrade to Grafana 9+. To stay on legacy alerting, set unified_alerting_enabled to false and alerting_enabled to true. See https://grafana.com/docs/grafana/latest/alerting/set-up/migrating-alerts/ for more details.
23860
24705
  :param bool user_auto_assign_org: Auto-assign new users on signup to main organization. Defaults to false.
23861
- :param str user_auto_assign_org_role: Set role for new signups. Defaults to Viewer.
24706
+ :param str user_auto_assign_org_role: Enum: `Viewer`, `Admin`, `Editor`. Set role for new signups. Defaults to Viewer.
23862
24707
  :param bool viewers_can_edit: Users with view-only permission can edit but not save dashboards.
23863
24708
  """
23864
24709
  if additional_backup_regions is not None:
@@ -23966,7 +24811,7 @@ class GetGrafanaGrafanaUserConfigResult(dict):
     @pulumi.getter(name="alertingErrorOrTimeout")
     def alerting_error_or_timeout(self) -> Optional[str]:
         """
-        Default error or timeout setting for new alerting rules.
+        Enum: `alerting`, `keep_state`. Default error or timeout setting for new alerting rules.
         """
         return pulumi.get(self, "alerting_error_or_timeout")

@@ -23982,7 +24827,7 @@ class GetGrafanaGrafanaUserConfigResult(dict):
     @pulumi.getter(name="alertingNodataOrNullvalues")
     def alerting_nodata_or_nullvalues(self) -> Optional[str]:
         """
-        Default value for 'no data or null values' for new alerting rules.
+        Enum: `alerting`, `no_data`, `keep_state`, `ok`. Default value for 'no data or null values' for new alerting rules.
         """
         return pulumi.get(self, "alerting_nodata_or_nullvalues")

@@ -24046,7 +24891,7 @@ class GetGrafanaGrafanaUserConfigResult(dict):
     @pulumi.getter(name="cookieSamesite")
     def cookie_samesite(self) -> Optional[str]:
         """
-        Cookie SameSite attribute: 'strict' prevents sending cookie for cross-site requests, effectively disabling direct linking from other sites to Grafana. 'lax' is the default value.
+        Enum: `lax`, `strict`, `none`. Cookie SameSite attribute: 'strict' prevents sending cookie for cross-site requests, effectively disabling direct linking from other sites to Grafana. 'lax' is the default value.
         """
         return pulumi.get(self, "cookie_samesite")

@@ -24273,7 +25118,7 @@ class GetGrafanaGrafanaUserConfigResult(dict):
     @pulumi.getter(name="userAutoAssignOrgRole")
     def user_auto_assign_org_role(self) -> Optional[str]:
         """
-        Set role for new signups. Defaults to Viewer.
+        Enum: `Viewer`, `Admin`, `Editor`. Set role for new signups. Defaults to Viewer.
         """
         return pulumi.get(self, "user_auto_assign_org_role")

@@ -24849,7 +25694,7 @@ class GetGrafanaGrafanaUserConfigExternalImageStorageResult(dict):
         """
         :param str access_key: S3 access key. Requires permissions to the S3 bucket for the s3:PutObject and s3:PutObjectAcl actions.
         :param str bucket_url: Bucket URL for S3.
-        :param str provider: Provider type.
+        :param str provider: Enum: `s3`. Provider type.
         :param str secret_key: S3 secret key.
         """
         pulumi.set(__self__, "access_key", access_key)
@@ -24877,7 +25722,7 @@ class GetGrafanaGrafanaUserConfigExternalImageStorageResult(dict):
     @pulumi.getter
     def provider(self) -> str:
         """
-        Provider type.
+        Enum: `s3`. Provider type.
         """
         return pulumi.get(self, "provider")

@@ -24995,7 +25840,7 @@ class GetGrafanaGrafanaUserConfigSmtpServerResult(dict):
         :param str from_name: Name used in outgoing emails, defaults to Grafana.
         :param str password: Password for SMTP authentication.
         :param bool skip_verify: Skip verifying server certificate. Defaults to false.
-        :param str starttls_policy: Either OpportunisticStartTLS, MandatoryStartTLS or NoStartTLS. Default is OpportunisticStartTLS.
+        :param str starttls_policy: Enum: `OpportunisticStartTLS`, `MandatoryStartTLS`, `NoStartTLS`. Either OpportunisticStartTLS, MandatoryStartTLS or NoStartTLS. Default is OpportunisticStartTLS.
         :param str username: Username for SMTP authentication.
         """
         pulumi.set(__self__, "from_address", from_address)
@@ -25064,7 +25909,7 @@ class GetGrafanaGrafanaUserConfigSmtpServerResult(dict):
     @pulumi.getter(name="starttlsPolicy")
     def starttls_policy(self) -> Optional[str]:
         """
-        Either OpportunisticStartTLS, MandatoryStartTLS or NoStartTLS. Default is OpportunisticStartTLS.
+        Enum: `OpportunisticStartTLS`, `MandatoryStartTLS`, `NoStartTLS`. Either OpportunisticStartTLS, MandatoryStartTLS or NoStartTLS. Default is OpportunisticStartTLS.
         """
         return pulumi.get(self, "starttls_policy")

@@ -25251,11 +26096,20 @@ class GetInfluxDbComponentResult(dict):
 @pulumi.output_type
 class GetInfluxDbInfluxdbResult(dict):
     def __init__(__self__, *,
-                 database_name: str):
+                 database_name: str,
+                 password: str,
+                 uris: Sequence[str],
+                 username: str):
         """
         :param str database_name: Name of the default InfluxDB database
+        :param str password: InfluxDB password
+        :param Sequence[str] uris: InfluxDB server URIs.
+        :param str username: InfluxDB username
         """
         pulumi.set(__self__, "database_name", database_name)
+        pulumi.set(__self__, "password", password)
+        pulumi.set(__self__, "uris", uris)
+        pulumi.set(__self__, "username", username)

     @property
     @pulumi.getter(name="databaseName")
@@ -25265,6 +26119,30 @@ class GetInfluxDbInfluxdbResult(dict):
         """
         return pulumi.get(self, "database_name")

+    @property
+    @pulumi.getter
+    def password(self) -> str:
+        """
+        InfluxDB password
+        """
+        return pulumi.get(self, "password")
+
+    @property
+    @pulumi.getter
+    def uris(self) -> Sequence[str]:
+        """
+        InfluxDB server URIs.
+        """
+        return pulumi.get(self, "uris")
+
+    @property
+    @pulumi.getter
+    def username(self) -> str:
+        """
+        InfluxDB username
+        """
+        return pulumi.get(self, "username")
+

 @pulumi.output_type
 class GetInfluxDbInfluxdbUserConfigResult(dict):
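The three new fields above bring the InfluxDB data source in line with the other service lookups, exposing credentials and server URIs directly on the result. A minimal consumption sketch follows; the `influxdbs` accessor and the project/service names are assumptions rather than something this diff shows, so verify them against the generated SDK.

import pulumi
import pulumi_aiven as aiven

# Look up an existing Aiven for InfluxDB service (names are placeholders).
influx = aiven.get_influx_db(project="my-project", service_name="my-influxdb")
conn = influx.influxdbs[0]  # nested result block; property name assumed

pulumi.export("influxdb_uris", conn.uris)
pulumi.export("influxdb_username", conn.username)
# Wrap the password so Pulumi stores it encrypted in stack outputs.
pulumi.export("influxdb_password", pulumi.Output.secret(conn.password))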
@@ -26074,10 +26952,10 @@ class GetKafkaConnectKafkaConnectUserConfigKafkaConnectResult(dict):
26074
26952
  scheduled_rebalance_max_delay_ms: Optional[int] = None,
26075
26953
  session_timeout_ms: Optional[int] = None):
26076
26954
  """
26077
- :param str connector_client_config_override_policy: Defines what client configurations can be overridden by the connector. Default is None.
26078
- :param str consumer_auto_offset_reset: What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
26955
+ :param str connector_client_config_override_policy: Enum: `None`, `All`. Defines what client configurations can be overridden by the connector. Default is None.
26956
+ :param str consumer_auto_offset_reset: Enum: `earliest`, `latest`. What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
26079
26957
  :param int consumer_fetch_max_bytes: Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not a absolute maximum.
26080
- :param str consumer_isolation_level: Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
26958
+ :param str consumer_isolation_level: Enum: `read_uncommitted`, `read_committed`. Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
26081
26959
  :param int consumer_max_partition_fetch_bytes: Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress.
26082
26960
  :param int consumer_max_poll_interval_ms: The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).
26083
26961
  :param int consumer_max_poll_records: The maximum number of records returned in a single call to poll() (defaults to 500).
@@ -26085,7 +26963,7 @@ class GetKafkaConnectKafkaConnectUserConfigKafkaConnectResult(dict):
26085
26963
  :param int offset_flush_timeout_ms: Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).
26086
26964
  :param int producer_batch_size: This setting gives the upper bound of the batch size to be sent. If there are fewer than this many bytes accumulated for this partition, the producer will 'linger' for the linger.ms time waiting for more records to show up. A batch size of zero will disable batching entirely (defaults to 16384).
26087
26965
  :param int producer_buffer_memory: The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432).
26088
- :param str producer_compression_type: Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
26966
+ :param str producer_compression_type: Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
26089
26967
  :param int producer_linger_ms: This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting, however if there are fewer than this many bytes accumulated for this partition the producer will 'linger' for the specified time waiting for more records to show up. Defaults to 0.
26090
26968
  :param int producer_max_request_size: This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests.
26091
26969
  :param int scheduled_rebalance_max_delay_ms: The maximum delay that is scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. Defaults to 5 minutes.
@@ -26128,7 +27006,7 @@ class GetKafkaConnectKafkaConnectUserConfigKafkaConnectResult(dict):
26128
27006
  @pulumi.getter(name="connectorClientConfigOverridePolicy")
26129
27007
  def connector_client_config_override_policy(self) -> Optional[str]:
26130
27008
  """
26131
- Defines what client configurations can be overridden by the connector. Default is None.
27009
+ Enum: `None`, `All`. Defines what client configurations can be overridden by the connector. Default is None.
26132
27010
  """
26133
27011
  return pulumi.get(self, "connector_client_config_override_policy")
26134
27012
 
@@ -26136,7 +27014,7 @@ class GetKafkaConnectKafkaConnectUserConfigKafkaConnectResult(dict):
26136
27014
  @pulumi.getter(name="consumerAutoOffsetReset")
26137
27015
  def consumer_auto_offset_reset(self) -> Optional[str]:
26138
27016
  """
26139
- What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
27017
+ Enum: `earliest`, `latest`. What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
26140
27018
  """
26141
27019
  return pulumi.get(self, "consumer_auto_offset_reset")
26142
27020
 
@@ -26152,7 +27030,7 @@ class GetKafkaConnectKafkaConnectUserConfigKafkaConnectResult(dict):
26152
27030
  @pulumi.getter(name="consumerIsolationLevel")
26153
27031
  def consumer_isolation_level(self) -> Optional[str]:
26154
27032
  """
26155
- Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
27033
+ Enum: `read_uncommitted`, `read_committed`. Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
26156
27034
  """
26157
27035
  return pulumi.get(self, "consumer_isolation_level")
26158
27036
 
@@ -26216,7 +27094,7 @@ class GetKafkaConnectKafkaConnectUserConfigKafkaConnectResult(dict):
26216
27094
  @pulumi.getter(name="producerCompressionType")
26217
27095
  def producer_compression_type(self) -> Optional[str]:
26218
27096
  """
26219
- Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
27097
+ Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
26220
27098
  """
26221
27099
  return pulumi.get(self, "producer_compression_type")
26222
27100
 
@@ -26470,19 +27348,22 @@ class GetKafkaKafkaResult(dict):
                  access_key: str,
                  connect_uri: str,
                  rest_uri: str,
-                 schema_registry_uri: str):
+                 schema_registry_uri: str,
+                 uris: Sequence[str]):
         """
         :param str access_cert: The Kafka client certificate.
         :param str access_key: The Kafka client certificate key.
         :param str connect_uri: The Kafka Connect URI.
         :param str rest_uri: The Kafka REST URI.
         :param str schema_registry_uri: The Schema Registry URI.
+        :param Sequence[str] uris: Kafka server URIs.
         """
         pulumi.set(__self__, "access_cert", access_cert)
         pulumi.set(__self__, "access_key", access_key)
         pulumi.set(__self__, "connect_uri", connect_uri)
         pulumi.set(__self__, "rest_uri", rest_uri)
         pulumi.set(__self__, "schema_registry_uri", schema_registry_uri)
+        pulumi.set(__self__, "uris", uris)

     @property
     @pulumi.getter(name="accessCert")
@@ -26524,6 +27405,14 @@ class GetKafkaKafkaResult(dict):
         """
         return pulumi.get(self, "schema_registry_uri")

+    @property
+    @pulumi.getter
+    def uris(self) -> Sequence[str]:
+        """
+        Kafka server URIs.
+        """
+        return pulumi.get(self, "uris")
+

 @pulumi.output_type
 class GetKafkaKafkaUserConfigResult(dict):
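The new `uris` output gives the Kafka bootstrap server list directly on the data-source result. A rough sketch of reading it, assuming the nested block is exposed as `kafkas` on the result object (check the SDK, the pluralized name is not confirmed by this diff):

import pulumi
import pulumi_aiven as aiven

kafka = aiven.get_kafka(project="my-project", service_name="my-kafka")
conn = kafka.kafkas[0]  # property name assumed

pulumi.export("kafka_bootstrap_uris", conn.uris)   # new in this release
pulumi.export("kafka_rest_uri", conn.rest_uri)
pulumi.export("kafka_connect_uri", conn.connect_uri)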
@@ -26564,7 +27453,7 @@ class GetKafkaKafkaUserConfigResult(dict):
26564
27453
  :param bool kafka_rest: Enable Kafka-REST service. The default value is `false`.
26565
27454
  :param bool kafka_rest_authorization: Enable authorization in Kafka-REST service.
26566
27455
  :param 'GetKafkaKafkaUserConfigKafkaRestConfigArgs' kafka_rest_config: Kafka REST configuration
26567
- :param str kafka_version: Kafka major version.
27456
+ :param str kafka_version: Enum: `3.1`, `3.2`, `3.3`, `3.4`, `3.5`, `3.6`, `3.7`, and newer. Kafka major version.
26568
27457
  :param 'GetKafkaKafkaUserConfigPrivateAccessArgs' private_access: Allow access to selected service ports from private networks
26569
27458
  :param 'GetKafkaKafkaUserConfigPrivatelinkAccessArgs' privatelink_access: Allow access to selected service components through Privatelink
26570
27459
  :param 'GetKafkaKafkaUserConfigPublicAccessArgs' public_access: Allow access to selected service ports from the public Internet
@@ -26733,7 +27622,7 @@ class GetKafkaKafkaUserConfigResult(dict):
26733
27622
  @pulumi.getter(name="kafkaVersion")
26734
27623
  def kafka_version(self) -> Optional[str]:
26735
27624
  """
26736
- Kafka major version.
27625
+ Enum: `3.1`, `3.2`, `3.3`, `3.4`, `3.5`, `3.6`, `3.7`, and newer. Kafka major version.
26737
27626
  """
26738
27627
  return pulumi.get(self, "kafka_version")
26739
27628
 
@@ -26883,7 +27772,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
26883
27772
  transaction_state_log_segment_bytes: Optional[int] = None):
26884
27773
  """
26885
27774
  :param bool auto_create_topics_enable: Enable auto creation of topics.
26886
- :param str compression_type: Specify the final compression type for a given topic. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'uncompressed' which is equivalent to no compression; and 'producer' which means retain the original compression codec set by the producer.
27775
+ :param str compression_type: Enum: `gzip`, `snappy`, `lz4`, `zstd`, `uncompressed`, `producer`. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'uncompressed' which is equivalent to no compression; and 'producer' which means retain the original compression codec set by the producer.
26887
27776
  :param int connections_max_idle_ms: Idle connections timeout: the server socket processor threads close the connections that idle for longer than this.
26888
27777
  :param int default_replication_factor: Replication factor for autocreated topics.
26889
27778
  :param int group_initial_rebalance_delay_ms: The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time.
@@ -26893,7 +27782,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
26893
27782
  :param int log_cleaner_max_compaction_lag_ms: The maximum amount of time message will remain uncompacted. Only applicable for logs that are being compacted.
26894
27783
  :param float log_cleaner_min_cleanable_ratio: Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option.
26895
27784
  :param int log_cleaner_min_compaction_lag_ms: The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.
26896
- :param str log_cleanup_policy: The default cleanup policy for segments beyond the retention window.
27785
+ :param str log_cleanup_policy: Enum: `delete`, `compact`, `compact,delete`. The default cleanup policy for segments beyond the retention window.
26897
27786
  :param int log_flush_interval_messages: The number of messages accumulated on a log partition before messages are flushed to disk.
26898
27787
  :param int log_flush_interval_ms: The maximum time in ms that a message in any topic is kept in memory before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used.
26899
27788
  :param int log_index_interval_bytes: The interval with which Kafka adds an entry to the offset index.
@@ -26902,7 +27791,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
26902
27791
  :param int log_local_retention_ms: The number of milliseconds to keep the local log segments before it gets eligible for deletion. If set to -2, the value of log.retention.ms is used. The effective value should always be less than or equal to log.retention.ms value.
26903
27792
  :param bool log_message_downconversion_enable: This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests.
26904
27793
  :param int log_message_timestamp_difference_max_ms: The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message.
26905
- :param str log_message_timestamp_type: Define whether the timestamp in the message is message create time or log append time.
27794
+ :param str log_message_timestamp_type: Enum: `CreateTime`, `LogAppendTime`. Define whether the timestamp in the message is message create time or log append time.
26906
27795
  :param bool log_preallocate: Should pre allocate file when create new segment?
26907
27796
  :param int log_retention_bytes: The maximum size of the log before deleting messages.
26908
27797
  :param int log_retention_hours: The number of hours to keep a log file before deleting it.
@@ -27034,7 +27923,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
27034
27923
  @pulumi.getter(name="compressionType")
27035
27924
  def compression_type(self) -> Optional[str]:
27036
27925
  """
27037
- Specify the final compression type for a given topic. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'uncompressed' which is equivalent to no compression; and 'producer' which means retain the original compression codec set by the producer.
27926
+ Enum: `gzip`, `snappy`, `lz4`, `zstd`, `uncompressed`, `producer`. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'uncompressed' which is equivalent to no compression; and 'producer' which means retain the original compression codec set by the producer.
27038
27927
  """
27039
27928
  return pulumi.get(self, "compression_type")
27040
27929
 
@@ -27114,7 +28003,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
27114
28003
  @pulumi.getter(name="logCleanupPolicy")
27115
28004
  def log_cleanup_policy(self) -> Optional[str]:
27116
28005
  """
27117
- The default cleanup policy for segments beyond the retention window.
28006
+ Enum: `delete`, `compact`, `compact,delete`. The default cleanup policy for segments beyond the retention window.
27118
28007
  """
27119
28008
  return pulumi.get(self, "log_cleanup_policy")
27120
28009
 
@@ -27186,7 +28075,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
27186
28075
  @pulumi.getter(name="logMessageTimestampType")
27187
28076
  def log_message_timestamp_type(self) -> Optional[str]:
27188
28077
  """
27189
- Define whether the timestamp in the message is message create time or log append time.
28078
+ Enum: `CreateTime`, `LogAppendTime`. Define whether the timestamp in the message is message create time or log append time.
27190
28079
  """
27191
28080
  return pulumi.get(self, "log_message_timestamp_type")
27192
28081
 
@@ -27442,10 +28331,10 @@ class GetKafkaKafkaUserConfigKafkaConnectConfigResult(dict):
27442
28331
  scheduled_rebalance_max_delay_ms: Optional[int] = None,
27443
28332
  session_timeout_ms: Optional[int] = None):
27444
28333
  """
27445
- :param str connector_client_config_override_policy: Defines what client configurations can be overridden by the connector. Default is None.
27446
- :param str consumer_auto_offset_reset: What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
28334
+ :param str connector_client_config_override_policy: Enum: `None`, `All`. Defines what client configurations can be overridden by the connector. Default is None.
28335
+ :param str consumer_auto_offset_reset: Enum: `earliest`, `latest`. What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
27447
28336
  :param int consumer_fetch_max_bytes: Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not a absolute maximum.
27448
- :param str consumer_isolation_level: Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
28337
+ :param str consumer_isolation_level: Enum: `read_uncommitted`, `read_committed`. Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
27449
28338
  :param int consumer_max_partition_fetch_bytes: Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress.
27450
28339
  :param int consumer_max_poll_interval_ms: The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).
27451
28340
  :param int consumer_max_poll_records: The maximum number of records returned in a single call to poll() (defaults to 500).
@@ -27453,7 +28342,7 @@ class GetKafkaKafkaUserConfigKafkaConnectConfigResult(dict):
27453
28342
  :param int offset_flush_timeout_ms: Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).
27454
28343
  :param int producer_batch_size: This setting gives the upper bound of the batch size to be sent. If there are fewer than this many bytes accumulated for this partition, the producer will 'linger' for the linger.ms time waiting for more records to show up. A batch size of zero will disable batching entirely (defaults to 16384).
27455
28344
  :param int producer_buffer_memory: The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432).
27456
- :param str producer_compression_type: Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
28345
+ :param str producer_compression_type: Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
27457
28346
  :param int producer_linger_ms: This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting, however if there are fewer than this many bytes accumulated for this partition the producer will 'linger' for the specified time waiting for more records to show up. Defaults to 0.
27458
28347
  :param int producer_max_request_size: This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests.
27459
28348
  :param int scheduled_rebalance_max_delay_ms: The maximum delay that is scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. Defaults to 5 minutes.
@@ -27496,7 +28385,7 @@ class GetKafkaKafkaUserConfigKafkaConnectConfigResult(dict):
27496
28385
  @pulumi.getter(name="connectorClientConfigOverridePolicy")
27497
28386
  def connector_client_config_override_policy(self) -> Optional[str]:
27498
28387
  """
27499
- Defines what client configurations can be overridden by the connector. Default is None.
28388
+ Enum: `None`, `All`. Defines what client configurations can be overridden by the connector. Default is None.
27500
28389
  """
27501
28390
  return pulumi.get(self, "connector_client_config_override_policy")
27502
28391
 
@@ -27504,7 +28393,7 @@ class GetKafkaKafkaUserConfigKafkaConnectConfigResult(dict):
27504
28393
  @pulumi.getter(name="consumerAutoOffsetReset")
27505
28394
  def consumer_auto_offset_reset(self) -> Optional[str]:
27506
28395
  """
27507
- What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
28396
+ Enum: `earliest`, `latest`. What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
27508
28397
  """
27509
28398
  return pulumi.get(self, "consumer_auto_offset_reset")
27510
28399
 
@@ -27520,7 +28409,7 @@ class GetKafkaKafkaUserConfigKafkaConnectConfigResult(dict):
27520
28409
  @pulumi.getter(name="consumerIsolationLevel")
27521
28410
  def consumer_isolation_level(self) -> Optional[str]:
27522
28411
  """
27523
- Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
28412
+ Enum: `read_uncommitted`, `read_committed`. Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
27524
28413
  """
27525
28414
  return pulumi.get(self, "consumer_isolation_level")
27526
28415
 
@@ -27584,7 +28473,7 @@ class GetKafkaKafkaUserConfigKafkaConnectConfigResult(dict):
27584
28473
  @pulumi.getter(name="producerCompressionType")
27585
28474
  def producer_compression_type(self) -> Optional[str]:
27586
28475
  """
27587
- Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
28476
+ Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
27588
28477
  """
27589
28478
  return pulumi.get(self, "producer_compression_type")
27590
28479
 
@@ -27637,11 +28526,11 @@ class GetKafkaKafkaUserConfigKafkaRestConfigResult(dict):
27637
28526
  """
27638
28527
  :param bool consumer_enable_auto_commit: If true the consumer's offset will be periodically committed to Kafka in the background. The default value is `true`.
27639
28528
  :param int consumer_request_max_bytes: Maximum number of bytes in unencoded message keys and values by a single request. The default value is `67108864`.
27640
- :param int consumer_request_timeout_ms: The maximum total time to wait for messages for a request if the maximum number of messages has not yet been reached. The default value is `1000`.
27641
- :param str name_strategy: Name strategy to use when selecting subject for storing schemas. The default value is `topic_name`.
28529
+ :param int consumer_request_timeout_ms: Enum: `1000`, `15000`, `30000`. The maximum total time to wait for messages for a request if the maximum number of messages has not yet been reached. The default value is `1000`.
28530
+ :param str name_strategy: Enum: `topic_name`, `record_name`, `topic_record_name`. Name strategy to use when selecting subject for storing schemas. The default value is `topic_name`.
27642
28531
  :param bool name_strategy_validation: If true, validate that given schema is registered under expected subject name by the used name strategy when producing messages. The default value is `true`.
27643
- :param str producer_acks: The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to 'all' or '-1', the leader will wait for the full set of in-sync replicas to acknowledge the record. The default value is `1`.
27644
- :param str producer_compression_type: Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
28532
+ :param str producer_acks: Enum: `all`, `-1`, `0`, `1`. The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to 'all' or '-1', the leader will wait for the full set of in-sync replicas to acknowledge the record. The default value is `1`.
28533
+ :param str producer_compression_type: Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
27645
28534
  :param int producer_linger_ms: Wait for up to the given delay to allow batching records together. The default value is `0`.
27646
28535
  :param int producer_max_request_size: The maximum size of a request in bytes. Note that Kafka broker can also cap the record batch size. The default value is `1048576`.
27647
28536
  :param int simpleconsumer_pool_size_max: Maximum number of SimpleConsumers that can be instantiated per broker. The default value is `25`.
@@ -27687,7 +28576,7 @@ class GetKafkaKafkaUserConfigKafkaRestConfigResult(dict):
27687
28576
  @pulumi.getter(name="consumerRequestTimeoutMs")
27688
28577
  def consumer_request_timeout_ms(self) -> Optional[int]:
27689
28578
  """
27690
- The maximum total time to wait for messages for a request if the maximum number of messages has not yet been reached. The default value is `1000`.
28579
+ Enum: `1000`, `15000`, `30000`. The maximum total time to wait for messages for a request if the maximum number of messages has not yet been reached. The default value is `1000`.
27691
28580
  """
27692
28581
  return pulumi.get(self, "consumer_request_timeout_ms")
27693
28582
 
@@ -27695,7 +28584,7 @@ class GetKafkaKafkaUserConfigKafkaRestConfigResult(dict):
27695
28584
  @pulumi.getter(name="nameStrategy")
27696
28585
  def name_strategy(self) -> Optional[str]:
27697
28586
  """
27698
- Name strategy to use when selecting subject for storing schemas. The default value is `topic_name`.
28587
+ Enum: `topic_name`, `record_name`, `topic_record_name`. Name strategy to use when selecting subject for storing schemas. The default value is `topic_name`.
27699
28588
  """
27700
28589
  return pulumi.get(self, "name_strategy")
27701
28590
 
@@ -27711,7 +28600,7 @@ class GetKafkaKafkaUserConfigKafkaRestConfigResult(dict):
27711
28600
  @pulumi.getter(name="producerAcks")
27712
28601
  def producer_acks(self) -> Optional[str]:
27713
28602
  """
27714
- The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to 'all' or '-1', the leader will wait for the full set of in-sync replicas to acknowledge the record. The default value is `1`.
28603
+ Enum: `all`, `-1`, `0`, `1`. The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to 'all' or '-1', the leader will wait for the full set of in-sync replicas to acknowledge the record. The default value is `1`.
27715
28604
  """
27716
28605
  return pulumi.get(self, "producer_acks")
27717
28606
 
@@ -27719,7 +28608,7 @@ class GetKafkaKafkaUserConfigKafkaRestConfigResult(dict):
27719
28608
  @pulumi.getter(name="producerCompressionType")
27720
28609
  def producer_compression_type(self) -> Optional[str]:
27721
28610
  """
27722
- Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
28611
+ Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
27723
28612
  """
27724
28613
  return pulumi.get(self, "producer_compression_type")
27725
28614
 
@@ -29044,6 +29933,35 @@ class GetM3AggregatorComponentResult(dict):
         return pulumi.get(self, "usage")


+@pulumi.output_type
+class GetM3AggregatorM3aggregatorResult(dict):
+    def __init__(__self__, *,
+                 aggregator_http_uri: str,
+                 uris: Sequence[str]):
+        """
+        :param str aggregator_http_uri: M3 Aggregator HTTP URI.
+        :param Sequence[str] uris: M3 Aggregator server URIs.
+        """
+        pulumi.set(__self__, "aggregator_http_uri", aggregator_http_uri)
+        pulumi.set(__self__, "uris", uris)
+
+    @property
+    @pulumi.getter(name="aggregatorHttpUri")
+    def aggregator_http_uri(self) -> str:
+        """
+        M3 Aggregator HTTP URI.
+        """
+        return pulumi.get(self, "aggregator_http_uri")
+
+    @property
+    @pulumi.getter
+    def uris(self) -> Sequence[str]:
+        """
+        M3 Aggregator server URIs.
+        """
+        return pulumi.get(self, "uris")
+
+
 @pulumi.output_type
 class GetM3AggregatorM3aggregatorUserConfigResult(dict):
     def __init__(__self__, *,
@@ -29060,8 +29978,8 @@ class GetM3AggregatorM3aggregatorUserConfigResult(dict):
29060
29978
  :param Sequence['GetM3AggregatorM3aggregatorUserConfigIpFilterObjectArgs'] ip_filter_objects: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'
29061
29979
  :param Sequence[str] ip_filter_strings: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
29062
29980
  :param Sequence[str] ip_filters: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
29063
- :param str m3_version: M3 major version (deprecated, use m3aggregator_version).
29064
- :param str m3aggregator_version: M3 major version (the minimum compatible version).
29981
+ :param str m3_version: Enum: `1.1`, `1.2`, `1.5`, and newer. M3 major version (deprecated, use m3aggregator_version).
29982
+ :param str m3aggregator_version: Enum: `1.1`, `1.2`, `1.5`, and newer. M3 major version (the minimum compatible version).
29065
29983
  :param bool service_log: Store logs for the service so that they are available in the HTTP API and console.
29066
29984
  :param bool static_ips: Use static public IP addresses.
29067
29985
  """
@@ -29121,7 +30039,7 @@ class GetM3AggregatorM3aggregatorUserConfigResult(dict):
29121
30039
  @pulumi.getter(name="m3Version")
29122
30040
  def m3_version(self) -> Optional[str]:
29123
30041
  """
29124
- M3 major version (deprecated, use m3aggregator_version).
30042
+ Enum: `1.1`, `1.2`, `1.5`, and newer. M3 major version (deprecated, use m3aggregator_version).
29125
30043
  """
29126
30044
  return pulumi.get(self, "m3_version")
29127
30045
 
@@ -29129,7 +30047,7 @@ class GetM3AggregatorM3aggregatorUserConfigResult(dict):
29129
30047
  @pulumi.getter(name="m3aggregatorVersion")
29130
30048
  def m3aggregator_version(self) -> Optional[str]:
29131
30049
  """
29132
- M3 major version (the minimum compatible version).
30050
+ Enum: `1.1`, `1.2`, `1.5`, and newer. M3 major version (the minimum compatible version).
29133
30051
  """
29134
30052
  return pulumi.get(self, "m3aggregator_version")
29135
30053
 
@@ -29351,6 +30269,79 @@ class GetM3DbComponentResult(dict):
         return pulumi.get(self, "usage")


+@pulumi.output_type
+class GetM3DbM3dbResult(dict):
+    def __init__(__self__, *,
+                 http_cluster_uri: str,
+                 http_node_uri: str,
+                 influxdb_uri: str,
+                 prometheus_remote_read_uri: str,
+                 prometheus_remote_write_uri: str,
+                 uris: Sequence[str]):
+        """
+        :param str http_cluster_uri: M3DB cluster URI.
+        :param str http_node_uri: M3DB node URI.
+        :param str influxdb_uri: InfluxDB URI.
+        :param str prometheus_remote_read_uri: Prometheus remote read URI.
+        :param str prometheus_remote_write_uri: Prometheus remote write URI.
+        :param Sequence[str] uris: M3DB server URIs.
+        """
+        pulumi.set(__self__, "http_cluster_uri", http_cluster_uri)
+        pulumi.set(__self__, "http_node_uri", http_node_uri)
+        pulumi.set(__self__, "influxdb_uri", influxdb_uri)
+        pulumi.set(__self__, "prometheus_remote_read_uri", prometheus_remote_read_uri)
+        pulumi.set(__self__, "prometheus_remote_write_uri", prometheus_remote_write_uri)
+        pulumi.set(__self__, "uris", uris)
+
+    @property
+    @pulumi.getter(name="httpClusterUri")
+    def http_cluster_uri(self) -> str:
+        """
+        M3DB cluster URI.
+        """
+        return pulumi.get(self, "http_cluster_uri")
+
+    @property
+    @pulumi.getter(name="httpNodeUri")
+    def http_node_uri(self) -> str:
+        """
+        M3DB node URI.
+        """
+        return pulumi.get(self, "http_node_uri")
+
+    @property
+    @pulumi.getter(name="influxdbUri")
+    def influxdb_uri(self) -> str:
+        """
+        InfluxDB URI.
+        """
+        return pulumi.get(self, "influxdb_uri")
+
+    @property
+    @pulumi.getter(name="prometheusRemoteReadUri")
+    def prometheus_remote_read_uri(self) -> str:
+        """
+        Prometheus remote read URI.
+        """
+        return pulumi.get(self, "prometheus_remote_read_uri")
+
+    @property
+    @pulumi.getter(name="prometheusRemoteWriteUri")
+    def prometheus_remote_write_uri(self) -> str:
+        """
+        Prometheus remote write URI.
+        """
+        return pulumi.get(self, "prometheus_remote_write_uri")
+
+    @property
+    @pulumi.getter
+    def uris(self) -> Sequence[str]:
+        """
+        M3DB server URIs.
+        """
+        return pulumi.get(self, "uris")
+
+
 @pulumi.output_type
 class GetM3DbM3dbUserConfigResult(dict):
     def __init__(__self__, *,
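The new `GetM3DbM3dbResult` block is mostly useful for wiring Prometheus remote read/write against M3DB. A sketch under the assumption that the data source exposes it as `m3dbs`; both that property name and the service names are placeholders:

import pulumi
import pulumi_aiven as aiven

m3db = aiven.get_m3_db(project="my-project", service_name="my-m3db")
conn = m3db.m3dbs[0]  # property name assumed

pulumi.export("prometheus_remote_write_uri", conn.prometheus_remote_write_uri)
pulumi.export("prometheus_remote_read_uri", conn.prometheus_remote_read_uri)
pulumi.export("m3db_influxdb_uri", conn.influxdb_uri)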
@@ -29380,9 +30371,9 @@ class GetM3DbM3dbUserConfigResult(dict):
29380
30371
  :param Sequence[str] ip_filters: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
29381
30372
  :param 'GetM3DbM3dbUserConfigLimitsArgs' limits: M3 limits
29382
30373
  :param 'GetM3DbM3dbUserConfigM3Args' m3: M3 specific configuration options
29383
- :param str m3_version: M3 major version (deprecated, use m3db_version).
30374
+ :param str m3_version: Enum: `1.1`, `1.2`, `1.5`, and newer. M3 major version (deprecated, use m3db_version).
29384
30375
  :param bool m3coordinator_enable_graphite_carbon_ingest: Enables access to Graphite Carbon plaintext metrics ingestion. It can be enabled only for services inside VPCs. The metrics are written to aggregated namespaces only.
29385
- :param str m3db_version: M3 major version (the minimum compatible version).
30376
+ :param str m3db_version: Enum: `1.1`, `1.2`, `1.5`, and newer. M3 major version (the minimum compatible version).
29386
30377
  :param Sequence['GetM3DbM3dbUserConfigNamespaceArgs'] namespaces: List of M3 namespaces
29387
30378
  :param 'GetM3DbM3dbUserConfigPrivateAccessArgs' private_access: Allow access to selected service ports from private networks
29388
30379
  :param str project_to_fork_from: Name of another project to fork a service from. This has effect only when a new service is being created.
@@ -29492,7 +30483,7 @@ class GetM3DbM3dbUserConfigResult(dict):
29492
30483
  @pulumi.getter(name="m3Version")
29493
30484
  def m3_version(self) -> Optional[str]:
29494
30485
  """
29495
- M3 major version (deprecated, use m3db_version).
30486
+ Enum: `1.1`, `1.2`, `1.5`, and newer. M3 major version (deprecated, use m3db_version).
29496
30487
  """
29497
30488
  return pulumi.get(self, "m3_version")
29498
30489
 
@@ -29508,7 +30499,7 @@ class GetM3DbM3dbUserConfigResult(dict):
29508
30499
  @pulumi.getter(name="m3dbVersion")
29509
30500
  def m3db_version(self) -> Optional[str]:
29510
30501
  """
29511
- M3 major version (the minimum compatible version).
30502
+ Enum: `1.1`, `1.2`, `1.5`, and newer. M3 major version (the minimum compatible version).
29512
30503
  """
29513
30504
  return pulumi.get(self, "m3db_version")
29514
30505
 
@@ -29745,7 +30736,7 @@ class GetM3DbM3dbUserConfigNamespaceResult(dict):
29745
30736
  resolution: Optional[str] = None):
29746
30737
  """
29747
30738
  :param str name: The name of the namespace.
29748
- :param str type: The type of aggregation (aggregated/unaggregated).
30739
+ :param str type: Enum: `aggregated`, `unaggregated`. The type of aggregation (aggregated/unaggregated).
29749
30740
  :param 'GetM3DbM3dbUserConfigNamespaceOptionsArgs' options: Namespace options
29750
30741
  :param str resolution: The resolution for an aggregated namespace.
29751
30742
  """
@@ -29768,7 +30759,7 @@ class GetM3DbM3dbUserConfigNamespaceResult(dict):
29768
30759
  @pulumi.getter
29769
30760
  def type(self) -> str:
29770
30761
  """
29771
- The type of aggregation (aggregated/unaggregated).
30762
+ Enum: `aggregated`, `unaggregated`. The type of aggregation (aggregated/unaggregated).
29772
30763
  """
29773
30764
  return pulumi.get(self, "type")
29774
30765
 
@@ -30290,6 +31281,141 @@ class GetMySqlComponentResult(dict):
         return pulumi.get(self, "usage")


+@pulumi.output_type
+class GetMySqlMysqlResult(dict):
+    def __init__(__self__, *,
+                 params: Sequence['outputs.GetMySqlMysqlParamResult'],
+                 replica_uri: str,
+                 standby_uris: Sequence[str],
+                 syncing_uris: Sequence[str],
+                 uris: Sequence[str]):
+        """
+        :param Sequence['GetMySqlMysqlParamArgs'] params: MySQL connection parameters
+        :param str replica_uri: MySQL replica URI for services with a replica
+        :param Sequence[str] standby_uris: MySQL standby connection URIs
+        :param Sequence[str] syncing_uris: MySQL syncing connection URIs
+        :param Sequence[str] uris: MySQL master connection URIs
+        """
+        pulumi.set(__self__, "params", params)
+        pulumi.set(__self__, "replica_uri", replica_uri)
+        pulumi.set(__self__, "standby_uris", standby_uris)
+        pulumi.set(__self__, "syncing_uris", syncing_uris)
+        pulumi.set(__self__, "uris", uris)
+
+    @property
+    @pulumi.getter
+    def params(self) -> Sequence['outputs.GetMySqlMysqlParamResult']:
+        """
+        MySQL connection parameters
+        """
+        return pulumi.get(self, "params")
+
+    @property
+    @pulumi.getter(name="replicaUri")
+    def replica_uri(self) -> str:
+        """
+        MySQL replica URI for services with a replica
+        """
+        return pulumi.get(self, "replica_uri")
+
+    @property
+    @pulumi.getter(name="standbyUris")
+    def standby_uris(self) -> Sequence[str]:
+        """
+        MySQL standby connection URIs
+        """
+        return pulumi.get(self, "standby_uris")
+
+    @property
+    @pulumi.getter(name="syncingUris")
+    def syncing_uris(self) -> Sequence[str]:
+        """
+        MySQL syncing connection URIs
+        """
+        return pulumi.get(self, "syncing_uris")
+
+    @property
+    @pulumi.getter
+    def uris(self) -> Sequence[str]:
+        """
+        MySQL master connection URIs
+        """
+        return pulumi.get(self, "uris")
+
+
+@pulumi.output_type
+class GetMySqlMysqlParamResult(dict):
+    def __init__(__self__, *,
+                 database_name: str,
+                 host: str,
+                 password: str,
+                 port: int,
+                 sslmode: str,
+                 user: str):
+        """
+        :param str database_name: Primary MySQL database name
+        :param str host: MySQL host IP or name
+        :param str password: MySQL admin user password
+        :param int port: MySQL port
+        :param str sslmode: MySQL sslmode setting (currently always "require")
+        :param str user: MySQL admin user name
+        """
+        pulumi.set(__self__, "database_name", database_name)
+        pulumi.set(__self__, "host", host)
+        pulumi.set(__self__, "password", password)
+        pulumi.set(__self__, "port", port)
+        pulumi.set(__self__, "sslmode", sslmode)
+        pulumi.set(__self__, "user", user)
+
+    @property
+    @pulumi.getter(name="databaseName")
+    def database_name(self) -> str:
+        """
+        Primary MySQL database name
+        """
+        return pulumi.get(self, "database_name")
+
+    @property
+    @pulumi.getter
+    def host(self) -> str:
+        """
+        MySQL host IP or name
+        """
+        return pulumi.get(self, "host")
+
+    @property
+    @pulumi.getter
+    def password(self) -> str:
+        """
+        MySQL admin user password
+        """
+        return pulumi.get(self, "password")
+
+    @property
+    @pulumi.getter
+    def port(self) -> int:
+        """
+        MySQL port
+        """
+        return pulumi.get(self, "port")
+
+    @property
+    @pulumi.getter
+    def sslmode(self) -> str:
+        """
+        MySQL sslmode setting (currently always "require")
+        """
+        return pulumi.get(self, "sslmode")
+
+    @property
+    @pulumi.getter
+    def user(self) -> str:
+        """
+        MySQL admin user name
+        """
+        return pulumi.get(self, "user")
+
+
 @pulumi.output_type
 class GetMySqlMysqlUserConfigResult(dict):
     def __init__(__self__, *,
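`GetMySqlMysqlResult.params` exposes the connection pieces as structured fields, which is handy for assembling a DSN instead of string-parsing a URI. A sketch only; the `mysqls` and `params` accessors on the data-source result are assumptions to verify against the SDK:

import pulumi
import pulumi_aiven as aiven

mysql = aiven.get_my_sql(project="my-project", service_name="my-mysql")
param = mysql.mysqls[0].params[0]  # accessor names assumed

# Assemble a DSN from the structured parameters and keep it secret.
dsn = pulumi.Output.secret(
    f"mysql://{param.user}:{param.password}@{param.host}:{param.port}"
    f"/{param.database_name}?ssl-mode={param.sslmode}"
)
pulumi.export("mysql_dsn", dsn)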
@@ -30325,7 +31451,7 @@ class GetMySqlMysqlUserConfigResult(dict):
30325
31451
  :param Sequence[str] ip_filters: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
30326
31452
  :param 'GetMySqlMysqlUserConfigMigrationArgs' migration: Migrate data from existing server
30327
31453
  :param 'GetMySqlMysqlUserConfigMysqlArgs' mysql: mysql.conf configuration values
30328
- :param str mysql_version: MySQL major version.
31454
+ :param str mysql_version: Enum: `8`, and newer. MySQL major version.
30329
31455
  :param 'GetMySqlMysqlUserConfigPrivateAccessArgs' private_access: Allow access to selected service ports from private networks
30330
31456
  :param 'GetMySqlMysqlUserConfigPrivatelinkAccessArgs' privatelink_access: Allow access to selected service components through Privatelink
30331
31457
  :param str project_to_fork_from: Name of another project to fork a service from. This has effect only when a new service is being created.
@@ -30471,7 +31597,7 @@ class GetMySqlMysqlUserConfigResult(dict):
30471
31597
  @pulumi.getter(name="mysqlVersion")
30472
31598
  def mysql_version(self) -> Optional[str]:
30473
31599
  """
30474
- MySQL major version.
31600
+ Enum: `8`, and newer. MySQL major version.
30475
31601
  """
30476
31602
  return pulumi.get(self, "mysql_version")
30477
31603
 
@@ -30586,7 +31712,7 @@ class GetMySqlMysqlUserConfigMigrationResult(dict):
30586
31712
  :param int port: Port number of the server where to migrate data from.
30587
31713
  :param str dbname: Database name for bootstrapping the initial connection.
30588
31714
  :param str ignore_dbs: Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment).
30589
- :param str method: The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
31715
+ :param str method: Enum: `dump`, `replication`. The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
30590
31716
  :param str password: Password for authentication with the server where to migrate data from.
30591
31717
  :param bool ssl: The server where to migrate data from is secured with SSL. The default value is `true`.
30592
31718
  :param str username: User name for authentication with the server where to migrate data from.
@@ -30642,7 +31768,7 @@ class GetMySqlMysqlUserConfigMigrationResult(dict):
30642
31768
  @pulumi.getter
30643
31769
  def method(self) -> Optional[str]:
30644
31770
  """
30645
- The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
31771
+ Enum: `dump`, `replication`. The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
30646
31772
  """
30647
31773
  return pulumi.get(self, "method")
30648
31774
 
@@ -30722,7 +31848,7 @@ class GetMySqlMysqlUserConfigMysqlResult(dict):
30722
31848
  :param int innodb_thread_concurrency: Defines the maximum number of threads permitted inside of InnoDB. Default is 0 (infinite concurrency - no limit).
30723
31849
  :param int innodb_write_io_threads: The number of I/O threads for write operations in InnoDB. Default is 4. Changing this parameter will lead to a restart of the MySQL service.
30724
31850
  :param int interactive_timeout: The number of seconds the server waits for activity on an interactive connection before closing it.
30725
- :param str internal_tmp_mem_storage_engine: The storage engine for in-memory internal temporary tables.
31851
+ :param str internal_tmp_mem_storage_engine: Enum: `TempTable`, `MEMORY`. The storage engine for in-memory internal temporary tables.
30726
31852
  :param float long_query_time: The slow_query_logs work as SQL statements that take more than long_query_time seconds to execute. Default is 10s.
30727
31853
  :param int max_allowed_packet: Size of the largest message in bytes that can be received by the server. Default is 67108864 (64M).
30728
31854
  :param int max_heap_table_size: Limits the size of internal in-memory tables. Also set tmp_table_size. Default is 16777216 (16M).
@@ -30937,7 +32063,7 @@ class GetMySqlMysqlUserConfigMysqlResult(dict):
30937
32063
  @pulumi.getter(name="internalTmpMemStorageEngine")
30938
32064
  def internal_tmp_mem_storage_engine(self) -> Optional[str]:
30939
32065
  """
30940
- The storage engine for in-memory internal temporary tables.
32066
+ Enum: `TempTable`, `MEMORY`. The storage engine for in-memory internal temporary tables.
30941
32067
  """
30942
32068
  return pulumi.get(self, "internal_tmp_mem_storage_engine")
30943
32069
 
@@ -31341,11 +32467,31 @@ class GetOpenSearchComponentResult(dict):
 @pulumi.output_type
 class GetOpenSearchOpensearchResult(dict):
     def __init__(__self__, *,
-                 opensearch_dashboards_uri: str):
+                 kibana_uri: str,
+                 opensearch_dashboards_uri: str,
+                 password: str,
+                 uris: Sequence[str],
+                 username: str):
         """
+        :param str kibana_uri: URI for Kibana dashboard frontend
         :param str opensearch_dashboards_uri: URI for OpenSearch dashboard frontend
+        :param str password: OpenSearch password
+        :param Sequence[str] uris: OpenSearch server URIs.
+        :param str username: OpenSearch username
         """
+        pulumi.set(__self__, "kibana_uri", kibana_uri)
         pulumi.set(__self__, "opensearch_dashboards_uri", opensearch_dashboards_uri)
+        pulumi.set(__self__, "password", password)
+        pulumi.set(__self__, "uris", uris)
+        pulumi.set(__self__, "username", username)
+
+    @property
+    @pulumi.getter(name="kibanaUri")
+    def kibana_uri(self) -> str:
+        """
+        URI for Kibana dashboard frontend
+        """
+        return pulumi.get(self, "kibana_uri")

     @property
     @pulumi.getter(name="opensearchDashboardsUri")
@@ -31355,6 +32501,30 @@ class GetOpenSearchOpensearchResult(dict):
         """
         return pulumi.get(self, "opensearch_dashboards_uri")

+    @property
+    @pulumi.getter
+    def password(self) -> str:
+        """
+        OpenSearch password
+        """
+        return pulumi.get(self, "password")
+
+    @property
+    @pulumi.getter
+    def uris(self) -> Sequence[str]:
+        """
+        OpenSearch server URIs.
+        """
+        return pulumi.get(self, "uris")
+
+    @property
+    @pulumi.getter
+    def username(self) -> str:
+        """
+        OpenSearch username
+        """
+        return pulumi.get(self, "username")
+

 @pulumi.output_type
 class GetOpenSearchOpensearchUserConfigResult(dict):
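The OpenSearch result now carries credentials and server URIs next to the dashboard endpoints, so a downstream stack can consume them without a separate service-user lookup. Sketch only, with `opensearches` as an assumed property name on the data-source result:

import pulumi
import pulumi_aiven as aiven

opensearch = aiven.get_open_search(project="my-project", service_name="my-opensearch")
conn = opensearch.opensearches[0]  # property name assumed

pulumi.export("opensearch_uris", conn.uris)
pulumi.export("opensearch_dashboards_uri", conn.opensearch_dashboards_uri)
pulumi.export("opensearch_password", pulumi.Output.secret(conn.password))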
@@ -31396,7 +32566,7 @@ class GetOpenSearchOpensearchUserConfigResult(dict):
31396
32566
  :param 'GetOpenSearchOpensearchUserConfigOpenidArgs' openid: OpenSearch OpenID Connect Configuration
31397
32567
  :param 'GetOpenSearchOpensearchUserConfigOpensearchArgs' opensearch: OpenSearch settings
31398
32568
  :param 'GetOpenSearchOpensearchUserConfigOpensearchDashboardsArgs' opensearch_dashboards: OpenSearch Dashboards settings
31399
- :param str opensearch_version: OpenSearch major version.
32569
+ :param str opensearch_version: Enum: `1`, `2`, and newer. OpenSearch major version.
31400
32570
  :param 'GetOpenSearchOpensearchUserConfigPrivateAccessArgs' private_access: Allow access to selected service ports from private networks
31401
32571
  :param 'GetOpenSearchOpensearchUserConfigPrivatelinkAccessArgs' privatelink_access: Allow access to selected service components through Privatelink
31402
32572
  :param str project_to_fork_from: Name of another project to fork a service from. This has effect only when a new service is being created.
@@ -31565,7 +32735,7 @@ class GetOpenSearchOpensearchUserConfigResult(dict):
31565
32735
  @pulumi.getter(name="opensearchVersion")
31566
32736
  def opensearch_version(self) -> Optional[str]:
31567
32737
  """
31568
- OpenSearch major version.
32738
+ Enum: `1`, `2`, and newer. OpenSearch major version.
31569
32739
  """
31570
32740
  return pulumi.get(self, "opensearch_version")
31571
32741
 
@@ -31651,7 +32821,7 @@ class GetOpenSearchOpensearchUserConfigIndexPatternResult(dict):
31651
32821
  """
31652
32822
  :param int max_index_count: Maximum number of indexes to keep.
31653
32823
  :param str pattern: fnmatch pattern.
31654
- :param str sorting_algorithm: Deletion sorting algorithm. The default value is `creation_date`.
32824
+ :param str sorting_algorithm: Enum: `alphabetical`, `creation_date`. Deletion sorting algorithm. The default value is `creation_date`.
31655
32825
  """
31656
32826
  pulumi.set(__self__, "max_index_count", max_index_count)
31657
32827
  pulumi.set(__self__, "pattern", pattern)
@@ -31678,7 +32848,7 @@ class GetOpenSearchOpensearchUserConfigIndexPatternResult(dict):
31678
32848
  @pulumi.getter(name="sortingAlgorithm")
31679
32849
  def sorting_algorithm(self) -> Optional[str]:
31680
32850
  """
31681
- Deletion sorting algorithm. The default value is `creation_date`.
32851
+ Enum: `alphabetical`, `creation_date`. Deletion sorting algorithm. The default value is `creation_date`.
31682
32852
  """
31683
32853
  return pulumi.get(self, "sorting_algorithm")
31684
32854
 
@@ -32453,12 +33623,12 @@ class GetOpenSearchOpensearchUserConfigOpensearchAuthFailureListenersInternalAut
32453
33623
  type: Optional[str] = None):
32454
33624
  """
32455
33625
  :param int allowed_tries: The number of login attempts allowed before login is blocked.
32456
- :param str authentication_backend: internal_authentication_backend_limiting.authentication_backend.
33626
+ :param str authentication_backend: Enum: `internal`. internal_authentication_backend_limiting.authentication_backend.
32457
33627
  :param int block_expiry_seconds: The duration of time that login remains blocked after a failed login.
32458
33628
  :param int max_blocked_clients: internal_authentication_backend_limiting.max_blocked_clients.
32459
33629
  :param int max_tracked_clients: The maximum number of tracked IP addresses that have failed login.
32460
33630
  :param int time_window_seconds: The window of time in which the value for `allowed_tries` is enforced.
32461
- :param str type: internal_authentication_backend_limiting.type.
33631
+ :param str type: Enum: `username`. internal_authentication_backend_limiting.type.
32462
33632
  """
32463
33633
  if allowed_tries is not None:
32464
33634
  pulumi.set(__self__, "allowed_tries", allowed_tries)
@@ -32487,7 +33657,7 @@ class GetOpenSearchOpensearchUserConfigOpensearchAuthFailureListenersInternalAut
32487
33657
  @pulumi.getter(name="authenticationBackend")
32488
33658
  def authentication_backend(self) -> Optional[str]:
32489
33659
  """
32490
- internal_authentication_backend_limiting.authentication_backend.
33660
+ Enum: `internal`. internal_authentication_backend_limiting.authentication_backend.
32491
33661
  """
32492
33662
  return pulumi.get(self, "authentication_backend")
32493
33663
 
@@ -32527,7 +33697,7 @@ class GetOpenSearchOpensearchUserConfigOpensearchAuthFailureListenersInternalAut
32527
33697
  @pulumi.getter
32528
33698
  def type(self) -> Optional[str]:
32529
33699
  """
32530
- internal_authentication_backend_limiting.type.
33700
+ Enum: `username`. internal_authentication_backend_limiting.type.
32531
33701
  """
32532
33702
  return pulumi.get(self, "type")
32533
33703
 
@@ -32547,7 +33717,7 @@ class GetOpenSearchOpensearchUserConfigOpensearchAuthFailureListenersIpRateLimit
32547
33717
  :param int max_blocked_clients: The maximum number of blocked IP addresses.
32548
33718
  :param int max_tracked_clients: The maximum number of tracked IP addresses that have failed login.
32549
33719
  :param int time_window_seconds: The window of time in which the value for `allowed_tries` is enforced.
32550
- :param str type: The type of rate limiting.
33720
+ :param str type: Enum: `ip`. The type of rate limiting.
32551
33721
  """
32552
33722
  if allowed_tries is not None:
32553
33723
  pulumi.set(__self__, "allowed_tries", allowed_tries)
@@ -32606,7 +33776,7 @@ class GetOpenSearchOpensearchUserConfigOpensearchAuthFailureListenersIpRateLimit
32606
33776
  @pulumi.getter
32607
33777
  def type(self) -> Optional[str]:
32608
33778
  """
32609
- The type of rate limiting.
33779
+ Enum: `ip`. The type of rate limiting.
32610
33780
  """
32611
33781
  return pulumi.get(self, "type")
32612
33782
 
@@ -33044,36 +34214,59 @@ class GetPgComponentResult(dict):
33044
34214
  @pulumi.output_type
33045
34215
  class GetPgPgResult(dict):
33046
34216
  def __init__(__self__, *,
34217
+ bouncer: str,
33047
34218
  dbname: str,
33048
34219
  host: str,
33049
34220
  max_connections: int,
34221
+ params: Sequence['outputs.GetPgPgParamResult'],
33050
34222
  password: str,
33051
34223
  port: int,
33052
34224
  replica_uri: str,
33053
34225
  sslmode: str,
34226
+ standby_uris: Sequence[str],
34227
+ syncing_uris: Sequence[str],
33054
34228
  uri: str,
34229
+ uris: Sequence[str],
33055
34230
  user: str):
33056
34231
  """
34232
+ :param str bouncer: Bouncer connection details
33057
34233
  :param str dbname: Primary PostgreSQL database name
33058
34234
  :param str host: PostgreSQL master node host IP or name
33059
34235
  :param int max_connections: Connection limit
34236
+ :param Sequence['GetPgPgParamArgs'] params: PostgreSQL connection parameters
33060
34237
  :param str password: PostgreSQL admin user password
33061
34238
  :param int port: PostgreSQL port
33062
34239
  :param str replica_uri: PostgreSQL replica URI for services with a replica
33063
34240
  :param str sslmode: PostgreSQL sslmode setting (currently always "require")
34241
+ :param Sequence[str] standby_uris: PostgreSQL standby connection URIs
34242
+ :param Sequence[str] syncing_uris: PostgreSQL syncing connection URIs
33064
34243
  :param str uri: PostgreSQL master connection URI
34244
+ :param Sequence[str] uris: PostgreSQL master connection URIs
33065
34245
  :param str user: PostgreSQL admin user name
33066
34246
  """
34247
+ pulumi.set(__self__, "bouncer", bouncer)
33067
34248
  pulumi.set(__self__, "dbname", dbname)
33068
34249
  pulumi.set(__self__, "host", host)
33069
34250
  pulumi.set(__self__, "max_connections", max_connections)
34251
+ pulumi.set(__self__, "params", params)
33070
34252
  pulumi.set(__self__, "password", password)
33071
34253
  pulumi.set(__self__, "port", port)
33072
34254
  pulumi.set(__self__, "replica_uri", replica_uri)
33073
34255
  pulumi.set(__self__, "sslmode", sslmode)
34256
+ pulumi.set(__self__, "standby_uris", standby_uris)
34257
+ pulumi.set(__self__, "syncing_uris", syncing_uris)
33074
34258
  pulumi.set(__self__, "uri", uri)
34259
+ pulumi.set(__self__, "uris", uris)
33075
34260
  pulumi.set(__self__, "user", user)
33076
34261
 
34262
+ @property
34263
+ @pulumi.getter
34264
+ def bouncer(self) -> str:
34265
+ """
34266
+ Bouncer connection details
34267
+ """
34268
+ return pulumi.get(self, "bouncer")
34269
+
33077
34270
  @property
33078
34271
  @pulumi.getter
33079
34272
  def dbname(self) -> str:
@@ -33098,6 +34291,14 @@ class GetPgPgResult(dict):
33098
34291
  """
33099
34292
  return pulumi.get(self, "max_connections")
33100
34293
 
34294
+ @property
34295
+ @pulumi.getter
34296
+ def params(self) -> Sequence['outputs.GetPgPgParamResult']:
34297
+ """
34298
+ PostgreSQL connection parameters
34299
+ """
34300
+ return pulumi.get(self, "params")
34301
+
33101
34302
  @property
33102
34303
  @pulumi.getter
33103
34304
  def password(self) -> str:
@@ -33130,6 +34331,22 @@ class GetPgPgResult(dict):
33130
34331
  """
33131
34332
  return pulumi.get(self, "sslmode")
33132
34333
 
34334
+ @property
34335
+ @pulumi.getter(name="standbyUris")
34336
+ def standby_uris(self) -> Sequence[str]:
34337
+ """
34338
+ PostgreSQL standby connection URIs
34339
+ """
34340
+ return pulumi.get(self, "standby_uris")
34341
+
34342
+ @property
34343
+ @pulumi.getter(name="syncingUris")
34344
+ def syncing_uris(self) -> Sequence[str]:
34345
+ """
34346
+ PostgreSQL syncing connection URIs
34347
+ """
34348
+ return pulumi.get(self, "syncing_uris")
34349
+
33133
34350
  @property
33134
34351
  @pulumi.getter
33135
34352
  def uri(self) -> str:
@@ -33138,6 +34355,87 @@ class GetPgPgResult(dict):
33138
34355
  """
33139
34356
  return pulumi.get(self, "uri")
33140
34357
 
34358
+ @property
34359
+ @pulumi.getter
34360
+ def uris(self) -> Sequence[str]:
34361
+ """
34362
+ PostgreSQL master connection URIs
34363
+ """
34364
+ return pulumi.get(self, "uris")
34365
+
34366
+ @property
34367
+ @pulumi.getter
34368
+ def user(self) -> str:
34369
+ """
34370
+ PostgreSQL admin user name
34371
+ """
34372
+ return pulumi.get(self, "user")
34373
+
34374
+
34375
+ @pulumi.output_type
34376
+ class GetPgPgParamResult(dict):
34377
+ def __init__(__self__, *,
34378
+ database_name: str,
34379
+ host: str,
34380
+ password: str,
34381
+ port: int,
34382
+ sslmode: str,
34383
+ user: str):
34384
+ """
34385
+ :param str database_name: Primary PostgreSQL database name
34386
+ :param str host: PostgreSQL host IP or name
34387
+ :param str password: PostgreSQL admin user password
34388
+ :param int port: PostgreSQL port
34389
+ :param str sslmode: PostgreSQL sslmode setting (currently always "require")
34390
+ :param str user: PostgreSQL admin user name
34391
+ """
34392
+ pulumi.set(__self__, "database_name", database_name)
34393
+ pulumi.set(__self__, "host", host)
34394
+ pulumi.set(__self__, "password", password)
34395
+ pulumi.set(__self__, "port", port)
34396
+ pulumi.set(__self__, "sslmode", sslmode)
34397
+ pulumi.set(__self__, "user", user)
34398
+
34399
+ @property
34400
+ @pulumi.getter(name="databaseName")
34401
+ def database_name(self) -> str:
34402
+ """
34403
+ Primary PostgreSQL database name
34404
+ """
34405
+ return pulumi.get(self, "database_name")
34406
+
34407
+ @property
34408
+ @pulumi.getter
34409
+ def host(self) -> str:
34410
+ """
34411
+ PostgreSQL host IP or name
34412
+ """
34413
+ return pulumi.get(self, "host")
34414
+
34415
+ @property
34416
+ @pulumi.getter
34417
+ def password(self) -> str:
34418
+ """
34419
+ PostgreSQL admin user password
34420
+ """
34421
+ return pulumi.get(self, "password")
34422
+
34423
+ @property
34424
+ @pulumi.getter
34425
+ def port(self) -> int:
34426
+ """
34427
+ PostgreSQL port
34428
+ """
34429
+ return pulumi.get(self, "port")
34430
+
34431
+ @property
34432
+ @pulumi.getter
34433
+ def sslmode(self) -> str:
34434
+ """
34435
+ PostgreSQL sslmode setting (currently always "require")
34436
+ """
34437
+ return pulumi.get(self, "sslmode")
34438
+
33141
34439
  @property
33142
34440
  @pulumi.getter
33143
34441
  def user(self) -> str:
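The hunk above adds bouncer, params, standby_uris, syncing_uris and uris to GetPgPgResult and introduces GetPgPgParamResult with per-endpoint connection parameters. A minimal sketch of consuming them via aiven.get_pg; the `pgs` attribute name and the project/service values are assumptions.

```python
import pulumi
import pulumi_aiven as aiven

pg = aiven.get_pg(project="my-project", service_name="my-pg")  # placeholders

# Assumption: the nested connection block is exposed as `pgs` on the result.
conn = pg.pgs[0]
pulumi.export("pg_bouncer", conn.bouncer)
pulumi.export("pg_standby_uris", conn.standby_uris)

# Each `params` entry carries host/port/dbname/user/sslmode for one endpoint;
# libpq-style DSNs can be assembled from them (password deliberately omitted here).
dsns = [
    f"host={p.host} port={p.port} dbname={p.database_name} user={p.user} sslmode={p.sslmode}"
    for p in conn.params
]
pulumi.export("pg_dsns", dsns)
```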
@@ -33198,7 +34496,7 @@ class GetPgPgUserConfigResult(dict):
33198
34496
  :param bool pg_read_replica: Should the service which is being forked be a read replica (deprecated, use read_replica service integration instead).
33199
34497
  :param str pg_service_to_fork_from: Name of the PG Service from which to fork (deprecated, use service_to_fork_from). This has effect only when a new service is being created.
33200
34498
  :param bool pg_stat_monitor_enable: Enable the pg_stat_monitor extension. Enabling this extension will cause the cluster to be restarted.When this extension is enabled, pg_stat_statements results for utility commands are unreliable. The default value is `false`.
33201
- :param str pg_version: PostgreSQL major version.
34499
+ :param str pg_version: Enum: `10`, `11`, `12`, `13`, `14`, `15`, `16`, and newer. PostgreSQL major version.
33202
34500
  :param 'GetPgPgUserConfigPgauditArgs' pgaudit: System-wide settings for the pgaudit extension
33203
34501
  :param 'GetPgPgUserConfigPgbouncerArgs' pgbouncer: PGBouncer connection pooling settings
33204
34502
  :param 'GetPgPgUserConfigPglookoutArgs' pglookout: System-wide settings for pglookout
@@ -33211,9 +34509,9 @@ class GetPgPgUserConfigResult(dict):
33211
34509
  :param str service_to_fork_from: Name of another service to fork from. This has effect only when a new service is being created.
33212
34510
  :param float shared_buffers_percentage: Percentage of total RAM that the database server uses for shared memory buffers. Valid range is 20-60 (float), which corresponds to 20% - 60%. This setting adjusts the shared_buffers configuration value.
33213
34511
  :param bool static_ips: Use static public IP addresses.
33214
- :param str synchronous_replication: Synchronous replication type. Note that the service plan also needs to support synchronous replication.
34512
+ :param str synchronous_replication: Enum: `quorum`, `off`. Synchronous replication type. Note that the service plan also needs to support synchronous replication.
33215
34513
  :param 'GetPgPgUserConfigTimescaledbArgs' timescaledb: System-wide settings for the timescaledb extension
33216
- :param str variant: Variant of the PostgreSQL service, may affect the features that are exposed by default.
34514
+ :param str variant: Enum: `aiven`, `timescale`. Variant of the PostgreSQL service, may affect the features that are exposed by default.
33217
34515
  :param int work_mem: Sets the maximum amount of memory to be used by a query operation (such as a sort or hash table) before writing to temporary disk files, in MB. Default is 1MB + 0.075% of total RAM (up to 32MB).
33218
34516
  """
33219
34517
  if additional_backup_regions is not None:
@@ -33411,7 +34709,7 @@ class GetPgPgUserConfigResult(dict):
33411
34709
  @pulumi.getter(name="pgVersion")
33412
34710
  def pg_version(self) -> Optional[str]:
33413
34711
  """
33414
- PostgreSQL major version.
34712
+ Enum: `10`, `11`, `12`, `13`, `14`, `15`, `16`, and newer. PostgreSQL major version.
33415
34713
  """
33416
34714
  return pulumi.get(self, "pg_version")
33417
34715
 
@@ -33515,7 +34813,7 @@ class GetPgPgUserConfigResult(dict):
33515
34813
  @pulumi.getter(name="synchronousReplication")
33516
34814
  def synchronous_replication(self) -> Optional[str]:
33517
34815
  """
33518
- Synchronous replication type. Note that the service plan also needs to support synchronous replication.
34816
+ Enum: `quorum`, `off`. Synchronous replication type. Note that the service plan also needs to support synchronous replication.
33519
34817
  """
33520
34818
  return pulumi.get(self, "synchronous_replication")
33521
34819
 
@@ -33531,7 +34829,7 @@ class GetPgPgUserConfigResult(dict):
33531
34829
  @pulumi.getter
33532
34830
  def variant(self) -> Optional[str]:
33533
34831
  """
33534
- Variant of the PostgreSQL service, may affect the features that are exposed by default.
34832
+ Enum: `aiven`, `timescale`. Variant of the PostgreSQL service, may affect the features that are exposed by default.
33535
34833
  """
33536
34834
  return pulumi.get(self, "variant")
33537
34835
 
@@ -33590,7 +34888,7 @@ class GetPgPgUserConfigMigrationResult(dict):
33590
34888
  :param int port: Port number of the server where to migrate data from.
33591
34889
  :param str dbname: Database name for bootstrapping the initial connection.
33592
34890
  :param str ignore_dbs: Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment).
33593
- :param str method: The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
34891
+ :param str method: Enum: `dump`, `replication`. The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
33594
34892
  :param str password: Password for authentication with the server where to migrate data from.
33595
34893
  :param bool ssl: The server where to migrate data from is secured with SSL. The default value is `true`.
33596
34894
  :param str username: User name for authentication with the server where to migrate data from.
@@ -33646,7 +34944,7 @@ class GetPgPgUserConfigMigrationResult(dict):
33646
34944
  @pulumi.getter
33647
34945
  def method(self) -> Optional[str]:
33648
34946
  """
33649
- The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
34947
+ Enum: `dump`, `replication`. The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
33650
34948
  """
33651
34949
  return pulumi.get(self, "method")
33652
34950
 
@@ -33742,12 +35040,12 @@ class GetPgPgUserConfigPgResult(dict):
33742
35040
  :param int bgwriter_lru_maxpages: In each round, no more than this many buffers will be written by the background writer. Setting this to zero disables background writing. Default is 100.
33743
35041
  :param float bgwriter_lru_multiplier: The average recent need for new buffers is multiplied by bgwriter_lru_multiplier to arrive at an estimate of the number that will be needed during the next round, (up to bgwriter_lru_maxpages). 1.0 represents a “just in time” policy of writing exactly the number of buffers predicted to be needed. Larger values provide some cushion against spikes in demand, while smaller values intentionally leave writes to be done by server processes. The default is 2.0.
33744
35042
  :param int deadlock_timeout: This is the amount of time, in milliseconds, to wait on a lock before checking to see if there is a deadlock condition.
33745
- :param str default_toast_compression: Specifies the default TOAST compression method for values of compressible columns (the default is lz4).
35043
+ :param str default_toast_compression: Enum: `lz4`, `pglz`. Specifies the default TOAST compression method for values of compressible columns (the default is lz4).
33746
35044
  :param int idle_in_transaction_session_timeout: Time out sessions with open transactions after this number of milliseconds.
33747
35045
  :param bool jit: Controls system-wide use of Just-in-Time Compilation (JIT).
33748
35046
  :param int log_autovacuum_min_duration: Causes each action executed by autovacuum to be logged if it ran for at least the specified number of milliseconds. Setting this to zero logs all autovacuum actions. Minus-one (the default) disables logging autovacuum actions.
33749
- :param str log_error_verbosity: Controls the amount of detail written in the server log for each message that is logged.
33750
- :param str log_line_prefix: Choose from one of the available log-formats. These can support popular log analyzers like pgbadger, pganalyze etc.
35047
+ :param str log_error_verbosity: Enum: `TERSE`, `DEFAULT`, `VERBOSE`. Controls the amount of detail written in the server log for each message that is logged.
35048
+ :param str log_line_prefix: Enum: `'pid=%p,user=%u,db=%d,app=%a,client=%h '`, `'%t [%p]: [%l-1] user=%u,db=%d,app=%a,client=%h '`, `'%m [%p] %q[user=%u,db=%d,app=%a] '`. Choose from one of the available log formats.
33751
35049
  :param int log_min_duration_statement: Log statements that take more than this number of milliseconds to run, -1 disables.
33752
35050
  :param int log_temp_files: Log statements for each temporary file created larger than this number of kilobytes, -1 disables.
33753
35051
  :param int max_files_per_process: PostgreSQL maximum number of files that can be open per process.
@@ -33768,13 +35066,13 @@ class GetPgPgUserConfigPgResult(dict):
33768
35066
  :param str pg_partman_bgw_dot_role: Controls which role to use for pg_partman's scheduled background tasks.
33769
35067
  :param bool pg_stat_monitor_dot_pgsm_enable_query_plan: Enables or disables query plan monitoring.
33770
35068
  :param int pg_stat_monitor_dot_pgsm_max_buckets: Sets the maximum number of buckets.
33771
- :param str pg_stat_statements_dot_track: Controls which statements are counted. Specify top to track top-level statements (those issued directly by clients), all to also track nested statements (such as statements invoked within functions), or none to disable statement statistics collection. The default value is top.
35069
+ :param str pg_stat_statements_dot_track: Enum: `all`, `top`, `none`. Controls which statements are counted. Specify top to track top-level statements (those issued directly by clients), all to also track nested statements (such as statements invoked within functions), or none to disable statement statistics collection. The default value is top.
33772
35070
  :param int temp_file_limit: PostgreSQL temporary file limit in KiB, -1 for unlimited.
33773
35071
  :param str timezone: PostgreSQL service timezone.
33774
35072
  :param int track_activity_query_size: Specifies the number of bytes reserved to track the currently executing command for each active session.
33775
- :param str track_commit_timestamp: Record commit time of transactions.
33776
- :param str track_functions: Enables tracking of function call counts and time used.
33777
- :param str track_io_timing: Enables timing of database I/O calls. This parameter is off by default, because it will repeatedly query the operating system for the current time, which may cause significant overhead on some platforms.
35073
+ :param str track_commit_timestamp: Enum: `off`, `on`. Record commit time of transactions.
35074
+ :param str track_functions: Enum: `all`, `pl`, `none`. Enables tracking of function call counts and time used.
35075
+ :param str track_io_timing: Enum: `off`, `on`. Enables timing of database I/O calls. This parameter is off by default, because it will repeatedly query the operating system for the current time, which may cause significant overhead on some platforms.
33778
35076
  :param int wal_sender_timeout: Terminate replication connections that are inactive for longer than this amount of time, in milliseconds. Setting this value to zero disables the timeout.
33779
35077
  :param int wal_writer_delay: WAL flush interval in milliseconds. Note that setting this value to lower than the default 200ms may negatively impact performance.
33780
35078
  """
@@ -33993,7 +35291,7 @@ class GetPgPgUserConfigPgResult(dict):
33993
35291
  @pulumi.getter(name="defaultToastCompression")
33994
35292
  def default_toast_compression(self) -> Optional[str]:
33995
35293
  """
33996
- Specifies the default TOAST compression method for values of compressible columns (the default is lz4).
35294
+ Enum: `lz4`, `pglz`. Specifies the default TOAST compression method for values of compressible columns (the default is lz4).
33997
35295
  """
33998
35296
  return pulumi.get(self, "default_toast_compression")
33999
35297
 
@@ -34025,7 +35323,7 @@ class GetPgPgUserConfigPgResult(dict):
34025
35323
  @pulumi.getter(name="logErrorVerbosity")
34026
35324
  def log_error_verbosity(self) -> Optional[str]:
34027
35325
  """
34028
- Controls the amount of detail written in the server log for each message that is logged.
35326
+ Enum: `TERSE`, `DEFAULT`, `VERBOSE`. Controls the amount of detail written in the server log for each message that is logged.
34029
35327
  """
34030
35328
  return pulumi.get(self, "log_error_verbosity")
34031
35329
 
@@ -34033,7 +35331,7 @@ class GetPgPgUserConfigPgResult(dict):
34033
35331
  @pulumi.getter(name="logLinePrefix")
34034
35332
  def log_line_prefix(self) -> Optional[str]:
34035
35333
  """
34036
- Choose from one of the available log-formats. These can support popular log analyzers like pgbadger, pganalyze etc.
35334
+ Enum: `'pid=%p,user=%u,db=%d,app=%a,client=%h '`, `'%t [%p]: [%l-1] user=%u,db=%d,app=%a,client=%h '`, `'%m [%p] %q[user=%u,db=%d,app=%a] '`. Choose from one of the available log formats.
34037
35335
  """
34038
35336
  return pulumi.get(self, "log_line_prefix")
34039
35337
 
@@ -34201,7 +35499,7 @@ class GetPgPgUserConfigPgResult(dict):
34201
35499
  @pulumi.getter(name="pgStatStatementsDotTrack")
34202
35500
  def pg_stat_statements_dot_track(self) -> Optional[str]:
34203
35501
  """
34204
- Controls which statements are counted. Specify top to track top-level statements (those issued directly by clients), all to also track nested statements (such as statements invoked within functions), or none to disable statement statistics collection. The default value is top.
35502
+ Enum: `all`, `top`, `none`. Controls which statements are counted. Specify top to track top-level statements (those issued directly by clients), all to also track nested statements (such as statements invoked within functions), or none to disable statement statistics collection. The default value is top.
34205
35503
  """
34206
35504
  return pulumi.get(self, "pg_stat_statements_dot_track")
34207
35505
 
@@ -34233,7 +35531,7 @@ class GetPgPgUserConfigPgResult(dict):
34233
35531
  @pulumi.getter(name="trackCommitTimestamp")
34234
35532
  def track_commit_timestamp(self) -> Optional[str]:
34235
35533
  """
34236
- Record commit time of transactions.
35534
+ Enum: `off`, `on`. Record commit time of transactions.
34237
35535
  """
34238
35536
  return pulumi.get(self, "track_commit_timestamp")
34239
35537
 
@@ -34241,7 +35539,7 @@ class GetPgPgUserConfigPgResult(dict):
34241
35539
  @pulumi.getter(name="trackFunctions")
34242
35540
  def track_functions(self) -> Optional[str]:
34243
35541
  """
34244
- Enables tracking of function call counts and time used.
35542
+ Enum: `all`, `pl`, `none`. Enables tracking of function call counts and time used.
34245
35543
  """
34246
35544
  return pulumi.get(self, "track_functions")
34247
35545
 
@@ -34249,7 +35547,7 @@ class GetPgPgUserConfigPgResult(dict):
34249
35547
  @pulumi.getter(name="trackIoTiming")
34250
35548
  def track_io_timing(self) -> Optional[str]:
34251
35549
  """
34252
- Enables timing of database I/O calls. This parameter is off by default, because it will repeatedly query the operating system for the current time, which may cause significant overhead on some platforms.
35550
+ Enum: `off`, `on`. Enables timing of database I/O calls. This parameter is off by default, because it will repeatedly query the operating system for the current time, which may cause significant overhead on some platforms.
34253
35551
  """
34254
35552
  return pulumi.get(self, "track_io_timing")
34255
35553
 
@@ -34373,7 +35671,7 @@ class GetPgPgUserConfigPgauditResult(dict):
34373
35671
  :param bool feature_enabled: Enable pgaudit extension. When enabled, pgaudit extension will be automatically installed.Otherwise, extension will be uninstalled but auditing configurations will be preserved. The default value is `false`.
34374
35672
  :param bool log_catalog: Specifies that session logging should be enabled in the casewhere all relations in a statement are in pg_catalog. The default value is `true`.
34375
35673
  :param bool log_client: Specifies whether log messages will be visible to a client process such as psql. The default value is `false`.
34376
- :param str log_level: Specifies the log level that will be used for log entries. The default value is `log`.
35674
+ :param str log_level: Enum: `debug1`, `debug2`, `debug3`, `debug4`, `debug5`, `info`, `notice`, `warning`, `log`. Specifies the log level that will be used for log entries. The default value is `log`.
34377
35675
  :param int log_max_string_length: Crop parameters representation and whole statements if they exceed this threshold. A (default) value of -1 disable the truncation. The default value is `-1`.
34378
35676
  :param bool log_nested_statements: This GUC allows to turn off logging nested statements, that is, statements that are executed as part of another ExecutorRun. The default value is `true`.
34379
35677
  :param bool log_parameter: Specifies that audit logging should include the parameters that were passed with the statement. The default value is `false`.
@@ -34442,7 +35740,7 @@ class GetPgPgUserConfigPgauditResult(dict):
34442
35740
  @pulumi.getter(name="logLevel")
34443
35741
  def log_level(self) -> Optional[str]:
34444
35742
  """
34445
- Specifies the log level that will be used for log entries. The default value is `log`.
35743
+ Enum: `debug1`, `debug2`, `debug3`, `debug4`, `debug5`, `info`, `notice`, `warning`, `log`. Specifies the log level that will be used for log entries. The default value is `log`.
34446
35744
  """
34447
35745
  return pulumi.get(self, "log_level")
34448
35746
 
@@ -34542,7 +35840,7 @@ class GetPgPgUserConfigPgbouncerResult(dict):
34542
35840
  """
34543
35841
  :param int autodb_idle_timeout: If the automatically created database pools have been unused this many seconds, they are freed. If 0 then timeout is disabled. (seconds). The default value is `3600`.
34544
35842
  :param int autodb_max_db_connections: Do not allow more than this many server connections per database (regardless of user). Setting it to 0 means unlimited.
34545
- :param str autodb_pool_mode: PGBouncer pool mode. The default value is `transaction`.
35843
+ :param str autodb_pool_mode: Enum: `session`, `transaction`, `statement`. PGBouncer pool mode. The default value is `transaction`.
34546
35844
  :param int autodb_pool_size: If non-zero then create automatically a pool of that size per user when a pool doesn't exist. The default value is `0`.
34547
35845
  :param Sequence[str] ignore_startup_parameters: List of parameters to ignore when given in startup packet.
34548
35846
  :param int min_pool_size: Add more server connections to pool if below this number. Improves behavior when usual load comes suddenly back after period of total inactivity. The value is effectively capped at the pool size. The default value is `0`.
@@ -34589,7 +35887,7 @@ class GetPgPgUserConfigPgbouncerResult(dict):
34589
35887
  @pulumi.getter(name="autodbPoolMode")
34590
35888
  def autodb_pool_mode(self) -> Optional[str]:
34591
35889
  """
34592
- PGBouncer pool mode. The default value is `transaction`.
35890
+ Enum: `session`, `transaction`, `statement`. PGBouncer pool mode. The default value is `transaction`.
34593
35891
  """
34594
35892
  return pulumi.get(self, "autodb_pool_mode")
34595
35893
 
@@ -35009,6 +36307,57 @@ class GetRedisComponentResult(dict):
35009
36307
  return pulumi.get(self, "usage")
35010
36308
 
35011
36309
 
36310
+ @pulumi.output_type
36311
+ class GetRedisRediResult(dict):
36312
+ def __init__(__self__, *,
36313
+ password: str,
36314
+ replica_uri: str,
36315
+ slave_uris: Sequence[str],
36316
+ uris: Sequence[str]):
36317
+ """
36318
+ :param str password: Redis password.
36319
+ :param str replica_uri: Redis replica server URI.
36320
+ :param Sequence[str] slave_uris: Redis slave server URIs.
36321
+ :param Sequence[str] uris: Redis server URIs.
36322
+ """
36323
+ pulumi.set(__self__, "password", password)
36324
+ pulumi.set(__self__, "replica_uri", replica_uri)
36325
+ pulumi.set(__self__, "slave_uris", slave_uris)
36326
+ pulumi.set(__self__, "uris", uris)
36327
+
36328
+ @property
36329
+ @pulumi.getter
36330
+ def password(self) -> str:
36331
+ """
36332
+ Redis password.
36333
+ """
36334
+ return pulumi.get(self, "password")
36335
+
36336
+ @property
36337
+ @pulumi.getter(name="replicaUri")
36338
+ def replica_uri(self) -> str:
36339
+ """
36340
+ Redis replica server URI.
36341
+ """
36342
+ return pulumi.get(self, "replica_uri")
36343
+
36344
+ @property
36345
+ @pulumi.getter(name="slaveUris")
36346
+ def slave_uris(self) -> Sequence[str]:
36347
+ """
36348
+ Redis slave server URIs.
36349
+ """
36350
+ return pulumi.get(self, "slave_uris")
36351
+
36352
+ @property
36353
+ @pulumi.getter
36354
+ def uris(self) -> Sequence[str]:
36355
+ """
36356
+ Redis server URIs.
36357
+ """
36358
+ return pulumi.get(self, "uris")
36359
+
36360
+
35012
36361
  @pulumi.output_type
35013
36362
  class GetRedisRedisUserConfigResult(dict):
35014
36363
  def __init__(__self__, *,
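The new GetRedisRediResult block above exposes the Redis connection details (password, replica_uri, slave_uris, uris). A sketch of reading them from aiven.get_redis; the `redis` attribute name on the result is an assumption inferred from the element type name, and the project/service values are placeholders.

```python
import pulumi
import pulumi_aiven as aiven

redis_svc = aiven.get_redis(project="my-project", service_name="my-redis")  # placeholders

# Assumption: the nested connection block is exposed as `redis` on the result.
conn = redis_svc.redis[0]
pulumi.export("redis_uris", conn.uris)
pulumi.export("redis_replica_uri", conn.replica_uri)
pulumi.export("redis_password", pulumi.Output.secret(conn.password))
```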
@@ -35048,18 +36397,18 @@ class GetRedisRedisUserConfigResult(dict):
35048
36397
  :param str project_to_fork_from: Name of another project to fork a service from. This has effect only when a new service is being created.
35049
36398
  :param 'GetRedisRedisUserConfigPublicAccessArgs' public_access: Allow access to selected service ports from the public Internet
35050
36399
  :param str recovery_basebackup_name: Name of the basebackup to restore in forked service.
35051
- :param str redis_acl_channels_default: Determines default pub/sub channels' ACL for new users if ACL is not supplied. When this option is not defined, all_channels is assumed to keep backward compatibility. This option doesn't affect Redis configuration acl-pubsub-default.
36400
+ :param str redis_acl_channels_default: Enum: `allchannels`, `resetchannels`. Determines default pub/sub channels' ACL for new users if ACL is not supplied. When this option is not defined, all_channels is assumed to keep backward compatibility. This option doesn't affect Redis configuration acl-pubsub-default.
35052
36401
  :param int redis_io_threads: Set Redis IO thread count. Changing this will cause a restart of the Redis service.
35053
36402
  :param int redis_lfu_decay_time: LFU maxmemory-policy counter decay time in minutes. The default value is `1`.
35054
36403
  :param int redis_lfu_log_factor: Counter logarithm factor for volatile-lfu and allkeys-lfu maxmemory-policies. The default value is `10`.
35055
- :param str redis_maxmemory_policy: Redis maxmemory-policy. The default value is `noeviction`.
36404
+ :param str redis_maxmemory_policy: Enum: `noeviction`, `allkeys-lru`, `volatile-lru`, `allkeys-random`, `volatile-random`, `volatile-ttl`, `volatile-lfu`, `allkeys-lfu`. Redis maxmemory-policy. The default value is `noeviction`.
35056
36405
  :param str redis_notify_keyspace_events: Set notify-keyspace-events option.
35057
36406
  :param int redis_number_of_databases: Set number of Redis databases. Changing this will cause a restart of the Redis service.
35058
- :param str redis_persistence: When persistence is 'rdb', Redis does RDB dumps each 10 minutes if any key is changed. Also RDB dumps are done according to backup schedule for backup purposes. When persistence is 'off', no RDB dumps and backups are done, so data can be lost at any moment if service is restarted for any reason, or if service is powered off. Also service can't be forked.
36407
+ :param str redis_persistence: Enum: `off`, `rdb`. When persistence is 'rdb', Redis does RDB dumps each 10 minutes if any key is changed. Also RDB dumps are done according to the backup schedule for backup purposes. When persistence is 'off', no RDB dumps or backups are done, so data can be lost at any moment if the service is restarted for any reason, or if the service is powered off. Also, the service can't be forked.
35059
36408
  :param int redis_pubsub_client_output_buffer_limit: Set output buffer limit for pub / sub clients in MB. The value is the hard limit, the soft limit is 1/4 of the hard limit. When setting the limit, be mindful of the available memory in the selected service plan.
35060
36409
  :param bool redis_ssl: Require SSL to access Redis. The default value is `true`.
35061
36410
  :param int redis_timeout: Redis idle connection timeout in seconds. The default value is `300`.
35062
- :param str redis_version: Redis major version.
36411
+ :param str redis_version: Enum: `7.0`, and newer. Redis major version.
35063
36412
  :param bool service_log: Store logs for the service so that they are available in the HTTP API and console.
35064
36413
  :param str service_to_fork_from: Name of another service to fork from. This has effect only when a new service is being created.
35065
36414
  :param bool static_ips: Use static public IP addresses.
@@ -35202,7 +36551,7 @@ class GetRedisRedisUserConfigResult(dict):
35202
36551
  @pulumi.getter(name="redisAclChannelsDefault")
35203
36552
  def redis_acl_channels_default(self) -> Optional[str]:
35204
36553
  """
35205
- Determines default pub/sub channels' ACL for new users if ACL is not supplied. When this option is not defined, all_channels is assumed to keep backward compatibility. This option doesn't affect Redis configuration acl-pubsub-default.
36554
+ Enum: `allchannels`, `resetchannels`. Determines default pub/sub channels' ACL for new users if ACL is not supplied. When this option is not defined, all_channels is assumed to keep backward compatibility. This option doesn't affect Redis configuration acl-pubsub-default.
35206
36555
  """
35207
36556
  return pulumi.get(self, "redis_acl_channels_default")
35208
36557
 
@@ -35234,7 +36583,7 @@ class GetRedisRedisUserConfigResult(dict):
35234
36583
  @pulumi.getter(name="redisMaxmemoryPolicy")
35235
36584
  def redis_maxmemory_policy(self) -> Optional[str]:
35236
36585
  """
35237
- Redis maxmemory-policy. The default value is `noeviction`.
36586
+ Enum: `noeviction`, `allkeys-lru`, `volatile-lru`, `allkeys-random`, `volatile-random`, `volatile-ttl`, `volatile-lfu`, `allkeys-lfu`. Redis maxmemory-policy. The default value is `noeviction`.
35238
36587
  """
35239
36588
  return pulumi.get(self, "redis_maxmemory_policy")
35240
36589
 
@@ -35258,7 +36607,7 @@ class GetRedisRedisUserConfigResult(dict):
35258
36607
  @pulumi.getter(name="redisPersistence")
35259
36608
  def redis_persistence(self) -> Optional[str]:
35260
36609
  """
35261
- When persistence is 'rdb', Redis does RDB dumps each 10 minutes if any key is changed. Also RDB dumps are done according to backup schedule for backup purposes. When persistence is 'off', no RDB dumps and backups are done, so data can be lost at any moment if service is restarted for any reason, or if service is powered off. Also service can't be forked.
36610
+ Enum: `off`, `rdb`. When persistence is 'rdb', Redis does RDB dumps each 10 minutes if any key is changed. Also RDB dumps are done according to the backup schedule for backup purposes. When persistence is 'off', no RDB dumps or backups are done, so data can be lost at any moment if the service is restarted for any reason, or if the service is powered off. Also, the service can't be forked.
35262
36611
  """
35263
36612
  return pulumi.get(self, "redis_persistence")
35264
36613
 
@@ -35290,7 +36639,7 @@ class GetRedisRedisUserConfigResult(dict):
35290
36639
  @pulumi.getter(name="redisVersion")
35291
36640
  def redis_version(self) -> Optional[str]:
35292
36641
  """
35293
- Redis major version.
36642
+ Enum: `7.0`, and newer. Redis major version.
35294
36643
  """
35295
36644
  return pulumi.get(self, "redis_version")
35296
36645
 
@@ -35365,7 +36714,7 @@ class GetRedisRedisUserConfigMigrationResult(dict):
35365
36714
  :param int port: Port number of the server where to migrate data from.
35366
36715
  :param str dbname: Database name for bootstrapping the initial connection.
35367
36716
  :param str ignore_dbs: Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment).
35368
- :param str method: The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
36717
+ :param str method: Enum: `dump`, `replication`. The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
35369
36718
  :param str password: Password for authentication with the server where to migrate data from.
35370
36719
  :param bool ssl: The server where to migrate data from is secured with SSL. The default value is `true`.
35371
36720
  :param str username: User name for authentication with the server where to migrate data from.
@@ -35421,7 +36770,7 @@ class GetRedisRedisUserConfigMigrationResult(dict):
35421
36770
  @pulumi.getter
35422
36771
  def method(self) -> Optional[str]:
35423
36772
  """
35424
- The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
36773
+ Enum: `dump`, `replication`. The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
35425
36774
  """
35426
36775
  return pulumi.get(self, "method")
35427
36776
 
@@ -35656,13 +37005,13 @@ class GetServiceIntegrationClickhouseKafkaUserConfigTableResult(dict):
35656
37005
  skip_broken_messages: Optional[int] = None):
35657
37006
  """
35658
37007
  :param Sequence['GetServiceIntegrationClickhouseKafkaUserConfigTableColumnArgs'] columns: Table columns
35659
- :param str data_format: Message data format. The default value is `JSONEachRow`.
37008
+ :param str data_format: Enum: `Avro`, `CSV`, `JSONAsString`, `JSONCompactEachRow`, `JSONCompactStringsEachRow`, `JSONEachRow`, `JSONStringsEachRow`, `MsgPack`, `TSKV`, `TSV`, `TabSeparated`, `RawBLOB`, `AvroConfluent`. Message data format. The default value is `JSONEachRow`.
35660
37009
  :param str group_name: Kafka consumers group. The default value is `clickhouse`.
35661
37010
  :param str name: Name of the table.
35662
37011
  :param Sequence['GetServiceIntegrationClickhouseKafkaUserConfigTableTopicArgs'] topics: Kafka topics
35663
- :param str auto_offset_reset: Action to take when there is no initial offset in offset store or the desired offset is out of range. The default value is `earliest`.
35664
- :param str date_time_input_format: Method to read DateTime from text input formats. The default value is `basic`.
35665
- :param str handle_error_mode: How to handle errors for Kafka engine. The default value is `default`.
37012
+ :param str auto_offset_reset: Enum: `smallest`, `earliest`, `beginning`, `largest`, `latest`, `end`. Action to take when there is no initial offset in offset store or the desired offset is out of range. The default value is `earliest`.
37013
+ :param str date_time_input_format: Enum: `basic`, `best_effort`, `best_effort_us`. Method to read DateTime from text input formats. The default value is `basic`.
37014
+ :param str handle_error_mode: Enum: `default`, `stream`. How to handle errors for Kafka engine. The default value is `default`.
35666
37015
  :param int max_block_size: Number of row collected by poll(s) for flushing data from Kafka. The default value is `0`.
35667
37016
  :param int max_rows_per_message: The maximum number of rows produced in one kafka message for row-based formats. The default value is `1`.
35668
37017
  :param int num_consumers: The number of consumers per table per replica. The default value is `1`.
@@ -35703,7 +37052,7 @@ class GetServiceIntegrationClickhouseKafkaUserConfigTableResult(dict):
35703
37052
  @pulumi.getter(name="dataFormat")
35704
37053
  def data_format(self) -> str:
35705
37054
  """
35706
- Message data format. The default value is `JSONEachRow`.
37055
+ Enum: `Avro`, `CSV`, `JSONAsString`, `JSONCompactEachRow`, `JSONCompactStringsEachRow`, `JSONEachRow`, `JSONStringsEachRow`, `MsgPack`, `TSKV`, `TSV`, `TabSeparated`, `RawBLOB`, `AvroConfluent`. Message data format. The default value is `JSONEachRow`.
35707
37056
  """
35708
37057
  return pulumi.get(self, "data_format")
35709
37058
 
@@ -35735,7 +37084,7 @@ class GetServiceIntegrationClickhouseKafkaUserConfigTableResult(dict):
35735
37084
  @pulumi.getter(name="autoOffsetReset")
35736
37085
  def auto_offset_reset(self) -> Optional[str]:
35737
37086
  """
35738
- Action to take when there is no initial offset in offset store or the desired offset is out of range. The default value is `earliest`.
37087
+ Enum: `smallest`, `earliest`, `beginning`, `largest`, `latest`, `end`. Action to take when there is no initial offset in offset store or the desired offset is out of range. The default value is `earliest`.
35739
37088
  """
35740
37089
  return pulumi.get(self, "auto_offset_reset")
35741
37090
 
@@ -35743,7 +37092,7 @@ class GetServiceIntegrationClickhouseKafkaUserConfigTableResult(dict):
35743
37092
  @pulumi.getter(name="dateTimeInputFormat")
35744
37093
  def date_time_input_format(self) -> Optional[str]:
35745
37094
  """
35746
- Method to read DateTime from text input formats. The default value is `basic`.
37095
+ Enum: `basic`, `best_effort`, `best_effort_us`. Method to read DateTime from text input formats. The default value is `basic`.
35747
37096
  """
35748
37097
  return pulumi.get(self, "date_time_input_format")
35749
37098
 
@@ -35751,7 +37100,7 @@ class GetServiceIntegrationClickhouseKafkaUserConfigTableResult(dict):
35751
37100
  @pulumi.getter(name="handleErrorMode")
35752
37101
  def handle_error_mode(self) -> Optional[str]:
35753
37102
  """
35754
- How to handle errors for Kafka engine. The default value is `default`.
37103
+ Enum: `default`, `stream`. How to handle errors for Kafka engine. The default value is `default`.
35755
37104
  """
35756
37105
  return pulumi.get(self, "handle_error_mode")
35757
37106
 
@@ -35897,6 +37246,7 @@ class GetServiceIntegrationClickhousePostgresqlUserConfigDatabaseResult(dict):
35897
37246
  class GetServiceIntegrationDatadogUserConfigResult(dict):
35898
37247
  def __init__(__self__, *,
35899
37248
  datadog_dbm_enabled: Optional[bool] = None,
37249
+ datadog_pgbouncer_enabled: Optional[bool] = None,
35900
37250
  datadog_tags: Optional[Sequence['outputs.GetServiceIntegrationDatadogUserConfigDatadogTagResult']] = None,
35901
37251
  exclude_consumer_groups: Optional[Sequence[str]] = None,
35902
37252
  exclude_topics: Optional[Sequence[str]] = None,
@@ -35908,6 +37258,7 @@ class GetServiceIntegrationDatadogUserConfigResult(dict):
35908
37258
  redis: Optional['outputs.GetServiceIntegrationDatadogUserConfigRedisResult'] = None):
35909
37259
  """
35910
37260
  :param bool datadog_dbm_enabled: Enable Datadog Database Monitoring.
37261
+ :param bool datadog_pgbouncer_enabled: Enable Datadog PgBouncer Metric Tracking.
35911
37262
  :param Sequence['GetServiceIntegrationDatadogUserConfigDatadogTagArgs'] datadog_tags: Custom tags provided by user
35912
37263
  :param Sequence[str] exclude_consumer_groups: List of custom metrics.
35913
37264
  :param Sequence[str] exclude_topics: List of topics to exclude.
@@ -35920,6 +37271,8 @@ class GetServiceIntegrationDatadogUserConfigResult(dict):
35920
37271
  """
35921
37272
  if datadog_dbm_enabled is not None:
35922
37273
  pulumi.set(__self__, "datadog_dbm_enabled", datadog_dbm_enabled)
37274
+ if datadog_pgbouncer_enabled is not None:
37275
+ pulumi.set(__self__, "datadog_pgbouncer_enabled", datadog_pgbouncer_enabled)
35923
37276
  if datadog_tags is not None:
35924
37277
  pulumi.set(__self__, "datadog_tags", datadog_tags)
35925
37278
  if exclude_consumer_groups is not None:
@@ -35947,6 +37300,14 @@ class GetServiceIntegrationDatadogUserConfigResult(dict):
35947
37300
  """
35948
37301
  return pulumi.get(self, "datadog_dbm_enabled")
35949
37302
 
37303
+ @property
37304
+ @pulumi.getter(name="datadogPgbouncerEnabled")
37305
+ def datadog_pgbouncer_enabled(self) -> Optional[bool]:
37306
+ """
37307
+ Enable Datadog PgBouncer Metric Tracking.
37308
+ """
37309
+ return pulumi.get(self, "datadog_pgbouncer_enabled")
37310
+
35950
37311
  @property
35951
37312
  @pulumi.getter(name="datadogTags")
35952
37313
  def datadog_tags(self) -> Optional[Sequence['outputs.GetServiceIntegrationDatadogUserConfigDatadogTagResult']]:
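datadog_pgbouncer_enabled is new in the Datadog integration config, alongside the existing datadog_dbm_enabled flag. A sketch of a Datadog endpoint plus integration that turns both on; the endpoint/integration argument names are assumed from the usual provider layout, and the API key comes from stack config.

```python
import pulumi
import pulumi_aiven as aiven

cfg = pulumi.Config()

datadog_endpoint = aiven.ServiceIntegrationEndpoint(
    "datadog-endpoint",
    project="my-project",                  # placeholder
    endpoint_name="datadog",
    endpoint_type="datadog",
    datadog_user_config=aiven.ServiceIntegrationEndpointDatadogUserConfigArgs(
        datadog_api_key=cfg.require_secret("datadogApiKey"),
        site="datadoghq.eu",               # one of the documented intake sites
    ),
)

datadog_metrics = aiven.ServiceIntegration(
    "datadog-metrics",
    project="my-project",
    integration_type="datadog",
    source_service_name="my-pg",           # placeholder service
    destination_endpoint_id=datadog_endpoint.id,
    datadog_user_config=aiven.ServiceIntegrationDatadogUserConfigArgs(
        datadog_dbm_enabled=True,
        datadog_pgbouncer_enabled=True,    # new flag added in this release
    ),
)
```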
@@ -36141,7 +37502,7 @@ class GetServiceIntegrationEndpointDatadogUserConfigResult(dict):
36141
37502
  :param int kafka_consumer_check_instances: Number of separate instances to fetch kafka consumer statistics with.
36142
37503
  :param int kafka_consumer_stats_timeout: Number of seconds that datadog will wait to get consumer statistics from brokers.
36143
37504
  :param int max_partition_contexts: Maximum number of partition contexts to send.
36144
- :param str site: Datadog intake site. Defaults to datadoghq.com.
37505
+ :param str site: Enum: `datadoghq.com`, `datadoghq.eu`, `us3.datadoghq.com`, `us5.datadoghq.com`, `ddog-gov.com`, `ap1.datadoghq.com`. Datadog intake site. Defaults to datadoghq.com.
36145
37506
  """
36146
37507
  pulumi.set(__self__, "datadog_api_key", datadog_api_key)
36147
37508
  if datadog_tags is not None:
@@ -36209,7 +37570,7 @@ class GetServiceIntegrationEndpointDatadogUserConfigResult(dict):
36209
37570
  @pulumi.getter
36210
37571
  def site(self) -> Optional[str]:
36211
37572
  """
36212
- Datadog intake site. Defaults to datadoghq.com.
37573
+ Enum: `datadoghq.com`, `datadoghq.eu`, `us3.datadoghq.com`, `us5.datadoghq.com`, `ddog-gov.com`, `ap1.datadoghq.com`. Datadog intake site. Defaults to datadoghq.com.
36213
37574
  """
36214
37575
  return pulumi.get(self, "site")
36215
37576
 
@@ -36495,14 +37856,14 @@ class GetServiceIntegrationEndpointExternalKafkaUserConfigResult(dict):
36495
37856
  ssl_endpoint_identification_algorithm: Optional[str] = None):
36496
37857
  """
36497
37858
  :param str bootstrap_servers: Bootstrap servers.
36498
- :param str security_protocol: Security protocol.
36499
- :param str sasl_mechanism: SASL mechanism used for connections to the Kafka server.
37859
+ :param str security_protocol: Enum: `PLAINTEXT`, `SSL`, `SASL_PLAINTEXT`, `SASL_SSL`. Security protocol.
37860
+ :param str sasl_mechanism: Enum: `PLAIN`, `SCRAM-SHA-256`, `SCRAM-SHA-512`. SASL mechanism used for connections to the Kafka server.
36500
37861
  :param str sasl_plain_password: Password for SASL PLAIN mechanism in the Kafka server.
36501
37862
  :param str sasl_plain_username: Username for SASL PLAIN mechanism in the Kafka server.
36502
37863
  :param str ssl_ca_cert: PEM-encoded CA certificate.
36503
37864
  :param str ssl_client_cert: PEM-encoded client certificate.
36504
37865
  :param str ssl_client_key: PEM-encoded client key.
36505
- :param str ssl_endpoint_identification_algorithm: The endpoint identification algorithm to validate server hostname using server certificate.
37866
+ :param str ssl_endpoint_identification_algorithm: Enum: `https`. The endpoint identification algorithm to validate server hostname using server certificate.
36506
37867
  """
36507
37868
  pulumi.set(__self__, "bootstrap_servers", bootstrap_servers)
36508
37869
  pulumi.set(__self__, "security_protocol", security_protocol)
@@ -36533,7 +37894,7 @@ class GetServiceIntegrationEndpointExternalKafkaUserConfigResult(dict):
36533
37894
  @pulumi.getter(name="securityProtocol")
36534
37895
  def security_protocol(self) -> str:
36535
37896
  """
36536
- Security protocol.
37897
+ Enum: `PLAINTEXT`, `SSL`, `SASL_PLAINTEXT`, `SASL_SSL`. Security protocol.
36537
37898
  """
36538
37899
  return pulumi.get(self, "security_protocol")
36539
37900
 
@@ -36541,7 +37902,7 @@ class GetServiceIntegrationEndpointExternalKafkaUserConfigResult(dict):
36541
37902
  @pulumi.getter(name="saslMechanism")
36542
37903
  def sasl_mechanism(self) -> Optional[str]:
36543
37904
  """
36544
- SASL mechanism used for connections to the Kafka server.
37905
+ Enum: `PLAIN`, `SCRAM-SHA-256`, `SCRAM-SHA-512`. SASL mechanism used for connections to the Kafka server.
36545
37906
  """
36546
37907
  return pulumi.get(self, "sasl_mechanism")
36547
37908
 
@@ -36589,7 +37950,7 @@ class GetServiceIntegrationEndpointExternalKafkaUserConfigResult(dict):
36589
37950
  @pulumi.getter(name="sslEndpointIdentificationAlgorithm")
36590
37951
  def ssl_endpoint_identification_algorithm(self) -> Optional[str]:
36591
37952
  """
36592
- The endpoint identification algorithm to validate server hostname using server certificate.
37953
+ Enum: `https`. The endpoint identification algorithm to validate server hostname using server certificate.
36593
37954
  """
36594
37955
  return pulumi.get(self, "ssl_endpoint_identification_algorithm")
36595
37956
 
@@ -36679,7 +38040,7 @@ class GetServiceIntegrationEndpointExternalPostgresqlResult(dict):
36679
38040
  :param str password: Password.
36680
38041
  :param str ssl_client_certificate: Client certificate.
36681
38042
  :param str ssl_client_key: Client key.
36682
- :param str ssl_mode: SSL Mode. The default value is `verify-full`.
38043
+ :param str ssl_mode: Enum: `disable`, `allow`, `prefer`, `require`, `verify-ca`, `verify-full`. SSL Mode. The default value is `verify-full`.
36683
38044
  :param str ssl_root_cert: SSL Root Cert.
36684
38045
  """
36685
38046
  pulumi.set(__self__, "host", host)
@@ -36758,7 +38119,7 @@ class GetServiceIntegrationEndpointExternalPostgresqlResult(dict):
36758
38119
  @pulumi.getter(name="sslMode")
36759
38120
  def ssl_mode(self) -> Optional[str]:
36760
38121
  """
36761
- SSL Mode. The default value is `verify-full`.
38122
+ Enum: `disable`, `allow`, `prefer`, `require`, `verify-ca`, `verify-full`. SSL Mode. The default value is `verify-full`.
36762
38123
  """
36763
38124
  return pulumi.get(self, "ssl_mode")
36764
38125
 
@@ -36779,7 +38140,7 @@ class GetServiceIntegrationEndpointExternalSchemaRegistryUserConfigResult(dict):
36779
38140
  basic_auth_password: Optional[str] = None,
36780
38141
  basic_auth_username: Optional[str] = None):
36781
38142
  """
36782
- :param str authentication: Authentication method.
38143
+ :param str authentication: Enum: `none`, `basic`. Authentication method.
36783
38144
  :param str url: Schema Registry URL.
36784
38145
  :param str basic_auth_password: Basic authentication password.
36785
38146
  :param str basic_auth_username: Basic authentication user name.
@@ -36795,7 +38156,7 @@ class GetServiceIntegrationEndpointExternalSchemaRegistryUserConfigResult(dict):
36795
38156
  @pulumi.getter
36796
38157
  def authentication(self) -> str:
36797
38158
  """
36798
- Authentication method.
38159
+ Enum: `none`, `basic`. Authentication method.
36799
38160
  """
36800
38161
  return pulumi.get(self, "authentication")
36801
38162
 
@@ -36900,7 +38261,7 @@ class GetServiceIntegrationEndpointRsyslogUserConfigResult(dict):
36900
38261
  max_message_size: Optional[int] = None,
36901
38262
  sd: Optional[str] = None):
36902
38263
  """
36903
- :param str format: Message format. The default value is `rfc5424`.
38264
+ :param str format: Enum: `rfc5424`, `rfc3164`, `custom`. Message format. The default value is `rfc5424`.
36904
38265
  :param int port: Rsyslog server port. The default value is `514`.
36905
38266
  :param str server: Rsyslog server IP address or hostname.
36906
38267
  :param bool tls: Require TLS. The default value is `true`.
@@ -36932,7 +38293,7 @@ class GetServiceIntegrationEndpointRsyslogUserConfigResult(dict):
36932
38293
  @pulumi.getter
36933
38294
  def format(self) -> str:
36934
38295
  """
36935
- Message format. The default value is `rfc5424`.
38296
+ Enum: `rfc5424`, `rfc3164`, `custom`. Message format. The default value is `rfc5424`.
36936
38297
  """
36937
38298
  return pulumi.get(self, "format")
36938
38299
 
@@ -37303,7 +38664,7 @@ class GetServiceIntegrationKafkaMirrormakerUserConfigKafkaMirrormakerResult(dict
37303
38664
  :param int consumer_fetch_min_bytes: The minimum amount of data the server should return for a fetch request.
37304
38665
  :param int producer_batch_size: The batch size in bytes producer will attempt to collect before publishing to broker.
37305
38666
  :param int producer_buffer_memory: The amount of bytes producer can use for buffering data before publishing to broker.
37306
- :param str producer_compression_type: Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
38667
+ :param str producer_compression_type: Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
37307
38668
  :param int producer_linger_ms: The linger time (ms) for waiting new data to arrive for publishing.
37308
38669
  :param int producer_max_request_size: The maximum request size in bytes.
37309
38670
  """
@@ -37348,7 +38709,7 @@ class GetServiceIntegrationKafkaMirrormakerUserConfigKafkaMirrormakerResult(dict
37348
38709
  @pulumi.getter(name="producerCompressionType")
37349
38710
  def producer_compression_type(self) -> Optional[str]:
37350
38711
  """
37351
- Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
38712
+ Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
37352
38713
  """
37353
38714
  return pulumi.get(self, "producer_compression_type")
37354
38715
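The producer_compression_type enum above belongs to the kafka_mirrormaker integration config. A rough sketch of a MirrorMaker replication integration tuned to use zstd compression; the nested `*Args` names and the cluster_alias field are assumptions based on the usual provider layout.

```python
import pulumi_aiven as aiven

mirrormaker = aiven.ServiceIntegration(
    "mm-source",
    project="my-project",                      # placeholder
    integration_type="kafka_mirrormaker",
    source_service_name="my-kafka-source",     # placeholder Kafka service
    destination_service_name="my-mirrormaker", # placeholder MirrorMaker service
    kafka_mirrormaker_user_config=aiven.ServiceIntegrationKafkaMirrormakerUserConfigArgs(
        cluster_alias="source",                # assumed field; alias used in replication flows
        kafka_mirrormaker=aiven.ServiceIntegrationKafkaMirrormakerUserConfigKafkaMirrormakerArgs(
            producer_compression_type="zstd",  # gzip, snappy, lz4, zstd or none
            producer_linger_ms=100,
            producer_max_request_size=1048576,
        ),
    ),
)
```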