pulumi-aiven 6.16.0__py3-none-any.whl → 6.16.0a1715859567__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of pulumi-aiven might be problematic.
- pulumi_aiven/_inputs.py +181 -1061
- pulumi_aiven/cassandra.py +1 -48
- pulumi_aiven/clickhouse.py +1 -48
- pulumi_aiven/dragonfly.py +1 -48
- pulumi_aiven/flink.py +2 -2
- pulumi_aiven/flink_application_deployment.py +30 -56
- pulumi_aiven/gcp_privatelink.py +30 -52
- pulumi_aiven/gcp_privatelink_connection_approval.py +30 -54
- pulumi_aiven/get_cassanda.py +1 -14
- pulumi_aiven/get_cassandra.py +1 -14
- pulumi_aiven/get_clickhouse.py +1 -14
- pulumi_aiven/get_dragonfly.py +1 -14
- pulumi_aiven/get_gcp_privatelink.py +2 -45
- pulumi_aiven/get_grafana.py +1 -14
- pulumi_aiven/get_m3_aggregator.py +1 -14
- pulumi_aiven/get_m3_db.py +1 -14
- pulumi_aiven/get_mirror_maker_replication_flow.py +1 -1
- pulumi_aiven/get_my_sql.py +1 -14
- pulumi_aiven/get_redis.py +1 -14
- pulumi_aiven/get_service_integration.py +3 -3
- pulumi_aiven/get_service_integration_endpoint.py +1 -1
- pulumi_aiven/grafana.py +1 -48
- pulumi_aiven/influx_db.py +2 -21
- pulumi_aiven/kafka.py +1 -20
- pulumi_aiven/m3_aggregator.py +1 -48
- pulumi_aiven/m3_db.py +1 -48
- pulumi_aiven/mirror_maker_replication_flow.py +7 -7
- pulumi_aiven/my_sql.py +1 -48
- pulumi_aiven/open_search.py +2 -21
- pulumi_aiven/outputs.py +285 -1758
- pulumi_aiven/pg.py +2 -2
- pulumi_aiven/pulumi-plugin.json +1 -1
- pulumi_aiven/redis.py +1 -48
- pulumi_aiven/service_integration.py +7 -7
- pulumi_aiven/service_integration_endpoint.py +7 -7
- {pulumi_aiven-6.16.0.dist-info → pulumi_aiven-6.16.0a1715859567.dist-info}/METADATA +1 -1
- {pulumi_aiven-6.16.0.dist-info → pulumi_aiven-6.16.0a1715859567.dist-info}/RECORD +39 -39
- {pulumi_aiven-6.16.0.dist-info → pulumi_aiven-6.16.0a1715859567.dist-info}/WHEEL +0 -0
- {pulumi_aiven-6.16.0.dist-info → pulumi_aiven-6.16.0a1715859567.dist-info}/top_level.txt +0 -0
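Note on version ordering: under PEP 440 the tag `6.16.0a1715859567` is a pre-release of 6.16.0 and therefore sorts before the stable wheel, so a plain `pip install pulumi-aiven` (without `--pre` or an explicit pin) should keep resolving to the stable 6.16.0 release. A minimal sketch using the `packaging` library to confirm the ordering:

from packaging.version import Version

stable = Version("6.16.0")
prerelease = Version("6.16.0a1715859567")

# PEP 440: an alpha (aN) of a release sorts before the final release and is
# skipped by pip's resolver unless pre-releases are explicitly allowed.
assert prerelease.is_prerelease and not stable.is_prerelease
assert prerelease < stable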
pulumi_aiven/outputs.py
CHANGED
@@ -12,7 +12,6 @@ from . import outputs
 
 __all__ = [
     'AccountAuthenticationSamlFieldMapping',
-    'CassandraCassandra',
     'CassandraCassandraUserConfig',
     'CassandraCassandraUserConfigCassandra',
     'CassandraCassandraUserConfigIpFilterObject',
@@ -22,7 +21,6 @@ __all__ = [
     'CassandraServiceIntegration',
     'CassandraTag',
     'CassandraTechEmail',
-    'ClickhouseClickhouse',
     'ClickhouseClickhouseUserConfig',
     'ClickhouseClickhouseUserConfigIpFilterObject',
     'ClickhouseClickhouseUserConfigPrivateAccess',
@@ -35,7 +33,6 @@ __all__ = [
     'ClickhouseTag',
     'ClickhouseTechEmail',
     'DragonflyComponent',
-    'DragonflyDragonfly',
     'DragonflyDragonflyUserConfig',
     'DragonflyDragonflyUserConfigIpFilterObject',
     'DragonflyDragonflyUserConfigMigration',
@@ -56,7 +53,6 @@ __all__ = [
     'FlinkTag',
     'FlinkTechEmail',
     'GrafanaComponent',
-    'GrafanaGrafana',
     'GrafanaGrafanaUserConfig',
     'GrafanaGrafanaUserConfigAuthAzuread',
     'GrafanaGrafanaUserConfigAuthGenericOauth',
@@ -122,14 +118,12 @@ __all__ = [
     'KafkaTopicConfig',
     'KafkaTopicTag',
     'M3AggregatorComponent',
-    'M3AggregatorM3aggregator',
     'M3AggregatorM3aggregatorUserConfig',
     'M3AggregatorM3aggregatorUserConfigIpFilterObject',
     'M3AggregatorServiceIntegration',
     'M3AggregatorTag',
     'M3AggregatorTechEmail',
     'M3DbComponent',
-    'M3DbM3db',
     'M3DbM3dbUserConfig',
     'M3DbM3dbUserConfigIpFilterObject',
     'M3DbM3dbUserConfigLimits',
@@ -148,8 +142,6 @@ __all__ = [
     'M3DbTag',
     'M3DbTechEmail',
     'MySqlComponent',
-    'MySqlMysql',
-    'MySqlMysqlParam',
     'MySqlMysqlUserConfig',
     'MySqlMysqlUserConfigIpFilterObject',
     'MySqlMysqlUserConfigMigration',
@@ -186,7 +178,6 @@ __all__ = [
     'OrganizationUserGroupMemberTimeouts',
     'PgComponent',
     'PgPg',
-    'PgPgParam',
     'PgPgUserConfig',
     'PgPgUserConfigIpFilterObject',
     'PgPgUserConfigMigration',
@@ -204,7 +195,6 @@ __all__ = [
     'PgTechEmail',
     'ProjectTag',
     'RedisComponent',
-    'RedisRedis',
     'RedisRedisUserConfig',
     'RedisRedisUserConfigIpFilterObject',
     'RedisRedisUserConfigMigration',
@@ -257,7 +247,6 @@ __all__ = [
     'ServiceIntegrationPrometheusUserConfigSourceMysql',
     'ServiceIntegrationPrometheusUserConfigSourceMysqlTelegraf',
     'GetAccountAuthenticationSamlFieldMappingResult',
-    'GetCassandaCassandraResult',
     'GetCassandaCassandraUserConfigResult',
     'GetCassandaCassandraUserConfigCassandraResult',
     'GetCassandaCassandraUserConfigIpFilterObjectResult',
@@ -267,7 +256,6 @@ __all__ = [
     'GetCassandaServiceIntegrationResult',
     'GetCassandaTagResult',
     'GetCassandaTechEmailResult',
-    'GetCassandraCassandraResult',
     'GetCassandraCassandraUserConfigResult',
     'GetCassandraCassandraUserConfigCassandraResult',
     'GetCassandraCassandraUserConfigIpFilterObjectResult',
@@ -277,7 +265,6 @@ __all__ = [
     'GetCassandraServiceIntegrationResult',
     'GetCassandraTagResult',
     'GetCassandraTechEmailResult',
-    'GetClickhouseClickhouseResult',
     'GetClickhouseClickhouseUserConfigResult',
     'GetClickhouseClickhouseUserConfigIpFilterObjectResult',
     'GetClickhouseClickhouseUserConfigPrivateAccessResult',
@@ -288,7 +275,6 @@ __all__ = [
     'GetClickhouseTagResult',
     'GetClickhouseTechEmailResult',
     'GetDragonflyComponentResult',
-    'GetDragonflyDragonflyResult',
     'GetDragonflyDragonflyUserConfigResult',
     'GetDragonflyDragonflyUserConfigIpFilterObjectResult',
     'GetDragonflyDragonflyUserConfigMigrationResult',
@@ -309,7 +295,6 @@ __all__ = [
     'GetFlinkTagResult',
     'GetFlinkTechEmailResult',
     'GetGrafanaComponentResult',
-    'GetGrafanaGrafanaResult',
     'GetGrafanaGrafanaUserConfigResult',
     'GetGrafanaGrafanaUserConfigAuthAzureadResult',
     'GetGrafanaGrafanaUserConfigAuthGenericOauthResult',
@@ -375,14 +360,12 @@ __all__ = [
     'GetKafkaTopicConfigResult',
     'GetKafkaTopicTagResult',
     'GetM3AggregatorComponentResult',
-    'GetM3AggregatorM3aggregatorResult',
     'GetM3AggregatorM3aggregatorUserConfigResult',
     'GetM3AggregatorM3aggregatorUserConfigIpFilterObjectResult',
     'GetM3AggregatorServiceIntegrationResult',
     'GetM3AggregatorTagResult',
     'GetM3AggregatorTechEmailResult',
     'GetM3DbComponentResult',
-    'GetM3DbM3dbResult',
     'GetM3DbM3dbUserConfigResult',
     'GetM3DbM3dbUserConfigIpFilterObjectResult',
     'GetM3DbM3dbUserConfigLimitsResult',
@@ -401,8 +384,6 @@ __all__ = [
     'GetM3DbTagResult',
     'GetM3DbTechEmailResult',
     'GetMySqlComponentResult',
-    'GetMySqlMysqlResult',
-    'GetMySqlMysqlParamResult',
     'GetMySqlMysqlUserConfigResult',
     'GetMySqlMysqlUserConfigIpFilterObjectResult',
     'GetMySqlMysqlUserConfigMigrationResult',
@@ -434,7 +415,6 @@ __all__ = [
     'GetOpenSearchTechEmailResult',
     'GetPgComponentResult',
     'GetPgPgResult',
-    'GetPgPgParamResult',
     'GetPgPgUserConfigResult',
     'GetPgPgUserConfigIpFilterObjectResult',
     'GetPgPgUserConfigMigrationResult',
@@ -452,7 +432,6 @@ __all__ = [
     'GetPgTechEmailResult',
     'GetProjectTagResult',
     'GetRedisComponentResult',
-    'GetRedisRediResult',
     'GetRedisRedisUserConfigResult',
     'GetRedisRedisUserConfigIpFilterObjectResult',
     'GetRedisRedisUserConfigMigrationResult',
@@ -594,25 +573,6 @@ class AccountAuthenticationSamlFieldMapping(dict):
         return pulumi.get(self, "real_name")
 
 
-@pulumi.output_type
-class CassandraCassandra(dict):
-    def __init__(__self__, *,
-                 uris: Optional[Sequence[str]] = None):
-        """
-        :param Sequence[str] uris: Cassandra server URIs.
-        """
-        if uris is not None:
-            pulumi.set(__self__, "uris", uris)
-
-    @property
-    @pulumi.getter
-    def uris(self) -> Optional[Sequence[str]]:
-        """
-        Cassandra server URIs.
-        """
-        return pulumi.get(self, "uris")
-
-
 @pulumi.output_type
 class CassandraCassandraUserConfig(dict):
     @staticmethod
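The hunk above deletes the `CassandraCassandra` output type outright (later hunks do the same for the other service-specific connection-info classes and their `uris` fields), so a program that imports these names from `pulumi_aiven.outputs` fails against the prerelease wheel. A hedged sketch of a guarded import; the fallback is illustrative, not part of the package:

try:
    from pulumi_aiven.outputs import CassandraCassandra  # present in 6.16.0, deleted in the prerelease
except ImportError:
    CassandraCassandra = None  # hypothetical fallback so optional code paths can check for the class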
@@ -682,7 +642,7 @@ class CassandraCassandraUserConfig(dict):
         :param int backup_hour: The hour of day (in UTC) when backup for the service is started. New backup is only started if previous backup has already completed.
         :param int backup_minute: The minute of an hour when backup for the service is started. New backup is only started if previous backup has already completed.
         :param 'CassandraCassandraUserConfigCassandraArgs' cassandra: Cassandra configuration values
-        :param str cassandra_version:
+        :param str cassandra_version: Cassandra version.
         :param Sequence['CassandraCassandraUserConfigIpFilterObjectArgs'] ip_filter_objects: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'
         :param Sequence[str] ip_filter_strings: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
         :param Sequence[str] ip_filters: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
@@ -767,7 +727,7 @@ class CassandraCassandraUserConfig(dict):
     @pulumi.getter(name="cassandraVersion")
     def cassandra_version(self) -> Optional[str]:
         """
-
+        Cassandra version.
         """
         return pulumi.get(self, "cassandra_version")
 
@@ -1210,25 +1170,6 @@ class CassandraTechEmail(dict):
         return pulumi.get(self, "email")
 
 
-@pulumi.output_type
-class ClickhouseClickhouse(dict):
-    def __init__(__self__, *,
-                 uris: Optional[Sequence[str]] = None):
-        """
-        :param Sequence[str] uris: Clickhouse server URIs.
-        """
-        if uris is not None:
-            pulumi.set(__self__, "uris", uris)
-
-    @property
-    @pulumi.getter
-    def uris(self) -> Optional[Sequence[str]]:
-        """
-        Clickhouse server URIs.
-        """
-        return pulumi.get(self, "uris")
-
-
 @pulumi.output_type
 class ClickhouseClickhouseUserConfig(dict):
     @staticmethod
@@ -2104,80 +2045,6 @@ class DragonflyComponent(dict):
         return pulumi.get(self, "usage")
 
 
-@pulumi.output_type
-class DragonflyDragonfly(dict):
-    @staticmethod
-    def __key_warning(key: str):
-        suggest = None
-        if key == "replicaUri":
-            suggest = "replica_uri"
-        elif key == "slaveUris":
-            suggest = "slave_uris"
-
-        if suggest:
-            pulumi.log.warn(f"Key '{key}' not found in DragonflyDragonfly. Access the value via the '{suggest}' property getter instead.")
-
-    def __getitem__(self, key: str) -> Any:
-        DragonflyDragonfly.__key_warning(key)
-        return super().__getitem__(key)
-
-    def get(self, key: str, default = None) -> Any:
-        DragonflyDragonfly.__key_warning(key)
-        return super().get(key, default)
-
-    def __init__(__self__, *,
-                 password: Optional[str] = None,
-                 replica_uri: Optional[str] = None,
-                 slave_uris: Optional[Sequence[str]] = None,
-                 uris: Optional[Sequence[str]] = None):
-        """
-        :param str password: Dragonfly password.
-        :param str replica_uri: Dragonfly replica server URI.
-        :param Sequence[str] slave_uris: Dragonfly slave server URIs.
-        :param Sequence[str] uris: Dragonfly server URIs.
-        """
-        if password is not None:
-            pulumi.set(__self__, "password", password)
-        if replica_uri is not None:
-            pulumi.set(__self__, "replica_uri", replica_uri)
-        if slave_uris is not None:
-            pulumi.set(__self__, "slave_uris", slave_uris)
-        if uris is not None:
-            pulumi.set(__self__, "uris", uris)
-
-    @property
-    @pulumi.getter
-    def password(self) -> Optional[str]:
-        """
-        Dragonfly password.
-        """
-        return pulumi.get(self, "password")
-
-    @property
-    @pulumi.getter(name="replicaUri")
-    def replica_uri(self) -> Optional[str]:
-        """
-        Dragonfly replica server URI.
-        """
-        return pulumi.get(self, "replica_uri")
-
-    @property
-    @pulumi.getter(name="slaveUris")
-    def slave_uris(self) -> Optional[Sequence[str]]:
-        """
-        Dragonfly slave server URIs.
-        """
-        return pulumi.get(self, "slave_uris")
-
-    @property
-    @pulumi.getter
-    def uris(self) -> Optional[Sequence[str]]:
-        """
-        Dragonfly server URIs.
-        """
-        return pulumi.get(self, "uris")
-
-
 @pulumi.output_type
 class DragonflyDragonflyUserConfig(dict):
     @staticmethod
@@ -2241,7 +2108,7 @@ class DragonflyDragonflyUserConfig(dict):
                  static_ips: Optional[bool] = None):
         """
         :param bool cache_mode: Evict entries when getting close to maxmemory limit. The default value is `false`.
-        :param str dragonfly_persistence:
+        :param str dragonfly_persistence: When persistence is 'rdb', Dragonfly does RDB dumps each 10 minutes. Also RDB dumps are done according to backup schedule for backup purposes. When persistence is 'off', no RDB dumps and backups are done, so data can be lost at any moment if service is restarted for any reason, or if service is powered off. Also service can't be forked.
         :param bool dragonfly_ssl: Require SSL to access Dragonfly. The default value is `true`.
         :param Sequence['DragonflyDragonflyUserConfigIpFilterObjectArgs'] ip_filter_objects: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'
         :param Sequence[str] ip_filter_strings: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
@@ -2299,7 +2166,7 @@ class DragonflyDragonflyUserConfig(dict):
     @pulumi.getter(name="dragonflyPersistence")
     def dragonfly_persistence(self) -> Optional[str]:
         """
-
+        When persistence is 'rdb', Dragonfly does RDB dumps each 10 minutes. Also RDB dumps are done according to backup schedule for backup purposes. When persistence is 'off', no RDB dumps and backups are done, so data can be lost at any moment if service is restarted for any reason, or if service is powered off. Also service can't be forked.
         """
         return pulumi.get(self, "dragonfly_persistence")
 
@@ -2474,7 +2341,7 @@ class DragonflyDragonflyUserConfigMigration(dict):
         :param int port: Port number of the server where to migrate data from.
         :param str dbname: Database name for bootstrapping the initial connection.
         :param str ignore_dbs: Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment).
-        :param str method:
+        :param str method: The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
         :param str password: Password for authentication with the server where to migrate data from.
         :param bool ssl: The server where to migrate data from is secured with SSL. The default value is `true`.
         :param str username: User name for authentication with the server where to migrate data from.
@@ -2530,7 +2397,7 @@ class DragonflyDragonflyUserConfigMigration(dict):
     @pulumi.getter
    def method(self) -> Optional[str]:
         """
-
+        The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
         """
         return pulumi.get(self, "method")
 
@@ -3050,7 +2917,7 @@ class FlinkFlinkUserConfig(dict):
                  static_ips: Optional[bool] = None):
         """
         :param str additional_backup_regions: Additional Cloud Regions for Backup Replication.
-        :param str flink_version:
+        :param str flink_version: Flink major version.
         :param Sequence['FlinkFlinkUserConfigIpFilterObjectArgs'] ip_filter_objects: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'
         :param Sequence[str] ip_filter_strings: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
         :param Sequence[str] ip_filters: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
@@ -3093,7 +2960,7 @@ class FlinkFlinkUserConfig(dict):
     @pulumi.getter(name="flinkVersion")
     def flink_version(self) -> Optional[str]:
         """
-
+        Flink major version.
         """
         return pulumi.get(self, "flink_version")
 
@@ -3435,25 +3302,6 @@ class GrafanaComponent(dict):
         return pulumi.get(self, "usage")
 
 
-@pulumi.output_type
-class GrafanaGrafana(dict):
-    def __init__(__self__, *,
-                 uris: Optional[Sequence[str]] = None):
-        """
-        :param Sequence[str] uris: Grafana server URIs.
-        """
-        if uris is not None:
-            pulumi.set(__self__, "uris", uris)
-
-    @property
-    @pulumi.getter
-    def uris(self) -> Optional[Sequence[str]]:
-        """
-        Grafana server URIs.
-        """
-        return pulumi.get(self, "uris")
-
-
 @pulumi.output_type
 class GrafanaGrafanaUserConfig(dict):
     @staticmethod
@@ -3601,9 +3449,9 @@ class GrafanaGrafanaUserConfig(dict):
         """
         :param str additional_backup_regions: Additional Cloud Regions for Backup Replication.
         :param bool alerting_enabled: Enable or disable Grafana legacy alerting functionality. This should not be enabled with unified*alerting*enabled.
-        :param str alerting_error_or_timeout:
+        :param str alerting_error_or_timeout: Default error or timeout setting for new alerting rules.
         :param int alerting_max_annotations_to_keep: Max number of alert annotations that Grafana stores. 0 (default) keeps all alert annotations.
-        :param str alerting_nodata_or_nullvalues:
+        :param str alerting_nodata_or_nullvalues: Default value for 'no data or null values' for new alerting rules.
         :param bool allow_embedding: Allow embedding Grafana dashboards with iframe/frame/object/embed tags. Disabled by default to limit impact of clickjacking.
         :param 'GrafanaGrafanaUserConfigAuthAzureadArgs' auth_azuread: Azure AD OAuth integration
         :param bool auth_basic_enabled: Enable or disable basic authentication form, used by Grafana built-in login.
@@ -3611,7 +3459,7 @@ class GrafanaGrafanaUserConfig(dict):
         :param 'GrafanaGrafanaUserConfigAuthGithubArgs' auth_github: Github Auth integration
         :param 'GrafanaGrafanaUserConfigAuthGitlabArgs' auth_gitlab: GitLab Auth integration
         :param 'GrafanaGrafanaUserConfigAuthGoogleArgs' auth_google: Google Auth integration
-        :param str cookie_samesite:
+        :param str cookie_samesite: Cookie SameSite attribute: 'strict' prevents sending cookie for cross-site requests, effectively disabling direct linking from other sites to Grafana. 'lax' is the default value.
         :param str custom_domain: Serve the web frontend using a custom CNAME pointing to the Aiven DNS name.
         :param bool dashboard_previews_enabled: This feature is new in Grafana 9 and is quite resource intensive. It may cause low-end plans to work more slowly while the dashboard previews are rendering.
         :param str dashboards_min_refresh_interval: Signed sequence of decimal numbers, followed by a unit suffix (ms, s, m, h, d), e.g. 30s, 1h.
@@ -3639,7 +3487,7 @@ class GrafanaGrafanaUserConfig(dict):
         :param bool static_ips: Use static public IP addresses.
         :param bool unified_alerting_enabled: Enable or disable Grafana unified alerting functionality. By default this is enabled and any legacy alerts will be migrated on upgrade to Grafana 9+. To stay on legacy alerting, set unified*alerting*enabled to false and alerting_enabled to true. See https://grafana.com/docs/grafana/latest/alerting/set-up/migrating-alerts/ for more details.
         :param bool user_auto_assign_org: Auto-assign new users on signup to main organization. Defaults to false.
-        :param str user_auto_assign_org_role:
+        :param str user_auto_assign_org_role: Set role for new signups. Defaults to Viewer.
         :param bool viewers_can_edit: Users with view-only permission can edit but not save dashboards.
         """
         if additional_backup_regions is not None:
@@ -3747,7 +3595,7 @@ class GrafanaGrafanaUserConfig(dict):
     @pulumi.getter(name="alertingErrorOrTimeout")
     def alerting_error_or_timeout(self) -> Optional[str]:
         """
-
+        Default error or timeout setting for new alerting rules.
         """
         return pulumi.get(self, "alerting_error_or_timeout")
 
@@ -3763,7 +3611,7 @@ class GrafanaGrafanaUserConfig(dict):
     @pulumi.getter(name="alertingNodataOrNullvalues")
     def alerting_nodata_or_nullvalues(self) -> Optional[str]:
         """
-
+        Default value for 'no data or null values' for new alerting rules.
         """
         return pulumi.get(self, "alerting_nodata_or_nullvalues")
 
@@ -3827,7 +3675,7 @@ class GrafanaGrafanaUserConfig(dict):
     @pulumi.getter(name="cookieSamesite")
     def cookie_samesite(self) -> Optional[str]:
         """
-
+        Cookie SameSite attribute: 'strict' prevents sending cookie for cross-site requests, effectively disabling direct linking from other sites to Grafana. 'lax' is the default value.
         """
         return pulumi.get(self, "cookie_samesite")
 
@@ -4054,7 +3902,7 @@ class GrafanaGrafanaUserConfig(dict):
     @pulumi.getter(name="userAutoAssignOrgRole")
     def user_auto_assign_org_role(self) -> Optional[str]:
         """
-
+        Set role for new signups. Defaults to Viewer.
         """
         return pulumi.get(self, "user_auto_assign_org_role")
 
@@ -4825,7 +4673,7 @@ class GrafanaGrafanaUserConfigExternalImageStorage(dict):
         """
         :param str access_key: S3 access key. Requires permissions to the S3 bucket for the s3:PutObject and s3:PutObjectAcl actions.
         :param str bucket_url: Bucket URL for S3.
-        :param str provider:
+        :param str provider: Provider type.
         :param str secret_key: S3 secret key.
         """
         pulumi.set(__self__, "access_key", access_key)
@@ -4853,7 +4701,7 @@ class GrafanaGrafanaUserConfigExternalImageStorage(dict):
     @pulumi.getter
     def provider(self) -> str:
         """
-
+        Provider type.
         """
         return pulumi.get(self, "provider")
 
@@ -4994,7 +4842,7 @@ class GrafanaGrafanaUserConfigSmtpServer(dict):
         :param str from_name: Name used in outgoing emails, defaults to Grafana.
         :param str password: Password for SMTP authentication.
         :param bool skip_verify: Skip verifying server certificate. Defaults to false.
-        :param str starttls_policy:
+        :param str starttls_policy: Either OpportunisticStartTLS, MandatoryStartTLS or NoStartTLS. Default is OpportunisticStartTLS.
         :param str username: Username for SMTP authentication.
         """
         pulumi.set(__self__, "from_address", from_address)
@@ -5063,7 +4911,7 @@ class GrafanaGrafanaUserConfigSmtpServer(dict):
     @pulumi.getter(name="starttlsPolicy")
     def starttls_policy(self) -> Optional[str]:
         """
-
+        Either OpportunisticStartTLS, MandatoryStartTLS or NoStartTLS. Default is OpportunisticStartTLS.
         """
         return pulumi.get(self, "starttls_policy")
 
@@ -5313,24 +5161,12 @@ class InfluxDbInfluxdb(dict):
         return super().get(key, default)
 
     def __init__(__self__, *,
-                 database_name: Optional[str] = None,
-                 password: Optional[str] = None,
-                 uris: Optional[Sequence[str]] = None,
-                 username: Optional[str] = None):
+                 database_name: Optional[str] = None):
         """
         :param str database_name: Name of the default InfluxDB database
-        :param str password: InfluxDB password
-        :param Sequence[str] uris: InfluxDB server URIs.
-        :param str username: InfluxDB username
         """
         if database_name is not None:
             pulumi.set(__self__, "database_name", database_name)
-        if password is not None:
-            pulumi.set(__self__, "password", password)
-        if uris is not None:
-            pulumi.set(__self__, "uris", uris)
-        if username is not None:
-            pulumi.set(__self__, "username", username)
 
     @property
     @pulumi.getter(name="databaseName")
@@ -5340,30 +5176,6 @@ class InfluxDbInfluxdb(dict):
         """
         return pulumi.get(self, "database_name")
 
-    @property
-    @pulumi.getter
-    def password(self) -> Optional[str]:
-        """
-        InfluxDB password
-        """
-        return pulumi.get(self, "password")
-
-    @property
-    @pulumi.getter
-    def uris(self) -> Optional[Sequence[str]]:
-        """
-        InfluxDB server URIs.
-        """
-        return pulumi.get(self, "uris")
-
-    @property
-    @pulumi.getter
-    def username(self) -> Optional[str]:
-        """
-        InfluxDB username
-        """
-        return pulumi.get(self, "username")
-
 
 @pulumi.output_type
 class InfluxDbInfluxdbUserConfig(dict):
@@ -6398,10 +6210,10 @@ class KafkaConnectKafkaConnectUserConfigKafkaConnect(dict):
                  scheduled_rebalance_max_delay_ms: Optional[int] = None,
                  session_timeout_ms: Optional[int] = None):
         """
-        :param str connector_client_config_override_policy:
-        :param str consumer_auto_offset_reset:
+        :param str connector_client_config_override_policy: Defines what client configurations can be overridden by the connector. Default is None.
+        :param str consumer_auto_offset_reset: What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
         :param int consumer_fetch_max_bytes: Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not a absolute maximum.
-        :param str consumer_isolation_level:
+        :param str consumer_isolation_level: Transaction read isolation level. read*uncommitted is the default, but read*committed can be used if consume-exactly-once behavior is desired.
         :param int consumer_max_partition_fetch_bytes: Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress.
         :param int consumer_max_poll_interval_ms: The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).
         :param int consumer_max_poll_records: The maximum number of records returned in a single call to poll() (defaults to 500).
@@ -6409,7 +6221,7 @@ class KafkaConnectKafkaConnectUserConfigKafkaConnect(dict):
         :param int offset_flush_timeout_ms: Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).
         :param int producer_batch_size: This setting gives the upper bound of the batch size to be sent. If there are fewer than this many bytes accumulated for this partition, the producer will 'linger' for the linger.ms time waiting for more records to show up. A batch size of zero will disable batching entirely (defaults to 16384).
         :param int producer_buffer_memory: The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432).
-        :param str producer_compression_type:
+        :param str producer_compression_type: Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
         :param int producer_linger_ms: This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting, however if there are fewer than this many bytes accumulated for this partition the producer will 'linger' for the specified time waiting for more records to show up. Defaults to 0.
         :param int producer_max_request_size: This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests.
         :param int scheduled_rebalance_max_delay_ms: The maximum delay that is scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. Defaults to 5 minutes.
@@ -6452,7 +6264,7 @@ class KafkaConnectKafkaConnectUserConfigKafkaConnect(dict):
     @pulumi.getter(name="connectorClientConfigOverridePolicy")
     def connector_client_config_override_policy(self) -> Optional[str]:
         """
-
+        Defines what client configurations can be overridden by the connector. Default is None.
         """
         return pulumi.get(self, "connector_client_config_override_policy")
 
@@ -6460,7 +6272,7 @@ class KafkaConnectKafkaConnectUserConfigKafkaConnect(dict):
     @pulumi.getter(name="consumerAutoOffsetReset")
     def consumer_auto_offset_reset(self) -> Optional[str]:
         """
-
+        What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
         """
         return pulumi.get(self, "consumer_auto_offset_reset")
 
@@ -6476,7 +6288,7 @@ class KafkaConnectKafkaConnectUserConfigKafkaConnect(dict):
     @pulumi.getter(name="consumerIsolationLevel")
     def consumer_isolation_level(self) -> Optional[str]:
         """
-
+        Transaction read isolation level. read*uncommitted is the default, but read*committed can be used if consume-exactly-once behavior is desired.
         """
         return pulumi.get(self, "consumer_isolation_level")
 
@@ -6540,7 +6352,7 @@ class KafkaConnectKafkaConnectUserConfigKafkaConnect(dict):
     @pulumi.getter(name="producerCompressionType")
     def producer_compression_type(self) -> Optional[str]:
         """
-
+        Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
         """
         return pulumi.get(self, "producer_compression_type")
 
@@ -6891,15 +6703,13 @@ class KafkaKafka(dict):
                  access_key: Optional[str] = None,
                  connect_uri: Optional[str] = None,
                  rest_uri: Optional[str] = None,
-                 schema_registry_uri: Optional[str] = None,
-                 uris: Optional[Sequence[str]] = None):
+                 schema_registry_uri: Optional[str] = None):
         """
         :param str access_cert: The Kafka client certificate.
         :param str access_key: The Kafka client certificate key.
         :param str connect_uri: The Kafka Connect URI.
         :param str rest_uri: The Kafka REST URI.
         :param str schema_registry_uri: The Schema Registry URI.
-        :param Sequence[str] uris: Kafka server URIs.
         """
         if access_cert is not None:
             pulumi.set(__self__, "access_cert", access_cert)
@@ -6911,8 +6721,6 @@ class KafkaKafka(dict):
             pulumi.set(__self__, "rest_uri", rest_uri)
         if schema_registry_uri is not None:
             pulumi.set(__self__, "schema_registry_uri", schema_registry_uri)
-        if uris is not None:
-            pulumi.set(__self__, "uris", uris)
 
     @property
     @pulumi.getter(name="accessCert")
@@ -6954,14 +6762,6 @@ class KafkaKafka(dict):
         """
         return pulumi.get(self, "schema_registry_uri")
 
-    @property
-    @pulumi.getter
-    def uris(self) -> Optional[Sequence[str]]:
-        """
-        Kafka server URIs.
-        """
-        return pulumi.get(self, "uris")
-
 
 @pulumi.output_type
 class KafkaKafkaUserConfig(dict):
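The three hunks above drop the `uris` argument, its `pulumi.set` call and its getter from `KafkaKafka`, so broker URIs are no longer reachable through this output type in the prerelease. Because the generated output classes subclass `dict`, a consumer can read the field defensively instead of assuming it exists; a minimal sketch (the helper name is ours, not the SDK's):

from typing import Optional, Sequence

from pulumi_aiven import outputs


def kafka_uris(info: outputs.KafkaKafka) -> Optional[Sequence[str]]:
    # KafkaKafka subclasses dict; the prerelease wheel no longer stores a
    # "uris" entry, so .get() returns None there instead of raising.
    return info.get("uris")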
@@ -7059,7 +6859,7 @@ class KafkaKafkaUserConfig(dict):
|
|
|
7059
6859
|
:param bool kafka_rest: Enable Kafka-REST service. The default value is `false`.
|
|
7060
6860
|
:param bool kafka_rest_authorization: Enable authorization in Kafka-REST service.
|
|
7061
6861
|
:param 'KafkaKafkaUserConfigKafkaRestConfigArgs' kafka_rest_config: Kafka REST configuration
|
|
7062
|
-
:param str kafka_version:
|
|
6862
|
+
:param str kafka_version: Kafka major version.
|
|
7063
6863
|
:param 'KafkaKafkaUserConfigPrivateAccessArgs' private_access: Allow access to selected service ports from private networks
|
|
7064
6864
|
:param 'KafkaKafkaUserConfigPrivatelinkAccessArgs' privatelink_access: Allow access to selected service components through Privatelink
|
|
7065
6865
|
:param 'KafkaKafkaUserConfigPublicAccessArgs' public_access: Allow access to selected service ports from the public Internet
|
|
@@ -7228,7 +7028,7 @@ class KafkaKafkaUserConfig(dict):
|
|
|
7228
7028
|
@pulumi.getter(name="kafkaVersion")
|
|
7229
7029
|
def kafka_version(self) -> Optional[str]:
|
|
7230
7030
|
"""
|
|
7231
|
-
|
|
7031
|
+
Kafka major version.
|
|
7232
7032
|
"""
|
|
7233
7033
|
return pulumi.get(self, "kafka_version")
|
|
7234
7034
|
|
|
@@ -7485,7 +7285,7 @@ class KafkaKafkaUserConfigKafka(dict):
|
|
|
7485
7285
|
transaction_state_log_segment_bytes: Optional[int] = None):
|
|
7486
7286
|
"""
|
|
7487
7287
|
:param bool auto_create_topics_enable: Enable auto creation of topics.
|
|
7488
|
-
:param str compression_type:
|
|
7288
|
+
:param str compression_type: Specify the final compression type for a given topic. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'uncompressed' which is equivalent to no compression; and 'producer' which means retain the original compression codec set by the producer.
|
|
7489
7289
|
:param int connections_max_idle_ms: Idle connections timeout: the server socket processor threads close the connections that idle for longer than this.
|
|
7490
7290
|
:param int default_replication_factor: Replication factor for autocreated topics.
|
|
7491
7291
|
:param int group_initial_rebalance_delay_ms: The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time.
|
|
@@ -7495,7 +7295,7 @@ class KafkaKafkaUserConfigKafka(dict):
|
|
|
7495
7295
|
:param int log_cleaner_max_compaction_lag_ms: The maximum amount of time message will remain uncompacted. Only applicable for logs that are being compacted.
|
|
7496
7296
|
:param float log_cleaner_min_cleanable_ratio: Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option.
|
|
7497
7297
|
:param int log_cleaner_min_compaction_lag_ms: The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.
|
|
7498
|
-
:param str log_cleanup_policy:
|
|
7298
|
+
:param str log_cleanup_policy: The default cleanup policy for segments beyond the retention window.
|
|
7499
7299
|
:param int log_flush_interval_messages: The number of messages accumulated on a log partition before messages are flushed to disk.
|
|
7500
7300
|
:param int log_flush_interval_ms: The maximum time in ms that a message in any topic is kept in memory before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used.
|
|
7501
7301
|
:param int log_index_interval_bytes: The interval with which Kafka adds an entry to the offset index.
|
|
@@ -7504,7 +7304,7 @@ class KafkaKafkaUserConfigKafka(dict):
|
|
|
7504
7304
|
:param int log_local_retention_ms: The number of milliseconds to keep the local log segments before it gets eligible for deletion. If set to -2, the value of log.retention.ms is used. The effective value should always be less than or equal to log.retention.ms value.
|
|
7505
7305
|
:param bool log_message_downconversion_enable: This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests.
|
|
7506
7306
|
:param int log_message_timestamp_difference_max_ms: The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message.
|
|
7507
|
-
:param str log_message_timestamp_type:
|
|
7307
|
+
:param str log_message_timestamp_type: Define whether the timestamp in the message is message create time or log append time.
|
|
7508
7308
|
:param bool log_preallocate: Should pre allocate file when create new segment?
|
|
7509
7309
|
:param int log_retention_bytes: The maximum size of the log before deleting messages.
|
|
7510
7310
|
:param int log_retention_hours: The number of hours to keep a log file before deleting it.
|
|
@@ -7636,7 +7436,7 @@ class KafkaKafkaUserConfigKafka(dict):
|
|
|
7636
7436
|
@pulumi.getter(name="compressionType")
|
|
7637
7437
|
def compression_type(self) -> Optional[str]:
|
|
7638
7438
|
"""
|
|
7639
|
-
|
|
7439
|
+
Specify the final compression type for a given topic. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'uncompressed' which is equivalent to no compression; and 'producer' which means retain the original compression codec set by the producer.
|
|
7640
7440
|
"""
|
|
7641
7441
|
return pulumi.get(self, "compression_type")
|
|
7642
7442
|
|
|
@@ -7716,7 +7516,7 @@ class KafkaKafkaUserConfigKafka(dict):
|
|
|
7716
7516
|
@pulumi.getter(name="logCleanupPolicy")
|
|
7717
7517
|
def log_cleanup_policy(self) -> Optional[str]:
|
|
7718
7518
|
"""
|
|
7719
|
-
|
|
7519
|
+
The default cleanup policy for segments beyond the retention window.
|
|
7720
7520
|
"""
|
|
7721
7521
|
return pulumi.get(self, "log_cleanup_policy")
|
|
7722
7522
|
|
|
@@ -7788,7 +7588,7 @@ class KafkaKafkaUserConfigKafka(dict):
|
|
|
7788
7588
|
@pulumi.getter(name="logMessageTimestampType")
|
|
7789
7589
|
def log_message_timestamp_type(self) -> Optional[str]:
|
|
7790
7590
|
"""
|
|
7791
|
-
|
|
7591
|
+
Define whether the timestamp in the message is message create time or log append time.
|
|
7792
7592
|
"""
|
|
7793
7593
|
return pulumi.get(self, "log_message_timestamp_type")
|
|
7794
7594
|
|
|
@@ -8091,10 +7891,10 @@ class KafkaKafkaUserConfigKafkaConnectConfig(dict):
|
|
|
8091
7891
|
scheduled_rebalance_max_delay_ms: Optional[int] = None,
|
|
8092
7892
|
session_timeout_ms: Optional[int] = None):
|
|
8093
7893
|
"""
|
|
8094
|
-
:param str connector_client_config_override_policy:
|
|
8095
|
-
:param str consumer_auto_offset_reset:
|
|
7894
|
+
:param str connector_client_config_override_policy: Defines what client configurations can be overridden by the connector. Default is None.
|
|
7895
|
+
:param str consumer_auto_offset_reset: What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
|
|
8096
7896
|
:param int consumer_fetch_max_bytes: Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not a absolute maximum.
|
|
8097
|
-
:param str consumer_isolation_level:
|
|
7897
|
+
:param str consumer_isolation_level: Transaction read isolation level. read*uncommitted is the default, but read*committed can be used if consume-exactly-once behavior is desired.
|
|
8098
7898
|
:param int consumer_max_partition_fetch_bytes: Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress.
|
|
8099
7899
|
:param int consumer_max_poll_interval_ms: The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).
|
|
8100
7900
|
:param int consumer_max_poll_records: The maximum number of records returned in a single call to poll() (defaults to 500).
|
|
@@ -8102,7 +7902,7 @@ class KafkaKafkaUserConfigKafkaConnectConfig(dict):
|
|
|
8102
7902
|
:param int offset_flush_timeout_ms: Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).
|
|
8103
7903
|
:param int producer_batch_size: This setting gives the upper bound of the batch size to be sent. If there are fewer than this many bytes accumulated for this partition, the producer will 'linger' for the linger.ms time waiting for more records to show up. A batch size of zero will disable batching entirely (defaults to 16384).
|
|
8104
7904
|
:param int producer_buffer_memory: The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432).
|
|
8105
|
-
:param str producer_compression_type:
|
|
7905
|
+
:param str producer_compression_type: Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
|
|
8106
7906
|
:param int producer_linger_ms: This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting, however if there are fewer than this many bytes accumulated for this partition the producer will 'linger' for the specified time waiting for more records to show up. Defaults to 0.
|
|
8107
7907
|
:param int producer_max_request_size: This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests.
|
|
8108
7908
|
:param int scheduled_rebalance_max_delay_ms: The maximum delay that is scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. Defaults to 5 minutes.
|
|
@@ -8145,7 +7945,7 @@ class KafkaKafkaUserConfigKafkaConnectConfig(dict):
|
|
|
8145
7945
|
@pulumi.getter(name="connectorClientConfigOverridePolicy")
|
|
8146
7946
|
def connector_client_config_override_policy(self) -> Optional[str]:
|
|
8147
7947
|
"""
|
|
8148
|
-
|
|
7948
|
+
Defines what client configurations can be overridden by the connector. Default is None.
|
|
8149
7949
|
"""
|
|
8150
7950
|
return pulumi.get(self, "connector_client_config_override_policy")
|
|
8151
7951
|
|
|
@@ -8153,7 +7953,7 @@ class KafkaKafkaUserConfigKafkaConnectConfig(dict):
|
|
|
8153
7953
|
@pulumi.getter(name="consumerAutoOffsetReset")
|
|
8154
7954
|
def consumer_auto_offset_reset(self) -> Optional[str]:
|
|
8155
7955
|
"""
|
|
8156
|
-
|
|
7956
|
+
What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
|
|
8157
7957
|
"""
|
|
8158
7958
|
return pulumi.get(self, "consumer_auto_offset_reset")
|
|
8159
7959
|
|
|
@@ -8169,7 +7969,7 @@ class KafkaKafkaUserConfigKafkaConnectConfig(dict):
|
|
|
8169
7969
|
@pulumi.getter(name="consumerIsolationLevel")
|
|
8170
7970
|
def consumer_isolation_level(self) -> Optional[str]:
|
|
8171
7971
|
"""
|
|
8172
|
-
|
|
7972
|
+
Transaction read isolation level. read*uncommitted is the default, but read*committed can be used if consume-exactly-once behavior is desired.
|
|
8173
7973
|
"""
|
|
8174
7974
|
return pulumi.get(self, "consumer_isolation_level")
|
|
8175
7975
|
|
|
@@ -8233,7 +8033,7 @@ class KafkaKafkaUserConfigKafkaConnectConfig(dict):
|
|
|
8233
8033
|
@pulumi.getter(name="producerCompressionType")
|
|
8234
8034
|
def producer_compression_type(self) -> Optional[str]:
|
|
8235
8035
|
"""
|
|
8236
|
-
|
|
8036
|
+
Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
|
|
8237
8037
|
"""
|
|
8238
8038
|
return pulumi.get(self, "producer_compression_type")
|
|
8239
8039
|
|
|
@@ -8321,11 +8121,11 @@ class KafkaKafkaUserConfigKafkaRestConfig(dict):
|
|
|
8321
8121
|
"""
|
|
8322
8122
|
:param bool consumer_enable_auto_commit: If true the consumer's offset will be periodically committed to Kafka in the background. The default value is `true`.
|
|
8323
8123
|
:param int consumer_request_max_bytes: Maximum number of bytes in unencoded message keys and values by a single request. The default value is `67108864`.
|
|
8324
|
-
:param int consumer_request_timeout_ms:
|
|
8325
|
-
:param str name_strategy:
|
|
8124
|
+
:param int consumer_request_timeout_ms: The maximum total time to wait for messages for a request if the maximum number of messages has not yet been reached. The default value is `1000`.
|
|
8125
|
+
:param str name_strategy: Name strategy to use when selecting subject for storing schemas. The default value is `topic_name`.
|
|
8326
8126
|
:param bool name_strategy_validation: If true, validate that given schema is registered under expected subject name by the used name strategy when producing messages. The default value is `true`.
|
|
8327
|
-
:param str producer_acks:
|
|
8328
|
-
:param str producer_compression_type:
|
|
8127
|
+
:param str producer_acks: The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to 'all' or '-1', the leader will wait for the full set of in-sync replicas to acknowledge the record. The default value is `1`.
|
|
8128
|
+
:param str producer_compression_type: Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
|
|
8329
8129
|
:param int producer_linger_ms: Wait for up to the given delay to allow batching records together. The default value is `0`.
|
|
8330
8130
|
:param int producer_max_request_size: The maximum size of a request in bytes. Note that Kafka broker can also cap the record batch size. The default value is `1048576`.
|
|
8331
8131
|
:param int simpleconsumer_pool_size_max: Maximum number of SimpleConsumers that can be instantiated per broker. The default value is `25`.
|
|
@@ -8371,7 +8171,7 @@ class KafkaKafkaUserConfigKafkaRestConfig(dict):
|
|
|
8371
8171
|
@pulumi.getter(name="consumerRequestTimeoutMs")
|
|
8372
8172
|
def consumer_request_timeout_ms(self) -> Optional[int]:
|
|
8373
8173
|
"""
|
|
8374
|
-
|
|
8174
|
+
The maximum total time to wait for messages for a request if the maximum number of messages has not yet been reached. The default value is `1000`.
|
|
8375
8175
|
"""
|
|
8376
8176
|
return pulumi.get(self, "consumer_request_timeout_ms")
|
|
8377
8177
|
|
|
@@ -8379,7 +8179,7 @@ class KafkaKafkaUserConfigKafkaRestConfig(dict):
|
|
|
8379
8179
|
@pulumi.getter(name="nameStrategy")
|
|
8380
8180
|
def name_strategy(self) -> Optional[str]:
|
|
8381
8181
|
"""
|
|
8382
|
-
|
|
8182
|
+
Name strategy to use when selecting subject for storing schemas. The default value is `topic_name`.
|
|
8383
8183
|
"""
|
|
8384
8184
|
return pulumi.get(self, "name_strategy")
|
|
8385
8185
|
|
|
@@ -8395,7 +8195,7 @@ class KafkaKafkaUserConfigKafkaRestConfig(dict):
|
|
|
8395
8195
|
@pulumi.getter(name="producerAcks")
|
|
8396
8196
|
def producer_acks(self) -> Optional[str]:
|
|
8397
8197
|
"""
|
|
8398
|
-
|
|
8198
|
+
The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to 'all' or '-1', the leader will wait for the full set of in-sync replicas to acknowledge the record. The default value is `1`.
|
|
8399
8199
|
"""
|
|
8400
8200
|
return pulumi.get(self, "producer_acks")
|
|
8401
8201
|
|
|
@@ -8403,7 +8203,7 @@ class KafkaKafkaUserConfigKafkaRestConfig(dict):
|
|
|
8403
8203
|
@pulumi.getter(name="producerCompressionType")
|
|
8404
8204
|
def producer_compression_type(self) -> Optional[str]:
|
|
8405
8205
|
"""
|
|
8406
|
-
|
|
8206
|
+
Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
|
|
8407
8207
|
"""
|
|
8408
8208
|
return pulumi.get(self, "producer_compression_type")
|
|
8409
8209
|
|
|
@@ -10054,54 +9854,6 @@ class M3AggregatorComponent(dict):
         return pulumi.get(self, "usage")


-@pulumi.output_type
-class M3AggregatorM3aggregator(dict):
-    @staticmethod
-    def __key_warning(key: str):
-        suggest = None
-        if key == "aggregatorHttpUri":
-            suggest = "aggregator_http_uri"
-
-        if suggest:
-            pulumi.log.warn(f"Key '{key}' not found in M3AggregatorM3aggregator. Access the value via the '{suggest}' property getter instead.")
-
-    def __getitem__(self, key: str) -> Any:
-        M3AggregatorM3aggregator.__key_warning(key)
-        return super().__getitem__(key)
-
-    def get(self, key: str, default = None) -> Any:
-        M3AggregatorM3aggregator.__key_warning(key)
-        return super().get(key, default)
-
-    def __init__(__self__, *,
-                 aggregator_http_uri: Optional[str] = None,
-                 uris: Optional[Sequence[str]] = None):
-        """
-        :param str aggregator_http_uri: M3 Aggregator HTTP URI.
-        :param Sequence[str] uris: M3 Aggregator server URIs.
-        """
-        if aggregator_http_uri is not None:
-            pulumi.set(__self__, "aggregator_http_uri", aggregator_http_uri)
-        if uris is not None:
-            pulumi.set(__self__, "uris", uris)
-
-    @property
-    @pulumi.getter(name="aggregatorHttpUri")
-    def aggregator_http_uri(self) -> Optional[str]:
-        """
-        M3 Aggregator HTTP URI.
-        """
-        return pulumi.get(self, "aggregator_http_uri")
-
-    @property
-    @pulumi.getter
-    def uris(self) -> Optional[Sequence[str]]:
-        """
-        M3 Aggregator server URIs.
-        """
-        return pulumi.get(self, "uris")
-
-
 @pulumi.output_type
 class M3AggregatorM3aggregatorUserConfig(dict):
     @staticmethod
@@ -10149,8 +9901,8 @@ class M3AggregatorM3aggregatorUserConfig(dict):
         :param Sequence['M3AggregatorM3aggregatorUserConfigIpFilterObjectArgs'] ip_filter_objects: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'
         :param Sequence[str] ip_filter_strings: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
         :param Sequence[str] ip_filters: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
-        :param str m3_version:
-        :param str m3aggregator_version:
+        :param str m3_version: M3 major version (deprecated, use m3aggregator_version).
+        :param str m3aggregator_version: M3 major version (the minimum compatible version).
         :param bool service_log: Store logs for the service so that they are available in the HTTP API and console.
         :param bool static_ips: Use static public IP addresses.
         """
@@ -10210,7 +9962,7 @@ class M3AggregatorM3aggregatorUserConfig(dict):
     @pulumi.getter(name="m3Version")
     def m3_version(self) -> Optional[str]:
         """
-
+        M3 major version (deprecated, use m3aggregator_version).
         """
         return pulumi.get(self, "m3_version")

@@ -10218,7 +9970,7 @@ class M3AggregatorM3aggregatorUserConfig(dict):
     @pulumi.getter(name="m3aggregatorVersion")
     def m3aggregator_version(self) -> Optional[str]:
         """
-
+        M3 major version (the minimum compatible version).
         """
         return pulumi.get(self, "m3aggregator_version")

@@ -10486,110 +10238,6 @@ class M3DbComponent(dict):
         return pulumi.get(self, "usage")


-@pulumi.output_type
-class M3DbM3db(dict):
-    @staticmethod
-    def __key_warning(key: str):
-        suggest = None
-        if key == "httpClusterUri":
-            suggest = "http_cluster_uri"
-        elif key == "httpNodeUri":
-            suggest = "http_node_uri"
-        elif key == "influxdbUri":
-            suggest = "influxdb_uri"
-        elif key == "prometheusRemoteReadUri":
-            suggest = "prometheus_remote_read_uri"
-        elif key == "prometheusRemoteWriteUri":
-            suggest = "prometheus_remote_write_uri"
-
-        if suggest:
-            pulumi.log.warn(f"Key '{key}' not found in M3DbM3db. Access the value via the '{suggest}' property getter instead.")
-
-    def __getitem__(self, key: str) -> Any:
-        M3DbM3db.__key_warning(key)
-        return super().__getitem__(key)
-
-    def get(self, key: str, default = None) -> Any:
-        M3DbM3db.__key_warning(key)
-        return super().get(key, default)
-
-    def __init__(__self__, *,
-                 http_cluster_uri: Optional[str] = None,
-                 http_node_uri: Optional[str] = None,
-                 influxdb_uri: Optional[str] = None,
-                 prometheus_remote_read_uri: Optional[str] = None,
-                 prometheus_remote_write_uri: Optional[str] = None,
-                 uris: Optional[Sequence[str]] = None):
-        """
-        :param str http_cluster_uri: M3DB cluster URI.
-        :param str http_node_uri: M3DB node URI.
-        :param str influxdb_uri: InfluxDB URI.
-        :param str prometheus_remote_read_uri: Prometheus remote read URI.
-        :param str prometheus_remote_write_uri: Prometheus remote write URI.
-        :param Sequence[str] uris: M3DB server URIs.
-        """
-        if http_cluster_uri is not None:
-            pulumi.set(__self__, "http_cluster_uri", http_cluster_uri)
-        if http_node_uri is not None:
-            pulumi.set(__self__, "http_node_uri", http_node_uri)
-        if influxdb_uri is not None:
-            pulumi.set(__self__, "influxdb_uri", influxdb_uri)
-        if prometheus_remote_read_uri is not None:
-            pulumi.set(__self__, "prometheus_remote_read_uri", prometheus_remote_read_uri)
-        if prometheus_remote_write_uri is not None:
-            pulumi.set(__self__, "prometheus_remote_write_uri", prometheus_remote_write_uri)
-        if uris is not None:
-            pulumi.set(__self__, "uris", uris)
-
-    @property
-    @pulumi.getter(name="httpClusterUri")
-    def http_cluster_uri(self) -> Optional[str]:
-        """
-        M3DB cluster URI.
-        """
-        return pulumi.get(self, "http_cluster_uri")
-
-    @property
-    @pulumi.getter(name="httpNodeUri")
-    def http_node_uri(self) -> Optional[str]:
-        """
-        M3DB node URI.
-        """
-        return pulumi.get(self, "http_node_uri")
-
-    @property
-    @pulumi.getter(name="influxdbUri")
-    def influxdb_uri(self) -> Optional[str]:
-        """
-        InfluxDB URI.
-        """
-        return pulumi.get(self, "influxdb_uri")
-
-    @property
-    @pulumi.getter(name="prometheusRemoteReadUri")
-    def prometheus_remote_read_uri(self) -> Optional[str]:
-        """
-        Prometheus remote read URI.
-        """
-        return pulumi.get(self, "prometheus_remote_read_uri")
-
-    @property
-    @pulumi.getter(name="prometheusRemoteWriteUri")
-    def prometheus_remote_write_uri(self) -> Optional[str]:
-        """
-        Prometheus remote write URI.
-        """
-        return pulumi.get(self, "prometheus_remote_write_uri")
-
-    @property
-    @pulumi.getter
-    def uris(self) -> Optional[Sequence[str]]:
-        """
-        M3DB server URIs.
-        """
-        return pulumi.get(self, "uris")
-
-
 @pulumi.output_type
 class M3DbM3dbUserConfig(dict):
     @staticmethod
@@ -10662,9 +10310,9 @@ class M3DbM3dbUserConfig(dict):
         :param Sequence[str] ip_filters: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
         :param 'M3DbM3dbUserConfigLimitsArgs' limits: M3 limits
         :param 'M3DbM3dbUserConfigM3Args' m3: M3 specific configuration options
-        :param str m3_version:
+        :param str m3_version: M3 major version (deprecated, use m3db_version).
         :param bool m3coordinator_enable_graphite_carbon_ingest: Enables access to Graphite Carbon plaintext metrics ingestion. It can be enabled only for services inside VPCs. The metrics are written to aggregated namespaces only.
-        :param str m3db_version:
+        :param str m3db_version: M3 major version (the minimum compatible version).
         :param Sequence['M3DbM3dbUserConfigNamespaceArgs'] namespaces: List of M3 namespaces
         :param 'M3DbM3dbUserConfigPrivateAccessArgs' private_access: Allow access to selected service ports from private networks
         :param str project_to_fork_from: Name of another project to fork a service from. This has effect only when a new service is being created.
@@ -10774,7 +10422,7 @@ class M3DbM3dbUserConfig(dict):
     @pulumi.getter(name="m3Version")
     def m3_version(self) -> Optional[str]:
         """
-
+        M3 major version (deprecated, use m3db_version).
         """
         return pulumi.get(self, "m3_version")

@@ -10790,7 +10438,7 @@ class M3DbM3dbUserConfig(dict):
     @pulumi.getter(name="m3dbVersion")
     def m3db_version(self) -> Optional[str]:
         """
-
+        M3 major version (the minimum compatible version).
         """
         return pulumi.get(self, "m3db_version")

@@ -11090,7 +10738,7 @@ class M3DbM3dbUserConfigNamespace(dict):
                  resolution: Optional[str] = None):
         """
         :param str name: The name of the namespace.
-        :param str type:
+        :param str type: The type of aggregation (aggregated/unaggregated).
         :param 'M3DbM3dbUserConfigNamespaceOptionsArgs' options: Namespace options
         :param str resolution: The resolution for an aggregated namespace.
         """
@@ -11113,7 +10761,7 @@ class M3DbM3dbUserConfigNamespace(dict):
     @pulumi.getter
     def type(self) -> str:
         """
-
+        The type of aggregation (aggregated/unaggregated).
         """
         return pulumi.get(self, "type")

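The namespace `type` documented above distinguishes unaggregated from aggregated M3DB namespaces (the latter also carry a `resolution`). A hedged sketch of how these fields might be passed through `m3db_user_config`; the project, cloud, plan and namespace names are assumptions, not part of this diff:

import pulumi_aiven as aiven

# Illustrative sketch only: project, cloud_name, plan and namespace names are assumed placeholder values.
m3db = aiven.M3Db(
    "example-m3db",
    project="my-project",
    cloud_name="google-europe-west1",
    plan="business-8",
    service_name="example-m3db",
    m3db_user_config={
        "namespaces": [
            # An unaggregated namespace for raw datapoints.
            {"name": "default_unagg", "type": "unaggregated"},
            # An aggregated namespace downsampled to a 10 minute resolution.
            {"name": "agg_10m", "type": "aggregated", "resolution": "10m"},
        ],
    },
)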
@@ -11746,190 +11394,6 @@ class MySqlComponent(dict):
         return pulumi.get(self, "usage")


-@pulumi.output_type
-class MySqlMysql(dict):
-    @staticmethod
-    def __key_warning(key: str):
-        suggest = None
-        if key == "replicaUri":
-            suggest = "replica_uri"
-        elif key == "standbyUris":
-            suggest = "standby_uris"
-        elif key == "syncingUris":
-            suggest = "syncing_uris"
-
-        if suggest:
-            pulumi.log.warn(f"Key '{key}' not found in MySqlMysql. Access the value via the '{suggest}' property getter instead.")
-
-    def __getitem__(self, key: str) -> Any:
-        MySqlMysql.__key_warning(key)
-        return super().__getitem__(key)
-
-    def get(self, key: str, default = None) -> Any:
-        MySqlMysql.__key_warning(key)
-        return super().get(key, default)
-
-    def __init__(__self__, *,
-                 params: Optional[Sequence['outputs.MySqlMysqlParam']] = None,
-                 replica_uri: Optional[str] = None,
-                 standby_uris: Optional[Sequence[str]] = None,
-                 syncing_uris: Optional[Sequence[str]] = None,
-                 uris: Optional[Sequence[str]] = None):
-        """
-        :param Sequence['MySqlMysqlParamArgs'] params: MySQL connection parameters
-        :param str replica_uri: MySQL replica URI for services with a replica
-        :param Sequence[str] standby_uris: MySQL standby connection URIs
-        :param Sequence[str] syncing_uris: MySQL syncing connection URIs
-        :param Sequence[str] uris: MySQL master connection URIs
-        """
-        if params is not None:
-            pulumi.set(__self__, "params", params)
-        if replica_uri is not None:
-            pulumi.set(__self__, "replica_uri", replica_uri)
-        if standby_uris is not None:
-            pulumi.set(__self__, "standby_uris", standby_uris)
-        if syncing_uris is not None:
-            pulumi.set(__self__, "syncing_uris", syncing_uris)
-        if uris is not None:
-            pulumi.set(__self__, "uris", uris)
-
-    @property
-    @pulumi.getter
-    def params(self) -> Optional[Sequence['outputs.MySqlMysqlParam']]:
-        """
-        MySQL connection parameters
-        """
-        return pulumi.get(self, "params")
-
-    @property
-    @pulumi.getter(name="replicaUri")
-    def replica_uri(self) -> Optional[str]:
-        """
-        MySQL replica URI for services with a replica
-        """
-        return pulumi.get(self, "replica_uri")
-
-    @property
-    @pulumi.getter(name="standbyUris")
-    def standby_uris(self) -> Optional[Sequence[str]]:
-        """
-        MySQL standby connection URIs
-        """
-        return pulumi.get(self, "standby_uris")
-
-    @property
-    @pulumi.getter(name="syncingUris")
-    def syncing_uris(self) -> Optional[Sequence[str]]:
-        """
-        MySQL syncing connection URIs
-        """
-        return pulumi.get(self, "syncing_uris")
-
-    @property
-    @pulumi.getter
-    def uris(self) -> Optional[Sequence[str]]:
-        """
-        MySQL master connection URIs
-        """
-        return pulumi.get(self, "uris")
-
-
-@pulumi.output_type
-class MySqlMysqlParam(dict):
-    @staticmethod
-    def __key_warning(key: str):
-        suggest = None
-        if key == "databaseName":
-            suggest = "database_name"
-
-        if suggest:
-            pulumi.log.warn(f"Key '{key}' not found in MySqlMysqlParam. Access the value via the '{suggest}' property getter instead.")
-
-    def __getitem__(self, key: str) -> Any:
-        MySqlMysqlParam.__key_warning(key)
-        return super().__getitem__(key)
-
-    def get(self, key: str, default = None) -> Any:
-        MySqlMysqlParam.__key_warning(key)
-        return super().get(key, default)
-
-    def __init__(__self__, *,
-                 database_name: Optional[str] = None,
-                 host: Optional[str] = None,
-                 password: Optional[str] = None,
-                 port: Optional[int] = None,
-                 sslmode: Optional[str] = None,
-                 user: Optional[str] = None):
-        """
-        :param str database_name: Primary MySQL database name
-        :param str host: MySQL host IP or name
-        :param str password: MySQL admin user password
-        :param int port: MySQL port
-        :param str sslmode: MySQL sslmode setting (currently always "require")
-        :param str user: MySQL admin user name
-        """
-        if database_name is not None:
-            pulumi.set(__self__, "database_name", database_name)
-        if host is not None:
-            pulumi.set(__self__, "host", host)
-        if password is not None:
-            pulumi.set(__self__, "password", password)
-        if port is not None:
-            pulumi.set(__self__, "port", port)
-        if sslmode is not None:
-            pulumi.set(__self__, "sslmode", sslmode)
-        if user is not None:
-            pulumi.set(__self__, "user", user)
-
-    @property
-    @pulumi.getter(name="databaseName")
-    def database_name(self) -> Optional[str]:
-        """
-        Primary MySQL database name
-        """
-        return pulumi.get(self, "database_name")
-
-    @property
-    @pulumi.getter
-    def host(self) -> Optional[str]:
-        """
-        MySQL host IP or name
-        """
-        return pulumi.get(self, "host")
-
-    @property
-    @pulumi.getter
-    def password(self) -> Optional[str]:
-        """
-        MySQL admin user password
-        """
-        return pulumi.get(self, "password")
-
-    @property
-    @pulumi.getter
-    def port(self) -> Optional[int]:
-        """
-        MySQL port
-        """
-        return pulumi.get(self, "port")
-
-    @property
-    @pulumi.getter
-    def sslmode(self) -> Optional[str]:
-        """
-        MySQL sslmode setting (currently always "require")
-        """
-        return pulumi.get(self, "sslmode")
-
-    @property
-    @pulumi.getter
-    def user(self) -> Optional[str]:
-        """
-        MySQL admin user name
-        """
-        return pulumi.get(self, "user")
-
-
 @pulumi.output_type
 class MySqlMysqlUserConfig(dict):
     @staticmethod
@@ -12016,7 +11480,7 @@ class MySqlMysqlUserConfig(dict):
         :param Sequence[str] ip_filters: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
         :param 'MySqlMysqlUserConfigMigrationArgs' migration: Migrate data from existing server
         :param 'MySqlMysqlUserConfigMysqlArgs' mysql: mysql.conf configuration values
-        :param str mysql_version:
+        :param str mysql_version: MySQL major version.
         :param 'MySqlMysqlUserConfigPrivateAccessArgs' private_access: Allow access to selected service ports from private networks
         :param 'MySqlMysqlUserConfigPrivatelinkAccessArgs' privatelink_access: Allow access to selected service components through Privatelink
         :param str project_to_fork_from: Name of another project to fork a service from. This has effect only when a new service is being created.
@@ -12162,7 +11626,7 @@ class MySqlMysqlUserConfig(dict):
     @pulumi.getter(name="mysqlVersion")
     def mysql_version(self) -> Optional[str]:
         """
-
+        MySQL major version.
         """
         return pulumi.get(self, "mysql_version")

@@ -12294,7 +11758,7 @@ class MySqlMysqlUserConfigMigration(dict):
         :param int port: Port number of the server where to migrate data from.
         :param str dbname: Database name for bootstrapping the initial connection.
         :param str ignore_dbs: Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment).
-        :param str method:
+        :param str method: The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
         :param str password: Password for authentication with the server where to migrate data from.
         :param bool ssl: The server where to migrate data from is secured with SSL. The default value is `true`.
         :param str username: User name for authentication with the server where to migrate data from.
@@ -12350,7 +11814,7 @@ class MySqlMysqlUserConfigMigration(dict):
     @pulumi.getter
     def method(self) -> Optional[str]:
         """
-
+        The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
         """
         return pulumi.get(self, "method")

@@ -12505,7 +11969,7 @@ class MySqlMysqlUserConfigMysql(dict):
         :param int innodb_thread_concurrency: Defines the maximum number of threads permitted inside of InnoDB. Default is 0 (infinite concurrency - no limit).
         :param int innodb_write_io_threads: The number of I/O threads for write operations in InnoDB. Default is 4. Changing this parameter will lead to a restart of the MySQL service.
         :param int interactive_timeout: The number of seconds the server waits for activity on an interactive connection before closing it.
-        :param str internal_tmp_mem_storage_engine:
+        :param str internal_tmp_mem_storage_engine: The storage engine for in-memory internal temporary tables.
         :param float long_query_time: The slow*query*logs work as SQL statements that take more than long*query*time seconds to execute. Default is 10s.
         :param int max_allowed_packet: Size of the largest message in bytes that can be received by the server. Default is 67108864 (64M).
         :param int max_heap_table_size: Limits the size of internal in-memory tables. Also set tmp*table*size. Default is 16777216 (16M).
@@ -12720,7 +12184,7 @@ class MySqlMysqlUserConfigMysql(dict):
     @pulumi.getter(name="internalTmpMemStorageEngine")
     def internal_tmp_mem_storage_engine(self) -> Optional[str]:
         """
-
+        The storage engine for in-memory internal temporary tables.
         """
         return pulumi.get(self, "internal_tmp_mem_storage_engine")

@@ -13172,9 +12636,7 @@ class OpenSearchOpensearch(dict):
     @staticmethod
     def __key_warning(key: str):
         suggest = None
-        if key == "kibanaUri":
-            suggest = "kibana_uri"
-        elif key == "opensearchDashboardsUri":
+        if key == "opensearchDashboardsUri":
             suggest = "opensearch_dashboards_uri"

         if suggest:
@@ -13189,36 +12651,12 @@ class OpenSearchOpensearch(dict):
         return super().get(key, default)

     def __init__(__self__, *,
-                 kibana_uri: Optional[str] = None,
-                 opensearch_dashboards_uri: Optional[str] = None,
-                 password: Optional[str] = None,
-                 uris: Optional[Sequence[str]] = None,
-                 username: Optional[str] = None):
+                 opensearch_dashboards_uri: Optional[str] = None):
         """
-        :param str kibana_uri: URI for Kibana dashboard frontend
         :param str opensearch_dashboards_uri: URI for OpenSearch dashboard frontend
-        :param str password: OpenSearch password
-        :param Sequence[str] uris: OpenSearch server URIs.
-        :param str username: OpenSearch username
         """
-        if kibana_uri is not None:
-            pulumi.set(__self__, "kibana_uri", kibana_uri)
         if opensearch_dashboards_uri is not None:
             pulumi.set(__self__, "opensearch_dashboards_uri", opensearch_dashboards_uri)
-        if password is not None:
-            pulumi.set(__self__, "password", password)
-        if uris is not None:
-            pulumi.set(__self__, "uris", uris)
-        if username is not None:
-            pulumi.set(__self__, "username", username)
-
-    @property
-    @pulumi.getter(name="kibanaUri")
-    def kibana_uri(self) -> Optional[str]:
-        """
-        URI for Kibana dashboard frontend
-        """
-        return pulumi.get(self, "kibana_uri")

     @property
     @pulumi.getter(name="opensearchDashboardsUri")
@@ -13228,30 +12666,6 @@ class OpenSearchOpensearch(dict):
         """
         return pulumi.get(self, "opensearch_dashboards_uri")

-    @property
-    @pulumi.getter
-    def password(self) -> Optional[str]:
-        """
-        OpenSearch password
-        """
-        return pulumi.get(self, "password")
-
-    @property
-    @pulumi.getter
-    def uris(self) -> Optional[Sequence[str]]:
-        """
-        OpenSearch server URIs.
-        """
-        return pulumi.get(self, "uris")
-
-    @property
-    @pulumi.getter
-    def username(self) -> Optional[str]:
-        """
-        OpenSearch username
-        """
-        return pulumi.get(self, "username")
-

 @pulumi.output_type
 class OpenSearchOpensearchUserConfig(dict):
@@ -13348,7 +12762,7 @@ class OpenSearchOpensearchUserConfig(dict):
         :param 'OpenSearchOpensearchUserConfigOpenidArgs' openid: OpenSearch OpenID Connect Configuration
         :param 'OpenSearchOpensearchUserConfigOpensearchArgs' opensearch: OpenSearch settings
         :param 'OpenSearchOpensearchUserConfigOpensearchDashboardsArgs' opensearch_dashboards: OpenSearch Dashboards settings
-        :param str opensearch_version:
+        :param str opensearch_version: OpenSearch major version.
         :param 'OpenSearchOpensearchUserConfigPrivateAccessArgs' private_access: Allow access to selected service ports from private networks
         :param 'OpenSearchOpensearchUserConfigPrivatelinkAccessArgs' privatelink_access: Allow access to selected service components through Privatelink
         :param str project_to_fork_from: Name of another project to fork a service from. This has effect only when a new service is being created.
@@ -13517,7 +12931,7 @@ class OpenSearchOpensearchUserConfig(dict):
     @pulumi.getter(name="opensearchVersion")
     def opensearch_version(self) -> Optional[str]:
         """
-
+        OpenSearch major version.
         """
         return pulumi.get(self, "opensearch_version")

@@ -13622,7 +13036,7 @@ class OpenSearchOpensearchUserConfigIndexPattern(dict):
         """
         :param int max_index_count: Maximum number of indexes to keep.
         :param str pattern: fnmatch pattern.
-        :param str sorting_algorithm:
+        :param str sorting_algorithm: Deletion sorting algorithm. The default value is `creation_date`.
         """
         pulumi.set(__self__, "max_index_count", max_index_count)
         pulumi.set(__self__, "pattern", pattern)
@@ -13649,7 +13063,7 @@ class OpenSearchOpensearchUserConfigIndexPattern(dict):
     @pulumi.getter(name="sortingAlgorithm")
     def sorting_algorithm(self) -> Optional[str]:
         """
-
+        Deletion sorting algorithm. The default value is `creation_date`.
         """
         return pulumi.get(self, "sorting_algorithm")

@@ -14623,12 +14037,12 @@ class OpenSearchOpensearchUserConfigOpensearchAuthFailureListenersInternalAuthen
                  type: Optional[str] = None):
         """
         :param int allowed_tries: The number of login attempts allowed before login is blocked.
-        :param str authentication_backend:
+        :param str authentication_backend: internal*authentication*backend*limiting.authentication*backend.
         :param int block_expiry_seconds: The duration of time that login remains blocked after a failed login.
         :param int max_blocked_clients: internal*authentication*backend*limiting.max*blocked_clients.
         :param int max_tracked_clients: The maximum number of tracked IP addresses that have failed login.
        :param int time_window_seconds: The window of time in which the value for `allowed_tries` is enforced.
-        :param str type:
+        :param str type: internal*authentication*backend_limiting.type.
         """
         if allowed_tries is not None:
             pulumi.set(__self__, "allowed_tries", allowed_tries)
@@ -14657,7 +14071,7 @@ class OpenSearchOpensearchUserConfigOpensearchAuthFailureListenersInternalAuthen
     @pulumi.getter(name="authenticationBackend")
     def authentication_backend(self) -> Optional[str]:
         """
-
+        internal*authentication*backend*limiting.authentication*backend.
         """
         return pulumi.get(self, "authentication_backend")

@@ -14697,7 +14111,7 @@ class OpenSearchOpensearchUserConfigOpensearchAuthFailureListenersInternalAuthen
     @pulumi.getter
     def type(self) -> Optional[str]:
         """
-
+        internal*authentication*backend_limiting.type.
         """
         return pulumi.get(self, "type")

@@ -14742,7 +14156,7 @@ class OpenSearchOpensearchUserConfigOpensearchAuthFailureListenersIpRateLimiting
         :param int max_blocked_clients: The maximum number of blocked IP addresses.
         :param int max_tracked_clients: The maximum number of tracked IP addresses that have failed login.
         :param int time_window_seconds: The window of time in which the value for `allowed_tries` is enforced.
-        :param str type:
+        :param str type: The type of rate limiting.
         """
         if allowed_tries is not None:
             pulumi.set(__self__, "allowed_tries", allowed_tries)
@@ -14801,7 +14215,7 @@ class OpenSearchOpensearchUserConfigOpensearchAuthFailureListenersIpRateLimiting
     @pulumi.getter
     def type(self) -> Optional[str]:
         """
-
+        The type of rate limiting.
         """
         return pulumi.get(self, "type")

@@ -15663,10 +15077,6 @@ class PgPg(dict):
             suggest = "max_connections"
         elif key == "replicaUri":
             suggest = "replica_uri"
-        elif key == "standbyUris":
-            suggest = "standby_uris"
-        elif key == "syncingUris":
-            suggest = "syncing_uris"

         if suggest:
             pulumi.log.warn(f"Key '{key}' not found in PgPg. Access the value via the '{suggest}' property getter instead.")
@@ -15680,46 +15090,32 @@ class PgPg(dict):
         return super().get(key, default)

     def __init__(__self__, *,
-                 bouncer: Optional[str] = None,
                  dbname: Optional[str] = None,
                  host: Optional[str] = None,
                  max_connections: Optional[int] = None,
-                 params: Optional[Sequence['outputs.PgPgParam']] = None,
                  password: Optional[str] = None,
                  port: Optional[int] = None,
                  replica_uri: Optional[str] = None,
                  sslmode: Optional[str] = None,
-                 standby_uris: Optional[Sequence[str]] = None,
-                 syncing_uris: Optional[Sequence[str]] = None,
                  uri: Optional[str] = None,
-                 uris: Optional[Sequence[str]] = None,
                  user: Optional[str] = None):
         """
-        :param str bouncer: Bouncer connection details
         :param str dbname: Primary PostgreSQL database name
         :param str host: PostgreSQL master node host IP or name
         :param int max_connections: Connection limit
-        :param Sequence['PgPgParamArgs'] params: PostgreSQL connection parameters
         :param str password: PostgreSQL admin user password
         :param int port: PostgreSQL port
         :param str replica_uri: PostgreSQL replica URI for services with a replica
         :param str sslmode: PostgreSQL sslmode setting (currently always "require")
-        :param Sequence[str] standby_uris: PostgreSQL standby connection URIs
-        :param Sequence[str] syncing_uris: PostgreSQL syncing connection URIs
         :param str uri: PostgreSQL master connection URI
-        :param Sequence[str] uris: PostgreSQL master connection URIs
         :param str user: PostgreSQL admin user name
         """
-        if bouncer is not None:
-            pulumi.set(__self__, "bouncer", bouncer)
         if dbname is not None:
             pulumi.set(__self__, "dbname", dbname)
         if host is not None:
             pulumi.set(__self__, "host", host)
         if max_connections is not None:
             pulumi.set(__self__, "max_connections", max_connections)
-        if params is not None:
-            pulumi.set(__self__, "params", params)
         if password is not None:
             pulumi.set(__self__, "password", password)
         if port is not None:
@@ -15728,25 +15124,11 @@ class PgPg(dict):
             pulumi.set(__self__, "replica_uri", replica_uri)
         if sslmode is not None:
             pulumi.set(__self__, "sslmode", sslmode)
-        if standby_uris is not None:
-            pulumi.set(__self__, "standby_uris", standby_uris)
-        if syncing_uris is not None:
-            pulumi.set(__self__, "syncing_uris", syncing_uris)
         if uri is not None:
             pulumi.set(__self__, "uri", uri)
-        if uris is not None:
-            pulumi.set(__self__, "uris", uris)
         if user is not None:
             pulumi.set(__self__, "user", user)

-    @property
-    @pulumi.getter
-    def bouncer(self) -> Optional[str]:
-        """
-        Bouncer connection details
-        """
-        return pulumi.get(self, "bouncer")
-
     @property
     @pulumi.getter
     def dbname(self) -> Optional[str]:
@@ -15771,14 +15153,6 @@ class PgPg(dict):
         """
         return pulumi.get(self, "max_connections")

-    @property
-    @pulumi.getter
-    def params(self) -> Optional[Sequence['outputs.PgPgParam']]:
-        """
-        PostgreSQL connection parameters
-        """
-        return pulumi.get(self, "params")
-
     @property
     @pulumi.getter
     def password(self) -> Optional[str]:
@@ -15811,22 +15185,6 @@ class PgPg(dict):
         """
         return pulumi.get(self, "sslmode")

-    @property
-    @pulumi.getter(name="standbyUris")
-    def standby_uris(self) -> Optional[Sequence[str]]:
-        """
-        PostgreSQL standby connection URIs
-        """
-        return pulumi.get(self, "standby_uris")
-
-    @property
-    @pulumi.getter(name="syncingUris")
-    def syncing_uris(self) -> Optional[Sequence[str]]:
-        """
-        PostgreSQL syncing connection URIs
-        """
-        return pulumi.get(self, "syncing_uris")
-
     @property
     @pulumi.getter
     def uri(self) -> Optional[str]:
@@ -15835,110 +15193,6 @@ class PgPg(dict):
         """
         return pulumi.get(self, "uri")

-    @property
-    @pulumi.getter
-    def uris(self) -> Optional[Sequence[str]]:
-        """
-        PostgreSQL master connection URIs
-        """
-        return pulumi.get(self, "uris")
-
-    @property
-    @pulumi.getter
-    def user(self) -> Optional[str]:
-        """
-        PostgreSQL admin user name
-        """
-        return pulumi.get(self, "user")
-
-
-@pulumi.output_type
-class PgPgParam(dict):
-    @staticmethod
-    def __key_warning(key: str):
-        suggest = None
-        if key == "databaseName":
-            suggest = "database_name"
-
-        if suggest:
-            pulumi.log.warn(f"Key '{key}' not found in PgPgParam. Access the value via the '{suggest}' property getter instead.")
-
-    def __getitem__(self, key: str) -> Any:
-        PgPgParam.__key_warning(key)
-        return super().__getitem__(key)
-
-    def get(self, key: str, default = None) -> Any:
-        PgPgParam.__key_warning(key)
-        return super().get(key, default)
-
-    def __init__(__self__, *,
-                 database_name: Optional[str] = None,
-                 host: Optional[str] = None,
-                 password: Optional[str] = None,
-                 port: Optional[int] = None,
-                 sslmode: Optional[str] = None,
-                 user: Optional[str] = None):
-        """
-        :param str database_name: Primary PostgreSQL database name
-        :param str host: PostgreSQL host IP or name
-        :param str password: PostgreSQL admin user password
-        :param int port: PostgreSQL port
-        :param str sslmode: PostgreSQL sslmode setting (currently always "require")
-        :param str user: PostgreSQL admin user name
-        """
-        if database_name is not None:
-            pulumi.set(__self__, "database_name", database_name)
-        if host is not None:
-            pulumi.set(__self__, "host", host)
-        if password is not None:
-            pulumi.set(__self__, "password", password)
-        if port is not None:
-            pulumi.set(__self__, "port", port)
-        if sslmode is not None:
-            pulumi.set(__self__, "sslmode", sslmode)
-        if user is not None:
-            pulumi.set(__self__, "user", user)
-
-    @property
-    @pulumi.getter(name="databaseName")
-    def database_name(self) -> Optional[str]:
-        """
-        Primary PostgreSQL database name
-        """
-        return pulumi.get(self, "database_name")
-
-    @property
-    @pulumi.getter
-    def host(self) -> Optional[str]:
-        """
-        PostgreSQL host IP or name
-        """
-        return pulumi.get(self, "host")
-
-    @property
-    @pulumi.getter
-    def password(self) -> Optional[str]:
-        """
-        PostgreSQL admin user password
-        """
-        return pulumi.get(self, "password")
-
-    @property
-    @pulumi.getter
-    def port(self) -> Optional[int]:
-        """
-        PostgreSQL port
-        """
-        return pulumi.get(self, "port")
-
-    @property
-    @pulumi.getter
-    def sslmode(self) -> Optional[str]:
-        """
-        PostgreSQL sslmode setting (currently always "require")
-        """
-        return pulumi.get(self, "sslmode")
-
     @property
     @pulumi.getter
     def user(self) -> Optional[str]:
@@ -16064,7 +15318,7 @@ class PgPgUserConfig(dict):
         :param bool pg_read_replica: Should the service which is being forked be a read replica (deprecated, use read_replica service integration instead).
         :param str pg_service_to_fork_from: Name of the PG Service from which to fork (deprecated, use service*to*fork_from). This has effect only when a new service is being created.
         :param bool pg_stat_monitor_enable: Enable the pg*stat*monitor extension. Enabling this extension will cause the cluster to be restarted.When this extension is enabled, pg*stat*statements results for utility commands are unreliable. The default value is `false`.
-        :param str pg_version:
+        :param str pg_version: PostgreSQL major version.
         :param 'PgPgUserConfigPgauditArgs' pgaudit: System-wide settings for the pgaudit extension
         :param 'PgPgUserConfigPgbouncerArgs' pgbouncer: PGBouncer connection pooling settings
        :param 'PgPgUserConfigPglookoutArgs' pglookout: System-wide settings for pglookout
@@ -16077,9 +15331,9 @@ class PgPgUserConfig(dict):
         :param str service_to_fork_from: Name of another service to fork from. This has effect only when a new service is being created.
         :param float shared_buffers_percentage: Percentage of total RAM that the database server uses for shared memory buffers. Valid range is 20-60 (float), which corresponds to 20% - 60%. This setting adjusts the shared_buffers configuration value.
         :param bool static_ips: Use static public IP addresses.
-        :param str synchronous_replication:
+        :param str synchronous_replication: Synchronous replication type. Note that the service plan also needs to support synchronous replication.
         :param 'PgPgUserConfigTimescaledbArgs' timescaledb: System-wide settings for the timescaledb extension
-        :param str variant:
+        :param str variant: Variant of the PostgreSQL service, may affect the features that are exposed by default.
         :param int work_mem: Sets the maximum amount of memory to be used by a query operation (such as a sort or hash table) before writing to temporary disk files, in MB. Default is 1MB + 0.075% of total RAM (up to 32MB).
         """
         if additional_backup_regions is not None:
@@ -16277,7 +15531,7 @@ class PgPgUserConfig(dict):
     @pulumi.getter(name="pgVersion")
     def pg_version(self) -> Optional[str]:
         """
-
+        PostgreSQL major version.
         """
         return pulumi.get(self, "pg_version")

@@ -16381,7 +15635,7 @@ class PgPgUserConfig(dict):
     @pulumi.getter(name="synchronousReplication")
     def synchronous_replication(self) -> Optional[str]:
         """
-
+        Synchronous replication type. Note that the service plan also needs to support synchronous replication.
         """
         return pulumi.get(self, "synchronous_replication")

@@ -16397,7 +15651,7 @@ class PgPgUserConfig(dict):
     @pulumi.getter
     def variant(self) -> Optional[str]:
         """
-
+        Variant of the PostgreSQL service, may affect the features that are exposed by default.
         """
         return pulumi.get(self, "variant")

@@ -16473,7 +15727,7 @@ class PgPgUserConfigMigration(dict):
         :param int port: Port number of the server where to migrate data from.
         :param str dbname: Database name for bootstrapping the initial connection.
         :param str ignore_dbs: Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment).
-        :param str method:
+        :param str method: The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
         :param str password: Password for authentication with the server where to migrate data from.
         :param bool ssl: The server where to migrate data from is secured with SSL. The default value is `true`.
         :param str username: User name for authentication with the server where to migrate data from.
@@ -16529,7 +15783,7 @@ class PgPgUserConfigMigration(dict):
     @pulumi.getter
     def method(self) -> Optional[str]:
         """
-
+        The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
         """
         return pulumi.get(self, "method")

@@ -16734,12 +15988,12 @@ class PgPgUserConfigPg(dict):
         :param int bgwriter_lru_maxpages: In each round, no more than this many buffers will be written by the background writer. Setting this to zero disables background writing. Default is 100.
         :param float bgwriter_lru_multiplier: The average recent need for new buffers is multiplied by bgwriter*lru*multiplier to arrive at an estimate of the number that will be needed during the next round, (up to bgwriter*lru*maxpages). 1.0 represents a “just in time” policy of writing exactly the number of buffers predicted to be needed. Larger values provide some cushion against spikes in demand, while smaller values intentionally leave writes to be done by server processes. The default is 2.0.
         :param int deadlock_timeout: This is the amount of time, in milliseconds, to wait on a lock before checking to see if there is a deadlock condition.
-        :param str default_toast_compression:
+        :param str default_toast_compression: Specifies the default TOAST compression method for values of compressible columns (the default is lz4).
         :param int idle_in_transaction_session_timeout: Time out sessions with open transactions after this number of milliseconds.
         :param bool jit: Controls system-wide use of Just-in-Time Compilation (JIT).
         :param int log_autovacuum_min_duration: Causes each action executed by autovacuum to be logged if it ran for at least the specified number of milliseconds. Setting this to zero logs all autovacuum actions. Minus-one (the default) disables logging autovacuum actions.
-        :param str log_error_verbosity:
-        :param str log_line_prefix:
+        :param str log_error_verbosity: Controls the amount of detail written in the server log for each message that is logged.
+        :param str log_line_prefix: Choose from one of the available log-formats. These can support popular log analyzers like pgbadger, pganalyze etc.
         :param int log_min_duration_statement: Log statements that take more than this number of milliseconds to run, -1 disables.
         :param int log_temp_files: Log statements for each temporary file created larger than this number of kilobytes, -1 disables.
         :param int max_files_per_process: PostgreSQL maximum number of files that can be open per process.
@@ -16760,13 +16014,13 @@ class PgPgUserConfigPg(dict):
         :param str pg_partman_bgw_dot_role: Controls which role to use for pg_partman's scheduled background tasks.
         :param bool pg_stat_monitor_dot_pgsm_enable_query_plan: Enables or disables query plan monitoring.
         :param int pg_stat_monitor_dot_pgsm_max_buckets: Sets the maximum number of buckets.
-        :param str pg_stat_statements_dot_track:
+        :param str pg_stat_statements_dot_track: Controls which statements are counted. Specify top to track top-level statements (those issued directly by clients), all to also track nested statements (such as statements invoked within functions), or none to disable statement statistics collection. The default value is top.
         :param int temp_file_limit: PostgreSQL temporary file limit in KiB, -1 for unlimited.
         :param str timezone: PostgreSQL service timezone.
         :param int track_activity_query_size: Specifies the number of bytes reserved to track the currently executing command for each active session.
-        :param str track_commit_timestamp:
-        :param str track_functions:
-        :param str track_io_timing:
+        :param str track_commit_timestamp: Record commit time of transactions.
+        :param str track_functions: Enables tracking of function call counts and time used.
+        :param str track_io_timing: Enables timing of database I/O calls. This parameter is off by default, because it will repeatedly query the operating system for the current time, which may cause significant overhead on some platforms.
         :param int wal_sender_timeout: Terminate replication connections that are inactive for longer than this amount of time, in milliseconds. Setting this value to zero disables the timeout.
         :param int wal_writer_delay: WAL flush interval in milliseconds. Note that setting this value to lower than the default 200ms may negatively impact performance.
         """
@@ -16985,7 +16239,7 @@ class PgPgUserConfigPg(dict):
     @pulumi.getter(name="defaultToastCompression")
     def default_toast_compression(self) -> Optional[str]:
         """
-
+        Specifies the default TOAST compression method for values of compressible columns (the default is lz4).
         """
         return pulumi.get(self, "default_toast_compression")

@@ -17017,7 +16271,7 @@ class PgPgUserConfigPg(dict):
     @pulumi.getter(name="logErrorVerbosity")
     def log_error_verbosity(self) -> Optional[str]:
         """
-
+        Controls the amount of detail written in the server log for each message that is logged.
         """
         return pulumi.get(self, "log_error_verbosity")

@@ -17025,7 +16279,7 @@ class PgPgUserConfigPg(dict):
     @pulumi.getter(name="logLinePrefix")
    def log_line_prefix(self) -> Optional[str]:
         """
-
+        Choose from one of the available log-formats. These can support popular log analyzers like pgbadger, pganalyze etc.
         """
         return pulumi.get(self, "log_line_prefix")

@@ -17193,7 +16447,7 @@ class PgPgUserConfigPg(dict):
     @pulumi.getter(name="pgStatStatementsDotTrack")
     def pg_stat_statements_dot_track(self) -> Optional[str]:
         """
-
+        Controls which statements are counted. Specify top to track top-level statements (those issued directly by clients), all to also track nested statements (such as statements invoked within functions), or none to disable statement statistics collection. The default value is top.
         """
         return pulumi.get(self, "pg_stat_statements_dot_track")

@@ -17225,7 +16479,7 @@ class PgPgUserConfigPg(dict):
     @pulumi.getter(name="trackCommitTimestamp")
     def track_commit_timestamp(self) -> Optional[str]:
         """
-
+        Record commit time of transactions.
         """
         return pulumi.get(self, "track_commit_timestamp")

@@ -17233,7 +16487,7 @@ class PgPgUserConfigPg(dict):
     @pulumi.getter(name="trackFunctions")
     def track_functions(self) -> Optional[str]:
         """
-
+        Enables tracking of function call counts and time used.
         """
         return pulumi.get(self, "track_functions")

@@ -17241,7 +16495,7 @@ class PgPgUserConfigPg(dict):
     @pulumi.getter(name="trackIoTiming")
     def track_io_timing(self) -> Optional[str]:
         """
-
+        Enables timing of database I/O calls. This parameter is off by default, because it will repeatedly query the operating system for the current time, which may cause significant overhead on some platforms.
         """
         return pulumi.get(self, "track_io_timing")

@@ -17427,7 +16681,7 @@ class PgPgUserConfigPgaudit(dict):
         :param bool feature_enabled: Enable pgaudit extension. When enabled, pgaudit extension will be automatically installed.Otherwise, extension will be uninstalled but auditing configurations will be preserved. The default value is `false`.
         :param bool log_catalog: Specifies that session logging should be enabled in the casewhere all relations in a statement are in pg_catalog. The default value is `true`.
         :param bool log_client: Specifies whether log messages will be visible to a client process such as psql. The default value is `false`.
-        :param str log_level:
+        :param str log_level: Specifies the log level that will be used for log entries. The default value is `log`.
         :param int log_max_string_length: Crop parameters representation and whole statements if they exceed this threshold. A (default) value of -1 disable the truncation. The default value is `-1`.
         :param bool log_nested_statements: This GUC allows to turn off logging nested statements, that is, statements that are executed as part of another ExecutorRun. The default value is `true`.
         :param bool log_parameter: Specifies that audit logging should include the parameters that were passed with the statement. The default value is `false`.
@@ -17496,7 +16750,7 @@ class PgPgUserConfigPgaudit(dict):
     @pulumi.getter(name="logLevel")
     def log_level(self) -> Optional[str]:
         """
-
+        Specifies the log level that will be used for log entries. The default value is `log`.
         """
         return pulumi.get(self, "log_level")

@@ -17629,7 +16883,7 @@ class PgPgUserConfigPgbouncer(dict):
         """
         :param int autodb_idle_timeout: If the automatically created database pools have been unused this many seconds, they are freed. If 0 then timeout is disabled. (seconds). The default value is `3600`.
         :param int autodb_max_db_connections: Do not allow more than this many server connections per database (regardless of user). Setting it to 0 means unlimited.
-        :param str autodb_pool_mode:
+        :param str autodb_pool_mode: PGBouncer pool mode. The default value is `transaction`.
         :param int autodb_pool_size: If non-zero then create automatically a pool of that size per user when a pool doesn't exist. The default value is `0`.
         :param Sequence[str] ignore_startup_parameters: List of parameters to ignore when given in startup packet.
         :param int min_pool_size: Add more server connections to pool if below this number. Improves behavior when usual load comes suddenly back after period of total inactivity. The value is effectively capped at the pool size. The default value is `0`.
@@ -17676,7 +16930,7 @@ class PgPgUserConfigPgbouncer(dict):
     @pulumi.getter(name="autodbPoolMode")
     def autodb_pool_mode(self) -> Optional[str]:
         """
-
+        PGBouncer pool mode. The default value is `transaction`.
         """
         return pulumi.get(self, "autodb_pool_mode")

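Several of the PostgreSQL settings whose descriptions were filled in above (for example `track_io_timing`, `pg_stat_statements_dot_track` and PGBouncer's `autodb_pool_mode`) are reached through `pg_user_config`. A hedged sketch follows; the project, cloud, plan and version values are assumed placeholders, not part of this diff:

import pulumi_aiven as aiven

# Illustrative sketch only: project, cloud_name, plan and pg_version are assumed placeholder values.
pg = aiven.Pg(
    "example-pg",
    project="my-project",
    cloud_name="google-europe-west1",
    plan="startup-4",
    service_name="example-pg",
    pg_user_config={
        "pg_version": "16",
        "pg": {
            # Off by default; timing every I/O call can add overhead on some platforms.
            "track_io_timing": "on",
            # top (default) / all / none, per the description above.
            "pg_stat_statements_dot_track": "top",
        },
        "pgbouncer": {
            # The documented default pool mode.
            "autodb_pool_mode": "transaction",
        },
    },
)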
@@ -18176,80 +17430,6 @@ class RedisComponent(dict):
|
|
|
18176
17430
|
return pulumi.get(self, "usage")
|
|
18177
17431
|
|
|
18178
17432
|
|
|
18179
|
-
@pulumi.output_type
|
|
18180
|
-
class RedisRedis(dict):
|
|
18181
|
-
@staticmethod
|
|
18182
|
-
def __key_warning(key: str):
|
|
18183
|
-
suggest = None
|
|
18184
|
-
if key == "replicaUri":
|
|
18185
|
-
suggest = "replica_uri"
|
|
18186
|
-
elif key == "slaveUris":
|
|
18187
|
-
suggest = "slave_uris"
|
|
18188
|
-
|
|
18189
|
-
if suggest:
|
|
18190
|
-
pulumi.log.warn(f"Key '{key}' not found in RedisRedis. Access the value via the '{suggest}' property getter instead.")
|
|
18191
|
-
|
|
18192
|
-
def __getitem__(self, key: str) -> Any:
|
|
18193
|
-
RedisRedis.__key_warning(key)
|
|
18194
|
-
return super().__getitem__(key)
|
|
18195
|
-
|
|
18196
|
-
def get(self, key: str, default = None) -> Any:
|
|
18197
|
-
RedisRedis.__key_warning(key)
|
|
18198
|
-
return super().get(key, default)
|
|
18199
|
-
|
|
18200
|
-
def __init__(__self__, *,
|
|
18201
|
-
password: Optional[str] = None,
|
|
18202
|
-
replica_uri: Optional[str] = None,
|
|
18203
|
-
slave_uris: Optional[Sequence[str]] = None,
|
|
18204
|
-
uris: Optional[Sequence[str]] = None):
|
|
18205
|
-
"""
|
|
18206
|
-
:param str password: Redis password.
|
|
18207
|
-
:param str replica_uri: Redis replica server URI.
|
|
18208
|
-
:param Sequence[str] slave_uris: Redis slave server URIs.
|
|
18209
|
-
:param Sequence[str] uris: Redis server URIs.
|
|
18210
|
-
"""
|
|
18211
|
-
if password is not None:
|
|
18212
|
-
pulumi.set(__self__, "password", password)
|
|
18213
|
-
if replica_uri is not None:
|
|
18214
|
-
pulumi.set(__self__, "replica_uri", replica_uri)
|
|
18215
|
-
if slave_uris is not None:
|
|
18216
|
-
pulumi.set(__self__, "slave_uris", slave_uris)
|
|
18217
|
-
if uris is not None:
|
|
18218
|
-
pulumi.set(__self__, "uris", uris)
|
|
18219
|
-
|
|
18220
|
-
@property
|
|
18221
|
-
@pulumi.getter
|
|
18222
|
-
def password(self) -> Optional[str]:
|
|
18223
|
-
"""
|
|
18224
|
-
Redis password.
|
|
18225
|
-
"""
|
|
18226
|
-
return pulumi.get(self, "password")
|
|
18227
|
-
|
|
18228
|
-
@property
|
|
18229
|
-
@pulumi.getter(name="replicaUri")
|
|
18230
|
-
def replica_uri(self) -> Optional[str]:
|
|
18231
|
-
"""
|
|
18232
|
-
Redis replica server URI.
|
|
18233
|
-
"""
|
|
18234
|
-
return pulumi.get(self, "replica_uri")
|
|
18235
|
-
|
|
18236
|
-
@property
|
|
18237
|
-
@pulumi.getter(name="slaveUris")
|
|
18238
|
-
def slave_uris(self) -> Optional[Sequence[str]]:
|
|
18239
|
-
"""
|
|
18240
|
-
Redis slave server URIs.
|
|
18241
|
-
"""
|
|
18242
|
-
return pulumi.get(self, "slave_uris")
|
|
18243
|
-
|
|
18244
|
-
@property
|
|
18245
|
-
@pulumi.getter
|
|
18246
|
-
def uris(self) -> Optional[Sequence[str]]:
|
|
18247
|
-
"""
|
|
18248
|
-
Redis server URIs.
|
|
18249
|
-
"""
|
|
18250
|
-
return pulumi.get(self, "uris")
|
|
18251
|
-
|
|
18252
|
-
|
|
18253
17433
|
@pulumi.output_type
|
|
18254
17434
|
class RedisRedisUserConfig(dict):
|
|
18255
17435
|
@staticmethod
|
|
@@ -18352,18 +17532,18 @@ class RedisRedisUserConfig(dict):
         :param str project_to_fork_from: Name of another project to fork a service from. This has effect only when a new service is being created.
         :param 'RedisRedisUserConfigPublicAccessArgs' public_access: Allow access to selected service ports from the public Internet
         :param str recovery_basebackup_name: Name of the basebackup to restore in forked service.
-        :param str redis_acl_channels_default:
+        :param str redis_acl_channels_default: Determines default pub/sub channels' ACL for new users if ACL is not supplied. When this option is not defined, all_channels is assumed to keep backward compatibility. This option doesn't affect Redis configuration acl-pubsub-default.
         :param int redis_io_threads: Set Redis IO thread count. Changing this will cause a restart of the Redis service.
         :param int redis_lfu_decay_time: LFU maxmemory-policy counter decay time in minutes. The default value is `1`.
         :param int redis_lfu_log_factor: Counter logarithm factor for volatile-lfu and allkeys-lfu maxmemory-policies. The default value is `10`.
-        :param str redis_maxmemory_policy:
+        :param str redis_maxmemory_policy: Redis maxmemory-policy. The default value is `noeviction`.
         :param str redis_notify_keyspace_events: Set notify-keyspace-events option.
         :param int redis_number_of_databases: Set number of Redis databases. Changing this will cause a restart of the Redis service.
-        :param str redis_persistence:
+        :param str redis_persistence: When persistence is 'rdb', Redis does RDB dumps each 10 minutes if any key is changed. Also RDB dumps are done according to backup schedule for backup purposes. When persistence is 'off', no RDB dumps and backups are done, so data can be lost at any moment if service is restarted for any reason, or if service is powered off. Also service can't be forked.
         :param int redis_pubsub_client_output_buffer_limit: Set output buffer limit for pub / sub clients in MB. The value is the hard limit, the soft limit is 1/4 of the hard limit. When setting the limit, be mindful of the available memory in the selected service plan.
         :param bool redis_ssl: Require SSL to access Redis. The default value is `true`.
         :param int redis_timeout: Redis idle connection timeout in seconds. The default value is `300`.
-        :param str redis_version:
+        :param str redis_version: Redis major version.
         :param bool service_log: Store logs for the service so that they are available in the HTTP API and console.
         :param str service_to_fork_from: Name of another service to fork from. This has effect only when a new service is being created.
         :param bool static_ips: Use static public IP addresses.
@@ -18506,7 +17686,7 @@ class RedisRedisUserConfig(dict):
     @pulumi.getter(name="redisAclChannelsDefault")
     def redis_acl_channels_default(self) -> Optional[str]:
         """
-
+        Determines default pub/sub channels' ACL for new users if ACL is not supplied. When this option is not defined, all_channels is assumed to keep backward compatibility. This option doesn't affect Redis configuration acl-pubsub-default.
         """
         return pulumi.get(self, "redis_acl_channels_default")
 
@@ -18538,7 +17718,7 @@ class RedisRedisUserConfig(dict):
     @pulumi.getter(name="redisMaxmemoryPolicy")
    def redis_maxmemory_policy(self) -> Optional[str]:
         """
-
+        Redis maxmemory-policy. The default value is `noeviction`.
         """
         return pulumi.get(self, "redis_maxmemory_policy")
 
@@ -18562,7 +17742,7 @@ class RedisRedisUserConfig(dict):
     @pulumi.getter(name="redisPersistence")
     def redis_persistence(self) -> Optional[str]:
         """
-
+        When persistence is 'rdb', Redis does RDB dumps each 10 minutes if any key is changed. Also RDB dumps are done according to backup schedule for backup purposes. When persistence is 'off', no RDB dumps and backups are done, so data can be lost at any moment if service is restarted for any reason, or if service is powered off. Also service can't be forked.
         """
         return pulumi.get(self, "redis_persistence")
 
@@ -18594,7 +17774,7 @@ class RedisRedisUserConfig(dict):
     @pulumi.getter(name="redisVersion")
     def redis_version(self) -> Optional[str]:
         """
-
+        Redis major version.
         """
         return pulumi.get(self, "redis_version")
 
@@ -18686,7 +17866,7 @@ class RedisRedisUserConfigMigration(dict):
         :param int port: Port number of the server where to migrate data from.
         :param str dbname: Database name for bootstrapping the initial connection.
         :param str ignore_dbs: Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment).
-        :param str method:
+        :param str method: The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
         :param str password: Password for authentication with the server where to migrate data from.
         :param bool ssl: The server where to migrate data from is secured with SSL. The default value is `true`.
         :param str username: User name for authentication with the server where to migrate data from.
@@ -18742,7 +17922,7 @@ class RedisRedisUserConfigMigration(dict):
     @pulumi.getter
     def method(self) -> Optional[str]:
         """
-
+        The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
         """
         return pulumi.get(self, "method")
 
@@ -19031,13 +18211,13 @@ class ServiceIntegrationClickhouseKafkaUserConfigTable(dict):
                 skip_broken_messages: Optional[int] = None):
        """
         :param Sequence['ServiceIntegrationClickhouseKafkaUserConfigTableColumnArgs'] columns: Table columns
-        :param str data_format:
+        :param str data_format: Message data format. The default value is `JSONEachRow`.
         :param str group_name: Kafka consumers group. The default value is `clickhouse`.
         :param str name: Name of the table.
         :param Sequence['ServiceIntegrationClickhouseKafkaUserConfigTableTopicArgs'] topics: Kafka topics
-        :param str auto_offset_reset:
-        :param str date_time_input_format:
-        :param str handle_error_mode:
+        :param str auto_offset_reset: Action to take when there is no initial offset in offset store or the desired offset is out of range. The default value is `earliest`.
+        :param str date_time_input_format: Method to read DateTime from text input formats. The default value is `basic`.
+        :param str handle_error_mode: How to handle errors for Kafka engine. The default value is `default`.
         :param int max_block_size: Number of row collected by poll(s) for flushing data from Kafka. The default value is `0`.
         :param int max_rows_per_message: The maximum number of rows produced in one kafka message for row-based formats. The default value is `1`.
         :param int num_consumers: The number of consumers per table per replica. The default value is `1`.
@@ -19078,7 +18258,7 @@ class ServiceIntegrationClickhouseKafkaUserConfigTable(dict):
     @pulumi.getter(name="dataFormat")
     def data_format(self) -> str:
         """
-
+        Message data format. The default value is `JSONEachRow`.
         """
         return pulumi.get(self, "data_format")
 
@@ -19110,7 +18290,7 @@ class ServiceIntegrationClickhouseKafkaUserConfigTable(dict):
     @pulumi.getter(name="autoOffsetReset")
     def auto_offset_reset(self) -> Optional[str]:
         """
-
+        Action to take when there is no initial offset in offset store or the desired offset is out of range. The default value is `earliest`.
         """
         return pulumi.get(self, "auto_offset_reset")
 
@@ -19118,7 +18298,7 @@ class ServiceIntegrationClickhouseKafkaUserConfigTable(dict):
     @pulumi.getter(name="dateTimeInputFormat")
     def date_time_input_format(self) -> Optional[str]:
         """
-
+        Method to read DateTime from text input formats. The default value is `basic`.
         """
         return pulumi.get(self, "date_time_input_format")
 
@@ -19126,7 +18306,7 @@ class ServiceIntegrationClickhouseKafkaUserConfigTable(dict):
     @pulumi.getter(name="handleErrorMode")
     def handle_error_mode(self) -> Optional[str]:
         """
-
+        How to handle errors for Kafka engine. The default value is `default`.
         """
         return pulumi.get(self, "handle_error_mode")
 
@@ -19275,8 +18455,6 @@ class ServiceIntegrationDatadogUserConfig(dict):
         suggest = None
         if key == "datadogDbmEnabled":
             suggest = "datadog_dbm_enabled"
-        elif key == "datadogPgbouncerEnabled":
-            suggest = "datadog_pgbouncer_enabled"
         elif key == "datadogTags":
             suggest = "datadog_tags"
         elif key == "excludeConsumerGroups":
@@ -19305,7 +18483,6 @@ class ServiceIntegrationDatadogUserConfig(dict):
 
     def __init__(__self__, *,
                  datadog_dbm_enabled: Optional[bool] = None,
-                 datadog_pgbouncer_enabled: Optional[bool] = None,
                  datadog_tags: Optional[Sequence['outputs.ServiceIntegrationDatadogUserConfigDatadogTag']] = None,
                  exclude_consumer_groups: Optional[Sequence[str]] = None,
                  exclude_topics: Optional[Sequence[str]] = None,
@@ -19317,7 +18494,6 @@ class ServiceIntegrationDatadogUserConfig(dict):
                  redis: Optional['outputs.ServiceIntegrationDatadogUserConfigRedis'] = None):
         """
         :param bool datadog_dbm_enabled: Enable Datadog Database Monitoring.
-        :param bool datadog_pgbouncer_enabled: Enable Datadog PgBouncer Metric Tracking.
         :param Sequence['ServiceIntegrationDatadogUserConfigDatadogTagArgs'] datadog_tags: Custom tags provided by user
         :param Sequence[str] exclude_consumer_groups: List of custom metrics.
         :param Sequence[str] exclude_topics: List of topics to exclude.
@@ -19330,8 +18506,6 @@ class ServiceIntegrationDatadogUserConfig(dict):
         """
         if datadog_dbm_enabled is not None:
             pulumi.set(__self__, "datadog_dbm_enabled", datadog_dbm_enabled)
-        if datadog_pgbouncer_enabled is not None:
-            pulumi.set(__self__, "datadog_pgbouncer_enabled", datadog_pgbouncer_enabled)
         if datadog_tags is not None:
             pulumi.set(__self__, "datadog_tags", datadog_tags)
         if exclude_consumer_groups is not None:
@@ -19359,14 +18533,6 @@ class ServiceIntegrationDatadogUserConfig(dict):
         """
         return pulumi.get(self, "datadog_dbm_enabled")
 
-    @property
-    @pulumi.getter(name="datadogPgbouncerEnabled")
-    def datadog_pgbouncer_enabled(self) -> Optional[bool]:
-        """
-        Enable Datadog PgBouncer Metric Tracking.
-        """
-        return pulumi.get(self, "datadog_pgbouncer_enabled")
-
     @property
     @pulumi.getter(name="datadogTags")
     def datadog_tags(self) -> Optional[Sequence['outputs.ServiceIntegrationDatadogUserConfigDatadogTag']]:
@@ -19628,7 +18794,7 @@ class ServiceIntegrationEndpointDatadogUserConfig(dict):
         :param int kafka_consumer_check_instances: Number of separate instances to fetch kafka consumer statistics with.
         :param int kafka_consumer_stats_timeout: Number of seconds that datadog will wait to get consumer statistics from brokers.
         :param int max_partition_contexts: Maximum number of partition contexts to send.
-        :param str site:
+        :param str site: Datadog intake site. Defaults to datadoghq.com.
         """
         pulumi.set(__self__, "datadog_api_key", datadog_api_key)
         if datadog_tags is not None:
@@ -19696,7 +18862,7 @@ class ServiceIntegrationEndpointDatadogUserConfig(dict):
     @pulumi.getter
     def site(self) -> Optional[str]:
         """
-
+        Datadog intake site. Defaults to datadoghq.com.
         """
         return pulumi.get(self, "site")
 
@@ -20114,14 +19280,14 @@ class ServiceIntegrationEndpointExternalKafkaUserConfig(dict):
                 ssl_endpoint_identification_algorithm: Optional[str] = None):
         """
         :param str bootstrap_servers: Bootstrap servers.
-        :param str security_protocol:
-        :param str sasl_mechanism:
+        :param str security_protocol: Security protocol.
+        :param str sasl_mechanism: SASL mechanism used for connections to the Kafka server.
         :param str sasl_plain_password: Password for SASL PLAIN mechanism in the Kafka server.
         :param str sasl_plain_username: Username for SASL PLAIN mechanism in the Kafka server.
         :param str ssl_ca_cert: PEM-encoded CA certificate.
         :param str ssl_client_cert: PEM-encoded client certificate.
         :param str ssl_client_key: PEM-encoded client key.
-        :param str ssl_endpoint_identification_algorithm:
+        :param str ssl_endpoint_identification_algorithm: The endpoint identification algorithm to validate server hostname using server certificate.
         """
         pulumi.set(__self__, "bootstrap_servers", bootstrap_servers)
         pulumi.set(__self__, "security_protocol", security_protocol)
@@ -20152,7 +19318,7 @@ class ServiceIntegrationEndpointExternalKafkaUserConfig(dict):
     @pulumi.getter(name="securityProtocol")
     def security_protocol(self) -> str:
         """
-
+        Security protocol.
         """
         return pulumi.get(self, "security_protocol")
 
@@ -20160,7 +19326,7 @@ class ServiceIntegrationEndpointExternalKafkaUserConfig(dict):
     @pulumi.getter(name="saslMechanism")
     def sasl_mechanism(self) -> Optional[str]:
         """
-
+        SASL mechanism used for connections to the Kafka server.
         """
         return pulumi.get(self, "sasl_mechanism")
 
@@ -20208,7 +19374,7 @@ class ServiceIntegrationEndpointExternalKafkaUserConfig(dict):
     @pulumi.getter(name="sslEndpointIdentificationAlgorithm")
     def ssl_endpoint_identification_algorithm(self) -> Optional[str]:
         """
-
+        The endpoint identification algorithm to validate server hostname using server certificate.
         """
         return pulumi.get(self, "ssl_endpoint_identification_algorithm")
 
@@ -20342,7 +19508,7 @@ class ServiceIntegrationEndpointExternalPostgresql(dict):
         :param str password: Password.
         :param str ssl_client_certificate: Client certificate.
         :param str ssl_client_key: Client key.
-        :param str ssl_mode:
+        :param str ssl_mode: SSL Mode. The default value is `verify-full`.
         :param str ssl_root_cert: SSL Root Cert.
         """
         pulumi.set(__self__, "host", host)
@@ -20421,7 +19587,7 @@ class ServiceIntegrationEndpointExternalPostgresql(dict):
     @pulumi.getter(name="sslMode")
     def ssl_mode(self) -> Optional[str]:
         """
-
+        SSL Mode. The default value is `verify-full`.
         """
         return pulumi.get(self, "ssl_mode")
 
@@ -20461,7 +19627,7 @@ class ServiceIntegrationEndpointExternalSchemaRegistryUserConfig(dict):
                 basic_auth_password: Optional[str] = None,
                 basic_auth_username: Optional[str] = None):
         """
-        :param str authentication:
+        :param str authentication: Authentication method.
         :param str url: Schema Registry URL.
         :param str basic_auth_password: Basic authentication password.
         :param str basic_auth_username: Basic authentication user name.
@@ -20477,7 +19643,7 @@ class ServiceIntegrationEndpointExternalSchemaRegistryUserConfig(dict):
     @pulumi.getter
     def authentication(self) -> str:
         """
-
+        Authentication method.
         """
         return pulumi.get(self, "authentication")
 
@@ -20637,7 +19803,7 @@ class ServiceIntegrationEndpointRsyslogUserConfig(dict):
                 max_message_size: Optional[int] = None,
                 sd: Optional[str] = None):
         """
-        :param str format:
+        :param str format: Message format. The default value is `rfc5424`.
         :param int port: Rsyslog server port. The default value is `514`.
         :param str server: Rsyslog server IP address or hostname.
         :param bool tls: Require TLS. The default value is `true`.
@@ -20669,7 +19835,7 @@ class ServiceIntegrationEndpointRsyslogUserConfig(dict):
     @pulumi.getter
     def format(self) -> str:
         """
-
+        Message format. The default value is `rfc5424`.
         """
         return pulumi.get(self, "format")
 
@@ -21215,7 +20381,7 @@ class ServiceIntegrationKafkaMirrormakerUserConfigKafkaMirrormaker(dict):
         :param int consumer_fetch_min_bytes: The minimum amount of data the server should return for a fetch request.
         :param int producer_batch_size: The batch size in bytes producer will attempt to collect before publishing to broker.
         :param int producer_buffer_memory: The amount of bytes producer can use for buffering data before publishing to broker.
-        :param str producer_compression_type:
+        :param str producer_compression_type: Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
         :param int producer_linger_ms: The linger time (ms) for waiting new data to arrive for publishing.
         :param int producer_max_request_size: The maximum request size in bytes.
         """
@@ -21260,7 +20426,7 @@ class ServiceIntegrationKafkaMirrormakerUserConfigKafkaMirrormaker(dict):
     @pulumi.getter(name="producerCompressionType")
     def producer_compression_type(self) -> Optional[str]:
         """
-
+        Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
         """
         return pulumi.get(self, "producer_compression_type")
 
@@ -22010,24 +21176,6 @@ class GetAccountAuthenticationSamlFieldMappingResult(dict):
         return pulumi.get(self, "real_name")
 
 
-@pulumi.output_type
-class GetCassandaCassandraResult(dict):
-    def __init__(__self__, *,
-                 uris: Sequence[str]):
-        """
-        :param Sequence[str] uris: Cassandra server URIs.
-        """
-        pulumi.set(__self__, "uris", uris)
-
-    @property
-    @pulumi.getter
-    def uris(self) -> Sequence[str]:
-        """
-        Cassandra server URIs.
-        """
-        return pulumi.get(self, "uris")
-
-
 @pulumi.output_type
 class GetCassandaCassandraUserConfigResult(dict):
     def __init__(__self__, *,
@@ -22052,7 +21200,7 @@ class GetCassandaCassandraUserConfigResult(dict):
         :param int backup_hour: The hour of day (in UTC) when backup for the service is started. New backup is only started if previous backup has already completed.
         :param int backup_minute: The minute of an hour when backup for the service is started. New backup is only started if previous backup has already completed.
         :param 'GetCassandaCassandraUserConfigCassandraArgs' cassandra: Cassandra configuration values
-        :param str cassandra_version:
+        :param str cassandra_version: Cassandra version.
         :param Sequence['GetCassandaCassandraUserConfigIpFilterObjectArgs'] ip_filter_objects: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'
         :param Sequence[str] ip_filter_strings: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
         :param Sequence[str] ip_filters: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
@@ -22137,7 +21285,7 @@ class GetCassandaCassandraUserConfigResult(dict):
     @pulumi.getter(name="cassandraVersion")
     def cassandra_version(self) -> Optional[str]:
         """
-
+        Cassandra version.
         """
         return pulumi.get(self, "cassandra_version")
 
@@ -22515,24 +21663,6 @@ class GetCassandaTechEmailResult(dict):
         return pulumi.get(self, "email")
 
 
-@pulumi.output_type
-class GetCassandraCassandraResult(dict):
-    def __init__(__self__, *,
-                 uris: Sequence[str]):
-        """
-        :param Sequence[str] uris: Cassandra server URIs.
-        """
-        pulumi.set(__self__, "uris", uris)
-
-    @property
-    @pulumi.getter
-    def uris(self) -> Sequence[str]:
-        """
-        Cassandra server URIs.
-        """
-        return pulumi.get(self, "uris")
-
-
 @pulumi.output_type
 class GetCassandraCassandraUserConfigResult(dict):
     def __init__(__self__, *,
@@ -22557,7 +21687,7 @@ class GetCassandraCassandraUserConfigResult(dict):
         :param int backup_hour: The hour of day (in UTC) when backup for the service is started. New backup is only started if previous backup has already completed.
         :param int backup_minute: The minute of an hour when backup for the service is started. New backup is only started if previous backup has already completed.
         :param 'GetCassandraCassandraUserConfigCassandraArgs' cassandra: Cassandra configuration values
-        :param str cassandra_version:
+        :param str cassandra_version: Cassandra version.
         :param Sequence['GetCassandraCassandraUserConfigIpFilterObjectArgs'] ip_filter_objects: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'
         :param Sequence[str] ip_filter_strings: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
         :param Sequence[str] ip_filters: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
@@ -22642,7 +21772,7 @@ class GetCassandraCassandraUserConfigResult(dict):
     @pulumi.getter(name="cassandraVersion")
     def cassandra_version(self) -> Optional[str]:
         """
-
+        Cassandra version.
         """
         return pulumi.get(self, "cassandra_version")
 
@@ -23020,24 +22150,6 @@ class GetCassandraTechEmailResult(dict):
         return pulumi.get(self, "email")
 
 
-@pulumi.output_type
-class GetClickhouseClickhouseResult(dict):
-    def __init__(__self__, *,
-                 uris: Sequence[str]):
-        """
-        :param Sequence[str] uris: Clickhouse server URIs.
-        """
-        pulumi.set(__self__, "uris", uris)
-
-    @property
-    @pulumi.getter
-    def uris(self) -> Sequence[str]:
-        """
-        Clickhouse server URIs.
-        """
-        return pulumi.get(self, "uris")
-
-
 @pulumi.output_type
 class GetClickhouseClickhouseUserConfigResult(dict):
     def __init__(__self__, *,
@@ -23644,57 +22756,6 @@ class GetDragonflyComponentResult(dict):
         return pulumi.get(self, "usage")
 
 
-@pulumi.output_type
-class GetDragonflyDragonflyResult(dict):
-    def __init__(__self__, *,
-                 password: str,
-                 replica_uri: str,
-                 slave_uris: Sequence[str],
-                 uris: Sequence[str]):
-        """
-        :param str password: Dragonfly password.
-        :param str replica_uri: Dragonfly replica server URI.
-        :param Sequence[str] slave_uris: Dragonfly slave server URIs.
-        :param Sequence[str] uris: Dragonfly server URIs.
-        """
-        pulumi.set(__self__, "password", password)
-        pulumi.set(__self__, "replica_uri", replica_uri)
-        pulumi.set(__self__, "slave_uris", slave_uris)
-        pulumi.set(__self__, "uris", uris)
-
-    @property
-    @pulumi.getter
-    def password(self) -> str:
-        """
-        Dragonfly password.
-        """
-        return pulumi.get(self, "password")
-
-    @property
-    @pulumi.getter(name="replicaUri")
-    def replica_uri(self) -> str:
-        """
-        Dragonfly replica server URI.
-        """
-        return pulumi.get(self, "replica_uri")
-
-    @property
-    @pulumi.getter(name="slaveUris")
-    def slave_uris(self) -> Sequence[str]:
-        """
-        Dragonfly slave server URIs.
-        """
-        return pulumi.get(self, "slave_uris")
-
-    @property
-    @pulumi.getter
-    def uris(self) -> Sequence[str]:
-        """
-        Dragonfly server URIs.
-        """
-        return pulumi.get(self, "uris")
-
-
 @pulumi.output_type
 class GetDragonflyDragonflyUserConfigResult(dict):
     def __init__(__self__, *,
@@ -23715,7 +22776,7 @@ class GetDragonflyDragonflyUserConfigResult(dict):
                 static_ips: Optional[bool] = None):
         """
         :param bool cache_mode: Evict entries when getting close to maxmemory limit. The default value is `false`.
-        :param str dragonfly_persistence:
+        :param str dragonfly_persistence: When persistence is 'rdb', Dragonfly does RDB dumps each 10 minutes. Also RDB dumps are done according to backup schedule for backup purposes. When persistence is 'off', no RDB dumps and backups are done, so data can be lost at any moment if service is restarted for any reason, or if service is powered off. Also service can't be forked.
         :param bool dragonfly_ssl: Require SSL to access Dragonfly. The default value is `true`.
         :param Sequence['GetDragonflyDragonflyUserConfigIpFilterObjectArgs'] ip_filter_objects: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'
         :param Sequence[str] ip_filter_strings: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
@@ -23773,7 +22834,7 @@ class GetDragonflyDragonflyUserConfigResult(dict):
     @pulumi.getter(name="dragonflyPersistence")
     def dragonfly_persistence(self) -> Optional[str]:
         """
-
+        When persistence is 'rdb', Dragonfly does RDB dumps each 10 minutes. Also RDB dumps are done according to backup schedule for backup purposes. When persistence is 'off', no RDB dumps and backups are done, so data can be lost at any moment if service is restarted for any reason, or if service is powered off. Also service can't be forked.
         """
         return pulumi.get(self, "dragonfly_persistence")
 
@@ -23931,7 +22992,7 @@ class GetDragonflyDragonflyUserConfigMigrationResult(dict):
         :param int port: Port number of the server where to migrate data from.
         :param str dbname: Database name for bootstrapping the initial connection.
         :param str ignore_dbs: Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment).
-        :param str method:
+        :param str method: The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
         :param str password: Password for authentication with the server where to migrate data from.
         :param bool ssl: The server where to migrate data from is secured with SSL. The default value is `true`.
         :param str username: User name for authentication with the server where to migrate data from.
@@ -23987,7 +23048,7 @@ class GetDragonflyDragonflyUserConfigMigrationResult(dict):
     @pulumi.getter
     def method(self) -> Optional[str]:
         """
-
+        The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
         """
         return pulumi.get(self, "method")
 
@@ -24372,7 +23433,7 @@ class GetFlinkFlinkUserConfigResult(dict):
                 static_ips: Optional[bool] = None):
         """
         :param str additional_backup_regions: Additional Cloud Regions for Backup Replication.
-        :param str flink_version:
+        :param str flink_version: Flink major version.
         :param Sequence['GetFlinkFlinkUserConfigIpFilterObjectArgs'] ip_filter_objects: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'
         :param Sequence[str] ip_filter_strings: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
         :param Sequence[str] ip_filters: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
@@ -24415,7 +23476,7 @@ class GetFlinkFlinkUserConfigResult(dict):
     @pulumi.getter(name="flinkVersion")
     def flink_version(self) -> Optional[str]:
         """
-
+        Flink major version.
         """
         return pulumi.get(self, "flink_version")
 
@@ -24711,24 +23772,6 @@ class GetGrafanaComponentResult(dict):
         return pulumi.get(self, "usage")
 
 
-@pulumi.output_type
-class GetGrafanaGrafanaResult(dict):
-    def __init__(__self__, *,
-                 uris: Sequence[str]):
-        """
-        :param Sequence[str] uris: Grafana server URIs.
-        """
-        pulumi.set(__self__, "uris", uris)
-
-    @property
-    @pulumi.getter
-    def uris(self) -> Sequence[str]:
-        """
-        Grafana server URIs.
-        """
-        return pulumi.get(self, "uris")
-
-
 @pulumi.output_type
 class GetGrafanaGrafanaUserConfigResult(dict):
     def __init__(__self__, *,
@@ -24777,9 +23820,9 @@ class GetGrafanaGrafanaUserConfigResult(dict):
         """
         :param str additional_backup_regions: Additional Cloud Regions for Backup Replication.
         :param bool alerting_enabled: Enable or disable Grafana legacy alerting functionality. This should not be enabled with unified_alerting_enabled.
-        :param str alerting_error_or_timeout:
+        :param str alerting_error_or_timeout: Default error or timeout setting for new alerting rules.
         :param int alerting_max_annotations_to_keep: Max number of alert annotations that Grafana stores. 0 (default) keeps all alert annotations.
-        :param str alerting_nodata_or_nullvalues:
+        :param str alerting_nodata_or_nullvalues: Default value for 'no data or null values' for new alerting rules.
         :param bool allow_embedding: Allow embedding Grafana dashboards with iframe/frame/object/embed tags. Disabled by default to limit impact of clickjacking.
         :param 'GetGrafanaGrafanaUserConfigAuthAzureadArgs' auth_azuread: Azure AD OAuth integration
         :param bool auth_basic_enabled: Enable or disable basic authentication form, used by Grafana built-in login.
@@ -24787,7 +23830,7 @@ class GetGrafanaGrafanaUserConfigResult(dict):
         :param 'GetGrafanaGrafanaUserConfigAuthGithubArgs' auth_github: Github Auth integration
         :param 'GetGrafanaGrafanaUserConfigAuthGitlabArgs' auth_gitlab: GitLab Auth integration
         :param 'GetGrafanaGrafanaUserConfigAuthGoogleArgs' auth_google: Google Auth integration
-        :param str cookie_samesite:
+        :param str cookie_samesite: Cookie SameSite attribute: 'strict' prevents sending cookie for cross-site requests, effectively disabling direct linking from other sites to Grafana. 'lax' is the default value.
         :param str custom_domain: Serve the web frontend using a custom CNAME pointing to the Aiven DNS name.
         :param bool dashboard_previews_enabled: This feature is new in Grafana 9 and is quite resource intensive. It may cause low-end plans to work more slowly while the dashboard previews are rendering.
         :param str dashboards_min_refresh_interval: Signed sequence of decimal numbers, followed by a unit suffix (ms, s, m, h, d), e.g. 30s, 1h.
@@ -24815,7 +23858,7 @@ class GetGrafanaGrafanaUserConfigResult(dict):
         :param bool static_ips: Use static public IP addresses.
         :param bool unified_alerting_enabled: Enable or disable Grafana unified alerting functionality. By default this is enabled and any legacy alerts will be migrated on upgrade to Grafana 9+. To stay on legacy alerting, set unified_alerting_enabled to false and alerting_enabled to true. See https://grafana.com/docs/grafana/latest/alerting/set-up/migrating-alerts/ for more details.
         :param bool user_auto_assign_org: Auto-assign new users on signup to main organization. Defaults to false.
-        :param str user_auto_assign_org_role:
+        :param str user_auto_assign_org_role: Set role for new signups. Defaults to Viewer.
         :param bool viewers_can_edit: Users with view-only permission can edit but not save dashboards.
         """
         if additional_backup_regions is not None:
@@ -24923,7 +23966,7 @@ class GetGrafanaGrafanaUserConfigResult(dict):
     @pulumi.getter(name="alertingErrorOrTimeout")
     def alerting_error_or_timeout(self) -> Optional[str]:
         """
-
+        Default error or timeout setting for new alerting rules.
         """
         return pulumi.get(self, "alerting_error_or_timeout")
 
@@ -24939,7 +23982,7 @@ class GetGrafanaGrafanaUserConfigResult(dict):
     @pulumi.getter(name="alertingNodataOrNullvalues")
     def alerting_nodata_or_nullvalues(self) -> Optional[str]:
         """
-
+        Default value for 'no data or null values' for new alerting rules.
         """
         return pulumi.get(self, "alerting_nodata_or_nullvalues")
 
@@ -25003,7 +24046,7 @@ class GetGrafanaGrafanaUserConfigResult(dict):
     @pulumi.getter(name="cookieSamesite")
     def cookie_samesite(self) -> Optional[str]:
         """
-
+        Cookie SameSite attribute: 'strict' prevents sending cookie for cross-site requests, effectively disabling direct linking from other sites to Grafana. 'lax' is the default value.
         """
         return pulumi.get(self, "cookie_samesite")
 
@@ -25230,7 +24273,7 @@ class GetGrafanaGrafanaUserConfigResult(dict):
     @pulumi.getter(name="userAutoAssignOrgRole")
     def user_auto_assign_org_role(self) -> Optional[str]:
         """
-
+        Set role for new signups. Defaults to Viewer.
         """
         return pulumi.get(self, "user_auto_assign_org_role")
 
@@ -25806,7 +24849,7 @@ class GetGrafanaGrafanaUserConfigExternalImageStorageResult(dict):
         """
         :param str access_key: S3 access key. Requires permissions to the S3 bucket for the s3:PutObject and s3:PutObjectAcl actions.
         :param str bucket_url: Bucket URL for S3.
-        :param str provider:
+        :param str provider: Provider type.
         :param str secret_key: S3 secret key.
         """
         pulumi.set(__self__, "access_key", access_key)
@@ -25834,7 +24877,7 @@ class GetGrafanaGrafanaUserConfigExternalImageStorageResult(dict):
     @pulumi.getter
     def provider(self) -> str:
         """
-
+        Provider type.
         """
         return pulumi.get(self, "provider")
 
@@ -25952,7 +24995,7 @@ class GetGrafanaGrafanaUserConfigSmtpServerResult(dict):
         :param str from_name: Name used in outgoing emails, defaults to Grafana.
         :param str password: Password for SMTP authentication.
         :param bool skip_verify: Skip verifying server certificate. Defaults to false.
-        :param str starttls_policy:
+        :param str starttls_policy: Either OpportunisticStartTLS, MandatoryStartTLS or NoStartTLS. Default is OpportunisticStartTLS.
         :param str username: Username for SMTP authentication.
         """
         pulumi.set(__self__, "from_address", from_address)
@@ -26021,7 +25064,7 @@ class GetGrafanaGrafanaUserConfigSmtpServerResult(dict):
     @pulumi.getter(name="starttlsPolicy")
     def starttls_policy(self) -> Optional[str]:
         """
-
+        Either OpportunisticStartTLS, MandatoryStartTLS or NoStartTLS. Default is OpportunisticStartTLS.
         """
         return pulumi.get(self, "starttls_policy")
 
@@ -26208,20 +25251,11 @@ class GetInfluxDbComponentResult(dict):
 @pulumi.output_type
 class GetInfluxDbInfluxdbResult(dict):
     def __init__(__self__, *,
-                 database_name: str,
-                 password: str,
-                 uris: Sequence[str],
-                 username: str):
+                 database_name: str):
         """
         :param str database_name: Name of the default InfluxDB database
-        :param str password: InfluxDB password
-        :param Sequence[str] uris: InfluxDB server URIs.
-        :param str username: InfluxDB username
         """
         pulumi.set(__self__, "database_name", database_name)
-        pulumi.set(__self__, "password", password)
-        pulumi.set(__self__, "uris", uris)
-        pulumi.set(__self__, "username", username)
 
     @property
     @pulumi.getter(name="databaseName")
@@ -26231,30 +25265,6 @@ class GetInfluxDbInfluxdbResult(dict):
         """
         return pulumi.get(self, "database_name")
 
-    @property
-    @pulumi.getter
-    def password(self) -> str:
-        """
-        InfluxDB password
-        """
-        return pulumi.get(self, "password")
-
-    @property
-    @pulumi.getter
-    def uris(self) -> Sequence[str]:
-        """
-        InfluxDB server URIs.
-        """
-        return pulumi.get(self, "uris")
-
-    @property
-    @pulumi.getter
-    def username(self) -> str:
-        """
-        InfluxDB username
-        """
-        return pulumi.get(self, "username")
-
 
 @pulumi.output_type
 class GetInfluxDbInfluxdbUserConfigResult(dict):
@@ -27064,10 +26074,10 @@ class GetKafkaConnectKafkaConnectUserConfigKafkaConnectResult(dict):
                 scheduled_rebalance_max_delay_ms: Optional[int] = None,
                 session_timeout_ms: Optional[int] = None):
         """
-        :param str connector_client_config_override_policy:
-        :param str consumer_auto_offset_reset:
+        :param str connector_client_config_override_policy: Defines what client configurations can be overridden by the connector. Default is None.
+        :param str consumer_auto_offset_reset: What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
         :param int consumer_fetch_max_bytes: Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not a absolute maximum.
-        :param str consumer_isolation_level:
+        :param str consumer_isolation_level: Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
         :param int consumer_max_partition_fetch_bytes: Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress.
         :param int consumer_max_poll_interval_ms: The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).
         :param int consumer_max_poll_records: The maximum number of records returned in a single call to poll() (defaults to 500).
@@ -27075,7 +26085,7 @@ class GetKafkaConnectKafkaConnectUserConfigKafkaConnectResult(dict):
         :param int offset_flush_timeout_ms: Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).
         :param int producer_batch_size: This setting gives the upper bound of the batch size to be sent. If there are fewer than this many bytes accumulated for this partition, the producer will 'linger' for the linger.ms time waiting for more records to show up. A batch size of zero will disable batching entirely (defaults to 16384).
         :param int producer_buffer_memory: The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432).
-        :param str producer_compression_type:
+        :param str producer_compression_type: Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
         :param int producer_linger_ms: This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting, however if there are fewer than this many bytes accumulated for this partition the producer will 'linger' for the specified time waiting for more records to show up. Defaults to 0.
         :param int producer_max_request_size: This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests.
         :param int scheduled_rebalance_max_delay_ms: The maximum delay that is scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. Defaults to 5 minutes.
@@ -27118,7 +26128,7 @@ class GetKafkaConnectKafkaConnectUserConfigKafkaConnectResult(dict):
     @pulumi.getter(name="connectorClientConfigOverridePolicy")
     def connector_client_config_override_policy(self) -> Optional[str]:
         """
-
+        Defines what client configurations can be overridden by the connector. Default is None.
         """
         return pulumi.get(self, "connector_client_config_override_policy")
 
@@ -27126,7 +26136,7 @@ class GetKafkaConnectKafkaConnectUserConfigKafkaConnectResult(dict):
     @pulumi.getter(name="consumerAutoOffsetReset")
     def consumer_auto_offset_reset(self) -> Optional[str]:
         """
-
+        What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
         """
         return pulumi.get(self, "consumer_auto_offset_reset")
 
@@ -27142,7 +26152,7 @@ class GetKafkaConnectKafkaConnectUserConfigKafkaConnectResult(dict):
     @pulumi.getter(name="consumerIsolationLevel")
     def consumer_isolation_level(self) -> Optional[str]:
         """
-
+        Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
         """
         return pulumi.get(self, "consumer_isolation_level")
 
@@ -27206,7 +26216,7 @@ class GetKafkaConnectKafkaConnectUserConfigKafkaConnectResult(dict):
     @pulumi.getter(name="producerCompressionType")
     def producer_compression_type(self) -> Optional[str]:
         """
-
+        Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
         """
         return pulumi.get(self, "producer_compression_type")
 
@@ -27460,22 +26470,19 @@ class GetKafkaKafkaResult(dict):
                 access_key: str,
                 connect_uri: str,
                 rest_uri: str,
-                 schema_registry_uri: str,
-                 uris: Sequence[str]):
+                 schema_registry_uri: str):
         """
         :param str access_cert: The Kafka client certificate.
         :param str access_key: The Kafka client certificate key.
         :param str connect_uri: The Kafka Connect URI.
         :param str rest_uri: The Kafka REST URI.
         :param str schema_registry_uri: The Schema Registry URI.
-        :param Sequence[str] uris: Kafka server URIs.
         """
         pulumi.set(__self__, "access_cert", access_cert)
         pulumi.set(__self__, "access_key", access_key)
         pulumi.set(__self__, "connect_uri", connect_uri)
         pulumi.set(__self__, "rest_uri", rest_uri)
         pulumi.set(__self__, "schema_registry_uri", schema_registry_uri)
-        pulumi.set(__self__, "uris", uris)
 
     @property
     @pulumi.getter(name="accessCert")
@@ -27517,14 +26524,6 @@ class GetKafkaKafkaResult(dict):
         """
         return pulumi.get(self, "schema_registry_uri")
 
-    @property
-    @pulumi.getter
-    def uris(self) -> Sequence[str]:
-        """
-        Kafka server URIs.
-        """
-        return pulumi.get(self, "uris")
-
 
 @pulumi.output_type
 class GetKafkaKafkaUserConfigResult(dict):
@@ -27565,7 +26564,7 @@ class GetKafkaKafkaUserConfigResult(dict):
|
|
|
27565
26564
|
:param bool kafka_rest: Enable Kafka-REST service. The default value is `false`.
|
|
27566
26565
|
:param bool kafka_rest_authorization: Enable authorization in Kafka-REST service.
|
|
27567
26566
|
:param 'GetKafkaKafkaUserConfigKafkaRestConfigArgs' kafka_rest_config: Kafka REST configuration
|
|
27568
|
-
:param str kafka_version:
|
|
26567
|
+
:param str kafka_version: Kafka major version.
|
|
27569
26568
|
:param 'GetKafkaKafkaUserConfigPrivateAccessArgs' private_access: Allow access to selected service ports from private networks
|
|
27570
26569
|
:param 'GetKafkaKafkaUserConfigPrivatelinkAccessArgs' privatelink_access: Allow access to selected service components through Privatelink
|
|
27571
26570
|
:param 'GetKafkaKafkaUserConfigPublicAccessArgs' public_access: Allow access to selected service ports from the public Internet
|
|
@@ -27734,7 +26733,7 @@ class GetKafkaKafkaUserConfigResult(dict):
 @pulumi.getter(name="kafkaVersion")
 def kafka_version(self) -> Optional[str]:
 """
-
+Kafka major version.
 """
 return pulumi.get(self, "kafka_version")

@@ -27884,7 +26883,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
 transaction_state_log_segment_bytes: Optional[int] = None):
 """
 :param bool auto_create_topics_enable: Enable auto creation of topics.
-:param str compression_type:
+:param str compression_type: Specify the final compression type for a given topic. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'uncompressed' which is equivalent to no compression; and 'producer' which means retain the original compression codec set by the producer.
 :param int connections_max_idle_ms: Idle connections timeout: the server socket processor threads close the connections that idle for longer than this.
 :param int default_replication_factor: Replication factor for autocreated topics.
 :param int group_initial_rebalance_delay_ms: The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time.
@@ -27894,7 +26893,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
 :param int log_cleaner_max_compaction_lag_ms: The maximum amount of time message will remain uncompacted. Only applicable for logs that are being compacted.
 :param float log_cleaner_min_cleanable_ratio: Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option.
 :param int log_cleaner_min_compaction_lag_ms: The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.
-:param str log_cleanup_policy:
+:param str log_cleanup_policy: The default cleanup policy for segments beyond the retention window.
 :param int log_flush_interval_messages: The number of messages accumulated on a log partition before messages are flushed to disk.
 :param int log_flush_interval_ms: The maximum time in ms that a message in any topic is kept in memory before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used.
 :param int log_index_interval_bytes: The interval with which Kafka adds an entry to the offset index.
@@ -27903,7 +26902,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
 :param int log_local_retention_ms: The number of milliseconds to keep the local log segments before it gets eligible for deletion. If set to -2, the value of log.retention.ms is used. The effective value should always be less than or equal to log.retention.ms value.
 :param bool log_message_downconversion_enable: This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests.
 :param int log_message_timestamp_difference_max_ms: The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message.
-:param str log_message_timestamp_type:
+:param str log_message_timestamp_type: Define whether the timestamp in the message is message create time or log append time.
 :param bool log_preallocate: Should pre allocate file when create new segment?
 :param int log_retention_bytes: The maximum size of the log before deleting messages.
 :param int log_retention_hours: The number of hours to keep a log file before deleting it.
@@ -28035,7 +27034,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
 @pulumi.getter(name="compressionType")
 def compression_type(self) -> Optional[str]:
 """
-
+Specify the final compression type for a given topic. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'uncompressed' which is equivalent to no compression; and 'producer' which means retain the original compression codec set by the producer.
 """
 return pulumi.get(self, "compression_type")

@@ -28115,7 +27114,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
 @pulumi.getter(name="logCleanupPolicy")
 def log_cleanup_policy(self) -> Optional[str]:
 """
-
+The default cleanup policy for segments beyond the retention window.
 """
 return pulumi.get(self, "log_cleanup_policy")

@@ -28187,7 +27186,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
 @pulumi.getter(name="logMessageTimestampType")
 def log_message_timestamp_type(self) -> Optional[str]:
 """
-
+Define whether the timestamp in the message is message create time or log append time.
 """
 return pulumi.get(self, "log_message_timestamp_type")

@@ -28443,10 +27442,10 @@ class GetKafkaKafkaUserConfigKafkaConnectConfigResult(dict):
 scheduled_rebalance_max_delay_ms: Optional[int] = None,
 session_timeout_ms: Optional[int] = None):
 """
-:param str connector_client_config_override_policy:
-:param str consumer_auto_offset_reset:
+:param str connector_client_config_override_policy: Defines what client configurations can be overridden by the connector. Default is None.
+:param str consumer_auto_offset_reset: What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
 :param int consumer_fetch_max_bytes: Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not a absolute maximum.
-:param str consumer_isolation_level:
+:param str consumer_isolation_level: Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
 :param int consumer_max_partition_fetch_bytes: Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress.
 :param int consumer_max_poll_interval_ms: The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).
 :param int consumer_max_poll_records: The maximum number of records returned in a single call to poll() (defaults to 500).
@@ -28454,7 +27453,7 @@ class GetKafkaKafkaUserConfigKafkaConnectConfigResult(dict):
 :param int offset_flush_timeout_ms: Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).
 :param int producer_batch_size: This setting gives the upper bound of the batch size to be sent. If there are fewer than this many bytes accumulated for this partition, the producer will 'linger' for the linger.ms time waiting for more records to show up. A batch size of zero will disable batching entirely (defaults to 16384).
 :param int producer_buffer_memory: The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432).
-:param str producer_compression_type:
+:param str producer_compression_type: Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
 :param int producer_linger_ms: This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting, however if there are fewer than this many bytes accumulated for this partition the producer will 'linger' for the specified time waiting for more records to show up. Defaults to 0.
 :param int producer_max_request_size: This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests.
 :param int scheduled_rebalance_max_delay_ms: The maximum delay that is scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. Defaults to 5 minutes.
@@ -28497,7 +27496,7 @@ class GetKafkaKafkaUserConfigKafkaConnectConfigResult(dict):
 @pulumi.getter(name="connectorClientConfigOverridePolicy")
 def connector_client_config_override_policy(self) -> Optional[str]:
 """
-
+Defines what client configurations can be overridden by the connector. Default is None.
 """
 return pulumi.get(self, "connector_client_config_override_policy")

@@ -28505,7 +27504,7 @@ class GetKafkaKafkaUserConfigKafkaConnectConfigResult(dict):
 @pulumi.getter(name="consumerAutoOffsetReset")
 def consumer_auto_offset_reset(self) -> Optional[str]:
 """
-
+What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
 """
 return pulumi.get(self, "consumer_auto_offset_reset")

@@ -28521,7 +27520,7 @@ class GetKafkaKafkaUserConfigKafkaConnectConfigResult(dict):
 @pulumi.getter(name="consumerIsolationLevel")
 def consumer_isolation_level(self) -> Optional[str]:
 """
-
+Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
 """
 return pulumi.get(self, "consumer_isolation_level")

@@ -28585,7 +27584,7 @@ class GetKafkaKafkaUserConfigKafkaConnectConfigResult(dict):
 @pulumi.getter(name="producerCompressionType")
 def producer_compression_type(self) -> Optional[str]:
 """
-
+Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
 """
 return pulumi.get(self, "producer_compression_type")

@@ -28638,11 +27637,11 @@ class GetKafkaKafkaUserConfigKafkaRestConfigResult(dict):
 """
 :param bool consumer_enable_auto_commit: If true the consumer's offset will be periodically committed to Kafka in the background. The default value is `true`.
 :param int consumer_request_max_bytes: Maximum number of bytes in unencoded message keys and values by a single request. The default value is `67108864`.
-:param int consumer_request_timeout_ms:
-:param str name_strategy:
+:param int consumer_request_timeout_ms: The maximum total time to wait for messages for a request if the maximum number of messages has not yet been reached. The default value is `1000`.
+:param str name_strategy: Name strategy to use when selecting subject for storing schemas. The default value is `topic_name`.
 :param bool name_strategy_validation: If true, validate that given schema is registered under expected subject name by the used name strategy when producing messages. The default value is `true`.
-:param str producer_acks:
-:param str producer_compression_type:
+:param str producer_acks: The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to 'all' or '-1', the leader will wait for the full set of in-sync replicas to acknowledge the record. The default value is `1`.
+:param str producer_compression_type: Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
 :param int producer_linger_ms: Wait for up to the given delay to allow batching records together. The default value is `0`.
 :param int producer_max_request_size: The maximum size of a request in bytes. Note that Kafka broker can also cap the record batch size. The default value is `1048576`.
 :param int simpleconsumer_pool_size_max: Maximum number of SimpleConsumers that can be instantiated per broker. The default value is `25`.
@@ -28688,7 +27687,7 @@ class GetKafkaKafkaUserConfigKafkaRestConfigResult(dict):
 @pulumi.getter(name="consumerRequestTimeoutMs")
 def consumer_request_timeout_ms(self) -> Optional[int]:
 """
-
+The maximum total time to wait for messages for a request if the maximum number of messages has not yet been reached. The default value is `1000`.
 """
 return pulumi.get(self, "consumer_request_timeout_ms")

@@ -28696,7 +27695,7 @@ class GetKafkaKafkaUserConfigKafkaRestConfigResult(dict):
 @pulumi.getter(name="nameStrategy")
 def name_strategy(self) -> Optional[str]:
 """
-
+Name strategy to use when selecting subject for storing schemas. The default value is `topic_name`.
 """
 return pulumi.get(self, "name_strategy")

@@ -28712,7 +27711,7 @@ class GetKafkaKafkaUserConfigKafkaRestConfigResult(dict):
 @pulumi.getter(name="producerAcks")
 def producer_acks(self) -> Optional[str]:
 """
-
+The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to 'all' or '-1', the leader will wait for the full set of in-sync replicas to acknowledge the record. The default value is `1`.
 """
 return pulumi.get(self, "producer_acks")

@@ -28720,7 +27719,7 @@ class GetKafkaKafkaUserConfigKafkaRestConfigResult(dict):
 @pulumi.getter(name="producerCompressionType")
 def producer_compression_type(self) -> Optional[str]:
 """
-
+Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
 """
 return pulumi.get(self, "producer_compression_type")

@@ -30045,35 +29044,6 @@ class GetM3AggregatorComponentResult(dict):
 return pulumi.get(self, "usage")


-@pulumi.output_type
-class GetM3AggregatorM3aggregatorResult(dict):
-def __init__(__self__, *,
-aggregator_http_uri: str,
-uris: Sequence[str]):
-"""
-:param str aggregator_http_uri: M3 Aggregator HTTP URI.
-:param Sequence[str] uris: M3 Aggregator server URIs.
-"""
-pulumi.set(__self__, "aggregator_http_uri", aggregator_http_uri)
-pulumi.set(__self__, "uris", uris)
-
-@property
-@pulumi.getter(name="aggregatorHttpUri")
-def aggregator_http_uri(self) -> str:
-"""
-M3 Aggregator HTTP URI.
-"""
-return pulumi.get(self, "aggregator_http_uri")
-
-@property
-@pulumi.getter
-def uris(self) -> Sequence[str]:
-"""
-M3 Aggregator server URIs.
-"""
-return pulumi.get(self, "uris")
-
-
 @pulumi.output_type
 class GetM3AggregatorM3aggregatorUserConfigResult(dict):
 def __init__(__self__, *,
@@ -30090,8 +29060,8 @@ class GetM3AggregatorM3aggregatorUserConfigResult(dict):
 :param Sequence['GetM3AggregatorM3aggregatorUserConfigIpFilterObjectArgs'] ip_filter_objects: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'
 :param Sequence[str] ip_filter_strings: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
 :param Sequence[str] ip_filters: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
-:param str m3_version:
-:param str m3aggregator_version:
+:param str m3_version: M3 major version (deprecated, use m3aggregator_version).
+:param str m3aggregator_version: M3 major version (the minimum compatible version).
 :param bool service_log: Store logs for the service so that they are available in the HTTP API and console.
 :param bool static_ips: Use static public IP addresses.
 """
@@ -30151,7 +29121,7 @@ class GetM3AggregatorM3aggregatorUserConfigResult(dict):
 @pulumi.getter(name="m3Version")
 def m3_version(self) -> Optional[str]:
 """
-
+M3 major version (deprecated, use m3aggregator_version).
 """
 return pulumi.get(self, "m3_version")

@@ -30159,7 +29129,7 @@ class GetM3AggregatorM3aggregatorUserConfigResult(dict):
 @pulumi.getter(name="m3aggregatorVersion")
 def m3aggregator_version(self) -> Optional[str]:
 """
-
+M3 major version (the minimum compatible version).
 """
 return pulumi.get(self, "m3aggregator_version")

@@ -30381,79 +29351,6 @@ class GetM3DbComponentResult(dict):
 return pulumi.get(self, "usage")


-@pulumi.output_type
-class GetM3DbM3dbResult(dict):
-def __init__(__self__, *,
-http_cluster_uri: str,
-http_node_uri: str,
-influxdb_uri: str,
-prometheus_remote_read_uri: str,
-prometheus_remote_write_uri: str,
-uris: Sequence[str]):
-"""
-:param str http_cluster_uri: M3DB cluster URI.
-:param str http_node_uri: M3DB node URI.
-:param str influxdb_uri: InfluxDB URI.
-:param str prometheus_remote_read_uri: Prometheus remote read URI.
-:param str prometheus_remote_write_uri: Prometheus remote write URI.
-:param Sequence[str] uris: M3DB server URIs.
-"""
-pulumi.set(__self__, "http_cluster_uri", http_cluster_uri)
-pulumi.set(__self__, "http_node_uri", http_node_uri)
-pulumi.set(__self__, "influxdb_uri", influxdb_uri)
-pulumi.set(__self__, "prometheus_remote_read_uri", prometheus_remote_read_uri)
-pulumi.set(__self__, "prometheus_remote_write_uri", prometheus_remote_write_uri)
-pulumi.set(__self__, "uris", uris)
-
-@property
-@pulumi.getter(name="httpClusterUri")
-def http_cluster_uri(self) -> str:
-"""
-M3DB cluster URI.
-"""
-return pulumi.get(self, "http_cluster_uri")
-
-@property
-@pulumi.getter(name="httpNodeUri")
-def http_node_uri(self) -> str:
-"""
-M3DB node URI.
-"""
-return pulumi.get(self, "http_node_uri")
-
-@property
-@pulumi.getter(name="influxdbUri")
-def influxdb_uri(self) -> str:
-"""
-InfluxDB URI.
-"""
-return pulumi.get(self, "influxdb_uri")
-
-@property
-@pulumi.getter(name="prometheusRemoteReadUri")
-def prometheus_remote_read_uri(self) -> str:
-"""
-Prometheus remote read URI.
-"""
-return pulumi.get(self, "prometheus_remote_read_uri")
-
-@property
-@pulumi.getter(name="prometheusRemoteWriteUri")
-def prometheus_remote_write_uri(self) -> str:
-"""
-Prometheus remote write URI.
-"""
-return pulumi.get(self, "prometheus_remote_write_uri")
-
-@property
-@pulumi.getter
-def uris(self) -> Sequence[str]:
-"""
-M3DB server URIs.
-"""
-return pulumi.get(self, "uris")
-
-
 @pulumi.output_type
 class GetM3DbM3dbUserConfigResult(dict):
 def __init__(__self__, *,
@@ -30483,9 +29380,9 @@ class GetM3DbM3dbUserConfigResult(dict):
 :param Sequence[str] ip_filters: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
 :param 'GetM3DbM3dbUserConfigLimitsArgs' limits: M3 limits
 :param 'GetM3DbM3dbUserConfigM3Args' m3: M3 specific configuration options
-:param str m3_version:
+:param str m3_version: M3 major version (deprecated, use m3db_version).
 :param bool m3coordinator_enable_graphite_carbon_ingest: Enables access to Graphite Carbon plaintext metrics ingestion. It can be enabled only for services inside VPCs. The metrics are written to aggregated namespaces only.
-:param str m3db_version:
+:param str m3db_version: M3 major version (the minimum compatible version).
 :param Sequence['GetM3DbM3dbUserConfigNamespaceArgs'] namespaces: List of M3 namespaces
 :param 'GetM3DbM3dbUserConfigPrivateAccessArgs' private_access: Allow access to selected service ports from private networks
 :param str project_to_fork_from: Name of another project to fork a service from. This has effect only when a new service is being created.
@@ -30595,7 +29492,7 @@ class GetM3DbM3dbUserConfigResult(dict):
 @pulumi.getter(name="m3Version")
 def m3_version(self) -> Optional[str]:
 """
-
+M3 major version (deprecated, use m3db_version).
 """
 return pulumi.get(self, "m3_version")

@@ -30611,7 +29508,7 @@ class GetM3DbM3dbUserConfigResult(dict):
 @pulumi.getter(name="m3dbVersion")
 def m3db_version(self) -> Optional[str]:
 """
-
+M3 major version (the minimum compatible version).
 """
 return pulumi.get(self, "m3db_version")

@@ -30848,7 +29745,7 @@ class GetM3DbM3dbUserConfigNamespaceResult(dict):
 resolution: Optional[str] = None):
 """
 :param str name: The name of the namespace.
-:param str type:
+:param str type: The type of aggregation (aggregated/unaggregated).
 :param 'GetM3DbM3dbUserConfigNamespaceOptionsArgs' options: Namespace options
 :param str resolution: The resolution for an aggregated namespace.
 """
@@ -30871,7 +29768,7 @@ class GetM3DbM3dbUserConfigNamespaceResult(dict):
 @pulumi.getter
 def type(self) -> str:
 """
-
+The type of aggregation (aggregated/unaggregated).
 """
 return pulumi.get(self, "type")

@@ -31393,141 +30290,6 @@ class GetMySqlComponentResult(dict):
 return pulumi.get(self, "usage")


-@pulumi.output_type
-class GetMySqlMysqlResult(dict):
-def __init__(__self__, *,
-params: Sequence['outputs.GetMySqlMysqlParamResult'],
-replica_uri: str,
-standby_uris: Sequence[str],
-syncing_uris: Sequence[str],
-uris: Sequence[str]):
-"""
-:param Sequence['GetMySqlMysqlParamArgs'] params: MySQL connection parameters
-:param str replica_uri: MySQL replica URI for services with a replica
-:param Sequence[str] standby_uris: MySQL standby connection URIs
-:param Sequence[str] syncing_uris: MySQL syncing connection URIs
-:param Sequence[str] uris: MySQL master connection URIs
-"""
-pulumi.set(__self__, "params", params)
-pulumi.set(__self__, "replica_uri", replica_uri)
-pulumi.set(__self__, "standby_uris", standby_uris)
-pulumi.set(__self__, "syncing_uris", syncing_uris)
-pulumi.set(__self__, "uris", uris)
-
-@property
-@pulumi.getter
-def params(self) -> Sequence['outputs.GetMySqlMysqlParamResult']:
-"""
-MySQL connection parameters
-"""
-return pulumi.get(self, "params")
-
-@property
-@pulumi.getter(name="replicaUri")
-def replica_uri(self) -> str:
-"""
-MySQL replica URI for services with a replica
-"""
-return pulumi.get(self, "replica_uri")
-
-@property
-@pulumi.getter(name="standbyUris")
-def standby_uris(self) -> Sequence[str]:
-"""
-MySQL standby connection URIs
-"""
-return pulumi.get(self, "standby_uris")
-
-@property
-@pulumi.getter(name="syncingUris")
-def syncing_uris(self) -> Sequence[str]:
-"""
-MySQL syncing connection URIs
-"""
-return pulumi.get(self, "syncing_uris")
-
-@property
-@pulumi.getter
-def uris(self) -> Sequence[str]:
-"""
-MySQL master connection URIs
-"""
-return pulumi.get(self, "uris")
-
-
-@pulumi.output_type
-class GetMySqlMysqlParamResult(dict):
-def __init__(__self__, *,
-database_name: str,
-host: str,
-password: str,
-port: int,
-sslmode: str,
-user: str):
-"""
-:param str database_name: Primary MySQL database name
-:param str host: MySQL host IP or name
-:param str password: MySQL admin user password
-:param int port: MySQL port
-:param str sslmode: MySQL sslmode setting (currently always "require")
-:param str user: MySQL admin user name
-"""
-pulumi.set(__self__, "database_name", database_name)
-pulumi.set(__self__, "host", host)
-pulumi.set(__self__, "password", password)
-pulumi.set(__self__, "port", port)
-pulumi.set(__self__, "sslmode", sslmode)
-pulumi.set(__self__, "user", user)
-
-@property
-@pulumi.getter(name="databaseName")
-def database_name(self) -> str:
-"""
-Primary MySQL database name
-"""
-return pulumi.get(self, "database_name")
-
-@property
-@pulumi.getter
-def host(self) -> str:
-"""
-MySQL host IP or name
-"""
-return pulumi.get(self, "host")
-
-@property
-@pulumi.getter
-def password(self) -> str:
-"""
-MySQL admin user password
-"""
-return pulumi.get(self, "password")
-
-@property
-@pulumi.getter
-def port(self) -> int:
-"""
-MySQL port
-"""
-return pulumi.get(self, "port")
-
-@property
-@pulumi.getter
-def sslmode(self) -> str:
-"""
-MySQL sslmode setting (currently always "require")
-"""
-return pulumi.get(self, "sslmode")
-
-@property
-@pulumi.getter
-def user(self) -> str:
-"""
-MySQL admin user name
-"""
-return pulumi.get(self, "user")
-
-
 @pulumi.output_type
 class GetMySqlMysqlUserConfigResult(dict):
 def __init__(__self__, *,
@@ -31563,7 +30325,7 @@ class GetMySqlMysqlUserConfigResult(dict):
 :param Sequence[str] ip_filters: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
 :param 'GetMySqlMysqlUserConfigMigrationArgs' migration: Migrate data from existing server
 :param 'GetMySqlMysqlUserConfigMysqlArgs' mysql: mysql.conf configuration values
-:param str mysql_version:
+:param str mysql_version: MySQL major version.
 :param 'GetMySqlMysqlUserConfigPrivateAccessArgs' private_access: Allow access to selected service ports from private networks
 :param 'GetMySqlMysqlUserConfigPrivatelinkAccessArgs' privatelink_access: Allow access to selected service components through Privatelink
 :param str project_to_fork_from: Name of another project to fork a service from. This has effect only when a new service is being created.
@@ -31709,7 +30471,7 @@ class GetMySqlMysqlUserConfigResult(dict):
 @pulumi.getter(name="mysqlVersion")
 def mysql_version(self) -> Optional[str]:
 """
-
+MySQL major version.
 """
 return pulumi.get(self, "mysql_version")

@@ -31824,7 +30586,7 @@ class GetMySqlMysqlUserConfigMigrationResult(dict):
 :param int port: Port number of the server where to migrate data from.
 :param str dbname: Database name for bootstrapping the initial connection.
 :param str ignore_dbs: Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment).
-:param str method:
+:param str method: The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
 :param str password: Password for authentication with the server where to migrate data from.
 :param bool ssl: The server where to migrate data from is secured with SSL. The default value is `true`.
 :param str username: User name for authentication with the server where to migrate data from.
@@ -31880,7 +30642,7 @@ class GetMySqlMysqlUserConfigMigrationResult(dict):
 @pulumi.getter
 def method(self) -> Optional[str]:
 """
-
+The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
 """
 return pulumi.get(self, "method")

@@ -31960,7 +30722,7 @@ class GetMySqlMysqlUserConfigMysqlResult(dict):
 :param int innodb_thread_concurrency: Defines the maximum number of threads permitted inside of InnoDB. Default is 0 (infinite concurrency - no limit).
 :param int innodb_write_io_threads: The number of I/O threads for write operations in InnoDB. Default is 4. Changing this parameter will lead to a restart of the MySQL service.
 :param int interactive_timeout: The number of seconds the server waits for activity on an interactive connection before closing it.
-:param str internal_tmp_mem_storage_engine:
+:param str internal_tmp_mem_storage_engine: The storage engine for in-memory internal temporary tables.
 :param float long_query_time: The slow_query_logs work as SQL statements that take more than long_query_time seconds to execute. Default is 10s.
 :param int max_allowed_packet: Size of the largest message in bytes that can be received by the server. Default is 67108864 (64M).
 :param int max_heap_table_size: Limits the size of internal in-memory tables. Also set tmp_table_size. Default is 16777216 (16M).
@@ -32175,7 +30937,7 @@ class GetMySqlMysqlUserConfigMysqlResult(dict):
 @pulumi.getter(name="internalTmpMemStorageEngine")
 def internal_tmp_mem_storage_engine(self) -> Optional[str]:
 """
-
+The storage engine for in-memory internal temporary tables.
 """
 return pulumi.get(self, "internal_tmp_mem_storage_engine")

@@ -32579,31 +31341,11 @@ class GetOpenSearchComponentResult(dict):
 @pulumi.output_type
 class GetOpenSearchOpensearchResult(dict):
 def __init__(__self__, *,
-kibana_uri: str,
-opensearch_dashboards_uri: str,
-password: str,
-uris: Sequence[str],
-username: str):
+opensearch_dashboards_uri: str):
 """
-:param str kibana_uri: URI for Kibana dashboard frontend
 :param str opensearch_dashboards_uri: URI for OpenSearch dashboard frontend
-:param str password: OpenSearch password
-:param Sequence[str] uris: OpenSearch server URIs.
-:param str username: OpenSearch username
 """
-pulumi.set(__self__, "kibana_uri", kibana_uri)
 pulumi.set(__self__, "opensearch_dashboards_uri", opensearch_dashboards_uri)
-pulumi.set(__self__, "password", password)
-pulumi.set(__self__, "uris", uris)
-pulumi.set(__self__, "username", username)
-
-@property
-@pulumi.getter(name="kibanaUri")
-def kibana_uri(self) -> str:
-"""
-URI for Kibana dashboard frontend
-"""
-return pulumi.get(self, "kibana_uri")

 @property
 @pulumi.getter(name="opensearchDashboardsUri")
@@ -32613,30 +31355,6 @@ class GetOpenSearchOpensearchResult(dict):
 """
 return pulumi.get(self, "opensearch_dashboards_uri")

-@property
-@pulumi.getter
-def password(self) -> str:
-"""
-OpenSearch password
-"""
-return pulumi.get(self, "password")
-
-@property
-@pulumi.getter
-def uris(self) -> Sequence[str]:
-"""
-OpenSearch server URIs.
-"""
-return pulumi.get(self, "uris")
-
-@property
-@pulumi.getter
-def username(self) -> str:
-"""
-OpenSearch username
-"""
-return pulumi.get(self, "username")
-

 @pulumi.output_type
 class GetOpenSearchOpensearchUserConfigResult(dict):
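The two hunks above cut `GetOpenSearchOpensearchResult` down to a single field. As a quick illustration (not part of the diff), the trimmed output type can only be built and read through `opensearch_dashboards_uri`; the URI below is a made-up placeholder, and direct construction like this is normally only seen in tests:

```python
from pulumi_aiven import outputs

# Hypothetical, directly-constructed instance of the trimmed output type;
# in normal use Pulumi materializes these objects from provider responses.
dashboards = outputs.GetOpenSearchOpensearchResult(
    opensearch_dashboards_uri="https://my-opensearch.example.aivencloud.com:443",
)

# Only the dashboards URI remains; kibana_uri, password, uris and username
# are no longer generated on this type after the change above.
print(dashboards.opensearch_dashboards_uri)
```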
@@ -32678,7 +31396,7 @@ class GetOpenSearchOpensearchUserConfigResult(dict):
 :param 'GetOpenSearchOpensearchUserConfigOpenidArgs' openid: OpenSearch OpenID Connect Configuration
 :param 'GetOpenSearchOpensearchUserConfigOpensearchArgs' opensearch: OpenSearch settings
 :param 'GetOpenSearchOpensearchUserConfigOpensearchDashboardsArgs' opensearch_dashboards: OpenSearch Dashboards settings
-:param str opensearch_version:
+:param str opensearch_version: OpenSearch major version.
 :param 'GetOpenSearchOpensearchUserConfigPrivateAccessArgs' private_access: Allow access to selected service ports from private networks
 :param 'GetOpenSearchOpensearchUserConfigPrivatelinkAccessArgs' privatelink_access: Allow access to selected service components through Privatelink
 :param str project_to_fork_from: Name of another project to fork a service from. This has effect only when a new service is being created.
@@ -32847,7 +31565,7 @@ class GetOpenSearchOpensearchUserConfigResult(dict):
 @pulumi.getter(name="opensearchVersion")
 def opensearch_version(self) -> Optional[str]:
 """
-
+OpenSearch major version.
 """
 return pulumi.get(self, "opensearch_version")

@@ -32933,7 +31651,7 @@ class GetOpenSearchOpensearchUserConfigIndexPatternResult(dict):
 """
 :param int max_index_count: Maximum number of indexes to keep.
 :param str pattern: fnmatch pattern.
-:param str sorting_algorithm:
+:param str sorting_algorithm: Deletion sorting algorithm. The default value is `creation_date`.
 """
 pulumi.set(__self__, "max_index_count", max_index_count)
 pulumi.set(__self__, "pattern", pattern)
@@ -32960,7 +31678,7 @@ class GetOpenSearchOpensearchUserConfigIndexPatternResult(dict):
 @pulumi.getter(name="sortingAlgorithm")
 def sorting_algorithm(self) -> Optional[str]:
 """
-
+Deletion sorting algorithm. The default value is `creation_date`.
 """
 return pulumi.get(self, "sorting_algorithm")

@@ -33735,12 +32453,12 @@ class GetOpenSearchOpensearchUserConfigOpensearchAuthFailureListenersInternalAut
 type: Optional[str] = None):
 """
 :param int allowed_tries: The number of login attempts allowed before login is blocked.
-:param str authentication_backend:
+:param str authentication_backend: internal_authentication_backend_limiting.authentication_backend.
 :param int block_expiry_seconds: The duration of time that login remains blocked after a failed login.
 :param int max_blocked_clients: internal_authentication_backend_limiting.max_blocked_clients.
 :param int max_tracked_clients: The maximum number of tracked IP addresses that have failed login.
 :param int time_window_seconds: The window of time in which the value for `allowed_tries` is enforced.
-:param str type:
+:param str type: internal_authentication_backend_limiting.type.
 """
 if allowed_tries is not None:
 pulumi.set(__self__, "allowed_tries", allowed_tries)
@@ -33769,7 +32487,7 @@ class GetOpenSearchOpensearchUserConfigOpensearchAuthFailureListenersInternalAut
 @pulumi.getter(name="authenticationBackend")
 def authentication_backend(self) -> Optional[str]:
 """
-
+internal_authentication_backend_limiting.authentication_backend.
 """
 return pulumi.get(self, "authentication_backend")

@@ -33809,7 +32527,7 @@ class GetOpenSearchOpensearchUserConfigOpensearchAuthFailureListenersInternalAut
 @pulumi.getter
 def type(self) -> Optional[str]:
 """
-
+internal_authentication_backend_limiting.type.
 """
 return pulumi.get(self, "type")

@@ -33829,7 +32547,7 @@ class GetOpenSearchOpensearchUserConfigOpensearchAuthFailureListenersIpRateLimit
 :param int max_blocked_clients: The maximum number of blocked IP addresses.
 :param int max_tracked_clients: The maximum number of tracked IP addresses that have failed login.
 :param int time_window_seconds: The window of time in which the value for `allowed_tries` is enforced.
-:param str type:
+:param str type: The type of rate limiting.
 """
 if allowed_tries is not None:
 pulumi.set(__self__, "allowed_tries", allowed_tries)
@@ -33888,7 +32606,7 @@ class GetOpenSearchOpensearchUserConfigOpensearchAuthFailureListenersIpRateLimit
 @pulumi.getter
 def type(self) -> Optional[str]:
 """
-
+The type of rate limiting.
 """
 return pulumi.get(self, "type")

@@ -34326,59 +33044,36 @@ class GetPgComponentResult(dict):
 @pulumi.output_type
 class GetPgPgResult(dict):
 def __init__(__self__, *,
-bouncer: str,
 dbname: str,
 host: str,
 max_connections: int,
-params: Sequence['outputs.GetPgPgParamResult'],
 password: str,
 port: int,
 replica_uri: str,
 sslmode: str,
-standby_uris: Sequence[str],
-syncing_uris: Sequence[str],
 uri: str,
-uris: Sequence[str],
 user: str):
 """
-:param str bouncer: Bouncer connection details
 :param str dbname: Primary PostgreSQL database name
 :param str host: PostgreSQL master node host IP or name
 :param int max_connections: Connection limit
-:param Sequence['GetPgPgParamArgs'] params: PostgreSQL connection parameters
 :param str password: PostgreSQL admin user password
 :param int port: PostgreSQL port
 :param str replica_uri: PostgreSQL replica URI for services with a replica
 :param str sslmode: PostgreSQL sslmode setting (currently always "require")
-:param Sequence[str] standby_uris: PostgreSQL standby connection URIs
-:param Sequence[str] syncing_uris: PostgreSQL syncing connection URIs
 :param str uri: PostgreSQL master connection URI
-:param Sequence[str] uris: PostgreSQL master connection URIs
 :param str user: PostgreSQL admin user name
 """
-pulumi.set(__self__, "bouncer", bouncer)
 pulumi.set(__self__, "dbname", dbname)
 pulumi.set(__self__, "host", host)
 pulumi.set(__self__, "max_connections", max_connections)
-pulumi.set(__self__, "params", params)
 pulumi.set(__self__, "password", password)
 pulumi.set(__self__, "port", port)
 pulumi.set(__self__, "replica_uri", replica_uri)
 pulumi.set(__self__, "sslmode", sslmode)
-pulumi.set(__self__, "standby_uris", standby_uris)
-pulumi.set(__self__, "syncing_uris", syncing_uris)
 pulumi.set(__self__, "uri", uri)
-pulumi.set(__self__, "uris", uris)
 pulumi.set(__self__, "user", user)

-@property
-@pulumi.getter
-def bouncer(self) -> str:
-"""
-Bouncer connection details
-"""
-return pulumi.get(self, "bouncer")
-
 @property
 @pulumi.getter
 def dbname(self) -> str:
@@ -34403,14 +33098,6 @@ class GetPgPgResult(dict):
 """
 return pulumi.get(self, "max_connections")

-@property
-@pulumi.getter
-def params(self) -> Sequence['outputs.GetPgPgParamResult']:
-"""
-PostgreSQL connection parameters
-"""
-return pulumi.get(self, "params")
-
 @property
 @pulumi.getter
 def password(self) -> str:
@@ -34443,22 +33130,6 @@ class GetPgPgResult(dict):
 """
 return pulumi.get(self, "sslmode")

-@property
-@pulumi.getter(name="standbyUris")
-def standby_uris(self) -> Sequence[str]:
-"""
-PostgreSQL standby connection URIs
-"""
-return pulumi.get(self, "standby_uris")
-
-@property
-@pulumi.getter(name="syncingUris")
-def syncing_uris(self) -> Sequence[str]:
-"""
-PostgreSQL syncing connection URIs
-"""
-return pulumi.get(self, "syncing_uris")
-
 @property
 @pulumi.getter
 def uri(self) -> str:
@@ -34467,87 +33138,6 @@ class GetPgPgResult(dict):
 """
 return pulumi.get(self, "uri")

-@property
-@pulumi.getter
-def uris(self) -> Sequence[str]:
-"""
-PostgreSQL master connection URIs
-"""
-return pulumi.get(self, "uris")
-
-@property
-@pulumi.getter
-def user(self) -> str:
-"""
-PostgreSQL admin user name
-"""
-return pulumi.get(self, "user")
-
-
-@pulumi.output_type
-class GetPgPgParamResult(dict):
-def __init__(__self__, *,
-database_name: str,
-host: str,
-password: str,
-port: int,
-sslmode: str,
-user: str):
-"""
-:param str database_name: Primary PostgreSQL database name
-:param str host: PostgreSQL host IP or name
-:param str password: PostgreSQL admin user password
-:param int port: PostgreSQL port
-:param str sslmode: PostgreSQL sslmode setting (currently always "require")
-:param str user: PostgreSQL admin user name
-"""
-pulumi.set(__self__, "database_name", database_name)
-pulumi.set(__self__, "host", host)
-pulumi.set(__self__, "password", password)
-pulumi.set(__self__, "port", port)
-pulumi.set(__self__, "sslmode", sslmode)
-pulumi.set(__self__, "user", user)
-
-@property
-@pulumi.getter(name="databaseName")
-def database_name(self) -> str:
-"""
-Primary PostgreSQL database name
-"""
-return pulumi.get(self, "database_name")
-
-@property
-@pulumi.getter
-def host(self) -> str:
-"""
-PostgreSQL host IP or name
-"""
-return pulumi.get(self, "host")
-
-@property
-@pulumi.getter
-def password(self) -> str:
-"""
-PostgreSQL admin user password
-"""
-return pulumi.get(self, "password")
-
-@property
-@pulumi.getter
-def port(self) -> int:
-"""
-PostgreSQL port
-"""
-return pulumi.get(self, "port")
-
-@property
-@pulumi.getter
-def sslmode(self) -> str:
-"""
-PostgreSQL sslmode setting (currently always "require")
-"""
-return pulumi.get(self, "sslmode")
-
 @property
 @pulumi.getter
 def user(self) -> str:
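For reference, after the hunks above `GetPgPgResult` keeps only the scalar connection fields; `bouncer`, `params`, `standby_uris`, `syncing_uris` and `uris` are gone, along with the `GetPgPgParamResult` helper type. A minimal sketch of the remaining surface, using made-up placeholder values (direct construction like this is normally only seen in tests; real instances come from the provider):

```python
from pulumi_aiven import outputs

# Hypothetical values purely for illustration.
pg = outputs.GetPgPgResult(
    dbname="defaultdb",
    host="pg-example.aivencloud.com",
    max_connections=100,
    password="example-password",
    port=12691,
    replica_uri="",
    sslmode="require",
    uri="postgres://avnadmin:example-password@pg-example.aivencloud.com:12691/defaultdb?sslmode=require",
    user="avnadmin",
)

# The single-URI fields are still available; the list-valued URI fields are not.
print(pg.uri, pg.sslmode)
```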
@@ -34608,7 +33198,7 @@ class GetPgPgUserConfigResult(dict):
|
|
|
34608
33198
|
:param bool pg_read_replica: Should the service which is being forked be a read replica (deprecated, use read_replica service integration instead).
|
|
34609
33199
|
:param str pg_service_to_fork_from: Name of the PG Service from which to fork (deprecated, use service_to_fork_from). This has effect only when a new service is being created.
|
|
34610
33200
|
:param bool pg_stat_monitor_enable: Enable the pg_stat_monitor extension. Enabling this extension will cause the cluster to be restarted.When this extension is enabled, pg_stat_statements results for utility commands are unreliable. The default value is `false`.
|
|
34611
|
-
:param str pg_version:
|
|
33201
|
+
:param str pg_version: PostgreSQL major version.
|
|
34612
33202
|
:param 'GetPgPgUserConfigPgauditArgs' pgaudit: System-wide settings for the pgaudit extension
|
|
34613
33203
|
:param 'GetPgPgUserConfigPgbouncerArgs' pgbouncer: PGBouncer connection pooling settings
|
|
34614
33204
|
:param 'GetPgPgUserConfigPglookoutArgs' pglookout: System-wide settings for pglookout
|
|
@@ -34621,9 +33211,9 @@ class GetPgPgUserConfigResult(dict):
|
|
|
34621
33211
|
:param str service_to_fork_from: Name of another service to fork from. This has effect only when a new service is being created.
|
|
34622
33212
|
:param float shared_buffers_percentage: Percentage of total RAM that the database server uses for shared memory buffers. Valid range is 20-60 (float), which corresponds to 20% - 60%. This setting adjusts the shared_buffers configuration value.
|
|
34623
33213
|
:param bool static_ips: Use static public IP addresses.
|
|
34624
|
-
:param str synchronous_replication:
|
|
33214
|
+
:param str synchronous_replication: Synchronous replication type. Note that the service plan also needs to support synchronous replication.
|
|
34625
33215
|
:param 'GetPgPgUserConfigTimescaledbArgs' timescaledb: System-wide settings for the timescaledb extension
|
|
34626
|
-
:param str variant:
|
|
33216
|
+
:param str variant: Variant of the PostgreSQL service, may affect the features that are exposed by default.
|
|
34627
33217
|
:param int work_mem: Sets the maximum amount of memory to be used by a query operation (such as a sort or hash table) before writing to temporary disk files, in MB. Default is 1MB + 0.075% of total RAM (up to 32MB).
|
|
34628
33218
|
"""
|
|
34629
33219
|
if additional_backup_regions is not None:
|
|
@@ -34821,7 +33411,7 @@ class GetPgPgUserConfigResult(dict):
|
|
|
34821
33411
|
@pulumi.getter(name="pgVersion")
|
|
34822
33412
|
def pg_version(self) -> Optional[str]:
|
|
34823
33413
|
"""
|
|
34824
|
-
|
|
33414
|
+
PostgreSQL major version.
|
|
34825
33415
|
"""
|
|
34826
33416
|
return pulumi.get(self, "pg_version")
|
|
34827
33417
|
|
|
@@ -34925,7 +33515,7 @@ class GetPgPgUserConfigResult(dict):
|
|
|
34925
33515
|
@pulumi.getter(name="synchronousReplication")
|
|
34926
33516
|
def synchronous_replication(self) -> Optional[str]:
|
|
34927
33517
|
"""
|
|
34928
|
-
|
|
33518
|
+
Synchronous replication type. Note that the service plan also needs to support synchronous replication.
|
|
34929
33519
|
"""
|
|
34930
33520
|
return pulumi.get(self, "synchronous_replication")
|
|
34931
33521
|
|
|
@@ -34941,7 +33531,7 @@ class GetPgPgUserConfigResult(dict):
|
|
|
34941
33531
|
@pulumi.getter
|
|
34942
33532
|
def variant(self) -> Optional[str]:
|
|
34943
33533
|
"""
|
|
34944
|
-
|
|
33534
|
+
Variant of the PostgreSQL service, may affect the features that are exposed by default.
|
|
34945
33535
|
"""
|
|
34946
33536
|
return pulumi.get(self, "variant")
|
|
34947
33537
|
|
|
@@ -35000,7 +33590,7 @@ class GetPgPgUserConfigMigrationResult(dict):
|
|
|
35000
33590
|
:param int port: Port number of the server where to migrate data from.
|
|
35001
33591
|
:param str dbname: Database name for bootstrapping the initial connection.
|
|
35002
33592
|
:param str ignore_dbs: Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment).
|
|
35003
|
-
:param str method:
|
|
33593
|
+
:param str method: The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
|
|
35004
33594
|
:param str password: Password for authentication with the server where to migrate data from.
|
|
35005
33595
|
:param bool ssl: The server where to migrate data from is secured with SSL. The default value is `true`.
|
|
35006
33596
|
:param str username: User name for authentication with the server where to migrate data from.
|
|
@@ -35056,7 +33646,7 @@ class GetPgPgUserConfigMigrationResult(dict):
|
|
|
35056
33646
|
@pulumi.getter
|
|
35057
33647
|
def method(self) -> Optional[str]:
|
|
35058
33648
|
"""
|
|
35059
|
-
|
|
33649
|
+
The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
|
|
35060
33650
|
"""
|
|
35061
33651
|
return pulumi.get(self, "method")
|
|
35062
33652
|
|
|
@@ -35152,12 +33742,12 @@ class GetPgPgUserConfigPgResult(dict):
|
|
|
35152
33742
|
:param int bgwriter_lru_maxpages: In each round, no more than this many buffers will be written by the background writer. Setting this to zero disables background writing. Default is 100.
|
|
35153
33743
|
:param float bgwriter_lru_multiplier: The average recent need for new buffers is multiplied by bgwriter_lru_multiplier to arrive at an estimate of the number that will be needed during the next round, (up to bgwriter_lru_maxpages). 1.0 represents a “just in time” policy of writing exactly the number of buffers predicted to be needed. Larger values provide some cushion against spikes in demand, while smaller values intentionally leave writes to be done by server processes. The default is 2.0.
|
|
35154
33744
|
 :param int deadlock_timeout: This is the amount of time, in milliseconds, to wait on a lock before checking to see if there is a deadlock condition.
-:param str default_toast_compression:
+:param str default_toast_compression: Specifies the default TOAST compression method for values of compressible columns (the default is lz4).
 :param int idle_in_transaction_session_timeout: Time out sessions with open transactions after this number of milliseconds.
 :param bool jit: Controls system-wide use of Just-in-Time Compilation (JIT).
 :param int log_autovacuum_min_duration: Causes each action executed by autovacuum to be logged if it ran for at least the specified number of milliseconds. Setting this to zero logs all autovacuum actions. Minus-one (the default) disables logging autovacuum actions.
-:param str log_error_verbosity:
-:param str log_line_prefix:
+:param str log_error_verbosity: Controls the amount of detail written in the server log for each message that is logged.
+:param str log_line_prefix: Choose from one of the available log-formats. These can support popular log analyzers like pgbadger, pganalyze etc.
 :param int log_min_duration_statement: Log statements that take more than this number of milliseconds to run, -1 disables.
 :param int log_temp_files: Log statements for each temporary file created larger than this number of kilobytes, -1 disables.
 :param int max_files_per_process: PostgreSQL maximum number of files that can be open per process.

@@ -35178,13 +33768,13 @@ class GetPgPgUserConfigPgResult(dict):
 :param str pg_partman_bgw_dot_role: Controls which role to use for pg_partman's scheduled background tasks.
 :param bool pg_stat_monitor_dot_pgsm_enable_query_plan: Enables or disables query plan monitoring.
 :param int pg_stat_monitor_dot_pgsm_max_buckets: Sets the maximum number of buckets.
-:param str pg_stat_statements_dot_track:
+:param str pg_stat_statements_dot_track: Controls which statements are counted. Specify top to track top-level statements (those issued directly by clients), all to also track nested statements (such as statements invoked within functions), or none to disable statement statistics collection. The default value is top.
 :param int temp_file_limit: PostgreSQL temporary file limit in KiB, -1 for unlimited.
 :param str timezone: PostgreSQL service timezone.
 :param int track_activity_query_size: Specifies the number of bytes reserved to track the currently executing command for each active session.
-:param str track_commit_timestamp:
-:param str track_functions:
-:param str track_io_timing:
+:param str track_commit_timestamp: Record commit time of transactions.
+:param str track_functions: Enables tracking of function call counts and time used.
+:param str track_io_timing: Enables timing of database I/O calls. This parameter is off by default, because it will repeatedly query the operating system for the current time, which may cause significant overhead on some platforms.
 :param int wal_sender_timeout: Terminate replication connections that are inactive for longer than this amount of time, in milliseconds. Setting this value to zero disables the timeout.
 :param int wal_writer_delay: WAL flush interval in milliseconds. Note that setting this value to lower than the default 200ms may negatively impact performance.
 """

@@ -35403,7 +33993,7 @@ class GetPgPgUserConfigPgResult(dict):
 @pulumi.getter(name="defaultToastCompression")
 def default_toast_compression(self) -> Optional[str]:
 """
-
+Specifies the default TOAST compression method for values of compressible columns (the default is lz4).
 """
 return pulumi.get(self, "default_toast_compression")

@@ -35435,7 +34025,7 @@ class GetPgPgUserConfigPgResult(dict):
 @pulumi.getter(name="logErrorVerbosity")
 def log_error_verbosity(self) -> Optional[str]:
 """
-
+Controls the amount of detail written in the server log for each message that is logged.
 """
 return pulumi.get(self, "log_error_verbosity")

@@ -35443,7 +34033,7 @@ class GetPgPgUserConfigPgResult(dict):
 @pulumi.getter(name="logLinePrefix")
 def log_line_prefix(self) -> Optional[str]:
 """
-
+Choose from one of the available log-formats. These can support popular log analyzers like pgbadger, pganalyze etc.
 """
 return pulumi.get(self, "log_line_prefix")

@@ -35611,7 +34201,7 @@ class GetPgPgUserConfigPgResult(dict):
 @pulumi.getter(name="pgStatStatementsDotTrack")
 def pg_stat_statements_dot_track(self) -> Optional[str]:
 """
-
+Controls which statements are counted. Specify top to track top-level statements (those issued directly by clients), all to also track nested statements (such as statements invoked within functions), or none to disable statement statistics collection. The default value is top.
 """
 return pulumi.get(self, "pg_stat_statements_dot_track")

@@ -35643,7 +34233,7 @@ class GetPgPgUserConfigPgResult(dict):
 @pulumi.getter(name="trackCommitTimestamp")
 def track_commit_timestamp(self) -> Optional[str]:
 """
-
+Record commit time of transactions.
 """
 return pulumi.get(self, "track_commit_timestamp")

@@ -35651,7 +34241,7 @@ class GetPgPgUserConfigPgResult(dict):
 @pulumi.getter(name="trackFunctions")
 def track_functions(self) -> Optional[str]:
 """
-
+Enables tracking of function call counts and time used.
 """
 return pulumi.get(self, "track_functions")

@@ -35659,7 +34249,7 @@ class GetPgPgUserConfigPgResult(dict):
 @pulumi.getter(name="trackIoTiming")
 def track_io_timing(self) -> Optional[str]:
 """
-
+Enables timing of database I/O calls. This parameter is off by default, because it will repeatedly query the operating system for the current time, which may cause significant overhead on some platforms.
 """
 return pulumi.get(self, "track_io_timing")
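These `GetPgPgUserConfigPgResult` getters mirror the writable `pg` block on the `aiven.Pg` resource, so the newly documented parameters can be set from a Pulumi program. A minimal sketch follows; the `PgPgUserConfigArgs`/`PgPgUserConfigPgArgs` class names and the project, cloud, and plan values are assumptions for illustration, not taken from this diff.

```python
import pulumi
import pulumi_aiven as aiven

# Sketch only: project, cloud_name, and plan are placeholders.
pg = aiven.Pg(
    "example-pg",
    project="my-project",
    cloud_name="google-europe-west1",
    plan="startup-4",
    service_name="example-pg",
    pg_user_config=aiven.PgPgUserConfigArgs(
        pg=aiven.PgPgUserConfigPgArgs(
            # Parameters whose docstrings gained descriptions in this release.
            default_toast_compression="lz4",
            log_error_verbosity="VERBOSE",
            pg_stat_statements_dot_track="top",
            track_io_timing="on",
        ),
    ),
)

pulumi.export("pg_service_name", pg.service_name)
```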
@@ -35783,7 +34373,7 @@ class GetPgPgUserConfigPgauditResult(dict):
 :param bool feature_enabled: Enable pgaudit extension. When enabled, pgaudit extension will be automatically installed.Otherwise, extension will be uninstalled but auditing configurations will be preserved. The default value is `false`.
 :param bool log_catalog: Specifies that session logging should be enabled in the casewhere all relations in a statement are in pg_catalog. The default value is `true`.
 :param bool log_client: Specifies whether log messages will be visible to a client process such as psql. The default value is `false`.
-:param str log_level:
+:param str log_level: Specifies the log level that will be used for log entries. The default value is `log`.
 :param int log_max_string_length: Crop parameters representation and whole statements if they exceed this threshold. A (default) value of -1 disable the truncation. The default value is `-1`.
 :param bool log_nested_statements: This GUC allows to turn off logging nested statements, that is, statements that are executed as part of another ExecutorRun. The default value is `true`.
 :param bool log_parameter: Specifies that audit logging should include the parameters that were passed with the statement. The default value is `false`.

@@ -35852,7 +34442,7 @@ class GetPgPgUserConfigPgauditResult(dict):
 @pulumi.getter(name="logLevel")
 def log_level(self) -> Optional[str]:
 """
-
+Specifies the log level that will be used for log entries. The default value is `log`.
 """
 return pulumi.get(self, "log_level")
@@ -35952,7 +34542,7 @@ class GetPgPgUserConfigPgbouncerResult(dict):
 """
 :param int autodb_idle_timeout: If the automatically created database pools have been unused this many seconds, they are freed. If 0 then timeout is disabled. (seconds). The default value is `3600`.
 :param int autodb_max_db_connections: Do not allow more than this many server connections per database (regardless of user). Setting it to 0 means unlimited.
-:param str autodb_pool_mode:
+:param str autodb_pool_mode: PGBouncer pool mode. The default value is `transaction`.
 :param int autodb_pool_size: If non-zero then create automatically a pool of that size per user when a pool doesn't exist. The default value is `0`.
 :param Sequence[str] ignore_startup_parameters: List of parameters to ignore when given in startup packet.
 :param int min_pool_size: Add more server connections to pool if below this number. Improves behavior when usual load comes suddenly back after period of total inactivity. The value is effectively capped at the pool size. The default value is `0`.

@@ -35999,7 +34589,7 @@ class GetPgPgUserConfigPgbouncerResult(dict):
 @pulumi.getter(name="autodbPoolMode")
 def autodb_pool_mode(self) -> Optional[str]:
 """
-
+PGBouncer pool mode. The default value is `transaction`.
 """
 return pulumi.get(self, "autodb_pool_mode")
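The `pgaudit` and `pgbouncer` blocks documented above sit under the same PostgreSQL user config. Below is a hedged sketch of both blocks, assuming the corresponding `...Args` input classes exist and using the defaults called out in the docstrings; the values themselves are examples, not recommendations.

```python
import pulumi_aiven as aiven

# Illustrative values only; class names assumed to mirror the result types above.
pg_user_config = aiven.PgPgUserConfigArgs(
    pgaudit=aiven.PgPgUserConfigPgauditArgs(
        feature_enabled=True,   # installs the pgaudit extension
        log_level="log",        # default per the docstring above
        log_parameter=False,
    ),
    pgbouncer=aiven.PgPgUserConfigPgbouncerArgs(
        autodb_pool_mode="transaction",  # default per the docstring above
        autodb_pool_size=10,
        autodb_idle_timeout=3600,
    ),
)
```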
@@ -36419,57 +35009,6 @@ class GetRedisComponentResult(dict):
 return pulumi.get(self, "usage")


-@pulumi.output_type
-class GetRedisRediResult(dict):
-def __init__(__self__, *,
-password: str,
-replica_uri: str,
-slave_uris: Sequence[str],
-uris: Sequence[str]):
-"""
-:param str password: Redis password.
-:param str replica_uri: Redis replica server URI.
-:param Sequence[str] slave_uris: Redis slave server URIs.
-:param Sequence[str] uris: Redis server URIs.
-"""
-pulumi.set(__self__, "password", password)
-pulumi.set(__self__, "replica_uri", replica_uri)
-pulumi.set(__self__, "slave_uris", slave_uris)
-pulumi.set(__self__, "uris", uris)
-
-@property
-@pulumi.getter
-def password(self) -> str:
-"""
-Redis password.
-"""
-return pulumi.get(self, "password")
-
-@property
-@pulumi.getter(name="replicaUri")
-def replica_uri(self) -> str:
-"""
-Redis replica server URI.
-"""
-return pulumi.get(self, "replica_uri")
-
-@property
-@pulumi.getter(name="slaveUris")
-def slave_uris(self) -> Sequence[str]:
-"""
-Redis slave server URIs.
-"""
-return pulumi.get(self, "slave_uris")
-
-@property
-@pulumi.getter
-def uris(self) -> Sequence[str]:
-"""
-Redis server URIs.
-"""
-return pulumi.get(self, "uris")
-
-
 @pulumi.output_type
 class GetRedisRedisUserConfigResult(dict):
 def __init__(__self__, *,
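The removed `GetRedisRediResult` type backed the Redis connection-info block on the `aiven.get_redis` data source. The sketch below shows the access pattern that relied on it; the `redis` attribute name is inferred from the class name and should be treated as an assumption.

```python
import pulumi
import pulumi_aiven as aiven

# Hypothetical lookup; project and service names are placeholders.
lookup = aiven.get_redis(project="my-project", service_name="example-redis")

# Fields exposed by the removed GetRedisRediResult block:
# password, replica_uri, slave_uris, uris.
if lookup.redis:
    pulumi.export("redis_uris", lookup.redis[0].uris)
```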
@@ -36509,18 +35048,18 @@ class GetRedisRedisUserConfigResult(dict):
 :param str project_to_fork_from: Name of another project to fork a service from. This has effect only when a new service is being created.
 :param 'GetRedisRedisUserConfigPublicAccessArgs' public_access: Allow access to selected service ports from the public Internet
 :param str recovery_basebackup_name: Name of the basebackup to restore in forked service.
-:param str redis_acl_channels_default:
+:param str redis_acl_channels_default: Determines default pub/sub channels' ACL for new users if ACL is not supplied. When this option is not defined, all_channels is assumed to keep backward compatibility. This option doesn't affect Redis configuration acl-pubsub-default.
 :param int redis_io_threads: Set Redis IO thread count. Changing this will cause a restart of the Redis service.
 :param int redis_lfu_decay_time: LFU maxmemory-policy counter decay time in minutes. The default value is `1`.
 :param int redis_lfu_log_factor: Counter logarithm factor for volatile-lfu and allkeys-lfu maxmemory-policies. The default value is `10`.
-:param str redis_maxmemory_policy:
+:param str redis_maxmemory_policy: Redis maxmemory-policy. The default value is `noeviction`.
 :param str redis_notify_keyspace_events: Set notify-keyspace-events option.
 :param int redis_number_of_databases: Set number of Redis databases. Changing this will cause a restart of the Redis service.
-:param str redis_persistence:
+:param str redis_persistence: When persistence is 'rdb', Redis does RDB dumps each 10 minutes if any key is changed. Also RDB dumps are done according to backup schedule for backup purposes. When persistence is 'off', no RDB dumps and backups are done, so data can be lost at any moment if service is restarted for any reason, or if service is powered off. Also service can't be forked.
 :param int redis_pubsub_client_output_buffer_limit: Set output buffer limit for pub / sub clients in MB. The value is the hard limit, the soft limit is 1/4 of the hard limit. When setting the limit, be mindful of the available memory in the selected service plan.
 :param bool redis_ssl: Require SSL to access Redis. The default value is `true`.
 :param int redis_timeout: Redis idle connection timeout in seconds. The default value is `300`.
-:param str redis_version:
+:param str redis_version: Redis major version.
 :param bool service_log: Store logs for the service so that they are available in the HTTP API and console.
 :param str service_to_fork_from: Name of another service to fork from. This has effect only when a new service is being created.
 :param bool static_ips: Use static public IP addresses.

@@ -36663,7 +35202,7 @@ class GetRedisRedisUserConfigResult(dict):
 @pulumi.getter(name="redisAclChannelsDefault")
 def redis_acl_channels_default(self) -> Optional[str]:
 """
-
+Determines default pub/sub channels' ACL for new users if ACL is not supplied. When this option is not defined, all_channels is assumed to keep backward compatibility. This option doesn't affect Redis configuration acl-pubsub-default.
 """
 return pulumi.get(self, "redis_acl_channels_default")

@@ -36695,7 +35234,7 @@ class GetRedisRedisUserConfigResult(dict):
 @pulumi.getter(name="redisMaxmemoryPolicy")
 def redis_maxmemory_policy(self) -> Optional[str]:
 """
-
+Redis maxmemory-policy. The default value is `noeviction`.
 """
 return pulumi.get(self, "redis_maxmemory_policy")

@@ -36719,7 +35258,7 @@ class GetRedisRedisUserConfigResult(dict):
 @pulumi.getter(name="redisPersistence")
 def redis_persistence(self) -> Optional[str]:
 """
-
+When persistence is 'rdb', Redis does RDB dumps each 10 minutes if any key is changed. Also RDB dumps are done according to backup schedule for backup purposes. When persistence is 'off', no RDB dumps and backups are done, so data can be lost at any moment if service is restarted for any reason, or if service is powered off. Also service can't be forked.
 """
 return pulumi.get(self, "redis_persistence")

@@ -36751,7 +35290,7 @@ class GetRedisRedisUserConfigResult(dict):
 @pulumi.getter(name="redisVersion")
 def redis_version(self) -> Optional[str]:
 """
-
+Redis major version.
 """
 return pulumi.get(self, "redis_version")
@@ -36826,7 +35365,7 @@ class GetRedisRedisUserConfigMigrationResult(dict):
 :param int port: Port number of the server where to migrate data from.
 :param str dbname: Database name for bootstrapping the initial connection.
 :param str ignore_dbs: Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment).
-:param str method:
+:param str method: The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
 :param str password: Password for authentication with the server where to migrate data from.
 :param bool ssl: The server where to migrate data from is secured with SSL. The default value is `true`.
 :param str username: User name for authentication with the server where to migrate data from.

@@ -36882,7 +35421,7 @@ class GetRedisRedisUserConfigMigrationResult(dict):
 @pulumi.getter
 def method(self) -> Optional[str]:
 """
-
+The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
 """
 return pulumi.get(self, "method")
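Together, the `GetRedisRedisUserConfigResult` and `GetRedisRedisUserConfigMigrationResult` fields above mirror the writable `redis_user_config` block on `aiven.Redis`. A hedged sketch follows, with the Args class names assumed from the result types and placeholder connection details.

```python
import pulumi_aiven as aiven

# Sketch only: project, plan, and the migration source are placeholders.
redis = aiven.Redis(
    "example-redis",
    project="my-project",
    cloud_name="google-europe-west1",
    plan="startup-4",
    service_name="example-redis",
    redis_user_config=aiven.RedisRedisUserConfigArgs(
        redis_maxmemory_policy="allkeys-lru",  # default is `noeviction`
        redis_persistence="rdb",               # RDB dumps roughly every 10 minutes
        redis_timeout=300,
        migration=aiven.RedisRedisUserConfigMigrationArgs(
            host="legacy-redis.example.com",   # placeholder source server
            port=6379,
            ssl=True,
            method="replication",              # example value; see the `method` docstring above
        ),
    ),
)
```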
@@ -37117,13 +35656,13 @@ class GetServiceIntegrationClickhouseKafkaUserConfigTableResult(dict):
 skip_broken_messages: Optional[int] = None):
 """
 :param Sequence['GetServiceIntegrationClickhouseKafkaUserConfigTableColumnArgs'] columns: Table columns
-:param str data_format:
+:param str data_format: Message data format. The default value is `JSONEachRow`.
 :param str group_name: Kafka consumers group. The default value is `clickhouse`.
 :param str name: Name of the table.
 :param Sequence['GetServiceIntegrationClickhouseKafkaUserConfigTableTopicArgs'] topics: Kafka topics
-:param str auto_offset_reset:
-:param str date_time_input_format:
-:param str handle_error_mode:
+:param str auto_offset_reset: Action to take when there is no initial offset in offset store or the desired offset is out of range. The default value is `earliest`.
+:param str date_time_input_format: Method to read DateTime from text input formats. The default value is `basic`.
+:param str handle_error_mode: How to handle errors for Kafka engine. The default value is `default`.
 :param int max_block_size: Number of row collected by poll(s) for flushing data from Kafka. The default value is `0`.
 :param int max_rows_per_message: The maximum number of rows produced in one kafka message for row-based formats. The default value is `1`.
 :param int num_consumers: The number of consumers per table per replica. The default value is `1`.

@@ -37164,7 +35703,7 @@ class GetServiceIntegrationClickhouseKafkaUserConfigTableResult(dict):
 @pulumi.getter(name="dataFormat")
 def data_format(self) -> str:
 """
-
+Message data format. The default value is `JSONEachRow`.
 """
 return pulumi.get(self, "data_format")

@@ -37196,7 +35735,7 @@ class GetServiceIntegrationClickhouseKafkaUserConfigTableResult(dict):
 @pulumi.getter(name="autoOffsetReset")
 def auto_offset_reset(self) -> Optional[str]:
 """
-
+Action to take when there is no initial offset in offset store or the desired offset is out of range. The default value is `earliest`.
 """
 return pulumi.get(self, "auto_offset_reset")

@@ -37204,7 +35743,7 @@ class GetServiceIntegrationClickhouseKafkaUserConfigTableResult(dict):
 @pulumi.getter(name="dateTimeInputFormat")
 def date_time_input_format(self) -> Optional[str]:
 """
-
+Method to read DateTime from text input formats. The default value is `basic`.
 """
 return pulumi.get(self, "date_time_input_format")

@@ -37212,7 +35751,7 @@ class GetServiceIntegrationClickhouseKafkaUserConfigTableResult(dict):
 @pulumi.getter(name="handleErrorMode")
 def handle_error_mode(self) -> Optional[str]:
 """
-
+How to handle errors for Kafka engine. The default value is `default`.
 """
 return pulumi.get(self, "handle_error_mode")
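These table-level settings belong to the `clickhouse_kafka` integration's user config. Below is a sketch of one table definition on an `aiven.ServiceIntegration`, with the Args class names assumed to mirror the result types above and the project and service names used as placeholders.

```python
import pulumi_aiven as aiven

integration = aiven.ServiceIntegration(
    "clickhouse-kafka",
    project="my-project",
    integration_type="clickhouse_kafka",
    source_service_name="example-kafka",
    destination_service_name="example-clickhouse",
    clickhouse_kafka_user_config=aiven.ServiceIntegrationClickhouseKafkaUserConfigArgs(
        tables=[aiven.ServiceIntegrationClickhouseKafkaUserConfigTableArgs(
            name="events",
            group_name="clickhouse",       # default consumer group
            data_format="JSONEachRow",     # default per the docstring above
            auto_offset_reset="earliest",  # default per the docstring above
            columns=[aiven.ServiceIntegrationClickhouseKafkaUserConfigTableColumnArgs(
                name="payload",
                type="String",
            )],
            topics=[aiven.ServiceIntegrationClickhouseKafkaUserConfigTableTopicArgs(
                name="events",
            )],
        )],
    ),
)
```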
@@ -37358,7 +35897,6 @@ class GetServiceIntegrationClickhousePostgresqlUserConfigDatabaseResult(dict):
 class GetServiceIntegrationDatadogUserConfigResult(dict):
 def __init__(__self__, *,
 datadog_dbm_enabled: Optional[bool] = None,
-datadog_pgbouncer_enabled: Optional[bool] = None,
 datadog_tags: Optional[Sequence['outputs.GetServiceIntegrationDatadogUserConfigDatadogTagResult']] = None,
 exclude_consumer_groups: Optional[Sequence[str]] = None,
 exclude_topics: Optional[Sequence[str]] = None,

@@ -37370,7 +35908,6 @@ class GetServiceIntegrationDatadogUserConfigResult(dict):
 redis: Optional['outputs.GetServiceIntegrationDatadogUserConfigRedisResult'] = None):
 """
 :param bool datadog_dbm_enabled: Enable Datadog Database Monitoring.
-:param bool datadog_pgbouncer_enabled: Enable Datadog PgBouncer Metric Tracking.
 :param Sequence['GetServiceIntegrationDatadogUserConfigDatadogTagArgs'] datadog_tags: Custom tags provided by user
 :param Sequence[str] exclude_consumer_groups: List of custom metrics.
 :param Sequence[str] exclude_topics: List of topics to exclude.

@@ -37383,8 +35920,6 @@ class GetServiceIntegrationDatadogUserConfigResult(dict):
 """
 if datadog_dbm_enabled is not None:
 pulumi.set(__self__, "datadog_dbm_enabled", datadog_dbm_enabled)
-if datadog_pgbouncer_enabled is not None:
-pulumi.set(__self__, "datadog_pgbouncer_enabled", datadog_pgbouncer_enabled)
 if datadog_tags is not None:
 pulumi.set(__self__, "datadog_tags", datadog_tags)
 if exclude_consumer_groups is not None:

@@ -37412,14 +35947,6 @@ class GetServiceIntegrationDatadogUserConfigResult(dict):
 """
 return pulumi.get(self, "datadog_dbm_enabled")

-@property
-@pulumi.getter(name="datadogPgbouncerEnabled")
-def datadog_pgbouncer_enabled(self) -> Optional[bool]:
-"""
-Enable Datadog PgBouncer Metric Tracking.
-"""
-return pulumi.get(self, "datadog_pgbouncer_enabled")
-
 @property
 @pulumi.getter(name="datadogTags")
 def datadog_tags(self) -> Optional[Sequence['outputs.GetServiceIntegrationDatadogUserConfigDatadogTagResult']]:
@@ -37614,7 +36141,7 @@ class GetServiceIntegrationEndpointDatadogUserConfigResult(dict):
 :param int kafka_consumer_check_instances: Number of separate instances to fetch kafka consumer statistics with.
 :param int kafka_consumer_stats_timeout: Number of seconds that datadog will wait to get consumer statistics from brokers.
 :param int max_partition_contexts: Maximum number of partition contexts to send.
-:param str site:
+:param str site: Datadog intake site. Defaults to datadoghq.com.
 """
 pulumi.set(__self__, "datadog_api_key", datadog_api_key)
 if datadog_tags is not None:

@@ -37682,7 +36209,7 @@ class GetServiceIntegrationEndpointDatadogUserConfigResult(dict):
 @pulumi.getter
 def site(self) -> Optional[str]:
 """
-
+Datadog intake site. Defaults to datadoghq.com.
 """
 return pulumi.get(self, "site")
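The Datadog changes span two resources: the service integration user config (which drops `datadog_pgbouncer_enabled` in this build) and the integration endpoint (whose `site` parameter gained a description). A combined sketch follows; the argument and class names are assumed from the result types above, and the API key and service names are placeholders.

```python
import pulumi_aiven as aiven

# Endpoint carrying the Datadog credentials; `site` defaults to datadoghq.com.
endpoint = aiven.ServiceIntegrationEndpoint(
    "datadog-endpoint",
    project="my-project",
    endpoint_name="datadog",
    endpoint_type="datadog",
    datadog_user_config=aiven.ServiceIntegrationEndpointDatadogUserConfigArgs(
        datadog_api_key="xxxxxxxxxxxxxxxx",   # placeholder secret
        site="datadoghq.eu",
        max_partition_contexts=200,
    ),
)

# Metrics integration for a service; note that datadog_pgbouncer_enabled is
# not available in this build of the provider.
integration = aiven.ServiceIntegration(
    "datadog-metrics",
    project="my-project",
    integration_type="datadog",
    source_service_name="example-pg",
    destination_endpoint_id=endpoint.id,
    datadog_user_config=aiven.ServiceIntegrationDatadogUserConfigArgs(
        datadog_dbm_enabled=True,
    ),
)
```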
@@ -37968,14 +36495,14 @@ class GetServiceIntegrationEndpointExternalKafkaUserConfigResult(dict):
 ssl_endpoint_identification_algorithm: Optional[str] = None):
 """
 :param str bootstrap_servers: Bootstrap servers.
-:param str security_protocol:
-:param str sasl_mechanism:
+:param str security_protocol: Security protocol.
+:param str sasl_mechanism: SASL mechanism used for connections to the Kafka server.
 :param str sasl_plain_password: Password for SASL PLAIN mechanism in the Kafka server.
 :param str sasl_plain_username: Username for SASL PLAIN mechanism in the Kafka server.
 :param str ssl_ca_cert: PEM-encoded CA certificate.
 :param str ssl_client_cert: PEM-encoded client certificate.
 :param str ssl_client_key: PEM-encoded client key.
-:param str ssl_endpoint_identification_algorithm:
+:param str ssl_endpoint_identification_algorithm: The endpoint identification algorithm to validate server hostname using server certificate.
 """
 pulumi.set(__self__, "bootstrap_servers", bootstrap_servers)
 pulumi.set(__self__, "security_protocol", security_protocol)

@@ -38006,7 +36533,7 @@ class GetServiceIntegrationEndpointExternalKafkaUserConfigResult(dict):
 @pulumi.getter(name="securityProtocol")
 def security_protocol(self) -> str:
 """
-
+Security protocol.
 """
 return pulumi.get(self, "security_protocol")

@@ -38014,7 +36541,7 @@ class GetServiceIntegrationEndpointExternalKafkaUserConfigResult(dict):
 @pulumi.getter(name="saslMechanism")
 def sasl_mechanism(self) -> Optional[str]:
 """
-
+SASL mechanism used for connections to the Kafka server.
 """
 return pulumi.get(self, "sasl_mechanism")

@@ -38062,7 +36589,7 @@ class GetServiceIntegrationEndpointExternalKafkaUserConfigResult(dict):
 @pulumi.getter(name="sslEndpointIdentificationAlgorithm")
 def ssl_endpoint_identification_algorithm(self) -> Optional[str]:
 """
-
+The endpoint identification algorithm to validate server hostname using server certificate.
 """
 return pulumi.get(self, "ssl_endpoint_identification_algorithm")
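The `external_kafka` endpoint config documented above is typically supplied when registering an external cluster as an integration endpoint. A hedged sketch follows, with placeholder brokers and credentials and the Args class name assumed from the result type.

```python
import pulumi_aiven as aiven

external_kafka = aiven.ServiceIntegrationEndpoint(
    "external-kafka",
    project="my-project",
    endpoint_name="external-kafka",
    endpoint_type="external_kafka",
    external_kafka_user_config=aiven.ServiceIntegrationEndpointExternalKafkaUserConfigArgs(
        bootstrap_servers="broker-1.example.com:9092,broker-2.example.com:9092",
        security_protocol="SASL_SSL",
        sasl_mechanism="SCRAM-SHA-256",
        sasl_plain_username="svc-user",          # placeholder credentials
        sasl_plain_password="change-me",
        ssl_endpoint_identification_algorithm="https",
    ),
)
```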
@@ -38152,7 +36679,7 @@ class GetServiceIntegrationEndpointExternalPostgresqlResult(dict):
 :param str password: Password.
 :param str ssl_client_certificate: Client certificate.
 :param str ssl_client_key: Client key.
-:param str ssl_mode:
+:param str ssl_mode: SSL Mode. The default value is `verify-full`.
 :param str ssl_root_cert: SSL Root Cert.
 """
 pulumi.set(__self__, "host", host)

@@ -38231,7 +36758,7 @@ class GetServiceIntegrationEndpointExternalPostgresqlResult(dict):
 @pulumi.getter(name="sslMode")
 def ssl_mode(self) -> Optional[str]:
 """
-
+SSL Mode. The default value is `verify-full`.
 """
 return pulumi.get(self, "ssl_mode")
@@ -38252,7 +36779,7 @@ class GetServiceIntegrationEndpointExternalSchemaRegistryUserConfigResult(dict):
 basic_auth_password: Optional[str] = None,
 basic_auth_username: Optional[str] = None):
 """
-:param str authentication:
+:param str authentication: Authentication method.
 :param str url: Schema Registry URL.
 :param str basic_auth_password: Basic authentication password.
 :param str basic_auth_username: Basic authentication user name.

@@ -38268,7 +36795,7 @@ class GetServiceIntegrationEndpointExternalSchemaRegistryUserConfigResult(dict):
 @pulumi.getter
 def authentication(self) -> str:
 """
-
+Authentication method.
 """
 return pulumi.get(self, "authentication")
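The two preceding blocks (external PostgreSQL and external Schema Registry endpoints) follow the same pattern as the Kafka endpoint above. A brief combined sketch follows; the argument names (`external_postgresql`, `external_schema_registry_user_config`) are inferred from the result types and should be treated as assumptions, and all hosts and credentials are placeholders.

```python
import pulumi_aiven as aiven

external_pg = aiven.ServiceIntegrationEndpoint(
    "external-pg",
    project="my-project",
    endpoint_name="external-pg",
    endpoint_type="external_postgresql",
    external_postgresql=aiven.ServiceIntegrationEndpointExternalPostgresqlArgs(
        host="pg.example.com",
        port=5432,
        username="metrics_reader",
        password="change-me",          # placeholder
        ssl_mode="verify-full",        # default per the docstring above
    ),
)

schema_registry = aiven.ServiceIntegrationEndpoint(
    "external-schema-registry",
    project="my-project",
    endpoint_name="external-schema-registry",
    endpoint_type="external_schema_registry",
    external_schema_registry_user_config=aiven.ServiceIntegrationEndpointExternalSchemaRegistryUserConfigArgs(
        url="https://schema-registry.example.com:8081",
        authentication="basic",        # example value
        basic_auth_username="registry-user",
        basic_auth_password="change-me",
    ),
)
```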
@@ -38373,7 +36900,7 @@ class GetServiceIntegrationEndpointRsyslogUserConfigResult(dict):
 max_message_size: Optional[int] = None,
 sd: Optional[str] = None):
 """
-:param str format:
+:param str format: Message format. The default value is `rfc5424`.
 :param int port: Rsyslog server port. The default value is `514`.
 :param str server: Rsyslog server IP address or hostname.
 :param bool tls: Require TLS. The default value is `true`.

@@ -38405,7 +36932,7 @@ class GetServiceIntegrationEndpointRsyslogUserConfigResult(dict):
 @pulumi.getter
 def format(self) -> str:
 """
-
+Message format. The default value is `rfc5424`.
 """
 return pulumi.get(self, "format")
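The rsyslog endpoint's `format` parameter above selects the syslog message layout. A minimal hedged sketch of the endpoint follows, assuming the usual resource arguments and a placeholder log host.

```python
import pulumi_aiven as aiven

rsyslog = aiven.ServiceIntegrationEndpoint(
    "rsyslog",
    project="my-project",
    endpoint_name="central-rsyslog",
    endpoint_type="rsyslog",
    rsyslog_user_config=aiven.ServiceIntegrationEndpointRsyslogUserConfigArgs(
        server="rsyslog.example.com",  # placeholder host
        port=514,                      # default per the docstring above
        format="rfc5424",              # default per the docstring above
        tls=True,
    ),
)
```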
@@ -38776,7 +37303,7 @@ class GetServiceIntegrationKafkaMirrormakerUserConfigKafkaMirrormakerResult(dict
 :param int consumer_fetch_min_bytes: The minimum amount of data the server should return for a fetch request.
 :param int producer_batch_size: The batch size in bytes producer will attempt to collect before publishing to broker.
 :param int producer_buffer_memory: The amount of bytes producer can use for buffering data before publishing to broker.
-:param str producer_compression_type:
+:param str producer_compression_type: Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
 :param int producer_linger_ms: The linger time (ms) for waiting new data to arrive for publishing.
 :param int producer_max_request_size: The maximum request size in bytes.
 """

@@ -38821,7 +37348,7 @@ class GetServiceIntegrationKafkaMirrormakerUserConfigKafkaMirrormakerResult(dict
 @pulumi.getter(name="producerCompressionType")
 def producer_compression_type(self) -> Optional[str]:
 """
-
+Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
 """
 return pulumi.get(self, "producer_compression_type")
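Finally, `producer_compression_type` above belongs to the `kafka_mirrormaker` integration's user config. A hedged sketch follows; the cluster alias, service names, and numeric values are placeholders, and the Args class names are assumed to mirror the result types documented in this diff.

```python
import pulumi_aiven as aiven

mirrormaker = aiven.ServiceIntegration(
    "kafka-mirrormaker",
    project="my-project",
    integration_type="kafka_mirrormaker",
    source_service_name="example-kafka-source",
    destination_service_name="example-kafka-mm",
    kafka_mirrormaker_user_config=aiven.ServiceIntegrationKafkaMirrormakerUserConfigArgs(
        cluster_alias="source",
        kafka_mirrormaker=aiven.ServiceIntegrationKafkaMirrormakerUserConfigKafkaMirrormakerArgs(
            producer_compression_type="zstd",  # 'none' is the default
            producer_linger_ms=100,
            producer_max_request_size=1048576,
        ),
    ),
)
```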