pulumi-aiven 6.14.0a1714540524__py3-none-any.whl → 6.14.0a1714596828__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. The information is provided for informational purposes only.

Potentially problematic release.


This version of pulumi-aiven has been flagged as potentially problematic; see the registry advisory for more details.

Files changed (113) hide show
  1. pulumi_aiven/_inputs.py +88 -150
  2. pulumi_aiven/account_authentication.py +2 -2
  3. pulumi_aiven/account_team_member.py +2 -2
  4. pulumi_aiven/aws_privatelink.py +14 -14
  5. pulumi_aiven/azure_privatelink.py +14 -14
  6. pulumi_aiven/azure_privatelink_connection_approval.py +14 -14
  7. pulumi_aiven/billing_group.py +18 -16
  8. pulumi_aiven/cassandra.py +16 -44
  9. pulumi_aiven/cassandra_user.py +16 -16
  10. pulumi_aiven/clickhouse.py +16 -44
  11. pulumi_aiven/clickhouse_database.py +16 -16
  12. pulumi_aiven/clickhouse_grant.py +14 -14
  13. pulumi_aiven/clickhouse_role.py +16 -16
  14. pulumi_aiven/clickhouse_user.py +14 -14
  15. pulumi_aiven/connection_pool.py +16 -16
  16. pulumi_aiven/dragonfly.py +124 -263
  17. pulumi_aiven/flink.py +16 -16
  18. pulumi_aiven/flink_application.py +16 -16
  19. pulumi_aiven/flink_application_deployment.py +21 -21
  20. pulumi_aiven/flink_application_version.py +14 -14
  21. pulumi_aiven/gcp_privatelink.py +21 -21
  22. pulumi_aiven/gcp_privatelink_connection_approval.py +21 -21
  23. pulumi_aiven/gcp_vpc_peering_connection.py +2 -2
  24. pulumi_aiven/get_aws_privatelink.py +6 -6
  25. pulumi_aiven/get_azure_privatelink.py +6 -6
  26. pulumi_aiven/get_billing_group.py +2 -2
  27. pulumi_aiven/get_cassanda.py +5 -18
  28. pulumi_aiven/get_cassandra.py +5 -18
  29. pulumi_aiven/get_cassandra_user.py +6 -6
  30. pulumi_aiven/get_clickhouse.py +5 -18
  31. pulumi_aiven/get_clickhouse_database.py +6 -6
  32. pulumi_aiven/get_clickhouse_user.py +6 -6
  33. pulumi_aiven/get_connection_pool.py +6 -6
  34. pulumi_aiven/get_dragonfly.py +27 -20
  35. pulumi_aiven/get_flink.py +4 -4
  36. pulumi_aiven/get_flink_application.py +6 -6
  37. pulumi_aiven/get_flink_application_version.py +6 -6
  38. pulumi_aiven/get_grafana.py +5 -18
  39. pulumi_aiven/get_kafka.py +13 -13
  40. pulumi_aiven/get_kafka_acl.py +6 -6
  41. pulumi_aiven/get_kafka_connect.py +5 -18
  42. pulumi_aiven/get_kafka_connector.py +6 -6
  43. pulumi_aiven/get_kafka_mirror_maker.py +5 -18
  44. pulumi_aiven/get_kafka_schema.py +6 -6
  45. pulumi_aiven/get_kafka_schema_configuration.py +6 -6
  46. pulumi_aiven/get_kafka_schema_registry_acl.py +6 -6
  47. pulumi_aiven/get_kafka_topic.py +16 -16
  48. pulumi_aiven/get_kafka_user.py +6 -6
  49. pulumi_aiven/get_m3_aggregator.py +5 -18
  50. pulumi_aiven/get_m3_db.py +5 -18
  51. pulumi_aiven/get_m3db_user.py +6 -6
  52. pulumi_aiven/get_mirror_maker_replication_flow.py +6 -6
  53. pulumi_aiven/get_my_sql.py +5 -18
  54. pulumi_aiven/get_mysql_database.py +6 -6
  55. pulumi_aiven/get_mysql_user.py +6 -6
  56. pulumi_aiven/get_open_search.py +4 -4
  57. pulumi_aiven/get_open_search_acl_config.py +6 -6
  58. pulumi_aiven/get_open_search_acl_rule.py +6 -6
  59. pulumi_aiven/get_opensearch_security_plugin_config.py +6 -6
  60. pulumi_aiven/get_opensearch_user.py +6 -6
  61. pulumi_aiven/get_pg.py +4 -4
  62. pulumi_aiven/get_pg_database.py +6 -6
  63. pulumi_aiven/get_pg_user.py +6 -6
  64. pulumi_aiven/get_project.py +17 -17
  65. pulumi_aiven/get_project_user.py +7 -7
  66. pulumi_aiven/get_redis.py +5 -18
  67. pulumi_aiven/get_redis_user.py +6 -6
  68. pulumi_aiven/get_service_integration.py +19 -29
  69. pulumi_aiven/grafana.py +78 -261
  70. pulumi_aiven/influx_db.py +21 -21
  71. pulumi_aiven/influxdb_database.py +21 -21
  72. pulumi_aiven/influxdb_user.py +21 -21
  73. pulumi_aiven/kafka.py +43 -43
  74. pulumi_aiven/kafka_acl.py +16 -16
  75. pulumi_aiven/kafka_connect.py +78 -261
  76. pulumi_aiven/kafka_connector.py +14 -14
  77. pulumi_aiven/kafka_mirror_maker.py +78 -261
  78. pulumi_aiven/kafka_schema.py +14 -14
  79. pulumi_aiven/kafka_schema_configuration.py +16 -16
  80. pulumi_aiven/kafka_schema_registry_acl.py +54 -14
  81. pulumi_aiven/kafka_topic.py +54 -54
  82. pulumi_aiven/kafka_user.py +16 -16
  83. pulumi_aiven/m3_aggregator.py +78 -261
  84. pulumi_aiven/m3_db.py +78 -261
  85. pulumi_aiven/m3db_user.py +14 -14
  86. pulumi_aiven/mirror_maker_replication_flow.py +14 -14
  87. pulumi_aiven/my_sql.py +78 -261
  88. pulumi_aiven/mysql_database.py +16 -16
  89. pulumi_aiven/mysql_user.py +16 -16
  90. pulumi_aiven/open_search.py +14 -14
  91. pulumi_aiven/open_search_acl_config.py +16 -20
  92. pulumi_aiven/open_search_acl_rule.py +16 -16
  93. pulumi_aiven/opensearch_security_plugin_config.py +16 -16
  94. pulumi_aiven/opensearch_user.py +16 -16
  95. pulumi_aiven/organization_group_project.py +20 -6
  96. pulumi_aiven/organization_user.py +2 -20
  97. pulumi_aiven/outputs.py +147 -266
  98. pulumi_aiven/pg.py +16 -16
  99. pulumi_aiven/pg_database.py +16 -16
  100. pulumi_aiven/pg_user.py +16 -16
  101. pulumi_aiven/project.py +71 -71
  102. pulumi_aiven/project_user.py +22 -22
  103. pulumi_aiven/project_vpc.py +7 -7
  104. pulumi_aiven/redis.py +78 -261
  105. pulumi_aiven/redis_user.py +16 -16
  106. pulumi_aiven/service_integration.py +54 -54
  107. pulumi_aiven/static_ip.py +7 -7
  108. pulumi_aiven/transit_gateway_vpc_attachment.py +2 -2
  109. {pulumi_aiven-6.14.0a1714540524.dist-info → pulumi_aiven-6.14.0a1714596828.dist-info}/METADATA +1 -1
  110. pulumi_aiven-6.14.0a1714596828.dist-info/RECORD +150 -0
  111. pulumi_aiven-6.14.0a1714540524.dist-info/RECORD +0 -150
  112. {pulumi_aiven-6.14.0a1714540524.dist-info → pulumi_aiven-6.14.0a1714596828.dist-info}/WHEEL +0 -0
  113. {pulumi_aiven-6.14.0a1714540524.dist-info → pulumi_aiven-6.14.0a1714596828.dist-info}/top_level.txt +0 -0
pulumi_aiven/outputs.py CHANGED
@@ -12,7 +12,6 @@ from . import outputs
12
12
 
13
13
  __all__ = [
14
14
  'AccountAuthenticationSamlFieldMapping',
15
- 'CassandraCassandra',
16
15
  'CassandraCassandraUserConfig',
17
16
  'CassandraCassandraUserConfigCassandra',
18
17
  'CassandraCassandraUserConfigIpFilterObject',
@@ -22,7 +21,6 @@ __all__ = [
22
21
  'CassandraServiceIntegration',
23
22
  'CassandraTag',
24
23
  'CassandraTechEmail',
25
- 'ClickhouseClickhouse',
26
24
  'ClickhouseClickhouseUserConfig',
27
25
  'ClickhouseClickhouseUserConfigIpFilterObject',
28
26
  'ClickhouseClickhouseUserConfigPrivateAccess',
@@ -35,7 +33,6 @@ __all__ = [
35
33
  'ClickhouseTag',
36
34
  'ClickhouseTechEmail',
37
35
  'DragonflyComponent',
38
- 'DragonflyDragonfly',
39
36
  'DragonflyDragonflyUserConfig',
40
37
  'DragonflyDragonflyUserConfigIpFilterObject',
41
38
  'DragonflyDragonflyUserConfigMigration',
@@ -56,7 +53,6 @@ __all__ = [
56
53
  'FlinkTag',
57
54
  'FlinkTechEmail',
58
55
  'GrafanaComponent',
59
- 'GrafanaGrafana',
60
56
  'GrafanaGrafanaUserConfig',
61
57
  'GrafanaGrafanaUserConfigAuthAzuread',
62
58
  'GrafanaGrafanaUserConfigAuthGenericOauth',
@@ -86,7 +82,6 @@ __all__ = [
86
82
  'InfluxDbTechEmail',
87
83
  'KafkaComponent',
88
84
  'KafkaConnectComponent',
89
- 'KafkaConnectKafkaConnect',
90
85
  'KafkaConnectKafkaConnectUserConfig',
91
86
  'KafkaConnectKafkaConnectUserConfigIpFilterObject',
92
87
  'KafkaConnectKafkaConnectUserConfigKafkaConnect',
@@ -111,7 +106,6 @@ __all__ = [
111
106
  'KafkaKafkaUserConfigTieredStorage',
112
107
  'KafkaKafkaUserConfigTieredStorageLocalCache',
113
108
  'KafkaMirrorMakerComponent',
114
- 'KafkaMirrorMakerKafkaMirrormaker',
115
109
  'KafkaMirrorMakerKafkaMirrormakerUserConfig',
116
110
  'KafkaMirrorMakerKafkaMirrormakerUserConfigIpFilterObject',
117
111
  'KafkaMirrorMakerKafkaMirrormakerUserConfigKafkaMirrormaker',
@@ -124,14 +118,12 @@ __all__ = [
124
118
  'KafkaTopicConfig',
125
119
  'KafkaTopicTag',
126
120
  'M3AggregatorComponent',
127
- 'M3AggregatorM3aggregator',
128
121
  'M3AggregatorM3aggregatorUserConfig',
129
122
  'M3AggregatorM3aggregatorUserConfigIpFilterObject',
130
123
  'M3AggregatorServiceIntegration',
131
124
  'M3AggregatorTag',
132
125
  'M3AggregatorTechEmail',
133
126
  'M3DbComponent',
134
- 'M3DbM3db',
135
127
  'M3DbM3dbUserConfig',
136
128
  'M3DbM3dbUserConfigIpFilterObject',
137
129
  'M3DbM3dbUserConfigLimits',
@@ -150,7 +142,6 @@ __all__ = [
150
142
  'M3DbTag',
151
143
  'M3DbTechEmail',
152
144
  'MySqlComponent',
153
- 'MySqlMysql',
154
145
  'MySqlMysqlUserConfig',
155
146
  'MySqlMysqlUserConfigIpFilterObject',
156
147
  'MySqlMysqlUserConfigMigration',
@@ -202,7 +193,6 @@ __all__ = [
202
193
  'PgTechEmail',
203
194
  'ProjectTag',
204
195
  'RedisComponent',
205
- 'RedisRedi',
206
196
  'RedisRedisUserConfig',
207
197
  'RedisRedisUserConfigIpFilterObject',
208
198
  'RedisRedisUserConfigMigration',
@@ -255,7 +245,6 @@ __all__ = [
255
245
  'ServiceIntegrationPrometheusUserConfigSourceMysql',
256
246
  'ServiceIntegrationPrometheusUserConfigSourceMysqlTelegraf',
257
247
  'GetAccountAuthenticationSamlFieldMappingResult',
258
- 'GetCassandaCassandraResult',
259
248
  'GetCassandaCassandraUserConfigResult',
260
249
  'GetCassandaCassandraUserConfigCassandraResult',
261
250
  'GetCassandaCassandraUserConfigIpFilterObjectResult',
@@ -265,7 +254,6 @@ __all__ = [
265
254
  'GetCassandaServiceIntegrationResult',
266
255
  'GetCassandaTagResult',
267
256
  'GetCassandaTechEmailResult',
268
- 'GetCassandraCassandraResult',
269
257
  'GetCassandraCassandraUserConfigResult',
270
258
  'GetCassandraCassandraUserConfigCassandraResult',
271
259
  'GetCassandraCassandraUserConfigIpFilterObjectResult',
@@ -275,7 +263,6 @@ __all__ = [
275
263
  'GetCassandraServiceIntegrationResult',
276
264
  'GetCassandraTagResult',
277
265
  'GetCassandraTechEmailResult',
278
- 'GetClickhouseClickhouseResult',
279
266
  'GetClickhouseClickhouseUserConfigResult',
280
267
  'GetClickhouseClickhouseUserConfigIpFilterObjectResult',
281
268
  'GetClickhouseClickhouseUserConfigPrivateAccessResult',
@@ -286,7 +273,6 @@ __all__ = [
286
273
  'GetClickhouseTagResult',
287
274
  'GetClickhouseTechEmailResult',
288
275
  'GetDragonflyComponentResult',
289
- 'GetDragonflyDragonflyResult',
290
276
  'GetDragonflyDragonflyUserConfigResult',
291
277
  'GetDragonflyDragonflyUserConfigIpFilterObjectResult',
292
278
  'GetDragonflyDragonflyUserConfigMigrationResult',
@@ -307,7 +293,6 @@ __all__ = [
307
293
  'GetFlinkTagResult',
308
294
  'GetFlinkTechEmailResult',
309
295
  'GetGrafanaComponentResult',
310
- 'GetGrafanaGrafanaResult',
311
296
  'GetGrafanaGrafanaUserConfigResult',
312
297
  'GetGrafanaGrafanaUserConfigAuthAzureadResult',
313
298
  'GetGrafanaGrafanaUserConfigAuthGenericOauthResult',
@@ -337,7 +322,6 @@ __all__ = [
337
322
  'GetInfluxDbTechEmailResult',
338
323
  'GetKafkaComponentResult',
339
324
  'GetKafkaConnectComponentResult',
340
- 'GetKafkaConnectKafkaConnectResult',
341
325
  'GetKafkaConnectKafkaConnectUserConfigResult',
342
326
  'GetKafkaConnectKafkaConnectUserConfigIpFilterObjectResult',
343
327
  'GetKafkaConnectKafkaConnectUserConfigKafkaConnectResult',
@@ -362,7 +346,6 @@ __all__ = [
362
346
  'GetKafkaKafkaUserConfigTieredStorageResult',
363
347
  'GetKafkaKafkaUserConfigTieredStorageLocalCacheResult',
364
348
  'GetKafkaMirrorMakerComponentResult',
365
- 'GetKafkaMirrorMakerKafkaMirrormakerResult',
366
349
  'GetKafkaMirrorMakerKafkaMirrormakerUserConfigResult',
367
350
  'GetKafkaMirrorMakerKafkaMirrormakerUserConfigIpFilterObjectResult',
368
351
  'GetKafkaMirrorMakerKafkaMirrormakerUserConfigKafkaMirrormakerResult',
@@ -375,14 +358,12 @@ __all__ = [
375
358
  'GetKafkaTopicConfigResult',
376
359
  'GetKafkaTopicTagResult',
377
360
  'GetM3AggregatorComponentResult',
378
- 'GetM3AggregatorM3aggregatorResult',
379
361
  'GetM3AggregatorM3aggregatorUserConfigResult',
380
362
  'GetM3AggregatorM3aggregatorUserConfigIpFilterObjectResult',
381
363
  'GetM3AggregatorServiceIntegrationResult',
382
364
  'GetM3AggregatorTagResult',
383
365
  'GetM3AggregatorTechEmailResult',
384
366
  'GetM3DbComponentResult',
385
- 'GetM3DbM3dbResult',
386
367
  'GetM3DbM3dbUserConfigResult',
387
368
  'GetM3DbM3dbUserConfigIpFilterObjectResult',
388
369
  'GetM3DbM3dbUserConfigLimitsResult',
@@ -401,7 +382,6 @@ __all__ = [
401
382
  'GetM3DbTagResult',
402
383
  'GetM3DbTechEmailResult',
403
384
  'GetMySqlComponentResult',
404
- 'GetMySqlMysqlResult',
405
385
  'GetMySqlMysqlUserConfigResult',
406
386
  'GetMySqlMysqlUserConfigIpFilterObjectResult',
407
387
  'GetMySqlMysqlUserConfigMigrationResult',
@@ -450,7 +430,6 @@ __all__ = [
450
430
  'GetPgTechEmailResult',
451
431
  'GetProjectTagResult',
452
432
  'GetRedisComponentResult',
453
- 'GetRedisRediResult',
454
433
  'GetRedisRedisUserConfigResult',
455
434
  'GetRedisRedisUserConfigIpFilterObjectResult',
456
435
  'GetRedisRedisUserConfigMigrationResult',
@@ -592,12 +571,6 @@ class AccountAuthenticationSamlFieldMapping(dict):
592
571
  return pulumi.get(self, "real_name")
593
572
 
594
573
 
595
- @pulumi.output_type
596
- class CassandraCassandra(dict):
597
- def __init__(__self__):
598
- pass
599
-
600
-
601
574
  @pulumi.output_type
602
575
  class CassandraCassandraUserConfig(dict):
603
576
  @staticmethod
@@ -666,7 +639,7 @@ class CassandraCassandraUserConfig(dict):
666
639
  :param str additional_backup_regions: Additional Cloud Regions for Backup Replication.
667
640
  :param int backup_hour: The hour of day (in UTC) when backup for the service is started. New backup is only started if previous backup has already completed.
668
641
  :param int backup_minute: The minute of an hour when backup for the service is started. New backup is only started if previous backup has already completed.
669
- :param 'CassandraCassandraUserConfigCassandraArgs' cassandra: cassandra configuration values
642
+ :param 'CassandraCassandraUserConfigCassandraArgs' cassandra: Cassandra configuration values
670
643
  :param str cassandra_version: Cassandra version.
671
644
  :param Sequence['CassandraCassandraUserConfigIpFilterObjectArgs'] ip_filter_objects: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'
672
645
  :param Sequence[str] ip_filter_strings: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
@@ -744,7 +717,7 @@ class CassandraCassandraUserConfig(dict):
744
717
  @pulumi.getter
745
718
  def cassandra(self) -> Optional['outputs.CassandraCassandraUserConfigCassandra']:
746
719
  """
747
- cassandra configuration values
720
+ Cassandra configuration values
748
721
  """
749
722
  return pulumi.get(self, "cassandra")
750
723
 
@@ -1195,12 +1168,6 @@ class CassandraTechEmail(dict):
1195
1168
  return pulumi.get(self, "email")
1196
1169
 
1197
1170
 
1198
- @pulumi.output_type
1199
- class ClickhouseClickhouse(dict):
1200
- def __init__(__self__):
1201
- pass
1202
-
1203
-
1204
1171
  @pulumi.output_type
1205
1172
  class ClickhouseClickhouseUserConfig(dict):
1206
1173
  @staticmethod
@@ -2076,12 +2043,6 @@ class DragonflyComponent(dict):
2076
2043
  return pulumi.get(self, "usage")
2077
2044
 
2078
2045
 
2079
- @pulumi.output_type
2080
- class DragonflyDragonfly(dict):
2081
- def __init__(__self__):
2082
- pass
2083
-
2084
-
2085
2046
  @pulumi.output_type
2086
2047
  class DragonflyDragonflyUserConfig(dict):
2087
2048
  @staticmethod
@@ -2089,6 +2050,8 @@ class DragonflyDragonflyUserConfig(dict):
2089
2050
  suggest = None
2090
2051
  if key == "cacheMode":
2091
2052
  suggest = "cache_mode"
2053
+ elif key == "dragonflyPersistence":
2054
+ suggest = "dragonfly_persistence"
2092
2055
  elif key == "dragonflySsl":
2093
2056
  suggest = "dragonfly_ssl"
2094
2057
  elif key == "ipFilterObjects":
@@ -2127,6 +2090,7 @@ class DragonflyDragonflyUserConfig(dict):
2127
2090
 
2128
2091
  def __init__(__self__, *,
2129
2092
  cache_mode: Optional[bool] = None,
2093
+ dragonfly_persistence: Optional[str] = None,
2130
2094
  dragonfly_ssl: Optional[bool] = None,
2131
2095
  ip_filter_objects: Optional[Sequence['outputs.DragonflyDragonflyUserConfigIpFilterObject']] = None,
2132
2096
  ip_filter_strings: Optional[Sequence[str]] = None,
@@ -2142,6 +2106,7 @@ class DragonflyDragonflyUserConfig(dict):
2142
2106
  static_ips: Optional[bool] = None):
2143
2107
  """
2144
2108
  :param bool cache_mode: Evict entries when getting close to maxmemory limit. The default value is `false`.
2109
+ :param str dragonfly_persistence: When persistence is 'rdb', Dragonfly does RDB dumps each 10 minutes. Also RDB dumps are done according to backup schedule for backup purposes. When persistence is 'off', no RDB dumps and backups are done, so data can be lost at any moment if service is restarted for any reason, or if service is powered off. Also service can't be forked.
2145
2110
  :param bool dragonfly_ssl: Require SSL to access Dragonfly. The default value is `true`.
2146
2111
  :param Sequence['DragonflyDragonflyUserConfigIpFilterObjectArgs'] ip_filter_objects: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'
2147
2112
  :param Sequence[str] ip_filter_strings: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
@@ -2158,6 +2123,8 @@ class DragonflyDragonflyUserConfig(dict):
2158
2123
  """
2159
2124
  if cache_mode is not None:
2160
2125
  pulumi.set(__self__, "cache_mode", cache_mode)
2126
+ if dragonfly_persistence is not None:
2127
+ pulumi.set(__self__, "dragonfly_persistence", dragonfly_persistence)
2161
2128
  if dragonfly_ssl is not None:
2162
2129
  pulumi.set(__self__, "dragonfly_ssl", dragonfly_ssl)
2163
2130
  if ip_filter_objects is not None:
@@ -2193,6 +2160,14 @@ class DragonflyDragonflyUserConfig(dict):
2193
2160
  """
2194
2161
  return pulumi.get(self, "cache_mode")
2195
2162
 
2163
+ @property
2164
+ @pulumi.getter(name="dragonflyPersistence")
2165
+ def dragonfly_persistence(self) -> Optional[str]:
2166
+ """
2167
+ When persistence is 'rdb', Dragonfly does RDB dumps each 10 minutes. Also RDB dumps are done according to backup schedule for backup purposes. When persistence is 'off', no RDB dumps and backups are done, so data can be lost at any moment if service is restarted for any reason, or if service is powered off. Also service can't be forked.
2168
+ """
2169
+ return pulumi.get(self, "dragonfly_persistence")
2170
+
2196
2171
  @property
2197
2172
  @pulumi.getter(name="dragonflySsl")
2198
2173
  def dragonfly_ssl(self) -> Optional[bool]:
@@ -3325,12 +3300,6 @@ class GrafanaComponent(dict):
3325
3300
  return pulumi.get(self, "usage")
3326
3301
 
3327
3302
 
3328
- @pulumi.output_type
3329
- class GrafanaGrafana(dict):
3330
- def __init__(__self__):
3331
- pass
3332
-
3333
-
3334
3303
  @pulumi.output_type
3335
3304
  class GrafanaGrafanaUserConfig(dict):
3336
3305
  @staticmethod
@@ -3477,7 +3446,7 @@ class GrafanaGrafanaUserConfig(dict):
3477
3446
  viewers_can_edit: Optional[bool] = None):
3478
3447
  """
3479
3448
  :param str additional_backup_regions: Additional Cloud Regions for Backup Replication.
3480
- :param bool alerting_enabled: Enable or disable Grafana legacy alerting functionality. This should not be enabled with unified_alerting_enabled.
3449
+ :param bool alerting_enabled: Enable or disable Grafana legacy alerting functionality. This should not be enabled with unified*alerting*enabled.
3481
3450
  :param str alerting_error_or_timeout: Default error or timeout setting for new alerting rules.
3482
3451
  :param int alerting_max_annotations_to_keep: Max number of alert annotations that Grafana stores. 0 (default) keeps all alert annotations.
3483
3452
  :param str alerting_nodata_or_nullvalues: Default value for 'no data or null values' for new alerting rules.
@@ -3514,7 +3483,7 @@ class GrafanaGrafanaUserConfig(dict):
3514
3483
  :param str service_to_fork_from: Name of another service to fork from. This has effect only when a new service is being created.
3515
3484
  :param 'GrafanaGrafanaUserConfigSmtpServerArgs' smtp_server: SMTP server settings
3516
3485
  :param bool static_ips: Use static public IP addresses.
3517
- :param bool unified_alerting_enabled: Enable or disable Grafana unified alerting functionality. By default this is enabled and any legacy alerts will be migrated on upgrade to Grafana 9+. To stay on legacy alerting, set unified_alerting_enabled to false and alerting_enabled to true. See https://grafana.com/docs/grafana/latest/alerting/set-up/migrating-alerts/ for more details.
3486
+ :param bool unified_alerting_enabled: Enable or disable Grafana unified alerting functionality. By default this is enabled and any legacy alerts will be migrated on upgrade to Grafana 9+. To stay on legacy alerting, set unified*alerting*enabled to false and alerting_enabled to true. See https://grafana.com/docs/grafana/latest/alerting/set-up/migrating-alerts/ for more details.
3518
3487
  :param bool user_auto_assign_org: Auto-assign new users on signup to main organization. Defaults to false.
3519
3488
  :param str user_auto_assign_org_role: Set role for new signups. Defaults to Viewer.
3520
3489
  :param bool viewers_can_edit: Users with view-only permission can edit but not save dashboards.
@@ -3610,16 +3579,13 @@ class GrafanaGrafanaUserConfig(dict):
3610
3579
  """
3611
3580
  Additional Cloud Regions for Backup Replication.
3612
3581
  """
3613
- warnings.warn("""This property is deprecated.""", DeprecationWarning)
3614
- pulumi.log.warn("""additional_backup_regions is deprecated: This property is deprecated.""")
3615
-
3616
3582
  return pulumi.get(self, "additional_backup_regions")
3617
3583
 
3618
3584
  @property
3619
3585
  @pulumi.getter(name="alertingEnabled")
3620
3586
  def alerting_enabled(self) -> Optional[bool]:
3621
3587
  """
3622
- Enable or disable Grafana legacy alerting functionality. This should not be enabled with unified_alerting_enabled.
3588
+ Enable or disable Grafana legacy alerting functionality. This should not be enabled with unified*alerting*enabled.
3623
3589
  """
3624
3590
  return pulumi.get(self, "alerting_enabled")
3625
3591
 
@@ -3918,7 +3884,7 @@ class GrafanaGrafanaUserConfig(dict):
3918
3884
  @pulumi.getter(name="unifiedAlertingEnabled")
3919
3885
  def unified_alerting_enabled(self) -> Optional[bool]:
3920
3886
  """
3921
- Enable or disable Grafana unified alerting functionality. By default this is enabled and any legacy alerts will be migrated on upgrade to Grafana 9+. To stay on legacy alerting, set unified_alerting_enabled to false and alerting_enabled to true. See https://grafana.com/docs/grafana/latest/alerting/set-up/migrating-alerts/ for more details.
3887
+ Enable or disable Grafana unified alerting functionality. By default this is enabled and any legacy alerts will be migrated on upgrade to Grafana 9+. To stay on legacy alerting, set unified*alerting*enabled to false and alerting_enabled to true. See https://grafana.com/docs/grafana/latest/alerting/set-up/migrating-alerts/ for more details.
3922
3888
  """
3923
3889
  return pulumi.get(self, "unified_alerting_enabled")
3924
3890
 
@@ -5977,12 +5943,6 @@ class KafkaConnectComponent(dict):
5977
5943
  return pulumi.get(self, "usage")
5978
5944
 
5979
5945
 
5980
- @pulumi.output_type
5981
- class KafkaConnectKafkaConnect(dict):
5982
- def __init__(__self__):
5983
- pass
5984
-
5985
-
5986
5946
  @pulumi.output_type
5987
5947
  class KafkaConnectKafkaConnectUserConfig(dict):
5988
5948
  @staticmethod
@@ -6251,8 +6211,8 @@ class KafkaConnectKafkaConnectUserConfigKafkaConnect(dict):
6251
6211
  :param str connector_client_config_override_policy: Defines what client configurations can be overridden by the connector. Default is None.
6252
6212
  :param str consumer_auto_offset_reset: What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
6253
6213
  :param int consumer_fetch_max_bytes: Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not a absolute maximum.
6254
- :param str consumer_isolation_level: Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
6255
- :param int consumer_max_partition_fetch_bytes: Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress. .
6214
+ :param str consumer_isolation_level: Transaction read isolation level. read*uncommitted is the default, but read*committed can be used if consume-exactly-once behavior is desired.
6215
+ :param int consumer_max_partition_fetch_bytes: Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress.
6256
6216
  :param int consumer_max_poll_interval_ms: The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).
6257
6217
  :param int consumer_max_poll_records: The maximum number of records returned in a single call to poll() (defaults to 500).
6258
6218
  :param int offset_flush_interval_ms: The interval at which to try committing offsets for tasks (defaults to 60000).
@@ -6326,7 +6286,7 @@ class KafkaConnectKafkaConnectUserConfigKafkaConnect(dict):
6326
6286
  @pulumi.getter(name="consumerIsolationLevel")
6327
6287
  def consumer_isolation_level(self) -> Optional[str]:
6328
6288
  """
6329
- Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
6289
+ Transaction read isolation level. read*uncommitted is the default, but read*committed can be used if consume-exactly-once behavior is desired.
6330
6290
  """
6331
6291
  return pulumi.get(self, "consumer_isolation_level")
6332
6292
 
@@ -6334,7 +6294,7 @@ class KafkaConnectKafkaConnectUserConfigKafkaConnect(dict):
6334
6294
  @pulumi.getter(name="consumerMaxPartitionFetchBytes")
6335
6295
  def consumer_max_partition_fetch_bytes(self) -> Optional[int]:
6336
6296
  """
6337
- Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress. .
6297
+ Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress.
6338
6298
  """
6339
6299
  return pulumi.get(self, "consumer_max_partition_fetch_bytes")
6340
6300
 
@@ -6743,11 +6703,11 @@ class KafkaKafka(dict):
6743
6703
  rest_uri: Optional[str] = None,
6744
6704
  schema_registry_uri: Optional[str] = None):
6745
6705
  """
6746
- :param str access_cert: The Kafka client certificate
6747
- :param str access_key: The Kafka client certificate key
6748
- :param str connect_uri: The Kafka Connect URI, if any
6749
- :param str rest_uri: The Kafka REST URI, if any
6750
- :param str schema_registry_uri: The Schema Registry URI, if any
6706
+ :param str access_cert: The Kafka client certificate.
6707
+ :param str access_key: The Kafka client certificate key.
6708
+ :param str connect_uri: The Kafka Connect URI.
6709
+ :param str rest_uri: The Kafka REST URI.
6710
+ :param str schema_registry_uri: The Schema Registry URI.
6751
6711
  """
6752
6712
  if access_cert is not None:
6753
6713
  pulumi.set(__self__, "access_cert", access_cert)
@@ -6764,7 +6724,7 @@ class KafkaKafka(dict):
6764
6724
  @pulumi.getter(name="accessCert")
6765
6725
  def access_cert(self) -> Optional[str]:
6766
6726
  """
6767
- The Kafka client certificate
6727
+ The Kafka client certificate.
6768
6728
  """
6769
6729
  return pulumi.get(self, "access_cert")
6770
6730
 
@@ -6772,7 +6732,7 @@ class KafkaKafka(dict):
6772
6732
  @pulumi.getter(name="accessKey")
6773
6733
  def access_key(self) -> Optional[str]:
6774
6734
  """
6775
- The Kafka client certificate key
6735
+ The Kafka client certificate key.
6776
6736
  """
6777
6737
  return pulumi.get(self, "access_key")
6778
6738
 
@@ -6780,7 +6740,7 @@ class KafkaKafka(dict):
6780
6740
  @pulumi.getter(name="connectUri")
6781
6741
  def connect_uri(self) -> Optional[str]:
6782
6742
  """
6783
- The Kafka Connect URI, if any
6743
+ The Kafka Connect URI.
6784
6744
  """
6785
6745
  return pulumi.get(self, "connect_uri")
6786
6746
 
@@ -6788,7 +6748,7 @@ class KafkaKafka(dict):
6788
6748
  @pulumi.getter(name="restUri")
6789
6749
  def rest_uri(self) -> Optional[str]:
6790
6750
  """
6791
- The Kafka REST URI, if any
6751
+ The Kafka REST URI.
6792
6752
  """
6793
6753
  return pulumi.get(self, "rest_uri")
6794
6754
 
@@ -6796,7 +6756,7 @@ class KafkaKafka(dict):
6796
6756
  @pulumi.getter(name="schemaRegistryUri")
6797
6757
  def schema_registry_uri(self) -> Optional[str]:
6798
6758
  """
6799
- The Schema Registry URI, if any
6759
+ The Schema Registry URI.
6800
6760
  """
6801
6761
  return pulumi.get(self, "schema_registry_uri")
6802
6762
 
@@ -7340,7 +7300,7 @@ class KafkaKafkaUserConfigKafka(dict):
7340
7300
  :param int log_index_size_max_bytes: The maximum size in bytes of the offset index.
7341
7301
  :param int log_local_retention_bytes: The maximum size of local log segments that can grow for a partition before it gets eligible for deletion. If set to -2, the value of log.retention.bytes is used. The effective value should always be less than or equal to log.retention.bytes value.
7342
7302
  :param int log_local_retention_ms: The number of milliseconds to keep the local log segments before it gets eligible for deletion. If set to -2, the value of log.retention.ms is used. The effective value should always be less than or equal to log.retention.ms value.
7343
- :param bool log_message_downconversion_enable: This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. .
7303
+ :param bool log_message_downconversion_enable: This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests.
7344
7304
  :param int log_message_timestamp_difference_max_ms: The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message.
7345
7305
  :param str log_message_timestamp_type: Define whether the timestamp in the message is message create time or log append time.
7346
7306
  :param bool log_preallocate: Should pre allocate file when create new segment?
@@ -7362,7 +7322,7 @@ class KafkaKafkaUserConfigKafka(dict):
7362
7322
  :param int replica_fetch_response_max_bytes: Maximum bytes expected for the entire fetch response (defaults to 10485760). Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum.
7363
7323
  :param str sasl_oauthbearer_expected_audience: The (optional) comma-delimited setting for the broker to use to verify that the JWT was issued for one of the expected audiences.
7364
7324
  :param str sasl_oauthbearer_expected_issuer: Optional setting for the broker to use to verify that the JWT was created by the expected issuer.
7365
- :param str sasl_oauthbearer_jwks_endpoint_url: OIDC JWKS endpoint URL. By setting this the SASL SSL OAuth2/OIDC authentication is enabled. See also other options for SASL OAuth2/OIDC. .
7325
+ :param str sasl_oauthbearer_jwks_endpoint_url: OIDC JWKS endpoint URL. By setting this the SASL SSL OAuth2/OIDC authentication is enabled. See also other options for SASL OAuth2/OIDC.
7366
7326
  :param str sasl_oauthbearer_sub_claim_name: Name of the scope from which to extract the subject claim from the JWT. Defaults to sub.
7367
7327
  :param int socket_request_max_bytes: The maximum number of bytes in a socket request (defaults to 104857600).
7368
7328
  :param bool transaction_partition_verification_enable: Enable verification that checks that the partition has been added to the transaction before writing transactional records to the partition.
@@ -7610,7 +7570,7 @@ class KafkaKafkaUserConfigKafka(dict):
7610
7570
  @pulumi.getter(name="logMessageDownconversionEnable")
7611
7571
  def log_message_downconversion_enable(self) -> Optional[bool]:
7612
7572
  """
7613
- This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. .
7573
+ This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests.
7614
7574
  """
7615
7575
  return pulumi.get(self, "log_message_downconversion_enable")
7616
7576
 
@@ -7786,7 +7746,7 @@ class KafkaKafkaUserConfigKafka(dict):
7786
7746
  @pulumi.getter(name="saslOauthbearerJwksEndpointUrl")
7787
7747
  def sasl_oauthbearer_jwks_endpoint_url(self) -> Optional[str]:
7788
7748
  """
7789
- OIDC JWKS endpoint URL. By setting this the SASL SSL OAuth2/OIDC authentication is enabled. See also other options for SASL OAuth2/OIDC. .
7749
+ OIDC JWKS endpoint URL. By setting this the SASL SSL OAuth2/OIDC authentication is enabled. See also other options for SASL OAuth2/OIDC.
7790
7750
  """
7791
7751
  return pulumi.get(self, "sasl_oauthbearer_jwks_endpoint_url")
7792
7752
 
@@ -7933,7 +7893,7 @@ class KafkaKafkaUserConfigKafkaConnectConfig(dict):
7933
7893
  :param str consumer_auto_offset_reset: What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
7934
7894
  :param int consumer_fetch_max_bytes: Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not a absolute maximum.
7935
7895
  :param str consumer_isolation_level: Transaction read isolation level. read*uncommitted is the default, but read*committed can be used if consume-exactly-once behavior is desired.
7936
- :param int consumer_max_partition_fetch_bytes: Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress. .
7896
+ :param int consumer_max_partition_fetch_bytes: Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress.
7937
7897
  :param int consumer_max_poll_interval_ms: The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).
7938
7898
  :param int consumer_max_poll_records: The maximum number of records returned in a single call to poll() (defaults to 500).
7939
7899
  :param int offset_flush_interval_ms: The interval at which to try committing offsets for tasks (defaults to 60000).
@@ -8015,7 +7975,7 @@ class KafkaKafkaUserConfigKafkaConnectConfig(dict):
8015
7975
  @pulumi.getter(name="consumerMaxPartitionFetchBytes")
8016
7976
  def consumer_max_partition_fetch_bytes(self) -> Optional[int]:
8017
7977
  """
8018
- Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress. .
7978
+ Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress.
8019
7979
  """
8020
7980
  return pulumi.get(self, "consumer_max_partition_fetch_bytes")
8021
7981
 
@@ -8791,12 +8751,6 @@ class KafkaMirrorMakerComponent(dict):
8791
8751
  return pulumi.get(self, "usage")
8792
8752
 
8793
8753
 
8794
- @pulumi.output_type
8795
- class KafkaMirrorMakerKafkaMirrormaker(dict):
8796
- def __init__(__self__):
8797
- pass
8798
-
8799
-
8800
8754
  @pulumi.output_type
8801
8755
  class KafkaMirrorMakerKafkaMirrormakerUserConfig(dict):
8802
8756
  @staticmethod
@@ -9018,7 +8972,7 @@ class KafkaMirrorMakerKafkaMirrormakerUserConfigKafkaMirrormaker(dict):
9018
8972
  :param int refresh_groups_interval_seconds: Frequency of consumer group refresh in seconds. Defaults to 600 seconds (10 minutes).
9019
8973
  :param bool refresh_topics_enabled: Whether to periodically check for new topics and partitions. Defaults to 'true'.
9020
8974
  :param int refresh_topics_interval_seconds: Frequency of topic and partitions refresh in seconds. Defaults to 600 seconds (10 minutes).
9021
- :param bool sync_group_offsets_enabled: Whether to periodically write the translated offsets of replicated consumer groups (in the source cluster) to __consumer_offsets topic in target cluster, as long as no active consumers in that group are connected to the target cluster.
8975
+ :param bool sync_group_offsets_enabled: Whether to periodically write the translated offsets of replicated consumer groups (in the source cluster) to _*consumer*offsets topic in target cluster, as long as no active consumers in that group are connected to the target cluster.
9022
8976
  :param int sync_group_offsets_interval_seconds: Frequency at which consumer group offsets are synced (default: 60, every minute).
9023
8977
  :param bool sync_topic_configs_enabled: Whether to periodically configure remote topics to match their corresponding upstream topics.
9024
8978
  :param int tasks_max_per_cpu: 'tasks.max' is set to this multiplied by the number of CPUs in the service. The default value is `1`.
@@ -9126,7 +9080,7 @@ class KafkaMirrorMakerKafkaMirrormakerUserConfigKafkaMirrormaker(dict):
9126
9080
  @pulumi.getter(name="syncGroupOffsetsEnabled")
9127
9081
  def sync_group_offsets_enabled(self) -> Optional[bool]:
9128
9082
  """
9129
- Whether to periodically write the translated offsets of replicated consumer groups (in the source cluster) to __consumer_offsets topic in target cluster, as long as no active consumers in that group are connected to the target cluster.
9083
+ Whether to periodically write the translated offsets of replicated consumer groups (in the source cluster) to _*consumer*offsets topic in target cluster, as long as no active consumers in that group are connected to the target cluster.
9130
9084
  """
9131
9085
  return pulumi.get(self, "sync_group_offsets_enabled")
9132
9086
 
@@ -9898,12 +9852,6 @@ class M3AggregatorComponent(dict):
9898
9852
  return pulumi.get(self, "usage")
9899
9853
 
9900
9854
 
9901
- @pulumi.output_type
9902
- class M3AggregatorM3aggregator(dict):
9903
- def __init__(__self__):
9904
- pass
9905
-
9906
-
9907
9855
  @pulumi.output_type
9908
9856
  class M3AggregatorM3aggregatorUserConfig(dict):
9909
9857
  @staticmethod
@@ -10288,12 +10236,6 @@ class M3DbComponent(dict):
10288
10236
  return pulumi.get(self, "usage")
10289
10237
 
10290
10238
 
10291
- @pulumi.output_type
10292
- class M3DbM3db(dict):
10293
- def __init__(__self__):
10294
- pass
10295
-
10296
-
10297
10239
  @pulumi.output_type
10298
10240
  class M3DbM3dbUserConfig(dict):
10299
10241
  @staticmethod
@@ -10421,9 +10363,6 @@ class M3DbM3dbUserConfig(dict):
10421
10363
  """
10422
10364
  Additional Cloud Regions for Backup Replication.
10423
10365
  """
10424
- warnings.warn("""This property is deprecated.""", DeprecationWarning)
10425
- pulumi.log.warn("""additional_backup_regions is deprecated: This property is deprecated.""")
10426
-
10427
10366
  return pulumi.get(self, "additional_backup_regions")
10428
10367
 
10429
10368
  @property
@@ -10635,7 +10574,7 @@ class M3DbM3dbUserConfigLimits(dict):
10635
10574
  """
10636
10575
  :param int max_recently_queried_series_blocks: The maximum number of blocks that can be read in a given lookback period.
10637
10576
  :param int max_recently_queried_series_disk_bytes_read: The maximum number of disk bytes that can be read in a given lookback period.
10638
- :param str max_recently_queried_series_lookback: The lookback period for 'max_recently_queried_series_blocks' and 'max_recently_queried_series_disk_bytes_read'.
10577
+ :param str max_recently_queried_series_lookback: The lookback period for 'max*recently*queried*series*blocks' and 'max*recently*queried*series*disk*bytes*read'.
10639
10578
  :param int query_docs: The maximum number of docs fetched in single query.
10640
10579
  :param bool query_require_exhaustive: When query limits are exceeded, whether to return error or return partial results.
10641
10580
  :param int query_series: The maximum number of series fetched in single query.
@@ -10673,7 +10612,7 @@ class M3DbM3dbUserConfigLimits(dict):
10673
10612
  @pulumi.getter(name="maxRecentlyQueriedSeriesLookback")
10674
10613
  def max_recently_queried_series_lookback(self) -> Optional[str]:
10675
10614
  """
10676
- The lookback period for 'max_recently_queried_series_blocks' and 'max_recently_queried_series_disk_bytes_read'.
10615
+ The lookback period for 'max*recently*queried*series*blocks' and 'max*recently*queried*series*disk*bytes*read'.
10677
10616
  """
10678
10617
  return pulumi.get(self, "max_recently_queried_series_lookback")
10679
10618
 
@@ -11084,7 +11023,7 @@ class M3DbM3dbUserConfigRulesMapping(dict):
11084
11023
  namespaces_strings: Optional[Sequence[str]] = None,
11085
11024
  tags: Optional[Sequence['outputs.M3DbM3dbUserConfigRulesMappingTag']] = None):
11086
11025
  """
11087
- :param str filter: Matching metric names with wildcards (using __name__:wildcard) or matching tags and their (optionally wildcarded) values. For value, ! can be used at start of value for negation, and multiple filters can be supplied using space as separator.
11026
+ :param str filter: Matching metric names with wildcards (using **name**:wildcard) or matching tags and their (optionally wildcarded) values. For value, ! can be used at start of value for negation, and multiple filters can be supplied using space as separator.
11088
11027
  :param Sequence[str] aggregations: List of aggregations to be applied.
11089
11028
  :param bool drop: Only store the derived metric (as specified in the roll-up rules), if any.
11090
11029
  :param str name: The (optional) name of the rule.
@@ -11113,7 +11052,7 @@ class M3DbM3dbUserConfigRulesMapping(dict):
11113
11052
  @pulumi.getter
11114
11053
  def filter(self) -> str:
11115
11054
  """
11116
- Matching metric names with wildcards (using __name__:wildcard) or matching tags and their (optionally wildcarded) values. For value, ! can be used at start of value for negation, and multiple filters can be supplied using space as separator.
11055
+ Matching metric names with wildcards (using **name**:wildcard) or matching tags and their (optionally wildcarded) values. For value, ! can be used at start of value for negation, and multiple filters can be supplied using space as separator.
11117
11056
  """
11118
11057
  return pulumi.get(self, "filter")
11119
11058
 
@@ -11453,12 +11392,6 @@ class MySqlComponent(dict):
11453
11392
  return pulumi.get(self, "usage")
11454
11393
 
11455
11394
 
11456
- @pulumi.output_type
11457
- class MySqlMysql(dict):
11458
- def __init__(__self__):
11459
- pass
11460
-
11461
-
11462
11395
  @pulumi.output_type
11463
11396
  class MySqlMysqlUserConfig(dict):
11464
11397
  @staticmethod
@@ -12035,17 +11968,17 @@ class MySqlMysqlUserConfigMysql(dict):
12035
11968
  :param int innodb_write_io_threads: The number of I/O threads for write operations in InnoDB. Default is 4. Changing this parameter will lead to a restart of the MySQL service.
12036
11969
  :param int interactive_timeout: The number of seconds the server waits for activity on an interactive connection before closing it.
12037
11970
  :param str internal_tmp_mem_storage_engine: The storage engine for in-memory internal temporary tables.
12038
- :param float long_query_time: The slow_query_logs work as SQL statements that take more than long_query_time seconds to execute. Default is 10s.
11971
+ :param float long_query_time: The slow*query*logs work as SQL statements that take more than long*query*time seconds to execute. Default is 10s.
12039
11972
  :param int max_allowed_packet: Size of the largest message in bytes that can be received by the server. Default is 67108864 (64M).
12040
- :param int max_heap_table_size: Limits the size of internal in-memory tables. Also set tmp_table_size. Default is 16777216 (16M).
11973
+ :param int max_heap_table_size: Limits the size of internal in-memory tables. Also set tmp*table*size. Default is 16777216 (16M).
12041
11974
  :param int net_buffer_length: Start sizes of connection buffer and result buffer. Default is 16384 (16K). Changing this parameter will lead to a restart of the MySQL service.
12042
11975
  :param int net_read_timeout: The number of seconds to wait for more data from a connection before aborting the read.
12043
11976
  :param int net_write_timeout: The number of seconds to wait for a block to be written to a connection before aborting the write.
12044
- :param bool slow_query_log: Slow query log enables capturing of slow queries. Setting slow_query_log to false also truncates the mysql.slow_log table. Default is off.
11977
+ :param bool slow_query_log: Slow query log enables capturing of slow queries. Setting slow*query*log to false also truncates the mysql.slow_log table. Default is off.
12045
11978
  :param int sort_buffer_size: Sort buffer size in bytes for ORDER BY optimization. Default is 262144 (256K).
12046
11979
  :param str sql_mode: Global SQL mode. Set to empty to use MySQL server defaults. When creating a new service and not setting this field Aiven default SQL mode (strict, SQL standard compliant) will be assigned.
12047
11980
  :param bool sql_require_primary_key: Require primary key to be defined for new tables or old tables modified with ALTER TABLE and fail if missing. It is recommended to always have primary keys because various functionality may break if any large table is missing them.
12048
- :param int tmp_table_size: Limits the size of internal in-memory tables. Also set max_heap_table_size. Default is 16777216 (16M).
11981
+ :param int tmp_table_size: Limits the size of internal in-memory tables. Also set max*heap*table_size. Default is 16777216 (16M).
12049
11982
  :param int wait_timeout: The number of seconds the server waits for activity on a noninteractive connection before closing it.
12050
11983
  """
12051
11984
  if connect_timeout is not None:
@@ -12257,7 +12190,7 @@ class MySqlMysqlUserConfigMysql(dict):
12257
12190
  @pulumi.getter(name="longQueryTime")
12258
12191
  def long_query_time(self) -> Optional[float]:
12259
12192
  """
12260
- The slow_query_logs work as SQL statements that take more than long_query_time seconds to execute. Default is 10s.
12193
+ The slow*query*logs work as SQL statements that take more than long*query*time seconds to execute. Default is 10s.
12261
12194
  """
12262
12195
  return pulumi.get(self, "long_query_time")
12263
12196
 
@@ -12273,7 +12206,7 @@ class MySqlMysqlUserConfigMysql(dict):
12273
12206
  @pulumi.getter(name="maxHeapTableSize")
12274
12207
  def max_heap_table_size(self) -> Optional[int]:
12275
12208
  """
12276
- Limits the size of internal in-memory tables. Also set tmp_table_size. Default is 16777216 (16M).
12209
+ Limits the size of internal in-memory tables. Also set tmp*table*size. Default is 16777216 (16M).
12277
12210
  """
12278
12211
  return pulumi.get(self, "max_heap_table_size")
12279
12212
 
@@ -12305,7 +12238,7 @@ class MySqlMysqlUserConfigMysql(dict):
12305
12238
  @pulumi.getter(name="slowQueryLog")
12306
12239
  def slow_query_log(self) -> Optional[bool]:
12307
12240
  """
12308
- Slow query log enables capturing of slow queries. Setting slow_query_log to false also truncates the mysql.slow_log table. Default is off.
12241
+ Slow query log enables capturing of slow queries. Setting slow*query*log to false also truncates the mysql.slow_log table. Default is off.
12309
12242
  """
12310
12243
  return pulumi.get(self, "slow_query_log")
12311
12244
 
@@ -12337,7 +12270,7 @@ class MySqlMysqlUserConfigMysql(dict):
12337
12270
  @pulumi.getter(name="tmpTableSize")
12338
12271
  def tmp_table_size(self) -> Optional[int]:
12339
12272
  """
12340
- Limits the size of internal in-memory tables. Also set max_heap_table_size. Default is 16777216 (16M).
12273
+ Limits the size of internal in-memory tables. Also set max*heap*table_size. Default is 16777216 (16M).
12341
12274
  """
12342
12275
  return pulumi.get(self, "tmp_table_size")
12343
12276
 
@@ -12823,7 +12756,7 @@ class OpenSearchOpensearchUserConfig(dict):
12823
12756
  :param Sequence[str] ip_filter_strings: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
12824
12757
  :param Sequence[str] ip_filters: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
12825
12758
  :param bool keep_index_refresh_interval: Aiven automation resets index.refresh_interval to default value for every index to be sure that indices are always visible to search. If it doesn't fit your case, you can disable this by setting up this flag to true.
12826
- :param int max_index_count: use index_patterns instead. The default value is `0`.
12759
+ :param int max_index_count: Use index_patterns instead. The default value is `0`.
12827
12760
  :param 'OpenSearchOpensearchUserConfigOpenidArgs' openid: OpenSearch OpenID Connect Configuration
12828
12761
  :param 'OpenSearchOpensearchUserConfigOpensearchArgs' opensearch: OpenSearch settings
12829
12762
  :param 'OpenSearchOpensearchUserConfigOpensearchDashboardsArgs' opensearch_dashboards: OpenSearch Dashboards settings
@@ -12891,9 +12824,6 @@ class OpenSearchOpensearchUserConfig(dict):
12891
12824
  """
12892
12825
  Additional Cloud Regions for Backup Replication.
12893
12826
  """
12894
- warnings.warn("""This property is deprecated.""", DeprecationWarning)
12895
- pulumi.log.warn("""additional_backup_regions is deprecated: This property is deprecated.""")
12896
-
12897
12827
  return pulumi.get(self, "additional_backup_regions")
12898
12828
 
12899
12829
  @property
@@ -12967,7 +12897,7 @@ class OpenSearchOpensearchUserConfig(dict):
12967
12897
  @pulumi.getter(name="maxIndexCount")
12968
12898
  def max_index_count(self) -> Optional[int]:
12969
12899
  """
12970
- use index_patterns instead. The default value is `0`.
12900
+ Use index_patterns instead. The default value is `0`.
12971
12901
  """
12972
12902
  return pulumi.get(self, "max_index_count")
12973
12903
 
@@ -15971,7 +15901,7 @@ class PgPgUserConfigPg(dict):
15971
15901
  :param int pg_partman_bgw_dot_interval: Sets the time interval to run pg_partman's scheduled tasks.
15972
15902
  :param str pg_partman_bgw_dot_role: Controls which role to use for pg_partman's scheduled background tasks.
15973
15903
  :param bool pg_stat_monitor_dot_pgsm_enable_query_plan: Enables or disables query plan monitoring.
15974
- :param int pg_stat_monitor_dot_pgsm_max_buckets: Sets the maximum number of buckets .
15904
+ :param int pg_stat_monitor_dot_pgsm_max_buckets: Sets the maximum number of buckets.
15975
15905
  :param str pg_stat_statements_dot_track: Controls which statements are counted. Specify top to track top-level statements (those issued directly by clients), all to also track nested statements (such as statements invoked within functions), or none to disable statement statistics collection. The default value is top.
15976
15906
  :param int temp_file_limit: PostgreSQL temporary file limit in KiB, -1 for unlimited.
15977
15907
  :param str timezone: PostgreSQL service timezone.
@@ -16397,7 +16327,7 @@ class PgPgUserConfigPg(dict):
16397
16327
  @pulumi.getter(name="pgStatMonitorDotPgsmMaxBuckets")
16398
16328
  def pg_stat_monitor_dot_pgsm_max_buckets(self) -> Optional[int]:
16399
16329
  """
16400
- Sets the maximum number of buckets .
16330
+ Sets the maximum number of buckets.
16401
16331
  """
16402
16332
  return pulumi.get(self, "pg_stat_monitor_dot_pgsm_max_buckets")
16403
16333
 
@@ -17243,8 +17173,8 @@ class ProjectTag(dict):
17243
17173
  key: str,
17244
17174
  value: str):
17245
17175
  """
17246
- :param str key: Project tag key
17247
- :param str value: Project tag value
17176
+ :param str key: Project tag key.
17177
+ :param str value: Project tag value.
17248
17178
  """
17249
17179
  pulumi.set(__self__, "key", key)
17250
17180
  pulumi.set(__self__, "value", value)
@@ -17253,7 +17183,7 @@ class ProjectTag(dict):
17253
17183
  @pulumi.getter
17254
17184
  def key(self) -> str:
17255
17185
  """
17256
- Project tag key
17186
+ Project tag key.
17257
17187
  """
17258
17188
  return pulumi.get(self, "key")
17259
17189
 
@@ -17261,7 +17191,7 @@ class ProjectTag(dict):
17261
17191
  @pulumi.getter
17262
17192
  def value(self) -> str:
17263
17193
  """
17264
- Project tag value
17194
+ Project tag value.
17265
17195
  """
17266
17196
  return pulumi.get(self, "value")
17267
17197
 
@@ -17388,12 +17318,6 @@ class RedisComponent(dict):
17388
17318
  return pulumi.get(self, "usage")
17389
17319
 
17390
17320
 
17391
- @pulumi.output_type
17392
- class RedisRedi(dict):
17393
- def __init__(__self__):
17394
- pass
17395
-
17396
-
17397
17321
  @pulumi.output_type
17398
17322
  class RedisRedisUserConfig(dict):
17399
17323
  @staticmethod
@@ -17569,9 +17493,6 @@ class RedisRedisUserConfig(dict):
17569
17493
  """
17570
17494
  Additional Cloud Regions for Backup Replication.
17571
17495
  """
17572
- warnings.warn("""This property is deprecated.""", DeprecationWarning)
17573
- pulumi.log.warn("""additional_backup_regions is deprecated: This property is deprecated.""")
17574
-
17575
17496
  return pulumi.get(self, "additional_backup_regions")
17576
17497
 
17577
17498
  @property
@@ -18608,7 +18529,9 @@ class ServiceIntegrationDatadogUserConfigOpensearch(dict):
18608
18529
  @staticmethod
18609
18530
  def __key_warning(key: str):
18610
18531
  suggest = None
18611
- if key == "indexStatsEnabled":
18532
+ if key == "clusterStatsEnabled":
18533
+ suggest = "cluster_stats_enabled"
18534
+ elif key == "indexStatsEnabled":
18612
18535
  suggest = "index_stats_enabled"
18613
18536
  elif key == "pendingTaskStatsEnabled":
18614
18537
  suggest = "pending_task_stats_enabled"
@@ -18627,14 +18550,18 @@ class ServiceIntegrationDatadogUserConfigOpensearch(dict):
18627
18550
  return super().get(key, default)
18628
18551
 
18629
18552
  def __init__(__self__, *,
18553
+ cluster_stats_enabled: Optional[bool] = None,
18630
18554
  index_stats_enabled: Optional[bool] = None,
18631
18555
  pending_task_stats_enabled: Optional[bool] = None,
18632
18556
  pshard_stats_enabled: Optional[bool] = None):
18633
18557
  """
18558
+ :param bool cluster_stats_enabled: Enable Datadog Opensearch Cluster Monitoring.
18634
18559
  :param bool index_stats_enabled: Enable Datadog Opensearch Index Monitoring.
18635
18560
  :param bool pending_task_stats_enabled: Enable Datadog Opensearch Pending Task Monitoring.
18636
18561
  :param bool pshard_stats_enabled: Enable Datadog Opensearch Primary Shard Monitoring.
18637
18562
  """
18563
+ if cluster_stats_enabled is not None:
18564
+ pulumi.set(__self__, "cluster_stats_enabled", cluster_stats_enabled)
18638
18565
  if index_stats_enabled is not None:
18639
18566
  pulumi.set(__self__, "index_stats_enabled", index_stats_enabled)
18640
18567
  if pending_task_stats_enabled is not None:
@@ -18642,6 +18569,14 @@ class ServiceIntegrationDatadogUserConfigOpensearch(dict):
18642
18569
  if pshard_stats_enabled is not None:
18643
18570
  pulumi.set(__self__, "pshard_stats_enabled", pshard_stats_enabled)
18644
18571
 
18572
+ @property
18573
+ @pulumi.getter(name="clusterStatsEnabled")
18574
+ def cluster_stats_enabled(self) -> Optional[bool]:
18575
+ """
18576
+ Enable Datadog Opensearch Cluster Monitoring.
18577
+ """
18578
+ return pulumi.get(self, "cluster_stats_enabled")
18579
+
18645
18580
  @property
18646
18581
  @pulumi.getter(name="indexStatsEnabled")
18647
18582
  def index_stats_enabled(self) -> Optional[bool]:
@@ -19103,7 +19038,7 @@ class ServiceIntegrationEndpointExternalGoogleCloudBigquery(dict):
19103
19038
  service_account_credentials: str):
19104
19039
  """
19105
19040
  :param str project_id: GCP project id.
19106
- :param str service_account_credentials: This is a JSON object with the fields documented in https://cloud.google.com/iam/docs/creating-managing-service-account-keys .
19041
+ :param str service_account_credentials: This is a JSON object with the fields documented in https://cloud.google.com/iam/docs/creating-managing-service-account-keys.
19107
19042
  """
19108
19043
  pulumi.set(__self__, "project_id", project_id)
19109
19044
  pulumi.set(__self__, "service_account_credentials", service_account_credentials)
@@ -19120,7 +19055,7 @@ class ServiceIntegrationEndpointExternalGoogleCloudBigquery(dict):
19120
19055
  @pulumi.getter(name="serviceAccountCredentials")
19121
19056
  def service_account_credentials(self) -> str:
19122
19057
  """
19123
- This is a JSON object with the fields documented in https://cloud.google.com/iam/docs/creating-managing-service-account-keys .
19058
+ This is a JSON object with the fields documented in https://cloud.google.com/iam/docs/creating-managing-service-account-keys.
19124
19059
  """
19125
19060
  return pulumi.get(self, "service_account_credentials")
19126
19061
 
@@ -19155,7 +19090,7 @@ class ServiceIntegrationEndpointExternalGoogleCloudLoggingUserConfig(dict):
19155
19090
  """
19156
19091
  :param str log_id: Google Cloud Logging log id.
19157
19092
  :param str project_id: GCP project id.
19158
- :param str service_account_credentials: This is a JSON object with the fields documented in https://cloud.google.com/iam/docs/creating-managing-service-account-keys .
19093
+ :param str service_account_credentials: This is a JSON object with the fields documented in https://cloud.google.com/iam/docs/creating-managing-service-account-keys.
19159
19094
  """
19160
19095
  pulumi.set(__self__, "log_id", log_id)
19161
19096
  pulumi.set(__self__, "project_id", project_id)
@@ -19181,7 +19116,7 @@ class ServiceIntegrationEndpointExternalGoogleCloudLoggingUserConfig(dict):
19181
19116
  @pulumi.getter(name="serviceAccountCredentials")
19182
19117
  def service_account_credentials(self) -> str:
19183
19118
  """
19184
- This is a JSON object with the fields documented in https://cloud.google.com/iam/docs/creating-managing-service-account-keys .
19119
+ This is a JSON object with the fields documented in https://cloud.google.com/iam/docs/creating-managing-service-account-keys.
19185
19120
  """
19186
19121
  return pulumi.get(self, "service_account_credentials")
19187
19122
 
@@ -20633,7 +20568,7 @@ class ServiceIntegrationMetricsUserConfigSourceMysqlTelegraf(dict):
20633
20568
  perf_events_statements_time_limit: Optional[int] = None):
20634
20569
  """
20635
20570
  :param bool gather_event_waits: Gather metrics from PERFORMANCE*SCHEMA.EVENT*WAITS.
20636
- :param bool gather_file_events_stats: gather metrics from PERFORMANCE*SCHEMA.FILE*SUMMARY*BY*EVENT_NAME.
20571
+ :param bool gather_file_events_stats: Gather metrics from PERFORMANCE*SCHEMA.FILE*SUMMARY*BY*EVENT_NAME.
20637
20572
  :param bool gather_index_io_waits: Gather metrics from PERFORMANCE*SCHEMA.TABLE*IO*WAITS*SUMMARY*BY*INDEX_USAGE.
20638
20573
  :param bool gather_info_schema_auto_inc: Gather auto_increment columns and max values from information schema.
20639
20574
  :param bool gather_innodb_metrics: Gather metrics from INFORMATION*SCHEMA.INNODB*METRICS.
@@ -20688,7 +20623,7 @@ class ServiceIntegrationMetricsUserConfigSourceMysqlTelegraf(dict):
20688
20623
  @pulumi.getter(name="gatherFileEventsStats")
20689
20624
  def gather_file_events_stats(self) -> Optional[bool]:
20690
20625
  """
20691
- gather metrics from PERFORMANCE*SCHEMA.FILE*SUMMARY*BY*EVENT_NAME.
20626
+ Gather metrics from PERFORMANCE*SCHEMA.FILE*SUMMARY*BY*EVENT_NAME.
20692
20627
  """
20693
20628
  return pulumi.get(self, "gather_file_events_stats")
20694
20629
 
@@ -20906,7 +20841,7 @@ class ServiceIntegrationPrometheusUserConfigSourceMysqlTelegraf(dict):
20906
20841
  perf_events_statements_time_limit: Optional[int] = None):
20907
20842
  """
20908
20843
  :param bool gather_event_waits: Gather metrics from PERFORMANCE*SCHEMA.EVENT*WAITS.
20909
- :param bool gather_file_events_stats: gather metrics from PERFORMANCE*SCHEMA.FILE*SUMMARY*BY*EVENT_NAME.
20844
+ :param bool gather_file_events_stats: Gather metrics from PERFORMANCE*SCHEMA.FILE*SUMMARY*BY*EVENT_NAME.
20910
20845
  :param bool gather_index_io_waits: Gather metrics from PERFORMANCE*SCHEMA.TABLE*IO*WAITS*SUMMARY*BY*INDEX_USAGE.
20911
20846
  :param bool gather_info_schema_auto_inc: Gather auto_increment columns and max values from information schema.
20912
20847
  :param bool gather_innodb_metrics: Gather metrics from INFORMATION*SCHEMA.INNODB*METRICS.
@@ -20961,7 +20896,7 @@ class ServiceIntegrationPrometheusUserConfigSourceMysqlTelegraf(dict):
20961
20896
  @pulumi.getter(name="gatherFileEventsStats")
20962
20897
  def gather_file_events_stats(self) -> Optional[bool]:
20963
20898
  """
20964
- gather metrics from PERFORMANCE*SCHEMA.FILE*SUMMARY*BY*EVENT_NAME.
20899
+ Gather metrics from PERFORMANCE*SCHEMA.FILE*SUMMARY*BY*EVENT_NAME.
20965
20900
  """
20966
20901
  return pulumi.get(self, "gather_file_events_stats")
20967
20902
 
@@ -21129,12 +21064,6 @@ class GetAccountAuthenticationSamlFieldMappingResult(dict):
21129
21064
  return pulumi.get(self, "real_name")
21130
21065
 
21131
21066
 
21132
- @pulumi.output_type
21133
- class GetCassandaCassandraResult(dict):
21134
- def __init__(__self__):
21135
- pass
21136
-
21137
-
21138
21067
  @pulumi.output_type
21139
21068
  class GetCassandaCassandraUserConfigResult(dict):
21140
21069
  def __init__(__self__, *,
@@ -21158,7 +21087,7 @@ class GetCassandaCassandraUserConfigResult(dict):
21158
21087
  :param str additional_backup_regions: Additional Cloud Regions for Backup Replication.
21159
21088
  :param int backup_hour: The hour of day (in UTC) when backup for the service is started. New backup is only started if previous backup has already completed.
21160
21089
  :param int backup_minute: The minute of an hour when backup for the service is started. New backup is only started if previous backup has already completed.
21161
- :param 'GetCassandaCassandraUserConfigCassandraArgs' cassandra: cassandra configuration values
21090
+ :param 'GetCassandaCassandraUserConfigCassandraArgs' cassandra: Cassandra configuration values
21162
21091
  :param str cassandra_version: Cassandra version.
21163
21092
  :param Sequence['GetCassandaCassandraUserConfigIpFilterObjectArgs'] ip_filter_objects: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'
21164
21093
  :param Sequence[str] ip_filter_strings: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
@@ -21236,7 +21165,7 @@ class GetCassandaCassandraUserConfigResult(dict):
21236
21165
  @pulumi.getter
21237
21166
  def cassandra(self) -> Optional['outputs.GetCassandaCassandraUserConfigCassandraResult']:
21238
21167
  """
21239
- cassandra configuration values
21168
+ Cassandra configuration values
21240
21169
  """
21241
21170
  return pulumi.get(self, "cassandra")
21242
21171
 
@@ -21622,12 +21551,6 @@ class GetCassandaTechEmailResult(dict):
21622
21551
  return pulumi.get(self, "email")
21623
21552
 
21624
21553
 
21625
- @pulumi.output_type
21626
- class GetCassandraCassandraResult(dict):
21627
- def __init__(__self__):
21628
- pass
21629
-
21630
-
21631
21554
  @pulumi.output_type
21632
21555
  class GetCassandraCassandraUserConfigResult(dict):
21633
21556
  def __init__(__self__, *,
@@ -21651,7 +21574,7 @@ class GetCassandraCassandraUserConfigResult(dict):
21651
21574
  :param str additional_backup_regions: Additional Cloud Regions for Backup Replication.
21652
21575
  :param int backup_hour: The hour of day (in UTC) when backup for the service is started. New backup is only started if previous backup has already completed.
21653
21576
  :param int backup_minute: The minute of an hour when backup for the service is started. New backup is only started if previous backup has already completed.
21654
- :param 'GetCassandraCassandraUserConfigCassandraArgs' cassandra: cassandra configuration values
21577
+ :param 'GetCassandraCassandraUserConfigCassandraArgs' cassandra: Cassandra configuration values
21655
21578
  :param str cassandra_version: Cassandra version.
21656
21579
  :param Sequence['GetCassandraCassandraUserConfigIpFilterObjectArgs'] ip_filter_objects: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'
21657
21580
  :param Sequence[str] ip_filter_strings: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
@@ -21729,7 +21652,7 @@ class GetCassandraCassandraUserConfigResult(dict):
21729
21652
  @pulumi.getter
21730
21653
  def cassandra(self) -> Optional['outputs.GetCassandraCassandraUserConfigCassandraResult']:
21731
21654
  """
21732
- cassandra configuration values
21655
+ Cassandra configuration values
21733
21656
  """
21734
21657
  return pulumi.get(self, "cassandra")
21735
21658
 
@@ -22115,12 +22038,6 @@ class GetCassandraTechEmailResult(dict):
22115
22038
  return pulumi.get(self, "email")
22116
22039
 
22117
22040
 
22118
- @pulumi.output_type
22119
- class GetClickhouseClickhouseResult(dict):
22120
- def __init__(__self__):
22121
- pass
22122
-
22123
-
22124
22041
  @pulumi.output_type
22125
22042
  class GetClickhouseClickhouseUserConfigResult(dict):
22126
22043
  def __init__(__self__, *,
@@ -22727,16 +22644,11 @@ class GetDragonflyComponentResult(dict):
22727
22644
  return pulumi.get(self, "usage")
22728
22645
 
22729
22646
 
22730
- @pulumi.output_type
22731
- class GetDragonflyDragonflyResult(dict):
22732
- def __init__(__self__):
22733
- pass
22734
-
22735
-
22736
22647
  @pulumi.output_type
22737
22648
  class GetDragonflyDragonflyUserConfigResult(dict):
22738
22649
  def __init__(__self__, *,
22739
22650
  cache_mode: Optional[bool] = None,
22651
+ dragonfly_persistence: Optional[str] = None,
22740
22652
  dragonfly_ssl: Optional[bool] = None,
22741
22653
  ip_filter_objects: Optional[Sequence['outputs.GetDragonflyDragonflyUserConfigIpFilterObjectResult']] = None,
22742
22654
  ip_filter_strings: Optional[Sequence[str]] = None,
@@ -22752,6 +22664,7 @@ class GetDragonflyDragonflyUserConfigResult(dict):
22752
22664
  static_ips: Optional[bool] = None):
22753
22665
  """
22754
22666
  :param bool cache_mode: Evict entries when getting close to maxmemory limit. The default value is `false`.
22667
+ :param str dragonfly_persistence: When persistence is 'rdb', Dragonfly does RDB dumps each 10 minutes. Also RDB dumps are done according to backup schedule for backup purposes. When persistence is 'off', no RDB dumps and backups are done, so data can be lost at any moment if service is restarted for any reason, or if service is powered off. Also service can't be forked.
22755
22668
  :param bool dragonfly_ssl: Require SSL to access Dragonfly. The default value is `true`.
22756
22669
  :param Sequence['GetDragonflyDragonflyUserConfigIpFilterObjectArgs'] ip_filter_objects: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'
22757
22670
  :param Sequence[str] ip_filter_strings: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
@@ -22768,6 +22681,8 @@ class GetDragonflyDragonflyUserConfigResult(dict):
22768
22681
  """
22769
22682
  if cache_mode is not None:
22770
22683
  pulumi.set(__self__, "cache_mode", cache_mode)
22684
+ if dragonfly_persistence is not None:
22685
+ pulumi.set(__self__, "dragonfly_persistence", dragonfly_persistence)
22771
22686
  if dragonfly_ssl is not None:
22772
22687
  pulumi.set(__self__, "dragonfly_ssl", dragonfly_ssl)
22773
22688
  if ip_filter_objects is not None:
@@ -22803,6 +22718,14 @@ class GetDragonflyDragonflyUserConfigResult(dict):
22803
22718
  """
22804
22719
  return pulumi.get(self, "cache_mode")
22805
22720
 
22721
+ @property
22722
+ @pulumi.getter(name="dragonflyPersistence")
22723
+ def dragonfly_persistence(self) -> Optional[str]:
22724
+ """
22725
+ When persistence is 'rdb', Dragonfly does RDB dumps each 10 minutes. Also RDB dumps are done according to backup schedule for backup purposes. When persistence is 'off', no RDB dumps and backups are done, so data can be lost at any moment if service is restarted for any reason, or if service is powered off. Also service can't be forked.
22726
+ """
22727
+ return pulumi.get(self, "dragonfly_persistence")
22728
+
22806
22729
  @property
22807
22730
  @pulumi.getter(name="dragonflySsl")
22808
22731
  def dragonfly_ssl(self) -> Optional[bool]:
@@ -23737,12 +23660,6 @@ class GetGrafanaComponentResult(dict):
23737
23660
  return pulumi.get(self, "usage")
23738
23661
 
23739
23662
 
23740
- @pulumi.output_type
23741
- class GetGrafanaGrafanaResult(dict):
23742
- def __init__(__self__):
23743
- pass
23744
-
23745
-
23746
23663
  @pulumi.output_type
23747
23664
  class GetGrafanaGrafanaUserConfigResult(dict):
23748
23665
  def __init__(__self__, *,
@@ -23923,9 +23840,6 @@ class GetGrafanaGrafanaUserConfigResult(dict):
23923
23840
  """
23924
23841
  Additional Cloud Regions for Backup Replication.
23925
23842
  """
23926
- warnings.warn("""This property is deprecated.""", DeprecationWarning)
23927
- pulumi.log.warn("""additional_backup_regions is deprecated: This property is deprecated.""")
23928
-
23929
23843
  return pulumi.get(self, "additional_backup_regions")
23930
23844
 
23931
23845
  @property
@@ -25865,12 +25779,6 @@ class GetKafkaConnectComponentResult(dict):
25865
25779
  return pulumi.get(self, "usage")
25866
25780
 
25867
25781
 
25868
- @pulumi.output_type
25869
- class GetKafkaConnectKafkaConnectResult(dict):
25870
- def __init__(__self__):
25871
- pass
25872
-
25873
-
25874
25782
  @pulumi.output_type
25875
25783
  class GetKafkaConnectKafkaConnectUserConfigResult(dict):
25876
25784
  def __init__(__self__, *,
@@ -26058,7 +25966,7 @@ class GetKafkaConnectKafkaConnectUserConfigKafkaConnectResult(dict):
26058
25966
  :param str consumer_auto_offset_reset: What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
26059
25967
  :param int consumer_fetch_max_bytes: Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not a absolute maximum.
26060
25968
  :param str consumer_isolation_level: Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
26061
- :param int consumer_max_partition_fetch_bytes: Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress. .
25969
+ :param int consumer_max_partition_fetch_bytes: Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress.
26062
25970
  :param int consumer_max_poll_interval_ms: The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).
26063
25971
  :param int consumer_max_poll_records: The maximum number of records returned in a single call to poll() (defaults to 500).
26064
25972
  :param int offset_flush_interval_ms: The interval at which to try committing offsets for tasks (defaults to 60000).
@@ -26140,7 +26048,7 @@ class GetKafkaConnectKafkaConnectUserConfigKafkaConnectResult(dict):
26140
26048
  @pulumi.getter(name="consumerMaxPartitionFetchBytes")
26141
26049
  def consumer_max_partition_fetch_bytes(self) -> Optional[int]:
26142
26050
  """
26143
- Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress. .
26051
+ Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress.
26144
26052
  """
26145
26053
  return pulumi.get(self, "consumer_max_partition_fetch_bytes")
26146
26054
 
@@ -26452,11 +26360,11 @@ class GetKafkaKafkaResult(dict):
26452
26360
  rest_uri: str,
26453
26361
  schema_registry_uri: str):
26454
26362
  """
26455
- :param str access_cert: The Kafka client certificate
26456
- :param str access_key: The Kafka client certificate key
26457
- :param str connect_uri: The Kafka Connect URI, if any
26458
- :param str rest_uri: The Kafka REST URI, if any
26459
- :param str schema_registry_uri: The Schema Registry URI, if any
26363
+ :param str access_cert: The Kafka client certificate.
26364
+ :param str access_key: The Kafka client certificate key.
26365
+ :param str connect_uri: The Kafka Connect URI.
26366
+ :param str rest_uri: The Kafka REST URI.
26367
+ :param str schema_registry_uri: The Schema Registry URI.
26460
26368
  """
26461
26369
  pulumi.set(__self__, "access_cert", access_cert)
26462
26370
  pulumi.set(__self__, "access_key", access_key)
@@ -26468,7 +26376,7 @@ class GetKafkaKafkaResult(dict):
26468
26376
  @pulumi.getter(name="accessCert")
26469
26377
  def access_cert(self) -> str:
26470
26378
  """
26471
- The Kafka client certificate
26379
+ The Kafka client certificate.
26472
26380
  """
26473
26381
  return pulumi.get(self, "access_cert")
26474
26382
 
@@ -26476,7 +26384,7 @@ class GetKafkaKafkaResult(dict):
26476
26384
  @pulumi.getter(name="accessKey")
26477
26385
  def access_key(self) -> str:
26478
26386
  """
26479
- The Kafka client certificate key
26387
+ The Kafka client certificate key.
26480
26388
  """
26481
26389
  return pulumi.get(self, "access_key")
26482
26390
 
@@ -26484,7 +26392,7 @@ class GetKafkaKafkaResult(dict):
26484
26392
  @pulumi.getter(name="connectUri")
26485
26393
  def connect_uri(self) -> str:
26486
26394
  """
26487
- The Kafka Connect URI, if any
26395
+ The Kafka Connect URI.
26488
26396
  """
26489
26397
  return pulumi.get(self, "connect_uri")
26490
26398
 
@@ -26492,7 +26400,7 @@ class GetKafkaKafkaResult(dict):
26492
26400
  @pulumi.getter(name="restUri")
26493
26401
  def rest_uri(self) -> str:
26494
26402
  """
26495
- The Kafka REST URI, if any
26403
+ The Kafka REST URI.
26496
26404
  """
26497
26405
  return pulumi.get(self, "rest_uri")
26498
26406
 
@@ -26500,7 +26408,7 @@ class GetKafkaKafkaResult(dict):
26500
26408
  @pulumi.getter(name="schemaRegistryUri")
26501
26409
  def schema_registry_uri(self) -> str:
26502
26410
  """
26503
- The Schema Registry URI, if any
26411
+ The Schema Registry URI.
26504
26412
  """
26505
26413
  return pulumi.get(self, "schema_registry_uri")
26506
26414
 
@@ -26880,7 +26788,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
26880
26788
  :param int log_index_size_max_bytes: The maximum size in bytes of the offset index.
26881
26789
  :param int log_local_retention_bytes: The maximum size of local log segments that can grow for a partition before it gets eligible for deletion. If set to -2, the value of log.retention.bytes is used. The effective value should always be less than or equal to log.retention.bytes value.
26882
26790
  :param int log_local_retention_ms: The number of milliseconds to keep the local log segments before it gets eligible for deletion. If set to -2, the value of log.retention.ms is used. The effective value should always be less than or equal to log.retention.ms value.
26883
- :param bool log_message_downconversion_enable: This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. .
26791
+ :param bool log_message_downconversion_enable: This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests.
26884
26792
  :param int log_message_timestamp_difference_max_ms: The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message.
26885
26793
  :param str log_message_timestamp_type: Define whether the timestamp in the message is message create time or log append time.
26886
26794
  :param bool log_preallocate: Should pre allocate file when create new segment?
@@ -26902,7 +26810,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
26902
26810
  :param int replica_fetch_response_max_bytes: Maximum bytes expected for the entire fetch response (defaults to 10485760). Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum.
26903
26811
  :param str sasl_oauthbearer_expected_audience: The (optional) comma-delimited setting for the broker to use to verify that the JWT was issued for one of the expected audiences.
26904
26812
  :param str sasl_oauthbearer_expected_issuer: Optional setting for the broker to use to verify that the JWT was created by the expected issuer.
26905
- :param str sasl_oauthbearer_jwks_endpoint_url: OIDC JWKS endpoint URL. By setting this the SASL SSL OAuth2/OIDC authentication is enabled. See also other options for SASL OAuth2/OIDC. .
26813
+ :param str sasl_oauthbearer_jwks_endpoint_url: OIDC JWKS endpoint URL. By setting this the SASL SSL OAuth2/OIDC authentication is enabled. See also other options for SASL OAuth2/OIDC.
26906
26814
  :param str sasl_oauthbearer_sub_claim_name: Name of the scope from which to extract the subject claim from the JWT. Defaults to sub.
26907
26815
  :param int socket_request_max_bytes: The maximum number of bytes in a socket request (defaults to 104857600).
26908
26816
  :param bool transaction_partition_verification_enable: Enable verification that checks that the partition has been added to the transaction before writing transactional records to the partition.
@@ -27150,7 +27058,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
27150
27058
  @pulumi.getter(name="logMessageDownconversionEnable")
27151
27059
  def log_message_downconversion_enable(self) -> Optional[bool]:
27152
27060
  """
27153
- This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. .
27061
+ This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests.
27154
27062
  """
27155
27063
  return pulumi.get(self, "log_message_downconversion_enable")
27156
27064
 
@@ -27326,7 +27234,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
27326
27234
  @pulumi.getter(name="saslOauthbearerJwksEndpointUrl")
27327
27235
  def sasl_oauthbearer_jwks_endpoint_url(self) -> Optional[str]:
27328
27236
  """
27329
- OIDC JWKS endpoint URL. By setting this the SASL SSL OAuth2/OIDC authentication is enabled. See also other options for SASL OAuth2/OIDC. .
27237
+ OIDC JWKS endpoint URL. By setting this the SASL SSL OAuth2/OIDC authentication is enabled. See also other options for SASL OAuth2/OIDC.
27330
27238
  """
27331
27239
  return pulumi.get(self, "sasl_oauthbearer_jwks_endpoint_url")
27332
27240
 
@@ -27426,7 +27334,7 @@ class GetKafkaKafkaUserConfigKafkaConnectConfigResult(dict):
27426
27334
  :param str consumer_auto_offset_reset: What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
27427
27335
  :param int consumer_fetch_max_bytes: Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not a absolute maximum.
27428
27336
  :param str consumer_isolation_level: Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
27429
- :param int consumer_max_partition_fetch_bytes: Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress. .
27337
+ :param int consumer_max_partition_fetch_bytes: Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress.
27430
27338
  :param int consumer_max_poll_interval_ms: The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).
27431
27339
  :param int consumer_max_poll_records: The maximum number of records returned in a single call to poll() (defaults to 500).
27432
27340
  :param int offset_flush_interval_ms: The interval at which to try committing offsets for tasks (defaults to 60000).
@@ -27508,7 +27416,7 @@ class GetKafkaKafkaUserConfigKafkaConnectConfigResult(dict):
27508
27416
  @pulumi.getter(name="consumerMaxPartitionFetchBytes")
27509
27417
  def consumer_max_partition_fetch_bytes(self) -> Optional[int]:
27510
27418
  """
27511
- Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress. .
27419
+ Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress.
27512
27420
  """
27513
27421
  return pulumi.get(self, "consumer_max_partition_fetch_bytes")
27514
27422
 
@@ -28123,12 +28031,6 @@ class GetKafkaMirrorMakerComponentResult(dict):
28123
28031
  return pulumi.get(self, "usage")
28124
28032
 
28125
28033
 
28126
- @pulumi.output_type
28127
- class GetKafkaMirrorMakerKafkaMirrormakerResult(dict):
28128
- def __init__(__self__):
28129
- pass
28130
-
28131
-
28132
28034
  @pulumi.output_type
28133
28035
  class GetKafkaMirrorMakerKafkaMirrormakerUserConfigResult(dict):
28134
28036
  def __init__(__self__, *,
@@ -29030,12 +28932,6 @@ class GetM3AggregatorComponentResult(dict):
29030
28932
  return pulumi.get(self, "usage")
29031
28933
 
29032
28934
 
29033
- @pulumi.output_type
29034
- class GetM3AggregatorM3aggregatorResult(dict):
29035
- def __init__(__self__):
29036
- pass
29037
-
29038
-
29039
28935
  @pulumi.output_type
29040
28936
  class GetM3AggregatorM3aggregatorUserConfigResult(dict):
29041
28937
  def __init__(__self__, *,
@@ -29343,12 +29239,6 @@ class GetM3DbComponentResult(dict):
29343
29239
  return pulumi.get(self, "usage")
29344
29240
 
29345
29241
 
29346
- @pulumi.output_type
29347
- class GetM3DbM3dbResult(dict):
29348
- def __init__(__self__):
29349
- pass
29350
-
29351
-
29352
29242
  @pulumi.output_type
29353
29243
  class GetM3DbM3dbUserConfigResult(dict):
29354
29244
  def __init__(__self__, *,
@@ -29433,9 +29323,6 @@ class GetM3DbM3dbUserConfigResult(dict):
29433
29323
  """
29434
29324
  Additional Cloud Regions for Backup Replication.
29435
29325
  """
29436
- warnings.warn("""This property is deprecated.""", DeprecationWarning)
29437
- pulumi.log.warn("""additional_backup_regions is deprecated: This property is deprecated.""")
29438
-
29439
29326
  return pulumi.get(self, "additional_backup_regions")
29440
29327
 
29441
29328
  @property
@@ -30291,12 +30178,6 @@ class GetMySqlComponentResult(dict):
30291
30178
  return pulumi.get(self, "usage")
30292
30179
 
30293
30180
 
30294
- @pulumi.output_type
30295
- class GetMySqlMysqlResult(dict):
30296
- def __init__(__self__):
30297
- pass
30298
-
30299
-
30300
30181
  @pulumi.output_type
30301
30182
  class GetMySqlMysqlUserConfigResult(dict):
30302
30183
  def __init__(__self__, *,
@@ -31399,7 +31280,7 @@ class GetOpenSearchOpensearchUserConfigResult(dict):
31399
31280
  :param Sequence[str] ip_filter_strings: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
31400
31281
  :param Sequence[str] ip_filters: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
31401
31282
  :param bool keep_index_refresh_interval: Aiven automation resets index.refresh_interval to default value for every index to be sure that indices are always visible to search. If it doesn't fit your case, you can disable this by setting up this flag to true.
31402
- :param int max_index_count: use index_patterns instead. The default value is `0`.
31283
+ :param int max_index_count: Use index_patterns instead. The default value is `0`.
31403
31284
  :param 'GetOpenSearchOpensearchUserConfigOpenidArgs' openid: OpenSearch OpenID Connect Configuration
31404
31285
  :param 'GetOpenSearchOpensearchUserConfigOpensearchArgs' opensearch: OpenSearch settings
31405
31286
  :param 'GetOpenSearchOpensearchUserConfigOpensearchDashboardsArgs' opensearch_dashboards: OpenSearch Dashboards settings
@@ -31467,9 +31348,6 @@ class GetOpenSearchOpensearchUserConfigResult(dict):
31467
31348
  """
31468
31349
  Additional Cloud Regions for Backup Replication.
31469
31350
  """
31470
- warnings.warn("""This property is deprecated.""", DeprecationWarning)
31471
- pulumi.log.warn("""additional_backup_regions is deprecated: This property is deprecated.""")
31472
-
31473
31351
  return pulumi.get(self, "additional_backup_regions")
31474
31352
 
31475
31353
  @property
@@ -31543,7 +31421,7 @@ class GetOpenSearchOpensearchUserConfigResult(dict):
31543
31421
  @pulumi.getter(name="maxIndexCount")
31544
31422
  def max_index_count(self) -> Optional[int]:
31545
31423
  """
31546
- use index_patterns instead. The default value is `0`.
31424
+ Use index_patterns instead. The default value is `0`.
31547
31425
  """
31548
31426
  return pulumi.get(self, "max_index_count")
31549
31427
 
@@ -33777,7 +33655,7 @@ class GetPgPgUserConfigPgResult(dict):
33777
33655
  :param int pg_partman_bgw_dot_interval: Sets the time interval to run pg_partman's scheduled tasks.
33778
33656
  :param str pg_partman_bgw_dot_role: Controls which role to use for pg_partman's scheduled background tasks.
33779
33657
  :param bool pg_stat_monitor_dot_pgsm_enable_query_plan: Enables or disables query plan monitoring.
33780
- :param int pg_stat_monitor_dot_pgsm_max_buckets: Sets the maximum number of buckets .
33658
+ :param int pg_stat_monitor_dot_pgsm_max_buckets: Sets the maximum number of buckets.
33781
33659
  :param str pg_stat_statements_dot_track: Controls which statements are counted. Specify top to track top-level statements (those issued directly by clients), all to also track nested statements (such as statements invoked within functions), or none to disable statement statistics collection. The default value is top.
33782
33660
  :param int temp_file_limit: PostgreSQL temporary file limit in KiB, -1 for unlimited.
33783
33661
  :param str timezone: PostgreSQL service timezone.
@@ -34203,7 +34081,7 @@ class GetPgPgUserConfigPgResult(dict):
34203
34081
  @pulumi.getter(name="pgStatMonitorDotPgsmMaxBuckets")
34204
34082
  def pg_stat_monitor_dot_pgsm_max_buckets(self) -> Optional[int]:
34205
34083
  """
34206
- Sets the maximum number of buckets .
34084
+ Sets the maximum number of buckets.
34207
34085
  """
34208
34086
  return pulumi.get(self, "pg_stat_monitor_dot_pgsm_max_buckets")
34209
34087
 
@@ -34901,8 +34779,8 @@ class GetProjectTagResult(dict):
34901
34779
  key: str,
34902
34780
  value: str):
34903
34781
  """
34904
- :param str key: Project tag key
34905
- :param str value: Project tag value
34782
+ :param str key: Project tag key.
34783
+ :param str value: Project tag value.
34906
34784
  """
34907
34785
  pulumi.set(__self__, "key", key)
34908
34786
  pulumi.set(__self__, "value", value)
@@ -34911,7 +34789,7 @@ class GetProjectTagResult(dict):
34911
34789
  @pulumi.getter
34912
34790
  def key(self) -> str:
34913
34791
  """
34914
- Project tag key
34792
+ Project tag key.
34915
34793
  """
34916
34794
  return pulumi.get(self, "key")
34917
34795
 
@@ -34919,7 +34797,7 @@ class GetProjectTagResult(dict):
34919
34797
  @pulumi.getter
34920
34798
  def value(self) -> str:
34921
34799
  """
34922
- Project tag value
34800
+ Project tag value.
34923
34801
  """
34924
34802
  return pulumi.get(self, "value")
34925
34803
 
@@ -35019,12 +34897,6 @@ class GetRedisComponentResult(dict):
35019
34897
  return pulumi.get(self, "usage")
35020
34898
 
35021
34899
 
35022
- @pulumi.output_type
35023
- class GetRedisRediResult(dict):
35024
- def __init__(__self__):
35025
- pass
35026
-
35027
-
35028
34900
  @pulumi.output_type
35029
34901
  class GetRedisRedisUserConfigResult(dict):
35030
34902
  def __init__(__self__, *,
@@ -35137,9 +35009,6 @@ class GetRedisRedisUserConfigResult(dict):
35137
35009
  """
35138
35010
  Additional Cloud Regions for Backup Replication.
35139
35011
  """
35140
- warnings.warn("""This property is deprecated.""", DeprecationWarning)
35141
- pulumi.log.warn("""additional_backup_regions is deprecated: This property is deprecated.""")
35142
-
35143
35012
  return pulumi.get(self, "additional_backup_regions")
35144
35013
 
35145
35014
  @property
@@ -36072,14 +35941,18 @@ class GetServiceIntegrationDatadogUserConfigDatadogTagResult(dict):
36072
35941
  @pulumi.output_type
36073
35942
  class GetServiceIntegrationDatadogUserConfigOpensearchResult(dict):
36074
35943
  def __init__(__self__, *,
35944
+ cluster_stats_enabled: Optional[bool] = None,
36075
35945
  index_stats_enabled: Optional[bool] = None,
36076
35946
  pending_task_stats_enabled: Optional[bool] = None,
36077
35947
  pshard_stats_enabled: Optional[bool] = None):
36078
35948
  """
35949
+ :param bool cluster_stats_enabled: Enable Datadog Opensearch Cluster Monitoring.
36079
35950
  :param bool index_stats_enabled: Enable Datadog Opensearch Index Monitoring.
36080
35951
  :param bool pending_task_stats_enabled: Enable Datadog Opensearch Pending Task Monitoring.
36081
35952
  :param bool pshard_stats_enabled: Enable Datadog Opensearch Primary Shard Monitoring.
36082
35953
  """
35954
+ if cluster_stats_enabled is not None:
35955
+ pulumi.set(__self__, "cluster_stats_enabled", cluster_stats_enabled)
36083
35956
  if index_stats_enabled is not None:
36084
35957
  pulumi.set(__self__, "index_stats_enabled", index_stats_enabled)
36085
35958
  if pending_task_stats_enabled is not None:
@@ -36087,6 +35960,14 @@ class GetServiceIntegrationDatadogUserConfigOpensearchResult(dict):
36087
35960
  if pshard_stats_enabled is not None:
36088
35961
  pulumi.set(__self__, "pshard_stats_enabled", pshard_stats_enabled)
36089
35962
 
35963
+ @property
35964
+ @pulumi.getter(name="clusterStatsEnabled")
35965
+ def cluster_stats_enabled(self) -> Optional[bool]:
35966
+ """
35967
+ Enable Datadog Opensearch Cluster Monitoring.
35968
+ """
35969
+ return pulumi.get(self, "cluster_stats_enabled")
35970
+
36090
35971
  @property
36091
35972
  @pulumi.getter(name="indexStatsEnabled")
36092
35973
  def index_stats_enabled(self) -> Optional[bool]:
@@ -36426,7 +36307,7 @@ class GetServiceIntegrationEndpointExternalGoogleCloudBigqueryResult(dict):
36426
36307
  service_account_credentials: str):
36427
36308
  """
36428
36309
  :param str project_id: GCP project id.
36429
- :param str service_account_credentials: This is a JSON object with the fields documented in https://cloud.google.com/iam/docs/creating-managing-service-account-keys .
36310
+ :param str service_account_credentials: This is a JSON object with the fields documented in https://cloud.google.com/iam/docs/creating-managing-service-account-keys.
36430
36311
  """
36431
36312
  pulumi.set(__self__, "project_id", project_id)
36432
36313
  pulumi.set(__self__, "service_account_credentials", service_account_credentials)
@@ -36443,7 +36324,7 @@ class GetServiceIntegrationEndpointExternalGoogleCloudBigqueryResult(dict):
36443
36324
  @pulumi.getter(name="serviceAccountCredentials")
36444
36325
  def service_account_credentials(self) -> str:
36445
36326
  """
36446
- This is a JSON object with the fields documented in https://cloud.google.com/iam/docs/creating-managing-service-account-keys .
36327
+ This is a JSON object with the fields documented in https://cloud.google.com/iam/docs/creating-managing-service-account-keys.
36447
36328
  """
36448
36329
  return pulumi.get(self, "service_account_credentials")
36449
36330
 
@@ -36457,7 +36338,7 @@ class GetServiceIntegrationEndpointExternalGoogleCloudLoggingUserConfigResult(di
36457
36338
  """
36458
36339
  :param str log_id: Google Cloud Logging log id.
36459
36340
  :param str project_id: GCP project id.
36460
- :param str service_account_credentials: This is a JSON object with the fields documented in https://cloud.google.com/iam/docs/creating-managing-service-account-keys .
36341
+ :param str service_account_credentials: This is a JSON object with the fields documented in https://cloud.google.com/iam/docs/creating-managing-service-account-keys.
36461
36342
  """
36462
36343
  pulumi.set(__self__, "log_id", log_id)
36463
36344
  pulumi.set(__self__, "project_id", project_id)
@@ -36483,7 +36364,7 @@ class GetServiceIntegrationEndpointExternalGoogleCloudLoggingUserConfigResult(di
36483
36364
  @pulumi.getter(name="serviceAccountCredentials")
36484
36365
  def service_account_credentials(self) -> str:
36485
36366
  """
36486
- This is a JSON object with the fields documented in https://cloud.google.com/iam/docs/creating-managing-service-account-keys .
36367
+ This is a JSON object with the fields documented in https://cloud.google.com/iam/docs/creating-managing-service-account-keys.
36487
36368
  """
36488
36369
  return pulumi.get(self, "service_account_credentials")
36489
36370
 
@@ -37524,7 +37405,7 @@ class GetServiceIntegrationMetricsUserConfigSourceMysqlTelegrafResult(dict):
37524
37405
  perf_events_statements_time_limit: Optional[int] = None):
37525
37406
  """
37526
37407
  :param bool gather_event_waits: Gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS.
37527
- :param bool gather_file_events_stats: gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME.
37408
+ :param bool gather_file_events_stats: Gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME.
37528
37409
  :param bool gather_index_io_waits: Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE.
37529
37410
  :param bool gather_info_schema_auto_inc: Gather auto_increment columns and max values from information schema.
37530
37411
  :param bool gather_innodb_metrics: Gather metrics from INFORMATION_SCHEMA.INNODB_METRICS.
@@ -37579,7 +37460,7 @@ class GetServiceIntegrationMetricsUserConfigSourceMysqlTelegrafResult(dict):
37579
37460
  @pulumi.getter(name="gatherFileEventsStats")
37580
37461
  def gather_file_events_stats(self) -> Optional[bool]:
37581
37462
  """
37582
- gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME.
37463
+ Gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME.
37583
37464
  """
37584
37465
  return pulumi.get(self, "gather_file_events_stats")
37585
37466
 
@@ -37737,7 +37618,7 @@ class GetServiceIntegrationPrometheusUserConfigSourceMysqlTelegrafResult(dict):
37737
37618
  perf_events_statements_time_limit: Optional[int] = None):
37738
37619
  """
37739
37620
  :param bool gather_event_waits: Gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS.
37740
- :param bool gather_file_events_stats: gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME.
37621
+ :param bool gather_file_events_stats: Gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME.
37741
37622
  :param bool gather_index_io_waits: Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE.
37742
37623
  :param bool gather_info_schema_auto_inc: Gather auto_increment columns and max values from information schema.
37743
37624
  :param bool gather_innodb_metrics: Gather metrics from INFORMATION_SCHEMA.INNODB_METRICS.
@@ -37792,7 +37673,7 @@ class GetServiceIntegrationPrometheusUserConfigSourceMysqlTelegrafResult(dict):
37792
37673
  @pulumi.getter(name="gatherFileEventsStats")
37793
37674
  def gather_file_events_stats(self) -> Optional[bool]:
37794
37675
  """
37795
- gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME.
37676
+ Gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME.
37796
37677
  """
37797
37678
  return pulumi.get(self, "gather_file_events_stats")
37798
37679