pulumi-aiven 6.14.0__py3-none-any.whl → 6.14.0a1711516617__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of pulumi-aiven might be problematic; see the registry's advisory page for more details.

Files changed (132)
  1. pulumi_aiven/_inputs.py +262 -200
  2. pulumi_aiven/account.py +6 -2
  3. pulumi_aiven/account_authentication.py +2 -2
  4. pulumi_aiven/account_team.py +6 -6
  5. pulumi_aiven/account_team_member.py +10 -6
  6. pulumi_aiven/account_team_project.py +12 -12
  7. pulumi_aiven/aws_privatelink.py +22 -18
  8. pulumi_aiven/aws_vpc_peering_connection.py +12 -8
  9. pulumi_aiven/azure_privatelink.py +22 -18
  10. pulumi_aiven/azure_privatelink_connection_approval.py +14 -112
  11. pulumi_aiven/azure_vpc_peering_connection.py +10 -6
  12. pulumi_aiven/billing_group.py +18 -18
  13. pulumi_aiven/cassandra.py +57 -25
  14. pulumi_aiven/cassandra_user.py +22 -18
  15. pulumi_aiven/clickhouse.py +57 -25
  16. pulumi_aiven/clickhouse_database.py +26 -24
  17. pulumi_aiven/clickhouse_grant.py +37 -35
  18. pulumi_aiven/clickhouse_role.py +20 -16
  19. pulumi_aiven/clickhouse_user.py +22 -18
  20. pulumi_aiven/connection_pool.py +28 -24
  21. pulumi_aiven/dragonfly.py +263 -124
  22. pulumi_aiven/flink.py +36 -32
  23. pulumi_aiven/flink_application.py +24 -22
  24. pulumi_aiven/flink_application_deployment.py +21 -21
  25. pulumi_aiven/flink_application_version.py +14 -96
  26. pulumi_aiven/gcp_privatelink.py +21 -21
  27. pulumi_aiven/gcp_privatelink_connection_approval.py +21 -21
  28. pulumi_aiven/gcp_vpc_peering_connection.py +8 -4
  29. pulumi_aiven/get_account.py +4 -0
  30. pulumi_aiven/get_aws_privatelink.py +14 -10
  31. pulumi_aiven/get_aws_vpc_peering_connection.py +10 -6
  32. pulumi_aiven/get_azure_privatelink.py +14 -10
  33. pulumi_aiven/get_azure_vpc_peering_connection.py +6 -2
  34. pulumi_aiven/get_billing_group.py +2 -2
  35. pulumi_aiven/get_cassanda.py +24 -7
  36. pulumi_aiven/get_cassandra.py +24 -7
  37. pulumi_aiven/get_cassandra_user.py +14 -10
  38. pulumi_aiven/get_clickhouse.py +24 -7
  39. pulumi_aiven/get_clickhouse_database.py +14 -10
  40. pulumi_aiven/get_clickhouse_user.py +14 -10
  41. pulumi_aiven/get_connection_pool.py +14 -10
  42. pulumi_aiven/get_dragonfly.py +20 -27
  43. pulumi_aiven/get_flink.py +10 -6
  44. pulumi_aiven/get_flink_application.py +12 -8
  45. pulumi_aiven/get_flink_application_version.py +12 -8
  46. pulumi_aiven/get_gcp_vpc_peering_connection.py +6 -2
  47. pulumi_aiven/get_grafana.py +24 -7
  48. pulumi_aiven/get_kafka.py +17 -13
  49. pulumi_aiven/get_kafka_acl.py +14 -10
  50. pulumi_aiven/get_kafka_connect.py +24 -7
  51. pulumi_aiven/get_kafka_connector.py +14 -10
  52. pulumi_aiven/get_kafka_mirror_maker.py +24 -7
  53. pulumi_aiven/get_kafka_schema.py +14 -10
  54. pulumi_aiven/get_kafka_schema_configuration.py +14 -10
  55. pulumi_aiven/get_kafka_schema_registry_acl.py +6 -6
  56. pulumi_aiven/get_kafka_topic.py +20 -16
  57. pulumi_aiven/get_kafka_user.py +14 -10
  58. pulumi_aiven/get_m3_aggregator.py +24 -7
  59. pulumi_aiven/get_m3_db.py +24 -7
  60. pulumi_aiven/get_m3db_user.py +14 -10
  61. pulumi_aiven/get_mirror_maker_replication_flow.py +18 -14
  62. pulumi_aiven/get_my_sql.py +24 -7
  63. pulumi_aiven/get_mysql_database.py +14 -10
  64. pulumi_aiven/get_mysql_user.py +14 -10
  65. pulumi_aiven/get_open_search.py +10 -6
  66. pulumi_aiven/get_open_search_acl_config.py +14 -10
  67. pulumi_aiven/get_open_search_acl_rule.py +14 -10
  68. pulumi_aiven/get_opensearch_security_plugin_config.py +14 -10
  69. pulumi_aiven/get_opensearch_user.py +14 -10
  70. pulumi_aiven/get_organization.py +4 -0
  71. pulumi_aiven/get_organization_user_group.py +6 -2
  72. pulumi_aiven/get_pg.py +10 -6
  73. pulumi_aiven/get_pg_database.py +14 -10
  74. pulumi_aiven/get_pg_user.py +14 -10
  75. pulumi_aiven/get_project.py +21 -17
  76. pulumi_aiven/get_project_user.py +13 -9
  77. pulumi_aiven/get_project_vpc.py +6 -2
  78. pulumi_aiven/get_redis.py +24 -7
  79. pulumi_aiven/get_redis_user.py +14 -10
  80. pulumi_aiven/get_service_component.py +8 -4
  81. pulumi_aiven/get_service_integration.py +33 -19
  82. pulumi_aiven/get_service_integration_endpoint.py +6 -2
  83. pulumi_aiven/get_transit_gateway_vpc_attachment.py +6 -2
  84. pulumi_aiven/grafana.py +267 -82
  85. pulumi_aiven/influx_db.py +21 -21
  86. pulumi_aiven/influxdb_database.py +21 -21
  87. pulumi_aiven/influxdb_user.py +21 -21
  88. pulumi_aiven/kafka.py +54 -50
  89. pulumi_aiven/kafka_acl.py +24 -20
  90. pulumi_aiven/kafka_connect.py +267 -80
  91. pulumi_aiven/kafka_connector.py +30 -26
  92. pulumi_aiven/kafka_mirror_maker.py +267 -80
  93. pulumi_aiven/kafka_schema.py +22 -18
  94. pulumi_aiven/kafka_schema_configuration.py +24 -20
  95. pulumi_aiven/kafka_schema_registry_acl.py +14 -54
  96. pulumi_aiven/kafka_topic.py +46 -84
  97. pulumi_aiven/kafka_user.py +22 -18
  98. pulumi_aiven/m3_aggregator.py +267 -80
  99. pulumi_aiven/m3_db.py +267 -80
  100. pulumi_aiven/m3db_user.py +20 -16
  101. pulumi_aiven/mirror_maker_replication_flow.py +26 -22
  102. pulumi_aiven/my_sql.py +267 -80
  103. pulumi_aiven/mysql_database.py +24 -20
  104. pulumi_aiven/mysql_user.py +22 -18
  105. pulumi_aiven/open_search.py +31 -27
  106. pulumi_aiven/open_search_acl_config.py +36 -28
  107. pulumi_aiven/open_search_acl_rule.py +40 -36
  108. pulumi_aiven/opensearch_security_plugin_config.py +32 -28
  109. pulumi_aiven/opensearch_user.py +22 -18
  110. pulumi_aiven/organization.py +6 -2
  111. pulumi_aiven/organization_group_project.py +0 -46
  112. pulumi_aiven/organization_user.py +20 -2
  113. pulumi_aiven/organization_user_group.py +6 -4
  114. pulumi_aiven/organization_user_group_member.py +10 -8
  115. pulumi_aiven/organizational_unit.py +6 -6
  116. pulumi_aiven/outputs.py +390 -271
  117. pulumi_aiven/pg.py +30 -30
  118. pulumi_aiven/pg_database.py +24 -20
  119. pulumi_aiven/pg_user.py +22 -18
  120. pulumi_aiven/project.py +79 -75
  121. pulumi_aiven/project_user.py +28 -24
  122. pulumi_aiven/project_vpc.py +7 -31
  123. pulumi_aiven/redis.py +267 -80
  124. pulumi_aiven/redis_user.py +22 -18
  125. pulumi_aiven/service_integration.py +58 -54
  126. pulumi_aiven/static_ip.py +7 -7
  127. pulumi_aiven/transit_gateway_vpc_attachment.py +8 -4
  128. {pulumi_aiven-6.14.0.dist-info → pulumi_aiven-6.14.0a1711516617.dist-info}/METADATA +1 -1
  129. pulumi_aiven-6.14.0a1711516617.dist-info/RECORD +150 -0
  130. pulumi_aiven-6.14.0.dist-info/RECORD +0 -150
  131. {pulumi_aiven-6.14.0.dist-info → pulumi_aiven-6.14.0a1711516617.dist-info}/WHEEL +0 -0
  132. {pulumi_aiven-6.14.0.dist-info → pulumi_aiven-6.14.0a1711516617.dist-info}/top_level.txt +0 -0
pulumi_aiven/outputs.py CHANGED
@@ -12,6 +12,7 @@ from . import outputs
12
12
 
13
13
  __all__ = [
14
14
  'AccountAuthenticationSamlFieldMapping',
15
+ 'CassandraCassandra',
15
16
  'CassandraCassandraUserConfig',
16
17
  'CassandraCassandraUserConfigCassandra',
17
18
  'CassandraCassandraUserConfigIpFilterObject',
@@ -21,6 +22,7 @@ __all__ = [
21
22
  'CassandraServiceIntegration',
22
23
  'CassandraTag',
23
24
  'CassandraTechEmail',
25
+ 'ClickhouseClickhouse',
24
26
  'ClickhouseClickhouseUserConfig',
25
27
  'ClickhouseClickhouseUserConfigIpFilterObject',
26
28
  'ClickhouseClickhouseUserConfigPrivateAccess',
@@ -33,6 +35,7 @@ __all__ = [
33
35
  'ClickhouseTag',
34
36
  'ClickhouseTechEmail',
35
37
  'DragonflyComponent',
38
+ 'DragonflyDragonfly',
36
39
  'DragonflyDragonflyUserConfig',
37
40
  'DragonflyDragonflyUserConfigIpFilterObject',
38
41
  'DragonflyDragonflyUserConfigMigration',
@@ -53,6 +56,7 @@ __all__ = [
53
56
  'FlinkTag',
54
57
  'FlinkTechEmail',
55
58
  'GrafanaComponent',
59
+ 'GrafanaGrafana',
56
60
  'GrafanaGrafanaUserConfig',
57
61
  'GrafanaGrafanaUserConfigAuthAzuread',
58
62
  'GrafanaGrafanaUserConfigAuthGenericOauth',
@@ -82,6 +86,7 @@ __all__ = [
82
86
  'InfluxDbTechEmail',
83
87
  'KafkaComponent',
84
88
  'KafkaConnectComponent',
89
+ 'KafkaConnectKafkaConnect',
85
90
  'KafkaConnectKafkaConnectUserConfig',
86
91
  'KafkaConnectKafkaConnectUserConfigIpFilterObject',
87
92
  'KafkaConnectKafkaConnectUserConfigKafkaConnect',
@@ -106,6 +111,7 @@ __all__ = [
106
111
  'KafkaKafkaUserConfigTieredStorage',
107
112
  'KafkaKafkaUserConfigTieredStorageLocalCache',
108
113
  'KafkaMirrorMakerComponent',
114
+ 'KafkaMirrorMakerKafkaMirrormaker',
109
115
  'KafkaMirrorMakerKafkaMirrormakerUserConfig',
110
116
  'KafkaMirrorMakerKafkaMirrormakerUserConfigIpFilterObject',
111
117
  'KafkaMirrorMakerKafkaMirrormakerUserConfigKafkaMirrormaker',
@@ -118,12 +124,14 @@ __all__ = [
118
124
  'KafkaTopicConfig',
119
125
  'KafkaTopicTag',
120
126
  'M3AggregatorComponent',
127
+ 'M3AggregatorM3aggregator',
121
128
  'M3AggregatorM3aggregatorUserConfig',
122
129
  'M3AggregatorM3aggregatorUserConfigIpFilterObject',
123
130
  'M3AggregatorServiceIntegration',
124
131
  'M3AggregatorTag',
125
132
  'M3AggregatorTechEmail',
126
133
  'M3DbComponent',
134
+ 'M3DbM3db',
127
135
  'M3DbM3dbUserConfig',
128
136
  'M3DbM3dbUserConfigIpFilterObject',
129
137
  'M3DbM3dbUserConfigLimits',
@@ -142,6 +150,7 @@ __all__ = [
142
150
  'M3DbTag',
143
151
  'M3DbTechEmail',
144
152
  'MySqlComponent',
153
+ 'MySqlMysql',
145
154
  'MySqlMysqlUserConfig',
146
155
  'MySqlMysqlUserConfigIpFilterObject',
147
156
  'MySqlMysqlUserConfigMigration',
@@ -193,6 +202,7 @@ __all__ = [
193
202
  'PgTechEmail',
194
203
  'ProjectTag',
195
204
  'RedisComponent',
205
+ 'RedisRedi',
196
206
  'RedisRedisUserConfig',
197
207
  'RedisRedisUserConfigIpFilterObject',
198
208
  'RedisRedisUserConfigMigration',
@@ -245,6 +255,7 @@ __all__ = [
245
255
  'ServiceIntegrationPrometheusUserConfigSourceMysql',
246
256
  'ServiceIntegrationPrometheusUserConfigSourceMysqlTelegraf',
247
257
  'GetAccountAuthenticationSamlFieldMappingResult',
258
+ 'GetCassandaCassandraResult',
248
259
  'GetCassandaCassandraUserConfigResult',
249
260
  'GetCassandaCassandraUserConfigCassandraResult',
250
261
  'GetCassandaCassandraUserConfigIpFilterObjectResult',
@@ -254,6 +265,7 @@ __all__ = [
254
265
  'GetCassandaServiceIntegrationResult',
255
266
  'GetCassandaTagResult',
256
267
  'GetCassandaTechEmailResult',
268
+ 'GetCassandraCassandraResult',
257
269
  'GetCassandraCassandraUserConfigResult',
258
270
  'GetCassandraCassandraUserConfigCassandraResult',
259
271
  'GetCassandraCassandraUserConfigIpFilterObjectResult',
@@ -263,6 +275,7 @@ __all__ = [
263
275
  'GetCassandraServiceIntegrationResult',
264
276
  'GetCassandraTagResult',
265
277
  'GetCassandraTechEmailResult',
278
+ 'GetClickhouseClickhouseResult',
266
279
  'GetClickhouseClickhouseUserConfigResult',
267
280
  'GetClickhouseClickhouseUserConfigIpFilterObjectResult',
268
281
  'GetClickhouseClickhouseUserConfigPrivateAccessResult',
@@ -273,6 +286,7 @@ __all__ = [
273
286
  'GetClickhouseTagResult',
274
287
  'GetClickhouseTechEmailResult',
275
288
  'GetDragonflyComponentResult',
289
+ 'GetDragonflyDragonflyResult',
276
290
  'GetDragonflyDragonflyUserConfigResult',
277
291
  'GetDragonflyDragonflyUserConfigIpFilterObjectResult',
278
292
  'GetDragonflyDragonflyUserConfigMigrationResult',
@@ -293,6 +307,7 @@ __all__ = [
293
307
  'GetFlinkTagResult',
294
308
  'GetFlinkTechEmailResult',
295
309
  'GetGrafanaComponentResult',
310
+ 'GetGrafanaGrafanaResult',
296
311
  'GetGrafanaGrafanaUserConfigResult',
297
312
  'GetGrafanaGrafanaUserConfigAuthAzureadResult',
298
313
  'GetGrafanaGrafanaUserConfigAuthGenericOauthResult',
@@ -322,6 +337,7 @@ __all__ = [
322
337
  'GetInfluxDbTechEmailResult',
323
338
  'GetKafkaComponentResult',
324
339
  'GetKafkaConnectComponentResult',
340
+ 'GetKafkaConnectKafkaConnectResult',
325
341
  'GetKafkaConnectKafkaConnectUserConfigResult',
326
342
  'GetKafkaConnectKafkaConnectUserConfigIpFilterObjectResult',
327
343
  'GetKafkaConnectKafkaConnectUserConfigKafkaConnectResult',
@@ -346,6 +362,7 @@ __all__ = [
346
362
  'GetKafkaKafkaUserConfigTieredStorageResult',
347
363
  'GetKafkaKafkaUserConfigTieredStorageLocalCacheResult',
348
364
  'GetKafkaMirrorMakerComponentResult',
365
+ 'GetKafkaMirrorMakerKafkaMirrormakerResult',
349
366
  'GetKafkaMirrorMakerKafkaMirrormakerUserConfigResult',
350
367
  'GetKafkaMirrorMakerKafkaMirrormakerUserConfigIpFilterObjectResult',
351
368
  'GetKafkaMirrorMakerKafkaMirrormakerUserConfigKafkaMirrormakerResult',
@@ -358,12 +375,14 @@ __all__ = [
358
375
  'GetKafkaTopicConfigResult',
359
376
  'GetKafkaTopicTagResult',
360
377
  'GetM3AggregatorComponentResult',
378
+ 'GetM3AggregatorM3aggregatorResult',
361
379
  'GetM3AggregatorM3aggregatorUserConfigResult',
362
380
  'GetM3AggregatorM3aggregatorUserConfigIpFilterObjectResult',
363
381
  'GetM3AggregatorServiceIntegrationResult',
364
382
  'GetM3AggregatorTagResult',
365
383
  'GetM3AggregatorTechEmailResult',
366
384
  'GetM3DbComponentResult',
385
+ 'GetM3DbM3dbResult',
367
386
  'GetM3DbM3dbUserConfigResult',
368
387
  'GetM3DbM3dbUserConfigIpFilterObjectResult',
369
388
  'GetM3DbM3dbUserConfigLimitsResult',
@@ -382,6 +401,7 @@ __all__ = [
382
401
  'GetM3DbTagResult',
383
402
  'GetM3DbTechEmailResult',
384
403
  'GetMySqlComponentResult',
404
+ 'GetMySqlMysqlResult',
385
405
  'GetMySqlMysqlUserConfigResult',
386
406
  'GetMySqlMysqlUserConfigIpFilterObjectResult',
387
407
  'GetMySqlMysqlUserConfigMigrationResult',
@@ -430,6 +450,7 @@ __all__ = [
430
450
  'GetPgTechEmailResult',
431
451
  'GetProjectTagResult',
432
452
  'GetRedisComponentResult',
453
+ 'GetRedisRediResult',
433
454
  'GetRedisRedisUserConfigResult',
434
455
  'GetRedisRedisUserConfigIpFilterObjectResult',
435
456
  'GetRedisRedisUserConfigMigrationResult',
@@ -571,6 +592,12 @@ class AccountAuthenticationSamlFieldMapping(dict):
571
592
  return pulumi.get(self, "real_name")
572
593
 
573
594
 
595
+ @pulumi.output_type
596
+ class CassandraCassandra(dict):
597
+ def __init__(__self__):
598
+ pass
599
+
600
+
574
601
  @pulumi.output_type
575
602
  class CassandraCassandraUserConfig(dict):
576
603
  @staticmethod
@@ -639,7 +666,7 @@ class CassandraCassandraUserConfig(dict):
639
666
  :param str additional_backup_regions: Additional Cloud Regions for Backup Replication.
640
667
  :param int backup_hour: The hour of day (in UTC) when backup for the service is started. New backup is only started if previous backup has already completed.
641
668
  :param int backup_minute: The minute of an hour when backup for the service is started. New backup is only started if previous backup has already completed.
642
- :param 'CassandraCassandraUserConfigCassandraArgs' cassandra: Cassandra configuration values
669
+ :param 'CassandraCassandraUserConfigCassandraArgs' cassandra: cassandra configuration values
643
670
  :param str cassandra_version: Cassandra version.
644
671
  :param Sequence['CassandraCassandraUserConfigIpFilterObjectArgs'] ip_filter_objects: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'
645
672
  :param Sequence[str] ip_filter_strings: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
@@ -717,7 +744,7 @@ class CassandraCassandraUserConfig(dict):
717
744
  @pulumi.getter
718
745
  def cassandra(self) -> Optional['outputs.CassandraCassandraUserConfigCassandra']:
719
746
  """
720
- Cassandra configuration values
747
+ cassandra configuration values
721
748
  """
722
749
  return pulumi.get(self, "cassandra")
723
750
 
@@ -937,7 +964,7 @@ class CassandraCassandraUserConfigPublicAccess(dict):
937
964
  def __init__(__self__, *,
938
965
  prometheus: Optional[bool] = None):
939
966
  """
940
- :param bool prometheus: Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network.
967
+ :param bool prometheus: Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
941
968
  """
942
969
  if prometheus is not None:
943
970
  pulumi.set(__self__, "prometheus", prometheus)
@@ -946,7 +973,7 @@ class CassandraCassandraUserConfigPublicAccess(dict):
946
973
  @pulumi.getter
947
974
  def prometheus(self) -> Optional[bool]:
948
975
  """
949
- Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network.
976
+ Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
950
977
  """
951
978
  return pulumi.get(self, "prometheus")
952
979
 
@@ -1168,6 +1195,12 @@ class CassandraTechEmail(dict):
1168
1195
  return pulumi.get(self, "email")
1169
1196
 
1170
1197
 
1198
+ @pulumi.output_type
1199
+ class ClickhouseClickhouse(dict):
1200
+ def __init__(__self__):
1201
+ pass
1202
+
1203
+
1171
1204
  @pulumi.output_type
1172
1205
  class ClickhouseClickhouseUserConfig(dict):
1173
1206
  @staticmethod
@@ -1481,10 +1514,10 @@ class ClickhouseClickhouseUserConfigPrivatelinkAccess(dict):
1481
1514
  clickhouse_mysql: Optional[bool] = None,
1482
1515
  prometheus: Optional[bool] = None):
1483
1516
  """
1484
- :param bool clickhouse: Enable clickhouse.
1485
- :param bool clickhouse_https: Enable clickhouse_https.
1486
- :param bool clickhouse_mysql: Enable clickhouse_mysql.
1487
- :param bool prometheus: Enable prometheus.
1517
+ :param bool clickhouse: Allow clients to connect to clickhouse with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
1518
+ :param bool clickhouse_https: Allow clients to connect to clickhouse_https with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
1519
+ :param bool clickhouse_mysql: Allow clients to connect to clickhouse_mysql with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
1520
+ :param bool prometheus: Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
1488
1521
  """
1489
1522
  if clickhouse is not None:
1490
1523
  pulumi.set(__self__, "clickhouse", clickhouse)
@@ -1499,7 +1532,7 @@ class ClickhouseClickhouseUserConfigPrivatelinkAccess(dict):
1499
1532
  @pulumi.getter
1500
1533
  def clickhouse(self) -> Optional[bool]:
1501
1534
  """
1502
- Enable clickhouse.
1535
+ Allow clients to connect to clickhouse with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
1503
1536
  """
1504
1537
  return pulumi.get(self, "clickhouse")
1505
1538
 
@@ -1507,7 +1540,7 @@ class ClickhouseClickhouseUserConfigPrivatelinkAccess(dict):
1507
1540
  @pulumi.getter(name="clickhouseHttps")
1508
1541
  def clickhouse_https(self) -> Optional[bool]:
1509
1542
  """
1510
- Enable clickhouse_https.
1543
+ Allow clients to connect to clickhouse_https with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
1511
1544
  """
1512
1545
  return pulumi.get(self, "clickhouse_https")
1513
1546
 
@@ -1515,7 +1548,7 @@ class ClickhouseClickhouseUserConfigPrivatelinkAccess(dict):
1515
1548
  @pulumi.getter(name="clickhouseMysql")
1516
1549
  def clickhouse_mysql(self) -> Optional[bool]:
1517
1550
  """
1518
- Enable clickhouse_mysql.
1551
+ Allow clients to connect to clickhouse_mysql with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
1519
1552
  """
1520
1553
  return pulumi.get(self, "clickhouse_mysql")
1521
1554
 
@@ -1523,7 +1556,7 @@ class ClickhouseClickhouseUserConfigPrivatelinkAccess(dict):
1523
1556
  @pulumi.getter
1524
1557
  def prometheus(self) -> Optional[bool]:
1525
1558
  """
1526
- Enable prometheus.
1559
+ Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
1527
1560
  """
1528
1561
  return pulumi.get(self, "prometheus")
1529
1562
 
@@ -1555,10 +1588,10 @@ class ClickhouseClickhouseUserConfigPublicAccess(dict):
1555
1588
  clickhouse_mysql: Optional[bool] = None,
1556
1589
  prometheus: Optional[bool] = None):
1557
1590
  """
1558
- :param bool clickhouse: Allow clients to connect to clickhouse from the public internet for service nodes that are in a project VPC or another type of private network.
1559
- :param bool clickhouse_https: Allow clients to connect to clickhouse_https from the public internet for service nodes that are in a project VPC or another type of private network.
1560
- :param bool clickhouse_mysql: Allow clients to connect to clickhouse_mysql from the public internet for service nodes that are in a project VPC or another type of private network.
1561
- :param bool prometheus: Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network.
1591
+ :param bool clickhouse: Allow clients to connect to clickhouse with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
1592
+ :param bool clickhouse_https: Allow clients to connect to clickhouse_https with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
1593
+ :param bool clickhouse_mysql: Allow clients to connect to clickhouse_mysql with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
1594
+ :param bool prometheus: Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
1562
1595
  """
1563
1596
  if clickhouse is not None:
1564
1597
  pulumi.set(__self__, "clickhouse", clickhouse)
@@ -1573,7 +1606,7 @@ class ClickhouseClickhouseUserConfigPublicAccess(dict):
1573
1606
  @pulumi.getter
1574
1607
  def clickhouse(self) -> Optional[bool]:
1575
1608
  """
1576
- Allow clients to connect to clickhouse from the public internet for service nodes that are in a project VPC or another type of private network.
1609
+ Allow clients to connect to clickhouse with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
1577
1610
  """
1578
1611
  return pulumi.get(self, "clickhouse")
1579
1612
 
@@ -1581,7 +1614,7 @@ class ClickhouseClickhouseUserConfigPublicAccess(dict):
1581
1614
  @pulumi.getter(name="clickhouseHttps")
1582
1615
  def clickhouse_https(self) -> Optional[bool]:
1583
1616
  """
1584
- Allow clients to connect to clickhouse_https from the public internet for service nodes that are in a project VPC or another type of private network.
1617
+ Allow clients to connect to clickhouse_https with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
1585
1618
  """
1586
1619
  return pulumi.get(self, "clickhouse_https")
1587
1620
 
@@ -1589,7 +1622,7 @@ class ClickhouseClickhouseUserConfigPublicAccess(dict):
1589
1622
  @pulumi.getter(name="clickhouseMysql")
1590
1623
  def clickhouse_mysql(self) -> Optional[bool]:
1591
1624
  """
1592
- Allow clients to connect to clickhouse_mysql from the public internet for service nodes that are in a project VPC or another type of private network.
1625
+ Allow clients to connect to clickhouse_mysql with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
1593
1626
  """
1594
1627
  return pulumi.get(self, "clickhouse_mysql")
1595
1628
 
@@ -1597,7 +1630,7 @@ class ClickhouseClickhouseUserConfigPublicAccess(dict):
1597
1630
  @pulumi.getter
1598
1631
  def prometheus(self) -> Optional[bool]:
1599
1632
  """
1600
- Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network.
1633
+ Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
1601
1634
  """
1602
1635
  return pulumi.get(self, "prometheus")
1603
1636
 
@@ -2043,6 +2076,12 @@ class DragonflyComponent(dict):
2043
2076
  return pulumi.get(self, "usage")
2044
2077
 
2045
2078
 
2079
+ @pulumi.output_type
2080
+ class DragonflyDragonfly(dict):
2081
+ def __init__(__self__):
2082
+ pass
2083
+
2084
+
2046
2085
  @pulumi.output_type
2047
2086
  class DragonflyDragonflyUserConfig(dict):
2048
2087
  @staticmethod
@@ -2050,8 +2089,6 @@ class DragonflyDragonflyUserConfig(dict):
2050
2089
  suggest = None
2051
2090
  if key == "cacheMode":
2052
2091
  suggest = "cache_mode"
2053
- elif key == "dragonflyPersistence":
2054
- suggest = "dragonfly_persistence"
2055
2092
  elif key == "dragonflySsl":
2056
2093
  suggest = "dragonfly_ssl"
2057
2094
  elif key == "ipFilterObjects":
@@ -2090,7 +2127,6 @@ class DragonflyDragonflyUserConfig(dict):
2090
2127
 
2091
2128
  def __init__(__self__, *,
2092
2129
  cache_mode: Optional[bool] = None,
2093
- dragonfly_persistence: Optional[str] = None,
2094
2130
  dragonfly_ssl: Optional[bool] = None,
2095
2131
  ip_filter_objects: Optional[Sequence['outputs.DragonflyDragonflyUserConfigIpFilterObject']] = None,
2096
2132
  ip_filter_strings: Optional[Sequence[str]] = None,
@@ -2106,7 +2142,6 @@ class DragonflyDragonflyUserConfig(dict):
2106
2142
  static_ips: Optional[bool] = None):
2107
2143
  """
2108
2144
  :param bool cache_mode: Evict entries when getting close to maxmemory limit. The default value is `false`.
2109
- :param str dragonfly_persistence: When persistence is 'rdb', Dragonfly does RDB dumps each 10 minutes. Also RDB dumps are done according to backup schedule for backup purposes. When persistence is 'off', no RDB dumps and backups are done, so data can be lost at any moment if service is restarted for any reason, or if service is powered off. Also service can't be forked.
2110
2145
  :param bool dragonfly_ssl: Require SSL to access Dragonfly. The default value is `true`.
2111
2146
  :param Sequence['DragonflyDragonflyUserConfigIpFilterObjectArgs'] ip_filter_objects: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'
2112
2147
  :param Sequence[str] ip_filter_strings: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
@@ -2123,8 +2158,6 @@ class DragonflyDragonflyUserConfig(dict):
2123
2158
  """
2124
2159
  if cache_mode is not None:
2125
2160
  pulumi.set(__self__, "cache_mode", cache_mode)
2126
- if dragonfly_persistence is not None:
2127
- pulumi.set(__self__, "dragonfly_persistence", dragonfly_persistence)
2128
2161
  if dragonfly_ssl is not None:
2129
2162
  pulumi.set(__self__, "dragonfly_ssl", dragonfly_ssl)
2130
2163
  if ip_filter_objects is not None:
@@ -2160,14 +2193,6 @@ class DragonflyDragonflyUserConfig(dict):
2160
2193
  """
2161
2194
  return pulumi.get(self, "cache_mode")
2162
2195
 
2163
- @property
2164
- @pulumi.getter(name="dragonflyPersistence")
2165
- def dragonfly_persistence(self) -> Optional[str]:
2166
- """
2167
- When persistence is 'rdb', Dragonfly does RDB dumps each 10 minutes. Also RDB dumps are done according to backup schedule for backup purposes. When persistence is 'off', no RDB dumps and backups are done, so data can be lost at any moment if service is restarted for any reason, or if service is powered off. Also service can't be forked.
2168
- """
2169
- return pulumi.get(self, "dragonfly_persistence")
2170
-
2171
2196
  @property
2172
2197
  @pulumi.getter(name="dragonflySsl")
2173
2198
  def dragonfly_ssl(self) -> Optional[bool]:
@@ -3300,6 +3325,12 @@ class GrafanaComponent(dict):
3300
3325
  return pulumi.get(self, "usage")
3301
3326
 
3302
3327
 
3328
+ @pulumi.output_type
3329
+ class GrafanaGrafana(dict):
3330
+ def __init__(__self__):
3331
+ pass
3332
+
3333
+
3303
3334
  @pulumi.output_type
3304
3335
  class GrafanaGrafanaUserConfig(dict):
3305
3336
  @staticmethod
@@ -3446,7 +3477,7 @@ class GrafanaGrafanaUserConfig(dict):
3446
3477
  viewers_can_edit: Optional[bool] = None):
3447
3478
  """
3448
3479
  :param str additional_backup_regions: Additional Cloud Regions for Backup Replication.
3449
- :param bool alerting_enabled: Enable or disable Grafana legacy alerting functionality. This should not be enabled with unified*alerting*enabled.
3480
+ :param bool alerting_enabled: Enable or disable Grafana legacy alerting functionality. This should not be enabled with unified_alerting_enabled.
3450
3481
  :param str alerting_error_or_timeout: Default error or timeout setting for new alerting rules.
3451
3482
  :param int alerting_max_annotations_to_keep: Max number of alert annotations that Grafana stores. 0 (default) keeps all alert annotations.
3452
3483
  :param str alerting_nodata_or_nullvalues: Default value for 'no data or null values' for new alerting rules.
@@ -3483,7 +3514,7 @@ class GrafanaGrafanaUserConfig(dict):
3483
3514
  :param str service_to_fork_from: Name of another service to fork from. This has effect only when a new service is being created.
3484
3515
  :param 'GrafanaGrafanaUserConfigSmtpServerArgs' smtp_server: SMTP server settings
3485
3516
  :param bool static_ips: Use static public IP addresses.
3486
- :param bool unified_alerting_enabled: Enable or disable Grafana unified alerting functionality. By default this is enabled and any legacy alerts will be migrated on upgrade to Grafana 9+. To stay on legacy alerting, set unified*alerting*enabled to false and alerting_enabled to true. See https://grafana.com/docs/grafana/latest/alerting/set-up/migrating-alerts/ for more details.
3517
+ :param bool unified_alerting_enabled: Enable or disable Grafana unified alerting functionality. By default this is enabled and any legacy alerts will be migrated on upgrade to Grafana 9+. To stay on legacy alerting, set unified_alerting_enabled to false and alerting_enabled to true. See https://grafana.com/docs/grafana/latest/alerting/set-up/migrating-alerts/ for more details.
3487
3518
  :param bool user_auto_assign_org: Auto-assign new users on signup to main organization. Defaults to false.
3488
3519
  :param str user_auto_assign_org_role: Set role for new signups. Defaults to Viewer.
3489
3520
  :param bool viewers_can_edit: Users with view-only permission can edit but not save dashboards.
@@ -3579,13 +3610,16 @@ class GrafanaGrafanaUserConfig(dict):
3579
3610
  """
3580
3611
  Additional Cloud Regions for Backup Replication.
3581
3612
  """
3613
+ warnings.warn("""This property is deprecated.""", DeprecationWarning)
3614
+ pulumi.log.warn("""additional_backup_regions is deprecated: This property is deprecated.""")
3615
+
3582
3616
  return pulumi.get(self, "additional_backup_regions")
3583
3617
 
3584
3618
  @property
3585
3619
  @pulumi.getter(name="alertingEnabled")
3586
3620
  def alerting_enabled(self) -> Optional[bool]:
3587
3621
  """
3588
- Enable or disable Grafana legacy alerting functionality. This should not be enabled with unified*alerting*enabled.
3622
+ Enable or disable Grafana legacy alerting functionality. This should not be enabled with unified_alerting_enabled.
3589
3623
  """
3590
3624
  return pulumi.get(self, "alerting_enabled")
3591
3625
 
@@ -3884,7 +3918,7 @@ class GrafanaGrafanaUserConfig(dict):
3884
3918
  @pulumi.getter(name="unifiedAlertingEnabled")
3885
3919
  def unified_alerting_enabled(self) -> Optional[bool]:
3886
3920
  """
3887
- Enable or disable Grafana unified alerting functionality. By default this is enabled and any legacy alerts will be migrated on upgrade to Grafana 9+. To stay on legacy alerting, set unified*alerting*enabled to false and alerting_enabled to true. See https://grafana.com/docs/grafana/latest/alerting/set-up/migrating-alerts/ for more details.
3921
+ Enable or disable Grafana unified alerting functionality. By default this is enabled and any legacy alerts will be migrated on upgrade to Grafana 9+. To stay on legacy alerting, set unified_alerting_enabled to false and alerting_enabled to true. See https://grafana.com/docs/grafana/latest/alerting/set-up/migrating-alerts/ for more details.
3888
3922
  """
3889
3923
  return pulumi.get(self, "unified_alerting_enabled")
3890
3924
 
@@ -5943,6 +5977,12 @@ class KafkaConnectComponent(dict):
5943
5977
  return pulumi.get(self, "usage")
5944
5978
 
5945
5979
 
5980
+ @pulumi.output_type
5981
+ class KafkaConnectKafkaConnect(dict):
5982
+ def __init__(__self__):
5983
+ pass
5984
+
5985
+
5946
5986
  @pulumi.output_type
5947
5987
  class KafkaConnectKafkaConnectUserConfig(dict):
5948
5988
  @staticmethod
@@ -6211,8 +6251,8 @@ class KafkaConnectKafkaConnectUserConfigKafkaConnect(dict):
6211
6251
  :param str connector_client_config_override_policy: Defines what client configurations can be overridden by the connector. Default is None.
6212
6252
  :param str consumer_auto_offset_reset: What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
6213
6253
  :param int consumer_fetch_max_bytes: Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not a absolute maximum.
6214
- :param str consumer_isolation_level: Transaction read isolation level. read*uncommitted is the default, but read*committed can be used if consume-exactly-once behavior is desired.
6215
- :param int consumer_max_partition_fetch_bytes: Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress.
6254
+ :param str consumer_isolation_level: Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
6255
+ :param int consumer_max_partition_fetch_bytes: Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress. .
6216
6256
  :param int consumer_max_poll_interval_ms: The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).
6217
6257
  :param int consumer_max_poll_records: The maximum number of records returned in a single call to poll() (defaults to 500).
6218
6258
  :param int offset_flush_interval_ms: The interval at which to try committing offsets for tasks (defaults to 60000).
@@ -6286,7 +6326,7 @@ class KafkaConnectKafkaConnectUserConfigKafkaConnect(dict):
6286
6326
  @pulumi.getter(name="consumerIsolationLevel")
6287
6327
  def consumer_isolation_level(self) -> Optional[str]:
6288
6328
  """
6289
- Transaction read isolation level. read*uncommitted is the default, but read*committed can be used if consume-exactly-once behavior is desired.
6329
+ Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
6290
6330
  """
6291
6331
  return pulumi.get(self, "consumer_isolation_level")
6292
6332
 
@@ -6294,7 +6334,7 @@ class KafkaConnectKafkaConnectUserConfigKafkaConnect(dict):
6294
6334
  @pulumi.getter(name="consumerMaxPartitionFetchBytes")
6295
6335
  def consumer_max_partition_fetch_bytes(self) -> Optional[int]:
6296
6336
  """
6297
- Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress.
6337
+ Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress. .
6298
6338
  """
6299
6339
  return pulumi.get(self, "consumer_max_partition_fetch_bytes")
6300
6340
 
@@ -6703,11 +6743,11 @@ class KafkaKafka(dict):
6703
6743
  rest_uri: Optional[str] = None,
6704
6744
  schema_registry_uri: Optional[str] = None):
6705
6745
  """
6706
- :param str access_cert: The Kafka client certificate.
6707
- :param str access_key: The Kafka client certificate key.
6708
- :param str connect_uri: The Kafka Connect URI.
6709
- :param str rest_uri: The Kafka REST URI.
6710
- :param str schema_registry_uri: The Schema Registry URI.
6746
+ :param str access_cert: The Kafka client certificate
6747
+ :param str access_key: The Kafka client certificate key
6748
+ :param str connect_uri: The Kafka Connect URI, if any
6749
+ :param str rest_uri: The Kafka REST URI, if any
6750
+ :param str schema_registry_uri: The Schema Registry URI, if any
6711
6751
  """
6712
6752
  if access_cert is not None:
6713
6753
  pulumi.set(__self__, "access_cert", access_cert)
@@ -6724,7 +6764,7 @@ class KafkaKafka(dict):
6724
6764
  @pulumi.getter(name="accessCert")
6725
6765
  def access_cert(self) -> Optional[str]:
6726
6766
  """
6727
- The Kafka client certificate.
6767
+ The Kafka client certificate
6728
6768
  """
6729
6769
  return pulumi.get(self, "access_cert")
6730
6770
 
@@ -6732,7 +6772,7 @@ class KafkaKafka(dict):
6732
6772
  @pulumi.getter(name="accessKey")
6733
6773
  def access_key(self) -> Optional[str]:
6734
6774
  """
6735
- The Kafka client certificate key.
6775
+ The Kafka client certificate key
6736
6776
  """
6737
6777
  return pulumi.get(self, "access_key")
6738
6778
 
@@ -6740,7 +6780,7 @@ class KafkaKafka(dict):
6740
6780
  @pulumi.getter(name="connectUri")
6741
6781
  def connect_uri(self) -> Optional[str]:
6742
6782
  """
6743
- The Kafka Connect URI.
6783
+ The Kafka Connect URI, if any
6744
6784
  """
6745
6785
  return pulumi.get(self, "connect_uri")
6746
6786
 
@@ -6748,7 +6788,7 @@ class KafkaKafka(dict):
6748
6788
  @pulumi.getter(name="restUri")
6749
6789
  def rest_uri(self) -> Optional[str]:
6750
6790
  """
6751
- The Kafka REST URI.
6791
+ The Kafka REST URI, if any
6752
6792
  """
6753
6793
  return pulumi.get(self, "rest_uri")
6754
6794
 
@@ -6756,7 +6796,7 @@ class KafkaKafka(dict):
6756
6796
  @pulumi.getter(name="schemaRegistryUri")
6757
6797
  def schema_registry_uri(self) -> Optional[str]:
6758
6798
  """
6759
- The Schema Registry URI.
6799
+ The Schema Registry URI, if any
6760
6800
  """
6761
6801
  return pulumi.get(self, "schema_registry_uri")
6762
6802
 
@@ -7300,7 +7340,7 @@ class KafkaKafkaUserConfigKafka(dict):
7300
7340
  :param int log_index_size_max_bytes: The maximum size in bytes of the offset index.
7301
7341
  :param int log_local_retention_bytes: The maximum size of local log segments that can grow for a partition before it gets eligible for deletion. If set to -2, the value of log.retention.bytes is used. The effective value should always be less than or equal to log.retention.bytes value.
7302
7342
  :param int log_local_retention_ms: The number of milliseconds to keep the local log segments before it gets eligible for deletion. If set to -2, the value of log.retention.ms is used. The effective value should always be less than or equal to log.retention.ms value.
7303
- :param bool log_message_downconversion_enable: This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests.
7343
+ :param bool log_message_downconversion_enable: This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. .
7304
7344
  :param int log_message_timestamp_difference_max_ms: The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message.
7305
7345
  :param str log_message_timestamp_type: Define whether the timestamp in the message is message create time or log append time.
7306
7346
  :param bool log_preallocate: Should pre allocate file when create new segment?
@@ -7322,7 +7362,7 @@ class KafkaKafkaUserConfigKafka(dict):
7322
7362
  :param int replica_fetch_response_max_bytes: Maximum bytes expected for the entire fetch response (defaults to 10485760). Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum.
7323
7363
  :param str sasl_oauthbearer_expected_audience: The (optional) comma-delimited setting for the broker to use to verify that the JWT was issued for one of the expected audiences.
7324
7364
  :param str sasl_oauthbearer_expected_issuer: Optional setting for the broker to use to verify that the JWT was created by the expected issuer.
7325
- :param str sasl_oauthbearer_jwks_endpoint_url: OIDC JWKS endpoint URL. By setting this the SASL SSL OAuth2/OIDC authentication is enabled. See also other options for SASL OAuth2/OIDC.
7365
+ :param str sasl_oauthbearer_jwks_endpoint_url: OIDC JWKS endpoint URL. By setting this the SASL SSL OAuth2/OIDC authentication is enabled. See also other options for SASL OAuth2/OIDC. .
7326
7366
  :param str sasl_oauthbearer_sub_claim_name: Name of the scope from which to extract the subject claim from the JWT. Defaults to sub.
7327
7367
  :param int socket_request_max_bytes: The maximum number of bytes in a socket request (defaults to 104857600).
7328
7368
  :param bool transaction_partition_verification_enable: Enable verification that checks that the partition has been added to the transaction before writing transactional records to the partition.
@@ -7570,7 +7610,7 @@ class KafkaKafkaUserConfigKafka(dict):
7570
7610
  @pulumi.getter(name="logMessageDownconversionEnable")
7571
7611
  def log_message_downconversion_enable(self) -> Optional[bool]:
7572
7612
  """
7573
- This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests.
7613
+ This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. .
7574
7614
  """
7575
7615
  return pulumi.get(self, "log_message_downconversion_enable")
7576
7616
 
@@ -7746,7 +7786,7 @@ class KafkaKafkaUserConfigKafka(dict):
7746
7786
  @pulumi.getter(name="saslOauthbearerJwksEndpointUrl")
7747
7787
  def sasl_oauthbearer_jwks_endpoint_url(self) -> Optional[str]:
7748
7788
  """
7749
- OIDC JWKS endpoint URL. By setting this the SASL SSL OAuth2/OIDC authentication is enabled. See also other options for SASL OAuth2/OIDC.
7789
+ OIDC JWKS endpoint URL. By setting this the SASL SSL OAuth2/OIDC authentication is enabled. See also other options for SASL OAuth2/OIDC. .
7750
7790
  """
7751
7791
  return pulumi.get(self, "sasl_oauthbearer_jwks_endpoint_url")
7752
7792
 
@@ -7893,7 +7933,7 @@ class KafkaKafkaUserConfigKafkaConnectConfig(dict):
7893
7933
  :param str consumer_auto_offset_reset: What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
7894
7934
  :param int consumer_fetch_max_bytes: Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not a absolute maximum.
7895
7935
  :param str consumer_isolation_level: Transaction read isolation level. read*uncommitted is the default, but read*committed can be used if consume-exactly-once behavior is desired.
7896
- :param int consumer_max_partition_fetch_bytes: Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress.
7936
+ :param int consumer_max_partition_fetch_bytes: Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress. .
7897
7937
  :param int consumer_max_poll_interval_ms: The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).
7898
7938
  :param int consumer_max_poll_records: The maximum number of records returned in a single call to poll() (defaults to 500).
7899
7939
  :param int offset_flush_interval_ms: The interval at which to try committing offsets for tasks (defaults to 60000).
@@ -7975,7 +8015,7 @@ class KafkaKafkaUserConfigKafkaConnectConfig(dict):
7975
8015
  @pulumi.getter(name="consumerMaxPartitionFetchBytes")
7976
8016
  def consumer_max_partition_fetch_bytes(self) -> Optional[int]:
7977
8017
  """
7978
- Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress.
8018
+ Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress. .
7979
8019
  """
7980
8020
  return pulumi.get(self, "consumer_max_partition_fetch_bytes")
7981
8021
 
@@ -8124,8 +8164,8 @@ class KafkaKafkaUserConfigKafkaRestConfig(dict):
8124
8164
  :param bool name_strategy_validation: If true, validate that given schema is registered under expected subject name by the used name strategy when producing messages. The default value is `true`.
8125
8165
  :param str producer_acks: The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to 'all' or '-1', the leader will wait for the full set of in-sync replicas to acknowledge the record. The default value is `1`.
8126
8166
  :param str producer_compression_type: Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
8127
- :param int producer_linger_ms: Wait for up to the given delay to allow batching records together. The default value is `0`.
8128
- :param int producer_max_request_size: The maximum size of a request in bytes. Note that Kafka broker can also cap the record batch size. The default value is `1048576`.
8167
+ :param int producer_linger_ms: This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting, however if there are fewer than this many bytes accumulated for this partition the producer will 'linger' for the specified time waiting for more records to show up. Defaults to 0.
8168
+ :param int producer_max_request_size: This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests.
8129
8169
  :param int simpleconsumer_pool_size_max: Maximum number of SimpleConsumers that can be instantiated per broker. The default value is `25`.
8130
8170
  """
8131
8171
  if consumer_enable_auto_commit is not None:
@@ -8209,7 +8249,7 @@ class KafkaKafkaUserConfigKafkaRestConfig(dict):
8209
8249
  @pulumi.getter(name="producerLingerMs")
8210
8250
  def producer_linger_ms(self) -> Optional[int]:
8211
8251
  """
8212
- Wait for up to the given delay to allow batching records together. The default value is `0`.
8252
+ This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting, however if there are fewer than this many bytes accumulated for this partition the producer will 'linger' for the specified time waiting for more records to show up. Defaults to 0.
8213
8253
  """
8214
8254
  return pulumi.get(self, "producer_linger_ms")
8215
8255
 
@@ -8217,7 +8257,7 @@ class KafkaKafkaUserConfigKafkaRestConfig(dict):
8217
8257
  @pulumi.getter(name="producerMaxRequestSize")
8218
8258
  def producer_max_request_size(self) -> Optional[int]:
8219
8259
  """
8220
- The maximum size of a request in bytes. Note that Kafka broker can also cap the record batch size. The default value is `1048576`.
8260
+ This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests.
8221
8261
  """
8222
8262
  return pulumi.get(self, "producer_max_request_size")
8223
8263
 
@@ -8260,9 +8300,9 @@ class KafkaKafkaUserConfigPrivateAccess(dict):
8260
8300
  prometheus: Optional[bool] = None,
8261
8301
  schema_registry: Optional[bool] = None):
8262
8302
  """
8263
- :param bool kafka: Allow clients to connect to kafka with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
8264
- :param bool kafka_connect: Allow clients to connect to kafka_connect with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
8265
- :param bool kafka_rest: Allow clients to connect to kafka_rest with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
8303
+ :param bool kafka: Kafka broker configuration values
8304
+ :param bool kafka_connect: Enable Kafka Connect service. The default value is `false`.
8305
+ :param bool kafka_rest: Enable Kafka-REST service. The default value is `false`.
8266
8306
  :param bool prometheus: Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
8267
8307
  :param bool schema_registry: Allow clients to connect to schema_registry with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
8268
8308
  """
@@ -8281,7 +8321,7 @@ class KafkaKafkaUserConfigPrivateAccess(dict):
8281
8321
  @pulumi.getter
8282
8322
  def kafka(self) -> Optional[bool]:
8283
8323
  """
8284
- Allow clients to connect to kafka with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
8324
+ Kafka broker configuration values
8285
8325
  """
8286
8326
  return pulumi.get(self, "kafka")
8287
8327
 
@@ -8289,7 +8329,7 @@ class KafkaKafkaUserConfigPrivateAccess(dict):
8289
8329
  @pulumi.getter(name="kafkaConnect")
8290
8330
  def kafka_connect(self) -> Optional[bool]:
8291
8331
  """
8292
- Allow clients to connect to kafka_connect with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
8332
+ Enable Kafka Connect service. The default value is `false`.
8293
8333
  """
8294
8334
  return pulumi.get(self, "kafka_connect")
8295
8335
 
@@ -8297,7 +8337,7 @@ class KafkaKafkaUserConfigPrivateAccess(dict):
8297
8337
  @pulumi.getter(name="kafkaRest")
8298
8338
  def kafka_rest(self) -> Optional[bool]:
8299
8339
  """
8300
- Allow clients to connect to kafka_rest with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
8340
+ Enable Kafka-REST service. The default value is `false`.
8301
8341
  """
8302
8342
  return pulumi.get(self, "kafka_rest")
8303
8343
 
@@ -8350,11 +8390,11 @@ class KafkaKafkaUserConfigPrivatelinkAccess(dict):
8350
8390
  schema_registry: Optional[bool] = None):
8351
8391
  """
8352
8392
  :param bool jolokia: Enable jolokia.
8353
- :param bool kafka: Enable kafka.
8354
- :param bool kafka_connect: Enable kafka_connect.
8355
- :param bool kafka_rest: Enable kafka_rest.
8356
- :param bool prometheus: Enable prometheus.
8357
- :param bool schema_registry: Enable schema_registry.
8393
+ :param bool kafka: Kafka broker configuration values
8394
+ :param bool kafka_connect: Enable Kafka Connect service. The default value is `false`.
8395
+ :param bool kafka_rest: Enable Kafka-REST service. The default value is `false`.
8396
+ :param bool prometheus: Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
8397
+ :param bool schema_registry: Allow clients to connect to schema_registry with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
8358
8398
  """
8359
8399
  if jolokia is not None:
8360
8400
  pulumi.set(__self__, "jolokia", jolokia)
@@ -8381,7 +8421,7 @@ class KafkaKafkaUserConfigPrivatelinkAccess(dict):
8381
8421
  @pulumi.getter
8382
8422
  def kafka(self) -> Optional[bool]:
8383
8423
  """
8384
- Enable kafka.
8424
+ Kafka broker configuration values
8385
8425
  """
8386
8426
  return pulumi.get(self, "kafka")
8387
8427
 
@@ -8389,7 +8429,7 @@ class KafkaKafkaUserConfigPrivatelinkAccess(dict):
8389
8429
  @pulumi.getter(name="kafkaConnect")
8390
8430
  def kafka_connect(self) -> Optional[bool]:
8391
8431
  """
8392
- Enable kafka_connect.
8432
+ Enable Kafka Connect service. The default value is `false`.
8393
8433
  """
8394
8434
  return pulumi.get(self, "kafka_connect")
8395
8435
 
@@ -8397,7 +8437,7 @@ class KafkaKafkaUserConfigPrivatelinkAccess(dict):
8397
8437
  @pulumi.getter(name="kafkaRest")
8398
8438
  def kafka_rest(self) -> Optional[bool]:
8399
8439
  """
8400
- Enable kafka_rest.
8440
+ Enable Kafka-REST service. The default value is `false`.
8401
8441
  """
8402
8442
  return pulumi.get(self, "kafka_rest")
8403
8443
 
@@ -8405,7 +8445,7 @@ class KafkaKafkaUserConfigPrivatelinkAccess(dict):
8405
8445
  @pulumi.getter
8406
8446
  def prometheus(self) -> Optional[bool]:
8407
8447
  """
8408
- Enable prometheus.
8448
+ Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
8409
8449
  """
8410
8450
  return pulumi.get(self, "prometheus")
8411
8451
 
@@ -8413,7 +8453,7 @@ class KafkaKafkaUserConfigPrivatelinkAccess(dict):
8413
8453
  @pulumi.getter(name="schemaRegistry")
8414
8454
  def schema_registry(self) -> Optional[bool]:
8415
8455
  """
8416
- Enable schema_registry.
8456
+ Allow clients to connect to schema_registry with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
8417
8457
  """
8418
8458
  return pulumi.get(self, "schema_registry")
8419
8459
 
@@ -8448,11 +8488,11 @@ class KafkaKafkaUserConfigPublicAccess(dict):
8448
8488
  prometheus: Optional[bool] = None,
8449
8489
  schema_registry: Optional[bool] = None):
8450
8490
  """
8451
- :param bool kafka: Allow clients to connect to kafka from the public internet for service nodes that are in a project VPC or another type of private network.
8452
- :param bool kafka_connect: Allow clients to connect to kafka_connect from the public internet for service nodes that are in a project VPC or another type of private network.
8453
- :param bool kafka_rest: Allow clients to connect to kafka_rest from the public internet for service nodes that are in a project VPC or another type of private network.
8454
- :param bool prometheus: Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network.
8455
- :param bool schema_registry: Allow clients to connect to schema_registry from the public internet for service nodes that are in a project VPC or another type of private network.
8491
+ :param bool kafka: Kafka broker configuration values
8492
+ :param bool kafka_connect: Enable Kafka Connect service. The default value is `false`.
8493
+ :param bool kafka_rest: Enable Kafka-REST service. The default value is `false`.
8494
+ :param bool prometheus: Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
8495
+ :param bool schema_registry: Allow clients to connect to schema_registry with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
8456
8496
  """
8457
8497
  if kafka is not None:
8458
8498
  pulumi.set(__self__, "kafka", kafka)
@@ -8469,7 +8509,7 @@ class KafkaKafkaUserConfigPublicAccess(dict):
8469
8509
  @pulumi.getter
8470
8510
  def kafka(self) -> Optional[bool]:
8471
8511
  """
8472
- Allow clients to connect to kafka from the public internet for service nodes that are in a project VPC or another type of private network.
8512
+ Kafka broker configuration values
8473
8513
  """
8474
8514
  return pulumi.get(self, "kafka")
8475
8515
 
@@ -8477,7 +8517,7 @@ class KafkaKafkaUserConfigPublicAccess(dict):
8477
8517
  @pulumi.getter(name="kafkaConnect")
8478
8518
  def kafka_connect(self) -> Optional[bool]:
8479
8519
  """
8480
- Allow clients to connect to kafka_connect from the public internet for service nodes that are in a project VPC or another type of private network.
8520
+ Enable Kafka Connect service. The default value is `false`.
8481
8521
  """
8482
8522
  return pulumi.get(self, "kafka_connect")
8483
8523
 
@@ -8485,7 +8525,7 @@ class KafkaKafkaUserConfigPublicAccess(dict):
8485
8525
  @pulumi.getter(name="kafkaRest")
8486
8526
  def kafka_rest(self) -> Optional[bool]:
8487
8527
  """
8488
- Allow clients to connect to kafka_rest from the public internet for service nodes that are in a project VPC or another type of private network.
8528
+ Enable Kafka-REST service. The default value is `false`.
8489
8529
  """
8490
8530
  return pulumi.get(self, "kafka_rest")
8491
8531
 
@@ -8493,7 +8533,7 @@ class KafkaKafkaUserConfigPublicAccess(dict):
8493
8533
  @pulumi.getter
8494
8534
  def prometheus(self) -> Optional[bool]:
8495
8535
  """
8496
- Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network.
8536
+ Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
8497
8537
  """
8498
8538
  return pulumi.get(self, "prometheus")
8499
8539
 
@@ -8501,7 +8541,7 @@ class KafkaKafkaUserConfigPublicAccess(dict):
8501
8541
  @pulumi.getter(name="schemaRegistry")
8502
8542
  def schema_registry(self) -> Optional[bool]:
8503
8543
  """
8504
- Allow clients to connect to schema_registry from the public internet for service nodes that are in a project VPC or another type of private network.
8544
+ Allow clients to connect to schema_registry with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
8505
8545
  """
8506
8546
  return pulumi.get(self, "schema_registry")
8507
8547
 
@@ -8751,6 +8791,12 @@ class KafkaMirrorMakerComponent(dict):
8751
8791
  return pulumi.get(self, "usage")
8752
8792
 
8753
8793
 
8794
+ @pulumi.output_type
8795
+ class KafkaMirrorMakerKafkaMirrormaker(dict):
8796
+ def __init__(__self__):
8797
+ pass
8798
+
8799
+
8754
8800
  @pulumi.output_type
8755
8801
  class KafkaMirrorMakerKafkaMirrormakerUserConfig(dict):
8756
8802
  @staticmethod
@@ -8972,7 +9018,7 @@ class KafkaMirrorMakerKafkaMirrormakerUserConfigKafkaMirrormaker(dict):
8972
9018
  :param int refresh_groups_interval_seconds: Frequency of consumer group refresh in seconds. Defaults to 600 seconds (10 minutes).
8973
9019
  :param bool refresh_topics_enabled: Whether to periodically check for new topics and partitions. Defaults to 'true'.
8974
9020
  :param int refresh_topics_interval_seconds: Frequency of topic and partitions refresh in seconds. Defaults to 600 seconds (10 minutes).
8975
- :param bool sync_group_offsets_enabled: Whether to periodically write the translated offsets of replicated consumer groups (in the source cluster) to _*consumer*offsets topic in target cluster, as long as no active consumers in that group are connected to the target cluster.
9021
+ :param bool sync_group_offsets_enabled: Whether to periodically write the translated offsets of replicated consumer groups (in the source cluster) to __consumer_offsets topic in target cluster, as long as no active consumers in that group are connected to the target cluster.
8976
9022
  :param int sync_group_offsets_interval_seconds: Frequency at which consumer group offsets are synced (default: 60, every minute).
8977
9023
  :param bool sync_topic_configs_enabled: Whether to periodically configure remote topics to match their corresponding upstream topics.
8978
9024
  :param int tasks_max_per_cpu: 'tasks.max' is set to this multiplied by the number of CPUs in the service. The default value is `1`.
@@ -9080,7 +9126,7 @@ class KafkaMirrorMakerKafkaMirrormakerUserConfigKafkaMirrormaker(dict):
9080
9126
  @pulumi.getter(name="syncGroupOffsetsEnabled")
9081
9127
  def sync_group_offsets_enabled(self) -> Optional[bool]:
9082
9128
  """
9083
- Whether to periodically write the translated offsets of replicated consumer groups (in the source cluster) to _*consumer*offsets topic in target cluster, as long as no active consumers in that group are connected to the target cluster.
9129
+ Whether to periodically write the translated offsets of replicated consumer groups (in the source cluster) to __consumer_offsets topic in target cluster, as long as no active consumers in that group are connected to the target cluster.
9084
9130
  """
9085
9131
  return pulumi.get(self, "sync_group_offsets_enabled")
9086
9132
 
@@ -9852,6 +9898,12 @@ class M3AggregatorComponent(dict):
9852
9898
  return pulumi.get(self, "usage")
9853
9899
 
9854
9900
 
9901
+ @pulumi.output_type
9902
+ class M3AggregatorM3aggregator(dict):
9903
+ def __init__(__self__):
9904
+ pass
9905
+
9906
+
9855
9907
  @pulumi.output_type
9856
9908
  class M3AggregatorM3aggregatorUserConfig(dict):
9857
9909
  @staticmethod
@@ -10236,6 +10288,12 @@ class M3DbComponent(dict):
10236
10288
  return pulumi.get(self, "usage")
10237
10289
 
10238
10290
 
10291
+ @pulumi.output_type
10292
+ class M3DbM3db(dict):
10293
+ def __init__(__self__):
10294
+ pass
10295
+
10296
+
10239
10297
  @pulumi.output_type
10240
10298
  class M3DbM3dbUserConfig(dict):
10241
10299
  @staticmethod
@@ -10363,6 +10421,9 @@ class M3DbM3dbUserConfig(dict):
10363
10421
  """
10364
10422
  Additional Cloud Regions for Backup Replication.
10365
10423
  """
10424
+ warnings.warn("""This property is deprecated.""", DeprecationWarning)
10425
+ pulumi.log.warn("""additional_backup_regions is deprecated: This property is deprecated.""")
10426
+
10366
10427
  return pulumi.get(self, "additional_backup_regions")
10367
10428
 
10368
10429
  @property
@@ -10574,7 +10635,7 @@ class M3DbM3dbUserConfigLimits(dict):
10574
10635
  """
10575
10636
  :param int max_recently_queried_series_blocks: The maximum number of blocks that can be read in a given lookback period.
10576
10637
  :param int max_recently_queried_series_disk_bytes_read: The maximum number of disk bytes that can be read in a given lookback period.
10577
- :param str max_recently_queried_series_lookback: The lookback period for 'max*recently*queried*series*blocks' and 'max*recently*queried*series*disk*bytes*read'.
10638
+ :param str max_recently_queried_series_lookback: The lookback period for 'max_recently_queried_series_blocks' and 'max_recently_queried_series_disk_bytes_read'.
10578
10639
  :param int query_docs: The maximum number of docs fetched in single query.
10579
10640
  :param bool query_require_exhaustive: When query limits are exceeded, whether to return error or return partial results.
10580
10641
  :param int query_series: The maximum number of series fetched in single query.
@@ -10612,7 +10673,7 @@ class M3DbM3dbUserConfigLimits(dict):
10612
10673
  @pulumi.getter(name="maxRecentlyQueriedSeriesLookback")
10613
10674
  def max_recently_queried_series_lookback(self) -> Optional[str]:
10614
10675
  """
10615
- The lookback period for 'max*recently*queried*series*blocks' and 'max*recently*queried*series*disk*bytes*read'.
10676
+ The lookback period for 'max_recently_queried_series_blocks' and 'max_recently_queried_series_disk_bytes_read'.
10616
10677
  """
10617
10678
  return pulumi.get(self, "max_recently_queried_series_lookback")
10618
10679
 
@@ -11023,7 +11084,7 @@ class M3DbM3dbUserConfigRulesMapping(dict):
11023
11084
  namespaces_strings: Optional[Sequence[str]] = None,
11024
11085
  tags: Optional[Sequence['outputs.M3DbM3dbUserConfigRulesMappingTag']] = None):
11025
11086
  """
11026
- :param str filter: Matching metric names with wildcards (using **name**:wildcard) or matching tags and their (optionally wildcarded) values. For value, ! can be used at start of value for negation, and multiple filters can be supplied using space as separator.
11087
+ :param str filter: Matching metric names with wildcards (using __name__:wildcard) or matching tags and their (optionally wildcarded) values. For value, ! can be used at start of value for negation, and multiple filters can be supplied using space as separator.
11027
11088
  :param Sequence[str] aggregations: List of aggregations to be applied.
11028
11089
  :param bool drop: Only store the derived metric (as specified in the roll-up rules), if any.
11029
11090
  :param str name: The (optional) name of the rule.
@@ -11052,7 +11113,7 @@ class M3DbM3dbUserConfigRulesMapping(dict):
11052
11113
  @pulumi.getter
11053
11114
  def filter(self) -> str:
11054
11115
  """
11055
- Matching metric names with wildcards (using **name**:wildcard) or matching tags and their (optionally wildcarded) values. For value, ! can be used at start of value for negation, and multiple filters can be supplied using space as separator.
11116
+ Matching metric names with wildcards (using __name__:wildcard) or matching tags and their (optionally wildcarded) values. For value, ! can be used at start of value for negation, and multiple filters can be supplied using space as separator.
11056
11117
  """
11057
11118
  return pulumi.get(self, "filter")
11058
11119
 
@@ -11392,6 +11453,12 @@ class MySqlComponent(dict):
11392
11453
  return pulumi.get(self, "usage")
11393
11454
 
11394
11455
 
11456
+ @pulumi.output_type
11457
+ class MySqlMysql(dict):
11458
+ def __init__(__self__):
11459
+ pass
11460
+
11461
+
11395
11462
  @pulumi.output_type
11396
11463
  class MySqlMysqlUserConfig(dict):
11397
11464
  @staticmethod
@@ -11968,17 +12035,17 @@ class MySqlMysqlUserConfigMysql(dict):
11968
12035
  :param int innodb_write_io_threads: The number of I/O threads for write operations in InnoDB. Default is 4. Changing this parameter will lead to a restart of the MySQL service.
11969
12036
  :param int interactive_timeout: The number of seconds the server waits for activity on an interactive connection before closing it.
11970
12037
  :param str internal_tmp_mem_storage_engine: The storage engine for in-memory internal temporary tables.
11971
- :param float long_query_time: The slow*query*logs work as SQL statements that take more than long*query*time seconds to execute. Default is 10s.
12038
+ :param float long_query_time: The slow_query_logs work as SQL statements that take more than long_query_time seconds to execute. Default is 10s.
11972
12039
  :param int max_allowed_packet: Size of the largest message in bytes that can be received by the server. Default is 67108864 (64M).
11973
- :param int max_heap_table_size: Limits the size of internal in-memory tables. Also set tmp*table*size. Default is 16777216 (16M).
12040
+ :param int max_heap_table_size: Limits the size of internal in-memory tables. Also set tmp_table_size. Default is 16777216 (16M).
11974
12041
  :param int net_buffer_length: Start sizes of connection buffer and result buffer. Default is 16384 (16K). Changing this parameter will lead to a restart of the MySQL service.
11975
12042
  :param int net_read_timeout: The number of seconds to wait for more data from a connection before aborting the read.
11976
12043
  :param int net_write_timeout: The number of seconds to wait for a block to be written to a connection before aborting the write.
11977
- :param bool slow_query_log: Slow query log enables capturing of slow queries. Setting slow*query*log to false also truncates the mysql.slow_log table. Default is off.
12044
+ :param bool slow_query_log: Slow query log enables capturing of slow queries. Setting slow_query_log to false also truncates the mysql.slow_log table. Default is off.
11978
12045
  :param int sort_buffer_size: Sort buffer size in bytes for ORDER BY optimization. Default is 262144 (256K).
11979
12046
  :param str sql_mode: Global SQL mode. Set to empty to use MySQL server defaults. When creating a new service and not setting this field Aiven default SQL mode (strict, SQL standard compliant) will be assigned.
11980
12047
  :param bool sql_require_primary_key: Require primary key to be defined for new tables or old tables modified with ALTER TABLE and fail if missing. It is recommended to always have primary keys because various functionality may break if any large table is missing them.
11981
- :param int tmp_table_size: Limits the size of internal in-memory tables. Also set max*heap*table_size. Default is 16777216 (16M).
12048
+ :param int tmp_table_size: Limits the size of internal in-memory tables. Also set max_heap_table_size. Default is 16777216 (16M).
11982
12049
  :param int wait_timeout: The number of seconds the server waits for activity on a noninteractive connection before closing it.
11983
12050
  """
11984
12051
  if connect_timeout is not None:
@@ -12190,7 +12257,7 @@ class MySqlMysqlUserConfigMysql(dict):
12190
12257
  @pulumi.getter(name="longQueryTime")
12191
12258
  def long_query_time(self) -> Optional[float]:
12192
12259
  """
12193
- The slow*query*logs work as SQL statements that take more than long*query*time seconds to execute. Default is 10s.
12260
+ The slow_query_logs work as SQL statements that take more than long_query_time seconds to execute. Default is 10s.
12194
12261
  """
12195
12262
  return pulumi.get(self, "long_query_time")
12196
12263
 
@@ -12206,7 +12273,7 @@ class MySqlMysqlUserConfigMysql(dict):
12206
12273
  @pulumi.getter(name="maxHeapTableSize")
12207
12274
  def max_heap_table_size(self) -> Optional[int]:
12208
12275
  """
12209
- Limits the size of internal in-memory tables. Also set tmp*table*size. Default is 16777216 (16M).
12276
+ Limits the size of internal in-memory tables. Also set tmp_table_size. Default is 16777216 (16M).
12210
12277
  """
12211
12278
  return pulumi.get(self, "max_heap_table_size")
12212
12279
 
@@ -12238,7 +12305,7 @@ class MySqlMysqlUserConfigMysql(dict):
12238
12305
  @pulumi.getter(name="slowQueryLog")
12239
12306
  def slow_query_log(self) -> Optional[bool]:
12240
12307
  """
12241
- Slow query log enables capturing of slow queries. Setting slow*query*log to false also truncates the mysql.slow_log table. Default is off.
12308
+ Slow query log enables capturing of slow queries. Setting slow_query_log to false also truncates the mysql.slow_log table. Default is off.
12242
12309
  """
12243
12310
  return pulumi.get(self, "slow_query_log")
12244
12311
 
@@ -12270,7 +12337,7 @@ class MySqlMysqlUserConfigMysql(dict):
12270
12337
  @pulumi.getter(name="tmpTableSize")
12271
12338
  def tmp_table_size(self) -> Optional[int]:
12272
12339
  """
12273
- Limits the size of internal in-memory tables. Also set max*heap*table_size. Default is 16777216 (16M).
12340
+ Limits the size of internal in-memory tables. Also set max_heap_table_size. Default is 16777216 (16M).
12274
12341
  """
12275
12342
  return pulumi.get(self, "tmp_table_size")
12276
12343
 
@@ -12756,7 +12823,7 @@ class OpenSearchOpensearchUserConfig(dict):
12756
12823
  :param Sequence[str] ip_filter_strings: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
12757
12824
  :param Sequence[str] ip_filters: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
12758
12825
  :param bool keep_index_refresh_interval: Aiven automation resets index.refresh_interval to default value for every index to be sure that indices are always visible to search. If it doesn't fit your case, you can disable this by setting up this flag to true.
12759
- :param int max_index_count: Use index_patterns instead. The default value is `0`.
12826
+ :param int max_index_count: use index_patterns instead. The default value is `0`.
12760
12827
  :param 'OpenSearchOpensearchUserConfigOpenidArgs' openid: OpenSearch OpenID Connect Configuration
12761
12828
  :param 'OpenSearchOpensearchUserConfigOpensearchArgs' opensearch: OpenSearch settings
12762
12829
  :param 'OpenSearchOpensearchUserConfigOpensearchDashboardsArgs' opensearch_dashboards: OpenSearch Dashboards settings
@@ -12824,6 +12891,9 @@ class OpenSearchOpensearchUserConfig(dict):
12824
12891
  """
12825
12892
  Additional Cloud Regions for Backup Replication.
12826
12893
  """
12894
+ warnings.warn("""This property is deprecated.""", DeprecationWarning)
12895
+ pulumi.log.warn("""additional_backup_regions is deprecated: This property is deprecated.""")
12896
+
12827
12897
  return pulumi.get(self, "additional_backup_regions")
12828
12898
 
12829
12899
  @property
@@ -12897,7 +12967,7 @@ class OpenSearchOpensearchUserConfig(dict):
12897
12967
  @pulumi.getter(name="maxIndexCount")
12898
12968
  def max_index_count(self) -> Optional[int]:
12899
12969
  """
12900
- Use index_patterns instead. The default value is `0`.
12970
+ use index_patterns instead. The default value is `0`.
12901
12971
  """
12902
12972
  return pulumi.get(self, "max_index_count")
12903
12973
 
@@ -13498,10 +13568,10 @@ class OpenSearchOpensearchUserConfigOpensearch(dict):
13498
13568
  :param int http_max_header_size: The max size of allowed headers, in bytes.
13499
13569
  :param int http_max_initial_line_length: The max length of an HTTP URL, in bytes.
13500
13570
  :param int indices_fielddata_cache_size: Relative amount. Maximum amount of heap memory used for field data cache. This is an expert setting; decreasing the value too much will increase overhead of loading field data; too much memory used for field data cache will decrease amount of heap available for other operations.
13501
- :param int indices_memory_index_buffer_size: Percentage value. Default is 10%. Total amount of heap used for indexing buffer, before writing segments to disk. This is an expert setting. Too low value will slow down indexing; too high value will increase indexing performance but causes performance issues for query performance.
13571
+ :param int indices_memory_index_buffer_size: Percentage value. Default is 10%! (MISSING)Total amount of heap used for indexing buffer, before writing segments to disk. This is an expert setting. Too low value will slow down indexing; too high value will increase indexing performance but causes performance issues for query performance.
13502
13572
  :param int indices_memory_max_index_buffer_size: Absolute value. Default is unbound. Doesn't work without indices.memory.index*buffer*size. Maximum amount of heap used for query cache, an absolute indices.memory.index*buffer*size maximum hard limit.
13503
13573
  :param int indices_memory_min_index_buffer_size: Absolute value. Default is 48mb. Doesn't work without indices.memory.index*buffer*size. Minimum amount of heap used for query cache, an absolute indices.memory.index*buffer*size minimal hard limit.
13504
- :param int indices_queries_cache_size: Percentage value. Default is 10%. Maximum amount of heap used for query cache. This is an expert setting. Too low value will decrease query performance and increase performance for other operations; too high value will cause issues with other OpenSearch functionality.
13574
+ :param int indices_queries_cache_size: Percentage value. Default is 10%! (MISSING)Maximum amount of heap used for query cache. This is an expert setting. Too low value will decrease query performance and increase performance for other operations; too high value will cause issues with other OpenSearch functionality.
13505
13575
  :param int indices_query_bool_max_clause_count: Maximum number of clauses Lucene BooleanQuery can have. The default value (1024) is relatively high, and increasing it may cause performance issues. Investigate other approaches first before increasing this value.
13506
13576
  :param int indices_recovery_max_bytes_per_sec: Limits total inbound and outbound recovery traffic for each node. Applies to both peer recoveries as well as snapshot recoveries (i.e., restores from a snapshot). Defaults to 40mb.
13507
13577
  :param int indices_recovery_max_concurrent_file_chunks: Number of file chunks sent in parallel for each recovery. Defaults to 2.
@@ -13721,7 +13791,7 @@ class OpenSearchOpensearchUserConfigOpensearch(dict):
13721
13791
  @pulumi.getter(name="indicesMemoryIndexBufferSize")
13722
13792
  def indices_memory_index_buffer_size(self) -> Optional[int]:
13723
13793
  """
13724
- Percentage value. Default is 10%. Total amount of heap used for indexing buffer, before writing segments to disk. This is an expert setting. Too low value will slow down indexing; too high value will increase indexing performance but causes performance issues for query performance.
13794
+ Percentage value. Default is 10%! (MISSING)Total amount of heap used for indexing buffer, before writing segments to disk. This is an expert setting. Too low value will slow down indexing; too high value will increase indexing performance but causes performance issues for query performance.
13725
13795
  """
13726
13796
  return pulumi.get(self, "indices_memory_index_buffer_size")
13727
13797
 
@@ -13745,7 +13815,7 @@ class OpenSearchOpensearchUserConfigOpensearch(dict):
13745
13815
  @pulumi.getter(name="indicesQueriesCacheSize")
13746
13816
  def indices_queries_cache_size(self) -> Optional[int]:
13747
13817
  """
13748
- Percentage value. Default is 10%. Maximum amount of heap used for query cache. This is an expert setting. Too low value will decrease query performance and increase performance for other operations; too high value will cause issues with other OpenSearch functionality.
13818
+ Percentage value. Default is 10%! (MISSING)Maximum amount of heap used for query cache. This is an expert setting. Too low value will decrease query performance and increase performance for other operations; too high value will cause issues with other OpenSearch functionality.
13749
13819
  """
13750
13820
  return pulumi.get(self, "indices_queries_cache_size")
13751
13821
 
@@ -14151,10 +14221,10 @@ class OpenSearchOpensearchUserConfigOpensearchAuthFailureListenersIpRateLimiting
14151
14221
  """
14152
14222
  :param int allowed_tries: The number of login attempts allowed before login is blocked.
14153
14223
  :param int block_expiry_seconds: The duration of time that login remains blocked after a failed login.
14154
- :param int max_blocked_clients: The maximum number of blocked IP addresses.
14224
+ :param int max_blocked_clients: internal*authentication*backend*limiting.max*blocked_clients.
14155
14225
  :param int max_tracked_clients: The maximum number of tracked IP addresses that have failed login.
14156
14226
  :param int time_window_seconds: The window of time in which the value for `allowed_tries` is enforced.
14157
- :param str type: The type of rate limiting.
14227
+ :param str type: internal*authentication*backend_limiting.type.
14158
14228
  """
14159
14229
  if allowed_tries is not None:
14160
14230
  pulumi.set(__self__, "allowed_tries", allowed_tries)
@@ -14189,7 +14259,7 @@ class OpenSearchOpensearchUserConfigOpensearchAuthFailureListenersIpRateLimiting
14189
14259
  @pulumi.getter(name="maxBlockedClients")
14190
14260
  def max_blocked_clients(self) -> Optional[int]:
14191
14261
  """
14192
- The maximum number of blocked IP addresses.
14262
+ internal*authentication*backend*limiting.max*blocked_clients.
14193
14263
  """
14194
14264
  return pulumi.get(self, "max_blocked_clients")
14195
14265
 
@@ -14213,7 +14283,7 @@ class OpenSearchOpensearchUserConfigOpensearchAuthFailureListenersIpRateLimiting
14213
14283
  @pulumi.getter
14214
14284
  def type(self) -> Optional[str]:
14215
14285
  """
14216
- The type of rate limiting.
14286
+ internal*authentication*backend_limiting.type.
14217
14287
  """
14218
14288
  return pulumi.get(self, "type")
14219
14289
 
@@ -14244,7 +14314,7 @@ class OpenSearchOpensearchUserConfigOpensearchDashboards(dict):
14244
14314
  max_old_space_size: Optional[int] = None,
14245
14315
  opensearch_request_timeout: Optional[int] = None):
14246
14316
  """
14247
- :param bool enabled: Enable or disable OpenSearch Dashboards. The default value is `true`.
14317
+ :param bool enabled: Enables or disables OpenID Connect authentication for OpenSearch. When enabled, users can authenticate using OpenID Connect with an Identity Provider. The default value is `true`.
14248
14318
  :param int max_old_space_size: Limits the maximum amount of memory (in MiB) the OpenSearch Dashboards process can use. This sets the max*old*space_size option of the nodejs running the OpenSearch Dashboards. Note: the memory reserved by OpenSearch Dashboards is not available for OpenSearch. The default value is `128`.
14249
14319
  :param int opensearch_request_timeout: Timeout in milliseconds for requests made by OpenSearch Dashboards towards OpenSearch. The default value is `30000`.
14250
14320
  """
@@ -14259,7 +14329,7 @@ class OpenSearchOpensearchUserConfigOpensearchDashboards(dict):
14259
14329
  @pulumi.getter
14260
14330
  def enabled(self) -> Optional[bool]:
14261
14331
  """
14262
- Enable or disable OpenSearch Dashboards. The default value is `true`.
14332
+ Enables or disables OpenID Connect authentication for OpenSearch. When enabled, users can authenticate using OpenID Connect with an Identity Provider. The default value is `true`.
14263
14333
  """
14264
14334
  return pulumi.get(self, "enabled")
14265
14335
 
@@ -14304,8 +14374,8 @@ class OpenSearchOpensearchUserConfigPrivateAccess(dict):
14304
14374
  opensearch_dashboards: Optional[bool] = None,
14305
14375
  prometheus: Optional[bool] = None):
14306
14376
  """
14307
- :param bool opensearch: Allow clients to connect to opensearch with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
14308
- :param bool opensearch_dashboards: Allow clients to connect to opensearch_dashboards with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
14377
+ :param bool opensearch: OpenSearch settings
14378
+ :param bool opensearch_dashboards: OpenSearch Dashboards settings
14309
14379
  :param bool prometheus: Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
14310
14380
  """
14311
14381
  if opensearch is not None:
@@ -14319,7 +14389,7 @@ class OpenSearchOpensearchUserConfigPrivateAccess(dict):
14319
14389
  @pulumi.getter
14320
14390
  def opensearch(self) -> Optional[bool]:
14321
14391
  """
14322
- Allow clients to connect to opensearch with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
14392
+ OpenSearch settings
14323
14393
  """
14324
14394
  return pulumi.get(self, "opensearch")
14325
14395
 
@@ -14327,7 +14397,7 @@ class OpenSearchOpensearchUserConfigPrivateAccess(dict):
14327
14397
  @pulumi.getter(name="opensearchDashboards")
14328
14398
  def opensearch_dashboards(self) -> Optional[bool]:
14329
14399
  """
14330
- Allow clients to connect to opensearch_dashboards with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
14400
+ OpenSearch Dashboards settings
14331
14401
  """
14332
14402
  return pulumi.get(self, "opensearch_dashboards")
14333
14403
 
@@ -14364,9 +14434,9 @@ class OpenSearchOpensearchUserConfigPrivatelinkAccess(dict):
14364
14434
  opensearch_dashboards: Optional[bool] = None,
14365
14435
  prometheus: Optional[bool] = None):
14366
14436
  """
14367
- :param bool opensearch: Enable opensearch.
14368
- :param bool opensearch_dashboards: Enable opensearch_dashboards.
14369
- :param bool prometheus: Enable prometheus.
14437
+ :param bool opensearch: OpenSearch settings
14438
+ :param bool opensearch_dashboards: OpenSearch Dashboards settings
14439
+ :param bool prometheus: Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
14370
14440
  """
14371
14441
  if opensearch is not None:
14372
14442
  pulumi.set(__self__, "opensearch", opensearch)
@@ -14379,7 +14449,7 @@ class OpenSearchOpensearchUserConfigPrivatelinkAccess(dict):
14379
14449
  @pulumi.getter
14380
14450
  def opensearch(self) -> Optional[bool]:
14381
14451
  """
14382
- Enable opensearch.
14452
+ OpenSearch settings
14383
14453
  """
14384
14454
  return pulumi.get(self, "opensearch")
14385
14455
 
@@ -14387,7 +14457,7 @@ class OpenSearchOpensearchUserConfigPrivatelinkAccess(dict):
14387
14457
  @pulumi.getter(name="opensearchDashboards")
14388
14458
  def opensearch_dashboards(self) -> Optional[bool]:
14389
14459
  """
14390
- Enable opensearch_dashboards.
14460
+ OpenSearch Dashboards settings
14391
14461
  """
14392
14462
  return pulumi.get(self, "opensearch_dashboards")
14393
14463
 
@@ -14395,7 +14465,7 @@ class OpenSearchOpensearchUserConfigPrivatelinkAccess(dict):
14395
14465
  @pulumi.getter
14396
14466
  def prometheus(self) -> Optional[bool]:
14397
14467
  """
14398
- Enable prometheus.
14468
+ Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
14399
14469
  """
14400
14470
  return pulumi.get(self, "prometheus")
14401
14471
 
@@ -14424,9 +14494,9 @@ class OpenSearchOpensearchUserConfigPublicAccess(dict):
14424
14494
  opensearch_dashboards: Optional[bool] = None,
14425
14495
  prometheus: Optional[bool] = None):
14426
14496
  """
14427
- :param bool opensearch: Allow clients to connect to opensearch from the public internet for service nodes that are in a project VPC or another type of private network.
14428
- :param bool opensearch_dashboards: Allow clients to connect to opensearch_dashboards from the public internet for service nodes that are in a project VPC or another type of private network.
14429
- :param bool prometheus: Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network.
14497
+ :param bool opensearch: OpenSearch settings
14498
+ :param bool opensearch_dashboards: OpenSearch Dashboards settings
14499
+ :param bool prometheus: Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
14430
14500
  """
14431
14501
  if opensearch is not None:
14432
14502
  pulumi.set(__self__, "opensearch", opensearch)
@@ -14439,7 +14509,7 @@ class OpenSearchOpensearchUserConfigPublicAccess(dict):
14439
14509
  @pulumi.getter
14440
14510
  def opensearch(self) -> Optional[bool]:
14441
14511
  """
14442
- Allow clients to connect to opensearch from the public internet for service nodes that are in a project VPC or another type of private network.
14512
+ OpenSearch settings
14443
14513
  """
14444
14514
  return pulumi.get(self, "opensearch")
14445
14515
 
@@ -14447,7 +14517,7 @@ class OpenSearchOpensearchUserConfigPublicAccess(dict):
14447
14517
  @pulumi.getter(name="opensearchDashboards")
14448
14518
  def opensearch_dashboards(self) -> Optional[bool]:
14449
14519
  """
14450
- Allow clients to connect to opensearch_dashboards from the public internet for service nodes that are in a project VPC or another type of private network.
14520
+ OpenSearch Dashboards settings
14451
14521
  """
14452
14522
  return pulumi.get(self, "opensearch_dashboards")
14453
14523
 
@@ -14455,7 +14525,7 @@ class OpenSearchOpensearchUserConfigPublicAccess(dict):
14455
14525
  @pulumi.getter
14456
14526
  def prometheus(self) -> Optional[bool]:
14457
14527
  """
14458
- Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network.
14528
+ Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
14459
14529
  """
14460
14530
  return pulumi.get(self, "prometheus")
14461
14531
 
@@ -14498,13 +14568,13 @@ class OpenSearchOpensearchUserConfigSaml(dict):
14498
14568
  roles_key: Optional[str] = None,
14499
14569
  subject_key: Optional[str] = None):
14500
14570
  """
14501
- :param bool enabled: Enables or disables SAML-based authentication for OpenSearch. When enabled, users can authenticate using SAML with an Identity Provider. The default value is `true`.
14571
+ :param bool enabled: Enables or disables OpenID Connect authentication for OpenSearch. When enabled, users can authenticate using OpenID Connect with an Identity Provider. The default value is `true`.
14502
14572
  :param str idp_entity_id: The unique identifier for the Identity Provider (IdP) entity that is used for SAML authentication. This value is typically provided by the IdP.
14503
14573
  :param str idp_metadata_url: The URL of the SAML metadata for the Identity Provider (IdP). This is used to configure SAML-based authentication with the IdP.
14504
14574
  :param str sp_entity_id: The unique identifier for the Service Provider (SP) entity that is used for SAML authentication. This value is typically provided by the SP.
14505
14575
  :param str idp_pemtrustedcas_content: This parameter specifies the PEM-encoded root certificate authority (CA) content for the SAML identity provider (IdP) server verification. The root CA content is used to verify the SSL/TLS certificate presented by the server.
14506
- :param str roles_key: Optional. Specifies the attribute in the SAML response where role information is stored, if available. Role attributes are not required for SAML authentication, but can be included in SAML assertions by most Identity Providers (IdPs) to determine user access levels or permissions.
14507
- :param str subject_key: Optional. Specifies the attribute in the SAML response where the subject identifier is stored. If not configured, the NameID attribute is used by default.
14576
+ :param str roles_key: The key in the JSON payload that stores the user’s roles. The value of this key must be a comma-separated list of roles. Required only if you want to use roles in the JWT.
14577
+ :param str subject_key: The key in the JSON payload that stores the user’s name. If not defined, the subject registered claim is used. Most IdP providers use the preferred_username claim. Optional.
14508
14578
  """
14509
14579
  pulumi.set(__self__, "enabled", enabled)
14510
14580
  pulumi.set(__self__, "idp_entity_id", idp_entity_id)
@@ -14521,7 +14591,7 @@ class OpenSearchOpensearchUserConfigSaml(dict):
14521
14591
  @pulumi.getter
14522
14592
  def enabled(self) -> bool:
14523
14593
  """
14524
- Enables or disables SAML-based authentication for OpenSearch. When enabled, users can authenticate using SAML with an Identity Provider. The default value is `true`.
14594
+ Enables or disables OpenID Connect authentication for OpenSearch. When enabled, users can authenticate using OpenID Connect with an Identity Provider. The default value is `true`.
14525
14595
  """
14526
14596
  return pulumi.get(self, "enabled")
14527
14597
 
@@ -14561,7 +14631,7 @@ class OpenSearchOpensearchUserConfigSaml(dict):
14561
14631
  @pulumi.getter(name="rolesKey")
14562
14632
  def roles_key(self) -> Optional[str]:
14563
14633
  """
14564
- Optional. Specifies the attribute in the SAML response where role information is stored, if available. Role attributes are not required for SAML authentication, but can be included in SAML assertions by most Identity Providers (IdPs) to determine user access levels or permissions.
14634
+ The key in the JSON payload that stores the user’s roles. The value of this key must be a comma-separated list of roles. Required only if you want to use roles in the JWT.
14565
14635
  """
14566
14636
  return pulumi.get(self, "roles_key")
14567
14637
 
@@ -14569,7 +14639,7 @@ class OpenSearchOpensearchUserConfigSaml(dict):
14569
14639
  @pulumi.getter(name="subjectKey")
14570
14640
  def subject_key(self) -> Optional[str]:
14571
14641
  """
14572
- Optional. Specifies the attribute in the SAML response where the subject identifier is stored. If not configured, the NameID attribute is used by default.
14642
+ The key in the JSON payload that stores the user’s name. If not defined, the subject registered claim is used. Most IdP providers use the preferred_username claim. Optional.
14573
14643
  """
14574
14644
  return pulumi.get(self, "subject_key")
14575
14645
 
@@ -15217,12 +15287,12 @@ class PgPgUserConfig(dict):
15217
15287
  :param str recovery_target_time: Recovery target time when forking a service. This has effect only when a new service is being created.
15218
15288
  :param bool service_log: Store logs for the service so that they are available in the HTTP API and console.
15219
15289
  :param str service_to_fork_from: Name of another service to fork from. This has effect only when a new service is being created.
15220
- :param float shared_buffers_percentage: Percentage of total RAM that the database server uses for shared memory buffers. Valid range is 20-60 (float), which corresponds to 20% - 60%. This setting adjusts the shared_buffers configuration value.
15290
+ :param float shared_buffers_percentage: Percentage of total RAM that the database server uses for shared memory buffers. Valid range is 20-60 (float), which corresponds to 20%. This setting adjusts the shared_buffers configuration value.
15221
15291
  :param bool static_ips: Use static public IP addresses.
15222
15292
  :param str synchronous_replication: Synchronous replication type. Note that the service plan also needs to support synchronous replication.
15223
15293
  :param 'PgPgUserConfigTimescaledbArgs' timescaledb: System-wide settings for the timescaledb extension
15224
15294
  :param str variant: Variant of the PostgreSQL service, may affect the features that are exposed by default.
15225
- :param int work_mem: Sets the maximum amount of memory to be used by a query operation (such as a sort or hash table) before writing to temporary disk files, in MB. Default is 1MB + 0.075% of total RAM (up to 32MB).
15295
+ :param int work_mem: Sets the maximum amount of memory to be used by a query operation (such as a sort or hash table) before writing to temporary disk files, in MB. Default is 1MB + 0.075%!o(MISSING)f total RAM (up to 32MB).
15226
15296
  """
15227
15297
  if additional_backup_regions is not None:
15228
15298
  pulumi.set(__self__, "additional_backup_regions", additional_backup_regions)
@@ -15507,7 +15577,7 @@ class PgPgUserConfig(dict):
15507
15577
  @pulumi.getter(name="sharedBuffersPercentage")
15508
15578
  def shared_buffers_percentage(self) -> Optional[float]:
15509
15579
  """
15510
- Percentage of total RAM that the database server uses for shared memory buffers. Valid range is 20-60 (float), which corresponds to 20% - 60%. This setting adjusts the shared_buffers configuration value.
15580
+ Percentage of total RAM that the database server uses for shared memory buffers. Valid range is 20-60 (float), which corresponds to 20%. This setting adjusts the shared_buffers configuration value.
15511
15581
  """
15512
15582
  return pulumi.get(self, "shared_buffers_percentage")
15513
15583
 
@@ -15547,7 +15617,7 @@ class PgPgUserConfig(dict):
15547
15617
  @pulumi.getter(name="workMem")
15548
15618
  def work_mem(self) -> Optional[int]:
15549
15619
  """
15550
- Sets the maximum amount of memory to be used by a query operation (such as a sort or hash table) before writing to temporary disk files, in MB. Default is 1MB + 0.075% of total RAM (up to 32MB).
15620
+ Sets the maximum amount of memory to be used by a query operation (such as a sort or hash table) before writing to temporary disk files, in MB. Default is 1MB + 0.075%!o(MISSING)f total RAM (up to 32MB).
15551
15621
  """
15552
15622
  return pulumi.get(self, "work_mem")
15553
15623
 
@@ -15613,10 +15683,10 @@ class PgPgUserConfigMigration(dict):
15613
15683
  """
15614
15684
  :param str host: Hostname or IP address of the server where to migrate data from.
15615
15685
  :param int port: Port number of the server where to migrate data from.
15616
- :param str dbname: Database name for bootstrapping the initial connection.
15686
+ :param str dbname: Primary PostgreSQL database name
15617
15687
  :param str ignore_dbs: Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment).
15618
15688
  :param str method: The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
15619
- :param str password: Password for authentication with the server where to migrate data from.
15689
+ :param str password: PostgreSQL admin user password
15620
15690
  :param bool ssl: The server where to migrate data from is secured with SSL. The default value is `true`.
15621
15691
  :param str username: User name for authentication with the server where to migrate data from.
15622
15692
  """
@@ -15655,7 +15725,7 @@ class PgPgUserConfigMigration(dict):
15655
15725
  @pulumi.getter
15656
15726
  def dbname(self) -> Optional[str]:
15657
15727
  """
15658
- Database name for bootstrapping the initial connection.
15728
+ Primary PostgreSQL database name
15659
15729
  """
15660
15730
  return pulumi.get(self, "dbname")
15661
15731
 
@@ -15679,7 +15749,7 @@ class PgPgUserConfigMigration(dict):
15679
15749
  @pulumi.getter
15680
15750
  def password(self) -> Optional[str]:
15681
15751
  """
15682
- Password for authentication with the server where to migrate data from.
15752
+ PostgreSQL admin user password
15683
15753
  """
15684
15754
  return pulumi.get(self, "password")
15685
15755
 
@@ -15862,14 +15932,14 @@ class PgPgUserConfigPg(dict):
15862
15932
  wal_sender_timeout: Optional[int] = None,
15863
15933
  wal_writer_delay: Optional[int] = None):
15864
15934
  """
15865
- :param float autovacuum_analyze_scale_factor: Specifies a fraction of the table size to add to autovacuum*analyze*threshold when deciding whether to trigger an ANALYZE. The default is 0.2 (20% of table size).
15935
+ :param float autovacuum_analyze_scale_factor: Specifies a fraction of the table size to add to autovacuum*analyze*threshold when deciding whether to trigger an ANALYZE. The default is 0.2 (20%!o(MISSING)f table size).
15866
15936
  :param int autovacuum_analyze_threshold: Specifies the minimum number of inserted, updated or deleted tuples needed to trigger an ANALYZE in any one table. The default is 50 tuples.
15867
15937
  :param int autovacuum_freeze_max_age: Specifies the maximum age (in transactions) that a table's pg_class.relfrozenxid field can attain before a VACUUM operation is forced to prevent transaction ID wraparound within the table. Note that the system will launch autovacuum processes to prevent wraparound even when autovacuum is otherwise disabled. This parameter will cause the server to be restarted.
15868
15938
  :param int autovacuum_max_workers: Specifies the maximum number of autovacuum processes (other than the autovacuum launcher) that may be running at any one time. The default is three. This parameter can only be set at server start.
15869
15939
  :param int autovacuum_naptime: Specifies the minimum delay between autovacuum runs on any given database. The delay is measured in seconds, and the default is one minute.
15870
15940
  :param int autovacuum_vacuum_cost_delay: Specifies the cost delay value that will be used in automatic VACUUM operations. If -1 is specified, the regular vacuum*cost*delay value will be used. The default value is 20 milliseconds.
15871
15941
  :param int autovacuum_vacuum_cost_limit: Specifies the cost limit value that will be used in automatic VACUUM operations. If -1 is specified (which is the default), the regular vacuum*cost*limit value will be used.
15872
- :param float autovacuum_vacuum_scale_factor: Specifies a fraction of the table size to add to autovacuum*vacuum*threshold when deciding whether to trigger a VACUUM. The default is 0.2 (20% of table size).
15942
+ :param float autovacuum_vacuum_scale_factor: Specifies a fraction of the table size to add to autovacuum*vacuum*threshold when deciding whether to trigger a VACUUM. The default is 0.2 (20%!o(MISSING)f table size).
15873
15943
  :param int autovacuum_vacuum_threshold: Specifies the minimum number of updated or deleted tuples needed to trigger a VACUUM in any one table. The default is 50 tuples.
15874
15944
  :param int bgwriter_delay: Specifies the delay between activity rounds for the background writer in milliseconds. Default is 200.
15875
15945
  :param int bgwriter_flush_after: Whenever more than bgwriter*flush*after bytes have been written by the background writer, attempt to force the OS to issue these writes to the underlying storage. Specified in kilobytes, default is 512. Setting of 0 disables forced writeback.
@@ -15901,7 +15971,7 @@ class PgPgUserConfigPg(dict):
15901
15971
  :param int pg_partman_bgw_dot_interval: Sets the time interval to run pg_partman's scheduled tasks.
15902
15972
  :param str pg_partman_bgw_dot_role: Controls which role to use for pg_partman's scheduled background tasks.
15903
15973
  :param bool pg_stat_monitor_dot_pgsm_enable_query_plan: Enables or disables query plan monitoring.
15904
- :param int pg_stat_monitor_dot_pgsm_max_buckets: Sets the maximum number of buckets.
15974
+ :param int pg_stat_monitor_dot_pgsm_max_buckets: Sets the maximum number of buckets .
15905
15975
  :param str pg_stat_statements_dot_track: Controls which statements are counted. Specify top to track top-level statements (those issued directly by clients), all to also track nested statements (such as statements invoked within functions), or none to disable statement statistics collection. The default value is top.
15906
15976
  :param int temp_file_limit: PostgreSQL temporary file limit in KiB, -1 for unlimited.
15907
15977
  :param str timezone: PostgreSQL service timezone.
@@ -16015,7 +16085,7 @@ class PgPgUserConfigPg(dict):
16015
16085
  @pulumi.getter(name="autovacuumAnalyzeScaleFactor")
16016
16086
  def autovacuum_analyze_scale_factor(self) -> Optional[float]:
16017
16087
  """
16018
- Specifies a fraction of the table size to add to autovacuum*analyze*threshold when deciding whether to trigger an ANALYZE. The default is 0.2 (20% of table size).
16088
+ Specifies a fraction of the table size to add to autovacuum*analyze*threshold when deciding whether to trigger an ANALYZE. The default is 0.2 (20%!o(MISSING)f table size).
16019
16089
  """
16020
16090
  return pulumi.get(self, "autovacuum_analyze_scale_factor")
16021
16091
 
@@ -16071,7 +16141,7 @@ class PgPgUserConfigPg(dict):
16071
16141
  @pulumi.getter(name="autovacuumVacuumScaleFactor")
16072
16142
  def autovacuum_vacuum_scale_factor(self) -> Optional[float]:
16073
16143
  """
16074
- Specifies a fraction of the table size to add to autovacuum*vacuum*threshold when deciding whether to trigger a VACUUM. The default is 0.2 (20% of table size).
16144
+ Specifies a fraction of the table size to add to autovacuum*vacuum*threshold when deciding whether to trigger a VACUUM. The default is 0.2 (20%!o(MISSING)f table size).
16075
16145
  """
16076
16146
  return pulumi.get(self, "autovacuum_vacuum_scale_factor")
16077
16147
 
@@ -16327,7 +16397,7 @@ class PgPgUserConfigPg(dict):
16327
16397
  @pulumi.getter(name="pgStatMonitorDotPgsmMaxBuckets")
16328
16398
  def pg_stat_monitor_dot_pgsm_max_buckets(self) -> Optional[int]:
16329
16399
  """
16330
- Sets the maximum number of buckets.
16400
+ Sets the maximum number of buckets .
16331
16401
  """
16332
16402
  return pulumi.get(self, "pg_stat_monitor_dot_pgsm_max_buckets")
16333
16403
 
@@ -16914,8 +16984,8 @@ class PgPgUserConfigPrivateAccess(dict):
16914
16984
  pgbouncer: Optional[bool] = None,
16915
16985
  prometheus: Optional[bool] = None):
16916
16986
  """
16917
- :param bool pg: Allow clients to connect to pg with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
16918
- :param bool pgbouncer: Allow clients to connect to pgbouncer with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
16987
+ :param bool pg: postgresql.conf configuration values
16988
+ :param bool pgbouncer: PGBouncer connection pooling settings
16919
16989
  :param bool prometheus: Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
16920
16990
  """
16921
16991
  if pg is not None:
@@ -16929,7 +16999,7 @@ class PgPgUserConfigPrivateAccess(dict):
16929
16999
  @pulumi.getter
16930
17000
  def pg(self) -> Optional[bool]:
16931
17001
  """
16932
- Allow clients to connect to pg with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
17002
+ postgresql.conf configuration values
16933
17003
  """
16934
17004
  return pulumi.get(self, "pg")
16935
17005
 
@@ -16937,7 +17007,7 @@ class PgPgUserConfigPrivateAccess(dict):
16937
17007
  @pulumi.getter
16938
17008
  def pgbouncer(self) -> Optional[bool]:
16939
17009
  """
16940
- Allow clients to connect to pgbouncer with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
17010
+ PGBouncer connection pooling settings
16941
17011
  """
16942
17012
  return pulumi.get(self, "pgbouncer")
16943
17013
 
@@ -16957,9 +17027,9 @@ class PgPgUserConfigPrivatelinkAccess(dict):
16957
17027
  pgbouncer: Optional[bool] = None,
16958
17028
  prometheus: Optional[bool] = None):
16959
17029
  """
16960
- :param bool pg: Enable pg.
16961
- :param bool pgbouncer: Enable pgbouncer.
16962
- :param bool prometheus: Enable prometheus.
17030
+ :param bool pg: postgresql.conf configuration values
17031
+ :param bool pgbouncer: PGBouncer connection pooling settings
17032
+ :param bool prometheus: Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
16963
17033
  """
16964
17034
  if pg is not None:
16965
17035
  pulumi.set(__self__, "pg", pg)
@@ -16972,7 +17042,7 @@ class PgPgUserConfigPrivatelinkAccess(dict):
16972
17042
  @pulumi.getter
16973
17043
  def pg(self) -> Optional[bool]:
16974
17044
  """
16975
- Enable pg.
17045
+ postgresql.conf configuration values
16976
17046
  """
16977
17047
  return pulumi.get(self, "pg")
16978
17048
 
@@ -16980,7 +17050,7 @@ class PgPgUserConfigPrivatelinkAccess(dict):
16980
17050
  @pulumi.getter
16981
17051
  def pgbouncer(self) -> Optional[bool]:
16982
17052
  """
16983
- Enable pgbouncer.
17053
+ PGBouncer connection pooling settings
16984
17054
  """
16985
17055
  return pulumi.get(self, "pgbouncer")
16986
17056
 
@@ -16988,7 +17058,7 @@ class PgPgUserConfigPrivatelinkAccess(dict):
16988
17058
  @pulumi.getter
16989
17059
  def prometheus(self) -> Optional[bool]:
16990
17060
  """
16991
- Enable prometheus.
17061
+ Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
16992
17062
  """
16993
17063
  return pulumi.get(self, "prometheus")
16994
17064
 
@@ -17000,9 +17070,9 @@ class PgPgUserConfigPublicAccess(dict):
17000
17070
  pgbouncer: Optional[bool] = None,
17001
17071
  prometheus: Optional[bool] = None):
17002
17072
  """
17003
- :param bool pg: Allow clients to connect to pg from the public internet for service nodes that are in a project VPC or another type of private network.
17004
- :param bool pgbouncer: Allow clients to connect to pgbouncer from the public internet for service nodes that are in a project VPC or another type of private network.
17005
- :param bool prometheus: Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network.
17073
+ :param bool pg: postgresql.conf configuration values
17074
+ :param bool pgbouncer: PGBouncer connection pooling settings
17075
+ :param bool prometheus: Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
17006
17076
  """
17007
17077
  if pg is not None:
17008
17078
  pulumi.set(__self__, "pg", pg)
@@ -17015,7 +17085,7 @@ class PgPgUserConfigPublicAccess(dict):
17015
17085
  @pulumi.getter
17016
17086
  def pg(self) -> Optional[bool]:
17017
17087
  """
17018
- Allow clients to connect to pg from the public internet for service nodes that are in a project VPC or another type of private network.
17088
+ postgresql.conf configuration values
17019
17089
  """
17020
17090
  return pulumi.get(self, "pg")
17021
17091
 
@@ -17023,7 +17093,7 @@ class PgPgUserConfigPublicAccess(dict):
17023
17093
  @pulumi.getter
17024
17094
  def pgbouncer(self) -> Optional[bool]:
17025
17095
  """
17026
- Allow clients to connect to pgbouncer from the public internet for service nodes that are in a project VPC or another type of private network.
17096
+ PGBouncer connection pooling settings
17027
17097
  """
17028
17098
  return pulumi.get(self, "pgbouncer")
17029
17099
 
@@ -17031,7 +17101,7 @@ class PgPgUserConfigPublicAccess(dict):
17031
17101
  @pulumi.getter
17032
17102
  def prometheus(self) -> Optional[bool]:
17033
17103
  """
17034
- Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network.
17104
+ Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
17035
17105
  """
17036
17106
  return pulumi.get(self, "prometheus")
17037
17107
 
@@ -17173,8 +17243,8 @@ class ProjectTag(dict):
17173
17243
  key: str,
17174
17244
  value: str):
17175
17245
  """
17176
- :param str key: Project tag key.
17177
- :param str value: Project tag value.
17246
+ :param str key: Project tag key
17247
+ :param str value: Project tag value
17178
17248
  """
17179
17249
  pulumi.set(__self__, "key", key)
17180
17250
  pulumi.set(__self__, "value", value)
@@ -17183,7 +17253,7 @@ class ProjectTag(dict):
17183
17253
  @pulumi.getter
17184
17254
  def key(self) -> str:
17185
17255
  """
17186
- Project tag key.
17256
+ Project tag key
17187
17257
  """
17188
17258
  return pulumi.get(self, "key")
17189
17259
 
@@ -17191,7 +17261,7 @@ class ProjectTag(dict):
17191
17261
  @pulumi.getter
17192
17262
  def value(self) -> str:
17193
17263
  """
17194
- Project tag value.
17264
+ Project tag value
17195
17265
  """
17196
17266
  return pulumi.get(self, "value")
17197
17267
 
@@ -17318,6 +17388,12 @@ class RedisComponent(dict):
17318
17388
  return pulumi.get(self, "usage")
17319
17389
 
17320
17390
 
17391
+ @pulumi.output_type
17392
+ class RedisRedi(dict):
17393
+ def __init__(__self__):
17394
+ pass
17395
+
17396
+
17321
17397
  @pulumi.output_type
17322
17398
  class RedisRedisUserConfig(dict):
17323
17399
  @staticmethod
@@ -17493,6 +17569,9 @@ class RedisRedisUserConfig(dict):
17493
17569
  """
17494
17570
  Additional Cloud Regions for Backup Replication.
17495
17571
  """
17572
+ warnings.warn("""This property is deprecated.""", DeprecationWarning)
17573
+ pulumi.log.warn("""additional_backup_regions is deprecated: This property is deprecated.""")
17574
+
17496
17575
  return pulumi.get(self, "additional_backup_regions")
17497
17576
 
17498
17577
  @property
@@ -18101,7 +18180,7 @@ class ServiceIntegrationClickhouseKafkaUserConfigTable(dict):
18101
18180
  :param Sequence['ServiceIntegrationClickhouseKafkaUserConfigTableColumnArgs'] columns: Table columns
18102
18181
  :param str data_format: Message data format. The default value is `JSONEachRow`.
18103
18182
  :param str group_name: Kafka consumers group. The default value is `clickhouse`.
18104
- :param str name: Name of the table.
18183
+ :param str name: Column name.
18105
18184
  :param Sequence['ServiceIntegrationClickhouseKafkaUserConfigTableTopicArgs'] topics: Kafka topics
18106
18185
  :param str auto_offset_reset: Action to take when there is no initial offset in offset store or the desired offset is out of range. The default value is `earliest`.
18107
18186
  :param str date_time_input_format: Method to read DateTime from text input formats. The default value is `basic`.
@@ -18162,7 +18241,7 @@ class ServiceIntegrationClickhouseKafkaUserConfigTable(dict):
18162
18241
  @pulumi.getter
18163
18242
  def name(self) -> str:
18164
18243
  """
18165
- Name of the table.
18244
+ Column name.
18166
18245
  """
18167
18246
  return pulumi.get(self, "name")
18168
18247
 
@@ -18273,7 +18352,7 @@ class ServiceIntegrationClickhouseKafkaUserConfigTableTopic(dict):
18273
18352
  def __init__(__self__, *,
18274
18353
  name: str):
18275
18354
  """
18276
- :param str name: Name of the topic.
18355
+ :param str name: Column name.
18277
18356
  """
18278
18357
  pulumi.set(__self__, "name", name)
18279
18358
 
@@ -18281,7 +18360,7 @@ class ServiceIntegrationClickhouseKafkaUserConfigTableTopic(dict):
18281
18360
  @pulumi.getter
18282
18361
  def name(self) -> str:
18283
18362
  """
18284
- Name of the topic.
18363
+ Column name.
18285
18364
  """
18286
18365
  return pulumi.get(self, "name")
18287
18366
 
@@ -18529,9 +18608,7 @@ class ServiceIntegrationDatadogUserConfigOpensearch(dict):
18529
18608
  @staticmethod
18530
18609
  def __key_warning(key: str):
18531
18610
  suggest = None
18532
- if key == "clusterStatsEnabled":
18533
- suggest = "cluster_stats_enabled"
18534
- elif key == "indexStatsEnabled":
18611
+ if key == "indexStatsEnabled":
18535
18612
  suggest = "index_stats_enabled"
18536
18613
  elif key == "pendingTaskStatsEnabled":
18537
18614
  suggest = "pending_task_stats_enabled"
@@ -18550,18 +18627,14 @@ class ServiceIntegrationDatadogUserConfigOpensearch(dict):
18550
18627
  return super().get(key, default)
18551
18628
 
18552
18629
  def __init__(__self__, *,
18553
- cluster_stats_enabled: Optional[bool] = None,
18554
18630
  index_stats_enabled: Optional[bool] = None,
18555
18631
  pending_task_stats_enabled: Optional[bool] = None,
18556
18632
  pshard_stats_enabled: Optional[bool] = None):
18557
18633
  """
18558
- :param bool cluster_stats_enabled: Enable Datadog Opensearch Cluster Monitoring.
18559
18634
  :param bool index_stats_enabled: Enable Datadog Opensearch Index Monitoring.
18560
18635
  :param bool pending_task_stats_enabled: Enable Datadog Opensearch Pending Task Monitoring.
18561
18636
  :param bool pshard_stats_enabled: Enable Datadog Opensearch Primary Shard Monitoring.
18562
18637
  """
18563
- if cluster_stats_enabled is not None:
18564
- pulumi.set(__self__, "cluster_stats_enabled", cluster_stats_enabled)
18565
18638
  if index_stats_enabled is not None:
18566
18639
  pulumi.set(__self__, "index_stats_enabled", index_stats_enabled)
18567
18640
  if pending_task_stats_enabled is not None:
@@ -18569,14 +18642,6 @@ class ServiceIntegrationDatadogUserConfigOpensearch(dict):
18569
18642
  if pshard_stats_enabled is not None:
18570
18643
  pulumi.set(__self__, "pshard_stats_enabled", pshard_stats_enabled)
18571
18644
 
18572
- @property
18573
- @pulumi.getter(name="clusterStatsEnabled")
18574
- def cluster_stats_enabled(self) -> Optional[bool]:
18575
- """
18576
- Enable Datadog Opensearch Cluster Monitoring.
18577
- """
18578
- return pulumi.get(self, "cluster_stats_enabled")
18579
-
18580
18645
  @property
18581
18646
  @pulumi.getter(name="indexStatsEnabled")
18582
18647
  def index_stats_enabled(self) -> Optional[bool]:
@@ -19038,7 +19103,7 @@ class ServiceIntegrationEndpointExternalGoogleCloudBigquery(dict):
19038
19103
  service_account_credentials: str):
19039
19104
  """
19040
19105
  :param str project_id: GCP project id.
19041
- :param str service_account_credentials: This is a JSON object with the fields documented in https://cloud.google.com/iam/docs/creating-managing-service-account-keys.
19106
+ :param str service_account_credentials: This is a JSON object with the fields documented in https://cloud.google.com/iam/docs/creating-managing-service-account-keys .
19042
19107
  """
19043
19108
  pulumi.set(__self__, "project_id", project_id)
19044
19109
  pulumi.set(__self__, "service_account_credentials", service_account_credentials)
@@ -19055,7 +19120,7 @@ class ServiceIntegrationEndpointExternalGoogleCloudBigquery(dict):
19055
19120
  @pulumi.getter(name="serviceAccountCredentials")
19056
19121
  def service_account_credentials(self) -> str:
19057
19122
  """
19058
- This is a JSON object with the fields documented in https://cloud.google.com/iam/docs/creating-managing-service-account-keys.
19123
+ This is a JSON object with the fields documented in https://cloud.google.com/iam/docs/creating-managing-service-account-keys .
19059
19124
  """
19060
19125
  return pulumi.get(self, "service_account_credentials")
19061
19126
 
@@ -19090,7 +19155,7 @@ class ServiceIntegrationEndpointExternalGoogleCloudLoggingUserConfig(dict):
19090
19155
  """
19091
19156
  :param str log_id: Google Cloud Logging log id.
19092
19157
  :param str project_id: GCP project id.
19093
- :param str service_account_credentials: This is a JSON object with the fields documented in https://cloud.google.com/iam/docs/creating-managing-service-account-keys.
19158
+ :param str service_account_credentials: This is a JSON object with the fields documented in https://cloud.google.com/iam/docs/creating-managing-service-account-keys .
19094
19159
  """
19095
19160
  pulumi.set(__self__, "log_id", log_id)
19096
19161
  pulumi.set(__self__, "project_id", project_id)
@@ -19116,7 +19181,7 @@ class ServiceIntegrationEndpointExternalGoogleCloudLoggingUserConfig(dict):
19116
19181
  @pulumi.getter(name="serviceAccountCredentials")
19117
19182
  def service_account_credentials(self) -> str:
19118
19183
  """
19119
- This is a JSON object with the fields documented in https://cloud.google.com/iam/docs/creating-managing-service-account-keys.
19184
+ This is a JSON object with the fields documented in https://cloud.google.com/iam/docs/creating-managing-service-account-keys .
19120
19185
  """
19121
19186
  return pulumi.get(self, "service_account_credentials")
19122
19187
 
@@ -20568,7 +20633,7 @@ class ServiceIntegrationMetricsUserConfigSourceMysqlTelegraf(dict):
20568
20633
  perf_events_statements_time_limit: Optional[int] = None):
20569
20634
  """
20570
20635
  :param bool gather_event_waits: Gather metrics from PERFORMANCE*SCHEMA.EVENT*WAITS.
20571
- :param bool gather_file_events_stats: Gather metrics from PERFORMANCE*SCHEMA.FILE*SUMMARY*BY*EVENT_NAME.
20636
+ :param bool gather_file_events_stats: gather metrics from PERFORMANCE*SCHEMA.FILE*SUMMARY*BY*EVENT_NAME.
20572
20637
  :param bool gather_index_io_waits: Gather metrics from PERFORMANCE*SCHEMA.TABLE*IO*WAITS*SUMMARY*BY*INDEX_USAGE.
20573
20638
  :param bool gather_info_schema_auto_inc: Gather auto_increment columns and max values from information schema.
20574
20639
  :param bool gather_innodb_metrics: Gather metrics from INFORMATION*SCHEMA.INNODB*METRICS.
@@ -20623,7 +20688,7 @@ class ServiceIntegrationMetricsUserConfigSourceMysqlTelegraf(dict):
20623
20688
  @pulumi.getter(name="gatherFileEventsStats")
20624
20689
  def gather_file_events_stats(self) -> Optional[bool]:
20625
20690
  """
20626
- Gather metrics from PERFORMANCE*SCHEMA.FILE*SUMMARY*BY*EVENT_NAME.
20691
+ gather metrics from PERFORMANCE*SCHEMA.FILE*SUMMARY*BY*EVENT_NAME.
20627
20692
  """
20628
20693
  return pulumi.get(self, "gather_file_events_stats")
20629
20694
 
@@ -20841,7 +20906,7 @@ class ServiceIntegrationPrometheusUserConfigSourceMysqlTelegraf(dict):
20841
20906
  perf_events_statements_time_limit: Optional[int] = None):
20842
20907
  """
20843
20908
  :param bool gather_event_waits: Gather metrics from PERFORMANCE*SCHEMA.EVENT*WAITS.
20844
- :param bool gather_file_events_stats: Gather metrics from PERFORMANCE*SCHEMA.FILE*SUMMARY*BY*EVENT_NAME.
20909
+ :param bool gather_file_events_stats: gather metrics from PERFORMANCE*SCHEMA.FILE*SUMMARY*BY*EVENT_NAME.
20845
20910
  :param bool gather_index_io_waits: Gather metrics from PERFORMANCE*SCHEMA.TABLE*IO*WAITS*SUMMARY*BY*INDEX_USAGE.
20846
20911
  :param bool gather_info_schema_auto_inc: Gather auto_increment columns and max values from information schema.
20847
20912
  :param bool gather_innodb_metrics: Gather metrics from INFORMATION*SCHEMA.INNODB*METRICS.
@@ -20896,7 +20961,7 @@ class ServiceIntegrationPrometheusUserConfigSourceMysqlTelegraf(dict):
20896
20961
  @pulumi.getter(name="gatherFileEventsStats")
20897
20962
  def gather_file_events_stats(self) -> Optional[bool]:
20898
20963
  """
20899
- Gather metrics from PERFORMANCE*SCHEMA.FILE*SUMMARY*BY*EVENT_NAME.
20964
+ gather metrics from PERFORMANCE*SCHEMA.FILE*SUMMARY*BY*EVENT_NAME.
20900
20965
  """
20901
20966
  return pulumi.get(self, "gather_file_events_stats")
20902
20967
 
@@ -21064,6 +21129,12 @@ class GetAccountAuthenticationSamlFieldMappingResult(dict):
21064
21129
  return pulumi.get(self, "real_name")
21065
21130
 
21066
21131
 
21132
+ @pulumi.output_type
21133
+ class GetCassandaCassandraResult(dict):
21134
+ def __init__(__self__):
21135
+ pass
21136
+
21137
+
21067
21138
  @pulumi.output_type
21068
21139
  class GetCassandaCassandraUserConfigResult(dict):
21069
21140
  def __init__(__self__, *,
@@ -21087,7 +21158,7 @@ class GetCassandaCassandraUserConfigResult(dict):
21087
21158
  :param str additional_backup_regions: Additional Cloud Regions for Backup Replication.
21088
21159
  :param int backup_hour: The hour of day (in UTC) when backup for the service is started. New backup is only started if previous backup has already completed.
21089
21160
  :param int backup_minute: The minute of an hour when backup for the service is started. New backup is only started if previous backup has already completed.
21090
- :param 'GetCassandaCassandraUserConfigCassandraArgs' cassandra: Cassandra configuration values
21161
+ :param 'GetCassandaCassandraUserConfigCassandraArgs' cassandra: cassandra configuration values
21091
21162
  :param str cassandra_version: Cassandra version.
21092
21163
  :param Sequence['GetCassandaCassandraUserConfigIpFilterObjectArgs'] ip_filter_objects: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'
21093
21164
  :param Sequence[str] ip_filter_strings: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
@@ -21165,7 +21236,7 @@ class GetCassandaCassandraUserConfigResult(dict):
21165
21236
  @pulumi.getter
21166
21237
  def cassandra(self) -> Optional['outputs.GetCassandaCassandraUserConfigCassandraResult']:
21167
21238
  """
21168
- Cassandra configuration values
21239
+ cassandra configuration values
21169
21240
  """
21170
21241
  return pulumi.get(self, "cassandra")
21171
21242
 
@@ -21551,6 +21622,12 @@ class GetCassandaTechEmailResult(dict):
21551
21622
  return pulumi.get(self, "email")
21552
21623
 
21553
21624
 
21625
+ @pulumi.output_type
21626
+ class GetCassandraCassandraResult(dict):
21627
+ def __init__(__self__):
21628
+ pass
21629
+
21630
+
21554
21631
  @pulumi.output_type
21555
21632
  class GetCassandraCassandraUserConfigResult(dict):
21556
21633
  def __init__(__self__, *,
@@ -21574,7 +21651,7 @@ class GetCassandraCassandraUserConfigResult(dict):
21574
21651
  :param str additional_backup_regions: Additional Cloud Regions for Backup Replication.
21575
21652
  :param int backup_hour: The hour of day (in UTC) when backup for the service is started. New backup is only started if previous backup has already completed.
21576
21653
  :param int backup_minute: The minute of an hour when backup for the service is started. New backup is only started if previous backup has already completed.
21577
- :param 'GetCassandraCassandraUserConfigCassandraArgs' cassandra: Cassandra configuration values
21654
+ :param 'GetCassandraCassandraUserConfigCassandraArgs' cassandra: cassandra configuration values
21578
21655
  :param str cassandra_version: Cassandra version.
21579
21656
  :param Sequence['GetCassandraCassandraUserConfigIpFilterObjectArgs'] ip_filter_objects: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'
21580
21657
  :param Sequence[str] ip_filter_strings: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
@@ -21652,7 +21729,7 @@ class GetCassandraCassandraUserConfigResult(dict):
21652
21729
  @pulumi.getter
21653
21730
  def cassandra(self) -> Optional['outputs.GetCassandraCassandraUserConfigCassandraResult']:
21654
21731
  """
21655
- Cassandra configuration values
21732
+ cassandra configuration values
21656
21733
  """
21657
21734
  return pulumi.get(self, "cassandra")
21658
21735
 
@@ -22038,6 +22115,12 @@ class GetCassandraTechEmailResult(dict):
22038
22115
  return pulumi.get(self, "email")
22039
22116
 
22040
22117
 
22118
+ @pulumi.output_type
22119
+ class GetClickhouseClickhouseResult(dict):
22120
+ def __init__(__self__):
22121
+ pass
22122
+
22123
+
22041
22124
  @pulumi.output_type
22042
22125
  class GetClickhouseClickhouseUserConfigResult(dict):
22043
22126
  def __init__(__self__, *,
@@ -22644,11 +22727,16 @@ class GetDragonflyComponentResult(dict):
22644
22727
  return pulumi.get(self, "usage")
22645
22728
 
22646
22729
 
22730
+ @pulumi.output_type
22731
+ class GetDragonflyDragonflyResult(dict):
22732
+ def __init__(__self__):
22733
+ pass
22734
+
22735
+
22647
22736
  @pulumi.output_type
22648
22737
  class GetDragonflyDragonflyUserConfigResult(dict):
22649
22738
  def __init__(__self__, *,
22650
22739
  cache_mode: Optional[bool] = None,
22651
- dragonfly_persistence: Optional[str] = None,
22652
22740
  dragonfly_ssl: Optional[bool] = None,
22653
22741
  ip_filter_objects: Optional[Sequence['outputs.GetDragonflyDragonflyUserConfigIpFilterObjectResult']] = None,
22654
22742
  ip_filter_strings: Optional[Sequence[str]] = None,
@@ -22664,7 +22752,6 @@ class GetDragonflyDragonflyUserConfigResult(dict):
22664
22752
  static_ips: Optional[bool] = None):
22665
22753
  """
22666
22754
  :param bool cache_mode: Evict entries when getting close to maxmemory limit. The default value is `false`.
22667
- :param str dragonfly_persistence: When persistence is 'rdb', Dragonfly does RDB dumps each 10 minutes. Also RDB dumps are done according to backup schedule for backup purposes. When persistence is 'off', no RDB dumps and backups are done, so data can be lost at any moment if service is restarted for any reason, or if service is powered off. Also service can't be forked.
22668
22755
  :param bool dragonfly_ssl: Require SSL to access Dragonfly. The default value is `true`.
22669
22756
  :param Sequence['GetDragonflyDragonflyUserConfigIpFilterObjectArgs'] ip_filter_objects: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'
22670
22757
  :param Sequence[str] ip_filter_strings: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
@@ -22681,8 +22768,6 @@ class GetDragonflyDragonflyUserConfigResult(dict):
22681
22768
  """
22682
22769
  if cache_mode is not None:
22683
22770
  pulumi.set(__self__, "cache_mode", cache_mode)
22684
- if dragonfly_persistence is not None:
22685
- pulumi.set(__self__, "dragonfly_persistence", dragonfly_persistence)
22686
22771
  if dragonfly_ssl is not None:
22687
22772
  pulumi.set(__self__, "dragonfly_ssl", dragonfly_ssl)
22688
22773
  if ip_filter_objects is not None:
@@ -22718,14 +22803,6 @@ class GetDragonflyDragonflyUserConfigResult(dict):
22718
22803
  """
22719
22804
  return pulumi.get(self, "cache_mode")
22720
22805
 
22721
- @property
22722
- @pulumi.getter(name="dragonflyPersistence")
22723
- def dragonfly_persistence(self) -> Optional[str]:
22724
- """
22725
- When persistence is 'rdb', Dragonfly does RDB dumps each 10 minutes. Also RDB dumps are done according to backup schedule for backup purposes. When persistence is 'off', no RDB dumps and backups are done, so data can be lost at any moment if service is restarted for any reason, or if service is powered off. Also service can't be forked.
22726
- """
22727
- return pulumi.get(self, "dragonfly_persistence")
22728
-
22729
22806
  @property
22730
22807
  @pulumi.getter(name="dragonflySsl")
22731
22808
  def dragonfly_ssl(self) -> Optional[bool]:
@@ -23660,6 +23737,12 @@ class GetGrafanaComponentResult(dict):
23660
23737
  return pulumi.get(self, "usage")
23661
23738
 
23662
23739
 
23740
+ @pulumi.output_type
23741
+ class GetGrafanaGrafanaResult(dict):
23742
+ def __init__(__self__):
23743
+ pass
23744
+
23745
+
23663
23746
  @pulumi.output_type
23664
23747
  class GetGrafanaGrafanaUserConfigResult(dict):
23665
23748
  def __init__(__self__, *,
@@ -23840,6 +23923,9 @@ class GetGrafanaGrafanaUserConfigResult(dict):
23840
23923
  """
23841
23924
  Additional Cloud Regions for Backup Replication.
23842
23925
  """
23926
+ warnings.warn("""This property is deprecated.""", DeprecationWarning)
23927
+ pulumi.log.warn("""additional_backup_regions is deprecated: This property is deprecated.""")
23928
+
23843
23929
  return pulumi.get(self, "additional_backup_regions")
23844
23930
 
23845
23931
  @property
@@ -25779,6 +25865,12 @@ class GetKafkaConnectComponentResult(dict):
25779
25865
  return pulumi.get(self, "usage")
25780
25866
 
25781
25867
 
25868
+ @pulumi.output_type
25869
+ class GetKafkaConnectKafkaConnectResult(dict):
25870
+ def __init__(__self__):
25871
+ pass
25872
+
25873
+
25782
25874
  @pulumi.output_type
25783
25875
  class GetKafkaConnectKafkaConnectUserConfigResult(dict):
25784
25876
  def __init__(__self__, *,
@@ -25966,7 +26058,7 @@ class GetKafkaConnectKafkaConnectUserConfigKafkaConnectResult(dict):
25966
26058
  :param str consumer_auto_offset_reset: What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
25967
26059
  :param int consumer_fetch_max_bytes: Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not a absolute maximum.
25968
26060
  :param str consumer_isolation_level: Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
25969
- :param int consumer_max_partition_fetch_bytes: Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress.
26061
+ :param int consumer_max_partition_fetch_bytes: Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress. .
25970
26062
  :param int consumer_max_poll_interval_ms: The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).
25971
26063
  :param int consumer_max_poll_records: The maximum number of records returned in a single call to poll() (defaults to 500).
25972
26064
  :param int offset_flush_interval_ms: The interval at which to try committing offsets for tasks (defaults to 60000).
@@ -26048,7 +26140,7 @@ class GetKafkaConnectKafkaConnectUserConfigKafkaConnectResult(dict):
26048
26140
  @pulumi.getter(name="consumerMaxPartitionFetchBytes")
26049
26141
  def consumer_max_partition_fetch_bytes(self) -> Optional[int]:
26050
26142
  """
26051
- Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress.
26143
+ Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress. .
26052
26144
  """
26053
26145
  return pulumi.get(self, "consumer_max_partition_fetch_bytes")
26054
26146
 
@@ -26360,11 +26452,11 @@ class GetKafkaKafkaResult(dict):
26360
26452
  rest_uri: str,
26361
26453
  schema_registry_uri: str):
26362
26454
  """
26363
- :param str access_cert: The Kafka client certificate.
26364
- :param str access_key: The Kafka client certificate key.
26365
- :param str connect_uri: The Kafka Connect URI.
26366
- :param str rest_uri: The Kafka REST URI.
26367
- :param str schema_registry_uri: The Schema Registry URI.
26455
+ :param str access_cert: The Kafka client certificate
26456
+ :param str access_key: The Kafka client certificate key
26457
+ :param str connect_uri: The Kafka Connect URI, if any
26458
+ :param str rest_uri: The Kafka REST URI, if any
26459
+ :param str schema_registry_uri: The Schema Registry URI, if any
26368
26460
  """
26369
26461
  pulumi.set(__self__, "access_cert", access_cert)
26370
26462
  pulumi.set(__self__, "access_key", access_key)
@@ -26376,7 +26468,7 @@ class GetKafkaKafkaResult(dict):
26376
26468
  @pulumi.getter(name="accessCert")
26377
26469
  def access_cert(self) -> str:
26378
26470
  """
26379
- The Kafka client certificate.
26471
+ The Kafka client certificate
26380
26472
  """
26381
26473
  return pulumi.get(self, "access_cert")
26382
26474
 
@@ -26384,7 +26476,7 @@ class GetKafkaKafkaResult(dict):
26384
26476
  @pulumi.getter(name="accessKey")
26385
26477
  def access_key(self) -> str:
26386
26478
  """
26387
- The Kafka client certificate key.
26479
+ The Kafka client certificate key
26388
26480
  """
26389
26481
  return pulumi.get(self, "access_key")
26390
26482
 
@@ -26392,7 +26484,7 @@ class GetKafkaKafkaResult(dict):
26392
26484
  @pulumi.getter(name="connectUri")
26393
26485
  def connect_uri(self) -> str:
26394
26486
  """
26395
- The Kafka Connect URI.
26487
+ The Kafka Connect URI, if any
26396
26488
  """
26397
26489
  return pulumi.get(self, "connect_uri")
26398
26490
 
@@ -26400,7 +26492,7 @@ class GetKafkaKafkaResult(dict):
26400
26492
  @pulumi.getter(name="restUri")
26401
26493
  def rest_uri(self) -> str:
26402
26494
  """
26403
- The Kafka REST URI.
26495
+ The Kafka REST URI, if any
26404
26496
  """
26405
26497
  return pulumi.get(self, "rest_uri")
26406
26498
 
@@ -26408,7 +26500,7 @@ class GetKafkaKafkaResult(dict):
26408
26500
  @pulumi.getter(name="schemaRegistryUri")
26409
26501
  def schema_registry_uri(self) -> str:
26410
26502
  """
26411
- The Schema Registry URI.
26503
+ The Schema Registry URI, if any
26412
26504
  """
26413
26505
  return pulumi.get(self, "schema_registry_uri")
26414
26506
 
@@ -26788,7 +26880,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
26788
26880
  :param int log_index_size_max_bytes: The maximum size in bytes of the offset index.
26789
26881
  :param int log_local_retention_bytes: The maximum size of local log segments that can grow for a partition before it gets eligible for deletion. If set to -2, the value of log.retention.bytes is used. The effective value should always be less than or equal to log.retention.bytes value.
26790
26882
  :param int log_local_retention_ms: The number of milliseconds to keep the local log segments before it gets eligible for deletion. If set to -2, the value of log.retention.ms is used. The effective value should always be less than or equal to log.retention.ms value.
26791
- :param bool log_message_downconversion_enable: This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests.
26883
+ :param bool log_message_downconversion_enable: This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. .
26792
26884
  :param int log_message_timestamp_difference_max_ms: The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message.
26793
26885
  :param str log_message_timestamp_type: Define whether the timestamp in the message is message create time or log append time.
26794
26886
  :param bool log_preallocate: Should pre allocate file when create new segment?
@@ -26810,7 +26902,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
26810
26902
  :param int replica_fetch_response_max_bytes: Maximum bytes expected for the entire fetch response (defaults to 10485760). Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum.
26811
26903
  :param str sasl_oauthbearer_expected_audience: The (optional) comma-delimited setting for the broker to use to verify that the JWT was issued for one of the expected audiences.
26812
26904
  :param str sasl_oauthbearer_expected_issuer: Optional setting for the broker to use to verify that the JWT was created by the expected issuer.
26813
- :param str sasl_oauthbearer_jwks_endpoint_url: OIDC JWKS endpoint URL. By setting this the SASL SSL OAuth2/OIDC authentication is enabled. See also other options for SASL OAuth2/OIDC.
26905
+ :param str sasl_oauthbearer_jwks_endpoint_url: OIDC JWKS endpoint URL. By setting this the SASL SSL OAuth2/OIDC authentication is enabled. See also other options for SASL OAuth2/OIDC. .
26814
26906
  :param str sasl_oauthbearer_sub_claim_name: Name of the scope from which to extract the subject claim from the JWT. Defaults to sub.
26815
26907
  :param int socket_request_max_bytes: The maximum number of bytes in a socket request (defaults to 104857600).
26816
26908
  :param bool transaction_partition_verification_enable: Enable verification that checks that the partition has been added to the transaction before writing transactional records to the partition.
@@ -27058,7 +27150,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
27058
27150
  @pulumi.getter(name="logMessageDownconversionEnable")
27059
27151
  def log_message_downconversion_enable(self) -> Optional[bool]:
27060
27152
  """
27061
- This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests.
27153
+ This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. .
27062
27154
  """
27063
27155
  return pulumi.get(self, "log_message_downconversion_enable")
27064
27156
 
@@ -27234,7 +27326,7 @@ class GetKafkaKafkaUserConfigKafkaResult(dict):
27234
27326
  @pulumi.getter(name="saslOauthbearerJwksEndpointUrl")
27235
27327
  def sasl_oauthbearer_jwks_endpoint_url(self) -> Optional[str]:
27236
27328
  """
27237
- OIDC JWKS endpoint URL. By setting this the SASL SSL OAuth2/OIDC authentication is enabled. See also other options for SASL OAuth2/OIDC.
27329
+ OIDC JWKS endpoint URL. By setting this the SASL SSL OAuth2/OIDC authentication is enabled. See also other options for SASL OAuth2/OIDC. .
27238
27330
  """
27239
27331
  return pulumi.get(self, "sasl_oauthbearer_jwks_endpoint_url")
27240
27332
 
@@ -27334,7 +27426,7 @@ class GetKafkaKafkaUserConfigKafkaConnectConfigResult(dict):
27334
27426
  :param str consumer_auto_offset_reset: What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
27335
27427
  :param int consumer_fetch_max_bytes: Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not a absolute maximum.
27336
27428
  :param str consumer_isolation_level: Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
27337
- :param int consumer_max_partition_fetch_bytes: Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress.
27429
+ :param int consumer_max_partition_fetch_bytes: Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress. .
27338
27430
  :param int consumer_max_poll_interval_ms: The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).
27339
27431
  :param int consumer_max_poll_records: The maximum number of records returned in a single call to poll() (defaults to 500).
27340
27432
  :param int offset_flush_interval_ms: The interval at which to try committing offsets for tasks (defaults to 60000).
@@ -27416,7 +27508,7 @@ class GetKafkaKafkaUserConfigKafkaConnectConfigResult(dict):
27416
27508
  @pulumi.getter(name="consumerMaxPartitionFetchBytes")
27417
27509
  def consumer_max_partition_fetch_bytes(self) -> Optional[int]:
27418
27510
  """
27419
- Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress.
27511
+ Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress. .
27420
27512
  """
27421
27513
  return pulumi.get(self, "consumer_max_partition_fetch_bytes")
27422
27514
 
@@ -28031,6 +28123,12 @@ class GetKafkaMirrorMakerComponentResult(dict):
28031
28123
  return pulumi.get(self, "usage")
28032
28124
 
28033
28125
 
28126
+ @pulumi.output_type
28127
+ class GetKafkaMirrorMakerKafkaMirrormakerResult(dict):
28128
+ def __init__(__self__):
28129
+ pass
28130
+
28131
+
28034
28132
  @pulumi.output_type
28035
28133
  class GetKafkaMirrorMakerKafkaMirrormakerUserConfigResult(dict):
28036
28134
  def __init__(__self__, *,
@@ -28932,6 +29030,12 @@ class GetM3AggregatorComponentResult(dict):
28932
29030
  return pulumi.get(self, "usage")
28933
29031
 
28934
29032
 
29033
+ @pulumi.output_type
29034
+ class GetM3AggregatorM3aggregatorResult(dict):
29035
+ def __init__(__self__):
29036
+ pass
29037
+
29038
+
28935
29039
  @pulumi.output_type
28936
29040
  class GetM3AggregatorM3aggregatorUserConfigResult(dict):
28937
29041
  def __init__(__self__, *,
@@ -29239,6 +29343,12 @@ class GetM3DbComponentResult(dict):
29239
29343
  return pulumi.get(self, "usage")
29240
29344
 
29241
29345
 
29346
+ @pulumi.output_type
29347
+ class GetM3DbM3dbResult(dict):
29348
+ def __init__(__self__):
29349
+ pass
29350
+
29351
+
29242
29352
  @pulumi.output_type
29243
29353
  class GetM3DbM3dbUserConfigResult(dict):
29244
29354
  def __init__(__self__, *,
@@ -29323,6 +29433,9 @@ class GetM3DbM3dbUserConfigResult(dict):
29323
29433
  """
29324
29434
  Additional Cloud Regions for Backup Replication.
29325
29435
  """
29436
+ warnings.warn("""This property is deprecated.""", DeprecationWarning)
29437
+ pulumi.log.warn("""additional_backup_regions is deprecated: This property is deprecated.""")
29438
+
29326
29439
  return pulumi.get(self, "additional_backup_regions")
29327
29440
 
29328
29441
  @property
@@ -30178,6 +30291,12 @@ class GetMySqlComponentResult(dict):
30178
30291
  return pulumi.get(self, "usage")
30179
30292
 
30180
30293
 
30294
+ @pulumi.output_type
30295
+ class GetMySqlMysqlResult(dict):
30296
+ def __init__(__self__):
30297
+ pass
30298
+
30299
+
30181
30300
  @pulumi.output_type
30182
30301
  class GetMySqlMysqlUserConfigResult(dict):
30183
30302
  def __init__(__self__, *,
@@ -31280,7 +31399,7 @@ class GetOpenSearchOpensearchUserConfigResult(dict):
31280
31399
  :param Sequence[str] ip_filter_strings: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
31281
31400
  :param Sequence[str] ip_filters: Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
31282
31401
  :param bool keep_index_refresh_interval: Aiven automation resets index.refresh_interval to default value for every index to be sure that indices are always visible to search. If it doesn't fit your case, you can disable this by setting up this flag to true.
31283
- :param int max_index_count: Use index_patterns instead. The default value is `0`.
31402
+ :param int max_index_count: use index_patterns instead. The default value is `0`.
31284
31403
  :param 'GetOpenSearchOpensearchUserConfigOpenidArgs' openid: OpenSearch OpenID Connect Configuration
31285
31404
  :param 'GetOpenSearchOpensearchUserConfigOpensearchArgs' opensearch: OpenSearch settings
31286
31405
  :param 'GetOpenSearchOpensearchUserConfigOpensearchDashboardsArgs' opensearch_dashboards: OpenSearch Dashboards settings
@@ -31348,6 +31467,9 @@ class GetOpenSearchOpensearchUserConfigResult(dict):
31348
31467
  """
31349
31468
  Additional Cloud Regions for Backup Replication.
31350
31469
  """
31470
+ warnings.warn("""This property is deprecated.""", DeprecationWarning)
31471
+ pulumi.log.warn("""additional_backup_regions is deprecated: This property is deprecated.""")
31472
+
31351
31473
  return pulumi.get(self, "additional_backup_regions")
31352
31474
 
31353
31475
  @property
@@ -31421,7 +31543,7 @@ class GetOpenSearchOpensearchUserConfigResult(dict):
31421
31543
  @pulumi.getter(name="maxIndexCount")
31422
31544
  def max_index_count(self) -> Optional[int]:
31423
31545
  """
31424
- Use index_patterns instead. The default value is `0`.
31546
+ use index_patterns instead. The default value is `0`.
31425
31547
  """
31426
31548
  return pulumi.get(self, "max_index_count")
31427
31549
 
@@ -31850,10 +31972,10 @@ class GetOpenSearchOpensearchUserConfigOpensearchResult(dict):
31850
31972
  :param int http_max_header_size: The max size of allowed headers, in bytes.
31851
31973
  :param int http_max_initial_line_length: The max length of an HTTP URL, in bytes.
31852
31974
  :param int indices_fielddata_cache_size: Relative amount. Maximum amount of heap memory used for field data cache. This is an expert setting; decreasing the value too much will increase overhead of loading field data; too much memory used for field data cache will decrease amount of heap available for other operations.
31853
- :param int indices_memory_index_buffer_size: Percentage value. Default is 10%. Total amount of heap used for indexing buffer, before writing segments to disk. This is an expert setting. Too low value will slow down indexing; too high value will increase indexing performance but causes performance issues for query performance.
31975
+ :param int indices_memory_index_buffer_size: Percentage value. Default is 10%! (MISSING)Total amount of heap used for indexing buffer, before writing segments to disk. This is an expert setting. Too low value will slow down indexing; too high value will increase indexing performance but causes performance issues for query performance.
31854
31976
  :param int indices_memory_max_index_buffer_size: Absolute value. Default is unbound. Doesn't work without indices.memory.index_buffer_size. Maximum amount of heap used for query cache, an absolute indices.memory.index_buffer_size maximum hard limit.
31855
31977
  :param int indices_memory_min_index_buffer_size: Absolute value. Default is 48mb. Doesn't work without indices.memory.index_buffer_size. Minimum amount of heap used for query cache, an absolute indices.memory.index_buffer_size minimal hard limit.
31856
- :param int indices_queries_cache_size: Percentage value. Default is 10%. Maximum amount of heap used for query cache. This is an expert setting. Too low value will decrease query performance and increase performance for other operations; too high value will cause issues with other OpenSearch functionality.
31978
+ :param int indices_queries_cache_size: Percentage value. Default is 10%! (MISSING)Maximum amount of heap used for query cache. This is an expert setting. Too low value will decrease query performance and increase performance for other operations; too high value will cause issues with other OpenSearch functionality.
31857
31979
  :param int indices_query_bool_max_clause_count: Maximum number of clauses Lucene BooleanQuery can have. The default value (1024) is relatively high, and increasing it may cause performance issues. Investigate other approaches first before increasing this value.
31858
31980
  :param int indices_recovery_max_bytes_per_sec: Limits total inbound and outbound recovery traffic for each node. Applies to both peer recoveries as well as snapshot recoveries (i.e., restores from a snapshot). Defaults to 40mb.
31859
31981
  :param int indices_recovery_max_concurrent_file_chunks: Number of file chunks sent in parallel for each recovery. Defaults to 2.
@@ -32073,7 +32195,7 @@ class GetOpenSearchOpensearchUserConfigOpensearchResult(dict):
32073
32195
  @pulumi.getter(name="indicesMemoryIndexBufferSize")
32074
32196
  def indices_memory_index_buffer_size(self) -> Optional[int]:
32075
32197
  """
32076
- Percentage value. Default is 10%. Total amount of heap used for indexing buffer, before writing segments to disk. This is an expert setting. Too low value will slow down indexing; too high value will increase indexing performance but causes performance issues for query performance.
32198
+ Percentage value. Default is 10%! (MISSING)Total amount of heap used for indexing buffer, before writing segments to disk. This is an expert setting. Too low value will slow down indexing; too high value will increase indexing performance but causes performance issues for query performance.
32077
32199
  """
32078
32200
  return pulumi.get(self, "indices_memory_index_buffer_size")
32079
32201
 
@@ -32097,7 +32219,7 @@ class GetOpenSearchOpensearchUserConfigOpensearchResult(dict):
32097
32219
  @pulumi.getter(name="indicesQueriesCacheSize")
32098
32220
  def indices_queries_cache_size(self) -> Optional[int]:
32099
32221
  """
32100
- Percentage value. Default is 10%. Maximum amount of heap used for query cache. This is an expert setting. Too low value will decrease query performance and increase performance for other operations; too high value will cause issues with other OpenSearch functionality.
32222
+ Percentage value. Default is 10%! (MISSING)Maximum amount of heap used for query cache. This is an expert setting. Too low value will decrease query performance and increase performance for other operations; too high value will cause issues with other OpenSearch functionality.
32101
32223
  """
32102
32224
  return pulumi.get(self, "indices_queries_cache_size")
32103
32225
 
@@ -33097,12 +33219,12 @@ class GetPgPgUserConfigResult(dict):
33097
33219
  :param str recovery_target_time: Recovery target time when forking a service. This has effect only when a new service is being created.
33098
33220
  :param bool service_log: Store logs for the service so that they are available in the HTTP API and console.
33099
33221
  :param str service_to_fork_from: Name of another service to fork from. This has effect only when a new service is being created.
33100
- :param float shared_buffers_percentage: Percentage of total RAM that the database server uses for shared memory buffers. Valid range is 20-60 (float), which corresponds to 20% - 60%. This setting adjusts the shared_buffers configuration value.
33222
+ :param float shared_buffers_percentage: Percentage of total RAM that the database server uses for shared memory buffers. Valid range is 20-60 (float), which corresponds to 20%. This setting adjusts the shared_buffers configuration value.
33101
33223
  :param bool static_ips: Use static public IP addresses.
33102
33224
  :param str synchronous_replication: Synchronous replication type. Note that the service plan also needs to support synchronous replication.
33103
33225
  :param 'GetPgPgUserConfigTimescaledbArgs' timescaledb: System-wide settings for the timescaledb extension
33104
33226
  :param str variant: Variant of the PostgreSQL service, may affect the features that are exposed by default.
33105
- :param int work_mem: Sets the maximum amount of memory to be used by a query operation (such as a sort or hash table) before writing to temporary disk files, in MB. Default is 1MB + 0.075% of total RAM (up to 32MB).
33227
+ :param int work_mem: Sets the maximum amount of memory to be used by a query operation (such as a sort or hash table) before writing to temporary disk files, in MB. Default is 1MB + 0.075%!o(MISSING)f total RAM (up to 32MB).
33106
33228
  """
33107
33229
  if additional_backup_regions is not None:
33108
33230
  pulumi.set(__self__, "additional_backup_regions", additional_backup_regions)
@@ -33387,7 +33509,7 @@ class GetPgPgUserConfigResult(dict):
33387
33509
  @pulumi.getter(name="sharedBuffersPercentage")
33388
33510
  def shared_buffers_percentage(self) -> Optional[float]:
33389
33511
  """
33390
- Percentage of total RAM that the database server uses for shared memory buffers. Valid range is 20-60 (float), which corresponds to 20% - 60%. This setting adjusts the shared_buffers configuration value.
33512
+ Percentage of total RAM that the database server uses for shared memory buffers. Valid range is 20-60 (float), which corresponds to 20%. This setting adjusts the shared_buffers configuration value.
33391
33513
  """
33392
33514
  return pulumi.get(self, "shared_buffers_percentage")
33393
33515
 
@@ -33427,7 +33549,7 @@ class GetPgPgUserConfigResult(dict):
33427
33549
  @pulumi.getter(name="workMem")
33428
33550
  def work_mem(self) -> Optional[int]:
33429
33551
  """
33430
- Sets the maximum amount of memory to be used by a query operation (such as a sort or hash table) before writing to temporary disk files, in MB. Default is 1MB + 0.075% of total RAM (up to 32MB).
33552
+ Sets the maximum amount of memory to be used by a query operation (such as a sort or hash table) before writing to temporary disk files, in MB. Default is 1MB + 0.075%!o(MISSING)f total RAM (up to 32MB).
33431
33553
  """
33432
33554
  return pulumi.get(self, "work_mem")
33433
33555
 
@@ -33616,14 +33738,14 @@ class GetPgPgUserConfigPgResult(dict):
33616
33738
  wal_sender_timeout: Optional[int] = None,
33617
33739
  wal_writer_delay: Optional[int] = None):
33618
33740
  """
33619
- :param float autovacuum_analyze_scale_factor: Specifies a fraction of the table size to add to autovacuum_analyze_threshold when deciding whether to trigger an ANALYZE. The default is 0.2 (20% of table size).
33741
+ :param float autovacuum_analyze_scale_factor: Specifies a fraction of the table size to add to autovacuum_analyze_threshold when deciding whether to trigger an ANALYZE. The default is 0.2 (20%!o(MISSING)f table size).
33620
33742
  :param int autovacuum_analyze_threshold: Specifies the minimum number of inserted, updated or deleted tuples needed to trigger an ANALYZE in any one table. The default is 50 tuples.
33621
33743
  :param int autovacuum_freeze_max_age: Specifies the maximum age (in transactions) that a table's pg_class.relfrozenxid field can attain before a VACUUM operation is forced to prevent transaction ID wraparound within the table. Note that the system will launch autovacuum processes to prevent wraparound even when autovacuum is otherwise disabled. This parameter will cause the server to be restarted.
33622
33744
  :param int autovacuum_max_workers: Specifies the maximum number of autovacuum processes (other than the autovacuum launcher) that may be running at any one time. The default is three. This parameter can only be set at server start.
33623
33745
  :param int autovacuum_naptime: Specifies the minimum delay between autovacuum runs on any given database. The delay is measured in seconds, and the default is one minute.
33624
33746
  :param int autovacuum_vacuum_cost_delay: Specifies the cost delay value that will be used in automatic VACUUM operations. If -1 is specified, the regular vacuum_cost_delay value will be used. The default value is 20 milliseconds.
33625
33747
  :param int autovacuum_vacuum_cost_limit: Specifies the cost limit value that will be used in automatic VACUUM operations. If -1 is specified (which is the default), the regular vacuum_cost_limit value will be used.
33626
- :param float autovacuum_vacuum_scale_factor: Specifies a fraction of the table size to add to autovacuum_vacuum_threshold when deciding whether to trigger a VACUUM. The default is 0.2 (20% of table size).
33748
+ :param float autovacuum_vacuum_scale_factor: Specifies a fraction of the table size to add to autovacuum_vacuum_threshold when deciding whether to trigger a VACUUM. The default is 0.2 (20%!o(MISSING)f table size).
33627
33749
  :param int autovacuum_vacuum_threshold: Specifies the minimum number of updated or deleted tuples needed to trigger a VACUUM in any one table. The default is 50 tuples.
33628
33750
  :param int bgwriter_delay: Specifies the delay between activity rounds for the background writer in milliseconds. Default is 200.
33629
33751
  :param int bgwriter_flush_after: Whenever more than bgwriter_flush_after bytes have been written by the background writer, attempt to force the OS to issue these writes to the underlying storage. Specified in kilobytes, default is 512. Setting of 0 disables forced writeback.
@@ -33655,7 +33777,7 @@ class GetPgPgUserConfigPgResult(dict):
33655
33777
  :param int pg_partman_bgw_dot_interval: Sets the time interval to run pg_partman's scheduled tasks.
33656
33778
  :param str pg_partman_bgw_dot_role: Controls which role to use for pg_partman's scheduled background tasks.
33657
33779
  :param bool pg_stat_monitor_dot_pgsm_enable_query_plan: Enables or disables query plan monitoring.
33658
- :param int pg_stat_monitor_dot_pgsm_max_buckets: Sets the maximum number of buckets.
33780
+ :param int pg_stat_monitor_dot_pgsm_max_buckets: Sets the maximum number of buckets .
33659
33781
  :param str pg_stat_statements_dot_track: Controls which statements are counted. Specify top to track top-level statements (those issued directly by clients), all to also track nested statements (such as statements invoked within functions), or none to disable statement statistics collection. The default value is top.
33660
33782
  :param int temp_file_limit: PostgreSQL temporary file limit in KiB, -1 for unlimited.
33661
33783
  :param str timezone: PostgreSQL service timezone.
@@ -33769,7 +33891,7 @@ class GetPgPgUserConfigPgResult(dict):
33769
33891
  @pulumi.getter(name="autovacuumAnalyzeScaleFactor")
33770
33892
  def autovacuum_analyze_scale_factor(self) -> Optional[float]:
33771
33893
  """
33772
- Specifies a fraction of the table size to add to autovacuum_analyze_threshold when deciding whether to trigger an ANALYZE. The default is 0.2 (20% of table size).
33894
+ Specifies a fraction of the table size to add to autovacuum_analyze_threshold when deciding whether to trigger an ANALYZE. The default is 0.2 (20%!o(MISSING)f table size).
33773
33895
  """
33774
33896
  return pulumi.get(self, "autovacuum_analyze_scale_factor")
33775
33897
 
@@ -33825,7 +33947,7 @@ class GetPgPgUserConfigPgResult(dict):
33825
33947
  @pulumi.getter(name="autovacuumVacuumScaleFactor")
33826
33948
  def autovacuum_vacuum_scale_factor(self) -> Optional[float]:
33827
33949
  """
33828
- Specifies a fraction of the table size to add to autovacuum_vacuum_threshold when deciding whether to trigger a VACUUM. The default is 0.2 (20% of table size).
33950
+ Specifies a fraction of the table size to add to autovacuum_vacuum_threshold when deciding whether to trigger a VACUUM. The default is 0.2 (20%!o(MISSING)f table size).
33829
33951
  """
33830
33952
  return pulumi.get(self, "autovacuum_vacuum_scale_factor")
33831
33953
 
@@ -34081,7 +34203,7 @@ class GetPgPgUserConfigPgResult(dict):
34081
34203
  @pulumi.getter(name="pgStatMonitorDotPgsmMaxBuckets")
34082
34204
  def pg_stat_monitor_dot_pgsm_max_buckets(self) -> Optional[int]:
34083
34205
  """
34084
- Sets the maximum number of buckets.
34206
+ Sets the maximum number of buckets .
34085
34207
  """
34086
34208
  return pulumi.get(self, "pg_stat_monitor_dot_pgsm_max_buckets")
34087
34209
 
@@ -34779,8 +34901,8 @@ class GetProjectTagResult(dict):
34779
34901
  key: str,
34780
34902
  value: str):
34781
34903
  """
34782
- :param str key: Project tag key.
34783
- :param str value: Project tag value.
34904
+ :param str key: Project tag key
34905
+ :param str value: Project tag value
34784
34906
  """
34785
34907
  pulumi.set(__self__, "key", key)
34786
34908
  pulumi.set(__self__, "value", value)
@@ -34789,7 +34911,7 @@ class GetProjectTagResult(dict):
34789
34911
  @pulumi.getter
34790
34912
  def key(self) -> str:
34791
34913
  """
34792
- Project tag key.
34914
+ Project tag key
34793
34915
  """
34794
34916
  return pulumi.get(self, "key")
34795
34917
 
@@ -34797,7 +34919,7 @@ class GetProjectTagResult(dict):
34797
34919
  @pulumi.getter
34798
34920
  def value(self) -> str:
34799
34921
  """
34800
- Project tag value.
34922
+ Project tag value
34801
34923
  """
34802
34924
  return pulumi.get(self, "value")
34803
34925
 
@@ -34897,6 +35019,12 @@ class GetRedisComponentResult(dict):
34897
35019
  return pulumi.get(self, "usage")
34898
35020
 
34899
35021
 
35022
+ @pulumi.output_type
35023
+ class GetRedisRediResult(dict):
35024
+ def __init__(__self__):
35025
+ pass
35026
+
35027
+
34900
35028
  @pulumi.output_type
34901
35029
  class GetRedisRedisUserConfigResult(dict):
34902
35030
  def __init__(__self__, *,
@@ -35009,6 +35137,9 @@ class GetRedisRedisUserConfigResult(dict):
35009
35137
  """
35010
35138
  Additional Cloud Regions for Backup Replication.
35011
35139
  """
35140
+ warnings.warn("""This property is deprecated.""", DeprecationWarning)
35141
+ pulumi.log.warn("""additional_backup_regions is deprecated: This property is deprecated.""")
35142
+
35012
35143
  return pulumi.get(self, "additional_backup_regions")
35013
35144
 
35014
35145
  @property
@@ -35941,18 +36072,14 @@ class GetServiceIntegrationDatadogUserConfigDatadogTagResult(dict):
35941
36072
  @pulumi.output_type
35942
36073
  class GetServiceIntegrationDatadogUserConfigOpensearchResult(dict):
35943
36074
  def __init__(__self__, *,
35944
- cluster_stats_enabled: Optional[bool] = None,
35945
36075
  index_stats_enabled: Optional[bool] = None,
35946
36076
  pending_task_stats_enabled: Optional[bool] = None,
35947
36077
  pshard_stats_enabled: Optional[bool] = None):
35948
36078
  """
35949
- :param bool cluster_stats_enabled: Enable Datadog Opensearch Cluster Monitoring.
35950
36079
  :param bool index_stats_enabled: Enable Datadog Opensearch Index Monitoring.
35951
36080
  :param bool pending_task_stats_enabled: Enable Datadog Opensearch Pending Task Monitoring.
35952
36081
  :param bool pshard_stats_enabled: Enable Datadog Opensearch Primary Shard Monitoring.
35953
36082
  """
35954
- if cluster_stats_enabled is not None:
35955
- pulumi.set(__self__, "cluster_stats_enabled", cluster_stats_enabled)
35956
36083
  if index_stats_enabled is not None:
35957
36084
  pulumi.set(__self__, "index_stats_enabled", index_stats_enabled)
35958
36085
  if pending_task_stats_enabled is not None:
@@ -35960,14 +36087,6 @@ class GetServiceIntegrationDatadogUserConfigOpensearchResult(dict):
35960
36087
  if pshard_stats_enabled is not None:
35961
36088
  pulumi.set(__self__, "pshard_stats_enabled", pshard_stats_enabled)
35962
36089
 
35963
- @property
35964
- @pulumi.getter(name="clusterStatsEnabled")
35965
- def cluster_stats_enabled(self) -> Optional[bool]:
35966
- """
35967
- Enable Datadog Opensearch Cluster Monitoring.
35968
- """
35969
- return pulumi.get(self, "cluster_stats_enabled")
35970
-
35971
36090
  @property
35972
36091
  @pulumi.getter(name="indexStatsEnabled")
35973
36092
  def index_stats_enabled(self) -> Optional[bool]:
@@ -36307,7 +36426,7 @@ class GetServiceIntegrationEndpointExternalGoogleCloudBigqueryResult(dict):
36307
36426
  service_account_credentials: str):
36308
36427
  """
36309
36428
  :param str project_id: GCP project id.
36310
- :param str service_account_credentials: This is a JSON object with the fields documented in https://cloud.google.com/iam/docs/creating-managing-service-account-keys.
36429
+ :param str service_account_credentials: This is a JSON object with the fields documented in https://cloud.google.com/iam/docs/creating-managing-service-account-keys .
36311
36430
  """
36312
36431
  pulumi.set(__self__, "project_id", project_id)
36313
36432
  pulumi.set(__self__, "service_account_credentials", service_account_credentials)
@@ -36324,7 +36443,7 @@ class GetServiceIntegrationEndpointExternalGoogleCloudBigqueryResult(dict):
36324
36443
  @pulumi.getter(name="serviceAccountCredentials")
36325
36444
  def service_account_credentials(self) -> str:
36326
36445
  """
36327
- This is a JSON object with the fields documented in https://cloud.google.com/iam/docs/creating-managing-service-account-keys.
36446
+ This is a JSON object with the fields documented in https://cloud.google.com/iam/docs/creating-managing-service-account-keys .
36328
36447
  """
36329
36448
  return pulumi.get(self, "service_account_credentials")
36330
36449
 
@@ -36338,7 +36457,7 @@ class GetServiceIntegrationEndpointExternalGoogleCloudLoggingUserConfigResult(di
36338
36457
  """
36339
36458
  :param str log_id: Google Cloud Logging log id.
36340
36459
  :param str project_id: GCP project id.
36341
- :param str service_account_credentials: This is a JSON object with the fields documented in https://cloud.google.com/iam/docs/creating-managing-service-account-keys.
36460
+ :param str service_account_credentials: This is a JSON object with the fields documented in https://cloud.google.com/iam/docs/creating-managing-service-account-keys .
36342
36461
  """
36343
36462
  pulumi.set(__self__, "log_id", log_id)
36344
36463
  pulumi.set(__self__, "project_id", project_id)
@@ -36364,7 +36483,7 @@ class GetServiceIntegrationEndpointExternalGoogleCloudLoggingUserConfigResult(di
36364
36483
  @pulumi.getter(name="serviceAccountCredentials")
36365
36484
  def service_account_credentials(self) -> str:
36366
36485
  """
36367
- This is a JSON object with the fields documented in https://cloud.google.com/iam/docs/creating-managing-service-account-keys.
36486
+ This is a JSON object with the fields documented in https://cloud.google.com/iam/docs/creating-managing-service-account-keys .
36368
36487
  """
36369
36488
  return pulumi.get(self, "service_account_credentials")
36370
36489
 
@@ -37405,7 +37524,7 @@ class GetServiceIntegrationMetricsUserConfigSourceMysqlTelegrafResult(dict):
37405
37524
  perf_events_statements_time_limit: Optional[int] = None):
37406
37525
  """
37407
37526
  :param bool gather_event_waits: Gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS.
37408
- :param bool gather_file_events_stats: Gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME.
37527
+ :param bool gather_file_events_stats: gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME.
37409
37528
  :param bool gather_index_io_waits: Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE.
37410
37529
  :param bool gather_info_schema_auto_inc: Gather auto_increment columns and max values from information schema.
37411
37530
  :param bool gather_innodb_metrics: Gather metrics from INFORMATION_SCHEMA.INNODB_METRICS.
@@ -37460,7 +37579,7 @@ class GetServiceIntegrationMetricsUserConfigSourceMysqlTelegrafResult(dict):
37460
37579
  @pulumi.getter(name="gatherFileEventsStats")
37461
37580
  def gather_file_events_stats(self) -> Optional[bool]:
37462
37581
  """
37463
- Gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME.
37582
+ gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME.
37464
37583
  """
37465
37584
  return pulumi.get(self, "gather_file_events_stats")
37466
37585
 
@@ -37618,7 +37737,7 @@ class GetServiceIntegrationPrometheusUserConfigSourceMysqlTelegrafResult(dict):
37618
37737
  perf_events_statements_time_limit: Optional[int] = None):
37619
37738
  """
37620
37739
  :param bool gather_event_waits: Gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS.
37621
- :param bool gather_file_events_stats: Gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME.
37740
+ :param bool gather_file_events_stats: gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME.
37622
37741
  :param bool gather_index_io_waits: Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE.
37623
37742
  :param bool gather_info_schema_auto_inc: Gather auto_increment columns and max values from information schema.
37624
37743
  :param bool gather_innodb_metrics: Gather metrics from INFORMATION_SCHEMA.INNODB_METRICS.
@@ -37673,7 +37792,7 @@ class GetServiceIntegrationPrometheusUserConfigSourceMysqlTelegrafResult(dict):
37673
37792
  @pulumi.getter(name="gatherFileEventsStats")
37674
37793
  def gather_file_events_stats(self) -> Optional[bool]:
37675
37794
  """
37676
- Gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME.
37795
+ gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME.
37677
37796
  """
37678
37797
  return pulumi.get(self, "gather_file_events_stats")
37679
37798