pulumi-aiven 6.38.0a1747372094-py3-none-any.whl → 6.38.0a1747647727-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Files changed (31)
  1. pulumi_aiven/__init__.py +9 -0
  2. pulumi_aiven/_inputs.py +1302 -160
  3. pulumi_aiven/clickhouse_grant.py +18 -0
  4. pulumi_aiven/get_external_identity.py +5 -5
  5. pulumi_aiven/get_open_search.py +7 -7
  6. pulumi_aiven/get_open_search_acl_config.py +7 -7
  7. pulumi_aiven/get_open_search_acl_rule.py +19 -19
  8. pulumi_aiven/get_opensearch_security_plugin_config.py +7 -7
  9. pulumi_aiven/get_opensearch_user.py +13 -13
  10. pulumi_aiven/get_organization.py +24 -6
  11. pulumi_aiven/get_organization_address.py +52 -29
  12. pulumi_aiven/get_organization_billing_group.py +38 -15
  13. pulumi_aiven/get_organization_billing_group_list.py +40 -12
  14. pulumi_aiven/get_organization_project.py +50 -14
  15. pulumi_aiven/governance_access.py +418 -0
  16. pulumi_aiven/open_search.py +17 -19
  17. pulumi_aiven/open_search_acl_config.py +29 -41
  18. pulumi_aiven/open_search_acl_rule.py +47 -47
  19. pulumi_aiven/opensearch_security_plugin_config.py +14 -42
  20. pulumi_aiven/opensearch_user.py +32 -32
  21. pulumi_aiven/organization.py +24 -19
  22. pulumi_aiven/organization_address.py +102 -96
  23. pulumi_aiven/organization_application_user.py +9 -0
  24. pulumi_aiven/organization_billing_group.py +66 -60
  25. pulumi_aiven/organization_project.py +111 -62
  26. pulumi_aiven/outputs.py +1039 -189
  27. pulumi_aiven/pulumi-plugin.json +1 -1
  28. {pulumi_aiven-6.38.0a1747372094.dist-info → pulumi_aiven-6.38.0a1747647727.dist-info}/METADATA +1 -1
  29. {pulumi_aiven-6.38.0a1747372094.dist-info → pulumi_aiven-6.38.0a1747647727.dist-info}/RECORD +31 -30
  30. {pulumi_aiven-6.38.0a1747372094.dist-info → pulumi_aiven-6.38.0a1747647727.dist-info}/WHEEL +0 -0
  31. {pulumi_aiven-6.38.0a1747372094.dist-info → pulumi_aiven-6.38.0a1747647727.dist-info}/top_level.txt +0 -0
pulumi_aiven/outputs.py CHANGED
@@ -23,6 +23,7 @@ __all__ = [
23
23
  'AlloydbomniAlloydbomniUserConfig',
24
24
  'AlloydbomniAlloydbomniUserConfigIpFilterObject',
25
25
  'AlloydbomniAlloydbomniUserConfigPg',
26
+ 'AlloydbomniAlloydbomniUserConfigPgaudit',
26
27
  'AlloydbomniAlloydbomniUserConfigPgbouncer',
27
28
  'AlloydbomniAlloydbomniUserConfigPglookout',
28
29
  'AlloydbomniAlloydbomniUserConfigPrivateAccess',
@@ -80,6 +81,8 @@ __all__ = [
80
81
  'FlinkServiceIntegration',
81
82
  'FlinkTag',
82
83
  'FlinkTechEmail',
84
+ 'GovernanceAccessAccessData',
85
+ 'GovernanceAccessAccessDataAcl',
83
86
  'GrafanaComponent',
84
87
  'GrafanaGrafana',
85
88
  'GrafanaGrafanaUserConfig',
@@ -132,6 +135,7 @@ __all__ = [
132
135
  'KafkaKafkaUserConfigKafka',
133
136
  'KafkaKafkaUserConfigKafkaAuthenticationMethods',
134
137
  'KafkaKafkaUserConfigKafkaConnectConfig',
138
+ 'KafkaKafkaUserConfigKafkaConnectPluginVersion',
135
139
  'KafkaKafkaUserConfigKafkaConnectSecretProvider',
136
140
  'KafkaKafkaUserConfigKafkaConnectSecretProviderAws',
137
141
  'KafkaKafkaUserConfigKafkaConnectSecretProviderVault',
@@ -240,6 +244,7 @@ __all__ = [
240
244
  'OrganizationGroupProjectTimeouts',
241
245
  'OrganizationPermissionPermission',
242
246
  'OrganizationProjectTag',
247
+ 'OrganizationProjectTimeouts',
243
248
  'OrganizationTimeouts',
244
249
  'OrganizationUserGroupMemberTimeouts',
245
250
  'PgComponent',
@@ -352,6 +357,7 @@ __all__ = [
352
357
  'GetAlloydbomniAlloydbomniUserConfigResult',
353
358
  'GetAlloydbomniAlloydbomniUserConfigIpFilterObjectResult',
354
359
  'GetAlloydbomniAlloydbomniUserConfigPgResult',
360
+ 'GetAlloydbomniAlloydbomniUserConfigPgauditResult',
355
361
  'GetAlloydbomniAlloydbomniUserConfigPgbouncerResult',
356
362
  'GetAlloydbomniAlloydbomniUserConfigPglookoutResult',
357
363
  'GetAlloydbomniAlloydbomniUserConfigPrivateAccessResult',
@@ -465,6 +471,7 @@ __all__ = [
465
471
  'GetKafkaKafkaUserConfigKafkaResult',
466
472
  'GetKafkaKafkaUserConfigKafkaAuthenticationMethodsResult',
467
473
  'GetKafkaKafkaUserConfigKafkaConnectConfigResult',
474
+ 'GetKafkaKafkaUserConfigKafkaConnectPluginVersionResult',
468
475
  'GetKafkaKafkaUserConfigKafkaConnectSecretProviderResult',
469
476
  'GetKafkaKafkaUserConfigKafkaConnectSecretProviderAwsResult',
470
477
  'GetKafkaKafkaUserConfigKafkaConnectSecretProviderVaultResult',
@@ -568,8 +575,13 @@ __all__ = [
568
575
  'GetOpenSearchServiceIntegrationResult',
569
576
  'GetOpenSearchTagResult',
570
577
  'GetOpenSearchTechEmailResult',
578
+ 'GetOrganizationAddressTimeoutsResult',
571
579
  'GetOrganizationBillingGroupListBillingGroupResult',
580
+ 'GetOrganizationBillingGroupListTimeoutsResult',
581
+ 'GetOrganizationBillingGroupTimeoutsResult',
572
582
  'GetOrganizationProjectTagResult',
583
+ 'GetOrganizationProjectTimeoutsResult',
584
+ 'GetOrganizationTimeoutsResult',
573
585
  'GetOrganizationUserListUserResult',
574
586
  'GetOrganizationUserListUserUserInfoResult',
575
587
  'GetPgComponentResult',
@@ -1147,6 +1159,7 @@ class AlloydbomniAlloydbomniUserConfig(dict):
1147
1159
  pg_read_replica: Optional[builtins.bool] = None,
1148
1160
  pg_service_to_fork_from: Optional[builtins.str] = None,
1149
1161
  pg_version: Optional[builtins.str] = None,
1162
+ pgaudit: Optional['outputs.AlloydbomniAlloydbomniUserConfigPgaudit'] = None,
1150
1163
  pgbouncer: Optional['outputs.AlloydbomniAlloydbomniUserConfigPgbouncer'] = None,
1151
1164
  pglookout: Optional['outputs.AlloydbomniAlloydbomniUserConfigPglookout'] = None,
1152
1165
  private_access: Optional['outputs.AlloydbomniAlloydbomniUserConfigPrivateAccess'] = None,
@@ -1178,6 +1191,7 @@ class AlloydbomniAlloydbomniUserConfig(dict):
1178
1191
  :param builtins.bool pg_read_replica: Should the service which is being forked be a read replica (deprecated, use read_replica service integration instead).
1179
1192
  :param builtins.str pg_service_to_fork_from: Name of the PG Service from which to fork (deprecated, use service*to*fork_from). This has effect only when a new service is being created. Example: `anotherservicename`.
1180
1193
  :param builtins.str pg_version: Enum: `15`, and newer. PostgreSQL major version.
1194
+ :param 'AlloydbomniAlloydbomniUserConfigPgauditArgs' pgaudit: System-wide settings for the pgaudit extension
1181
1195
  :param 'AlloydbomniAlloydbomniUserConfigPgbouncerArgs' pgbouncer: PGBouncer connection pooling settings
1182
1196
  :param 'AlloydbomniAlloydbomniUserConfigPglookoutArgs' pglookout: System-wide settings for pglookout
1183
1197
  :param 'AlloydbomniAlloydbomniUserConfigPrivateAccessArgs' private_access: Allow access to selected service ports from private networks
@@ -1225,6 +1239,8 @@ class AlloydbomniAlloydbomniUserConfig(dict):
1225
1239
  pulumi.set(__self__, "pg_service_to_fork_from", pg_service_to_fork_from)
1226
1240
  if pg_version is not None:
1227
1241
  pulumi.set(__self__, "pg_version", pg_version)
1242
+ if pgaudit is not None:
1243
+ pulumi.set(__self__, "pgaudit", pgaudit)
1228
1244
  if pgbouncer is not None:
1229
1245
  pulumi.set(__self__, "pgbouncer", pgbouncer)
1230
1246
  if pglookout is not None:
@@ -1383,6 +1399,14 @@ class AlloydbomniAlloydbomniUserConfig(dict):
1383
1399
  """
1384
1400
  return pulumi.get(self, "pg_version")
1385
1401
 
1402
+ @property
1403
+ @pulumi.getter
1404
+ def pgaudit(self) -> Optional['outputs.AlloydbomniAlloydbomniUserConfigPgaudit']:
1405
+ """
1406
+ System-wide settings for the pgaudit extension
1407
+ """
1408
+ return pulumi.get(self, "pgaudit")
1409
+
1386
1410
  @property
1387
1411
  @pulumi.getter
1388
1412
  def pgbouncer(self) -> Optional['outputs.AlloydbomniAlloydbomniUserConfigPgbouncer']:
@@ -2216,6 +2240,220 @@ class AlloydbomniAlloydbomniUserConfigPg(dict):
2216
2240
  return pulumi.get(self, "wal_writer_delay")
2217
2241
 
2218
2242
 
2243
+ @pulumi.output_type
2244
+ class AlloydbomniAlloydbomniUserConfigPgaudit(dict):
2245
+ @staticmethod
2246
+ def __key_warning(key: str):
2247
+ suggest = None
2248
+ if key == "featureEnabled":
2249
+ suggest = "feature_enabled"
2250
+ elif key == "logCatalog":
2251
+ suggest = "log_catalog"
2252
+ elif key == "logClient":
2253
+ suggest = "log_client"
2254
+ elif key == "logLevel":
2255
+ suggest = "log_level"
2256
+ elif key == "logMaxStringLength":
2257
+ suggest = "log_max_string_length"
2258
+ elif key == "logNestedStatements":
2259
+ suggest = "log_nested_statements"
2260
+ elif key == "logParameter":
2261
+ suggest = "log_parameter"
2262
+ elif key == "logParameterMaxSize":
2263
+ suggest = "log_parameter_max_size"
2264
+ elif key == "logRelation":
2265
+ suggest = "log_relation"
2266
+ elif key == "logRows":
2267
+ suggest = "log_rows"
2268
+ elif key == "logStatement":
2269
+ suggest = "log_statement"
2270
+ elif key == "logStatementOnce":
2271
+ suggest = "log_statement_once"
2272
+
2273
+ if suggest:
2274
+ pulumi.log.warn(f"Key '{key}' not found in AlloydbomniAlloydbomniUserConfigPgaudit. Access the value via the '{suggest}' property getter instead.")
2275
+
2276
+ def __getitem__(self, key: str) -> Any:
2277
+ AlloydbomniAlloydbomniUserConfigPgaudit.__key_warning(key)
2278
+ return super().__getitem__(key)
2279
+
2280
+ def get(self, key: str, default = None) -> Any:
2281
+ AlloydbomniAlloydbomniUserConfigPgaudit.__key_warning(key)
2282
+ return super().get(key, default)
2283
+
2284
+ def __init__(__self__, *,
2285
+ feature_enabled: Optional[builtins.bool] = None,
2286
+ log_catalog: Optional[builtins.bool] = None,
2287
+ log_client: Optional[builtins.bool] = None,
2288
+ log_level: Optional[builtins.str] = None,
2289
+ log_max_string_length: Optional[builtins.int] = None,
2290
+ log_nested_statements: Optional[builtins.bool] = None,
2291
+ log_parameter: Optional[builtins.bool] = None,
2292
+ log_parameter_max_size: Optional[builtins.int] = None,
2293
+ log_relation: Optional[builtins.bool] = None,
2294
+ log_rows: Optional[builtins.bool] = None,
2295
+ log_statement: Optional[builtins.bool] = None,
2296
+ log_statement_once: Optional[builtins.bool] = None,
2297
+ logs: Optional[Sequence[builtins.str]] = None,
2298
+ role: Optional[builtins.str] = None):
2299
+ """
2300
+ :param builtins.bool feature_enabled: Enable pgaudit extension. When enabled, pgaudit extension will be automatically installed.Otherwise, extension will be uninstalled but auditing configurations will be preserved. Default: `false`.
2301
+ :param builtins.bool log_catalog: Specifies that session logging should be enabled in the casewhere all relations in a statement are in pg_catalog. Default: `true`.
2302
+ :param builtins.bool log_client: Specifies whether log messages will be visible to a client process such as psql. Default: `false`.
2303
+ :param builtins.str log_level: Enum: `debug1`, `debug2`, `debug3`, `debug4`, `debug5`, `info`, `log`, `notice`, `warning`. Specifies the log level that will be used for log entries. Default: `log`.
2304
+ :param builtins.int log_max_string_length: Crop parameters representation and whole statements if they exceed this threshold. A (default) value of -1 disable the truncation. Default: `-1`.
2305
+ :param builtins.bool log_nested_statements: This GUC allows to turn off logging nested statements, that is, statements that are executed as part of another ExecutorRun. Default: `true`.
2306
+ :param builtins.bool log_parameter: Specifies that audit logging should include the parameters that were passed with the statement. Default: `false`.
2307
+ :param builtins.int log_parameter_max_size: Specifies that parameter values longer than this setting (in bytes) should not be logged, but replaced with \\n\\n. Default: `0`.
2308
+ :param builtins.bool log_relation: Specifies whether session audit logging should create a separate log entry for each relation (TABLE, VIEW, etc.) referenced in a SELECT or DML statement. Default: `false`.
2309
+ :param builtins.bool log_rows: Specifies that audit logging should include the rows retrieved or affected by a statement. When enabled the rows field will be included after the parameter field. Default: `false`.
2310
+ :param builtins.bool log_statement: Specifies whether logging will include the statement text and parameters (if enabled). Default: `true`.
2311
+ :param builtins.bool log_statement_once: Specifies whether logging will include the statement text and parameters with the first log entry for a statement/substatement combination or with every entry. Default: `false`.
2312
+ :param Sequence[builtins.str] logs: Specifies which classes of statements will be logged by session audit logging.
2313
+ :param builtins.str role: Specifies the master role to use for object audit logging.
2314
+ """
2315
+ if feature_enabled is not None:
2316
+ pulumi.set(__self__, "feature_enabled", feature_enabled)
2317
+ if log_catalog is not None:
2318
+ pulumi.set(__self__, "log_catalog", log_catalog)
2319
+ if log_client is not None:
2320
+ pulumi.set(__self__, "log_client", log_client)
2321
+ if log_level is not None:
2322
+ pulumi.set(__self__, "log_level", log_level)
2323
+ if log_max_string_length is not None:
2324
+ pulumi.set(__self__, "log_max_string_length", log_max_string_length)
2325
+ if log_nested_statements is not None:
2326
+ pulumi.set(__self__, "log_nested_statements", log_nested_statements)
2327
+ if log_parameter is not None:
2328
+ pulumi.set(__self__, "log_parameter", log_parameter)
2329
+ if log_parameter_max_size is not None:
2330
+ pulumi.set(__self__, "log_parameter_max_size", log_parameter_max_size)
2331
+ if log_relation is not None:
2332
+ pulumi.set(__self__, "log_relation", log_relation)
2333
+ if log_rows is not None:
2334
+ pulumi.set(__self__, "log_rows", log_rows)
2335
+ if log_statement is not None:
2336
+ pulumi.set(__self__, "log_statement", log_statement)
2337
+ if log_statement_once is not None:
2338
+ pulumi.set(__self__, "log_statement_once", log_statement_once)
2339
+ if logs is not None:
2340
+ pulumi.set(__self__, "logs", logs)
2341
+ if role is not None:
2342
+ pulumi.set(__self__, "role", role)
2343
+
2344
+ @property
2345
+ @pulumi.getter(name="featureEnabled")
2346
+ def feature_enabled(self) -> Optional[builtins.bool]:
2347
+ """
2348
+ Enable pgaudit extension. When enabled, pgaudit extension will be automatically installed.Otherwise, extension will be uninstalled but auditing configurations will be preserved. Default: `false`.
2349
+ """
2350
+ return pulumi.get(self, "feature_enabled")
2351
+
2352
+ @property
2353
+ @pulumi.getter(name="logCatalog")
2354
+ def log_catalog(self) -> Optional[builtins.bool]:
2355
+ """
2356
+ Specifies that session logging should be enabled in the casewhere all relations in a statement are in pg_catalog. Default: `true`.
2357
+ """
2358
+ return pulumi.get(self, "log_catalog")
2359
+
2360
+ @property
2361
+ @pulumi.getter(name="logClient")
2362
+ def log_client(self) -> Optional[builtins.bool]:
2363
+ """
2364
+ Specifies whether log messages will be visible to a client process such as psql. Default: `false`.
2365
+ """
2366
+ return pulumi.get(self, "log_client")
2367
+
2368
+ @property
2369
+ @pulumi.getter(name="logLevel")
2370
+ def log_level(self) -> Optional[builtins.str]:
2371
+ """
2372
+ Enum: `debug1`, `debug2`, `debug3`, `debug4`, `debug5`, `info`, `log`, `notice`, `warning`. Specifies the log level that will be used for log entries. Default: `log`.
2373
+ """
2374
+ return pulumi.get(self, "log_level")
2375
+
2376
+ @property
2377
+ @pulumi.getter(name="logMaxStringLength")
2378
+ def log_max_string_length(self) -> Optional[builtins.int]:
2379
+ """
2380
+ Crop parameters representation and whole statements if they exceed this threshold. A (default) value of -1 disable the truncation. Default: `-1`.
2381
+ """
2382
+ return pulumi.get(self, "log_max_string_length")
2383
+
2384
+ @property
2385
+ @pulumi.getter(name="logNestedStatements")
2386
+ def log_nested_statements(self) -> Optional[builtins.bool]:
2387
+ """
2388
+ This GUC allows to turn off logging nested statements, that is, statements that are executed as part of another ExecutorRun. Default: `true`.
2389
+ """
2390
+ return pulumi.get(self, "log_nested_statements")
2391
+
2392
+ @property
2393
+ @pulumi.getter(name="logParameter")
2394
+ def log_parameter(self) -> Optional[builtins.bool]:
2395
+ """
2396
+ Specifies that audit logging should include the parameters that were passed with the statement. Default: `false`.
2397
+ """
2398
+ return pulumi.get(self, "log_parameter")
2399
+
2400
+ @property
2401
+ @pulumi.getter(name="logParameterMaxSize")
2402
+ def log_parameter_max_size(self) -> Optional[builtins.int]:
2403
+ """
2404
+ Specifies that parameter values longer than this setting (in bytes) should not be logged, but replaced with \\n\\n. Default: `0`.
2405
+ """
2406
+ return pulumi.get(self, "log_parameter_max_size")
2407
+
2408
+ @property
2409
+ @pulumi.getter(name="logRelation")
2410
+ def log_relation(self) -> Optional[builtins.bool]:
2411
+ """
2412
+ Specifies whether session audit logging should create a separate log entry for each relation (TABLE, VIEW, etc.) referenced in a SELECT or DML statement. Default: `false`.
2413
+ """
2414
+ return pulumi.get(self, "log_relation")
2415
+
2416
+ @property
2417
+ @pulumi.getter(name="logRows")
2418
+ def log_rows(self) -> Optional[builtins.bool]:
2419
+ """
2420
+ Specifies that audit logging should include the rows retrieved or affected by a statement. When enabled the rows field will be included after the parameter field. Default: `false`.
2421
+ """
2422
+ return pulumi.get(self, "log_rows")
2423
+
2424
+ @property
2425
+ @pulumi.getter(name="logStatement")
2426
+ def log_statement(self) -> Optional[builtins.bool]:
2427
+ """
2428
+ Specifies whether logging will include the statement text and parameters (if enabled). Default: `true`.
2429
+ """
2430
+ return pulumi.get(self, "log_statement")
2431
+
2432
+ @property
2433
+ @pulumi.getter(name="logStatementOnce")
2434
+ def log_statement_once(self) -> Optional[builtins.bool]:
2435
+ """
2436
+ Specifies whether logging will include the statement text and parameters with the first log entry for a statement/substatement combination or with every entry. Default: `false`.
2437
+ """
2438
+ return pulumi.get(self, "log_statement_once")
2439
+
2440
+ @property
2441
+ @pulumi.getter
2442
+ def logs(self) -> Optional[Sequence[builtins.str]]:
2443
+ """
2444
+ Specifies which classes of statements will be logged by session audit logging.
2445
+ """
2446
+ return pulumi.get(self, "logs")
2447
+
2448
+ @property
2449
+ @pulumi.getter
2450
+ def role(self) -> Optional[builtins.str]:
2451
+ """
2452
+ Specifies the master role to use for object audit logging.
2453
+ """
2454
+ return pulumi.get(self, "role")
2455
+
2456
+
2219
2457
  @pulumi.output_type
2220
2458
  class AlloydbomniAlloydbomniUserConfigPgbouncer(dict):
2221
2459
  @staticmethod
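
For orientation, the new AlloydbomniAlloydbomniUserConfigPgaudit output type above corresponds to a `pgaudit` block under the service's user config. A minimal sketch of wiring it up with this SDK version follows; the project, cloud, and plan values are placeholders, and the `logs` entries are assumed pgaudit statement classes rather than values taken from this diff.

import pulumi_aiven as aiven

# Placeholder project/cloud/plan; only the pgaudit block mirrors the
# AlloydbomniAlloydbomniUserConfigPgaudit type added in this release.
alloydbomni = aiven.Alloydbomni(
    "example-alloydbomni",
    project="my-project",
    cloud_name="google-europe-west1",
    plan="startup-4",
    service_name="example-alloydbomni",
    alloydbomni_user_config=aiven.AlloydbomniAlloydbomniUserConfigArgs(
        pgaudit=aiven.AlloydbomniAlloydbomniUserConfigPgauditArgs(
            feature_enabled=True,
            log_level="log",
            log_catalog=True,
            logs=["read", "write", "ddl"],  # assumed pgaudit.log classes, not from this diff
        ),
    ),
)
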
@@ -6173,6 +6411,197 @@ class FlinkTechEmail(dict):
6173
6411
  return pulumi.get(self, "email")
6174
6412
 
6175
6413
 
6414
+ @pulumi.output_type
6415
+ class GovernanceAccessAccessData(dict):
6416
+ @staticmethod
6417
+ def __key_warning(key: str):
6418
+ suggest = None
6419
+ if key == "serviceName":
6420
+ suggest = "service_name"
6421
+
6422
+ if suggest:
6423
+ pulumi.log.warn(f"Key '{key}' not found in GovernanceAccessAccessData. Access the value via the '{suggest}' property getter instead.")
6424
+
6425
+ def __getitem__(self, key: str) -> Any:
6426
+ GovernanceAccessAccessData.__key_warning(key)
6427
+ return super().__getitem__(key)
6428
+
6429
+ def get(self, key: str, default = None) -> Any:
6430
+ GovernanceAccessAccessData.__key_warning(key)
6431
+ return super().get(key, default)
6432
+
6433
+ def __init__(__self__, *,
6434
+ acls: Sequence['outputs.GovernanceAccessAccessDataAcl'],
6435
+ project: builtins.str,
6436
+ service_name: builtins.str,
6437
+ username: Optional[builtins.str] = None):
6438
+ """
6439
+ :param Sequence['GovernanceAccessAccessDataAclArgs'] acls: The permissions granted to the assigned service user. Maximum length: `54`. Changing this property forces recreation of the resource.
6440
+ :param builtins.str project: The name of the project this resource belongs to. To set up proper dependencies please refer to this variable as a reference. Changing this property forces recreation of the resource.
6441
+ :param builtins.str service_name: The name of the service that this resource belongs to. To set up proper dependencies please refer to this variable as a reference. Changing this property forces recreation of the resource.
6442
+ :param builtins.str username: The name for the new service user given access. If not provided, the name is automatically generated. Maximum length: `54`. Changing this property forces recreation of the resource.
6443
+ """
6444
+ pulumi.set(__self__, "acls", acls)
6445
+ pulumi.set(__self__, "project", project)
6446
+ pulumi.set(__self__, "service_name", service_name)
6447
+ if username is not None:
6448
+ pulumi.set(__self__, "username", username)
6449
+
6450
+ @property
6451
+ @pulumi.getter
6452
+ def acls(self) -> Sequence['outputs.GovernanceAccessAccessDataAcl']:
6453
+ """
6454
+ The permissions granted to the assigned service user. Maximum length: `54`. Changing this property forces recreation of the resource.
6455
+ """
6456
+ return pulumi.get(self, "acls")
6457
+
6458
+ @property
6459
+ @pulumi.getter
6460
+ def project(self) -> builtins.str:
6461
+ """
6462
+ The name of the project this resource belongs to. To set up proper dependencies please refer to this variable as a reference. Changing this property forces recreation of the resource.
6463
+ """
6464
+ return pulumi.get(self, "project")
6465
+
6466
+ @property
6467
+ @pulumi.getter(name="serviceName")
6468
+ def service_name(self) -> builtins.str:
6469
+ """
6470
+ The name of the service that this resource belongs to. To set up proper dependencies please refer to this variable as a reference. Changing this property forces recreation of the resource.
6471
+ """
6472
+ return pulumi.get(self, "service_name")
6473
+
6474
+ @property
6475
+ @pulumi.getter
6476
+ def username(self) -> Optional[builtins.str]:
6477
+ """
6478
+ The name for the new service user given access. If not provided, the name is automatically generated. Maximum length: `54`. Changing this property forces recreation of the resource.
6479
+ """
6480
+ return pulumi.get(self, "username")
6481
+
6482
+
6483
+ @pulumi.output_type
6484
+ class GovernanceAccessAccessDataAcl(dict):
6485
+ @staticmethod
6486
+ def __key_warning(key: str):
6487
+ suggest = None
6488
+ if key == "permissionType":
6489
+ suggest = "permission_type"
6490
+ elif key == "resourceName":
6491
+ suggest = "resource_name"
6492
+ elif key == "resourceType":
6493
+ suggest = "resource_type"
6494
+ elif key == "patternType":
6495
+ suggest = "pattern_type"
6496
+
6497
+ if suggest:
6498
+ pulumi.log.warn(f"Key '{key}' not found in GovernanceAccessAccessDataAcl. Access the value via the '{suggest}' property getter instead.")
6499
+
6500
+ def __getitem__(self, key: str) -> Any:
6501
+ GovernanceAccessAccessDataAcl.__key_warning(key)
6502
+ return super().__getitem__(key)
6503
+
6504
+ def get(self, key: str, default = None) -> Any:
6505
+ GovernanceAccessAccessDataAcl.__key_warning(key)
6506
+ return super().get(key, default)
6507
+
6508
+ def __init__(__self__, *,
6509
+ operation: builtins.str,
6510
+ permission_type: builtins.str,
6511
+ resource_name: builtins.str,
6512
+ resource_type: builtins.str,
6513
+ host: Optional[builtins.str] = None,
6514
+ id: Optional[builtins.str] = None,
6515
+ pattern_type: Optional[builtins.str] = None,
6516
+ principal: Optional[builtins.str] = None):
6517
+ """
6518
+ :param builtins.str operation: The action that will be allowed for the service user. The possible values are `Read` and `Write`. Changing this property forces recreation of the resource.
6519
+ :param builtins.str permission_type: Explicitly allows or denies the action for the service user on the specified resource. The possible value is `ALLOW`. Changing this property forces recreation of the resource.
6520
+ :param builtins.str resource_name: The name of the resource the permission applies to, such as the topic name or group ID in the Kafka service. Maximum length: `256`. Changing this property forces recreation of the resource.
6521
+ :param builtins.str resource_type: The type of resource. The possible value is `Topic`. Changing this property forces recreation of the resource.
6522
+ :param builtins.str host: The IP address from which a principal is allowed or denied access to the resource. Use `*` for all hosts. Maximum length: `256`. Changing this property forces recreation of the resource.
6523
+ :param builtins.str id: The ACL ID.
6524
+ :param builtins.str pattern_type: Pattern used to match specified resources. The possible value is `LITERAL`.
6525
+ :param builtins.str principal: Identities in `user:name` format that the permissions apply to.
6526
+ """
6527
+ pulumi.set(__self__, "operation", operation)
6528
+ pulumi.set(__self__, "permission_type", permission_type)
6529
+ pulumi.set(__self__, "resource_name", resource_name)
6530
+ pulumi.set(__self__, "resource_type", resource_type)
6531
+ if host is not None:
6532
+ pulumi.set(__self__, "host", host)
6533
+ if id is not None:
6534
+ pulumi.set(__self__, "id", id)
6535
+ if pattern_type is not None:
6536
+ pulumi.set(__self__, "pattern_type", pattern_type)
6537
+ if principal is not None:
6538
+ pulumi.set(__self__, "principal", principal)
6539
+
6540
+ @property
6541
+ @pulumi.getter
6542
+ def operation(self) -> builtins.str:
6543
+ """
6544
+ The action that will be allowed for the service user. The possible values are `Read` and `Write`. Changing this property forces recreation of the resource.
6545
+ """
6546
+ return pulumi.get(self, "operation")
6547
+
6548
+ @property
6549
+ @pulumi.getter(name="permissionType")
6550
+ def permission_type(self) -> builtins.str:
6551
+ """
6552
+ Explicitly allows or denies the action for the service user on the specified resource. The possible value is `ALLOW`. Changing this property forces recreation of the resource.
6553
+ """
6554
+ return pulumi.get(self, "permission_type")
6555
+
6556
+ @property
6557
+ @pulumi.getter(name="resourceName")
6558
+ def resource_name(self) -> builtins.str:
6559
+ """
6560
+ The name of the resource the permission applies to, such as the topic name or group ID in the Kafka service. Maximum length: `256`. Changing this property forces recreation of the resource.
6561
+ """
6562
+ return pulumi.get(self, "resource_name")
6563
+
6564
+ @property
6565
+ @pulumi.getter(name="resourceType")
6566
+ def resource_type(self) -> builtins.str:
6567
+ """
6568
+ The type of resource. The possible value is `Topic`. Changing this property forces recreation of the resource.
6569
+ """
6570
+ return pulumi.get(self, "resource_type")
6571
+
6572
+ @property
6573
+ @pulumi.getter
6574
+ def host(self) -> Optional[builtins.str]:
6575
+ """
6576
+ The IP address from which a principal is allowed or denied access to the resource. Use `*` for all hosts. Maximum length: `256`. Changing this property forces recreation of the resource.
6577
+ """
6578
+ return pulumi.get(self, "host")
6579
+
6580
+ @property
6581
+ @pulumi.getter
6582
+ def id(self) -> Optional[builtins.str]:
6583
+ """
6584
+ The ACL ID.
6585
+ """
6586
+ return pulumi.get(self, "id")
6587
+
6588
+ @property
6589
+ @pulumi.getter(name="patternType")
6590
+ def pattern_type(self) -> Optional[builtins.str]:
6591
+ """
6592
+ Pattern used to match specified resources. The possible value is `LITERAL`.
6593
+ """
6594
+ return pulumi.get(self, "pattern_type")
6595
+
6596
+ @property
6597
+ @pulumi.getter
6598
+ def principal(self) -> Optional[builtins.str]:
6599
+ """
6600
+ Identities in `user:name` format that the permissions apply to.
6601
+ """
6602
+ return pulumi.get(self, "principal")
6603
+
6604
+
6176
6605
  @pulumi.output_type
6177
6606
  class GrafanaComponent(dict):
6178
6607
  @staticmethod
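
The GovernanceAccessAccessData and GovernanceAccessAccessDataAcl types above back the new governance_access resource added in this release (see pulumi_aiven/governance_access.py in the file list). A rough sketch follows, assuming conventional top-level arguments (organization_id, access_name, access_type) that are not shown in this diff; only the access_data/acls shape is taken from the types above.

import pulumi_aiven as aiven

# organization_id, access_name and access_type are assumed arguments of the new
# GovernanceAccess resource; the ACL field names and allowed values (Topic,
# LITERAL, Read, ALLOW) come from the GovernanceAccessAccessDataAcl docstrings.
access = aiven.GovernanceAccess(
    "example-governance-access",
    organization_id="org1a2b3c4d5e6",      # placeholder
    access_name="orders-topic-read",       # placeholder
    access_type="KAFKA",                   # assumed value
    access_data=aiven.GovernanceAccessAccessDataArgs(
        project="my-project",              # placeholder
        service_name="my-kafka",           # placeholder
        acls=[
            aiven.GovernanceAccessAccessDataAclArgs(
                resource_type="Topic",
                resource_name="orders",
                pattern_type="LITERAL",
                operation="Read",
                permission_type="ALLOW",
            ),
        ],
    ),
)
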
@@ -10307,6 +10736,8 @@ class KafkaKafkaUserConfig(dict):
10307
10736
  suggest = "kafka_connect"
10308
10737
  elif key == "kafkaConnectConfig":
10309
10738
  suggest = "kafka_connect_config"
10739
+ elif key == "kafkaConnectPluginVersions":
10740
+ suggest = "kafka_connect_plugin_versions"
10310
10741
  elif key == "kafkaConnectSecretProviders":
10311
10742
  suggest = "kafka_connect_secret_providers"
10312
10743
  elif key == "kafkaRest":
@@ -10363,6 +10794,7 @@ class KafkaKafkaUserConfig(dict):
10363
10794
  kafka_authentication_methods: Optional['outputs.KafkaKafkaUserConfigKafkaAuthenticationMethods'] = None,
10364
10795
  kafka_connect: Optional[builtins.bool] = None,
10365
10796
  kafka_connect_config: Optional['outputs.KafkaKafkaUserConfigKafkaConnectConfig'] = None,
10797
+ kafka_connect_plugin_versions: Optional[Sequence['outputs.KafkaKafkaUserConfigKafkaConnectPluginVersion']] = None,
10366
10798
  kafka_connect_secret_providers: Optional[Sequence['outputs.KafkaKafkaUserConfigKafkaConnectSecretProvider']] = None,
10367
10799
  kafka_rest: Optional[builtins.bool] = None,
10368
10800
  kafka_rest_authorization: Optional[builtins.bool] = None,
@@ -10391,6 +10823,7 @@ class KafkaKafkaUserConfig(dict):
10391
10823
  :param 'KafkaKafkaUserConfigKafkaAuthenticationMethodsArgs' kafka_authentication_methods: Kafka authentication methods
10392
10824
  :param builtins.bool kafka_connect: Enable Kafka Connect service. Default: `false`.
10393
10825
  :param 'KafkaKafkaUserConfigKafkaConnectConfigArgs' kafka_connect_config: Kafka Connect configuration values
10826
+ :param Sequence['KafkaKafkaUserConfigKafkaConnectPluginVersionArgs'] kafka_connect_plugin_versions: The plugin selected by the user
10394
10827
  :param builtins.bool kafka_rest: Enable Kafka-REST service. Default: `false`.
10395
10828
  :param builtins.bool kafka_rest_authorization: Enable authorization in Kafka-REST service.
10396
10829
  :param 'KafkaKafkaUserConfigKafkaRestConfigArgs' kafka_rest_config: Kafka REST configuration
@@ -10429,6 +10862,8 @@ class KafkaKafkaUserConfig(dict):
10429
10862
  pulumi.set(__self__, "kafka_connect", kafka_connect)
10430
10863
  if kafka_connect_config is not None:
10431
10864
  pulumi.set(__self__, "kafka_connect_config", kafka_connect_config)
10865
+ if kafka_connect_plugin_versions is not None:
10866
+ pulumi.set(__self__, "kafka_connect_plugin_versions", kafka_connect_plugin_versions)
10432
10867
  if kafka_connect_secret_providers is not None:
10433
10868
  pulumi.set(__self__, "kafka_connect_secret_providers", kafka_connect_secret_providers)
10434
10869
  if kafka_rest is not None:
@@ -10552,6 +10987,14 @@ class KafkaKafkaUserConfig(dict):
10552
10987
  """
10553
10988
  return pulumi.get(self, "kafka_connect_config")
10554
10989
 
10990
+ @property
10991
+ @pulumi.getter(name="kafkaConnectPluginVersions")
10992
+ def kafka_connect_plugin_versions(self) -> Optional[Sequence['outputs.KafkaKafkaUserConfigKafkaConnectPluginVersion']]:
10993
+ """
10994
+ The plugin selected by the user
10995
+ """
10996
+ return pulumi.get(self, "kafka_connect_plugin_versions")
10997
+
10555
10998
  @property
10556
10999
  @pulumi.getter(name="kafkaConnectSecretProviders")
10557
11000
  def kafka_connect_secret_providers(self) -> Optional[Sequence['outputs.KafkaKafkaUserConfigKafkaConnectSecretProvider']]:
@@ -11670,6 +12113,52 @@ class KafkaKafkaUserConfigKafkaConnectConfig(dict):
11670
12113
  return pulumi.get(self, "session_timeout_ms")
11671
12114
 
11672
12115
 
12116
+ @pulumi.output_type
12117
+ class KafkaKafkaUserConfigKafkaConnectPluginVersion(dict):
12118
+ @staticmethod
12119
+ def __key_warning(key: str):
12120
+ suggest = None
12121
+ if key == "pluginName":
12122
+ suggest = "plugin_name"
12123
+
12124
+ if suggest:
12125
+ pulumi.log.warn(f"Key '{key}' not found in KafkaKafkaUserConfigKafkaConnectPluginVersion. Access the value via the '{suggest}' property getter instead.")
12126
+
12127
+ def __getitem__(self, key: str) -> Any:
12128
+ KafkaKafkaUserConfigKafkaConnectPluginVersion.__key_warning(key)
12129
+ return super().__getitem__(key)
12130
+
12131
+ def get(self, key: str, default = None) -> Any:
12132
+ KafkaKafkaUserConfigKafkaConnectPluginVersion.__key_warning(key)
12133
+ return super().get(key, default)
12134
+
12135
+ def __init__(__self__, *,
12136
+ plugin_name: builtins.str,
12137
+ version: builtins.str):
12138
+ """
12139
+ :param builtins.str plugin_name: The name of the plugin. Example: `debezium-connector`.
12140
+ :param builtins.str version: The version of the plugin. Example: `2.5.0`.
12141
+ """
12142
+ pulumi.set(__self__, "plugin_name", plugin_name)
12143
+ pulumi.set(__self__, "version", version)
12144
+
12145
+ @property
12146
+ @pulumi.getter(name="pluginName")
12147
+ def plugin_name(self) -> builtins.str:
12148
+ """
12149
+ The name of the plugin. Example: `debezium-connector`.
12150
+ """
12151
+ return pulumi.get(self, "plugin_name")
12152
+
12153
+ @property
12154
+ @pulumi.getter
12155
+ def version(self) -> builtins.str:
12156
+ """
12157
+ The version of the plugin. Example: `2.5.0`.
12158
+ """
12159
+ return pulumi.get(self, "version")
12160
+
12161
+
11673
12162
  @pulumi.output_type
11674
12163
  class KafkaKafkaUserConfigKafkaConnectSecretProvider(dict):
11675
12164
  def __init__(__self__, *,
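
The KafkaKafkaUserConfigKafkaConnectPluginVersion type above hangs off the Kafka user config through the new kafka_connect_plugin_versions list. A minimal sketch; the project, cloud, and plan values are placeholders, and the plugin name/version simply reuse the example values from the schema docstrings.

import pulumi_aiven as aiven

# Pins a Kafka Connect plugin version via the new kafka_connect_plugin_versions
# list; plugin_name/version reuse the example values from the schema docs.
kafka = aiven.Kafka(
    "example-kafka",
    project="my-project",                  # placeholder
    cloud_name="google-europe-west1",      # placeholder
    plan="business-4",                     # placeholder
    service_name="example-kafka",
    kafka_user_config=aiven.KafkaKafkaUserConfigArgs(
        kafka_connect=True,
        kafka_connect_plugin_versions=[
            aiven.KafkaKafkaUserConfigKafkaConnectPluginVersionArgs(
                plugin_name="debezium-connector",
                version="2.5.0",
            ),
        ],
    ),
)
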
@@ -13344,33 +13833,33 @@ class KafkaTopicConfig(dict):
13344
13833
  segment_ms: Optional[builtins.str] = None,
13345
13834
  unclean_leader_election_enable: Optional[builtins.bool] = None):
13346
13835
  """
13347
- :param builtins.str cleanup_policy: cleanup.policy value. The possible values are `compact`, `compact,delete` and `delete`.
13348
- :param builtins.str compression_type: compression.type value. The possible values are `gzip`, `lz4`, `producer`, `snappy`, `uncompressed` and `zstd`.
13349
- :param builtins.str delete_retention_ms: delete.retention.ms value
13350
- :param builtins.str file_delete_delay_ms: file.delete.delay.ms value
13351
- :param builtins.str flush_messages: flush.messages value
13352
- :param builtins.str flush_ms: flush.ms value
13353
- :param builtins.str index_interval_bytes: index.interval.bytes value
13354
- :param builtins.str local_retention_bytes: local.retention.bytes value
13355
- :param builtins.str local_retention_ms: local.retention.ms value
13356
- :param builtins.str max_compaction_lag_ms: max.compaction.lag.ms value
13357
- :param builtins.str max_message_bytes: max.message.bytes value
13358
- :param builtins.bool message_downconversion_enable: message.downconversion.enable value
13359
- :param builtins.str message_format_version: message.format.version value. The possible values are `0.10.0`, `0.10.0-IV0`, `0.10.0-IV1`, `0.10.1`, `0.10.1-IV0`, `0.10.1-IV1`, `0.10.1-IV2`, `0.10.2`, `0.10.2-IV0`, `0.11.0`, `0.11.0-IV0`, `0.11.0-IV1`, `0.11.0-IV2`, `0.8.0`, `0.8.1`, `0.8.2`, `0.9.0`, `1.0`, `1.0-IV0`, `1.1`, `1.1-IV0`, `2.0`, `2.0-IV0`, `2.0-IV1`, `2.1`, `2.1-IV0`, `2.1-IV1`, `2.1-IV2`, `2.2`, `2.2-IV0`, `2.2-IV1`, `2.3`, `2.3-IV0`, `2.3-IV1`, `2.4`, `2.4-IV0`, `2.4-IV1`, `2.5`, `2.5-IV0`, `2.6`, `2.6-IV0`, `2.7`, `2.7-IV0`, `2.7-IV1`, `2.7-IV2`, `2.8`, `2.8-IV0`, `2.8-IV1`, `3.0`, `3.0-IV0`, `3.0-IV1`, `3.1`, `3.1-IV0`, `3.2`, `3.2-IV0`, `3.3`, `3.3-IV0`, `3.3-IV1`, `3.3-IV2`, `3.3-IV3`, `3.4`, `3.4-IV0`, `3.5`, `3.5-IV0`, `3.5-IV1`, `3.5-IV2`, `3.6`, `3.6-IV0`, `3.6-IV1`, `3.6-IV2`, `3.7`, `3.7-IV0`, `3.7-IV1`, `3.7-IV2`, `3.7-IV3`, `3.7-IV4`, `3.8`, `3.8-IV0`, `3.9`, `3.9-IV0`, `3.9-IV1`, `4.1` and `4.1-IV0`.
13360
- :param builtins.str message_timestamp_difference_max_ms: message.timestamp.difference.max.ms value
13361
- :param builtins.str message_timestamp_type: message.timestamp.type value. The possible values are `CreateTime` and `LogAppendTime`.
13362
- :param builtins.float min_cleanable_dirty_ratio: min.cleanable.dirty.ratio value
13363
- :param builtins.str min_compaction_lag_ms: min.compaction.lag.ms value
13364
- :param builtins.str min_insync_replicas: min.insync.replicas value
13365
- :param builtins.bool preallocate: preallocate value
13366
- :param builtins.bool remote_storage_enable: remote.storage.enable value
13367
- :param builtins.str retention_bytes: retention.bytes value
13368
- :param builtins.str retention_ms: retention.ms value
13369
- :param builtins.str segment_bytes: segment.bytes value
13370
- :param builtins.str segment_index_bytes: segment.index.bytes value
13371
- :param builtins.str segment_jitter_ms: segment.jitter.ms value
13372
- :param builtins.str segment_ms: segment.ms value
13373
- :param builtins.bool unclean_leader_election_enable: unclean.leader.election.enable value; This field is deprecated and no longer functional.
13836
+ :param builtins.str cleanup_policy: The retention policy to use on old segments. Possible values include 'delete', 'compact', or a comma-separated list of them. The default policy ('delete') will discard old segments when their retention time or size limit has been reached. The 'compact' setting will enable log compaction on the topic. The possible values are `compact`, `compact,delete` and `delete`.
13837
+ :param builtins.str compression_type: Specify the final compression type for a given topic. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'uncompressed' which is equivalent to no compression; and 'producer' which means retain the original compression codec set by the producer. The possible values are `gzip`, `lz4`, `producer`, `snappy`, `uncompressed` and `zstd`.
13838
+ :param builtins.str delete_retention_ms: The amount of time to retain delete tombstone markers for log compacted topics. This setting also gives a bound on the time in which a consumer must complete a read if they begin from offset 0 to ensure that they get a valid snapshot of the final stage (otherwise delete tombstones may be collected before they complete their scan).
13839
+ :param builtins.str file_delete_delay_ms: The time to wait before deleting a file from the filesystem.
13840
+ :param builtins.str flush_messages: This setting allows specifying an interval at which we will force an fsync of data written to the log. For example if this was set to 1 we would fsync after every message; if it were 5 we would fsync after every five messages. In general we recommend you not set this and use replication for durability and allow the operating system's background flush capabilities as it is more efficient.
13841
+ :param builtins.str flush_ms: This setting allows specifying a time interval at which we will force an fsync of data written to the log. For example if this was set to 1000 we would fsync after 1000 ms had passed. In general we recommend you not set this and use replication for durability and allow the operating system's background flush capabilities as it is more efficient.
13842
+ :param builtins.str index_interval_bytes: This setting controls how frequently Kafka adds an index entry to its offset index. The default setting ensures that we index a message roughly every 4096 bytes. More indexing allows reads to jump closer to the exact position in the log but makes the index larger. You probably don't need to change this.
13843
+ :param builtins.str local_retention_bytes: This configuration controls the maximum bytes tiered storage will retain segment files locally before it will discard old log segments to free up space. If set to -2, the limit is equal to overall retention time. If set to -1, no limit is applied but it's possible only if overall retention is also -1.
13844
+ :param builtins.str local_retention_ms: This configuration controls the maximum time tiered storage will retain segment files locally before it will discard old log segments to free up space. If set to -2, the time limit is equal to overall retention time. If set to -1, no time limit is applied but it's possible only if overall retention is also -1.
13845
+ :param builtins.str max_compaction_lag_ms: The maximum time a message will remain ineligible for compaction in the log. Only applicable for logs that are being compacted.
13846
+ :param builtins.str max_message_bytes: The largest record batch size allowed by Kafka (after compression if compression is enabled). If this is increased and there are consumers older than 0.10.2, the consumers' fetch size must also be increased so that the they can fetch record batches this large. In the latest message format version, records are always grouped into batches for efficiency. In previous message format versions, uncompressed records are not grouped into batches and this limit only applies to a single record in that case.
13847
+ :param builtins.bool message_downconversion_enable: This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. When set to false, broker will not perform down-conversion for consumers expecting an older message format. The broker responds with UNSUPPORTED_VERSION error for consume requests from such older clients. This configuration does not apply to any message format conversion that might be required for replication to followers.
13848
+ :param builtins.str message_format_version: Specify the message format version the broker will use to append messages to the logs. The value should be a valid ApiVersion. Some examples are: 0.8.2, 0.9.0.0, 0.10.0, check ApiVersion for more details. By setting a particular message format version, the user is certifying that all the existing messages on disk are smaller or equal than the specified version. Setting this value incorrectly will cause consumers with older versions to break as they will receive messages with a format that they don't understand. The possible values are `0.10.0`, `0.10.0-IV0`, `0.10.0-IV1`, `0.10.1`, `0.10.1-IV0`, `0.10.1-IV1`, `0.10.1-IV2`, `0.10.2`, `0.10.2-IV0`, `0.11.0`, `0.11.0-IV0`, `0.11.0-IV1`, `0.11.0-IV2`, `0.8.0`, `0.8.1`, `0.8.2`, `0.9.0`, `1.0`, `1.0-IV0`, `1.1`, `1.1-IV0`, `2.0`, `2.0-IV0`, `2.0-IV1`, `2.1`, `2.1-IV0`, `2.1-IV1`, `2.1-IV2`, `2.2`, `2.2-IV0`, `2.2-IV1`, `2.3`, `2.3-IV0`, `2.3-IV1`, `2.4`, `2.4-IV0`, `2.4-IV1`, `2.5`, `2.5-IV0`, `2.6`, `2.6-IV0`, `2.7`, `2.7-IV0`, `2.7-IV1`, `2.7-IV2`, `2.8`, `2.8-IV0`, `2.8-IV1`, `3.0`, `3.0-IV0`, `3.0-IV1`, `3.1`, `3.1-IV0`, `3.2`, `3.2-IV0`, `3.3`, `3.3-IV0`, `3.3-IV1`, `3.3-IV2`, `3.3-IV3`, `3.4`, `3.4-IV0`, `3.5`, `3.5-IV0`, `3.5-IV1`, `3.5-IV2`, `3.6`, `3.6-IV0`, `3.6-IV1`, `3.6-IV2`, `3.7`, `3.7-IV0`, `3.7-IV1`, `3.7-IV2`, `3.7-IV3`, `3.7-IV4`, `3.8`, `3.8-IV0`, `3.9`, `3.9-IV0`, `3.9-IV1`, `4.0`, `4.0-IV0`, `4.1` and `4.1-IV0`.
13849
+ :param builtins.str message_timestamp_difference_max_ms: The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message. If message.timestamp.type=CreateTime, a message will be rejected if the difference in timestamp exceeds this threshold. This configuration is ignored if message.timestamp.type=LogAppendTime.
13850
+ :param builtins.str message_timestamp_type: Define whether the timestamp in the message is message create time or log append time. The possible values are `CreateTime` and `LogAppendTime`.
13851
+ :param builtins.float min_cleanable_dirty_ratio: This configuration controls how frequently the log compactor will attempt to clean the log (assuming log compaction is enabled). By default we will avoid cleaning a log where more than 50% of the log has been compacted. This ratio bounds the maximum space wasted in the log by duplicates (at 50% at most 50% of the log could be duplicates). A higher ratio will mean fewer, more efficient cleanings but will mean more wasted space in the log. If the max.compaction.lag.ms or the min.compaction.lag.ms configurations are also specified, then the log compactor considers the log to be eligible for compaction as soon as either: (i) the dirty ratio threshold has been met and the log has had dirty (uncompacted) records for at least the min.compaction.lag.ms duration, or (ii) if the log has had dirty (uncompacted) records for at most the max.compaction.lag.ms period.
13852
+ :param builtins.str min_compaction_lag_ms: The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.
13853
+ :param builtins.str min_insync_replicas: When a producer sets acks to 'all' (or '-1'), this configuration specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful. If this minimum cannot be met, then the producer will raise an exception (either NotEnoughReplicas or NotEnoughReplicasAfterAppend). When used together, min.insync.replicas and acks allow you to enforce greater durability guarantees. A typical scenario would be to create a topic with a replication factor of 3, set min.insync.replicas to 2, and produce with acks of 'all'. This will ensure that the producer raises an exception if a majority of replicas do not receive a write.
13854
+ :param builtins.bool preallocate: True if we should preallocate the file on disk when creating a new log segment.
13855
+ :param builtins.bool remote_storage_enable: Indicates whether tiered storage should be enabled.
13856
+ :param builtins.str retention_bytes: This configuration controls the maximum size a partition (which consists of log segments) can grow to before we will discard old log segments to free up space if we are using the 'delete' retention policy. By default there is no size limit only a time limit. Since this limit is enforced at the partition level, multiply it by the number of partitions to compute the topic retention in bytes.
13857
+ :param builtins.str retention_ms: This configuration controls the maximum time we will retain a log before we will discard old log segments to free up space if we are using the 'delete' retention policy. This represents an SLA on how soon consumers must read their data. If set to -1, no time limit is applied.
13858
+ :param builtins.str segment_bytes: This configuration controls the size of the index that maps offsets to file positions. We preallocate this index file and shrink it only after log rolls. You generally should not need to change this setting.
13859
+ :param builtins.str segment_index_bytes: This configuration controls the size of the index that maps offsets to file positions. We preallocate this index file and shrink it only after log rolls. You generally should not need to change this setting.
13860
+ :param builtins.str segment_jitter_ms: The maximum random jitter subtracted from the scheduled segment roll time to avoid thundering herds of segment rolling
13861
+ :param builtins.str segment_ms: This configuration controls the period of time after which Kafka will force the log to roll even if the segment file isn't full to ensure that retention can delete or compact old data. Setting this to a very low value has consequences, and the Aiven management plane ignores values less than 10 seconds.
13862
+ :param builtins.bool unclean_leader_election_enable: Indicates whether to enable replicas not in the ISR set to be elected as leader as a last resort, even though doing so may result in data loss.
13374
13863
  """
13375
13864
  if cleanup_policy is not None:
13376
13865
  pulumi.set(__self__, "cleanup_policy", cleanup_policy)
@@ -13431,7 +13920,7 @@ class KafkaTopicConfig(dict):
13431
13920
  @pulumi.getter(name="cleanupPolicy")
13432
13921
  def cleanup_policy(self) -> Optional[builtins.str]:
13433
13922
  """
13434
- cleanup.policy value. The possible values are `compact`, `compact,delete` and `delete`.
13923
+ The retention policy to use on old segments. Possible values include 'delete', 'compact', or a comma-separated list of them. The default policy ('delete') will discard old segments when their retention time or size limit has been reached. The 'compact' setting will enable log compaction on the topic. The possible values are `compact`, `compact,delete` and `delete`.
13435
13924
  """
13436
13925
  return pulumi.get(self, "cleanup_policy")
13437
13926
 
@@ -13439,7 +13928,7 @@ class KafkaTopicConfig(dict):
13439
13928
  @pulumi.getter(name="compressionType")
13440
13929
  def compression_type(self) -> Optional[builtins.str]:
13441
13930
  """
13442
- compression.type value. The possible values are `gzip`, `lz4`, `producer`, `snappy`, `uncompressed` and `zstd`.
13931
+ Specify the final compression type for a given topic. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'uncompressed' which is equivalent to no compression; and 'producer' which means retain the original compression codec set by the producer. The possible values are `gzip`, `lz4`, `producer`, `snappy`, `uncompressed` and `zstd`.
13443
13932
  """
13444
13933
  return pulumi.get(self, "compression_type")
13445
13934
 
@@ -13447,7 +13936,7 @@ class KafkaTopicConfig(dict):
13447
13936
  @pulumi.getter(name="deleteRetentionMs")
13448
13937
  def delete_retention_ms(self) -> Optional[builtins.str]:
13449
13938
  """
13450
- delete.retention.ms value
13939
+ The amount of time to retain delete tombstone markers for log compacted topics. This setting also gives a bound on the time in which a consumer must complete a read if they begin from offset 0 to ensure that they get a valid snapshot of the final stage (otherwise delete tombstones may be collected before they complete their scan).
13451
13940
  """
13452
13941
  return pulumi.get(self, "delete_retention_ms")
13453
13942
 
@@ -13455,7 +13944,7 @@ class KafkaTopicConfig(dict):
13455
13944
  @pulumi.getter(name="fileDeleteDelayMs")
13456
13945
  def file_delete_delay_ms(self) -> Optional[builtins.str]:
13457
13946
  """
13458
- file.delete.delay.ms value
13947
+ The time to wait before deleting a file from the filesystem.
13459
13948
  """
13460
13949
  return pulumi.get(self, "file_delete_delay_ms")
13461
13950
 
@@ -13463,7 +13952,7 @@ class KafkaTopicConfig(dict):
13463
13952
  @pulumi.getter(name="flushMessages")
13464
13953
  def flush_messages(self) -> Optional[builtins.str]:
13465
13954
  """
13466
- flush.messages value
13955
+ This setting allows specifying an interval at which we will force an fsync of data written to the log. For example if this was set to 1 we would fsync after every message; if it were 5 we would fsync after every five messages. In general we recommend you not set this and use replication for durability and allow the operating system's background flush capabilities as it is more efficient.
13467
13956
  """
13468
13957
  return pulumi.get(self, "flush_messages")
13469
13958
 
@@ -13471,7 +13960,7 @@ class KafkaTopicConfig(dict):
13471
13960
  @pulumi.getter(name="flushMs")
13472
13961
  def flush_ms(self) -> Optional[builtins.str]:
13473
13962
  """
13474
- flush.ms value
13963
+ This setting allows specifying a time interval at which we will force an fsync of data written to the log. For example if this was set to 1000 we would fsync after 1000 ms had passed. In general we recommend you not set this and use replication for durability and allow the operating system's background flush capabilities as it is more efficient.
13475
13964
  """
13476
13965
  return pulumi.get(self, "flush_ms")
13477
13966
 
@@ -13479,7 +13968,7 @@ class KafkaTopicConfig(dict):
13479
13968
  @pulumi.getter(name="indexIntervalBytes")
13480
13969
  def index_interval_bytes(self) -> Optional[builtins.str]:
13481
13970
  """
13482
- index.interval.bytes value
13971
+ This setting controls how frequently Kafka adds an index entry to its offset index. The default setting ensures that we index a message roughly every 4096 bytes. More indexing allows reads to jump closer to the exact position in the log but makes the index larger. You probably don't need to change this.
13483
13972
  """
13484
13973
  return pulumi.get(self, "index_interval_bytes")
13485
13974
 
@@ -13487,7 +13976,7 @@ class KafkaTopicConfig(dict):
13487
13976
  @pulumi.getter(name="localRetentionBytes")
13488
13977
  def local_retention_bytes(self) -> Optional[builtins.str]:
13489
13978
  """
13490
- local.retention.bytes value
13979
+ This configuration controls the maximum bytes tiered storage will retain segment files locally before it will discard old log segments to free up space. If set to -2, the limit is equal to overall retention time. If set to -1, no limit is applied but it's possible only if overall retention is also -1.
13491
13980
  """
13492
13981
  return pulumi.get(self, "local_retention_bytes")
13493
13982
 
@@ -13495,7 +13984,7 @@ class KafkaTopicConfig(dict):
13495
13984
  @pulumi.getter(name="localRetentionMs")
13496
13985
  def local_retention_ms(self) -> Optional[builtins.str]:
13497
13986
  """
13498
- local.retention.ms value
13987
+ This configuration controls the maximum time tiered storage will retain segment files locally before it will discard old log segments to free up space. If set to -2, the time limit is equal to overall retention time. If set to -1, no time limit is applied but it's possible only if overall retention is also -1.
13499
13988
  """
13500
13989
  return pulumi.get(self, "local_retention_ms")
13501
13990
 
@@ -13503,7 +13992,7 @@ class KafkaTopicConfig(dict):
13503
13992
  @pulumi.getter(name="maxCompactionLagMs")
13504
13993
  def max_compaction_lag_ms(self) -> Optional[builtins.str]:
13505
13994
  """
13506
- max.compaction.lag.ms value
13995
+ The maximum time a message will remain ineligible for compaction in the log. Only applicable for logs that are being compacted.
13507
13996
  """
13508
13997
  return pulumi.get(self, "max_compaction_lag_ms")
13509
13998
 
@@ -13511,7 +14000,7 @@ class KafkaTopicConfig(dict):
13511
14000
  @pulumi.getter(name="maxMessageBytes")
13512
14001
  def max_message_bytes(self) -> Optional[builtins.str]:
13513
14002
  """
13514
- max.message.bytes value
14003
+ The largest record batch size allowed by Kafka (after compression if compression is enabled). If this is increased and there are consumers older than 0.10.2, the consumers' fetch size must also be increased so that the they can fetch record batches this large. In the latest message format version, records are always grouped into batches for efficiency. In previous message format versions, uncompressed records are not grouped into batches and this limit only applies to a single record in that case.
13515
14004
  """
13516
14005
  return pulumi.get(self, "max_message_bytes")
13517
14006
 
@@ -13519,7 +14008,7 @@ class KafkaTopicConfig(dict):
13519
14008
  @pulumi.getter(name="messageDownconversionEnable")
13520
14009
  def message_downconversion_enable(self) -> Optional[builtins.bool]:
13521
14010
  """
13522
- message.downconversion.enable value
14011
+ This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. When set to false, broker will not perform down-conversion for consumers expecting an older message format. The broker responds with UNSUPPORTED_VERSION error for consume requests from such older clients. This configuration does not apply to any message format conversion that might be required for replication to followers.
13523
14012
  """
13524
14013
  return pulumi.get(self, "message_downconversion_enable")
13525
14014
 
@@ -13527,7 +14016,7 @@ class KafkaTopicConfig(dict):
13527
14016
  @pulumi.getter(name="messageFormatVersion")
13528
14017
  def message_format_version(self) -> Optional[builtins.str]:
13529
14018
  """
13530
- message.format.version value. The possible values are `0.10.0`, `0.10.0-IV0`, `0.10.0-IV1`, `0.10.1`, `0.10.1-IV0`, `0.10.1-IV1`, `0.10.1-IV2`, `0.10.2`, `0.10.2-IV0`, `0.11.0`, `0.11.0-IV0`, `0.11.0-IV1`, `0.11.0-IV2`, `0.8.0`, `0.8.1`, `0.8.2`, `0.9.0`, `1.0`, `1.0-IV0`, `1.1`, `1.1-IV0`, `2.0`, `2.0-IV0`, `2.0-IV1`, `2.1`, `2.1-IV0`, `2.1-IV1`, `2.1-IV2`, `2.2`, `2.2-IV0`, `2.2-IV1`, `2.3`, `2.3-IV0`, `2.3-IV1`, `2.4`, `2.4-IV0`, `2.4-IV1`, `2.5`, `2.5-IV0`, `2.6`, `2.6-IV0`, `2.7`, `2.7-IV0`, `2.7-IV1`, `2.7-IV2`, `2.8`, `2.8-IV0`, `2.8-IV1`, `3.0`, `3.0-IV0`, `3.0-IV1`, `3.1`, `3.1-IV0`, `3.2`, `3.2-IV0`, `3.3`, `3.3-IV0`, `3.3-IV1`, `3.3-IV2`, `3.3-IV3`, `3.4`, `3.4-IV0`, `3.5`, `3.5-IV0`, `3.5-IV1`, `3.5-IV2`, `3.6`, `3.6-IV0`, `3.6-IV1`, `3.6-IV2`, `3.7`, `3.7-IV0`, `3.7-IV1`, `3.7-IV2`, `3.7-IV3`, `3.7-IV4`, `3.8`, `3.8-IV0`, `3.9`, `3.9-IV0`, `3.9-IV1`, `4.1` and `4.1-IV0`.
14019
+ Specify the message format version the broker will use to append messages to the logs. The value should be a valid ApiVersion. Some examples are: 0.8.2, 0.9.0.0, 0.10.0, check ApiVersion for more details. By setting a particular message format version, the user is certifying that all the existing messages on disk are smaller or equal than the specified version. Setting this value incorrectly will cause consumers with older versions to break as they will receive messages with a format that they don't understand. The possible values are `0.10.0`, `0.10.0-IV0`, `0.10.0-IV1`, `0.10.1`, `0.10.1-IV0`, `0.10.1-IV1`, `0.10.1-IV2`, `0.10.2`, `0.10.2-IV0`, `0.11.0`, `0.11.0-IV0`, `0.11.0-IV1`, `0.11.0-IV2`, `0.8.0`, `0.8.1`, `0.8.2`, `0.9.0`, `1.0`, `1.0-IV0`, `1.1`, `1.1-IV0`, `2.0`, `2.0-IV0`, `2.0-IV1`, `2.1`, `2.1-IV0`, `2.1-IV1`, `2.1-IV2`, `2.2`, `2.2-IV0`, `2.2-IV1`, `2.3`, `2.3-IV0`, `2.3-IV1`, `2.4`, `2.4-IV0`, `2.4-IV1`, `2.5`, `2.5-IV0`, `2.6`, `2.6-IV0`, `2.7`, `2.7-IV0`, `2.7-IV1`, `2.7-IV2`, `2.8`, `2.8-IV0`, `2.8-IV1`, `3.0`, `3.0-IV0`, `3.0-IV1`, `3.1`, `3.1-IV0`, `3.2`, `3.2-IV0`, `3.3`, `3.3-IV0`, `3.3-IV1`, `3.3-IV2`, `3.3-IV3`, `3.4`, `3.4-IV0`, `3.5`, `3.5-IV0`, `3.5-IV1`, `3.5-IV2`, `3.6`, `3.6-IV0`, `3.6-IV1`, `3.6-IV2`, `3.7`, `3.7-IV0`, `3.7-IV1`, `3.7-IV2`, `3.7-IV3`, `3.7-IV4`, `3.8`, `3.8-IV0`, `3.9`, `3.9-IV0`, `3.9-IV1`, `4.0`, `4.0-IV0`, `4.1` and `4.1-IV0`.
13531
14020
  """
13532
14021
  return pulumi.get(self, "message_format_version")
13533
14022
 
@@ -13535,7 +14024,7 @@ class KafkaTopicConfig(dict):
13535
14024
  @pulumi.getter(name="messageTimestampDifferenceMaxMs")
13536
14025
  def message_timestamp_difference_max_ms(self) -> Optional[builtins.str]:
13537
14026
  """
13538
- message.timestamp.difference.max.ms value
14027
+ The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message. If message.timestamp.type=CreateTime, a message will be rejected if the difference in timestamp exceeds this threshold. This configuration is ignored if message.timestamp.type=LogAppendTime.
13539
14028
  """
13540
14029
  return pulumi.get(self, "message_timestamp_difference_max_ms")
13541
14030
 
@@ -13543,7 +14032,7 @@ class KafkaTopicConfig(dict):
13543
14032
  @pulumi.getter(name="messageTimestampType")
13544
14033
  def message_timestamp_type(self) -> Optional[builtins.str]:
13545
14034
  """
13546
- message.timestamp.type value. The possible values are `CreateTime` and `LogAppendTime`.
14035
+ Define whether the timestamp in the message is message create time or log append time. The possible values are `CreateTime` and `LogAppendTime`.
13547
14036
  """
13548
14037
  return pulumi.get(self, "message_timestamp_type")
13549
14038
 
@@ -13551,7 +14040,7 @@ class KafkaTopicConfig(dict):
13551
14040
  @pulumi.getter(name="minCleanableDirtyRatio")
13552
14041
  def min_cleanable_dirty_ratio(self) -> Optional[builtins.float]:
13553
14042
  """
13554
- min.cleanable.dirty.ratio value
14043
+ This configuration controls how frequently the log compactor will attempt to clean the log (assuming log compaction is enabled). By default we will avoid cleaning a log where more than 50% of the log has been compacted. This ratio bounds the maximum space wasted in the log by duplicates (at 50% at most 50% of the log could be duplicates). A higher ratio will mean fewer, more efficient cleanings but will mean more wasted space in the log. If the max.compaction.lag.ms or the min.compaction.lag.ms configurations are also specified, then the log compactor considers the log to be eligible for compaction as soon as either: (i) the dirty ratio threshold has been met and the log has had dirty (uncompacted) records for at least the min.compaction.lag.ms duration, or (ii) if the log has had dirty (uncompacted) records for at most the max.compaction.lag.ms period.
13555
14044
  """
13556
14045
  return pulumi.get(self, "min_cleanable_dirty_ratio")
13557
14046
 
@@ -13559,7 +14048,7 @@ class KafkaTopicConfig(dict):
13559
14048
  @pulumi.getter(name="minCompactionLagMs")
13560
14049
  def min_compaction_lag_ms(self) -> Optional[builtins.str]:
13561
14050
  """
13562
- min.compaction.lag.ms value
14051
+ The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.
13563
14052
  """
13564
14053
  return pulumi.get(self, "min_compaction_lag_ms")
13565
14054
 
@@ -13567,7 +14056,7 @@ class KafkaTopicConfig(dict):
13567
14056
  @pulumi.getter(name="minInsyncReplicas")
13568
14057
  def min_insync_replicas(self) -> Optional[builtins.str]:
13569
14058
  """
13570
- min.insync.replicas value
14059
+ When a producer sets acks to 'all' (or '-1'), this configuration specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful. If this minimum cannot be met, then the producer will raise an exception (either NotEnoughReplicas or NotEnoughReplicasAfterAppend). When used together, min.insync.replicas and acks allow you to enforce greater durability guarantees. A typical scenario would be to create a topic with a replication factor of 3, set min.insync.replicas to 2, and produce with acks of 'all'. This will ensure that the producer raises an exception if a majority of replicas do not receive a write.
13571
14060
  """
13572
14061
  return pulumi.get(self, "min_insync_replicas")
13573
14062
 
@@ -13575,7 +14064,7 @@ class KafkaTopicConfig(dict):
13575
14064
  @pulumi.getter
13576
14065
  def preallocate(self) -> Optional[builtins.bool]:
13577
14066
  """
13578
- preallocate value
14067
+ True if we should preallocate the file on disk when creating a new log segment.
13579
14068
  """
13580
14069
  return pulumi.get(self, "preallocate")
13581
14070
 
@@ -13583,7 +14072,7 @@ class KafkaTopicConfig(dict):
13583
14072
  @pulumi.getter(name="remoteStorageEnable")
13584
14073
  def remote_storage_enable(self) -> Optional[builtins.bool]:
13585
14074
  """
13586
- remote.storage.enable value
14075
+ Indicates whether tiered storage should be enabled.
13587
14076
  """
13588
14077
  return pulumi.get(self, "remote_storage_enable")
13589
14078
 
@@ -13591,7 +14080,7 @@ class KafkaTopicConfig(dict):
13591
14080
  @pulumi.getter(name="retentionBytes")
13592
14081
  def retention_bytes(self) -> Optional[builtins.str]:
13593
14082
  """
13594
- retention.bytes value
14083
+ This configuration controls the maximum size a partition (which consists of log segments) can grow to before we will discard old log segments to free up space if we are using the 'delete' retention policy. By default there is no size limit only a time limit. Since this limit is enforced at the partition level, multiply it by the number of partitions to compute the topic retention in bytes.
13595
14084
  """
13596
14085
  return pulumi.get(self, "retention_bytes")
13597
14086
 
@@ -13599,7 +14088,7 @@ class KafkaTopicConfig(dict):
13599
14088
  @pulumi.getter(name="retentionMs")
13600
14089
  def retention_ms(self) -> Optional[builtins.str]:
13601
14090
  """
13602
- retention.ms value
14091
+ This configuration controls the maximum time we will retain a log before we will discard old log segments to free up space if we are using the 'delete' retention policy. This represents an SLA on how soon consumers must read their data. If set to -1, no time limit is applied.
13603
14092
  """
13604
14093
  return pulumi.get(self, "retention_ms")
13605
14094
 
@@ -13607,7 +14096,7 @@ class KafkaTopicConfig(dict):
13607
14096
  @pulumi.getter(name="segmentBytes")
13608
14097
  def segment_bytes(self) -> Optional[builtins.str]:
13609
14098
  """
13610
- segment.bytes value
14099
+ This configuration controls the segment file size for the log. Retention and cleaning is always done a file at a time, so a larger segment size means fewer files but less granular control over retention.
13611
14100
  """
13612
14101
  return pulumi.get(self, "segment_bytes")
13613
14102
 
@@ -13615,7 +14104,7 @@ class KafkaTopicConfig(dict):
13615
14104
  @pulumi.getter(name="segmentIndexBytes")
13616
14105
  def segment_index_bytes(self) -> Optional[builtins.str]:
13617
14106
  """
13618
- segment.index.bytes value
14107
+ This configuration controls the size of the index that maps offsets to file positions. We preallocate this index file and shrink it only after log rolls. You generally should not need to change this setting.
13619
14108
  """
13620
14109
  return pulumi.get(self, "segment_index_bytes")
13621
14110
 
@@ -13623,7 +14112,7 @@ class KafkaTopicConfig(dict):
13623
14112
  @pulumi.getter(name="segmentJitterMs")
13624
14113
  def segment_jitter_ms(self) -> Optional[builtins.str]:
13625
14114
  """
13626
- segment.jitter.ms value
14115
+ The maximum random jitter subtracted from the scheduled segment roll time to avoid thundering herds of segment rolling
13627
14116
  """
13628
14117
  return pulumi.get(self, "segment_jitter_ms")
13629
14118
 
@@ -13631,16 +14120,15 @@ class KafkaTopicConfig(dict):
13631
14120
  @pulumi.getter(name="segmentMs")
13632
14121
  def segment_ms(self) -> Optional[builtins.str]:
13633
14122
  """
13634
- segment.ms value
14123
+ This configuration controls the period of time after which Kafka will force the log to roll even if the segment file isn't full to ensure that retention can delete or compact old data. Setting this to a very low value has consequences, and the Aiven management plane ignores values less than 10 seconds.
13635
14124
  """
13636
14125
  return pulumi.get(self, "segment_ms")
13637
14126
 
13638
14127
  @property
13639
14128
  @pulumi.getter(name="uncleanLeaderElectionEnable")
13640
- @_utilities.deprecated("""This field is deprecated and no longer functional.""")
13641
14129
  def unclean_leader_election_enable(self) -> Optional[builtins.bool]:
13642
14130
  """
13643
- unclean.leader.election.enable value; This field is deprecated and no longer functional.
14131
+ Indicates whether to enable replicas not in the ISR set to be elected as leader as a last resort, even though doing so may result in data loss.
13644
14132
  """
13645
14133
  return pulumi.get(self, "unclean_leader_election_enable")
13646
14134
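The KafkaTopicConfig docstrings above replace the old `x.y value` placeholders with the full Kafka semantics. As a usage sketch, these fields map to the `config` argument of the `aiven.KafkaTopic` resource; the project and service names below are placeholders, and `KafkaTopicConfigArgs` is assumed to be the matching input class in this SDK, with field names and string typing following the output class shown here:

    import pulumi_aiven as aiven

    # Compacted, tiered-storage topic; every value corresponds to a field documented above.
    topic = aiven.KafkaTopic(
        "events",
        project="my-project",          # placeholder project name
        service_name="kafka1",         # placeholder Kafka service name
        topic_name="events",
        partitions=3,
        replication=3,
        config=aiven.KafkaTopicConfigArgs(
            cleanup_policy="compact,delete",  # retention policy for old segments
            retention_ms="604800000",         # discard segments older than 7 days
            min_insync_replicas="2",          # pair with acks=all on producers
            remote_storage_enable=True,       # enable tiered storage
            local_retention_ms="86400000",    # keep roughly one day locally
        ),
    )

Note that the numeric settings are passed as strings, matching the `builtins.str` types in `KafkaTopicConfig`.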
 
@@ -17014,11 +17502,11 @@ class OpenSearchOpensearch(dict):
17014
17502
  uris: Optional[Sequence[builtins.str]] = None,
17015
17503
  username: Optional[builtins.str] = None):
17016
17504
  """
17017
- :param builtins.str kibana_uri: URI for Kibana dashboard frontend
17018
- :param builtins.str opensearch_dashboards_uri: URI for OpenSearch dashboard frontend
17019
- :param builtins.str password: OpenSearch password
17505
+ :param builtins.str kibana_uri: URI for Kibana dashboard frontend.
17506
+ :param builtins.str opensearch_dashboards_uri: URI for OpenSearch dashboard frontend.
17507
+ :param builtins.str password: OpenSearch password.
17020
17508
  :param Sequence[builtins.str] uris: OpenSearch server URIs.
17021
- :param builtins.str username: OpenSearch username
17509
+ :param builtins.str username: OpenSearch username.
17022
17510
  """
17023
17511
  if kibana_uri is not None:
17024
17512
  pulumi.set(__self__, "kibana_uri", kibana_uri)
@@ -17036,7 +17524,7 @@ class OpenSearchOpensearch(dict):
17036
17524
  @_utilities.deprecated("""This field was added by mistake and has never worked. It will be removed in future versions.""")
17037
17525
  def kibana_uri(self) -> Optional[builtins.str]:
17038
17526
  """
17039
- URI for Kibana dashboard frontend
17527
+ URI for Kibana dashboard frontend.
17040
17528
  """
17041
17529
  return pulumi.get(self, "kibana_uri")
17042
17530
 
@@ -17044,7 +17532,7 @@ class OpenSearchOpensearch(dict):
17044
17532
  @pulumi.getter(name="opensearchDashboardsUri")
17045
17533
  def opensearch_dashboards_uri(self) -> Optional[builtins.str]:
17046
17534
  """
17047
- URI for OpenSearch dashboard frontend
17535
+ URI for OpenSearch dashboard frontend.
17048
17536
  """
17049
17537
  return pulumi.get(self, "opensearch_dashboards_uri")
17050
17538
 
@@ -17052,7 +17540,7 @@ class OpenSearchOpensearch(dict):
17052
17540
  @pulumi.getter
17053
17541
  def password(self) -> Optional[builtins.str]:
17054
17542
  """
17055
- OpenSearch password
17543
+ OpenSearch password.
17056
17544
  """
17057
17545
  return pulumi.get(self, "password")
17058
17546
 
@@ -17068,7 +17556,7 @@ class OpenSearchOpensearch(dict):
17068
17556
  @pulumi.getter
17069
17557
  def username(self) -> Optional[builtins.str]:
17070
17558
  """
17071
- OpenSearch username
17559
+ OpenSearch username.
17072
17560
  """
17073
17561
  return pulumi.get(self, "username")
17074
17562
 
@@ -18257,6 +18745,8 @@ class OpenSearchOpensearchUserConfigOpensearch(dict):
18257
18745
  suggest = "email_sender_username"
18258
18746
  elif key == "enableRemoteBackedStorage":
18259
18747
  suggest = "enable_remote_backed_storage"
18748
+ elif key == "enableSearchableSnapshots":
18749
+ suggest = "enable_searchable_snapshots"
18260
18750
  elif key == "enableSecurityAudit":
18261
18751
  suggest = "enable_security_audit"
18262
18752
  elif key == "httpMaxContentLength":
@@ -18360,6 +18850,7 @@ class OpenSearchOpensearchUserConfigOpensearch(dict):
18360
18850
  email_sender_password: Optional[builtins.str] = None,
18361
18851
  email_sender_username: Optional[builtins.str] = None,
18362
18852
  enable_remote_backed_storage: Optional[builtins.bool] = None,
18853
+ enable_searchable_snapshots: Optional[builtins.bool] = None,
18363
18854
  enable_security_audit: Optional[builtins.bool] = None,
18364
18855
  http_max_content_length: Optional[builtins.int] = None,
18365
18856
  http_max_header_size: Optional[builtins.int] = None,
@@ -18412,6 +18903,7 @@ class OpenSearchOpensearchUserConfigOpensearch(dict):
18412
18903
  :param builtins.str email_sender_password: Sender password for Opensearch alerts to authenticate with SMTP server. Example: `very-secure-mail-password`.
18413
18904
  :param builtins.str email_sender_username: Sender username for Opensearch alerts. Example: `jane@example.com`.
18414
18905
  :param builtins.bool enable_remote_backed_storage: Enable remote-backed storage.
18906
+ :param builtins.bool enable_searchable_snapshots: Enable searchable snapshots.
18415
18907
  :param builtins.bool enable_security_audit: Enable/Disable security audit.
18416
18908
  :param builtins.int http_max_content_length: Maximum content length for HTTP requests to the OpenSearch HTTP API, in bytes.
18417
18909
  :param builtins.int http_max_header_size: The max size of allowed headers, in bytes. Example: `8192`.
@@ -18476,6 +18968,8 @@ class OpenSearchOpensearchUserConfigOpensearch(dict):
18476
18968
  pulumi.set(__self__, "email_sender_username", email_sender_username)
18477
18969
  if enable_remote_backed_storage is not None:
18478
18970
  pulumi.set(__self__, "enable_remote_backed_storage", enable_remote_backed_storage)
18971
+ if enable_searchable_snapshots is not None:
18972
+ pulumi.set(__self__, "enable_searchable_snapshots", enable_searchable_snapshots)
18479
18973
  if enable_security_audit is not None:
18480
18974
  pulumi.set(__self__, "enable_security_audit", enable_security_audit)
18481
18975
  if http_max_content_length is not None:
@@ -18650,6 +19144,14 @@ class OpenSearchOpensearchUserConfigOpensearch(dict):
18650
19144
  """
18651
19145
  return pulumi.get(self, "enable_remote_backed_storage")
18652
19146
 
19147
+ @property
19148
+ @pulumi.getter(name="enableSearchableSnapshots")
19149
+ def enable_searchable_snapshots(self) -> Optional[builtins.bool]:
19150
+ """
19151
+ Enable searchable snapshots.
19152
+ """
19153
+ return pulumi.get(self, "enable_searchable_snapshots")
19154
+
18653
19155
  @property
18654
19156
  @pulumi.getter(name="enableSecurityAudit")
18655
19157
  def enable_security_audit(self) -> Optional[builtins.bool]:
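`enable_searchable_snapshots` is the new flag in this OpenSearch user config block. A rough sketch of setting it on a service, assuming the input classes `OpenSearchOpensearchUserConfigArgs` and `OpenSearchOpensearchUserConfigOpensearchArgs` mirror the output types in this file; project, cloud and plan values are placeholders:

    import pulumi_aiven as aiven

    search = aiven.OpenSearch(
        "search",
        project="my-project",                # placeholder
        cloud_name="google-europe-west1",    # placeholder region
        plan="business-4",                   # placeholder plan
        service_name="search1",
        opensearch_user_config=aiven.OpenSearchOpensearchUserConfigArgs(
            opensearch=aiven.OpenSearchOpensearchUserConfigOpensearchArgs(
                enable_searchable_snapshots=True,   # option added in this release
            ),
        ),
    )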
@@ -21316,6 +21818,61 @@ class OrganizationProjectTag(dict):
21316
21818
  return pulumi.get(self, "value")
21317
21819
 
21318
21820
 
21821
+ @pulumi.output_type
21822
+ class OrganizationProjectTimeouts(dict):
21823
+ def __init__(__self__, *,
21824
+ create: Optional[builtins.str] = None,
21825
+ delete: Optional[builtins.str] = None,
21826
+ read: Optional[builtins.str] = None,
21827
+ update: Optional[builtins.str] = None):
21828
+ """
21829
+ :param builtins.str create: A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours).
21830
+ :param builtins.str delete: A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). Setting a timeout for a Delete operation is only applicable if changes are saved into state before the destroy operation occurs.
21831
+ :param builtins.str read: A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). Read operations occur during any refresh or planning operation when refresh is enabled.
21832
+ :param builtins.str update: A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours).
21833
+ """
21834
+ if create is not None:
21835
+ pulumi.set(__self__, "create", create)
21836
+ if delete is not None:
21837
+ pulumi.set(__self__, "delete", delete)
21838
+ if read is not None:
21839
+ pulumi.set(__self__, "read", read)
21840
+ if update is not None:
21841
+ pulumi.set(__self__, "update", update)
21842
+
21843
+ @property
21844
+ @pulumi.getter
21845
+ def create(self) -> Optional[builtins.str]:
21846
+ """
21847
+ A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours).
21848
+ """
21849
+ return pulumi.get(self, "create")
21850
+
21851
+ @property
21852
+ @pulumi.getter
21853
+ def delete(self) -> Optional[builtins.str]:
21854
+ """
21855
+ A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). Setting a timeout for a Delete operation is only applicable if changes are saved into state before the destroy operation occurs.
21856
+ """
21857
+ return pulumi.get(self, "delete")
21858
+
21859
+ @property
21860
+ @pulumi.getter
21861
+ def read(self) -> Optional[builtins.str]:
21862
+ """
21863
+ A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). Read operations occur during any refresh or planning operation when refresh is enabled.
21864
+ """
21865
+ return pulumi.get(self, "read")
21866
+
21867
+ @property
21868
+ @pulumi.getter
21869
+ def update(self) -> Optional[builtins.str]:
21870
+ """
21871
+ A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours).
21872
+ """
21873
+ return pulumi.get(self, "update")
21874
+
21875
+
21319
21876
  @pulumi.output_type
21320
21877
  class OrganizationTimeouts(dict):
21321
21878
  def __init__(__self__, *,
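The `OrganizationProjectTimeouts` output added above mirrors a `timeouts` block whose duration strings use Go's `time.ParseDuration` syntax, as the docstrings note. A sketch of supplying it when creating a project, assuming the resource accepts a `timeouts` argument with an `OrganizationProjectTimeoutsArgs` input class and the usual `project_id`/`organization_id`/`billing_group_id` arguments; every ID shown is a placeholder:

    import pulumi_aiven as aiven

    project = aiven.OrganizationProject(
        "proj",
        project_id="my-project",           # placeholder project ID
        organization_id="org1a2b3c4d5e6",  # placeholder organization ID
        billing_group_id="00000000-0000-0000-0000-000000000000",  # placeholder
        timeouts=aiven.OrganizationProjectTimeoutsArgs(
            create="10m",   # allow up to ten minutes for creation
            delete="5m",    # only honored once the resource is in state
        ),
    )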
@@ -22188,7 +22745,6 @@ class PgPgUserConfig(dict):
22188
22745
 
22189
22746
  @property
22190
22747
  @pulumi.getter
22191
- @_utilities.deprecated("""This property is deprecated.""")
22192
22748
  def pgaudit(self) -> Optional['outputs.PgPgUserConfigPgaudit']:
22193
22749
  """
22194
22750
  System-wide settings for the pgaudit extension
@@ -23351,7 +23907,7 @@ class PgPgUserConfigPgaudit(dict):
23351
23907
  :param builtins.bool feature_enabled: Enable pgaudit extension. When enabled, pgaudit extension will be automatically installed.Otherwise, extension will be uninstalled but auditing configurations will be preserved. Default: `false`.
23352
23908
  :param builtins.bool log_catalog: Specifies that session logging should be enabled in the casewhere all relations in a statement are in pg_catalog. Default: `true`.
23353
23909
  :param builtins.bool log_client: Specifies whether log messages will be visible to a client process such as psql. Default: `false`.
23354
- :param builtins.str log_level: Enum: `debug1`, `debug2`, `debug3`, `debug4`, `debug5`, `info`, `notice`, `warning`, `log`. Specifies the log level that will be used for log entries. Default: `log`.
23910
+ :param builtins.str log_level: Enum: `debug1`, `debug2`, `debug3`, `debug4`, `debug5`, `info`, `log`, `notice`, `warning`. Specifies the log level that will be used for log entries. Default: `log`.
23355
23911
  :param builtins.int log_max_string_length: Crop parameters representation and whole statements if they exceed this threshold. A (default) value of -1 disable the truncation. Default: `-1`.
23356
23912
  :param builtins.bool log_nested_statements: This GUC allows to turn off logging nested statements, that is, statements that are executed as part of another ExecutorRun. Default: `true`.
23357
23913
  :param builtins.bool log_parameter: Specifies that audit logging should include the parameters that were passed with the statement. Default: `false`.
@@ -23394,7 +23950,6 @@ class PgPgUserConfigPgaudit(dict):
23394
23950
 
23395
23951
  @property
23396
23952
  @pulumi.getter(name="featureEnabled")
23397
- @_utilities.deprecated("""This property is deprecated.""")
23398
23953
  def feature_enabled(self) -> Optional[builtins.bool]:
23399
23954
  """
23400
23955
  Enable pgaudit extension. When enabled, pgaudit extension will be automatically installed.Otherwise, extension will be uninstalled but auditing configurations will be preserved. Default: `false`.
@@ -23403,7 +23958,6 @@ class PgPgUserConfigPgaudit(dict):
23403
23958
 
23404
23959
  @property
23405
23960
  @pulumi.getter(name="logCatalog")
23406
- @_utilities.deprecated("""This property is deprecated.""")
23407
23961
  def log_catalog(self) -> Optional[builtins.bool]:
23408
23962
  """
23409
23963
  Specifies that session logging should be enabled in the casewhere all relations in a statement are in pg_catalog. Default: `true`.
@@ -23412,7 +23966,6 @@ class PgPgUserConfigPgaudit(dict):
23412
23966
 
23413
23967
  @property
23414
23968
  @pulumi.getter(name="logClient")
23415
- @_utilities.deprecated("""This property is deprecated.""")
23416
23969
  def log_client(self) -> Optional[builtins.bool]:
23417
23970
  """
23418
23971
  Specifies whether log messages will be visible to a client process such as psql. Default: `false`.
@@ -23421,16 +23974,14 @@ class PgPgUserConfigPgaudit(dict):
23421
23974
 
23422
23975
  @property
23423
23976
  @pulumi.getter(name="logLevel")
23424
- @_utilities.deprecated("""This property is deprecated.""")
23425
23977
  def log_level(self) -> Optional[builtins.str]:
23426
23978
  """
23427
- Enum: `debug1`, `debug2`, `debug3`, `debug4`, `debug5`, `info`, `notice`, `warning`, `log`. Specifies the log level that will be used for log entries. Default: `log`.
23979
+ Enum: `debug1`, `debug2`, `debug3`, `debug4`, `debug5`, `info`, `log`, `notice`, `warning`. Specifies the log level that will be used for log entries. Default: `log`.
23428
23980
  """
23429
23981
  return pulumi.get(self, "log_level")
23430
23982
 
23431
23983
  @property
23432
23984
  @pulumi.getter(name="logMaxStringLength")
23433
- @_utilities.deprecated("""This property is deprecated.""")
23434
23985
  def log_max_string_length(self) -> Optional[builtins.int]:
23435
23986
  """
23436
23987
  Crop parameters representation and whole statements if they exceed this threshold. A (default) value of -1 disable the truncation. Default: `-1`.
@@ -23439,7 +23990,6 @@ class PgPgUserConfigPgaudit(dict):
23439
23990
 
23440
23991
  @property
23441
23992
  @pulumi.getter(name="logNestedStatements")
23442
- @_utilities.deprecated("""This property is deprecated.""")
23443
23993
  def log_nested_statements(self) -> Optional[builtins.bool]:
23444
23994
  """
23445
23995
  This GUC allows to turn off logging nested statements, that is, statements that are executed as part of another ExecutorRun. Default: `true`.
@@ -23448,7 +23998,6 @@ class PgPgUserConfigPgaudit(dict):
23448
23998
 
23449
23999
  @property
23450
24000
  @pulumi.getter(name="logParameter")
23451
- @_utilities.deprecated("""This property is deprecated.""")
23452
24001
  def log_parameter(self) -> Optional[builtins.bool]:
23453
24002
  """
23454
24003
  Specifies that audit logging should include the parameters that were passed with the statement. Default: `false`.
@@ -23457,7 +24006,6 @@ class PgPgUserConfigPgaudit(dict):
23457
24006
 
23458
24007
  @property
23459
24008
  @pulumi.getter(name="logParameterMaxSize")
23460
- @_utilities.deprecated("""This property is deprecated.""")
23461
24009
  def log_parameter_max_size(self) -> Optional[builtins.int]:
23462
24010
  """
23463
24011
  Specifies that parameter values longer than this setting (in bytes) should not be logged, but replaced with \\n\\n. Default: `0`.
@@ -23466,7 +24014,6 @@ class PgPgUserConfigPgaudit(dict):
23466
24014
 
23467
24015
  @property
23468
24016
  @pulumi.getter(name="logRelation")
23469
- @_utilities.deprecated("""This property is deprecated.""")
23470
24017
  def log_relation(self) -> Optional[builtins.bool]:
23471
24018
  """
23472
24019
  Specifies whether session audit logging should create a separate log entry for each relation (TABLE, VIEW, etc.) referenced in a SELECT or DML statement. Default: `false`.
@@ -23475,7 +24022,6 @@ class PgPgUserConfigPgaudit(dict):
23475
24022
 
23476
24023
  @property
23477
24024
  @pulumi.getter(name="logRows")
23478
- @_utilities.deprecated("""This property is deprecated.""")
23479
24025
  def log_rows(self) -> Optional[builtins.bool]:
23480
24026
  """
23481
24027
  Specifies that audit logging should include the rows retrieved or affected by a statement. When enabled the rows field will be included after the parameter field. Default: `false`.
@@ -23484,7 +24030,6 @@ class PgPgUserConfigPgaudit(dict):
23484
24030
 
23485
24031
  @property
23486
24032
  @pulumi.getter(name="logStatement")
23487
- @_utilities.deprecated("""This property is deprecated.""")
23488
24033
  def log_statement(self) -> Optional[builtins.bool]:
23489
24034
  """
23490
24035
  Specifies whether logging will include the statement text and parameters (if enabled). Default: `true`.
@@ -23493,7 +24038,6 @@ class PgPgUserConfigPgaudit(dict):
23493
24038
 
23494
24039
  @property
23495
24040
  @pulumi.getter(name="logStatementOnce")
23496
- @_utilities.deprecated("""This property is deprecated.""")
23497
24041
  def log_statement_once(self) -> Optional[builtins.bool]:
23498
24042
  """
23499
24043
  Specifies whether logging will include the statement text and parameters with the first log entry for a statement/substatement combination or with every entry. Default: `false`.
@@ -23502,7 +24046,6 @@ class PgPgUserConfigPgaudit(dict):
23502
24046
 
23503
24047
  @property
23504
24048
  @pulumi.getter
23505
- @_utilities.deprecated("""This property is deprecated.""")
23506
24049
  def logs(self) -> Optional[Sequence[builtins.str]]:
23507
24050
  """
23508
24051
  Specifies which classes of statements will be logged by session audit logging.
@@ -23511,7 +24054,6 @@ class PgPgUserConfigPgaudit(dict):
23511
24054
 
23512
24055
  @property
23513
24056
  @pulumi.getter
23514
- @_utilities.deprecated("""This property is deprecated.""")
23515
24057
  def role(self) -> Optional[builtins.str]:
23516
24058
  """
23517
24059
  Specifies the master role to use for object audit logging.
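With the deprecation decorators dropped above, the pgaudit block is a regular part of the PostgreSQL user config again. A short sketch of enabling it, assuming the matching `PgPgUserConfigArgs` and `PgPgUserConfigPgauditArgs` input classes and placeholder service details:

    import pulumi_aiven as aiven

    pg = aiven.Pg(
        "pg",
        project="my-project",               # placeholder
        cloud_name="google-europe-west1",   # placeholder
        plan="startup-4",                   # placeholder
        service_name="pg1",
        pg_user_config=aiven.PgPgUserConfigArgs(
            pgaudit=aiven.PgPgUserConfigPgauditArgs(
                feature_enabled=True,     # install the pgaudit extension
                log_level="info",         # one of the enum values listed above
                logs=["write", "ddl"],    # statement classes to audit
            ),
        ),
    )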
@@ -30801,6 +31343,7 @@ class GetAlloydbomniAlloydbomniUserConfigResult(dict):
30801
31343
  pg_read_replica: Optional[builtins.bool] = None,
30802
31344
  pg_service_to_fork_from: Optional[builtins.str] = None,
30803
31345
  pg_version: Optional[builtins.str] = None,
31346
+ pgaudit: Optional['outputs.GetAlloydbomniAlloydbomniUserConfigPgauditResult'] = None,
30804
31347
  pgbouncer: Optional['outputs.GetAlloydbomniAlloydbomniUserConfigPgbouncerResult'] = None,
30805
31348
  pglookout: Optional['outputs.GetAlloydbomniAlloydbomniUserConfigPglookoutResult'] = None,
30806
31349
  private_access: Optional['outputs.GetAlloydbomniAlloydbomniUserConfigPrivateAccessResult'] = None,
@@ -30832,6 +31375,7 @@ class GetAlloydbomniAlloydbomniUserConfigResult(dict):
30832
31375
  :param builtins.bool pg_read_replica: Should the service which is being forked be a read replica (deprecated, use read_replica service integration instead).
30833
31376
  :param builtins.str pg_service_to_fork_from: Name of the PG Service from which to fork (deprecated, use service_to_fork_from). This has effect only when a new service is being created. Example: `anotherservicename`.
30834
31377
  :param builtins.str pg_version: Enum: `15`, and newer. PostgreSQL major version.
31378
+ :param 'GetAlloydbomniAlloydbomniUserConfigPgauditArgs' pgaudit: System-wide settings for the pgaudit extension
30835
31379
  :param 'GetAlloydbomniAlloydbomniUserConfigPgbouncerArgs' pgbouncer: PGBouncer connection pooling settings
30836
31380
  :param 'GetAlloydbomniAlloydbomniUserConfigPglookoutArgs' pglookout: System-wide settings for pglookout
30837
31381
  :param 'GetAlloydbomniAlloydbomniUserConfigPrivateAccessArgs' private_access: Allow access to selected service ports from private networks
@@ -30879,6 +31423,8 @@ class GetAlloydbomniAlloydbomniUserConfigResult(dict):
30879
31423
  pulumi.set(__self__, "pg_service_to_fork_from", pg_service_to_fork_from)
30880
31424
  if pg_version is not None:
30881
31425
  pulumi.set(__self__, "pg_version", pg_version)
31426
+ if pgaudit is not None:
31427
+ pulumi.set(__self__, "pgaudit", pgaudit)
30882
31428
  if pgbouncer is not None:
30883
31429
  pulumi.set(__self__, "pgbouncer", pgbouncer)
30884
31430
  if pglookout is not None:
@@ -31037,6 +31583,14 @@ class GetAlloydbomniAlloydbomniUserConfigResult(dict):
31037
31583
  """
31038
31584
  return pulumi.get(self, "pg_version")
31039
31585
 
31586
+ @property
31587
+ @pulumi.getter
31588
+ def pgaudit(self) -> Optional['outputs.GetAlloydbomniAlloydbomniUserConfigPgauditResult']:
31589
+ """
31590
+ System-wide settings for the pgaudit extension
31591
+ """
31592
+ return pulumi.get(self, "pgaudit")
31593
+
31040
31594
  @property
31041
31595
  @pulumi.getter
31042
31596
  def pgbouncer(self) -> Optional['outputs.GetAlloydbomniAlloydbomniUserConfigPgbouncerResult']:
@@ -31763,6 +32317,181 @@ class GetAlloydbomniAlloydbomniUserConfigPgResult(dict):
31763
32317
  return pulumi.get(self, "wal_writer_delay")
31764
32318
 
31765
32319
 
32320
+ @pulumi.output_type
32321
+ class GetAlloydbomniAlloydbomniUserConfigPgauditResult(dict):
32322
+ def __init__(__self__, *,
32323
+ feature_enabled: Optional[builtins.bool] = None,
32324
+ log_catalog: Optional[builtins.bool] = None,
32325
+ log_client: Optional[builtins.bool] = None,
32326
+ log_level: Optional[builtins.str] = None,
32327
+ log_max_string_length: Optional[builtins.int] = None,
32328
+ log_nested_statements: Optional[builtins.bool] = None,
32329
+ log_parameter: Optional[builtins.bool] = None,
32330
+ log_parameter_max_size: Optional[builtins.int] = None,
32331
+ log_relation: Optional[builtins.bool] = None,
32332
+ log_rows: Optional[builtins.bool] = None,
32333
+ log_statement: Optional[builtins.bool] = None,
32334
+ log_statement_once: Optional[builtins.bool] = None,
32335
+ logs: Optional[Sequence[builtins.str]] = None,
32336
+ role: Optional[builtins.str] = None):
32337
+ """
32338
+ :param builtins.bool feature_enabled: Enable pgaudit extension. When enabled, pgaudit extension will be automatically installed.Otherwise, extension will be uninstalled but auditing configurations will be preserved. Default: `false`.
32339
+ :param builtins.bool log_catalog: Specifies that session logging should be enabled in the casewhere all relations in a statement are in pg_catalog. Default: `true`.
32340
+ :param builtins.bool log_client: Specifies whether log messages will be visible to a client process such as psql. Default: `false`.
32341
+ :param builtins.str log_level: Enum: `debug1`, `debug2`, `debug3`, `debug4`, `debug5`, `info`, `log`, `notice`, `warning`. Specifies the log level that will be used for log entries. Default: `log`.
32342
+ :param builtins.int log_max_string_length: Crop parameters representation and whole statements if they exceed this threshold. A (default) value of -1 disable the truncation. Default: `-1`.
32343
+ :param builtins.bool log_nested_statements: This GUC allows to turn off logging nested statements, that is, statements that are executed as part of another ExecutorRun. Default: `true`.
32344
+ :param builtins.bool log_parameter: Specifies that audit logging should include the parameters that were passed with the statement. Default: `false`.
32345
+ :param builtins.int log_parameter_max_size: Specifies that parameter values longer than this setting (in bytes) should not be logged, but replaced with <long param suppressed>. Default: `0`.
32346
+ :param builtins.bool log_relation: Specifies whether session audit logging should create a separate log entry for each relation (TABLE, VIEW, etc.) referenced in a SELECT or DML statement. Default: `false`.
32347
+ :param builtins.bool log_rows: Specifies that audit logging should include the rows retrieved or affected by a statement. When enabled the rows field will be included after the parameter field. Default: `false`.
32348
+ :param builtins.bool log_statement: Specifies whether logging will include the statement text and parameters (if enabled). Default: `true`.
32349
+ :param builtins.bool log_statement_once: Specifies whether logging will include the statement text and parameters with the first log entry for a statement/substatement combination or with every entry. Default: `false`.
32350
+ :param Sequence[builtins.str] logs: Specifies which classes of statements will be logged by session audit logging.
32351
+ :param builtins.str role: Specifies the master role to use for object audit logging.
32352
+ """
32353
+ if feature_enabled is not None:
32354
+ pulumi.set(__self__, "feature_enabled", feature_enabled)
32355
+ if log_catalog is not None:
32356
+ pulumi.set(__self__, "log_catalog", log_catalog)
32357
+ if log_client is not None:
32358
+ pulumi.set(__self__, "log_client", log_client)
32359
+ if log_level is not None:
32360
+ pulumi.set(__self__, "log_level", log_level)
32361
+ if log_max_string_length is not None:
32362
+ pulumi.set(__self__, "log_max_string_length", log_max_string_length)
32363
+ if log_nested_statements is not None:
32364
+ pulumi.set(__self__, "log_nested_statements", log_nested_statements)
32365
+ if log_parameter is not None:
32366
+ pulumi.set(__self__, "log_parameter", log_parameter)
32367
+ if log_parameter_max_size is not None:
32368
+ pulumi.set(__self__, "log_parameter_max_size", log_parameter_max_size)
32369
+ if log_relation is not None:
32370
+ pulumi.set(__self__, "log_relation", log_relation)
32371
+ if log_rows is not None:
32372
+ pulumi.set(__self__, "log_rows", log_rows)
32373
+ if log_statement is not None:
32374
+ pulumi.set(__self__, "log_statement", log_statement)
32375
+ if log_statement_once is not None:
32376
+ pulumi.set(__self__, "log_statement_once", log_statement_once)
32377
+ if logs is not None:
32378
+ pulumi.set(__self__, "logs", logs)
32379
+ if role is not None:
32380
+ pulumi.set(__self__, "role", role)
32381
+
32382
+ @property
32383
+ @pulumi.getter(name="featureEnabled")
32384
+ def feature_enabled(self) -> Optional[builtins.bool]:
32385
+ """
32386
+ Enable pgaudit extension. When enabled, pgaudit extension will be automatically installed.Otherwise, extension will be uninstalled but auditing configurations will be preserved. Default: `false`.
32387
+ """
32388
+ return pulumi.get(self, "feature_enabled")
32389
+
32390
+ @property
32391
+ @pulumi.getter(name="logCatalog")
32392
+ def log_catalog(self) -> Optional[builtins.bool]:
32393
+ """
32394
+ Specifies that session logging should be enabled in the casewhere all relations in a statement are in pg_catalog. Default: `true`.
32395
+ """
32396
+ return pulumi.get(self, "log_catalog")
32397
+
32398
+ @property
32399
+ @pulumi.getter(name="logClient")
32400
+ def log_client(self) -> Optional[builtins.bool]:
32401
+ """
32402
+ Specifies whether log messages will be visible to a client process such as psql. Default: `false`.
32403
+ """
32404
+ return pulumi.get(self, "log_client")
32405
+
32406
+ @property
32407
+ @pulumi.getter(name="logLevel")
32408
+ def log_level(self) -> Optional[builtins.str]:
32409
+ """
32410
+ Enum: `debug1`, `debug2`, `debug3`, `debug4`, `debug5`, `info`, `log`, `notice`, `warning`. Specifies the log level that will be used for log entries. Default: `log`.
32411
+ """
32412
+ return pulumi.get(self, "log_level")
32413
+
32414
+ @property
32415
+ @pulumi.getter(name="logMaxStringLength")
32416
+ def log_max_string_length(self) -> Optional[builtins.int]:
32417
+ """
32418
+ Crop parameters representation and whole statements if they exceed this threshold. A (default) value of -1 disable the truncation. Default: `-1`.
32419
+ """
32420
+ return pulumi.get(self, "log_max_string_length")
32421
+
32422
+ @property
32423
+ @pulumi.getter(name="logNestedStatements")
32424
+ def log_nested_statements(self) -> Optional[builtins.bool]:
32425
+ """
32426
+ This GUC allows to turn off logging nested statements, that is, statements that are executed as part of another ExecutorRun. Default: `true`.
32427
+ """
32428
+ return pulumi.get(self, "log_nested_statements")
32429
+
32430
+ @property
32431
+ @pulumi.getter(name="logParameter")
32432
+ def log_parameter(self) -> Optional[builtins.bool]:
32433
+ """
32434
+ Specifies that audit logging should include the parameters that were passed with the statement. Default: `false`.
32435
+ """
32436
+ return pulumi.get(self, "log_parameter")
32437
+
32438
+ @property
32439
+ @pulumi.getter(name="logParameterMaxSize")
32440
+ def log_parameter_max_size(self) -> Optional[builtins.int]:
32441
+ """
32442
+ Specifies that parameter values longer than this setting (in bytes) should not be logged, but replaced with <long param suppressed>. Default: `0`.
32443
+ """
32444
+ return pulumi.get(self, "log_parameter_max_size")
32445
+
32446
+ @property
32447
+ @pulumi.getter(name="logRelation")
32448
+ def log_relation(self) -> Optional[builtins.bool]:
32449
+ """
32450
+ Specifies whether session audit logging should create a separate log entry for each relation (TABLE, VIEW, etc.) referenced in a SELECT or DML statement. Default: `false`.
32451
+ """
32452
+ return pulumi.get(self, "log_relation")
32453
+
32454
+ @property
32455
+ @pulumi.getter(name="logRows")
32456
+ def log_rows(self) -> Optional[builtins.bool]:
32457
+ """
32458
+ Specifies that audit logging should include the rows retrieved or affected by a statement. When enabled the rows field will be included after the parameter field. Default: `false`.
32459
+ """
32460
+ return pulumi.get(self, "log_rows")
32461
+
32462
+ @property
32463
+ @pulumi.getter(name="logStatement")
32464
+ def log_statement(self) -> Optional[builtins.bool]:
32465
+ """
32466
+ Specifies whether logging will include the statement text and parameters (if enabled). Default: `true`.
32467
+ """
32468
+ return pulumi.get(self, "log_statement")
32469
+
32470
+ @property
32471
+ @pulumi.getter(name="logStatementOnce")
32472
+ def log_statement_once(self) -> Optional[builtins.bool]:
32473
+ """
32474
+ Specifies whether logging will include the statement text and parameters with the first log entry for a statement/substatement combination or with every entry. Default: `false`.
32475
+ """
32476
+ return pulumi.get(self, "log_statement_once")
32477
+
32478
+ @property
32479
+ @pulumi.getter
32480
+ def logs(self) -> Optional[Sequence[builtins.str]]:
32481
+ """
32482
+ Specifies which classes of statements will be logged by session audit logging.
32483
+ """
32484
+ return pulumi.get(self, "logs")
32485
+
32486
+ @property
32487
+ @pulumi.getter
32488
+ def role(self) -> Optional[builtins.str]:
32489
+ """
32490
+ Specifies the master role to use for object audit logging.
32491
+ """
32492
+ return pulumi.get(self, "role")
32493
+
32494
+
31766
32495
  @pulumi.output_type
31767
32496
  class GetAlloydbomniAlloydbomniUserConfigPgbouncerResult(dict):
31768
32497
  def __init__(__self__, *,
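The same pgaudit settings become readable through the AlloydbOmni data source in this version. On the resource side they would be supplied through the service user config; a sketch assuming `AlloydbomniAlloydbomniUserConfigArgs` and `AlloydbomniAlloydbomniUserConfigPgauditArgs` input classes mirror the result types above, with placeholder service details:

    import pulumi_aiven as aiven

    adb = aiven.Alloydbomni(
        "adb",
        project="my-project",               # placeholder
        cloud_name="google-europe-west1",   # placeholder
        plan="startup-4",                   # placeholder
        service_name="alloydb1",
        alloydbomni_user_config=aiven.AlloydbomniAlloydbomniUserConfigArgs(
            pgaudit=aiven.AlloydbomniAlloydbomniUserConfigPgauditArgs(
                feature_enabled=True,   # install the extension
                log_relation=True,      # one log entry per relation in a statement
            ),
        ),
    )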
@@ -38275,6 +39004,7 @@ class GetKafkaKafkaUserConfigResult(dict):
38275
39004
  kafka_authentication_methods: Optional['outputs.GetKafkaKafkaUserConfigKafkaAuthenticationMethodsResult'] = None,
38276
39005
  kafka_connect: Optional[builtins.bool] = None,
38277
39006
  kafka_connect_config: Optional['outputs.GetKafkaKafkaUserConfigKafkaConnectConfigResult'] = None,
39007
+ kafka_connect_plugin_versions: Optional[Sequence['outputs.GetKafkaKafkaUserConfigKafkaConnectPluginVersionResult']] = None,
38278
39008
  kafka_connect_secret_providers: Optional[Sequence['outputs.GetKafkaKafkaUserConfigKafkaConnectSecretProviderResult']] = None,
38279
39009
  kafka_rest: Optional[builtins.bool] = None,
38280
39010
  kafka_rest_authorization: Optional[builtins.bool] = None,
@@ -38303,6 +39033,7 @@ class GetKafkaKafkaUserConfigResult(dict):
38303
39033
  :param 'GetKafkaKafkaUserConfigKafkaAuthenticationMethodsArgs' kafka_authentication_methods: Kafka authentication methods
38304
39034
  :param builtins.bool kafka_connect: Enable Kafka Connect service. Default: `false`.
38305
39035
  :param 'GetKafkaKafkaUserConfigKafkaConnectConfigArgs' kafka_connect_config: Kafka Connect configuration values
39036
+ :param Sequence['GetKafkaKafkaUserConfigKafkaConnectPluginVersionArgs'] kafka_connect_plugin_versions: The plugin selected by the user
38306
39037
  :param builtins.bool kafka_rest: Enable Kafka-REST service. Default: `false`.
38307
39038
  :param builtins.bool kafka_rest_authorization: Enable authorization in Kafka-REST service.
38308
39039
  :param 'GetKafkaKafkaUserConfigKafkaRestConfigArgs' kafka_rest_config: Kafka REST configuration
@@ -38341,6 +39072,8 @@ class GetKafkaKafkaUserConfigResult(dict):
38341
39072
  pulumi.set(__self__, "kafka_connect", kafka_connect)
38342
39073
  if kafka_connect_config is not None:
38343
39074
  pulumi.set(__self__, "kafka_connect_config", kafka_connect_config)
39075
+ if kafka_connect_plugin_versions is not None:
39076
+ pulumi.set(__self__, "kafka_connect_plugin_versions", kafka_connect_plugin_versions)
38344
39077
  if kafka_connect_secret_providers is not None:
38345
39078
  pulumi.set(__self__, "kafka_connect_secret_providers", kafka_connect_secret_providers)
38346
39079
  if kafka_rest is not None:
@@ -38464,6 +39197,14 @@ class GetKafkaKafkaUserConfigResult(dict):
38464
39197
  """
38465
39198
  return pulumi.get(self, "kafka_connect_config")
38466
39199
 
39200
+ @property
39201
+ @pulumi.getter(name="kafkaConnectPluginVersions")
39202
+ def kafka_connect_plugin_versions(self) -> Optional[Sequence['outputs.GetKafkaKafkaUserConfigKafkaConnectPluginVersionResult']]:
39203
+ """
39204
+ The plugin selected by the user
39205
+ """
39206
+ return pulumi.get(self, "kafka_connect_plugin_versions")
39207
+
38467
39208
  @property
38468
39209
  @pulumi.getter(name="kafkaConnectSecretProviders")
38469
39210
  def kafka_connect_secret_providers(self) -> Optional[Sequence['outputs.GetKafkaKafkaUserConfigKafkaConnectSecretProviderResult']]:
@@ -39428,6 +40169,35 @@ class GetKafkaKafkaUserConfigKafkaConnectConfigResult(dict):
39428
40169
  return pulumi.get(self, "session_timeout_ms")
39429
40170
 
39430
40171
 
40172
+ @pulumi.output_type
40173
+ class GetKafkaKafkaUserConfigKafkaConnectPluginVersionResult(dict):
40174
+ def __init__(__self__, *,
40175
+ plugin_name: builtins.str,
40176
+ version: builtins.str):
40177
+ """
40178
+ :param builtins.str plugin_name: The name of the plugin. Example: `debezium-connector`.
40179
+ :param builtins.str version: The version of the plugin. Example: `2.5.0`.
40180
+ """
40181
+ pulumi.set(__self__, "plugin_name", plugin_name)
40182
+ pulumi.set(__self__, "version", version)
40183
+
40184
+ @property
40185
+ @pulumi.getter(name="pluginName")
40186
+ def plugin_name(self) -> builtins.str:
40187
+ """
40188
+ The name of the plugin. Example: `debezium-connector`.
40189
+ """
40190
+ return pulumi.get(self, "plugin_name")
40191
+
40192
+ @property
40193
+ @pulumi.getter
40194
+ def version(self) -> builtins.str:
40195
+ """
40196
+ The version of the plugin. Example: `2.5.0`.
40197
+ """
40198
+ return pulumi.get(self, "version")
40199
+
40200
+
39431
40201
  @pulumi.output_type
39432
40202
  class GetKafkaKafkaUserConfigKafkaConnectSecretProviderResult(dict):
39433
40203
  def __init__(__self__, *,
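`kafka_connect_plugin_versions` is new in the Kafka user config and pins a Connect plugin to an explicit version. A resource-side sketch, assuming the input classes mirror the `Get*` result types above; the plugin name and version are the example values from the schema docstrings, and the service details are placeholders:

    import pulumi_aiven as aiven

    kafka = aiven.Kafka(
        "kafka",
        project="my-project",               # placeholder
        cloud_name="google-europe-west1",   # placeholder
        plan="business-4",                  # placeholder
        service_name="kafka1",
        kafka_user_config=aiven.KafkaKafkaUserConfigArgs(
            kafka_connect=True,             # run Kafka Connect on the service
            kafka_connect_plugin_versions=[
                aiven.KafkaKafkaUserConfigKafkaConnectPluginVersionArgs(
                    plugin_name="debezium-connector",   # example value from the docs
                    version="2.5.0",
                ),
            ],
        ),
    )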
@@ -40696,33 +41466,33 @@ class GetKafkaTopicConfigResult(dict):
40696
41466
  segment_ms: Optional[builtins.str] = None,
40697
41467
  unclean_leader_election_enable: Optional[builtins.bool] = None):
40698
41468
  """
40699
- :param builtins.str cleanup_policy: cleanup.policy value. The possible values are `compact`, `compact,delete` and `delete`.
40700
- :param builtins.str compression_type: compression.type value. The possible values are `gzip`, `lz4`, `producer`, `snappy`, `uncompressed` and `zstd`.
40701
- :param builtins.str delete_retention_ms: delete.retention.ms value
40702
- :param builtins.str file_delete_delay_ms: file.delete.delay.ms value
40703
- :param builtins.str flush_messages: flush.messages value
40704
- :param builtins.str flush_ms: flush.ms value
40705
- :param builtins.str index_interval_bytes: index.interval.bytes value
40706
- :param builtins.str local_retention_bytes: local.retention.bytes value
40707
- :param builtins.str local_retention_ms: local.retention.ms value
40708
- :param builtins.str max_compaction_lag_ms: max.compaction.lag.ms value
40709
- :param builtins.str max_message_bytes: max.message.bytes value
40710
- :param builtins.bool message_downconversion_enable: message.downconversion.enable value
40711
- :param builtins.str message_format_version: message.format.version value. The possible values are `0.10.0`, `0.10.0-IV0`, `0.10.0-IV1`, `0.10.1`, `0.10.1-IV0`, `0.10.1-IV1`, `0.10.1-IV2`, `0.10.2`, `0.10.2-IV0`, `0.11.0`, `0.11.0-IV0`, `0.11.0-IV1`, `0.11.0-IV2`, `0.8.0`, `0.8.1`, `0.8.2`, `0.9.0`, `1.0`, `1.0-IV0`, `1.1`, `1.1-IV0`, `2.0`, `2.0-IV0`, `2.0-IV1`, `2.1`, `2.1-IV0`, `2.1-IV1`, `2.1-IV2`, `2.2`, `2.2-IV0`, `2.2-IV1`, `2.3`, `2.3-IV0`, `2.3-IV1`, `2.4`, `2.4-IV0`, `2.4-IV1`, `2.5`, `2.5-IV0`, `2.6`, `2.6-IV0`, `2.7`, `2.7-IV0`, `2.7-IV1`, `2.7-IV2`, `2.8`, `2.8-IV0`, `2.8-IV1`, `3.0`, `3.0-IV0`, `3.0-IV1`, `3.1`, `3.1-IV0`, `3.2`, `3.2-IV0`, `3.3`, `3.3-IV0`, `3.3-IV1`, `3.3-IV2`, `3.3-IV3`, `3.4`, `3.4-IV0`, `3.5`, `3.5-IV0`, `3.5-IV1`, `3.5-IV2`, `3.6`, `3.6-IV0`, `3.6-IV1`, `3.6-IV2`, `3.7`, `3.7-IV0`, `3.7-IV1`, `3.7-IV2`, `3.7-IV3`, `3.7-IV4`, `3.8`, `3.8-IV0`, `3.9`, `3.9-IV0`, `3.9-IV1`, `4.1` and `4.1-IV0`.
40712
- :param builtins.str message_timestamp_difference_max_ms: message.timestamp.difference.max.ms value
40713
- :param builtins.str message_timestamp_type: message.timestamp.type value. The possible values are `CreateTime` and `LogAppendTime`.
40714
- :param builtins.float min_cleanable_dirty_ratio: min.cleanable.dirty.ratio value
40715
- :param builtins.str min_compaction_lag_ms: min.compaction.lag.ms value
40716
- :param builtins.str min_insync_replicas: min.insync.replicas value
40717
- :param builtins.bool preallocate: preallocate value
40718
- :param builtins.bool remote_storage_enable: remote.storage.enable value
40719
- :param builtins.str retention_bytes: retention.bytes value
40720
- :param builtins.str retention_ms: retention.ms value
40721
- :param builtins.str segment_bytes: segment.bytes value
40722
- :param builtins.str segment_index_bytes: segment.index.bytes value
40723
- :param builtins.str segment_jitter_ms: segment.jitter.ms value
40724
- :param builtins.str segment_ms: segment.ms value
40725
- :param builtins.bool unclean_leader_election_enable: unclean.leader.election.enable value; This field is deprecated and no longer functional.
41469
+ :param builtins.str cleanup_policy: The retention policy to use on old segments. Possible values include 'delete', 'compact', or a comma-separated list of them. The default policy ('delete') will discard old segments when their retention time or size limit has been reached. The 'compact' setting will enable log compaction on the topic. The possible values are `compact`, `compact,delete` and `delete`.
41470
+ :param builtins.str compression_type: Specify the final compression type for a given topic. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'uncompressed' which is equivalent to no compression; and 'producer' which means retain the original compression codec set by the producer. The possible values are `gzip`, `lz4`, `producer`, `snappy`, `uncompressed` and `zstd`.
41471
+ :param builtins.str delete_retention_ms: The amount of time to retain delete tombstone markers for log compacted topics. This setting also gives a bound on the time in which a consumer must complete a read if they begin from offset 0 to ensure that they get a valid snapshot of the final stage (otherwise delete tombstones may be collected before they complete their scan).
41472
+ :param builtins.str file_delete_delay_ms: The time to wait before deleting a file from the filesystem.
41473
+ :param builtins.str flush_messages: This setting allows specifying an interval at which we will force an fsync of data written to the log. For example if this was set to 1 we would fsync after every message; if it were 5 we would fsync after every five messages. In general we recommend you not set this and use replication for durability and allow the operating system's background flush capabilities as it is more efficient.
41474
+ :param builtins.str flush_ms: This setting allows specifying a time interval at which we will force an fsync of data written to the log. For example if this was set to 1000 we would fsync after 1000 ms had passed. In general we recommend you not set this and use replication for durability and allow the operating system's background flush capabilities as it is more efficient.
41475
+ :param builtins.str index_interval_bytes: This setting controls how frequently Kafka adds an index entry to its offset index. The default setting ensures that we index a message roughly every 4096 bytes. More indexing allows reads to jump closer to the exact position in the log but makes the index larger. You probably don't need to change this.
41476
+ :param builtins.str local_retention_bytes: This configuration controls the maximum bytes tiered storage will retain segment files locally before it will discard old log segments to free up space. If set to -2, the limit is equal to overall retention time. If set to -1, no limit is applied but it's possible only if overall retention is also -1.
41477
+ :param builtins.str local_retention_ms: This configuration controls the maximum time tiered storage will retain segment files locally before it will discard old log segments to free up space. If set to -2, the time limit is equal to overall retention time. If set to -1, no time limit is applied but it's possible only if overall retention is also -1.
41478
+ :param builtins.str max_compaction_lag_ms: The maximum time a message will remain ineligible for compaction in the log. Only applicable for logs that are being compacted.
41479
+ :param builtins.str max_message_bytes: The largest record batch size allowed by Kafka (after compression if compression is enabled). If this is increased and there are consumers older than 0.10.2, the consumers' fetch size must also be increased so that they can fetch record batches this large. In the latest message format version, records are always grouped into batches for efficiency. In previous message format versions, uncompressed records are not grouped into batches and this limit only applies to a single record in that case.
41480
+ :param builtins.bool message_downconversion_enable: This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. When set to false, broker will not perform down-conversion for consumers expecting an older message format. The broker responds with UNSUPPORTED_VERSION error for consume requests from such older clients. This configuration does not apply to any message format conversion that might be required for replication to followers.
41481
+ :param builtins.str message_format_version: Specify the message format version the broker will use to append messages to the logs. The value should be a valid ApiVersion. Some examples are: 0.8.2, 0.9.0.0, 0.10.0, check ApiVersion for more details. By setting a particular message format version, the user is certifying that all the existing messages on disk are smaller or equal than the specified version. Setting this value incorrectly will cause consumers with older versions to break as they will receive messages with a format that they don't understand. The possible values are `0.10.0`, `0.10.0-IV0`, `0.10.0-IV1`, `0.10.1`, `0.10.1-IV0`, `0.10.1-IV1`, `0.10.1-IV2`, `0.10.2`, `0.10.2-IV0`, `0.11.0`, `0.11.0-IV0`, `0.11.0-IV1`, `0.11.0-IV2`, `0.8.0`, `0.8.1`, `0.8.2`, `0.9.0`, `1.0`, `1.0-IV0`, `1.1`, `1.1-IV0`, `2.0`, `2.0-IV0`, `2.0-IV1`, `2.1`, `2.1-IV0`, `2.1-IV1`, `2.1-IV2`, `2.2`, `2.2-IV0`, `2.2-IV1`, `2.3`, `2.3-IV0`, `2.3-IV1`, `2.4`, `2.4-IV0`, `2.4-IV1`, `2.5`, `2.5-IV0`, `2.6`, `2.6-IV0`, `2.7`, `2.7-IV0`, `2.7-IV1`, `2.7-IV2`, `2.8`, `2.8-IV0`, `2.8-IV1`, `3.0`, `3.0-IV0`, `3.0-IV1`, `3.1`, `3.1-IV0`, `3.2`, `3.2-IV0`, `3.3`, `3.3-IV0`, `3.3-IV1`, `3.3-IV2`, `3.3-IV3`, `3.4`, `3.4-IV0`, `3.5`, `3.5-IV0`, `3.5-IV1`, `3.5-IV2`, `3.6`, `3.6-IV0`, `3.6-IV1`, `3.6-IV2`, `3.7`, `3.7-IV0`, `3.7-IV1`, `3.7-IV2`, `3.7-IV3`, `3.7-IV4`, `3.8`, `3.8-IV0`, `3.9`, `3.9-IV0`, `3.9-IV1`, `4.0`, `4.0-IV0`, `4.1` and `4.1-IV0`.
41482
+ :param builtins.str message_timestamp_difference_max_ms: The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message. If message.timestamp.type=CreateTime, a message will be rejected if the difference in timestamp exceeds this threshold. This configuration is ignored if message.timestamp.type=LogAppendTime.
41483
+ :param builtins.str message_timestamp_type: Define whether the timestamp in the message is message create time or log append time. The possible values are `CreateTime` and `LogAppendTime`.
41484
+ :param builtins.float min_cleanable_dirty_ratio: This configuration controls how frequently the log compactor will attempt to clean the log (assuming log compaction is enabled). By default we will avoid cleaning a log where more than 50% of the log has been compacted. This ratio bounds the maximum space wasted in the log by duplicates (at 50% at most 50% of the log could be duplicates). A higher ratio will mean fewer, more efficient cleanings but will mean more wasted space in the log. If the max.compaction.lag.ms or the min.compaction.lag.ms configurations are also specified, then the log compactor considers the log to be eligible for compaction as soon as either: (i) the dirty ratio threshold has been met and the log has had dirty (uncompacted) records for at least the min.compaction.lag.ms duration, or (ii) if the log has had dirty (uncompacted) records for at most the max.compaction.lag.ms period.
41485
+ :param builtins.str min_compaction_lag_ms: The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.
41486
+ :param builtins.str min_insync_replicas: When a producer sets acks to 'all' (or '-1'), this configuration specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful. If this minimum cannot be met, then the producer will raise an exception (either NotEnoughReplicas or NotEnoughReplicasAfterAppend). When used together, min.insync.replicas and acks allow you to enforce greater durability guarantees. A typical scenario would be to create a topic with a replication factor of 3, set min.insync.replicas to 2, and produce with acks of 'all'. This will ensure that the producer raises an exception if a majority of replicas do not receive a write.
41487
+ :param builtins.bool preallocate: True if we should preallocate the file on disk when creating a new log segment.
41488
+ :param builtins.bool remote_storage_enable: Indicates whether tiered storage should be enabled.
41489
+ :param builtins.str retention_bytes: This configuration controls the maximum size a partition (which consists of log segments) can grow to before we will discard old log segments to free up space if we are using the 'delete' retention policy. By default there is no size limit only a time limit. Since this limit is enforced at the partition level, multiply it by the number of partitions to compute the topic retention in bytes.
41490
+ :param builtins.str retention_ms: This configuration controls the maximum time we will retain a log before we will discard old log segments to free up space if we are using the 'delete' retention policy. This represents an SLA on how soon consumers must read their data. If set to -1, no time limit is applied.
41491
+ :param builtins.str segment_bytes: This configuration controls the segment file size for the log. Retention and cleaning are always done one file at a time, so a larger segment size means fewer files but less granular control over retention.
41492
+ :param builtins.str segment_index_bytes: This configuration controls the size of the index that maps offsets to file positions. We preallocate this index file and shrink it only after log rolls. You generally should not need to change this setting.
41493
+ :param builtins.str segment_jitter_ms: The maximum random jitter subtracted from the scheduled segment roll time to avoid thundering herds of segment rolling.
41494
+ :param builtins.str segment_ms: This configuration controls the period of time after which Kafka will force the log to roll even if the segment file isn't full to ensure that retention can delete or compact old data. Setting this to a very low value has consequences, and the Aiven management plane ignores values less than 10 seconds.
41495
+ :param builtins.bool unclean_leader_election_enable: Indicates whether to enable replicas not in the ISR set to be elected as leader as a last resort, even though doing so may result in data loss.
40726
41496
  """
40727
41497
  if cleanup_policy is not None:
40728
41498
  pulumi.set(__self__, "cleanup_policy", cleanup_policy)
@@ -40783,7 +41553,7 @@ class GetKafkaTopicConfigResult(dict):
40783
41553
  @pulumi.getter(name="cleanupPolicy")
40784
41554
  def cleanup_policy(self) -> Optional[builtins.str]:
40785
41555
  """
40786
- cleanup.policy value. The possible values are `compact`, `compact,delete` and `delete`.
41556
+ The retention policy to use on old segments. Possible values include 'delete', 'compact', or a comma-separated list of them. The default policy ('delete') will discard old segments when their retention time or size limit has been reached. The 'compact' setting will enable log compaction on the topic. The possible values are `compact`, `compact,delete` and `delete`.
40787
41557
  """
40788
41558
  return pulumi.get(self, "cleanup_policy")
40789
41559
 
@@ -40791,7 +41561,7 @@ class GetKafkaTopicConfigResult(dict):
40791
41561
  @pulumi.getter(name="compressionType")
40792
41562
  def compression_type(self) -> Optional[builtins.str]:
40793
41563
  """
40794
- compression.type value. The possible values are `gzip`, `lz4`, `producer`, `snappy`, `uncompressed` and `zstd`.
41564
+ Specify the final compression type for a given topic. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'uncompressed' which is equivalent to no compression; and 'producer' which means retain the original compression codec set by the producer. The possible values are `gzip`, `lz4`, `producer`, `snappy`, `uncompressed` and `zstd`.
40795
41565
  """
40796
41566
  return pulumi.get(self, "compression_type")
40797
41567
 
@@ -40799,7 +41569,7 @@ class GetKafkaTopicConfigResult(dict):
40799
41569
  @pulumi.getter(name="deleteRetentionMs")
40800
41570
  def delete_retention_ms(self) -> Optional[builtins.str]:
40801
41571
  """
40802
- delete.retention.ms value
41572
+ The amount of time to retain delete tombstone markers for log compacted topics. This setting also gives a bound on the time in which a consumer must complete a read if they begin from offset 0 to ensure that they get a valid snapshot of the final stage (otherwise delete tombstones may be collected before they complete their scan).
40803
41573
  """
40804
41574
  return pulumi.get(self, "delete_retention_ms")
40805
41575
 
@@ -40807,7 +41577,7 @@ class GetKafkaTopicConfigResult(dict):
40807
41577
  @pulumi.getter(name="fileDeleteDelayMs")
40808
41578
  def file_delete_delay_ms(self) -> Optional[builtins.str]:
40809
41579
  """
40810
- file.delete.delay.ms value
41580
+ The time to wait before deleting a file from the filesystem.
40811
41581
  """
40812
41582
  return pulumi.get(self, "file_delete_delay_ms")
40813
41583
 
@@ -40815,7 +41585,7 @@ class GetKafkaTopicConfigResult(dict):
40815
41585
  @pulumi.getter(name="flushMessages")
40816
41586
  def flush_messages(self) -> Optional[builtins.str]:
40817
41587
  """
40818
- flush.messages value
41588
+ This setting allows specifying an interval at which we will force an fsync of data written to the log. For example if this was set to 1 we would fsync after every message; if it were 5 we would fsync after every five messages. In general we recommend you not set this and use replication for durability and allow the operating system's background flush capabilities as it is more efficient.
40819
41589
  """
40820
41590
  return pulumi.get(self, "flush_messages")
40821
41591
 
@@ -40823,7 +41593,7 @@ class GetKafkaTopicConfigResult(dict):
40823
41593
  @pulumi.getter(name="flushMs")
40824
41594
  def flush_ms(self) -> Optional[builtins.str]:
40825
41595
  """
40826
- flush.ms value
41596
+ This setting allows specifying a time interval at which we will force an fsync of data written to the log. For example if this was set to 1000 we would fsync after 1000 ms had passed. In general we recommend you not set this and use replication for durability and allow the operating system's background flush capabilities as it is more efficient.
40827
41597
  """
40828
41598
  return pulumi.get(self, "flush_ms")
40829
41599
 
@@ -40831,7 +41601,7 @@ class GetKafkaTopicConfigResult(dict):
40831
41601
  @pulumi.getter(name="indexIntervalBytes")
40832
41602
  def index_interval_bytes(self) -> Optional[builtins.str]:
40833
41603
  """
40834
- index.interval.bytes value
41604
+ This setting controls how frequently Kafka adds an index entry to its offset index. The default setting ensures that we index a message roughly every 4096 bytes. More indexing allows reads to jump closer to the exact position in the log but makes the index larger. You probably don't need to change this.
40835
41605
  """
40836
41606
  return pulumi.get(self, "index_interval_bytes")
40837
41607
 
@@ -40839,7 +41609,7 @@ class GetKafkaTopicConfigResult(dict):
40839
41609
  @pulumi.getter(name="localRetentionBytes")
40840
41610
  def local_retention_bytes(self) -> Optional[builtins.str]:
40841
41611
  """
40842
- local.retention.bytes value
41612
+ This configuration controls the maximum bytes tiered storage will retain segment files locally before it will discard old log segments to free up space. If set to -2, the limit is equal to overall retention time. If set to -1, no limit is applied but it's possible only if overall retention is also -1.
40843
41613
  """
40844
41614
  return pulumi.get(self, "local_retention_bytes")
40845
41615
 
@@ -40847,7 +41617,7 @@ class GetKafkaTopicConfigResult(dict):
40847
41617
  @pulumi.getter(name="localRetentionMs")
40848
41618
  def local_retention_ms(self) -> Optional[builtins.str]:
40849
41619
  """
40850
- local.retention.ms value
41620
+ This configuration controls the maximum time tiered storage will retain segment files locally before it will discard old log segments to free up space. If set to -2, the time limit is equal to overall retention time. If set to -1, no time limit is applied but it's possible only if overall retention is also -1.
40851
41621
  """
40852
41622
  return pulumi.get(self, "local_retention_ms")
40853
41623
 
@@ -40855,7 +41625,7 @@ class GetKafkaTopicConfigResult(dict):
40855
41625
  @pulumi.getter(name="maxCompactionLagMs")
40856
41626
  def max_compaction_lag_ms(self) -> Optional[builtins.str]:
40857
41627
  """
40858
- max.compaction.lag.ms value
41628
+ The maximum time a message will remain ineligible for compaction in the log. Only applicable for logs that are being compacted.
40859
41629
  """
40860
41630
  return pulumi.get(self, "max_compaction_lag_ms")
40861
41631
 
@@ -40863,7 +41633,7 @@ class GetKafkaTopicConfigResult(dict):
40863
41633
  @pulumi.getter(name="maxMessageBytes")
40864
41634
  def max_message_bytes(self) -> Optional[builtins.str]:
40865
41635
  """
40866
- max.message.bytes value
41636
+ The largest record batch size allowed by Kafka (after compression if compression is enabled). If this is increased and there are consumers older than 0.10.2, the consumers' fetch size must also be increased so that they can fetch record batches this large. In the latest message format version, records are always grouped into batches for efficiency. In previous message format versions, uncompressed records are not grouped into batches and this limit only applies to a single record in that case.
40867
41637
  """
40868
41638
  return pulumi.get(self, "max_message_bytes")
40869
41639
 
@@ -40871,7 +41641,7 @@ class GetKafkaTopicConfigResult(dict):
40871
41641
  @pulumi.getter(name="messageDownconversionEnable")
40872
41642
  def message_downconversion_enable(self) -> Optional[builtins.bool]:
40873
41643
  """
40874
- message.downconversion.enable value
41644
+ This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. When set to false, broker will not perform down-conversion for consumers expecting an older message format. The broker responds with UNSUPPORTED_VERSION error for consume requests from such older clients. This configuration does not apply to any message format conversion that might be required for replication to followers.
40875
41645
  """
40876
41646
  return pulumi.get(self, "message_downconversion_enable")
40877
41647
 
@@ -40879,7 +41649,7 @@ class GetKafkaTopicConfigResult(dict):
40879
41649
  @pulumi.getter(name="messageFormatVersion")
40880
41650
  def message_format_version(self) -> Optional[builtins.str]:
40881
41651
  """
40882
- message.format.version value. The possible values are `0.10.0`, `0.10.0-IV0`, `0.10.0-IV1`, `0.10.1`, `0.10.1-IV0`, `0.10.1-IV1`, `0.10.1-IV2`, `0.10.2`, `0.10.2-IV0`, `0.11.0`, `0.11.0-IV0`, `0.11.0-IV1`, `0.11.0-IV2`, `0.8.0`, `0.8.1`, `0.8.2`, `0.9.0`, `1.0`, `1.0-IV0`, `1.1`, `1.1-IV0`, `2.0`, `2.0-IV0`, `2.0-IV1`, `2.1`, `2.1-IV0`, `2.1-IV1`, `2.1-IV2`, `2.2`, `2.2-IV0`, `2.2-IV1`, `2.3`, `2.3-IV0`, `2.3-IV1`, `2.4`, `2.4-IV0`, `2.4-IV1`, `2.5`, `2.5-IV0`, `2.6`, `2.6-IV0`, `2.7`, `2.7-IV0`, `2.7-IV1`, `2.7-IV2`, `2.8`, `2.8-IV0`, `2.8-IV1`, `3.0`, `3.0-IV0`, `3.0-IV1`, `3.1`, `3.1-IV0`, `3.2`, `3.2-IV0`, `3.3`, `3.3-IV0`, `3.3-IV1`, `3.3-IV2`, `3.3-IV3`, `3.4`, `3.4-IV0`, `3.5`, `3.5-IV0`, `3.5-IV1`, `3.5-IV2`, `3.6`, `3.6-IV0`, `3.6-IV1`, `3.6-IV2`, `3.7`, `3.7-IV0`, `3.7-IV1`, `3.7-IV2`, `3.7-IV3`, `3.7-IV4`, `3.8`, `3.8-IV0`, `3.9`, `3.9-IV0`, `3.9-IV1`, `4.1` and `4.1-IV0`.
41652
+ Specify the message format version the broker will use to append messages to the logs. The value should be a valid ApiVersion. Some examples are: 0.8.2, 0.9.0.0, 0.10.0, check ApiVersion for more details. By setting a particular message format version, the user is certifying that all the existing messages on disk are smaller or equal than the specified version. Setting this value incorrectly will cause consumers with older versions to break as they will receive messages with a format that they don't understand. The possible values are `0.10.0`, `0.10.0-IV0`, `0.10.0-IV1`, `0.10.1`, `0.10.1-IV0`, `0.10.1-IV1`, `0.10.1-IV2`, `0.10.2`, `0.10.2-IV0`, `0.11.0`, `0.11.0-IV0`, `0.11.0-IV1`, `0.11.0-IV2`, `0.8.0`, `0.8.1`, `0.8.2`, `0.9.0`, `1.0`, `1.0-IV0`, `1.1`, `1.1-IV0`, `2.0`, `2.0-IV0`, `2.0-IV1`, `2.1`, `2.1-IV0`, `2.1-IV1`, `2.1-IV2`, `2.2`, `2.2-IV0`, `2.2-IV1`, `2.3`, `2.3-IV0`, `2.3-IV1`, `2.4`, `2.4-IV0`, `2.4-IV1`, `2.5`, `2.5-IV0`, `2.6`, `2.6-IV0`, `2.7`, `2.7-IV0`, `2.7-IV1`, `2.7-IV2`, `2.8`, `2.8-IV0`, `2.8-IV1`, `3.0`, `3.0-IV0`, `3.0-IV1`, `3.1`, `3.1-IV0`, `3.2`, `3.2-IV0`, `3.3`, `3.3-IV0`, `3.3-IV1`, `3.3-IV2`, `3.3-IV3`, `3.4`, `3.4-IV0`, `3.5`, `3.5-IV0`, `3.5-IV1`, `3.5-IV2`, `3.6`, `3.6-IV0`, `3.6-IV1`, `3.6-IV2`, `3.7`, `3.7-IV0`, `3.7-IV1`, `3.7-IV2`, `3.7-IV3`, `3.7-IV4`, `3.8`, `3.8-IV0`, `3.9`, `3.9-IV0`, `3.9-IV1`, `4.0`, `4.0-IV0`, `4.1` and `4.1-IV0`.
40883
41653
  """
40884
41654
  return pulumi.get(self, "message_format_version")
40885
41655
 
@@ -40887,7 +41657,7 @@ class GetKafkaTopicConfigResult(dict):
40887
41657
  @pulumi.getter(name="messageTimestampDifferenceMaxMs")
40888
41658
  def message_timestamp_difference_max_ms(self) -> Optional[builtins.str]:
40889
41659
  """
40890
- message.timestamp.difference.max.ms value
41660
+ The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message. If message.timestamp.type=CreateTime, a message will be rejected if the difference in timestamp exceeds this threshold. This configuration is ignored if message.timestamp.type=LogAppendTime.
40891
41661
  """
40892
41662
  return pulumi.get(self, "message_timestamp_difference_max_ms")
40893
41663
 
@@ -40895,7 +41665,7 @@ class GetKafkaTopicConfigResult(dict):
40895
41665
  @pulumi.getter(name="messageTimestampType")
40896
41666
  def message_timestamp_type(self) -> Optional[builtins.str]:
40897
41667
  """
40898
- message.timestamp.type value. The possible values are `CreateTime` and `LogAppendTime`.
41668
+ Define whether the timestamp in the message is message create time or log append time. The possible values are `CreateTime` and `LogAppendTime`.
40899
41669
  """
40900
41670
  return pulumi.get(self, "message_timestamp_type")
40901
41671
 
@@ -40903,7 +41673,7 @@ class GetKafkaTopicConfigResult(dict):
40903
41673
  @pulumi.getter(name="minCleanableDirtyRatio")
40904
41674
  def min_cleanable_dirty_ratio(self) -> Optional[builtins.float]:
40905
41675
  """
40906
- min.cleanable.dirty.ratio value
41676
+ This configuration controls how frequently the log compactor will attempt to clean the log (assuming log compaction is enabled). By default we will avoid cleaning a log where more than 50% of the log has been compacted. This ratio bounds the maximum space wasted in the log by duplicates (at 50% at most 50% of the log could be duplicates). A higher ratio will mean fewer, more efficient cleanings but will mean more wasted space in the log. If the max.compaction.lag.ms or the min.compaction.lag.ms configurations are also specified, then the log compactor considers the log to be eligible for compaction as soon as either: (i) the dirty ratio threshold has been met and the log has had dirty (uncompacted) records for at least the min.compaction.lag.ms duration, or (ii) if the log has had dirty (uncompacted) records for at most the max.compaction.lag.ms period.
40907
41677
  """
40908
41678
  return pulumi.get(self, "min_cleanable_dirty_ratio")
40909
41679
 
@@ -40911,7 +41681,7 @@ class GetKafkaTopicConfigResult(dict):
40911
41681
  @pulumi.getter(name="minCompactionLagMs")
40912
41682
  def min_compaction_lag_ms(self) -> Optional[builtins.str]:
40913
41683
  """
40914
- min.compaction.lag.ms value
41684
+ The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.
40915
41685
  """
40916
41686
  return pulumi.get(self, "min_compaction_lag_ms")
40917
41687
 
@@ -40919,7 +41689,7 @@ class GetKafkaTopicConfigResult(dict):
40919
41689
  @pulumi.getter(name="minInsyncReplicas")
40920
41690
  def min_insync_replicas(self) -> Optional[builtins.str]:
40921
41691
  """
40922
- min.insync.replicas value
41692
+ When a producer sets acks to 'all' (or '-1'), this configuration specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful. If this minimum cannot be met, then the producer will raise an exception (either NotEnoughReplicas or NotEnoughReplicasAfterAppend). When used together, min.insync.replicas and acks allow you to enforce greater durability guarantees. A typical scenario would be to create a topic with a replication factor of 3, set min.insync.replicas to 2, and produce with acks of 'all'. This will ensure that the producer raises an exception if a majority of replicas do not receive a write.
40923
41693
  """
40924
41694
  return pulumi.get(self, "min_insync_replicas")
40925
41695
 
@@ -40927,7 +41697,7 @@ class GetKafkaTopicConfigResult(dict):
40927
41697
  @pulumi.getter
40928
41698
  def preallocate(self) -> Optional[builtins.bool]:
40929
41699
  """
40930
- preallocate value
41700
+ True if we should preallocate the file on disk when creating a new log segment.
40931
41701
  """
40932
41702
  return pulumi.get(self, "preallocate")
40933
41703
 
@@ -40935,7 +41705,7 @@ class GetKafkaTopicConfigResult(dict):
40935
41705
  @pulumi.getter(name="remoteStorageEnable")
40936
41706
  def remote_storage_enable(self) -> Optional[builtins.bool]:
40937
41707
  """
40938
- remote.storage.enable value
41708
+ Indicates whether tiered storage should be enabled.
40939
41709
  """
40940
41710
  return pulumi.get(self, "remote_storage_enable")
40941
41711
 
@@ -40943,7 +41713,7 @@ class GetKafkaTopicConfigResult(dict):
40943
41713
  @pulumi.getter(name="retentionBytes")
40944
41714
  def retention_bytes(self) -> Optional[builtins.str]:
40945
41715
  """
40946
- retention.bytes value
41716
+ This configuration controls the maximum size a partition (which consists of log segments) can grow to before we will discard old log segments to free up space if we are using the 'delete' retention policy. By default there is no size limit only a time limit. Since this limit is enforced at the partition level, multiply it by the number of partitions to compute the topic retention in bytes.
40947
41717
  """
40948
41718
  return pulumi.get(self, "retention_bytes")
40949
41719
 
@@ -40951,7 +41721,7 @@ class GetKafkaTopicConfigResult(dict):
40951
41721
  @pulumi.getter(name="retentionMs")
40952
41722
  def retention_ms(self) -> Optional[builtins.str]:
40953
41723
  """
40954
- retention.ms value
41724
+ This configuration controls the maximum time we will retain a log before we will discard old log segments to free up space if we are using the 'delete' retention policy. This represents an SLA on how soon consumers must read their data. If set to -1, no time limit is applied.
40955
41725
  """
40956
41726
  return pulumi.get(self, "retention_ms")
40957
41727
 
@@ -40959,7 +41729,7 @@ class GetKafkaTopicConfigResult(dict):
40959
41729
  @pulumi.getter(name="segmentBytes")
40960
41730
  def segment_bytes(self) -> Optional[builtins.str]:
40961
41731
  """
40962
- segment.bytes value
41732
+ This configuration controls the segment file size for the log. Retention and cleaning are always done one file at a time, so a larger segment size means fewer files but less granular control over retention.
40963
41733
  """
40964
41734
  return pulumi.get(self, "segment_bytes")
40965
41735
 
@@ -40967,7 +41737,7 @@ class GetKafkaTopicConfigResult(dict):
40967
41737
  @pulumi.getter(name="segmentIndexBytes")
40968
41738
  def segment_index_bytes(self) -> Optional[builtins.str]:
40969
41739
  """
40970
- segment.index.bytes value
41740
+ This configuration controls the size of the index that maps offsets to file positions. We preallocate this index file and shrink it only after log rolls. You generally should not need to change this setting.
40971
41741
  """
40972
41742
  return pulumi.get(self, "segment_index_bytes")
40973
41743
 
@@ -40975,7 +41745,7 @@ class GetKafkaTopicConfigResult(dict):
40975
41745
  @pulumi.getter(name="segmentJitterMs")
40976
41746
  def segment_jitter_ms(self) -> Optional[builtins.str]:
40977
41747
  """
40978
- segment.jitter.ms value
41748
+ The maximum random jitter subtracted from the scheduled segment roll time to avoid thundering herds of segment rolling.
40979
41749
  """
40980
41750
  return pulumi.get(self, "segment_jitter_ms")
40981
41751
 
@@ -40983,16 +41753,15 @@ class GetKafkaTopicConfigResult(dict):
40983
41753
  @pulumi.getter(name="segmentMs")
40984
41754
  def segment_ms(self) -> Optional[builtins.str]:
40985
41755
  """
40986
- segment.ms value
41756
+ This configuration controls the period of time after which Kafka will force the log to roll even if the segment file isn't full to ensure that retention can delete or compact old data. Setting this to a very low value has consequences, and the Aiven management plane ignores values less than 10 seconds.
40987
41757
  """
40988
41758
  return pulumi.get(self, "segment_ms")
40989
41759
 
40990
41760
  @property
40991
41761
  @pulumi.getter(name="uncleanLeaderElectionEnable")
40992
- @_utilities.deprecated("""This field is deprecated and no longer functional.""")
40993
41762
  def unclean_leader_election_enable(self) -> Optional[builtins.bool]:
40994
41763
  """
40995
- unclean.leader.election.enable value; This field is deprecated and no longer functional.
41764
+ Indicates whether to enable replicas not in the ISR set to be elected as leader as a last resort, even though doing so may result in data loss.
40996
41765
  """
40997
41766
  return pulumi.get(self, "unclean_leader_election_enable")
40998
41767
 
@@ -43722,11 +44491,11 @@ class GetOpenSearchOpensearchResult(dict):
43722
44491
  uris: Sequence[builtins.str],
43723
44492
  username: builtins.str):
43724
44493
  """
43725
- :param builtins.str kibana_uri: URI for Kibana dashboard frontend
43726
- :param builtins.str opensearch_dashboards_uri: URI for OpenSearch dashboard frontend
43727
- :param builtins.str password: OpenSearch password
44494
+ :param builtins.str kibana_uri: URI for Kibana dashboard frontend.
44495
+ :param builtins.str opensearch_dashboards_uri: URI for OpenSearch dashboard frontend.
44496
+ :param builtins.str password: OpenSearch password.
43728
44497
  :param Sequence[builtins.str] uris: OpenSearch server URIs.
43729
- :param builtins.str username: OpenSearch username
44498
+ :param builtins.str username: OpenSearch username.
43730
44499
  """
43731
44500
  pulumi.set(__self__, "kibana_uri", kibana_uri)
43732
44501
  pulumi.set(__self__, "opensearch_dashboards_uri", opensearch_dashboards_uri)
@@ -43739,7 +44508,7 @@ class GetOpenSearchOpensearchResult(dict):
43739
44508
  @_utilities.deprecated("""This field was added by mistake and has never worked. It will be removed in future versions.""")
43740
44509
  def kibana_uri(self) -> builtins.str:
43741
44510
  """
43742
- URI for Kibana dashboard frontend
44511
+ URI for Kibana dashboard frontend.
43743
44512
  """
43744
44513
  return pulumi.get(self, "kibana_uri")
43745
44514
 
@@ -43747,7 +44516,7 @@ class GetOpenSearchOpensearchResult(dict):
43747
44516
  @pulumi.getter(name="opensearchDashboardsUri")
43748
44517
  def opensearch_dashboards_uri(self) -> builtins.str:
43749
44518
  """
43750
- URI for OpenSearch dashboard frontend
44519
+ URI for OpenSearch dashboard frontend.
43751
44520
  """
43752
44521
  return pulumi.get(self, "opensearch_dashboards_uri")
43753
44522
 
@@ -43755,7 +44524,7 @@ class GetOpenSearchOpensearchResult(dict):
43755
44524
  @pulumi.getter
43756
44525
  def password(self) -> builtins.str:
43757
44526
  """
43758
- OpenSearch password
44527
+ OpenSearch password.
43759
44528
  """
43760
44529
  return pulumi.get(self, "password")
43761
44530
 
@@ -43771,7 +44540,7 @@ class GetOpenSearchOpensearchResult(dict):
43771
44540
  @pulumi.getter
43772
44541
  def username(self) -> builtins.str:
43773
44542
  """
43774
- OpenSearch username
44543
+ OpenSearch username.
43775
44544
  """
43776
44545
  return pulumi.get(self, "username")
43777
44546
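The connection fields documented above (uris, username, password; kibana_uri is deprecated) are enough to talk to the service directly. Below is a minimal, illustrative sketch using only the standard library; the service URI and credentials are placeholders rather than real values, and in practice they would come from the GetOpenSearchOpensearchResult values shown here.

# Illustrative only: placeholder URI and credentials stand in for the
# uris[0], username and password values documented above.
import base64
import json
import urllib.request

service_uri = "https://demo-opensearch.example.aivencloud.com:12345"
username = "avnadmin"
password = "example-password"

# Query the standard cluster health endpoint with HTTP basic auth.
request = urllib.request.Request(f"{service_uri}/_cluster/health")
token = base64.b64encode(f"{username}:{password}".encode()).decode()
request.add_header("Authorization", f"Basic {token}")

with urllib.request.urlopen(request) as response:
    health = json.loads(response.read())
    print(health["status"])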
 
@@ -44731,6 +45500,7 @@ class GetOpenSearchOpensearchUserConfigOpensearchResult(dict):
44731
45500
  email_sender_password: Optional[builtins.str] = None,
44732
45501
  email_sender_username: Optional[builtins.str] = None,
44733
45502
  enable_remote_backed_storage: Optional[builtins.bool] = None,
45503
+ enable_searchable_snapshots: Optional[builtins.bool] = None,
44734
45504
  enable_security_audit: Optional[builtins.bool] = None,
44735
45505
  http_max_content_length: Optional[builtins.int] = None,
44736
45506
  http_max_header_size: Optional[builtins.int] = None,
@@ -44783,6 +45553,7 @@ class GetOpenSearchOpensearchUserConfigOpensearchResult(dict):
44783
45553
  :param builtins.str email_sender_password: Sender password for Opensearch alerts to authenticate with SMTP server. Example: `very-secure-mail-password`.
44784
45554
  :param builtins.str email_sender_username: Sender username for Opensearch alerts. Example: `jane@example.com`.
44785
45555
  :param builtins.bool enable_remote_backed_storage: Enable remote-backed storage.
45556
+ :param builtins.bool enable_searchable_snapshots: Enable searchable snapshots.
44786
45557
  :param builtins.bool enable_security_audit: Enable/Disable security audit.
44787
45558
  :param builtins.int http_max_content_length: Maximum content length for HTTP requests to the OpenSearch HTTP API, in bytes.
44788
45559
  :param builtins.int http_max_header_size: The max size of allowed headers, in bytes. Example: `8192`.
@@ -44847,6 +45618,8 @@ class GetOpenSearchOpensearchUserConfigOpensearchResult(dict):
44847
45618
  pulumi.set(__self__, "email_sender_username", email_sender_username)
44848
45619
  if enable_remote_backed_storage is not None:
44849
45620
  pulumi.set(__self__, "enable_remote_backed_storage", enable_remote_backed_storage)
45621
+ if enable_searchable_snapshots is not None:
45622
+ pulumi.set(__self__, "enable_searchable_snapshots", enable_searchable_snapshots)
44850
45623
  if enable_security_audit is not None:
44851
45624
  pulumi.set(__self__, "enable_security_audit", enable_security_audit)
44852
45625
  if http_max_content_length is not None:
@@ -45021,6 +45794,14 @@ class GetOpenSearchOpensearchUserConfigOpensearchResult(dict):
45021
45794
  """
45022
45795
  return pulumi.get(self, "enable_remote_backed_storage")
45023
45796
 
45797
+ @property
45798
+ @pulumi.getter(name="enableSearchableSnapshots")
45799
+ def enable_searchable_snapshots(self) -> Optional[builtins.bool]:
45800
+ """
45801
+ Enable searchable snapshots.
45802
+ """
45803
+ return pulumi.get(self, "enable_searchable_snapshots")
45804
+
45024
45805
  @property
45025
45806
  @pulumi.getter(name="enableSecurityAudit")
45026
45807
  def enable_security_audit(self) -> Optional[builtins.bool]:
@@ -46928,6 +47709,25 @@ class GetOpenSearchTechEmailResult(dict):
46928
47709
  return pulumi.get(self, "email")
46929
47710
 
46930
47711
 
47712
+ @pulumi.output_type
47713
+ class GetOrganizationAddressTimeoutsResult(dict):
47714
+ def __init__(__self__, *,
47715
+ read: Optional[builtins.str] = None):
47716
+ """
47717
+ :param builtins.str read: A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours).
47718
+ """
47719
+ if read is not None:
47720
+ pulumi.set(__self__, "read", read)
47721
+
47722
+ @property
47723
+ @pulumi.getter
47724
+ def read(self) -> Optional[builtins.str]:
47725
+ """
47726
+ A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours).
47727
+ """
47728
+ return pulumi.get(self, "read")
47729
+
47730
+
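The read timeout added above is documented as a Go-style duration string limited to the s, m and h units. The sketch below is a small local validator for that documented format; it is not the provider's own parsing code, and the function name is invented for the example.

# Minimal validator for the documented timeout format ("30s", "2h45m", ...),
# limited to the units listed above: s, m, h. Not the provider's own code.
import re

_DURATION_PART = re.compile(r"(\d+(?:\.\d+)?)([smh])")
_SECONDS_PER_UNIT = {"s": 1, "m": 60, "h": 3600}

def parse_read_timeout(value: str) -> float:
    """Return the duration in seconds, rejecting anything that is not a
    sequence of number+unit pairs such as '2h45m'."""
    parts = list(_DURATION_PART.finditer(value))
    if not parts or "".join(p.group(0) for p in parts) != value:
        raise ValueError(f"not a valid duration: {value!r}")
    return sum(float(p.group(1)) * _SECONDS_PER_UNIT[p.group(2)] for p in parts)

print(parse_read_timeout("2h45m"))  # 9900.0
print(parse_read_timeout("30s"))    # 30.0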
46931
47731
  @pulumi.output_type
46932
47732
  class GetOrganizationBillingGroupListBillingGroupResult(dict):
46933
47733
  def __init__(__self__, *,
@@ -46938,23 +47738,21 @@ class GetOrganizationBillingGroupListBillingGroupResult(dict):
46938
47738
  billing_group_id: builtins.str,
46939
47739
  billing_group_name: builtins.str,
46940
47740
  custom_invoice_text: builtins.str,
46941
- id: builtins.str,
46942
47741
  organization_id: builtins.str,
46943
47742
  payment_method_id: builtins.str,
46944
47743
  shipping_address_id: builtins.str,
46945
47744
  vat_id: builtins.str):
46946
47745
  """
46947
- :param builtins.str billing_address_id: ID of the billing address.
47746
+ :param builtins.str billing_address_id: Billing address ID.
46948
47747
  :param Sequence[builtins.str] billing_contact_emails: List of billing contact emails.
46949
- :param builtins.str billing_currency: Billing currency.
46950
- :param Sequence[builtins.str] billing_emails: List of billing emails.
46951
- :param builtins.str billing_group_id: ID of the billing group.
46952
- :param builtins.str billing_group_name: Name of the billing group.
46953
- :param builtins.str custom_invoice_text: Custom invoice text.
46954
- :param builtins.str id: Resource ID, a composite of organization*id and billing*group_id.
46955
- :param builtins.str organization_id: ID of the organization.
46956
- :param builtins.str payment_method_id: ID of the payment method.
46957
- :param builtins.str shipping_address_id: ID of the shipping address.
47748
+ :param builtins.str billing_currency: Acceptable currencies for a billing group. The possible values are `AUD`, `CAD`, `CHF`, `DKK`, `EUR`, `GBP`, `JPY`, `NOK`, `NZD`, `SEK`, `SGD` and `USD`.
47749
+ :param Sequence[builtins.str] billing_emails: List of billing contact emails.
47750
+ :param builtins.str billing_group_id: Billing group ID.
47751
+ :param builtins.str billing_group_name: Billing Group Name.
47752
+ :param builtins.str custom_invoice_text: Extra billing text.
47753
+ :param builtins.str organization_id: Organization ID.
47754
+ :param builtins.str payment_method_id: Payment method ID.
47755
+ :param builtins.str shipping_address_id: Shipping address ID.
46958
47756
  :param builtins.str vat_id: VAT ID.
46959
47757
  """
46960
47758
  pulumi.set(__self__, "billing_address_id", billing_address_id)
@@ -46964,7 +47762,6 @@ class GetOrganizationBillingGroupListBillingGroupResult(dict):
46964
47762
  pulumi.set(__self__, "billing_group_id", billing_group_id)
46965
47763
  pulumi.set(__self__, "billing_group_name", billing_group_name)
46966
47764
  pulumi.set(__self__, "custom_invoice_text", custom_invoice_text)
46967
- pulumi.set(__self__, "id", id)
46968
47765
  pulumi.set(__self__, "organization_id", organization_id)
46969
47766
  pulumi.set(__self__, "payment_method_id", payment_method_id)
46970
47767
  pulumi.set(__self__, "shipping_address_id", shipping_address_id)
@@ -46974,7 +47771,7 @@ class GetOrganizationBillingGroupListBillingGroupResult(dict):
46974
47771
  @pulumi.getter(name="billingAddressId")
46975
47772
  def billing_address_id(self) -> builtins.str:
46976
47773
  """
46977
- ID of the billing address.
47774
+ Billing address ID.
46978
47775
  """
46979
47776
  return pulumi.get(self, "billing_address_id")
46980
47777
 
@@ -46990,7 +47787,7 @@ class GetOrganizationBillingGroupListBillingGroupResult(dict):
46990
47787
  @pulumi.getter(name="billingCurrency")
46991
47788
  def billing_currency(self) -> builtins.str:
46992
47789
  """
46993
- Billing currency.
47790
+ Acceptable currencies for a billing group. The possible values are `AUD`, `CAD`, `CHF`, `DKK`, `EUR`, `GBP`, `JPY`, `NOK`, `NZD`, `SEK`, `SGD` and `USD`.
46994
47791
  """
46995
47792
  return pulumi.get(self, "billing_currency")
46996
47793
 
@@ -46998,7 +47795,7 @@ class GetOrganizationBillingGroupListBillingGroupResult(dict):
46998
47795
  @pulumi.getter(name="billingEmails")
46999
47796
  def billing_emails(self) -> Sequence[builtins.str]:
47000
47797
  """
47001
- List of billing emails.
47798
+ List of billing contact emails.
47002
47799
  """
47003
47800
  return pulumi.get(self, "billing_emails")
47004
47801
 
@@ -47006,7 +47803,7 @@ class GetOrganizationBillingGroupListBillingGroupResult(dict):
47006
47803
  @pulumi.getter(name="billingGroupId")
47007
47804
  def billing_group_id(self) -> builtins.str:
47008
47805
  """
47009
- ID of the billing group.
47806
+ Billing group ID.
47010
47807
  """
47011
47808
  return pulumi.get(self, "billing_group_id")
47012
47809
 
@@ -47014,7 +47811,7 @@ class GetOrganizationBillingGroupListBillingGroupResult(dict):
47014
47811
  @pulumi.getter(name="billingGroupName")
47015
47812
  def billing_group_name(self) -> builtins.str:
47016
47813
  """
47017
- Name of the billing group.
47814
+ Billing Group Name.
47018
47815
  """
47019
47816
  return pulumi.get(self, "billing_group_name")
47020
47817
 
@@ -47022,23 +47819,15 @@ class GetOrganizationBillingGroupListBillingGroupResult(dict):
47022
47819
  @pulumi.getter(name="customInvoiceText")
47023
47820
  def custom_invoice_text(self) -> builtins.str:
47024
47821
  """
47025
- Custom invoice text.
47822
+ Extra billing text.
47026
47823
  """
47027
47824
  return pulumi.get(self, "custom_invoice_text")
47028
47825
 
47029
- @property
47030
- @pulumi.getter
47031
- def id(self) -> builtins.str:
47032
- """
47033
- Resource ID, a composite of organization*id and billing*group_id.
47034
- """
47035
- return pulumi.get(self, "id")
47036
-
47037
47826
  @property
47038
47827
  @pulumi.getter(name="organizationId")
47039
47828
  def organization_id(self) -> builtins.str:
47040
47829
  """
47041
- ID of the organization.
47830
+ Organization ID.
47042
47831
  """
47043
47832
  return pulumi.get(self, "organization_id")
47044
47833
 
@@ -47046,7 +47835,7 @@ class GetOrganizationBillingGroupListBillingGroupResult(dict):
47046
47835
  @pulumi.getter(name="paymentMethodId")
47047
47836
  def payment_method_id(self) -> builtins.str:
47048
47837
  """
47049
- ID of the payment method.
47838
+ Payment method ID.
47050
47839
  """
47051
47840
  return pulumi.get(self, "payment_method_id")
47052
47841
 
@@ -47054,7 +47843,7 @@ class GetOrganizationBillingGroupListBillingGroupResult(dict):
47054
47843
  @pulumi.getter(name="shippingAddressId")
47055
47844
  def shipping_address_id(self) -> builtins.str:
47056
47845
  """
47057
- ID of the shipping address.
47846
+ Shipping address ID.
47058
47847
  """
47059
47848
  return pulumi.get(self, "shipping_address_id")
47060
47849
 
@@ -47067,6 +47856,44 @@ class GetOrganizationBillingGroupListBillingGroupResult(dict):
47067
47856
  return pulumi.get(self, "vat_id")
47068
47857
 
47069
47858
 
47859
+ @pulumi.output_type
47860
+ class GetOrganizationBillingGroupListTimeoutsResult(dict):
47861
+ def __init__(__self__, *,
47862
+ read: Optional[builtins.str] = None):
47863
+ """
47864
+ :param builtins.str read: A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours).
47865
+ """
47866
+ if read is not None:
47867
+ pulumi.set(__self__, "read", read)
47868
+
47869
+ @property
47870
+ @pulumi.getter
47871
+ def read(self) -> Optional[builtins.str]:
47872
+ """
47873
+ A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours).
47874
+ """
47875
+ return pulumi.get(self, "read")
47876
+
47877
+
47878
+ @pulumi.output_type
47879
+ class GetOrganizationBillingGroupTimeoutsResult(dict):
47880
+ def __init__(__self__, *,
47881
+ read: Optional[builtins.str] = None):
47882
+ """
47883
+ :param builtins.str read: A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours).
47884
+ """
47885
+ if read is not None:
47886
+ pulumi.set(__self__, "read", read)
47887
+
47888
+ @property
47889
+ @pulumi.getter
47890
+ def read(self) -> Optional[builtins.str]:
47891
+ """
47892
+ A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours).
47893
+ """
47894
+ return pulumi.get(self, "read")
47895
+
47896
+
47070
47897
  @pulumi.output_type
47071
47898
  class GetOrganizationProjectTagResult(dict):
47072
47899
  def __init__(__self__, *,
@@ -47096,6 +47923,44 @@ class GetOrganizationProjectTagResult(dict):
47096
47923
  return pulumi.get(self, "value")
47097
47924
 
47098
47925
 
47926
+ @pulumi.output_type
47927
+ class GetOrganizationProjectTimeoutsResult(dict):
47928
+ def __init__(__self__, *,
47929
+ read: Optional[builtins.str] = None):
47930
+ """
47931
+ :param builtins.str read: A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours).
47932
+ """
47933
+ if read is not None:
47934
+ pulumi.set(__self__, "read", read)
47935
+
47936
+ @property
47937
+ @pulumi.getter
47938
+ def read(self) -> Optional[builtins.str]:
47939
+ """
47940
+ A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours).
47941
+ """
47942
+ return pulumi.get(self, "read")
47943
+
47944
+
47945
+ @pulumi.output_type
47946
+ class GetOrganizationTimeoutsResult(dict):
47947
+ def __init__(__self__, *,
47948
+ read: Optional[builtins.str] = None):
47949
+ """
47950
+ :param builtins.str read: A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours).
47951
+ """
47952
+ if read is not None:
47953
+ pulumi.set(__self__, "read", read)
47954
+
47955
+ @property
47956
+ @pulumi.getter
47957
+ def read(self) -> Optional[builtins.str]:
47958
+ """
47959
+ A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours).
47960
+ """
47961
+ return pulumi.get(self, "read")
47962
+
47963
+
47099
47964
  @pulumi.output_type
47100
47965
  class GetOrganizationUserListUserResult(dict):
47101
47966
  def __init__(__self__, *,
@@ -47889,7 +48754,6 @@ class GetPgPgUserConfigResult(dict):
47889
48754
 
47890
48755
  @property
47891
48756
  @pulumi.getter
47892
- @_utilities.deprecated("""This property is deprecated.""")
47893
48757
  def pgaudit(self) -> Optional['outputs.GetPgPgUserConfigPgauditResult']:
47894
48758
  """
47895
48759
  System-wide settings for the pgaudit extension
@@ -48860,7 +49724,7 @@ class GetPgPgUserConfigPgauditResult(dict):
48860
49724
  :param builtins.bool feature_enabled: Enable pgaudit extension. When enabled, pgaudit extension will be automatically installed. Otherwise, the extension will be uninstalled but auditing configurations will be preserved. Default: `false`.
48861
49725
  :param builtins.bool log_catalog: Specifies that session logging should be enabled in the casewhere all relations in a statement are in pg_catalog. Default: `true`.
48862
49726
  :param builtins.bool log_client: Specifies whether log messages will be visible to a client process such as psql. Default: `false`.
48863
- :param builtins.str log_level: Enum: `debug1`, `debug2`, `debug3`, `debug4`, `debug5`, `info`, `notice`, `warning`, `log`. Specifies the log level that will be used for log entries. Default: `log`.
49727
+ :param builtins.str log_level: Enum: `debug1`, `debug2`, `debug3`, `debug4`, `debug5`, `info`, `log`, `notice`, `warning`. Specifies the log level that will be used for log entries. Default: `log`.
48864
49728
  :param builtins.int log_max_string_length: Crop parameter representations and whole statements if they exceed this threshold. A (default) value of -1 disables the truncation. Default: `-1`.
48865
49729
  :param builtins.bool log_nested_statements: This GUC allows turning off the logging of nested statements, that is, statements that are executed as part of another ExecutorRun. Default: `true`.
48866
49730
  :param builtins.bool log_parameter: Specifies that audit logging should include the parameters that were passed with the statement. Default: `false`.
@@ -48903,7 +49767,6 @@ class GetPgPgUserConfigPgauditResult(dict):
48903
49767
 
48904
49768
  @property
48905
49769
  @pulumi.getter(name="featureEnabled")
48906
- @_utilities.deprecated("""This property is deprecated.""")
48907
49770
  def feature_enabled(self) -> Optional[builtins.bool]:
48908
49771
  """
48909
49772
  Enable pgaudit extension. When enabled, pgaudit extension will be automatically installed. Otherwise, the extension will be uninstalled but auditing configurations will be preserved. Default: `false`.
@@ -48912,7 +49775,6 @@ class GetPgPgUserConfigPgauditResult(dict):
48912
49775
 
48913
49776
  @property
48914
49777
  @pulumi.getter(name="logCatalog")
48915
- @_utilities.deprecated("""This property is deprecated.""")
48916
49778
  def log_catalog(self) -> Optional[builtins.bool]:
48917
49779
  """
48918
49780
  Specifies that session logging should be enabled in the casewhere all relations in a statement are in pg_catalog. Default: `true`.
@@ -48921,7 +49783,6 @@ class GetPgPgUserConfigPgauditResult(dict):
48921
49783
 
48922
49784
  @property
48923
49785
  @pulumi.getter(name="logClient")
48924
- @_utilities.deprecated("""This property is deprecated.""")
48925
49786
  def log_client(self) -> Optional[builtins.bool]:
48926
49787
  """
48927
49788
  Specifies whether log messages will be visible to a client process such as psql. Default: `false`.
@@ -48930,16 +49791,14 @@ class GetPgPgUserConfigPgauditResult(dict):
48930
49791
 
48931
49792
  @property
48932
49793
  @pulumi.getter(name="logLevel")
48933
- @_utilities.deprecated("""This property is deprecated.""")
48934
49794
  def log_level(self) -> Optional[builtins.str]:
48935
49795
  """
48936
- Enum: `debug1`, `debug2`, `debug3`, `debug4`, `debug5`, `info`, `notice`, `warning`, `log`. Specifies the log level that will be used for log entries. Default: `log`.
49796
+ Enum: `debug1`, `debug2`, `debug3`, `debug4`, `debug5`, `info`, `log`, `notice`, `warning`. Specifies the log level that will be used for log entries. Default: `log`.
48937
49797
  """
48938
49798
  return pulumi.get(self, "log_level")
48939
49799
 
48940
49800
  @property
48941
49801
  @pulumi.getter(name="logMaxStringLength")
48942
- @_utilities.deprecated("""This property is deprecated.""")
48943
49802
  def log_max_string_length(self) -> Optional[builtins.int]:
48944
49803
  """
48945
49804
  Crop parameter representations and whole statements if they exceed this threshold. A (default) value of -1 disables the truncation. Default: `-1`.
@@ -48948,7 +49807,6 @@ class GetPgPgUserConfigPgauditResult(dict):
48948
49807
 
48949
49808
  @property
48950
49809
  @pulumi.getter(name="logNestedStatements")
48951
- @_utilities.deprecated("""This property is deprecated.""")
48952
49810
  def log_nested_statements(self) -> Optional[builtins.bool]:
48953
49811
  """
48954
49812
  This GUC allows to turn off logging nested statements, that is, statements that are executed as part of another ExecutorRun. Default: `true`.
@@ -48957,7 +49815,6 @@ class GetPgPgUserConfigPgauditResult(dict):
48957
49815
 
48958
49816
  @property
48959
49817
  @pulumi.getter(name="logParameter")
48960
- @_utilities.deprecated("""This property is deprecated.""")
48961
49818
  def log_parameter(self) -> Optional[builtins.bool]:
48962
49819
  """
48963
49820
  Specifies that audit logging should include the parameters that were passed with the statement. Default: `false`.
@@ -48966,7 +49823,6 @@ class GetPgPgUserConfigPgauditResult(dict):
48966
49823
 
48967
49824
  @property
48968
49825
  @pulumi.getter(name="logParameterMaxSize")
48969
- @_utilities.deprecated("""This property is deprecated.""")
48970
49826
  def log_parameter_max_size(self) -> Optional[builtins.int]:
48971
49827
  """
48972
49828
  Specifies that parameter values longer than this setting (in bytes) should not be logged, but replaced with <long param suppressed>. Default: `0`.
@@ -48975,7 +49831,6 @@ class GetPgPgUserConfigPgauditResult(dict):
48975
49831
 
48976
49832
  @property
48977
49833
  @pulumi.getter(name="logRelation")
48978
- @_utilities.deprecated("""This property is deprecated.""")
48979
49834
  def log_relation(self) -> Optional[builtins.bool]:
48980
49835
  """
48981
49836
  Specifies whether session audit logging should create a separate log entry for each relation (TABLE, VIEW, etc.) referenced in a SELECT or DML statement. Default: `false`.
@@ -48984,7 +49839,6 @@ class GetPgPgUserConfigPgauditResult(dict):
48984
49839
 
48985
49840
  @property
48986
49841
  @pulumi.getter(name="logRows")
48987
- @_utilities.deprecated("""This property is deprecated.""")
48988
49842
  def log_rows(self) -> Optional[builtins.bool]:
48989
49843
  """
48990
49844
  Specifies that audit logging should include the rows retrieved or affected by a statement. When enabled the rows field will be included after the parameter field. Default: `false`.
@@ -48993,7 +49847,6 @@ class GetPgPgUserConfigPgauditResult(dict):
48993
49847
 
48994
49848
  @property
48995
49849
  @pulumi.getter(name="logStatement")
48996
- @_utilities.deprecated("""This property is deprecated.""")
48997
49850
  def log_statement(self) -> Optional[builtins.bool]:
48998
49851
  """
48999
49852
  Specifies whether logging will include the statement text and parameters (if enabled). Default: `true`.
@@ -49002,7 +49855,6 @@ class GetPgPgUserConfigPgauditResult(dict):
49002
49855
 
49003
49856
  @property
49004
49857
  @pulumi.getter(name="logStatementOnce")
49005
- @_utilities.deprecated("""This property is deprecated.""")
49006
49858
  def log_statement_once(self) -> Optional[builtins.bool]:
49007
49859
  """
49008
49860
  Specifies whether logging will include the statement text and parameters with the first log entry for a statement/substatement combination or with every entry. Default: `false`.
@@ -49011,7 +49863,6 @@ class GetPgPgUserConfigPgauditResult(dict):
49011
49863
 
49012
49864
  @property
49013
49865
  @pulumi.getter
49014
- @_utilities.deprecated("""This property is deprecated.""")
49015
49866
  def logs(self) -> Optional[Sequence[builtins.str]]:
49016
49867
  """
49017
49868
  Specifies which classes of statements will be logged by session audit logging.
@@ -49020,7 +49871,6 @@ class GetPgPgUserConfigPgauditResult(dict):
49020
49871
 
49021
49872
  @property
49022
49873
  @pulumi.getter
49023
- @_utilities.deprecated("""This property is deprecated.""")
49024
49874
  def role(self) -> Optional[builtins.str]:
49025
49875
  """
49026
49876
  Specifies the master role to use for object audit logging.