aws-sdk-firehose 1.84.0 → 1.85.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -454,39 +454,39 @@ module Aws::Firehose
 
  # @!group API Operations
 
- # Creates a Firehose delivery stream.
+ # Creates a Firehose stream.
  #
- # By default, you can create up to 50 delivery streams per Amazon Web
+ # By default, you can create up to 50 Firehose streams per Amazon Web
  # Services Region.
  #
  # This is an asynchronous operation that immediately returns. The
- # initial status of the delivery stream is `CREATING`. After the
- # delivery stream is created, its status is `ACTIVE` and it now accepts
- # data. If the delivery stream creation fails, the status transitions to
+ # initial status of the Firehose stream is `CREATING`. After the
+ # Firehose stream is created, its status is `ACTIVE` and it now accepts
+ # data. If the Firehose stream creation fails, the status transitions to
  # `CREATING_FAILED`. Attempts to send data to a delivery stream that is
  # not in the `ACTIVE` state cause an exception. To check the state of a
- # delivery stream, use DescribeDeliveryStream.
+ # Firehose stream, use DescribeDeliveryStream.
  #
- # If the status of a delivery stream is `CREATING_FAILED`, this status
+ # If the status of a Firehose stream is `CREATING_FAILED`, this status
  # doesn't change, and you can't invoke `CreateDeliveryStream` again on
  # it. However, you can invoke the DeleteDeliveryStream operation to
  # delete it.
  #
- # A Firehose delivery stream can be configured to receive records
- # directly from providers using PutRecord or PutRecordBatch, or it can
- # be configured to use an existing Kinesis stream as its source. To
- # specify a Kinesis data stream as input, set the `DeliveryStreamType`
- # parameter to `KinesisStreamAsSource`, and provide the Kinesis stream
- # Amazon Resource Name (ARN) and role ARN in the
+ # A Firehose stream can be configured to receive records directly from
+ # providers using PutRecord or PutRecordBatch, or it can be configured
+ # to use an existing Kinesis stream as its source. To specify a Kinesis
+ # data stream as input, set the `DeliveryStreamType` parameter to
+ # `KinesisStreamAsSource`, and provide the Kinesis stream Amazon
+ # Resource Name (ARN) and role ARN in the
  # `KinesisStreamSourceConfiguration` parameter.
  #
- # To create a delivery stream with server-side encryption (SSE) enabled,
+ # To create a Firehose stream with server-side encryption (SSE) enabled,
  # include DeliveryStreamEncryptionConfigurationInput in your request.
  # This is optional. You can also invoke StartDeliveryStreamEncryption to
- # turn on SSE for an existing delivery stream that doesn't have SSE
+ # turn on SSE for an existing Firehose stream that doesn't have SSE
  # enabled.
  #
- # A delivery stream is configured with a single destination, such as
+ # A Firehose stream is configured with a single destination, such as
  # Amazon Simple Storage Service (Amazon S3), Amazon Redshift, Amazon
  # OpenSearch Service, Amazon OpenSearch Serverless, Splunk, and any
  # custom HTTP endpoint or HTTP endpoints owned by or supported by
@@ -539,23 +539,23 @@ module Aws::Firehose
  # [1]: https://docs.aws.amazon.com/firehose/latest/dev/controlling-access.html#using-iam-s3
  #
  # @option params [required, String] :delivery_stream_name
- # The name of the delivery stream. This name must be unique per Amazon
+ # The name of the Firehose stream. This name must be unique per Amazon
  # Web Services account in the same Amazon Web Services Region. If the
- # delivery streams are in different accounts or different Regions, you
- # can have multiple delivery streams with the same name.
+ # Firehose streams are in different accounts or different Regions, you
+ # can have multiple Firehose streams with the same name.
  #
  # @option params [String] :delivery_stream_type
- # The delivery stream type. This parameter can be one of the following
+ # The Firehose stream type. This parameter can be one of the following
  # values:
  #
- # * `DirectPut`: Provider applications access the delivery stream
+ # * `DirectPut`: Provider applications access the Firehose stream
  # directly.
  #
- # * `KinesisStreamAsSource`: The delivery stream uses a Kinesis data
+ # * `KinesisStreamAsSource`: The Firehose stream uses a Kinesis data
  # stream as a source.
  #
  # @option params [Types::KinesisStreamSourceConfiguration] :kinesis_stream_source_configuration
- # When a Kinesis data stream is used as the source for the delivery
+ # When a Kinesis data stream is used as the source for the Firehose
  # stream, a KinesisStreamSourceConfiguration containing the Kinesis data
  # stream Amazon Resource Name (ARN) and the role ARN for the source
  # stream.
@@ -590,21 +590,21 @@ module Aws::Firehose
  # endpoint destination. You can specify only one destination.
  #
  # @option params [Array<Types::Tag>] :tags
- # A set of tags to assign to the delivery stream. A tag is a key-value
+ # A set of tags to assign to the Firehose stream. A tag is a key-value
  # pair that you can define and assign to Amazon Web Services resources.
  # Tags are metadata. For example, you can add friendly names and
  # descriptions or other types of information that can help you
- # distinguish the delivery stream. For more information about tags, see
+ # distinguish the Firehose stream. For more information about tags, see
  # [Using Cost Allocation Tags][1] in the Amazon Web Services Billing and
  # Cost Management User Guide.
  #
- # You can specify up to 50 tags when creating a delivery stream.
+ # You can specify up to 50 tags when creating a Firehose stream.
  #
  # If you specify tags in the `CreateDeliveryStream` action, Amazon Data
  # Firehose performs an additional authorization on the
  # `firehose:TagDeliveryStream` action to verify if users have
  # permissions to create tags. If you do not provide this permission,
- # requests to create new Firehose delivery streams with IAM resource
+ # requests to create new Firehose Firehose streams with IAM resource
  # tags will fail with an `AccessDeniedException` such as following.
  #
  # **AccessDeniedException**
@@ -635,6 +635,7 @@ module Aws::Firehose
  # @option params [Types::IcebergDestinationConfiguration] :iceberg_destination_configuration
  # Configure Apache Iceberg Tables destination.
  #
+ # @option params [Types::DatabaseSourceConfiguration] :database_source_configuration
  # Amazon Data Firehose is in preview release and is subject to change.
  #
  # @return [Types::CreateDeliveryStreamOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
@@ -645,7 +646,7 @@ module Aws::Firehose
  #
  # resp = client.create_delivery_stream({
  # delivery_stream_name: "DeliveryStreamName", # required
- # delivery_stream_type: "DirectPut", # accepts DirectPut, KinesisStreamAsSource, MSKAsSource
+ # delivery_stream_type: "DirectPut", # accepts DirectPut, KinesisStreamAsSource, MSKAsSource, DatabaseAsSource
  # kinesis_stream_source_configuration: {
  # kinesis_stream_arn: "KinesisStreamARN", # required
  # role_arn: "RoleARN", # required
@@ -1287,12 +1288,25 @@ module Aws::Firehose
  # iceberg_destination_configuration: {
  # destination_table_configuration_list: [
  # {
- # destination_table_name: "NonEmptyStringWithoutWhitespace", # required
- # destination_database_name: "NonEmptyStringWithoutWhitespace", # required
+ # destination_table_name: "StringWithLettersDigitsUnderscoresDots", # required
+ # destination_database_name: "StringWithLettersDigitsUnderscoresDots", # required
  # unique_keys: ["NonEmptyStringWithoutWhitespace"],
+ # partition_spec: {
+ # identity: [
+ # {
+ # source_name: "NonEmptyStringWithoutWhitespace", # required
+ # },
+ # ],
+ # },
  # s3_error_output_prefix: "ErrorOutputPrefix",
  # },
  # ],
+ # schema_evolution_configuration: {
+ # enabled: false, # required
+ # },
+ # table_creation_configuration: {
+ # enabled: false, # required
+ # },
  # buffering_hints: {
  # size_in_m_bs: 1,
  # interval_in_seconds: 1,
@@ -1323,6 +1337,7 @@ module Aws::Firehose
  # role_arn: "RoleARN", # required
  # catalog_configuration: { # required
  # catalog_arn: "GlueDataCatalogARN",
+ # warehouse_location: "WarehouseLocation",
  # },
  # s3_configuration: { # required
  # role_arn: "RoleARN", # required
@@ -1347,6 +1362,36 @@ module Aws::Firehose
  # },
  # },
  # },
+ # database_source_configuration: {
+ # type: "MySQL", # required, accepts MySQL, PostgreSQL
+ # endpoint: "DatabaseEndpoint", # required
+ # port: 1, # required
+ # ssl_mode: "Disabled", # accepts Disabled, Enabled
+ # databases: { # required
+ # include: ["DatabaseName"],
+ # exclude: ["DatabaseName"],
+ # },
+ # tables: { # required
+ # include: ["DatabaseTableName"],
+ # exclude: ["DatabaseTableName"],
+ # },
+ # columns: {
+ # include: ["DatabaseColumnName"],
+ # exclude: ["DatabaseColumnName"],
+ # },
+ # surrogate_keys: ["NonEmptyStringWithoutWhitespace"],
+ # snapshot_watermark_table: "DatabaseTableName", # required
+ # database_source_authentication_configuration: { # required
+ # secrets_manager_configuration: { # required
+ # secret_arn: "SecretARN",
+ # role_arn: "RoleARN",
+ # enabled: false, # required
+ # },
+ # },
+ # database_source_vpc_configuration: { # required
+ # vpc_endpoint_service_name: "VpcEndpointServiceName", # required
+ # },
+ # },
  # })
  #
  # @example Response structure
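The request-syntax hunks above introduce the `DatabaseAsSource` stream type and the `database_source_configuration` input. A minimal sketch of how a caller might wire them together; the stream name, endpoint, table names, ARNs, and the choice of an Apache Iceberg destination are illustrative assumptions, not values taken from this diff:

require 'aws-sdk-firehose'

client = Aws::Firehose::Client.new(region: 'us-east-1')

client.create_delivery_stream(
  delivery_stream_name: 'orders-cdc-stream',            # placeholder name
  delivery_stream_type: 'DatabaseAsSource',             # new enum value in 1.85.0
  database_source_configuration: {
    type: 'PostgreSQL',                                 # or 'MySQL'
    endpoint: 'orders-db.example.internal',             # placeholder endpoint
    port: 5432,
    ssl_mode: 'Enabled',
    databases: { include: ['orders'] },
    tables: { include: ['public.order_events'] },
    surrogate_keys: ['order_id'],
    snapshot_watermark_table: 'public.firehose_watermark',
    database_source_authentication_configuration: {
      secrets_manager_configuration: {
        secret_arn: 'arn:aws:secretsmanager:us-east-1:123456789012:secret:orders-db', # placeholder ARN
        enabled: true
      }
    },
    database_source_vpc_configuration: {
      vpc_endpoint_service_name: 'com.amazonaws.vpce.us-east-1.vpce-svc-0123456789abcdef0'
    }
  },
  # The destination block below is only to make the request complete; an
  # Iceberg Tables destination is assumed here.
  iceberg_destination_configuration: {
    role_arn: 'arn:aws:iam::123456789012:role/firehose-iceberg-role',
    catalog_configuration: { catalog_arn: 'arn:aws:glue:us-east-1:123456789012:catalog' },
    s3_configuration: {
      role_arn: 'arn:aws:iam::123456789012:role/firehose-iceberg-role',
      bucket_arn: 'arn:aws:s3:::orders-iceberg-bucket'
    }
  }
)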
@@ -1362,33 +1407,33 @@ module Aws::Firehose
  req.send_request(options)
  end
 
- # Deletes a delivery stream and its data.
+ # Deletes a Firehose stream and its data.
  #
- # You can delete a delivery stream only if it is in one of the following
+ # You can delete a Firehose stream only if it is in one of the following
  # states: `ACTIVE`, `DELETING`, `CREATING_FAILED`, or `DELETING_FAILED`.
- # You can't delete a delivery stream that is in the `CREATING` state.
- # To check the state of a delivery stream, use DescribeDeliveryStream.
+ # You can't delete a Firehose stream that is in the `CREATING` state.
+ # To check the state of a Firehose stream, use DescribeDeliveryStream.
  #
  # DeleteDeliveryStream is an asynchronous API. When an API request to
- # DeleteDeliveryStream succeeds, the delivery stream is marked for
- # deletion, and it goes into the `DELETING` state.While the delivery
+ # DeleteDeliveryStream succeeds, the Firehose stream is marked for
+ # deletion, and it goes into the `DELETING` state.While the Firehose
  # stream is in the `DELETING` state, the service might continue to
  # accept records, but it doesn't make any guarantees with respect to
  # delivering the data. Therefore, as a best practice, first stop any
- # applications that are sending records before you delete a delivery
+ # applications that are sending records before you delete a Firehose
  # stream.
  #
- # Removal of a delivery stream that is in the `DELETING` state is a low
+ # Removal of a Firehose stream that is in the `DELETING` state is a low
  # priority operation for the service. A stream may remain in the
  # `DELETING` state for several minutes. Therefore, as a best practice,
  # applications should not wait for streams in the `DELETING` state to be
  # removed.
  #
  # @option params [required, String] :delivery_stream_name
- # The name of the delivery stream.
+ # The name of the Firehose stream.
  #
  # @option params [Boolean] :allow_force_delete
- # Set this to true if you want to delete the delivery stream even if
+ # Set this to true if you want to delete the Firehose stream even if
  # Firehose is unable to retire the grant for the CMK. Firehose might be
  # unable to retire the grant due to a customer error, such as when the
  # CMK or the grant are in an invalid state. If you force deletion, you
@@ -1421,12 +1466,12 @@ module Aws::Firehose
  req.send_request(options)
  end
 
- # Describes the specified delivery stream and its status. For example,
- # after your delivery stream is created, call `DescribeDeliveryStream`
- # to see whether the delivery stream is `ACTIVE` and therefore ready for
+ # Describes the specified Firehose stream and its status. For example,
+ # after your Firehose stream is created, call `DescribeDeliveryStream`
+ # to see whether the Firehose stream is `ACTIVE` and therefore ready for
  # data to be sent to it.
  #
- # If the status of a delivery stream is `CREATING_FAILED`, this status
+ # If the status of a Firehose stream is `CREATING_FAILED`, this status
  # doesn't change, and you can't invoke CreateDeliveryStream again on
  # it. However, you can invoke the DeleteDeliveryStream operation to
  # delete it. If the status is `DELETING_FAILED`, you can force deletion
@@ -1434,15 +1479,15 @@ module Aws::Firehose
  # DeleteDeliveryStreamInput$AllowForceDelete set to true.
  #
  # @option params [required, String] :delivery_stream_name
- # The name of the delivery stream.
+ # The name of the Firehose stream.
  #
  # @option params [Integer] :limit
  # The limit on the number of destinations to return. You can have one
- # destination per delivery stream.
+ # destination per Firehose stream.
  #
  # @option params [String] :exclusive_start_destination_id
  # The ID of the destination to start returning the destination
- # information. Firehose supports one destination per delivery stream.
+ # information. Firehose supports one destination per Firehose stream.
  #
  # @return [Types::DescribeDeliveryStreamOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
  #
@@ -1461,14 +1506,14 @@ module Aws::Firehose
  # resp.delivery_stream_description.delivery_stream_name #=> String
  # resp.delivery_stream_description.delivery_stream_arn #=> String
  # resp.delivery_stream_description.delivery_stream_status #=> String, one of "CREATING", "CREATING_FAILED", "DELETING", "DELETING_FAILED", "ACTIVE"
- # resp.delivery_stream_description.failure_description.type #=> String, one of "RETIRE_KMS_GRANT_FAILED", "CREATE_KMS_GRANT_FAILED", "KMS_ACCESS_DENIED", "DISABLED_KMS_KEY", "INVALID_KMS_KEY", "KMS_KEY_NOT_FOUND", "KMS_OPT_IN_REQUIRED", "CREATE_ENI_FAILED", "DELETE_ENI_FAILED", "SUBNET_NOT_FOUND", "SECURITY_GROUP_NOT_FOUND", "ENI_ACCESS_DENIED", "SUBNET_ACCESS_DENIED", "SECURITY_GROUP_ACCESS_DENIED", "UNKNOWN_ERROR"
+ # resp.delivery_stream_description.failure_description.type #=> String, one of "VPC_ENDPOINT_SERVICE_NAME_NOT_FOUND", "VPC_INTERFACE_ENDPOINT_SERVICE_ACCESS_DENIED", "RETIRE_KMS_GRANT_FAILED", "CREATE_KMS_GRANT_FAILED", "KMS_ACCESS_DENIED", "DISABLED_KMS_KEY", "INVALID_KMS_KEY", "KMS_KEY_NOT_FOUND", "KMS_OPT_IN_REQUIRED", "CREATE_ENI_FAILED", "DELETE_ENI_FAILED", "SUBNET_NOT_FOUND", "SECURITY_GROUP_NOT_FOUND", "ENI_ACCESS_DENIED", "SUBNET_ACCESS_DENIED", "SECURITY_GROUP_ACCESS_DENIED", "UNKNOWN_ERROR"
  # resp.delivery_stream_description.failure_description.details #=> String
  # resp.delivery_stream_description.delivery_stream_encryption_configuration.key_arn #=> String
  # resp.delivery_stream_description.delivery_stream_encryption_configuration.key_type #=> String, one of "AWS_OWNED_CMK", "CUSTOMER_MANAGED_CMK"
  # resp.delivery_stream_description.delivery_stream_encryption_configuration.status #=> String, one of "ENABLED", "ENABLING", "ENABLING_FAILED", "DISABLED", "DISABLING", "DISABLING_FAILED"
- # resp.delivery_stream_description.delivery_stream_encryption_configuration.failure_description.type #=> String, one of "RETIRE_KMS_GRANT_FAILED", "CREATE_KMS_GRANT_FAILED", "KMS_ACCESS_DENIED", "DISABLED_KMS_KEY", "INVALID_KMS_KEY", "KMS_KEY_NOT_FOUND", "KMS_OPT_IN_REQUIRED", "CREATE_ENI_FAILED", "DELETE_ENI_FAILED", "SUBNET_NOT_FOUND", "SECURITY_GROUP_NOT_FOUND", "ENI_ACCESS_DENIED", "SUBNET_ACCESS_DENIED", "SECURITY_GROUP_ACCESS_DENIED", "UNKNOWN_ERROR"
+ # resp.delivery_stream_description.delivery_stream_encryption_configuration.failure_description.type #=> String, one of "VPC_ENDPOINT_SERVICE_NAME_NOT_FOUND", "VPC_INTERFACE_ENDPOINT_SERVICE_ACCESS_DENIED", "RETIRE_KMS_GRANT_FAILED", "CREATE_KMS_GRANT_FAILED", "KMS_ACCESS_DENIED", "DISABLED_KMS_KEY", "INVALID_KMS_KEY", "KMS_KEY_NOT_FOUND", "KMS_OPT_IN_REQUIRED", "CREATE_ENI_FAILED", "DELETE_ENI_FAILED", "SUBNET_NOT_FOUND", "SECURITY_GROUP_NOT_FOUND", "ENI_ACCESS_DENIED", "SUBNET_ACCESS_DENIED", "SECURITY_GROUP_ACCESS_DENIED", "UNKNOWN_ERROR"
  # resp.delivery_stream_description.delivery_stream_encryption_configuration.failure_description.details #=> String
- # resp.delivery_stream_description.delivery_stream_type #=> String, one of "DirectPut", "KinesisStreamAsSource", "MSKAsSource"
+ # resp.delivery_stream_description.delivery_stream_type #=> String, one of "DirectPut", "KinesisStreamAsSource", "MSKAsSource", "DatabaseAsSource"
  # resp.delivery_stream_description.version_id #=> String
  # resp.delivery_stream_description.create_timestamp #=> Time
  # resp.delivery_stream_description.last_update_timestamp #=> Time
@@ -1481,6 +1526,37 @@ module Aws::Firehose
  # resp.delivery_stream_description.source.msk_source_description.authentication_configuration.connectivity #=> String, one of "PUBLIC", "PRIVATE"
  # resp.delivery_stream_description.source.msk_source_description.delivery_start_timestamp #=> Time
  # resp.delivery_stream_description.source.msk_source_description.read_from_timestamp #=> Time
+ # resp.delivery_stream_description.source.database_source_description.type #=> String, one of "MySQL", "PostgreSQL"
+ # resp.delivery_stream_description.source.database_source_description.endpoint #=> String
+ # resp.delivery_stream_description.source.database_source_description.port #=> Integer
+ # resp.delivery_stream_description.source.database_source_description.ssl_mode #=> String, one of "Disabled", "Enabled"
+ # resp.delivery_stream_description.source.database_source_description.databases.include #=> Array
+ # resp.delivery_stream_description.source.database_source_description.databases.include[0] #=> String
+ # resp.delivery_stream_description.source.database_source_description.databases.exclude #=> Array
+ # resp.delivery_stream_description.source.database_source_description.databases.exclude[0] #=> String
+ # resp.delivery_stream_description.source.database_source_description.tables.include #=> Array
+ # resp.delivery_stream_description.source.database_source_description.tables.include[0] #=> String
+ # resp.delivery_stream_description.source.database_source_description.tables.exclude #=> Array
+ # resp.delivery_stream_description.source.database_source_description.tables.exclude[0] #=> String
+ # resp.delivery_stream_description.source.database_source_description.columns.include #=> Array
+ # resp.delivery_stream_description.source.database_source_description.columns.include[0] #=> String
+ # resp.delivery_stream_description.source.database_source_description.columns.exclude #=> Array
+ # resp.delivery_stream_description.source.database_source_description.columns.exclude[0] #=> String
+ # resp.delivery_stream_description.source.database_source_description.surrogate_keys #=> Array
+ # resp.delivery_stream_description.source.database_source_description.surrogate_keys[0] #=> String
+ # resp.delivery_stream_description.source.database_source_description.snapshot_watermark_table #=> String
+ # resp.delivery_stream_description.source.database_source_description.snapshot_info #=> Array
+ # resp.delivery_stream_description.source.database_source_description.snapshot_info[0].id #=> String
+ # resp.delivery_stream_description.source.database_source_description.snapshot_info[0].table #=> String
+ # resp.delivery_stream_description.source.database_source_description.snapshot_info[0].request_timestamp #=> Time
+ # resp.delivery_stream_description.source.database_source_description.snapshot_info[0].requested_by #=> String, one of "USER", "FIREHOSE"
+ # resp.delivery_stream_description.source.database_source_description.snapshot_info[0].status #=> String, one of "IN_PROGRESS", "COMPLETE", "SUSPENDED"
+ # resp.delivery_stream_description.source.database_source_description.snapshot_info[0].failure_description.type #=> String, one of "VPC_ENDPOINT_SERVICE_NAME_NOT_FOUND", "VPC_INTERFACE_ENDPOINT_SERVICE_ACCESS_DENIED", "RETIRE_KMS_GRANT_FAILED", "CREATE_KMS_GRANT_FAILED", "KMS_ACCESS_DENIED", "DISABLED_KMS_KEY", "INVALID_KMS_KEY", "KMS_KEY_NOT_FOUND", "KMS_OPT_IN_REQUIRED", "CREATE_ENI_FAILED", "DELETE_ENI_FAILED", "SUBNET_NOT_FOUND", "SECURITY_GROUP_NOT_FOUND", "ENI_ACCESS_DENIED", "SUBNET_ACCESS_DENIED", "SECURITY_GROUP_ACCESS_DENIED", "UNKNOWN_ERROR"
+ # resp.delivery_stream_description.source.database_source_description.snapshot_info[0].failure_description.details #=> String
+ # resp.delivery_stream_description.source.database_source_description.database_source_authentication_configuration.secrets_manager_configuration.secret_arn #=> String
+ # resp.delivery_stream_description.source.database_source_description.database_source_authentication_configuration.secrets_manager_configuration.role_arn #=> String
+ # resp.delivery_stream_description.source.database_source_description.database_source_authentication_configuration.secrets_manager_configuration.enabled #=> Boolean
+ # resp.delivery_stream_description.source.database_source_description.database_source_vpc_configuration.vpc_endpoint_service_name #=> String
  # resp.delivery_stream_description.destinations #=> Array
  # resp.delivery_stream_description.destinations[0].destination_id #=> String
  # resp.delivery_stream_description.destinations[0].s3_destination_description.role_arn #=> String
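The new `database_source_description` response fields shown above can be read back with `describe_delivery_stream`; a brief sketch (the stream name is a placeholder, and the nested fields are only present for `DatabaseAsSource` streams):

require 'aws-sdk-firehose'

client = Aws::Firehose::Client.new(region: 'us-east-1')
resp = client.describe_delivery_stream(delivery_stream_name: 'orders-cdc-stream')

db = resp.delivery_stream_description.source&.database_source_description
if db
  puts "#{db.type} source at #{db.endpoint}:#{db.port} (SSL: #{db.ssl_mode})"
  # snapshot_info reports per-table snapshot progress for the database source.
  Array(db.snapshot_info).each do |snap|
    puts "  #{snap.table}: #{snap.status} (requested by #{snap.requested_by})"
  end
end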
@@ -1826,7 +1902,11 @@ module Aws::Firehose
  # resp.delivery_stream_description.destinations[0].iceberg_destination_description.destination_table_configuration_list[0].destination_database_name #=> String
  # resp.delivery_stream_description.destinations[0].iceberg_destination_description.destination_table_configuration_list[0].unique_keys #=> Array
  # resp.delivery_stream_description.destinations[0].iceberg_destination_description.destination_table_configuration_list[0].unique_keys[0] #=> String
+ # resp.delivery_stream_description.destinations[0].iceberg_destination_description.destination_table_configuration_list[0].partition_spec.identity #=> Array
+ # resp.delivery_stream_description.destinations[0].iceberg_destination_description.destination_table_configuration_list[0].partition_spec.identity[0].source_name #=> String
  # resp.delivery_stream_description.destinations[0].iceberg_destination_description.destination_table_configuration_list[0].s3_error_output_prefix #=> String
+ # resp.delivery_stream_description.destinations[0].iceberg_destination_description.schema_evolution_configuration.enabled #=> Boolean
+ # resp.delivery_stream_description.destinations[0].iceberg_destination_description.table_creation_configuration.enabled #=> Boolean
  # resp.delivery_stream_description.destinations[0].iceberg_destination_description.buffering_hints.size_in_m_bs #=> Integer
  # resp.delivery_stream_description.destinations[0].iceberg_destination_description.buffering_hints.interval_in_seconds #=> Integer
  # resp.delivery_stream_description.destinations[0].iceberg_destination_description.cloud_watch_logging_options.enabled #=> Boolean
@@ -1842,6 +1922,7 @@ module Aws::Firehose
  # resp.delivery_stream_description.destinations[0].iceberg_destination_description.retry_options.duration_in_seconds #=> Integer
  # resp.delivery_stream_description.destinations[0].iceberg_destination_description.role_arn #=> String
  # resp.delivery_stream_description.destinations[0].iceberg_destination_description.catalog_configuration.catalog_arn #=> String
+ # resp.delivery_stream_description.destinations[0].iceberg_destination_description.catalog_configuration.warehouse_location #=> String
  # resp.delivery_stream_description.destinations[0].iceberg_destination_description.s3_destination_description.role_arn #=> String
  # resp.delivery_stream_description.destinations[0].iceberg_destination_description.s3_destination_description.bucket_arn #=> String
  # resp.delivery_stream_description.destinations[0].iceberg_destination_description.s3_destination_description.prefix #=> String
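Several of the Iceberg destination additions in this release (`partition_spec`, `schema_evolution_configuration`, `table_creation_configuration`, and the catalog `warehouse_location`) fit together in one destination configuration. A sketch of such a hash, with placeholder ARNs, bucket, database, table, and column names; it could be passed as the `iceberg_destination_configuration` argument shown earlier:

iceberg_destination_configuration = {
  role_arn: 'arn:aws:iam::123456789012:role/firehose-iceberg-role',   # placeholder
  catalog_configuration: {
    catalog_arn: 'arn:aws:glue:us-east-1:123456789012:catalog',
    warehouse_location: 's3://orders-iceberg-bucket/warehouse/'        # new in 1.85.0
  },
  destination_table_configuration_list: [
    {
      destination_database_name: 'analytics',
      destination_table_name: 'order_events',
      unique_keys: ['order_id'],
      # New: identity partitioning on a source column.
      partition_spec: { identity: [{ source_name: 'event_date' }] }
    }
  ],
  # New: allow Firehose to evolve table schemas and create missing tables.
  schema_evolution_configuration: { enabled: true },
  table_creation_configuration: { enabled: true },
  s3_configuration: {
    role_arn: 'arn:aws:iam::123456789012:role/firehose-iceberg-role',
    bucket_arn: 'arn:aws:s3:::orders-iceberg-bucket'
  }
}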
@@ -1865,36 +1946,36 @@ module Aws::Firehose
  req.send_request(options)
  end
 
- # Lists your delivery streams in alphabetical order of their names.
+ # Lists your Firehose streams in alphabetical order of their names.
  #
- # The number of delivery streams might be too large to return using a
+ # The number of Firehose streams might be too large to return using a
  # single call to `ListDeliveryStreams`. You can limit the number of
- # delivery streams returned, using the `Limit` parameter. To determine
+ # Firehose streams returned, using the `Limit` parameter. To determine
  # whether there are more delivery streams to list, check the value of
- # `HasMoreDeliveryStreams` in the output. If there are more delivery
+ # `HasMoreDeliveryStreams` in the output. If there are more Firehose
  # streams to list, you can request them by calling this operation again
  # and setting the `ExclusiveStartDeliveryStreamName` parameter to the
- # name of the last delivery stream returned in the last call.
+ # name of the last Firehose stream returned in the last call.
  #
  # @option params [Integer] :limit
- # The maximum number of delivery streams to list. The default value is
+ # The maximum number of Firehose streams to list. The default value is
  # 10.
  #
  # @option params [String] :delivery_stream_type
- # The delivery stream type. This can be one of the following values:
+ # The Firehose stream type. This can be one of the following values:
  #
- # * `DirectPut`: Provider applications access the delivery stream
+ # * `DirectPut`: Provider applications access the Firehose stream
  # directly.
  #
- # * `KinesisStreamAsSource`: The delivery stream uses a Kinesis data
+ # * `KinesisStreamAsSource`: The Firehose stream uses a Kinesis data
  # stream as a source.
  #
- # This parameter is optional. If this parameter is omitted, delivery
+ # This parameter is optional. If this parameter is omitted, Firehose
  # streams of all types are returned.
  #
  # @option params [String] :exclusive_start_delivery_stream_name
- # The list of delivery streams returned by this call to
- # `ListDeliveryStreams` will start with the delivery stream whose name
+ # The list of Firehose streams returned by this call to
+ # `ListDeliveryStreams` will start with the Firehose stream whose name
  # comes alphabetically immediately after the name you specify in
  # `ExclusiveStartDeliveryStreamName`.
  #
@@ -1907,7 +1988,7 @@ module Aws::Firehose
  #
  # resp = client.list_delivery_streams({
  # limit: 1,
- # delivery_stream_type: "DirectPut", # accepts DirectPut, KinesisStreamAsSource, MSKAsSource
+ # delivery_stream_type: "DirectPut", # accepts DirectPut, KinesisStreamAsSource, MSKAsSource, DatabaseAsSource
  # exclusive_start_delivery_stream_name: "DeliveryStreamName",
  # })
  #
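The pagination described above (`Limit`, `HasMoreDeliveryStreams`, `ExclusiveStartDeliveryStreamName`) is typically driven with a loop; a sketch that also filters on the new `DatabaseAsSource` type:

require 'aws-sdk-firehose'

client = Aws::Firehose::Client.new(region: 'us-east-1')

names = []
params = { limit: 10, delivery_stream_type: 'DatabaseAsSource' }
loop do
  resp = client.list_delivery_streams(params)
  names.concat(resp.delivery_stream_names)
  break unless resp.has_more_delivery_streams
  # Continue after the last name returned by the previous call.
  params[:exclusive_start_delivery_stream_name] = names.last
end
puts names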
@@ -1926,11 +2007,11 @@ module Aws::Firehose
  req.send_request(options)
  end
 
- # Lists the tags for the specified delivery stream. This operation has a
+ # Lists the tags for the specified Firehose stream. This operation has a
  # limit of five transactions per second per account.
  #
  # @option params [required, String] :delivery_stream_name
- # The name of the delivery stream whose tags you want to list.
+ # The name of the Firehose stream whose tags you want to list.
  #
  # @option params [String] :exclusive_start_tag_key
  # The key to use as the starting point for the list of tags. If you set
@@ -1939,7 +2020,7 @@ module Aws::Firehose
  #
  # @option params [Integer] :limit
  # The number of tags to return. If this number is less than the total
- # number of tags associated with the delivery stream, `HasMoreTags` is
+ # number of tags associated with the Firehose stream, `HasMoreTags` is
  # set to `true` in the response. To list additional tags, set
  # `ExclusiveStartTagKey` to the last key in the response.
  #
@@ -1972,30 +2053,36 @@ module Aws::Firehose
  req.send_request(options)
  end
 
- # Writes a single data record into an Amazon Firehose delivery stream.
- # To write multiple data records into a delivery stream, use
- # PutRecordBatch. Applications using these operations are referred to as
- # producers.
+ # Writes a single data record into an Firehose stream. To write multiple
+ # data records into a Firehose stream, use PutRecordBatch. Applications
+ # using these operations are referred to as producers.
  #
- # By default, each delivery stream can take in up to 2,000 transactions
+ # By default, each Firehose stream can take in up to 2,000 transactions
  # per second, 5,000 records per second, or 5 MB per second. If you use
  # PutRecord and PutRecordBatch, the limits are an aggregate across these
- # two operations for each delivery stream. For more information about
+ # two operations for each Firehose stream. For more information about
  # limits and how to request an increase, see [Amazon Firehose
  # Limits][1].
  #
  # Firehose accumulates and publishes a particular metric for a customer
  # account in one minute intervals. It is possible that the bursts of
- # incoming bytes/records ingested to a delivery stream last only for a
+ # incoming bytes/records ingested to a Firehose stream last only for a
  # few seconds. Due to this, the actual spikes in the traffic might not
  # be fully visible in the customer's 1 minute CloudWatch metrics.
  #
- # You must specify the name of the delivery stream and the data record
+ # You must specify the name of the Firehose stream and the data record
  # when using PutRecord. The data record consists of a data blob that can
  # be up to 1,000 KiB in size, and any kind of data. For example, it can
  # be a segment from a log file, geographic location data, website
  # clickstream data, and so on.
  #
+ # For multi record de-aggregation, you can not put more than 500 records
+ # even if the data blob length is less than 1000 KiB. If you include
+ # more than 500 records, the request succeeds but the record
+ # de-aggregation doesn't work as expected and transformation lambda is
+ # invoked with the complete base64 encoded data blob instead of
+ # de-aggregated base64 decoded records.
+ #
  # Firehose buffers records before delivering them to the destination. To
  # disambiguate the data blobs at the destination, a common solution is
  # to use delimiters in the data, such as a newline (`\n`) or some other
2010
2097
  # If the `PutRecord` operation throws a `ServiceUnavailableException`,
2011
2098
  # the API is automatically reinvoked (retried) 3 times. If the exception
2012
2099
  # persists, it is possible that the throughput limits have been exceeded
2013
- # for the delivery stream.
2100
+ # for the Firehose stream.
2014
2101
  #
2015
2102
  # Re-invoking the Put API operations (for example, PutRecord and
2016
2103
  # PutRecordBatch) can result in data duplicates. For larger data assets,
2017
2104
  # allow for a longer time out before retrying Put API operations.
2018
2105
  #
2019
2106
  # Data records sent to Firehose are stored for 24 hours from the time
2020
- # they are added to a delivery stream as it tries to send the records to
2107
+ # they are added to a Firehose stream as it tries to send the records to
2021
2108
  # the destination. If the destination is unreachable for more than 24
2022
2109
  # hours, the data is no longer available.
2023
2110
  #
@@ -2030,7 +2117,7 @@ module Aws::Firehose
2030
2117
  # [1]: https://docs.aws.amazon.com/firehose/latest/dev/limits.html
2031
2118
  #
2032
2119
  # @option params [required, String] :delivery_stream_name
2033
- # The name of the delivery stream.
2120
+ # The name of the Firehose stream.
2034
2121
  #
2035
2122
  # @option params [required, Types::Record] :record
2036
2123
  # The record.
@@ -2063,15 +2150,15 @@ module Aws::Firehose
2063
2150
  req.send_request(options)
2064
2151
  end
2065
2152
 
2066
- # Writes multiple data records into a delivery stream in a single call,
2153
+ # Writes multiple data records into a Firehose stream in a single call,
2067
2154
  # which can achieve higher throughput per producer than when writing
2068
- # single records. To write single data records into a delivery stream,
2155
+ # single records. To write single data records into a Firehose stream,
2069
2156
  # use PutRecord. Applications using these operations are referred to as
2070
2157
  # producers.
2071
2158
  #
2072
2159
  # Firehose accumulates and publishes a particular metric for a customer
2073
2160
  # account in one minute intervals. It is possible that the bursts of
2074
- # incoming bytes/records ingested to a delivery stream last only for a
2161
+ # incoming bytes/records ingested to a Firehose stream last only for a
2075
2162
  # few seconds. Due to this, the actual spikes in the traffic might not
2076
2163
  # be fully visible in the customer's 1 minute CloudWatch metrics.
2077
2164
  #
@@ -2082,12 +2169,19 @@ module Aws::Firehose
2082
2169
  # to a limit of 4 MB for the entire request. These limits cannot be
2083
2170
  # changed.
2084
2171
  #
2085
- # You must specify the name of the delivery stream and the data record
2172
+ # You must specify the name of the Firehose stream and the data record
2086
2173
  # when using PutRecord. The data record consists of a data blob that can
2087
2174
  # be up to 1,000 KB in size, and any kind of data. For example, it could
2088
2175
  # be a segment from a log file, geographic location data, website
2089
2176
  # clickstream data, and so on.
2090
2177
  #
2178
+ # For multi record de-aggregation, you can not put more than 500 records
2179
+ # even if the data blob length is less than 1000 KiB. If you include
2180
+ # more than 500 records, the request succeeds but the record
2181
+ # de-aggregation doesn't work as expected and transformation lambda is
2182
+ # invoked with the complete base64 encoded data blob instead of
2183
+ # de-aggregated base64 decoded records.
2184
+ #
2091
2185
  # Firehose buffers records before delivering them to the destination. To
2092
2186
  # disambiguate the data blobs at the destination, a common solution is
2093
2187
  # to use delimiters in the data, such as a newline (`\n`) or some other
@@ -2126,14 +2220,14 @@ module Aws::Firehose
2126
2220
  # If PutRecordBatch throws `ServiceUnavailableException`, the API is
2127
2221
  # automatically reinvoked (retried) 3 times. If the exception persists,
2128
2222
  # it is possible that the throughput limits have been exceeded for the
2129
- # delivery stream.
2223
+ # Firehose stream.
2130
2224
  #
2131
2225
  # Re-invoking the Put API operations (for example, PutRecord and
2132
2226
  # PutRecordBatch) can result in data duplicates. For larger data assets,
2133
2227
  # allow for a longer time out before retrying Put API operations.
2134
2228
  #
2135
2229
  # Data records sent to Firehose are stored for 24 hours from the time
2136
- # they are added to a delivery stream as it attempts to send the records
2230
+ # they are added to a Firehose stream as it attempts to send the records
2137
2231
  # to the destination. If the destination is unreachable for more than 24
2138
2232
  # hours, the data is no longer available.
2139
2233
  #
@@ -2146,7 +2240,7 @@ module Aws::Firehose
2146
2240
  # [1]: https://docs.aws.amazon.com/firehose/latest/dev/limits.html
2147
2241
  #
2148
2242
  # @option params [required, String] :delivery_stream_name
2149
- # The name of the delivery stream.
2243
+ # The name of the Firehose stream.
2150
2244
  #
2151
2245
  # @option params [required, Array<Types::Record>] :records
2152
2246
  # One or more records.
@@ -2186,26 +2280,26 @@ module Aws::Firehose
2186
2280
  req.send_request(options)
2187
2281
  end
2188
2282
 
2189
- # Enables server-side encryption (SSE) for the delivery stream.
2283
+ # Enables server-side encryption (SSE) for the Firehose stream.
2190
2284
  #
2191
2285
  # This operation is asynchronous. It returns immediately. When you
2192
2286
  # invoke it, Firehose first sets the encryption status of the stream to
2193
- # `ENABLING`, and then to `ENABLED`. The encryption status of a delivery
2287
+ # `ENABLING`, and then to `ENABLED`. The encryption status of a Firehose
2194
2288
  # stream is the `Status` property in
2195
2289
  # DeliveryStreamEncryptionConfiguration. If the operation fails, the
2196
2290
  # encryption status changes to `ENABLING_FAILED`. You can continue to
2197
- # read and write data to your delivery stream while the encryption
2291
+ # read and write data to your Firehose stream while the encryption
2198
2292
  # status is `ENABLING`, but the data is not encrypted. It can take up to
2199
2293
  # 5 seconds after the encryption status changes to `ENABLED` before all
2200
- # records written to the delivery stream are encrypted. To find out
2294
+ # records written to the Firehose stream are encrypted. To find out
2201
2295
  # whether a record or a batch of records was encrypted, check the
2202
2296
  # response elements PutRecordOutput$Encrypted and
2203
2297
  # PutRecordBatchOutput$Encrypted, respectively.
2204
2298
  #
2205
- # To check the encryption status of a delivery stream, use
2299
+ # To check the encryption status of a Firehose stream, use
2206
2300
  # DescribeDeliveryStream.
2207
2301
  #
2208
- # Even if encryption is currently enabled for a delivery stream, you can
2302
+ # Even if encryption is currently enabled for a Firehose stream, you can
2209
2303
  # still invoke this operation on it to change the ARN of the CMK or both
2210
2304
  # its type and ARN. If you invoke this method to change the CMK, and the
2211
2305
  # old CMK is of type `CUSTOMER_MANAGED_CMK`, Firehose schedules the
@@ -2218,29 +2312,29 @@ module Aws::Firehose
2218
2312
  # should not be called with session credentials that are more than 6
2219
2313
  # hours old.
2220
2314
  #
2221
- # If a delivery stream already has encryption enabled and then you
2315
+ # If a Firehose stream already has encryption enabled and then you
2222
2316
  # invoke this operation to change the ARN of the CMK or both its type
2223
2317
  # and ARN and you get `ENABLING_FAILED`, this only means that the
2224
2318
  # attempt to change the CMK failed. In this case, encryption remains
2225
2319
  # enabled with the old CMK.
2226
2320
  #
2227
- # If the encryption status of your delivery stream is `ENABLING_FAILED`,
2321
+ # If the encryption status of your Firehose stream is `ENABLING_FAILED`,
2228
2322
  # you can invoke this operation again with a valid CMK. The CMK must be
2229
2323
  # enabled and the key policy mustn't explicitly deny the permission for
2230
2324
  # Firehose to invoke KMS encrypt and decrypt operations.
2231
2325
  #
2232
- # You can enable SSE for a delivery stream only if it's a delivery
2326
+ # You can enable SSE for a Firehose stream only if it's a Firehose
2233
2327
  # stream that uses `DirectPut` as its source.
2234
2328
  #
2235
2329
  # The `StartDeliveryStreamEncryption` and `StopDeliveryStreamEncryption`
2236
- # operations have a combined limit of 25 calls per delivery stream per
2330
+ # operations have a combined limit of 25 calls per Firehose stream per
2237
2331
  # 24 hours. For example, you reach the limit if you call
2238
2332
  # `StartDeliveryStreamEncryption` 13 times and
2239
- # `StopDeliveryStreamEncryption` 12 times for the same delivery stream
2333
+ # `StopDeliveryStreamEncryption` 12 times for the same Firehose stream
2240
2334
  # in a 24-hour period.
2241
2335
  #
2242
2336
  # @option params [required, String] :delivery_stream_name
2243
- # The name of the delivery stream for which you want to enable
2337
+ # The name of the Firehose stream for which you want to enable
2244
2338
  # server-side encryption (SSE).
2245
2339
  #
2246
2340
  # @option params [Types::DeliveryStreamEncryptionConfigurationInput] :delivery_stream_encryption_configuration_input
@@ -2268,20 +2362,20 @@ module Aws::Firehose
2268
2362
  req.send_request(options)
2269
2363
  end
2270
2364
 
2271
- # Disables server-side encryption (SSE) for the delivery stream.
2365
+ # Disables server-side encryption (SSE) for the Firehose stream.
2272
2366
  #
2273
2367
  # This operation is asynchronous. It returns immediately. When you
2274
2368
  # invoke it, Firehose first sets the encryption status of the stream to
2275
2369
  # `DISABLING`, and then to `DISABLED`. You can continue to read and
2276
2370
  # write data to your stream while its status is `DISABLING`. It can take
2277
2371
  # up to 5 seconds after the encryption status changes to `DISABLED`
2278
- # before all records written to the delivery stream are no longer
2372
+ # before all records written to the Firehose stream are no longer
2279
2373
  # subject to encryption. To find out whether a record or a batch of
2280
2374
  # records was encrypted, check the response elements
2281
2375
  # PutRecordOutput$Encrypted and PutRecordBatchOutput$Encrypted,
2282
2376
  # respectively.
2283
2377
  #
2284
- # To check the encryption state of a delivery stream, use
2378
+ # To check the encryption state of a Firehose stream, use
2285
2379
  # DescribeDeliveryStream.
2286
2380
  #
2287
2381
  # If SSE is enabled using a customer managed CMK and then you invoke
@@ -2290,14 +2384,14 @@ module Aws::Firehose
2290
2384
  # finished delivering records to the destination.
2291
2385
  #
2292
2386
  # The `StartDeliveryStreamEncryption` and `StopDeliveryStreamEncryption`
2293
- # operations have a combined limit of 25 calls per delivery stream per
2387
+ # operations have a combined limit of 25 calls per Firehose stream per
2294
2388
  # 24 hours. For example, you reach the limit if you call
2295
2389
  # `StartDeliveryStreamEncryption` 13 times and
2296
- # `StopDeliveryStreamEncryption` 12 times for the same delivery stream
2390
+ # `StopDeliveryStreamEncryption` 12 times for the same Firehose stream
2297
2391
  # in a 24-hour period.
2298
2392
  #
2299
2393
  # @option params [required, String] :delivery_stream_name
2300
- # The name of the delivery stream for which you want to disable
2394
+ # The name of the Firehose stream for which you want to disable
2301
2395
  # server-side encryption (SSE).
2302
2396
  #
2303
2397
  # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
@@ -2317,17 +2411,17 @@ module Aws::Firehose
2317
2411
  req.send_request(options)
2318
2412
  end
2319
2413
 
2320
- # Adds or updates tags for the specified delivery stream. A tag is a
2414
+ # Adds or updates tags for the specified Firehose stream. A tag is a
2321
2415
  # key-value pair that you can define and assign to Amazon Web Services
2322
2416
  # resources. If you specify a tag that already exists, the tag value is
2323
2417
  # replaced with the value that you specify in the request. Tags are
2324
2418
  # metadata. For example, you can add friendly names and descriptions or
2325
- # other types of information that can help you distinguish the delivery
2419
+ # other types of information that can help you distinguish the Firehose
2326
2420
  # stream. For more information about tags, see [Using Cost Allocation
2327
2421
  # Tags][1] in the *Amazon Web Services Billing and Cost Management User
2328
2422
  # Guide*.
2329
2423
  #
2330
- # Each delivery stream can have up to 50 tags.
2424
+ # Each Firehose stream can have up to 50 tags.
2331
2425
  #
2332
2426
  # This operation has a limit of five transactions per second per
2333
2427
  # account.
@@ -2337,7 +2431,7 @@ module Aws::Firehose
2337
2431
  # [1]: https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html
2338
2432
  #
2339
2433
  # @option params [required, String] :delivery_stream_name
2340
- # The name of the delivery stream to which you want to add the tags.
2434
+ # The name of the Firehose stream to which you want to add the tags.
2341
2435
  #
2342
2436
  # @option params [required, Array<Types::Tag>] :tags
2343
2437
  # A set of key-value pairs to use to create the tags.
@@ -2365,7 +2459,7 @@ module Aws::Firehose
2365
2459
  req.send_request(options)
2366
2460
  end
2367
2461
 
2368
- # Removes tags from the specified delivery stream. Removed tags are
2462
+ # Removes tags from the specified Firehose stream. Removed tags are
2369
2463
  # deleted, and you can't recover them after this operation successfully
2370
2464
  # completes.
2371
2465
  #
@@ -2375,7 +2469,7 @@ module Aws::Firehose
2375
2469
  # account.
2376
2470
  #
2377
2471
  # @option params [required, String] :delivery_stream_name
2378
- # The name of the delivery stream.
2472
+ # The name of the Firehose stream.
2379
2473
  #
2380
2474
  # @option params [required, Array<String>] :tag_keys
2381
2475
  # A list of tag keys. Each corresponding tag is removed from the
@@ -2399,14 +2493,14 @@ module Aws::Firehose
2399
2493
  req.send_request(options)
2400
2494
  end
2401
2495
 
2402
- # Updates the specified destination of the specified delivery stream.
2496
+ # Updates the specified destination of the specified Firehose stream.
2403
2497
  #
2404
2498
  # Use this operation to change the destination type (for example, to
2405
2499
  # replace the Amazon S3 destination with Amazon Redshift) or change the
2406
2500
  # parameters associated with a destination (for example, to change the
2407
2501
  # bucket name of the Amazon S3 destination). The update might not occur
2408
- # immediately. The target delivery stream remains active while the
2409
- # configurations are updated, so data writes to the delivery stream can
2502
+ # immediately. The target Firehose stream remains active while the
2503
+ # configurations are updated, so data writes to the Firehose stream can
2410
2504
  # continue during this process. The updated configurations are usually
2411
2505
  # effective within a few minutes.
2412
2506
  #
@@ -2435,7 +2529,7 @@ module Aws::Firehose
2435
2529
  # `CurrentDeliveryStreamVersionId` in the next call.
2436
2530
  #
2437
2531
  # @option params [required, String] :delivery_stream_name
2438
- # The name of the delivery stream.
2532
+ # The name of the Firehose stream.
2439
2533
  #
2440
2534
  # @option params [required, String] :current_delivery_stream_version_id
2441
2535
  # Obtain this value from the `VersionId` result of
@@ -2480,8 +2574,6 @@ module Aws::Firehose
2480
2574
  # @option params [Types::IcebergDestinationUpdate] :iceberg_destination_update
2481
2575
  # Describes an update for a destination in Apache Iceberg Tables.
2482
2576
  #
2483
- # Amazon Data Firehose is in preview release and is subject to change.
2484
- #
2485
2577
  # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
2486
2578
  #
2487
2579
  # @example Request syntax with placeholder values
@@ -3087,12 +3179,25 @@ module Aws::Firehose
3087
3179
  # iceberg_destination_update: {
3088
3180
  # destination_table_configuration_list: [
3089
3181
  # {
3090
- # destination_table_name: "NonEmptyStringWithoutWhitespace", # required
3091
- # destination_database_name: "NonEmptyStringWithoutWhitespace", # required
3182
+ # destination_table_name: "StringWithLettersDigitsUnderscoresDots", # required
3183
+ # destination_database_name: "StringWithLettersDigitsUnderscoresDots", # required
3092
3184
  # unique_keys: ["NonEmptyStringWithoutWhitespace"],
3185
+ # partition_spec: {
3186
+ # identity: [
3187
+ # {
3188
+ # source_name: "NonEmptyStringWithoutWhitespace", # required
3189
+ # },
3190
+ # ],
3191
+ # },
3093
3192
  # s3_error_output_prefix: "ErrorOutputPrefix",
3094
3193
  # },
3095
3194
  # ],
3195
+ # schema_evolution_configuration: {
3196
+ # enabled: false, # required
3197
+ # },
3198
+ # table_creation_configuration: {
3199
+ # enabled: false, # required
3200
+ # },
3096
3201
  # buffering_hints: {
3097
3202
  # size_in_m_bs: 1,
3098
3203
  # interval_in_seconds: 1,
@@ -3123,6 +3228,7 @@ module Aws::Firehose
3123
3228
  # role_arn: "RoleARN",
3124
3229
  # catalog_configuration: {
3125
3230
  # catalog_arn: "GlueDataCatalogARN",
3231
+ # warehouse_location: "WarehouseLocation",
3126
3232
  # },
3127
3233
  # s3_configuration: {
3128
3234
  # role_arn: "RoleARN", # required
@@ -3176,7 +3282,7 @@ module Aws::Firehose
  tracer: tracer
  )
  context[:gem_name] = 'aws-sdk-firehose'
- context[:gem_version] = '1.84.0'
+ context[:gem_version] = '1.85.0'
  Seahorse::Client::Request.new(handlers, context)
  end
 