@cdklabs/cdk-ecs-codedeploy 0.0.1 → 0.0.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.jsii +9 -6
- package/README.md +1 -1
- package/lib/ecs-appspec/index.js +1 -1
- package/lib/ecs-deployment/index.js +1 -1
- package/node_modules/aws-sdk/CHANGELOG.md +14 -1
- package/node_modules/aws-sdk/README.md +1 -1
- package/node_modules/aws-sdk/apis/customer-profiles-2020-08-15.min.json +6 -2
- package/node_modules/aws-sdk/apis/ec2-2016-11-15.min.json +341 -334
- package/node_modules/aws-sdk/apis/kinesisvideo-2017-09-30.min.json +160 -15
- package/node_modules/aws-sdk/apis/metadata.json +4 -0
- package/node_modules/aws-sdk/apis/rds-2014-10-31.min.json +136 -123
- package/node_modules/aws-sdk/apis/rekognition-2016-06-27.min.json +60 -35
- package/node_modules/aws-sdk/apis/sagemaker-metrics-2022-09-30.examples.json +5 -0
- package/node_modules/aws-sdk/apis/sagemaker-metrics-2022-09-30.min.json +73 -0
- package/node_modules/aws-sdk/apis/sagemaker-metrics-2022-09-30.paginators.json +4 -0
- package/node_modules/aws-sdk/clients/all.d.ts +1 -0
- package/node_modules/aws-sdk/clients/all.js +2 -1
- package/node_modules/aws-sdk/clients/cloudtrail.d.ts +2 -1
- package/node_modules/aws-sdk/clients/customerprofiles.d.ts +16 -0
- package/node_modules/aws-sdk/clients/ec2.d.ts +3 -1
- package/node_modules/aws-sdk/clients/kinesisvideo.d.ts +191 -2
- package/node_modules/aws-sdk/clients/lookoutvision.d.ts +2 -2
- package/node_modules/aws-sdk/clients/migrationhubrefactorspaces.d.ts +4 -4
- package/node_modules/aws-sdk/clients/rds.d.ts +31 -2
- package/node_modules/aws-sdk/clients/rekognition.d.ts +37 -7
- package/node_modules/aws-sdk/clients/sagemakermetrics.d.ts +93 -0
- package/node_modules/aws-sdk/clients/sagemakermetrics.js +18 -0
- package/node_modules/aws-sdk/clients/wafv2.d.ts +4 -4
- package/node_modules/aws-sdk/dist/aws-sdk-core-react-native.js +2 -2
- package/node_modules/aws-sdk/dist/aws-sdk-react-native.js +46 -9
- package/node_modules/aws-sdk/dist/aws-sdk.js +704 -510
- package/node_modules/aws-sdk/dist/aws-sdk.min.js +69 -69
- package/node_modules/aws-sdk/lib/config_service_placeholders.d.ts +2 -0
- package/node_modules/aws-sdk/lib/core.js +1 -1
- package/node_modules/aws-sdk/package.json +1 -1
- package/package.json +11 -7
@@ -43,6 +43,14 @@ declare class KinesisVideo extends Service {
    * Deletes a Kinesis video stream and the data contained in the stream. This method marks the stream for deletion, and makes the data in the stream inaccessible immediately. To ensure that you have the latest version of the stream before deleting it, you can specify the stream version. Kinesis Video Streams assigns a version to each stream. When you update a stream, Kinesis Video Streams assigns a new version number. To get the latest stream version, use the DescribeStream API. This operation requires permission for the KinesisVideo:DeleteStream action.
    */
   deleteStream(callback?: (err: AWSError, data: KinesisVideo.Types.DeleteStreamOutput) => void): Request<KinesisVideo.Types.DeleteStreamOutput, AWSError>;
+  /**
+   * Describes a stream’s edge configuration that was set using the StartEdgeConfigurationUpdate API. Use this API to get the status of the configuration if the configuration is in sync with the Edge Agent.
+   */
+  describeEdgeConfiguration(params: KinesisVideo.Types.DescribeEdgeConfigurationInput, callback?: (err: AWSError, data: KinesisVideo.Types.DescribeEdgeConfigurationOutput) => void): Request<KinesisVideo.Types.DescribeEdgeConfigurationOutput, AWSError>;
+  /**
+   * Describes a stream’s edge configuration that was set using the StartEdgeConfigurationUpdate API. Use this API to get the status of the configuration if the configuration is in sync with the Edge Agent.
+   */
+  describeEdgeConfiguration(callback?: (err: AWSError, data: KinesisVideo.Types.DescribeEdgeConfigurationOutput) => void): Request<KinesisVideo.Types.DescribeEdgeConfigurationOutput, AWSError>;
   /**
    * Gets the ImageGenerationConfiguration for a given Kinesis video stream.
    */
@@ -123,6 +131,14 @@ declare class KinesisVideo extends Service {
    * Returns a list of tags associated with the specified stream. In the request, you must specify either the StreamName or the StreamARN.
    */
   listTagsForStream(callback?: (err: AWSError, data: KinesisVideo.Types.ListTagsForStreamOutput) => void): Request<KinesisVideo.Types.ListTagsForStreamOutput, AWSError>;
+  /**
+   * An asynchronous API that updates a stream’s existing edge configuration. If this API is invoked for the first time, a new edge configuration will be created for the stream, and the sync status will be set to SYNCING. The Kinesis Video Stream will sync the stream’s edge configuration with the Edge Agent IoT Greengrass component that runs on an IoT Hub Device setup at your premise. The time to sync can vary and depends on the connectivity of the Hub Device. The SyncStatus will be updated as the edge configuration is acknowledged, and synced with the Edge Agent. You will have to wait for the sync status to reach a terminal state such as: IN_SYNC and SYNC_FAILED, before using this API again. If you invoke this API during the syncing process, a ResourceInUseException will be thrown. The connectivity of the stream's edge configuration and the Edge Agent will be retried for 15 minutes. After 15 minutes, the status will transition into the SYNC_FAILED state.
+   */
+  startEdgeConfigurationUpdate(params: KinesisVideo.Types.StartEdgeConfigurationUpdateInput, callback?: (err: AWSError, data: KinesisVideo.Types.StartEdgeConfigurationUpdateOutput) => void): Request<KinesisVideo.Types.StartEdgeConfigurationUpdateOutput, AWSError>;
+  /**
+   * An asynchronous API that updates a stream’s existing edge configuration. If this API is invoked for the first time, a new edge configuration will be created for the stream, and the sync status will be set to SYNCING. The Kinesis Video Stream will sync the stream’s edge configuration with the Edge Agent IoT Greengrass component that runs on an IoT Hub Device setup at your premise. The time to sync can vary and depends on the connectivity of the Hub Device. The SyncStatus will be updated as the edge configuration is acknowledged, and synced with the Edge Agent. You will have to wait for the sync status to reach a terminal state such as: IN_SYNC and SYNC_FAILED, before using this API again. If you invoke this API during the syncing process, a ResourceInUseException will be thrown. The connectivity of the stream's edge configuration and the Edge Agent will be retried for 15 minutes. After 15 minutes, the status will transition into the SYNC_FAILED state.
+   */
+  startEdgeConfigurationUpdate(callback?: (err: AWSError, data: KinesisVideo.Types.StartEdgeConfigurationUpdateOutput) => void): Request<KinesisVideo.Types.StartEdgeConfigurationUpdateOutput, AWSError>;
   /**
    * Adds one or more tags to a signaling channel. A tag is a key-value pair (the value is optional) that you can define and assign to Amazon Web Services resources. If you specify a tag that already exists, the tag value is replaced with the value that you specify in the request. For more information, see Using Cost Allocation Tags in the Billing and Cost Management and Cost Management User Guide.
    */
@@ -304,6 +320,7 @@ declare namespace KinesisVideo {
   export type DataEndpoint = string;
   export type DataRetentionChangeInHours = number;
   export type DataRetentionInHours = number;
+  export type DeleteAfterUpload = boolean;
   export interface DeleteSignalingChannelInput {
     /**
      * The Amazon Resource Name (ARN) of the signaling channel that you want to delete.
@@ -328,6 +345,60 @@ declare namespace KinesisVideo {
   }
   export interface DeleteStreamOutput {
   }
+  export interface DeletionConfig {
+    /**
+     * The number of hours that you want to retain the data in the stream on the Edge Agent. The default value of the retention time is 720 hours, which translates to 30 days.
+     */
+    EdgeRetentionInHours?: EdgeRetentionInHours;
+    /**
+     * The value of the local size required in order to delete the edge configuration.
+     */
+    LocalSizeConfig?: LocalSizeConfig;
+    /**
+     * The boolean value used to indicate whether or not you want to mark the media for deletion, once it has been uploaded to the Kinesis Video Stream cloud. The media files can be deleted if any of the deletion configuration values are set to true, such as when the limit for the EdgeRetentionInHours, or the MaxLocalMediaSizeInMB, has been reached. Since the default value is set to true, configure the uploader schedule such that the media files are not being deleted before they are initially uploaded to AWS cloud.
+     */
+    DeleteAfterUpload?: DeleteAfterUpload;
+  }
+  export interface DescribeEdgeConfigurationInput {
+    /**
+     * The name of the stream whose edge configuration you want to update. Specify either the StreamName or the StreamARN.
+     */
+    StreamName?: StreamName;
+    /**
+     * The Amazon Resource Name (ARN) of the stream. Specify either the StreamNameor the StreamARN.
+     */
+    StreamARN?: ResourceARN;
+  }
+  export interface DescribeEdgeConfigurationOutput {
+    /**
+     * The name of the stream from which the edge configuration was updated.
+     */
+    StreamName?: StreamName;
+    /**
+     * The Amazon Resource Name (ARN) of the stream.
+     */
+    StreamARN?: ResourceARN;
+    /**
+     * The timestamp at which a stream’s edge configuration was first created.
+     */
+    CreationTime?: Timestamp;
+    /**
+     * The timestamp at which a stream’s edge configuration was last updated.
+     */
+    LastUpdatedTime?: Timestamp;
+    /**
+     * The latest status of the edge configuration update.
+     */
+    SyncStatus?: SyncStatus;
+    /**
+     * A description of the generated failure status.
+     */
+    FailedStatusDetails?: FailedStatusDetails;
+    /**
+     * A description of the stream's edge configuration that will be used to sync with the Edge Agent IoT Greengrass component. The Edge Agent component will run on an IoT Hub Device setup at your premise.
+     */
+    EdgeConfig?: EdgeConfig;
+  }
   export interface DescribeImageGenerationConfigurationInput {
     /**
      * The name of the stream from which to retrieve the image generation configuration. You must specify either the StreamName or the StreamARN.
@@ -395,6 +466,27 @@ declare namespace KinesisVideo {
   export type DestinationRegion = string;
   export type DestinationUri = string;
   export type DeviceName = string;
+  export type DurationInSeconds = number;
+  export interface EdgeConfig {
+    /**
+     * The "Internet of Things (IoT) Thing" Arn of the stream.
+     */
+    HubDeviceArn: HubDeviceArn;
+    /**
+     * The recorder configuration consists of the local MediaSourceConfig details, that are used as credentials to access the local media files streamed on the camera.
+     */
+    RecorderConfig: RecorderConfig;
+    /**
+     * The uploader configuration contains the ScheduleExpression details that are used, to schedule upload jobs for the recorded media files from the Edge Agent, to a Kinesis Video Stream.
+     */
+    UploaderConfig?: UploaderConfig;
+    /**
+     * The deletion configuration is made up of the retention time (EdgeRetentionInHours) and local size configuration (LocalSizeConfig) details that are used to make the deletion.
+     */
+    DeletionConfig?: DeletionConfig;
+  }
+  export type EdgeRetentionInHours = number;
+  export type FailedStatusDetails = string;
   export type Format = "JPEG"|"PNG"|string;
   export type FormatConfig = {[key: string]: FormatConfigValue};
   export type FormatConfigKey = "JPEGQuality"|string;
@@ -436,6 +528,7 @@ declare namespace KinesisVideo {
     ResourceEndpointList?: ResourceEndpointList;
   }
   export type HeightPixels = number;
+  export type HubDeviceArn = string;
   export interface ImageGenerationConfiguration {
     /**
      * Indicates whether the ContinuousImageGenerationConfigurations API is enabled or disabled.
@@ -472,7 +565,7 @@ declare namespace KinesisVideo {
   }
   export interface ImageGenerationDestinationConfig {
     /**
-     * The Uniform Resource
+     * The Uniform Resource Identifier (URI) that identifies where the images will be delivered.
      */
     Uri: DestinationUri;
     /**
@@ -576,7 +669,30 @@ declare namespace KinesisVideo {
      */
     Tags?: ResourceTags;
   }
+  export interface LocalSizeConfig {
+    /**
+     * The overall maximum size of the media that you want to store for a stream on the Edge Agent.
+     */
+    MaxLocalMediaSizeInMB?: MaxLocalMediaSizeInMB;
+    /**
+     * The strategy to perform when a stream’s MaxLocalMediaSizeInMB limit is reached.
+     */
+    StrategyOnFullSize?: StrategyOnFullSize;
+  }
+  export type MaxLocalMediaSizeInMB = number;
+  export interface MediaSourceConfig {
+    /**
+     * The AWS Secrets Manager ARN for the username and password of the camera, or a local media file location.
+     */
+    MediaUriSecretArn: MediaUriSecretArn;
+    /**
+     * The Uniform Resource Identifier (Uri) type. The FILE_URI value can be used to stream local media files.
+     */
+    MediaUriType: MediaUriType;
+  }
   export type MediaType = string;
+  export type MediaUriSecretArn = string;
+  export type MediaUriType = "RTSP_URI"|"FILE_URI"|string;
   export type MessageTtlSeconds = number;
   export type NextToken = string;
   export interface NotificationConfiguration {
@@ -591,10 +707,20 @@ declare namespace KinesisVideo {
   }
   export interface NotificationDestinationConfig {
     /**
-     * The Uniform Resource
+     * The Uniform Resource Identifier (URI) that identifies where the images will be delivered.
      */
     Uri: DestinationUri;
   }
+  export interface RecorderConfig {
+    /**
+     * The configuration details that consist of the credentials required (MediaUriSecretArn and MediaUriType) to access the media files streamed to the camera.
+     */
+    MediaSourceConfig: MediaSourceConfig;
+    /**
+     * The configuration that consists of the ScheduleExpression and the DurationInMinutes details that specify the scheduling to record from a camera, or local media file, onto the Edge Agent. If the ScheduleExpression attribute is not provided, then the Edge Agent will always be set to recording mode.
+     */
+    ScheduleConfig?: ScheduleConfig;
+  }
   export type ResourceARN = string;
   export type ResourceEndpoint = string;
   export type ResourceEndpointList = ResourceEndpointListItem[];
@@ -610,6 +736,17 @@ declare namespace KinesisVideo {
   }
   export type ResourceTags = {[key: string]: TagValue};
   export type SamplingInterval = number;
+  export interface ScheduleConfig {
+    /**
+     * The Quartz cron expression that takes care of scheduling jobs to record from the camera, or local media file, onto the Edge Agent. If the ScheduleExpression is not provided for the RecorderConfig, then the Edge Agent will always be set to recording mode. For more information about Quartz, refer to the Cron Trigger Tutorial page to understand the valid expressions and its use.
+     */
+    ScheduleExpression: ScheduleExpression;
+    /**
+     * The total duration to record the media. If the ScheduleExpression attribute is provided, then the DurationInSeconds attribute should also be specified.
+     */
+    DurationInSeconds: DurationInSeconds;
+  }
+  export type ScheduleExpression = string;
   export interface SingleMasterChannelEndpointConfiguration {
     /**
      * This property is used to determine the nature of communication over this SINGLE_MASTER signaling channel. If WSS is specified, this API returns a websocket endpoint. If HTTPS is specified, this API returns an HTTPS endpoint.
@@ -626,7 +763,52 @@ declare namespace KinesisVideo {
      */
     MessageTtlSeconds?: MessageTtlSeconds;
   }
+  export interface StartEdgeConfigurationUpdateInput {
+    /**
+     * The name of the stream whose edge configuration you want to update. Specify either the StreamName or the StreamARN.
+     */
+    StreamName?: StreamName;
+    /**
+     * The Amazon Resource Name (ARN) of the stream. Specify either the StreamName or the StreamARN.
+     */
+    StreamARN?: ResourceARN;
+    /**
+     * The edge configuration details required to invoke the update process.
+     */
+    EdgeConfig: EdgeConfig;
+  }
+  export interface StartEdgeConfigurationUpdateOutput {
+    /**
+     * The name of the stream from which the edge configuration was updated.
+     */
+    StreamName?: StreamName;
+    /**
+     * The Amazon Resource Name (ARN) of the stream.
+     */
+    StreamARN?: ResourceARN;
+    /**
+     * The timestamp at which a stream’s edge configuration was first created.
+     */
+    CreationTime?: Timestamp;
+    /**
+     * The timestamp at which a stream’s edge configuration was last updated.
+     */
+    LastUpdatedTime?: Timestamp;
+    /**
+     * The current sync status of the stream's edge configuration. When you invoke this API, the sync status will be set to the SYNCING state. Use the DescribeEdgeConfiguration API to get the latest status of the edge configuration.
+     */
+    SyncStatus?: SyncStatus;
+    /**
+     * A description of the generated failure status.
+     */
+    FailedStatusDetails?: FailedStatusDetails;
+    /**
+     * A description of the stream's edge configuration that will be used to sync with the Edge Agent IoT Greengrass component. The Edge Agent component will run on an IoT Hub Device setup at your premise.
+     */
+    EdgeConfig?: EdgeConfig;
+  }
   export type Status = "CREATING"|"ACTIVE"|"UPDATING"|"DELETING"|string;
+  export type StrategyOnFullSize = "DELETE_OLDEST_MEDIA"|"DENY_NEW_MEDIA"|string;
   export interface StreamInfo {
     /**
      * The name of the device that is associated with the stream.
@@ -677,6 +859,7 @@ declare namespace KinesisVideo {
      */
     ComparisonValue?: StreamName;
   }
+  export type SyncStatus = "SYNCING"|"ACKNOWLEDGED"|"IN_SYNC"|"SYNC_FAILED"|"DELETING"|"DELETE_FAILED"|string;
   export interface Tag {
     /**
      * The key of the tag that is associated with the specified signaling channel.
@@ -846,6 +1029,12 @@ declare namespace KinesisVideo {
   }
   export interface UpdateStreamOutput {
   }
+  export interface UploaderConfig {
+    /**
+     * The configuration that consists of the ScheduleExpression and the DurationInMinutesdetails that specify the scheduling to record from a camera, or local media file, onto the Edge Agent. If the ScheduleExpression is not provided, then the Edge Agent will always be in recording mode.
+     */
+    ScheduleConfig: ScheduleConfig;
+  }
   export type Version = string;
   export type WidthPixels = number;
   /**
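Note: taken together, the kinesisvideo.d.ts additions above introduce an edge-configuration workflow: startEdgeConfigurationUpdate pushes an EdgeConfig (recorder, uploader, and deletion settings) to the Edge Agent, and describeEdgeConfiguration reports its SyncStatus. A minimal usage sketch against the aws-sdk v2 client bundled in this package, assuming a hypothetical stream name and placeholder ARNs:

```ts
import { KinesisVideo } from 'aws-sdk';

const kinesisVideo = new KinesisVideo({ region: 'us-west-2' });

async function configureEdgeAgent(): Promise<void> {
  // Push an edge configuration to the Edge Agent. The stream name, IoT thing ARN,
  // and Secrets Manager ARN below are hypothetical placeholders.
  await kinesisVideo.startEdgeConfigurationUpdate({
    StreamName: 'my-edge-stream',
    EdgeConfig: {
      HubDeviceArn: 'arn:aws:iot:us-west-2:123456789012:thing/my-hub-device',
      RecorderConfig: {
        MediaSourceConfig: {
          MediaUriSecretArn: 'arn:aws:secretsmanager:us-west-2:123456789012:secret:camera-credentials',
          MediaUriType: 'RTSP_URI',
        },
        // Quartz cron: start a recording job at the top of every hour, for one hour.
        ScheduleConfig: { ScheduleExpression: '0 0 * ? * *', DurationInSeconds: 3600 },
      },
      DeletionConfig: { EdgeRetentionInHours: 720, DeleteAfterUpload: true },
    },
  }).promise();

  // The update is asynchronous; check the sync status via DescribeEdgeConfiguration.
  const { SyncStatus } = await kinesisVideo
    .describeEdgeConfiguration({ StreamName: 'my-edge-stream' })
    .promise();
  console.log('Edge configuration sync status:', SyncStatus);
}

configureEdgeAgent().catch(console.error);
```

Per the StartEdgeConfigurationUpdate description above, SyncStatus starts at SYNCING and should reach a terminal state such as IN_SYNC or SYNC_FAILED before the API is invoked again.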
@@ -149,11 +149,11 @@ declare class LookoutVision extends Service {
    */
   startModel(callback?: (err: AWSError, data: LookoutVision.Types.StartModelResponse) => void): Request<LookoutVision.Types.StartModelResponse, AWSError>;
   /**
-   * Starts an Amazon Lookout for Vision model packaging job. A model packaging job creates an AWS IoT Greengrass component for a Lookout for Vision model. You can use the component to deploy your model to an edge device managed by Greengrass. Use the DescribeModelPackagingJob API to determine the current status of the job. The model packaging job is complete if the value of Status is SUCCEEDED. To deploy the component to the target device, use the component name and component version with the AWS IoT Greengrass CreateDeployment API. This operation requires the following permissions: lookoutvision:StartModelPackagingJob s3:PutObject s3:GetBucketLocation greengrass:CreateComponentVersion greengrass:DescribeComponent (Optional) greengrass:TagResource. Only required if you want to tag the component. For more information, see Using your Amazon Lookout for Vision model on an edge device in the Amazon Lookout for Vision Developer Guide.
+   * Starts an Amazon Lookout for Vision model packaging job. A model packaging job creates an AWS IoT Greengrass component for a Lookout for Vision model. You can use the component to deploy your model to an edge device managed by Greengrass. Use the DescribeModelPackagingJob API to determine the current status of the job. The model packaging job is complete if the value of Status is SUCCEEDED. To deploy the component to the target device, use the component name and component version with the AWS IoT Greengrass CreateDeployment API. This operation requires the following permissions: lookoutvision:StartModelPackagingJob s3:PutObject s3:GetBucketLocation kms:GenerateDataKey greengrass:CreateComponentVersion greengrass:DescribeComponent (Optional) greengrass:TagResource. Only required if you want to tag the component. For more information, see Using your Amazon Lookout for Vision model on an edge device in the Amazon Lookout for Vision Developer Guide.
    */
   startModelPackagingJob(params: LookoutVision.Types.StartModelPackagingJobRequest, callback?: (err: AWSError, data: LookoutVision.Types.StartModelPackagingJobResponse) => void): Request<LookoutVision.Types.StartModelPackagingJobResponse, AWSError>;
   /**
-   * Starts an Amazon Lookout for Vision model packaging job. A model packaging job creates an AWS IoT Greengrass component for a Lookout for Vision model. You can use the component to deploy your model to an edge device managed by Greengrass. Use the DescribeModelPackagingJob API to determine the current status of the job. The model packaging job is complete if the value of Status is SUCCEEDED. To deploy the component to the target device, use the component name and component version with the AWS IoT Greengrass CreateDeployment API. This operation requires the following permissions: lookoutvision:StartModelPackagingJob s3:PutObject s3:GetBucketLocation greengrass:CreateComponentVersion greengrass:DescribeComponent (Optional) greengrass:TagResource. Only required if you want to tag the component. For more information, see Using your Amazon Lookout for Vision model on an edge device in the Amazon Lookout for Vision Developer Guide.
+   * Starts an Amazon Lookout for Vision model packaging job. A model packaging job creates an AWS IoT Greengrass component for a Lookout for Vision model. You can use the component to deploy your model to an edge device managed by Greengrass. Use the DescribeModelPackagingJob API to determine the current status of the job. The model packaging job is complete if the value of Status is SUCCEEDED. To deploy the component to the target device, use the component name and component version with the AWS IoT Greengrass CreateDeployment API. This operation requires the following permissions: lookoutvision:StartModelPackagingJob s3:PutObject s3:GetBucketLocation kms:GenerateDataKey greengrass:CreateComponentVersion greengrass:DescribeComponent (Optional) greengrass:TagResource. Only required if you want to tag the component. For more information, see Using your Amazon Lookout for Vision model on an edge device in the Amazon Lookout for Vision Developer Guide.
    */
   startModelPackagingJob(callback?: (err: AWSError, data: LookoutVision.Types.StartModelPackagingJobResponse) => void): Request<LookoutVision.Types.StartModelPackagingJobResponse, AWSError>;
   /**
@@ -28,11 +28,11 @@ declare class MigrationHubRefactorSpaces extends Service {
    */
   createEnvironment(callback?: (err: AWSError, data: MigrationHubRefactorSpaces.Types.CreateEnvironmentResponse) => void): Request<MigrationHubRefactorSpaces.Types.CreateEnvironmentResponse, AWSError>;
   /**
-   * Creates an Amazon Web Services Migration Hub Refactor Spaces route. The account owner of the service resource is always the environment owner, regardless of which account creates the route. Routes target a service in the application. If an application does not have any routes, then the first route must be created as a DEFAULT RouteType. When created, the default route defaults to an active state so state is not a required input. However, like all other state values the state of the default route can be updated after creation, but only when all other routes are also inactive. Conversely, no route can be active without the default route also being active. When you create a route, Refactor Spaces configures the Amazon API Gateway to send traffic to the target service as follows: If the service has a URL endpoint, and the endpoint resolves to a private IP address, Refactor Spaces routes traffic using the API Gateway VPC link. If the service has a URL endpoint, and the endpoint resolves to a public IP address, Refactor Spaces routes traffic over the public internet. If the service has an Lambda function endpoint, then Refactor Spaces configures the Lambda function's resource policy to allow the application's API Gateway to invoke the function. A one-time health check is performed on the service when either the route is updated from inactive to active, or when it is created with an active state. If the health check fails, the route transitions the route state to FAILED, an error code of SERVICE_ENDPOINT_HEALTH_CHECK_FAILURE is provided, and no traffic is sent to the service. For Lambda functions, the Lambda function state is checked. If the function is not active, the function configuration is updated so that Lambda resources are provisioned. If the Lambda state is Failed, then the route creation fails. For more information, see the GetFunctionConfiguration's State response parameter in the Lambda Developer Guide. For Lambda endpoints, a check is performed to determine that a Lambda function with the specified ARN exists. If it does not exist, the health check fails. For public URLs, a connection is opened to the public endpoint. If the URL is not reachable, the health check fails. For private URLS, a target group is created on the Elastic Load Balancing and the target group health check is run. The HealthCheckProtocol, HealthCheckPort, and HealthCheckPath are the same protocol, port, and path specified in the URL or health URL, if used. All other settings use the default values, as described in Health checks for your target groups. The health check is considered successful if at least one target within the target group transitions to a healthy state. Services can have HTTP or HTTPS URL endpoints. For HTTPS URLs, publicly-signed certificates are supported. Private Certificate Authorities (CAs) are permitted only if the CA's domain is also publicly resolvable.
+   * Creates an Amazon Web Services Migration Hub Refactor Spaces route. The account owner of the service resource is always the environment owner, regardless of which account creates the route. Routes target a service in the application. If an application does not have any routes, then the first route must be created as a DEFAULT RouteType. When created, the default route defaults to an active state so state is not a required input. However, like all other state values the state of the default route can be updated after creation, but only when all other routes are also inactive. Conversely, no route can be active without the default route also being active. When you create a route, Refactor Spaces configures the Amazon API Gateway to send traffic to the target service as follows: If the service has a URL endpoint, and the endpoint resolves to a private IP address, Refactor Spaces routes traffic using the API Gateway VPC link. If the service has a URL endpoint, and the endpoint resolves to a public IP address, Refactor Spaces routes traffic over the public internet. If the service has an Lambda function endpoint, then Refactor Spaces configures the Lambda function's resource policy to allow the application's API Gateway to invoke the function. A one-time health check is performed on the service when either the route is updated from inactive to active, or when it is created with an active state. If the health check fails, the route transitions the route state to FAILED, an error code of SERVICE_ENDPOINT_HEALTH_CHECK_FAILURE is provided, and no traffic is sent to the service. For Lambda functions, the Lambda function state is checked. If the function is not active, the function configuration is updated so that Lambda resources are provisioned. If the Lambda state is Failed, then the route creation fails. For more information, see the GetFunctionConfiguration's State response parameter in the Lambda Developer Guide. For Lambda endpoints, a check is performed to determine that a Lambda function with the specified ARN exists. If it does not exist, the health check fails. For public URLs, a connection is opened to the public endpoint. If the URL is not reachable, the health check fails. Refactor Spaces automatically resolves the public Domain Name System (DNS) names that are set in CreateServiceRequest$UrlEndpoint when you create a service. The DNS names resolve when the DNS time-to-live (TTL) expires, or every 60 seconds for TTLs less than 60 seconds. This periodic DNS resolution ensures that the route configuration remains up-to-date. For private URLS, a target group is created on the Elastic Load Balancing and the target group health check is run. The HealthCheckProtocol, HealthCheckPort, and HealthCheckPath are the same protocol, port, and path specified in the URL or health URL, if used. All other settings use the default values, as described in Health checks for your target groups. The health check is considered successful if at least one target within the target group transitions to a healthy state. Services can have HTTP or HTTPS URL endpoints. For HTTPS URLs, publicly-signed certificates are supported. Private Certificate Authorities (CAs) are permitted only if the CA's domain is also publicly resolvable.
    */
   createRoute(params: MigrationHubRefactorSpaces.Types.CreateRouteRequest, callback?: (err: AWSError, data: MigrationHubRefactorSpaces.Types.CreateRouteResponse) => void): Request<MigrationHubRefactorSpaces.Types.CreateRouteResponse, AWSError>;
   /**
-   * Creates an Amazon Web Services Migration Hub Refactor Spaces route. The account owner of the service resource is always the environment owner, regardless of which account creates the route. Routes target a service in the application. If an application does not have any routes, then the first route must be created as a DEFAULT RouteType. When created, the default route defaults to an active state so state is not a required input. However, like all other state values the state of the default route can be updated after creation, but only when all other routes are also inactive. Conversely, no route can be active without the default route also being active. When you create a route, Refactor Spaces configures the Amazon API Gateway to send traffic to the target service as follows: If the service has a URL endpoint, and the endpoint resolves to a private IP address, Refactor Spaces routes traffic using the API Gateway VPC link. If the service has a URL endpoint, and the endpoint resolves to a public IP address, Refactor Spaces routes traffic over the public internet. If the service has an Lambda function endpoint, then Refactor Spaces configures the Lambda function's resource policy to allow the application's API Gateway to invoke the function. A one-time health check is performed on the service when either the route is updated from inactive to active, or when it is created with an active state. If the health check fails, the route transitions the route state to FAILED, an error code of SERVICE_ENDPOINT_HEALTH_CHECK_FAILURE is provided, and no traffic is sent to the service. For Lambda functions, the Lambda function state is checked. If the function is not active, the function configuration is updated so that Lambda resources are provisioned. If the Lambda state is Failed, then the route creation fails. For more information, see the GetFunctionConfiguration's State response parameter in the Lambda Developer Guide. For Lambda endpoints, a check is performed to determine that a Lambda function with the specified ARN exists. If it does not exist, the health check fails. For public URLs, a connection is opened to the public endpoint. If the URL is not reachable, the health check fails. For private URLS, a target group is created on the Elastic Load Balancing and the target group health check is run. The HealthCheckProtocol, HealthCheckPort, and HealthCheckPath are the same protocol, port, and path specified in the URL or health URL, if used. All other settings use the default values, as described in Health checks for your target groups. The health check is considered successful if at least one target within the target group transitions to a healthy state. Services can have HTTP or HTTPS URL endpoints. For HTTPS URLs, publicly-signed certificates are supported. Private Certificate Authorities (CAs) are permitted only if the CA's domain is also publicly resolvable.
+   * Creates an Amazon Web Services Migration Hub Refactor Spaces route. The account owner of the service resource is always the environment owner, regardless of which account creates the route. Routes target a service in the application. If an application does not have any routes, then the first route must be created as a DEFAULT RouteType. When created, the default route defaults to an active state so state is not a required input. However, like all other state values the state of the default route can be updated after creation, but only when all other routes are also inactive. Conversely, no route can be active without the default route also being active. When you create a route, Refactor Spaces configures the Amazon API Gateway to send traffic to the target service as follows: If the service has a URL endpoint, and the endpoint resolves to a private IP address, Refactor Spaces routes traffic using the API Gateway VPC link. If the service has a URL endpoint, and the endpoint resolves to a public IP address, Refactor Spaces routes traffic over the public internet. If the service has an Lambda function endpoint, then Refactor Spaces configures the Lambda function's resource policy to allow the application's API Gateway to invoke the function. A one-time health check is performed on the service when either the route is updated from inactive to active, or when it is created with an active state. If the health check fails, the route transitions the route state to FAILED, an error code of SERVICE_ENDPOINT_HEALTH_CHECK_FAILURE is provided, and no traffic is sent to the service. For Lambda functions, the Lambda function state is checked. If the function is not active, the function configuration is updated so that Lambda resources are provisioned. If the Lambda state is Failed, then the route creation fails. For more information, see the GetFunctionConfiguration's State response parameter in the Lambda Developer Guide. For Lambda endpoints, a check is performed to determine that a Lambda function with the specified ARN exists. If it does not exist, the health check fails. For public URLs, a connection is opened to the public endpoint. If the URL is not reachable, the health check fails. Refactor Spaces automatically resolves the public Domain Name System (DNS) names that are set in CreateServiceRequest$UrlEndpoint when you create a service. The DNS names resolve when the DNS time-to-live (TTL) expires, or every 60 seconds for TTLs less than 60 seconds. This periodic DNS resolution ensures that the route configuration remains up-to-date. For private URLS, a target group is created on the Elastic Load Balancing and the target group health check is run. The HealthCheckProtocol, HealthCheckPort, and HealthCheckPath are the same protocol, port, and path specified in the URL or health URL, if used. All other settings use the default values, as described in Health checks for your target groups. The health check is considered successful if at least one target within the target group transitions to a healthy state. Services can have HTTP or HTTPS URL endpoints. For HTTPS URLs, publicly-signed certificates are supported. Private Certificate Authorities (CAs) are permitted only if the CA's domain is also publicly resolvable.
    */
   createRoute(callback?: (err: AWSError, data: MigrationHubRefactorSpaces.Types.CreateRouteResponse) => void): Request<MigrationHubRefactorSpaces.Types.CreateRouteResponse, AWSError>;
   /**
@@ -613,7 +613,7 @@ declare namespace MigrationHubRefactorSpaces {
      */
     Tags?: TagMap;
     /**
-     * The configuration for the URL endpoint type.
+     * The configuration for the URL endpoint type. When creating a route to a service, Refactor Spaces automatically resolves the address in the UrlEndpointInput object URL when the Domain Name System (DNS) time-to-live (TTL) expires, or every 60 seconds for TTLs less than 60 seconds.
      */
     UrlEndpoint?: UrlEndpointInput;
     /**
@@ -1280,7 +1280,7 @@ declare namespace MigrationHubRefactorSpaces {
   }
   export interface LambdaEndpointInput {
     /**
-     * The Amazon Resource Name (ARN) of the Lambda
+     * The Amazon Resource Name (ARN) of the Lambda function or alias.
      */
     Arn: LambdaArn;
   }
@@ -1500,6 +1500,7 @@ declare namespace RDS {
      */
     CharacterSetDescription?: String;
   }
+  export type ClientPasswordAuthType = "MYSQL_NATIVE_PASSWORD"|"POSTGRES_SCRAM_SHA_256"|"POSTGRES_MD5"|"SQL_SERVER_AUTHENTICATION"|string;
   export interface CloudwatchLogsExportConfiguration {
     /**
      * The list of log types to enable.
@@ -1666,7 +1667,7 @@ declare namespace RDS {
     KmsKeyId?: String;
     Tags?: TagList;
     /**
-     * A value that indicates whether to copy all tags from the source DB snapshot to the target DB snapshot. By default, tags
+     * A value that indicates whether to copy all tags from the source DB snapshot to the target DB snapshot. By default, tags aren't copied.
      */
     CopyTags?: BooleanOptional;
     /**
@@ -1681,6 +1682,10 @@ declare namespace RDS {
      * The external custom Availability Zone (CAZ) identifier for the target CAZ. Example: rds-caz-aiqhTgQv.
      */
     TargetCustomAvailabilityZone?: String;
+    /**
+     * A value that indicates whether to copy the DB option group associated with the source DB snapshot to the target Amazon Web Services account and associate with the target DB snapshot. The associated option group can be copied only with cross-account snapshot copy calls.
+     */
+    CopyOptionGroup?: BooleanOptional;
     /**
      * The ID of the region that contains the snapshot to be copied.
      */
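Note: the new CopyOptionGroup flag on the RDS CopyDBSnapshotMessage applies only to cross-account snapshot copies, per the description above. A hedged sketch of passing it with the aws-sdk v2 RDS client (the source snapshot ARN and target identifier are hypothetical placeholders):

```ts
import { RDS } from 'aws-sdk';

const rds = new RDS({ region: 'us-east-1' });

// Copy a snapshot shared from another account and bring its option group along.
rds.copyDBSnapshot({
  SourceDBSnapshotIdentifier: 'arn:aws:rds:us-east-1:111122223333:snapshot:shared-snapshot',
  TargetDBSnapshotIdentifier: 'copied-snapshot',
  CopyTags: true,
  CopyOptionGroup: true, // new flag; valid only for cross-account copy calls
}).promise()
  .then(({ DBSnapshot }) => console.log(DBSnapshot?.DBSnapshotIdentifier))
  .catch(console.error);
```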
@@ -6749,9 +6754,21 @@ declare namespace RDS {
      */
     VpcId?: String;
     /**
-     *
+     * Specifies the Amazon Resource Name (ARN) for the option group.
      */
     OptionGroupArn?: String;
+    /**
+     * Specifies the name of the option group from which this option group is copied.
+     */
+    SourceOptionGroup?: String;
+    /**
+     * Specifies the Amazon Web Services account ID for the option group from which this option group is copied.
+     */
+    SourceAccountId?: String;
+    /**
+     * Indicates when the option group was copied.
+     */
+    CopyTimestamp?: TStamp;
   }
   export interface OptionGroupMembership {
     /**
@@ -6829,6 +6846,10 @@ declare namespace RDS {
      * The versions that are available for the option.
      */
     OptionGroupOptionVersions?: OptionGroupOptionVersionsList;
+    /**
+     * Specifies whether the option can be copied across Amazon Web Services accounts.
+     */
+    CopyableCrossAccount?: BooleanOptional;
   }
   export interface OptionGroupOptionSetting {
     /**
@@ -8942,6 +8963,10 @@ declare namespace RDS {
      * Whether to require or disallow Amazon Web Services Identity and Access Management (IAM) authentication for connections to the proxy. The ENABLED value is valid only for proxies with RDS for Microsoft SQL Server.
      */
     IAMAuth?: IAMAuthMode;
+    /**
+     * The type of authentication the proxy uses for connections from clients.
+     */
+    ClientPasswordAuthType?: ClientPasswordAuthType;
   }
   export interface UserAuthConfigInfo {
     /**
@@ -8964,6 +8989,10 @@ declare namespace RDS {
      * Whether to require or disallow Amazon Web Services Identity and Access Management (IAM) authentication for connections to the proxy. The ENABLED value is valid only for proxies with RDS for Microsoft SQL Server.
      */
     IAMAuth?: IAMAuthMode;
+    /**
+     * The type of authentication the proxy uses for connections from clients.
+     */
+    ClientPasswordAuthType?: ClientPasswordAuthType;
   }
   export type UserAuthConfigInfoList = UserAuthConfigInfo[];
   export type UserAuthConfigList = UserAuthConfig[];
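Note: ClientPasswordAuthType now appears on both UserAuthConfig (request) and UserAuthConfigInfo (response) for RDS Proxy. A minimal sketch of supplying it when creating a proxy with the aws-sdk v2 client, assuming placeholder ARNs and subnet IDs:

```ts
import { RDS } from 'aws-sdk';

const rds = new RDS({ region: 'us-east-1' });

// Create a proxy whose clients authenticate with PostgreSQL SCRAM-SHA-256.
// The role ARN, secret ARN, and subnet IDs are hypothetical placeholders.
rds.createDBProxy({
  DBProxyName: 'my-postgres-proxy',
  EngineFamily: 'POSTGRESQL',
  RoleArn: 'arn:aws:iam::123456789012:role/rds-proxy-secrets-role',
  VpcSubnetIds: ['subnet-0a1b2c3d', 'subnet-4e5f6a7b'],
  Auth: [{
    AuthScheme: 'SECRETS',
    SecretArn: 'arn:aws:secretsmanager:us-east-1:123456789012:secret:db-credentials',
    IAMAuth: 'DISABLED',
    ClientPasswordAuthType: 'POSTGRES_SCRAM_SHA_256', // new field in this SDK update
  }],
}).promise()
  .then(({ DBProxy }) => console.log(DBProxy?.Status))
  .catch(console.error);
```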
@@ -181,11 +181,11 @@ declare class Rekognition extends Service {
|
|
|
181
181
|
*/
|
|
182
182
|
detectFaces(callback?: (err: AWSError, data: Rekognition.Types.DetectFacesResponse) => void): Request<Rekognition.Types.DetectFacesResponse, AWSError>;
|
|
183
183
|
/**
|
|
184
|
-
* Detects instances of real-world entities within an image (JPEG or PNG) provided as input. This includes objects like flower, tree, and table; events like wedding, graduation, and birthday party; and concepts like landscape, evening, and nature. For an example, see Analyzing images stored in an Amazon S3 bucket in the Amazon Rekognition Developer Guide. You pass the input image as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file. Optional Parameters You can specify one or both of the GENERAL_LABELS and IMAGE_PROPERTIES feature types when calling the DetectLabels API. Including GENERAL_LABELS will ensure the response includes the labels detected in the input image, while including IMAGE_PROPERTIES will ensure the response includes information about the image quality and color. When using GENERAL_LABELS and/or IMAGE_PROPERTIES you can provide filtering criteria to the Settings parameter. You can filter with sets of individual labels or with label categories. You can specify inclusive filters, exclusive filters, or a combination of inclusive and exclusive filters. For more information on filtering see Detecting Labels in an Image. You can specify MinConfidence to control the confidence threshold for the labels returned. The default is 55%. You can also add the MaxLabels parameter to limit the number of labels returned. The default and upper limit is 1000 labels. Response Elements For each object, scene, and concept the API returns one or more labels. The API returns the following types of information regarding labels: Name - The name of the detected label. Confidence - The level of confidence in the label assigned to a detected object. Parents - The ancestor labels for a detected label. DetectLabels returns a hierarchical taxonomy of detected labels. For example, a detected car might be assigned the label car. The label car has two parent labels: Vehicle (its parent) and Transportation (its grandparent). The response includes the all ancestors for a label, where every ancestor is a unique label. In the previous example, Car, Vehicle, and Transportation are returned as unique labels in the response. Aliases - Possible Aliases for the label. Categories - The label categories that the detected label belongs to.
|
|
184
|
+
* Detects instances of real-world entities within an image (JPEG or PNG) provided as input. This includes objects like flower, tree, and table; events like wedding, graduation, and birthday party; and concepts like landscape, evening, and nature. For an example, see Analyzing images stored in an Amazon S3 bucket in the Amazon Rekognition Developer Guide. You pass the input image as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file. Optional Parameters You can specify one or both of the GENERAL_LABELS and IMAGE_PROPERTIES feature types when calling the DetectLabels API. Including GENERAL_LABELS will ensure the response includes the labels detected in the input image, while including IMAGE_PROPERTIES will ensure the response includes information about the image quality and color. When using GENERAL_LABELS and/or IMAGE_PROPERTIES you can provide filtering criteria to the Settings parameter. You can filter with sets of individual labels or with label categories. You can specify inclusive filters, exclusive filters, or a combination of inclusive and exclusive filters. For more information on filtering see Detecting Labels in an Image. You can specify MinConfidence to control the confidence threshold for the labels returned. The default is 55%. You can also add the MaxLabels parameter to limit the number of labels returned. The default and upper limit is 1000 labels. Response Elements For each object, scene, and concept the API returns one or more labels. The API returns the following types of information regarding labels: Name - The name of the detected label. Confidence - The level of confidence in the label assigned to a detected object. Parents - The ancestor labels for a detected label. DetectLabels returns a hierarchical taxonomy of detected labels. For example, a detected car might be assigned the label car. The label car has two parent labels: Vehicle (its parent) and Transportation (its grandparent). The response includes the all ancestors for a label, where every ancestor is a unique label. In the previous example, Car, Vehicle, and Transportation are returned as unique labels in the response. Aliases - Possible Aliases for the label. Categories - The label categories that the detected label belongs to. BoundingBox — Bounding boxes are described for all instances of detected common object labels, returned in an array of Instance objects. An Instance object contains a BoundingBox object, describing the location of the label on the input image. It also includes the confidence for the accuracy of the detected bounding box. The API returns the following information regarding the image, as part of the ImageProperties structure: Quality - Information about the Sharpness, Brightness, and Contrast of the input image, scored between 0 to 100. Image quality is returned for the entire image, as well as the background and the foreground. Dominant Color - An array of the dominant colors in the image. Foreground - Information about the sharpness, brightness, and dominant colors of the input image’s foreground. Background - Information about the sharpness, brightness, and dominant colors of the input image’s background. The list of returned labels will include at least one label for every detected object, along with information about that label. 
In the following example, suppose the input image has a lighthouse, the sea, and a rock. The response includes all three labels, one for each object, as well as the confidence in the label: {Name: lighthouse, Confidence: 98.4629} {Name: rock,Confidence: 79.2097} {Name: sea,Confidence: 75.061} The list of labels can include multiple labels for the same object. For example, if the input image shows a flower (for example, a tulip), the operation might return the following three labels. {Name: flower,Confidence: 99.0562} {Name: plant,Confidence: 99.0562} {Name: tulip,Confidence: 99.0562} In this example, the detection algorithm more precisely identifies the flower as a tulip. If the object detected is a person, the operation doesn't provide the same facial details that the DetectFaces operation provides. This is a stateless API operation. That is, the operation does not persist any data. This operation requires permissions to perform the rekognition:DetectLabels action.
|
|
185
185
|
*/
|
|
186
186
|
detectLabels(params: Rekognition.Types.DetectLabelsRequest, callback?: (err: AWSError, data: Rekognition.Types.DetectLabelsResponse) => void): Request<Rekognition.Types.DetectLabelsResponse, AWSError>;
|
|
187
187
|
/**
|
|
188
|
-
-   * Detects instances of real-world entities within an image (JPEG or PNG) provided as input. This includes objects like flower, tree, and table; events like wedding, graduation, and birthday party; and concepts like landscape, evening, and nature. For an example, see Analyzing images stored in an Amazon S3 bucket in the Amazon Rekognition Developer Guide. You pass the input image as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file. Optional Parameters You can specify one or both of the GENERAL_LABELS and IMAGE_PROPERTIES feature types when calling the DetectLabels API. Including GENERAL_LABELS will ensure the response includes the labels detected in the input image, while including IMAGE_PROPERTIES will ensure the response includes information about the image quality and color. When using GENERAL_LABELS and/or IMAGE_PROPERTIES you can provide filtering criteria to the Settings parameter. You can filter with sets of individual labels or with label categories. You can specify inclusive filters, exclusive filters, or a combination of inclusive and exclusive filters. For more information on filtering see Detecting Labels in an Image. You can specify MinConfidence to control the confidence threshold for the labels returned. The default is 55%. You can also add the MaxLabels parameter to limit the number of labels returned. The default and upper limit is 1000 labels. Response Elements For each object, scene, and concept the API returns one or more labels. The API returns the following types of information regarding labels: Name - The name of the detected label. Confidence - The level of confidence in the label assigned to a detected object. Parents - The ancestor labels for a detected label. DetectLabels returns a hierarchical taxonomy of detected labels. For example, a detected car might be assigned the label car. The label car has two parent labels: Vehicle (its parent) and Transportation (its grandparent). The response includes the all ancestors for a label, where every ancestor is a unique label. In the previous example, Car, Vehicle, and Transportation are returned as unique labels in the response. Aliases - Possible Aliases for the label. Categories - The label categories that the detected label belongs to.
+   * Detects instances of real-world entities within an image (JPEG or PNG) provided as input. This includes objects like flower, tree, and table; events like wedding, graduation, and birthday party; and concepts like landscape, evening, and nature. For an example, see Analyzing images stored in an Amazon S3 bucket in the Amazon Rekognition Developer Guide. You pass the input image as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file. Optional Parameters You can specify one or both of the GENERAL_LABELS and IMAGE_PROPERTIES feature types when calling the DetectLabels API. Including GENERAL_LABELS will ensure the response includes the labels detected in the input image, while including IMAGE_PROPERTIES will ensure the response includes information about the image quality and color. When using GENERAL_LABELS and/or IMAGE_PROPERTIES you can provide filtering criteria to the Settings parameter. You can filter with sets of individual labels or with label categories. You can specify inclusive filters, exclusive filters, or a combination of inclusive and exclusive filters. For more information on filtering see Detecting Labels in an Image. You can specify MinConfidence to control the confidence threshold for the labels returned. The default is 55%. You can also add the MaxLabels parameter to limit the number of labels returned. The default and upper limit is 1000 labels. Response Elements For each object, scene, and concept the API returns one or more labels. The API returns the following types of information regarding labels: Name - The name of the detected label. Confidence - The level of confidence in the label assigned to a detected object. Parents - The ancestor labels for a detected label. DetectLabels returns a hierarchical taxonomy of detected labels. For example, a detected car might be assigned the label car. The label car has two parent labels: Vehicle (its parent) and Transportation (its grandparent). The response includes the all ancestors for a label, where every ancestor is a unique label. In the previous example, Car, Vehicle, and Transportation are returned as unique labels in the response. Aliases - Possible Aliases for the label. Categories - The label categories that the detected label belongs to. BoundingBox — Bounding boxes are described for all instances of detected common object labels, returned in an array of Instance objects. An Instance object contains a BoundingBox object, describing the location of the label on the input image. It also includes the confidence for the accuracy of the detected bounding box. The API returns the following information regarding the image, as part of the ImageProperties structure: Quality - Information about the Sharpness, Brightness, and Contrast of the input image, scored between 0 to 100. Image quality is returned for the entire image, as well as the background and the foreground. Dominant Color - An array of the dominant colors in the image. Foreground - Information about the sharpness, brightness, and dominant colors of the input image’s foreground. Background - Information about the sharpness, brightness, and dominant colors of the input image’s background. The list of returned labels will include at least one label for every detected object, along with information about that label. In the following example, suppose the input image has a lighthouse, the sea, and a rock. The response includes all three labels, one for each object, as well as the confidence in the label: {Name: lighthouse, Confidence: 98.4629} {Name: rock,Confidence: 79.2097} {Name: sea,Confidence: 75.061} The list of labels can include multiple labels for the same object. For example, if the input image shows a flower (for example, a tulip), the operation might return the following three labels. {Name: flower,Confidence: 99.0562} {Name: plant,Confidence: 99.0562} {Name: tulip,Confidence: 99.0562} In this example, the detection algorithm more precisely identifies the flower as a tulip. If the object detected is a person, the operation doesn't provide the same facial details that the DetectFaces operation provides. This is a stateless API operation. That is, the operation does not persist any data. This operation requires permissions to perform the rekognition:DetectLabels action.
   */
  detectLabels(callback?: (err: AWSError, data: Rekognition.Types.DetectLabelsResponse) => void): Request<Rekognition.Types.DetectLabelsResponse, AWSError>;
  /**
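For reference, a minimal sketch (not part of the package) of calling the expanded DetectLabels operation with the aws-sdk v2 client vendored here. The region, bucket, object key, and threshold values are placeholders, and the Features, MaxLabels, MinConfidence, and ImageProperties names are assumed to match the parameter and response names described in the comment above.

```ts
// Hypothetical usage sketch for the expanded DetectLabels API (aws-sdk v2).
// Bucket, key, and threshold values below are placeholders, not package content.
import * as AWS from 'aws-sdk';

const rekognition = new AWS.Rekognition({ region: 'us-east-1' });

async function detectLabelsExample(): Promise<void> {
  const response = await rekognition.detectLabels({
    Image: { S3Object: { Bucket: 'example-bucket', Name: 'photos/lighthouse.jpg' } },
    // GENERAL_LABELS returns detected labels; IMAGE_PROPERTIES adds quality/color data.
    Features: ['GENERAL_LABELS', 'IMAGE_PROPERTIES'],
    MaxLabels: 100,    // default and upper limit is 1000
    MinConfidence: 70, // default is 55
  }).promise();

  for (const label of response.Labels ?? []) {
    console.log(label.Name, label.Confidence, label.Parents?.map(p => p.Name));
  }
  // Image-level quality and dominant colors, present when IMAGE_PROPERTIES is requested.
  console.log(response.ImageProperties?.Quality, response.ImageProperties?.DominantColors);
}
```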
@@ -261,11 +261,11 @@ declare class Rekognition extends Service {
   */
  getFaceSearch(callback?: (err: AWSError, data: Rekognition.Types.GetFaceSearchResponse) => void): Request<Rekognition.Types.GetFaceSearchResponse, AWSError>;
  /**
-   * Gets the label detection results of a Amazon Rekognition Video analysis started by StartLabelDetection. The label detection operation is started by a call to StartLabelDetection which returns a job identifier (JobId). When the label detection operation finishes, Amazon Rekognition publishes a completion status to the Amazon Simple Notification Service topic registered in the initial call to StartlabelDetection.
+   * Gets the label detection results of a Amazon Rekognition Video analysis started by StartLabelDetection. The label detection operation is started by a call to StartLabelDetection which returns a job identifier (JobId). When the label detection operation finishes, Amazon Rekognition publishes a completion status to the Amazon Simple Notification Service topic registered in the initial call to StartlabelDetection. To get the results of the label detection operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetLabelDetection and pass the job identifier (JobId) from the initial call to StartLabelDetection. GetLabelDetection returns an array of detected labels (Labels) sorted by the time the labels were detected. You can also sort by the label name by specifying NAME for the SortBy input parameter. If there is no NAME specified, the default sort is by timestamp. You can select how results are aggregated by using the AggregateBy input parameter. The default aggregation method is TIMESTAMPS. You can also aggregate by SEGMENTS, which aggregates all instances of labels detected in a given segment. The returned Labels array may include the following attributes: Name - The name of the detected label. Confidence - The level of confidence in the label assigned to a detected object. Parents - The ancestor labels for a detected label. GetLabelDetection returns a hierarchical taxonomy of detected labels. For example, a detected car might be assigned the label car. The label car has two parent labels: Vehicle (its parent) and Transportation (its grandparent). The response includes the all ancestors for a label, where every ancestor is a unique label. In the previous example, Car, Vehicle, and Transportation are returned as unique labels in the response. Aliases - Possible Aliases for the label. Categories - The label categories that the detected label belongs to. BoundingBox — Bounding boxes are described for all instances of detected common object labels, returned in an array of Instance objects. An Instance object contains a BoundingBox object, describing the location of the label on the input image. It also includes the confidence for the accuracy of the detected bounding box. Timestamp - Time, in milliseconds from the start of the video, that the label was detected. For aggregation by SEGMENTS, the StartTimestampMillis, EndTimestampMillis, and DurationMillis structures are what define a segment. Although the “Timestamp” structure is still returned with each label, its value is set to be the same as StartTimestampMillis. Timestamp and Bounding box information are returned for detected Instances, only if aggregation is done by TIMESTAMPS. If aggregating by SEGMENTS, information about detected instances isn’t returned. The version of the label model used for the detection is also returned. Note DominantColors isn't returned for Instances, although it is shown as part of the response in the sample seen below. Use MaxResults parameter to limit the number of labels returned. If there are more results than specified in MaxResults, the value of NextToken in the operation response contains a pagination token for getting the next set of results. To get the next page of results, call GetlabelDetection and populate the NextToken request parameter with the token value returned from the previous call to GetLabelDetection.
   */
  getLabelDetection(params: Rekognition.Types.GetLabelDetectionRequest, callback?: (err: AWSError, data: Rekognition.Types.GetLabelDetectionResponse) => void): Request<Rekognition.Types.GetLabelDetectionResponse, AWSError>;
  /**
-   * Gets the label detection results of a Amazon Rekognition Video analysis started by StartLabelDetection. The label detection operation is started by a call to StartLabelDetection which returns a job identifier (JobId). When the label detection operation finishes, Amazon Rekognition publishes a completion status to the Amazon Simple Notification Service topic registered in the initial call to StartlabelDetection.
+   * Gets the label detection results of a Amazon Rekognition Video analysis started by StartLabelDetection. The label detection operation is started by a call to StartLabelDetection which returns a job identifier (JobId). When the label detection operation finishes, Amazon Rekognition publishes a completion status to the Amazon Simple Notification Service topic registered in the initial call to StartlabelDetection. To get the results of the label detection operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetLabelDetection and pass the job identifier (JobId) from the initial call to StartLabelDetection. GetLabelDetection returns an array of detected labels (Labels) sorted by the time the labels were detected. You can also sort by the label name by specifying NAME for the SortBy input parameter. If there is no NAME specified, the default sort is by timestamp. You can select how results are aggregated by using the AggregateBy input parameter. The default aggregation method is TIMESTAMPS. You can also aggregate by SEGMENTS, which aggregates all instances of labels detected in a given segment. The returned Labels array may include the following attributes: Name - The name of the detected label. Confidence - The level of confidence in the label assigned to a detected object. Parents - The ancestor labels for a detected label. GetLabelDetection returns a hierarchical taxonomy of detected labels. For example, a detected car might be assigned the label car. The label car has two parent labels: Vehicle (its parent) and Transportation (its grandparent). The response includes the all ancestors for a label, where every ancestor is a unique label. In the previous example, Car, Vehicle, and Transportation are returned as unique labels in the response. Aliases - Possible Aliases for the label. Categories - The label categories that the detected label belongs to. BoundingBox — Bounding boxes are described for all instances of detected common object labels, returned in an array of Instance objects. An Instance object contains a BoundingBox object, describing the location of the label on the input image. It also includes the confidence for the accuracy of the detected bounding box. Timestamp - Time, in milliseconds from the start of the video, that the label was detected. For aggregation by SEGMENTS, the StartTimestampMillis, EndTimestampMillis, and DurationMillis structures are what define a segment. Although the “Timestamp” structure is still returned with each label, its value is set to be the same as StartTimestampMillis. Timestamp and Bounding box information are returned for detected Instances, only if aggregation is done by TIMESTAMPS. If aggregating by SEGMENTS, information about detected instances isn’t returned. The version of the label model used for the detection is also returned. Note DominantColors isn't returned for Instances, although it is shown as part of the response in the sample seen below. Use MaxResults parameter to limit the number of labels returned. If there are more results than specified in MaxResults, the value of NextToken in the operation response contains a pagination token for getting the next set of results. To get the next page of results, call GetlabelDetection and populate the NextToken request parameter with the token value returned from the previous call to GetLabelDetection.
   */
  getLabelDetection(callback?: (err: AWSError, data: Rekognition.Types.GetLabelDetectionResponse) => void): Request<Rekognition.Types.GetLabelDetectionResponse, AWSError>;
  /**
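A minimal sketch of collecting paginated GetLabelDetection results after the SNS status is SUCCEEDED. The job ID is a placeholder obtained from StartLabelDetection; the MaxResults/NextToken loop follows the pagination behaviour described in the comment above.

```ts
// Hypothetical sketch: paging through GetLabelDetection results.
// The JobId passed in is a placeholder from a prior StartLabelDetection call.
import * as AWS from 'aws-sdk';

const rekognition = new AWS.Rekognition({ region: 'us-east-1' });

async function collectLabels(jobId: string): Promise<AWS.Rekognition.LabelDetections> {
  const all: AWS.Rekognition.LabelDetections = [];
  let nextToken: string | undefined;
  do {
    const page = await rekognition.getLabelDetection({
      JobId: jobId,
      MaxResults: 1000,
      SortBy: 'TIMESTAMP',       // or 'NAME' to group results by label
      AggregateBy: 'TIMESTAMPS', // or 'SEGMENTS' for per-segment aggregation
      NextToken: nextToken,
    }).promise();
    all.push(...(page.Labels ?? []));
    nextToken = page.NextToken;  // keep paging while a token is returned
  } while (nextToken);
  return all;
}
```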
@@ -421,11 +421,11 @@ declare class Rekognition extends Service {
   */
  startFaceSearch(callback?: (err: AWSError, data: Rekognition.Types.StartFaceSearchResponse) => void): Request<Rekognition.Types.StartFaceSearchResponse, AWSError>;
  /**
-   * Starts asynchronous detection of labels in a stored video. Amazon Rekognition Video can detect labels in a video. Labels are instances of real-world entities. This includes objects like flower, tree, and table; events like wedding, graduation, and birthday party; concepts like landscape, evening, and nature; and activities like a person getting out of a car or a person skiing. The video must be stored in an Amazon S3 bucket. Use Video to specify the bucket name and the filename of the video. StartLabelDetection returns a job identifier (JobId) which you use to get the results of the operation. When label detection is finished, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic that you specify in NotificationChannel. To get the results of the label detection operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetLabelDetection and pass the job identifier (JobId) from the initial call to StartLabelDetection.
+   * Starts asynchronous detection of labels in a stored video. Amazon Rekognition Video can detect labels in a video. Labels are instances of real-world entities. This includes objects like flower, tree, and table; events like wedding, graduation, and birthday party; concepts like landscape, evening, and nature; and activities like a person getting out of a car or a person skiing. The video must be stored in an Amazon S3 bucket. Use Video to specify the bucket name and the filename of the video. StartLabelDetection returns a job identifier (JobId) which you use to get the results of the operation. When label detection is finished, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic that you specify in NotificationChannel. To get the results of the label detection operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetLabelDetection and pass the job identifier (JobId) from the initial call to StartLabelDetection. Optional Parameters StartLabelDetection has the GENERAL_LABELS Feature applied by default. This feature allows you to provide filtering criteria to the Settings parameter. You can filter with sets of individual labels or with label categories. You can specify inclusive filters, exclusive filters, or a combination of inclusive and exclusive filters. For more information on filtering, see Detecting labels in a video. You can specify MinConfidence to control the confidence threshold for the labels returned. The default is 50.
   */
  startLabelDetection(params: Rekognition.Types.StartLabelDetectionRequest, callback?: (err: AWSError, data: Rekognition.Types.StartLabelDetectionResponse) => void): Request<Rekognition.Types.StartLabelDetectionResponse, AWSError>;
  /**
-   * Starts asynchronous detection of labels in a stored video. Amazon Rekognition Video can detect labels in a video. Labels are instances of real-world entities. This includes objects like flower, tree, and table; events like wedding, graduation, and birthday party; concepts like landscape, evening, and nature; and activities like a person getting out of a car or a person skiing. The video must be stored in an Amazon S3 bucket. Use Video to specify the bucket name and the filename of the video. StartLabelDetection returns a job identifier (JobId) which you use to get the results of the operation. When label detection is finished, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic that you specify in NotificationChannel. To get the results of the label detection operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetLabelDetection and pass the job identifier (JobId) from the initial call to StartLabelDetection.
+   * Starts asynchronous detection of labels in a stored video. Amazon Rekognition Video can detect labels in a video. Labels are instances of real-world entities. This includes objects like flower, tree, and table; events like wedding, graduation, and birthday party; concepts like landscape, evening, and nature; and activities like a person getting out of a car or a person skiing. The video must be stored in an Amazon S3 bucket. Use Video to specify the bucket name and the filename of the video. StartLabelDetection returns a job identifier (JobId) which you use to get the results of the operation. When label detection is finished, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic that you specify in NotificationChannel. To get the results of the label detection operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetLabelDetection and pass the job identifier (JobId) from the initial call to StartLabelDetection. Optional Parameters StartLabelDetection has the GENERAL_LABELS Feature applied by default. This feature allows you to provide filtering criteria to the Settings parameter. You can filter with sets of individual labels or with label categories. You can specify inclusive filters, exclusive filters, or a combination of inclusive and exclusive filters. For more information on filtering, see Detecting labels in a video. You can specify MinConfidence to control the confidence threshold for the labels returned. The default is 50.
   */
  startLabelDetection(callback?: (err: AWSError, data: Rekognition.Types.StartLabelDetectionResponse) => void): Request<Rekognition.Types.StartLabelDetectionResponse, AWSError>;
  /**
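A minimal sketch of starting a label-detection job with the new Features and Settings parameters. The bucket, object key, SNS topic ARN, IAM role ARN, and JobTag are placeholders, and the GeneralLabels filter field name (LabelCategoryInclusionFilters) is an assumption taken from the Rekognition label-filtering documentation rather than shown in this diff.

```ts
// Hypothetical sketch of StartLabelDetection with the new Features/Settings fields.
// All ARNs, names, and filter values are placeholders for illustration only.
import * as AWS from 'aws-sdk';

const rekognition = new AWS.Rekognition({ region: 'us-east-1' });

async function startLabelJob(): Promise<string | undefined> {
  const { JobId } = await rekognition.startLabelDetection({
    Video: { S3Object: { Bucket: 'example-bucket', Name: 'videos/park.mp4' } },
    NotificationChannel: {
      SNSTopicArn: 'arn:aws:sns:us-east-1:123456789012:example-topic',
      RoleArn: 'arn:aws:iam::123456789012:role/example-rekognition-role',
    },
    JobTag: 'park-video',
    MinConfidence: 60,            // default is 50
    Features: ['GENERAL_LABELS'], // GENERAL_LABELS is also applied by default
    Settings: {
      GeneralLabels: {
        // Assumed filter field name; restricts results to the listed categories.
        LabelCategoryInclusionFilters: ['Animals and Pets'],
      },
    },
  }).promise();
  // Poll the SNS topic for SUCCEEDED, then pass JobId to getLabelDetection.
  return JobId;
}
```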
@@ -2088,6 +2088,10 @@ declare namespace Rekognition {
     * Sort to use for elements in the Labels array. Use TIMESTAMP to sort array elements by the time labels are detected. Use NAME to alphabetically group elements for a label together. Within each label group, the array element are sorted by detection confidence. The default sort is by TIMESTAMP.
     */
    SortBy?: LabelDetectionSortBy;
+    /**
+     * Defines how to aggregate the returned results. Results can be aggregated by timestamps or segments.
+     */
+    AggregateBy?: LabelDetectionAggregateBy;
  }
  export interface GetLabelDetectionResponse {
    /**
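A type-level sketch of the new AggregateBy request field; the job IDs below are placeholders.

```ts
// Hypothetical request shapes showing the two LabelDetectionAggregateBy values.
import * as AWS from 'aws-sdk';

export const byTimestamps: AWS.Rekognition.GetLabelDetectionRequest = {
  JobId: 'example-job-id',   // placeholder
  AggregateBy: 'TIMESTAMPS', // default: one entry per detection timestamp
};

export const bySegments: AWS.Rekognition.GetLabelDetectionRequest = {
  JobId: 'example-job-id',
  AggregateBy: 'SEGMENTS',   // one entry per continuous segment of a label
};
```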
@@ -2448,6 +2452,24 @@ declare namespace Rekognition {
     * Details about the detected label.
     */
    Label?: Label;
+    /**
+     * The time in milliseconds defining the start of the timeline segment containing a continuously detected label.
+     */
+    StartTimestampMillis?: ULong;
+    /**
+     * The time in milliseconds defining the end of the timeline segment containing a continuously detected label.
+     */
+    EndTimestampMillis?: ULong;
+    /**
+     * The time duration of a segment in milliseconds, I.e. time elapsed from StartTimestampMillis to EndTimestampMillis.
+     */
+    DurationMillis?: ULong;
+  }
+  export type LabelDetectionAggregateBy = "TIMESTAMPS"|"SEGMENTS"|string;
+  export type LabelDetectionFeatureList = LabelDetectionFeatureName[];
+  export type LabelDetectionFeatureName = "GENERAL_LABELS"|string;
+  export interface LabelDetectionSettings {
+    GeneralLabels?: GeneralLabelsSettings;
  }
  export type LabelDetectionSortBy = "NAME"|"TIMESTAMP"|string;
  export type LabelDetections = LabelDetection[];
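A minimal sketch of reading the new segment-timing fields, assuming the label detections were obtained with AggregateBy set to SEGMENTS (when aggregating by TIMESTAMPS these fields and only the per-frame data are relevant).

```ts
// Hypothetical sketch: printing segment windows from segment-aggregated results.
import * as AWS from 'aws-sdk';

function printSegments(labels: AWS.Rekognition.LabelDetections): void {
  for (const detection of labels) {
    if (detection.StartTimestampMillis === undefined) {
      continue; // timestamp-aggregated entry, no segment window available
    }
    console.log(
      `${detection.Label?.Name}: ` +
      `${detection.StartTimestampMillis} to ${detection.EndTimestampMillis} ms ` +
      `(duration ${detection.DurationMillis} ms)`
    );
  }
}
```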
@@ -3332,7 +3354,7 @@ declare namespace Rekognition {
     */
    ClientRequestToken?: ClientRequestToken;
    /**
-     * Specifies the minimum confidence that Amazon Rekognition Video must have in order to return a detected label. Confidence represents how certain Amazon Rekognition is that a label is correctly identified.0 is the lowest confidence. 100 is the highest confidence. Amazon Rekognition Video doesn't return any labels with a confidence level lower than this specified value. If you don't specify MinConfidence, the operation returns labels with confidence values greater than or equal to 50 percent.
+     * Specifies the minimum confidence that Amazon Rekognition Video must have in order to return a detected label. Confidence represents how certain Amazon Rekognition is that a label is correctly identified.0 is the lowest confidence. 100 is the highest confidence. Amazon Rekognition Video doesn't return any labels with a confidence level lower than this specified value. If you don't specify MinConfidence, the operation returns labels and bounding boxes (if detected) with confidence values greater than or equal to 50 percent.
     */
    MinConfidence?: Percent;
    /**
@@ -3343,6 +3365,14 @@ declare namespace Rekognition {
     * An identifier you specify that's returned in the completion notification that's published to your Amazon Simple Notification Service topic. For example, you can use JobTag to group related jobs and identify them in the completion notification.
     */
    JobTag?: JobTag;
+    /**
+     * The features to return after video analysis. You can specify that GENERAL_LABELS are returned.
+     */
+    Features?: LabelDetectionFeatureList;
+    /**
+     * The settings for a StartLabelDetection request.Contains the specified parameters for the label detection request of an asynchronous label analysis operation. Settings can include filters for GENERAL_LABELS.
+     */
+    Settings?: LabelDetectionSettings;
  }
  export interface StartLabelDetectionResponse {
    /**