@awboost/cfn-resource-types 0.1.321 → 0.1.322

@@ -83,6 +83,13 @@ export type AudioExtractionCategory = {
  * @see {@link https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-bedrock-dataautomationproject-audioextractioncategorytype.html}
  */
  export type AudioExtractionCategoryType = "AUDIO_CONTENT_MODERATION" | "TRANSCRIPT" | "TOPIC_CONTENT_MODERATION";
+ /**
+ * Type definition for `AWS::Bedrock::DataAutomationProject.AudioOverrideConfiguration`.
+ * @see {@link https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-bedrock-dataautomationproject-audiooverrideconfiguration.html}
+ */
+ export type AudioOverrideConfiguration = {
+   ModalityProcessing?: ModalityProcessingConfiguration;
+ };
  /**
  * Type definition for `AWS::Bedrock::DataAutomationProject.AudioStandardExtraction`.
  * @see {@link https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-bedrock-dataautomationproject-audiostandardextraction.html}
@@ -159,6 +166,11 @@ export type DataAutomationProjectStage = "DEVELOPMENT" | "LIVE";
  * @see {@link https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-bedrock-dataautomationproject-dataautomationprojectstatus.html}
  */
  export type DataAutomationProjectStatus = "COMPLETED" | "IN_PROGRESS" | "FAILED";
+ /**
+ * Type definition for `AWS::Bedrock::DataAutomationProject.DesiredModality`.
+ * @see {@link https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-bedrock-dataautomationproject-desiredmodality.html}
+ */
+ export type DesiredModality = "DOCUMENT" | "IMAGE" | "VIDEO" | "AUDIO";
  /**
  * Type definition for `AWS::Bedrock::DataAutomationProject.DocumentBoundingBox`.
  * @see {@link https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-bedrock-dataautomationproject-documentboundingbox.html}
@@ -210,6 +222,7 @@ export type DocumentOutputTextFormatType = "PLAIN_TEXT" | "MARKDOWN" | "HTML" |
  * @see {@link https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-bedrock-dataautomationproject-documentoverrideconfiguration.html}
  */
  export type DocumentOverrideConfiguration = {
+   ModalityProcessing?: ModalityProcessingConfiguration;
    Splitter?: SplitterConfiguration;
  };
  /**
@@ -256,6 +269,13 @@ export type ImageExtractionCategory = {
  * @see {@link https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-bedrock-dataautomationproject-imageextractioncategorytype.html}
  */
  export type ImageExtractionCategoryType = "CONTENT_MODERATION" | "TEXT_DETECTION" | "LOGOS";
+ /**
+ * Type definition for `AWS::Bedrock::DataAutomationProject.ImageOverrideConfiguration`.
+ * @see {@link https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-bedrock-dataautomationproject-imageoverrideconfiguration.html}
+ */
+ export type ImageOverrideConfiguration = {
+   ModalityProcessing?: ModalityProcessingConfiguration;
+ };
  /**
  * Type definition for `AWS::Bedrock::DataAutomationProject.ImageStandardExtraction`.
  * @see {@link https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-bedrock-dataautomationproject-imagestandardextraction.html}
@@ -285,13 +305,38 @@ export type ImageStandardOutputConfiguration = {
    Extraction?: ImageStandardExtraction;
    GenerativeField?: ImageStandardGenerativeField;
  };
+ /**
+ * Type definition for `AWS::Bedrock::DataAutomationProject.ModalityProcessingConfiguration`.
+ * @see {@link https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-bedrock-dataautomationproject-modalityprocessingconfiguration.html}
+ */
+ export type ModalityProcessingConfiguration = {
+   State?: State;
+ };
+ /**
+ * Type definition for `AWS::Bedrock::DataAutomationProject.ModalityRoutingConfiguration`.
+ * Modality routing configuration
+ * @see {@link https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-bedrock-dataautomationproject-modalityroutingconfiguration.html}
+ */
+ export type ModalityRoutingConfiguration = {
+   jpeg?: DesiredModality;
+   mov?: DesiredModality;
+   mp4?: DesiredModality;
+   png?: DesiredModality;
+ };
  /**
  * Type definition for `AWS::Bedrock::DataAutomationProject.OverrideConfiguration`.
  * Override configuration
  * @see {@link https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-bedrock-dataautomationproject-overrideconfiguration.html}
  */
  export type OverrideConfiguration = {
+   Audio?: AudioOverrideConfiguration;
    Document?: DocumentOverrideConfiguration;
+   Image?: ImageOverrideConfiguration;
+   /**
+    * Modality routing configuration
+    */
+   ModalityRouting?: ModalityRoutingConfiguration;
+   Video?: VideoOverrideConfiguration;
  };
  /**
  * Type definition for `AWS::Bedrock::DataAutomationProject.SplitterConfiguration`.
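The new pieces above compose: each per-modality override carries a ModalityProcessingConfiguration, and ModalityRouting maps file extensions to a DesiredModality. A minimal usage sketch follows; the import path and the "DISABLED" member of the undiffed State type are assumptions, not confirmed by this diff:

```ts
// Hypothetical sketch; import path and the "DISABLED" State value are assumptions.
import type { OverrideConfiguration } from "@awboost/cfn-resource-types/AWS-Bedrock-DataAutomationProject";

const overrides: OverrideConfiguration = {
  // Skip audio processing entirely for this project (assumes State includes "DISABLED").
  Audio: { ModalityProcessing: { State: "DISABLED" } },
  // Route container formats to the modality that should process them.
  ModalityRouting: { mov: "VIDEO", mp4: "VIDEO", jpeg: "IMAGE", png: "IMAGE" },
};
```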
@@ -357,6 +402,13 @@ export type VideoExtractionCategory = {
  * @see {@link https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-bedrock-dataautomationproject-videoextractioncategorytype.html}
  */
  export type VideoExtractionCategoryType = "CONTENT_MODERATION" | "TEXT_DETECTION" | "TRANSCRIPT" | "LOGOS";
+ /**
+ * Type definition for `AWS::Bedrock::DataAutomationProject.VideoOverrideConfiguration`.
+ * @see {@link https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-bedrock-dataautomationproject-videooverrideconfiguration.html}
+ */
+ export type VideoOverrideConfiguration = {
+   ModalityProcessing?: ModalityProcessingConfiguration;
+ };
  /**
  * Type definition for `AWS::Bedrock::DataAutomationProject.VideoStandardExtraction`.
  * @see {@link https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-bedrock-dataautomationproject-videostandardextraction.html}
@@ -93,8 +93,8 @@ export type ECSServiceProperties = {
  /**
  * The scheduling strategy to use for the service. For more information, see [Services](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs_services.html).
  There are two service scheduler strategies available:
- + ``REPLICA``-The replica scheduling strategy places and maintains the desired number of tasks across your cluster. By default, the service scheduler spreads tasks across Availability Zones. You can use task placement strategies and constraints to customize task placement decisions. This scheduler strategy is required if the service uses the ``CODE_DEPLOY`` or ``EXTERNAL`` deployment controller types.
- + ``DAEMON``-The daemon scheduling strategy deploys exactly one task on each active container instance that meets all of the task placement constraints that you specify in your cluster. The service scheduler also evaluates the task placement constraints for running tasks and will stop tasks that don't meet the placement constraints. When you're using this strategy, you don't need to specify a desired number of tasks, a task placement strategy, or use Service Auto Scaling policies.
+ + ``REPLICA``-The replica scheduling strategy places and maintains the desired number of tasks across your cluster. By default, the service scheduler spreads tasks across Availability Zones. You can use task placement strategies and constraints to customize task placement decisions. This scheduler strategy is required if the service uses the ``CODE_DEPLOY`` or ``EXTERNAL`` deployment controller types.
+ + ``DAEMON``-The daemon scheduling strategy deploys exactly one task on each active container instance that meets all of the task placement constraints that you specify in your cluster. The service scheduler also evaluates the task placement constraints for running tasks and will stop tasks that don't meet the placement constraints. When you're using this strategy, you don't need to specify a desired number of tasks, a task placement strategy, or use Service Auto Scaling policies.
  Tasks using the Fargate launch type or the ``CODE_DEPLOY`` or ``EXTERNAL`` deployment controller types don't support the ``DAEMON`` scheduling strategy.
  */
  SchedulingStrategy?: "DAEMON" | "REPLICA";
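For context, a hedged sketch of the two strategies as they would appear with these generated types; the import path and the DesiredCount pairing are assumptions, not part of this diff:

```ts
// Hypothetical sketch; import path is an assumption.
import type { ECSServiceProperties } from "@awboost/cfn-resource-types/AWS-ECS-Service";

// DAEMON: one task per active container instance, so no desired count.
const daemon: Partial<ECSServiceProperties> = {
  SchedulingStrategy: "DAEMON",
};

// REPLICA: the scheduler places and maintains the desired number of tasks.
const replica: Partial<ECSServiceProperties> = {
  SchedulingStrategy: "REPLICA",
  DesiredCount: 3,
};
```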
@@ -371,7 +371,9 @@ export type LogConfiguration = {
  /**
  * The configuration options to send to the log driver.
  The options you can specify depend on the log driver. Some of the options you can specify when you use the ``awslogs`` log driver to route logs to Amazon CloudWatch include the following:
- + awslogs-create-group Required: No Specify whether you want the log group to be created automatically. If this option isn't specified, it defaults to false. Your IAM policy must include the logs:CreateLogGroup permission before you attempt to use awslogs-create-group. + awslogs-region Required: Yes Specify the Region that the awslogs log driver is to send your Docker logs to. You can choose to send all of your logs from clusters in different Regions to a single region in CloudWatch Logs. This is so that they're all visible in one location. Otherwise, you can separate them by Region for more granularity. Make sure that the specified log group exists in the Region that you specify with this option. + awslogs-group Required: Yes Make sure to specify a log group that the awslogs log driver sends its log streams to. + awslogs-stream-prefix Required: Yes, when using the Fargate launch type.Optional for the EC2 launch type, required for the Fargate launch type. Use the awslogs-stream-prefix option to associate a log stream with the specified prefix, the container name, and the ID of the Amazon ECS task that the container belongs to. If you specify a prefix with this option, then the log stream takes the format prefix-name/container-name/ecs-task-id. If you don't specify a prefix with this option, then the log stream is named after the container ID that's assigned by the Docker daemon on the container instance. Because it's difficult to trace logs back to the container that sent them with just the Docker container ID (which is only available on the container instance), we recommend that you specify a prefix with this option. For Amazon ECS services, you can use the service name as the prefix. Doing so, you can trace log streams to the service that the container belongs to, the name of the container that sent them, and the ID of the task that the container belongs to. You must specify a stream-prefix for your logs to have your logs appear in the Log pane when using the Amazon ECS console. + awslogs-datetime-format Required: No This option defines a multiline start pattern in Python strftime format. A log message consists of a line that matches the pattern and any following lines that don’t match the pattern. The matched line is the delimiter between log messages. One example of a use case for using this format is for parsing output such as a stack dump, which might otherwise be logged in multiple entries. The correct pattern allows it to be captured in a single entry. For more information, see awslogs-datetime-format. You cannot configure both the awslogs-datetime-format and awslogs-multiline-pattern options. Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance. + awslogs-multiline-pattern Required: No This option defines a multiline start pattern that uses a regular expression. A log message consists of a line that matches the pattern and any following lines that don’t match the pattern. The matched line is the delimiter between log messages. For more information, see awslogs-multiline-pattern. This option is ignored if awslogs-datetime-format is also configured. You cannot configure both the awslogs-datetime-format and awslogs-multiline-pattern options. Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance. + mode Required: No Valid values: non-blocking | blocking This option defines the delivery mode of log messages from the container to CloudWatch Logs. The delivery mode you choose affects application availability when the flow of logs from container to CloudWatch is interrupted. If you use the blocking mode and the flow of logs to CloudWatch is interrupted, calls from container code to write to the stdout and stderr streams will block. The logging thread of the application will block as a result. This may cause the application to become unresponsive and lead to container healthcheck failure. If you use the non-blocking mode, the container's logs are instead stored in an in-memory intermediate buffer configured with the max-buffer-size option. This prevents the application from becoming unresponsive when logs cannot be sent to CloudWatch. We recommend using this mode if you want to ensure service availability and are okay with some log loss. For more information, see Preventing log loss with non-blocking mode in the awslogs container log driver. + max-buffer-size Required: No Default value: 1m When non-blocking mode is used, the max-buffer-size log option controls the size of the buffer that's used for intermediate message storage. Make sure to specify an adequate buffer size based on your application. When the buffer fills up, further logs cannot be stored. Logs that cannot be stored are lost.
+ + awslogs-create-group Required: No Specify whether you want the log group to be created automatically. If this option isn't specified, it defaults to false. Your IAM policy must include the logs:CreateLogGroup permission before you attempt to use awslogs-create-group. + awslogs-region Required: Yes Specify the Region that the awslogs log driver is to send your Docker logs to. You can choose to send all of your logs from clusters in different Regions to a single region in CloudWatch Logs. This is so that they're all visible in one location. Otherwise, you can separate them by Region for more granularity. Make sure that the specified log group exists in the Region that you specify with this option. + awslogs-group Required: Yes Make sure to specify a log group that the awslogs log driver sends its log streams to. + awslogs-stream-prefix Required: Yes, when using Fargate.Optional when using EC2. Use the awslogs-stream-prefix option to associate a log stream with the specified prefix, the container name, and the ID of the Amazon ECS task that the container belongs to. If you specify a prefix with this option, then the log stream takes the format prefix-name/container-name/ecs-task-id. If you don't specify a prefix with this option, then the log stream is named after the container ID that's assigned by the Docker daemon on the container instance. Because it's difficult to trace logs back to the container that sent them with just the Docker container ID (which is only available on the container instance), we recommend that you specify a prefix with this option. For Amazon ECS services, you can use the service name as the prefix. Doing so, you can trace log streams to the service that the container belongs to, the name of the container that sent them, and the ID of the task that the container belongs to. You must specify a stream-prefix for your logs to have your logs appear in the Log pane when using the Amazon ECS console. + awslogs-datetime-format Required: No This option defines a multiline start pattern in Python strftime format. A log message consists of a line that matches the pattern and any following lines that don’t match the pattern. The matched line is the delimiter between log messages. One example of a use case for using this format is for parsing output such as a stack dump, which might otherwise be logged in multiple entries. The correct pattern allows it to be captured in a single entry. For more information, see awslogs-datetime-format. You cannot configure both the awslogs-datetime-format and awslogs-multiline-pattern options. Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance. + awslogs-multiline-pattern Required: No This option defines a multiline start pattern that uses a regular expression. A log message consists of a line that matches the pattern and any following lines that don’t match the pattern. The matched line is the delimiter between log messages. For more information, see awslogs-multiline-pattern. This option is ignored if awslogs-datetime-format is also configured. You cannot configure both the awslogs-datetime-format and awslogs-multiline-pattern options. Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance.
+ The following options apply to all supported log drivers.
+ + mode Required: No Valid values: non-blocking | blocking This option defines the delivery mode of log messages from the container to the log driver specified using logDriver. The delivery mode you choose affects application availability when the flow of logs from container is interrupted. If you use the blocking mode and the flow of logs is interrupted, calls from container code to write to the stdout and stderr streams will block. The logging thread of the application will block as a result. This may cause the application to become unresponsive and lead to container healthcheck failure. If you use the non-blocking mode, the container's logs are instead stored in an in-memory intermediate buffer configured with the max-buffer-size option. This prevents the application from becoming unresponsive when logs cannot be sent. We recommend using this mode if you want to ensure service availability and are okay with some log loss. For more information, see Preventing log loss with non-blocking mode in the awslogs container log driver. You can set a default mode for all containers in a specific Region by using the defaultLogDriverMode account setting. If you don't specify the mode option or configure the account setting, Amazon ECS will default to the blocking mode. For more information about the account setting, see Default log driver mode in the Amazon Elastic Container Service Developer Guide. + max-buffer-size Required: No Default value: 1m When non-blocking mode is used, the max-buffer-size log option controls the size of the buffer that's used for intermediate message storage. Make sure to specify an adequate buffer size based on your application. When the buffer fills up, further logs cannot be stored. Logs that cannot be stored are lost.
  To route logs using the ``splunk`` log router, you need to specify a ``splunk-token`` and a ``splunk-url``.
  When you use the ``awsfirelens`` log router to route logs to an AWS Service or AWS Partner Network destination for log storage and analytics, you can set the ``log-driver-buffer-limit`` option to limit the number of events that are buffered in memory, before being sent to the log router container. It can help to resolve potential log loss issue because high throughput might result in memory running out for the buffer inside of Docker.
  Other options you can specify when using ``awsfirelens`` to route logs depend on the destination. When you export logs to Amazon Data Firehose, you can specify the AWS Region with ``region`` and a name for the log stream with ``delivery_stream``.
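A hedged sketch of an awslogs configuration exercising the options documented above, including mode and max-buffer-size; the import path and all option values are illustrative assumptions:

```ts
// Hypothetical sketch; import path and values are assumptions.
import type { LogConfiguration } from "@awboost/cfn-resource-types/AWS-ECS-Service";

const logging: LogConfiguration = {
  LogDriver: "awslogs",
  Options: {
    "awslogs-group": "/ecs/my-service",  // must already exist unless awslogs-create-group is set
    "awslogs-region": "us-east-1",
    "awslogs-stream-prefix": "web",      // stream becomes web/container-name/ecs-task-id
    "mode": "non-blocking",              // favor availability over guaranteed delivery
    "max-buffer-size": "4m",             // in-memory buffer used while delivery is interrupted
  },
};
```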
@@ -592,9 +594,9 @@ export type ServiceManagedEBSVolumeConfiguration = {
  /**
  * The number of I/O operations per second (IOPS). For ``gp3``, ``io1``, and ``io2`` volumes, this represents the number of IOPS that are provisioned for the volume. For ``gp2`` volumes, this represents the baseline performance of the volume and the rate at which the volume accumulates I/O credits for bursting.
  The following are the supported values for each volume type.
- + ``gp3``: 3,000 - 16,000 IOPS
- + ``io1``: 100 - 64,000 IOPS
- + ``io2``: 100 - 256,000 IOPS
+ + ``gp3``: 3,000 - 16,000 IOPS
+ + ``io1``: 100 - 64,000 IOPS
+ + ``io2``: 100 - 256,000 IOPS

  This parameter is required for ``io1`` and ``io2`` volume types. The default for ``gp3`` volumes is ``3,000 IOPS``. This parameter is not supported for ``st1``, ``sc1``, or ``standard`` volume types.
  This parameter maps 1:1 with the ``Iops`` parameter of the [CreateVolume API](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateVolume.html) in the *Amazon EC2 API Reference*.
@@ -602,7 +604,7 @@ export type ServiceManagedEBSVolumeConfiguration = {
  Iops?: number;
  /**
  * The Amazon Resource Name (ARN) identifier of the AWS Key Management Service key to use for Amazon EBS encryption. When encryption is turned on and no AWS Key Management Service key is specified, the default AWS managed key for Amazon EBS volumes is used. This parameter maps 1:1 with the ``KmsKeyId`` parameter of the [CreateVolume API](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateVolume.html) in the *Amazon EC2 API Reference*.
- AWS authenticates the AWS Key Management Service key asynchronously. Therefore, if you specify an ID, alias, or ARN that is invalid, the action can appear to complete, but eventually fails.
+ AWS authenticates the AWS Key Management Service key asynchronously. Therefore, if you specify an ID, alias, or ARN that is invalid, the action can appear to complete, but eventually fails.
  */
  KmsKeyId?: string;
  /**
@@ -612,10 +614,10 @@ export type ServiceManagedEBSVolumeConfiguration = {
  /**
  * The size of the volume in GiB. You must specify either a volume size or a snapshot ID. If you specify a snapshot ID, the snapshot size is used for the volume size by default. You can optionally specify a volume size greater than or equal to the snapshot size. This parameter maps 1:1 with the ``Size`` parameter of the [CreateVolume API](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateVolume.html) in the *Amazon EC2 API Reference*.
  The following are the supported volume size values for each volume type.
- + ``gp2`` and ``gp3``: 1-16,384
- + ``io1`` and ``io2``: 4-16,384
- + ``st1`` and ``sc1``: 125-16,384
- + ``standard``: 1-1,024
+ + ``gp2`` and ``gp3``: 1-16,384
+ + ``io1`` and ``io2``: 4-16,384
+ + ``st1`` and ``sc1``: 125-16,384
+ + ``standard``: 1-1,024
  */
  SizeInGiB?: number;
  /**
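A hedged sketch tying the documented IOPS and size ranges together for a gp3 volume; the import path, the placeholder ARN, and the field pairing are assumptions, not part of this diff:

```ts
// Hypothetical sketch; import path and the placeholder ARN are assumptions.
import type { ServiceManagedEBSVolumeConfiguration } from "@awboost/cfn-resource-types/AWS-ECS-Service";

const volume: ServiceManagedEBSVolumeConfiguration = {
  VolumeType: "gp3",
  SizeInGiB: 100,  // gp2/gp3 allow 1-16,384 GiB
  Iops: 4000,      // gp3 allows 3,000-16,000; defaults to 3,000 if omitted
  RoleArn: "arn:aws:iam::123456789012:role/ecsInfrastructureRole", // placeholder
};
```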
@@ -727,7 +729,7 @@ export type TimeoutConfiguration = {
  */
  IdleTimeoutSeconds?: number;
  /**
- * The amount of time waiting for the upstream to respond with a complete response per request. A value of ``0`` can be set to disable ``perRequestTimeout``. ``perRequestTimeout`` can only be set if Service Connect ``appProtocol`` isn't ``TCP``. Only ``idleTimeout`` is allowed for ``TCP`` ``appProtocol``.
+ * The amount of time waiting for the upstream to respond with a complete response per request. A value of ``0`` can be set to disable ``perRequestTimeout``. ``perRequestTimeout`` can only be set if Service Connect ``appProtocol`` isn't ``TCP``. Only ``idleTimeout`` is allowed for ``TCP````appProtocol``.
  */
  PerRequestTimeoutSeconds?: number;
  };
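A hedged sketch of the constraint described above: with a TCP appProtocol only the idle timeout applies, while HTTP-style protocols may also set a per-request timeout. The import path is an assumption:

```ts
// Hypothetical sketch; import path is an assumption.
import type { TimeoutConfiguration } from "@awboost/cfn-resource-types/AWS-ECS-Service";

// TCP appProtocol: only idleTimeout is allowed.
const tcpTimeouts: TimeoutConfiguration = {
  IdleTimeoutSeconds: 300,
};

// HTTP/HTTP2/GRPC: perRequestTimeout may also be set; 0 disables it.
const httpTimeouts: TimeoutConfiguration = {
  IdleTimeoutSeconds: 300,
  PerRequestTimeoutSeconds: 15,
};
```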
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@awboost/cfn-resource-types",
-   "version": "0.1.321",
+   "version": "0.1.322",
    "publishConfig": {
      "access": "public"
    },