aws-cdk-lib 2.173.4__py3-none-any.whl → 2.174.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of aws-cdk-lib might be problematic. Click here for more details.
- aws_cdk/__init__.py +210 -181
- aws_cdk/_jsii/__init__.py +1 -1
- aws_cdk/_jsii/{aws-cdk-lib@2.173.4.jsii.tgz → aws-cdk-lib@2.174.1.jsii.tgz} +0 -0
- aws_cdk/aws_amazonmq/__init__.py +18 -0
- aws_cdk/aws_apigateway/__init__.py +127 -168
- aws_cdk/aws_appconfig/__init__.py +498 -1
- aws_cdk/aws_applicationautoscaling/__init__.py +4 -1
- aws_cdk/aws_appsync/__init__.py +4 -0
- aws_cdk/aws_autoscaling/__init__.py +96 -0
- aws_cdk/aws_batch/__init__.py +662 -237
- aws_cdk/aws_bedrock/__init__.py +2122 -181
- aws_cdk/aws_cassandra/__init__.py +476 -4
- aws_cdk/aws_cleanrooms/__init__.py +1227 -20
- aws_cdk/aws_cloudformation/__init__.py +172 -169
- aws_cdk/aws_cloudfront/__init__.py +773 -26
- aws_cdk/aws_cloudtrail/__init__.py +8 -2
- aws_cdk/aws_codebuild/__init__.py +83 -0
- aws_cdk/aws_codepipeline/__init__.py +2 -1
- aws_cdk/aws_cognito/__init__.py +232 -200
- aws_cdk/aws_connect/__init__.py +187 -36
- aws_cdk/aws_connectcampaignsv2/__init__.py +106 -12
- aws_cdk/aws_databrew/__init__.py +69 -1
- aws_cdk/aws_datasync/__init__.py +33 -28
- aws_cdk/aws_datazone/__init__.py +141 -41
- aws_cdk/aws_dlm/__init__.py +20 -10
- aws_cdk/aws_dms/__init__.py +736 -22
- aws_cdk/aws_docdb/__init__.py +134 -0
- aws_cdk/aws_dynamodb/__init__.py +76 -4
- aws_cdk/aws_ec2/__init__.py +1233 -65
- aws_cdk/aws_ecr/__init__.py +31 -0
- aws_cdk/aws_ecr_assets/__init__.py +5 -0
- aws_cdk/aws_ecs/__init__.py +88 -11
- aws_cdk/aws_efs/__init__.py +15 -8
- aws_cdk/aws_eks/__init__.py +114 -45
- aws_cdk/aws_elasticloadbalancingv2/__init__.py +2 -2
- aws_cdk/aws_emrserverless/__init__.py +143 -0
- aws_cdk/aws_events/__init__.py +50 -30
- aws_cdk/aws_fis/__init__.py +33 -33
- aws_cdk/aws_fsx/__init__.py +20 -10
- aws_cdk/aws_glue/__init__.py +34 -11
- aws_cdk/aws_greengrass/__init__.py +8 -8
- aws_cdk/aws_guardduty/__init__.py +1 -1
- aws_cdk/aws_imagebuilder/__init__.py +45 -47
- aws_cdk/aws_internetmonitor/__init__.py +3 -3
- aws_cdk/aws_invoicing/__init__.py +55 -30
- aws_cdk/aws_iot/__init__.py +1117 -4
- aws_cdk/aws_iot1click/__init__.py +17 -7
- aws_cdk/aws_kendra/__init__.py +4 -6
- aws_cdk/aws_lambda/__init__.py +15 -1
- aws_cdk/aws_logs/__init__.py +4011 -86
- aws_cdk/aws_m2/__init__.py +41 -0
- aws_cdk/aws_mediaconnect/__init__.py +202 -7
- aws_cdk/aws_mediaconvert/__init__.py +39 -0
- aws_cdk/aws_memorydb/__init__.py +22 -22
- aws_cdk/aws_networkmanager/__init__.py +758 -0
- aws_cdk/aws_opensearchservice/__init__.py +222 -0
- aws_cdk/aws_organizations/__init__.py +5 -3
- aws_cdk/aws_pcs/__init__.py +3038 -0
- aws_cdk/aws_pipes/__init__.py +10 -10
- aws_cdk/aws_qbusiness/__init__.py +415 -140
- aws_cdk/aws_quicksight/__init__.py +59795 -49748
- aws_cdk/aws_ram/__init__.py +5 -11
- aws_cdk/aws_rbin/__init__.py +28 -16
- aws_cdk/aws_rds/__init__.py +186 -31
- aws_cdk/aws_redshiftserverless/__init__.py +156 -0
- aws_cdk/aws_resiliencehub/__init__.py +2 -1
- aws_cdk/aws_route53_targets/__init__.py +5 -5
- aws_cdk/aws_s3/__init__.py +25 -15
- aws_cdk/aws_s3express/__init__.py +7 -7
- aws_cdk/aws_s3tables/__init__.py +683 -0
- aws_cdk/aws_sagemaker/__init__.py +580 -195
- aws_cdk/aws_secretsmanager/__init__.py +2 -0
- aws_cdk/aws_securityhub/__init__.py +41 -165
- aws_cdk/aws_servicediscovery/__init__.py +10 -3
- aws_cdk/aws_ses/__init__.py +190 -5
- aws_cdk/aws_sns/__init__.py +7 -3
- aws_cdk/aws_synthetics/__init__.py +29 -4
- aws_cdk/aws_transfer/__init__.py +8 -0
- aws_cdk/aws_vpclattice/__init__.py +147 -77
- aws_cdk/aws_wafv2/__init__.py +20 -8
- aws_cdk/aws_wisdom/__init__.py +162 -111
- aws_cdk/aws_workspaces/__init__.py +10 -4
- aws_cdk/cloud_assembly_schema/__init__.py +22 -0
- aws_cdk/custom_resources/__init__.py +31 -0
- aws_cdk/cx_api/__init__.py +19 -0
- {aws_cdk_lib-2.173.4.dist-info → aws_cdk_lib-2.174.1.dist-info}/LICENSE +1 -1
- {aws_cdk_lib-2.173.4.dist-info → aws_cdk_lib-2.174.1.dist-info}/METADATA +2 -2
- {aws_cdk_lib-2.173.4.dist-info → aws_cdk_lib-2.174.1.dist-info}/NOTICE +1 -1
- {aws_cdk_lib-2.173.4.dist-info → aws_cdk_lib-2.174.1.dist-info}/RECORD +91 -89
- {aws_cdk_lib-2.173.4.dist-info → aws_cdk_lib-2.174.1.dist-info}/WHEEL +0 -0
- {aws_cdk_lib-2.173.4.dist-info → aws_cdk_lib-2.174.1.dist-info}/top_level.txt +0 -0
aws_cdk/aws_ecr/__init__.py
CHANGED
|
@@ -2940,6 +2940,17 @@ class CfnRepositoryProps:
|
|
|
2940
2940
|
class IRepository(_IResource_c80c4260, typing_extensions.Protocol):
|
|
2941
2941
|
'''Represents an ECR repository.'''
|
|
2942
2942
|
|
|
2943
|
+
@builtins.property
|
|
2944
|
+
@jsii.member(jsii_name="registryUri")
|
|
2945
|
+
def registry_uri(self) -> builtins.str:
|
|
2946
|
+
'''The URI of this repository's registry:.
|
|
2947
|
+
|
|
2948
|
+
ACCOUNT.dkr.ecr.REGION.amazonaws.com
|
|
2949
|
+
|
|
2950
|
+
:attribute: true
|
|
2951
|
+
'''
|
|
2952
|
+
...
|
|
2953
|
+
|
|
2943
2954
|
@builtins.property
|
|
2944
2955
|
@jsii.member(jsii_name="repositoryArn")
|
|
2945
2956
|
def repository_arn(self) -> builtins.str:
|
|
@@ -3176,6 +3187,17 @@ class _IRepositoryProxy(
|
|
|
3176
3187
|
|
|
3177
3188
|
__jsii_type__: typing.ClassVar[str] = "aws-cdk-lib.aws_ecr.IRepository"
|
|
3178
3189
|
|
|
3190
|
+
@builtins.property
|
|
3191
|
+
@jsii.member(jsii_name="registryUri")
|
|
3192
|
+
def registry_uri(self) -> builtins.str:
|
|
3193
|
+
'''The URI of this repository's registry:.
|
|
3194
|
+
|
|
3195
|
+
ACCOUNT.dkr.ecr.REGION.amazonaws.com
|
|
3196
|
+
|
|
3197
|
+
:attribute: true
|
|
3198
|
+
'''
|
|
3199
|
+
return typing.cast(builtins.str, jsii.get(self, "registryUri"))
|
|
3200
|
+
|
|
3179
3201
|
@builtins.property
|
|
3180
3202
|
@jsii.member(jsii_name="repositoryArn")
|
|
3181
3203
|
def repository_arn(self) -> builtins.str:
|
|
@@ -4387,6 +4409,15 @@ class RepositoryBase(
|
|
|
4387
4409
|
check_type(argname="argument tag_or_digest", value=tag_or_digest, expected_type=type_hints["tag_or_digest"])
|
|
4388
4410
|
return typing.cast(builtins.str, jsii.invoke(self, "repositoryUriForTagOrDigest", [tag_or_digest]))
|
|
4389
4411
|
|
|
4412
|
+
@builtins.property
|
|
4413
|
+
@jsii.member(jsii_name="registryUri")
|
|
4414
|
+
def registry_uri(self) -> builtins.str:
|
|
4415
|
+
'''The URI of this repository's registry:.
|
|
4416
|
+
|
|
4417
|
+
ACCOUNT.dkr.ecr.REGION.amazonaws.com
|
|
4418
|
+
'''
|
|
4419
|
+
return typing.cast(builtins.str, jsii.get(self, "registryUri"))
|
|
4420
|
+
|
|
4390
4421
|
@builtins.property
|
|
4391
4422
|
@jsii.member(jsii_name="repositoryArn")
|
|
4392
4423
|
@abc.abstractmethod
|
|
@@ -52,6 +52,11 @@ asset hash.
|
|
|
52
52
|
Additionally, you can supply `buildSecrets`. Your system must have Buildkit
|
|
53
53
|
enabled, see https://docs.docker.com/build/buildkit/.
|
|
54
54
|
|
|
55
|
+
Also, similarly to `@aws-cdk/aws-s3-assets`, you can set the CDK_DOCKER environment
|
|
56
|
+
variable in order to provide a custom Docker executable command or path. This may sometimes
|
|
57
|
+
be needed when building in environments where the standard docker cannot be executed
|
|
58
|
+
(see https://github.com/aws/aws-cdk/issues/8460 for details).
|
|
59
|
+
|
|
55
60
|
SSH agent sockets or keys may be passed to docker build via `buildSsh`.
|
|
56
61
|
|
|
57
62
|
```python
|
aws_cdk/aws_ecs/__init__.py
CHANGED
|
@@ -1998,6 +1998,7 @@ from ..aws_applicationautoscaling import (
|
|
|
1998
1998
|
from ..aws_autoscaling import (
|
|
1999
1999
|
AutoScalingGroup as _AutoScalingGroup_c547a7b9,
|
|
2000
2000
|
BlockDevice as _BlockDevice_0cfc0568,
|
|
2001
|
+
CapacityDistributionStrategy as _CapacityDistributionStrategy_2393ccfe,
|
|
2001
2002
|
CommonAutoScalingGroupProps as _CommonAutoScalingGroupProps_808bbf2d,
|
|
2002
2003
|
GroupMetrics as _GroupMetrics_7cdf729b,
|
|
2003
2004
|
HealthCheck as _HealthCheck_03a4bd5a,
|
|
@@ -2211,6 +2212,7 @@ class AddAutoScalingGroupCapacityOptions:
|
|
|
2211
2212
|
"allow_all_outbound": "allowAllOutbound",
|
|
2212
2213
|
"associate_public_ip_address": "associatePublicIpAddress",
|
|
2213
2214
|
"auto_scaling_group_name": "autoScalingGroupName",
|
|
2215
|
+
"az_capacity_distribution_strategy": "azCapacityDistributionStrategy",
|
|
2214
2216
|
"block_devices": "blockDevices",
|
|
2215
2217
|
"capacity_rebalance": "capacityRebalance",
|
|
2216
2218
|
"cooldown": "cooldown",
|
|
@@ -2252,6 +2254,7 @@ class AddCapacityOptions(
|
|
|
2252
2254
|
allow_all_outbound: typing.Optional[builtins.bool] = None,
|
|
2253
2255
|
associate_public_ip_address: typing.Optional[builtins.bool] = None,
|
|
2254
2256
|
auto_scaling_group_name: typing.Optional[builtins.str] = None,
|
|
2257
|
+
az_capacity_distribution_strategy: typing.Optional[_CapacityDistributionStrategy_2393ccfe] = None,
|
|
2255
2258
|
block_devices: typing.Optional[typing.Sequence[typing.Union[_BlockDevice_0cfc0568, typing.Dict[builtins.str, typing.Any]]]] = None,
|
|
2256
2259
|
capacity_rebalance: typing.Optional[builtins.bool] = None,
|
|
2257
2260
|
cooldown: typing.Optional[_Duration_4839e8c3] = None,
|
|
@@ -2287,6 +2290,7 @@ class AddCapacityOptions(
|
|
|
2287
2290
|
:param allow_all_outbound: Whether the instances can initiate connections to anywhere by default. Default: true
|
|
2288
2291
|
:param associate_public_ip_address: Whether instances in the Auto Scaling Group should have public IP addresses associated with them. ``launchTemplate`` and ``mixedInstancesPolicy`` must not be specified when this property is specified Default: - Use subnet setting.
|
|
2289
2292
|
:param auto_scaling_group_name: The name of the Auto Scaling group. This name must be unique per Region per account. Default: - Auto generated by CloudFormation
|
|
2293
|
+
:param az_capacity_distribution_strategy: The strategy for distributing instances across Availability Zones. Default: None
|
|
2290
2294
|
:param block_devices: Specifies how block devices are exposed to the instance. You can specify virtual devices and EBS volumes. Each instance that is launched has an associated root device volume, either an Amazon EBS volume or an instance store volume. You can use block device mappings to specify additional EBS volumes or instance store volumes to attach to an instance when it is launched. ``launchTemplate`` and ``mixedInstancesPolicy`` must not be specified when this property is specified Default: - Uses the block device mapping of the AMI
|
|
2291
2295
|
:param capacity_rebalance: Indicates whether Capacity Rebalancing is enabled. When you turn on Capacity Rebalancing, Amazon EC2 Auto Scaling attempts to launch a Spot Instance whenever Amazon EC2 notifies that a Spot Instance is at an elevated risk of interruption. After launching a new instance, it then terminates an old instance. Default: false
|
|
2292
2296
|
:param cooldown: Default scaling cooldown for this AutoScalingGroup. Default: Duration.minutes(5)
|
|
@@ -2337,6 +2341,7 @@ class AddCapacityOptions(
|
|
|
2337
2341
|
check_type(argname="argument allow_all_outbound", value=allow_all_outbound, expected_type=type_hints["allow_all_outbound"])
|
|
2338
2342
|
check_type(argname="argument associate_public_ip_address", value=associate_public_ip_address, expected_type=type_hints["associate_public_ip_address"])
|
|
2339
2343
|
check_type(argname="argument auto_scaling_group_name", value=auto_scaling_group_name, expected_type=type_hints["auto_scaling_group_name"])
|
|
2344
|
+
check_type(argname="argument az_capacity_distribution_strategy", value=az_capacity_distribution_strategy, expected_type=type_hints["az_capacity_distribution_strategy"])
|
|
2340
2345
|
check_type(argname="argument block_devices", value=block_devices, expected_type=type_hints["block_devices"])
|
|
2341
2346
|
check_type(argname="argument capacity_rebalance", value=capacity_rebalance, expected_type=type_hints["capacity_rebalance"])
|
|
2342
2347
|
check_type(argname="argument cooldown", value=cooldown, expected_type=type_hints["cooldown"])
|
|
@@ -2379,6 +2384,8 @@ class AddCapacityOptions(
|
|
|
2379
2384
|
self._values["associate_public_ip_address"] = associate_public_ip_address
|
|
2380
2385
|
if auto_scaling_group_name is not None:
|
|
2381
2386
|
self._values["auto_scaling_group_name"] = auto_scaling_group_name
|
|
2387
|
+
if az_capacity_distribution_strategy is not None:
|
|
2388
|
+
self._values["az_capacity_distribution_strategy"] = az_capacity_distribution_strategy
|
|
2382
2389
|
if block_devices is not None:
|
|
2383
2390
|
self._values["block_devices"] = block_devices
|
|
2384
2391
|
if capacity_rebalance is not None:
|
|
@@ -2505,6 +2512,17 @@ class AddCapacityOptions(
|
|
|
2505
2512
|
result = self._values.get("auto_scaling_group_name")
|
|
2506
2513
|
return typing.cast(typing.Optional[builtins.str], result)
|
|
2507
2514
|
|
|
2515
|
+
@builtins.property
|
|
2516
|
+
def az_capacity_distribution_strategy(
|
|
2517
|
+
self,
|
|
2518
|
+
) -> typing.Optional[_CapacityDistributionStrategy_2393ccfe]:
|
|
2519
|
+
'''The strategy for distributing instances across Availability Zones.
|
|
2520
|
+
|
|
2521
|
+
:default: None
|
|
2522
|
+
'''
|
|
2523
|
+
result = self._values.get("az_capacity_distribution_strategy")
|
|
2524
|
+
return typing.cast(typing.Optional[_CapacityDistributionStrategy_2393ccfe], result)
|
|
2525
|
+
|
|
2508
2526
|
@builtins.property
|
|
2509
2527
|
def block_devices(self) -> typing.Optional[typing.List[_BlockDevice_0cfc0568]]:
|
|
2510
2528
|
'''Specifies how block devices are exposed to the instance. You can specify virtual devices and EBS volumes.
|
|
@@ -8342,7 +8360,7 @@ class CfnService(
|
|
|
8342
8360
|
:param scope: Scope in which this resource is defined.
|
|
8343
8361
|
:param id: Construct identifier for this resource (unique in its scope).
|
|
8344
8362
|
:param availability_zone_rebalancing: Indicates whether to use Availability Zone rebalancing for the service. For more information, see `Balancing an Amazon ECS service across Availability Zones <https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-rebalancing.html>`_ in the *Amazon Elastic Container Service Developer Guide* . Default: - "DISABLED"
|
|
8345
|
-
:param capacity_provider_strategy: The capacity provider strategy to use for the service. If a ``capacityProviderStrategy`` is specified, the ``launchType`` parameter must be omitted. If no ``capacityProviderStrategy`` or ``launchType`` is specified, the ``defaultCapacityProviderStrategy`` for the cluster is used. A capacity provider strategy
|
|
8363
|
+
:param capacity_provider_strategy: The capacity provider strategy to use for the service. If a ``capacityProviderStrategy`` is specified, the ``launchType`` parameter must be omitted. If no ``capacityProviderStrategy`` or ``launchType`` is specified, the ``defaultCapacityProviderStrategy`` for the cluster is used. A capacity provider strategy can contain a maximum of 20 capacity providers.
|
|
8346
8364
|
:param cluster: The short name or full Amazon Resource Name (ARN) of the cluster that you run your service on. If you do not specify a cluster, the default cluster is assumed.
|
|
8347
8365
|
:param deployment_configuration: Optional deployment parameters that control how many tasks run during the deployment and the ordering of stopping and starting tasks.
|
|
8348
8366
|
:param deployment_controller: The deployment controller to use for the service. If no deployment controller is specified, the default value of ``ECS`` is used.
|
|
@@ -9272,8 +9290,8 @@ class CfnService(
|
|
|
9272
9290
|
|
|
9273
9291
|
:param alarms: Information about the CloudWatch alarms.
|
|
9274
9292
|
:param deployment_circuit_breaker: .. epigraph:: The deployment circuit breaker can only be used for services using the rolling update ( ``ECS`` ) deployment type. The *deployment circuit breaker* determines whether a service deployment will fail if the service can't reach a steady state. If you use the deployment circuit breaker, a service deployment will transition to a failed state and stop launching new tasks. If you use the rollback option, when a service deployment fails, the service is rolled back to the last deployment that completed successfully. For more information, see `Rolling update <https://docs.aws.amazon.com/AmazonECS/latest/developerguide/deployment-type-ecs.html>`_ in the *Amazon Elastic Container Service Developer Guide*
|
|
9275
|
-
:param maximum_percent: If a service is using the rolling update ( ``ECS`` ) deployment type, the ``maximumPercent`` parameter represents an upper limit on the number of your service's tasks that are allowed in the ``RUNNING`` or ``PENDING`` state during a deployment, as a percentage of the ``desiredCount`` (rounded down to the nearest integer). This parameter enables you to define the deployment batch size. For example, if your service is using the ``REPLICA`` service scheduler and has a ``desiredCount`` of four tasks and a ``maximumPercent`` value of 200%, the scheduler may start four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). The default ``maximumPercent`` value for a service using the ``REPLICA`` service scheduler is 200%. If a service is using either the blue/green ( ``CODE_DEPLOY`` ) or ``EXTERNAL`` deployment types, and tasks in the service use the EC2 launch type, the *maximum percent* value is set to the default value. The *maximum percent* value is used to define the upper limit on the number of the tasks in the service that remain in the ``RUNNING`` state while the container instances are in the ``DRAINING`` state. .. epigraph:: You can't specify a custom ``maximumPercent`` value for a service that uses either the blue/green ( ``CODE_DEPLOY`` ) or ``EXTERNAL`` deployment types and has tasks that use the EC2 launch type. If the tasks in the service use the Fargate launch type, the maximum percent value is not used
|
|
9276
|
-
:param minimum_healthy_percent: If a service is using the rolling update ( ``ECS`` ) deployment type, the ``minimumHealthyPercent`` represents a lower limit on the number of your service's tasks that must remain in the ``RUNNING`` state during a deployment, as a percentage of the ``desiredCount`` (rounded up to the nearest integer). This parameter enables you to deploy without using additional cluster capacity. For example, if your service has a ``desiredCount`` of four tasks and a ``minimumHealthyPercent`` of 50%, the service scheduler may stop two existing tasks to free up cluster capacity before starting two new tasks. For services that *do not* use a load balancer, the following should be noted: - A service is considered healthy if all essential containers within the tasks in the service pass their health checks. - If a task has no essential containers with a health check defined, the service scheduler will wait for 40 seconds after a task reaches a ``RUNNING`` state before the task is counted towards the minimum healthy percent total. - If a task has one or more essential containers with a health check defined, the service scheduler will wait for the task to reach a healthy status before counting it towards the minimum healthy percent total. A task is considered healthy when all essential containers within the task have passed their health checks. The amount of time the service scheduler can wait for is determined by the container health check settings. For services that *do* use a load balancer, the following should be noted: - If a task has no essential containers with a health check defined, the service scheduler will wait for the load balancer target group health check to return a healthy status before counting the task towards the minimum healthy percent total. 
- If a task has an essential container with a health check defined, the service scheduler will wait for both the task to reach a healthy status and the load balancer target group health check to return a healthy status before counting the task towards the minimum healthy percent total. The default value for a replica service for ``minimumHealthyPercent`` is 100%. The default ``minimumHealthyPercent`` value for a service using the ``DAEMON`` service schedule is 0% for the AWS CLI , the AWS SDKs, and the APIs and 50% for the AWS Management Console. The minimum number of healthy tasks during a deployment is the ``desiredCount`` multiplied by the ``minimumHealthyPercent`` /100, rounded up to the nearest integer value. If a service is using either the blue/green ( ``CODE_DEPLOY`` ) or ``EXTERNAL`` deployment types and is running tasks that use the EC2 launch type, the *minimum healthy percent* value is set to the default value. The *minimum healthy percent* value is used to define the lower limit on the number of the tasks in the service that remain in the ``RUNNING`` state while the container instances are in the ``DRAINING`` state. .. epigraph:: You can't specify a custom ``minimumHealthyPercent`` value for a service that uses either the blue/green ( ``CODE_DEPLOY`` ) or ``EXTERNAL`` deployment types and has tasks that use the EC2 launch type. If a service is using either the blue/green ( ``CODE_DEPLOY`` ) or ``EXTERNAL`` deployment types and is running tasks that use the Fargate launch type, the minimum healthy percent value is not used, although it is returned when describing your service.
|
|
9293
|
+
:param maximum_percent: If a service is using the rolling update ( ``ECS`` ) deployment type, the ``maximumPercent`` parameter represents an upper limit on the number of your service's tasks that are allowed in the ``RUNNING`` or ``PENDING`` state during a deployment, as a percentage of the ``desiredCount`` (rounded down to the nearest integer). This parameter enables you to define the deployment batch size. For example, if your service is using the ``REPLICA`` service scheduler and has a ``desiredCount`` of four tasks and a ``maximumPercent`` value of 200%, the scheduler may start four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). The default ``maximumPercent`` value for a service using the ``REPLICA`` service scheduler is 200%. The Amazon ECS scheduler uses this parameter to replace unhealthy tasks by starting replacement tasks first and then stopping the unhealthy tasks, as long as cluster resources for starting replacement tasks are available. For more information about how the scheduler replaces unhealthy tasks, see `Amazon ECS services <https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs_services.html>`_ . If a service is using either the blue/green ( ``CODE_DEPLOY`` ) or ``EXTERNAL`` deployment types, and tasks in the service use the EC2 launch type, the *maximum percent* value is set to the default value. The *maximum percent* value is used to define the upper limit on the number of the tasks in the service that remain in the ``RUNNING`` state while the container instances are in the ``DRAINING`` state. .. epigraph:: You can't specify a custom ``maximumPercent`` value for a service that uses either the blue/green ( ``CODE_DEPLOY`` ) or ``EXTERNAL`` deployment types and has tasks that use the EC2 launch type. 
If the service uses either the blue/green ( ``CODE_DEPLOY`` ) or ``EXTERNAL`` deployment types, and the tasks in the service use the Fargate launch type, the maximum percent value is not used. The value is still returned when describing your service.
|
|
9294
|
+
:param minimum_healthy_percent: If a service is using the rolling update ( ``ECS`` ) deployment type, the ``minimumHealthyPercent`` represents a lower limit on the number of your service's tasks that must remain in the ``RUNNING`` state during a deployment, as a percentage of the ``desiredCount`` (rounded up to the nearest integer). This parameter enables you to deploy without using additional cluster capacity. For example, if your service has a ``desiredCount`` of four tasks and a ``minimumHealthyPercent`` of 50%, the service scheduler may stop two existing tasks to free up cluster capacity before starting two new tasks. If any tasks are unhealthy and if ``maximumPercent`` doesn't allow the Amazon ECS scheduler to start replacement tasks, the scheduler stops the unhealthy tasks one-by-one — using the ``minimumHealthyPercent`` as a constraint — to clear up capacity to launch replacement tasks. For more information about how the scheduler replaces unhealthy tasks, see `Amazon ECS services <https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs_services.html>`_ . For services that *do not* use a load balancer, the following should be noted: - A service is considered healthy if all essential containers within the tasks in the service pass their health checks. - If a task has no essential containers with a health check defined, the service scheduler will wait for 40 seconds after a task reaches a ``RUNNING`` state before the task is counted towards the minimum healthy percent total. - If a task has one or more essential containers with a health check defined, the service scheduler will wait for the task to reach a healthy status before counting it towards the minimum healthy percent total. A task is considered healthy when all essential containers within the task have passed their health checks. The amount of time the service scheduler can wait for is determined by the container health check settings. 
For services that *do* use a load balancer, the following should be noted: - If a task has no essential containers with a health check defined, the service scheduler will wait for the load balancer target group health check to return a healthy status before counting the task towards the minimum healthy percent total. - If a task has an essential container with a health check defined, the service scheduler will wait for both the task to reach a healthy status and the load balancer target group health check to return a healthy status before counting the task towards the minimum healthy percent total. The default value for a replica service for ``minimumHealthyPercent`` is 100%. The default ``minimumHealthyPercent`` value for a service using the ``DAEMON`` service schedule is 0% for the AWS CLI , the AWS SDKs, and the APIs and 50% for the AWS Management Console. The minimum number of healthy tasks during a deployment is the ``desiredCount`` multiplied by the ``minimumHealthyPercent`` /100, rounded up to the nearest integer value. If a service is using either the blue/green ( ``CODE_DEPLOY`` ) or ``EXTERNAL`` deployment types and is running tasks that use the EC2 launch type, the *minimum healthy percent* value is set to the default value. The *minimum healthy percent* value is used to define the lower limit on the number of the tasks in the service that remain in the ``RUNNING`` state while the container instances are in the ``DRAINING`` state. .. epigraph:: You can't specify a custom ``minimumHealthyPercent`` value for a service that uses either the blue/green ( ``CODE_DEPLOY`` ) or ``EXTERNAL`` deployment types and has tasks that use the EC2 launch type. If a service is using either the blue/green ( ``CODE_DEPLOY`` ) or ``EXTERNAL`` deployment types and is running tasks that use the Fargate launch type, the minimum healthy percent value is not used, although it is returned when describing your service.
|
|
9277
9295
|
|
|
9278
9296
|
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ecs-service-deploymentconfiguration.html
|
|
9279
9297
|
:exampleMetadata: fixture=_generated
|
|
@@ -9346,12 +9364,14 @@ class CfnService(
|
|
|
9346
9364
|
|
|
9347
9365
|
This parameter enables you to define the deployment batch size. For example, if your service is using the ``REPLICA`` service scheduler and has a ``desiredCount`` of four tasks and a ``maximumPercent`` value of 200%, the scheduler may start four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). The default ``maximumPercent`` value for a service using the ``REPLICA`` service scheduler is 200%.
|
|
9348
9366
|
|
|
9367
|
+
The Amazon ECS scheduler uses this parameter to replace unhealthy tasks by starting replacement tasks first and then stopping the unhealthy tasks, as long as cluster resources for starting replacement tasks are available. For more information about how the scheduler replaces unhealthy tasks, see `Amazon ECS services <https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs_services.html>`_ .
|
|
9368
|
+
|
|
9349
9369
|
If a service is using either the blue/green ( ``CODE_DEPLOY`` ) or ``EXTERNAL`` deployment types, and tasks in the service use the EC2 launch type, the *maximum percent* value is set to the default value. The *maximum percent* value is used to define the upper limit on the number of the tasks in the service that remain in the ``RUNNING`` state while the container instances are in the ``DRAINING`` state.
|
|
9350
9370
|
.. epigraph::
|
|
9351
9371
|
|
|
9352
9372
|
You can't specify a custom ``maximumPercent`` value for a service that uses either the blue/green ( ``CODE_DEPLOY`` ) or ``EXTERNAL`` deployment types and has tasks that use the EC2 launch type.
|
|
9353
9373
|
|
|
9354
|
-
If the tasks in the service use the Fargate launch type, the maximum percent value is not used
|
|
9374
|
+
If the service uses either the blue/green ( ``CODE_DEPLOY`` ) or ``EXTERNAL`` deployment types, and the tasks in the service use the Fargate launch type, the maximum percent value is not used. The value is still returned when describing your service.
|
|
9355
9375
|
|
|
9356
9376
|
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ecs-service-deploymentconfiguration.html#cfn-ecs-service-deploymentconfiguration-maximumpercent
|
|
9357
9377
|
'''
|
|
@@ -9364,6 +9384,8 @@ class CfnService(
|
|
|
9364
9384
|
|
|
9365
9385
|
This parameter enables you to deploy without using additional cluster capacity. For example, if your service has a ``desiredCount`` of four tasks and a ``minimumHealthyPercent`` of 50%, the service scheduler may stop two existing tasks to free up cluster capacity before starting two new tasks.
|
|
9366
9386
|
|
|
9387
|
+
If any tasks are unhealthy and if ``maximumPercent`` doesn't allow the Amazon ECS scheduler to start replacement tasks, the scheduler stops the unhealthy tasks one-by-one — using the ``minimumHealthyPercent`` as a constraint — to clear up capacity to launch replacement tasks. For more information about how the scheduler replaces unhealthy tasks, see `Amazon ECS services <https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs_services.html>`_ .
|
|
9388
|
+
|
|
9367
9389
|
For services that *do not* use a load balancer, the following should be noted:
|
|
9368
9390
|
|
|
9369
9391
|
- A service is considered healthy if all essential containers within the tasks in the service pass their health checks.
|
|
@@ -9716,7 +9738,7 @@ class CfnService(
|
|
|
9716
9738
|
- For tasks that are on AWS Fargate , because you don't have access to the underlying infrastructure your tasks are hosted on, any additional software needed must be installed outside of the task. For example, the Fluentd output aggregators or a remote host running Logstash to send Gelf logs to.
|
|
9717
9739
|
|
|
9718
9740
|
:param log_driver: The log driver to use for the container. For tasks on AWS Fargate , the supported log drivers are ``awslogs`` , ``splunk`` , and ``awsfirelens`` . For tasks hosted on Amazon EC2 instances, the supported log drivers are ``awslogs`` , ``fluentd`` , ``gelf`` , ``json-file`` , ``journald`` , ``syslog`` , ``splunk`` , and ``awsfirelens`` . For more information about using the ``awslogs`` log driver, see `Send Amazon ECS logs to CloudWatch <https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using_awslogs.html>`_ in the *Amazon Elastic Container Service Developer Guide* . For more information about using the ``awsfirelens`` log driver, see `Send Amazon ECS logs to an AWS service or AWS Partner <https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using_firelens.html>`_ . .. epigraph:: If you have a custom driver that isn't listed, you can fork the Amazon ECS container agent project that's `available on GitHub <https://docs.aws.amazon.com/https://github.com/aws/amazon-ecs-agent>`_ and customize it to work with that driver. We encourage you to submit pull requests for changes that you would like to have included. However, we don't currently provide support for running modified copies of this software.
|
|
9719
|
-
:param options: The configuration options to send to the log driver. The options you can specify depend on the log driver. Some of the options you can specify when you use the ``awslogs`` log driver to route logs to Amazon CloudWatch include the following: - **awslogs-create-group** - Required: No Specify whether you want the log group to be created automatically. If this option isn't specified, it defaults to ``false`` . .. epigraph:: Your IAM policy must include the ``logs:CreateLogGroup`` permission before you attempt to use ``awslogs-create-group`` . - **awslogs-region** - Required: Yes Specify the AWS Region that the ``awslogs`` log driver is to send your Docker logs to. You can choose to send all of your logs from clusters in different Regions to a single region in CloudWatch Logs. This is so that they're all visible in one location. Otherwise, you can separate them by Region for more granularity. Make sure that the specified log group exists in the Region that you specify with this option. - **awslogs-group** - Required: Yes Make sure to specify a log group that the ``awslogs`` log driver sends its log streams to. - **awslogs-stream-prefix** - Required: Yes, when using the Fargate launch type.Optional for the EC2 launch type, required for the Fargate launch type. Use the ``awslogs-stream-prefix`` option to associate a log stream with the specified prefix, the container name, and the ID of the Amazon ECS task that the container belongs to. If you specify a prefix with this option, then the log stream takes the format ``prefix-name/container-name/ecs-task-id`` . If you don't specify a prefix with this option, then the log stream is named after the container ID that's assigned by the Docker daemon on the container instance. Because it's difficult to trace logs back to the container that sent them with just the Docker container ID (which is only available on the container instance), we recommend that you specify a prefix with this option. 
For Amazon ECS services, you can use the service name as the prefix. Doing so, you can trace log streams to the service that the container belongs to, the name of the container that sent them, and the ID of the task that the container belongs to. You must specify a stream-prefix for your logs to have your logs appear in the Log pane when using the Amazon ECS console. - **awslogs-datetime-format** - Required: No This option defines a multiline start pattern in Python ``strftime`` format. A log message consists of a line that matches the pattern and any following lines that don’t match the pattern. The matched line is the delimiter between log messages. One example of a use case for using this format is for parsing output such as a stack dump, which might otherwise be logged in multiple entries. The correct pattern allows it to be captured in a single entry. For more information, see `awslogs-datetime-format <https://docs.aws.amazon.com/https://docs.docker.com/config/containers/logging/awslogs/#awslogs-datetime-format>`_ . You cannot configure both the ``awslogs-datetime-format`` and ``awslogs-multiline-pattern`` options. .. epigraph:: Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance. - **awslogs-multiline-pattern** - Required: No This option defines a multiline start pattern that uses a regular expression. A log message consists of a line that matches the pattern and any following lines that don’t match the pattern. The matched line is the delimiter between log messages. For more information, see `awslogs-multiline-pattern <https://docs.aws.amazon.com/https://docs.docker.com/config/containers/logging/awslogs/#awslogs-multiline-pattern>`_ . This option is ignored if ``awslogs-datetime-format`` is also configured. You cannot configure both the ``awslogs-datetime-format`` and ``awslogs-multiline-pattern`` options. .. 
epigraph:: Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance. - **mode** - Required: No Valid values: ``non-blocking`` | ``blocking`` This option defines the delivery mode of log messages from the container to CloudWatch Logs. The delivery mode you choose affects application availability when the flow of logs from container to CloudWatch is interrupted. If you use the ``blocking`` mode and the flow of logs to CloudWatch is interrupted, calls from container code to write to the ``stdout`` and ``stderr`` streams will block. The logging thread of the application will block as a result. This may cause the application to become unresponsive and lead to container healthcheck failure. If you use the ``non-blocking`` mode, the container's logs are instead stored in an in-memory intermediate buffer configured with the ``max-buffer-size`` option. This prevents the application from becoming unresponsive when logs cannot be sent to CloudWatch. We recommend using this mode if you want to ensure service availability and are okay with some log loss. For more information, see `Preventing log loss with non-blocking mode in the ``awslogs`` container log driver <https://docs.aws.amazon.com/containers/preventing-log-loss-with-non-blocking-mode-in-the-awslogs-container-log-driver/>`_ . - **max-buffer-size** - Required: No Default value: ``1m`` When ``non-blocking`` mode is used, the ``max-buffer-size`` log option controls the size of the buffer that's used for intermediate message storage. Make sure to specify an adequate buffer size based on your application. When the buffer fills up, further logs cannot be stored. Logs that cannot be stored are lost. To route logs using the ``splunk`` log router, you need to specify a ``splunk-token`` and a ``splunk-url`` . 
When you use the ``awsfirelens`` log router to route logs to an AWS Service or AWS Partner Network destination for log storage and analytics, you can set the ``log-driver-buffer-limit`` option to limit the number of events that are buffered in memory, before being sent to the log router container. It can help to resolve potential log loss issue because high throughput might result in memory running out for the buffer inside of Docker. Other options you can specify when using ``awsfirelens`` to route logs depend on the destination. When you export logs to Amazon Data Firehose, you can specify the AWS Region with ``region`` and a name for the log stream with ``delivery_stream`` . When you export logs to Amazon Kinesis Data Streams, you can specify an AWS Region with ``region`` and a data stream name with ``stream`` . When you export logs to Amazon OpenSearch Service, you can specify options like ``Name`` , ``Host`` (OpenSearch Service endpoint without protocol), ``Port`` , ``Index`` , ``Type`` , ``Aws_auth`` , ``Aws_region`` , ``Suppress_Type_Name`` , and ``tls`` . When you export logs to Amazon S3, you can specify the bucket using the ``bucket`` option. You can also specify ``region`` , ``total_file_size`` , ``upload_timeout`` , and ``use_put_object`` as options. This parameter requires version 1.19 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: ``sudo docker version --format '{{.Server.APIVersion}}'``
|
|
9741
|
+
:param options: The configuration options to send to the log driver. The options you can specify depend on the log driver. Some of the options you can specify when you use the ``awslogs`` log driver to route logs to Amazon CloudWatch include the following: - **awslogs-create-group** - Required: No Specify whether you want the log group to be created automatically. If this option isn't specified, it defaults to ``false`` . .. epigraph:: Your IAM policy must include the ``logs:CreateLogGroup`` permission before you attempt to use ``awslogs-create-group`` . - **awslogs-region** - Required: Yes Specify the AWS Region that the ``awslogs`` log driver is to send your Docker logs to. You can choose to send all of your logs from clusters in different Regions to a single region in CloudWatch Logs. This is so that they're all visible in one location. Otherwise, you can separate them by Region for more granularity. Make sure that the specified log group exists in the Region that you specify with this option. - **awslogs-group** - Required: Yes Make sure to specify a log group that the ``awslogs`` log driver sends its log streams to. - **awslogs-stream-prefix** - Required: Yes, when using the Fargate launch type.Optional for the EC2 launch type, required for the Fargate launch type. Use the ``awslogs-stream-prefix`` option to associate a log stream with the specified prefix, the container name, and the ID of the Amazon ECS task that the container belongs to. If you specify a prefix with this option, then the log stream takes the format ``prefix-name/container-name/ecs-task-id`` . If you don't specify a prefix with this option, then the log stream is named after the container ID that's assigned by the Docker daemon on the container instance. Because it's difficult to trace logs back to the container that sent them with just the Docker container ID (which is only available on the container instance), we recommend that you specify a prefix with this option. 
For Amazon ECS services, you can use the service name as the prefix. Doing so, you can trace log streams to the service that the container belongs to, the name of the container that sent them, and the ID of the task that the container belongs to. You must specify a stream-prefix for your logs to have your logs appear in the Log pane when using the Amazon ECS console. - **awslogs-datetime-format** - Required: No This option defines a multiline start pattern in Python ``strftime`` format. A log message consists of a line that matches the pattern and any following lines that don’t match the pattern. The matched line is the delimiter between log messages. One example of a use case for using this format is for parsing output such as a stack dump, which might otherwise be logged in multiple entries. The correct pattern allows it to be captured in a single entry. For more information, see `awslogs-datetime-format <https://docs.aws.amazon.com/https://docs.docker.com/config/containers/logging/awslogs/#awslogs-datetime-format>`_ . You cannot configure both the ``awslogs-datetime-format`` and ``awslogs-multiline-pattern`` options. .. epigraph:: Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance. - **awslogs-multiline-pattern** - Required: No This option defines a multiline start pattern that uses a regular expression. A log message consists of a line that matches the pattern and any following lines that don’t match the pattern. The matched line is the delimiter between log messages. For more information, see `awslogs-multiline-pattern <https://docs.aws.amazon.com/https://docs.docker.com/config/containers/logging/awslogs/#awslogs-multiline-pattern>`_ . This option is ignored if ``awslogs-datetime-format`` is also configured. You cannot configure both the ``awslogs-datetime-format`` and ``awslogs-multiline-pattern`` options. .. 
epigraph:: Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance. - **mode** - Required: No Valid values: ``non-blocking`` | ``blocking`` This option defines the delivery mode of log messages from the container to CloudWatch Logs. The delivery mode you choose affects application availability when the flow of logs from container to CloudWatch is interrupted. If you use the ``blocking`` mode and the flow of logs to CloudWatch is interrupted, calls from container code to write to the ``stdout`` and ``stderr`` streams will block. The logging thread of the application will block as a result. This may cause the application to become unresponsive and lead to container healthcheck failure. If you use the ``non-blocking`` mode, the container's logs are instead stored in an in-memory intermediate buffer configured with the ``max-buffer-size`` option. This prevents the application from becoming unresponsive when logs cannot be sent to CloudWatch. We recommend using this mode if you want to ensure service availability and are okay with some log loss. For more information, see `Preventing log loss with non-blocking mode in the ``awslogs`` container log driver <https://docs.aws.amazon.com/containers/preventing-log-loss-with-non-blocking-mode-in-the-awslogs-container-log-driver/>`_ . - **max-buffer-size** - Required: No Default value: ``1m`` When ``non-blocking`` mode is used, the ``max-buffer-size`` log option controls the size of the buffer that's used for intermediate message storage. Make sure to specify an adequate buffer size based on your application. When the buffer fills up, further logs cannot be stored. Logs that cannot be stored are lost. To route logs using the ``splunk`` log router, you need to specify a ``splunk-token`` and a ``splunk-url`` . 
When you use the ``awsfirelens`` log router to route logs to an AWS Service or AWS Partner Network destination for log storage and analytics, you can set the ``log-driver-buffer-limit`` option to limit the number of events that are buffered in memory, before being sent to the log router container. It can help to resolve potential log loss issue because high throughput might result in memory running out for the buffer inside of Docker. Other options you can specify when using ``awsfirelens`` to route logs depend on the destination. When you export logs to Amazon Data Firehose, you can specify the AWS Region with ``region`` and a name for the log stream with ``delivery_stream`` . When you export logs to Amazon Kinesis Data Streams, you can specify an AWS Region with ``region`` and a data stream name with ``stream`` . When you export logs to Amazon OpenSearch Service, you can specify options like ``Name`` , ``Host`` (OpenSearch Service endpoint without protocol), ``Port`` , ``Index`` , ``Type`` , ``Aws_auth`` , ``Aws_region`` , ``Suppress_Type_Name`` , and ``tls`` . For more information, see `Under the hood: FireLens for Amazon ECS Tasks <https://docs.aws.amazon.com/containers/under-the-hood-firelens-for-amazon-ecs-tasks/>`_ . When you export logs to Amazon S3, you can specify the bucket using the ``bucket`` option. You can also specify ``region`` , ``total_file_size`` , ``upload_timeout`` , and ``use_put_object`` as options. This parameter requires version 1.19 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: ``sudo docker version --format '{{.Server.APIVersion}}'``
|
|
9720
9742
|
:param secret_options: The secrets to pass to the log configuration. For more information, see `Specifying sensitive data <https://docs.aws.amazon.com/AmazonECS/latest/developerguide/specifying-sensitive-data.html>`_ in the *Amazon Elastic Container Service Developer Guide* .
|
|
9721
9743
|
|
|
9722
9744
|
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ecs-service-logconfiguration.html
|
|
@@ -9855,7 +9877,7 @@ class CfnService(
|
|
|
9855
9877
|
|
|
9856
9878
|
When you export logs to Amazon Kinesis Data Streams, you can specify an AWS Region with ``region`` and a data stream name with ``stream`` .
|
|
9857
9879
|
|
|
9858
|
-
When you export logs to Amazon OpenSearch Service, you can specify options like ``Name`` , ``Host`` (OpenSearch Service endpoint without protocol), ``Port`` , ``Index`` , ``Type`` , ``Aws_auth`` , ``Aws_region`` , ``Suppress_Type_Name`` , and ``tls`` .
|
|
9880
|
+
When you export logs to Amazon OpenSearch Service, you can specify options like ``Name`` , ``Host`` (OpenSearch Service endpoint without protocol), ``Port`` , ``Index`` , ``Type`` , ``Aws_auth`` , ``Aws_region`` , ``Suppress_Type_Name`` , and ``tls`` . For more information, see `Under the hood: FireLens for Amazon ECS Tasks <https://docs.aws.amazon.com/containers/under-the-hood-firelens-for-amazon-ecs-tasks/>`_ .
|
|
9859
9881
|
|
|
9860
9882
|
When you export logs to Amazon S3, you can specify the bucket using the ``bucket`` option. You can also specify ``region`` , ``total_file_size`` , ``upload_timeout`` , and ``use_put_object`` as options.
|
|
9861
9883
|
|
|
@@ -11532,7 +11554,7 @@ class CfnServiceProps:
|
|
|
11532
11554
|
'''Properties for defining a ``CfnService``.
|
|
11533
11555
|
|
|
11534
11556
|
:param availability_zone_rebalancing: Indicates whether to use Availability Zone rebalancing for the service. For more information, see `Balancing an Amazon ECS service across Availability Zones <https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-rebalancing.html>`_ in the *Amazon Elastic Container Service Developer Guide* . Default: - "DISABLED"
|
|
11535
|
-
:param capacity_provider_strategy: The capacity provider strategy to use for the service. If a ``capacityProviderStrategy`` is specified, the ``launchType`` parameter must be omitted. If no ``capacityProviderStrategy`` or ``launchType`` is specified, the ``defaultCapacityProviderStrategy`` for the cluster is used. A capacity provider strategy
|
|
11557
|
+
:param capacity_provider_strategy: The capacity provider strategy to use for the service. If a ``capacityProviderStrategy`` is specified, the ``launchType`` parameter must be omitted. If no ``capacityProviderStrategy`` or ``launchType`` is specified, the ``defaultCapacityProviderStrategy`` for the cluster is used. A capacity provider strategy can contain a maximum of 20 capacity providers.
|
|
11536
11558
|
:param cluster: The short name or full Amazon Resource Name (ARN) of the cluster that you run your service on. If you do not specify a cluster, the default cluster is assumed.
|
|
11537
11559
|
:param deployment_configuration: Optional deployment parameters that control how many tasks run during the deployment and the ordering of stopping and starting tasks.
|
|
11538
11560
|
:param deployment_controller: The deployment controller to use for the service. If no deployment controller is specified, the default value of ``ECS`` is used.
|
|
@@ -11813,7 +11835,7 @@ class CfnServiceProps:
|
|
|
11813
11835
|
|
|
11814
11836
|
If a ``capacityProviderStrategy`` is specified, the ``launchType`` parameter must be omitted. If no ``capacityProviderStrategy`` or ``launchType`` is specified, the ``defaultCapacityProviderStrategy`` for the cluster is used.
|
|
11815
11837
|
|
|
11816
|
-
A capacity provider strategy
|
|
11838
|
+
A capacity provider strategy can contain a maximum of 20 capacity providers.
|
|
11817
11839
|
|
|
11818
11840
|
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ecs-service.html#cfn-ecs-service-capacityproviderstrategy
|
|
11819
11841
|
'''
|
|
@@ -12314,6 +12336,7 @@ class CfnTaskDefinition(
|
|
|
12314
12336
|
working_directory="workingDirectory"
|
|
12315
12337
|
)],
|
|
12316
12338
|
cpu="cpu",
|
|
12339
|
+
enable_fault_injection=False,
|
|
12317
12340
|
ephemeral_storage=ecs.CfnTaskDefinition.EphemeralStorageProperty(
|
|
12318
12341
|
size_in_gi_b=123
|
|
12319
12342
|
),
|
|
@@ -12403,6 +12426,7 @@ class CfnTaskDefinition(
|
|
|
12403
12426
|
*,
|
|
12404
12427
|
container_definitions: typing.Optional[typing.Union[_IResolvable_da3f097b, typing.Sequence[typing.Union[_IResolvable_da3f097b, typing.Union["CfnTaskDefinition.ContainerDefinitionProperty", typing.Dict[builtins.str, typing.Any]]]]]] = None,
|
|
12405
12428
|
cpu: typing.Optional[builtins.str] = None,
|
|
12429
|
+
enable_fault_injection: typing.Optional[typing.Union[builtins.bool, _IResolvable_da3f097b]] = None,
|
|
12406
12430
|
ephemeral_storage: typing.Optional[typing.Union[_IResolvable_da3f097b, typing.Union["CfnTaskDefinition.EphemeralStorageProperty", typing.Dict[builtins.str, typing.Any]]]] = None,
|
|
12407
12431
|
execution_role_arn: typing.Optional[builtins.str] = None,
|
|
12408
12432
|
family: typing.Optional[builtins.str] = None,
|
|
@@ -12424,6 +12448,7 @@ class CfnTaskDefinition(
|
|
|
12424
12448
|
:param id: Construct identifier for this resource (unique in its scope).
|
|
12425
12449
|
:param container_definitions: A list of container definitions in JSON format that describe the different containers that make up your task. For more information about container definition parameters and defaults, see `Amazon ECS Task Definitions <https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_defintions.html>`_ in the *Amazon Elastic Container Service Developer Guide* .
|
|
12426
12450
|
:param cpu: The number of ``cpu`` units used by the task. If you use the EC2 launch type, this field is optional. Any value can be used. If you use the Fargate launch type, this field is required. You must use one of the following values. The value that you choose determines your range of valid values for the ``memory`` parameter. If you use the EC2 launch type, this field is optional. Supported values are between ``128`` CPU units ( ``0.125`` vCPUs) and ``10240`` CPU units ( ``10`` vCPUs). The CPU units cannot be less than 1 vCPU when you use Windows containers on Fargate. - 256 (.25 vCPU) - Available ``memory`` values: 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB) - 512 (.5 vCPU) - Available ``memory`` values: 1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB) - 1024 (1 vCPU) - Available ``memory`` values: 2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB) - 2048 (2 vCPU) - Available ``memory`` values: 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB) - 4096 (4 vCPU) - Available ``memory`` values: 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB) - 8192 (8 vCPU) - Available ``memory`` values: 16 GB and 60 GB in 4 GB increments This option requires Linux platform ``1.4.0`` or later. - 16384 (16vCPU) - Available ``memory`` values: 32GB and 120 GB in 8 GB increments This option requires Linux platform ``1.4.0`` or later.
|
|
12451
|
+
:param enable_fault_injection: Enables fault injection and allows for fault injection requests to be accepted from the task's containers. The default value is ``false`` .
|
|
12427
12452
|
:param ephemeral_storage: The ephemeral storage settings to use for tasks run with the task definition.
|
|
12428
12453
|
:param execution_role_arn: The Amazon Resource Name (ARN) of the task execution role that grants the Amazon ECS container agent permission to make AWS API calls on your behalf. For information about the required IAM roles for Amazon ECS, see `IAM roles for Amazon ECS <https://docs.aws.amazon.com/AmazonECS/latest/developerguide/security-ecs-iam-role-overview.html>`_ in the *Amazon Elastic Container Service Developer Guide* .
|
|
12429
12454
|
:param family: The name of a family that this task definition is registered to. Up to 255 letters (uppercase and lowercase), numbers, hyphens, and underscores are allowed. A family groups multiple versions of a task definition. Amazon ECS gives the first task definition that you registered to a family a revision number of 1. Amazon ECS gives sequential revision numbers to each task definition that you add. .. epigraph:: To use revision numbers when you update a task definition, specify this property. If you don't specify a value, AWS CloudFormation generates a new task definition each time that you update it.
|
|
@@ -12447,6 +12472,7 @@ class CfnTaskDefinition(
|
|
|
12447
12472
|
props = CfnTaskDefinitionProps(
|
|
12448
12473
|
container_definitions=container_definitions,
|
|
12449
12474
|
cpu=cpu,
|
|
12475
|
+
enable_fault_injection=enable_fault_injection,
|
|
12450
12476
|
ephemeral_storage=ephemeral_storage,
|
|
12451
12477
|
execution_role_arn=execution_role_arn,
|
|
12452
12478
|
family=family,
|
|
@@ -12547,6 +12573,24 @@ class CfnTaskDefinition(
|
|
|
12547
12573
|
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
|
|
12548
12574
|
jsii.set(self, "cpu", value) # pyright: ignore[reportArgumentType]
|
|
12549
12575
|
|
|
12576
|
+
@builtins.property
|
|
12577
|
+
@jsii.member(jsii_name="enableFaultInjection")
|
|
12578
|
+
def enable_fault_injection(
|
|
12579
|
+
self,
|
|
12580
|
+
) -> typing.Optional[typing.Union[builtins.bool, _IResolvable_da3f097b]]:
|
|
12581
|
+
'''Enables fault injection and allows for fault injection requests to be accepted from the task's containers.'''
|
|
12582
|
+
return typing.cast(typing.Optional[typing.Union[builtins.bool, _IResolvable_da3f097b]], jsii.get(self, "enableFaultInjection"))
|
|
12583
|
+
|
|
12584
|
+
@enable_fault_injection.setter
|
|
12585
|
+
def enable_fault_injection(
|
|
12586
|
+
self,
|
|
12587
|
+
value: typing.Optional[typing.Union[builtins.bool, _IResolvable_da3f097b]],
|
|
12588
|
+
) -> None:
|
|
12589
|
+
if __debug__:
|
|
12590
|
+
type_hints = typing.get_type_hints(_typecheckingstub__aa2816ef80c100b1df58247274b83e116e7f9568ae1e6f2acba3307b8b5c8f1f)
|
|
12591
|
+
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
|
|
12592
|
+
jsii.set(self, "enableFaultInjection", value) # pyright: ignore[reportArgumentType]
|
|
12593
|
+
|
|
12550
12594
|
@builtins.property
|
|
12551
12595
|
@jsii.member(jsii_name="ephemeralStorage")
|
|
12552
12596
|
def ephemeral_storage(
|
|
@@ -15581,7 +15625,7 @@ class CfnTaskDefinition(
|
|
|
15581
15625
|
'''The ``LogConfiguration`` property specifies log configuration options to send to a custom log driver for the container.
|
|
15582
15626
|
|
|
15583
15627
|
:param log_driver: The log driver to use for the container. For tasks on AWS Fargate , the supported log drivers are ``awslogs`` , ``splunk`` , and ``awsfirelens`` . For tasks hosted on Amazon EC2 instances, the supported log drivers are ``awslogs`` , ``fluentd`` , ``gelf`` , ``json-file`` , ``journald`` , ``syslog`` , ``splunk`` , and ``awsfirelens`` . For more information about using the ``awslogs`` log driver, see `Send Amazon ECS logs to CloudWatch <https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using_awslogs.html>`_ in the *Amazon Elastic Container Service Developer Guide* . For more information about using the ``awsfirelens`` log driver, see `Send Amazon ECS logs to an AWS service or AWS Partner <https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using_firelens.html>`_ . .. epigraph:: If you have a custom driver that isn't listed, you can fork the Amazon ECS container agent project that's `available on GitHub <https://docs.aws.amazon.com/https://github.com/aws/amazon-ecs-agent>`_ and customize it to work with that driver. We encourage you to submit pull requests for changes that you would like to have included. However, we don't currently provide support for running modified copies of this software.
|
|
15584
|
-
:param options: The configuration options to send to the log driver. The options you can specify depend on the log driver. Some of the options you can specify when you use the ``awslogs`` log driver to route logs to Amazon CloudWatch include the following: - **awslogs-create-group** - Required: No Specify whether you want the log group to be created automatically. If this option isn't specified, it defaults to ``false`` . .. epigraph:: Your IAM policy must include the ``logs:CreateLogGroup`` permission before you attempt to use ``awslogs-create-group`` . - **awslogs-region** - Required: Yes Specify the AWS Region that the ``awslogs`` log driver is to send your Docker logs to. You can choose to send all of your logs from clusters in different Regions to a single region in CloudWatch Logs. This is so that they're all visible in one location. Otherwise, you can separate them by Region for more granularity. Make sure that the specified log group exists in the Region that you specify with this option. - **awslogs-group** - Required: Yes Make sure to specify a log group that the ``awslogs`` log driver sends its log streams to. - **awslogs-stream-prefix** - Required: Yes, when using the Fargate launch type.Optional for the EC2 launch type, required for the Fargate launch type. Use the ``awslogs-stream-prefix`` option to associate a log stream with the specified prefix, the container name, and the ID of the Amazon ECS task that the container belongs to. If you specify a prefix with this option, then the log stream takes the format ``prefix-name/container-name/ecs-task-id`` . If you don't specify a prefix with this option, then the log stream is named after the container ID that's assigned by the Docker daemon on the container instance. Because it's difficult to trace logs back to the container that sent them with just the Docker container ID (which is only available on the container instance), we recommend that you specify a prefix with this option. 
For Amazon ECS services, you can use the service name as the prefix. Doing so, you can trace log streams to the service that the container belongs to, the name of the container that sent them, and the ID of the task that the container belongs to. You must specify a stream-prefix for your logs to have your logs appear in the Log pane when using the Amazon ECS console. - **awslogs-datetime-format** - Required: No This option defines a multiline start pattern in Python ``strftime`` format. A log message consists of a line that matches the pattern and any following lines that don’t match the pattern. The matched line is the delimiter between log messages. One example of a use case for using this format is for parsing output such as a stack dump, which might otherwise be logged in multiple entries. The correct pattern allows it to be captured in a single entry. For more information, see `awslogs-datetime-format <https://docs.aws.amazon.com/https://docs.docker.com/config/containers/logging/awslogs/#awslogs-datetime-format>`_ . You cannot configure both the ``awslogs-datetime-format`` and ``awslogs-multiline-pattern`` options. .. epigraph:: Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance. - **awslogs-multiline-pattern** - Required: No This option defines a multiline start pattern that uses a regular expression. A log message consists of a line that matches the pattern and any following lines that don’t match the pattern. The matched line is the delimiter between log messages. For more information, see `awslogs-multiline-pattern <https://docs.aws.amazon.com/https://docs.docker.com/config/containers/logging/awslogs/#awslogs-multiline-pattern>`_ . This option is ignored if ``awslogs-datetime-format`` is also configured. You cannot configure both the ``awslogs-datetime-format`` and ``awslogs-multiline-pattern`` options. .. 
epigraph:: Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance. - **mode** - Required: No Valid values: ``non-blocking`` | ``blocking`` This option defines the delivery mode of log messages from the container to CloudWatch Logs. The delivery mode you choose affects application availability when the flow of logs from container to CloudWatch is interrupted. If you use the ``blocking`` mode and the flow of logs to CloudWatch is interrupted, calls from container code to write to the ``stdout`` and ``stderr`` streams will block. The logging thread of the application will block as a result. This may cause the application to become unresponsive and lead to container healthcheck failure. If you use the ``non-blocking`` mode, the container's logs are instead stored in an in-memory intermediate buffer configured with the ``max-buffer-size`` option. This prevents the application from becoming unresponsive when logs cannot be sent to CloudWatch. We recommend using this mode if you want to ensure service availability and are okay with some log loss. For more information, see `Preventing log loss with non-blocking mode in the ``awslogs`` container log driver <https://docs.aws.amazon.com/containers/preventing-log-loss-with-non-blocking-mode-in-the-awslogs-container-log-driver/>`_ . - **max-buffer-size** - Required: No Default value: ``1m`` When ``non-blocking`` mode is used, the ``max-buffer-size`` log option controls the size of the buffer that's used for intermediate message storage. Make sure to specify an adequate buffer size based on your application. When the buffer fills up, further logs cannot be stored. Logs that cannot be stored are lost. To route logs using the ``splunk`` log router, you need to specify a ``splunk-token`` and a ``splunk-url`` . 
When you use the ``awsfirelens`` log router to route logs to an AWS Service or AWS Partner Network destination for log storage and analytics, you can set the ``log-driver-buffer-limit`` option to limit the number of events that are buffered in memory, before being sent to the log router container. It can help to resolve potential log loss issue because high throughput might result in memory running out for the buffer inside of Docker. Other options you can specify when using ``awsfirelens`` to route logs depend on the destination. When you export logs to Amazon Data Firehose, you can specify the AWS Region with ``region`` and a name for the log stream with ``delivery_stream`` . When you export logs to Amazon Kinesis Data Streams, you can specify an AWS Region with ``region`` and a data stream name with ``stream`` . When you export logs to Amazon OpenSearch Service, you can specify options like ``Name`` , ``Host`` (OpenSearch Service endpoint without protocol), ``Port`` , ``Index`` , ``Type`` , ``Aws_auth`` , ``Aws_region`` , ``Suppress_Type_Name`` , and ``tls`` . When you export logs to Amazon S3, you can specify the bucket using the ``bucket`` option. You can also specify ``region`` , ``total_file_size`` , ``upload_timeout`` , and ``use_put_object`` as options. This parameter requires version 1.19 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: ``sudo docker version --format '{{.Server.APIVersion}}'``
|
|
15628
|
+
:param options: The configuration options to send to the log driver. The options you can specify depend on the log driver. Some of the options you can specify when you use the ``awslogs`` log driver to route logs to Amazon CloudWatch include the following: - **awslogs-create-group** - Required: No Specify whether you want the log group to be created automatically. If this option isn't specified, it defaults to ``false`` . .. epigraph:: Your IAM policy must include the ``logs:CreateLogGroup`` permission before you attempt to use ``awslogs-create-group`` . - **awslogs-region** - Required: Yes Specify the AWS Region that the ``awslogs`` log driver is to send your Docker logs to. You can choose to send all of your logs from clusters in different Regions to a single region in CloudWatch Logs. This is so that they're all visible in one location. Otherwise, you can separate them by Region for more granularity. Make sure that the specified log group exists in the Region that you specify with this option. - **awslogs-group** - Required: Yes Make sure to specify a log group that the ``awslogs`` log driver sends its log streams to. - **awslogs-stream-prefix** - Required: Yes, when using the Fargate launch type.Optional for the EC2 launch type, required for the Fargate launch type. Use the ``awslogs-stream-prefix`` option to associate a log stream with the specified prefix, the container name, and the ID of the Amazon ECS task that the container belongs to. If you specify a prefix with this option, then the log stream takes the format ``prefix-name/container-name/ecs-task-id`` . If you don't specify a prefix with this option, then the log stream is named after the container ID that's assigned by the Docker daemon on the container instance. Because it's difficult to trace logs back to the container that sent them with just the Docker container ID (which is only available on the container instance), we recommend that you specify a prefix with this option. 
For Amazon ECS services, you can use the service name as the prefix. Doing so, you can trace log streams to the service that the container belongs to, the name of the container that sent them, and the ID of the task that the container belongs to. You must specify a stream-prefix for your logs to have your logs appear in the Log pane when using the Amazon ECS console. - **awslogs-datetime-format** - Required: No This option defines a multiline start pattern in Python ``strftime`` format. A log message consists of a line that matches the pattern and any following lines that don’t match the pattern. The matched line is the delimiter between log messages. One example of a use case for using this format is for parsing output such as a stack dump, which might otherwise be logged in multiple entries. The correct pattern allows it to be captured in a single entry. For more information, see `awslogs-datetime-format <https://docs.aws.amazon.com/https://docs.docker.com/config/containers/logging/awslogs/#awslogs-datetime-format>`_ . You cannot configure both the ``awslogs-datetime-format`` and ``awslogs-multiline-pattern`` options. .. epigraph:: Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance. - **awslogs-multiline-pattern** - Required: No This option defines a multiline start pattern that uses a regular expression. A log message consists of a line that matches the pattern and any following lines that don’t match the pattern. The matched line is the delimiter between log messages. For more information, see `awslogs-multiline-pattern <https://docs.aws.amazon.com/https://docs.docker.com/config/containers/logging/awslogs/#awslogs-multiline-pattern>`_ . This option is ignored if ``awslogs-datetime-format`` is also configured. You cannot configure both the ``awslogs-datetime-format`` and ``awslogs-multiline-pattern`` options. .. 
epigraph:: Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance. - **mode** - Required: No Valid values: ``non-blocking`` | ``blocking`` This option defines the delivery mode of log messages from the container to CloudWatch Logs. The delivery mode you choose affects application availability when the flow of logs from container to CloudWatch is interrupted. If you use the ``blocking`` mode and the flow of logs to CloudWatch is interrupted, calls from container code to write to the ``stdout`` and ``stderr`` streams will block. The logging thread of the application will block as a result. This may cause the application to become unresponsive and lead to container healthcheck failure. If you use the ``non-blocking`` mode, the container's logs are instead stored in an in-memory intermediate buffer configured with the ``max-buffer-size`` option. This prevents the application from becoming unresponsive when logs cannot be sent to CloudWatch. We recommend using this mode if you want to ensure service availability and are okay with some log loss. For more information, see `Preventing log loss with non-blocking mode in the ``awslogs`` container log driver <https://docs.aws.amazon.com/containers/preventing-log-loss-with-non-blocking-mode-in-the-awslogs-container-log-driver/>`_ . - **max-buffer-size** - Required: No Default value: ``1m`` When ``non-blocking`` mode is used, the ``max-buffer-size`` log option controls the size of the buffer that's used for intermediate message storage. Make sure to specify an adequate buffer size based on your application. When the buffer fills up, further logs cannot be stored. Logs that cannot be stored are lost. To route logs using the ``splunk`` log router, you need to specify a ``splunk-token`` and a ``splunk-url`` . 
When you use the ``awsfirelens`` log router to route logs to an AWS Service or AWS Partner Network destination for log storage and analytics, you can set the ``log-driver-buffer-limit`` option to limit the number of events that are buffered in memory, before being sent to the log router container. It can help to resolve potential log loss issue because high throughput might result in memory running out for the buffer inside of Docker. Other options you can specify when using ``awsfirelens`` to route logs depend on the destination. When you export logs to Amazon Data Firehose, you can specify the AWS Region with ``region`` and a name for the log stream with ``delivery_stream`` . When you export logs to Amazon Kinesis Data Streams, you can specify an AWS Region with ``region`` and a data stream name with ``stream`` . When you export logs to Amazon OpenSearch Service, you can specify options like ``Name`` , ``Host`` (OpenSearch Service endpoint without protocol), ``Port`` , ``Index`` , ``Type`` , ``Aws_auth`` , ``Aws_region`` , ``Suppress_Type_Name`` , and ``tls`` . For more information, see `Under the hood: FireLens for Amazon ECS Tasks <https://docs.aws.amazon.com/containers/under-the-hood-firelens-for-amazon-ecs-tasks/>`_ . When you export logs to Amazon S3, you can specify the bucket using the ``bucket`` option. You can also specify ``region`` , ``total_file_size`` , ``upload_timeout`` , and ``use_put_object`` as options. This parameter requires version 1.19 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: ``sudo docker version --format '{{.Server.APIVersion}}'``
|
|
15585
15629
|
:param secret_options: The secrets to pass to the log configuration. For more information, see `Specifying sensitive data <https://docs.aws.amazon.com/AmazonECS/latest/developerguide/specifying-sensitive-data.html>`_ in the *Amazon Elastic Container Service Developer Guide* .
|
|
15586
15630
|
|
|
15587
15631
|
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ecs-taskdefinition-logconfiguration.html
|
|
@@ -15723,7 +15767,7 @@ class CfnTaskDefinition(
|
|
|
15723
15767
|
|
|
15724
15768
|
When you export logs to Amazon Kinesis Data Streams, you can specify an AWS Region with ``region`` and a data stream name with ``stream`` .
|
|
15725
15769
|
|
|
15726
|
-
When you export logs to Amazon OpenSearch Service, you can specify options like ``Name`` , ``Host`` (OpenSearch Service endpoint without protocol), ``Port`` , ``Index`` , ``Type`` , ``Aws_auth`` , ``Aws_region`` , ``Suppress_Type_Name`` , and ``tls`` .
|
|
15770
|
+
When you export logs to Amazon OpenSearch Service, you can specify options like ``Name`` , ``Host`` (OpenSearch Service endpoint without protocol), ``Port`` , ``Index`` , ``Type`` , ``Aws_auth`` , ``Aws_region`` , ``Suppress_Type_Name`` , and ``tls`` . For more information, see `Under the hood: FireLens for Amazon ECS Tasks <https://docs.aws.amazon.com/containers/under-the-hood-firelens-for-amazon-ecs-tasks/>`_ .
|
|
15727
15771
|
|
|
15728
15772
|
When you export logs to Amazon S3, you can specify the bucket using the ``bucket`` option. You can also specify ``region`` , ``total_file_size`` , ``upload_timeout`` , and ``use_put_object`` as options.
|
|
15729
15773
|
|
|
@@ -17190,6 +17234,7 @@ class CfnTaskDefinition(
|
|
|
17190
17234
|
name_mapping={
|
|
17191
17235
|
"container_definitions": "containerDefinitions",
|
|
17192
17236
|
"cpu": "cpu",
|
|
17237
|
+
"enable_fault_injection": "enableFaultInjection",
|
|
17193
17238
|
"ephemeral_storage": "ephemeralStorage",
|
|
17194
17239
|
"execution_role_arn": "executionRoleArn",
|
|
17195
17240
|
"family": "family",
|
|
@@ -17213,6 +17258,7 @@ class CfnTaskDefinitionProps:
|
|
|
17213
17258
|
*,
|
|
17214
17259
|
container_definitions: typing.Optional[typing.Union[_IResolvable_da3f097b, typing.Sequence[typing.Union[_IResolvable_da3f097b, typing.Union[CfnTaskDefinition.ContainerDefinitionProperty, typing.Dict[builtins.str, typing.Any]]]]]] = None,
|
|
17215
17260
|
cpu: typing.Optional[builtins.str] = None,
|
|
17261
|
+
enable_fault_injection: typing.Optional[typing.Union[builtins.bool, _IResolvable_da3f097b]] = None,
|
|
17216
17262
|
ephemeral_storage: typing.Optional[typing.Union[_IResolvable_da3f097b, typing.Union[CfnTaskDefinition.EphemeralStorageProperty, typing.Dict[builtins.str, typing.Any]]]] = None,
|
|
17217
17263
|
execution_role_arn: typing.Optional[builtins.str] = None,
|
|
17218
17264
|
family: typing.Optional[builtins.str] = None,
|
|
@@ -17233,6 +17279,7 @@ class CfnTaskDefinitionProps:
|
|
|
17233
17279
|
|
|
17234
17280
|
:param container_definitions: A list of container definitions in JSON format that describe the different containers that make up your task. For more information about container definition parameters and defaults, see `Amazon ECS Task Definitions <https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_defintions.html>`_ in the *Amazon Elastic Container Service Developer Guide* .
|
|
17235
17281
|
:param cpu: The number of ``cpu`` units used by the task. If you use the EC2 launch type, this field is optional. Any value can be used. If you use the Fargate launch type, this field is required. You must use one of the following values. The value that you choose determines your range of valid values for the ``memory`` parameter. If you use the EC2 launch type, this field is optional. Supported values are between ``128`` CPU units ( ``0.125`` vCPUs) and ``10240`` CPU units ( ``10`` vCPUs). The CPU units cannot be less than 1 vCPU when you use Windows containers on Fargate. - 256 (.25 vCPU) - Available ``memory`` values: 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB) - 512 (.5 vCPU) - Available ``memory`` values: 1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB) - 1024 (1 vCPU) - Available ``memory`` values: 2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB) - 2048 (2 vCPU) - Available ``memory`` values: 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB) - 4096 (4 vCPU) - Available ``memory`` values: 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB) - 8192 (8 vCPU) - Available ``memory`` values: 16 GB and 60 GB in 4 GB increments This option requires Linux platform ``1.4.0`` or later. - 16384 (16vCPU) - Available ``memory`` values: 32GB and 120 GB in 8 GB increments This option requires Linux platform ``1.4.0`` or later.
|
|
17282
|
+
:param enable_fault_injection: Enables fault injection and allows for fault injection requests to be accepted from the task's containers. The default value is ``false`` .
|
|
17236
17283
|
:param ephemeral_storage: The ephemeral storage settings to use for tasks run with the task definition.
|
|
17237
17284
|
:param execution_role_arn: The Amazon Resource Name (ARN) of the task execution role that grants the Amazon ECS container agent permission to make AWS API calls on your behalf. For informationabout the required IAM roles for Amazon ECS, see `IAM roles for Amazon ECS <https://docs.aws.amazon.com/AmazonECS/latest/developerguide/security-ecs-iam-role-overview.html>`_ in the *Amazon Elastic Container Service Developer Guide* .
|
|
17238
17285
|
:param family: The name of a family that this task definition is registered to. Up to 255 letters (uppercase and lowercase), numbers, hyphens, and underscores are allowed. A family groups multiple versions of a task definition. Amazon ECS gives the first task definition that you registered to a family a revision number of 1. Amazon ECS gives sequential revision numbers to each task definition that you add. .. epigraph:: To use revision numbers when you update a task definition, specify this property. If you don't specify a value, AWS CloudFormation generates a new task definition each time that you update it.
|
|
@@ -17396,6 +17443,7 @@ class CfnTaskDefinitionProps:
|
|
|
17396
17443
|
working_directory="workingDirectory"
|
|
17397
17444
|
)],
|
|
17398
17445
|
cpu="cpu",
|
|
17446
|
+
enable_fault_injection=False,
|
|
17399
17447
|
ephemeral_storage=ecs.CfnTaskDefinition.EphemeralStorageProperty(
|
|
17400
17448
|
size_in_gi_b=123
|
|
17401
17449
|
),
|
|
@@ -17481,6 +17529,7 @@ class CfnTaskDefinitionProps:
|
|
|
17481
17529
|
type_hints = typing.get_type_hints(_typecheckingstub__aa26bba98a26acc68000a13cd676c1f16ef698b1af77ac4e6fb710c7f0ba893d)
|
|
17482
17530
|
check_type(argname="argument container_definitions", value=container_definitions, expected_type=type_hints["container_definitions"])
|
|
17483
17531
|
check_type(argname="argument cpu", value=cpu, expected_type=type_hints["cpu"])
|
|
17532
|
+
check_type(argname="argument enable_fault_injection", value=enable_fault_injection, expected_type=type_hints["enable_fault_injection"])
|
|
17484
17533
|
check_type(argname="argument ephemeral_storage", value=ephemeral_storage, expected_type=type_hints["ephemeral_storage"])
|
|
17485
17534
|
check_type(argname="argument execution_role_arn", value=execution_role_arn, expected_type=type_hints["execution_role_arn"])
|
|
17486
17535
|
check_type(argname="argument family", value=family, expected_type=type_hints["family"])
|
|
@@ -17501,6 +17550,8 @@ class CfnTaskDefinitionProps:
|
|
|
17501
17550
|
self._values["container_definitions"] = container_definitions
|
|
17502
17551
|
if cpu is not None:
|
|
17503
17552
|
self._values["cpu"] = cpu
|
|
17553
|
+
if enable_fault_injection is not None:
|
|
17554
|
+
self._values["enable_fault_injection"] = enable_fault_injection
|
|
17504
17555
|
if ephemeral_storage is not None:
|
|
17505
17556
|
self._values["ephemeral_storage"] = ephemeral_storage
|
|
17506
17557
|
if execution_role_arn is not None:
|
|
@@ -17573,6 +17624,19 @@ class CfnTaskDefinitionProps:
|
|
|
17573
17624
|
result = self._values.get("cpu")
|
|
17574
17625
|
return typing.cast(typing.Optional[builtins.str], result)
|
|
17575
17626
|
|
|
17627
|
+
@builtins.property
|
|
17628
|
+
def enable_fault_injection(
|
|
17629
|
+
self,
|
|
17630
|
+
) -> typing.Optional[typing.Union[builtins.bool, _IResolvable_da3f097b]]:
|
|
17631
|
+
'''Enables fault injection and allows for fault injection requests to be accepted from the task's containers.
|
|
17632
|
+
|
|
17633
|
+
The default value is ``false`` .
|
|
17634
|
+
|
|
17635
|
+
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ecs-taskdefinition.html#cfn-ecs-taskdefinition-enablefaultinjection
|
|
17636
|
+
'''
|
|
17637
|
+
result = self._values.get("enable_fault_injection")
|
|
17638
|
+
return typing.cast(typing.Optional[typing.Union[builtins.bool, _IResolvable_da3f097b]], result)
|
|
17639
|
+
|
|
17576
17640
|
@builtins.property
|
|
17577
17641
|
def ephemeral_storage(
|
|
17578
17642
|
self,
|
|
@@ -18326,7 +18390,7 @@ class CfnTaskSet(
|
|
|
18326
18390
|
|
|
18327
18391
|
With ``FARGATE_SPOT`` , you can run interruption tolerant tasks at a rate that's discounted compared to the ``FARGATE`` price. ``FARGATE_SPOT`` runs tasks on spare compute capacity. When AWS needs the capacity back, your tasks are interrupted with a two-minute warning. ``FARGATE_SPOT`` supports Linux tasks with the X86_64 architecture on platform version 1.3.0 or later. ``FARGATE_SPOT`` supports Linux tasks with the ARM64 architecture on platform version 1.4.0 or later.
|
|
18328
18392
|
|
|
18329
|
-
A capacity provider strategy
|
|
18393
|
+
A capacity provider strategy can contain a maximum of 20 capacity providers.
|
|
18330
18394
|
|
|
18331
18395
|
:param base: The *base* value designates how many tasks, at a minimum, to run on the specified capacity provider. Only one capacity provider in a capacity provider strategy can have a *base* defined. If no value is specified, the default value of ``0`` is used.
|
|
18332
18396
|
:param capacity_provider: The short name of the capacity provider.
|
|
@@ -38899,6 +38963,7 @@ class Cluster(
|
|
|
38899
38963
|
allow_all_outbound: typing.Optional[builtins.bool] = None,
|
|
38900
38964
|
associate_public_ip_address: typing.Optional[builtins.bool] = None,
|
|
38901
38965
|
auto_scaling_group_name: typing.Optional[builtins.str] = None,
|
|
38966
|
+
az_capacity_distribution_strategy: typing.Optional[_CapacityDistributionStrategy_2393ccfe] = None,
|
|
38902
38967
|
block_devices: typing.Optional[typing.Sequence[typing.Union[_BlockDevice_0cfc0568, typing.Dict[builtins.str, typing.Any]]]] = None,
|
|
38903
38968
|
capacity_rebalance: typing.Optional[builtins.bool] = None,
|
|
38904
38969
|
cooldown: typing.Optional[_Duration_4839e8c3] = None,
|
|
@@ -38939,6 +39004,7 @@ class Cluster(
|
|
|
38939
39004
|
:param allow_all_outbound: Whether the instances can initiate connections to anywhere by default. Default: true
|
|
38940
39005
|
:param associate_public_ip_address: Whether instances in the Auto Scaling Group should have public IP addresses associated with them. ``launchTemplate`` and ``mixedInstancesPolicy`` must not be specified when this property is specified Default: - Use subnet setting.
|
|
38941
39006
|
:param auto_scaling_group_name: The name of the Auto Scaling group. This name must be unique per Region per account. Default: - Auto generated by CloudFormation
|
|
39007
|
+
:param az_capacity_distribution_strategy: The strategy for distributing instances across Availability Zones. Default: None
|
|
38942
39008
|
:param block_devices: Specifies how block devices are exposed to the instance. You can specify virtual devices and EBS volumes. Each instance that is launched has an associated root device volume, either an Amazon EBS volume or an instance store volume. You can use block device mappings to specify additional EBS volumes or instance store volumes to attach to an instance when it is launched. ``launchTemplate`` and ``mixedInstancesPolicy`` must not be specified when this property is specified Default: - Uses the block device mapping of the AMI
|
|
38943
39009
|
:param capacity_rebalance: Indicates whether Capacity Rebalancing is enabled. When you turn on Capacity Rebalancing, Amazon EC2 Auto Scaling attempts to launch a Spot Instance whenever Amazon EC2 notifies that a Spot Instance is at an elevated risk of interruption. After launching a new instance, it then terminates an old instance. Default: false
|
|
38944
39010
|
:param cooldown: Default scaling cooldown for this AutoScalingGroup. Default: Duration.minutes(5)
|
|
@@ -38976,6 +39042,7 @@ class Cluster(
|
|
|
38976
39042
|
allow_all_outbound=allow_all_outbound,
|
|
38977
39043
|
associate_public_ip_address=associate_public_ip_address,
|
|
38978
39044
|
auto_scaling_group_name=auto_scaling_group_name,
|
|
39045
|
+
az_capacity_distribution_strategy=az_capacity_distribution_strategy,
|
|
38979
39046
|
block_devices=block_devices,
|
|
38980
39047
|
capacity_rebalance=capacity_rebalance,
|
|
38981
39048
|
cooldown=cooldown,
|
|
@@ -42102,6 +42169,7 @@ def _typecheckingstub__64f2d9b3495e3be78346f77d5ad90928968c8ce230e670b6279dc67ad
|
|
|
42102
42169
|
allow_all_outbound: typing.Optional[builtins.bool] = None,
|
|
42103
42170
|
associate_public_ip_address: typing.Optional[builtins.bool] = None,
|
|
42104
42171
|
auto_scaling_group_name: typing.Optional[builtins.str] = None,
|
|
42172
|
+
az_capacity_distribution_strategy: typing.Optional[_CapacityDistributionStrategy_2393ccfe] = None,
|
|
42105
42173
|
block_devices: typing.Optional[typing.Sequence[typing.Union[_BlockDevice_0cfc0568, typing.Dict[builtins.str, typing.Any]]]] = None,
|
|
42106
42174
|
capacity_rebalance: typing.Optional[builtins.bool] = None,
|
|
42107
42175
|
cooldown: typing.Optional[_Duration_4839e8c3] = None,
|
|
@@ -43126,6 +43194,7 @@ def _typecheckingstub__a77e92d9ff0a9ac5b9f5909726a2e91dafa1fae662c3fdef09e5f9c3f
|
|
|
43126
43194
|
*,
|
|
43127
43195
|
container_definitions: typing.Optional[typing.Union[_IResolvable_da3f097b, typing.Sequence[typing.Union[_IResolvable_da3f097b, typing.Union[CfnTaskDefinition.ContainerDefinitionProperty, typing.Dict[builtins.str, typing.Any]]]]]] = None,
|
|
43128
43196
|
cpu: typing.Optional[builtins.str] = None,
|
|
43197
|
+
enable_fault_injection: typing.Optional[typing.Union[builtins.bool, _IResolvable_da3f097b]] = None,
|
|
43129
43198
|
ephemeral_storage: typing.Optional[typing.Union[_IResolvable_da3f097b, typing.Union[CfnTaskDefinition.EphemeralStorageProperty, typing.Dict[builtins.str, typing.Any]]]] = None,
|
|
43130
43199
|
execution_role_arn: typing.Optional[builtins.str] = None,
|
|
43131
43200
|
family: typing.Optional[builtins.str] = None,
|
|
@@ -43169,6 +43238,12 @@ def _typecheckingstub__54ee0f0538c4fcd482362fac760fb6e813d1c01e85a1bb6e1da58c885
|
|
|
43169
43238
|
"""Type checking stubs"""
|
|
43170
43239
|
pass
|
|
43171
43240
|
|
|
43241
|
+
def _typecheckingstub__aa2816ef80c100b1df58247274b83e116e7f9568ae1e6f2acba3307b8b5c8f1f(
|
|
43242
|
+
value: typing.Optional[typing.Union[builtins.bool, _IResolvable_da3f097b]],
|
|
43243
|
+
) -> None:
|
|
43244
|
+
"""Type checking stubs"""
|
|
43245
|
+
pass
|
|
43246
|
+
|
|
43172
43247
|
def _typecheckingstub__501da7d8d052de8848e38b3ce458cd787c3006be2870679cd704efc8594577e3(
|
|
43173
43248
|
value: typing.Optional[typing.Union[_IResolvable_da3f097b, CfnTaskDefinition.EphemeralStorageProperty]],
|
|
43174
43249
|
) -> None:
|
|
@@ -43594,6 +43669,7 @@ def _typecheckingstub__aa26bba98a26acc68000a13cd676c1f16ef698b1af77ac4e6fb710c7f
|
|
|
43594
43669
|
*,
|
|
43595
43670
|
container_definitions: typing.Optional[typing.Union[_IResolvable_da3f097b, typing.Sequence[typing.Union[_IResolvable_da3f097b, typing.Union[CfnTaskDefinition.ContainerDefinitionProperty, typing.Dict[builtins.str, typing.Any]]]]]] = None,
|
|
43596
43671
|
cpu: typing.Optional[builtins.str] = None,
|
|
43672
|
+
enable_fault_injection: typing.Optional[typing.Union[builtins.bool, _IResolvable_da3f097b]] = None,
|
|
43597
43673
|
ephemeral_storage: typing.Optional[typing.Union[_IResolvable_da3f097b, typing.Union[CfnTaskDefinition.EphemeralStorageProperty, typing.Dict[builtins.str, typing.Any]]]] = None,
|
|
43598
43674
|
execution_role_arn: typing.Optional[builtins.str] = None,
|
|
43599
43675
|
family: typing.Optional[builtins.str] = None,
|
|
@@ -45850,6 +45926,7 @@ def _typecheckingstub__63e98e008463515927d4aee3c938d64639e34ce8a2c09fa766883be6a
|
|
|
45850
45926
|
allow_all_outbound: typing.Optional[builtins.bool] = None,
|
|
45851
45927
|
associate_public_ip_address: typing.Optional[builtins.bool] = None,
|
|
45852
45928
|
auto_scaling_group_name: typing.Optional[builtins.str] = None,
|
|
45929
|
+
az_capacity_distribution_strategy: typing.Optional[_CapacityDistributionStrategy_2393ccfe] = None,
|
|
45853
45930
|
block_devices: typing.Optional[typing.Sequence[typing.Union[_BlockDevice_0cfc0568, typing.Dict[builtins.str, typing.Any]]]] = None,
|
|
45854
45931
|
capacity_rebalance: typing.Optional[builtins.bool] = None,
|
|
45855
45932
|
cooldown: typing.Optional[_Duration_4839e8c3] = None,
|
aws_cdk/aws_efs/__init__.py
CHANGED
|
@@ -2070,7 +2070,7 @@ class CfnFileSystem(
|
|
|
2070
2070
|
) -> None:
|
|
2071
2071
|
'''Describes the protection on the file system.
|
|
2072
2072
|
|
|
2073
|
-
:param replication_overwrite_protection: The status of the file system's replication overwrite protection. - ``ENABLED`` – The file system cannot be used as the destination file system in a replication configuration. The file system is writeable. Replication overwrite protection is ``ENABLED`` by default. - ``DISABLED`` – The file system can be used as the destination file system in a replication configuration. The file system is read-only and can only be modified by EFS replication. - ``REPLICATING`` – The file system is being used as the destination file system in a replication configuration. The file system is read-only and is
|
|
2073
|
+
:param replication_overwrite_protection: The status of the file system's replication overwrite protection. - ``ENABLED`` – The file system cannot be used as the destination file system in a replication configuration. The file system is writeable. Replication overwrite protection is ``ENABLED`` by default. - ``DISABLED`` – The file system can be used as the destination file system in a replication configuration. The file system is read-only and can only be modified by EFS replication. - ``REPLICATING`` – The file system is being used as the destination file system in a replication configuration. The file system is read-only and is modified only by EFS replication. If the replication configuration is deleted, the file system's replication overwrite protection is re-enabled, the file system becomes writeable.
|
|
2074
2074
|
|
|
2075
2075
|
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-efs-filesystem-filesystemprotection.html
|
|
2076
2076
|
:exampleMetadata: fixture=_generated
|
|
@@ -2098,7 +2098,7 @@ class CfnFileSystem(
|
|
|
2098
2098
|
|
|
2099
2099
|
- ``ENABLED`` – The file system cannot be used as the destination file system in a replication configuration. The file system is writeable. Replication overwrite protection is ``ENABLED`` by default.
|
|
2100
2100
|
- ``DISABLED`` – The file system can be used as the destination file system in a replication configuration. The file system is read-only and can only be modified by EFS replication.
|
|
2101
|
-
- ``REPLICATING`` – The file system is being used as the destination file system in a replication configuration. The file system is read-only and is
|
|
2101
|
+
- ``REPLICATING`` – The file system is being used as the destination file system in a replication configuration. The file system is read-only and is modified only by EFS replication.
|
|
2102
2102
|
|
|
2103
2103
|
If the replication configuration is deleted, the file system's replication overwrite protection is re-enabled, the file system becomes writeable.
|
|
2104
2104
|
|
|
@@ -2317,9 +2317,9 @@ class CfnFileSystem(
|
|
|
2317
2317
|
:param file_system_id: The ID of the destination Amazon EFS file system.
|
|
2318
2318
|
:param kms_key_id: The ID of an AWS KMS key used to protect the encrypted file system.
|
|
2319
2319
|
:param region: The AWS Region in which the destination file system is located. .. epigraph:: For One Zone file systems, the replication configuration must specify the AWS Region in which the destination file system is located.
|
|
2320
|
-
:param role_arn:
|
|
2321
|
-
:param status:
|
|
2322
|
-
:param status_message:
|
|
2320
|
+
:param role_arn: The Amazon Resource Name (ARN) of the current source file system in the replication configuration.
|
|
2321
|
+
:param status: Describes the status of the replication configuration. For more information about replication status, see `Viewing replication details <https://docs.aws.amazon.com//efs/latest/ug/awsbackup.html#restoring-backup-efsmonitoring-replication-status.html>`_ in the *Amazon EFS User Guide* .
|
|
2322
|
+
:param status_message: Message that provides details about the ``PAUSED`` or ``ERRROR`` state of the replication destination configuration. For more information about replication status messages, see `Viewing replication details <https://docs.aws.amazon.com//efs/latest/ug/awsbackup.html#restoring-backup-efsmonitoring-replication-status.html>`_ in the *Amazon EFS User Guide* .
|
|
2323
2323
|
|
|
2324
2324
|
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-efs-filesystem-replicationdestination.html
|
|
2325
2325
|
:exampleMetadata: fixture=_generated
|
|
@@ -2412,7 +2412,8 @@ class CfnFileSystem(
|
|
|
2412
2412
|
|
|
2413
2413
|
@builtins.property
|
|
2414
2414
|
def role_arn(self) -> typing.Optional[builtins.str]:
|
|
2415
|
-
'''
|
|
2415
|
+
'''The Amazon Resource Name (ARN) of the current source file system in the replication configuration.
|
|
2416
|
+
|
|
2416
2417
|
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-efs-filesystem-replicationdestination.html#cfn-efs-filesystem-replicationdestination-rolearn
|
|
2417
2418
|
'''
|
|
2418
2419
|
result = self._values.get("role_arn")
|
|
@@ -2420,7 +2421,10 @@ class CfnFileSystem(
|
|
|
2420
2421
|
|
|
2421
2422
|
@builtins.property
|
|
2422
2423
|
def status(self) -> typing.Optional[builtins.str]:
|
|
2423
|
-
'''
|
|
2424
|
+
'''Describes the status of the replication configuration.
|
|
2425
|
+
|
|
2426
|
+
For more information about replication status, see `Viewing replication details <https://docs.aws.amazon.com//efs/latest/ug/awsbackup.html#restoring-backup-efsmonitoring-replication-status.html>`_ in the *Amazon EFS User Guide* .
|
|
2427
|
+
|
|
2424
2428
|
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-efs-filesystem-replicationdestination.html#cfn-efs-filesystem-replicationdestination-status
|
|
2425
2429
|
'''
|
|
2426
2430
|
result = self._values.get("status")
|
|
@@ -2428,7 +2432,10 @@ class CfnFileSystem(
|
|
|
2428
2432
|
|
|
2429
2433
|
@builtins.property
|
|
2430
2434
|
def status_message(self) -> typing.Optional[builtins.str]:
|
|
2431
|
-
'''
|
|
2435
|
+
'''Message that provides details about the ``PAUSED`` or ``ERRROR`` state of the replication destination configuration.
|
|
2436
|
+
|
|
2437
|
+
For more information about replication status messages, see `Viewing replication details <https://docs.aws.amazon.com//efs/latest/ug/awsbackup.html#restoring-backup-efsmonitoring-replication-status.html>`_ in the *Amazon EFS User Guide* .
|
|
2438
|
+
|
|
2432
2439
|
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-efs-filesystem-replicationdestination.html#cfn-efs-filesystem-replicationdestination-statusmessage
|
|
2433
2440
|
'''
|
|
2434
2441
|
result = self._values.get("status_message")
|