aws-cdk-lib 2.186.0__py3-none-any.whl → 2.188.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- aws_cdk/__init__.py +303 -119
- aws_cdk/_jsii/__init__.py +1 -1
- aws_cdk/_jsii/{aws-cdk-lib@2.186.0.jsii.tgz → aws-cdk-lib@2.188.0.jsii.tgz} +0 -0
- aws_cdk/aws_amplify/__init__.py +136 -0
- aws_cdk/aws_apigateway/__init__.py +69 -17
- aws_cdk/aws_apigatewayv2/__init__.py +88 -0
- aws_cdk/aws_appconfig/__init__.py +30 -14
- aws_cdk/aws_appsync/__init__.py +14 -5
- aws_cdk/aws_arczonalshift/__init__.py +4 -4
- aws_cdk/aws_bedrock/__init__.py +637 -32
- aws_cdk/aws_budgets/__init__.py +8 -8
- aws_cdk/aws_cassandra/__init__.py +4 -2
- aws_cdk/aws_ce/__init__.py +2 -2
- aws_cdk/aws_cloudformation/__init__.py +3 -3
- aws_cdk/aws_cloudfront/__init__.py +11 -0
- aws_cdk/aws_cloudtrail/__init__.py +4 -18
- aws_cdk/aws_cloudwatch/__init__.py +50 -50
- aws_cdk/aws_codebuild/__init__.py +32 -1
- aws_cdk/aws_codepipeline/__init__.py +47 -32
- aws_cdk/aws_codepipeline_actions/__init__.py +2786 -1042
- aws_cdk/aws_codestarnotifications/__init__.py +16 -16
- aws_cdk/aws_cognito/__init__.py +8 -2
- aws_cdk/aws_config/__init__.py +2 -5
- aws_cdk/aws_datazone/__init__.py +287 -226
- aws_cdk/aws_detective/__init__.py +3 -3
- aws_cdk/aws_dynamodb/__init__.py +37 -0
- aws_cdk/aws_ec2/__init__.py +2448 -442
- aws_cdk/aws_ecr/__init__.py +143 -0
- aws_cdk/aws_ecr_assets/__init__.py +115 -4
- aws_cdk/aws_ecs/__init__.py +51 -0
- aws_cdk/aws_eks/__init__.py +222 -6
- aws_cdk/aws_events/__init__.py +8 -11
- aws_cdk/aws_events_targets/__init__.py +136 -0
- aws_cdk/aws_forecast/__init__.py +1 -1
- aws_cdk/aws_fsx/__init__.py +2 -2
- aws_cdk/aws_gamelift/__init__.py +11 -11
- aws_cdk/aws_iam/__init__.py +6 -4
- aws_cdk/aws_identitystore/__init__.py +6 -4
- aws_cdk/aws_iotsitewise/__init__.py +623 -0
- aws_cdk/aws_kinesisfirehose/__init__.py +38 -0
- aws_cdk/aws_kms/__init__.py +10 -11
- aws_cdk/aws_lakeformation/__init__.py +3 -3
- aws_cdk/aws_lambda/__init__.py +105 -4
- aws_cdk/aws_lambda_event_sources/__init__.py +87 -25
- aws_cdk/aws_lambda_nodejs/__init__.py +5 -24
- aws_cdk/aws_lex/__init__.py +985 -5
- aws_cdk/aws_logs/__init__.py +18 -0
- aws_cdk/aws_logs_destinations/__init__.py +146 -0
- aws_cdk/aws_mediaconnect/__init__.py +714 -290
- aws_cdk/aws_mwaa/__init__.py +9 -9
- aws_cdk/aws_networkfirewall/__init__.py +44 -0
- aws_cdk/aws_notifications/__init__.py +4 -4
- aws_cdk/aws_omics/__init__.py +225 -1
- aws_cdk/aws_opensearchserverless/__init__.py +31 -23
- aws_cdk/aws_organizations/__init__.py +1 -1
- aws_cdk/aws_pcaconnectorad/__init__.py +3 -2
- aws_cdk/aws_quicksight/__init__.py +268 -50
- aws_cdk/aws_rds/__init__.py +186 -10
- aws_cdk/aws_route53/__init__.py +5 -5
- aws_cdk/aws_route53recoverycontrol/__init__.py +41 -2
- aws_cdk/aws_rum/__init__.py +13 -10
- aws_cdk/aws_s3/__init__.py +3 -6
- aws_cdk/aws_s3_assets/__init__.py +70 -1
- aws_cdk/aws_s3_deployment/__init__.py +4 -0
- aws_cdk/aws_sagemaker/__init__.py +47 -4
- aws_cdk/aws_scheduler_targets/__init__.py +4 -16
- aws_cdk/aws_securitylake/__init__.py +2 -2
- aws_cdk/aws_servicecatalog/__init__.py +4 -0
- aws_cdk/aws_sns/__init__.py +1 -1
- aws_cdk/aws_ssmincidents/__init__.py +10 -10
- aws_cdk/aws_stepfunctions/__init__.py +23 -17
- aws_cdk/aws_stepfunctions_tasks/__init__.py +4 -0
- aws_cdk/aws_synthetics/__init__.py +9 -0
- aws_cdk/aws_systemsmanagersap/__init__.py +160 -0
- aws_cdk/aws_transfer/__init__.py +19 -10
- aws_cdk/aws_wafv2/__init__.py +512 -1141
- aws_cdk/cloud_assembly_schema/__init__.py +60 -10
- aws_cdk/cx_api/__init__.py +38 -2
- aws_cdk/pipelines/__init__.py +52 -2
- {aws_cdk_lib-2.186.0.dist-info → aws_cdk_lib-2.188.0.dist-info}/METADATA +4 -4
- {aws_cdk_lib-2.186.0.dist-info → aws_cdk_lib-2.188.0.dist-info}/RECORD +85 -85
- {aws_cdk_lib-2.186.0.dist-info → aws_cdk_lib-2.188.0.dist-info}/WHEEL +1 -1
- {aws_cdk_lib-2.186.0.dist-info → aws_cdk_lib-2.188.0.dist-info}/LICENSE +0 -0
- {aws_cdk_lib-2.186.0.dist-info → aws_cdk_lib-2.188.0.dist-info}/NOTICE +0 -0
- {aws_cdk_lib-2.186.0.dist-info → aws_cdk_lib-2.188.0.dist-info}/top_level.txt +0 -0

--- a/aws_cdk/aws_lambda_event_sources/__init__.py
+++ b/aws_cdk/aws_lambda_event_sources/__init__.py
@@ -241,7 +241,7 @@ behavior:
 * **onFailure**: In the event a record fails and consumes all retries, the record will be sent to S3 bucket, SQS queue or SNS topic that is specified here
 * **parallelizationFactor**: The number of batches to concurrently process on each shard.
 * **retryAttempts**: The maximum number of times a record should be retried in the event of failure.
-* **startingPosition**: Will determine where to begin consumption. 'LATEST' will start at the most recent record and ignore all records that arrived prior to attaching the event source, 'TRIM_HORIZON' will start at the oldest record and ensure you process all available data, while 'AT_TIMESTAMP' will start reading records from a specified time stamp.
+* **startingPosition**: Will determine where to begin consumption. 'LATEST' will start at the most recent record and ignore all records that arrived prior to attaching the event source, 'TRIM_HORIZON' will start at the oldest record and ensure you process all available data, while 'AT_TIMESTAMP' will start reading records from a specified time stamp.
 * **startingPositionTimestamp**: The time stamp from which to start reading. Used in conjunction with **startingPosition** when set to 'AT_TIMESTAMP'.
 * **tumblingWindow**: The duration in seconds of a processing window when using streams.
 * **enabled**: If the event source mapping should be enabled. The default is true.
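
To make the knobs above concrete, here is a minimal sketch (not part of the diff) wiring a Kinesis stream into a handler with these parameters; `self`, `my_function` and the resource names are illustrative.

```python
from aws_cdk import Duration, aws_kinesis as kinesis, aws_lambda as lambda_, aws_sqs as sqs
from aws_cdk.aws_lambda_event_sources import KinesisEventSource, SqsDlq

stream = kinesis.Stream(self, "Stream")
dlq = sqs.Queue(self, "DLQ")

my_function.add_event_source(KinesisEventSource(stream,
    starting_position=lambda_.StartingPosition.TRIM_HORIZON,  # start at the oldest record
    batch_size=100,                            # records buffered per invocation
    max_batching_window=Duration.seconds(30),  # wait up to 30s to fill a batch
    parallelization_factor=2,                  # concurrent batches per shard
    retry_attempts=5,                          # retry a failed record 5 times...
    on_failure=SqsDlq(dlq),                    # ...then route it to this destination
    enabled=True,
))
```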
@@ -282,7 +282,14 @@ my_function.add_event_source(KinesisConsumerEventSource(stream_consumer,
 
 ## Kafka
 
-You can write Lambda functions to process data either from [Amazon MSK](https://docs.aws.amazon.com/lambda/latest/dg/with-msk.html) or a [self
+You can write Lambda functions to process data either from [Amazon MSK](https://docs.aws.amazon.com/lambda/latest/dg/with-msk.html) or a [self-managed Kafka](https://docs.aws.amazon.com/lambda/latest/dg/kafka-smaa.html) cluster. The following parameters will impact to the polling behavior:
+
+* **startingPosition**: Will determine where to begin consumption. 'LATEST' will start at the most recent record and ignore all records that arrived prior to attaching the event source, 'TRIM_HORIZON' will start at the oldest record and ensure you process all available data, while 'AT_TIMESTAMP' will start reading records from a specified time stamp.
+* **startingPositionTimestamp**: The time stamp from which to start reading. Used in conjunction with **startingPosition** when set to 'AT_TIMESTAMP'.
+* **batchSize**: Determines how many records are buffered before invoking your lambda function - could impact your function's memory usage (if too high) and ability to keep up with incoming data velocity (if too low).
+* **maxBatchingWindow**: The maximum amount of time to gather records before invoking the lambda. This increases the likelihood of a full batch at the cost of possibly delaying processing.
+* **onFailure**: In the event a record fails and consumes all retries, the record will be sent to SQS queue or SNS topic that is specified here
+* **enabled**: If the Kafka event source mapping should be enabled. The default is true.
 
 The following code sets up Amazon MSK as an event source for a lambda function. Credentials will need to be configured to access the
 MSK cluster, as described in [Username/Password authentication](https://docs.aws.amazon.com/msk/latest/developerguide/msk-password.html).
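
The hunks that follow thread a new `starting_position_timestamp` option through `KafkaEventSourceProps` and both the MSK and self-managed variants. A hedged sketch of how it might be used on 2.188.0, pairing it with `AT_TIMESTAMP` (the cluster ARN, topic and `my_secret` are placeholders):

```python
from aws_cdk import aws_lambda as lambda_
from aws_cdk.aws_lambda_event_sources import ManagedKafkaEventSource

my_function.add_event_source(ManagedKafkaEventSource(
    cluster_arn="arn:aws:kafka:us-east-1:0123456789:cluster/SalesCluster/abcd1234",  # placeholder
    topic="some-cool-topic",
    secret=my_secret,  # SASL/SCRAM credentials, if the brokers require them
    starting_position=lambda_.StartingPosition.AT_TIMESTAMP,
    starting_position_timestamp=1730000000,  # Unix time in seconds, not milliseconds
))
```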
@@ -827,6 +834,7 @@ class BaseStreamEventSourceProps:
         "filters": "filters",
         "on_failure": "onFailure",
         "secret": "secret",
+        "starting_position_timestamp": "startingPositionTimestamp",
     },
 )
 class KafkaEventSourceProps(BaseStreamEventSourceProps):
@@ -844,6 +852,7 @@ class KafkaEventSourceProps(BaseStreamEventSourceProps):
         filters: typing.Optional[typing.Sequence[typing.Mapping[builtins.str, typing.Any]]] = None,
         on_failure: typing.Optional[_IEventSourceDlq_5e2c6ad9] = None,
         secret: typing.Optional[_ISecret_6e020e6a] = None,
+        starting_position_timestamp: typing.Optional[jsii.Number] = None,
     ) -> None:
         '''Properties for a Kafka event source.
 
@@ -858,6 +867,7 @@ class KafkaEventSourceProps(BaseStreamEventSourceProps):
         :param filters: Add filter criteria to Event Source. Default: - none
         :param on_failure: Add an on Failure Destination for this Kafka event. SNS/SQS/S3 are supported Default: - discarded records are ignored
         :param secret: The secret with the Kafka credentials, see https://docs.aws.amazon.com/msk/latest/developerguide/msk-password.html for details This field is required if your Kafka brokers are accessed over the Internet. Default: none
+        :param starting_position_timestamp: The time from which to start reading, in Unix time seconds. Default: - no timestamp
 
         :exampleMetadata: fixture=_generated
 
@@ -894,7 +904,8 @@ class KafkaEventSourceProps(BaseStreamEventSourceProps):
                     maximum_pollers=123,
                     minimum_pollers=123
                 ),
-                secret=secret
+                secret=secret,
+                starting_position_timestamp=123
             )
         '''
         if isinstance(provisioned_poller_config, dict):
@@ -912,6 +923,7 @@ class KafkaEventSourceProps(BaseStreamEventSourceProps):
             check_type(argname="argument filters", value=filters, expected_type=type_hints["filters"])
             check_type(argname="argument on_failure", value=on_failure, expected_type=type_hints["on_failure"])
             check_type(argname="argument secret", value=secret, expected_type=type_hints["secret"])
+            check_type(argname="argument starting_position_timestamp", value=starting_position_timestamp, expected_type=type_hints["starting_position_timestamp"])
         self._values: typing.Dict[builtins.str, typing.Any] = {
             "starting_position": starting_position,
             "topic": topic,
@@ -934,6 +946,8 @@ class KafkaEventSourceProps(BaseStreamEventSourceProps):
             self._values["on_failure"] = on_failure
         if secret is not None:
             self._values["secret"] = secret
+        if starting_position_timestamp is not None:
+            self._values["starting_position_timestamp"] = starting_position_timestamp
 
     @builtins.property
     def starting_position(self) -> _StartingPosition_c0a4852c:
@@ -1060,6 +1074,15 @@ class KafkaEventSourceProps(BaseStreamEventSourceProps):
         result = self._values.get("secret")
         return typing.cast(typing.Optional[_ISecret_6e020e6a], result)
 
+    @builtins.property
+    def starting_position_timestamp(self) -> typing.Optional[jsii.Number]:
+        '''The time from which to start reading, in Unix time seconds.
+
+        :default: - no timestamp
+        '''
+        result = self._values.get("starting_position_timestamp")
+        return typing.cast(typing.Optional[jsii.Number], result)
+
     def __eq__(self, rhs: typing.Any) -> builtins.bool:
         return isinstance(rhs, self.__class__) and rhs._values == self._values
 
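
Note the unit in the new docstring: Unix time in seconds. A one-line, purely illustrative sketch of deriving such a value:

```python
from datetime import datetime, timezone

# Replay everything published since midnight UTC on 2024-01-01.
start_ts = int(datetime(2024, 1, 1, tzinfo=timezone.utc).timestamp())  # 1704067200
```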
@@ -1087,6 +1110,7 @@ class KafkaEventSourceProps(BaseStreamEventSourceProps):
         "filters": "filters",
         "on_failure": "onFailure",
         "secret": "secret",
+        "starting_position_timestamp": "startingPositionTimestamp",
         "cluster_arn": "clusterArn",
     },
 )
@@ -1105,6 +1129,7 @@ class ManagedKafkaEventSourceProps(KafkaEventSourceProps):
         filters: typing.Optional[typing.Sequence[typing.Mapping[builtins.str, typing.Any]]] = None,
         on_failure: typing.Optional[_IEventSourceDlq_5e2c6ad9] = None,
         secret: typing.Optional[_ISecret_6e020e6a] = None,
+        starting_position_timestamp: typing.Optional[jsii.Number] = None,
         cluster_arn: builtins.str,
     ) -> None:
         '''Properties for a MSK event source.
@@ -1120,6 +1145,7 @@ class ManagedKafkaEventSourceProps(KafkaEventSourceProps):
         :param filters: Add filter criteria to Event Source. Default: - none
         :param on_failure: Add an on Failure Destination for this Kafka event. SNS/SQS/S3 are supported Default: - discarded records are ignored
         :param secret: The secret with the Kafka credentials, see https://docs.aws.amazon.com/msk/latest/developerguide/msk-password.html for details This field is required if your Kafka brokers are accessed over the Internet. Default: none
+        :param starting_position_timestamp: The time from which to start reading, in Unix time seconds. Default: - no timestamp
         :param cluster_arn: An MSK cluster construct.
 
         :exampleMetadata: infused
@@ -1164,6 +1190,7 @@ class ManagedKafkaEventSourceProps(KafkaEventSourceProps):
             check_type(argname="argument filters", value=filters, expected_type=type_hints["filters"])
             check_type(argname="argument on_failure", value=on_failure, expected_type=type_hints["on_failure"])
             check_type(argname="argument secret", value=secret, expected_type=type_hints["secret"])
+            check_type(argname="argument starting_position_timestamp", value=starting_position_timestamp, expected_type=type_hints["starting_position_timestamp"])
             check_type(argname="argument cluster_arn", value=cluster_arn, expected_type=type_hints["cluster_arn"])
         self._values: typing.Dict[builtins.str, typing.Any] = {
             "starting_position": starting_position,
@@ -1188,6 +1215,8 @@ class ManagedKafkaEventSourceProps(KafkaEventSourceProps):
             self._values["on_failure"] = on_failure
         if secret is not None:
             self._values["secret"] = secret
+        if starting_position_timestamp is not None:
+            self._values["starting_position_timestamp"] = starting_position_timestamp
 
     @builtins.property
     def starting_position(self) -> _StartingPosition_c0a4852c:
@@ -1314,6 +1343,15 @@ class ManagedKafkaEventSourceProps(KafkaEventSourceProps):
         result = self._values.get("secret")
         return typing.cast(typing.Optional[_ISecret_6e020e6a], result)
 
+    @builtins.property
+    def starting_position_timestamp(self) -> typing.Optional[jsii.Number]:
+        '''The time from which to start reading, in Unix time seconds.
+
+        :default: - no timestamp
+        '''
+        result = self._values.get("starting_position_timestamp")
+        return typing.cast(typing.Optional[jsii.Number], result)
+
     @builtins.property
     def cluster_arn(self) -> builtins.str:
         '''An MSK cluster construct.'''
@@ -1680,6 +1718,7 @@ class S3OnFailureDestination(
         "filters": "filters",
         "on_failure": "onFailure",
         "secret": "secret",
+        "starting_position_timestamp": "startingPositionTimestamp",
         "bootstrap_servers": "bootstrapServers",
         "authentication_method": "authenticationMethod",
         "root_ca_certificate": "rootCACertificate",
@@ -1703,6 +1742,7 @@ class SelfManagedKafkaEventSourceProps(KafkaEventSourceProps):
         filters: typing.Optional[typing.Sequence[typing.Mapping[builtins.str, typing.Any]]] = None,
         on_failure: typing.Optional[_IEventSourceDlq_5e2c6ad9] = None,
         secret: typing.Optional[_ISecret_6e020e6a] = None,
+        starting_position_timestamp: typing.Optional[jsii.Number] = None,
         bootstrap_servers: typing.Sequence[builtins.str],
         authentication_method: typing.Optional[AuthenticationMethod] = None,
         root_ca_certificate: typing.Optional[_ISecret_6e020e6a] = None,
@@ -1725,6 +1765,7 @@ class SelfManagedKafkaEventSourceProps(KafkaEventSourceProps):
         :param filters: Add filter criteria to Event Source. Default: - none
         :param on_failure: Add an on Failure Destination for this Kafka event. SNS/SQS/S3 are supported Default: - discarded records are ignored
         :param secret: The secret with the Kafka credentials, see https://docs.aws.amazon.com/msk/latest/developerguide/msk-password.html for details This field is required if your Kafka brokers are accessed over the Internet. Default: none
+        :param starting_position_timestamp: The time from which to start reading, in Unix time seconds. Default: - no timestamp
         :param bootstrap_servers: The list of host and port pairs that are the addresses of the Kafka brokers in a "bootstrap" Kafka cluster that a Kafka client connects to initially to bootstrap itself. They are in the format ``abc.xyz.com:xxxx``.
         :param authentication_method: The authentication method for your Kafka cluster. Default: AuthenticationMethod.SASL_SCRAM_512_AUTH
         :param root_ca_certificate: The secret with the root CA certificate used by your Kafka brokers for TLS encryption This field is required if your Kafka brokers use certificates signed by a private CA. Default: - none
@@ -1779,6 +1820,7 @@ class SelfManagedKafkaEventSourceProps(KafkaEventSourceProps):
             check_type(argname="argument filters", value=filters, expected_type=type_hints["filters"])
             check_type(argname="argument on_failure", value=on_failure, expected_type=type_hints["on_failure"])
             check_type(argname="argument secret", value=secret, expected_type=type_hints["secret"])
+            check_type(argname="argument starting_position_timestamp", value=starting_position_timestamp, expected_type=type_hints["starting_position_timestamp"])
             check_type(argname="argument bootstrap_servers", value=bootstrap_servers, expected_type=type_hints["bootstrap_servers"])
             check_type(argname="argument authentication_method", value=authentication_method, expected_type=type_hints["authentication_method"])
             check_type(argname="argument root_ca_certificate", value=root_ca_certificate, expected_type=type_hints["root_ca_certificate"])
@@ -1808,6 +1850,8 @@ class SelfManagedKafkaEventSourceProps(KafkaEventSourceProps):
             self._values["on_failure"] = on_failure
         if secret is not None:
             self._values["secret"] = secret
+        if starting_position_timestamp is not None:
+            self._values["starting_position_timestamp"] = starting_position_timestamp
         if authentication_method is not None:
             self._values["authentication_method"] = authentication_method
         if root_ca_certificate is not None:
@@ -1944,6 +1988,15 @@ class SelfManagedKafkaEventSourceProps(KafkaEventSourceProps):
         result = self._values.get("secret")
         return typing.cast(typing.Optional[_ISecret_6e020e6a], result)
 
+    @builtins.property
+    def starting_position_timestamp(self) -> typing.Optional[jsii.Number]:
+        '''The time from which to start reading, in Unix time seconds.
+
+        :default: - no timestamp
+        '''
+        result = self._values.get("starting_position_timestamp")
+        return typing.cast(typing.Optional[jsii.Number], result)
+
     @builtins.property
     def bootstrap_servers(self) -> typing.List[builtins.str]:
         '''The list of host and port pairs that are the addresses of the Kafka brokers in a "bootstrap" Kafka cluster that a Kafka client connects to initially to bootstrap itself.
@@ -2595,7 +2648,7 @@ class StreamEventSource(
         :param on_failure: An Amazon S3, Amazon SQS queue or Amazon SNS topic destination for discarded records. Default: - discarded records are ignored
         :param parallelization_factor: The number of batches to process from each shard concurrently. Valid Range: - Minimum value of 1 - Maximum value of 10 Default: 1
         :param report_batch_item_failures: Allow functions to return partially successful responses for a batch of records. Default: false
-        :param retry_attempts: Maximum number of retry attempts
+        :param retry_attempts: Maximum number of retry attempts. Set to -1 for infinite retries (until the record expires in the event source). Valid Range: -1 (infinite) or 0 to 10000 Default: -1 (infinite retries)
         :param tumbling_window: The size of the tumbling windows to group records sent to DynamoDB or Kinesis Valid Range: 0 - 15 minutes. Default: - None
         :param starting_position: Where to begin consuming the stream.
         :param batch_size: The largest number of records that AWS Lambda will retrieve from your event source at the time of invoking your function. Your function receives an event with all the retrieved records. Valid Range: - Minimum value of 1 - Maximum value of: - 1000 for ``DynamoEventSource`` - 10000 for ``KinesisEventSource``, ``ManagedKafkaEventSource`` and ``SelfManagedKafkaEventSource`` Default: 100
@@ -2791,7 +2844,7 @@ class StreamEventSourceProps(BaseStreamEventSourceProps):
         :param on_failure: An Amazon S3, Amazon SQS queue or Amazon SNS topic destination for discarded records. Default: - discarded records are ignored
         :param parallelization_factor: The number of batches to process from each shard concurrently. Valid Range: - Minimum value of 1 - Maximum value of 10 Default: 1
         :param report_batch_item_failures: Allow functions to return partially successful responses for a batch of records. Default: false
-        :param retry_attempts: Maximum number of retry attempts
+        :param retry_attempts: Maximum number of retry attempts. Set to -1 for infinite retries (until the record expires in the event source). Valid Range: -1 (infinite) or 0 to 10000 Default: -1 (infinite retries)
         :param tumbling_window: The size of the tumbling windows to group records sent to DynamoDB or Kinesis Valid Range: 0 - 15 minutes. Default: - None
 
         :exampleMetadata: fixture=_generated
@@ -3044,13 +3097,13 @@ class StreamEventSourceProps(BaseStreamEventSourceProps):
 
     @builtins.property
     def retry_attempts(self) -> typing.Optional[jsii.Number]:
-        '''Maximum number of retry attempts
+        '''Maximum number of retry attempts.
 
-
-        When MaximumRetryAttempts is infinite, Lambda retries failed records until
-        the record expires in the event source.
+        Set to -1 for infinite retries (until the record expires in the event source).
 
-        :
+        Valid Range: -1 (infinite) or 0 to 10000
+
+        :default: -1 (infinite retries)
         '''
         result = self._values.get("retry_attempts")
         return typing.cast(typing.Optional[jsii.Number], result)
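
The reworked docstring now spells out the sentinel and the valid range. A hedged sketch of both modes against a DynamoDB stream (`table`, `other_table`, `dead_letter_queue` and `my_function` are placeholders):

```python
from aws_cdk import aws_lambda as lambda_
from aws_cdk.aws_lambda_event_sources import DynamoEventSource, SqsDlq

# Bounded retries: after 10 failed attempts the record is routed to the DLQ.
my_function.add_event_source(DynamoEventSource(table,
    starting_position=lambda_.StartingPosition.LATEST,
    retry_attempts=10,
    on_failure=SqsDlq(dead_letter_queue),
))

# Infinite retries (the documented default): Lambda keeps retrying a failed
# record until it expires in the stream (24 hours for DynamoDB streams).
my_function.add_event_source(DynamoEventSource(other_table,
    starting_position=lambda_.StartingPosition.TRIM_HORIZON,
    retry_attempts=-1,
))
```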
@@ -3138,7 +3191,7 @@ class DynamoEventSource(
         :param on_failure: An Amazon S3, Amazon SQS queue or Amazon SNS topic destination for discarded records. Default: - discarded records are ignored
         :param parallelization_factor: The number of batches to process from each shard concurrently. Valid Range: - Minimum value of 1 - Maximum value of 10 Default: 1
         :param report_batch_item_failures: Allow functions to return partially successful responses for a batch of records. Default: false
-        :param retry_attempts: Maximum number of retry attempts
+        :param retry_attempts: Maximum number of retry attempts. Set to -1 for infinite retries (until the record expires in the event source). Valid Range: -1 (infinite) or 0 to 10000 Default: -1 (infinite retries)
         :param tumbling_window: The size of the tumbling windows to group records sent to DynamoDB or Kinesis Valid Range: 0 - 15 minutes. Default: - None
         :param starting_position: Where to begin consuming the stream.
         :param batch_size: The largest number of records that AWS Lambda will retrieve from your event source at the time of invoking your function. Your function receives an event with all the retrieved records. Valid Range: - Minimum value of 1 - Maximum value of: - 1000 for ``DynamoEventSource`` - 10000 for ``KinesisEventSource``, ``ManagedKafkaEventSource`` and ``SelfManagedKafkaEventSource`` Default: 100
@@ -3248,7 +3301,7 @@ class DynamoEventSourceProps(StreamEventSourceProps):
         :param on_failure: An Amazon S3, Amazon SQS queue or Amazon SNS topic destination for discarded records. Default: - discarded records are ignored
         :param parallelization_factor: The number of batches to process from each shard concurrently. Valid Range: - Minimum value of 1 - Maximum value of 10 Default: 1
         :param report_batch_item_failures: Allow functions to return partially successful responses for a batch of records. Default: false
-        :param retry_attempts: Maximum number of retry attempts
+        :param retry_attempts: Maximum number of retry attempts. Set to -1 for infinite retries (until the record expires in the event source). Valid Range: -1 (infinite) or 0 to 10000 Default: -1 (infinite retries)
         :param tumbling_window: The size of the tumbling windows to group records sent to DynamoDB or Kinesis Valid Range: 0 - 15 minutes. Default: - None
 
         :exampleMetadata: infused
@@ -3483,13 +3536,13 @@ class DynamoEventSourceProps(StreamEventSourceProps):
 
     @builtins.property
     def retry_attempts(self) -> typing.Optional[jsii.Number]:
-        '''Maximum number of retry attempts
+        '''Maximum number of retry attempts.
 
-
-        When MaximumRetryAttempts is infinite, Lambda retries failed records until
-        the record expires in the event source.
+        Set to -1 for infinite retries (until the record expires in the event source).
 
-        :
+        Valid Range: -1 (infinite) or 0 to 10000
+
+        :default: -1 (infinite retries)
         '''
         result = self._values.get("retry_attempts")
         return typing.cast(typing.Optional[jsii.Number], result)
@@ -3575,7 +3628,7 @@ class KinesisConsumerEventSource(
         :param on_failure: An Amazon S3, Amazon SQS queue or Amazon SNS topic destination for discarded records. Default: - discarded records are ignored
         :param parallelization_factor: The number of batches to process from each shard concurrently. Valid Range: - Minimum value of 1 - Maximum value of 10 Default: 1
         :param report_batch_item_failures: Allow functions to return partially successful responses for a batch of records. Default: false
-        :param retry_attempts: Maximum number of retry attempts
+        :param retry_attempts: Maximum number of retry attempts. Set to -1 for infinite retries (until the record expires in the event source). Valid Range: -1 (infinite) or 0 to 10000 Default: -1 (infinite retries)
         :param tumbling_window: The size of the tumbling windows to group records sent to DynamoDB or Kinesis Valid Range: 0 - 15 minutes. Default: - None
         :param starting_position: Where to begin consuming the stream.
         :param batch_size: The largest number of records that AWS Lambda will retrieve from your event source at the time of invoking your function. Your function receives an event with all the retrieved records. Valid Range: - Minimum value of 1 - Maximum value of: - 1000 for ``DynamoEventSource`` - 10000 for ``KinesisEventSource``, ``ManagedKafkaEventSource`` and ``SelfManagedKafkaEventSource`` Default: 100
@@ -3692,7 +3745,7 @@ class KinesisEventSource(
         :param on_failure: An Amazon S3, Amazon SQS queue or Amazon SNS topic destination for discarded records. Default: - discarded records are ignored
         :param parallelization_factor: The number of batches to process from each shard concurrently. Valid Range: - Minimum value of 1 - Maximum value of 10 Default: 1
         :param report_batch_item_failures: Allow functions to return partially successful responses for a batch of records. Default: false
-        :param retry_attempts: Maximum number of retry attempts
+        :param retry_attempts: Maximum number of retry attempts. Set to -1 for infinite retries (until the record expires in the event source). Valid Range: -1 (infinite) or 0 to 10000 Default: -1 (infinite retries)
         :param tumbling_window: The size of the tumbling windows to group records sent to DynamoDB or Kinesis Valid Range: 0 - 15 minutes. Default: - None
         :param starting_position: Where to begin consuming the stream.
         :param batch_size: The largest number of records that AWS Lambda will retrieve from your event source at the time of invoking your function. Your function receives an event with all the retrieved records. Valid Range: - Minimum value of 1 - Maximum value of: - 1000 for ``DynamoEventSource`` - 10000 for ``KinesisEventSource``, ``ManagedKafkaEventSource`` and ``SelfManagedKafkaEventSource`` Default: 100
@@ -3810,7 +3863,7 @@ class KinesisEventSourceProps(StreamEventSourceProps):
         :param on_failure: An Amazon S3, Amazon SQS queue or Amazon SNS topic destination for discarded records. Default: - discarded records are ignored
         :param parallelization_factor: The number of batches to process from each shard concurrently. Valid Range: - Minimum value of 1 - Maximum value of 10 Default: 1
         :param report_batch_item_failures: Allow functions to return partially successful responses for a batch of records. Default: false
-        :param retry_attempts: Maximum number of retry attempts
+        :param retry_attempts: Maximum number of retry attempts. Set to -1 for infinite retries (until the record expires in the event source). Valid Range: -1 (infinite) or 0 to 10000 Default: -1 (infinite retries)
         :param tumbling_window: The size of the tumbling windows to group records sent to DynamoDB or Kinesis Valid Range: 0 - 15 minutes. Default: - None
         :param starting_position_timestamp: The time from which to start reading, in Unix time seconds. Default: - no timestamp
 
@@ -4041,13 +4094,13 @@ class KinesisEventSourceProps(StreamEventSourceProps):
 
     @builtins.property
     def retry_attempts(self) -> typing.Optional[jsii.Number]:
-        '''Maximum number of retry attempts
+        '''Maximum number of retry attempts.
 
-
-        When MaximumRetryAttempts is infinite, Lambda retries failed records until
-        the record expires in the event source.
+        Set to -1 for infinite retries (until the record expires in the event source).
 
-        :
+        Valid Range: -1 (infinite) or 0 to 10000
+
+        :default: -1 (infinite retries)
         '''
         result = self._values.get("retry_attempts")
         return typing.cast(typing.Optional[jsii.Number], result)
@@ -4127,6 +4180,7 @@ class ManagedKafkaEventSource(
         filters: typing.Optional[typing.Sequence[typing.Mapping[builtins.str, typing.Any]]] = None,
         on_failure: typing.Optional[_IEventSourceDlq_5e2c6ad9] = None,
         secret: typing.Optional[_ISecret_6e020e6a] = None,
+        starting_position_timestamp: typing.Optional[jsii.Number] = None,
         starting_position: _StartingPosition_c0a4852c,
         batch_size: typing.Optional[jsii.Number] = None,
         enabled: typing.Optional[builtins.bool] = None,
@@ -4141,6 +4195,7 @@ class ManagedKafkaEventSource(
         :param filters: Add filter criteria to Event Source. Default: - none
         :param on_failure: Add an on Failure Destination for this Kafka event. SNS/SQS/S3 are supported Default: - discarded records are ignored
         :param secret: The secret with the Kafka credentials, see https://docs.aws.amazon.com/msk/latest/developerguide/msk-password.html for details This field is required if your Kafka brokers are accessed over the Internet. Default: none
+        :param starting_position_timestamp: The time from which to start reading, in Unix time seconds. Default: - no timestamp
         :param starting_position: Where to begin consuming the stream.
         :param batch_size: The largest number of records that AWS Lambda will retrieve from your event source at the time of invoking your function. Your function receives an event with all the retrieved records. Valid Range: - Minimum value of 1 - Maximum value of: - 1000 for ``DynamoEventSource`` - 10000 for ``KinesisEventSource``, ``ManagedKafkaEventSource`` and ``SelfManagedKafkaEventSource`` Default: 100
         :param enabled: If the stream event source mapping should be enabled. Default: true
@@ -4155,6 +4210,7 @@ class ManagedKafkaEventSource(
             filters=filters,
             on_failure=on_failure,
             secret=secret,
+            starting_position_timestamp=starting_position_timestamp,
             starting_position=starting_position,
             batch_size=batch_size,
             enabled=enabled,
@@ -4241,6 +4297,7 @@ class SelfManagedKafkaEventSource(
         filters: typing.Optional[typing.Sequence[typing.Mapping[builtins.str, typing.Any]]] = None,
         on_failure: typing.Optional[_IEventSourceDlq_5e2c6ad9] = None,
         secret: typing.Optional[_ISecret_6e020e6a] = None,
+        starting_position_timestamp: typing.Optional[jsii.Number] = None,
         starting_position: _StartingPosition_c0a4852c,
         batch_size: typing.Optional[jsii.Number] = None,
         enabled: typing.Optional[builtins.bool] = None,
@@ -4260,6 +4317,7 @@ class SelfManagedKafkaEventSource(
         :param filters: Add filter criteria to Event Source. Default: - none
         :param on_failure: Add an on Failure Destination for this Kafka event. SNS/SQS/S3 are supported Default: - discarded records are ignored
         :param secret: The secret with the Kafka credentials, see https://docs.aws.amazon.com/msk/latest/developerguide/msk-password.html for details This field is required if your Kafka brokers are accessed over the Internet. Default: none
+        :param starting_position_timestamp: The time from which to start reading, in Unix time seconds. Default: - no timestamp
         :param starting_position: Where to begin consuming the stream.
         :param batch_size: The largest number of records that AWS Lambda will retrieve from your event source at the time of invoking your function. Your function receives an event with all the retrieved records. Valid Range: - Minimum value of 1 - Maximum value of: - 1000 for ``DynamoEventSource`` - 10000 for ``KinesisEventSource``, ``ManagedKafkaEventSource`` and ``SelfManagedKafkaEventSource`` Default: 100
         :param enabled: If the stream event source mapping should be enabled. Default: true
@@ -4279,6 +4337,7 @@ class SelfManagedKafkaEventSource(
             filters=filters,
             on_failure=on_failure,
             secret=secret,
+            starting_position_timestamp=starting_position_timestamp,
             starting_position=starting_position,
             batch_size=batch_size,
             enabled=enabled,
@@ -4379,6 +4438,7 @@ def _typecheckingstub__980041697091a50415a7444df02a046d910ddd83f1229789d80780bf7
    filters: typing.Optional[typing.Sequence[typing.Mapping[builtins.str, typing.Any]]] = None,
    on_failure: typing.Optional[_IEventSourceDlq_5e2c6ad9] = None,
    secret: typing.Optional[_ISecret_6e020e6a] = None,
+    starting_position_timestamp: typing.Optional[jsii.Number] = None,
 ) -> None:
     """Type checking stubs"""
     pass
@@ -4396,6 +4456,7 @@ def _typecheckingstub__e930f585c1bae37174885c54f0f224909bfb0a75d9f1b652bbcf33461
    filters: typing.Optional[typing.Sequence[typing.Mapping[builtins.str, typing.Any]]] = None,
    on_failure: typing.Optional[_IEventSourceDlq_5e2c6ad9] = None,
    secret: typing.Optional[_ISecret_6e020e6a] = None,
+    starting_position_timestamp: typing.Optional[jsii.Number] = None,
    cluster_arn: builtins.str,
 ) -> None:
     """Type checking stubs"""
@@ -4473,6 +4534,7 @@ def _typecheckingstub__0100a45aa91b9c2103378e2ba54dd41b054f1d6a50733797256d6971b
    filters: typing.Optional[typing.Sequence[typing.Mapping[builtins.str, typing.Any]]] = None,
    on_failure: typing.Optional[_IEventSourceDlq_5e2c6ad9] = None,
    secret: typing.Optional[_ISecret_6e020e6a] = None,
+    starting_position_timestamp: typing.Optional[jsii.Number] = None,
    bootstrap_servers: typing.Sequence[builtins.str],
    authentication_method: typing.Optional[AuthenticationMethod] = None,
    root_ca_certificate: typing.Optional[_ISecret_6e020e6a] = None,
--- a/aws_cdk/aws_lambda_nodejs/__init__.py
+++ b/aws_cdk/aws_lambda_nodejs/__init__.py
@@ -1938,30 +1938,11 @@ class NodejsFunctionProps(_FunctionOptions_328f4d39):
 
         nodejs.NodejsFunction(self, "my-handler",
             bundling=nodejs.BundlingOptions(
-
-
-
-
-
-                loader={ # Use the 'dataurl' loader for '.png' files
-                    ".png": "dataurl"},
-                define={ # Replace strings during build time
-                    "process.env.API_KEY": JSON.stringify("xxx-xxxx-xxx"),
-                    "process.env.PRODUCTION": JSON.stringify(True),
-                    "process.env.NUMBER": JSON.stringify(123)},
-                log_level=nodejs.LogLevel.ERROR, # defaults to LogLevel.WARNING
-                keep_names=True, # defaults to false
-                tsconfig="custom-tsconfig.json", # use custom-tsconfig.json instead of default,
-                metafile=True, # include meta file, defaults to false
-                banner="/* comments */", # requires esbuild >= 0.9.0, defaults to none
-                footer="/* comments */", # requires esbuild >= 0.9.0, defaults to none
-                charset=nodejs.Charset.UTF8, # do not escape non-ASCII characters, defaults to Charset.ASCII
-                format=nodejs.OutputFormat.ESM, # ECMAScript module output format, defaults to OutputFormat.CJS (OutputFormat.ESM requires Node.js >= 14)
-                main_fields=["module", "main"], # prefer ECMAScript versions of dependencies
-                inject=["./my-shim.js", "./other-shim.js"], # allows to automatically replace a global variable with an import from another file
-                esbuild_args={ # Pass additional arguments to esbuild
-                    "--log-limit": "0",
-                    "--splitting": True}
+                network="host",
+                security_opt="no-new-privileges",
+                user="user:group",
+                volumes_from=["777f7dc92da7"],
+                volumes=[DockerVolume(host_path="/host-path", container_path="/container-path")]
             )
         )
     '''