aws-cdk-lib 2.168.0__py3-none-any.whl → 2.170.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- aws_cdk/__init__.py +2 -0
- aws_cdk/_jsii/__init__.py +1 -1
- aws_cdk/_jsii/{aws-cdk-lib@2.168.0.jsii.tgz → aws-cdk-lib@2.170.0.jsii.tgz} +0 -0
- aws_cdk/aws_accessanalyzer/__init__.py +244 -13
- aws_cdk/aws_applicationsignals/__init__.py +8 -1
- aws_cdk/aws_autoscaling/__init__.py +310 -9
- aws_cdk/aws_cloudfront/__init__.py +50 -0
- aws_cdk/aws_cloudfront/experimental/__init__.py +5 -0
- aws_cdk/aws_codebuild/__init__.py +2 -2
- aws_cdk/aws_connect/__init__.py +378 -0
- aws_cdk/aws_customerprofiles/__init__.py +44 -0
- aws_cdk/aws_deadline/__init__.py +299 -6
- aws_cdk/aws_dynamodb/__init__.py +47 -25
- aws_cdk/aws_ec2/__init__.py +6 -2
- aws_cdk/aws_ecs/__init__.py +28 -22
- aws_cdk/aws_efs/__init__.py +61 -4
- aws_cdk/aws_eks/__init__.py +116 -0
- aws_cdk/aws_gamelift/__init__.py +385 -251
- aws_cdk/aws_iot/__init__.py +209 -0
- aws_cdk/aws_iotfleetwise/__init__.py +550 -0
- aws_cdk/aws_iotsitewise/__init__.py +6 -3
- aws_cdk/aws_ivs/__init__.py +458 -0
- aws_cdk/aws_kinesisfirehose/__init__.py +90 -33
- aws_cdk/aws_lambda/__init__.py +150 -6
- aws_cdk/aws_lambda_event_sources/__init__.py +298 -1
- aws_cdk/aws_rbin/__init__.py +902 -0
- aws_cdk/aws_rds/__init__.py +115 -0
- aws_cdk/aws_route53resolver/__init__.py +76 -19
- aws_cdk/aws_sagemaker/__init__.py +32 -0
- aws_cdk/aws_sns/__init__.py +593 -8
- aws_cdk/aws_sns_subscriptions/__init__.py +68 -22
- aws_cdk/aws_synthetics/__init__.py +46 -0
- aws_cdk/aws_vpclattice/__init__.py +118 -2
- aws_cdk/aws_wisdom/__init__.py +16 -21
- {aws_cdk_lib-2.168.0.dist-info → aws_cdk_lib-2.170.0.dist-info}/METADATA +1 -1
- {aws_cdk_lib-2.168.0.dist-info → aws_cdk_lib-2.170.0.dist-info}/RECORD +40 -39
- {aws_cdk_lib-2.168.0.dist-info → aws_cdk_lib-2.170.0.dist-info}/LICENSE +0 -0
- {aws_cdk_lib-2.168.0.dist-info → aws_cdk_lib-2.170.0.dist-info}/NOTICE +0 -0
- {aws_cdk_lib-2.168.0.dist-info → aws_cdk_lib-2.170.0.dist-info}/WHEEL +0 -0
- {aws_cdk_lib-2.168.0.dist-info → aws_cdk_lib-2.170.0.dist-info}/top_level.txt +0 -0
@@ -407,6 +407,30 @@ my_function.add_event_source(ManagedKafkaEventSource(
 ))
 ```
 
+Set configuration for provisioned pollers that read from the event source.
+
+```python
+from aws_cdk.aws_lambda_event_sources import ManagedKafkaEventSource
+
+# Your MSK cluster arn
+# cluster_arn: str
+
+# my_function: lambda.Function
+
+
+# The Kafka topic you want to subscribe to
+topic = "some-cool-topic"
+my_function.add_event_source(ManagedKafkaEventSource(
+    cluster_arn=cluster_arn,
+    topic=topic,
+    starting_position=lambda_.StartingPosition.TRIM_HORIZON,
+    provisioned_poller_config=ProvisionedPollerConfig(
+        minimum_pollers=1,
+        maximum_pollers=3
+    )
+))
+```
+
 ## Roadmap
 
 Eventually, this module will support all the event sources described under
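The diff below threads the same `provisioned_poller_config` option through the self-managed Kafka, Kinesis, and DynamoDB event source props as well. A minimal sketch of the self-managed Kafka variant, assuming an existing `my_function` and a Secrets Manager `secret` for cluster authentication (the broker address shown is an illustrative placeholder, not taken from this diff):

```python
from aws_cdk import aws_lambda as lambda_
from aws_cdk.aws_lambda_event_sources import (
    ProvisionedPollerConfig,
    SelfManagedKafkaEventSource,
)

# my_function: lambda.Function
# secret: secretsmanager.ISecret  # credentials for the Kafka cluster (placeholder)

my_function.add_event_source(SelfManagedKafkaEventSource(
    bootstrap_servers=["kafka-broker-1.example.com:9092"],  # placeholder broker
    topic="some-cool-topic",
    secret=secret,
    starting_position=lambda_.StartingPosition.TRIM_HORIZON,
    # Same poller bounds as the MSK example above
    provisioned_poller_config=ProvisionedPollerConfig(
        minimum_pollers=1,
        maximum_pollers=3,
    ),
))
```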
@@ -472,6 +496,7 @@ from ..aws_lambda import (
     IEventSourceMapping as _IEventSourceMapping_e216064e,
     IFunction as _IFunction_6adb0ab8,
     MetricsConfig as _MetricsConfig_48ab59c4,
+    ProvisionedPollerConfig as _ProvisionedPollerConfig_1025e063,
     SourceAccessConfiguration as _SourceAccessConfiguration_1926ff89,
     StartingPosition as _StartingPosition_c0a4852c,
 )
@@ -627,6 +652,7 @@ class AuthenticationMethod(enum.Enum):
         "batch_size": "batchSize",
         "enabled": "enabled",
         "max_batching_window": "maxBatchingWindow",
+        "provisioned_poller_config": "provisionedPollerConfig",
     },
 )
 class BaseStreamEventSourceProps:
@@ -637,6 +663,7 @@ class BaseStreamEventSourceProps:
         batch_size: typing.Optional[jsii.Number] = None,
         enabled: typing.Optional[builtins.bool] = None,
         max_batching_window: typing.Optional[_Duration_4839e8c3] = None,
+        provisioned_poller_config: typing.Optional[typing.Union["ProvisionedPollerConfig", typing.Dict[builtins.str, typing.Any]]] = None,
     ) -> None:
         '''The set of properties for streaming event sources shared by Dynamo, Kinesis and Kafka.
 
@@ -644,6 +671,7 @@ class BaseStreamEventSourceProps:
         :param batch_size: The largest number of records that AWS Lambda will retrieve from your event source at the time of invoking your function. Your function receives an event with all the retrieved records. Valid Range: - Minimum value of 1 - Maximum value of: - 1000 for ``DynamoEventSource`` - 10000 for ``KinesisEventSource``, ``ManagedKafkaEventSource`` and ``SelfManagedKafkaEventSource`` Default: 100
         :param enabled: If the stream event source mapping should be enabled. Default: true
         :param max_batching_window: The maximum amount of time to gather records before invoking the function. Maximum of Duration.minutes(5). Default: - Duration.seconds(0) for Kinesis, DynamoDB, and SQS event sources, Duration.millis(500) for MSK, self-managed Kafka, and Amazon MQ.
+        :param provisioned_poller_config: Configuration for provisioned pollers that read from the event source. When specified, allows control over the minimum and maximum number of pollers that can be provisioned to process events from the source. Default: - no provisioned pollers
 
         :exampleMetadata: fixture=_generated
 
@@ -661,15 +689,22 @@ class BaseStreamEventSourceProps:
                 # the properties below are optional
                 batch_size=123,
                 enabled=False,
-                max_batching_window=cdk.Duration.minutes(30)
+                max_batching_window=cdk.Duration.minutes(30),
+                provisioned_poller_config=lambda_event_sources.ProvisionedPollerConfig(
+                    maximum_pollers=123,
+                    minimum_pollers=123
+                )
             )
         '''
+        if isinstance(provisioned_poller_config, dict):
+            provisioned_poller_config = ProvisionedPollerConfig(**provisioned_poller_config)
         if __debug__:
             type_hints = typing.get_type_hints(_typecheckingstub__49e8158c80bdc70be09102671a6e9cb783a1a28274da91874a575eeb7e7b4fc6)
             check_type(argname="argument starting_position", value=starting_position, expected_type=type_hints["starting_position"])
             check_type(argname="argument batch_size", value=batch_size, expected_type=type_hints["batch_size"])
             check_type(argname="argument enabled", value=enabled, expected_type=type_hints["enabled"])
             check_type(argname="argument max_batching_window", value=max_batching_window, expected_type=type_hints["max_batching_window"])
+            check_type(argname="argument provisioned_poller_config", value=provisioned_poller_config, expected_type=type_hints["provisioned_poller_config"])
         self._values: typing.Dict[builtins.str, typing.Any] = {
             "starting_position": starting_position,
         }
@@ -679,6 +714,8 @@ class BaseStreamEventSourceProps:
             self._values["enabled"] = enabled
         if max_batching_window is not None:
             self._values["max_batching_window"] = max_batching_window
+        if provisioned_poller_config is not None:
+            self._values["provisioned_poller_config"] = provisioned_poller_config
 
     @builtins.property
     def starting_position(self) -> _StartingPosition_c0a4852c:
@@ -729,6 +766,18 @@ class BaseStreamEventSourceProps:
         result = self._values.get("max_batching_window")
         return typing.cast(typing.Optional[_Duration_4839e8c3], result)
 
+    @builtins.property
+    def provisioned_poller_config(self) -> typing.Optional["ProvisionedPollerConfig"]:
+        '''Configuration for provisioned pollers that read from the event source.
+
+        When specified, allows control over the minimum and maximum number of pollers
+        that can be provisioned to process events from the source.
+
+        :default: - no provisioned pollers
+        '''
+        result = self._values.get("provisioned_poller_config")
+        return typing.cast(typing.Optional["ProvisionedPollerConfig"], result)
+
     def __eq__(self, rhs: typing.Any) -> builtins.bool:
         return isinstance(rhs, self.__class__) and rhs._values == self._values
 
@@ -749,6 +798,7 @@ class BaseStreamEventSourceProps:
         "batch_size": "batchSize",
         "enabled": "enabled",
         "max_batching_window": "maxBatchingWindow",
+        "provisioned_poller_config": "provisionedPollerConfig",
         "topic": "topic",
         "consumer_group_id": "consumerGroupId",
         "filter_encryption": "filterEncryption",
@@ -765,6 +815,7 @@ class KafkaEventSourceProps(BaseStreamEventSourceProps):
         batch_size: typing.Optional[jsii.Number] = None,
         enabled: typing.Optional[builtins.bool] = None,
         max_batching_window: typing.Optional[_Duration_4839e8c3] = None,
+        provisioned_poller_config: typing.Optional[typing.Union["ProvisionedPollerConfig", typing.Dict[builtins.str, typing.Any]]] = None,
         topic: builtins.str,
         consumer_group_id: typing.Optional[builtins.str] = None,
         filter_encryption: typing.Optional[_IKey_5f11635f] = None,
@@ -778,6 +829,7 @@ class KafkaEventSourceProps(BaseStreamEventSourceProps):
         :param batch_size: The largest number of records that AWS Lambda will retrieve from your event source at the time of invoking your function. Your function receives an event with all the retrieved records. Valid Range: - Minimum value of 1 - Maximum value of: - 1000 for ``DynamoEventSource`` - 10000 for ``KinesisEventSource``, ``ManagedKafkaEventSource`` and ``SelfManagedKafkaEventSource`` Default: 100
         :param enabled: If the stream event source mapping should be enabled. Default: true
         :param max_batching_window: The maximum amount of time to gather records before invoking the function. Maximum of Duration.minutes(5). Default: - Duration.seconds(0) for Kinesis, DynamoDB, and SQS event sources, Duration.millis(500) for MSK, self-managed Kafka, and Amazon MQ.
+        :param provisioned_poller_config: Configuration for provisioned pollers that read from the event source. When specified, allows control over the minimum and maximum number of pollers that can be provisioned to process events from the source. Default: - no provisioned pollers
         :param topic: The Kafka topic to subscribe to.
         :param consumer_group_id: The identifier for the Kafka consumer group to join. The consumer group ID must be unique among all your Kafka event sources. After creating a Kafka event source mapping with the consumer group ID specified, you cannot update this value. The value must have a lenght between 1 and 200 and full the pattern '[a-zA-Z0-9-/*:_+=.@-]*'. Default: - none
         :param filter_encryption: Add Customer managed KMS key to encrypt Filter Criteria. Default: - none
@@ -816,15 +868,22 @@ class KafkaEventSourceProps(BaseStreamEventSourceProps):
                 }],
                 max_batching_window=cdk.Duration.minutes(30),
                 on_failure=event_source_dlq,
+                provisioned_poller_config=lambda_event_sources.ProvisionedPollerConfig(
+                    maximum_pollers=123,
+                    minimum_pollers=123
+                ),
                 secret=secret
             )
         '''
+        if isinstance(provisioned_poller_config, dict):
+            provisioned_poller_config = ProvisionedPollerConfig(**provisioned_poller_config)
         if __debug__:
             type_hints = typing.get_type_hints(_typecheckingstub__980041697091a50415a7444df02a046d910ddd83f1229789d80780bf7903633d)
             check_type(argname="argument starting_position", value=starting_position, expected_type=type_hints["starting_position"])
             check_type(argname="argument batch_size", value=batch_size, expected_type=type_hints["batch_size"])
             check_type(argname="argument enabled", value=enabled, expected_type=type_hints["enabled"])
             check_type(argname="argument max_batching_window", value=max_batching_window, expected_type=type_hints["max_batching_window"])
+            check_type(argname="argument provisioned_poller_config", value=provisioned_poller_config, expected_type=type_hints["provisioned_poller_config"])
             check_type(argname="argument topic", value=topic, expected_type=type_hints["topic"])
             check_type(argname="argument consumer_group_id", value=consumer_group_id, expected_type=type_hints["consumer_group_id"])
             check_type(argname="argument filter_encryption", value=filter_encryption, expected_type=type_hints["filter_encryption"])
@@ -841,6 +900,8 @@ class KafkaEventSourceProps(BaseStreamEventSourceProps):
             self._values["enabled"] = enabled
         if max_batching_window is not None:
             self._values["max_batching_window"] = max_batching_window
+        if provisioned_poller_config is not None:
+            self._values["provisioned_poller_config"] = provisioned_poller_config
         if consumer_group_id is not None:
             self._values["consumer_group_id"] = consumer_group_id
         if filter_encryption is not None:
@@ -901,6 +962,18 @@ class KafkaEventSourceProps(BaseStreamEventSourceProps):
         result = self._values.get("max_batching_window")
         return typing.cast(typing.Optional[_Duration_4839e8c3], result)
 
+    @builtins.property
+    def provisioned_poller_config(self) -> typing.Optional["ProvisionedPollerConfig"]:
+        '''Configuration for provisioned pollers that read from the event source.
+
+        When specified, allows control over the minimum and maximum number of pollers
+        that can be provisioned to process events from the source.
+
+        :default: - no provisioned pollers
+        '''
+        result = self._values.get("provisioned_poller_config")
+        return typing.cast(typing.Optional["ProvisionedPollerConfig"], result)
+
     @builtins.property
     def topic(self) -> builtins.str:
         '''The Kafka topic to subscribe to.'''
@@ -985,6 +1058,7 @@ class KafkaEventSourceProps(BaseStreamEventSourceProps):
         "batch_size": "batchSize",
         "enabled": "enabled",
         "max_batching_window": "maxBatchingWindow",
+        "provisioned_poller_config": "provisionedPollerConfig",
         "topic": "topic",
         "consumer_group_id": "consumerGroupId",
         "filter_encryption": "filterEncryption",
@@ -1002,6 +1076,7 @@ class ManagedKafkaEventSourceProps(KafkaEventSourceProps):
         batch_size: typing.Optional[jsii.Number] = None,
         enabled: typing.Optional[builtins.bool] = None,
         max_batching_window: typing.Optional[_Duration_4839e8c3] = None,
+        provisioned_poller_config: typing.Optional[typing.Union["ProvisionedPollerConfig", typing.Dict[builtins.str, typing.Any]]] = None,
         topic: builtins.str,
         consumer_group_id: typing.Optional[builtins.str] = None,
         filter_encryption: typing.Optional[_IKey_5f11635f] = None,
@@ -1016,6 +1091,7 @@ class ManagedKafkaEventSourceProps(KafkaEventSourceProps):
         :param batch_size: The largest number of records that AWS Lambda will retrieve from your event source at the time of invoking your function. Your function receives an event with all the retrieved records. Valid Range: - Minimum value of 1 - Maximum value of: - 1000 for ``DynamoEventSource`` - 10000 for ``KinesisEventSource``, ``ManagedKafkaEventSource`` and ``SelfManagedKafkaEventSource`` Default: 100
         :param enabled: If the stream event source mapping should be enabled. Default: true
         :param max_batching_window: The maximum amount of time to gather records before invoking the function. Maximum of Duration.minutes(5). Default: - Duration.seconds(0) for Kinesis, DynamoDB, and SQS event sources, Duration.millis(500) for MSK, self-managed Kafka, and Amazon MQ.
+        :param provisioned_poller_config: Configuration for provisioned pollers that read from the event source. When specified, allows control over the minimum and maximum number of pollers that can be provisioned to process events from the source. Default: - no provisioned pollers
         :param topic: The Kafka topic to subscribe to.
         :param consumer_group_id: The identifier for the Kafka consumer group to join. The consumer group ID must be unique among all your Kafka event sources. After creating a Kafka event source mapping with the consumer group ID specified, you cannot update this value. The value must have a lenght between 1 and 200 and full the pattern '[a-zA-Z0-9-/*:_+=.@-]*'. Default: - none
         :param filter_encryption: Add Customer managed KMS key to encrypt Filter Criteria. Default: - none
@@ -1051,12 +1127,15 @@ class ManagedKafkaEventSourceProps(KafkaEventSourceProps):
                 starting_position=lambda_.StartingPosition.TRIM_HORIZON
             ))
         '''
+        if isinstance(provisioned_poller_config, dict):
+            provisioned_poller_config = ProvisionedPollerConfig(**provisioned_poller_config)
         if __debug__:
             type_hints = typing.get_type_hints(_typecheckingstub__e930f585c1bae37174885c54f0f224909bfb0a75d9f1b652bbcf3346100e977e)
             check_type(argname="argument starting_position", value=starting_position, expected_type=type_hints["starting_position"])
             check_type(argname="argument batch_size", value=batch_size, expected_type=type_hints["batch_size"])
             check_type(argname="argument enabled", value=enabled, expected_type=type_hints["enabled"])
             check_type(argname="argument max_batching_window", value=max_batching_window, expected_type=type_hints["max_batching_window"])
+            check_type(argname="argument provisioned_poller_config", value=provisioned_poller_config, expected_type=type_hints["provisioned_poller_config"])
             check_type(argname="argument topic", value=topic, expected_type=type_hints["topic"])
             check_type(argname="argument consumer_group_id", value=consumer_group_id, expected_type=type_hints["consumer_group_id"])
             check_type(argname="argument filter_encryption", value=filter_encryption, expected_type=type_hints["filter_encryption"])
@@ -1075,6 +1154,8 @@ class ManagedKafkaEventSourceProps(KafkaEventSourceProps):
             self._values["enabled"] = enabled
         if max_batching_window is not None:
             self._values["max_batching_window"] = max_batching_window
+        if provisioned_poller_config is not None:
+            self._values["provisioned_poller_config"] = provisioned_poller_config
         if consumer_group_id is not None:
             self._values["consumer_group_id"] = consumer_group_id
         if filter_encryption is not None:
@@ -1135,6 +1216,18 @@ class ManagedKafkaEventSourceProps(KafkaEventSourceProps):
         result = self._values.get("max_batching_window")
         return typing.cast(typing.Optional[_Duration_4839e8c3], result)
 
+    @builtins.property
+    def provisioned_poller_config(self) -> typing.Optional["ProvisionedPollerConfig"]:
+        '''Configuration for provisioned pollers that read from the event source.
+
+        When specified, allows control over the minimum and maximum number of pollers
+        that can be provisioned to process events from the source.
+
+        :default: - no provisioned pollers
+        '''
+        result = self._values.get("provisioned_poller_config")
+        return typing.cast(typing.Optional["ProvisionedPollerConfig"], result)
+
     @builtins.property
     def topic(self) -> builtins.str:
         '''The Kafka topic to subscribe to.'''
@@ -1218,6 +1311,90 @@ class ManagedKafkaEventSourceProps(KafkaEventSourceProps):
         )
 
 
+@jsii.data_type(
+    jsii_type="aws-cdk-lib.aws_lambda_event_sources.ProvisionedPollerConfig",
+    jsii_struct_bases=[],
+    name_mapping={
+        "maximum_pollers": "maximumPollers",
+        "minimum_pollers": "minimumPollers",
+    },
+)
+class ProvisionedPollerConfig:
+    def __init__(
+        self,
+        *,
+        maximum_pollers: jsii.Number,
+        minimum_pollers: jsii.Number,
+    ) -> None:
+        '''
+        :param maximum_pollers: The maximum number of pollers that can be provisioned. Default: 200
+        :param minimum_pollers: The minimum number of pollers that should be provisioned. Default: 1
+
+        :exampleMetadata: infused
+
+        Example::
+
+            from aws_cdk.aws_lambda_event_sources import ManagedKafkaEventSource
+
+            # Your MSK cluster arn
+            # cluster_arn: str
+
+            # my_function: lambda.Function
+
+
+            # The Kafka topic you want to subscribe to
+            topic = "some-cool-topic"
+            my_function.add_event_source(ManagedKafkaEventSource(
+                cluster_arn=cluster_arn,
+                topic=topic,
+                starting_position=lambda_.StartingPosition.TRIM_HORIZON,
+                provisioned_poller_config=ProvisionedPollerConfig(
+                    minimum_pollers=1,
+                    maximum_pollers=3
+                )
+            ))
+        '''
+        if __debug__:
+            type_hints = typing.get_type_hints(_typecheckingstub__52613c26f6551f26dae012e5cb997a9c6c981e7e4a8c59f252b025ef2acd28a7)
+            check_type(argname="argument maximum_pollers", value=maximum_pollers, expected_type=type_hints["maximum_pollers"])
+            check_type(argname="argument minimum_pollers", value=minimum_pollers, expected_type=type_hints["minimum_pollers"])
+        self._values: typing.Dict[builtins.str, typing.Any] = {
+            "maximum_pollers": maximum_pollers,
+            "minimum_pollers": minimum_pollers,
+        }
+
+    @builtins.property
+    def maximum_pollers(self) -> jsii.Number:
+        '''The maximum number of pollers that can be provisioned.
+
+        :default: 200
+        '''
+        result = self._values.get("maximum_pollers")
+        assert result is not None, "Required property 'maximum_pollers' is missing"
+        return typing.cast(jsii.Number, result)
+
+    @builtins.property
+    def minimum_pollers(self) -> jsii.Number:
+        '''The minimum number of pollers that should be provisioned.
+
+        :default: 1
+        '''
+        result = self._values.get("minimum_pollers")
+        assert result is not None, "Required property 'minimum_pollers' is missing"
+        return typing.cast(jsii.Number, result)
+
+    def __eq__(self, rhs: typing.Any) -> builtins.bool:
+        return isinstance(rhs, self.__class__) and rhs._values == self._values
+
+    def __ne__(self, rhs: typing.Any) -> builtins.bool:
+        return not (rhs == self)
+
+    def __repr__(self) -> str:
+        return "ProvisionedPollerConfig(%s)" % ", ".join(
+            k + "=" + repr(v) for k, v in self._values.items()
+        )
+
+
 @jsii.implements(_IEventSource_3686b3f8)
 class S3EventSource(
     metaclass=jsii.JSIIMeta,
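As the `isinstance(provisioned_poller_config, dict)` checks above suggest, the generated props classes accept either a `ProvisionedPollerConfig` instance or an equivalent keyword dict that is coerced via `ProvisionedPollerConfig(**...)`. A small sketch of both forms, assuming the same placeholder names (`my_function`, `cluster_arn`) used in the example above:

```python
from aws_cdk import aws_lambda as lambda_
from aws_cdk.aws_lambda_event_sources import ManagedKafkaEventSource, ProvisionedPollerConfig

# my_function: lambda.Function
# cluster_arn: str

# Equivalent ways to express the poller bounds:
config_as_struct = ProvisionedPollerConfig(minimum_pollers=1, maximum_pollers=3)
config_as_dict = {"minimum_pollers": 1, "maximum_pollers": 3}  # coerced to ProvisionedPollerConfig

my_function.add_event_source(ManagedKafkaEventSource(
    cluster_arn=cluster_arn,
    topic="some-cool-topic",
    starting_position=lambda_.StartingPosition.TRIM_HORIZON,
    provisioned_poller_config=config_as_dict,
))
```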
@@ -1473,6 +1650,7 @@ class S3OnFailureDestination(
|
|
|
1473
1650
|
"batch_size": "batchSize",
|
|
1474
1651
|
"enabled": "enabled",
|
|
1475
1652
|
"max_batching_window": "maxBatchingWindow",
|
|
1653
|
+
"provisioned_poller_config": "provisionedPollerConfig",
|
|
1476
1654
|
"topic": "topic",
|
|
1477
1655
|
"consumer_group_id": "consumerGroupId",
|
|
1478
1656
|
"filter_encryption": "filterEncryption",
|
|
@@ -1495,6 +1673,7 @@ class SelfManagedKafkaEventSourceProps(KafkaEventSourceProps):
|
|
|
1495
1673
|
batch_size: typing.Optional[jsii.Number] = None,
|
|
1496
1674
|
enabled: typing.Optional[builtins.bool] = None,
|
|
1497
1675
|
max_batching_window: typing.Optional[_Duration_4839e8c3] = None,
|
|
1676
|
+
provisioned_poller_config: typing.Optional[typing.Union[ProvisionedPollerConfig, typing.Dict[builtins.str, typing.Any]]] = None,
|
|
1498
1677
|
topic: builtins.str,
|
|
1499
1678
|
consumer_group_id: typing.Optional[builtins.str] = None,
|
|
1500
1679
|
filter_encryption: typing.Optional[_IKey_5f11635f] = None,
|
|
@@ -1516,6 +1695,7 @@ class SelfManagedKafkaEventSourceProps(KafkaEventSourceProps):
|
|
|
1516
1695
|
:param batch_size: The largest number of records that AWS Lambda will retrieve from your event source at the time of invoking your function. Your function receives an event with all the retrieved records. Valid Range: - Minimum value of 1 - Maximum value of: - 1000 for ``DynamoEventSource`` - 10000 for ``KinesisEventSource``, ``ManagedKafkaEventSource`` and ``SelfManagedKafkaEventSource`` Default: 100
|
|
1517
1696
|
:param enabled: If the stream event source mapping should be enabled. Default: true
|
|
1518
1697
|
:param max_batching_window: The maximum amount of time to gather records before invoking the function. Maximum of Duration.minutes(5). Default: - Duration.seconds(0) for Kinesis, DynamoDB, and SQS event sources, Duration.millis(500) for MSK, self-managed Kafka, and Amazon MQ.
|
|
1698
|
+
:param provisioned_poller_config: Configuration for provisioned pollers that read from the event source. When specified, allows control over the minimum and maximum number of pollers that can be provisioned to process events from the source. Default: - no provisioned pollers
|
|
1519
1699
|
:param topic: The Kafka topic to subscribe to.
|
|
1520
1700
|
:param consumer_group_id: The identifier for the Kafka consumer group to join. The consumer group ID must be unique among all your Kafka event sources. After creating a Kafka event source mapping with the consumer group ID specified, you cannot update this value. The value must have a lenght between 1 and 200 and full the pattern '[a-zA-Z0-9-/*:_+=.@-]*'. Default: - none
|
|
1521
1701
|
:param filter_encryption: Add Customer managed KMS key to encrypt Filter Criteria. Default: - none
|
|
@@ -1559,6 +1739,8 @@ class SelfManagedKafkaEventSourceProps(KafkaEventSourceProps):
|
|
|
1559
1739
|
starting_position=lambda_.StartingPosition.TRIM_HORIZON
|
|
1560
1740
|
))
|
|
1561
1741
|
'''
|
|
1742
|
+
if isinstance(provisioned_poller_config, dict):
|
|
1743
|
+
provisioned_poller_config = ProvisionedPollerConfig(**provisioned_poller_config)
|
|
1562
1744
|
if isinstance(vpc_subnets, dict):
|
|
1563
1745
|
vpc_subnets = _SubnetSelection_e57d76df(**vpc_subnets)
|
|
1564
1746
|
if __debug__:
|
|
@@ -1567,6 +1749,7 @@ class SelfManagedKafkaEventSourceProps(KafkaEventSourceProps):
|
|
|
1567
1749
|
check_type(argname="argument batch_size", value=batch_size, expected_type=type_hints["batch_size"])
|
|
1568
1750
|
check_type(argname="argument enabled", value=enabled, expected_type=type_hints["enabled"])
|
|
1569
1751
|
check_type(argname="argument max_batching_window", value=max_batching_window, expected_type=type_hints["max_batching_window"])
|
|
1752
|
+
check_type(argname="argument provisioned_poller_config", value=provisioned_poller_config, expected_type=type_hints["provisioned_poller_config"])
|
|
1570
1753
|
check_type(argname="argument topic", value=topic, expected_type=type_hints["topic"])
|
|
1571
1754
|
check_type(argname="argument consumer_group_id", value=consumer_group_id, expected_type=type_hints["consumer_group_id"])
|
|
1572
1755
|
check_type(argname="argument filter_encryption", value=filter_encryption, expected_type=type_hints["filter_encryption"])
|
|
@@ -1590,6 +1773,8 @@ class SelfManagedKafkaEventSourceProps(KafkaEventSourceProps):
|
|
|
1590
1773
|
self._values["enabled"] = enabled
|
|
1591
1774
|
if max_batching_window is not None:
|
|
1592
1775
|
self._values["max_batching_window"] = max_batching_window
|
|
1776
|
+
if provisioned_poller_config is not None:
|
|
1777
|
+
self._values["provisioned_poller_config"] = provisioned_poller_config
|
|
1593
1778
|
if consumer_group_id is not None:
|
|
1594
1779
|
self._values["consumer_group_id"] = consumer_group_id
|
|
1595
1780
|
if filter_encryption is not None:
|
|
@@ -1660,6 +1845,18 @@ class SelfManagedKafkaEventSourceProps(KafkaEventSourceProps):
|
|
|
1660
1845
|
result = self._values.get("max_batching_window")
|
|
1661
1846
|
return typing.cast(typing.Optional[_Duration_4839e8c3], result)
|
|
1662
1847
|
|
|
1848
|
+
@builtins.property
|
|
1849
|
+
def provisioned_poller_config(self) -> typing.Optional[ProvisionedPollerConfig]:
|
|
1850
|
+
'''Configuration for provisioned pollers that read from the event source.
|
|
1851
|
+
|
|
1852
|
+
When specified, allows control over the minimum and maximum number of pollers
|
|
1853
|
+
that can be provisioned to process events from the source.
|
|
1854
|
+
|
|
1855
|
+
:default: - no provisioned pollers
|
|
1856
|
+
'''
|
|
1857
|
+
result = self._values.get("provisioned_poller_config")
|
|
1858
|
+
return typing.cast(typing.Optional[ProvisionedPollerConfig], result)
|
|
1859
|
+
|
|
1663
1860
|
@builtins.property
|
|
1664
1861
|
def topic(self) -> builtins.str:
|
|
1665
1862
|
'''The Kafka topic to subscribe to.'''
|
|
@@ -2364,6 +2561,7 @@ class StreamEventSource(
|
|
|
2364
2561
|
batch_size: typing.Optional[jsii.Number] = None,
|
|
2365
2562
|
enabled: typing.Optional[builtins.bool] = None,
|
|
2366
2563
|
max_batching_window: typing.Optional[_Duration_4839e8c3] = None,
|
|
2564
|
+
provisioned_poller_config: typing.Optional[typing.Union[ProvisionedPollerConfig, typing.Dict[builtins.str, typing.Any]]] = None,
|
|
2367
2565
|
) -> None:
|
|
2368
2566
|
'''
|
|
2369
2567
|
:param bisect_batch_on_error: If the function returns an error, split the batch in two and retry. Default: false
|
|
@@ -2380,6 +2578,7 @@ class StreamEventSource(
|
|
|
2380
2578
|
:param batch_size: The largest number of records that AWS Lambda will retrieve from your event source at the time of invoking your function. Your function receives an event with all the retrieved records. Valid Range: - Minimum value of 1 - Maximum value of: - 1000 for ``DynamoEventSource`` - 10000 for ``KinesisEventSource``, ``ManagedKafkaEventSource`` and ``SelfManagedKafkaEventSource`` Default: 100
|
|
2381
2579
|
:param enabled: If the stream event source mapping should be enabled. Default: true
|
|
2382
2580
|
:param max_batching_window: The maximum amount of time to gather records before invoking the function. Maximum of Duration.minutes(5). Default: - Duration.seconds(0) for Kinesis, DynamoDB, and SQS event sources, Duration.millis(500) for MSK, self-managed Kafka, and Amazon MQ.
|
|
2581
|
+
:param provisioned_poller_config: Configuration for provisioned pollers that read from the event source. When specified, allows control over the minimum and maximum number of pollers that can be provisioned to process events from the source. Default: - no provisioned pollers
|
|
2383
2582
|
'''
|
|
2384
2583
|
props = StreamEventSourceProps(
|
|
2385
2584
|
bisect_batch_on_error=bisect_batch_on_error,
|
|
@@ -2396,6 +2595,7 @@ class StreamEventSource(
|
|
|
2396
2595
|
batch_size=batch_size,
|
|
2397
2596
|
enabled=enabled,
|
|
2398
2597
|
max_batching_window=max_batching_window,
|
|
2598
|
+
provisioned_poller_config=provisioned_poller_config,
|
|
2399
2599
|
)
|
|
2400
2600
|
|
|
2401
2601
|
jsii.create(self.__class__, self, [props])
|
|
@@ -2428,6 +2628,7 @@ class StreamEventSource(
|
|
|
2428
2628
|
metrics_config: typing.Optional[typing.Union[_MetricsConfig_48ab59c4, typing.Dict[builtins.str, typing.Any]]] = None,
|
|
2429
2629
|
on_failure: typing.Optional[_IEventSourceDlq_5e2c6ad9] = None,
|
|
2430
2630
|
parallelization_factor: typing.Optional[jsii.Number] = None,
|
|
2631
|
+
provisioned_poller_config: typing.Optional[typing.Union[_ProvisionedPollerConfig_1025e063, typing.Dict[builtins.str, typing.Any]]] = None,
|
|
2431
2632
|
report_batch_item_failures: typing.Optional[builtins.bool] = None,
|
|
2432
2633
|
retry_attempts: typing.Optional[jsii.Number] = None,
|
|
2433
2634
|
source_access_configurations: typing.Optional[typing.Sequence[typing.Union[_SourceAccessConfiguration_1926ff89, typing.Dict[builtins.str, typing.Any]]]] = None,
|
|
@@ -2452,6 +2653,7 @@ class StreamEventSource(
|
|
|
2452
2653
|
:param metrics_config: Configuration for enhanced monitoring metrics collection When specified, enables collection of additional metrics for the stream event source. Default: - Enhanced monitoring is disabled
|
|
2453
2654
|
:param on_failure: An Amazon SQS queue or Amazon SNS topic destination for discarded records. Default: discarded records are ignored
|
|
2454
2655
|
:param parallelization_factor: The number of batches to process from each shard concurrently. Valid Range: - Minimum value of 1 - Maximum value of 10 Default: 1
|
|
2656
|
+
:param provisioned_poller_config: Configuration for provisioned pollers that read from the event source. When specified, allows control over the minimum and maximum number of pollers that can be provisioned to process events from the source. Default: - no provisioned pollers
|
|
2455
2657
|
:param report_batch_item_failures: Allow functions to return partially successful responses for a batch of records. Default: false
|
|
2456
2658
|
:param retry_attempts: The maximum number of times to retry when the function returns an error. Set to ``undefined`` if you want lambda to keep retrying infinitely or until the record expires. Valid Range: - Minimum value of 0 - Maximum value of 10000 Default: - infinite or until the record expires.
|
|
2457
2659
|
:param source_access_configurations: Specific settings like the authentication protocol or the VPC components to secure access to your event source. Default: - none
|
|
@@ -2476,6 +2678,7 @@ class StreamEventSource(
|
|
|
2476
2678
|
metrics_config=metrics_config,
|
|
2477
2679
|
on_failure=on_failure,
|
|
2478
2680
|
parallelization_factor=parallelization_factor,
|
|
2681
|
+
provisioned_poller_config=provisioned_poller_config,
|
|
2479
2682
|
report_batch_item_failures=report_batch_item_failures,
|
|
2480
2683
|
retry_attempts=retry_attempts,
|
|
2481
2684
|
source_access_configurations=source_access_configurations,
|
|
@@ -2517,6 +2720,7 @@ typing.cast(typing.Any, StreamEventSource).__jsii_proxy_class__ = lambda : _Stre
|
|
|
2517
2720
|
"batch_size": "batchSize",
|
|
2518
2721
|
"enabled": "enabled",
|
|
2519
2722
|
"max_batching_window": "maxBatchingWindow",
|
|
2723
|
+
"provisioned_poller_config": "provisionedPollerConfig",
|
|
2520
2724
|
"bisect_batch_on_error": "bisectBatchOnError",
|
|
2521
2725
|
"filter_encryption": "filterEncryption",
|
|
2522
2726
|
"filters": "filters",
|
|
@@ -2537,6 +2741,7 @@ class StreamEventSourceProps(BaseStreamEventSourceProps):
|
|
|
2537
2741
|
batch_size: typing.Optional[jsii.Number] = None,
|
|
2538
2742
|
enabled: typing.Optional[builtins.bool] = None,
|
|
2539
2743
|
max_batching_window: typing.Optional[_Duration_4839e8c3] = None,
|
|
2744
|
+
provisioned_poller_config: typing.Optional[typing.Union[ProvisionedPollerConfig, typing.Dict[builtins.str, typing.Any]]] = None,
|
|
2540
2745
|
bisect_batch_on_error: typing.Optional[builtins.bool] = None,
|
|
2541
2746
|
filter_encryption: typing.Optional[_IKey_5f11635f] = None,
|
|
2542
2747
|
filters: typing.Optional[typing.Sequence[typing.Mapping[builtins.str, typing.Any]]] = None,
|
|
@@ -2554,6 +2759,7 @@ class StreamEventSourceProps(BaseStreamEventSourceProps):
|
|
|
2554
2759
|
:param batch_size: The largest number of records that AWS Lambda will retrieve from your event source at the time of invoking your function. Your function receives an event with all the retrieved records. Valid Range: - Minimum value of 1 - Maximum value of: - 1000 for ``DynamoEventSource`` - 10000 for ``KinesisEventSource``, ``ManagedKafkaEventSource`` and ``SelfManagedKafkaEventSource`` Default: 100
|
|
2555
2760
|
:param enabled: If the stream event source mapping should be enabled. Default: true
|
|
2556
2761
|
:param max_batching_window: The maximum amount of time to gather records before invoking the function. Maximum of Duration.minutes(5). Default: - Duration.seconds(0) for Kinesis, DynamoDB, and SQS event sources, Duration.millis(500) for MSK, self-managed Kafka, and Amazon MQ.
|
|
2762
|
+
:param provisioned_poller_config: Configuration for provisioned pollers that read from the event source. When specified, allows control over the minimum and maximum number of pollers that can be provisioned to process events from the source. Default: - no provisioned pollers
|
|
2557
2763
|
:param bisect_batch_on_error: If the function returns an error, split the batch in two and retry. Default: false
|
|
2558
2764
|
:param filter_encryption: Add Customer managed KMS key to encrypt Filter Criteria. Default: - none
|
|
2559
2765
|
:param filters: Add filter criteria option. Default: - None
|
|
@@ -2598,11 +2804,17 @@ class StreamEventSourceProps(BaseStreamEventSourceProps):
|
|
|
2598
2804
|
),
|
|
2599
2805
|
on_failure=event_source_dlq,
|
|
2600
2806
|
parallelization_factor=123,
|
|
2807
|
+
provisioned_poller_config=lambda_event_sources.ProvisionedPollerConfig(
|
|
2808
|
+
maximum_pollers=123,
|
|
2809
|
+
minimum_pollers=123
|
|
2810
|
+
),
|
|
2601
2811
|
report_batch_item_failures=False,
|
|
2602
2812
|
retry_attempts=123,
|
|
2603
2813
|
tumbling_window=cdk.Duration.minutes(30)
|
|
2604
2814
|
)
|
|
2605
2815
|
'''
|
|
2816
|
+
if isinstance(provisioned_poller_config, dict):
|
|
2817
|
+
provisioned_poller_config = ProvisionedPollerConfig(**provisioned_poller_config)
|
|
2606
2818
|
if isinstance(metrics_config, dict):
|
|
2607
2819
|
metrics_config = _MetricsConfig_48ab59c4(**metrics_config)
|
|
2608
2820
|
if __debug__:
|
|
@@ -2611,6 +2823,7 @@ class StreamEventSourceProps(BaseStreamEventSourceProps):
|
|
|
2611
2823
|
check_type(argname="argument batch_size", value=batch_size, expected_type=type_hints["batch_size"])
|
|
2612
2824
|
check_type(argname="argument enabled", value=enabled, expected_type=type_hints["enabled"])
|
|
2613
2825
|
check_type(argname="argument max_batching_window", value=max_batching_window, expected_type=type_hints["max_batching_window"])
|
|
2826
|
+
check_type(argname="argument provisioned_poller_config", value=provisioned_poller_config, expected_type=type_hints["provisioned_poller_config"])
|
|
2614
2827
|
check_type(argname="argument bisect_batch_on_error", value=bisect_batch_on_error, expected_type=type_hints["bisect_batch_on_error"])
|
|
2615
2828
|
check_type(argname="argument filter_encryption", value=filter_encryption, expected_type=type_hints["filter_encryption"])
|
|
2616
2829
|
check_type(argname="argument filters", value=filters, expected_type=type_hints["filters"])
|
|
@@ -2630,6 +2843,8 @@ class StreamEventSourceProps(BaseStreamEventSourceProps):
|
|
|
2630
2843
|
self._values["enabled"] = enabled
|
|
2631
2844
|
if max_batching_window is not None:
|
|
2632
2845
|
self._values["max_batching_window"] = max_batching_window
|
|
2846
|
+
if provisioned_poller_config is not None:
|
|
2847
|
+
self._values["provisioned_poller_config"] = provisioned_poller_config
|
|
2633
2848
|
if bisect_batch_on_error is not None:
|
|
2634
2849
|
self._values["bisect_batch_on_error"] = bisect_batch_on_error
|
|
2635
2850
|
if filter_encryption is not None:
|
|
@@ -2700,6 +2915,18 @@ class StreamEventSourceProps(BaseStreamEventSourceProps):
|
|
|
2700
2915
|
result = self._values.get("max_batching_window")
|
|
2701
2916
|
return typing.cast(typing.Optional[_Duration_4839e8c3], result)
|
|
2702
2917
|
|
|
2918
|
+
@builtins.property
|
|
2919
|
+
def provisioned_poller_config(self) -> typing.Optional[ProvisionedPollerConfig]:
|
|
2920
|
+
'''Configuration for provisioned pollers that read from the event source.
|
|
2921
|
+
|
|
2922
|
+
When specified, allows control over the minimum and maximum number of pollers
|
|
2923
|
+
that can be provisioned to process events from the source.
|
|
2924
|
+
|
|
2925
|
+
:default: - no provisioned pollers
|
|
2926
|
+
'''
|
|
2927
|
+
result = self._values.get("provisioned_poller_config")
|
|
2928
|
+
return typing.cast(typing.Optional[ProvisionedPollerConfig], result)
|
|
2929
|
+
|
|
2703
2930
|
@builtins.property
|
|
2704
2931
|
def bisect_batch_on_error(self) -> typing.Optional[builtins.bool]:
|
|
2705
2932
|
'''If the function returns an error, split the batch in two and retry.
|
|
@@ -2876,6 +3103,7 @@ class DynamoEventSource(
|
|
|
2876
3103
|
batch_size: typing.Optional[jsii.Number] = None,
|
|
2877
3104
|
enabled: typing.Optional[builtins.bool] = None,
|
|
2878
3105
|
max_batching_window: typing.Optional[_Duration_4839e8c3] = None,
|
|
3106
|
+
provisioned_poller_config: typing.Optional[typing.Union[ProvisionedPollerConfig, typing.Dict[builtins.str, typing.Any]]] = None,
|
|
2879
3107
|
) -> None:
|
|
2880
3108
|
'''
|
|
2881
3109
|
:param table: -
|
|
@@ -2893,6 +3121,7 @@ class DynamoEventSource(
|
|
|
2893
3121
|
:param batch_size: The largest number of records that AWS Lambda will retrieve from your event source at the time of invoking your function. Your function receives an event with all the retrieved records. Valid Range: - Minimum value of 1 - Maximum value of: - 1000 for ``DynamoEventSource`` - 10000 for ``KinesisEventSource``, ``ManagedKafkaEventSource`` and ``SelfManagedKafkaEventSource`` Default: 100
|
|
2894
3122
|
:param enabled: If the stream event source mapping should be enabled. Default: true
|
|
2895
3123
|
:param max_batching_window: The maximum amount of time to gather records before invoking the function. Maximum of Duration.minutes(5). Default: - Duration.seconds(0) for Kinesis, DynamoDB, and SQS event sources, Duration.millis(500) for MSK, self-managed Kafka, and Amazon MQ.
|
|
3124
|
+
:param provisioned_poller_config: Configuration for provisioned pollers that read from the event source. When specified, allows control over the minimum and maximum number of pollers that can be provisioned to process events from the source. Default: - no provisioned pollers
|
|
2896
3125
|
'''
|
|
2897
3126
|
if __debug__:
|
|
2898
3127
|
type_hints = typing.get_type_hints(_typecheckingstub__826ee32f5239c7c242a7200ffc24778bee01c1101c3cc939fed4a861510c8358)
|
|
@@ -2912,6 +3141,7 @@ class DynamoEventSource(
|
|
|
2912
3141
|
batch_size=batch_size,
|
|
2913
3142
|
enabled=enabled,
|
|
2914
3143
|
max_batching_window=max_batching_window,
|
|
3144
|
+
provisioned_poller_config=provisioned_poller_config,
|
|
2915
3145
|
)
|
|
2916
3146
|
|
|
2917
3147
|
jsii.create(self.__class__, self, [table, props])
|
|
@@ -2948,6 +3178,7 @@ class DynamoEventSource(
|
|
|
2948
3178
|
"batch_size": "batchSize",
|
|
2949
3179
|
"enabled": "enabled",
|
|
2950
3180
|
"max_batching_window": "maxBatchingWindow",
|
|
3181
|
+
"provisioned_poller_config": "provisionedPollerConfig",
|
|
2951
3182
|
"bisect_batch_on_error": "bisectBatchOnError",
|
|
2952
3183
|
"filter_encryption": "filterEncryption",
|
|
2953
3184
|
"filters": "filters",
|
|
@@ -2968,6 +3199,7 @@ class DynamoEventSourceProps(StreamEventSourceProps):
|
|
|
2968
3199
|
batch_size: typing.Optional[jsii.Number] = None,
|
|
2969
3200
|
enabled: typing.Optional[builtins.bool] = None,
|
|
2970
3201
|
max_batching_window: typing.Optional[_Duration_4839e8c3] = None,
|
|
3202
|
+
provisioned_poller_config: typing.Optional[typing.Union[ProvisionedPollerConfig, typing.Dict[builtins.str, typing.Any]]] = None,
|
|
2971
3203
|
bisect_batch_on_error: typing.Optional[builtins.bool] = None,
|
|
2972
3204
|
filter_encryption: typing.Optional[_IKey_5f11635f] = None,
|
|
2973
3205
|
filters: typing.Optional[typing.Sequence[typing.Mapping[builtins.str, typing.Any]]] = None,
|
|
@@ -2984,6 +3216,7 @@ class DynamoEventSourceProps(StreamEventSourceProps):
|
|
|
2984
3216
|
:param batch_size: The largest number of records that AWS Lambda will retrieve from your event source at the time of invoking your function. Your function receives an event with all the retrieved records. Valid Range: - Minimum value of 1 - Maximum value of: - 1000 for ``DynamoEventSource`` - 10000 for ``KinesisEventSource``, ``ManagedKafkaEventSource`` and ``SelfManagedKafkaEventSource`` Default: 100
|
|
2985
3217
|
:param enabled: If the stream event source mapping should be enabled. Default: true
|
|
2986
3218
|
:param max_batching_window: The maximum amount of time to gather records before invoking the function. Maximum of Duration.minutes(5). Default: - Duration.seconds(0) for Kinesis, DynamoDB, and SQS event sources, Duration.millis(500) for MSK, self-managed Kafka, and Amazon MQ.
|
|
3219
|
+
:param provisioned_poller_config: Configuration for provisioned pollers that read from the event source. When specified, allows control over the minimum and maximum number of pollers that can be provisioned to process events from the source. Default: - no provisioned pollers
|
|
2987
3220
|
:param bisect_batch_on_error: If the function returns an error, split the batch in two and retry. Default: false
|
|
2988
3221
|
:param filter_encryption: Add Customer managed KMS key to encrypt Filter Criteria. Default: - none
|
|
2989
3222
|
:param filters: Add filter criteria option. Default: - None
|
|
@@ -3019,6 +3252,8 @@ class DynamoEventSourceProps(StreamEventSourceProps):
|
|
|
3019
3252
|
)
|
|
3020
3253
|
))
|
|
3021
3254
|
'''
|
|
3255
|
+
if isinstance(provisioned_poller_config, dict):
|
|
3256
|
+
provisioned_poller_config = ProvisionedPollerConfig(**provisioned_poller_config)
|
|
3022
3257
|
if isinstance(metrics_config, dict):
|
|
3023
3258
|
metrics_config = _MetricsConfig_48ab59c4(**metrics_config)
|
|
3024
3259
|
if __debug__:
|
|
@@ -3027,6 +3262,7 @@ class DynamoEventSourceProps(StreamEventSourceProps):
|
|
|
3027
3262
|
check_type(argname="argument batch_size", value=batch_size, expected_type=type_hints["batch_size"])
|
|
3028
3263
|
check_type(argname="argument enabled", value=enabled, expected_type=type_hints["enabled"])
|
|
3029
3264
|
check_type(argname="argument max_batching_window", value=max_batching_window, expected_type=type_hints["max_batching_window"])
|
|
3265
|
+
check_type(argname="argument provisioned_poller_config", value=provisioned_poller_config, expected_type=type_hints["provisioned_poller_config"])
|
|
3030
3266
|
check_type(argname="argument bisect_batch_on_error", value=bisect_batch_on_error, expected_type=type_hints["bisect_batch_on_error"])
|
|
3031
3267
|
check_type(argname="argument filter_encryption", value=filter_encryption, expected_type=type_hints["filter_encryption"])
|
|
3032
3268
|
check_type(argname="argument filters", value=filters, expected_type=type_hints["filters"])
|
|
@@ -3046,6 +3282,8 @@ class DynamoEventSourceProps(StreamEventSourceProps):
|
|
|
3046
3282
|
self._values["enabled"] = enabled
|
|
3047
3283
|
if max_batching_window is not None:
|
|
3048
3284
|
self._values["max_batching_window"] = max_batching_window
|
|
3285
|
+
if provisioned_poller_config is not None:
|
|
3286
|
+
self._values["provisioned_poller_config"] = provisioned_poller_config
|
|
3049
3287
|
if bisect_batch_on_error is not None:
|
|
3050
3288
|
self._values["bisect_batch_on_error"] = bisect_batch_on_error
|
|
3051
3289
|
if filter_encryption is not None:
|
|
@@ -3116,6 +3354,18 @@ class DynamoEventSourceProps(StreamEventSourceProps):
|
|
|
3116
3354
|
result = self._values.get("max_batching_window")
|
|
3117
3355
|
return typing.cast(typing.Optional[_Duration_4839e8c3], result)
|
|
3118
3356
|
|
|
3357
|
+
@builtins.property
|
|
3358
|
+
def provisioned_poller_config(self) -> typing.Optional[ProvisionedPollerConfig]:
|
|
3359
|
+
'''Configuration for provisioned pollers that read from the event source.
|
|
3360
|
+
|
|
3361
|
+
When specified, allows control over the minimum and maximum number of pollers
|
|
3362
|
+
that can be provisioned to process events from the source.
|
|
3363
|
+
|
|
3364
|
+
:default: - no provisioned pollers
|
|
3365
|
+
'''
|
|
3366
|
+
result = self._values.get("provisioned_poller_config")
|
|
3367
|
+
return typing.cast(typing.Optional[ProvisionedPollerConfig], result)
|
|
3368
|
+
|
|
3119
3369
|
@builtins.property
|
|
3120
3370
|
def bisect_batch_on_error(self) -> typing.Optional[builtins.bool]:
|
|
3121
3371
|
'''If the function returns an error, split the batch in two and retry.
|
|
@@ -3285,6 +3535,7 @@ class KinesisEventSource(
|
|
|
3285
3535
|
batch_size: typing.Optional[jsii.Number] = None,
|
|
3286
3536
|
enabled: typing.Optional[builtins.bool] = None,
|
|
3287
3537
|
max_batching_window: typing.Optional[_Duration_4839e8c3] = None,
|
|
3538
|
+
provisioned_poller_config: typing.Optional[typing.Union[ProvisionedPollerConfig, typing.Dict[builtins.str, typing.Any]]] = None,
|
|
3288
3539
|
) -> None:
|
|
3289
3540
|
'''
|
|
3290
3541
|
:param stream: -
|
|
@@ -3303,6 +3554,7 @@ class KinesisEventSource(
|
|
|
3303
3554
|
:param batch_size: The largest number of records that AWS Lambda will retrieve from your event source at the time of invoking your function. Your function receives an event with all the retrieved records. Valid Range: - Minimum value of 1 - Maximum value of: - 1000 for ``DynamoEventSource`` - 10000 for ``KinesisEventSource``, ``ManagedKafkaEventSource`` and ``SelfManagedKafkaEventSource`` Default: 100
|
|
3304
3555
|
:param enabled: If the stream event source mapping should be enabled. Default: true
|
|
3305
3556
|
:param max_batching_window: The maximum amount of time to gather records before invoking the function. Maximum of Duration.minutes(5). Default: - Duration.seconds(0) for Kinesis, DynamoDB, and SQS event sources, Duration.millis(500) for MSK, self-managed Kafka, and Amazon MQ.
|
|
3557
|
+
:param provisioned_poller_config: Configuration for provisioned pollers that read from the event source. When specified, allows control over the minimum and maximum number of pollers that can be provisioned to process events from the source. Default: - no provisioned pollers
|
|
3306
3558
|
'''
|
|
3307
3559
|
if __debug__:
|
|
3308
3560
|
type_hints = typing.get_type_hints(_typecheckingstub__9f81acc98c12b4967363bdd43130a7e674a566679a7b200f5ccd6a0ae313ad2e)
|
|
@@ -3323,6 +3575,7 @@ class KinesisEventSource(
|
|
|
3323
3575
|
batch_size=batch_size,
|
|
3324
3576
|
enabled=enabled,
|
|
3325
3577
|
max_batching_window=max_batching_window,
|
|
3578
|
+
provisioned_poller_config=provisioned_poller_config,
|
|
3326
3579
|
)
|
|
3327
3580
|
|
|
3328
3581
|
jsii.create(self.__class__, self, [stream, props])
|
|
@@ -3364,6 +3617,7 @@ class KinesisEventSource(
|
|
|
3364
3617
|
"batch_size": "batchSize",
|
|
3365
3618
|
"enabled": "enabled",
|
|
3366
3619
|
"max_batching_window": "maxBatchingWindow",
|
|
3620
|
+
"provisioned_poller_config": "provisionedPollerConfig",
|
|
3367
3621
|
"bisect_batch_on_error": "bisectBatchOnError",
|
|
3368
3622
|
"filter_encryption": "filterEncryption",
|
|
3369
3623
|
"filters": "filters",
|
|
@@ -3385,6 +3639,7 @@ class KinesisEventSourceProps(StreamEventSourceProps):
|
|
|
3385
3639
|
batch_size: typing.Optional[jsii.Number] = None,
|
|
3386
3640
|
enabled: typing.Optional[builtins.bool] = None,
|
|
3387
3641
|
max_batching_window: typing.Optional[_Duration_4839e8c3] = None,
|
|
3642
|
+
provisioned_poller_config: typing.Optional[typing.Union[ProvisionedPollerConfig, typing.Dict[builtins.str, typing.Any]]] = None,
|
|
3388
3643
|
bisect_batch_on_error: typing.Optional[builtins.bool] = None,
|
|
3389
3644
|
filter_encryption: typing.Optional[_IKey_5f11635f] = None,
|
|
3390
3645
|
filters: typing.Optional[typing.Sequence[typing.Mapping[builtins.str, typing.Any]]] = None,
|
|
@@ -3402,6 +3657,7 @@ class KinesisEventSourceProps(StreamEventSourceProps):
|
|
|
3402
3657
|
:param batch_size: The largest number of records that AWS Lambda will retrieve from your event source at the time of invoking your function. Your function receives an event with all the retrieved records. Valid Range: - Minimum value of 1 - Maximum value of: - 1000 for ``DynamoEventSource`` - 10000 for ``KinesisEventSource``, ``ManagedKafkaEventSource`` and ``SelfManagedKafkaEventSource`` Default: 100
|
|
3403
3658
|
:param enabled: If the stream event source mapping should be enabled. Default: true
|
|
3404
3659
|
:param max_batching_window: The maximum amount of time to gather records before invoking the function. Maximum of Duration.minutes(5). Default: - Duration.seconds(0) for Kinesis, DynamoDB, and SQS event sources, Duration.millis(500) for MSK, self-managed Kafka, and Amazon MQ.
|
|
3660
|
+
:param provisioned_poller_config: Configuration for provisioned pollers that read from the event source. When specified, allows control over the minimum and maximum number of pollers that can be provisioned to process events from the source. Default: - no provisioned pollers
|
|
3405
3661
|
:param bisect_batch_on_error: If the function returns an error, split the batch in two and retry. Default: false
|
|
3406
3662
|
:param filter_encryption: Add Customer managed KMS key to encrypt Filter Criteria. Default: - none
|
|
3407
3663
|
:param filters: Add filter criteria option. Default: - None
|
|
@@ -3430,6 +3686,8 @@ class KinesisEventSourceProps(StreamEventSourceProps):
|
|
|
3430
3686
|
starting_position=lambda_.StartingPosition.TRIM_HORIZON
|
|
3431
3687
|
))
|
|
3432
3688
|
'''
|
|
3689
|
+
if isinstance(provisioned_poller_config, dict):
|
|
3690
|
+
provisioned_poller_config = ProvisionedPollerConfig(**provisioned_poller_config)
|
|
3433
3691
|
if isinstance(metrics_config, dict):
|
|
3434
3692
|
metrics_config = _MetricsConfig_48ab59c4(**metrics_config)
|
|
3435
3693
|
if __debug__:
|
|
@@ -3438,6 +3696,7 @@ class KinesisEventSourceProps(StreamEventSourceProps):
            check_type(argname="argument batch_size", value=batch_size, expected_type=type_hints["batch_size"])
            check_type(argname="argument enabled", value=enabled, expected_type=type_hints["enabled"])
            check_type(argname="argument max_batching_window", value=max_batching_window, expected_type=type_hints["max_batching_window"])
+           check_type(argname="argument provisioned_poller_config", value=provisioned_poller_config, expected_type=type_hints["provisioned_poller_config"])
            check_type(argname="argument bisect_batch_on_error", value=bisect_batch_on_error, expected_type=type_hints["bisect_batch_on_error"])
            check_type(argname="argument filter_encryption", value=filter_encryption, expected_type=type_hints["filter_encryption"])
            check_type(argname="argument filters", value=filters, expected_type=type_hints["filters"])
@@ -3458,6 +3717,8 @@ class KinesisEventSourceProps(StreamEventSourceProps):
            self._values["enabled"] = enabled
        if max_batching_window is not None:
            self._values["max_batching_window"] = max_batching_window
+       if provisioned_poller_config is not None:
+           self._values["provisioned_poller_config"] = provisioned_poller_config
        if bisect_batch_on_error is not None:
            self._values["bisect_batch_on_error"] = bisect_batch_on_error
        if filter_encryption is not None:
@@ -3530,6 +3791,18 @@ class KinesisEventSourceProps(StreamEventSourceProps):
        result = self._values.get("max_batching_window")
        return typing.cast(typing.Optional[_Duration_4839e8c3], result)

+   @builtins.property
+   def provisioned_poller_config(self) -> typing.Optional[ProvisionedPollerConfig]:
+       '''Configuration for provisioned pollers that read from the event source.
+
+       When specified, allows control over the minimum and maximum number of pollers
+       that can be provisioned to process events from the source.
+
+       :default: - no provisioned pollers
+       '''
+       result = self._values.get("provisioned_poller_config")
+       return typing.cast(typing.Optional[ProvisionedPollerConfig], result)
+
    @builtins.property
    def bisect_batch_on_error(self) -> typing.Optional[builtins.bool]:
        '''If the function returns an error, split the batch in two and retry.
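The getter added above returns `None` unless the option was set, mirroring the documented default of no provisioned pollers. A small sketch of that behaviour, assuming the property is exposed on `ManagedKafkaEventSourceProps` the same way as the other optional props (the cluster ARN below is a placeholder):

```python
from aws_cdk import aws_lambda as lambda_
from aws_cdk.aws_lambda_event_sources import (
    ManagedKafkaEventSourceProps,
    ProvisionedPollerConfig,
)

# Placeholder ARN used only for illustration.
cluster_arn = "arn:aws:kafka:us-east-1:111122223333:cluster/example/abcd1234"

with_pollers = ManagedKafkaEventSourceProps(
    cluster_arn=cluster_arn,
    topic="orders",
    starting_position=lambda_.StartingPosition.TRIM_HORIZON,
    provisioned_poller_config=ProvisionedPollerConfig(
        minimum_pollers=2,
        maximum_pollers=10,
    ),
)
without_pollers = ManagedKafkaEventSourceProps(
    cluster_arn=cluster_arn,
    topic="orders",
    starting_position=lambda_.StartingPosition.TRIM_HORIZON,
)

assert with_pollers.provisioned_poller_config is not None
assert without_pollers.provisioned_poller_config is None  # default: no provisioned pollers
```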
@@ -3714,6 +3987,7 @@ class ManagedKafkaEventSource(
        batch_size: typing.Optional[jsii.Number] = None,
        enabled: typing.Optional[builtins.bool] = None,
        max_batching_window: typing.Optional[_Duration_4839e8c3] = None,
+       provisioned_poller_config: typing.Optional[typing.Union[ProvisionedPollerConfig, typing.Dict[builtins.str, typing.Any]]] = None,
    ) -> None:
        '''
        :param cluster_arn: An MSK cluster construct.
@@ -3727,6 +4001,7 @@ class ManagedKafkaEventSource(
        :param batch_size: The largest number of records that AWS Lambda will retrieve from your event source at the time of invoking your function. Your function receives an event with all the retrieved records. Valid Range: - Minimum value of 1 - Maximum value of: - 1000 for ``DynamoEventSource`` - 10000 for ``KinesisEventSource``, ``ManagedKafkaEventSource`` and ``SelfManagedKafkaEventSource`` Default: 100
        :param enabled: If the stream event source mapping should be enabled. Default: true
        :param max_batching_window: The maximum amount of time to gather records before invoking the function. Maximum of Duration.minutes(5). Default: - Duration.seconds(0) for Kinesis, DynamoDB, and SQS event sources, Duration.millis(500) for MSK, self-managed Kafka, and Amazon MQ.
+       :param provisioned_poller_config: Configuration for provisioned pollers that read from the event source. When specified, allows control over the minimum and maximum number of pollers that can be provisioned to process events from the source. Default: - no provisioned pollers
        '''
        props = ManagedKafkaEventSourceProps(
            cluster_arn=cluster_arn,
@@ -3740,6 +4015,7 @@ class ManagedKafkaEventSource(
            batch_size=batch_size,
            enabled=enabled,
            max_batching_window=max_batching_window,
+           provisioned_poller_config=provisioned_poller_config,
        )

        jsii.create(self.__class__, self, [props])
@@ -3825,6 +4101,7 @@ class SelfManagedKafkaEventSource(
        batch_size: typing.Optional[jsii.Number] = None,
        enabled: typing.Optional[builtins.bool] = None,
        max_batching_window: typing.Optional[_Duration_4839e8c3] = None,
+       provisioned_poller_config: typing.Optional[typing.Union[ProvisionedPollerConfig, typing.Dict[builtins.str, typing.Any]]] = None,
    ) -> None:
        '''
        :param bootstrap_servers: The list of host and port pairs that are the addresses of the Kafka brokers in a "bootstrap" Kafka cluster that a Kafka client connects to initially to bootstrap itself. They are in the format ``abc.xyz.com:xxxx``.
@@ -3843,6 +4120,7 @@ class SelfManagedKafkaEventSource(
        :param batch_size: The largest number of records that AWS Lambda will retrieve from your event source at the time of invoking your function. Your function receives an event with all the retrieved records. Valid Range: - Minimum value of 1 - Maximum value of: - 1000 for ``DynamoEventSource`` - 10000 for ``KinesisEventSource``, ``ManagedKafkaEventSource`` and ``SelfManagedKafkaEventSource`` Default: 100
        :param enabled: If the stream event source mapping should be enabled. Default: true
        :param max_batching_window: The maximum amount of time to gather records before invoking the function. Maximum of Duration.minutes(5). Default: - Duration.seconds(0) for Kinesis, DynamoDB, and SQS event sources, Duration.millis(500) for MSK, self-managed Kafka, and Amazon MQ.
+       :param provisioned_poller_config: Configuration for provisioned pollers that read from the event source. When specified, allows control over the minimum and maximum number of pollers that can be provisioned to process events from the source. Default: - no provisioned pollers
        '''
        props = SelfManagedKafkaEventSourceProps(
            bootstrap_servers=bootstrap_servers,
@@ -3861,6 +4139,7 @@ class SelfManagedKafkaEventSource(
            batch_size=batch_size,
            enabled=enabled,
            max_batching_window=max_batching_window,
+           provisioned_poller_config=provisioned_poller_config,
        )

        jsii.create(self.__class__, self, [props])
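`SelfManagedKafkaEventSource` is wired to the same option, so provisioned pollers can also be configured for non-MSK brokers. A hedged sketch under the assumption that broker credentials live in a Secrets Manager secret; the broker addresses, topic name, `secret` and `my_function` below are placeholders, and other authentication methods exist:

```python
from aws_cdk import aws_lambda as lambda_
from aws_cdk import aws_secretsmanager as secretsmanager
from aws_cdk.aws_lambda_event_sources import (
    ProvisionedPollerConfig,
    SelfManagedKafkaEventSource,
)

# my_function: lambda_.Function
# secret: secretsmanager.ISecret  # assumed SASL/SCRAM credentials for the brokers

# Broker addresses in the ``host:port`` format described in the docstring above.
bootstrap_servers = [
    "kafka-broker-1.example.com:9092",
    "kafka-broker-2.example.com:9092",
]

my_function.add_event_source(SelfManagedKafkaEventSource(
    bootstrap_servers=bootstrap_servers,
    topic="some-other-topic",
    secret=secret,
    starting_position=lambda_.StartingPosition.TRIM_HORIZON,
    provisioned_poller_config=ProvisionedPollerConfig(
        minimum_pollers=1,
        maximum_pollers=3,
    ),
))
```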
@@ -3888,6 +4167,7 @@ __all__ = [
    "KinesisEventSourceProps",
    "ManagedKafkaEventSource",
    "ManagedKafkaEventSourceProps",
+   "ProvisionedPollerConfig",
    "S3EventSource",
    "S3EventSourceProps",
    "S3EventSourceV2",
@@ -3936,6 +4216,7 @@ def _typecheckingstub__49e8158c80bdc70be09102671a6e9cb783a1a28274da91874a575eeb7
    batch_size: typing.Optional[jsii.Number] = None,
    enabled: typing.Optional[builtins.bool] = None,
    max_batching_window: typing.Optional[_Duration_4839e8c3] = None,
+   provisioned_poller_config: typing.Optional[typing.Union[ProvisionedPollerConfig, typing.Dict[builtins.str, typing.Any]]] = None,
) -> None:
    """Type checking stubs"""
    pass
@@ -3946,6 +4227,7 @@ def _typecheckingstub__980041697091a50415a7444df02a046d910ddd83f1229789d80780bf7
    batch_size: typing.Optional[jsii.Number] = None,
    enabled: typing.Optional[builtins.bool] = None,
    max_batching_window: typing.Optional[_Duration_4839e8c3] = None,
+   provisioned_poller_config: typing.Optional[typing.Union[ProvisionedPollerConfig, typing.Dict[builtins.str, typing.Any]]] = None,
    topic: builtins.str,
    consumer_group_id: typing.Optional[builtins.str] = None,
    filter_encryption: typing.Optional[_IKey_5f11635f] = None,
@@ -3962,6 +4244,7 @@ def _typecheckingstub__e930f585c1bae37174885c54f0f224909bfb0a75d9f1b652bbcf33461
    batch_size: typing.Optional[jsii.Number] = None,
    enabled: typing.Optional[builtins.bool] = None,
    max_batching_window: typing.Optional[_Duration_4839e8c3] = None,
+   provisioned_poller_config: typing.Optional[typing.Union[ProvisionedPollerConfig, typing.Dict[builtins.str, typing.Any]]] = None,
    topic: builtins.str,
    consumer_group_id: typing.Optional[builtins.str] = None,
    filter_encryption: typing.Optional[_IKey_5f11635f] = None,
@@ -3973,6 +4256,14 @@ def _typecheckingstub__e930f585c1bae37174885c54f0f224909bfb0a75d9f1b652bbcf33461
    """Type checking stubs"""
    pass

+def _typecheckingstub__52613c26f6551f26dae012e5cb997a9c6c981e7e4a8c59f252b025ef2acd28a7(
+   *,
+   maximum_pollers: jsii.Number,
+   minimum_pollers: jsii.Number,
+) -> None:
+   """Type checking stubs"""
+   pass
+
def _typecheckingstub__f651835f5621f3e8376a1eb7f742b3f55035a565412de372e2e29a201c5b9347(
    bucket: _Bucket_2d22f22c,
    *,
@@ -4030,6 +4321,7 @@ def _typecheckingstub__0100a45aa91b9c2103378e2ba54dd41b054f1d6a50733797256d6971b
    batch_size: typing.Optional[jsii.Number] = None,
    enabled: typing.Optional[builtins.bool] = None,
    max_batching_window: typing.Optional[_Duration_4839e8c3] = None,
+   provisioned_poller_config: typing.Optional[typing.Union[ProvisionedPollerConfig, typing.Dict[builtins.str, typing.Any]]] = None,
    topic: builtins.str,
    consumer_group_id: typing.Optional[builtins.str] = None,
    filter_encryption: typing.Optional[_IKey_5f11635f] = None,
@@ -4144,6 +4436,7 @@ def _typecheckingstub__f846cf48a1cd8d8120ed4973fabeee827928eff2de72d372506124b7d
    batch_size: typing.Optional[jsii.Number] = None,
    enabled: typing.Optional[builtins.bool] = None,
    max_batching_window: typing.Optional[_Duration_4839e8c3] = None,
+   provisioned_poller_config: typing.Optional[typing.Union[ProvisionedPollerConfig, typing.Dict[builtins.str, typing.Any]]] = None,
    bisect_batch_on_error: typing.Optional[builtins.bool] = None,
    filter_encryption: typing.Optional[_IKey_5f11635f] = None,
    filters: typing.Optional[typing.Sequence[typing.Mapping[builtins.str, typing.Any]]] = None,
@@ -4175,6 +4468,7 @@ def _typecheckingstub__826ee32f5239c7c242a7200ffc24778bee01c1101c3cc939fed4a8615
    batch_size: typing.Optional[jsii.Number] = None,
    enabled: typing.Optional[builtins.bool] = None,
    max_batching_window: typing.Optional[_Duration_4839e8c3] = None,
+   provisioned_poller_config: typing.Optional[typing.Union[ProvisionedPollerConfig, typing.Dict[builtins.str, typing.Any]]] = None,
) -> None:
    """Type checking stubs"""
    pass
@@ -4191,6 +4485,7 @@ def _typecheckingstub__ec371d5e4612e8923bbdcc024d90e26915d64be2dc40151f22fc41139
    batch_size: typing.Optional[jsii.Number] = None,
    enabled: typing.Optional[builtins.bool] = None,
    max_batching_window: typing.Optional[_Duration_4839e8c3] = None,
+   provisioned_poller_config: typing.Optional[typing.Union[ProvisionedPollerConfig, typing.Dict[builtins.str, typing.Any]]] = None,
    bisect_batch_on_error: typing.Optional[builtins.bool] = None,
    filter_encryption: typing.Optional[_IKey_5f11635f] = None,
    filters: typing.Optional[typing.Sequence[typing.Mapping[builtins.str, typing.Any]]] = None,
@@ -4223,6 +4518,7 @@ def _typecheckingstub__9f81acc98c12b4967363bdd43130a7e674a566679a7b200f5ccd6a0ae
    batch_size: typing.Optional[jsii.Number] = None,
    enabled: typing.Optional[builtins.bool] = None,
    max_batching_window: typing.Optional[_Duration_4839e8c3] = None,
+   provisioned_poller_config: typing.Optional[typing.Union[ProvisionedPollerConfig, typing.Dict[builtins.str, typing.Any]]] = None,
) -> None:
    """Type checking stubs"""
    pass
@@ -4239,6 +4535,7 @@ def _typecheckingstub__a0e5c31953f1ef382a7863484f36982000428c40cc94325c4f41de1e3
    batch_size: typing.Optional[jsii.Number] = None,
    enabled: typing.Optional[builtins.bool] = None,
    max_batching_window: typing.Optional[_Duration_4839e8c3] = None,
+   provisioned_poller_config: typing.Optional[typing.Union[ProvisionedPollerConfig, typing.Dict[builtins.str, typing.Any]]] = None,
    bisect_batch_on_error: typing.Optional[builtins.bool] = None,
    filter_encryption: typing.Optional[_IKey_5f11635f] = None,
    filters: typing.Optional[typing.Sequence[typing.Mapping[builtins.str, typing.Any]]] = None,