aws-cdk-lib 2.201.0__py3-none-any.whl → 2.202.0__py3-none-any.whl

This diff shows the changes between publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.

This version of aws-cdk-lib was flagged as a potentially problematic release.

@@ -4414,6 +4414,8 @@ class CfnEventInvokeConfig(
4414
4414
  ) -> None:
4415
4415
  '''A configuration object that specifies the destination of an event after Lambda processes it.
4416
4416
 
4417
+ For more information, see `Adding a destination <https://docs.aws.amazon.com/lambda/latest/dg/invocation-async-retain-records.html#invocation-async-destinations>`_ .
4418
+
4417
4419
  :param on_failure: The destination configuration for failed invocations. .. epigraph:: When using an Amazon SQS queue as a destination, FIFO queues cannot be used.
4418
4420
  :param on_success: The destination configuration for successful invocations. .. epigraph:: When using an Amazon SQS queue as a destination, FIFO queues cannot be used.
4419
4421
 
@@ -4495,7 +4497,7 @@ class CfnEventInvokeConfig(
4495
4497
  def __init__(self, *, destination: builtins.str) -> None:
4496
4498
  '''A destination for events that failed processing.
4497
4499
 
4498
- See `Capturing records of Lambda asynchronous invocations <https://docs.aws.amazon.com/lambda/latest/dg/invocation-async-retain-records.html>`_ for more information.
4500
+ For more information, see `Adding a destination <https://docs.aws.amazon.com/lambda/latest/dg/invocation-async-retain-records.html#invocation-async-destinations>`_ .
4499
4501
 
4500
4502
  :param destination: The Amazon Resource Name (ARN) of the destination resource. To retain records of unsuccessful `asynchronous invocations <https://docs.aws.amazon.com/lambda/latest/dg/invocation-async.html#invocation-async-destinations>`_ , you can configure an Amazon SNS topic, Amazon SQS queue, Amazon S3 bucket, Lambda function, or Amazon EventBridge event bus as the destination. To retain records of failed invocations from `Kinesis <https://docs.aws.amazon.com/lambda/latest/dg/with-kinesis.html>`_ , `DynamoDB <https://docs.aws.amazon.com/lambda/latest/dg/with-ddb.html>`_ , `self-managed Kafka <https://docs.aws.amazon.com/lambda/latest/dg/with-kafka.html#services-smaa-onfailure-destination>`_ or `Amazon MSK <https://docs.aws.amazon.com/lambda/latest/dg/with-msk.html#services-msk-onfailure-destination>`_ , you can configure an Amazon SNS topic, Amazon SQS queue, or Amazon S3 bucket as the destination.
4501
4503
 
@@ -4554,6 +4556,9 @@ class CfnEventInvokeConfig(
4554
4556
  '''A destination for events that were processed successfully.
4555
4557
 
4556
4558
  To retain records of successful `asynchronous invocations <https://docs.aws.amazon.com/lambda/latest/dg/invocation-async.html#invocation-async-destinations>`_ , you can configure an Amazon SNS topic, Amazon SQS queue, Lambda function, or Amazon EventBridge event bus as the destination.
4559
+ .. epigraph::
4560
+
4561
+ ``OnSuccess`` is not supported in ``CreateEventSourceMapping`` or ``UpdateEventSourceMapping`` requests.
4557
4562
 
4558
4563
  :param destination: The Amazon Resource Name (ARN) of the destination resource.
4559
4564
 
@@ -5556,6 +5561,8 @@ class CfnEventSourceMapping(
5556
5561
  ) -> None:
5557
5562
  '''A configuration object that specifies the destination of an event after Lambda processes it.
5558
5563
 
5564
+ For more information, see `Adding a destination <https://docs.aws.amazon.com/lambda/latest/dg/invocation-async-retain-records.html#invocation-async-destinations>`_ .
5565
+
5559
5566
  :param on_failure: The destination configuration for failed invocations.
5560
5567
 
5561
5568
  :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lambda-eventsourcemapping-destinationconfig.html
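
The hunks above adjust the docstrings of the destination property classes on CfnEventInvokeConfig and CfnEventSourceMapping. As a minimal sketch of how the CfnEventInvokeConfig properties nest (not part of this diff; the nested property class names follow the usual L1 naming and should be checked against the module), consider:

    from aws_cdk import aws_lambda as lambda_

    # Placeholder ARNs; in a real stack these would point at existing SQS/SNS resources.
    destination_config = lambda_.CfnEventInvokeConfig.DestinationConfigProperty(
        on_failure=lambda_.CfnEventInvokeConfig.OnFailureProperty(
            destination="arn:aws:sqs:us-east-1:123456789012:failed-events"
        ),
        on_success=lambda_.CfnEventInvokeConfig.OnSuccessProperty(
            destination="arn:aws:sns:us-east-1:123456789012:succeeded-events"
        ),
    )
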
@@ -5934,7 +5941,7 @@ class CfnEventSourceMapping(
5934
5941
  ) -> None:
5935
5942
  '''A destination for events that failed processing.
5936
5943
 
5937
- See `Capturing records of Lambda asynchronous invocations <https://docs.aws.amazon.com/lambda/latest/dg/invocation-async-retain-records.html>`_ for more information.
5944
+ For more information, see `Adding a destination <https://docs.aws.amazon.com/lambda/latest/dg/invocation-async-retain-records.html#invocation-async-destinations>`_ .
5938
5945
 
5939
5946
  :param destination: The Amazon Resource Name (ARN) of the destination resource. To retain records of unsuccessful `asynchronous invocations <https://docs.aws.amazon.com/lambda/latest/dg/invocation-async.html#invocation-async-destinations>`_ , you can configure an Amazon SNS topic, Amazon SQS queue, Amazon S3 bucket, Lambda function, or Amazon EventBridge event bus as the destination. To retain records of failed invocations from `Kinesis <https://docs.aws.amazon.com/lambda/latest/dg/with-kinesis.html>`_ , `DynamoDB <https://docs.aws.amazon.com/lambda/latest/dg/with-ddb.html>`_ , `self-managed Kafka <https://docs.aws.amazon.com/lambda/latest/dg/with-kafka.html#services-smaa-onfailure-destination>`_ or `Amazon MSK <https://docs.aws.amazon.com/lambda/latest/dg/with-msk.html#services-msk-onfailure-destination>`_ , you can configure an Amazon SNS topic, Amazon SQS queue, or Amazon S3 bucket as the destination.
5940
5947
 
@@ -13868,6 +13875,78 @@ class EventInvokeConfigProps(EventInvokeConfigOptions):
13868
13875
  )
13869
13876
 
13870
13877
 
13878
+ class EventRecordFormat(
13879
+ metaclass=jsii.JSIIMeta,
13880
+ jsii_type="aws-cdk-lib.aws_lambda.EventRecordFormat",
13881
+ ):
13882
+ '''The format the target function should receive records in.
13883
+
13884
+ :exampleMetadata: infused
13885
+
13886
+ Example::
13887
+
13888
+ from aws_cdk.aws_lambda_event_sources import ManagedKafkaEventSource, ConfluentSchemaRegistry
13889
+ from aws_cdk.aws_secretsmanager import Secret
13890
+
13891
+ # Your MSK cluster arn
13892
+ # cluster_arn: str
13893
+
13894
+ # my_function: lambda.Function
13895
+
13896
+
13897
+ # The Kafka topic you want to subscribe to
13898
+ topic = "some-cool-topic"
13899
+
13900
+ secret = Secret(self, "Secret", secret_name="AmazonMSK_KafkaSecret")
13901
+ my_function.add_event_source(ManagedKafkaEventSource(
13902
+ cluster_arn=cluster_arn,
13903
+ topic=topic,
13904
+ starting_position=lambda_.StartingPosition.TRIM_HORIZON,
13905
+ provisioned_poller_config=ProvisionedPollerConfig(
13906
+ minimum_pollers=1,
13907
+ maximum_pollers=3
13908
+ ),
13909
+ schema_registry_config=ConfluentSchemaRegistry(
13910
+ schema_registry_uri="https://example.com",
13911
+ event_record_format=lambda_.EventRecordFormat.JSON,
13912
+ authentication_type=lambda_.KafkaSchemaRegistryAccessConfigType.BASIC_AUTH,
13913
+ secret=secret,
13914
+ schema_validation_configs=[lambda_.KafkaSchemaValidationConfig(attribute=lambda_.KafkaSchemaValidationAttribute.KEY)]
13915
+ )
13916
+ ))
13917
+ '''
13918
+
13919
+ @jsii.member(jsii_name="of")
13920
+ @builtins.classmethod
13921
+ def of(cls, name: builtins.str) -> "EventRecordFormat":
13922
+ '''A custom event record format.
13923
+
13924
+ :param name: -
13925
+ '''
13926
+ if __debug__:
13927
+ type_hints = typing.get_type_hints(_typecheckingstub__913b83169547e1e27e0d17d4e5a189f10545dca54ca45db7a5dc40a7d5cc2999)
13928
+ check_type(argname="argument name", value=name, expected_type=type_hints["name"])
13929
+ return typing.cast("EventRecordFormat", jsii.sinvoke(cls, "of", [name]))
13930
+
13931
+ @jsii.python.classproperty
13932
+ @jsii.member(jsii_name="JSON")
13933
+ def JSON(cls) -> "EventRecordFormat":
13934
+ '''The target function will receive records as JSON objects.'''
13935
+ return typing.cast("EventRecordFormat", jsii.sget(cls, "JSON"))
13936
+
13937
+ @jsii.python.classproperty
13938
+ @jsii.member(jsii_name="SOURCE")
13939
+ def SOURCE(cls) -> "EventRecordFormat":
13940
+ '''The target function will receive records in the same format as the schema source.'''
13941
+ return typing.cast("EventRecordFormat", jsii.sget(cls, "SOURCE"))
13942
+
13943
+ @builtins.property
13944
+ @jsii.member(jsii_name="value")
13945
+ def value(self) -> builtins.str:
13946
+ '''The enum to use in the ``SchemaRegistryConfig.EventRecordFormat`` property in CloudFormation.'''
13947
+ return typing.cast(builtins.str, jsii.get(self, "value"))
13948
+
13949
+
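
A quick sketch of how the EventRecordFormat members added above are used (illustrative only; JSON and SOURCE are the modelled values, and of() is the escape hatch for anything CDK does not model yet):

    from aws_cdk import aws_lambda as lambda_

    fmt = lambda_.EventRecordFormat.JSON     # deliver records as standard JSON objects
    raw = lambda_.EventRecordFormat.SOURCE   # deliver records in their original source format
    custom = lambda_.EventRecordFormat.of("JSON")  # pass a raw string through unchanged
    print(fmt.value)  # the string written to SchemaRegistryConfig.EventRecordFormat
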
13871
13950
  @jsii.data_type(
13872
13951
  jsii_type="aws-cdk-lib.aws_lambda.EventSourceMappingOptions",
13873
13952
  jsii_struct_bases=[],
@@ -13890,6 +13969,7 @@ class EventInvokeConfigProps(EventInvokeConfigOptions):
13890
13969
  "provisioned_poller_config": "provisionedPollerConfig",
13891
13970
  "report_batch_item_failures": "reportBatchItemFailures",
13892
13971
  "retry_attempts": "retryAttempts",
13972
+ "schema_registry_config": "schemaRegistryConfig",
13893
13973
  "source_access_configurations": "sourceAccessConfigurations",
13894
13974
  "starting_position": "startingPosition",
13895
13975
  "starting_position_timestamp": "startingPositionTimestamp",
@@ -13919,6 +13999,7 @@ class EventSourceMappingOptions:
13919
13999
  provisioned_poller_config: typing.Optional[typing.Union["ProvisionedPollerConfig", typing.Dict[builtins.str, typing.Any]]] = None,
13920
14000
  report_batch_item_failures: typing.Optional[builtins.bool] = None,
13921
14001
  retry_attempts: typing.Optional[jsii.Number] = None,
14002
+ schema_registry_config: typing.Optional["ISchemaRegistry"] = None,
13922
14003
  source_access_configurations: typing.Optional[typing.Sequence[typing.Union["SourceAccessConfiguration", typing.Dict[builtins.str, typing.Any]]]] = None,
13923
14004
  starting_position: typing.Optional["StartingPosition"] = None,
13924
14005
  starting_position_timestamp: typing.Optional[jsii.Number] = None,
@@ -13944,6 +14025,7 @@ class EventSourceMappingOptions:
13944
14025
  :param provisioned_poller_config: Configuration for provisioned pollers that read from the event source. When specified, allows control over the minimum and maximum number of pollers that can be provisioned to process events from the source. Default: - no provisioned pollers
13945
14026
  :param report_batch_item_failures: Allow functions to return partially successful responses for a batch of records. Default: false
13946
14027
  :param retry_attempts: The maximum number of times to retry when the function returns an error. Set to ``undefined`` if you want lambda to keep retrying infinitely or until the record expires. Valid Range: - Minimum value of 0 - Maximum value of 10000 Default: - infinite or until the record expires.
14028
+ :param schema_registry_config: Specific configuration settings for a Kafka schema registry. Default: - none
13947
14029
  :param source_access_configurations: Specific settings like the authentication protocol or the VPC components to secure access to your event source. Default: - none
13948
14030
  :param starting_position: The position in the DynamoDB, Kinesis or MSK stream where AWS Lambda should start reading. Default: - no starting position
13949
14031
  :param starting_position_timestamp: The time from which to start reading, in Unix time seconds. Default: - no timestamp
@@ -13963,6 +14045,7 @@ class EventSourceMappingOptions:
13963
14045
  # event_source_dlq: lambda.IEventSourceDlq
13964
14046
  # filters: Any
13965
14047
  # key: kms.Key
14048
+ # schema_registry: lambda.ISchemaRegistry
13966
14049
  # source_access_configuration_type: lambda.SourceAccessConfigurationType
13967
14050
 
13968
14051
  event_source_mapping_options = lambda.EventSourceMappingOptions(
@@ -13991,6 +14074,7 @@ class EventSourceMappingOptions:
13991
14074
  ),
13992
14075
  report_batch_item_failures=False,
13993
14076
  retry_attempts=123,
14077
+ schema_registry_config=schema_registry,
13994
14078
  source_access_configurations=[lambda.SourceAccessConfiguration(
13995
14079
  type=source_access_configuration_type,
13996
14080
  uri="uri"
@@ -14025,6 +14109,7 @@ class EventSourceMappingOptions:
14025
14109
  check_type(argname="argument provisioned_poller_config", value=provisioned_poller_config, expected_type=type_hints["provisioned_poller_config"])
14026
14110
  check_type(argname="argument report_batch_item_failures", value=report_batch_item_failures, expected_type=type_hints["report_batch_item_failures"])
14027
14111
  check_type(argname="argument retry_attempts", value=retry_attempts, expected_type=type_hints["retry_attempts"])
14112
+ check_type(argname="argument schema_registry_config", value=schema_registry_config, expected_type=type_hints["schema_registry_config"])
14028
14113
  check_type(argname="argument source_access_configurations", value=source_access_configurations, expected_type=type_hints["source_access_configurations"])
14029
14114
  check_type(argname="argument starting_position", value=starting_position, expected_type=type_hints["starting_position"])
14030
14115
  check_type(argname="argument starting_position_timestamp", value=starting_position_timestamp, expected_type=type_hints["starting_position_timestamp"])
@@ -14067,6 +14152,8 @@ class EventSourceMappingOptions:
14067
14152
  self._values["report_batch_item_failures"] = report_batch_item_failures
14068
14153
  if retry_attempts is not None:
14069
14154
  self._values["retry_attempts"] = retry_attempts
14155
+ if schema_registry_config is not None:
14156
+ self._values["schema_registry_config"] = schema_registry_config
14070
14157
  if source_access_configurations is not None:
14071
14158
  self._values["source_access_configurations"] = source_access_configurations
14072
14159
  if starting_position is not None:
@@ -14294,6 +14381,15 @@ class EventSourceMappingOptions:
14294
14381
  result = self._values.get("retry_attempts")
14295
14382
  return typing.cast(typing.Optional[jsii.Number], result)
14296
14383
 
14384
+ @builtins.property
14385
+ def schema_registry_config(self) -> typing.Optional["ISchemaRegistry"]:
14386
+ '''Specific configuration settings for a Kafka schema registry.
14387
+
14388
+ :default: - none
14389
+ '''
14390
+ result = self._values.get("schema_registry_config")
14391
+ return typing.cast(typing.Optional["ISchemaRegistry"], result)
14392
+
14297
14393
  @builtins.property
14298
14394
  def source_access_configurations(
14299
14395
  self,
@@ -14387,6 +14483,7 @@ class EventSourceMappingOptions:
14387
14483
  "provisioned_poller_config": "provisionedPollerConfig",
14388
14484
  "report_batch_item_failures": "reportBatchItemFailures",
14389
14485
  "retry_attempts": "retryAttempts",
14486
+ "schema_registry_config": "schemaRegistryConfig",
14390
14487
  "source_access_configurations": "sourceAccessConfigurations",
14391
14488
  "starting_position": "startingPosition",
14392
14489
  "starting_position_timestamp": "startingPositionTimestamp",
@@ -14417,6 +14514,7 @@ class EventSourceMappingProps(EventSourceMappingOptions):
14417
14514
  provisioned_poller_config: typing.Optional[typing.Union["ProvisionedPollerConfig", typing.Dict[builtins.str, typing.Any]]] = None,
14418
14515
  report_batch_item_failures: typing.Optional[builtins.bool] = None,
14419
14516
  retry_attempts: typing.Optional[jsii.Number] = None,
14517
+ schema_registry_config: typing.Optional["ISchemaRegistry"] = None,
14420
14518
  source_access_configurations: typing.Optional[typing.Sequence[typing.Union["SourceAccessConfiguration", typing.Dict[builtins.str, typing.Any]]]] = None,
14421
14519
  starting_position: typing.Optional["StartingPosition"] = None,
14422
14520
  starting_position_timestamp: typing.Optional[jsii.Number] = None,
@@ -14444,6 +14542,7 @@ class EventSourceMappingProps(EventSourceMappingOptions):
14444
14542
  :param provisioned_poller_config: Configuration for provisioned pollers that read from the event source. When specified, allows control over the minimum and maximum number of pollers that can be provisioned to process events from the source. Default: - no provisioned pollers
14445
14543
  :param report_batch_item_failures: Allow functions to return partially successful responses for a batch of records. Default: false
14446
14544
  :param retry_attempts: The maximum number of times to retry when the function returns an error. Set to ``undefined`` if you want lambda to keep retrying infinitely or until the record expires. Valid Range: - Minimum value of 0 - Maximum value of 10000 Default: - infinite or until the record expires.
14545
+ :param schema_registry_config: Specific configuration settings for a Kafka schema registry. Default: - none
14447
14546
  :param source_access_configurations: Specific settings like the authentication protocol or the VPC components to secure access to your event source. Default: - none
14448
14547
  :param starting_position: The position in the DynamoDB, Kinesis or MSK stream where AWS Lambda should start reading. Default: - no starting position
14449
14548
  :param starting_position_timestamp: The time from which to start reading, in Unix time seconds. Default: - no timestamp
@@ -14465,6 +14564,7 @@ class EventSourceMappingProps(EventSourceMappingOptions):
14465
14564
  # filters: Any
14466
14565
  # function_: lambda.Function
14467
14566
  # key: kms.Key
14567
+ # schema_registry: lambda.ISchemaRegistry
14468
14568
  # source_access_configuration_type: lambda.SourceAccessConfigurationType
14469
14569
 
14470
14570
  event_source_mapping_props = lambda.EventSourceMappingProps(
@@ -14496,6 +14596,7 @@ class EventSourceMappingProps(EventSourceMappingOptions):
14496
14596
  ),
14497
14597
  report_batch_item_failures=False,
14498
14598
  retry_attempts=123,
14599
+ schema_registry_config=schema_registry,
14499
14600
  source_access_configurations=[lambda.SourceAccessConfiguration(
14500
14601
  type=source_access_configuration_type,
14501
14602
  uri="uri"
@@ -14530,6 +14631,7 @@ class EventSourceMappingProps(EventSourceMappingOptions):
14530
14631
  check_type(argname="argument provisioned_poller_config", value=provisioned_poller_config, expected_type=type_hints["provisioned_poller_config"])
14531
14632
  check_type(argname="argument report_batch_item_failures", value=report_batch_item_failures, expected_type=type_hints["report_batch_item_failures"])
14532
14633
  check_type(argname="argument retry_attempts", value=retry_attempts, expected_type=type_hints["retry_attempts"])
14634
+ check_type(argname="argument schema_registry_config", value=schema_registry_config, expected_type=type_hints["schema_registry_config"])
14533
14635
  check_type(argname="argument source_access_configurations", value=source_access_configurations, expected_type=type_hints["source_access_configurations"])
14534
14636
  check_type(argname="argument starting_position", value=starting_position, expected_type=type_hints["starting_position"])
14535
14637
  check_type(argname="argument starting_position_timestamp", value=starting_position_timestamp, expected_type=type_hints["starting_position_timestamp"])
@@ -14575,6 +14677,8 @@ class EventSourceMappingProps(EventSourceMappingOptions):
14575
14677
  self._values["report_batch_item_failures"] = report_batch_item_failures
14576
14678
  if retry_attempts is not None:
14577
14679
  self._values["retry_attempts"] = retry_attempts
14680
+ if schema_registry_config is not None:
14681
+ self._values["schema_registry_config"] = schema_registry_config
14578
14682
  if source_access_configurations is not None:
14579
14683
  self._values["source_access_configurations"] = source_access_configurations
14580
14684
  if starting_position is not None:
@@ -14802,6 +14906,15 @@ class EventSourceMappingProps(EventSourceMappingOptions):
14802
14906
  result = self._values.get("retry_attempts")
14803
14907
  return typing.cast(typing.Optional[jsii.Number], result)
14804
14908
 
14909
+ @builtins.property
14910
+ def schema_registry_config(self) -> typing.Optional["ISchemaRegistry"]:
14911
+ '''Specific configuration settings for a Kafka schema registry.
14912
+
14913
+ :default: - none
14914
+ '''
14915
+ result = self._values.get("schema_registry_config")
14916
+ return typing.cast(typing.Optional["ISchemaRegistry"], result)
14917
+
14805
14918
  @builtins.property
14806
14919
  def source_access_configurations(
14807
14920
  self,
@@ -18291,6 +18404,7 @@ class IFunction(
18291
18404
  provisioned_poller_config: typing.Optional[typing.Union["ProvisionedPollerConfig", typing.Dict[builtins.str, typing.Any]]] = None,
18292
18405
  report_batch_item_failures: typing.Optional[builtins.bool] = None,
18293
18406
  retry_attempts: typing.Optional[jsii.Number] = None,
18407
+ schema_registry_config: typing.Optional["ISchemaRegistry"] = None,
18294
18408
  source_access_configurations: typing.Optional[typing.Sequence[typing.Union["SourceAccessConfiguration", typing.Dict[builtins.str, typing.Any]]]] = None,
18295
18409
  starting_position: typing.Optional["StartingPosition"] = None,
18296
18410
  starting_position_timestamp: typing.Optional[jsii.Number] = None,
@@ -18318,6 +18432,7 @@ class IFunction(
18318
18432
  :param provisioned_poller_config: Configuration for provisioned pollers that read from the event source. When specified, allows control over the minimum and maximum number of pollers that can be provisioned to process events from the source. Default: - no provisioned pollers
18319
18433
  :param report_batch_item_failures: Allow functions to return partially successful responses for a batch of records. Default: false
18320
18434
  :param retry_attempts: The maximum number of times to retry when the function returns an error. Set to ``undefined`` if you want lambda to keep retrying infinitely or until the record expires. Valid Range: - Minimum value of 0 - Maximum value of 10000 Default: - infinite or until the record expires.
18435
+ :param schema_registry_config: Specific configuration settings for a Kafka schema registry. Default: - none
18321
18436
  :param source_access_configurations: Specific settings like the authentication protocol or the VPC components to secure access to your event source. Default: - none
18322
18437
  :param starting_position: The position in the DynamoDB, Kinesis or MSK stream where AWS Lambda should start reading. Default: - no starting position
18323
18438
  :param starting_position_timestamp: The time from which to start reading, in Unix time seconds. Default: - no timestamp
@@ -18733,6 +18848,7 @@ class _IFunctionProxy(
18733
18848
  provisioned_poller_config: typing.Optional[typing.Union["ProvisionedPollerConfig", typing.Dict[builtins.str, typing.Any]]] = None,
18734
18849
  report_batch_item_failures: typing.Optional[builtins.bool] = None,
18735
18850
  retry_attempts: typing.Optional[jsii.Number] = None,
18851
+ schema_registry_config: typing.Optional["ISchemaRegistry"] = None,
18736
18852
  source_access_configurations: typing.Optional[typing.Sequence[typing.Union["SourceAccessConfiguration", typing.Dict[builtins.str, typing.Any]]]] = None,
18737
18853
  starting_position: typing.Optional["StartingPosition"] = None,
18738
18854
  starting_position_timestamp: typing.Optional[jsii.Number] = None,
@@ -18760,6 +18876,7 @@ class _IFunctionProxy(
18760
18876
  :param provisioned_poller_config: Configuration for provisioned pollers that read from the event source. When specified, allows control over the minimum and maximum number of pollers that can be provisioned to process events from the source. Default: - no provisioned pollers
18761
18877
  :param report_batch_item_failures: Allow functions to return partially successful responses for a batch of records. Default: false
18762
18878
  :param retry_attempts: The maximum number of times to retry when the function returns an error. Set to ``undefined`` if you want lambda to keep retrying infinitely or until the record expires. Valid Range: - Minimum value of 0 - Maximum value of 10000 Default: - infinite or until the record expires.
18879
+ :param schema_registry_config: Specific configuration settings for a Kafka schema registry. Default: - none
18763
18880
  :param source_access_configurations: Specific settings like the authentication protocol or the VPC components to secure access to your event source. Default: - none
18764
18881
  :param starting_position: The position in the DynamoDB, Kinesis or MSK stream where AWS Lambda should start reading. Default: - no starting position
18765
18882
  :param starting_position_timestamp: The time from which to start reading, in Unix time seconds. Default: - no timestamp
@@ -18788,6 +18905,7 @@ class _IFunctionProxy(
18788
18905
  provisioned_poller_config=provisioned_poller_config,
18789
18906
  report_batch_item_failures=report_batch_item_failures,
18790
18907
  retry_attempts=retry_attempts,
18908
+ schema_registry_config=schema_registry_config,
18791
18909
  source_access_configurations=source_access_configurations,
18792
18910
  starting_position=starting_position,
18793
18911
  starting_position_timestamp=starting_position_timestamp,
@@ -19527,6 +19645,50 @@ class _IScalableFunctionAttributeProxy(
19527
19645
  typing.cast(typing.Any, IScalableFunctionAttribute).__jsii_proxy_class__ = lambda : _IScalableFunctionAttributeProxy
19528
19646
 
19529
19647
 
19648
+ @jsii.interface(jsii_type="aws-cdk-lib.aws_lambda.ISchemaRegistry")
19649
+ class ISchemaRegistry(typing_extensions.Protocol):
19650
+ '''A schema registry for an event source.'''
19651
+
19652
+ @jsii.member(jsii_name="bind")
19653
+ def bind(
19654
+ self,
19655
+ target: IEventSourceMapping,
19656
+ target_handler: IFunction,
19657
+ ) -> "KafkaSchemaRegistryConfig":
19658
+ '''Returns the schema registry config of the event source.
19659
+
19660
+ :param target: -
19661
+ :param target_handler: -
19662
+ '''
19663
+ ...
19664
+
19665
+
19666
+ class _ISchemaRegistryProxy:
19667
+ '''A schema registry for an event source.'''
19668
+
19669
+ __jsii_type__: typing.ClassVar[str] = "aws-cdk-lib.aws_lambda.ISchemaRegistry"
19670
+
19671
+ @jsii.member(jsii_name="bind")
19672
+ def bind(
19673
+ self,
19674
+ target: IEventSourceMapping,
19675
+ target_handler: IFunction,
19676
+ ) -> "KafkaSchemaRegistryConfig":
19677
+ '''Returns the schema registry config of the event source.
19678
+
19679
+ :param target: -
19680
+ :param target_handler: -
19681
+ '''
19682
+ if __debug__:
19683
+ type_hints = typing.get_type_hints(_typecheckingstub__0c68a1588ffc2bca6786310b7681f323e4368d6bf66e13c5fbdc7ef0ad571a22)
19684
+ check_type(argname="argument target", value=target, expected_type=type_hints["target"])
19685
+ check_type(argname="argument target_handler", value=target_handler, expected_type=type_hints["target_handler"])
19686
+ return typing.cast("KafkaSchemaRegistryConfig", jsii.invoke(self, "bind", [target, target_handler]))
19687
+
19688
+ # Adding a "__jsii_proxy_class__(): typing.Type" function to the interface
19689
+ typing.cast(typing.Any, ISchemaRegistry).__jsii_proxy_class__ = lambda : _ISchemaRegistryProxy
19690
+
19691
+
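
Because ISchemaRegistry is a plain interface, application code can supply its own binding in addition to the implementations referenced from aws_lambda_event_sources in the examples above. A hypothetical sketch using only types introduced elsewhere in this release (the registry ARN is a placeholder):

    import jsii
    from aws_cdk import aws_lambda as lambda_

    @jsii.implements(lambda_.ISchemaRegistry)
    class MySchemaRegistry:
        def bind(self, target, target_handler):
            # Produce the low-level schema registry config for the event source mapping.
            return lambda_.KafkaSchemaRegistryConfig(
                schema_registry_uri="arn:aws:glue:us-east-1:123456789012:registry/my-registry",
                event_record_format=lambda_.EventRecordFormat.JSON,
                schema_validation_configs=[
                    lambda_.KafkaSchemaValidationConfig(
                        attribute=lambda_.KafkaSchemaValidationAttribute.VALUE
                    )
                ],
            )
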
19530
19692
  @jsii.interface(jsii_type="aws-cdk-lib.aws_lambda.IVersion")
19531
19693
  class IVersion(IFunction, typing_extensions.Protocol):
19532
19694
  @builtins.property
@@ -19745,6 +19907,408 @@ class InvokeMode(enum.Enum):
19745
19907
  '''
19746
19908
 
19747
19909
 
19910
+ @jsii.data_type(
19911
+ jsii_type="aws-cdk-lib.aws_lambda.KafkaSchemaRegistryAccessConfig",
19912
+ jsii_struct_bases=[],
19913
+ name_mapping={"type": "type", "uri": "uri"},
19914
+ )
19915
+ class KafkaSchemaRegistryAccessConfig:
19916
+ def __init__(
19917
+ self,
19918
+ *,
19919
+ type: "KafkaSchemaRegistryAccessConfigType",
19920
+ uri: builtins.str,
19921
+ ) -> None:
19922
+ '''Specific access configuration settings that tell Lambda how to authenticate with your schema registry.
19923
+
19924
+ If you're working with an AWS Glue schema registry, don't provide authentication details in this object. Instead, ensure that your execution role has the required permissions for Lambda to access your cluster.
19925
+
19926
+ If you're working with a Confluent schema registry, choose the authentication method in the Type field, and provide the AWS Secrets Manager secret ARN in the URI field.
19927
+
19928
+ :param type: The type of authentication Lambda uses to access your schema registry.
19929
+ :param uri: The URI of the secret (Secrets Manager secret ARN) to authenticate with your schema registry.
19930
+
19931
+ :exampleMetadata: fixture=_generated
19932
+
19933
+ Example::
19934
+
19935
+ # The code below shows an example of how to instantiate this type.
19936
+ # The values are placeholders you should change.
19937
+ from aws_cdk import aws_lambda as lambda_
19938
+
19939
+ # kafka_schema_registry_access_config_type: lambda.KafkaSchemaRegistryAccessConfigType
19940
+
19941
+ kafka_schema_registry_access_config = lambda.KafkaSchemaRegistryAccessConfig(
19942
+ type=kafka_schema_registry_access_config_type,
19943
+ uri="uri"
19944
+ )
19945
+ '''
19946
+ if __debug__:
19947
+ type_hints = typing.get_type_hints(_typecheckingstub__a9ed7f710fdd20a96eb8eeb709bbea9f1e52b3ca20bed4ca85cf8341031090d2)
19948
+ check_type(argname="argument type", value=type, expected_type=type_hints["type"])
19949
+ check_type(argname="argument uri", value=uri, expected_type=type_hints["uri"])
19950
+ self._values: typing.Dict[builtins.str, typing.Any] = {
19951
+ "type": type,
19952
+ "uri": uri,
19953
+ }
19954
+
19955
+ @builtins.property
19956
+ def type(self) -> "KafkaSchemaRegistryAccessConfigType":
19957
+ '''The type of authentication Lambda uses to access your schema registry.'''
19958
+ result = self._values.get("type")
19959
+ assert result is not None, "Required property 'type' is missing"
19960
+ return typing.cast("KafkaSchemaRegistryAccessConfigType", result)
19961
+
19962
+ @builtins.property
19963
+ def uri(self) -> builtins.str:
19964
+ '''The URI of the secret (Secrets Manager secret ARN) to authenticate with your schema registry.
19965
+
19966
+ :see: KafkaSchemaRegistryAccessConfigType
19967
+ '''
19968
+ result = self._values.get("uri")
19969
+ assert result is not None, "Required property 'uri' is missing"
19970
+ return typing.cast(builtins.str, result)
19971
+
19972
+ def __eq__(self, rhs: typing.Any) -> builtins.bool:
19973
+ return isinstance(rhs, self.__class__) and rhs._values == self._values
19974
+
19975
+ def __ne__(self, rhs: typing.Any) -> builtins.bool:
19976
+ return not (rhs == self)
19977
+
19978
+ def __repr__(self) -> str:
19979
+ return "KafkaSchemaRegistryAccessConfig(%s)" % ", ".join(
19980
+ k + "=" + repr(v) for k, v in self._values.items()
19981
+ )
19982
+
19983
+
19984
+ class KafkaSchemaRegistryAccessConfigType(
19985
+ metaclass=jsii.JSIIMeta,
19986
+ jsii_type="aws-cdk-lib.aws_lambda.KafkaSchemaRegistryAccessConfigType",
19987
+ ):
19988
+ '''The type of authentication protocol for your schema registry.
19989
+
19990
+ :exampleMetadata: infused
19991
+
19992
+ Example::
19993
+
19994
+ from aws_cdk.aws_lambda_event_sources import ManagedKafkaEventSource, ConfluentSchemaRegistry
19995
+ from aws_cdk.aws_secretsmanager import Secret
19996
+
19997
+ # Your MSK cluster arn
19998
+ # cluster_arn: str
19999
+
20000
+ # my_function: lambda.Function
20001
+
20002
+
20003
+ # The Kafka topic you want to subscribe to
20004
+ topic = "some-cool-topic"
20005
+
20006
+ secret = Secret(self, "Secret", secret_name="AmazonMSK_KafkaSecret")
20007
+ my_function.add_event_source(ManagedKafkaEventSource(
20008
+ cluster_arn=cluster_arn,
20009
+ topic=topic,
20010
+ starting_position=lambda_.StartingPosition.TRIM_HORIZON,
20011
+ provisioned_poller_config=ProvisionedPollerConfig(
20012
+ minimum_pollers=1,
20013
+ maximum_pollers=3
20014
+ ),
20015
+ schema_registry_config=ConfluentSchemaRegistry(
20016
+ schema_registry_uri="https://example.com",
20017
+ event_record_format=lambda_.EventRecordFormat.JSON,
20018
+ authentication_type=lambda_.KafkaSchemaRegistryAccessConfigType.BASIC_AUTH,
20019
+ secret=secret,
20020
+ schema_validation_configs=[lambda_.KafkaSchemaValidationConfig(attribute=lambda_.KafkaSchemaValidationAttribute.KEY)]
20021
+ )
20022
+ ))
20023
+ '''
20024
+
20025
+ @jsii.member(jsii_name="of")
20026
+ @builtins.classmethod
20027
+ def of(cls, name: builtins.str) -> "KafkaSchemaRegistryAccessConfigType":
20028
+ '''A custom source access configuration property for a schema registry.
20029
+
20030
+ :param name: -
20031
+ '''
20032
+ if __debug__:
20033
+ type_hints = typing.get_type_hints(_typecheckingstub__4bc6850ec99a0d35e26da78049f4174bb567964e6b20083c7a1e29e3f120f831)
20034
+ check_type(argname="argument name", value=name, expected_type=type_hints["name"])
20035
+ return typing.cast("KafkaSchemaRegistryAccessConfigType", jsii.sinvoke(cls, "of", [name]))
20036
+
20037
+ @jsii.python.classproperty
20038
+ @jsii.member(jsii_name="BASIC_AUTH")
20039
+ def BASIC_AUTH(cls) -> "KafkaSchemaRegistryAccessConfigType":
20040
+ '''The Secrets Manager secret that stores your broker credentials.'''
20041
+ return typing.cast("KafkaSchemaRegistryAccessConfigType", jsii.sget(cls, "BASIC_AUTH"))
20042
+
20043
+ @jsii.python.classproperty
20044
+ @jsii.member(jsii_name="CLIENT_CERTIFICATE_TLS_AUTH")
20045
+ def CLIENT_CERTIFICATE_TLS_AUTH(cls) -> "KafkaSchemaRegistryAccessConfigType":
20046
+ '''The Secrets Manager ARN of your secret key containing the certificate chain (X.509 PEM), private key (PKCS#8 PEM), and private key password (optional) used for mutual TLS authentication of your schema registry.'''
20047
+ return typing.cast("KafkaSchemaRegistryAccessConfigType", jsii.sget(cls, "CLIENT_CERTIFICATE_TLS_AUTH"))
20048
+
20049
+ @jsii.python.classproperty
20050
+ @jsii.member(jsii_name="SERVER_ROOT_CA_CERTIFICATE")
20051
+ def SERVER_ROOT_CA_CERTIFICATE(cls) -> "KafkaSchemaRegistryAccessConfigType":
20052
+ '''The Secrets Manager ARN of your secret key containing the root CA certificate (X.509 PEM) used for TLS encryption of your schema registry.'''
20053
+ return typing.cast("KafkaSchemaRegistryAccessConfigType", jsii.sget(cls, "SERVER_ROOT_CA_CERTIFICATE"))
20054
+
20055
+ @builtins.property
20056
+ @jsii.member(jsii_name="type")
20057
+ def type(self) -> builtins.str:
20058
+ '''The key to use in the ``SchemaRegistryConfig.AccessConfig.Type`` property in CloudFormation.'''
20059
+ return typing.cast(builtins.str, jsii.get(self, "type"))
20060
+
20061
+
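
As the KafkaSchemaRegistryAccessConfig docstring above notes, a Confluent registry pairs an authentication type with a Secrets Manager secret ARN. A small sketch with a placeholder ARN:

    from aws_cdk import aws_lambda as lambda_

    access = lambda_.KafkaSchemaRegistryAccessConfig(
        type=lambda_.KafkaSchemaRegistryAccessConfigType.BASIC_AUTH,
        uri="arn:aws:secretsmanager:us-east-1:123456789012:secret:registry-credentials",
    )
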
20062
+ @jsii.data_type(
20063
+ jsii_type="aws-cdk-lib.aws_lambda.KafkaSchemaRegistryConfig",
20064
+ jsii_struct_bases=[],
20065
+ name_mapping={
20066
+ "event_record_format": "eventRecordFormat",
20067
+ "schema_registry_uri": "schemaRegistryUri",
20068
+ "schema_validation_configs": "schemaValidationConfigs",
20069
+ "access_configs": "accessConfigs",
20070
+ },
20071
+ )
20072
+ class KafkaSchemaRegistryConfig:
20073
+ def __init__(
20074
+ self,
20075
+ *,
20076
+ event_record_format: EventRecordFormat,
20077
+ schema_registry_uri: builtins.str,
20078
+ schema_validation_configs: typing.Sequence[typing.Union["KafkaSchemaValidationConfig", typing.Dict[builtins.str, typing.Any]]],
20079
+ access_configs: typing.Optional[typing.Sequence[typing.Union[KafkaSchemaRegistryAccessConfig, typing.Dict[builtins.str, typing.Any]]]] = None,
20080
+ ) -> None:
20081
+ '''(Amazon MSK and self-managed Apache Kafka only) Specific configuration settings for a Kafka schema registry.
20082
+
20083
+ :param event_record_format: The record format that Lambda delivers to your function after schema validation. - Choose JSON to have Lambda deliver the record to your function as a standard JSON object. - Choose SOURCE to have Lambda deliver the record to your function in its original source format. Lambda removes all schema metadata, such as the schema ID, before sending the record to your function. Default: - none
20084
+ :param schema_registry_uri: The URI for your schema registry. The correct URI format depends on the type of schema registry you're using. Default: - none
20085
+ :param schema_validation_configs: An array of schema validation configuration objects, which tell Lambda the message attributes you want to validate and filter using your schema registry. Default: - none
20086
+ :param access_configs: An array of access configuration objects that tell Lambda how to authenticate with your schema registry. Default: - none
20087
+
20088
+ :exampleMetadata: fixture=_generated
20089
+
20090
+ Example::
20091
+
20092
+ # The code below shows an example of how to instantiate this type.
20093
+ # The values are placeholders you should change.
20094
+ from aws_cdk import aws_lambda as lambda_
20095
+
20096
+ # event_record_format: lambda.EventRecordFormat
20097
+ # kafka_schema_registry_access_config_type: lambda.KafkaSchemaRegistryAccessConfigType
20098
+ # kafka_schema_validation_attribute: lambda.KafkaSchemaValidationAttribute
20099
+
20100
+ kafka_schema_registry_config = lambda.KafkaSchemaRegistryConfig(
20101
+ event_record_format=event_record_format,
20102
+ schema_registry_uri="schemaRegistryUri",
20103
+ schema_validation_configs=[lambda.KafkaSchemaValidationConfig(
20104
+ attribute=kafka_schema_validation_attribute
20105
+ )],
20106
+
20107
+ # the properties below are optional
20108
+ access_configs=[lambda.KafkaSchemaRegistryAccessConfig(
20109
+ type=kafka_schema_registry_access_config_type,
20110
+ uri="uri"
20111
+ )]
20112
+ )
20113
+ '''
20114
+ if __debug__:
20115
+ type_hints = typing.get_type_hints(_typecheckingstub__a3171876130664545cc7c6ac22856db59c006c52bb5f546cb11b18d515fef45e)
20116
+ check_type(argname="argument event_record_format", value=event_record_format, expected_type=type_hints["event_record_format"])
20117
+ check_type(argname="argument schema_registry_uri", value=schema_registry_uri, expected_type=type_hints["schema_registry_uri"])
20118
+ check_type(argname="argument schema_validation_configs", value=schema_validation_configs, expected_type=type_hints["schema_validation_configs"])
20119
+ check_type(argname="argument access_configs", value=access_configs, expected_type=type_hints["access_configs"])
20120
+ self._values: typing.Dict[builtins.str, typing.Any] = {
20121
+ "event_record_format": event_record_format,
20122
+ "schema_registry_uri": schema_registry_uri,
20123
+ "schema_validation_configs": schema_validation_configs,
20124
+ }
20125
+ if access_configs is not None:
20126
+ self._values["access_configs"] = access_configs
20127
+
20128
+ @builtins.property
20129
+ def event_record_format(self) -> EventRecordFormat:
20130
+ '''The record format that Lambda delivers to your function after schema validation.
20131
+
20132
+ - Choose JSON to have Lambda deliver the record to your function as a standard JSON object.
20133
+ - Choose SOURCE to have Lambda deliver the record to your function in its original source format. Lambda removes all schema metadata, such as the schema ID, before sending the record to your function.
20134
+
20135
+ :default: - none
20136
+ '''
20137
+ result = self._values.get("event_record_format")
20138
+ assert result is not None, "Required property 'event_record_format' is missing"
20139
+ return typing.cast(EventRecordFormat, result)
20140
+
20141
+ @builtins.property
20142
+ def schema_registry_uri(self) -> builtins.str:
20143
+ '''The URI for your schema registry.
20144
+
20145
+ The correct URI format depends on the type of schema registry you're using.
20146
+
20147
+ :default: - none
20148
+ '''
20149
+ result = self._values.get("schema_registry_uri")
20150
+ assert result is not None, "Required property 'schema_registry_uri' is missing"
20151
+ return typing.cast(builtins.str, result)
20152
+
20153
+ @builtins.property
20154
+ def schema_validation_configs(self) -> typing.List["KafkaSchemaValidationConfig"]:
20155
+ '''An array of schema validation configuration objects, which tell Lambda the message attributes you want to validate and filter using your schema registry.
20156
+
20157
+ :default: - none
20158
+ '''
20159
+ result = self._values.get("schema_validation_configs")
20160
+ assert result is not None, "Required property 'schema_validation_configs' is missing"
20161
+ return typing.cast(typing.List["KafkaSchemaValidationConfig"], result)
20162
+
20163
+ @builtins.property
20164
+ def access_configs(
20165
+ self,
20166
+ ) -> typing.Optional[typing.List[KafkaSchemaRegistryAccessConfig]]:
20167
+ '''An array of access configuration objects that tell Lambda how to authenticate with your schema registry.
20168
+
20169
+ :default: - none
20170
+ '''
20171
+ result = self._values.get("access_configs")
20172
+ return typing.cast(typing.Optional[typing.List[KafkaSchemaRegistryAccessConfig]], result)
20173
+
20174
+ def __eq__(self, rhs: typing.Any) -> builtins.bool:
20175
+ return isinstance(rhs, self.__class__) and rhs._values == self._values
20176
+
20177
+ def __ne__(self, rhs: typing.Any) -> builtins.bool:
20178
+ return not (rhs == self)
20179
+
20180
+ def __repr__(self) -> str:
20181
+ return "KafkaSchemaRegistryConfig(%s)" % ", ".join(
20182
+ k + "=" + repr(v) for k, v in self._values.items()
20183
+ )
20184
+
20185
+
20186
+ class KafkaSchemaValidationAttribute(
20187
+ metaclass=jsii.JSIIMeta,
20188
+ jsii_type="aws-cdk-lib.aws_lambda.KafkaSchemaValidationAttribute",
20189
+ ):
20190
+ '''Specific schema validation configuration settings that tell Lambda the message attributes you want to validate and filter using your schema registry.
20191
+
20192
+ :exampleMetadata: infused
20193
+
20194
+ Example::
20195
+
20196
+ from aws_cdk.aws_lambda_event_sources import ManagedKafkaEventSource, ConfluentSchemaRegistry
20197
+ from aws_cdk.aws_secretsmanager import Secret
20198
+
20199
+ # Your MSK cluster arn
20200
+ # cluster_arn: str
20201
+
20202
+ # my_function: lambda.Function
20203
+
20204
+
20205
+ # The Kafka topic you want to subscribe to
20206
+ topic = "some-cool-topic"
20207
+
20208
+ secret = Secret(self, "Secret", secret_name="AmazonMSK_KafkaSecret")
20209
+ my_function.add_event_source(ManagedKafkaEventSource(
20210
+ cluster_arn=cluster_arn,
20211
+ topic=topic,
20212
+ starting_position=lambda_.StartingPosition.TRIM_HORIZON,
20213
+ provisioned_poller_config=ProvisionedPollerConfig(
20214
+ minimum_pollers=1,
20215
+ maximum_pollers=3
20216
+ ),
20217
+ schema_registry_config=ConfluentSchemaRegistry(
20218
+ schema_registry_uri="https://example.com",
20219
+ event_record_format=lambda_.EventRecordFormat.JSON,
20220
+ authentication_type=lambda_.KafkaSchemaRegistryAccessConfigType.BASIC_AUTH,
20221
+ secret=secret,
20222
+ schema_validation_configs=[lambda_.KafkaSchemaValidationConfig(attribute=lambda_.KafkaSchemaValidationAttribute.KEY)]
20223
+ )
20224
+ ))
20225
+ '''
20226
+
20227
+ @jsii.member(jsii_name="of")
20228
+ @builtins.classmethod
20229
+ def of(cls, name: builtins.str) -> "KafkaSchemaValidationAttribute":
20230
+ '''A custom schema validation attribute property.
20231
+
20232
+ :param name: -
20233
+ '''
20234
+ if __debug__:
20235
+ type_hints = typing.get_type_hints(_typecheckingstub__90f583f1aa63ef4c8f67ca1d84697d67f4e81ae0ea5473533af46b85cc17e696)
20236
+ check_type(argname="argument name", value=name, expected_type=type_hints["name"])
20237
+ return typing.cast("KafkaSchemaValidationAttribute", jsii.sinvoke(cls, "of", [name]))
20238
+
20239
+ @jsii.python.classproperty
20240
+ @jsii.member(jsii_name="KEY")
20241
+ def KEY(cls) -> "KafkaSchemaValidationAttribute":
20242
+ '''Deserialize the key field of the payload sent to the target function.'''
20243
+ return typing.cast("KafkaSchemaValidationAttribute", jsii.sget(cls, "KEY"))
20244
+
20245
+ @jsii.python.classproperty
20246
+ @jsii.member(jsii_name="VALUE")
20247
+ def VALUE(cls) -> "KafkaSchemaValidationAttribute":
20248
+ '''Deserialize the value field of the payload sent to the target function.'''
20249
+ return typing.cast("KafkaSchemaValidationAttribute", jsii.sget(cls, "VALUE"))
20250
+
20251
+ @builtins.property
20252
+ @jsii.member(jsii_name="value")
20253
+ def value(self) -> builtins.str:
20254
+ '''The enum to use in the ``SchemaRegistryConfig.SchemaValidationConfigs.Attribute`` property in CloudFormation.'''
20255
+ return typing.cast(builtins.str, jsii.get(self, "value"))
20256
+
20257
+
20258
+ @jsii.data_type(
20259
+ jsii_type="aws-cdk-lib.aws_lambda.KafkaSchemaValidationConfig",
20260
+ jsii_struct_bases=[],
20261
+ name_mapping={"attribute": "attribute"},
20262
+ )
20263
+ class KafkaSchemaValidationConfig:
20264
+ def __init__(self, *, attribute: KafkaSchemaValidationAttribute) -> None:
20265
+ '''Specific schema validation configuration settings that tell Lambda the message attributes you want to validate and filter using your schema registry.
20266
+
20267
+ :param attribute: The attributes you want your schema registry to validate and filter for. If you selected JSON as the EventRecordFormat, Lambda also deserializes the selected message attributes.
20268
+
20269
+ :exampleMetadata: fixture=_generated
20270
+
20271
+ Example::
20272
+
20273
+ # The code below shows an example of how to instantiate this type.
20274
+ # The values are placeholders you should change.
20275
+ from aws_cdk import aws_lambda as lambda_
20276
+
20277
+ # kafka_schema_validation_attribute: lambda.KafkaSchemaValidationAttribute
20278
+
20279
+ kafka_schema_validation_config = lambda.KafkaSchemaValidationConfig(
20280
+ attribute=kafka_schema_validation_attribute
20281
+ )
20282
+ '''
20283
+ if __debug__:
20284
+ type_hints = typing.get_type_hints(_typecheckingstub__89fd450fee20fb5d81fd39157485acaae3ba70536153566208b1778d213a1ae8)
20285
+ check_type(argname="argument attribute", value=attribute, expected_type=type_hints["attribute"])
20286
+ self._values: typing.Dict[builtins.str, typing.Any] = {
20287
+ "attribute": attribute,
20288
+ }
20289
+
20290
+ @builtins.property
20291
+ def attribute(self) -> KafkaSchemaValidationAttribute:
20292
+ '''The attributes you want your schema registry to validate and filter for.
20293
+
20294
+ If you selected JSON as the EventRecordFormat, Lambda also deserializes the selected message attributes.
20295
+ '''
20296
+ result = self._values.get("attribute")
20297
+ assert result is not None, "Required property 'attribute' is missing"
20298
+ return typing.cast(KafkaSchemaValidationAttribute, result)
20299
+
20300
+ def __eq__(self, rhs: typing.Any) -> builtins.bool:
20301
+ return isinstance(rhs, self.__class__) and rhs._values == self._values
20302
+
20303
+ def __ne__(self, rhs: typing.Any) -> builtins.bool:
20304
+ return not (rhs == self)
20305
+
20306
+ def __repr__(self) -> str:
20307
+ return "KafkaSchemaValidationConfig(%s)" % ", ".join(
20308
+ k + "=" + repr(v) for k, v in self._values.items()
20309
+ )
20310
+
20311
+
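
Each KafkaSchemaValidationConfig names a single attribute, so validating both parts of a record means listing one entry per attribute (a sketch, not taken from the package):

    from aws_cdk import aws_lambda as lambda_

    validation_configs = [
        lambda_.KafkaSchemaValidationConfig(attribute=lambda_.KafkaSchemaValidationAttribute.KEY),
        lambda_.KafkaSchemaValidationConfig(attribute=lambda_.KafkaSchemaValidationAttribute.VALUE),
    ]
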
19748
20312
  class LambdaInsightsVersion(
19749
20313
  metaclass=jsii.JSIIAbstractClass,
19750
20314
  jsii_type="aws-cdk-lib.aws_lambda.LambdaInsightsVersion",
@@ -22314,6 +22878,88 @@ class S3CodeV2(
22314
22878
  return typing.cast(builtins.bool, jsii.get(self, "isInline"))
22315
22879
 
22316
22880
 
22881
+ @jsii.data_type(
22882
+ jsii_type="aws-cdk-lib.aws_lambda.SchemaRegistryProps",
22883
+ jsii_struct_bases=[],
22884
+ name_mapping={
22885
+ "event_record_format": "eventRecordFormat",
22886
+ "schema_validation_configs": "schemaValidationConfigs",
22887
+ },
22888
+ )
22889
+ class SchemaRegistryProps:
22890
+ def __init__(
22891
+ self,
22892
+ *,
22893
+ event_record_format: EventRecordFormat,
22894
+ schema_validation_configs: typing.Sequence[typing.Union[KafkaSchemaValidationConfig, typing.Dict[builtins.str, typing.Any]]],
22895
+ ) -> None:
22896
+ '''Properties for schema registry configuration.
22897
+
22898
+ :param event_record_format: The record format that Lambda delivers to your function after schema validation. - Choose JSON to have Lambda deliver the record to your function as a standard JSON object. - Choose SOURCE to have Lambda deliver the record to your function in its original source format. Lambda removes all schema metadata, such as the schema ID, before sending the record to your function. Default: - none
22899
+ :param schema_validation_configs: An array of schema validation configuration objects, which tell Lambda the message attributes you want to validate and filter using your schema registry. Default: - none
22900
+
22901
+ :exampleMetadata: fixture=_generated
22902
+
22903
+ Example::
22904
+
22905
+ # The code below shows an example of how to instantiate this type.
22906
+ # The values are placeholders you should change.
22907
+ from aws_cdk import aws_lambda as lambda_
22908
+
22909
+ # event_record_format: lambda.EventRecordFormat
22910
+ # kafka_schema_validation_attribute: lambda.KafkaSchemaValidationAttribute
22911
+
22912
+ schema_registry_props = lambda.SchemaRegistryProps(
22913
+ event_record_format=event_record_format,
22914
+ schema_validation_configs=[lambda.KafkaSchemaValidationConfig(
22915
+ attribute=kafka_schema_validation_attribute
22916
+ )]
22917
+ )
22918
+ '''
22919
+ if __debug__:
22920
+ type_hints = typing.get_type_hints(_typecheckingstub__0915a5e0439a722acf480d4010d03236559e242684c698d0a460dffc5709933b)
22921
+ check_type(argname="argument event_record_format", value=event_record_format, expected_type=type_hints["event_record_format"])
22922
+ check_type(argname="argument schema_validation_configs", value=schema_validation_configs, expected_type=type_hints["schema_validation_configs"])
22923
+ self._values: typing.Dict[builtins.str, typing.Any] = {
22924
+ "event_record_format": event_record_format,
22925
+ "schema_validation_configs": schema_validation_configs,
22926
+ }
22927
+
22928
+ @builtins.property
22929
+ def event_record_format(self) -> EventRecordFormat:
22930
+ '''The record format that Lambda delivers to your function after schema validation.
22931
+
22932
+ - Choose JSON to have Lambda deliver the record to your function as a standard JSON object.
22933
+ - Choose SOURCE to have Lambda deliver the record to your function in its original source format. Lambda removes all schema metadata, such as the schema ID, before sending the record to your function.
22934
+
22935
+ :default: - none
22936
+ '''
22937
+ result = self._values.get("event_record_format")
22938
+ assert result is not None, "Required property 'event_record_format' is missing"
22939
+ return typing.cast(EventRecordFormat, result)
22940
+
22941
+ @builtins.property
22942
+ def schema_validation_configs(self) -> typing.List[KafkaSchemaValidationConfig]:
22943
+ '''An array of schema validation configuration objects, which tell Lambda the message attributes you want to validate and filter using your schema registry.
22944
+
22945
+ :default: - none
22946
+ '''
22947
+ result = self._values.get("schema_validation_configs")
22948
+ assert result is not None, "Required property 'schema_validation_configs' is missing"
22949
+ return typing.cast(typing.List[KafkaSchemaValidationConfig], result)
22950
+
22951
+ def __eq__(self, rhs: typing.Any) -> builtins.bool:
22952
+ return isinstance(rhs, self.__class__) and rhs._values == self._values
22953
+
22954
+ def __ne__(self, rhs: typing.Any) -> builtins.bool:
22955
+ return not (rhs == self)
22956
+
22957
+ def __repr__(self) -> str:
22958
+ return "SchemaRegistryProps(%s)" % ", ".join(
22959
+ k + "=" + repr(v) for k, v in self._values.items()
22960
+ )
22961
+
22962
+
22317
22963
  @jsii.data_type(
22318
22964
  jsii_type="aws-cdk-lib.aws_lambda.SingletonFunctionProps",
22319
22965
  jsii_struct_bases=[FunctionProps],
@@ -23551,20 +24197,30 @@ class StartingPosition(enum.Enum):
23551
24197
 
23552
24198
  Example::
23553
24199
 
23554
- import aws_cdk.aws_kinesis as kinesis
23555
- from aws_cdk.aws_lambda_event_sources import KinesisConsumerEventSource
24200
+ from aws_cdk.aws_lambda_event_sources import ManagedKafkaEventSource
24201
+ from aws_cdk.aws_kms import Key
23556
24202
 
23557
24203
  # my_function: lambda.Function
23558
24204
 
23559
24205
 
23560
- stream = kinesis.Stream(self, "MyStream")
23561
- stream_consumer = kinesis.StreamConsumer(self, "MyStreamConsumer",
23562
- stream=stream,
23563
- stream_consumer_name="MyStreamConsumer"
23564
- )
23565
- my_function.add_event_source(KinesisConsumerEventSource(stream_consumer,
23566
- batch_size=100, # default
23567
- starting_position=lambda_.StartingPosition.TRIM_HORIZON
24206
+ # Your MSK cluster arn
24207
+ cluster_arn = "arn:aws:kafka:us-east-1:0123456789019:cluster/SalesCluster/abcd1234-abcd-cafe-abab-9876543210ab-4"
24208
+
24209
+ # The Kafka topic you want to subscribe to
24210
+ topic = "some-cool-topic"
24211
+
24212
+ # Your self managed KMS key
24213
+ my_key = Key.from_key_arn(self, "SourceBucketEncryptionKey", "arn:aws:kms:us-east-1:123456789012:key/<key-id>")
24214
+ my_function.add_event_source(ManagedKafkaEventSource(
24215
+ cluster_arn=cluster_arn,
24216
+ topic=topic,
24217
+ starting_position=lambda_.StartingPosition.TRIM_HORIZON,
24218
+ filters=[
24219
+ lambda_.FilterCriteria.filter({
24220
+ "string_equals": lambda_.FilterRule.is_equal("test")
24221
+ })
24222
+ ],
24223
+ filter_encryption=my_key
23568
24224
  ))
23569
24225
  '''
23570
24226
 
@@ -26278,6 +26934,7 @@ class EventSourceMapping(
26278
26934
  # filters: Any
26279
26935
  # function_: lambda.Function
26280
26936
  # key: kms.Key
26937
+ # schema_registry: lambda.ISchemaRegistry
26281
26938
  # source_access_configuration_type: lambda.SourceAccessConfigurationType
26282
26939
 
26283
26940
  event_source_mapping = lambda_.EventSourceMapping(self, "MyEventSourceMapping",
@@ -26309,6 +26966,7 @@ class EventSourceMapping(
26309
26966
  ),
26310
26967
  report_batch_item_failures=False,
26311
26968
  retry_attempts=123,
26969
+ schema_registry_config=schema_registry,
26312
26970
  source_access_configurations=[lambda.SourceAccessConfiguration(
26313
26971
  type=source_access_configuration_type,
26314
26972
  uri="uri"
@@ -26344,6 +27002,7 @@ class EventSourceMapping(
26344
27002
  provisioned_poller_config: typing.Optional[typing.Union[ProvisionedPollerConfig, typing.Dict[builtins.str, typing.Any]]] = None,
26345
27003
  report_batch_item_failures: typing.Optional[builtins.bool] = None,
26346
27004
  retry_attempts: typing.Optional[jsii.Number] = None,
27005
+ schema_registry_config: typing.Optional[ISchemaRegistry] = None,
26347
27006
  source_access_configurations: typing.Optional[typing.Sequence[typing.Union[SourceAccessConfiguration, typing.Dict[builtins.str, typing.Any]]]] = None,
26348
27007
  starting_position: typing.Optional[StartingPosition] = None,
26349
27008
  starting_position_timestamp: typing.Optional[jsii.Number] = None,
@@ -26372,6 +27031,7 @@ class EventSourceMapping(
26372
27031
  :param provisioned_poller_config: Configuration for provisioned pollers that read from the event source. When specified, allows control over the minimum and maximum number of pollers that can be provisioned to process events from the source. Default: - no provisioned pollers
26373
27032
  :param report_batch_item_failures: Allow functions to return partially successful responses for a batch of records. Default: false
26374
27033
  :param retry_attempts: The maximum number of times to retry when the function returns an error. Set to ``undefined`` if you want lambda to keep retrying infinitely or until the record expires. Valid Range: - Minimum value of 0 - Maximum value of 10000 Default: - infinite or until the record expires.
27034
+ :param schema_registry_config: Specific configuration settings for a Kafka schema registry. Default: - none
26375
27035
  :param source_access_configurations: Specific settings like the authentication protocol or the VPC components to secure access to your event source. Default: - none
26376
27036
  :param starting_position: The position in the DynamoDB, Kinesis or MSK stream where AWS Lambda should start reading. Default: - no starting position
26377
27037
  :param starting_position_timestamp: The time from which to start reading, in Unix time seconds. Default: - no timestamp
@@ -26402,6 +27062,7 @@ class EventSourceMapping(
26402
27062
  provisioned_poller_config=provisioned_poller_config,
26403
27063
  report_batch_item_failures=report_batch_item_failures,
26404
27064
  retry_attempts=retry_attempts,
27065
+ schema_registry_config=schema_registry_config,
26405
27066
  source_access_configurations=source_access_configurations,
26406
27067
  starting_position=starting_position,
26407
27068
  starting_position_timestamp=starting_position_timestamp,
@@ -26529,6 +27190,7 @@ class FunctionBase(
  provisioned_poller_config: typing.Optional[typing.Union[ProvisionedPollerConfig, typing.Dict[builtins.str, typing.Any]]] = None,
  report_batch_item_failures: typing.Optional[builtins.bool] = None,
  retry_attempts: typing.Optional[jsii.Number] = None,
+ schema_registry_config: typing.Optional[ISchemaRegistry] = None,
  source_access_configurations: typing.Optional[typing.Sequence[typing.Union[SourceAccessConfiguration, typing.Dict[builtins.str, typing.Any]]]] = None,
  starting_position: typing.Optional[StartingPosition] = None,
  starting_position_timestamp: typing.Optional[jsii.Number] = None,
@@ -26556,6 +27218,7 @@ class FunctionBase(
  :param provisioned_poller_config: Configuration for provisioned pollers that read from the event source. When specified, allows control over the minimum and maximum number of pollers that can be provisioned to process events from the source. Default: - no provisioned pollers
  :param report_batch_item_failures: Allow functions to return partially successful responses for a batch of records. Default: false
  :param retry_attempts: The maximum number of times to retry when the function returns an error. Set to ``undefined`` if you want lambda to keep retrying infinitely or until the record expires. Valid Range: - Minimum value of 0 - Maximum value of 10000 Default: - infinite or until the record expires.
+ :param schema_registry_config: Specific configuration settings for a Kafka schema registry. Default: - none
  :param source_access_configurations: Specific settings like the authentication protocol or the VPC components to secure access to your event source. Default: - none
  :param starting_position: The position in the DynamoDB, Kinesis or MSK stream where AWS Lambda should start reading. Default: - no starting position
  :param starting_position_timestamp: The time from which to start reading, in Unix time seconds. Default: - no timestamp
@@ -26584,6 +27247,7 @@ class FunctionBase(
  provisioned_poller_config=provisioned_poller_config,
  report_batch_item_failures=report_batch_item_failures,
  retry_attempts=retry_attempts,
+ schema_registry_config=schema_registry_config,
  source_access_configurations=source_access_configurations,
  starting_position=starting_position,
  starting_position_timestamp=starting_position_timestamp,
@@ -29618,6 +30282,7 @@ __all__ = [
  "EventInvokeConfig",
  "EventInvokeConfigOptions",
  "EventInvokeConfigProps",
+ "EventRecordFormat",
  "EventSourceMapping",
  "EventSourceMappingOptions",
  "EventSourceMappingProps",
@@ -29648,9 +30313,15 @@ __all__ = [
  "IFunctionUrl",
  "ILayerVersion",
  "IScalableFunctionAttribute",
+ "ISchemaRegistry",
  "IVersion",
  "InlineCode",
  "InvokeMode",
+ "KafkaSchemaRegistryAccessConfig",
+ "KafkaSchemaRegistryAccessConfigType",
+ "KafkaSchemaRegistryConfig",
+ "KafkaSchemaValidationAttribute",
+ "KafkaSchemaValidationConfig",
  "LambdaInsightsVersion",
  "LambdaRuntimeProps",
  "LayerVersion",
@@ -29677,6 +30348,7 @@ __all__ = [
  "RuntimeManagementMode",
  "S3Code",
  "S3CodeV2",
+ "SchemaRegistryProps",
  "SingletonFunction",
  "SingletonFunctionProps",
  "SnapStartConf",
@@ -31503,6 +32175,12 @@ def _typecheckingstub__39c1b3a3185624e50b391ccba8992ee288e54789a9b063a9fbffc3207
  """Type checking stubs"""
  pass

+ def _typecheckingstub__913b83169547e1e27e0d17d4e5a189f10545dca54ca45db7a5dc40a7d5cc2999(
+ name: builtins.str,
+ ) -> None:
+ """Type checking stubs"""
+ pass
+
  def _typecheckingstub__7442d2bd60e56a826eab54e95fa6a6ebc8961285a26558c7189840a124a0a2e0(
  *,
  batch_size: typing.Optional[jsii.Number] = None,
@@ -31523,6 +32201,7 @@ def _typecheckingstub__7442d2bd60e56a826eab54e95fa6a6ebc8961285a26558c7189840a12
  provisioned_poller_config: typing.Optional[typing.Union[ProvisionedPollerConfig, typing.Dict[builtins.str, typing.Any]]] = None,
  report_batch_item_failures: typing.Optional[builtins.bool] = None,
  retry_attempts: typing.Optional[jsii.Number] = None,
+ schema_registry_config: typing.Optional[ISchemaRegistry] = None,
  source_access_configurations: typing.Optional[typing.Sequence[typing.Union[SourceAccessConfiguration, typing.Dict[builtins.str, typing.Any]]]] = None,
  starting_position: typing.Optional[StartingPosition] = None,
  starting_position_timestamp: typing.Optional[jsii.Number] = None,
@@ -31552,6 +32231,7 @@ def _typecheckingstub__e74d0bc5516fc715f7302bdf199df23dddf769e98771f0bac2ff026a4
  provisioned_poller_config: typing.Optional[typing.Union[ProvisionedPollerConfig, typing.Dict[builtins.str, typing.Any]]] = None,
  report_batch_item_failures: typing.Optional[builtins.bool] = None,
  retry_attempts: typing.Optional[jsii.Number] = None,
+ schema_registry_config: typing.Optional[ISchemaRegistry] = None,
  source_access_configurations: typing.Optional[typing.Sequence[typing.Union[SourceAccessConfiguration, typing.Dict[builtins.str, typing.Any]]]] = None,
  starting_position: typing.Optional[StartingPosition] = None,
  starting_position_timestamp: typing.Optional[jsii.Number] = None,
@@ -31835,6 +32515,7 @@ def _typecheckingstub__726375d512fd3c0da30be8d20d1c4016974ba77359e6bac8eb3569126
  provisioned_poller_config: typing.Optional[typing.Union[ProvisionedPollerConfig, typing.Dict[builtins.str, typing.Any]]] = None,
  report_batch_item_failures: typing.Optional[builtins.bool] = None,
  retry_attempts: typing.Optional[jsii.Number] = None,
+ schema_registry_config: typing.Optional[ISchemaRegistry] = None,
  source_access_configurations: typing.Optional[typing.Sequence[typing.Union[SourceAccessConfiguration, typing.Dict[builtins.str, typing.Any]]]] = None,
  starting_position: typing.Optional[StartingPosition] = None,
  starting_position_timestamp: typing.Optional[jsii.Number] = None,
@@ -31941,6 +32622,13 @@ def _typecheckingstub__6400e52bafb7e00e3113dbb1e115c4dea946b786b17eee75a6aa6706a
  """Type checking stubs"""
  pass

+ def _typecheckingstub__0c68a1588ffc2bca6786310b7681f323e4368d6bf66e13c5fbdc7ef0ad571a22(
+ target: IEventSourceMapping,
+ target_handler: IFunction,
+ ) -> None:
+ """Type checking stubs"""
+ pass
+
  def _typecheckingstub__d163f09d3cc5fe40599d7400b73c5f0814fcf5b50dccd44d5740a368a0d84cf9(
  alias_name: builtins.str,
  *,
@@ -31967,6 +32655,43 @@ def _typecheckingstub__68701e0be659943818e792689f9c11f6ea386ae16b7e76ef2e090037d
  """Type checking stubs"""
  pass

+ def _typecheckingstub__a9ed7f710fdd20a96eb8eeb709bbea9f1e52b3ca20bed4ca85cf8341031090d2(
+ *,
+ type: KafkaSchemaRegistryAccessConfigType,
+ uri: builtins.str,
+ ) -> None:
+ """Type checking stubs"""
+ pass
+
+ def _typecheckingstub__4bc6850ec99a0d35e26da78049f4174bb567964e6b20083c7a1e29e3f120f831(
+ name: builtins.str,
+ ) -> None:
+ """Type checking stubs"""
+ pass
+
+ def _typecheckingstub__a3171876130664545cc7c6ac22856db59c006c52bb5f546cb11b18d515fef45e(
+ *,
+ event_record_format: EventRecordFormat,
+ schema_registry_uri: builtins.str,
+ schema_validation_configs: typing.Sequence[typing.Union[KafkaSchemaValidationConfig, typing.Dict[builtins.str, typing.Any]]],
+ access_configs: typing.Optional[typing.Sequence[typing.Union[KafkaSchemaRegistryAccessConfig, typing.Dict[builtins.str, typing.Any]]]] = None,
+ ) -> None:
+ """Type checking stubs"""
+ pass
+
+ def _typecheckingstub__90f583f1aa63ef4c8f67ca1d84697d67f4e81ae0ea5473533af46b85cc17e696(
+ name: builtins.str,
+ ) -> None:
+ """Type checking stubs"""
+ pass
+
+ def _typecheckingstub__89fd450fee20fb5d81fd39157485acaae3ba70536153566208b1778d213a1ae8(
+ *,
+ attribute: KafkaSchemaValidationAttribute,
+ ) -> None:
+ """Type checking stubs"""
+ pass
+
  def _typecheckingstub__263309ccb98199a52576e8475ff6ab5f39d5cf43f49cd26a3d9a85ab711464ad(
  arn: builtins.str,
  ) -> None:
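The stubs above pin down the shape of the new structs: ``KafkaSchemaRegistryAccessConfig`` takes a ``type`` and a ``uri``, ``KafkaSchemaValidationConfig`` takes an ``attribute``, and ``KafkaSchemaRegistryConfig`` combines an ``event_record_format``, a ``schema_registry_uri``, a list of validation configs, and optional access configs. A hedged construction sketch follows; the enum-style members (``JSON``, ``VALUE``, ``BASIC_AUTH``) are assumptions about ``EventRecordFormat``, ``KafkaSchemaValidationAttribute``, and ``KafkaSchemaRegistryAccessConfigType``, since only the field names and types are visible in this diff:

from aws_cdk import aws_lambda as lambda_

# Sketch of the struct shapes introduced by the stubs above. Field names and
# types come from this diff; the specific static members and the example URIs
# are assumptions/placeholders.
registry_config = lambda_.KafkaSchemaRegistryConfig(
    event_record_format=lambda_.EventRecordFormat.JSON,  # assumed member
    schema_registry_uri="https://schema-registry.example.com",  # placeholder
    schema_validation_configs=[
        lambda_.KafkaSchemaValidationConfig(
            attribute=lambda_.KafkaSchemaValidationAttribute.VALUE,  # assumed member
        )
    ],
    access_configs=[
        lambda_.KafkaSchemaRegistryAccessConfig(
            type=lambda_.KafkaSchemaRegistryAccessConfigType.BASIC_AUTH,  # assumed member
            uri="arn:aws:secretsmanager:...",  # placeholder secret ARN
        )
    ],
)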
@@ -32218,6 +32943,14 @@ def _typecheckingstub__304505e97ff3b397f5306079c5410e06bb217281e1cc348ada6eef6ae
  """Type checking stubs"""
  pass

+ def _typecheckingstub__0915a5e0439a722acf480d4010d03236559e242684c698d0a460dffc5709933b(
+ *,
+ event_record_format: EventRecordFormat,
+ schema_validation_configs: typing.Sequence[typing.Union[KafkaSchemaValidationConfig, typing.Dict[builtins.str, typing.Any]]],
+ ) -> None:
+ """Type checking stubs"""
+ pass
+
  def _typecheckingstub__68a03ec9f866a29c77aabcf8328c63a49511790fa9714874f255b3292623893c(
  *,
  max_event_age: typing.Optional[_Duration_4839e8c3] = None,
@@ -32553,6 +33286,7 @@ def _typecheckingstub__b0460bc5250777612d2b42ec799737ce019fcdc03fe86c6540ab2ecec
  provisioned_poller_config: typing.Optional[typing.Union[ProvisionedPollerConfig, typing.Dict[builtins.str, typing.Any]]] = None,
  report_batch_item_failures: typing.Optional[builtins.bool] = None,
  retry_attempts: typing.Optional[jsii.Number] = None,
+ schema_registry_config: typing.Optional[ISchemaRegistry] = None,
  source_access_configurations: typing.Optional[typing.Sequence[typing.Union[SourceAccessConfiguration, typing.Dict[builtins.str, typing.Any]]]] = None,
  starting_position: typing.Optional[StartingPosition] = None,
  starting_position_timestamp: typing.Optional[jsii.Number] = None,
@@ -32609,6 +33343,7 @@ def _typecheckingstub__bfc312bd9bc4e64c5ae8419715155a56676bb9fe40870f57ffa4f3030
  provisioned_poller_config: typing.Optional[typing.Union[ProvisionedPollerConfig, typing.Dict[builtins.str, typing.Any]]] = None,
  report_batch_item_failures: typing.Optional[builtins.bool] = None,
  retry_attempts: typing.Optional[jsii.Number] = None,
+ schema_registry_config: typing.Optional[ISchemaRegistry] = None,
  source_access_configurations: typing.Optional[typing.Sequence[typing.Union[SourceAccessConfiguration, typing.Dict[builtins.str, typing.Any]]]] = None,
  starting_position: typing.Optional[StartingPosition] = None,
  starting_position_timestamp: typing.Optional[jsii.Number] = None,