konokenj.cdk-api-mcp-server 0.50.0__py3-none-any.whl → 0.52.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Files changed (29)
  1. cdk_api_mcp_server/__about__.py +1 -1
  2. cdk_api_mcp_server/resources/aws-cdk/constructs/@aws-cdk/aws-bedrock-agentcore-alpha/README.md +327 -5
  3. cdk_api_mcp_server/resources/aws-cdk/constructs/@aws-cdk/aws-msk-alpha/README.md +30 -0
  4. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-apigateway/README.md +9 -0
  5. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-apigateway/integ.spec-restapi.ts +1 -0
  6. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-apigatewayv2/README.md +93 -81
  7. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-apigatewayv2/integ.stage.ts +20 -4
  8. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-codepipeline-actions/integ.pipeline-elastic-beanstalk-deploy.ts +4 -1
  9. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-dynamodb/README.md +53 -0
  10. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-dynamodb/integ.dynamodb.add-to-resource-policy.ts +80 -0
  11. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-dynamodb/integ.dynamodb.policy.ts +21 -1
  12. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-ec2/integ.vpc-flow-logs.ts +4 -0
  13. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-elasticloadbalancingv2/README.md +34 -4
  14. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-elasticloadbalancingv2/integ.nlb.security-group.ts +70 -0
  15. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-events-targets/README.md +22 -0
  16. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-events-targets/integ.firehose-delivery-stream.ts +51 -0
  17. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-iam/integ.managed-policy.ts +9 -0
  18. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-iam/integ.policy.ts +9 -0
  19. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-kinesisfirehose/README.md +60 -3
  20. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-kinesisfirehose/integ.cloudwatch-logs-processors.ts +45 -0
  21. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-lambda/integ.runtime.fromasset.ts +19 -4
  22. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-lambda/integ.runtime.inlinecode.ts +7 -0
  23. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/cx-api/FEATURE_FLAGS.md +32 -13
  24. {konokenj_cdk_api_mcp_server-0.50.0.dist-info → konokenj_cdk_api_mcp_server-0.52.0.dist-info}/METADATA +2 -2
  25. {konokenj_cdk_api_mcp_server-0.50.0.dist-info → konokenj_cdk_api_mcp_server-0.52.0.dist-info}/RECORD +28 -25
  26. cdk_api_mcp_server/resources/aws-cdk/constructs/aws-cdk-lib/aws-events-targets/integ.kinesis-firehose-stream.ts +0 -33
  27. {konokenj_cdk_api_mcp_server-0.50.0.dist-info → konokenj_cdk_api_mcp_server-0.52.0.dist-info}/WHEEL +0 -0
  28. {konokenj_cdk_api_mcp_server-0.50.0.dist-info → konokenj_cdk_api_mcp_server-0.52.0.dist-info}/entry_points.txt +0 -0
  29. {konokenj_cdk_api_mcp_server-0.50.0.dist-info → konokenj_cdk_api_mcp_server-0.52.0.dist-info}/licenses/LICENSE.txt +0 -0
@@ -1,4 +1,4 @@
1
1
  # SPDX-FileCopyrightText: 2025-present Kenji Kono <konoken@amazon.co.jp>
2
2
  #
3
3
  # SPDX-License-Identifier: MIT
4
- __version__ = "0.50.0"
4
+ __version__ = "0.52.0"
@@ -45,6 +45,11 @@ This construct library facilitates the deployment of Bedrock AgentCore primitive
45
45
  - [Code Interpreter Network Modes](#code-interpreter-network-modes)
46
46
  - [Basic Code Interpreter Creation](#basic-code-interpreter-creation)
47
47
  - [Code Interpreter IAM permissions](#code-interpreter-iam-permissions)
48
+ - [Memory](#memory)
49
+ - [Memory properties](#memory-properties)
50
+ - [Basic Memory Creation](#basic-memory-creation)
51
+ - [LTM Memory Extraction Strategies](#ltm-memory-extraction-strategies)
52
+ - [Memory Strategy Methods](#memory-strategy-methods)
48
53
 
49
54
 
50
55
  ## AgentCore Runtime
@@ -115,9 +120,9 @@ const runtime = new agentcore.Runtime(this, "MyAgentRuntime", {
115
120
 
116
121
  To grant the runtime permission to invoke a Bedrock model or inference profile:
117
122
 
118
- ```text
123
+ ```typescript fixture=default
119
124
  // Note: This example uses @aws-cdk/aws-bedrock-alpha which must be installed separately
120
- import * as bedrock from '@aws-cdk/aws-bedrock-alpha';
125
+ declare const runtime: agentcore.Runtime;
121
126
 
122
127
  // Create a cross-region inference profile for Claude 3.7 Sonnet
123
128
  const inferenceProfile = bedrock.CrossRegionInferenceProfile.fromConfig({
@@ -298,6 +303,10 @@ IAM authentication is the default mode, when no authorizerConfiguration is set t
298
303
  To configure AWS Cognito User Pool authentication:
299
304
 
300
305
  ```typescript
306
+ declare const userPool: cognito.UserPool;
307
+ declare const userPoolClient: cognito.UserPoolClient;
308
+ declare const anotherUserPoolClient: cognito.UserPoolClient;
309
+
301
310
  const repository = new ecr.Repository(this, "TestRepository", {
302
311
  repositoryName: "test-agent-runtime",
303
312
  });
@@ -307,9 +316,8 @@ const runtime = new agentcore.Runtime(this, "MyAgentRuntime", {
307
316
  runtimeName: "myAgent",
308
317
  agentRuntimeArtifact: agentRuntimeArtifact,
309
318
  authorizerConfiguration: agentcore.RuntimeAuthorizerConfiguration.usingCognito(
310
- "us-west-2_ABC123", // User Pool ID (required)
311
- "client123", // Client ID (required)
312
- "us-west-2" // Region (optional, defaults to stack region)
319
+ userPool, // User Pool (required)
320
+ [userPoolClient, anotherUserPoolClient], // User Pool Clients
313
321
  ),
314
322
  });
315
323
  ```
@@ -794,3 +802,317 @@ const codeInterpreter = new agentcore.CodeInterpreterCustom(this, "MyCodeInterpr
794
802
  },
795
803
  });
796
804
  ```
805
+
806
+ ## Memory
807
+
808
+ Memory is a critical component of intelligence. While Large Language Models (LLMs) have impressive capabilities, they lack persistent memory across conversations. Amazon Bedrock AgentCore Memory addresses this limitation by providing a managed service that enables AI agents to maintain context over time, remember important facts, and deliver consistent, personalized experiences.
809
+
810
+ AgentCore Memory operates on two levels:
811
+
812
+ - **Short-Term Memory**: Immediate conversation context and session-based information that provides continuity within a single interaction or closely related sessions.
813
+ - **Long-Term Memory**: Persistent information extracted and stored across multiple conversations, including facts, preferences, and summaries that enable personalized experiences over time.
814
+
815
+ When you interact with the memory via the `CreateEvent` API, you store interactions in Short-Term Memory (STM) instantly. These interactions can include everything from user messages and assistant responses to tool actions.
816
+
817
+ To write to long-term memory, you need to configure extraction strategies, which define how and where information from conversations is stored for future use. Raw events are processed asynchronously every few turns, according to the selected strategy. You can't create long-term memory records directly; they are extracted asynchronously by AgentCore Memory.
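
Writing events is a runtime (data-plane) concern rather than something this construct does for you. As a minimal sketch, assuming the IAM action for the `CreateEvent` operation is `bedrock-agentcore:CreateEvent` and that the deployed memory's ARN is already known, the role your agent application assumes could be granted permission like this:

```ts
import * as iam from 'aws-cdk-lib/aws-iam';

// Illustrative wiring only: grant the application role permission to write
// events into the memory's short-term store. The action name and the ARN
// variable are assumptions, not APIs provided by this construct library.
declare const appRole: iam.IRole;   // role used by the application at runtime
declare const memoryArn: string;    // ARN of the deployed AgentCore memory (assumed known)

appRole.addToPrincipalPolicy(new iam.PolicyStatement({
  actions: ['bedrock-agentcore:CreateEvent'], // assumed action name for the CreateEvent API
  resources: [memoryArn],
}));
```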
818
+
819
+ ### Memory Properties
820
+
821
+ | Name | Type | Required | Description |
822
+ |------|------|----------|-------------|
823
+ | `memoryName` | `string` | Yes | The name of the memory |
824
+ | `expirationDuration` | `Duration` | No | Short-term memory expiration in days (between 7 and 365). Default: 90 days |
825
+ | `description` | `string` | No | Optional description for the memory. Default: no description. |
826
+ | `kmsKey` | `IKey` | No | Custom KMS key to use for encryption. Default: Your data is encrypted with a key that AWS owns and manages for you |
827
+ | `memoryStrategies` | `MemoryStrategyBase[]` | No | Built-in extraction strategies to use for this memory. Default: No extraction strategies (short term memory only) |
828
+ | `executionRole` | `iam.IRole` | No | The IAM role that provides permissions for the memory to access AWS services. Default: A new role will be created. |
829
+ | `tags` | `{ [key: string]: string }` | No | Tags for memory. Default: no tags. |
830
+
831
+ ### Basic Memory Creation
832
+
833
+ Below you can find how to configure a simple short-term memory (STM) with no long-term memory extraction strategies.
834
+ Note how you set `expirationDuration`, which defines how long events are stored in short-term memory before they expire.
835
+
836
+ ```typescript fixture=default
837
+
838
+ // Create a basic memory with default settings, no LTM strategies
839
+ const memory = new agentcore.Memory(this, "MyMemory", {
840
+ memoryName: "my_memory",
841
+ description: "A memory for storing user interactions for a period of 90 days",
842
+ expirationDuration: cdk.Duration.days(90),
843
+ });
844
+ ```
845
+
846
+ #### Basic Memory with Custom KMS Encryption
847
+
848
+ ```typescript fixture=default
849
+ // Create a custom KMS key for encryption
850
+ const encryptionKey = new kms.Key(this, "MemoryEncryptionKey", {
851
+ enableKeyRotation: true,
852
+ description: "KMS key for memory encryption",
853
+ });
854
+
855
+ // Create memory with custom encryption
856
+ const memory = new agentcore.Memory(this, "MyMemory", {
857
+ memoryName: "my_encrypted_memory",
858
+ description: "Memory with custom KMS encryption",
859
+ expirationDuration: cdk.Duration.days(90),
860
+ kmsKey: encryptionKey,
861
+ });
862
+ ```
863
+
864
+ ### LTM Memory Extraction Strategies
865
+
866
+ If you need long-term memory for context recall across sessions, you can set up memory extraction strategies
867
+ to extract the relevant memory from the raw events.
868
+
869
+ Amazon Bedrock AgentCore Memory has different memory strategies for extracting and organizing information:
870
+
871
+ - **Summarization**: to summarize interactions to preserve critical context and key insights.
872
+ - **Semantic Memory**: to extract general factual knowledge, concepts and meanings from raw conversations using vector embeddings.
873
+ This enables similarity-based retrieval of relevant facts and context.
874
+ - **User Preferences**: to extract user behavior patterns from raw conversations.
875
+
876
+ You can use built-in extraction strategies for quick setup, or create custom extraction strategies with specific models and prompt templates.
877
+
878
+ ### Memory with Built-in Strategies
879
+
880
+ The library provides three built-in LTM strategies. These are default strategies for organizing and extracting memory data,
881
+ each optimized for specific use cases.
882
+
883
+ For example, suppose an agent helps multiple users with cloud storage setup. From these conversations,
884
+ see how each strategy processes users expressing confusion about account connection:
885
+
886
+ 1. **Summarization Strategy** (`MemoryStrategy.usingBuiltInSummarization()`)
887
+ This strategy compresses conversations into concise overviews, preserving essential context and key insights for quick recall.
888
+ Extracted memory example: Users confused by cloud setup during onboarding.
889
+
890
+ - Extracts concise summaries to preserve critical context and key insights
891
+ - Namespace: `/strategies/{memoryStrategyId}/actors/{actorId}/sessions/{sessionId}`
892
+
893
+ 2. **Semantic Memory Strategy** (`MemoryStrategy.usingBuiltInSemantic()`)
894
+ Distills general facts, concepts, and underlying meanings from raw conversational data, presenting the information in a context-independent format.
895
+ Extracted memory example: In-context learning = task-solving via examples, no training needed.
896
+
897
+ - Extracts general factual knowledge, concepts and meanings from raw conversations
898
+ - Namespace: `/strategies/{memoryStrategyId}/actors/{actorId}`
899
+
900
+ 3. **User Preference Strategy** (`MemoryStrategy.usingBuiltInUserPreference()`)
901
+ Captures individual preferences, interaction patterns, and personalized settings to enhance future experiences.
902
+ Extracted memory example: User needs clear guidance on cloud storage account connection during onboarding.
903
+
904
+ - Extracts user behavior patterns from raw conversations
905
+ - Namespace: `/strategies/{memoryStrategyId}/actors/{actorId}`
906
+
907
+ ```typescript fixture=default
908
+ // Create memory with built-in strategies
909
+ const memory = new agentcore.Memory(this, "MyMemory", {
910
+ memoryName: "my_memory",
911
+ description: "Memory with built-in strategies",
912
+ expirationDuration: cdk.Duration.days(90),
913
+ memoryStrategies: [
914
+ agentcore.MemoryStrategy.usingBuiltInSummarization(),
915
+ agentcore.MemoryStrategy.usingBuiltInSemantic(),
916
+ agentcore.MemoryStrategy.usingBuiltInUserPreference(),
917
+ ],
918
+ });
919
+ ```
920
+
921
+ The name generated for each built-in memory strategy is as follows:
922
+
923
+ - For Summarization: `summary_builtin_cdk001`
924
+ - For Semantic: `semantic_builtin_cdk001`
925
+ - For User Preferences: `preference_builtin_cdk001`
926
+
927
+ ### Memory with Custom Strategies
928
+
929
+ With Long-Term Memory, organization is managed through Namespaces.
930
+
931
+ An `actor` refers to an entity such as an end user or an agent/user combination. For example, in a coding support chatbot,
932
+ the actor is usually the developer asking questions. Using the actor ID helps the system know which user the memory belongs to,
933
+ keeping each user's data separate and organized.
934
+
935
+ A `session` is usually a single conversation or interaction period between the user and the AI agent.
936
+ It groups all related messages and events that happen during that conversation.
937
+
938
+ A `namespace` is used to logically group and organize long-term memories. It ensures data stays neat, separate, and secure.
939
+
940
+ With AgentCore Memory, you need to add a namespace when you define a memory strategy. This namespace helps define where the long-term memory
941
+ will be logically grouped. Every time a new long-term memory is extracted using this memory strategy, it is saved under the namespace you set.
942
+ This means that all long-term memories are scoped to their specific namespace, keeping them organized and preventing any mix-ups with other
943
+ users or sessions. You should use a hierarchical format separated by forward slashes /. This helps keep memories organized clearly. As needed,
944
+ you can use the following pre-defined variables, within braces, in the namespace based on your application's organization needs:
945
+
946
+ - `actorId` – Identifies who the long-term memory belongs to, such as a user
947
+ - `memoryStrategyId` – Shows which memory strategy is being used. This strategy identifier is auto-generated when you create a memory using the CreateMemory operation.
948
+ - `sessionId` – Identifies which session or conversation the memory is from.
949
+
950
+ For example, if you define the following namespace as the input to your strategy in the CreateMemory operation:
951
+
952
+ ```shell
953
+ /strategy/{memoryStrategyId}/actor/{actorId}/session/{sessionId}
954
+ ```
955
+
956
+ After memory creation, this namespace might look like:
957
+
958
+ ```shell
959
+ /strategy/summarization-93483043/actor/actor-9830m2w3/session/session-9330sds8
960
+ ```
961
+
962
+ You can customize the namespace (i.e., where the memories are stored) by using the following methods:
963
+
964
+ 1. **Summarization Strategy** (`MemoryStrategy.usingSummarization(props)`)
965
+ 1. **Semantic Memory Strategy** (`MemoryStrategy.usingSemantic(props)`)
966
+ 1. **User Preference Strategy** (`MemoryStrategy.usingUserPreference(props)`)
967
+
968
+ ```typescript fixture=default
969
+ // Create memory with custom-namespace strategies
970
+ const memory = new agentcore.Memory(this, "MyMemory", {
971
+ memoryName: "my_memory",
972
+ description: "Memory with built-in strategies",
973
+ expirationDuration: cdk.Duration.days(90),
974
+ memoryStrategies: [
975
+ agentcore.MemoryStrategy.usingUserPreference({
976
+ name: "CustomerPreferences",
977
+ namespaces: ["support/customer/{actorId}/preferences"]
978
+ }),
979
+ agentcore.MemoryStrategy.usingSemantic({
980
+ name: "CustomerSupportSemantic",
981
+ namespaces: ["support/customer/{actorId}/semantic"]
982
+ }),
983
+ ],
984
+ });
985
+ ```
986
+
987
+ Custom memory strategies let you tailor memory extraction and consolidation to your specific domain or use case.
988
+ You can override the prompts used to extract and consolidate semantic memories, summaries, or user preferences.
989
+ You can also choose the model that you want to use for extraction and consolidation.
990
+
991
+ The custom prompts you create are appended to a non-editable system prompt.
992
+
993
+ Since a custom strategy requires you to invoke certain foundation models (FMs), you need a role with appropriate permissions. For that, you can:
994
+
995
+ - Let the L2 construct create a minimum-permission role for you when you use L2 Bedrock Foundation Models.
996
+ - Use a custom role with the overly permissive `AmazonBedrockAgentCoreMemoryBedrockModelInferenceExecutionRolePolicy` managed policy.
997
+ - Use a custom role with your own custom policies.
998
+
999
+ #### Memory with Custom Execution Role
1000
+
1001
+ Keep in mind that memories that **do not** use custom strategies do not require a service role.
1002
+ So even if you provide it, it will be ignored as it will never be used.
1003
+
1004
+ ```typescript fixture=default
1005
+ // Create a custom execution role
1006
+ const executionRole = new iam.Role(this, "MemoryExecutionRole", {
1007
+ assumedBy: new iam.ServicePrincipal("bedrock-agentcore.amazonaws.com"),
1008
+ managedPolicies: [
1009
+ iam.ManagedPolicy.fromAwsManagedPolicyName(
1010
+ "AmazonBedrockAgentCoreMemoryBedrockModelInferenceExecutionRolePolicy"
1011
+ ),
1012
+ ],
1013
+ });
1014
+
1015
+ // Create memory with custom execution role
1016
+ const memory = new agentcore.Memory(this, "MyMemory", {
1017
+ memoryName: "my_memory",
1018
+ description: "Memory with custom execution role",
1019
+ expirationDuration: cdk.Duration.days(90),
1020
+ executionRole: executionRole,
1021
+ });
1022
+ ```
1023
+
1024
+ In `customConsolidation` and `customExtraction`, the `model` property uses the [@aws-cdk/aws-bedrock-alpha](https://www.npmjs.com/package/@aws-cdk/aws-bedrock-alpha) library, which must be installed separately.
1025
+
1026
+ ```typescript fixture=default
1027
+ // Create a custom semantic memory strategy
1028
+ const customSemanticStrategy = agentcore.MemoryStrategy.usingSemantic({
1029
+ name: "customSemanticStrategy",
1030
+ description: "Custom semantic memory strategy",
1031
+ namespaces: ["/custom/strategies/{memoryStrategyId}/actors/{actorId}"],
1032
+ customConsolidation: {
1033
+ model: bedrock.BedrockFoundationModel.ANTHROPIC_CLAUDE_3_5_SONNET_V1_0,
1034
+ appendToPrompt: "Custom consolidation prompt for semantic memory",
1035
+ },
1036
+ customExtraction: {
1037
+ model: bedrock.BedrockFoundationModel.ANTHROPIC_CLAUDE_3_5_SONNET_V1_0,
1038
+ appendToPrompt: "Custom extraction prompt for semantic memory",
1039
+ },
1040
+ });
1041
+
1042
+ // Create memory with custom strategy
1043
+ const memory = new agentcore.Memory(this, "MyMemory", {
1044
+ memoryName: "my-custom-memory",
1045
+ description: "Memory with custom strategy",
1046
+ expirationDuration: cdk.Duration.days(90),
1047
+ memoryStrategies: [customSemanticStrategy],
1048
+ });
1049
+ ```
1050
+
1051
+ ### Memory with Self-Managed Strategies
1052
+
1053
+ A self-managed strategy in Amazon Bedrock AgentCore Memory gives you complete control over your memory extraction and consolidation pipelines.
1054
+ With a self-managed strategy, you can build custom memory processing workflows while leveraging Amazon Bedrock AgentCore for storage and retrieval.
1055
+
1056
+ For additional information, you can refer to the [developer guide for self managed strategies](https://docs.aws.amazon.com/bedrock-agentcore/latest/devguide/memory-self-managed-strategies.html).
1057
+
1058
+ Create the required AWS resources including:
1059
+
1060
+ - an S3 bucket in your account where Amazon Bedrock AgentCore will deliver batched event payloads.
1061
+ - an SNS topic for job notifications. Use FIFO topics if processing order within sessions is important for your use case.
1062
+
1063
+ The construct will apply the correct permissions to the memory execution role to access these resources.
1064
+
1065
+ ```typescript fixture=default
1066
+
1067
+ const bucket = new s3.Bucket(this, 'memoryBucket', {
1068
+ bucketName: 'test-memory',
1069
+ removalPolicy: cdk.RemovalPolicy.DESTROY,
1070
+ autoDeleteObjects: true,
1071
+ });
1072
+
1073
+ const topic = new sns.Topic(this, 'topic');
1074
+
1075
+ // Create a self-managed memory strategy
1076
+ const selfManagedStrategy = agentcore.MemoryStrategy.usingSelfManaged({
1077
+ name: "selfManagedStrategy",
1078
+ description: "self managed memory strategy",
1079
+ historicalContextWindowSize: 5,
1080
+ invocationConfiguration: {
1081
+ topic: topic,
1082
+ s3Location: {
1083
+ bucketName: bucket.bucketName,
1084
+ objectKey: 'memory/',
1085
+ }
1086
+ },
1087
+ triggerConditions: {
1088
+ messageBasedTrigger: 1,
1089
+ timeBasedTrigger: cdk.Duration.seconds(10),
1090
+ tokenBasedTrigger: 100
1091
+ }
1092
+ });
1093
+
1094
+ // Create memory with the self-managed strategy
1095
+ const memory = new agentcore.Memory(this, "MyMemory", {
1096
+ memoryName: "my-custom-memory",
1097
+ description: "Memory with custom strategy",
1098
+ expirationDuration: cdk.Duration.days(90),
1099
+ memoryStrategies: [selfManagedStrategy],
1100
+ });
1101
+ ```
1102
+
1103
+ ### Memory Strategy Methods
1104
+
1105
+ You can add new memory strategies to the memory construct using the `addMemoryStrategy()` method, for instance:
1106
+
1107
+ ```typescript fixture=default
1108
+ // Create memory without initial strategies
1109
+ const memory = new agentcore.Memory(this, "test-memory", {
1110
+ memoryName: "test_memory_add_strategy",
1111
+ description: "A test memory for testing addMemoryStrategy method",
1112
+ expirationDuration: cdk.Duration.days(90),
1113
+ });
1114
+
1115
+ // Add strategies after instantiation
1116
+ memory.addMemoryStrategy(agentcore.MemoryStrategy.usingBuiltInSummarization());
1117
+ memory.addMemoryStrategy(agentcore.MemoryStrategy.usingBuiltInSemantic());
1118
+ ```
@@ -232,6 +232,36 @@ const cluster = new msk.Cluster(this, 'cluster', {
232
232
  });
233
233
  ```
234
234
 
235
+ ## MSK Express Brokers
236
+
237
+ You can create an MSK cluster with Express Brokers by setting the `brokerType` property to `BrokerType.EXPRESS`. Express Brokers are a low-cost option for development, testing, and workloads that don't require the high availability guarantees of standard MSK clusters.
238
+ For more information, see [Amazon MSK Express Brokers](https://docs.aws.amazon.com/msk/latest/developerguide/msk-broker-types-express.html).
239
+
240
+ **Note:** When using Express Brokers, the following constraints apply:
241
+
242
+ - Apache Kafka version must be 3.6.x or 3.8.x
243
+ - You must specify the `instanceType`
244
+ - The VPC must have at least 3 subnets (across 3 AZs)
245
+ - `ebsStorageInfo` is not supported
246
+ - `storageMode` is not supported
247
+ - `logging` is not supported
248
+ - Supported broker sizes: `m7g.xlarge`, `m7g.2xlarge`, `m7g.4xlarge`, `m7g.8xlarge`, `m7g.12xlarge`, `m7g.16xlarge`
249
+
250
+ ```ts
251
+ declare const vpc: ec2.Vpc;
252
+
253
+ const expressCluster = new msk.Cluster(this, 'ExpressCluster', {
254
+ clusterName: 'MyExpressCluster',
255
+ kafkaVersion: msk.KafkaVersion.V3_8_X,
256
+ vpc,
257
+ brokerType: msk.BrokerType.EXPRESS,
258
+ instanceType: ec2.InstanceType.of(
259
+ ec2.InstanceClass.M7G,
260
+ ec2.InstanceSize.XLARGE,
261
+ ),
262
+ });
263
+ ```
264
+
235
265
  ## MSK Serverless
236
266
 
237
267
  You can also use MSK Serverless by using `ServerlessCluster` class.
@@ -1652,6 +1652,15 @@ const api = new apigateway.SpecRestApi(this, 'books-api', {
1652
1652
  });
1653
1653
  ```
1654
1654
 
1655
+ `SpecRestApi` also supports binary media types, similar to `RestApi`:
1656
+
1657
+ ```ts
1658
+ const api = new apigateway.SpecRestApi(this, 'books-api', {
1659
+ apiDefinition: apigateway.ApiDefinition.fromAsset('path-to-file.json'),
1660
+ binaryMediaTypes: ['image/png', 'application/pdf']
1661
+ });
1662
+ ```
1663
+
1655
1664
  ### Endpoint configuration
1656
1665
 
1657
1666
  By default, `SpecRestApi` will create an edge optimized endpoint.
@@ -14,6 +14,7 @@ class Test extends cdk.Stack {
14
14
  apiDefinition: apigateway.ApiDefinition.fromAsset(path.join(__dirname, 'sample-definition.yaml')),
15
15
  disableExecuteApiEndpoint: true,
16
16
  minCompressionSize: Size.bytes(1024),
17
+ binaryMediaTypes: ['image/png', 'application/pdf'],
17
18
  retainDeployments: true,
18
19
  cloudWatchRole: true,
19
20
  deployOptions: {
@@ -14,11 +14,13 @@
14
14
  - [VPC Link](#vpc-link)
15
15
  - [Private Integration](#private-integration)
16
16
  - [Generating ARN for Execute API](#generating-arn-for-execute-api)
17
- - [Access Logging](#access-logging)
18
17
  - [WebSocket API](#websocket-api)
19
18
  - [Manage Connections Permission](#manage-connections-permission)
20
19
  - [Managing access to WebSocket APIs](#managing-access-to-websocket-apis)
21
20
  - [Usage Plan and API Keys](#usage-plan-and-api-keys)
21
+ - [Common Config](#common-config)
22
+ - [Route Settings](#route-settings)
23
+ - [Access Logging](#access-logging)
22
24
 
23
25
  ## Introduction
24
26
 
@@ -375,65 +377,6 @@ const arn = api.arnForExecuteApi('GET', '/myApiPath', 'dev');
375
377
  - The 'ANY' method can be used for matching any HTTP methods not explicitly defined.
376
378
  - The function gracefully handles undefined parameters by using wildcards, making it flexible for various API configurations.
377
379
 
378
- ## Access Logging
379
-
380
- You can turn on logging to write logs to CloudWatch Logs.
381
- Read more at [Configure logging for HTTP APIs in API Gateway](https://docs.aws.amazon.com/apigateway/latest/developerguide/http-api-logging.html)
382
-
383
- ```ts
384
- import * as logs from 'aws-cdk-lib/aws-logs';
385
-
386
- declare const api: apigwv2.HttpApi;
387
- declare const logGroup: logs.LogGroup;
388
-
389
- const stage = new apigwv2.HttpStage(this, 'Stage', {
390
- httpApi: api,
391
- accessLogSettings: {
392
- destination: new apigwv2.LogGroupLogDestination(logGroup),
393
- },
394
- });
395
- ```
396
-
397
- The following code will generate the access log in the [CLF format](https://en.wikipedia.org/wiki/Common_Log_Format).
398
-
399
- ```ts
400
- import * as apigw from 'aws-cdk-lib/aws-apigateway';
401
- import * as logs from 'aws-cdk-lib/aws-logs';
402
-
403
- declare const api: apigwv2.HttpApi;
404
- declare const logGroup: logs.LogGroup;
405
-
406
- const stage = new apigwv2.HttpStage(this, 'Stage', {
407
- httpApi: api,
408
- accessLogSettings: {
409
- destination: new apigwv2.LogGroupLogDestination(logGroup),
410
- format: apigw.AccessLogFormat.clf(),
411
- },
412
- });
413
- ```
414
-
415
- You can also configure your own access log format by using the `AccessLogFormat.custom()` API.
416
- `AccessLogField` provides commonly used fields. The following code configures access log to contain.
417
-
418
- ```ts
419
- import * as apigw from 'aws-cdk-lib/aws-apigateway';
420
- import * as logs from 'aws-cdk-lib/aws-logs';
421
-
422
- declare const api: apigwv2.HttpApi;
423
- declare const logGroup: logs.LogGroup;
424
-
425
- const stage = new apigwv2.HttpStage(this, 'Stage', {
426
- httpApi: api,
427
- accessLogSettings: {
428
- destination: new apigwv2.LogGroupLogDestination(logGroup),
429
- format: apigw.AccessLogFormat.custom(
430
- `${apigw.AccessLogField.contextRequestId()} ${apigw.AccessLogField.contextErrorMessage()} ${apigw.AccessLogField.contextErrorMessageString()}
431
- ${apigw.AccessLogField.contextAuthorizerError()} ${apigw.AccessLogField.contextAuthorizerIntegrationStatus()}`
432
- ),
433
- },
434
- });
435
- ```
436
-
437
380
  ## WebSocket API
438
381
 
439
382
  A WebSocket API in API Gateway is a collection of WebSocket routes that are integrated with backend HTTP endpoints,
@@ -578,26 +521,6 @@ const webSocketApi = new apigwv2.WebSocketApi(this, 'mywsapi',{
578
521
  });
579
522
  ```
580
523
 
581
- ## Common Config
582
-
583
- Common config for both HTTP API and WebSocket API
584
-
585
- ### Route Settings
586
-
587
- Represents a collection of route settings.
588
-
589
- ```ts
590
- declare const api: apigwv2.HttpApi;
591
-
592
- new apigwv2.HttpStage(this, 'Stage', {
593
- httpApi: api,
594
- throttle: {
595
- rateLimit: 1000,
596
- burstLimit: 1000,
597
- },
598
- detailedMetricsEnabled: true,
599
- });
600
- ```
601
524
  ## Usage Plan and API Keys
602
525
 
603
526
  A usage plan specifies who can access one or more deployed WebSocket API stages, and the rate at which they can be accessed. The plan uses API keys to
@@ -740,4 +663,93 @@ const key = new apigwv2.RateLimitedApiKey(this, 'rate-limited-api-key', {
740
663
  burstLimit: 200
741
664
  }
742
665
  });
743
- ```
666
+ ```
667
+
668
+ ## Common Config
669
+
670
+ Common config for both HTTP API and WebSocket API
671
+
672
+ ### Route Settings
673
+
674
+ Represents a collection of route settings.
675
+
676
+ ```ts
677
+ declare const api: apigwv2.HttpApi;
678
+
679
+ new apigwv2.HttpStage(this, 'Stage', {
680
+ httpApi: api,
681
+ throttle: {
682
+ rateLimit: 1000,
683
+ burstLimit: 1000,
684
+ },
685
+ detailedMetricsEnabled: true,
686
+ });
687
+ ```
688
+
689
+ ### Access Logging
690
+
691
+ You can turn on logging to write logs to CloudWatch Logs.
692
+ Read more at [Configure logging for HTTP APIs](https://docs.aws.amazon.com/apigateway/latest/developerguide/http-api-logging.html) or [Configure logging for WebSocket APIs](https://docs.aws.amazon.com/apigateway/latest/developerguide/websocket-api-logging.html).
693
+
694
+ ```ts
695
+ import * as logs from 'aws-cdk-lib/aws-logs';
696
+
697
+ declare const httpApi: apigwv2.HttpApi;
698
+ declare const webSocketApi : apigwv2.WebSocketApi;
699
+ declare const logGroup: logs.LogGroup;
700
+
701
+ new apigwv2.HttpStage(this, 'HttpStage', {
702
+ httpApi,
703
+ accessLogSettings: {
704
+ destination: new apigwv2.LogGroupLogDestination(logGroup),
705
+ },
706
+ });
707
+
708
+ new apigwv2.WebSocketStage(this, 'WebSocketStage', {
709
+ webSocketApi,
710
+ stageName: 'dev',
711
+ accessLogSettings: {
712
+ destination: new apigwv2.LogGroupLogDestination(logGroup),
713
+ },
714
+ });
715
+ ```
716
+
717
+ The following code will generate the access log in the [CLF format](https://en.wikipedia.org/wiki/Common_Log_Format).
718
+
719
+ ```ts
720
+ import * as apigw from 'aws-cdk-lib/aws-apigateway';
721
+ import * as logs from 'aws-cdk-lib/aws-logs';
722
+
723
+ declare const api: apigwv2.HttpApi;
724
+ declare const logGroup: logs.LogGroup;
725
+
726
+ const stage = new apigwv2.HttpStage(this, 'Stage', {
727
+ httpApi: api,
728
+ accessLogSettings: {
729
+ destination: new apigwv2.LogGroupLogDestination(logGroup),
730
+ format: apigw.AccessLogFormat.clf(),
731
+ },
732
+ });
733
+ ```
734
+
735
+ You can also configure your own access log format by using the `AccessLogFormat.custom()` API.
736
+ `AccessLogField` provides commonly used fields. The following code configures the access log to contain several of them:
737
+
738
+ ```ts
739
+ import * as apigw from 'aws-cdk-lib/aws-apigateway';
740
+ import * as logs from 'aws-cdk-lib/aws-logs';
741
+
742
+ declare const api: apigwv2.HttpApi;
743
+ declare const logGroup: logs.LogGroup;
744
+
745
+ const stage = new apigwv2.HttpStage(this, 'Stage', {
746
+ httpApi: api,
747
+ accessLogSettings: {
748
+ destination: new apigwv2.LogGroupLogDestination(logGroup),
749
+ format: apigw.AccessLogFormat.custom(
750
+ `${apigw.AccessLogField.contextRequestId()} ${apigw.AccessLogField.contextErrorMessage()} ${apigw.AccessLogField.contextErrorMessageString()}
751
+ ${apigw.AccessLogField.contextAuthorizerError()} ${apigw.AccessLogField.contextAuthorizerIntegrationStatus()}`
752
+ ),
753
+ },
754
+ });
755
+ ```
@@ -1,12 +1,19 @@
1
1
  #!/usr/bin/env node
2
+ import { IntegTest } from '@aws-cdk/integ-tests-alpha';
2
3
  import * as cdk from 'aws-cdk-lib';
3
- import * as apigw from 'aws-cdk-lib/aws-apigatewayv2';
4
+ import * as apigwv2 from 'aws-cdk-lib/aws-apigatewayv2';
5
+ import * as apigw from 'aws-cdk-lib/aws-apigateway';
6
+ import * as logs from 'aws-cdk-lib/aws-logs';
4
7
 
5
8
  const app = new cdk.App();
6
9
  const stack = new cdk.Stack(app, 'aws-cdk-aws-apigatewayv2-websocket-stage');
7
10
 
8
- const webSocketApi = new apigw.WebSocketApi(stack, 'WebSocketApi');
9
- new apigw.WebSocketStage(stack, 'WebSocketStage', {
11
+ const logGroup = new logs.LogGroup(stack, 'MyLogGroup', {
12
+ removalPolicy: cdk.RemovalPolicy.DESTROY,
13
+ });
14
+
15
+ const webSocketApi = new apigwv2.WebSocketApi(stack, 'WebSocketApi');
16
+ new apigwv2.WebSocketStage(stack, 'WebSocketStage', {
10
17
  webSocketApi,
11
18
  stageName: 'dev',
12
19
  throttle: {
@@ -15,6 +22,15 @@ new apigw.WebSocketStage(stack, 'WebSocketStage', {
15
22
  },
16
23
  detailedMetricsEnabled: true,
17
24
  description: 'My Stage',
25
+ accessLogSettings: {
26
+ destination: new apigwv2.LogGroupLogDestination(logGroup),
27
+ format: apigw.AccessLogFormat.custom(JSON.stringify({
28
+ extendedRequestId: apigw.AccessLogField.contextExtendedRequestId(),
29
+ requestTime: apigw.AccessLogField.contextRequestTime(),
30
+ })),
31
+ },
18
32
  });
19
33
 
20
- app.synth();
34
+ new IntegTest(app, 'aws-cdk-aws-apigatewayv2-websocket-stage-test', {
35
+ testCases: [stack],
36
+ });