konokenj.cdk-api-mcp-server 0.30.0__py3-none-any.whl → 0.32.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Note: this version of konokenj.cdk-api-mcp-server has been flagged as potentially problematic.

@@ -1,4 +1,4 @@
  # SPDX-FileCopyrightText: 2025-present Kenji Kono <konoken@amazon.co.jp>
  #
  # SPDX-License-Identifier: MIT
- __version__ = "0.30.0"
+ __version__ = "0.32.0"

@@ -65,22 +65,23 @@ new python.PythonFunction(this, 'MyFunction', {

  ## Packaging

- If `requirements.txt`, `Pipfile` or `poetry.lock` exists at the entry path, the construct will handle installing all required modules in a [Lambda compatible Docker container](https://gallery.ecr.aws/sam/build-python3.7) according to the `runtime` and with the Docker platform based on the target architecture of the Lambda function.
+ If `requirements.txt`, `Pipfile`, `uv.lock` or `poetry.lock` exists at the entry path, the construct will handle installing all required modules in a [Lambda compatible Docker container](https://gallery.ecr.aws/sam/build-python3.13) according to the `runtime` and with the Docker platform based on the target architecture of the Lambda function.

  Python bundles are only recreated and published when a file in a source directory has changed.
  Therefore (and as a general best-practice), it is highly recommended to commit a lockfile with a
  list of all transitive dependencies and their exact versions. This will ensure that when any dependency version is updated, the bundle asset is recreated and uploaded.

- To that end, we recommend using [`pipenv`] or [`poetry`] which have lockfile support.
+ To that end, we recommend using [`pipenv`], [`uv`] or [`poetry`] which have lockfile support.

  - [`pipenv`](https://pipenv-fork.readthedocs.io/en/latest/basics.html#example-pipfile-lock)
  - [`poetry`](https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control)
+ - [`uv`](https://docs.astral.sh/uv/concepts/projects/sync/#exporting-the-lockfile)

  Packaging is executed using the `Packaging` class, which:

  1. Infers the packaging type based on the files present.
- 2. If it sees a `Pipfile` or a `poetry.lock` file, it exports it to a compatible `requirements.txt` file with credentials (if they're available in the source files or in the bundling container).
- 3. Installs dependencies using `pip`.
+ 2. If it sees a `Pipfile`, `uv.lock` or a `poetry.lock` file, it exports it to a compatible `requirements.txt` file with credentials (if they're available in the source files or in the bundling container).
+ 3. Installs dependencies using `pip` or `uv`.
  4. Copies the dependencies into an asset that is bundled for the Lambda package.
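
As a quick illustration of the `uv.lock` support documented above, here is a minimal, illustrative sketch (not part of the package diff) of a `PythonFunction` pointed at a uv-managed project; the entry path is hypothetical and the runtime is assumed to be available in your `aws-cdk-lib` version:

```ts
import * as path from 'path';
import * as lambda from 'aws-cdk-lib/aws-lambda';
import * as python from '@aws-cdk/aws-lambda-python-alpha';

// 'my-uv-project' (hypothetical) contains lambda_function.py, pyproject.toml and uv.lock;
// the construct detects the lock file, exports it to requirements.txt and installs it during bundling.
new python.PythonFunction(this, 'UvFunction', {
  entry: path.join(__dirname, 'my-uv-project'),
  runtime: lambda.Runtime.PYTHON_3_13, // assumed runtime; match your project's Python version
  index: 'lambda_function.py',
  handler: 'handler',
});
```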

  **Lambda with a requirements.txt**

@@ -109,6 +110,18 @@ Packaging is executed using the `Packaging` class, which:
  ├── poetry.lock # your poetry lock file has to be present at the entry path
  ```

+ **Lambda with a uv.lock**
+
+ Reference: https://docs.astral.sh/uv/concepts/projects/layout/
+
+ ```plaintext
+ .
+ ├── lambda_function.py # exports a function named 'handler'
+ ├── pyproject.toml # your uv project definition
+ ├── uv.lock # your uv lock file has to be present at the entry path
+ ├── .python-version # this file is ignored, python version is configured via Runtime
+ ```
+
  **Excluding source files**

  You can exclude files from being copied using the optional bundling string array parameter `assetExcludes`:
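
The hunk ends just before the `assetExcludes` example it introduces. A minimal sketch of how that optional bundling array is typically passed; the excluded paths and entry directory are hypothetical:

```ts
import * as path from 'path';
import * as lambda from 'aws-cdk-lib/aws-lambda';
import * as python from '@aws-cdk/aws-lambda-python-alpha';

// Paths listed in assetExcludes are not copied into the bundled asset.
new python.PythonFunction(this, 'FunctionWithExcludes', {
  entry: path.join(__dirname, 'my-function'), // hypothetical entry path
  runtime: lambda.Runtime.PYTHON_3_13,
  bundling: {
    assetExcludes: ['.venv', 'tests', '.git'],
  },
});
```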

@@ -53,6 +53,39 @@ const metric = new cloudwatch.Metric({
  });
  ```

+ ### Metric ID
+
+ Metrics can be assigned a unique identifier using the `id` property. This is
+ useful when referencing metrics in math expressions:
+
+ ```ts
+ const metric = new cloudwatch.Metric({
+   namespace: 'AWS/Lambda',
+   metricName: 'Invocations',
+   dimensionsMap: {
+     FunctionName: 'MyFunction'
+   },
+   id: 'invocations'
+ });
+ ```
+
+ The `id` must start with a lowercase letter and can only contain letters, numbers, and underscores.
+
+ ### Metric Visible
+ Metrics can be hidden from dashboard graphs using the `visible` property:
+
+ ```ts
+ declare const fn: lambda.Function;
+
+ const metric = fn.metricErrors({
+   visible: false
+ });
+ ```
+
+ By default, all metrics are visible (`visible: true`). Setting `visible: false`
+ hides the metric from dashboard visualizations while still allowing it to be
+ used in math expressions, provided it has an `id` set.
+
  ### Metric Math

  Math expressions are supported by instantiating the `MathExpression` class.

@@ -86,6 +119,31 @@ const problemPercentage = new cloudwatch.MathExpression({
  });
  ```

+ ### Metric ID Usage in Math Expressions
+
+ When metrics have custom IDs, you can reference them directly in math expressions.
+
+ ```ts
+ declare const fn: lambda.Function;
+
+ const invocations = fn.metricInvocations({
+   id: 'lambda_invocations',
+ });
+
+ const errors = fn.metricErrors({
+   id: 'lambda_errors',
+ });
+ ```
+
+ When metrics have predefined IDs, they can be referenced directly in math expressions by their ID without requiring the `usingMetrics` property.
+
+ ```ts
+ const errorRate = new cloudwatch.MathExpression({
+   expression: 'lambda_errors / lambda_invocations * 100',
+   label: 'Error Rate (%)',
+ });
+ ```
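
The snippets above leave implicit how the expression finds the metrics without `usingMetrics`: in the integration test added later in this diff, they are wired together by placing both on the same graph widget. A condensed, illustrative sketch of that wiring:

```ts
declare const fn: lambda.Function;
declare const dashboard: cloudwatch.Dashboard;

// The hidden metric is not rendered, but its id can still be referenced
// by the math expression placed on the same widget.
const invocations = fn.metricInvocations({ id: 'lambda_invocations' });
const errors = fn.metricErrors({ id: 'lambda_errors', visible: false });

const errorRate = new cloudwatch.MathExpression({
  expression: 'lambda_errors / lambda_invocations * 100',
  label: 'Error Rate (%)',
});

dashboard.addWidgets(new cloudwatch.GraphWidget({
  title: 'Error Rate',
  left: [invocations, errors, errorRate],
}));
```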

  ### Search Expressions
  Math expressions also support search expressions. For example, the following

@@ -0,0 +1,70 @@
+ import { App, Stack, StackProps } from 'aws-cdk-lib';
+ import { IntegTest } from '@aws-cdk/integ-tests-alpha';
+ import { Dashboard, Metric, GraphWidget, MathExpression } from 'aws-cdk-lib/aws-cloudwatch';
+
+ class DashboardWithMetricIdAndVisibleIntegrationTest extends Stack {
+   constructor(scope: App, id: string, props?: StackProps) {
+     super(scope, id, props);
+
+     const dashboard = new Dashboard(this, 'Dash');
+
+     const lambdaInvocations = new Metric({
+       namespace: 'AWS/Lambda',
+       metricName: 'Invocations',
+       dimensionsMap: { FunctionName: 'test-function' },
+       label: 'Lambda Invocations',
+       id: 'lambda_invocations',
+       visible: true,
+     });
+
+     const lambdaErrors = new Metric({
+       namespace: 'AWS/Lambda',
+       metricName: 'Errors',
+       dimensionsMap: { FunctionName: 'test-function' },
+       label: 'Lambda Errors (Hidden for calculation)',
+       id: 'lambda_errors',
+       visible: false,
+     });
+
+     const lambdaDuration = new Metric({
+       namespace: 'AWS/Lambda',
+       metricName: 'Duration',
+       dimensionsMap: { FunctionName: 'test-function' },
+       label: 'Lambda Duration',
+       id: 'lambda_duration',
+       visible: true,
+     });
+
+     const lambdaThrottles = new Metric({
+       namespace: 'AWS/Lambda',
+       metricName: 'Throttles',
+       dimensionsMap: { FunctionName: 'test-function' },
+       label: 'Lambda Throttles (Hidden)',
+       id: 'lambda_throttles',
+       visible: false,
+     });
+
+     const errorRate = new MathExpression({
+       expression: 'lambda_errors / lambda_invocations * 100',
+       label: 'Error Rate (%)',
+     });
+
+     const widget = new GraphWidget({
+       title: 'Lambda Metrics with ID and Visible Properties',
+       left: [
+         lambdaInvocations,
+         lambdaErrors,
+         lambdaDuration,
+         lambdaThrottles,
+         errorRate,
+       ],
+     });
+
+     dashboard.addWidgets(widget);
+   }
+ }
+
+ const app = new App();
+ new IntegTest(app, 'cdk-integ-dashboard-with-metric-id-and-visible', {
+   testCases: [new DashboardWithMetricIdAndVisibleIntegrationTest(app, 'DashboardWithMetricIdAndVisibleIntegrationTest')],
+ });

@@ -46,6 +46,10 @@ const asset8 = new assets.DockerImageAsset(stack, 'DockerImage8', {
  cacheDisabled: true,
  });

+ const asset9 = new assets.DockerImageAsset(stack, 'DockerImage9', {
+   directory: path.join(__dirname, 'demo-image-dockerignore'),
+ });
+
  const user = new iam.User(stack, 'MyUser');
  asset.repository.grantPull(user);
  asset2.repository.grantPull(user);

@@ -55,6 +59,7 @@ asset5.repository.grantPull(user);
  asset6.repository.grantPull(user);
  asset7.repository.grantPull(user);
  asset8.repository.grantPull(user);
+ asset9.repository.grantPull(user);

  new cdk.CfnOutput(stack, 'ImageUri', { value: asset.imageUri });
  new cdk.CfnOutput(stack, 'ImageUri2', { value: asset2.imageUri });

@@ -64,5 +69,6 @@ new cdk.CfnOutput(stack, 'ImageUri5', { value: asset5.imageUri });
  new cdk.CfnOutput(stack, 'ImageUri6', { value: asset6.imageUri });
  new cdk.CfnOutput(stack, 'ImageUri7', { value: asset7.imageUri });
  new cdk.CfnOutput(stack, 'ImageUri8', { value: asset8.imageUri });
+ new cdk.CfnOutput(stack, 'ImageUri9', { value: asset9.imageUri });

  app.synth();

@@ -0,0 +1,50 @@
+ /**
+  * This integration test tests the case of a customer setting a permissions boundary using a custom aspect,
+  * then trying to override at a more specific level using the PermissionsBoundary.of() API.
+  *
+  * Overriding should work.
+  */
+ import { App, Stack, IAspect, Aspects } from 'aws-cdk-lib';
+ import { IntegTest } from '@aws-cdk/integ-tests-alpha';
+ import { CfnRole, ManagedPolicy, PermissionsBoundary, Role, ServicePrincipal } from 'aws-cdk-lib/aws-iam';
+ import { IConstruct } from 'constructs';
+
+ class CustomAspect implements IAspect {
+   public visit(node: IConstruct): void {
+     if (node instanceof CfnRole) {
+       node.addPropertyOverride('PermissionsBoundary', 'arn:aws:iam::aws:policy/ReadOnlyAccess');
+     }
+   }
+ }
+
+ const app = new App({
+   postCliContext: {
+     // Force the intended behavior, from before we found this bug
+     '@aws-cdk/core:aspectPrioritiesMutating': false,
+   },
+ });
+
+ const stack = new Stack(app, 'integ-permissions-boundary', {
+   env: {
+     account: process.env.CDK_INTEG_ACCOUNT ?? process.env.CDK_DEFAULT_ACCOUNT,
+     region: process.env.CDK_INTEG_REGION ?? process.env.CDK_DEFAULT_REGION,
+   },
+ });
+
+ Aspects.of(stack).add(new CustomAspect());
+
+ new Role(stack, 'NormalRole', {
+   assumedBy: new ServicePrincipal('sqs.amazonaws.com'),
+ });
+
+ const powerRole = new Role(stack, 'PowerRole', {
+   assumedBy: new ServicePrincipal('sqs.amazonaws.com'),
+ });
+
+ PermissionsBoundary.of(powerRole).apply(ManagedPolicy.fromAwsManagedPolicyName('AdministratorAccess'));
+
+ new IntegTest(app, 'integ-test', {
+   testCases: [stack],
+ });
+
+ app.synth();

@@ -1605,6 +1605,25 @@ const dbFromLookup = rds.DatabaseInstance.fromLookup(this, 'dbFromLookup', {
  dbFromLookup.grantConnect(myUserRole, 'my-user-id');
  ```

+ ## Importing existing DatabaseCluster
+
+ ### Lookup DatabaseCluster by clusterIdentifier
+
+ You can look up an existing DatabaseCluster by its clusterIdentifier using `DatabaseCluster.fromLookup()`. This method returns an `IDatabaseCluster`.
+
+ Here's how `DatabaseCluster.fromLookup()` can be used:
+
+ ```ts
+ declare const myUserRole: iam.Role;
+
+ const clusterFromLookup = rds.DatabaseCluster.fromLookup(this, 'ClusterFromLookup', {
+   clusterIdentifier: 'my-cluster-id',
+ });
+
+ // Grant a connection
+ clusterFromLookup.grantConnect(myUserRole, 'my-user-id');
+ ```
+
  ## Limitless Database Cluster

  Amazon Aurora [PostgreSQL Limitless Database](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/limitless.html) provides automated horizontal scaling to process millions of write transactions per second and manages petabytes of data while maintaining the simplicity of operating inside a single database.

@@ -0,0 +1,100 @@
+ import { IntegTest } from '@aws-cdk/integ-tests-alpha';
+ import { App, CfnOutput, Stack } from 'aws-cdk-lib';
+ import * as cloudwatch from 'aws-cdk-lib/aws-cloudwatch';
+ import * as iam from 'aws-cdk-lib/aws-iam';
+ import * as rds from 'aws-cdk-lib/aws-rds';
+
+ const app = new App();
+ const clusterIdentifier = 'test-cluster-lookup';
+
+ const stackLookup = new Stack(app, 'aws-cdk-rds-cluster-lookup', {
+   env: {
+     account: process.env.CDK_INTEG_ACCOUNT ?? process.env.CDK_DEFAULT_ACCOUNT,
+     region: process.env.CDK_INTEG_REGION ?? process.env.CDK_DEFAULT_REGION,
+   },
+ });
+
+ // Lookup the existing cluster created by the preDeploy hook
+ const lookedUpCluster = rds.DatabaseCluster.fromLookup(stackLookup, 'LookedUpCluster', {
+   clusterIdentifier,
+ });
+
+ new CfnOutput(stackLookup, 'LookedUpClusterEndpoint', {
+   value: lookedUpCluster.clusterEndpoint.socketAddress,
+ });
+
+ new CfnOutput(stackLookup, 'LookedUpClusterReadEndpoint', {
+   value: lookedUpCluster.clusterReadEndpoint.socketAddress,
+ });
+
+ new CfnOutput(stackLookup, 'LookedUpClusterIdentifier', {
+   value: lookedUpCluster.clusterIdentifier,
+ });
+
+ new CfnOutput(stackLookup, 'LookedUpClusterResourceIdentifier', {
+   value: lookedUpCluster.clusterResourceIdentifier,
+ });
+
+ new CfnOutput(stackLookup, 'LookedUpClusterArn', {
+   value: lookedUpCluster.clusterArn,
+ });
+
+ new CfnOutput(stackLookup, 'SecurityGroupIds', {
+   value: lookedUpCluster.connections.securityGroups.map(sg => sg.securityGroupId).join(','),
+ });
+
+ // test grant
+ const dbAccessRole = new iam.Role(stackLookup, 'DbAccessRole', {
+   assumedBy: new iam.ServicePrincipal('ec2.amazonaws.com'),
+   description: 'Role for accessing the Aurora cluster via IAM authentication',
+ });
+
+ lookedUpCluster.grantConnect(dbAccessRole, 'admin');
+ lookedUpCluster.grantDataApiAccess(dbAccessRole);
+
+ // test metric
+ lookedUpCluster.metricDatabaseConnections().createAlarm(stackLookup, 'HighConnectionsAlarm', {
+   threshold: 100,
+   evaluationPeriods: 3,
+   alarmDescription: 'Database has high number of connections',
+   comparisonOperator: cloudwatch.ComparisonOperator.GREATER_THAN_THRESHOLD,
+ });
+
+ lookedUpCluster.metricCPUUtilization().createAlarm(stackLookup, 'HighCPUAlarm', {
+   threshold: 90,
+   evaluationPeriods: 3,
+   alarmDescription: 'Database CPU utilization is high',
+   comparisonOperator: cloudwatch.ComparisonOperator.GREATER_THAN_THRESHOLD,
+ });
+
+ lookedUpCluster.metricFreeableMemory().createAlarm(stackLookup, 'LowMemoryAlarm', {
+   threshold: 100 * 1024 * 1024,
+   evaluationPeriods: 3,
+   alarmDescription: 'Database is running low on memory',
+   comparisonOperator: cloudwatch.ComparisonOperator.LESS_THAN_THRESHOLD,
+ });
+
+ lookedUpCluster.metricDeadlocks().createAlarm(stackLookup, 'DeadlockAlarm', {
+   threshold: 5,
+   evaluationPeriods: 2,
+   alarmDescription: 'Database has deadlocks',
+   comparisonOperator: cloudwatch.ComparisonOperator.GREATER_THAN_THRESHOLD,
+ });
+
+ new IntegTest(app, 'integ-rds-cluster-from-lookup', {
+   testCases: [stackLookup],
+   enableLookups: true,
+   stackUpdateWorkflow: false,
+   // Create Aurora cluster before the test and delete it after
+   hooks: {
+     preDeploy: [
+       `aws rds create-db-cluster --db-cluster-identifier ${clusterIdentifier} --engine aurora-mysql --engine-version 8.0.mysql_aurora.3.09.0 --master-username admin --master-user-password Admin1234 --enable-http-endpoint --enable-iam-database-authentication --region us-east-1`,
+       `aws rds create-db-instance --db-instance-identifier ${clusterIdentifier}-instance --db-cluster-identifier ${clusterIdentifier} --engine aurora-mysql --db-instance-class db.r5.large --region us-east-1`,
+       `aws rds wait db-instance-available --db-instance-identifier ${clusterIdentifier}-instance --region us-east-1`,
+     ],
+     postDeploy: [
+       `aws rds delete-db-instance --db-instance-identifier ${clusterIdentifier}-instance --skip-final-snapshot --region us-east-1`,
+       `aws rds delete-db-cluster --db-cluster-identifier ${clusterIdentifier} --skip-final-snapshot --region us-east-1`,
+     ],
+   },
+ });