awslabs.dynamodb-mcp-server 2.0.10__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (27)
  1. awslabs/__init__.py +17 -0
  2. awslabs/dynamodb_mcp_server/__init__.py +17 -0
  3. awslabs/dynamodb_mcp_server/cdk_generator/__init__.py +19 -0
  4. awslabs/dynamodb_mcp_server/cdk_generator/generator.py +276 -0
  5. awslabs/dynamodb_mcp_server/cdk_generator/models.py +521 -0
  6. awslabs/dynamodb_mcp_server/cdk_generator/templates/README.md +57 -0
  7. awslabs/dynamodb_mcp_server/cdk_generator/templates/stack.ts.j2 +70 -0
  8. awslabs/dynamodb_mcp_server/common.py +94 -0
  9. awslabs/dynamodb_mcp_server/db_analyzer/__init__.py +30 -0
  10. awslabs/dynamodb_mcp_server/db_analyzer/analyzer_utils.py +394 -0
  11. awslabs/dynamodb_mcp_server/db_analyzer/base_plugin.py +355 -0
  12. awslabs/dynamodb_mcp_server/db_analyzer/mysql.py +450 -0
  13. awslabs/dynamodb_mcp_server/db_analyzer/plugin_registry.py +73 -0
  14. awslabs/dynamodb_mcp_server/db_analyzer/postgresql.py +215 -0
  15. awslabs/dynamodb_mcp_server/db_analyzer/sqlserver.py +255 -0
  16. awslabs/dynamodb_mcp_server/markdown_formatter.py +513 -0
  17. awslabs/dynamodb_mcp_server/model_validation_utils.py +845 -0
  18. awslabs/dynamodb_mcp_server/prompts/dynamodb_architect.md +851 -0
  19. awslabs/dynamodb_mcp_server/prompts/json_generation_guide.md +185 -0
  20. awslabs/dynamodb_mcp_server/prompts/transform_model_validation_result.md +168 -0
  21. awslabs/dynamodb_mcp_server/server.py +524 -0
  22. awslabs_dynamodb_mcp_server-2.0.10.dist-info/METADATA +306 -0
  23. awslabs_dynamodb_mcp_server-2.0.10.dist-info/RECORD +27 -0
  24. awslabs_dynamodb_mcp_server-2.0.10.dist-info/WHEEL +4 -0
  25. awslabs_dynamodb_mcp_server-2.0.10.dist-info/entry_points.txt +2 -0
  26. awslabs_dynamodb_mcp_server-2.0.10.dist-info/licenses/LICENSE +175 -0
  27. awslabs_dynamodb_mcp_server-2.0.10.dist-info/licenses/NOTICE +2 -0
@@ -0,0 +1,524 @@
+ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """DynamoDB MCP Server for data modeling and database analysis."""
+
+ import json
+ import os
+ from awslabs.aws_api_mcp_server.server import call_aws
+ from awslabs.dynamodb_mcp_server.cdk_generator.generator import CdkGenerator
+ from awslabs.dynamodb_mcp_server.common import handle_exceptions
+ from awslabs.dynamodb_mcp_server.db_analyzer import analyzer_utils
+ from awslabs.dynamodb_mcp_server.db_analyzer.plugin_registry import PluginRegistry
+ from awslabs.dynamodb_mcp_server.model_validation_utils import (
+     create_validation_resources,
+     get_validation_result_transform_prompt,
+     setup_dynamodb_local,
+ )
+ from loguru import logger
+ from mcp.server.fastmcp import Context, FastMCP
+ from pathlib import Path
+ from pydantic import Field
+ from typing import Any, Dict, List, Optional
+
+
+ DATA_MODEL_JSON_FILE = 'dynamodb_data_model.json'
+ DATA_MODEL_VALIDATION_RESULT_JSON_FILE = 'dynamodb_model_validation.json'
+
+ # Define server instructions and dependencies
+ SERVER_INSTRUCTIONS = """The official MCP Server for AWS DynamoDB design and modeling guidance
+
+ This server provides DynamoDB design and modeling expertise.
+
+ Available Tools:
+ --------------
+ Use the `dynamodb_data_modeling` tool to access enterprise-level DynamoDB design expertise.
+ This tool provides a systematic methodology for creating multi-table designs with
+ advanced optimizations, cost analysis, and integration patterns.
+
+ Use the `source_db_analyzer` tool to analyze existing databases for DynamoDB data modeling:
+ - Supports MySQL, PostgreSQL, and SQL Server
+ - Two execution modes:
+   * SELF_SERVICE: Generates SQL queries, the user runs them, and the tool parses the results
+   * MANAGED: Direct database connection (MySQL supports RDS Data API or connection-based access)
+
+ Managed Analysis Workflow:
+ - Extracts schema structure (tables, columns, indexes, foreign keys)
+ - Captures access patterns from query logs (when available)
+ - Generates timestamped analysis files (Markdown format) for use with dynamodb_data_modeling
+ - Safe for production use (read-only analysis)
+
+ Self-Service Mode Workflow:
+ 1. User selects database type (mysql/postgresql/sqlserver)
+ 2. Tool generates SQL queries to a file
+ 3. User runs the queries against their database
+ 4. User provides the result file path
+ 5. Tool generates analysis Markdown files
+
+ Use the `dynamodb_data_model_validation` tool to validate your DynamoDB data model:
+ - Loads and validates the dynamodb_data_model.json structure (checks required keys: tables, items, access_patterns)
+ - Sets up a DynamoDB Local environment automatically (tries containers first: Docker/Podman/Finch/nerdctl, falls back to Java)
+ - Cleans up existing tables from previous validation runs
+ - Creates tables and inserts test data from your model specification
+ - Tests all defined access patterns by executing their AWS CLI implementations
+ - Saves detailed validation results to dynamodb_model_validation.json with pattern responses
+ - Transforms results to Markdown format for comprehensive review
+
+ Use the `generate_resources` tool to generate resources from your DynamoDB data model:
+ - Supported resource types: 'cdk' for CDK app generation
+ - Generates a standalone CDK app for deploying DynamoDB tables and GSIs
+ - The CDK app reads dynamodb_data_model.json to create tables with proper configuration
+ - Use after completing data model validation
+ - Creates a 'cdk' directory with a ready-to-deploy CDK project
+ """
+
+
+ def create_server():
+     """Create and configure the MCP server instance."""
+     return FastMCP(
+         'awslabs.dynamodb-mcp-server',
+         instructions=SERVER_INSTRUCTIONS,
+     )
+
+
+ app = create_server()
+
+
+ @app.tool()
+ @handle_exceptions
+ async def dynamodb_data_modeling() -> str:
+     """Retrieves the complete DynamoDB Data Modeling Expert prompt.
+
+     This tool returns a prompt that helps users with data modeling on DynamoDB.
+     The prompt guides the user through requirements gathering, access pattern analysis, and
+     schema design. The prompt contains:
+
+     - Structured 2-phase workflow (requirements → final design)
+     - Enterprise design patterns: hot partition analysis, write sharding, sparse GSIs, and more
+     - Cost optimization strategies and RPS-based capacity planning
+     - Multi-table design philosophy with advanced denormalization patterns
+     - Integration guidance for OpenSearch, Lambda, and analytics
+
+     Usage: Simply call this tool to get the expert prompt.
+
+     Returns: Complete expert system prompt as text (no parameters required)
+     """
+     prompt_file = Path(__file__).parent / 'prompts' / 'dynamodb_architect.md'
+     architect_prompt = prompt_file.read_text(encoding='utf-8')
+     return architect_prompt
+
+
+ @app.tool()
+ @handle_exceptions
+ async def source_db_analyzer(
+     source_db_type: str = Field(
+         description="Database type: 'mysql', 'postgresql', or 'sqlserver'"
+     ),
+     database_name: Optional[str] = Field(
+         default=None,
+         description='Database name to analyze. REQUIRED for self_service. Env: MYSQL_DATABASE.',
+     ),
+     execution_mode: str = Field(
+         default='self_service',
+         description=(
+             "'self_service': generates SQL for the user to run, then parses results. "
+             "'managed' (MySQL only): RDS Data API-based access (aws_cluster_arn) "
+             'or connection-based access (hostname+port).'
+         ),
+     ),
+     queries_file_path: Optional[str] = Field(
+         default=None,
+         description='[self_service] Output path for generated SQL queries (Step 1).',
+     ),
+     query_result_file_path: Optional[str] = Field(
+         default=None,
+         description='[self_service] Path to query results file for parsing (Step 2).',
+     ),
+     pattern_analysis_days: Optional[int] = Field(
+         default=30,
+         description='Days of query logs to analyze. Default: 30.',
+         ge=1,
+     ),
+     max_query_results: Optional[int] = Field(
+         default=None,
+         description='Max rows per query. Default: 500. Env: MYSQL_MAX_QUERY_RESULTS.',
+         ge=1,
+     ),
+     aws_cluster_arn: Optional[str] = Field(
+         default=None,
+         description='[managed/RDS Data API-based] Aurora cluster ARN. Use this OR hostname, not both. Env: MYSQL_CLUSTER_ARN.',
+     ),
+     aws_secret_arn: Optional[str] = Field(
+         default=None,
+         description='[managed] Secrets Manager ARN for DB credentials. REQUIRED. Env: MYSQL_SECRET_ARN.',
+     ),
+     aws_region: Optional[str] = Field(
+         default=None,
+         description='[managed] AWS region. REQUIRED. Env: AWS_REGION.',
+     ),
+     hostname: Optional[str] = Field(
+         default=None,
+         description='[managed/connection-based] MySQL hostname. Use this OR aws_cluster_arn, not both. Env: MYSQL_HOSTNAME.',
+     ),
+     port: Optional[int] = Field(
+         default=None,
+         description='[managed/connection-based] MySQL port. Default: 3306. Env: MYSQL_PORT.',
+     ),
+     output_dir: str = Field(
+         description='Absolute path for output folder. Must exist and be writable. REQUIRED.',
+     ),
+ ) -> str:
+     """Analyzes a source database to extract schema and access patterns for DynamoDB modeling.
+
+     WHEN TO USE: Call this tool when the user selects the "Existing Database Analysis" option
+     after invoking the `dynamodb_data_modeling` tool. This extracts schema and query patterns
+     from an existing relational database to accelerate DynamoDB data model design.
+
+     IMPORTANT: Always ask the user which execution mode they prefer before calling this tool.
+
+     Execution Modes:
+     - self_service: Generates SQL queries for the user to run manually, then parses their results.
+     - managed (MySQL only): Database connection via RDS Data API or hostname.
+
+     Supported Databases: MySQL, PostgreSQL, SQL Server
+
+     Output: Generates analysis files (schema structure, access patterns, relationships) in
+     Markdown format. These files feed into the DynamoDB data modeling workflow to inform
+     table design, GSI selection, and access pattern mapping.
+
+     Returns: Analysis summary with file locations and next steps.
+     """
+     # Validate the execution mode
+     if execution_mode not in ['managed', 'self_service']:
+         return f'Invalid execution_mode: {execution_mode}. Must be "self_service" or "managed".'
+
+     # Get the plugin for the database type
+     try:
+         plugin = PluginRegistry.get_plugin(source_db_type)
+     except ValueError as e:
+         return f'{str(e)}. Supported types: {PluginRegistry.get_supported_types()}'
+
+     # Managed mode only supports MySQL
+     if execution_mode == 'managed' and source_db_type != 'mysql':
+         return (
+             f'Managed mode is not supported for {source_db_type}. Use self_service mode instead.'
+         )
+
+     max_results = max_query_results or 500
+
+     # Self-service mode - Step 1: Generate queries
+     if execution_mode == 'self_service' and queries_file_path and not query_result_file_path:
+         try:
+             return analyzer_utils.generate_query_file(
+                 plugin, database_name, max_results, queries_file_path, output_dir, source_db_type
+             )
+         except Exception as e:
+             logger.error(f'Failed to write queries: {str(e)}')
+             return f'Failed to write queries: {str(e)}'
+
+     # Self-service mode - Step 2: Parse results and generate analysis
+     if execution_mode == 'self_service' and query_result_file_path:
+         try:
+             return analyzer_utils.parse_results_and_generate_analysis(
+                 plugin,
+                 query_result_file_path,
+                 output_dir,
+                 database_name,
+                 pattern_analysis_days,
+                 max_results,
+                 source_db_type,
+             )
+         except FileNotFoundError as e:
+             logger.error(f'Query result file not found: {str(e)}')
+             return str(e)
+         except Exception as e:
+             logger.error(f'Analysis failed: {str(e)}')
+             return f'Analysis failed: {str(e)}'
+
+     # Managed analysis mode
+     if execution_mode == 'managed':
+         connection_params = analyzer_utils.build_connection_params(
+             source_db_type,
+             database_name=database_name,
+             pattern_analysis_days=pattern_analysis_days,
+             max_query_results=max_results,
+             aws_cluster_arn=aws_cluster_arn,
+             aws_secret_arn=aws_secret_arn,
+             aws_region=aws_region,
+             hostname=hostname,
+             port=port,
+             output_dir=output_dir,
+         )
+
+         # Validate parameters
+         missing_params, param_descriptions = analyzer_utils.validate_connection_params(
+             source_db_type, connection_params
+         )
+         if missing_params:
+             missing_descriptions = [param_descriptions[param] for param in missing_params]
+             return f'To analyze your {source_db_type} database, I need: {", ".join(missing_descriptions)}'
+
+         logger.info(
+             f'Starting managed analysis for {source_db_type}: {connection_params.get("database")}'
+         )
+
+         try:
+             return await analyzer_utils.execute_managed_analysis(
+                 plugin, connection_params, source_db_type
+             )
+         except NotImplementedError as e:
+             logger.error(f'Managed mode not supported: {str(e)}')
+             return str(e)
+         except Exception as e:
+             logger.error(f'Analysis failed: {str(e)}')
+             return f'Analysis failed: {str(e)}'
+
+     # Invalid mode combination
+     return 'Invalid parameter combination. For self-service mode, provide either queries_file_path (to generate queries) or query_result_file_path (to parse results).'
+
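+ # Illustrative self-service flow (hypothetical paths; in practice the MCP client
+ # supplies these arguments rather than calling the coroutine directly):
+ #   Step 1 - generate the SQL query file for the user to run:
+ #     source_db_analyzer(source_db_type='mysql', database_name='shop',
+ #                        queries_file_path='/tmp/analysis/queries.sql',
+ #                        output_dir='/tmp/analysis')
+ #   Step 2 - parse the user's query results into Markdown analysis files:
+ #     source_db_analyzer(source_db_type='mysql', database_name='shop',
+ #                        query_result_file_path='/tmp/analysis/results.txt',
+ #                        output_dir='/tmp/analysis')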
+
+ async def _execute_dynamodb_command(
+     command: str,
+     endpoint_url: Optional[str] = None,
+ ):
+     """Execute AWS CLI DynamoDB commands (internal use only).
+
+     Args:
+         command: AWS CLI command string (must start with 'aws dynamodb')
+         endpoint_url: DynamoDB endpoint URL for local testing
+
+     Returns:
+         AWS CLI command execution results, or the raised exception on failure
+
+     Raises:
+         ValueError: If the command doesn't start with 'aws dynamodb'
+     """
+     # Validate that the command starts with 'aws dynamodb'
+     if not command.strip().startswith('aws dynamodb'):
+         raise ValueError("Command must start with 'aws dynamodb'")
+
+     # Configure the environment with fake AWS credentials when targeting DynamoDB Local
+     if endpoint_url:
+         os.environ['AWS_ACCESS_KEY_ID'] = 'AKIAIOSFODNN7EXAMPLE'  # pragma: allowlist secret
+         os.environ['AWS_SECRET_ACCESS_KEY'] = (
+             'wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY'  # pragma: allowlist secret
+         )
+         os.environ['AWS_DEFAULT_REGION'] = os.environ.get('AWS_REGION', 'us-east-1')
+         command += f' --endpoint-url {endpoint_url}'
+
+     try:
+         return await call_aws(command, Context())
+     except Exception as e:
+         # Return the exception so the caller can record it alongside the pattern result
+         return e
+
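+ # For example, an access-pattern implementation such as
+ #   aws dynamodb get-item --table-name Users --key '{"PK": {"S": "USER#123"}}'
+ # passes the prefix check above (table and key values are illustrative); when
+ # endpoint_url is set, '--endpoint-url' is appended so the command runs against
+ # DynamoDB Local instead of the real service.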
+
+ @app.tool()
+ @handle_exceptions
+ async def dynamodb_data_model_validation(
+     workspace_dir: str = Field(description='Absolute path of the workspace directory'),
+ ) -> str:
+     """Validates and tests DynamoDB data models against DynamoDB Local.
+
+     Use this tool to validate, test, and verify your DynamoDB data model after completing the design phase.
+     This tool automatically checks that all access patterns work correctly by executing them against a local
+     DynamoDB instance.
+
+     WHEN TO USE:
+     - After completing data model design with the dynamodb_data_modeling tool
+     - When the user asks to "validate", "test", "check", or "verify" their DynamoDB data model
+     - To ensure all access patterns execute correctly before deploying to production
+
+     WHAT IT DOES:
+     1. If dynamodb_data_model.json doesn't exist:
+        - Returns the complete JSON generation guide from json_generation_guide.md
+        - Follow the guide to create the JSON file with tables, items, and access_patterns
+        - Call this tool again after creating the JSON to validate
+
+     2. If dynamodb_data_model.json exists:
+        - Validates the JSON structure (checks for required keys: tables, items, access_patterns)
+        - Sets up a DynamoDB Local environment (Docker/Podman/Finch/nerdctl or Java fallback)
+        - Cleans up existing tables from previous validation runs
+        - Creates tables and inserts test data from your model specification
+        - Tests all defined access patterns by executing their AWS CLI implementations
+        - Saves detailed validation results to dynamodb_model_validation.json
+        - Transforms results to Markdown format for comprehensive review
+
+     WHAT TO DO ON SUCCESSFUL COMPLETION:
+     - You MUST ask the user if they want to call the `generate_resources` tool to create the CDK app to provision the DynamoDB data model tables and GSIs.
+
+     Args:
+         workspace_dir: Absolute path of the workspace directory
+
+     Returns:
+         JSON generation guide (if the file is missing) or validation results with a transformation prompt (if the file exists)
+     """
+     try:
+         # Step 1: Resolve the data model file path inside the workspace
+         data_model_path = os.path.join(workspace_dir, DATA_MODEL_JSON_FILE)
+
+         if not os.path.exists(data_model_path):
+             # Return the JSON generation guide to help users create the required file
+             guide_path = Path(__file__).parent / 'prompts' / 'json_generation_guide.md'
+             try:
+                 json_guide = guide_path.read_text(encoding='utf-8')
+                 return f"""Error: {data_model_path} not found in your working directory.
+
+ {json_guide}"""
+             except FileNotFoundError:
+                 return f'Error: {data_model_path} not found. Please generate your data model with the dynamodb_data_modeling tool first.'
+
+         # Step 2: Load and validate the JSON structure
+         logger.info('Loading data model configuration')
+         try:
+             with open(data_model_path, 'r') as f:
+                 data_model = json.load(f)
+         except json.JSONDecodeError as e:
+             return f'Error: Invalid JSON in {data_model_path}: {str(e)}'
+
+         # Validate required structure
+         required_keys = ['tables', 'items', 'access_patterns']
+         missing_keys = [key for key in required_keys if key not in data_model]
+         if missing_keys:
+             return f'Error: Missing required keys in data model: {missing_keys}'
+
+         # Step 3: Set up DynamoDB Local
+         logger.info('Setting up DynamoDB Local environment')
+         endpoint_url = setup_dynamodb_local()
+
+         # Step 4: Create resources
+         logger.info('Creating validation resources')
+         create_validation_resources(data_model, endpoint_url)
+
+         # Step 5: Execute access patterns
+         logger.info('Executing access patterns')
+         await _execute_access_patterns(
+             workspace_dir, data_model.get('access_patterns', []), endpoint_url
+         )
+
+         # Step 6: Transform validation results to Markdown
+         return get_validation_result_transform_prompt()
+
+     except FileNotFoundError as e:
+         logger.error(f'File not found: {e}')
+         return f'Error: Required file not found: {str(e)}'
+     except Exception as e:
+         logger.error(f'Data model validation failed: {e}')
+         return f'Data model validation failed: {str(e)}. Please check your data model JSON structure and try again.'
+
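+ # A minimal dynamodb_data_model.json sketch (illustrative only -- the authoritative
+ # schema comes from json_generation_guide.md; the contents of 'tables' and 'items'
+ # below are assumptions, while the top-level keys and the access-pattern fields match
+ # what this module reads):
+ # {
+ #   "tables": [{"TableName": "Users", ...}],
+ #   "items": {...},
+ #   "access_patterns": [
+ #     {
+ #       "pattern": "AP1",
+ #       "description": "Get user by id",
+ #       "dynamodb_operation": "GetItem",
+ #       "implementation": "aws dynamodb get-item --table-name Users --key '{\"PK\": {\"S\": \"USER#123\"}}'"
+ #     }
+ #   ]
+ # }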
+
+ @app.tool()
+ @handle_exceptions
+ async def generate_resources(
+     dynamodb_data_model_json_file: str = Field(
+         description='Absolute path to the dynamodb_data_model.json file. Resources will be generated in the same directory.'
+     ),
+     resource_type: str = Field(description="Type of resource to generate: 'cdk' for CDK app"),
+ ) -> str:
+     """Generates resources from a DynamoDB data model JSON file (dynamodb_data_model.json).
+
+     This tool generates various resources based on the provided `dynamodb_data_model.json` file.
+     Currently it supports generating a CDK app for deploying DynamoDB tables.
+
+     Supported resource types:
+     - cdk: CDK app for deploying DynamoDB tables.
+       Generates a CDK app that provisions DynamoDB tables and GSIs as defined in `dynamodb_data_model.json`.
+
+     WHEN TO USE:
+     - After completing data model validation with the `dynamodb_data_model_validation` tool
+     - When the user asks to "create", "deploy", "test", or "provision" their DynamoDB data model or tables
+     - To create the DynamoDB tables and GSIs using a CDK app
+
+     WHEN NOT TO USE:
+     - Before completing data model validation with the `dynamodb_data_model_validation` tool
+     - Before the `dynamodb_data_model.json` file has been created
+
+     WHAT TO DO ON SUCCESSFUL COMPLETION:
+     - You MUST ask the user if they want to use the CDK app to create the DynamoDB data model tables and GSIs.
+
+     Args:
+         dynamodb_data_model_json_file: Absolute path to the `dynamodb_data_model.json` file
+         resource_type: Type of resource to generate, possible values: cdk
+
+     Returns:
+         Success message with the destination path, or an error message if generation fails
+     """
+     if resource_type == 'cdk':
+         logger.info(
+             f'Generating resources. resource_type: {resource_type}, dynamodb_data_model_json_file: {dynamodb_data_model_json_file}'
+         )
+         json_path = Path(dynamodb_data_model_json_file)
+         generator = CdkGenerator()
+         generator.generate(json_path)
+
+         # The generator returns None on success, so we construct the success message
+         cdk_dir = json_path.parent / 'cdk'
+         logger.info(f'CDK project generated successfully. cdk_dir: {cdk_dir}')
+         return f"Successfully generated CDK project at '{cdk_dir}'"
+     else:
+         return f"Error: Unknown resource type '{resource_type}'. Supported types: cdk"
+
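+ # Typical follow-up once generation succeeds (assuming the emitted app uses the
+ # standard CDK TypeScript layout; exact commands depend on the generated project):
+ #   cd cdk && npm install && npx cdk deploy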
+
+ def main():
+     """Main entry point for the MCP server application."""
+     app.run()
+
+
+ async def _execute_access_patterns(
+     workspace_dir: str,
+     access_patterns: List[Dict[str, Any]],
+     endpoint_url: Optional[str] = None,
+ ) -> dict:
+     """Execute all access-pattern operations for data model validation.
+
+     Args:
+         workspace_dir: Absolute path of the workspace directory
+         access_patterns: List of access patterns to test
+         endpoint_url: DynamoDB endpoint URL
+
+     Returns:
+         Dictionary with all execution results
+     """
+     try:
+         results = []
+         for pattern in access_patterns:
+             # Patterns without an AWS CLI implementation are passed through unchanged
+             if 'implementation' not in pattern:
+                 results.append(pattern)
+                 continue
+
+             command = pattern['implementation']
+             result = await _execute_dynamodb_command(command, endpoint_url)
+             results.append(
+                 {
+                     'pattern_id': pattern.get('pattern'),
+                     'description': pattern.get('description'),
+                     'dynamodb_operation': pattern.get('dynamodb_operation'),
+                     'command': command,
+                     'response': result if isinstance(result, dict) else str(result),
+                 }
+             )
+
+         validation_response = {'validation_response': results}
+
+         output_file = os.path.join(workspace_dir, DATA_MODEL_VALIDATION_RESULT_JSON_FILE)
+         with open(output_file, 'w') as f:
+             json.dump(validation_response, f, indent=2)
+
+         return validation_response
+     except Exception as e:
+         logger.error(f'Failed to execute access patterns validation: {e}')
+         return {'validation_response': [], 'error': str(e)}
+
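+ # Shape of the saved dynamodb_model_validation.json ('response' holds the raw AWS CLI
+ # result for successful calls, or the stringified exception on failure):
+ # {
+ #   "validation_response": [
+ #     {"pattern_id": "AP1", "description": "...", "dynamodb_operation": "GetItem",
+ #      "command": "aws dynamodb get-item ...", "response": {...}}
+ #   ]
+ # }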
+
+ if __name__ == '__main__':
+     main()