runbooks-0.7.0-py3-none-any.whl → runbooks-0.7.6-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (132)
  1. runbooks/__init__.py +87 -37
  2. runbooks/cfat/README.md +300 -49
  3. runbooks/cfat/__init__.py +2 -2
  4. runbooks/finops/__init__.py +1 -1
  5. runbooks/finops/cli.py +1 -1
  6. runbooks/inventory/collectors/__init__.py +8 -0
  7. runbooks/inventory/collectors/aws_management.py +791 -0
  8. runbooks/inventory/collectors/aws_networking.py +3 -3
  9. runbooks/main.py +3389 -782
  10. runbooks/operate/__init__.py +207 -0
  11. runbooks/operate/base.py +311 -0
  12. runbooks/operate/cloudformation_operations.py +619 -0
  13. runbooks/operate/cloudwatch_operations.py +496 -0
  14. runbooks/operate/dynamodb_operations.py +812 -0
  15. runbooks/operate/ec2_operations.py +926 -0
  16. runbooks/operate/iam_operations.py +569 -0
  17. runbooks/operate/s3_operations.py +1211 -0
  18. runbooks/operate/tagging_operations.py +655 -0
  19. runbooks/remediation/CLAUDE.md +100 -0
  20. runbooks/remediation/DOME9.md +218 -0
  21. runbooks/remediation/README.md +26 -0
  22. runbooks/remediation/Tests/__init__.py +0 -0
  23. runbooks/remediation/Tests/update_policy.py +74 -0
  24. runbooks/remediation/__init__.py +95 -0
  25. runbooks/remediation/acm_cert_expired_unused.py +98 -0
  26. runbooks/remediation/acm_remediation.py +875 -0
  27. runbooks/remediation/api_gateway_list.py +167 -0
  28. runbooks/remediation/base.py +643 -0
  29. runbooks/remediation/cloudtrail_remediation.py +908 -0
  30. runbooks/remediation/cloudtrail_s3_modifications.py +296 -0
  31. runbooks/remediation/cognito_active_users.py +78 -0
  32. runbooks/remediation/cognito_remediation.py +856 -0
  33. runbooks/remediation/cognito_user_password_reset.py +163 -0
  34. runbooks/remediation/commons.py +455 -0
  35. runbooks/remediation/dynamodb_optimize.py +155 -0
  36. runbooks/remediation/dynamodb_remediation.py +744 -0
  37. runbooks/remediation/dynamodb_server_side_encryption.py +108 -0
  38. runbooks/remediation/ec2_public_ips.py +134 -0
  39. runbooks/remediation/ec2_remediation.py +892 -0
  40. runbooks/remediation/ec2_subnet_disable_auto_ip_assignment.py +72 -0
  41. runbooks/remediation/ec2_unattached_ebs_volumes.py +448 -0
  42. runbooks/remediation/ec2_unused_security_groups.py +202 -0
  43. runbooks/remediation/kms_enable_key_rotation.py +651 -0
  44. runbooks/remediation/kms_remediation.py +717 -0
  45. runbooks/remediation/lambda_list.py +243 -0
  46. runbooks/remediation/lambda_remediation.py +971 -0
  47. runbooks/remediation/multi_account.py +569 -0
  48. runbooks/remediation/rds_instance_list.py +199 -0
  49. runbooks/remediation/rds_remediation.py +873 -0
  50. runbooks/remediation/rds_snapshot_list.py +192 -0
  51. runbooks/remediation/requirements.txt +118 -0
  52. runbooks/remediation/s3_block_public_access.py +159 -0
  53. runbooks/remediation/s3_bucket_public_access.py +143 -0
  54. runbooks/remediation/s3_disable_static_website_hosting.py +74 -0
  55. runbooks/remediation/s3_downloader.py +215 -0
  56. runbooks/remediation/s3_enable_access_logging.py +562 -0
  57. runbooks/remediation/s3_encryption.py +526 -0
  58. runbooks/remediation/s3_force_ssl_secure_policy.py +143 -0
  59. runbooks/remediation/s3_list.py +141 -0
  60. runbooks/remediation/s3_object_search.py +201 -0
  61. runbooks/remediation/s3_remediation.py +816 -0
  62. runbooks/remediation/scan_for_phrase.py +425 -0
  63. runbooks/remediation/workspaces_list.py +220 -0
  64. runbooks/security/__init__.py +9 -10
  65. runbooks/security/security_baseline_tester.py +4 -2
  66. runbooks-0.7.6.dist-info/METADATA +608 -0
  67. {runbooks-0.7.0.dist-info → runbooks-0.7.6.dist-info}/RECORD +84 -76
  68. {runbooks-0.7.0.dist-info → runbooks-0.7.6.dist-info}/entry_points.txt +0 -1
  69. {runbooks-0.7.0.dist-info → runbooks-0.7.6.dist-info}/top_level.txt +0 -1
  70. jupyter-agent/.env +0 -2
  71. jupyter-agent/.env.template +0 -2
  72. jupyter-agent/.gitattributes +0 -35
  73. jupyter-agent/.gradio/certificate.pem +0 -31
  74. jupyter-agent/README.md +0 -16
  75. jupyter-agent/__main__.log +0 -8
  76. jupyter-agent/app.py +0 -256
  77. jupyter-agent/cloudops-agent.png +0 -0
  78. jupyter-agent/ds-system-prompt.txt +0 -154
  79. jupyter-agent/jupyter-agent.png +0 -0
  80. jupyter-agent/llama3_template.jinja +0 -123
  81. jupyter-agent/requirements.txt +0 -9
  82. jupyter-agent/tmp/4ojbs8a02ir/jupyter-agent.ipynb +0 -68
  83. jupyter-agent/tmp/cm5iasgpm3p/jupyter-agent.ipynb +0 -91
  84. jupyter-agent/tmp/crqbsseag5/jupyter-agent.ipynb +0 -91
  85. jupyter-agent/tmp/hohanq1u097/jupyter-agent.ipynb +0 -57
  86. jupyter-agent/tmp/jns1sam29wm/jupyter-agent.ipynb +0 -53
  87. jupyter-agent/tmp/jupyter-agent.ipynb +0 -27
  88. jupyter-agent/utils.py +0 -409
  89. runbooks/aws/__init__.py +0 -58
  90. runbooks/aws/dynamodb_operations.py +0 -231
  91. runbooks/aws/ec2_copy_image_cross-region.py +0 -195
  92. runbooks/aws/ec2_describe_instances.py +0 -202
  93. runbooks/aws/ec2_ebs_snapshots_delete.py +0 -186
  94. runbooks/aws/ec2_run_instances.py +0 -213
  95. runbooks/aws/ec2_start_stop_instances.py +0 -212
  96. runbooks/aws/ec2_terminate_instances.py +0 -143
  97. runbooks/aws/ec2_unused_eips.py +0 -196
  98. runbooks/aws/ec2_unused_volumes.py +0 -188
  99. runbooks/aws/s3_create_bucket.py +0 -142
  100. runbooks/aws/s3_list_buckets.py +0 -152
  101. runbooks/aws/s3_list_objects.py +0 -156
  102. runbooks/aws/s3_object_operations.py +0 -183
  103. runbooks/aws/tagging_lambda_handler.py +0 -183
  104. runbooks/inventory/FAILED_SCRIPTS_TROUBLESHOOTING.md +0 -619
  105. runbooks/inventory/PASSED_SCRIPTS_GUIDE.md +0 -738
  106. runbooks/inventory/aws_organization.png +0 -0
  107. runbooks/inventory/cfn_move_stack_instances.py +0 -1526
  108. runbooks/inventory/delete_s3_buckets_objects.py +0 -169
  109. runbooks/inventory/lockdown_cfn_stackset_role.py +0 -224
  110. runbooks/inventory/update_aws_actions.py +0 -173
  111. runbooks/inventory/update_cfn_stacksets.py +0 -1215
  112. runbooks/inventory/update_cloudwatch_logs_retention_policy.py +0 -294
  113. runbooks/inventory/update_iam_roles_cross_accounts.py +0 -478
  114. runbooks/inventory/update_s3_public_access_block.py +0 -539
  115. runbooks/organizations/__init__.py +0 -12
  116. runbooks/organizations/manager.py +0 -374
  117. runbooks-0.7.0.dist-info/METADATA +0 -375
  118. /runbooks/inventory/{tests → Tests}/common_test_data.py +0 -0
  119. /runbooks/inventory/{tests → Tests}/common_test_functions.py +0 -0
  120. /runbooks/inventory/{tests → Tests}/script_test_data.py +0 -0
  121. /runbooks/inventory/{tests → Tests}/setup.py +0 -0
  122. /runbooks/inventory/{tests → Tests}/src.py +0 -0
  123. /runbooks/inventory/{tests/test_inventory_modules.py → Tests/test_Inventory_Modules.py} +0 -0
  124. /runbooks/inventory/{tests → Tests}/test_cfn_describe_stacks.py +0 -0
  125. /runbooks/inventory/{tests → Tests}/test_ec2_describe_instances.py +0 -0
  126. /runbooks/inventory/{tests → Tests}/test_lambda_list_functions.py +0 -0
  127. /runbooks/inventory/{tests → Tests}/test_moto_integration_example.py +0 -0
  128. /runbooks/inventory/{tests → Tests}/test_org_list_accounts.py +0 -0
  129. /runbooks/inventory/{Inventory_Modules.py → inventory_modules.py} +0 -0
  130. /runbooks/{aws → operate}/tags.json +0 -0
  131. {runbooks-0.7.0.dist-info → runbooks-0.7.6.dist-info}/WHEEL +0 -0
  132. {runbooks-0.7.0.dist-info → runbooks-0.7.6.dist-info}/licenses/LICENSE +0 -0
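For reference, a file-level comparison like the list above can be reproduced locally by diffing the member lists of the two wheels, since wheels are plain zip archives. Below is a minimal sketch using only the Python standard library; it assumes both wheel files have already been downloaded into the working directory, and the filenames are illustrative:

```python
# Sketch: list files added/removed between two wheel archives.
# Assumes both wheels were downloaded locally; the paths are illustrative.
import zipfile

old_wheel = "runbooks-0.7.0-py3-none-any.whl"  # assumed local path
new_wheel = "runbooks-0.7.6-py3-none-any.whl"  # assumed local path

with zipfile.ZipFile(old_wheel) as old, zipfile.ZipFile(new_wheel) as new:
    old_files = set(old.namelist())
    new_files = set(new.namelist())

print("Added:")
for name in sorted(new_files - old_files):
    print(f"  {name}")

print("Removed:")
for name in sorted(old_files - new_files):
    print(f"  {name}")
```

This only compares file names; the per-file +/- line counts shown above come from diffing the contents of the files themselves.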
runbooks/inventory/cfn_move_stack_instances.py
@@ -1,1526 +0,0 @@
1
- #!/usr/bin/env python3
2
- """
3
- Enterprise AWS CloudFormation StackSet Instance Migration Tool
4
-
5
- Advanced enterprise-grade automation tool for migrating CloudFormation stack instances between
6
- StackSets without resource disruption or service interruption. Designed for complex organizational
7
- restructuring, compliance alignment, and governance optimization across multi-account AWS
8
- Organizations environments with comprehensive safety controls and recovery mechanisms.
9
-
10
- **Enterprise CloudFormation Management**: Sophisticated StackSet migration operations with
11
- comprehensive drift detection, batch processing optimization, and enterprise-grade error handling.
12
-
13
- Core Migration Capabilities:
14
- - Zero-downtime stack instance migration between StackSets
15
- - Automated template body extraction and cleanup from source StackSets
16
- - Parameter preservation and validation during migration operations
17
- - IAM capability requirement detection and automatic handling
18
- - Batch processing optimization for large-scale migrations (10 instances per operation)
19
-
20
- Advanced Safety & Recovery Features:
21
- - Comprehensive drift detection and validation before migration
22
- - Automatic recovery file generation with stack instance metadata
23
- - Transaction-style operations with rollback capabilities
24
- - Real-time operation monitoring with detailed progress reporting
25
- - Validation checkpoints throughout the migration process
26
-
27
- Enterprise Migration Workflow:
28
-
29
- **New StackSet Creation Path:**
30
- 1. Template body extraction from existing StackSet with JSON cleanup
31
- 2. Parameter analysis and preservation from source StackSet
32
- 3. IAM capability requirement detection (CAPABILITY_NAMED_IAM)
33
- 4. New StackSet creation with validated template and parameters
34
-
35
- **Existing StackSet Path:**
36
- 1. StackSet validation and compatibility verification
37
- 2. Parameter and template alignment confirmation
38
-
39
- **Common Migration Operations:**
40
- 1. Stack instance discovery and metadata collection
41
- 2. Recovery file generation with comprehensive backup data
42
- 3. Batch import operations with optimized account grouping
43
- 4. Continuous operation monitoring and status validation
44
- 5. Regional deployment parallelization for performance optimization
45
- 6. Comprehensive success validation and error handling
46
- 7. Final migration status reporting and audit documentation
47
-
48
- Migration Process Components:
49
- - Template body analysis with escaped JSON cleanup and validation
50
- - Parameter extraction with type validation and default handling
51
- - Stack instance identification with comprehensive metadata collection
52
- - IAM capability detection through template analysis and validation
53
- - Batch operation coordination with account-based grouping strategies
54
- - Operation polling with timeout handling and progress monitoring
55
- - Recovery file management with detailed backup and restoration data
56
-
57
- Enterprise Integration Features:
58
- - Integration with AWS Organizations for account validation
59
- - Cross-account IAM role management and validation
60
- - CloudFormation drift detection integration for pre-migration validation
61
- - Comprehensive audit logging for compliance and governance requirements
62
- - Enterprise monitoring system integration for operational visibility
63
-
64
- Security & Compliance:
65
- - Pre-migration drift detection ensuring infrastructure consistency
66
- - IAM permission validation for cross-account operations
67
- - Comprehensive audit trail generation for compliance reporting
68
- - Recovery mechanism validation ensuring operational continuity
69
- - Resource-level validation preventing unintended modifications
70
-
71
- Performance Optimizations:
72
- - Batch processing with optimal instance grouping (10 per operation)
73
- - Regional deployment parallelization reducing migration time
74
- - Account aggregation strategies for efficient StackSet operations
75
- - Operation polling optimization reducing API call overhead
76
- - Memory-efficient processing for large-scale organizational migrations
77
-
78
- Error Handling & Recovery:
79
- - Transaction-style operations with automatic rollback capabilities
80
- - Comprehensive error categorization and recovery guidance
81
- - Real-time operation monitoring with detailed failure analysis
82
- - Recovery file generation for manual intervention scenarios
83
- - Validation checkpoints ensuring migration integrity
84
-
85
- Command-Line Interface:
86
- - --old: Source StackSet name for migration operations
87
- - --new: Target StackSet name or creation specification
88
- - --Account: Specific account selection for targeted migrations
89
- - --Empty: Empty StackSet creation for template copying
90
- - --recovery: Recovery mode operation using backup files
91
- - --drift-check: Pre-migration drift detection and validation
92
-
93
- Usage Examples:
94
- Full migration with drift checking:
95
- ```bash
96
- python cfn_move_stack_instances.py --old LegacyStackSet --new ModernStackSet --drift-check
97
- ```
98
-
99
- Selective account migration:
100
- ```bash
101
- python cfn_move_stack_instances.py --old OldStack --new NewStack -A 123456789012 234567890123
102
- ```
103
-
104
- Recovery operation from backup:
105
- ```bash
106
- python cfn_move_stack_instances.py --recovery
107
- ```
108
-
109
- Dependencies:
110
- - boto3: AWS SDK for CloudFormation and Organizations operations
111
- - account_class: Enterprise AWS account access management
112
- - ArgumentsClass: Standardized command-line argument processing
113
- - colorama: Cross-platform terminal color support for operational visibility
114
-
115
- Version: 2024.02.16 - Enterprise Enhanced Edition
116
- Author: AWS Cloud Foundations Team
117
- License: Internal Enterprise Use
118
- """
119
-
120
- # import pprint
121
- # import Inventory_Modules
122
- import logging
123
- import sys
124
- from datetime import datetime
125
- from os import remove
126
- from os.path import exists, split
127
- from time import sleep, time
128
-
129
- from account_class import aws_acct_access
130
- from ArgumentsClass import CommonArguments
131
- from botocore.exceptions import ClientError, WaiterError
132
- from colorama import Fore, Style, init
133
-
134
- init()
135
- __version__ = "2024.02.16"
136
-
137
-
138
- ##################
139
- # Functions
140
- ##################
141
- def parse_args(args):
142
- """
143
- Configure and parse enterprise-grade command-line arguments for StackSet instance migration.
144
-
145
- Establishes comprehensive command-line interface for CloudFormation StackSet migration
146
- operations with enterprise-specific parameters including drift detection, recovery modes,
147
- account filtering, and operational safety controls. Designed for complex organizational
148
- migrations with detailed configuration options and validation requirements.
149
-
150
- Args:
151
- args: Command-line arguments list from sys.argv[1:] for argument parsing
152
-
153
- Returns:
154
- argparse.Namespace: Parsed command-line arguments containing:
155
- - pOldStackSet: Source StackSet name for migration operations
156
- - pNewStackSet: Target StackSet name or creation specification
157
- - pAccountsToMove: List of specific AWS account numbers for targeted migration
158
- - pEmpty: Boolean flag for empty StackSet creation with template copying
159
- - pRecoveryFlag: Boolean flag for recovery mode operation using backup files
160
- - pDriftCheckFlag: Boolean flag for pre-migration drift detection validation
161
- - Standard CommonArguments: Profile, region, confirmation, verbosity, timing
162
-
163
- Command-Line Arguments:
164
-
165
- **Core Migration Parameters:**
166
- --old: Source StackSet name containing stack instances to migrate
167
- - Required for all migration operations except recovery mode
168
- - Validates StackSet existence and access permissions
169
- - Used for template body and parameter extraction
170
-
171
- --new: Target StackSet name for migration destination
172
- - Can specify existing StackSet or trigger new StackSet creation
173
- - Validates compatibility with source StackSet template and parameters
174
- - Used for import operations and final migration destination
175
-
176
- **Account Selection & Filtering:**
177
- -A, --Account: Specific AWS account numbers for targeted migration
178
- - Supports multiple account specification for batch operations
179
- - Validates account membership in AWS Organizations
180
- - Enables selective migration for phased organizational changes
181
-
182
- **Operational Mode Controls:**
183
- --Empty, --empty: Create empty target StackSet with template copying
184
- - Copies template body and parameters without stack instances
185
- - Useful for template validation and staging environments
186
- - Enables pre-validation of StackSet creation before migration
187
-
188
- --recovery: Enable recovery mode operation using backup files
189
- - Utilizes previously generated recovery files for rollback operations
190
- - Provides transaction-style recovery for failed migrations
191
- - Enables manual intervention and recovery scenarios
192
-
193
- --drift-check: Enable comprehensive pre-migration drift detection
194
- - Validates infrastructure consistency before migration operations
195
- - Prevents migration of drifted stack instances requiring remediation
196
- - Provides detailed drift analysis and remediation guidance
197
-
198
- **Standard Enterprise Arguments:**
199
- - Single region operation for controlled migration scope
200
- - Single profile operation for consistent credential management
201
- - Interactive confirmation for safety-critical operations
202
- - Configurable verbosity for operational visibility and debugging
203
- - Execution timing for performance monitoring and optimization
204
- - Version information for operational tracking and audit requirements
205
-
206
- Enterprise Integration Features:
207
- - Integration with CommonArguments for standardized enterprise CLI patterns
208
- - Comprehensive help documentation for operational guidance
209
- - Argument validation and error handling for operational safety
210
- - Script name extraction for contextual help and error messaging
211
-
212
- Security & Validation:
213
- - Account number validation preventing unauthorized access
214
- - StackSet name validation ensuring proper resource identification
215
- - Parameter validation preventing configuration errors
216
- - Access control validation through AWS credential verification
217
-
218
- Operational Safety Controls:
219
- - Interactive confirmation for destructive operations
220
- - Recovery mode availability for rollback scenarios
221
- - Drift detection integration for infrastructure validation
222
- - Comprehensive error handling and user guidance
223
- """
224
- # Extract script name for contextual argument grouping and help documentation
225
- script_path, script_name = split(sys.argv[0])
226
-
227
- # Initialize enterprise argument parser with standard organizational controls
228
- parser = CommonArguments()
229
- parser.singleregion() # Controlled regional scope for migration safety
230
- parser.singleprofile() # Consistent credential management across operations
231
- parser.confirm() # Interactive confirmation for safety-critical operations
232
- parser.verbosity() # Configurable logging for operational visibility
233
- parser.timing() # Performance monitoring for enterprise analytics
234
- parser.version(__version__) # Version tracking for operational audit requirements
235
-
236
- # Define script-specific argument group for StackSet migration parameters
237
- local = parser.my_parser.add_argument_group(script_name, "Parameters specific to this script")
238
-
239
- # Source StackSet specification for migration operations
240
- local.add_argument(
241
- "--old",
242
- dest="pOldStackSet",
243
- metavar="The name of the old stackset",
244
- help="This is the name of the old stackset, which manages the existing stack instances in the legacy accounts.",
245
- )
246
-
247
- # Target StackSet specification for migration destination
248
- local.add_argument(
249
- "--new",
250
- dest="pNewStackSet",
251
- metavar="The name of the new stackset",
252
- help="This is the name of the new stackset, which will manage the existing stack instances going forward.",
253
- )
254
-
255
- # Account filtering for selective migration operations
256
- local.add_argument(
257
- "-A",
258
- "--Account",
259
- dest="pAccountsToMove",
260
- default=None,
261
- nargs="*",
262
- metavar="Account Numbers",
263
- help="The account(s) to be moved from one stackset to another",
264
- )
265
-
266
- # Empty StackSet creation mode for template copying and validation
267
- local.add_argument(
268
- "--Empty",
269
- "--empty",
270
- dest="pEmpty",
271
- action="store_true",
272
- help="Whether to simply create an empty (but copied) new stackset from the 'old' stackset",
273
- )
274
-
275
- # Recovery mode operation for rollback and manual intervention scenarios
276
- local.add_argument(
277
- "--recovery", dest="pRecoveryFlag", action="store_true", help="Whether we should use the recovery file."
278
- )
279
-
280
- # Pre-migration drift detection for infrastructure validation
281
- local.add_argument(
282
- "--drift-check",
283
- dest="pDriftCheckFlag",
284
- action="store_true",
285
- help="Whether we should check for drift before moving instances",
286
- )
287
-
288
- # Parse and validate all command-line arguments
289
- return parser.my_parser.parse_args(args)
290
-
291
-
292
- def check_stack_set_drift_status(faws_acct: aws_acct_access, fStack_set_name: str, fOperation_id=None) -> dict:
293
- """
294
- Execute and monitor CloudFormation StackSet drift detection for pre-migration validation.
295
-
296
- Performs comprehensive drift detection analysis on CloudFormation StackSets to ensure
297
- infrastructure consistency before migration operations. Supports both drift detection
298
- initiation and operation monitoring with enterprise-grade error handling and parallel
299
- processing optimization for large-scale organizational environments.
300
-
301
- Args:
302
- faws_acct (aws_acct_access): Authenticated AWS account access object for API operations
303
- fStack_set_name (str): CloudFormation StackSet name for drift detection analysis
304
- fOperation_id (str, optional): Existing drift detection operation ID for monitoring.
305
- None triggers new drift detection initiation.
306
-
307
- Returns:
308
- dict: Comprehensive drift detection operation result containing:
309
- - OperationId: CloudFormation drift detection operation identifier
310
- - Success: Boolean indicating successful operation initiation or completion
311
- - ErrorMessage: Detailed error information for troubleshooting (on failure)
312
- - Drift detection status and statistics (when monitoring existing operation)
313
-
314
- Drift Detection Operations:
315
-
316
- **New Drift Detection Initiation (fOperation_id=None):**
317
- - CloudFormation detect_stack_set_drift API call with optimized preferences
318
- - Parallel region processing for performance optimization (RegionConcurrencyType: PARALLEL)
319
- - Failure tolerance configuration (10% FailureTolerancePercentage)
320
- - Maximum concurrency optimization (100% MaxConcurrentPercentage)
321
- - Operation ID capture for subsequent monitoring operations
322
-
323
- **Existing Operation Monitoring (fOperation_id provided):**
324
- - Continuous operation status polling using describe_stack_set_operation
325
- - Real-time drift detection progress monitoring and reporting
326
- - Comprehensive drift analysis including drifted, in-sync, and failed instances
327
- - Operation completion detection with detailed status reporting
328
-
329
- Enterprise Drift Detection Features:
330
- - Multi-region parallel processing for performance optimization
331
- - Configurable failure tolerance for resilient drift detection
332
- - Comprehensive error handling for various CloudFormation exceptions
333
- - Real-time operation monitoring with detailed progress reporting
334
- - Integration with pre-migration validation workflows
335
-
336
- Error Handling & Recovery:
337
- - InvalidOperationException: Handles invalid drift detection requests
338
- - StackSetNotFoundException: Validates StackSet existence before operations
339
- - OperationInProgressException: Detects and manages concurrent drift operations
340
- - Automatic operation ID extraction from error messages for recovery
341
-
342
- Drift Detection Status Analysis:
343
- - DriftStatus: IN_SYNC, DRIFTED indicating overall StackSet drift state
344
- - DriftDetectionStatus: COMPLETED, IN_PROGRESS, FAILED operation status
345
- - TotalStackInstancesCount: Complete inventory of StackSet instances
346
- - DriftedStackInstancesCount: Number of instances with configuration drift
347
- - InSyncStackInstancesCount: Number of instances matching expected configuration
348
- - FailedStackInstancesCount: Number of instances with drift detection failures
349
-
350
- Performance Optimizations:
351
- - Parallel region processing reducing drift detection time
352
- - Optimized failure tolerance balancing speed and reliability
353
- - Maximum concurrency configuration for large StackSet environments
354
- - Efficient operation polling patterns reducing API call overhead
355
-
356
- Enterprise Integration:
357
- - Integration with StackSet migration workflows for pre-migration validation
358
- - Comprehensive logging for operational visibility and troubleshooting
359
- - Error message preservation for enterprise monitoring and alerting
360
- - Operation ID management for long-running drift detection processes
361
-
362
- Security & Compliance:
363
- - AWS credential validation through authenticated account access
364
- - CloudFormation API permission validation for drift detection operations
365
- - Comprehensive audit logging for compliance and governance requirements
366
- - Error handling preventing sensitive information exposure
367
-
368
- Usage in Migration Context:
369
- - Pre-migration infrastructure consistency validation
370
- - Drift remediation requirement identification before StackSet migration
371
- - Risk assessment for migration operations on drifted infrastructure
372
- - Compliance validation for organizational governance requirements
373
- """
374
-
375
- import logging
376
-
377
- # Initialize CloudFormation client for drift detection operations
378
- client_cfn = faws_acct.session.client("cloudformation")
379
- return_response = dict()
380
- Sync_Has_Started = False
381
- time_waited = 0
382
-
383
- if fOperation_id is None:
384
- # Initiate new drift detection operation with enterprise-optimized preferences
385
- try:
386
- # Execute drift detection with parallel processing for performance optimization
387
- response = client_cfn.detect_stack_set_drift(
388
- StackSetName=fStack_set_name,
389
- OperationPreferences={
390
- "RegionConcurrencyType": "PARALLEL", # Parallel region processing for speed
391
- "FailureTolerancePercentage": 10, # 10% failure tolerance for resilience
392
- "MaxConcurrentPercentage": 100, # Maximum concurrency for performance
393
- },
394
- )
395
-
396
- # Capture operation ID for monitoring and return successful initiation
397
- fOperation_id = response["OperationId"]
398
- return_response = {"OperationId": fOperation_id, "Success": True}
399
-
400
- except client_cfn.exceptions.InvalidOperationException as myError:
401
- # Handle invalid drift detection operation requests
402
- logging.error(f"There's been an error: {myError}")
403
- return_response = {"ErrorMessage": myError, "Success": False}
404
-
405
- except client_cfn.exceptions.StackSetNotFoundException as myError:
406
- # Handle non-existent StackSet validation errors
407
- logging.error(f"There's been an error: {myError}")
408
- return_response = {"ErrorMessage": myError, "Success": False}
409
-
410
- except client_cfn.exceptions.OperationInProgressException as myError:
411
- # Handle concurrent drift detection operations with automatic recovery
412
- logging.error(f"There's a drift-detection process already running: {myError}")
413
-
414
- # Extract existing operation ID from error message for monitoring
415
- OperationId = myError.response["Error"]["Message"][myError.response["Error"]["Message"].rfind(":") + 2 :]
416
- return_response = {"OperationId": OperationId, "Success": True}
417
-
418
- return return_response
419
- else:
420
- # Monitor existing drift detection operation with comprehensive status analysis
421
- """
422
- The response we're going to get from this "describe" operation looks like this:
423
- {
424
- "StackSetOperation": {
425
- "OperationId": "4e23045a-xxxx-xxxx-xxxx-bad01ed6902a",
426
- "StackSetId": "Test4-IOT6:735b8599-xxxx-xxxx-xxxx-7bc78fe8b817",
427
- "Action": "DETECT_DRIFT",
428
- "Status": "SUCCEEDED",
429
- "OperationPreferences": {
430
- "RegionConcurrencyType": "PARALLEL",
431
- "RegionOrder": [],
432
- "FailureTolerancePercentage": 10,
433
- "MaxConcurrentPercentage": 100
434
- },
435
- "AdministrationRoleARN": "arn:aws:iam::517713657778:role/AWSCloudFormationStackSetAdministrationRole",
436
- "ExecutionRoleName": "AWSCloudFormationStackSetExecutionRole",
437
- "CreationTimestamp": "2022-09-19T16:55:44.358000+00:00",
438
- "EndTimestamp": "2022-09-19T16:59:32.138000+00:00",
439
- "StackSetDriftDetectionDetails": {
440
- "DriftStatus": "IN_SYNC",
441
- "DriftDetectionStatus": "COMPLETED",
442
- "LastDriftCheckTimestamp": "2022-09-19T16:59:00.324000+00:00",
443
- "TotalStackInstancesCount": 39,
444
- "DriftedStackInstancesCount": 0,
445
- "InSyncStackInstancesCount": 39,
446
- "InProgressStackInstancesCount": 0,
447
- "FailedStackInstancesCount": 0
448
- }
449
- }
450
- }
451
- """
452
- Finished = False
453
- while Finished is False:
454
- try:
455
- response = client_cfn.describe_stack_set_operation(
456
- StackSetName=fStack_set_name,
457
- OperationId=fOperation_id,
458
- )
459
- Start_Time = response["StackSetOperation"]["CreationTimestamp"]
460
- Operation_Status = response["StackSetOperation"]["Status"]
461
- if "StackSetDriftDetectionDetails" in response["StackSetOperation"].keys():
462
- Drift_Detection_Status = response["StackSetOperation"]["StackSetDriftDetectionDetails"][
463
- "DriftDetectionStatus"
464
- ]
465
- if (
466
- "LastDriftCheckTimestamp"
467
- in response["StackSetOperation"]["StackSetDriftDetectionDetails"].keys()
468
- ):
469
- Sync_Has_Started = True
470
- else:
471
- Sync_Has_Started = False
472
- if Operation_Status == "RUNNING" and Sync_Has_Started:
473
- # TODO: Give a decent status, Wait a little longer, and try again
474
- Last_Instances_Finished = response["StackSetOperation"]["StackSetDriftDetectionDetails"][
475
- "LastDriftCheckTimestamp"
476
- ]
477
- Check_Failed = response["StackSetOperation"]["StackSetDriftDetectionDetails"][
478
- "FailedStackInstancesCount"
479
- ]
480
- Total_Stack_Instances = response["StackSetOperation"]["StackSetDriftDetectionDetails"][
481
- "TotalStackInstancesCount"
482
- ]
483
- Drifted_Instances = response["StackSetOperation"]["StackSetDriftDetectionDetails"][
484
- "DriftedStackInstancesCount"
485
- ]
486
- In_Sync_Instances = response["StackSetOperation"]["StackSetDriftDetectionDetails"][
487
- "InSyncStackInstancesCount"
488
- ]
489
- Currently_Checking = response["StackSetOperation"]["StackSetDriftDetectionDetails"][
490
- "InProgressStackInstancesCount"
491
- ]
492
- Time_Taken = Last_Instances_Finished - Start_Time
493
- Checked_Instances = In_Sync_Instances + Drifted_Instances + Check_Failed
494
- Time_Left = (Time_Taken / Checked_Instances) * Currently_Checking
495
- print(
496
- f"{ERASE_LINE} It's taken {Time_Taken} to detect on {Checked_Instances} "
497
- f"instances, which means we probably have {Time_Left} left to go for {Currently_Checking} more stack instances",
498
- end="\r",
499
- )
500
- logging.info(f"{response}")
501
- return_response = {
502
- "OperationStatus": Operation_Status,
503
- "StartTime": Start_Time,
504
- "EndTime": None,
505
- "DriftedInstances": Drifted_Instances,
506
- "FailedInstances": Check_Failed,
507
- "StackInstancesChecked": Total_Stack_Instances,
508
- "Success": False,
509
- }
510
- Finished = False
511
- elif Operation_Status == "RUNNING" and not Sync_Has_Started:
512
- # TODO: Give a decent status, Wait a little longer, and try again
513
- time_waited += sleep_interval
514
- print(
515
- f"{ERASE_LINE} We're still waiting for the Sync to start... Sleeping for {time_waited} seconds",
516
- end="\r",
517
- )
518
- Finished = False
519
- elif Operation_Status == "SUCCEEDED":
520
- End_Time = response["StackSetOperation"]["EndTimestamp"]
521
- Total_Stack_Instances = response["StackSetOperation"]["StackSetDriftDetectionDetails"][
522
- "TotalStackInstancesCount"
523
- ]
524
- return_response.update(
525
- {
526
- "OperationStatus": Operation_Status,
527
- "StartTime": Start_Time,
528
- "EndTime": End_Time,
529
- "StackInstancesChecked": Total_Stack_Instances,
530
- "Success": True,
531
- }
532
- )
533
- Finished = True
534
- except client_cfn.exceptions.StackSetNotFoundException as myError:
535
- logging.error(f"There's been an error: {myError}")
536
- return_response = {"ErrorMessage": myError, "Success": False}
537
- Finished = True
538
- except client_cfn.exceptions.OperationNotFoundException as myError:
539
- logging.error(f"There's been an error: {myError}")
540
- return_response = {"ErrorMessage": myError, "Success": False}
541
- Finished = True
542
- logging.info(f"Sleeping for {sleep_interval} seconds")
543
- sleep(sleep_interval)
544
- return return_response
545
-
546
-
547
- def check_stack_set_status(faws_acct: aws_acct_access, fStack_set_name: str, fOperationId: str = None) -> dict:
548
- """
549
- response = client.describe_stack_set_operation(
550
- StackSetName='string',
551
- OperationId='string',
552
- CallAs='SELF'|'DELEGATED_ADMIN'
553
- )
554
- """
555
- import logging
556
-
557
- client_cfn = faws_acct.session.client("cloudformation")
558
- return_response = dict()
559
- # If the calling process couldn't supply the OpId, then we have to find it, based on the name of the stackset
560
- if fOperationId is None:
561
- # If there is no OperationId, they've called us after creating the stack-set itself,
562
- # so we need to check the status of the stack-set creation, and not the operations that happen to the stackset
563
- try:
564
- response = client_cfn.describe_stack_set(StackSetName=fStack_set_name, CallAs="SELF")["StackSet"]
565
- return_response["StackSetStatus"] = response["Status"]
566
- return_response["Success"] = True
567
- logging.info(f"Stackset: {fStack_set_name} | Status: {return_response['StackSetStatus']}")
568
- return return_response
569
- except client_cfn.exceptions.StackSetNotFoundException as myError:
570
- logging.error(f"Stack Set {fStack_set_name} Not Found: {myError}")
571
- return_response["Success"] = False
572
- return return_response
573
- try:
574
- response = client_cfn.describe_stack_set_operation(
575
- StackSetName=fStack_set_name, OperationId=fOperationId, CallAs="SELF"
576
- )["StackSetOperation"]
577
- return_response["StackSetStatus"] = response["Status"]
578
- return_response["Success"] = True
579
- except client_cfn.exceptions.StackSetNotFoundException as myError:
580
- print(f"StackSet Not Found: {myError}")
581
- return_response["Success"] = False
582
- except client_cfn.exceptions.OperationNotFoundException as myError:
583
- print(f"Operation Not Found: {myError}")
584
- return_response["Success"] = False
585
- return return_response
586
-
587
-
588
- def find_if_stack_set_exists(faws_acct: aws_acct_access, fStack_set_name: str) -> dict:
589
- """
590
- response = client.describe_stack_set(
591
- StackSetName='string',
592
- CallAs='SELF'|'DELEGATED_ADMIN'
593
- )
594
- """
595
- import logging
596
-
597
- logging.info(f"Verifying whether the stackset {fStack_set_name} in account {faws_acct.acct_number} exists")
598
- client_cfn = faws_acct.session.client("cloudformation")
599
- return_response = dict()
600
- try:
601
- response = client_cfn.describe_stack_set(StackSetName=fStack_set_name, CallAs="SELF")["StackSet"]
602
- return_response = {"Payload": response, "Success": True}
603
- except client_cfn.exceptions.StackSetNotFoundException as myError:
604
- logging.info(f"StackSet {fStack_set_name} not found in this account.")
605
- logging.debug(f"{myError}")
606
- return_response["Success"] = False
607
- return return_response
608
-
609
-
610
- def get_template_body_and_parameters(faws_acct: aws_acct_access, fExisting_stack_set_name: str) -> dict:
611
- """
612
- @param faws_acct: object
613
- @param fExisting_stack_set_name: The existing stackset name
614
- @return: return_response:
615
- 'stack_set_info' = stack_set_info
616
- 'Success' = True | False
617
-
618
- describe_stack_set output:
619
- {
620
- "StackSet": {
621
- "StackSetName": "AWS-Landing-Zone-Baseline-DemoRoles",
622
- "StackSetId": "AWS-Landing-Zone-Baseline-DemoRoles:872bab58-25b9-4785-8973-e7920cbe46d3",
623
- "Status": "ACTIVE",
624
- "TemplateBody":
625
- "AWSTemplateFormatVersion: "2010-09-09"
626
- "Description": Sample of a new role with the use of a managed policy, and a parameterized trust policy.
627
- Parameters:
628
- AdministratorAccountId:
629
- Type: String
630
- Default: "287201118218"
631
- Description: AWS Account Id of the administrator account.
632
- Resources:
633
- SampleRole:
634
- Type: "AWS::IAM::Role"
635
- Properties:
636
- RoleName: DemoRole
637
- Path: /
638
- AssumeRolePolicyDocument:
639
- Version: "2012-10-17"
640
- Statement:
641
- - Effect: "Allow"
642
- Principal:
643
- AWS:
644
- - !Sub 'arn:aws:iam::${AdministratorAccountId}:role/Owner'
645
- - !Sub 'arn:aws:iam::${AdministratorAccountId}:user/Paul'
646
- Action:
647
- - "sts:AssumeRole"
648
- ManagedPolicyArns:
649
- - arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess"
650
- "Parameters": [
651
- {
652
- "ParameterKey": "AdministratorAccountId",
653
- "ParameterValue": "517713657778",
654
- "UsePreviousValue": false
655
- }
656
- ],
657
- "Capabilities": [
658
- "CAPABILITY_NAMED_IAM"
659
- ],
660
- "Tags": [
661
- {
662
- "Key": "AWS_Solutions",
663
- "Value": "LandingZoneStackSet"
664
- }
665
- ],
666
- "StackSetARN": "arn:aws:cloudformation:us-east-1:517713657778:stackset/AWS-Landing-Zone-Baseline-DemoRoles:872bab58-25b9-4785-8973-e7920cbe46d3",
667
- "AdministrationRoleARN": "arn:aws:iam::517713657778:role/AWSCloudFormationStackSetAdministrationRole",
668
- "ExecutionRoleName": "AWSCloudFormationStackSetExecutionRole",
669
- "StackSetDriftDetectionDetails": {
670
- "DriftStatus": "NOT_CHECKED",
671
- "TotalStackInstancesCount": 0,
672
- "DriftedStackInstancesCount": 0,
673
- "InSyncStackInstancesCount": 0,
674
- "InProgressStackInstancesCount": 0,
675
- "FailedStackInstancesCount": 0
676
- },
677
- "OrganizationalUnitIds": []
678
- }
679
- }
680
- """
681
- import logging
682
-
683
- logging.info(f"Connecting to account {faws_acct.acct_number} to get info about stackset {fExisting_stack_set_name}")
684
- client_cfn = faws_acct.session.client("cloudformation")
685
- return_response = {"Success": False}
686
- try:
687
- stack_set_info = client_cfn.describe_stack_set(StackSetName=fExisting_stack_set_name)["StackSet"]
688
- return_response["stack_set_info"] = stack_set_info
689
- return_response["Success"] = True
690
- except client_cfn.exceptions.StackSetNotFoundException as myError:
691
- ErrorMessage = f"{fExisting_stack_set_name} doesn't seem to exist. Please check the spelling"
692
- print(f"{ErrorMessage}: {myError}")
693
- return_response["Success"] = False
694
- return return_response
695
-
696
-
697
- def compare_stacksets(faws_acct: aws_acct_access, fExisting_stack_set_name: str, fNew_stack_set_name: str) -> dict:
698
- """
699
- The idea here is to compare the templates and parameters of the stacksets, to ensure that the import will succeed.
700
- """
701
-
702
- return_response = {
703
- "Success": False,
704
- "TemplateComparison": False,
705
- "CapabilitiesComparison": False,
706
- "ParametersComparison": False,
707
- "TagsComparison": False,
708
- "DescriptionComparison": False,
709
- "ExecutionRoleComparison": False,
710
- }
711
- Stack_Set_Info_old = get_template_body_and_parameters(faws_acct, fExisting_stack_set_name)
712
- Stack_Set_Info_new = get_template_body_and_parameters(faws_acct, fNew_stack_set_name)
713
- # Time to compare - only the Template Body, Parameters, and Capabilities are critical to making sure the stackset works.
714
- return_response["TemplateComparison"] = (
715
- Stack_Set_Info_old["stack_set_info"]["TemplateBody"] == Stack_Set_Info_new["stack_set_info"]["TemplateBody"]
716
- )
717
- return_response["CapabilitiesComparison"] = (
718
- Stack_Set_Info_old["stack_set_info"]["Capabilities"] == Stack_Set_Info_new["stack_set_info"]["Capabilities"]
719
- )
720
- return_response["ParametersComparison"] = (
721
- Stack_Set_Info_old["stack_set_info"]["Parameters"] == Stack_Set_Info_new["stack_set_info"]["Parameters"]
722
- )
723
- return_response["TagsComparison"] = (
724
- Stack_Set_Info_old["stack_set_info"]["Tags"] == Stack_Set_Info_new["stack_set_info"]["Tags"]
725
- )
726
- try:
727
- return_response["DescriptionComparison"] = (
728
- Stack_Set_Info_old["stack_set_info"]["Description"] == Stack_Set_Info_new["stack_set_info"]["Description"]
729
- )
730
- except KeyError as myError:
731
- # This checks for the presence of the Description key before using it as a key for checking, to resolve an error when it's not there.
732
- if (
733
- "Description" in Stack_Set_Info_new["stack_set_info"].keys()
734
- and Stack_Set_Info_new["stack_set_info"]["Description"] == Default_Description_Text
735
- ):
736
- print(
737
- f"There was no description in the old StackSet, and creating a new one in this way requires one, so we've populated it with a default Description -- '{Default_Description_Text}'\n"
738
- f"This won't cause a problem with the migration, just something to note..."
739
- )
740
- return_response["DescriptionComparison"] = True
741
- else:
742
- logging.error(f"Description key isn't available... continuing anyway...")
743
- return_response["DescriptionComparison"] = True
744
- return_response["ExecutionRoleComparison"] = (
745
- Stack_Set_Info_old["stack_set_info"]["ExecutionRoleName"]
746
- == Stack_Set_Info_new["stack_set_info"]["ExecutionRoleName"]
747
- )
748
-
749
- if (
750
- return_response["TemplateComparison"]
751
- and return_response["CapabilitiesComparison"]
752
- and return_response["ParametersComparison"]
753
- ):
754
- return_response["Success"] = True
755
- return return_response
756
-
757
-
758
- def get_stack_ids_from_existing_stack_set(
759
- faws_acct: aws_acct_access, fExisting_stack_set_name: str, fAccountsToMove: list = None
760
- ) -> dict:
761
- """
762
- response = client.list_stack_instances(
763
- StackSetName='string',
764
- NextToken='string',
765
- MaxResults=123,
766
- Filters=[
767
- {
768
- 'Name': 'DETAILED_STATUS',
769
- 'Values': 'string'
770
- },
771
- ],
772
- StackInstanceAccount='string',
773
- StackInstanceRegion='string',
774
- CallAs='SELF'|'DELEGATED_ADMIN'
775
- )
776
- """
777
- import logging
778
-
779
- client_cfn = faws_acct.session.client("cloudformation")
780
- return_response = dict()
781
- try:
782
- response = client_cfn.list_stack_instances(StackSetName=fExisting_stack_set_name, CallAs="SELF")
783
- return_response["Stack_instances"] = response["Summaries"]
784
- while "NextToken" in response.keys():
785
- response = client_cfn.list_stack_instances(
786
- StackSetName=fExisting_stack_set_name, CallAs="SELF", NextToken=response["NextToken"]
787
- )
788
- return_response["Stack_instances"].extend(response["Summaries"])
789
- return_response["Success"] = True
790
- except client_cfn.exceptions.StackSetNotFoundException as myError:
791
- print(myError)
792
- return_response["Success"] = False
793
- if fAccountsToMove is None:
794
- logging.debug(f"No Account was specified, so all stack-instance-ids are being returned")
795
- pass
796
- else:
797
- # TODO: Replace this below with a "filter(lambda)" syntax
798
- return_response["Stack_instances"] = [
799
- stacksetinfo
800
- for stacksetinfo in return_response["Stack_instances"]
801
- if stacksetinfo["Account"] in fAccountsToMove
802
- ]
803
- logging.debug(
804
- f"Account {fAccountsToMove} was specified, so only the {len(return_response['Stack_instances'])} "
805
- f"stack-instance-ids matching th{'ose accounts' if len(fAccountsToMove) == 1 else 'at account'} are being returned"
806
- )
807
- return return_response
808
-
809
-
810
- def write_info_to_file(faws_acct: aws_acct_access, fstack_ids) -> dict:
811
- """
812
- Write the StackSet migration recovery data (profile, accounts, stackset names, and stack ids) to the recovery file as JSON.
813
- """
814
- import logging
815
-
816
- import simplejson as json
817
-
818
- # Create a dictionary that will represent everything we're trying to do
819
- try:
820
- StackSetsInfo = {
821
- "ProfileUsed": pProfile,
822
- "ManagementAccount": faws_acct.MgmtAccount,
823
- "Region": pRegion,
824
- "AccountNumber": faws_acct.acct_number,
825
- "AccountsToMove": pAccountsToMove,
826
- "OldStackSetName": pOldStackSet,
827
- "NewStackSetName": pNewStackSet,
828
- "stack_ids": fstack_ids,
829
- }
830
- logging.info(f"Writing data to the file {InfoFilename}")
831
- logging.debug(f"Here's the data we're writing: {StackSetsInfo}")
832
- file_data = json.dumps(StackSetsInfo, sort_keys=True, indent=4 * " ", default=str)
833
- with open(InfoFilename, "w") as out:
834
- print(file_data, file=out)
835
- return_response = {"Success": True}
836
- return return_response
837
- except Exception as myError:
838
- error_message = "There was a problem. Not sure... "
839
- logging.error(error_message)
840
- return_response = {"Success": False, "ErrorMessage": myError}
841
- return return_response
842
-
843
-
844
- def read_stack_info_from_file() -> dict:
845
- """
846
- Read previously saved StackSet migration recovery data back from the recovery file.
847
- """
848
- import logging
849
-
850
- import simplejson as json
851
-
852
- try:
853
- with open(InfoFilename) as input_file:
854
- my_input_file = json.load(input_file)
855
- return_response = {"Success": True, "Payload": my_input_file}
856
- return return_response
857
- except Exception as myError:
858
- error_message = "There was a problem. Not sure... "
859
- logging.error(error_message)
860
- return_response = {"Success": False, "ErrorMessage": myError}
861
- return return_response
862
-
863
-
864
- def create_stack_set_with_body_and_parameters(
865
- faws_acct: aws_acct_access, fNew_stack_set_name: str, fStack_set_info: dict
866
- ) -> dict:
867
- """
868
- response = client.create_stack_set(
869
- StackSetName='string',
870
- Description='string',
871
- TemplateBody='string',
872
- TemplateURL='string',
873
- StackId='string',
874
- Parameters=[
875
- {
876
- 'ParameterKey': 'string',
877
- 'ParameterValue': 'string',
878
- 'UsePreviousValue': True|False,
879
- 'ResolvedValue': 'string'
880
- },
881
- ],
882
- Capabilities=[
883
- 'CAPABILITY_IAM'|'CAPABILITY_NAMED_IAM'|'CAPABILITY_AUTO_EXPAND',
884
- ],
885
- Tags=[
886
- {
887
- 'Key': 'string',
888
- 'Value': 'string'
889
- },
890
- ],
891
- AdministrationRoleARN='string',
892
- ExecutionRoleName='string',
893
- PermissionModel='SERVICE_MANAGED'|'SELF_MANAGED',
894
- AutoDeployment={
895
- 'Enabled': True|False,
896
- 'RetainStacksOnAccountRemoval': True|False
897
- },
898
- CallAs='SELF'|'DELEGATED_ADMIN',
899
- ClientRequestToken='string'
900
- )
901
- """
902
- import logging
903
-
904
- logging.info(
905
- f"Creating a new stackset name {fNew_stack_set_name} in account {faws_acct.acct_number} with a template body, parameters, capabilities and tagging from this:"
906
- )
907
- logging.info(f"{fStack_set_info}")
908
- client_cfn = faws_acct.session.client("cloudformation")
909
- return_response = dict()
910
- # TODO: We should consider changing the template body to a template url to accommodate really big templates,
911
- # That would mean we need to have an S3 bucket to put the template, which we don't necessarily have at this point, so it's a bigger deal than you might immediately think.
912
- # However, this script doesn't check the size of the template ahead of time, so what happens if we try to create a new stackset template when the old one is too big?
913
-
914
- # TODO: This only creates a new stackset as a "Self-Managed" stackset.
915
- # We need to catch the scenario, when the old stackset was "Service-Managed" and decide whether we create the new one that way (which may be difficult, with automatic deployments, etc),
916
- # Or tell the user that we cannot create a new service-managed stackset, and do they want to create it as a self-managed instead?
917
- try:
918
- response = client_cfn.create_stack_set(
919
- StackSetName=fNew_stack_set_name,
920
- TemplateBody=fStack_set_info["TemplateBody"],
921
- Description=fStack_set_info["Description"]
922
- if "Description" in fStack_set_info.keys()
923
- else Default_Description_Text,
924
- Parameters=fStack_set_info["Parameters"],
925
- Capabilities=fStack_set_info["Capabilities"],
926
- Tags=fStack_set_info["Tags"],
927
- )
928
- return_response["StackSetId"] = response["StackSetId"]
929
- return_response["Success"] = True
930
- # There is currently no waiter to use for this operation...
931
- except (
932
- client_cfn.exceptions.NameAlreadyExistsException,
933
- client_cfn.exceptions.CreatedButModifiedException,
934
- client_cfn.exceptions.LimitExceededException,
935
- ) as myError:
936
- logging.error(f"Operation Failed: {myError}")
937
- return_response["Success"] = False
938
- return_response["Error_Message"] = myError.response["Error"]["Message"]
939
- return return_response
940
-
941
-
942
- def disconnect_stack_instances(faws_acct: aws_acct_access, fStack_instances: dict, fOldStackSet: str) -> dict:
943
- """
944
- response = client.delete_stack_instances(
945
- StackSetName='string',
946
- Accounts=[
947
- 'string',
948
- ],
949
- DeploymentTargets={
950
- 'Accounts': [
951
- 'string',
952
- ],
953
- 'AccountsUrl': 'string',
954
- 'OrganizationalUnitIds': [
955
- 'string',
956
- ]
957
- },
958
- Regions=[
959
- 'string',
960
- ],
961
- OperationPreferences={
962
- 'RegionConcurrencyType': 'SEQUENTIAL'|'PARALLEL',
963
- 'RegionOrder': [
964
- 'string',
965
- ],
966
- 'FailureToleranceCount': 123,
967
- 'FailureTolerancePercentage': 123,
968
- 'MaxConcurrentCount': 123,
969
- 'MaxConcurrentPercentage': 123
970
- },
971
- RetainStacks=True|False,
972
- OperationId='string',
973
- CallAs='SELF'|'DELEGATED_ADMIN'
974
- )
975
-
976
- """
977
- import logging
978
-
979
- logging.info(f"Disassociating stacks from {fOldStackSet}")
980
- return_response = dict()
981
- if len(fStack_instances["Stack_instances"]) == 0:
982
- return_response = {
983
- "Success": False,
984
- "ErrorMessage": f"Stackset {fOldStackSet} has no matching instances",
985
- "OperationId": None,
986
- }
987
- return return_response
988
- client_cfn = faws_acct.session.client("cloudformation")
989
- regions = set()
990
- accounts = set()
991
- for item in fStack_instances["Stack_instances"]:
992
- regions.add(item["Region"])
993
- accounts.add(item["Account"])
994
- try:
995
- response = client_cfn.delete_stack_instances(
996
- StackSetName=fOldStackSet,
997
- Accounts=list(accounts),
998
- Regions=list(regions),
999
- OperationPreferences={
1000
- "RegionConcurrencyType": "PARALLEL",
1001
- "FailureTolerancePercentage": 10,
1002
- "MaxConcurrentPercentage": 100,
1003
- },
1004
- RetainStacks=True,
1005
- CallAs="SELF",
1006
- )
1007
- return_response["OperationId"] = response["OperationId"]
1008
- return_response["Success"] = True
1009
- except client_cfn.exceptions.StackSetNotFoundException as myError:
1010
- logging.error(f"Operation Failed: {myError}")
1011
- return_response["Success"] = False
1012
- except client_cfn.exceptions.OperationInProgressException as myError:
1013
- logging.error(f"Operation Failed: {myError}")
1014
- return_response["Success"] = False
1015
- except client_cfn.exceptions.OperationIdAlreadyExistsException as myError:
1016
- logging.error(f"Operation Failed: {myError}")
1017
- return_response["Success"] = False
1018
- except client_cfn.exceptions.StaleRequestException as myError:
1019
- logging.error(f"Operation Failed: {myError}")
1020
- return_response["Success"] = False
1021
- except client_cfn.exceptions.InvalidOperationException as myError:
1022
- logging.error(f"Operation Failed: {myError}")
1023
- return_response["Success"] = False
1024
- stack_instance_operation_waiter = client_cfn.get_waiter("stack_delete_complete")
1025
- try:
1026
- stack_instance_operation_waiter.wait(StackName=fOldStackSet)
1027
- return_response["Success"] = True
1028
- except WaiterError as myError:
1029
- if "Max attempts exceeded" in myError:
1030
- logging.error(f"Import didn't complete within 600 seconds")
1031
- logging.error(myError)
1032
- return_response["Success"] = False
1033
- return return_response
1034
-
1035
-
1036
- def create_change_set_for_new_stack():
1037
- """
1038
- Do we need to do this?
1039
- """
1040
-
1041
-
1042
- def populate_new_stack_with_existing_stack_instances(
1043
- faws_acct: aws_acct_access, fStack_instance_info: list, fNew_stack_name: str
1044
- ) -> dict:
1045
- """
1046
- response = client.import_stacks_to_stack_set(
1047
- StackSetName='string',
1048
- StackIds=[
1049
- 'string',
1050
- ],
1051
- OperationPreferences={
1052
- 'RegionConcurrencyType': 'SEQUENTIAL'|'PARALLEL',
1053
- 'RegionOrder': [
1054
- 'string',
1055
- ],
1056
- 'FailureToleranceCount': 123,
1057
- 'FailureTolerancePercentage': 123,
1058
- 'MaxConcurrentCount': 123,
1059
- 'MaxConcurrentPercentage': 123
1060
- },
1061
- OperationId='string',
1062
- CallAs='SELF'|'DELEGATED_ADMIN'
1063
- )
1064
-
1065
- The Operation Id in the response is really important, because that's how we determine whether the operation is done (or a success),
1066
- so that we can add 10 more stacks... This can take a long time for a lot of instances...
1067
- """
1068
- import logging
1069
-
1070
- stack_instance_ids = [
1071
- stack_instance["StackId"]
1072
- for stack_instance in fStack_instance_info
1073
- if stack_instance["Status"] in ["CURRENT", "OUTDATED", "CREATE_COMPLETE", "UPDATE_COMPLETE"]
1074
- ]
1075
- logging.info(
1076
- f"Populating new stackset {fNew_stack_name} in account {faws_acct.acct_number} with stack_ids: {stack_instance_ids}"
1077
- )
1078
- client_cfn = faws_acct.session.client("cloudformation")
1079
- return_response = dict()
1080
- try:
1081
- response = client_cfn.import_stacks_to_stack_set(
1082
- StackSetName=fNew_stack_name,
1083
- StackIds=stack_instance_ids,
1084
- OperationPreferences={
1085
- "RegionConcurrencyType": "PARALLEL",
1086
- "FailureTolerancePercentage": 0,
1087
- "MaxConcurrentPercentage": 100,
1088
- },
1089
- CallAs="SELF",
1090
- )
1091
- return_response["OperationId"] = response["OperationId"]
1092
- return_response["Success"] = True
1093
- except client_cfn.exceptions.LimitExceededException as myError:
1094
- logging.error(f"Limit Exceeded: {myError}")
1095
- return_response["Success"] = False
1096
- return_response["ErrorMessage"] = myError
1097
- except client_cfn.exceptions.StackSetNotFoundException as myError:
1098
- logging.error(f"Stack Set Not Found: {myError}")
1099
- return_response["Success"] = False
1100
- return_response["ErrorMessage"] = myError
1101
- except client_cfn.exceptions.InvalidOperationException as myError:
1102
- logging.error(f"Invalid Operation: {myError}")
1103
- return_response["Success"] = False
1104
- return_response["ErrorMessage"] = myError
1105
- except client_cfn.exceptions.OperationInProgressException as myError:
1106
- logging.error(f"Operation is already in progress: {myError}")
1107
- return_response["Success"] = False
1108
- return_response["ErrorMessage"] = myError
1109
- except client_cfn.exceptions.StackNotFoundException as myError:
1110
- logging.error(f"Stack Not Found: {myError}")
1111
- return_response["Success"] = False
1112
- return_response["ErrorMessage"] = myError
1113
- except client_cfn.exceptions.StaleRequestException as myError:
1114
- logging.error(f"Stale Request: {myError}")
1115
- return_response["Success"] = False
1116
- return_response["ErrorMessage"] = myError
1117
- except ClientError as myError:
1118
- logging.error(f"Client Error: {myError}")
1119
- return_response["Success"] = False
1120
- return_response["ErrorMessage"] = myError
1121
- return return_response
1122
-
1123
-
1124
- ##################
1125
- # Main
1126
- ##################
1127
- if __name__ == "__main__":
1128
- args = parse_args(sys.argv[1:])
1129
- pProfile = args.Profile
1130
- pRegion = args.Region
1131
- pForce = args.Confirm
1132
- pTiming = args.Time
1133
- verbose = args.loglevel
1134
- pRecoveryFlag = args.pRecoveryFlag
1135
- pDriftCheck = args.pDriftCheckFlag
1136
- # version = args.Version
1137
- pOldStackSet = args.pOldStackSet
1138
- pNewStackSet = args.pNewStackSet
1139
- pAccountsToMove = args.pAccountsToMove
1140
- pEmpty = args.pEmpty
1141
- # Logging Settings
1142
- # Set Log Level
1143
- logging.basicConfig(level=verbose, format="[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
1144
- logging.getLogger("boto3").setLevel(logging.CRITICAL)
1145
- logging.getLogger("botocore").setLevel(logging.CRITICAL)
1146
- logging.getLogger("s3transfer").setLevel(logging.CRITICAL)
1147
- logging.getLogger("urllib3").setLevel(logging.CRITICAL)
1148
-
1149
- ERASE_LINE = "\x1b[2K"
1150
- # The time between checks to see if the stackset instances have been created, or imported...
1151
- sleep_interval = 5
1152
- begin_time = time()
1153
- Default_Description_Text = "This is a default description"
1154
- # Currently, this is a hard-stop at 10, but I made it a variable in case they up the limit
1155
- StackInstancesImportedAtOnce = 10
1156
- stack_ids = dict()
1157
-
1158
- aws_acct = aws_acct_access(pProfile)
1159
- datetime_extension = datetime.now().strftime("%Y%m%d-%H%M")
1160
- InfoFilename = f"{pOldStackSet}-{pNewStackSet}-{aws_acct.acct_number}-{pRegion}.{datetime_extension}"
1161
- Use_recovery_file = False
1162
-
1163
- if pDriftCheck:
1164
- drift_check_response = check_stack_set_drift_status(aws_acct, pOldStackSet)
1165
- print(drift_check_response)
1166
- print(f"Kicked off Drift Sync... now we'll wait {sleep_interval} seconds before checking on the process...")
1167
- sleep(sleep_interval)
1168
- if drift_check_response["Success"]:
1169
- drift_check_response2 = check_stack_set_drift_status(
1170
- aws_acct, pOldStackSet, drift_check_response["OperationId"]
1171
- )
1172
- Total_Stacksets = drift_check_response2["StackInstancesChecked"]
1173
- Drifted_Stacksets = (
1174
- drift_check_response2["DriftedInstances"]
1175
- if "DriftedInstances" in drift_check_response2.keys()
1176
- else None
1177
- )
1178
- Failed_Stacksets = drift_check_response2["FailedInstances"]
1179
- Duration = drift_check_response2["EndTime"] - drift_check_response2["StartTime"]
1180
- print()
1181
- print(
1182
- f"We're done checking the drift status of the old StackSet\n"
1183
- f"We found {Total_Stacksets} Total Stacksets\n"
1184
- f"{Drifted_Stacksets} have drifted, while\n"
1185
- f"{Failed_Stacksets} failed to be checked\n"
1186
- f"This process took {Duration} seconds to run"
1187
- )
1188
- logging.info(drift_check_response2)
1189
- print()
1190
- sys.exit("Exiting...")
1191
-
1192
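check_stack_set_drift_status is not shown in this hunk; conceptually, the kick-off-then-poll pattern driven above maps onto two CloudFormation calls, roughly as below (a sketch only, with field names taken from the boto3 describe_stack_set_operation response):

    from time import sleep

    def sketch_drift_summary(cfn_client, stack_set_name, interval=5):
        # Start drift detection, then poll the resulting operation until it completes.
        op_id = cfn_client.detect_stack_set_drift(StackSetName=stack_set_name)["OperationId"]
        while True:
            op = cfn_client.describe_stack_set_operation(
                StackSetName=stack_set_name, OperationId=op_id
            )["StackSetOperation"]
            if op["Status"] not in ("RUNNING", "QUEUED"):
                break
            sleep(interval)
        details = op.get("StackSetDriftDetectionDetails", {})
        return {
            "StackInstancesChecked": details.get("TotalStackInstancesCount"),
            "DriftedInstances": details.get("DriftedStackInstancesCount"),
            "FailedInstances": details.get("FailedStackInstancesCount"),
        }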
- if exists(InfoFilename) and pRecoveryFlag:
1193
- print(
1194
- f"You requested to use the recovery file {InfoFilename}, so we'll use that to pick up from where we left off"
1195
- )
1196
- Use_recovery_file = True
1197
- elif pRecoveryFlag:
1198
- print(
1199
- f"You requested to use the Recovery file, but we couldn't find one named {InfoFilename}, so we're exiting\n"
1200
- f"Please supply the proper StackSet, Region and Profile parameters so we can find the recovery file.",
1201
- file=sys.stderr,
1202
- )
1203
- sys.exit(5)
1204
- elif exists(InfoFilename):
1205
- print(f"There exists a recovery file for the parameters you've supplied, named {InfoFilename}\n")
1206
- Use_recovery_file = input(f"Do you want to use this file? (y/n): ") in ["y", "Y"]
1207
- if not Use_recovery_file:
1208
- print(
1209
- f"If you don't want to use that file, please change the filename, and re-run this script, to avoid over-writing it.",
1210
- file=sys.stderr,
1211
- )
1212
- sys.exit(6)
1213
-
1214
- if Use_recovery_file:
1215
- fileinput = read_stack_info_from_file()
1216
- AccountNumber = fileinput["Payload"]["AccountNumber"]
1217
- if not AccountNumber == aws_acct.acct_number:
1218
- print(
1219
- f"You're running this script referencing a different account than the one used last when the recovery file {InfoFilename} was created.\n"
1220
- f"Please make sure you finish that last task before starting a new one.\n",
1221
- file=sys.stderr,
1222
- )
1223
- sys.exit(4)
1224
- pAccountsToMove = fileinput["Payload"]["AccountsToMove"]
1225
- pOldStackSet = fileinput["Payload"]["OldStackSetName"]
1226
- pNewStackSet = fileinput["Payload"]["NewStackSetName"]
1227
- pRegion = fileinput["Payload"]["Region"]
1228
- stack_ids = fileinput["Payload"]["stack_ids"]
1229
-
1230
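write_info_to_file and read_stack_info_from_file are not shown in this hunk; judging by the Payload keys consumed above, the recovery-file round trip amounts to something like the following (helper names and the JSON layout are assumptions for illustration):

    import json

    def write_recovery_file(filename, acct_number, accounts, old_name, new_name, region, stack_ids):
        # Persist everything the script needs to resume the migration after a failure.
        payload = {
            "Payload": {
                "AccountNumber": acct_number,
                "AccountsToMove": accounts,
                "OldStackSetName": old_name,
                "NewStackSetName": new_name,
                "Region": region,
                "stack_ids": stack_ids,
            }
        }
        with open(filename, "w") as fh:
            json.dump(payload, fh, default=str)

    def read_recovery_file(filename):
        with open(filename) as fh:
            return json.load(fh)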
- # The following is just letting the user know what we're going to do in this script.
1231
- # Since this script is by nature intrusive, we want the user to confirm everything before they continue.
1232
- if pAccountsToMove is None:
1233
- logging.info(
1234
- f"Successfully connected to account {aws_acct.acct_number} to move stack instances "
1235
- f"from {pOldStackSet} to {pNewStackSet}"
1236
- )
1237
- else:
1238
- logging.info(
1239
- f"Connecting to account {aws_acct.acct_number} to move instances for accounts {pAccountsToMove}"
1240
- f" from {pOldStackSet} to {pNewStackSet}"
1241
- )
1242
- # Check to see if the new StackSet already exists, or we need to create it.
1243
- if find_if_stack_set_exists(aws_acct, pNewStackSet)["Success"]:
1244
- print(
1245
- f"{Fore.GREEN}The 'New' Stackset {pNewStackSet} exists within the account {aws_acct.acct_number}{Fore.RESET}"
1246
- )
1247
- NewStackSetExists = True
1248
- else:
1249
- print(
1250
- f"{Fore.RED}The 'New' Stackset {pNewStackSet} does not exist within the account {aws_acct.acct_number}{Fore.RESET}"
1251
- )
1252
- NewStackSetExists = False
1253
- # Check to see if the old StackSet exists, as they may have typed something wrong - or the recovery file was never deleted.
1254
- if find_if_stack_set_exists(aws_acct, pOldStackSet)["Success"]:
1255
- print(
1256
- f"{Fore.GREEN}The 'Old' Stackset {pOldStackSet} exists within the account {aws_acct.acct_number}{Fore.RESET}"
1257
- )
1258
- OldStackSetExists = True
1259
- else:
1260
- print(
1261
- f"{Fore.RED}The 'Old' Stackset {pOldStackSet} does not exist within the account {aws_acct.acct_number}{Fore.RESET}"
1262
- )
1263
- OldStackSetExists = False
1264
-
1265
- CompareTemplates = {"Success": False}
1266
- if OldStackSetExists and NewStackSetExists:
1267
- CompareTemplates = compare_stacksets(aws_acct, pOldStackSet, pNewStackSet)
1268
- if OldStackSetExists and not NewStackSetExists:
1269
- print()
1270
- print(
1271
- f"It looks like the new stack-set doesn't yet have a template assigned to it.\n"
1272
- f"We can simply copy over the template from the source stackset and copy to the new stackset.\n"
1273
- f"Please answer Y to the prompt below, if you're ok with that."
1274
- )
1275
- print()
1276
- elif not CompareTemplates["Success"]:
1277
- print()
1278
- print(
1279
- f"{Fore.RED}Ok - there's a problem here. The templates or parameters or capabilities in the two stacksets you provided don't match{Fore.RESET}\n"
1280
- f"It might be a very bad idea to try to import these stacksets, if the templates or other critical components don't match.\n"
1281
- f"I'd suggest strongly that you answer 'N' to the next prompt... "
1282
- )
1283
- print()
1284
- elif CompareTemplates["Success"] and not (
1285
- CompareTemplates["TagsComparison"]
1286
- and CompareTemplates["DescriptionComparison"]
1287
- and CompareTemplates["ExecutionRoleComparison"]
1288
- ):
1289
- print()
1290
- print(
1291
- f"{Fore.CYAN}Ok - there {Style.BRIGHT}might{Style.NORMAL} be a problem here. While the templates, parameters and capabilities in the two stacksets you provided match\n"
1292
- f"Either the Description, the Tags, or the ExecutionRole is different between the two stacksets.\n"
1293
- f"I'd suggest that you answer 'N' to the next prompt, and then investigate the differences\n"
1294
- f"No changes were made yet - so you can always run this script again.{Fore.RESET}"
1295
- )
1296
- print()
1297
-
1298
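compare_stacksets is not shown in this hunk; the distinction the warnings above draw between hard mismatches (template, parameters, capabilities) and soft ones (tags, description, execution role) can be sketched with one describe_stack_set call per stackset (illustrative only, not the module's implementation):

    def sketch_compare_stacksets(cfn_client, old_name, new_name):
        old = cfn_client.describe_stack_set(StackSetName=old_name)["StackSet"]
        new = cfn_client.describe_stack_set(StackSetName=new_name)["StackSet"]
        return {
            # Hard requirements: importing into a stackset whose template differs is a bad idea.
            "Success": old.get("TemplateBody") == new.get("TemplateBody")
            and old.get("Parameters", []) == new.get("Parameters", [])
            and sorted(old.get("Capabilities", [])) == sorted(new.get("Capabilities", [])),
            # Soft differences: worth investigating, but only worth a warning above.
            "TagsComparison": old.get("Tags", []) == new.get("Tags", []),
            "DescriptionComparison": old.get("Description") == new.get("Description"),
            "ExecutionRoleComparison": old.get("ExecutionRoleName") == new.get("ExecutionRoleName"),
        }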
- # Ignore whether or not the recovery file exists, since if it does - it's just updating the variables needed for this run.
1299
- # We shouldn't be doing much of anything differently, based on whether the recovery file exists.
1300
- if OldStackSetExists and pEmpty:
1301
- print(
1302
- f"You've asked to create an empty stackset called {pNewStackSet} from the existing stackset {pOldStackSet}"
1303
- )
1304
- print(
1305
- f"You specified accounts to move, but we're not doing that, since you asked for this stackset to be created empty."
1306
- ) if pAccountsToMove is not None else ""
1307
- """ Create new stackset from old stackset """
1308
- Stack_Set_Info = get_template_body_and_parameters(aws_acct, pOldStackSet)
1309
- # Creates the new stack
1310
- NewStackSetId = create_stack_set_with_body_and_parameters(
1311
- aws_acct, pNewStackSet, Stack_Set_Info["stack_set_info"]
1312
- )
1313
- logging.warning(f"Waiting for new stackset {pNewStackSet} to be created")
1314
- sleep(sleep_interval)
1315
- # Checks on the new stack creation
1316
- NewStackSetStatus = check_stack_set_status(aws_acct, pNewStackSet)
1317
- intervals_waited = 1
1318
- # If the creation effort (async) and the creation checking both succeeded...
1319
- if NewStackSetStatus["Success"] and NewStackSetId["Success"]:
1320
- # TODO: Fix message about length of time waiting...
1321
- while NewStackSetStatus["Success"] and NewStackSetStatus["StackSetStatus"] not in ["ACTIVE"]:
1322
- print(f"Waiting for StackSet {pNewStackSet} to be ready." * intervals_waited, end="\r")
1323
- sleep(sleep_interval)
1324
- intervals_waited += 1
1325
- NewStackSetStatus = check_stack_set_status(aws_acct, pNewStackSet)
1326
- print(f"{ERASE_LINE}Stackset {pNewStackSet} has been successfully created")
1327
- # TODO: Use the NewStackSetId Operation Id, to check if the empty new stackset has successfully been created
1328
- pass
1329
- # If only the creation effort (async) succeeded, but checking on that operation showed a failure...
1330
- elif NewStackSetStatus["Success"]:
1331
- print(
1332
- f"{Fore.RED}{pNewStackSet} appears to already exist. New stack set failed to be created. Exiting...{Fore.RESET}"
1333
- )
1334
- Failure_GoToEnd = True
1335
- sys.exit(98)
1336
- # Any other failure scenario
1337
- else:
1338
- print(f"{pNewStackSet} failed to be created. Exiting...")
1339
- Failure_GoToEnd = True
1340
- sys.exit(99)
1341
-
1342
- elif OldStackSetExists and not pEmpty:
1343
- print()
1344
- if not pForce: # Checking to see if they've specified no confirmations
1345
- User_Confirmation = input(f"Do you want to proceed with the migration? (y/n): ") in ["y", "Y"]
1346
- else:
1347
- User_Confirmation = True
1348
- if not User_Confirmation:
1349
- print(f"User cancelled script", file=sys.stderr)
1350
- Failure_GoToEnd = True
1351
- sys.exit(10)
1352
- # We would only get to this point if (for some reason) the script dies before a new stackset could be made.
1353
- # In that case, we may not have even written a recovery file yet.
1354
- if not NewStackSetExists: # We need to create the new stacksets
1355
- """
1356
- 1. Determine the template body of the existing stackset.
1357
- 2. Determine the parameters from the existing stackset
1358
- 2.5 Determine whether you need to specify "--capabilities CAPABILITY_NAMED_IAM" when creating the new stackset (see the create_stack_set sketch after this branch)
1359
- 3. Create a new stackset with the template body of the existing stackset.
1360
- """
1361
- print()
1362
- print(
1363
- f"You've asked for us to move stacksets from the existing stackset {pOldStackSet}"
1364
- f" and create a new stackset called: {pNewStackSet}\n"
1365
- f"Please note that we can only move {StackInstancesImportedAtOnce} stack instances at a time, so we may to loop a few times to do this."
1366
- )
1367
- if pAccountsToMove is not None:
1368
- print(f"But only for account {pAccountsToMove}")
1369
- print()
1370
- Stack_Set_Info = get_template_body_and_parameters(aws_acct, pOldStackSet)
1371
- NewStackSetId = create_stack_set_with_body_and_parameters(
1372
- aws_acct, pNewStackSet, Stack_Set_Info["stack_set_info"]
1373
- )
1374
- logging.warning(f"Waiting for new stackset {pNewStackSet} to be created")
1375
- sleep(sleep_interval)
1376
- NewStackSetStatus = check_stack_set_status(aws_acct, pNewStackSet)
1377
- intervals_waited = 1
1378
- if NewStackSetStatus["Success"]:
1379
- while NewStackSetStatus["Success"] and NewStackSetStatus["StackSetStatus"] not in ["ACTIVE"]:
1380
- print(f"Waiting for StackSet {pNewStackSet} to be ready", f"." * intervals_waited, end="\r")
1381
- sleep(sleep_interval)
1382
- intervals_waited += 1
1383
- NewStackSetStatus = check_stack_set_status(aws_acct, pNewStackSet)
1384
- print(f"{ERASE_LINE}Stackset {pNewStackSet} has been successfully created")
1385
- # TODO: Use the NewStackSetId Operation Id, to check if the empty new stackset has successfully been created
1386
- pass
1387
- else:
1388
- print(f"{pNewStackSet} failed to be created. Exiting...")
1389
- Failure_GoToEnd = True
1390
- sys.exit(99)
1391
-
1392
- else: # pNewStackSet *does* exist
1393
- # First time this script has run...
1394
- print("New Stack Set already exists...")
1395
-
1396
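The two helpers used in the creation branches above, get_template_body_and_parameters and create_stack_set_with_body_and_parameters, are not shown in this hunk; the steps 1-3 listed in the docstring map onto describe_stack_set plus create_stack_set, roughly like this (a sketch under those assumptions, not the module's implementation):

    def sketch_clone_stack_set(cfn_client, source_name, target_name):
        # Steps 1/2/2.5: pull the template body, parameters and capabilities from the source stackset.
        source = cfn_client.describe_stack_set(StackSetName=source_name)["StackSet"]
        # Step 3: create the new, empty stackset with the same definition.
        response = cfn_client.create_stack_set(
            StackSetName=target_name,
            TemplateBody=source["TemplateBody"],
            Parameters=source.get("Parameters", []),
            Capabilities=source.get("Capabilities", []),  # e.g. CAPABILITY_NAMED_IAM
            Description=source.get("Description", "Cloned stackset"),
            Tags=source.get("Tags", []),
        )
        return response["StackSetId"]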
- """ ######## This code is common across both use-cases ################## """
1397
- logging.debug(f"Getting Stack Ids from existing stack set {pOldStackSet}")
1398
- # **** 1. Get the stack-ids from the old stack-set ****
1399
- if Use_recovery_file:
1400
- pass
1401
- else:
1402
- """
1403
- 1. Get the stack-ids from the old stack-set - write them to a file (in case we need to recover the process)
1404
- """
1405
- stack_ids = get_stack_ids_from_existing_stack_set(aws_acct, pOldStackSet, pAccountsToMove)
1406
- logging.debug(f"Found {len(stack_ids)} stack ids from stackset {pOldStackSet}")
1407
- # Write the stack_ids info to a file, so we don't lose this info if the script fails
1408
- fileresult = write_info_to_file(aws_acct, stack_ids)
1409
- if not fileresult["Success"]:
1410
- print(f"Something went wrong.\nError Message: {fileresult['ErrorMessage']}")
1411
- Failure_GoToEnd = True
1412
- sys.exit(9)
1413
- # For every 10 stack-ids, use the OpId below to verify that the Operation has finished:
1414
- # **** 2. Remove the stack-instances from the old stack-set ****
1415
- logging.debug(f"Removing stack instances from stackset {pOldStackSet}")
1416
- DisconnectStackInstances = disconnect_stack_instances(aws_acct, stack_ids, pOldStackSet)
1417
- if not DisconnectStackInstances["Success"]:
1418
- if DisconnectStackInstances["ErrorMessage"].find("has no matching instances") > 0 and Use_recovery_file:
1419
- pass # This could be because the Old Stackset already had the instances disconnected when the script failed
1420
- else:
1421
- print(f"Failure... exiting due to: {DisconnectStackInstances['ErrorMessage']}")
1422
- Failure_GoToEnd = True
1423
- sys.exit(7)
1424
- logging.debug(f"Removed stack instances from {pOldStackSet}")
1425
- if DisconnectStackInstances["OperationId"] is not None:
1426
- StackInstancesAreGone = check_stack_set_status(
1427
- aws_acct, pOldStackSet, DisconnectStackInstances["OperationId"]
1428
- )
1429
- if not StackInstancesAreGone["Success"]:
1430
- Failure_GoToEnd = True
1431
- sys.exit(
1432
- f"There was a problem with removing the stack instances from stackset {pOldStackSet}. Exiting..."
1433
- )
1434
- logging.debug(
1435
- f"The operation id {DisconnectStackInstances['OperationId']} is {StackInstancesAreGone['StackSetStatus']}"
1436
- )
1437
- intervals_waited = 1
1438
- while StackInstancesAreGone["StackSetStatus"] in ["RUNNING"]:
1439
- print(
1440
- f"Waiting for stack instances to be disconnected from stackset {pOldStackSet} -",
1441
- # f"." * intervals_waited,
1442
- f"{sleep_interval * intervals_waited} seconds waited so far",
1443
- end="\r",
1444
- )
1445
- sleep(sleep_interval)
1446
- intervals_waited += 1
1447
- StackInstancesAreGone = check_stack_set_status(
1448
- aws_acct, pOldStackSet, DisconnectStackInstances["OperationId"]
1449
- )
1450
- if not StackInstancesAreGone["Success"]:
1451
- print(f"There was a problem with removing the stack instances from stackset {pOldStackSet}. Exiting...")
1452
- Failure_GoToEnd = True
1453
- sys.exit(8)
1454
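disconnect_stack_instances is not shown in this hunk, but the "disconnect" the comments above describe corresponds to a delete_stack_instances call with RetainStacks=True, which detaches the instances from the old StackSet while leaving the deployed stacks in place so they can be imported into the new one. A minimal sketch (argument names are illustrative):

    def sketch_disconnect_instances(cfn_client, stack_set_name, account_ids, regions):
        # RetainStacks=True removes the instances from the StackSet without deleting
        # the underlying CloudFormation stacks in the target accounts/regions.
        response = cfn_client.delete_stack_instances(
            StackSetName=stack_set_name,
            Accounts=account_ids,
            Regions=regions,
            RetainStacks=True,
        )
        return response["OperationId"]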
- # For every 10 stack-ids:
1455
- # **** 3. Import those stack-ids into the new stack-set, 10 at a time ****
1456
- x = 0
1457
- limit = StackInstancesImportedAtOnce
1458
- intervals_waited = 1
1459
- while x < len(stack_ids["Stack_instances"]):
1460
- stack_ids_subset = [
1461
- stack_ids["Stack_instances"][x + i] for i in range(limit) if x + i < len(stack_ids["Stack_instances"])
1462
- ]
1463
- x += limit
1464
- print(
1465
- f"{ERASE_LINE}Importing {len(stack_ids_subset)} of {len(stack_ids['Stack_instances'])} stacks into the new stackset now...",
1466
- end="\r",
1467
- )
1468
- ReconnectStackInstances = populate_new_stack_with_existing_stack_instances(
1469
- aws_acct, stack_ids_subset, pNewStackSet
1470
- )
1471
- if not ReconnectStackInstances["Success"]:
1472
- print(
1473
- f"Re-attaching the stack-instance to the new stackset seems to have failed."
1474
- f"The error received was: {ReconnectStackInstances['ErrorMessage']}"
1475
- )
1476
- print(
1477
- f"You'll have to resolve the issue that caused this problem, and then re-run this script using the recovery file."
1478
- )
1479
- Failure_GoToEnd = True
1480
- sys.exit(9)
1481
- StackReadyToImport = check_stack_set_status(aws_acct, pNewStackSet, ReconnectStackInstances["OperationId"])
1482
- if not StackReadyToImport["Success"]:
1483
- Failure_GoToEnd = True
1484
- sys.exit(
1485
- f"There was a problem with importing the stack instances into stackset {pNewStackSet}. Exiting..."
1486
- )
1487
- while StackReadyToImport["StackSetStatus"] in ["RUNNING", "QUEUED"]:
1488
- print(
1489
- f"{ERASE_LINE}Waiting for {len(stack_ids_subset)} more instances of StackSet {pNewStackSet} to finish importing -",
1490
- f"{sleep_interval * intervals_waited} seconds waited so far",
1491
- end="\r",
1492
- )
1493
- sleep(sleep_interval)
1494
- intervals_waited += 1
1495
- StackReadyToImport = check_stack_set_status(
1496
- aws_acct, pNewStackSet, ReconnectStackInstances["OperationId"]
1497
- )
1498
- if not StackReadyToImport["Success"]:
1499
- Failure_GoToEnd = True
1500
- sys.exit(
1501
- f"There was a problem with importing the stack instances into stackset {pNewStackSet}. Exiting..."
1502
- )
1503
- logging.info(f"{ERASE_LINE}That import took {intervals_waited * sleep_interval} seconds to complete")
1504
-
1505
- else: # Old Stackset doesn't exist - so there was a typo somewhere. Tell the user and exit
1506
- print(
1507
- f"It appears that the legacy stackset you provided {pOldStackSet} doesn't exist.\n"
1508
- f"Please check the spelling, or the account, and try again.\n\n"
1509
- f"{Fore.LIGHTBLUE_EX}Perhaps the recovery file was never deleted?{Fore.RESET}"
1510
- )
1511
-
1512
- # Delete the recovery file, if it exists
1513
- # TODO: Insert a check to make sure the recovery file isn't deleted, if we failed something above...
1514
- if exists(InfoFilename):
1515
- try:
1516
- remove(InfoFilename)  # os.remove returns None, so there's nothing useful to capture here
1517
- except OSError as myError:
1518
- print(myError)
1519
-
1520
- if pTiming:
1521
- print(ERASE_LINE)
1522
- print(f"{Fore.GREEN}This script took {time() - begin_time:.2f} seconds{Fore.RESET}")
1523
-
1524
- print()
1525
- print("Thank you for using this script")
1526
- print()