runbooks-0.7.0-py3-none-any.whl → runbooks-0.7.6-py3-none-any.whl

This diff shows the contents of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
Files changed (132)
  1. runbooks/__init__.py +87 -37
  2. runbooks/cfat/README.md +300 -49
  3. runbooks/cfat/__init__.py +2 -2
  4. runbooks/finops/__init__.py +1 -1
  5. runbooks/finops/cli.py +1 -1
  6. runbooks/inventory/collectors/__init__.py +8 -0
  7. runbooks/inventory/collectors/aws_management.py +791 -0
  8. runbooks/inventory/collectors/aws_networking.py +3 -3
  9. runbooks/main.py +3389 -782
  10. runbooks/operate/__init__.py +207 -0
  11. runbooks/operate/base.py +311 -0
  12. runbooks/operate/cloudformation_operations.py +619 -0
  13. runbooks/operate/cloudwatch_operations.py +496 -0
  14. runbooks/operate/dynamodb_operations.py +812 -0
  15. runbooks/operate/ec2_operations.py +926 -0
  16. runbooks/operate/iam_operations.py +569 -0
  17. runbooks/operate/s3_operations.py +1211 -0
  18. runbooks/operate/tagging_operations.py +655 -0
  19. runbooks/remediation/CLAUDE.md +100 -0
  20. runbooks/remediation/DOME9.md +218 -0
  21. runbooks/remediation/README.md +26 -0
  22. runbooks/remediation/Tests/__init__.py +0 -0
  23. runbooks/remediation/Tests/update_policy.py +74 -0
  24. runbooks/remediation/__init__.py +95 -0
  25. runbooks/remediation/acm_cert_expired_unused.py +98 -0
  26. runbooks/remediation/acm_remediation.py +875 -0
  27. runbooks/remediation/api_gateway_list.py +167 -0
  28. runbooks/remediation/base.py +643 -0
  29. runbooks/remediation/cloudtrail_remediation.py +908 -0
  30. runbooks/remediation/cloudtrail_s3_modifications.py +296 -0
  31. runbooks/remediation/cognito_active_users.py +78 -0
  32. runbooks/remediation/cognito_remediation.py +856 -0
  33. runbooks/remediation/cognito_user_password_reset.py +163 -0
  34. runbooks/remediation/commons.py +455 -0
  35. runbooks/remediation/dynamodb_optimize.py +155 -0
  36. runbooks/remediation/dynamodb_remediation.py +744 -0
  37. runbooks/remediation/dynamodb_server_side_encryption.py +108 -0
  38. runbooks/remediation/ec2_public_ips.py +134 -0
  39. runbooks/remediation/ec2_remediation.py +892 -0
  40. runbooks/remediation/ec2_subnet_disable_auto_ip_assignment.py +72 -0
  41. runbooks/remediation/ec2_unattached_ebs_volumes.py +448 -0
  42. runbooks/remediation/ec2_unused_security_groups.py +202 -0
  43. runbooks/remediation/kms_enable_key_rotation.py +651 -0
  44. runbooks/remediation/kms_remediation.py +717 -0
  45. runbooks/remediation/lambda_list.py +243 -0
  46. runbooks/remediation/lambda_remediation.py +971 -0
  47. runbooks/remediation/multi_account.py +569 -0
  48. runbooks/remediation/rds_instance_list.py +199 -0
  49. runbooks/remediation/rds_remediation.py +873 -0
  50. runbooks/remediation/rds_snapshot_list.py +192 -0
  51. runbooks/remediation/requirements.txt +118 -0
  52. runbooks/remediation/s3_block_public_access.py +159 -0
  53. runbooks/remediation/s3_bucket_public_access.py +143 -0
  54. runbooks/remediation/s3_disable_static_website_hosting.py +74 -0
  55. runbooks/remediation/s3_downloader.py +215 -0
  56. runbooks/remediation/s3_enable_access_logging.py +562 -0
  57. runbooks/remediation/s3_encryption.py +526 -0
  58. runbooks/remediation/s3_force_ssl_secure_policy.py +143 -0
  59. runbooks/remediation/s3_list.py +141 -0
  60. runbooks/remediation/s3_object_search.py +201 -0
  61. runbooks/remediation/s3_remediation.py +816 -0
  62. runbooks/remediation/scan_for_phrase.py +425 -0
  63. runbooks/remediation/workspaces_list.py +220 -0
  64. runbooks/security/__init__.py +9 -10
  65. runbooks/security/security_baseline_tester.py +4 -2
  66. runbooks-0.7.6.dist-info/METADATA +608 -0
  67. {runbooks-0.7.0.dist-info → runbooks-0.7.6.dist-info}/RECORD +84 -76
  68. {runbooks-0.7.0.dist-info → runbooks-0.7.6.dist-info}/entry_points.txt +0 -1
  69. {runbooks-0.7.0.dist-info → runbooks-0.7.6.dist-info}/top_level.txt +0 -1
  70. jupyter-agent/.env +0 -2
  71. jupyter-agent/.env.template +0 -2
  72. jupyter-agent/.gitattributes +0 -35
  73. jupyter-agent/.gradio/certificate.pem +0 -31
  74. jupyter-agent/README.md +0 -16
  75. jupyter-agent/__main__.log +0 -8
  76. jupyter-agent/app.py +0 -256
  77. jupyter-agent/cloudops-agent.png +0 -0
  78. jupyter-agent/ds-system-prompt.txt +0 -154
  79. jupyter-agent/jupyter-agent.png +0 -0
  80. jupyter-agent/llama3_template.jinja +0 -123
  81. jupyter-agent/requirements.txt +0 -9
  82. jupyter-agent/tmp/4ojbs8a02ir/jupyter-agent.ipynb +0 -68
  83. jupyter-agent/tmp/cm5iasgpm3p/jupyter-agent.ipynb +0 -91
  84. jupyter-agent/tmp/crqbsseag5/jupyter-agent.ipynb +0 -91
  85. jupyter-agent/tmp/hohanq1u097/jupyter-agent.ipynb +0 -57
  86. jupyter-agent/tmp/jns1sam29wm/jupyter-agent.ipynb +0 -53
  87. jupyter-agent/tmp/jupyter-agent.ipynb +0 -27
  88. jupyter-agent/utils.py +0 -409
  89. runbooks/aws/__init__.py +0 -58
  90. runbooks/aws/dynamodb_operations.py +0 -231
  91. runbooks/aws/ec2_copy_image_cross-region.py +0 -195
  92. runbooks/aws/ec2_describe_instances.py +0 -202
  93. runbooks/aws/ec2_ebs_snapshots_delete.py +0 -186
  94. runbooks/aws/ec2_run_instances.py +0 -213
  95. runbooks/aws/ec2_start_stop_instances.py +0 -212
  96. runbooks/aws/ec2_terminate_instances.py +0 -143
  97. runbooks/aws/ec2_unused_eips.py +0 -196
  98. runbooks/aws/ec2_unused_volumes.py +0 -188
  99. runbooks/aws/s3_create_bucket.py +0 -142
  100. runbooks/aws/s3_list_buckets.py +0 -152
  101. runbooks/aws/s3_list_objects.py +0 -156
  102. runbooks/aws/s3_object_operations.py +0 -183
  103. runbooks/aws/tagging_lambda_handler.py +0 -183
  104. runbooks/inventory/FAILED_SCRIPTS_TROUBLESHOOTING.md +0 -619
  105. runbooks/inventory/PASSED_SCRIPTS_GUIDE.md +0 -738
  106. runbooks/inventory/aws_organization.png +0 -0
  107. runbooks/inventory/cfn_move_stack_instances.py +0 -1526
  108. runbooks/inventory/delete_s3_buckets_objects.py +0 -169
  109. runbooks/inventory/lockdown_cfn_stackset_role.py +0 -224
  110. runbooks/inventory/update_aws_actions.py +0 -173
  111. runbooks/inventory/update_cfn_stacksets.py +0 -1215
  112. runbooks/inventory/update_cloudwatch_logs_retention_policy.py +0 -294
  113. runbooks/inventory/update_iam_roles_cross_accounts.py +0 -478
  114. runbooks/inventory/update_s3_public_access_block.py +0 -539
  115. runbooks/organizations/__init__.py +0 -12
  116. runbooks/organizations/manager.py +0 -374
  117. runbooks-0.7.0.dist-info/METADATA +0 -375
  118. /runbooks/inventory/{tests → Tests}/common_test_data.py +0 -0
  119. /runbooks/inventory/{tests → Tests}/common_test_functions.py +0 -0
  120. /runbooks/inventory/{tests → Tests}/script_test_data.py +0 -0
  121. /runbooks/inventory/{tests → Tests}/setup.py +0 -0
  122. /runbooks/inventory/{tests → Tests}/src.py +0 -0
  123. /runbooks/inventory/{tests/test_inventory_modules.py → Tests/test_Inventory_Modules.py} +0 -0
  124. /runbooks/inventory/{tests → Tests}/test_cfn_describe_stacks.py +0 -0
  125. /runbooks/inventory/{tests → Tests}/test_ec2_describe_instances.py +0 -0
  126. /runbooks/inventory/{tests → Tests}/test_lambda_list_functions.py +0 -0
  127. /runbooks/inventory/{tests → Tests}/test_moto_integration_example.py +0 -0
  128. /runbooks/inventory/{tests → Tests}/test_org_list_accounts.py +0 -0
  129. /runbooks/inventory/{Inventory_Modules.py → inventory_modules.py} +0 -0
  130. /runbooks/{aws → operate}/tags.json +0 -0
  131. {runbooks-0.7.0.dist-info → runbooks-0.7.6.dist-info}/WHEEL +0 -0
  132. {runbooks-0.7.0.dist-info → runbooks-0.7.6.dist-info}/licenses/LICENSE +0 -0
runbooks/operate/s3_operations.py (new file)
@@ -0,0 +1,1211 @@
"""
Enterprise-Grade S3 Operations Module.

Comprehensive S3 resource management with Lambda support, environment configuration,
validation utilities, and full compatibility with original AWS Cloud Foundations scripts.

Migrated and enhanced from:
- aws/s3_create_bucket.py (with bucket validation and region-specific creation)
- aws/s3_object_operations.py (with Lambda handlers and ACL support)
- aws/s3_list_objects.py (with pagination and filtering)
- aws/s3_list_buckets.py (with comprehensive listing)

Author: CloudOps DevOps Engineer
Date: 2025-01-21
Version: 2.0.0 - Enterprise Enhancement
"""

import json
import os
import re
from datetime import datetime
from typing import Any, Dict, List, Optional, Union

import boto3
from botocore.exceptions import BotoCoreError, ClientError
from loguru import logger

from runbooks.operate.base import BaseOperation, OperationContext, OperationResult, OperationStatus


class S3Operations(BaseOperation):
    """
    Enterprise-grade S3 resource operations and lifecycle management.

    Handles all S3-related operational tasks including bucket management,
    object operations, storage lifecycle management, and comprehensive validation.
    Supports environment variable configuration and AWS Lambda execution.
    """

    service_name = "s3"
    supported_operations = {
        "create_bucket",
        "delete_bucket",
        "put_object",
        "delete_object",
        "copy_object",
        "list_objects",
        "list_buckets",
        "set_bucket_policy",
        "set_bucket_versioning",
        "set_bucket_encryption",
        "set_lifecycle_configuration",
        "empty_bucket",
        "delete_bucket_and_objects",
        "set_public_access_block",
        "get_public_access_block",
        "sync_objects",
    }
    requires_confirmation = True

    def __init__(self, profile: Optional[str] = None, region: Optional[str] = None, dry_run: bool = False):
        """
        Initialize S3 operations with enhanced configuration support.

        Args:
            profile: AWS profile name (can be overridden by AWS_PROFILE env var)
            region: AWS region (can be overridden by AWS_REGION env var)
            dry_run: Dry run mode (can be overridden by DRY_RUN env var)
        """
        # Environment variable support for Lambda/Container deployment
        self.profile = profile or os.getenv("AWS_PROFILE")
        self.region = region or os.getenv("AWS_REGION", "us-east-1")
        self.dry_run = dry_run or os.getenv("DRY_RUN", "false").lower() == "true"

        super().__init__(self.profile, self.region, self.dry_run)

    def validate_bucket_name(self, bucket_name: str) -> None:
        """
        Validates an S3 bucket name based on AWS naming rules.

        Based on original aws/s3_create_bucket.py validation.

        Args:
            bucket_name: The bucket name to validate

        Raises:
            ValueError: If the bucket name is invalid
        """
        # AWS Bucket Naming Rules
        if len(bucket_name) < 3 or len(bucket_name) > 63:
            raise ValueError("Bucket name must be between 3 and 63 characters long.")

        if not re.match(r"^[a-z0-9.-]+$", bucket_name):
            raise ValueError("Bucket name can only contain lowercase letters, numbers, hyphens (-), and periods (.).")

        if bucket_name.startswith(".") or bucket_name.endswith("."):
            raise ValueError("Bucket name cannot start or end with a period (.).")

        if bucket_name.startswith("-") or bucket_name.endswith("-"):
            raise ValueError("Bucket name cannot start or end with a hyphen (-).")

        if ".." in bucket_name:
            raise ValueError("Bucket name cannot contain consecutive periods (..).")

        logger.info(f"✅ Bucket name '{bucket_name}' is valid.")

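    # --- Editor's note: illustrative usage sketch, not part of the packaged module. ---
    # The validator raises ValueError instead of returning False, so callers are
    # expected to wrap it in try/except; `ops` is a hypothetical instance:
    #
    #     ops = S3Operations(profile="default", region="us-east-1", dry_run=True)
    #     try:
    #         ops.validate_bucket_name("my.valid-bucket-01")
    #     except ValueError as err:
    #         print(f"Rejected: {err}")
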
    def format_object_list(self, objects: List[Dict]) -> List[Dict[str, str]]:
        """
        Format object list for display with size conversion and date formatting.

        Based on original aws/s3_list_objects.py formatting.

        Args:
            objects: List of S3 objects from API response

        Returns:
            Formatted list with human-readable data
        """
        formatted_objects = []
        for obj in objects:
            formatted_objects.append(
                {
                    "Key": obj["Key"],
                    "Size (KB)": f"{obj['Size'] / 1024:.2f}",  # Convert bytes to KB
                    "LastModified": obj["LastModified"].strftime("%Y-%m-%d %H:%M:%S"),
                }
            )
        return formatted_objects

    def execute_operation(self, context: OperationContext, operation_type: str, **kwargs) -> List[OperationResult]:
        """
        Execute S3 operation.

        Args:
            context: Operation context
            operation_type: Type of operation to execute
            **kwargs: Operation-specific arguments

        Returns:
            List of operation results
        """
        self.validate_context(context)

        if operation_type == "create_bucket":
            return self.create_bucket(context, **kwargs)
        elif operation_type == "delete_bucket":
            return self.delete_bucket(context, kwargs.get("bucket_name"))
        elif operation_type == "put_object":
            return self.put_object(context, **kwargs)
        elif operation_type == "delete_object":
            return self.delete_object(context, **kwargs)
        elif operation_type == "copy_object":
            return self.copy_object(context, **kwargs)
        elif operation_type == "list_objects":
            return self.list_objects(context, **kwargs)
        elif operation_type == "list_buckets":
            return self.list_buckets(context)
        elif operation_type == "set_bucket_policy":
            return self.set_bucket_policy(context, **kwargs)
        elif operation_type == "set_bucket_versioning":
            return self.set_bucket_versioning(context, **kwargs)
        elif operation_type == "set_bucket_encryption":
            return self.set_bucket_encryption(context, **kwargs)
        elif operation_type == "set_lifecycle_configuration":
            return self.set_lifecycle_configuration(context, **kwargs)
        elif operation_type == "empty_bucket":
            return self.empty_bucket(context, kwargs.get("bucket_name"))
        elif operation_type == "delete_bucket_and_objects":
            return self.delete_bucket_and_objects(context, kwargs.get("bucket_name"))
        elif operation_type == "set_public_access_block":
            return self.set_public_access_block(context, **kwargs)
        elif operation_type == "get_public_access_block":
            return self.get_public_access_block(context, kwargs.get("account_id"))
        elif operation_type == "sync_objects":
            return self.sync_objects(context, **kwargs)
        else:
            raise ValueError(f"Unsupported operation: {operation_type}")

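    # --- Editor's note, not part of the packaged module. ---
    # execute_operation lets a generic driver dispatch by operation name without
    # importing service-specific methods; assuming an OperationContext `ctx`:
    #
    #     results = ops.execute_operation(ctx, "list_objects", bucket_name="my-bucket", prefix="logs/")
    #
    # Caveat: "set_lifecycle_configuration" and "get_public_access_block" are
    # listed in supported_operations and dispatched above, but no such methods
    # are defined in this file as published, so those branches will raise
    # AttributeError unless the methods are provided elsewhere (e.g. BaseOperation).
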
    def create_bucket(
        self,
        context: OperationContext,
        bucket_name: str,
        region: Optional[str] = None,
        acl: str = "private",
        encryption: bool = True,
        versioning: bool = False,
        public_access_block: bool = True,
        tags: Optional[Dict[str, str]] = None,
    ) -> List[OperationResult]:
        """
        Create S3 bucket with security best practices.

        Args:
            context: Operation context
            bucket_name: Name of bucket to create
            region: AWS region for bucket (defaults to context region)
            acl: Bucket ACL (private, public-read, etc.)
            encryption: Enable server-side encryption
            versioning: Enable versioning
            public_access_block: Enable public access block
            tags: Bucket tags

        Returns:
            List of operation results
        """
        self.validate_bucket_name(bucket_name)

        bucket_region = region or context.region
        s3_client = self.get_client("s3", bucket_region)

        result = self.create_operation_result(context, "create_bucket", "s3:bucket", bucket_name)

        try:
            if context.dry_run:
                logger.info(f"[DRY-RUN] Would create bucket {bucket_name} in {bucket_region}")
                result.mark_completed(OperationStatus.DRY_RUN)
                return [result]

            # Create bucket
            create_params = {"Bucket": bucket_name, "ACL": acl}

            # Add location constraint for regions other than us-east-1
            if bucket_region != "us-east-1":
                create_params["CreateBucketConfiguration"] = {"LocationConstraint": bucket_region}

            response = self.execute_aws_call(s3_client, "create_bucket", **create_params)
            logger.info(f"Created bucket {bucket_name}")

            # Configure encryption
            if encryption:
                self.execute_aws_call(
                    s3_client,
                    "put_bucket_encryption",
                    Bucket=bucket_name,
                    ServerSideEncryptionConfiguration={
                        "Rules": [{"ApplyServerSideEncryptionByDefault": {"SSEAlgorithm": "AES256"}}]
                    },
                )
                logger.info(f"Enabled encryption for bucket {bucket_name}")

            # Configure versioning
            if versioning:
                self.execute_aws_call(
                    s3_client,
                    "put_bucket_versioning",
                    Bucket=bucket_name,
                    VersioningConfiguration={"Status": "Enabled"},
                )
                logger.info(f"Enabled versioning for bucket {bucket_name}")

            # Configure public access block
            if public_access_block:
                self.execute_aws_call(
                    s3_client,
                    "put_public_access_block",
                    Bucket=bucket_name,
                    PublicAccessBlockConfiguration={
                        "BlockPublicAcls": True,
                        "IgnorePublicAcls": True,
                        "BlockPublicPolicy": True,
                        "RestrictPublicBuckets": True,
                    },
                )
                logger.info(f"Enabled public access block for bucket {bucket_name}")

            # Apply tags
            if tags:
                tag_set = [{"Key": k, "Value": v} for k, v in tags.items()]
                self.execute_aws_call(s3_client, "put_bucket_tagging", Bucket=bucket_name, Tagging={"TagSet": tag_set})
                logger.info(f"Applied tags to bucket {bucket_name}")

            result.response_data = response
            result.mark_completed(OperationStatus.SUCCESS)

        except ClientError as e:
            error_msg = f"Failed to create bucket {bucket_name}: {e}"
            logger.error(error_msg)
            result.mark_completed(OperationStatus.FAILED, error_msg)

        return [result]

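    # --- Editor's note: illustrative usage sketch, not part of the packaged module. ---
    # The dry-run flag on the context is the intended safety valve: run once with
    # dry_run=True, inspect the log output, then re-run for real. `ctx` is assumed
    # to be a valid OperationContext:
    #
    #     results = ops.create_bucket(
    #         ctx,
    #         bucket_name="example-secure-bucket",
    #         versioning=True,
    #         tags={"owner": "platform-team"},
    #     )
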
    def delete_bucket(self, context: OperationContext, bucket_name: str) -> List[OperationResult]:
        """
        Delete S3 bucket.

        Args:
            context: Operation context
            bucket_name: Name of bucket to delete

        Returns:
            List of operation results
        """
        s3_client = self.get_client("s3")

        result = self.create_operation_result(context, "delete_bucket", "s3:bucket", bucket_name)

        try:
            if not self.confirm_operation(context, bucket_name, "delete bucket"):
                result.mark_completed(OperationStatus.CANCELLED, "Operation cancelled by user")
                return [result]

            if context.dry_run:
                logger.info(f"[DRY-RUN] Would delete bucket {bucket_name}")
                result.mark_completed(OperationStatus.DRY_RUN)
            else:
                # Check if bucket is empty
                try:
                    objects = self.execute_aws_call(s3_client, "list_objects_v2", Bucket=bucket_name, MaxKeys=1)
                    if objects.get("Contents"):
                        raise ValueError(f"Bucket {bucket_name} is not empty. Use empty_bucket operation first.")
                except ClientError as e:
                    if e.response["Error"]["Code"] != "NoSuchBucket":
                        raise

                response = self.execute_aws_call(s3_client, "delete_bucket", Bucket=bucket_name)

                result.response_data = response
                result.mark_completed(OperationStatus.SUCCESS)
                logger.info(f"Successfully deleted bucket {bucket_name}")

        except ClientError as e:
            error_msg = f"Failed to delete bucket {bucket_name}: {e}"
            logger.error(error_msg)
            result.mark_completed(OperationStatus.FAILED, error_msg)

        return [result]

    def put_object(
        self,
        context: OperationContext,
        bucket_name: str,
        key: str,
        body: Optional[Union[str, bytes]] = None,
        file_path: Optional[str] = None,
        content_type: Optional[str] = None,
        metadata: Optional[Dict[str, str]] = None,
        tags: Optional[Dict[str, str]] = None,
    ) -> List[OperationResult]:
        """
        Upload object to S3 bucket.

        Args:
            context: Operation context
            bucket_name: Target bucket name
            key: Object key (path)
            body: Object content as string or bytes
            file_path: Path to local file to upload
            content_type: MIME type of object
            metadata: Object metadata
            tags: Object tags

        Returns:
            List of operation results
        """
        s3_client = self.get_client("s3")

        result = self.create_operation_result(context, "put_object", "s3:object", f"{bucket_name}/{key}")

        try:
            if context.dry_run:
                logger.info(f"[DRY-RUN] Would upload object to s3://{bucket_name}/{key}")
                result.mark_completed(OperationStatus.DRY_RUN)
                return [result]

            put_params = {"Bucket": bucket_name, "Key": key}

            if body is not None:
                put_params["Body"] = body
            elif file_path:
                with open(file_path, "rb") as f:
                    put_params["Body"] = f.read()
            else:
                raise ValueError("Either body or file_path must be provided")

            if content_type:
                put_params["ContentType"] = content_type
            if metadata:
                put_params["Metadata"] = metadata

            response = self.execute_aws_call(s3_client, "put_object", **put_params)

            # Apply tags if provided (put_object_tagging expects a structured TagSet,
            # not the URL-encoded string form accepted by put_object)
            if tags:
                tag_set = [{"Key": k, "Value": v} for k, v in tags.items()]
                self.execute_aws_call(
                    s3_client, "put_object_tagging", Bucket=bucket_name, Key=key, Tagging={"TagSet": tag_set}
                )

            result.response_data = response
            result.mark_completed(OperationStatus.SUCCESS)
            logger.info(f"Successfully uploaded object to s3://{bucket_name}/{key}")

        except ClientError as e:
            error_msg = f"Failed to upload object to s3://{bucket_name}/{key}: {e}"
            logger.error(error_msg)
            result.mark_completed(OperationStatus.FAILED, error_msg)
        except Exception as e:
            error_msg = f"Unexpected error uploading to s3://{bucket_name}/{key}: {e}"
            logger.error(error_msg)
            result.mark_completed(OperationStatus.FAILED, error_msg)

        return [result]

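    # --- Editor's note: illustrative usage sketch, not part of the packaged module. ---
    # Exactly one of `body` or `file_path` must be supplied:
    #
    #     ops.put_object(ctx, bucket_name="example-bucket", key="hello.txt", body="hello world")
    #     ops.put_object(
    #         ctx,
    #         bucket_name="example-bucket",
    #         key="report.csv",
    #         file_path="/tmp/report.csv",
    #         content_type="text/csv",
    #         tags={"source": "nightly-job"},
    #     )
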
    def delete_object(
        self, context: OperationContext, bucket_name: str, key: str, version_id: Optional[str] = None
    ) -> List[OperationResult]:
        """
        Delete object from S3 bucket.

        Args:
            context: Operation context
            bucket_name: Source bucket name
            key: Object key to delete
            version_id: Specific version to delete (for versioned buckets)

        Returns:
            List of operation results
        """
        s3_client = self.get_client("s3")

        result = self.create_operation_result(context, "delete_object", "s3:object", f"{bucket_name}/{key}")

        try:
            if not self.confirm_operation(context, f"s3://{bucket_name}/{key}", "delete object"):
                result.mark_completed(OperationStatus.CANCELLED, "Operation cancelled by user")
                return [result]

            if context.dry_run:
                logger.info(f"[DRY-RUN] Would delete object s3://{bucket_name}/{key}")
                result.mark_completed(OperationStatus.DRY_RUN)
            else:
                delete_params = {"Bucket": bucket_name, "Key": key}
                if version_id:
                    delete_params["VersionId"] = version_id

                response = self.execute_aws_call(s3_client, "delete_object", **delete_params)

                result.response_data = response
                result.mark_completed(OperationStatus.SUCCESS)
                logger.info(f"Successfully deleted object s3://{bucket_name}/{key}")

        except ClientError as e:
            error_msg = f"Failed to delete object s3://{bucket_name}/{key}: {e}"
            logger.error(error_msg)
            result.mark_completed(OperationStatus.FAILED, error_msg)

        return [result]

    def copy_object(
        self,
        context: OperationContext,
        source_bucket: str,
        source_key: str,
        destination_bucket: str,
        destination_key: str,
        metadata_directive: str = "COPY",
    ) -> List[OperationResult]:
        """
        Copy object between S3 locations.

        Args:
            context: Operation context
            source_bucket: Source bucket name
            source_key: Source object key
            destination_bucket: Destination bucket name
            destination_key: Destination object key
            metadata_directive: COPY or REPLACE metadata

        Returns:
            List of operation results
        """
        s3_client = self.get_client("s3")

        result = self.create_operation_result(context, "copy_object", "s3:object", f"{source_bucket}/{source_key}")

        try:
            if context.dry_run:
                logger.info(
                    f"[DRY-RUN] Would copy s3://{source_bucket}/{source_key} to s3://{destination_bucket}/{destination_key}"
                )
                result.mark_completed(OperationStatus.DRY_RUN)
            else:
                copy_source = {"Bucket": source_bucket, "Key": source_key}

                response = self.execute_aws_call(
                    s3_client,
                    "copy_object",
                    CopySource=copy_source,
                    Bucket=destination_bucket,
                    Key=destination_key,
                    MetadataDirective=metadata_directive,
                )

                result.response_data = response
                result.mark_completed(OperationStatus.SUCCESS)
                logger.info(f"Successfully copied object to s3://{destination_bucket}/{destination_key}")

        except ClientError as e:
            error_msg = f"Failed to copy object: {e}"
            logger.error(error_msg)
            result.mark_completed(OperationStatus.FAILED, error_msg)

        return [result]

    def empty_bucket(self, context: OperationContext, bucket_name: str) -> List[OperationResult]:
        """
        Delete all objects in S3 bucket.

        Args:
            context: Operation context
            bucket_name: Bucket to empty

        Returns:
            List of operation results
        """
        s3_client = self.get_client("s3")

        result = self.create_operation_result(context, "empty_bucket", "s3:bucket", bucket_name)

        try:
            if not self.confirm_operation(context, bucket_name, "empty bucket"):
                result.mark_completed(OperationStatus.CANCELLED, "Operation cancelled by user")
                return [result]

            if context.dry_run:
                logger.info(f"[DRY-RUN] Would empty bucket {bucket_name}")
                result.mark_completed(OperationStatus.DRY_RUN)
                return [result]

            # List and delete all objects
            deleted_count = 0
            paginator = s3_client.get_paginator("list_objects_v2")

            for page in paginator.paginate(Bucket=bucket_name):
                objects = page.get("Contents", [])

                if objects:
                    delete_keys = [{"Key": obj["Key"]} for obj in objects]

                    self.execute_aws_call(
                        s3_client, "delete_objects", Bucket=bucket_name, Delete={"Objects": delete_keys}
                    )

                    deleted_count += len(delete_keys)

            # Handle versioned objects
            version_paginator = s3_client.get_paginator("list_object_versions")
            for page in version_paginator.paginate(Bucket=bucket_name):
                versions = page.get("Versions", []) + page.get("DeleteMarkers", [])

                if versions:
                    delete_keys = [{"Key": obj["Key"], "VersionId": obj["VersionId"]} for obj in versions]

                    self.execute_aws_call(
                        s3_client, "delete_objects", Bucket=bucket_name, Delete={"Objects": delete_keys}
                    )

                    deleted_count += len(delete_keys)

            result.response_data = {"deleted_objects": deleted_count}
            result.mark_completed(OperationStatus.SUCCESS)
            logger.info(f"Successfully emptied bucket {bucket_name}, deleted {deleted_count} objects")

        except ClientError as e:
            error_msg = f"Failed to empty bucket {bucket_name}: {e}"
            logger.error(error_msg)
            result.mark_completed(OperationStatus.FAILED, error_msg)

        return [result]

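    # --- Editor's note, not part of the packaged module. ---
    # empty_bucket deliberately makes two passes: list_objects_v2 removes current
    # objects, then list_object_versions removes old versions and delete markers.
    # The second pass is what allows a later delete_bucket call to succeed on
    # versioned buckets, since S3 refuses to delete a bucket that still holds
    # any object version or delete marker.
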
    def set_bucket_policy(
        self, context: OperationContext, bucket_name: str, policy: Union[str, Dict[str, Any]]
    ) -> List[OperationResult]:
        """
        Set S3 bucket policy.

        Args:
            context: Operation context
            bucket_name: Target bucket name
            policy: Bucket policy as JSON string or dict

        Returns:
            List of operation results
        """
        s3_client = self.get_client("s3")

        result = self.create_operation_result(context, "set_bucket_policy", "s3:bucket", bucket_name)

        try:
            if context.dry_run:
                logger.info(f"[DRY-RUN] Would set policy on bucket {bucket_name}")
                result.mark_completed(OperationStatus.DRY_RUN)
            else:
                policy_json = policy if isinstance(policy, str) else json.dumps(policy)

                response = self.execute_aws_call(s3_client, "put_bucket_policy", Bucket=bucket_name, Policy=policy_json)

                result.response_data = response
                result.mark_completed(OperationStatus.SUCCESS)
                logger.info(f"Successfully set policy on bucket {bucket_name}")

        except ClientError as e:
            error_msg = f"Failed to set bucket policy on {bucket_name}: {e}"
            logger.error(error_msg)
            result.mark_completed(OperationStatus.FAILED, error_msg)

        return [result]

    def set_bucket_versioning(
        self, context: OperationContext, bucket_name: str, status: str = "Enabled"
    ) -> List[OperationResult]:
        """
        Configure S3 bucket versioning.

        Args:
            context: Operation context
            bucket_name: Target bucket name
            status: Versioning status (Enabled, Suspended)

        Returns:
            List of operation results
        """
        s3_client = self.get_client("s3")

        result = self.create_operation_result(context, "set_bucket_versioning", "s3:bucket", bucket_name)

        try:
            if context.dry_run:
                logger.info(f"[DRY-RUN] Would set versioning to {status} on bucket {bucket_name}")
                result.mark_completed(OperationStatus.DRY_RUN)
            else:
                response = self.execute_aws_call(
                    s3_client, "put_bucket_versioning", Bucket=bucket_name, VersioningConfiguration={"Status": status}
                )

                result.response_data = response
                result.mark_completed(OperationStatus.SUCCESS)
                logger.info(f"Successfully set versioning to {status} on bucket {bucket_name}")

        except ClientError as e:
            error_msg = f"Failed to set versioning on bucket {bucket_name}: {e}"
            logger.error(error_msg)
            result.mark_completed(OperationStatus.FAILED, error_msg)

        return [result]

    def set_bucket_encryption(
        self,
        context: OperationContext,
        bucket_name: str,
        sse_algorithm: str = "AES256",
        kms_master_key_id: Optional[str] = None,
    ) -> List[OperationResult]:
        """
        Configure S3 bucket encryption.

        Args:
            context: Operation context
            bucket_name: Target bucket name
            sse_algorithm: Encryption algorithm (AES256, aws:kms)
            kms_master_key_id: KMS key ID for aws:kms encryption

        Returns:
            List of operation results
        """
        s3_client = self.get_client("s3")

        result = self.create_operation_result(context, "set_bucket_encryption", "s3:bucket", bucket_name)

        try:
            if context.dry_run:
                logger.info(f"[DRY-RUN] Would set encryption on bucket {bucket_name}")
                result.mark_completed(OperationStatus.DRY_RUN)
            else:
                encryption_rule = {"ApplyServerSideEncryptionByDefault": {"SSEAlgorithm": sse_algorithm}}

                if sse_algorithm == "aws:kms" and kms_master_key_id:
                    encryption_rule["ApplyServerSideEncryptionByDefault"]["KMSMasterKeyID"] = kms_master_key_id

                response = self.execute_aws_call(
                    s3_client,
                    "put_bucket_encryption",
                    Bucket=bucket_name,
                    ServerSideEncryptionConfiguration={"Rules": [encryption_rule]},
                )

                result.response_data = response
                result.mark_completed(OperationStatus.SUCCESS)
                logger.info(f"Successfully set encryption on bucket {bucket_name}")

        except ClientError as e:
            error_msg = f"Failed to set encryption on bucket {bucket_name}: {e}"
            logger.error(error_msg)
            result.mark_completed(OperationStatus.FAILED, error_msg)

        return [result]

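    # --- Editor's note: illustrative usage sketch, not part of the packaged module. ---
    # Policies can be passed as a dict and are serialized internally; the statement
    # below (a common deny-insecure-transport rule) is only an example:
    #
    #     deny_http = {
    #         "Version": "2012-10-17",
    #         "Statement": [{
    #             "Effect": "Deny",
    #             "Principal": "*",
    #             "Action": "s3:*",
    #             "Resource": ["arn:aws:s3:::example-bucket", "arn:aws:s3:::example-bucket/*"],
    #             "Condition": {"Bool": {"aws:SecureTransport": "false"}},
    #         }],
    #     }
    #     ops.set_bucket_policy(ctx, "example-bucket", deny_http)
    #     ops.set_bucket_encryption(ctx, "example-bucket", sse_algorithm="aws:kms",
    #                               kms_master_key_id="alias/example-key")
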
    def delete_bucket_and_objects(self, context: OperationContext, bucket_name: str) -> List[OperationResult]:
        """
        Delete S3 bucket and all its objects/versions (complete cleanup).

        Migrated from inventory/delete_s3_buckets_objects.py

        Args:
            context: Operation context
            bucket_name: Bucket to delete completely

        Returns:
            List of operation results
        """
        s3_client = self.get_client("s3")

        result = self.create_operation_result(context, "delete_bucket_and_objects", "s3:bucket", bucket_name)

        try:
            if not self.confirm_operation(context, bucket_name, "delete bucket and all objects"):
                result.mark_completed(OperationStatus.CANCELLED, "Operation cancelled by user")
                return [result]

            if context.dry_run:
                logger.info(f"[DRY-RUN] Would delete bucket {bucket_name} and all objects")
                result.mark_completed(OperationStatus.DRY_RUN)
                return [result]

            # First empty the bucket
            empty_results = self.empty_bucket(context, bucket_name)
            if not empty_results or not empty_results[0].success:
                result.mark_completed(OperationStatus.FAILED, "Failed to empty bucket before deletion")
                return [result]

            # Then delete the bucket
            response = self.execute_aws_call(s3_client, "delete_bucket", Bucket=bucket_name)

            result.response_data = response
            result.mark_completed(OperationStatus.SUCCESS)
            logger.info(f"Successfully deleted bucket {bucket_name} and all objects")

        except ClientError as e:
            error_msg = f"Failed to delete bucket and objects {bucket_name}: {e}"
            logger.error(error_msg)
            result.mark_completed(OperationStatus.FAILED, error_msg)

        return [result]

    def set_public_access_block(
        self,
        context: OperationContext,
        account_id: Optional[str] = None,
        bucket_name: Optional[str] = None,
        block_public_acls: bool = True,
        ignore_public_acls: bool = True,
        block_public_policy: bool = True,
        restrict_public_buckets: bool = True,
    ) -> List[OperationResult]:
        """
        Configure S3 public access block settings.

        Migrated from inventory/update_s3_public_access_block.py

        Args:
            context: Operation context
            account_id: Account ID for account-level settings
            bucket_name: Bucket name for bucket-level settings
            block_public_acls: Block public ACLs
            ignore_public_acls: Ignore public ACLs
            block_public_policy: Block public bucket policies
            restrict_public_buckets: Restrict public bucket access

        Returns:
            List of operation results
        """
        if account_id:
            # Account-level public access block
            s3control_client = self.get_client("s3control")
            resource_id = f"account:{account_id}"
        elif bucket_name:
            # Bucket-level public access block
            s3_client = self.get_client("s3")
            resource_id = f"bucket:{bucket_name}"
        else:
            raise ValueError("Either account_id or bucket_name must be provided")

        result = self.create_operation_result(context, "set_public_access_block", "s3:public_access_block", resource_id)

        try:
            public_access_block_config = {
                "BlockPublicAcls": block_public_acls,
                "IgnorePublicAcls": ignore_public_acls,
                "BlockPublicPolicy": block_public_policy,
                "RestrictPublicBuckets": restrict_public_buckets,
            }

            if context.dry_run:
                logger.info(f"[DRY-RUN] Would set public access block on {resource_id}")
                result.mark_completed(OperationStatus.DRY_RUN)
            else:
                if account_id:
                    response = self.execute_aws_call(
                        s3control_client,
                        "put_public_access_block",
                        AccountId=account_id,
                        PublicAccessBlockConfiguration=public_access_block_config,
                    )
                else:
                    response = self.execute_aws_call(
                        s3_client,
                        "put_public_access_block",
                        Bucket=bucket_name,
                        PublicAccessBlockConfiguration=public_access_block_config,
                    )

                result.response_data = response
                result.mark_completed(OperationStatus.SUCCESS)
                logger.info(f"Successfully set public access block on {resource_id}")

        except ClientError as e:
            error_msg = f"Failed to set public access block on {resource_id}: {e}"
            logger.error(error_msg)
            result.mark_completed(OperationStatus.FAILED, error_msg)

        return [result]

    def sync_objects(
        self,
        context: OperationContext,
        source_bucket: str,
        destination_bucket: str,
        source_prefix: Optional[str] = None,
        destination_prefix: Optional[str] = None,
        delete_removed: bool = False,
        exclude_patterns: Optional[List[str]] = None,
    ) -> List[OperationResult]:
        """
        Synchronize objects between S3 buckets or prefixes.

        Args:
            context: Operation context
            source_bucket: Source bucket name
            destination_bucket: Destination bucket name
            source_prefix: Source prefix to sync from
            destination_prefix: Destination prefix to sync to
            delete_removed: Delete objects in destination that don't exist in source
            exclude_patterns: Patterns to exclude from sync

        Returns:
            List of operation results
        """
        s3_client = self.get_client("s3", context.region)

        result = self.create_operation_result(
            context, "sync_objects", "s3:bucket", f"{source_bucket}->{destination_bucket}"
        )

        try:
            if context.dry_run:
                logger.info(f"[DRY-RUN] Would sync objects from {source_bucket} to {destination_bucket}")
                result.mark_completed(OperationStatus.DRY_RUN)
                return [result]

            # List objects in source bucket
            list_params = {"Bucket": source_bucket}
            if source_prefix:
                list_params["Prefix"] = source_prefix

            paginator = s3_client.get_paginator("list_objects_v2")
            source_objects = []

            for page in paginator.paginate(**list_params):
                if "Contents" in page:
                    source_objects.extend(page["Contents"])

            # List objects in destination bucket for comparison
            dest_list_params = {"Bucket": destination_bucket}
            if destination_prefix:
                dest_list_params["Prefix"] = destination_prefix

            dest_paginator = s3_client.get_paginator("list_objects_v2")
            dest_objects = {}

            for page in dest_paginator.paginate(**dest_list_params):
                if "Contents" in page:
                    for obj in page["Contents"]:
                        dest_objects[obj["Key"]] = obj

            synced_count = 0
            deleted_count = 0

            # Sync objects from source to destination
            for obj in source_objects:
                source_key = obj["Key"]

                # Apply prefix transformation if needed
                if source_prefix and destination_prefix:
                    if source_key.startswith(source_prefix):
                        dest_key = destination_prefix + source_key[len(source_prefix):]
                    else:
                        dest_key = source_key
                else:
                    dest_key = source_key

                # Check exclude patterns
                if exclude_patterns:
                    excluded = any(pattern in source_key for pattern in exclude_patterns)
                    if excluded:
                        continue

                # Check if object needs to be copied/updated
                needs_copy = True
                if dest_key in dest_objects:
                    dest_obj = dest_objects[dest_key]
                    if obj["ETag"] == dest_obj["ETag"] and obj["Size"] == dest_obj["Size"]:
                        needs_copy = False

                if needs_copy:
                    copy_source = {"Bucket": source_bucket, "Key": source_key}
                    self.execute_aws_call(
                        s3_client, "copy_object", CopySource=copy_source, Bucket=destination_bucket, Key=dest_key
                    )
                    synced_count += 1
                    logger.info(f"Synced object: {source_key} -> {dest_key}")

            # Delete objects in destination that don't exist in source
            if delete_removed:
                source_keys = {obj["Key"] for obj in source_objects}
                for dest_key in dest_objects:
                    # Transform back to source key for comparison
                    if destination_prefix and source_prefix:
                        if dest_key.startswith(destination_prefix):
                            source_equiv = source_prefix + dest_key[len(destination_prefix):]
                        else:
                            source_equiv = dest_key
                    else:
                        source_equiv = dest_key

                    if source_equiv not in source_keys:
                        self.execute_aws_call(s3_client, "delete_object", Bucket=destination_bucket, Key=dest_key)
                        deleted_count += 1
                        logger.info(f"Deleted object: {dest_key}")

            result.response_data = {
                "synced_objects": synced_count,
                "deleted_objects": deleted_count,
                "total_source_objects": len(source_objects),
            }
            result.mark_completed(OperationStatus.SUCCESS)
            logger.info(f"Successfully synced {synced_count} objects, deleted {deleted_count} objects")

        except ClientError as e:
            error_msg = f"Failed to sync objects from {source_bucket} to {destination_bucket}: {e}"
            logger.error(error_msg)
            result.mark_completed(OperationStatus.FAILED, error_msg)

        return [result]

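    # --- Editor's note, not part of the packaged module. ---
    # Change detection above compares ETag and Size. ETags equal MD5 digests only
    # for single-part, non-KMS uploads; multipart uploads produce composite ETags,
    # so identical content uploaded by different methods may be re-copied. Unlike
    # `aws s3 sync`, LastModified timestamps are not consulted.
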
    def list_objects(
        self,
        context: OperationContext,
        bucket_name: Optional[str] = None,
        prefix: Optional[str] = None,
        max_keys: int = 1000,
    ) -> List[OperationResult]:
        """
        List objects in S3 bucket with pagination support.

        Enhanced from original aws/s3_list_objects.py with pagination and formatting.

        Args:
            context: Operation context
            bucket_name: Name of bucket to list (can use S3_BUCKET env var)
            prefix: Filter objects by prefix
            max_keys: Maximum number of keys per request

        Returns:
            List of operation results with formatted object data
        """
        # Environment variable support from original file
        bucket_name = bucket_name or os.getenv("S3_BUCKET", "my-default-bucket")

        s3_client = self.get_client("s3", context.region)

        result = self.create_operation_result(context, "list_objects", "s3:bucket", bucket_name)

        try:
            logger.info(f"Listing objects in bucket: {bucket_name}")

            # Prepare parameters (from original file)
            params = {"Bucket": bucket_name, "MaxKeys": max_keys}
            if prefix:
                params["Prefix"] = prefix

            # Fetch objects with pagination support (from original file)
            paginator = s3_client.get_paginator("list_objects_v2")
            page_iterator = paginator.paginate(**params)

            object_list = []
            for page in page_iterator:
                if "Contents" in page:  # Check if there are objects
                    for obj in page["Contents"]:
                        object_list.append(obj)

            # Format objects for display (from original file)
            formatted_objects = self.format_object_list(object_list)

            result.response_data = {
                "objects": formatted_objects,
                "count": len(object_list),
                "bucket": bucket_name,
                "prefix": prefix,
            }
            result.mark_completed(OperationStatus.SUCCESS)
            logger.info(f"Found {len(object_list)} object(s) in bucket '{bucket_name}'.")

        except ClientError as e:
            error_msg = f"❌ AWS Client Error: {e}"
            logger.error(error_msg)
            result.mark_completed(OperationStatus.FAILED, error_msg)
        except BotoCoreError as e:
            error_msg = f"❌ BotoCore Error: {e}"
            logger.error(error_msg)
            result.mark_completed(OperationStatus.FAILED, error_msg)
        except Exception as e:
            error_msg = f"❌ Unexpected error: {e}"
            logger.error(error_msg)
            result.mark_completed(OperationStatus.FAILED, error_msg)

        return [result]

    def list_buckets(self, context: OperationContext) -> List[OperationResult]:
        """
        List all S3 buckets in the account.

        Enhanced from original aws/s3_list_buckets.py functionality.
        """
        s3_client = self.get_client("s3", context.region)

        result = self.create_operation_result(context, "list_buckets", "s3:account", "all-buckets")

        try:
            logger.info("Listing all S3 buckets...")

            response = self.execute_aws_call(s3_client, "list_buckets")
            buckets = response.get("Buckets", [])

            # Format bucket data
            formatted_buckets = []
            for bucket in buckets:
                formatted_buckets.append(
                    {"Name": bucket["Name"], "CreationDate": bucket["CreationDate"].strftime("%Y-%m-%d %H:%M:%S")}
                )

            result.response_data = {"buckets": formatted_buckets, "count": len(buckets)}
            result.mark_completed(OperationStatus.SUCCESS)
            logger.info(f"Found {len(buckets)} bucket(s)")

        except ClientError as e:
            error_msg = f"❌ AWS Client Error: {e}"
            logger.error(error_msg)
            result.mark_completed(OperationStatus.FAILED, error_msg)
        except BotoCoreError as e:
            error_msg = f"❌ BotoCore Error: {e}"
            logger.error(error_msg)
            result.mark_completed(OperationStatus.FAILED, error_msg)
        except Exception as e:
            error_msg = f"❌ Unexpected error: {e}"
            logger.error(error_msg)
            result.mark_completed(OperationStatus.FAILED, error_msg)

        return [result]


# ==============================
# AWS LAMBDA HANDLERS
# ==============================


def lambda_handler_s3_object_operations(event, context):
    """
    AWS Lambda handler for S3 object operations.

    Based on original aws/s3_object_operations.py Lambda handler.
    """
    try:
        from runbooks.inventory.models.account import AWSAccount
        from runbooks.operate.base import OperationContext

        action = event.get("action")  # 'upload' or 'delete'
        bucket = event.get("bucket", os.getenv("S3_BUCKET", "my-default-bucket"))
        key = event.get("key", os.getenv("S3_KEY", "default-key.txt"))
        file_path = event.get("file_path", os.getenv("LOCAL_FILE_PATH", "default.txt"))
        acl = event.get("acl", os.getenv("ACL", "private"))  # read for compatibility; put_object applies no per-object ACL
        region = event.get("region", os.getenv("AWS_REGION", "us-east-1"))

        s3_ops = S3Operations()
        account = AWSAccount(account_id="current", account_name="lambda-execution")
        operation_context = OperationContext(
            account=account, region=region, operation_type=action, resource_types=["s3:object"], dry_run=False
        )

        if action == "upload":
            # put_object takes bucket_name=, not bucket=, and has no acl parameter
            results = s3_ops.put_object(operation_context, bucket_name=bucket, key=key, file_path=file_path)
            return {"statusCode": 200, "body": f"File '{key}' uploaded to '{bucket}'."}
        elif action == "delete":
            results = s3_ops.delete_object(operation_context, bucket_name=bucket, key=key)
            return {"statusCode": 200, "body": f"File '{key}' deleted from '{bucket}'."}
        else:
            raise ValueError("Invalid action. Supported actions: 'upload', 'delete'.")

    except Exception as e:
        logger.error(f"❌ Lambda Error: {e}")
        return {"statusCode": 500, "body": str(e)}

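# --- Editor's note: illustrative event sketch, not part of the packaged module. ---
# A minimal test payload for the handler above; every field falls back to an
# environment variable if omitted:
#
#     {
#         "action": "upload",
#         "bucket": "example-bucket",
#         "key": "reports/latest.json",
#         "file_path": "/tmp/latest.json",
#         "region": "us-east-1"
#     }
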

# ==============================
# SCRIPT ENTRY POINT (CLI Support)
# ==============================


def main():
    """
    Main entry point for standalone execution (CLI or Docker).

    Provides compatibility with original AWS script execution patterns.
    """
    import sys

    if len(sys.argv) < 2:
        print("Usage: python s3_operations.py <operation> [args...]")
        print("Operations: create-bucket, list-objects, list-buckets, put-object, delete-object")
        sys.exit(1)

    operation = sys.argv[1]

    try:
        from runbooks.inventory.models.account import AWSAccount
        from runbooks.operate.base import OperationContext

        s3_ops = S3Operations()
        account = AWSAccount(account_id="current", account_name="cli-execution")
        operation_context = OperationContext(
            account=account,
            region=os.getenv("AWS_REGION", "us-east-1"),
            operation_type=operation.replace("-", "_"),
            resource_types=["s3"],
            dry_run=os.getenv("DRY_RUN", "false").lower() == "true",
        )

        if operation == "create-bucket":
            bucket_name = sys.argv[2] if len(sys.argv) > 2 else os.getenv("S3_BUCKET_NAME", "1cloudops")
            results = s3_ops.create_bucket(operation_context, bucket_name=bucket_name)

        elif operation == "list-objects":
            bucket_name = sys.argv[2] if len(sys.argv) > 2 else os.getenv("S3_BUCKET", "my-default-bucket")
            results = s3_ops.list_objects(operation_context, bucket_name=bucket_name)

        elif operation == "list-buckets":
            results = s3_ops.list_buckets(operation_context)

        elif operation == "put-object":
            bucket = os.getenv("S3_BUCKET", "my-default-bucket")
            key = os.getenv("S3_KEY", "default-key.txt")
            file_path = os.getenv("LOCAL_FILE_PATH", "default.txt")
            # put_object takes bucket_name=, not bucket=
            results = s3_ops.put_object(operation_context, bucket_name=bucket, key=key, file_path=file_path)

        elif operation == "delete-object":
            bucket = os.getenv("S3_BUCKET", "my-default-bucket")
            key = os.getenv("S3_KEY", "default-key.txt")
            results = s3_ops.delete_object(operation_context, bucket_name=bucket, key=key)

        else:
            raise ValueError(f"Unknown operation: {operation}")

        # Print results
        for result in results:
            if result.success:
                print(f"✅ {result.operation_type} completed successfully")
                if result.response_data:
                    print(f"   Data: {json.dumps(result.response_data, default=str, indent=2)}")
            else:
                print(f"❌ {result.operation_type} failed: {result.error_message}")

    except Exception as e:
        logger.error(f"Error during operation: {e}")
        sys.exit(1)


if __name__ == "__main__":
    main()
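
# --- Editor's note: illustrative CLI sketch, not part of the packaged module. ---
# Standalone invocation mirrors the usage string printed above; DRY_RUN and the
# S3_* environment variables are read by the module itself:
#
#     DRY_RUN=true python s3_operations.py create-bucket my-example-bucket
#     S3_BUCKET=my-example-bucket python s3_operations.py list-objects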