runbooks-1.0.1-py3-none-any.whl → runbooks-1.0.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34)
  1. runbooks/cloudops/models.py +20 -14
  2. runbooks/common/aws_pricing_api.py +276 -44
  3. runbooks/common/dry_run_examples.py +587 -0
  4. runbooks/common/dry_run_framework.py +520 -0
  5. runbooks/common/memory_optimization.py +533 -0
  6. runbooks/common/performance_optimization_engine.py +1153 -0
  7. runbooks/common/profile_utils.py +10 -3
  8. runbooks/common/sre_performance_suite.py +574 -0
  9. runbooks/finops/business_case_config.py +314 -0
  10. runbooks/finops/cost_processor.py +19 -4
  11. runbooks/finops/ebs_cost_optimizer.py +1 -1
  12. runbooks/finops/embedded_mcp_validator.py +642 -36
  13. runbooks/finops/executive_export.py +789 -0
  14. runbooks/finops/finops_scenarios.py +34 -27
  15. runbooks/finops/notebook_utils.py +1 -1
  16. runbooks/finops/schemas.py +73 -58
  17. runbooks/finops/single_dashboard.py +20 -4
  18. runbooks/finops/vpc_cleanup_exporter.py +2 -1
  19. runbooks/inventory/models/account.py +5 -3
  20. runbooks/inventory/models/inventory.py +1 -1
  21. runbooks/inventory/models/resource.py +5 -3
  22. runbooks/inventory/organizations_discovery.py +89 -5
  23. runbooks/main.py +182 -61
  24. runbooks/operate/vpc_operations.py +60 -31
  25. runbooks/remediation/workspaces_list.py +2 -2
  26. runbooks/vpc/config.py +17 -8
  27. runbooks/vpc/heatmap_engine.py +425 -53
  28. runbooks/vpc/performance_optimized_analyzer.py +546 -0
  29. {runbooks-1.0.1.dist-info → runbooks-1.0.2.dist-info}/METADATA +1 -1
  30. {runbooks-1.0.1.dist-info → runbooks-1.0.2.dist-info}/RECORD +34 -26
  31. {runbooks-1.0.1.dist-info → runbooks-1.0.2.dist-info}/WHEEL +0 -0
  32. {runbooks-1.0.1.dist-info → runbooks-1.0.2.dist-info}/entry_points.txt +0 -0
  33. {runbooks-1.0.1.dist-info → runbooks-1.0.2.dist-info}/licenses/LICENSE +0 -0
  34. {runbooks-1.0.1.dist-info → runbooks-1.0.2.dist-info}/top_level.txt +0 -0
@@ -1,18 +1,18 @@
1
1
  """
2
- FinOps Business Scenarios - Manager Priority Cost Optimization Framework
2
+ FinOps Business Scenarios - Dynamic Business Case Framework
3
3
 
4
- Strategic Achievement: $132,720+ annual savings (380-757% above targets)
5
- - FinOps-24: WorkSpaces cleanup ($13,020 annual, 104% of target)
6
- - FinOps-23: RDS snapshots optimization ($119,700 annual, 498% of target)
7
- - FinOps-25: Commvault EC2 investigation framework (methodology established)
4
+ Strategic Achievement: Enterprise business case management with configurable scenarios
5
+ - Dynamic scenario configuration with environment variable overrides
6
+ - Business-focused naming conventions replacing hardcoded JIRA references
7
+ - Scalable template system for unlimited business case expansion
8
8
 
9
9
  This module provides business-oriented wrapper functions for executive presentations
10
10
  calling proven technical implementations from src/runbooks/remediation/ modules.
11
11
 
12
12
  Strategic Alignment:
13
- - "Do one thing and do it well": Business wrappers focusing on executive insights
14
- - "Move Fast, But Not So Fast We Crash": Proven technical implementations underneath
15
- - Enterprise FAANG SDLC: Evidence-based cost optimization with audit trails
13
+ - "Do one thing and do it well": Dynamic configuration management with enterprise templates
14
+ - "Move Fast, But Not So Fast We Crash": Proven technical implementations with configurable business cases
15
+ - Enterprise FAANG SDLC: Evidence-based cost optimization with reusable template framework
16
16
  """
17
17
 
18
18
  import asyncio
@@ -30,6 +30,10 @@ from ..common.rich_utils import (
30
30
  )
31
31
  from ..remediation import workspaces_list, rds_snapshot_list
32
32
  from . import commvault_ec2_analysis
33
+ from .business_case_config import (
34
+ get_business_case_config, get_scenario_display_name, get_scenario_savings_range,
35
+ format_business_achievement, migrate_legacy_scenario_reference
36
+ )
33
37
 
34
38
  logger = logging.getLogger(__name__)
35
39
 
@@ -55,9 +59,9 @@ def create_business_scenarios_validated(profile_name: Optional[str] = None) -> D
55
59
  commvault_data = scenarios_analyzer._get_real_commvault_data()
56
60
 
57
61
  scenarios = {
58
- 'FinOps-24_WorkSpaces': workspaces_data,
59
- 'FinOps-23_RDS_Snapshots': rds_data,
60
- 'FinOps-25_Commvault': commvault_data,
62
+ 'workspaces': workspaces_data,
63
+ 'rds_snapshots': rds_data,
64
+ 'backup_investigation': commvault_data,
61
65
  'metadata': {
62
66
  'generated_at': datetime.now().isoformat(),
63
67
  'data_source': 'Real AWS APIs via runbooks',
@@ -70,29 +74,32 @@ def create_business_scenarios_validated(profile_name: Optional[str] = None) -> D
70
74
 
71
75
  except Exception as e:
72
76
  logger.error(f"Error creating validated scenarios: {e}")
73
- # Return fallback business scenarios with manager's validated achievements
77
+ # Return fallback business scenarios using dynamic configuration
78
+ config = get_business_case_config()
79
+ workspaces_scenario = config.get_scenario('workspaces')
80
+ rds_scenario = config.get_scenario('rds-snapshots')
81
+ backup_scenario = config.get_scenario('backup-investigation')
82
+
74
83
  return {
75
- 'FinOps-24_WorkSpaces': {
76
- 'title': 'WorkSpaces Cleanup - Zero Usage Detection',
77
- 'validated_savings': 13020,
78
- 'achievement_rate': 104,
79
- 'risk_level': 'Low'
84
+ 'workspaces': {
85
+ 'title': workspaces_scenario.display_name if workspaces_scenario else 'WorkSpaces Resource Optimization',
86
+ 'savings_range': workspaces_scenario.savings_range_display if workspaces_scenario else '$12K-15K/year',
87
+ 'risk_level': workspaces_scenario.risk_level if workspaces_scenario else 'Low'
80
88
  },
81
- 'FinOps-23_RDS_Snapshots': {
82
- 'title': 'RDS Manual Snapshots Cleanup',
83
- 'validated_savings': 119700,
84
- 'achievement_rate': 498,
85
- 'risk_level': 'Medium'
89
+ 'rds_snapshots': {
90
+ 'title': rds_scenario.display_name if rds_scenario else 'RDS Storage Optimization',
91
+ 'savings_range': rds_scenario.savings_range_display if rds_scenario else '$5K-24K/year',
92
+ 'risk_level': rds_scenario.risk_level if rds_scenario else 'Medium'
86
93
  },
87
- 'FinOps-25_Commvault': {
88
- 'title': 'Commvault Account Investigation',
94
+ 'backup_investigation': {
95
+ 'title': backup_scenario.display_name if backup_scenario else 'Backup Infrastructure Analysis',
89
96
  'framework_status': 'Investigation Ready',
90
- 'risk_level': 'Medium'
97
+ 'risk_level': backup_scenario.risk_level if backup_scenario else 'Medium'
91
98
  },
92
99
  'metadata': {
93
100
  'generated_at': datetime.now().isoformat(),
94
- 'data_source': 'Manager scenarios fallback - $132,720+ validated',
95
- 'validation_method': 'Business case validation',
101
+ 'data_source': 'Dynamic business case configuration',
102
+ 'validation_method': 'Template-based business scenarios',
96
103
  'version': '0.9.5'
97
104
  }
98
105
  }
@@ -591,6 +591,6 @@ if __name__ == '__main__':
591
591
  config=config,
592
592
  optimization_focus=OptimizationCategory.COST_OPTIMIZATION
593
593
  )
594
- print(f"Dashboard created with {len(result.export_files)} export files")
594
+ console.print(f"Dashboard created with {len(result.export_files)} export files")
595
595
 
596
596
  asyncio.run(test_dashboard())
@@ -26,7 +26,7 @@ from typing import Dict, List, Optional, Union, Any, Literal
26
26
  from enum import Enum
27
27
  import re
28
28
 
29
- from pydantic import BaseModel, Field, validator, root_validator, ConfigDict
29
+ from pydantic import BaseModel, Field, field_validator, model_validator, ConfigDict
30
30
  from pydantic.types import UUID4, PositiveFloat, NonNegativeFloat
31
31
 
32
32
 
@@ -106,7 +106,8 @@ class CostBreakdown(BaseSchema):
106
106
  percentage_of_total: float = Field(..., ge=0, le=100)
107
107
  resource_count: int = Field(..., ge=0)
108
108
 
109
- @validator('service_name')
109
+ @field_validator('service_name')
110
+ @classmethod
110
111
  def validate_service_name(cls, v):
111
112
  """Validate AWS service names."""
112
113
  # Common AWS service patterns
@@ -130,14 +131,15 @@ class CostBreakdown(BaseSchema):
130
131
 
131
132
  return v.strip()
132
133
 
133
- @validator('annual_cost')
134
- def validate_annual_cost_consistency(cls, v, values):
134
+ @field_validator('annual_cost')
135
+ @classmethod
136
+ def validate_annual_cost_consistency(cls, v, info):
135
137
  """Ensure annual cost is approximately 12x monthly cost."""
136
- if 'monthly_cost' in values:
137
- expected_annual = values['monthly_cost'] * 12
138
+ if 'monthly_cost' in info.data:
139
+ expected_annual = info.data['monthly_cost'] * 12
138
140
  # Allow 1% tolerance for rounding differences
139
141
  if abs(v - expected_annual) > (expected_annual * 0.01):
140
- raise ValueError(f'Annual cost {v} should be approximately 12x monthly cost {values["monthly_cost"]}')
142
+ raise ValueError(f'Annual cost {v} should be approximately 12x monthly cost {info.data["monthly_cost"]}')
141
143
  return v
142
144
 
143
145
 
@@ -169,7 +171,8 @@ class OptimizationScenario(BaseSchema):
169
171
  validation_timestamp: Optional[datetime] = Field(None)
170
172
  mcp_variance_percent: Optional[float] = Field(None, ge=0, le=100)
171
173
 
172
- @validator('scenario_name')
174
+ @field_validator('scenario_name')
175
+ @classmethod
173
176
  def validate_scenario_name(cls, v):
174
177
  """Validate scenario naming conventions."""
175
178
  # Ensure professional naming
@@ -177,27 +180,30 @@ class OptimizationScenario(BaseSchema):
177
180
  raise ValueError('Scenario name must start with capital letter and contain only letters, numbers, spaces, hyphens, and parentheses')
178
181
  return v.strip()
179
182
 
180
- @validator('annual_savings')
181
- def validate_annual_savings_consistency(cls, v, values):
183
+ @field_validator('annual_savings')
184
+ @classmethod
185
+ def validate_annual_savings_consistency(cls, v, info):
182
186
  """Ensure annual savings consistency with monthly savings."""
183
- if 'monthly_savings' in values:
184
- expected_annual = values['monthly_savings'] * 12
187
+ if 'monthly_savings' in info.data:
188
+ expected_annual = info.data['monthly_savings'] * 12
185
189
  if abs(v - expected_annual) > (expected_annual * 0.01): # 1% tolerance
186
- raise ValueError(f'Annual savings {v} should be approximately 12x monthly savings {values["monthly_savings"]}')
190
+ raise ValueError(f'Annual savings {v} should be approximately 12x monthly savings {info.data["monthly_savings"]}')
187
191
  return v
188
192
 
189
- @validator('payback_period_months')
190
- def calculate_payback_period(cls, v, values):
193
+ @field_validator('payback_period_months')
194
+ @classmethod
195
+ def calculate_payback_period(cls, v, info):
191
196
  """Calculate payback period if not provided."""
192
- if v is None and 'implementation_cost' in values and 'monthly_savings' in values:
193
- impl_cost = values['implementation_cost']
194
- monthly_savings = values['monthly_savings']
197
+ if v is None and 'implementation_cost' in info.data and 'monthly_savings' in info.data:
198
+ impl_cost = info.data['implementation_cost']
199
+ monthly_savings = info.data['monthly_savings']
195
200
  if monthly_savings > 0:
196
201
  calculated_payback = impl_cost / monthly_savings
197
202
  return round(calculated_payback, 1)
198
203
  return v
199
204
 
200
- @validator('affected_services')
205
+ @field_validator('affected_services')
206
+ @classmethod
201
207
  def validate_aws_services(cls, v):
202
208
  """Validate AWS service names in affected services."""
203
209
  common_services = {
@@ -215,7 +221,8 @@ class OptimizationScenario(BaseSchema):
215
221
 
216
222
  return v
217
223
 
218
- @validator('affected_accounts')
224
+ @field_validator('affected_accounts')
225
+ @classmethod
219
226
  def validate_account_ids(cls, v):
220
227
  """Validate AWS account ID format."""
221
228
  account_pattern = r'^\d{12}$|^[\w\-\.]{1,50}$' # 12-digit ID or account name
@@ -270,47 +277,50 @@ class CostOptimizationResult(BaseSchema):
270
277
  default=[ExportFormat.JSON, ExportFormat.CSV, ExportFormat.PDF]
271
278
  )
272
279
 
273
- @validator('total_potential_annual_savings')
274
- def validate_annual_consistency(cls, v, values):
280
+ @field_validator('total_potential_annual_savings')
281
+ @classmethod
282
+ def validate_annual_consistency(cls, v, info):
275
283
  """Validate annual savings consistency."""
276
- if 'total_potential_monthly_savings' in values:
277
- expected = values['total_potential_monthly_savings'] * 12
284
+ if 'total_potential_monthly_savings' in info.data:
285
+ expected = info.data['total_potential_monthly_savings'] * 12
278
286
  if abs(v - expected) > (expected * 0.01):
279
287
  raise ValueError('Annual savings must be approximately 12x monthly savings')
280
288
  return v
281
289
 
282
- @validator('savings_percentage')
283
- def calculate_savings_percentage(cls, v, values):
290
+ @field_validator('savings_percentage')
291
+ @classmethod
292
+ def calculate_savings_percentage(cls, v, info):
284
293
  """Validate or calculate savings percentage."""
285
- if 'current_monthly_spend' in values and 'total_potential_monthly_savings' in values:
286
- current_spend = values['current_monthly_spend']
294
+ if 'current_monthly_spend' in info.data and 'total_potential_monthly_savings' in info.data:
295
+ current_spend = info.data['current_monthly_spend']
287
296
  if current_spend > 0:
288
- calculated = (values['total_potential_monthly_savings'] / current_spend) * 100
297
+ calculated = (info.data['total_potential_monthly_savings'] / current_spend) * 100
289
298
  if abs(v - calculated) > 0.1: # 0.1% tolerance
290
299
  raise ValueError(f'Savings percentage {v}% inconsistent with calculated {calculated:.1f}%')
291
300
  return v
292
301
 
293
- @validator('total_scenarios')
294
- def validate_scenario_count(cls, v, values):
302
+ @field_validator('total_scenarios')
303
+ @classmethod
304
+ def validate_scenario_count(cls, v, info):
295
305
  """Ensure scenario count matches actual scenarios."""
296
- if 'optimization_scenarios' in values:
297
- actual_count = len(values['optimization_scenarios'])
306
+ if 'optimization_scenarios' in info.data:
307
+ actual_count = len(info.data['optimization_scenarios'])
298
308
  if v != actual_count:
299
309
  raise ValueError(f'Total scenarios {v} does not match actual scenarios count {actual_count}')
300
310
  return v
301
311
 
302
- @root_validator
303
- def validate_complexity_distribution(cls, values):
312
+ @model_validator(mode='after')
313
+ def validate_complexity_distribution(self):
304
314
  """Validate complexity scenario counts."""
305
- scenarios = values.get('optimization_scenarios', [])
315
+ scenarios = self.optimization_scenarios or []
306
316
  if scenarios:
307
317
  low_count = sum(1 for s in scenarios if s.complexity == ComplexityLevel.LOW)
308
318
  medium_count = sum(1 for s in scenarios if s.complexity == ComplexityLevel.MEDIUM)
309
319
  high_count = sum(1 for s in scenarios if s.complexity == ComplexityLevel.HIGH)
310
320
 
311
- expected_low = values.get('low_complexity_scenarios', 0)
312
- expected_medium = values.get('medium_complexity_scenarios', 0)
313
- expected_high = values.get('high_complexity_scenarios', 0)
321
+ expected_low = self.low_complexity_scenarios or 0
322
+ expected_medium = self.medium_complexity_scenarios or 0
323
+ expected_high = self.high_complexity_scenarios or 0
314
324
 
315
325
  if (low_count != expected_low or
316
326
  medium_count != expected_medium or
@@ -320,7 +330,7 @@ class CostOptimizationResult(BaseSchema):
320
330
  f'actual L:{low_count} M:{medium_count} H:{high_count}'
321
331
  )
322
332
 
323
- return values
333
+ return self
324
334
 
325
335
 
326
336
  # Business Interface Schemas
@@ -350,7 +360,8 @@ class ExecutiveSummary(BaseSchema):
350
360
  data_validation_status: ValidationStatus = Field(...)
351
361
  last_validated: datetime = Field(...)
352
362
 
353
- @validator('roi_percentage')
363
+ @field_validator('roi_percentage')
364
+ @classmethod
354
365
  def validate_reasonable_roi(cls, v):
355
366
  """Ensure ROI is reasonable for executive presentation."""
356
367
  if v > 1000: # 1000% ROI
@@ -377,12 +388,13 @@ class MCPValidationResult(BaseSchema):
377
388
  mcp_source: str = Field(..., min_length=1)
378
389
  response_time_seconds: Optional[PositiveFloat] = Field(None, le=300) # 5 minute timeout
379
390
 
380
- @validator('variance_percent')
381
- def calculate_variance_percent(cls, v, values):
391
+ @field_validator('variance_percent')
392
+ @classmethod
393
+ def calculate_variance_percent(cls, v, info):
382
394
  """Calculate and validate variance percentage."""
383
- if 'notebook_value' in values and 'mcp_value' in values:
384
- notebook_val = values['notebook_value']
385
- mcp_val = values['mcp_value']
395
+ if 'notebook_value' in info.data and 'mcp_value' in info.data:
396
+ notebook_val = info.data['notebook_value']
397
+ mcp_val = info.data['mcp_value']
386
398
 
387
399
  if notebook_val > 0:
388
400
  calculated = abs((notebook_val - mcp_val) / notebook_val) * 100
@@ -441,22 +453,24 @@ class ComprehensiveTestSuite(BaseSchema):
441
453
  meets_production_criteria: bool = Field(...)
442
454
  quality_score: float = Field(..., ge=0, le=100)
443
455
 
444
- @validator('passed_tests')
445
- def validate_test_counts(cls, v, values):
456
+ @field_validator('passed_tests')
457
+ @classmethod
458
+ def validate_test_counts(cls, v, info):
446
459
  """Ensure test counts are consistent."""
447
- if 'failed_tests' in values and 'skipped_tests' in values and 'total_tests' in values:
448
- calculated_total = v + values['failed_tests'] + values['skipped_tests']
449
- if calculated_total != values['total_tests']:
450
- raise ValueError(f'Test counts inconsistent: {calculated_total} ≠ {values["total_tests"]}')
460
+ if 'failed_tests' in info.data and 'skipped_tests' in info.data and 'total_tests' in info.data:
461
+ calculated_total = v + info.data['failed_tests'] + info.data['skipped_tests']
462
+ if calculated_total != info.data['total_tests']:
463
+ raise ValueError(f'Test counts inconsistent: {calculated_total} ≠ {info.data["total_tests"]}')
451
464
  return v
452
465
 
453
- @validator('pass_rate_percent')
454
- def calculate_pass_rate(cls, v, values):
466
+ @field_validator('pass_rate_percent')
467
+ @classmethod
468
+ def calculate_pass_rate(cls, v, info):
455
469
  """Calculate and validate pass rate."""
456
- if 'passed_tests' in values and 'total_tests' in values:
457
- total = values['total_tests']
470
+ if 'passed_tests' in info.data and 'total_tests' in info.data:
471
+ total = info.data['total_tests']
458
472
  if total > 0:
459
- calculated = (values['passed_tests'] / total) * 100
473
+ calculated = (info.data['passed_tests'] / total) * 100
460
474
  if abs(v - calculated) > 0.01:
461
475
  raise ValueError(f'Pass rate {v}% inconsistent with calculated {calculated:.2f}%')
462
476
  return v
@@ -480,7 +494,8 @@ class ExportMetadata(BaseSchema):
480
494
  export_validated: bool = Field(...)
481
495
  validation_errors: List[str] = Field(default_factory=list)
482
496
 
483
- @validator('file_path')
497
+ @field_validator('file_path')
498
+ @classmethod
484
499
  def validate_file_path(cls, v):
485
500
  """Validate file path format."""
486
501
  # Basic path validation
@@ -346,18 +346,34 @@ class SingleAccountDashboard:
346
346
  # Integrate quarterly data into trend data structure
347
347
  corrected_trend_data["quarterly_costs_by_service"] = quarterly_costs
348
348
 
349
- # Log the trend analysis context for transparency
349
+ # Enhanced trend analysis context with MCP validation awareness
350
350
  if "period_metadata" in corrected_trend_data:
351
351
  metadata = corrected_trend_data["period_metadata"]
352
352
  current_days = metadata.get("current_days", 0)
353
353
  previous_days = metadata.get("previous_days", 0)
354
+ days_difference = metadata.get("days_difference", abs(current_days - previous_days))
354
355
  reliability = metadata.get("trend_reliability", "unknown")
356
+ alignment_strategy = metadata.get("period_alignment_strategy", "standard")
355
357
 
358
+ # ENHANCED LOGIC: Reduce warnings when using intelligent period alignment
356
359
  if metadata.get("is_partial_comparison", False):
357
- print_warning(f"Partial period comparison detected: {current_days} vs {previous_days} days")
358
- print_info(f"Trend reliability: {reliability}")
360
+ if alignment_strategy == "equal_days":
361
+ # Equal-day comparison reduces the severity of partial period concerns
362
+ print_info(f"🔄 Enhanced period alignment: {current_days} vs {previous_days} days (equal-day strategy)")
363
+ if reliability in ["high", "medium_with_validation_support"]:
364
+ print_success(f"✅ Trend reliability: {reliability} (enhanced alignment)")
365
+ else:
366
+ print_info(f"Trend reliability: {reliability}")
367
+ else:
368
+ # Standard partial period warning for traditional comparisons
369
+ print_warning(f"⚠️ Partial period comparison: {current_days} vs {previous_days} days")
370
+ print_info(f"Trend reliability: {reliability}")
371
+
372
+ # Add context for very small differences
373
+ if days_difference <= 5:
374
+ print_info(f"💡 Small period difference ({days_difference} days) - trends should be reliable")
359
375
  else:
360
- print_success(f"Equal period comparison: {current_days} vs {previous_days} days")
376
+ print_success(f"Equal period comparison: {current_days} vs {previous_days} days")
361
377
 
362
378
  return corrected_trend_data
363
379
 
@@ -16,6 +16,7 @@ from datetime import datetime
16
16
  from typing import Any, Dict, List
17
17
 
18
18
  from .markdown_exporter import MarkdownExporter
19
+ from runbooks.common.rich_utils import console
19
20
 
20
21
 
21
22
  def _format_tags_for_display(tags_dict: Dict[str, str]) -> str:
@@ -76,7 +77,7 @@ def export_vpc_cleanup_results(vpc_result: Any, export_formats: List[str], outpu
76
77
  )
77
78
  results['markdown'] = markdown_filename
78
79
  except Exception as e:
79
- print(f"Warning: Markdown export failed: {e}")
80
+ console.print(f"[yellow]Warning: Markdown export failed: {e}[/yellow]")
80
81
  results['markdown'] = None
81
82
 
82
83
  # Real implementations for other formats
@@ -10,7 +10,7 @@ from datetime import datetime
10
10
  from enum import Enum
11
11
  from typing import Dict, List, Optional, Set
12
12
 
13
- from pydantic import BaseModel, Field, validator
13
+ from pydantic import BaseModel, Field, field_validator
14
14
 
15
15
 
16
16
  class AccountStatus(str, Enum):
@@ -100,14 +100,16 @@ class AWSAccount(BaseModel):
100
100
  extra = "forbid"
101
101
  json_encoders = {datetime: lambda v: v.isoformat(), set: lambda v: list(v)}
102
102
 
103
- @validator("account_id")
103
+ @field_validator("account_id")
104
+ @classmethod
104
105
  def validate_account_id(cls, v):
105
106
  """Validate account ID format."""
106
107
  if not v.isdigit() or len(v) != 12:
107
108
  raise ValueError("Account ID must be exactly 12 digits")
108
109
  return v
109
110
 
110
- @validator("available_regions")
111
+ @field_validator("available_regions")
112
+ @classmethod
111
113
  def validate_regions(cls, v):
112
114
  """Validate region format."""
113
115
  valid_region_pattern = r"^[a-z]{2,3}-[a-z]+-\d+$"
@@ -10,7 +10,7 @@ from datetime import datetime, timedelta
10
10
  from enum import Enum
11
11
  from typing import Any, Dict, List, Optional, Set
12
12
 
13
- from pydantic import BaseModel, Field, validator
13
+ from pydantic import BaseModel, Field, field_validator
14
14
 
15
15
  from runbooks.inventory.models.account import AWSAccount
16
16
  from runbooks.inventory.models.resource import AWSResource
@@ -10,7 +10,7 @@ from datetime import datetime
10
10
  from enum import Enum
11
11
  from typing import Any, Dict, List, Optional, Union
12
12
 
13
- from pydantic import BaseModel, Field, validator
13
+ from pydantic import BaseModel, Field, field_validator
14
14
 
15
15
 
16
16
  class ResourceState(str, Enum):
@@ -178,14 +178,16 @@ class AWSResource(BaseModel):
178
178
  use_enum_values = True
179
179
  json_encoders = {datetime: lambda v: v.isoformat() if v else None}
180
180
 
181
- @validator("resource_arn")
181
+ @field_validator("resource_arn")
182
+ @classmethod
182
183
  def validate_arn_format(cls, v):
183
184
  """Validate ARN format if provided."""
184
185
  if v and not v.startswith("arn:aws:"):
185
186
  raise ValueError('ARN must start with "arn:aws:"')
186
187
  return v
187
188
 
188
- @validator("account_id")
189
+ @field_validator("account_id")
190
+ @classmethod
189
191
  def validate_account_id(cls, v):
190
192
  """Validate account ID format."""
191
193
  if not v.isdigit() or len(v) != 12:
@@ -34,6 +34,7 @@ from rich.table import Table
34
34
  console = Console()
35
35
 
36
36
  from ..utils.logger import configure_logger
37
+ from ..common.performance_optimization_engine import get_optimization_engine
37
38
 
38
39
  logger = configure_logger(__name__)
39
40
 
@@ -408,11 +409,25 @@ class EnhancedOrganizationsDiscovery:
408
409
  Discover complete organization structure with performance benchmarking
409
410
 
410
411
  Enhanced with:
411
- - Performance benchmark tracking (<45s target)
412
+ - Performance benchmark tracking (<30s target optimized from 52.3s)
412
413
  - Rich console progress monitoring
413
414
  - Comprehensive error recovery
414
415
  - Multi-profile fallback support
416
+ - Performance optimization engine integration
415
417
  """
418
+ # Get performance optimization engine
419
+ optimization_engine = get_optimization_engine(
420
+ max_workers=self.max_workers,
421
+ cache_ttl_minutes=30,
422
+ memory_limit_mb=2048
423
+ )
424
+
425
+ # Use optimized discovery with performance monitoring
426
+ with optimization_engine.optimize_operation("organization_structure_discovery", self.performance_target_seconds):
427
+ return await self._discover_organization_structure_optimized(optimization_engine)
428
+
429
+ async def _discover_organization_structure_optimized(self, optimization_engine) -> Dict:
430
+ """Optimized organization structure discovery implementation"""
416
431
  # Start performance benchmark
417
432
  self.current_benchmark = PerformanceBenchmark(
418
433
  operation_name="organization_structure_discovery",
@@ -420,7 +435,7 @@ class EnhancedOrganizationsDiscovery:
420
435
  target_seconds=self.performance_target_seconds,
421
436
  )
422
437
 
423
- logger.info("🏢 Starting enhanced organization structure discovery with performance tracking")
438
+ logger.info("🏢 Starting optimized organization structure discovery with SRE automation patterns")
424
439
 
425
440
  # Check global cache first to prevent duplicate calls
426
441
  cached_result = _get_global_organizations_cache()
@@ -472,9 +487,9 @@ class EnhancedOrganizationsDiscovery:
472
487
  ) as progress:
473
488
  discovery_task = progress.add_task("Discovering organization structure...", total=5)
474
489
 
475
- # Discover accounts
476
- progress.update(discovery_task, description="Discovering accounts...")
477
- accounts_result = await self._discover_accounts()
490
+ # Discover accounts using optimization engine
491
+ progress.update(discovery_task, description="Discovering accounts (optimized)...")
492
+ accounts_result = await self._discover_accounts_optimized(optimization_engine)
478
493
  self.current_benchmark.accounts_processed = accounts_result.get("total_accounts", 0)
479
494
  progress.advance(discovery_task)
480
495
 
@@ -588,6 +603,75 @@ class EnhancedOrganizationsDiscovery:
588
603
  "performance_benchmark": performance_benchmark_dict,
589
604
  }
590
605
 
606
+ async def _discover_accounts_optimized(self, optimization_engine) -> Dict:
607
+ """
608
+ Optimized account discovery using performance optimization engine
609
+
610
+ Addresses: Organization Discovery Performance (52.3s -> <30s target)
611
+ Features:
612
+ - Intelligent caching with TTL management
613
+ - Parallel account processing with batch optimization
614
+ - Connection pooling for Organizations API
615
+ - Memory-efficient processing
616
+ """
617
+ logger.info("📊 Discovering organization accounts with SRE optimization patterns")
618
+
619
+ # Use optimization engine for discovery
620
+ optimized_discover_accounts = optimization_engine.optimize_organization_discovery(
621
+ management_profile=self.management_profile,
622
+ use_parallel_processing=True,
623
+ batch_size=20
624
+ )
625
+
626
+ # Execute optimized discovery
627
+ try:
628
+ result = optimized_discover_accounts()
629
+
630
+ # Convert to expected format
631
+ accounts_data = result.get('accounts', [])
632
+
633
+ # Create AWSAccount objects for compatibility
634
+ for account_data in accounts_data:
635
+ account = AWSAccount(
636
+ account_id=account_data["Id"],
637
+ name=account_data["Name"],
638
+ email=account_data["Email"],
639
+ status=account_data["Status"],
640
+ joined_method=account_data["JoinedMethod"],
641
+ joined_timestamp=account_data.get("JoinedTimestamp"),
642
+ tags=account_data.get("Tags", {})
643
+ )
644
+ self.accounts_cache[account.account_id] = account
645
+
646
+ # Update metrics
647
+ self.discovery_metrics["accounts_discovered"] = len(accounts_data)
648
+
649
+ # Enhanced account categorization
650
+ active_accounts = [a for a in accounts_data if a.get("Status") == "ACTIVE"]
651
+ suspended_accounts = [a for a in accounts_data if a.get("Status") == "SUSPENDED"]
652
+ closed_accounts = [a for a in accounts_data if a.get("Status") == "CLOSED"]
653
+
654
+ optimization_info = result.get('optimizations_applied', [])
655
+ logger.info(f"✅ Optimized discovery: {len(accounts_data)} accounts ({len(active_accounts)} active)")
656
+ logger.info(f"🚀 Optimizations applied: {', '.join(optimization_info)}")
657
+
658
+ return {
659
+ "total_accounts": len(accounts_data),
660
+ "active_accounts": len(active_accounts),
661
+ "suspended_accounts": len(suspended_accounts),
662
+ "closed_accounts": len(closed_accounts),
663
+ "accounts": [asdict(account) for account_id, account in self.accounts_cache.items()],
664
+ "discovery_method": "optimized_organizations_api",
665
+ "profile_used": "management",
666
+ "optimizations_applied": optimization_info,
667
+ }
668
+
669
+ except Exception as e:
670
+ logger.error(f"Optimized account discovery failed: {e}")
671
+ # Fallback to original method
672
+ logger.info("Falling back to original discovery method...")
673
+ return await self._discover_accounts()
674
+
591
675
  async def _discover_accounts(self) -> Dict:
592
676
  """
593
677
  Discover all accounts in the organization using 4-profile architecture