mcp-souschef 2.5.3__py3-none-any.whl → 3.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {mcp_souschef-2.5.3.dist-info → mcp_souschef-3.0.0.dist-info}/METADATA +135 -28
- mcp_souschef-3.0.0.dist-info/RECORD +46 -0
- {mcp_souschef-2.5.3.dist-info → mcp_souschef-3.0.0.dist-info}/WHEEL +1 -1
- souschef/__init__.py +43 -3
- souschef/assessment.py +1260 -69
- souschef/ci/common.py +126 -0
- souschef/ci/github_actions.py +4 -93
- souschef/ci/gitlab_ci.py +3 -53
- souschef/ci/jenkins_pipeline.py +3 -60
- souschef/cli.py +129 -20
- souschef/converters/__init__.py +2 -2
- souschef/converters/cookbook_specific.py +125 -0
- souschef/converters/cookbook_specific.py.backup +109 -0
- souschef/converters/playbook.py +1022 -15
- souschef/converters/resource.py +113 -10
- souschef/converters/template.py +177 -0
- souschef/core/constants.py +13 -0
- souschef/core/metrics.py +313 -0
- souschef/core/path_utils.py +12 -9
- souschef/core/validation.py +53 -0
- souschef/deployment.py +85 -33
- souschef/parsers/attributes.py +397 -32
- souschef/parsers/recipe.py +48 -10
- souschef/server.py +715 -37
- souschef/ui/app.py +1658 -379
- souschef/ui/health_check.py +36 -0
- souschef/ui/pages/ai_settings.py +563 -0
- souschef/ui/pages/cookbook_analysis.py +3270 -166
- souschef/ui/pages/validation_reports.py +274 -0
- mcp_souschef-2.5.3.dist-info/RECORD +0 -38
- {mcp_souschef-2.5.3.dist-info → mcp_souschef-3.0.0.dist-info}/entry_points.txt +0 -0
- {mcp_souschef-2.5.3.dist-info → mcp_souschef-3.0.0.dist-info}/licenses/LICENSE +0 -0
souschef/core/metrics.py
ADDED
|
@@ -0,0 +1,313 @@
|
|
|
1
|
+
"""Centralized metrics and effort calculation module for consistent time estimations."""
|
|
2
|
+
|
|
3
|
+
from dataclasses import dataclass
|
|
4
|
+
from enum import Enum
|
|
5
|
+
from typing import NamedTuple
|
|
6
|
+
|
|
7
|
+
# Public API of this module. Previously omitted TeamRecommendation (the
# return type of get_team_recommendation), categorize_complexity (imported
# by souschef.deployment), and validate_metrics_consistency (used by
# validation reports) — all three are public and belong here.
__all__ = [
    "ComplexityLevel",
    "EffortMetrics",
    "TeamRecommendation",
    "categorize_complexity",
    "convert_days_to_hours",
    "convert_days_to_weeks",
    "convert_hours_to_days",
    "estimate_effort_for_complexity",
    "get_team_recommendation",
    "get_timeline_weeks",
    "validate_metrics_consistency",
]
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
class ComplexityLevel(str, Enum):
    """
    Standard complexity levels used across all components.

    Mixes in ``str`` so that members compare equal to (and serialize as)
    their plain string values, e.g. ``ComplexityLevel.LOW == "low"``.
    """

    LOW = "low"
    MEDIUM = "medium"
    HIGH = "high"
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
@dataclass
class EffortMetrics:
    """
    Centralized container for all effort estimates.

    Provides consistent representations across different formats:
    - Base unit: person-days (with decimal precision)
    - Derived: hours, weeks with consistent conversion factors
    - Ranges: For display purposes, converting days to week ranges

    Ensures all components (migration planning, dependency mapping,
    validation reports) use the same underlying numbers.
    """

    # Base unit: person-days (e.g., 2.5, 5.0, 10.0)
    estimated_days: float

    @property
    def estimated_hours(self) -> float:
        """Convert days to hours using standard 8-hour workday."""
        return self.estimated_days * 8

    @property
    def estimated_weeks_low(self) -> int:
        """Optimistic estimate: assumes optimal parallelization (7 effort-days per week)."""
        # NOTE: this is the LOWER bound of the week range; the original
        # docstring mislabeled it "conservative".
        return max(1, int(self.estimated_days / 7))

    @property
    def estimated_weeks_high(self) -> int:
        """Conservative estimate: assumes sequential/limited parallelization (~3.5 days/week)."""
        return max(1, int(self.estimated_days / 3.5))

    @property
    def estimated_weeks_range(self) -> str:
        """Human-readable week range (e.g., '2-4 weeks')."""
        low = self.estimated_weeks_low
        high = self.estimated_weeks_high
        if low == high:
            return f"{low} week{'s' if low != 1 else ''}"
        return f"{low}-{high} weeks"

    @property
    def estimated_days_formatted(self) -> str:
        """Formatted days with appropriate precision."""
        # Drop the decimal point when the value is a whole number of days
        if self.estimated_days == int(self.estimated_days):
            return f"{int(self.estimated_days)} days"
        return f"{self.estimated_days:.1f} days"

    def __str__(self) -> str:
        """Return a string representation of effort metrics."""
        return f"{self.estimated_days_formatted} ({self.estimated_weeks_range})"
|
|
78
|
+
|
|
79
|
+
|
|
80
|
+
class TeamRecommendation(NamedTuple):
    """Team composition and timeline recommendation."""

    # e.g., '1 developer + 1 reviewer'
    team_size: str
    # Recommended timeline in weeks
    timeline_weeks: int
    # Human-readable description of the recommendation
    description: str
|
|
91
|
+
|
|
92
|
+
|
|
93
|
+
# Conversion constants - Single source of truth
HOURS_PER_WORKDAY = 8
DAYS_PER_WEEK = 7

# Complexity thresholds for automatic categorization
COMPLEXITY_THRESHOLD_LOW = 30
COMPLEXITY_THRESHOLD_HIGH = 70

# Effort multiplier per resource (base 1 resource = baseline effort)
EFFORT_MULTIPLIER_PER_RESOURCE = 0.125  # 0.125 days = 1 hour per resource


def convert_days_to_hours(days: float) -> float:
    """Convert person-days to hours using standard 8-hour workday."""
    return HOURS_PER_WORKDAY * days


def convert_hours_to_days(hours: float) -> float:
    """Convert hours to person-days using standard 8-hour workday."""
    return hours / HOURS_PER_WORKDAY


def convert_days_to_weeks(days: float, conservative: bool = False) -> int:
    """
    Convert days to weeks estimate.

    Args:
        days: Number of person-days
        conservative: If True, use realistic estimate (1 engineer, limited
            parallelization). If False, use optimistic estimate (full
            parallelization)

    Returns:
        Number of weeks (integer), floored at 1

    """
    # Conservative planning assumes ~3.5 effective days per calendar week;
    # optimistic planning assumes the full 7.
    divisor = 3.5 if conservative else DAYS_PER_WEEK
    return max(1, int(days / divisor))
|
|
131
|
+
|
|
132
|
+
|
|
133
|
+
def estimate_effort_for_complexity(
    complexity_score: float, resource_count: int = 1
) -> EffortMetrics:
    """
    Estimate effort based on complexity score and resource count.

    Provides consistent effort estimation across all components.

    Formula:
        - Base effort: resource_count * 0.125 days per recipe/resource
        - Complexity multiplier: 1.0 + (complexity_score / 100)
        - Final effort: base_effort * complexity_multiplier

    Args:
        complexity_score: Score from 0-100 (0=simple, 100=complex)
        resource_count: Number of resources to migrate (recipes, templates, etc.)

    Returns:
        EffortMetrics object with all representations

    """
    # Scale the per-resource baseline by how complex the work scored,
    # rounding to one decimal place for stable display.
    scale = 1.0 + (complexity_score / 100)
    days = resource_count * EFFORT_MULTIPLIER_PER_RESOURCE * scale
    return EffortMetrics(estimated_days=round(days, 1))
|
|
159
|
+
|
|
160
|
+
|
|
161
|
+
def categorize_complexity(score: float) -> ComplexityLevel:
    """
    Categorize complexity score into standard levels.

    Consistent thresholds across all components:
    - Low: 0-29
    - Medium: 30-69
    - High: 70-100

    Args:
        score: Complexity score from 0-100

    Returns:
        ComplexityLevel enum value

    """
    # Check the highest band first so each guard can return immediately.
    if score >= COMPLEXITY_THRESHOLD_HIGH:
        return ComplexityLevel.HIGH
    if score >= COMPLEXITY_THRESHOLD_LOW:
        return ComplexityLevel.MEDIUM
    return ComplexityLevel.LOW
|
|
183
|
+
|
|
184
|
+
|
|
185
|
+
def get_team_recommendation(total_effort_days: float) -> TeamRecommendation:
    """
    Get team composition and timeline recommendation based on total effort.

    Consistent recommendations across all components.

    Args:
        total_effort_days: Total person-days of effort

    Returns:
        TeamRecommendation with team size and timeline

    """
    # Tier the recommendation by effort: under 20 days a single developer
    # suffices, under 50 a small team, otherwise a full migration team.
    if total_effort_days < 20:
        return TeamRecommendation(
            team_size="1 developer + 1 reviewer",
            timeline_weeks=4,
            description="Single developer with oversight",
        )
    if total_effort_days < 50:
        return TeamRecommendation(
            team_size="2 developers + 1 senior reviewer",
            timeline_weeks=6,
            description="Small dedicated team",
        )
    return TeamRecommendation(
        team_size="3-4 developers + 1 tech lead + 1 architect",
        timeline_weeks=10,
        description="Large dedicated migration team",
    )
|
|
216
|
+
|
|
217
|
+
|
|
218
|
+
def get_timeline_weeks(total_effort_days: float, strategy: str = "phased") -> int:
    """
    Calculate recommended timeline in weeks based on effort and strategy.

    Consistent timeline calculation across planning, dependency mapping, and reports.

    Args:
        total_effort_days: Total person-days estimated
        strategy: Migration strategy ('phased', 'big_bang', 'parallel')

    Returns:
        Recommended timeline in weeks

    """
    # Distribute effort across normal team capacity (~4.5 person-days of
    # output per week), with a floor of two weeks.
    base_weeks = max(2, int(total_effort_days / 4.5))

    # Strategy overhead factors:
    #   phased   -> +10% for testing between phases
    #   big_bang -> -10% (faster but riskier)
    #   parallel -> +5% coordination overhead (also the fallback for
    #               unrecognized strategy names)
    if strategy == "phased":
        factor = 1.1
    elif strategy == "big_bang":
        factor = 0.9
    else:
        factor = 1.05
    return int(base_weeks * factor)
|
|
246
|
+
|
|
247
|
+
|
|
248
|
+
def validate_metrics_consistency(
    days: float, weeks: str, hours: float, complexity: str
) -> tuple[bool, list[str]]:
    """
    Validate that different metric representations are consistent.

    Used for validation reports to catch contradictions between the day,
    hour, week, and complexity representations of the same estimate.

    Args:
        days: Days estimate
        weeks: Weeks range string (e.g., "2-4 weeks")
        hours: Hours estimate
        complexity: Complexity level string

    Returns:
        Tuple of (is_valid, list_of_errors)

    """
    errors: list[str] = []

    # Check hours consistency (8-hour workday; 1 hour tolerance for rounding)
    expected_hours = days * 8
    if abs(hours - expected_hours) > 1.0:
        errors.append(
            f"Hours mismatch: {hours:.1f} hours but {days} days = "
            f"{expected_hours:.1f} hours"
        )

    # Check weeks consistency (loose check due to range formats)
    errors.extend(_check_weeks_string(days, weeks))

    # Check complexity is a known level
    valid_complexities = {level.value for level in ComplexityLevel}
    if complexity.lower() not in valid_complexities:
        errors.append(f"Invalid complexity level: {complexity}")

    return len(errors) == 0, errors


def _check_weeks_string(days: float, weeks: str) -> list[str]:
    """
    Validate a weeks string against a days estimate.

    Valid formats: "X week", "X weeks", or a range "X-Y weeks".
    Returns a list of error messages (empty when consistent).
    """
    if "week" not in weeks.lower():
        return [f"Invalid weeks format: {weeks}"]

    # Conservative expectation: ~3.5 effective days per calendar week
    expected_weeks = int(days / 3.5)
    number_part = weeks.replace(" weeks", "").replace(" week", "")
    mismatch = (
        f"Weeks mismatch: {weeks} but {days} days should be "
        f"approximately {expected_weeks} weeks"
    )

    try:
        if "-" in weeks:
            # Range format: "X-Y weeks" — allow one week of slack above the range
            parts = number_part.split("-")
            week_min = int(parts[0].strip())
            week_max = int(parts[1].strip())
            if not (week_min <= expected_weeks <= week_max + 1):
                return [mismatch]
        else:
            # Single week format: "X week" or "X weeks" — allow up to 2 weeks
            # of slack for rounding differences. (The original also tested
            # num != expected_weeks first, which is implied by the abs check.)
            num = int(number_part.strip())
            if abs(num - expected_weeks) > 2:
                return [mismatch]
    except (ValueError, IndexError):
        return [f"Invalid weeks format: {weeks}"]
    return []
|
souschef/core/path_utils.py
CHANGED
|
@@ -7,10 +7,8 @@ def _normalize_path(path_str: str) -> Path:
|
|
|
7
7
|
"""
|
|
8
8
|
Normalize a file path for safe filesystem operations.
|
|
9
9
|
|
|
10
|
-
This function resolves relative paths and symlinks
|
|
11
|
-
preventing path traversal attacks (CWE-23).
|
|
12
|
-
intentionally allows full filesystem access as it runs in the user's
|
|
13
|
-
local environment with their permissions.
|
|
10
|
+
This function validates input and resolves relative paths and symlinks
|
|
11
|
+
to absolute paths, preventing path traversal attacks (CWE-23).
|
|
14
12
|
|
|
15
13
|
Args:
|
|
16
14
|
path_str: Path string to normalize.
|
|
@@ -19,17 +17,22 @@ def _normalize_path(path_str: str) -> Path:
|
|
|
19
17
|
Resolved absolute Path object.
|
|
20
18
|
|
|
21
19
|
Raises:
|
|
22
|
-
ValueError: If the path contains null bytes or is invalid.
|
|
20
|
+
ValueError: If the path contains null bytes, traversal attempts, or is invalid.
|
|
23
21
|
|
|
24
22
|
"""
|
|
23
|
+
if not isinstance(path_str, str):
|
|
24
|
+
raise ValueError(f"Path must be a string, got {type(path_str)}")
|
|
25
|
+
|
|
26
|
+
# Reject paths with null bytes
|
|
25
27
|
if "\x00" in path_str:
|
|
26
28
|
raise ValueError(f"Path contains null bytes: {path_str!r}")
|
|
27
29
|
|
|
30
|
+
# Reject paths with obvious directory traversal attempts
|
|
31
|
+
if ".." in path_str:
|
|
32
|
+
raise ValueError(f"Path contains directory traversal: {path_str!r}")
|
|
33
|
+
|
|
28
34
|
try:
|
|
29
|
-
# Resolve to absolute path, removing
|
|
30
|
-
# This is the path normalization function itself that validates input
|
|
31
|
-
# lgtm[py/path-injection]
|
|
32
|
-
# codeql[py/path-injection]
|
|
35
|
+
# Resolve to absolute path, removing ., and resolving symlinks
|
|
33
36
|
return Path(path_str).resolve()
|
|
34
37
|
except (OSError, RuntimeError) as e:
|
|
35
38
|
raise ValueError(f"Invalid path {path_str}: {e}") from e
|
souschef/core/validation.py
CHANGED
|
@@ -586,3 +586,56 @@ class ValidationEngine:
|
|
|
586
586
|
elif result.level == ValidationLevel.INFO:
|
|
587
587
|
summary["info"] += 1
|
|
588
588
|
return summary
|
|
589
|
+
|
|
590
|
+
|
|
591
|
+
def _format_validation_results_summary(
|
|
592
|
+
conversion_type: str, summary: dict[str, int]
|
|
593
|
+
) -> str:
|
|
594
|
+
"""
|
|
595
|
+
Format validation results as a summary.
|
|
596
|
+
|
|
597
|
+
Args:
|
|
598
|
+
conversion_type: Type of conversion.
|
|
599
|
+
summary: Summary of validation results.
|
|
600
|
+
|
|
601
|
+
Returns:
|
|
602
|
+
Formatted summary output.
|
|
603
|
+
|
|
604
|
+
"""
|
|
605
|
+
total_issues = summary["errors"] + summary["warnings"] + summary["info"]
|
|
606
|
+
|
|
607
|
+
if total_issues == 0:
|
|
608
|
+
return f"""# Validation Summary for {conversion_type} Conversion
|
|
609
|
+
|
|
610
|
+
✅ **All validation checks passed!** No issues found.
|
|
611
|
+
|
|
612
|
+
Errors: 0
|
|
613
|
+
Warnings: 0
|
|
614
|
+
Info: 0
|
|
615
|
+
"""
|
|
616
|
+
|
|
617
|
+
# Determine status icon based on error/warning counts
|
|
618
|
+
if summary["errors"] > 0:
|
|
619
|
+
status_icon = "❌"
|
|
620
|
+
elif summary["warnings"] > 0:
|
|
621
|
+
status_icon = "⚠️"
|
|
622
|
+
else:
|
|
623
|
+
status_icon = "ℹ️"
|
|
624
|
+
|
|
625
|
+
# Determine status message based on error/warning counts
|
|
626
|
+
if summary["errors"] > 0:
|
|
627
|
+
status = "Failed"
|
|
628
|
+
elif summary["warnings"] > 0:
|
|
629
|
+
status = "Warning"
|
|
630
|
+
else:
|
|
631
|
+
status = "Passed with info"
|
|
632
|
+
|
|
633
|
+
return f"""# Validation Summary for {conversion_type} Conversion
|
|
634
|
+
|
|
635
|
+
{status_icon} **Validation Results:**
|
|
636
|
+
• Errors: {summary["errors"]}
|
|
637
|
+
• Warnings: {summary["warnings"]}
|
|
638
|
+
• Info: {summary["info"]}
|
|
639
|
+
|
|
640
|
+
**Status:** {status}
|
|
641
|
+
"""
|
souschef/deployment.py
CHANGED
|
@@ -21,6 +21,11 @@ from souschef.core.errors import (
|
|
|
21
21
|
validate_cookbook_structure,
|
|
22
22
|
validate_directory_exists,
|
|
23
23
|
)
|
|
24
|
+
from souschef.core.metrics import (
|
|
25
|
+
ComplexityLevel,
|
|
26
|
+
EffortMetrics,
|
|
27
|
+
categorize_complexity,
|
|
28
|
+
)
|
|
24
29
|
from souschef.core.path_utils import _safe_join
|
|
25
30
|
|
|
26
31
|
# Maximum length for attribute values in Chef attribute parsing
|
|
@@ -51,7 +56,7 @@ def generate_awx_job_template_from_cookbook(
|
|
|
51
56
|
)
|
|
52
57
|
|
|
53
58
|
cookbook = validate_cookbook_structure(cookbook_path)
|
|
54
|
-
cookbook_analysis =
|
|
59
|
+
cookbook_analysis = _analyse_cookbook_for_awx(cookbook, cookbook_name)
|
|
55
60
|
job_template = _generate_awx_job_template(
|
|
56
61
|
cookbook_analysis, cookbook_name, target_environment, include_survey
|
|
57
62
|
)
|
|
@@ -184,7 +189,7 @@ def generate_awx_project_from_cookbooks(
|
|
|
184
189
|
)
|
|
185
190
|
|
|
186
191
|
# Analyze all cookbooks
|
|
187
|
-
cookbooks_analysis =
|
|
192
|
+
cookbooks_analysis = _analyse_cookbooks_directory(cookbooks_path)
|
|
188
193
|
|
|
189
194
|
# Generate project structure
|
|
190
195
|
project_config = _generate_awx_project_config(project_name, scm_type, scm_url)
|
|
@@ -329,7 +334,7 @@ def convert_chef_deployment_to_ansible_strategy(
|
|
|
329
334
|
)
|
|
330
335
|
|
|
331
336
|
# Analyze Chef deployment pattern
|
|
332
|
-
pattern_analysis =
|
|
337
|
+
pattern_analysis = _analyse_chef_deployment_pattern(cookbook)
|
|
333
338
|
|
|
334
339
|
# Determine best strategy if auto-detect
|
|
335
340
|
if deployment_pattern == "auto":
|
|
@@ -626,11 +631,11 @@ def generate_canary_deployment_strategy(
|
|
|
626
631
|
)
|
|
627
632
|
|
|
628
633
|
|
|
629
|
-
def
|
|
634
|
+
def analyse_chef_application_patterns(
|
|
630
635
|
cookbook_path: str, application_type: str = "web_application"
|
|
631
636
|
) -> str:
|
|
632
637
|
"""
|
|
633
|
-
|
|
638
|
+
Analyse cookbook deployment patterns and recommend Ansible strategies.
|
|
634
639
|
|
|
635
640
|
Detects blue/green, canary, rolling, or custom deployment approaches.
|
|
636
641
|
Application type helps tune recommendations for web/database/service workloads.
|
|
@@ -647,7 +652,7 @@ def analyze_chef_application_patterns(
|
|
|
647
652
|
)
|
|
648
653
|
|
|
649
654
|
# Analyze cookbook for application patterns
|
|
650
|
-
analysis =
|
|
655
|
+
analysis = _analyse_application_cookbook(cookbook, application_type)
|
|
651
656
|
|
|
652
657
|
return f"""# Chef Application Patterns Analysis
|
|
653
658
|
# Cookbook: {cookbook.name}
|
|
@@ -685,9 +690,9 @@ def analyze_chef_application_patterns(
|
|
|
685
690
|
# AWX Helper Functions
|
|
686
691
|
|
|
687
692
|
|
|
688
|
-
def
|
|
693
|
+
def _analyse_recipes(cookbook_path: Path) -> list[dict[str, Any]]:
|
|
689
694
|
"""
|
|
690
|
-
|
|
695
|
+
Analyse recipes directory for AWX job steps.
|
|
691
696
|
|
|
692
697
|
Args:
|
|
693
698
|
cookbook_path: Path to cookbook root
|
|
@@ -710,11 +715,11 @@ def _analyze_recipes(cookbook_path: Path) -> list[dict[str, Any]]:
|
|
|
710
715
|
return recipes
|
|
711
716
|
|
|
712
717
|
|
|
713
|
-
def
|
|
718
|
+
def _analyse_attributes_for_survey(
|
|
714
719
|
cookbook_path: Path,
|
|
715
720
|
) -> tuple[dict[str, Any], list[dict[str, Any]]]:
|
|
716
721
|
"""
|
|
717
|
-
|
|
722
|
+
Analyse attributes directory for survey field generation.
|
|
718
723
|
|
|
719
724
|
Args:
|
|
720
725
|
cookbook_path: Path to cookbook root
|
|
@@ -748,7 +753,7 @@ def _analyze_attributes_for_survey(
|
|
|
748
753
|
return attributes, survey_fields
|
|
749
754
|
|
|
750
755
|
|
|
751
|
-
def
|
|
756
|
+
def _analyse_metadata_dependencies(cookbook_path: Path) -> list[str]:
|
|
752
757
|
"""
|
|
753
758
|
Extract cookbook dependencies from metadata.
|
|
754
759
|
|
|
@@ -795,9 +800,9 @@ def _collect_static_files(cookbook_path: Path) -> tuple[list[str], list[str]]:
|
|
|
795
800
|
return templates, files
|
|
796
801
|
|
|
797
802
|
|
|
798
|
-
def
|
|
803
|
+
def _analyse_cookbook_for_awx(cookbook_path: Path, cookbook_name: str) -> dict:
|
|
799
804
|
"""
|
|
800
|
-
|
|
805
|
+
Analyse Chef cookbook structure for AWX job template generation.
|
|
801
806
|
|
|
802
807
|
Orchestrates multiple analysis helpers to build comprehensive cookbook metadata.
|
|
803
808
|
|
|
@@ -810,9 +815,9 @@ def _analyze_cookbook_for_awx(cookbook_path: Path, cookbook_name: str) -> dict:
|
|
|
810
815
|
|
|
811
816
|
"""
|
|
812
817
|
# Analyze each dimension independently
|
|
813
|
-
recipes =
|
|
814
|
-
attributes, survey_fields =
|
|
815
|
-
dependencies =
|
|
818
|
+
recipes = _analyse_recipes(cookbook_path)
|
|
819
|
+
attributes, survey_fields = _analyse_attributes_for_survey(cookbook_path)
|
|
820
|
+
dependencies = _analyse_metadata_dependencies(cookbook_path)
|
|
816
821
|
templates, files = _collect_static_files(cookbook_path)
|
|
817
822
|
|
|
818
823
|
# Assemble complete analysis
|
|
@@ -1155,8 +1160,8 @@ def _generate_survey_fields_from_attributes(attributes: dict) -> list:
|
|
|
1155
1160
|
return survey_fields
|
|
1156
1161
|
|
|
1157
1162
|
|
|
1158
|
-
def
|
|
1159
|
-
"""
|
|
1163
|
+
def _analyse_cookbooks_directory(cookbooks_path: Path) -> dict:
|
|
1164
|
+
"""Analyse entire cookbooks directory structure."""
|
|
1160
1165
|
analysis: dict[str, Any] = {
|
|
1161
1166
|
"total_cookbooks": 0,
|
|
1162
1167
|
"cookbooks": {},
|
|
@@ -1172,7 +1177,7 @@ def _analyze_cookbooks_directory(cookbooks_path: Path) -> dict:
|
|
|
1172
1177
|
cookbook_name = cookbook_dir.name
|
|
1173
1178
|
analysis["total_cookbooks"] += 1
|
|
1174
1179
|
|
|
1175
|
-
cookbook_analysis =
|
|
1180
|
+
cookbook_analysis = _analyse_cookbook_for_awx(cookbook_dir, cookbook_name)
|
|
1176
1181
|
analysis["cookbooks"][cookbook_name] = cookbook_analysis
|
|
1177
1182
|
|
|
1178
1183
|
# Aggregate stats
|
|
@@ -1186,8 +1191,8 @@ def _analyze_cookbooks_directory(cookbooks_path: Path) -> dict:
|
|
|
1186
1191
|
# Deployment Strategy Helper Functions
|
|
1187
1192
|
|
|
1188
1193
|
|
|
1189
|
-
def
|
|
1190
|
-
"""
|
|
1194
|
+
def _analyse_chef_deployment_pattern(cookbook_path: Path) -> dict:
|
|
1195
|
+
"""Analyse Chef cookbook for deployment patterns."""
|
|
1191
1196
|
analysis: dict[str, Any] = {
|
|
1192
1197
|
"deployment_steps": [],
|
|
1193
1198
|
"health_checks": [],
|
|
@@ -1496,17 +1501,60 @@ def _detect_patterns_from_content(content: str) -> list[str]:
|
|
|
1496
1501
|
return patterns
|
|
1497
1502
|
|
|
1498
1503
|
|
|
1499
|
-
def _assess_complexity_from_resource_count(
    resource_count: int,
) -> tuple[ComplexityLevel, str, str]:
    """
    Assess complexity, effort estimate, and risk based on resource count.

    Uses centralized metrics for consistent complexity categorization.

    Args:
        resource_count: Number of resources in cookbook

    Returns:
        Tuple of (complexity_level, effort_estimate_weeks, risk_level)

    """
    # Map resource count onto the shared 0-100 complexity scale:
    #   >50 resources -> score 80 (high)
    #   31-50         -> score 50 (medium)
    #   20-30         -> score 40 (medium)
    #   <20           -> score 15 (low)
    if resource_count > 50:
        score = 80
    elif resource_count > 30:
        score = 50
    elif resource_count >= 20:
        score = 40
    else:
        score = 15

    # Centralized categorization keeps this consistent with other components
    level = categorize_complexity(score)

    # Effort: 0.2 days (~1.6 hours) per resource, scaled by complexity,
    # then rendered as a week range via the shared metrics object.
    estimated_days = round(resource_count * 0.2 * (1 + score / 100), 1)
    effort_estimate = EffortMetrics(estimated_days=estimated_days).estimated_weeks_range

    # Risk tracks the complexity level directly
    risk_by_level = {
        ComplexityLevel.HIGH: "high",
        ComplexityLevel.MEDIUM: "medium",
    }
    return level, effort_estimate, risk_by_level.get(level, "low")
|
|
1507
1554
|
|
|
1508
|
-
|
|
1509
|
-
|
|
1555
|
+
|
|
1556
|
+
def _analyse_application_cookbook(cookbook_path: Path, app_type: str) -> dict:
|
|
1557
|
+
"""Analyse Chef cookbook for application deployment patterns."""
|
|
1510
1558
|
analysis: dict[str, Any] = {
|
|
1511
1559
|
"application_type": app_type,
|
|
1512
1560
|
"deployment_patterns": [],
|
|
@@ -1536,10 +1584,14 @@ def _analyze_application_cookbook(cookbook_path: Path, app_type: str) -> dict:
|
|
|
1536
1584
|
# Silently skip malformed files
|
|
1537
1585
|
pass
|
|
1538
1586
|
|
|
1539
|
-
# Assess complexity
|
|
1587
|
+
# Assess complexity using centralized function
|
|
1540
1588
|
resource_count = len(analysis["resources"])
|
|
1541
|
-
|
|
1542
|
-
|
|
1589
|
+
complexity_level, effort, risk = _assess_complexity_from_resource_count(
|
|
1590
|
+
resource_count
|
|
1591
|
+
)
|
|
1592
|
+
|
|
1593
|
+
# Convert complexity level enum to string for backward compatibility
|
|
1594
|
+
analysis["complexity"] = complexity_level.value
|
|
1543
1595
|
analysis["effort_estimate"] = effort
|
|
1544
1596
|
analysis["risk_level"] = risk
|
|
1545
1597
|
|
|
@@ -1650,7 +1702,7 @@ def _format_deployment_patterns(analysis: dict) -> str:
|
|
|
1650
1702
|
|
|
1651
1703
|
def _format_chef_resources_analysis(analysis: dict) -> str:
|
|
1652
1704
|
"""Format Chef resources analysis."""
|
|
1653
|
-
# Check for new format first (from
|
|
1705
|
+
# Check for new format first (from _analyse_application_cookbook)
|
|
1654
1706
|
resources = analysis.get("resources", [])
|
|
1655
1707
|
if resources:
|
|
1656
1708
|
# Count resource types
|