mcp-souschef 2.0.1__py3-none-any.whl → 2.1.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {mcp_souschef-2.0.1.dist-info → mcp_souschef-2.1.2.dist-info}/METADATA +427 -79
- mcp_souschef-2.1.2.dist-info/RECORD +29 -0
- souschef/__init__.py +17 -0
- souschef/assessment.py +1230 -0
- souschef/converters/__init__.py +23 -0
- souschef/converters/habitat.py +674 -0
- souschef/converters/playbook.py +1698 -0
- souschef/converters/resource.py +228 -0
- souschef/core/__init__.py +58 -0
- souschef/core/constants.py +145 -0
- souschef/core/path_utils.py +58 -0
- souschef/core/ruby_utils.py +39 -0
- souschef/core/validation.py +555 -0
- souschef/deployment.py +1594 -0
- souschef/filesystem/__init__.py +5 -0
- souschef/filesystem/operations.py +67 -0
- souschef/parsers/__init__.py +36 -0
- souschef/parsers/attributes.py +257 -0
- souschef/parsers/habitat.py +288 -0
- souschef/parsers/inspec.py +771 -0
- souschef/parsers/metadata.py +175 -0
- souschef/parsers/recipe.py +200 -0
- souschef/parsers/resource.py +170 -0
- souschef/parsers/template.py +342 -0
- souschef/server.py +1532 -7599
- mcp_souschef-2.0.1.dist-info/RECORD +0 -8
- {mcp_souschef-2.0.1.dist-info → mcp_souschef-2.1.2.dist-info}/WHEEL +0 -0
- {mcp_souschef-2.0.1.dist-info → mcp_souschef-2.1.2.dist-info}/entry_points.txt +0 -0
- {mcp_souschef-2.0.1.dist-info → mcp_souschef-2.1.2.dist-info}/licenses/LICENSE +0 -0
souschef/deployment.py
ADDED
@@ -0,0 +1,1594 @@

````python
"""
Deployment and AWX/AAP integration for Chef to Ansible migration.

This module provides tools for analyzing Chef deployment patterns, generating
Ansible deployment strategies (blue/green, canary, rolling), and creating
AWX/AAP configurations from Chef cookbooks.
"""

import json
import re
from pathlib import Path
from typing import Any

from souschef.core.constants import (
    CHEF_RECIPE_PREFIX,
    CHEF_ROLE_PREFIX,
    METADATA_FILENAME,
)
from souschef.core.path_utils import _normalize_path, _safe_join

# Maximum length for attribute values in Chef attribute parsing
# Prevents ReDoS attacks from extremely long attribute declarations
MAX_ATTRIBUTE_VALUE_LENGTH = 5000

# AWX/AAP Integration Functions


def generate_awx_job_template_from_cookbook(
    cookbook_path: str,
    cookbook_name: str,
    target_environment: str = "production",
    include_survey: bool = True,
) -> str:
    """
    Generate AWX/AAP job template from Chef cookbook.

    Analyzes cookbook structure and generates importable AWX configuration.
    Survey specs auto-generated from cookbook attributes when include_survey=True.
    """
    try:
        cookbook = _normalize_path(cookbook_path)
        if not cookbook.exists():
            return f"Cookbook not found at {cookbook_path}"

        cookbook_analysis = _analyze_cookbook_for_awx(cookbook, cookbook_name)
        job_template = _generate_awx_job_template(
            cookbook_analysis, cookbook_name, target_environment, include_survey
        )

        return f"""# AWX/AAP Job Template Configuration
# Generated from Chef cookbook: {cookbook_name}

## Job Template JSON:
```json
{json.dumps(job_template, indent=2)}
```

## CLI Import Command:
```bash
awx-cli job_templates create \\
  --name "{job_template["name"]}" \\
  --project "{job_template["project"]}" \\
  --playbook "{job_template["playbook"]}" \\
  --inventory "{job_template["inventory"]}" \\
  --credential "{job_template["credential"]}" \\
  --job_type run \\
  --verbosity 1
```

## Cookbook Analysis Summary:
{_format_cookbook_analysis(cookbook_analysis)}
"""
    except Exception as e:
        return f"AWX template generation failed for {cookbook_name}: {e}"


def generate_awx_workflow_from_chef_runlist(
    runlist_content: str, workflow_name: str, environment: str = "production"
) -> str:
    """
    Generate AWX workflow from Chef runlist.

    Handles JSON arrays, comma-separated, or single recipe/role items.
    Workflows preserve runlist execution order with success/failure paths.
    """
    try:
        # Parse runlist
        runlist = _parse_chef_runlist(runlist_content)

        # Generate workflow template
        workflow_template = _generate_awx_workflow_template(
            runlist, workflow_name, environment
        )

        return f"""# AWX/AAP Workflow Template Configuration
# Generated from Chef runlist for: {workflow_name}

## Workflow Template JSON:
```json
{json.dumps(workflow_template, indent=2)}
```

## Workflow Nodes Configuration:
{_format_workflow_nodes(workflow_template.get("workflow_nodes", []))}

## Chef Runlist Analysis:
- Total recipes/roles: {len(runlist)}
- Execution order preserved: Yes
- Dependencies mapped: Yes

## Import Instructions:
1. Create individual job templates for each cookbook
2. Import workflow template using AWX CLI or API
3. Configure workflow node dependencies
4. Test execution with survey parameters
"""
    except Exception as e:
        return f"Workflow generation failed: {e}"


def generate_awx_project_from_cookbooks(
    cookbooks_directory: str,
    project_name: str,
    scm_type: str = "git",
    scm_url: str = "",
) -> str:
    """
    Generate AWX/AAP project configuration from Chef cookbooks directory.

    Args:
        cookbooks_directory: Path to Chef cookbooks directory.
        project_name: Name for the AWX project.
        scm_type: SCM type (git, svn, etc.).
        scm_url: SCM repository URL.

    Returns:
        AWX/AAP project configuration with converted playbooks structure.

    """
    try:
        cookbooks_path = _normalize_path(cookbooks_directory)
        if not cookbooks_path.exists():
            return f"Error: Cookbooks directory not found: {cookbooks_directory}"

        # Analyze all cookbooks
        cookbooks_analysis = _analyze_cookbooks_directory(cookbooks_path)

        # Generate project structure
        project_config = _generate_awx_project_config(project_name, scm_type, scm_url)

        return f"""# AWX/AAP Project Configuration
# Generated from Chef cookbooks: {project_name}

## Project Configuration:
```json
{json.dumps(project_config, indent=2)}
```

## Recommended Directory Structure:
```
{project_name}/
├── playbooks/
{_format_playbook_structure(cookbooks_analysis)}
├── inventories/
│   ├── production/
│   ├── staging/
│   └── development/
├── group_vars/
├── host_vars/
└── requirements.yml
```

## Cookbooks Analysis:
{_format_cookbooks_analysis(cookbooks_analysis)}

## Migration Steps:
1. Convert cookbooks to Ansible playbooks
2. Set up SCM repository with recommended structure
3. Create AWX project pointing to repository
4. Configure job templates for each converted cookbook
5. Set up inventories and credentials
"""
    except Exception as e:
        return f"Project configuration failed: {e}"


def generate_awx_inventory_source_from_chef(
    chef_server_url: str, organization: str = "Default", sync_schedule: str = "daily"
) -> str:
    """
    Generate AWX/AAP inventory source from Chef server configuration.

    Args:
        chef_server_url: Chef server URL for inventory sync.
        organization: AWX organization name.
        sync_schedule: Inventory sync schedule (hourly, daily, weekly).

    Returns:
        AWX/AAP inventory source configuration for Chef server integration.

    """
    try:
        # Generate inventory source configuration
        inventory_source = _generate_chef_inventory_source(
            chef_server_url, sync_schedule
        )

        # Generate custom inventory script
        custom_script = _generate_chef_inventory_script(chef_server_url)

        return f"""# AWX/AAP Inventory Source Configuration
# Chef Server Integration: {chef_server_url}

## Inventory Source JSON:
```json
{json.dumps(inventory_source, indent=2)}
```

## Custom Inventory Script:
```python
{custom_script}
```

## Setup Instructions:
1. Create custom credential type for Chef server authentication
2. Create credential with Chef client key and node name
3. Upload custom inventory script to AWX
4. Create inventory source with Chef server configuration
5. Configure sync schedule and test inventory update

## Credential Type Fields:
- chef_server_url: Chef server URL
- chef_node_name: Chef client node name
- chef_client_key: Chef client private key
- chef_client_pem: Chef client PEM file content

## Environment Variables:
- CHEF_SERVER_URL: {chef_server_url}
- CHEF_NODE_NAME: ${{{{chef_node_name}}}}
- CHEF_CLIENT_KEY: ${{{{chef_client_key}}}}
"""
    except Exception as e:
        return f"Inventory source generation failed: {e}"
````
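For orientation, a minimal usage sketch of the job-template generator above. The cookbook path and name are hypothetical; per the code, errors come back as report strings rather than exceptions:

```python
# Minimal usage sketch (hypothetical cookbook path and name); assumes the
# mcp-souschef wheel is installed so souschef.deployment is importable.
from souschef.deployment import generate_awx_job_template_from_cookbook

report = generate_awx_job_template_from_cookbook(
    cookbook_path="./cookbooks/nginx",  # hypothetical cookbook location
    cookbook_name="nginx",
    target_environment="staging",
    include_survey=True,
)
# The Markdown report embeds the job-template JSON, an awx-cli import
# command, and a cookbook analysis summary.
print(report)
```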
````python
# Deployment Strategy Functions


def convert_chef_deployment_to_ansible_strategy(
    cookbook_path: str, deployment_pattern: str = "auto"
) -> str:
    """
    Convert Chef deployment patterns to Ansible strategies.

    Auto-detects blue/green, canary, or rolling patterns from recipe content.
    Override auto-detection by specifying explicit pattern.
    """
    try:
        cookbook = _normalize_path(cookbook_path)
        if not cookbook.exists():
            return f"Error: Cookbook path not found: {cookbook_path}"

        # Analyze Chef deployment pattern
        pattern_analysis = _analyze_chef_deployment_pattern(cookbook)

        # Determine best strategy if auto-detect
        if deployment_pattern == "auto":
            deployment_pattern = pattern_analysis.get(
                "detected_pattern", "rolling_update"
            )

        # Generate appropriate Ansible strategy
        strategy = _generate_ansible_deployment_strategy(
            pattern_analysis, deployment_pattern
        )

        return f"""# Ansible Deployment Strategy
# Converted from Chef cookbook deployment pattern

## Detected Pattern: {pattern_analysis.get("detected_pattern", "unknown")}
## Recommended Strategy: {deployment_pattern}

{strategy}

## Analysis Summary:
{_format_deployment_analysis(pattern_analysis)}

## Migration Recommendations:
{_generate_deployment_migration_recommendations(pattern_analysis)}
"""
    except Exception as e:
        return f"Deployment pattern conversion failed: {e}"


def generate_blue_green_deployment_playbook(
    app_name: str, health_check_url: str = "/health"
) -> str:
    """
    Generate blue/green deployment playbook for zero-downtime deployments.

    Args:
        app_name: Application name for deployment.
        health_check_url: Health check endpoint URL.

    Returns:
        Complete blue/green deployment playbook with health checks and rollback.

    """
    try:
        # Generate main deployment playbook
        playbook = _generate_blue_green_playbook(app_name, health_check_url)

        return f"""# Blue/Green Deployment Playbook
# Application: {app_name}

## Main Playbook (deploy_blue_green.yml):
```yaml
{playbook["main_playbook"]}
```

## Health Check Playbook (health_check.yml):
```yaml
{playbook["health_check"]}
```

## Rollback Playbook (rollback.yml):
```yaml
{playbook["rollback"]}
```

## Load Balancer Configuration:
```yaml
{playbook["load_balancer_config"]}
```

## Usage Instructions:
1. Deploy to blue environment:
   `ansible-playbook deploy_blue_green.yml -e target_env=blue`
2. Verify health checks pass
3. Switch traffic to blue:
   `ansible-playbook switch_traffic.yml -e target_env=blue`
4. Monitor and rollback if needed: `ansible-playbook rollback.yml`

## Prerequisites:
- Load balancer configured (HAProxy, Nginx, ALB, etc.)
- Health check endpoint available
- Blue and green environments provisioned
"""
    except Exception as e:
        return f"Failed to generate blue/green playbook: {e}"
````
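A sketch of driving the blue/green generator; "billing-api" and the endpoint are placeholders:

```python
# Sketch only; the app name and health endpoint are placeholders.
from souschef.deployment import generate_blue_green_deployment_playbook

output = generate_blue_green_deployment_playbook(
    app_name="billing-api",
    health_check_url="/healthz",
)
# The returned Markdown bundles four YAML documents: the main playbook,
# health_check.yml, rollback.yml, and a load-balancer snippet.
print(output)
```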
````python
def generate_canary_deployment_strategy(
    app_name: str, canary_percentage: int = 10, rollout_steps: str = "10,25,50,100"
) -> str:
    """
    Generate canary deployment with progressive rollout.

    Starts at canary_percentage, progresses through rollout_steps.
    Includes monitoring checks and automatic rollback on failure.
    """
    try:
        # Parse rollout steps
        steps = [int(s.strip()) for s in rollout_steps.split(",")]

        # Generate canary strategy
        strategy = _generate_canary_strategy(app_name, canary_percentage, steps)

        return f"""# Canary Deployment Strategy
# Application: {app_name}
# Initial Canary: {canary_percentage}%
# Rollout Steps: {rollout_steps}

## Canary Deployment Playbook (deploy_canary.yml):
```yaml
{strategy["canary_playbook"]}
```

## Monitoring Playbook (monitor_canary.yml):
```yaml
{strategy["monitoring"]}
```

## Progressive Rollout Playbook (progressive_rollout.yml):
```yaml
{strategy["progressive_rollout"]}
```

## Automated Rollback (rollback_canary.yml):
```yaml
{strategy["rollback"]}
```

## Deployment Workflow:
1. Deploy canary at {canary_percentage}%: `ansible-playbook deploy_canary.yml`
2. Monitor metrics: `ansible-playbook monitor_canary.yml`
3. Progressive rollout: `ansible-playbook progressive_rollout.yml`
   - Step 1: {steps[0]}% traffic
   - Step 2: {steps[1]}% traffic
   - Step 3: {steps[2]}% traffic
   - Step 4: {steps[3]}% traffic (full rollout)
4. Rollback if issues: `ansible-playbook rollback_canary.yml`

## Monitoring Points:
- Error rate comparison (canary vs stable)
- Response time percentiles (p50, p95, p99)
- Resource utilization (CPU, memory)
- Custom business metrics

## Rollback Triggers:
- Error rate increase > 5%
- Response time degradation > 20%
- Failed health checks
- Manual trigger
"""
    except Exception as e:
        return f"Canary deployment generation failed: {e}"


def analyze_chef_application_patterns(
    cookbook_path: str, application_type: str = "web_application"
) -> str:
    """
    Analyze cookbook deployment patterns and recommend Ansible strategies.

    Detects blue/green, canary, rolling, or custom deployment approaches.
    Application type helps tune recommendations for web/database/service workloads.
    """
    try:
        cookbook = _normalize_path(cookbook_path)
        if not cookbook.exists():
            return f"Error: Cookbook path not found: {cookbook_path}"

        # Analyze cookbook for application patterns
        analysis = _analyze_application_cookbook(cookbook, application_type)

        return f"""# Chef Application Patterns Analysis
# Cookbook: {cookbook.name}
# Application Type: {application_type}

## Detected Patterns:
{_format_deployment_patterns(analysis)}

## Chef Resources Analysis:
{_format_chef_resources_analysis(analysis)}

## Recommended Ansible Strategies:
{_recommend_ansible_strategies(analysis)}

## Migration Complexity:
- Overall: {analysis.get("complexity", "medium")}
- Estimated effort: {analysis.get("effort_estimate", "2-3 weeks")}
- Risk level: {analysis.get("risk_level", "medium")}

## Next Steps:
1. Review detected patterns and validate accuracy
2. Select appropriate deployment strategy
3. Prepare test environment for validation
4. Execute pilot migration with one environment
5. Document lessons learned and iterate
"""
    except Exception as e:
        return f"Couldn't analyze cookbook patterns: {e}"
````
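A usage sketch with a hypothetical app name. Note that the "Deployment Workflow" section of the report indexes four rollout steps (`steps[0]`..`steps[3]`); with fewer than four comma-separated percentages the IndexError is caught and the function returns the failure string instead:

```python
# Sketch only; "payments-api" is a placeholder application name.
from souschef.deployment import generate_canary_deployment_strategy

print(
    generate_canary_deployment_strategy(
        app_name="payments-api",
        canary_percentage=10,
        rollout_steps="10,25,50,100",  # keep four steps, as the default does
    )
)
```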
````python
# AWX Helper Functions


def _analyze_cookbook_for_awx(cookbook_path: Path, cookbook_name: str) -> dict:
    """Analyze Chef cookbook structure for AWX job template generation."""
    analysis: dict[str, Any] = {
        "name": cookbook_name,
        "recipes": [],
        "attributes": {},
        "dependencies": [],
        "templates": [],
        "files": [],
        "survey_fields": [],
    }

    # Check for recipes to convert into AWX job steps
    recipes_dir = _safe_join(cookbook_path, "recipes")
    if recipes_dir.exists():
        for recipe_file in recipes_dir.glob("*.rb"):
            recipe_name = recipe_file.stem
            analysis["recipes"].append(
                {
                    "name": recipe_name,
                    "file": str(recipe_file),
                    "size": recipe_file.stat().st_size,
                }
            )

    # Analyze attributes for survey generation
    attributes_dir = _safe_join(cookbook_path, "attributes")
    if attributes_dir.exists():
        for attr_file in attributes_dir.glob("*.rb"):
            try:
                with attr_file.open("r") as f:
                    content = f.read()

                # Extract attribute declarations for survey
                attributes = _extract_cookbook_attributes(content)
                analysis["attributes"].update(attributes)

                # Generate survey fields from attributes
                survey_fields = _generate_survey_fields_from_attributes(attributes)
                analysis["survey_fields"].extend(survey_fields)

            except Exception:
                # Silently skip malformed attribute files
                pass

    # Analyze dependencies
    metadata_file = _safe_join(cookbook_path, METADATA_FILENAME)
    if metadata_file.exists():
        try:
            with metadata_file.open("r") as f:
                content = f.read()

            dependencies = _extract_cookbook_dependencies(content)
            analysis["dependencies"] = dependencies

        except Exception:
            # Silently skip malformed metadata
            pass

    # Count templates and files
    templates_dir = _safe_join(cookbook_path, "templates")
    if templates_dir.exists():
        analysis["templates"] = [
            f.name for f in templates_dir.rglob("*") if f.is_file()
        ]

    files_dir = _safe_join(cookbook_path, "files")
    if files_dir.exists():
        analysis["files"] = [f.name for f in files_dir.rglob("*") if f.is_file()]

    return analysis


def _generate_awx_job_template(
    analysis: dict, cookbook_name: str, environment: str, include_survey: bool
) -> dict:
    """Generate AWX job template configuration from cookbook analysis."""
    job_template = {
        "name": f"{cookbook_name}-{environment}",
        "description": f"Deploy {cookbook_name} cookbook to {environment}",
        "job_type": "run",
        "project": f"{cookbook_name}-project",
        "playbook": f"playbooks/{cookbook_name}.yml",
        "inventory": environment,
        "credential": f"{environment}-ssh",
        "verbosity": 1,
        "ask_variables_on_launch": True,
        "ask_limit_on_launch": True,
        "ask_tags_on_launch": False,
        "ask_skip_tags_on_launch": False,
        "ask_job_type_on_launch": False,
        "ask_verbosity_on_launch": False,
        "ask_inventory_on_launch": False,
        "ask_credential_on_launch": False,
        "survey_enabled": include_survey and len(analysis.get("survey_fields", [])) > 0,
        "become_enabled": True,
        "host_config_key": "",
        "auto_run_on_commit": False,
        "timeout": 3600,
    }

    if include_survey and analysis.get("survey_fields"):
        job_template["survey_spec"] = {
            "name": f"{cookbook_name} Configuration",
            "description": f"Configuration parameters for {cookbook_name} cookbook",
            "spec": analysis["survey_fields"],
        }

    return job_template


def _generate_awx_workflow_template(
    runlist: list, workflow_name: str, environment: str
) -> dict:
    """Generate AWX workflow template from Chef runlist."""
    workflow_template: dict[str, Any] = {
        "name": f"{workflow_name}-{environment}",
        "description": f"Execute {workflow_name} runlist in {environment}",
        "organization": "Default",
        "survey_enabled": True,
        "ask_variables_on_launch": True,
        "ask_limit_on_launch": True,
        "workflow_nodes": [],
    }

    # Generate workflow nodes from runlist
    for index, recipe in enumerate(runlist):
        node_id = index + 1
        node = {
            "id": node_id,
            "unified_job_template": f"{recipe.replace('::', '-')}-{environment}",
            "unified_job_template_type": "job_template",
            "success_nodes": [node_id + 1] if index < len(runlist) - 1 else [],
            "failure_nodes": [],
            "always_nodes": [],
            "inventory": environment,
            "credential": f"{environment}-ssh",
        }
        workflow_template["workflow_nodes"].append(node)

    return workflow_template
````
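The node-chaining logic links each runlist entry to its successor via `success_nodes`. Hand-traced for a two-item runlist (values follow directly from the code above):

```python
from souschef.deployment import _generate_awx_workflow_template

nodes = _generate_awx_workflow_template(
    ["nginx::default", "app::deploy"], "web-stack", "staging"
)["workflow_nodes"]
# nodes[0]["unified_job_template"] == "nginx-default-staging"
# nodes[0]["success_nodes"] == [2]   # chains to the next runlist entry
# nodes[1]["unified_job_template"] == "app-deploy-staging"
# nodes[1]["success_nodes"] == []    # last node ends the workflow
```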
````python
def _generate_awx_project_config(
    project_name: str, scm_type: str, scm_url: str
) -> dict:
    """Generate AWX project configuration from cookbooks analysis."""
    project_config = {
        "name": project_name,
        "description": "Ansible playbooks converted from Chef cookbooks",
        "organization": "Default",
        "scm_type": scm_type,
        "scm_url": scm_url,
        "scm_branch": "main",
        "scm_clean": True,
        "scm_delete_on_update": False,
        "credential": f"{scm_type}-credential",
        "timeout": 300,
        "scm_update_on_launch": True,
        "scm_update_cache_timeout": 0,
        "allow_override": False,
        "default_environment": None,
    }

    return project_config


def _generate_chef_inventory_source(chef_server_url: str, sync_schedule: str) -> dict:
    """Generate Chef server inventory source configuration."""
    inventory_source = {
        "name": "Chef Server Inventory",
        "description": f"Dynamic inventory from Chef server: {chef_server_url}",
        "inventory": "Chef Nodes",
        "source": "scm",
        "source_project": "chef-inventory-scripts",
        "source_path": "chef_inventory.py",
        "credential": "chef-server-credential",  # NOSONAR - credential name, not secret
        "overwrite": True,
        "overwrite_vars": True,
        "timeout": 300,
        "verbosity": 1,
        "update_on_launch": True,
        "update_cache_timeout": 86400,  # 24 hours
        "source_vars": json.dumps(
            {
                "chef_server_url": chef_server_url,
                "ssl_verify": True,
                "group_by_environment": True,
                "group_by_roles": True,
                "group_by_platform": True,
            },
            indent=2,
        ),
    }

    # Map sync schedule to update frequency
    schedule_mapping = {"hourly": 3600, "daily": 86400, "weekly": 604800}

    inventory_source["update_cache_timeout"] = schedule_mapping.get(
        sync_schedule, 86400
    )

    return inventory_source


def _generate_chef_inventory_script(chef_server_url: str) -> str:
    """Generate custom inventory script for Chef server integration."""
    return f'''#!/usr/bin/env python3
"""AWX/AAP Custom Inventory Script for Chef Server.

Connects to Chef server and generates Ansible inventory.
"""
import json
import os
import sys

from chef import ChefAPI


def main():
    """Main inventory generation function."""
    # Chef server configuration
    chef_server_url = os.environ.get('CHEF_SERVER_URL', '{chef_server_url}')
    client_name = os.environ.get('CHEF_NODE_NAME', 'admin')
    client_key = os.environ.get('CHEF_CLIENT_KEY', '/etc/chef/client.pem')

    # Initialize Chef API
    try:
        api = ChefAPI(chef_server_url, client_key, client_name)

        # Build Ansible inventory
        inventory = {{
            '_meta': {{'hostvars': {{}}}},
            'all': {{'children': []}},
            'ungrouped': {{'hosts': []}}
        }}

        # Get all nodes from Chef server
        nodes = api['/nodes']

        for node_name in nodes:
            node = api[f'/nodes/{{node_name}}']

            # Extract node information
            node_data = {{
                'ansible_host': node.get('automatic', {{}}).get(
                    'ipaddress', node_name
                ),
                'chef_environment': node.get('chef_environment', '_default'),
                'chef_roles': node.get('run_list', []),
                'chef_platform': node.get('automatic', {{}}).get('platform'),
                'chef_platform_version': (
                    node.get('automatic', {{}}).get('platform_version')
                )
            }}

            # Add to hostvars
            inventory['_meta']['hostvars'][node_name] = node_data

            # Group by environment
            env_group = f"environment_{{node_data['chef_environment']}}"
            if env_group not in inventory:
                inventory[env_group] = {{'hosts': []}}
                inventory['all']['children'].append(env_group)
            inventory[env_group]['hosts'].append(node_name)

            # Group by roles
            for role in node.get('run_list', []):
                role_name = role.replace('role[', '').replace(']', '')
                if role_name.startswith('recipe['):
                    continue

                role_group = f"role_{{role_name}}"
                if role_group not in inventory:
                    inventory[role_group] = {{'hosts': []}}
                    inventory['all']['children'].append(role_group)
                inventory[role_group]['hosts'].append(node_name)

            # Group by platform
            if node_data['chef_platform']:
                platform_group = f"platform_{{node_data['chef_platform']}}"
                if platform_group not in inventory:
                    inventory[platform_group] = {{'hosts': []}}
                    inventory['all']['children'].append(platform_group)
                inventory[platform_group]['hosts'].append(node_name)

        # Output inventory JSON
        print(json.dumps(inventory, indent=2))

    except Exception as e:
        print(f"Error connecting to Chef server: {{e}}", file=sys.stderr)
        sys.exit(1)


if __name__ == '__main__':
    main()
'''
````
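The generated script depends on PyChef (`from chef import ChefAPI`), which must be installed wherever AWX executes it. A hypothetical smoke test, assuming the script was saved as `chef_inventory.py`; the URL, node name, and key path are placeholders:

```python
# Hypothetical smoke test; chef_inventory.py, the server URL, node name,
# and key path are all placeholders.
import json
import os
import subprocess

env = dict(
    os.environ,
    CHEF_SERVER_URL="https://chef.example.com/organizations/acme",
    CHEF_NODE_NAME="awx-sync",
    CHEF_CLIENT_KEY="/etc/chef/awx-sync.pem",
)
result = subprocess.run(
    ["python3", "chef_inventory.py"],
    capture_output=True, text=True, check=True, env=env,
)
inventory = json.loads(result.stdout)
# The script emits the Ansible dynamic-inventory shape: _meta.hostvars plus
# environment_*, role_*, and platform_* groups.
assert "hostvars" in inventory["_meta"]
```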
````python
def _parse_chef_runlist(runlist_content: str) -> list:
    """Parse Chef runlist content into list of recipes/roles."""
    try:
        # Try parsing as JSON first
        if runlist_content.strip().startswith("["):
            runlist = json.loads(runlist_content)
            return [
                item.replace(CHEF_RECIPE_PREFIX, "")
                .replace(CHEF_ROLE_PREFIX, "")
                .replace("]", "")
                for item in runlist
            ]
    except json.JSONDecodeError:
        # Not valid JSON; fall through to parse as comma-separated or single item
        pass

    # Parse as comma-separated list
    if "," in runlist_content:
        items = [item.strip() for item in runlist_content.split(",")]
        return [
            item.replace(CHEF_RECIPE_PREFIX, "")
            .replace(CHEF_ROLE_PREFIX, "")
            .replace("]", "")
            for item in items
        ]

    # Parse single item
    return [
        runlist_content.replace(CHEF_RECIPE_PREFIX, "")
        .replace(CHEF_ROLE_PREFIX, "")
        .replace("]", "")
    ]
````
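Assuming `CHEF_RECIPE_PREFIX == "recipe["` and `CHEF_ROLE_PREFIX == "role["` (they live in `souschef.core.constants`, which is not shown in this diff), the parser strips Chef's bracket syntax from all three input shapes:

```python
from souschef.deployment import _parse_chef_runlist

# Assuming CHEF_RECIPE_PREFIX == "recipe[" and CHEF_ROLE_PREFIX == "role[":
_parse_chef_runlist('["recipe[nginx::default]", "role[web]"]')
# -> ["nginx::default", "web"]   (JSON array form)
_parse_chef_runlist("recipe[nginx], recipe[app::deploy]")
# -> ["nginx", "app::deploy"]    (comma-separated form)
_parse_chef_runlist("role[db]")
# -> ["db"]                      (single item)
```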
````python
def _extract_cookbook_attributes(content: str) -> dict:
    """Extract cookbook attributes for survey generation."""
    attributes = {}

    # Find default attribute declarations
    # Pattern handles multiline values with line continuations, hashes, and arrays
    # Uses bounded quantifier to prevent ReDoS on malformed input
    attr_pattern = (
        r"default\[['\"]([^'\"]+)['\"]\]\s*=\s*"
        rf"(.{{0,{MAX_ATTRIBUTE_VALUE_LENGTH}}}?)"
        r"(?=\n(?!.*\\$)|$|#)"
    )
    for match in re.finditer(attr_pattern, content, re.MULTILINE | re.DOTALL):
        attr_name = match.group(1)
        attr_value = match.group(2).strip()

        # Clean up value - remove trailing backslashes and extra whitespace
        attr_value = re.sub(r"\\\s*\n\s*", " ", attr_value)
        attr_value = attr_value.strip()

        # Clean up quotes
        if attr_value.startswith(("'", '"')) and attr_value.endswith(("'", '"')):
            attr_value = attr_value[1:-1]

        attributes[attr_name] = attr_value

    return attributes


def _extract_cookbook_dependencies(content: str) -> list:
    """Extract cookbook dependencies from metadata."""
    dependencies = []

    # Find depends declarations
    depends_pattern = r"depends\s+['\"]([^'\"]+)['\"]"
    for match in re.finditer(depends_pattern, content):
        dependencies.append(match.group(1))

    return dependencies


def _generate_survey_fields_from_attributes(attributes: dict) -> list:
    """Generate AWX survey fields from cookbook attributes."""
    survey_fields = []

    for attr_name, attr_value in attributes.items():
        # Determine field type based on value
        field_type = "text"
        if attr_value.lower() in ["true", "false"]:
            field_type = "boolean"
        elif attr_value.isdigit():
            field_type = "integer"

        field = {
            "variable": attr_name.replace(".", "_"),
            "question_name": attr_name.replace(".", " ").title(),
            "question_description": f"Chef attribute: {attr_name}",
            "required": False,
            "type": field_type,
            "default": attr_value,
            "choices": "",
        }

        survey_fields.append(field)

    return survey_fields
````
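Hand-traced behavior of the two extractors above, on illustrative metadata and attribute snippets:

```python
from souschef.deployment import (
    _extract_cookbook_dependencies,
    _generate_survey_fields_from_attributes,
)

# depends lines are matched by a simple quoted-string regex; version
# constraints after the name are ignored.
_extract_cookbook_dependencies("depends 'apt'\ndepends 'nginx', '>= 2.0'")
# -> ["apt", "nginx"]

# Survey field types are inferred from the attribute's string value:
# "true"/"false" -> boolean, all-digits -> integer, anything else -> text.
fields = _generate_survey_fields_from_attributes(
    {"nginx.worker_processes": "4", "nginx.ssl_enabled": "true"}
)
# fields[0]["type"] == "integer"
# fields[0]["variable"] == "nginx_worker_processes"
# fields[1]["type"] == "boolean"
```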
````python
def _analyze_cookbooks_directory(cookbooks_path: Path) -> dict:
    """Analyze entire cookbooks directory structure."""
    analysis: dict[str, Any] = {
        "total_cookbooks": 0,
        "cookbooks": {},
        "total_recipes": 0,
        "total_templates": 0,
        "total_files": 0,
    }

    for cookbook_dir in cookbooks_path.iterdir():
        if not cookbook_dir.is_dir():
            continue

        cookbook_name = cookbook_dir.name
        analysis["total_cookbooks"] += 1

        cookbook_analysis = _analyze_cookbook_for_awx(cookbook_dir, cookbook_name)
        analysis["cookbooks"][cookbook_name] = cookbook_analysis

        # Aggregate stats
        analysis["total_recipes"] += len(cookbook_analysis["recipes"])
        analysis["total_templates"] += len(cookbook_analysis["templates"])
        analysis["total_files"] += len(cookbook_analysis["files"])

    return analysis


# Deployment Strategy Helper Functions


def _analyze_chef_deployment_pattern(cookbook_path: Path) -> dict:
    """Analyze Chef cookbook for deployment patterns."""
    analysis: dict[str, Any] = {
        "deployment_steps": [],
        "health_checks": [],
        "service_management": [],
        "load_balancer_config": {},
        "detected_pattern": "rolling_update",
        "complexity": "medium",
    }

    # Analyze recipes for deployment indicators
    recipes_dir = _safe_join(cookbook_path, "recipes")
    if recipes_dir.exists():
        for recipe_file in recipes_dir.glob("*.rb"):
            try:
                with recipe_file.open("r") as f:
                    content = f.read()

                # Extract deployment steps
                steps = _extract_deployment_steps(content)
                analysis["deployment_steps"].extend(steps)

                # Extract health checks
                health_checks = _extract_health_checks(content)
                analysis["health_checks"].extend(health_checks)

                # Extract service management
                services = _extract_service_management(content)
                analysis["service_management"].extend(services)

                # Detect deployment pattern
                if "blue" in content.lower() or "green" in content.lower():
                    analysis["detected_pattern"] = "blue_green"
                elif "canary" in content.lower():
                    analysis["detected_pattern"] = "canary"
                elif "rolling" in content.lower():
                    analysis["detected_pattern"] = "rolling_update"

            except Exception:
                # Silently skip malformed files
                pass

    return analysis


def _generate_ansible_deployment_strategy(analysis: dict, pattern: str) -> str:
    """Generate Ansible deployment strategy based on pattern."""
    if pattern == "blue_green":
        return _generate_blue_green_conversion_playbook(analysis)
    elif pattern == "canary":
        return _generate_canary_conversion_playbook(analysis)
    else:
        return _generate_rolling_update_playbook(analysis)


def _generate_blue_green_playbook(app_name: str, health_check_url: str) -> dict:
    """
    Generate blue/green deployment playbook structure.

    Args:
        app_name: Name of the application.
        health_check_url: URL for health checks.

    """
    main_playbook = f"""---
# Blue/Green Deployment for {app_name}
- name: Deploy {app_name} (Blue/Green)
  hosts: "{{{{ target_env }}}}"
  become: yes
  vars:
    app_name: {app_name}
    health_check_url: {health_check_url}
    deployment_version: "{{{{ lookup('env', 'VERSION') | default('latest') }}}}"

  tasks:
    - name: Deploy application to target environment
      include_tasks: deploy_app.yml

    - name: Run health checks
      include_tasks: health_check.yml

    - name: Switch load balancer traffic
      include_tasks: switch_traffic.yml
      when: health_check_passed
"""
    health_check = """---
# Health Check Playbook
- name: Verify application health
  uri:
    url: "http://{{ ansible_host }}{health_check_url}"
    method: GET
    status_code: 200
    timeout: 10
  register: health_check_result
  retries: 5
  delay: 10
  until: health_check_result.status == 200

- name: Set health check status
  set_fact:
    health_check_passed: "{{ health_check_result.status == 200 }}"
"""
    rollback = f"""---
# Rollback Playbook
- name: Rollback {app_name} deployment
  hosts: load_balancers
  become: yes
  tasks:
    - name: Switch traffic back to previous environment
      include_tasks: switch_traffic.yml
      vars:
        target_env: "{{{{ previous_env }}}}"

    - name: Verify rollback health
      include_tasks: health_check.yml
"""
    load_balancer_config = """---
# Load Balancer Configuration
- name: Update load balancer configuration
  template:
    src: nginx.conf.j2
    dest: /etc/nginx/sites-enabled/{{ app_name }}.conf
  notify: reload nginx

- name: Reload nginx
  service:
    name: nginx
    state: reloaded
"""
    return {
        "main_playbook": main_playbook,
        "health_check": health_check,
        "rollback": rollback,
        "load_balancer_config": load_balancer_config,
    }


def _generate_canary_strategy(app_name: str, canary_pct: int, steps: list) -> dict:
    """Generate canary deployment strategy structure."""
    canary_playbook = f"""---
# Canary Deployment for {app_name}
- name: Deploy {app_name} (Canary)
  hosts: canary_servers
  become: yes
  vars:
    app_name: {app_name}
    canary_percentage: {canary_pct}
    deployment_version: "{{{{ lookup('env', 'VERSION') }}}}"

  tasks:
    - name: Deploy to canary servers
      include_tasks: deploy_app.yml

    - name: Configure canary traffic routing
      include_tasks: configure_canary_routing.yml

    - name: Monitor canary metrics
      include_tasks: monitor_metrics.yml
"""
    monitoring = """---
# Monitoring Playbook
- name: Collect canary metrics
  uri:
    url: "http://{{ ansible_host }}/metrics"
    method: GET
    return_content: yes
  register: canary_metrics

- name: Compare with stable metrics
  uri:
    url: "http://{{ stable_server }}/metrics"
    method: GET
    return_content: yes
  register: stable_metrics

- name: Evaluate canary performance
  set_fact:
    canary_passed: "{{ canary_metrics.error_rate < stable_metrics.error_rate * 1.05 }}"
"""
    progressive_rollout = _format_canary_workflow(steps)

    rollback = f"""---
# Canary Rollback
- name: Rollback canary deployment for {app_name}
  hosts: canary_servers
  become: yes
  tasks:
    - name: Remove canary traffic routing
      include_tasks: remove_canary_routing.yml

    - name: Restore previous version
      include_tasks: restore_previous_version.yml

    - name: Verify stable operation
      include_tasks: health_check.yml
"""
    return {
        "canary_playbook": canary_playbook,
        "monitoring": monitoring,
        "progressive_rollout": progressive_rollout,
        "rollback": rollback,
    }


def _extract_deployment_steps(content: str) -> list:
    """Extract deployment steps from Chef recipe content."""
    steps = []

    # Look for execute resources with deployment commands
    execute_pattern = r'execute\s+[\'"]([^\'"]+)[\'"]'
    for match in re.finditer(execute_pattern, content):
        command = match.group(1)
        if any(
            keyword in command.lower()
            for keyword in ["deploy", "restart", "reload", "migrate"]
        ):
            steps.append({"type": "execute", "command": command})

    return steps


def _extract_health_checks(content: str) -> list:
    """Extract health check patterns from Chef recipe content."""
    health_checks = []

    # Look for http_request or similar resources
    http_pattern = r'http_request\s+[\'"]([^\'"]+)[\'"]'
    for match in re.finditer(http_pattern, content):
        health_checks.append({"type": "http_check", "url": match.group(1)})

    return health_checks


def _extract_service_management(content: str) -> list:
    """Extract service management patterns from Chef recipe content."""
    services = []

    # Look for service resources
    service_pattern = r'service\s+[\'"]([^\'"]+)[\'"]'
    for match in re.finditer(service_pattern, content):
        services.append({"type": "service", "name": match.group(1)})

    return services


def _detect_deployment_patterns_in_recipe(content: str, recipe_name: str) -> list:
    """Detect deployment patterns in a Chef recipe."""
    patterns: list[dict[str, str]] = []

    pattern_indicators = {
        "blue_green": [
            r"blue.*green|green.*blue",
            r"switch.*traffic|traffic.*switch",
            r"active.*inactive|inactive.*active",
        ],
        "rolling": [
            r"rolling.*update|serial.*update",
            r"batch.*deployment|phased.*rollout",
            r"gradual.*deployment",
        ],
        "canary": [
            r"canary.*deployment|canary.*release",
            r"percentage.*traffic|traffic.*percentage",
            r"A/B.*test|split.*traffic",
        ],
        "immutable": [
            r"immutable.*deployment|replace.*instance",
            r"new.*server|fresh.*deployment",
        ],
    }

    for pattern_type, indicators in pattern_indicators.items():
        for indicator in indicators:
            if re.search(indicator, content, re.IGNORECASE):
                patterns.append(
                    {
                        "type": pattern_type,
                        "recipe": recipe_name,
                        "confidence": "high"
                        if len(
                            [
                                i
                                for i in indicators
                                if re.search(i, content, re.IGNORECASE)
                            ]
                        )
                        > 1
                        else "medium",
                    }
                )
                break

    return patterns


def _detect_patterns_from_content(content: str) -> list[str]:
    """Detect deployment patterns from recipe content."""
    patterns = []
    if "package" in content:
        patterns.append("package_management")
    if "template" in content:
        patterns.append("configuration_management")
    if "service" in content:
        patterns.append("service_management")
    if "git" in content:
        patterns.append("source_deployment")
    return patterns


def _assess_complexity_from_resource_count(resource_count: int) -> tuple[str, str, str]:
    """Assess complexity, effort, and risk based on resource count."""
    if resource_count > 50:
        return "high", "4-6 weeks", "high"
    elif resource_count < 20:
        return "low", "1-2 weeks", "low"
    return "medium", "2-3 weeks", "medium"


def _analyze_application_cookbook(cookbook_path: Path, app_type: str) -> dict:
    """Analyze Chef cookbook for application deployment patterns."""
    analysis: dict[str, Any] = {
        "application_type": app_type,
        "deployment_patterns": [],
        "resources": [],
        "complexity": "medium",
        "effort_estimate": "2-3 weeks",
        "risk_level": "medium",
    }

    # Analyze recipes
    recipes_dir = _safe_join(cookbook_path, "recipes")
    if recipes_dir.exists():
        for recipe_file in recipes_dir.glob("*.rb"):
            try:
                with recipe_file.open("r") as f:
                    content = f.read()

                # Count resources
                resource_types = re.findall(r"^(\w+)\s+['\"]", content, re.MULTILINE)
                analysis["resources"].extend(resource_types)

                # Detect patterns
                patterns = _detect_patterns_from_content(content)
                analysis["deployment_patterns"].extend(patterns)

            except Exception:
                # Silently skip malformed files
                pass

    # Assess complexity
    resource_count = len(analysis["resources"])
    complexity, effort, risk = _assess_complexity_from_resource_count(resource_count)
    analysis["complexity"] = complexity
    analysis["effort_estimate"] = effort
    analysis["risk_level"] = risk

    return analysis
````
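The content heuristics above are plain substring checks and fixed thresholds; traced on a small illustrative recipe:

```python
from souschef.deployment import (
    _assess_complexity_from_resource_count,
    _detect_patterns_from_content,
)

recipe = """
package 'nginx'
template '/etc/nginx/nginx.conf'
service 'nginx' do
  action [:enable, :start]
end
"""
_detect_patterns_from_content(recipe)
# -> ["package_management", "configuration_management", "service_management"]

# Thresholds: >50 resources -> high/4-6 weeks, <20 -> low/1-2 weeks, else medium.
_assess_complexity_from_resource_count(12)  # -> ("low", "1-2 weeks", "low")
_assess_complexity_from_resource_count(64)  # -> ("high", "4-6 weeks", "high")
```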
|
|
1261
|
+
# Formatting Functions
|
|
1262
|
+
|
|
1263
|
+
|
|
1264
|
+
def _format_cookbook_analysis(analysis: dict) -> str:
|
|
1265
|
+
"""Format cookbook analysis for display."""
|
|
1266
|
+
formatted = [
|
|
1267
|
+
f"• Recipes: {len(analysis['recipes'])}",
|
|
1268
|
+
f"• Attributes: {len(analysis['attributes'])}",
|
|
1269
|
+
f"• Dependencies: {len(analysis['dependencies'])}",
|
|
1270
|
+
f"• Templates: {len(analysis['templates'])}",
|
|
1271
|
+
f"• Files: {len(analysis['files'])}",
|
|
1272
|
+
f"• Survey fields: {len(analysis['survey_fields'])}",
|
|
1273
|
+
]
|
|
1274
|
+
|
|
1275
|
+
return "\n".join(formatted)
|
|
1276
|
+
|
|
1277
|
+
|
|
1278
|
+
def _format_workflow_nodes(nodes: list) -> str:
|
|
1279
|
+
"""Format workflow nodes for display."""
|
|
1280
|
+
if not nodes:
|
|
1281
|
+
return "No workflow nodes defined."
|
|
1282
|
+
|
|
1283
|
+
formatted = []
|
|
1284
|
+
for node in nodes:
|
|
1285
|
+
formatted.append(f"• Node {node['id']}: {node['unified_job_template']}")
|
|
1286
|
+
if node.get("success_nodes"):
|
|
1287
|
+
formatted.append(f" → Success: Node {node['success_nodes'][0]}")
|
|
1288
|
+
|
|
1289
|
+
return "\n".join(formatted)
|
|
1290
|
+
|
|
1291
|
+
|
|
1292
|
+
def _format_playbook_structure(analysis: dict) -> str:
|
|
1293
|
+
"""Format recommended playbook structure."""
|
|
1294
|
+
structure_lines = []
|
|
1295
|
+
|
|
1296
|
+
for cookbook_name in analysis.get("cookbooks", {}):
|
|
1297
|
+
structure_lines.append(f"│ ├── {cookbook_name}.yml")
|
|
1298
|
+
|
|
1299
|
+
return "\n".join(structure_lines)
|
|
1300
|
+
|
|
1301
|
+
|
|
1302
|
+
def _format_cookbooks_analysis(analysis: dict) -> str:
|
|
1303
|
+
"""Format cookbooks directory analysis."""
|
|
1304
|
+
formatted = [
|
|
1305
|
+
f"• Total cookbooks: {analysis['total_cookbooks']}",
|
|
1306
|
+
f"• Total recipes: {analysis['total_recipes']}",
|
|
1307
|
+
f"• Total templates: {analysis['total_templates']}",
|
|
1308
|
+
f"• Total files: {analysis['total_files']}",
|
|
1309
|
+
]
|
|
1310
|
+
|
|
1311
|
+
if analysis["cookbooks"]:
|
|
1312
|
+
formatted.append("\n### Cookbook Details:")
|
|
1313
|
+
for name, info in list(analysis["cookbooks"].items())[:5]:
|
|
1314
|
+
formatted.append(
|
|
1315
|
+
f"• {name}: {len(info['recipes'])} recipes, "
|
|
1316
|
+
f"{len(info['attributes'])} attributes"
|
|
1317
|
+
)
|
|
1318
|
+
|
|
1319
|
+
if len(analysis["cookbooks"]) > 5:
|
|
1320
|
+
formatted.append(f"... and {len(analysis['cookbooks']) - 5} more cookbooks")
|
|
1321
|
+
|
|
1322
|
+
return "\n".join(formatted)
|
|
1323
|
+
|
|
1324
|
+
|
|
1325
|
+
def _format_deployment_analysis(analysis: dict) -> str:
|
|
1326
|
+
"""Format deployment pattern analysis."""
|
|
1327
|
+
formatted = [
|
|
1328
|
+
f"• Deployment steps: {len(analysis.get('deployment_steps', []))}",
|
|
1329
|
+
f"• Health checks: {len(analysis.get('health_checks', []))}",
|
|
1330
|
+
f"• Services managed: {len(analysis.get('service_management', []))}",
|
|
1331
|
+
f"• Complexity: {analysis.get('complexity', 'unknown')}",
|
|
1332
|
+
]
|
|
1333
|
+
|
|
1334
|
+
return "\n".join(formatted)
|
|
1335
|
+
|
|
1336
|
+
|
|
1337
|
+
def _format_deployment_patterns(analysis: dict) -> str:
|
|
1338
|
+
"""Format detected deployment patterns."""
|
|
1339
|
+
patterns = analysis.get("deployment_patterns", [])
|
|
1340
|
+
if not patterns:
|
|
1341
|
+
return "No specific deployment patterns detected."
|
|
1342
|
+
|
|
1343
|
+
formatted = []
|
|
1344
|
+
for pattern in patterns:
|
|
1345
|
+
if isinstance(pattern, dict):
|
|
1346
|
+
# Format: {"type": "...", "recipe": "...", "confidence": "..."}
|
|
1347
|
+
pattern_type = pattern.get("type", "unknown")
|
|
1348
|
+
recipe = pattern.get("recipe", "")
|
|
1349
|
+
confidence = pattern.get("confidence", "")
|
|
1350
|
+
line = f"• {pattern_type.replace('_', ' ').title()}"
|
|
1351
|
+
if recipe:
|
|
1352
|
+
line += f" (in {recipe})"
|
|
1353
|
+
if confidence:
|
|
1354
|
+
line += f" - {confidence} confidence"
|
|
1355
|
+
formatted.append(line)
|
|
1356
|
+
else:
|
|
1357
|
+
# Format: just a string like "package_management"
|
|
1358
|
+
formatted.append(f"• {pattern.replace('_', ' ').title()}")
|
|
1359
|
+
|
|
1360
|
+
return "\n".join(formatted)
|
|
1361
|
+
|
|
1362
|
+
|
|
1363
|
+
def _format_chef_resources_analysis(analysis: dict) -> str:
|
|
1364
|
+
"""Format Chef resources analysis."""
|
|
1365
|
+
# Check for new format first (from _analyze_application_cookbook)
|
|
1366
|
+
resources = analysis.get("resources", [])
|
|
1367
|
+
if resources:
|
|
1368
|
+
# Count resource types
|
|
1369
|
+
resource_counts: dict = {}
|
|
1370
|
+
for resource_type in resources:
|
|
1371
|
+
resource_counts[resource_type] = resource_counts.get(resource_type, 0) + 1
|
|
1372
|
+
|
|
1373
|
+
# Format top resource types
|
|
1374
|
+
top_resources = sorted(
|
|
1375
|
+
resource_counts.items(), key=lambda x: x[1], reverse=True
|
|
1376
|
+
)[:5]
|
|
1377
|
+
|
|
1378
|
+
formatted = []
|
|
1379
|
+
for resource_type, count in top_resources:
|
|
1380
|
+
formatted.append(f"• {resource_type}: {count}")
|
|
1381
|
+
|
|
1382
|
+
return "\n".join(formatted)
|
|
1383
|
+
|
|
1384
|
+
# Check for legacy format (from tests)
|
|
1385
|
+
service_resources = analysis.get("service_resources", [])
|
|
1386
|
+
configuration_files = analysis.get("configuration_files", [])
|
|
1387
|
+
health_checks = analysis.get("health_checks", [])
|
|
1388
|
+
scaling_mechanisms = analysis.get("scaling_mechanisms", [])
|
|
1389
|
+
|
|
1390
|
+
if any([service_resources, configuration_files, health_checks, scaling_mechanisms]):
|
|
1391
|
+
formatted = [
|
|
1392
|
+
f"• Service Resources: {len(service_resources)}",
|
|
1393
|
+
f"• Configuration Files: {len(configuration_files)}",
|
|
1394
|
+
f"• Health Checks: {len(health_checks)}",
|
|
1395
|
+
f"• Scaling Mechanisms: {len(scaling_mechanisms)}",
|
|
1396
|
+
]
|
|
1397
|
+
return "\n".join(formatted)
|
|
1398
|
+
|
|
1399
|
+
return "No Chef resources found."
|
|
1400
|
+
|
|
1401
|
+
|
|
1402
|
+
def _format_canary_workflow(steps: list) -> str:
|
|
1403
|
+
"""Format canary progressive rollout workflow."""
|
|
1404
|
+
workflow = """---
|
|
1405
|
+
# Progressive Rollout Workflow
|
|
1406
|
+
- name: Progressive canary rollout
|
|
1407
|
+
hosts: localhost
|
|
1408
|
+
gather_facts: no
|
|
1409
|
+
vars:
|
|
1410
|
+
rollout_steps: """
|
|
1411
|
+
workflow += str(steps)
|
|
1412
|
+
workflow += """
|
|
1413
|
+
tasks:
|
|
1414
|
+
- name: Execute progressive rollout
|
|
1415
|
+
include_tasks: rollout_step.yml
|
|
1416
|
+
loop: "{{ rollout_steps }}"
|
|
1417
|
+
loop_control:
|
|
1418
|
+
loop_var: target_percentage
|
|
1419
|
+
"""
|
|
1420
|
+
return workflow
|
|
1421
|
+
|
|
1422
|
+
|
def _generate_blue_green_conversion_playbook(_analysis: dict) -> str:
    """Generate blue/green playbook from Chef pattern analysis."""
    return """## Blue/Green Deployment Strategy

Recommended based on detected Chef deployment patterns.

### Playbook Structure:
- Deploy to blue environment
- Health check validation
- Traffic switch to blue
- Monitor blue environment
- Keep green as rollback target

### Implementation:
Use the `generate_blue_green_deployment_playbook` tool to generate complete playbooks.
"""

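# A minimal sketch of the blue/green structure described above, as comments
# (the host groups and health-check URL are assumptions; complete playbooks
# come from the generate_blue_green_deployment_playbook tool):
#
#     - name: Deploy release to blue environment
#       hosts: blue
#     - name: Validate blue health before cutover
#       hosts: blue
#       tasks:
#         - ansible.builtin.uri:
#             url: "http://{{ inventory_hostname }}/health"
#     - name: Switch load balancer traffic to blue
#       hosts: loadbalancers
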
def _generate_canary_conversion_playbook(_analysis: dict) -> str:
    """Generate canary playbook from Chef pattern analysis."""
    return """## Canary Deployment Strategy

Recommended for gradual rollout with monitoring.

### Playbook Structure:
- Deploy to small canary subset
- Monitor error rates and metrics
- Progressive rollout (10% → 25% → 50% → 100%)
- Automated rollback on failure

### Implementation:
Use the `generate_canary_deployment_strategy` tool to generate complete playbooks.
"""

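# A correspondingly minimal canary sketch (host group and batch sizes are
# assumptions; complete playbooks come from the
# generate_canary_deployment_strategy tool). Ansible's `serial` accepts a list
# of batch sizes, which maps directly onto the progressive rollout above:
#
#     - name: Progressive canary rollout
#       hosts: app_servers
#       serial: ["10%", "25%", "50%", "100%"]
#       max_fail_percentage: 0
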
def _generate_rolling_update_playbook(_analysis: dict) -> str:
    """Generate rolling update playbook from Chef pattern analysis."""
    return """## Rolling Update Strategy

Recommended for standard application deployments.

### Playbook Structure:
- Update servers in batches
- Health check between batches
- Continue if healthy, roll back on failure
- Maintain service availability

### Implementation:
```yaml
- name: Rolling update
  hosts: app_servers
  serial: "25%"
  max_fail_percentage: 10
  tasks:
    - name: Update application
      # ... deployment tasks
    - name: Health check
      # ... validation tasks
```
"""

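# In the embedded example above, serial: "25%" updates one quarter of
# app_servers per batch, and max_fail_percentage: 10 aborts the remaining
# batches if more than 10% of hosts in a batch fail - the batching is what
# keeps the service available during the rollout.
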
def _generate_deployment_migration_recommendations(
    patterns: dict, app_type: str = ""
) -> str:
    """
    Generate migration recommendations based on analysis.

    Args:
        patterns: Dictionary containing deployment patterns analysis.
        app_type: Application type (web_application, microservice, database).

    Returns:
        Formatted migration recommendations.

    """
    recommendations: list[str] = []

    deployment_count = len(patterns.get("deployment_patterns", []))

    if deployment_count == 0:
        recommendations.append(
            "• No advanced deployment patterns detected - start with rolling updates"
        )
        recommendations.append("• Implement health checks for reliable deployments")
        recommendations.append("• Add rollback mechanisms for quick recovery")
    else:
        for pattern in patterns.get("deployment_patterns", []):
            if pattern["type"] == "blue_green":
                recommendations.append(
                    "• Convert blue/green logic to Ansible blue/green strategy"
                )
            elif pattern["type"] == "canary":
                recommendations.append(
                    "• Implement canary deployment with automated metrics validation"
                )
            elif pattern["type"] == "rolling":
                recommendations.append(
                    "• Use Ansible serial deployment with health checks"
                )

    # Application-specific recommendations
    if app_type == "web_application":
        recommendations.append(
            "• Implement load balancer integration for traffic management"
        )
        recommendations.append("• Add SSL/TLS certificate handling in deployment")
    elif app_type == "microservice":
        recommendations.append(
            "• Consider service mesh integration for traffic splitting"
        )
        recommendations.append("• Implement service discovery updates")
    elif app_type == "database":
        recommendations.append("• Add database migration handling")
        recommendations.append("• Implement backup and restore procedures")

    # If no specific recommendations, add general ones
    if not recommendations:
        recommendations.extend(
            [
                "1. Start with non-production environment for validation",
                "2. Implement health checks before migration",
                "3. Set up monitoring and alerting",
                "4. Document rollback procedures",
                "5. Train operations team on new deployment process",
                "6. Plan for gradual migration (pilot → staging → production)",
            ]
        )

    return "\n".join(recommendations)

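# Illustrative call (hypothetical analysis):
#
#     _generate_deployment_migration_recommendations(
#         {"deployment_patterns": [{"type": "canary"}]}, app_type="microservice"
#     )
#
# returns the canary-conversion bullet plus the microservice-specific
# service-mesh and service-discovery recommendations.
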
def _recommend_ansible_strategies(patterns: dict) -> str:
    """Recommend appropriate Ansible strategies."""
    strategies: list[str] = []

    # Handle both formats: list of dicts with 'type' key or list of strings
    pattern_list = patterns.get("deployment_patterns", [])
    if pattern_list and isinstance(pattern_list[0], dict):
        detected_patterns = [p["type"] for p in pattern_list]
    else:
        detected_patterns = pattern_list

    if "blue_green" in detected_patterns:
        strategies.append(
            "• Blue/Green: Zero-downtime deployment with instant rollback"
        )
    if "canary" in detected_patterns:
        strategies.append("• Canary: Risk-reduced deployment with gradual rollout")
    if "rolling" in detected_patterns:
        strategies.append(
            "• Rolling Update: Balanced approach with configurable parallelism"
        )

    # Application-pattern specific strategies
    if "package_management" in detected_patterns:
        strategies.append("• Package: Use `package` module for package installation")
    if "configuration_management" in detected_patterns:
        strategies.append("• Config: Use `template` module for configuration files")
    if "service_management" in detected_patterns:
        strategies.append("• Service: Use `service` or `systemd` module for services")
    if "source_deployment" in detected_patterns:
        strategies.append("• Source: Use `git` module for source code deployment")

    if not strategies:
        strategies = [
            "• Rolling Update: Recommended starting strategy",
            "• Blue/Green: For critical applications requiring zero downtime",
            "• Canary: For high-risk deployments requiring validation",
        ]

    return "\n".join(strategies)
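
# Illustrative call (hypothetical analysis):
#
#     _recommend_ansible_strategies(
#         {"deployment_patterns": ["rolling", "service_management"]}
#     )
#
# returns the rolling-update strategy plus the `service`/`systemd` module hint;
# with an empty pattern list, the three default strategies are returned.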