mcp-souschef 2.0.1__py3-none-any.whl → 2.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {mcp_souschef-2.0.1.dist-info → mcp_souschef-2.2.0.dist-info}/METADATA +453 -77
- mcp_souschef-2.2.0.dist-info/RECORD +31 -0
- souschef/__init__.py +17 -0
- souschef/assessment.py +1498 -0
- souschef/cli.py +90 -0
- souschef/converters/__init__.py +23 -0
- souschef/converters/habitat.py +674 -0
- souschef/converters/playbook.py +1736 -0
- souschef/converters/resource.py +325 -0
- souschef/core/__init__.py +80 -0
- souschef/core/constants.py +145 -0
- souschef/core/errors.py +275 -0
- souschef/core/path_utils.py +58 -0
- souschef/core/ruby_utils.py +39 -0
- souschef/core/validation.py +555 -0
- souschef/deployment.py +1906 -0
- souschef/filesystem/__init__.py +5 -0
- souschef/filesystem/operations.py +67 -0
- souschef/parsers/__init__.py +36 -0
- souschef/parsers/attributes.py +257 -0
- souschef/parsers/habitat.py +317 -0
- souschef/parsers/inspec.py +809 -0
- souschef/parsers/metadata.py +211 -0
- souschef/parsers/recipe.py +200 -0
- souschef/parsers/resource.py +170 -0
- souschef/parsers/template.py +342 -0
- souschef/profiling.py +568 -0
- souschef/server.py +1854 -7481
- mcp_souschef-2.0.1.dist-info/RECORD +0 -8
- {mcp_souschef-2.0.1.dist-info → mcp_souschef-2.2.0.dist-info}/WHEEL +0 -0
- {mcp_souschef-2.0.1.dist-info → mcp_souschef-2.2.0.dist-info}/entry_points.txt +0 -0
- {mcp_souschef-2.0.1.dist-info → mcp_souschef-2.2.0.dist-info}/licenses/LICENSE +0 -0
souschef/deployment.py
ADDED
@@ -0,0 +1,1906 @@
"""
Deployment and AWX/AAP integration for Chef to Ansible migration.

This module provides tools for analyzing Chef deployment patterns, generating
Ansible deployment strategies (blue/green, canary, rolling), and creating
AWX/AAP configurations from Chef cookbooks.
"""

import json
import re
from pathlib import Path
from typing import Any

from souschef.core.constants import (
    CHEF_RECIPE_PREFIX,
    CHEF_ROLE_PREFIX,
    METADATA_FILENAME,
)
from souschef.core.errors import (
    format_error_with_context,
    validate_cookbook_structure,
    validate_directory_exists,
)
from souschef.core.path_utils import _safe_join

# Maximum length for attribute values in Chef attribute parsing
# Prevents ReDoS attacks from extremely long attribute declarations
MAX_ATTRIBUTE_VALUE_LENGTH = 5000

# AWX/AAP Integration Functions


def generate_awx_job_template_from_cookbook(
    cookbook_path: str,
    cookbook_name: str,
    target_environment: str = "production",
    include_survey: bool = True,
) -> str:
    """
    Generate AWX/AAP job template from Chef cookbook.

    Analyzes cookbook structure and generates importable AWX configuration.
    Survey specs auto-generated from cookbook attributes when include_survey=True.
    """
    try:
        # Validate inputs
        if not cookbook_name or not cookbook_name.strip():
            return (
                "Error: Cookbook name cannot be empty\n\n"
                "Suggestion: Provide a valid cookbook name"
            )

        cookbook = validate_cookbook_structure(cookbook_path)
        cookbook_analysis = _analyze_cookbook_for_awx(cookbook, cookbook_name)
        job_template = _generate_awx_job_template(
            cookbook_analysis, cookbook_name, target_environment, include_survey
        )

        return f"""# AWX/AAP Job Template Configuration
# Generated from Chef cookbook: {cookbook_name}

## Job Template JSON:
```json
{json.dumps(job_template, indent=2)}
```

## CLI Import Command:
```bash
awx-cli job_templates create \\
  --name "{job_template["name"]}" \\
  --project "{job_template["project"]}" \\
  --playbook "{job_template["playbook"]}" \\
  --inventory "{job_template["inventory"]}" \\
  --credential "{job_template["credential"]}" \\
  --job_type run \\
  --verbosity 1
```

## Cookbook Analysis Summary:
{_format_cookbook_analysis(cookbook_analysis)}
"""
    except Exception as e:
        return format_error_with_context(
            e, f"generating AWX job template for {cookbook_name}", cookbook_path
        )
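
# Illustrative usage (hypothetical cookbook path and name, shown for
# orientation only):
#
#     output = generate_awx_job_template_from_cookbook(
#         "/path/to/cookbooks/nginx", "nginx", target_environment="staging"
#     )
#
# The returned Markdown bundles the job template JSON, an awx-cli import
# command, and a cookbook analysis summary.
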
def generate_awx_workflow_from_chef_runlist(
    runlist_content: str, workflow_name: str, environment: str = "production"
) -> str:
    """
    Generate AWX workflow from Chef runlist.

    Handles JSON arrays, comma-separated, or single recipe/role items.
    Workflows preserve runlist execution order with success/failure paths.
    """
    try:
        # Validate inputs
        if not runlist_content or not runlist_content.strip():
            return (
                "Error: Runlist content cannot be empty\n\n"
                "Suggestion: Provide a valid Chef runlist "
                "(e.g., 'recipe[cookbook::recipe]' or JSON array)"
            )

        if not workflow_name or not workflow_name.strip():
            return (
                "Error: Workflow name cannot be empty\n\n"
                "Suggestion: Provide a descriptive name for the AWX workflow"
            )

        # Parse runlist
        runlist = _parse_chef_runlist(runlist_content)

        if not runlist:
            return (
                "Error: Runlist parsing resulted in no items\n\n"
                "Suggestion: Check runlist format. Expected 'recipe[name]' "
                "or 'role[name]' entries"
            )

        # Generate workflow template
        workflow_template = _generate_awx_workflow_template(
            runlist, workflow_name, environment
        )

        return f"""# AWX/AAP Workflow Template Configuration
# Generated from Chef runlist for: {workflow_name}

## Workflow Template JSON:
```json
{json.dumps(workflow_template, indent=2)}
```

## Workflow Nodes Configuration:
{_format_workflow_nodes(workflow_template.get("workflow_nodes", []))}

## Chef Runlist Analysis:
- Total recipes/roles: {len(runlist)}
- Execution order preserved: Yes
- Dependencies mapped: Yes

## Import Instructions:
1. Create individual job templates for each cookbook
2. Import workflow template using AWX CLI or API
3. Configure workflow node dependencies
4. Test execution with survey parameters
"""
    except Exception as e:
        return format_error_with_context(
            e, f"generating AWX workflow from runlist for {workflow_name}"
        )
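
# Example (hypothetical runlist): passing
#     '["recipe[base::default]", "recipe[app::deploy]"]'
# produces two workflow nodes; node 1 lists node 2 in success_nodes, so the
# generated AWX workflow preserves Chef's execution order.
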
def generate_awx_project_from_cookbooks(
    cookbooks_directory: str,
    project_name: str,
    scm_type: str = "git",
    scm_url: str = "",
) -> str:
    """
    Generate AWX/AAP project configuration from Chef cookbooks directory.

    Args:
        cookbooks_directory: Path to Chef cookbooks directory.
        project_name: Name for the AWX project.
        scm_type: SCM type (git, svn, etc.).
        scm_url: SCM repository URL.

    Returns:
        AWX/AAP project configuration with converted playbooks structure.

    """
    try:
        # Validate inputs
        if not project_name or not project_name.strip():
            return (
                "Error: Project name cannot be empty\n\n"
                "Suggestion: Provide a descriptive name for the AWX project"
            )

        cookbooks_path = validate_directory_exists(
            cookbooks_directory, "cookbooks directory"
        )

        # Analyze all cookbooks
        cookbooks_analysis = _analyze_cookbooks_directory(cookbooks_path)

        # Generate project structure
        project_config = _generate_awx_project_config(project_name, scm_type, scm_url)

        return f"""# AWX/AAP Project Configuration
# Generated from Chef cookbooks: {project_name}

## Project Configuration:
```json
{json.dumps(project_config, indent=2)}
```

## Recommended Directory Structure:
```
{project_name}/
├── playbooks/
{_format_playbook_structure(cookbooks_analysis)}
├── inventories/
│   ├── production/
│   ├── staging/
│   └── development/
├── group_vars/
├── host_vars/
└── requirements.yml
```

## Cookbooks Analysis:
{_format_cookbooks_analysis(cookbooks_analysis)}

## Migration Steps:
1. Convert cookbooks to Ansible playbooks
2. Set up SCM repository with recommended structure
3. Create AWX project pointing to repository
4. Configure job templates for each converted cookbook
5. Set up inventories and credentials
"""
    except Exception as e:
        return format_error_with_context(
            e,
            f"generating AWX project configuration for {project_name}",
            cookbooks_directory,
        )


def generate_awx_inventory_source_from_chef(
    chef_server_url: str, organization: str = "Default", sync_schedule: str = "daily"
) -> str:
    """
    Generate AWX/AAP inventory source from Chef server configuration.

    Args:
        chef_server_url: Chef server URL for inventory sync.
        organization: AWX organization name.
        sync_schedule: Inventory sync schedule (hourly, daily, weekly).

    Returns:
        AWX/AAP inventory source configuration for Chef server integration.

    """
    try:
        # Validate inputs
        if not chef_server_url or not chef_server_url.strip():
            return (
                "Error: Chef server URL cannot be empty\n\n"
                "Suggestion: Provide a valid Chef server URL "
                "(e.g., https://chef.example.com)"
            )

        if not chef_server_url.startswith("https://"):
            return (
                f"Error: Invalid Chef server URL: {chef_server_url}\n\n"
                "Suggestion: URL must use HTTPS protocol for security "
                "(e.g., https://chef.example.com)"
            )

        # Generate inventory source configuration
        inventory_source = _generate_chef_inventory_source(
            chef_server_url, sync_schedule
        )

        # Generate custom inventory script
        custom_script = _generate_chef_inventory_script(chef_server_url)

        return f"""# AWX/AAP Inventory Source Configuration
# Chef Server Integration: {chef_server_url}

## Inventory Source JSON:
```json
{json.dumps(inventory_source, indent=2)}
```

## Custom Inventory Script:
```python
{custom_script}
```

## Setup Instructions:
1. Create custom credential type for Chef server authentication
2. Create credential with Chef client key and node name
3. Upload custom inventory script to AWX
4. Create inventory source with Chef server configuration
5. Configure sync schedule and test inventory update

## Credential Type Fields:
- chef_server_url: Chef server URL
- chef_node_name: Chef client node name
- chef_client_key: Chef client private key
- chef_client_pem: Chef client PEM file content

## Environment Variables:
- CHEF_SERVER_URL: {chef_server_url}
- CHEF_NODE_NAME: ${{{{chef_node_name}}}}
- CHEF_CLIENT_KEY: ${{{{chef_client_key}}}}
"""
    except Exception as e:
        return format_error_with_context(
            e, "generating AWX inventory source from Chef server", chef_server_url
        )


# Deployment Strategy Functions


def convert_chef_deployment_to_ansible_strategy(
    cookbook_path: str, deployment_pattern: str = "auto"
) -> str:
    """
    Convert Chef deployment patterns to Ansible strategies.

    Auto-detects blue/green, canary, or rolling patterns from recipe content.
    Override auto-detection by specifying explicit pattern.
    """
    try:
        cookbook = validate_cookbook_structure(cookbook_path)

        # Validate deployment pattern
        valid_patterns = ["auto", "blue_green", "canary", "rolling_update"]
        if deployment_pattern not in valid_patterns:
            return (
                f"Error: Invalid deployment pattern '{deployment_pattern}'\n\n"
                f"Suggestion: Use one of {', '.join(valid_patterns)}"
            )

        # Analyze Chef deployment pattern
        pattern_analysis = _analyze_chef_deployment_pattern(cookbook)

        # Determine best strategy if auto-detect
        if deployment_pattern == "auto":
            deployment_pattern = pattern_analysis.get(
                "detected_pattern", "rolling_update"
            )

        # Generate appropriate Ansible strategy
        strategy = _generate_ansible_deployment_strategy(
            pattern_analysis, deployment_pattern
        )

        return f"""# Ansible Deployment Strategy
# Converted from Chef cookbook deployment pattern

## Detected Pattern: {pattern_analysis.get("detected_pattern", "unknown")}
## Recommended Strategy: {deployment_pattern}

{strategy}

## Analysis Summary:
{_format_deployment_analysis(pattern_analysis)}

## Migration Recommendations:
{_generate_deployment_migration_recommendations(pattern_analysis)}
"""
    except Exception as e:
        return format_error_with_context(
            e, "converting Chef deployment pattern to Ansible strategy", cookbook_path
        )


def generate_blue_green_deployment_playbook(
    app_name: str, health_check_url: str = "/health"
) -> str:
    """
    Generate blue/green deployment playbook for zero-downtime deployments.

    Args:
        app_name: Application name for deployment.
        health_check_url: Health check endpoint URL.

    Returns:
        Complete blue/green deployment playbook with health checks and rollback.

    """
    try:
        # Validate inputs
        if not app_name or not app_name.strip():
            return (
                "Error: Application name cannot be empty\n\n"
                "Suggestion: Provide a descriptive name for the application "
                "being deployed"
            )

        if not health_check_url.startswith("/"):
            return (
                f"Error: Health check URL must be a path starting with '/': "
                f"{health_check_url}\n\n"
                "Suggestion: Use a relative path like '/health' or '/api/health'"
            )

        # Generate main deployment playbook
        playbook = _generate_blue_green_playbook(app_name, health_check_url)

        return f"""# Blue/Green Deployment Playbook
# Application: {app_name}

## Main Playbook (deploy_blue_green.yml):
```yaml
{playbook["main_playbook"]}
```

## Health Check Playbook (health_check.yml):
```yaml
{playbook["health_check"]}
```

## Rollback Playbook (rollback.yml):
```yaml
{playbook["rollback"]}
```

## Load Balancer Configuration:
```yaml
{playbook["load_balancer_config"]}
```

## Usage Instructions:
1. Deploy to blue environment:
   `ansible-playbook deploy_blue_green.yml -e target_env=blue`
2. Verify health checks pass
3. Switch traffic to blue:
   `ansible-playbook switch_traffic.yml -e target_env=blue`
4. Monitor and rollback if needed: `ansible-playbook rollback.yml`

## Prerequisites:
- Load balancer configured (HAProxy, Nginx, ALB, etc.)
- Health check endpoint available
- Blue and green environments provisioned
"""
    except Exception as e:
        return format_error_with_context(
            e, f"generating blue/green deployment playbook for {app_name}"
        )


def _validate_canary_inputs(
    app_name: str, canary_percentage: int, rollout_steps: str
) -> tuple[list[int] | None, str | None]:
    """
    Validate canary deployment inputs.

    Args:
        app_name: Application name
        canary_percentage: Initial canary percentage
        rollout_steps: Comma-separated rollout steps

    Returns:
        Tuple of (parsed steps list, error message). If error, steps is None.

    """
    # Validate app name
    if not app_name or not app_name.strip():
        return None, (
            "Error: Application name cannot be empty\n\n"
            "Suggestion: Provide a descriptive name for the application"
        )

    # Validate canary percentage
    if not (1 <= canary_percentage <= 100):
        return None, (
            f"Error: Canary percentage must be between 1 and 100, "
            f"got {canary_percentage}\n\n"
            "Suggestion: Start with 10% for safety"
        )

    # Parse and validate rollout steps
    try:
        steps = [int(s.strip()) for s in rollout_steps.split(",")]
        if not all(1 <= s <= 100 for s in steps):
            raise ValueError("Steps must be between 1 and 100")
        if steps != sorted(steps):
            return None, (
                f"Error: Rollout steps must be in ascending order: {rollout_steps}\n\n"
                "Suggestion: Use format like '10,25,50,100'"
            )
        return steps, None
    except ValueError as e:
        return None, (
            f"Error: Invalid rollout steps '{rollout_steps}': {e}\n\n"
            "Suggestion: Use comma-separated percentages like '10,25,50,100'"
        )
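
# Example outcomes (hypothetical values):
#     _validate_canary_inputs("app", 10, "10,25,50,100") -> ([10, 25, 50, 100], None)
#     _validate_canary_inputs("app", 10, "50,25")        -> (None, "Error: ...")
# The second fails because the steps are not in ascending order.
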
def _build_canary_workflow_guide(canary_percentage: int, steps: list[int]) -> str:
    """
    Build deployment workflow guide.

    Args:
        canary_percentage: Initial canary percentage
        steps: List of rollout step percentages

    Returns:
        Formatted workflow guide

    """
    workflow = f"""## Deployment Workflow:
1. Deploy canary at {canary_percentage}%: `ansible-playbook deploy_canary.yml`
2. Monitor metrics: `ansible-playbook monitor_canary.yml`
3. Progressive rollout: `ansible-playbook progressive_rollout.yml`
"""

    # Add step details
    for i, step_pct in enumerate(steps, 1):
        workflow += f"   - Step {i}: {step_pct}% traffic"
        if i == len(steps):
            workflow += " (full rollout)"
        workflow += "\n"

    workflow += """4. Rollback if issues: `ansible-playbook rollback_canary.yml`

## Monitoring Points:
- Error rate comparison (canary vs stable)
- Response time percentiles (p50, p95, p99)
- Resource utilization (CPU, memory)
- Custom business metrics

## Rollback Triggers:
- Error rate increase > 5%
- Response time degradation > 20%
- Failed health checks
- Manual trigger
"""
    return workflow


def _format_canary_output(
    app_name: str,
    canary_percentage: int,
    rollout_steps: str,
    steps: list[int],
    strategy: dict,
) -> str:
    """
    Format complete canary deployment output.

    Args:
        app_name: Application name
        canary_percentage: Initial canary percentage
        rollout_steps: Original rollout steps string
        steps: Parsed rollout steps
        strategy: Generated strategy dict

    Returns:
        Formatted output string

    """
    workflow = _build_canary_workflow_guide(canary_percentage, steps)

    return f"""# Canary Deployment Strategy
# Application: {app_name}
# Initial Canary: {canary_percentage}%
# Rollout Steps: {rollout_steps}

## Canary Deployment Playbook (deploy_canary.yml):
```yaml
{strategy["canary_playbook"]}
```

## Monitoring Playbook (monitor_canary.yml):
```yaml
{strategy["monitoring"]}
```

## Progressive Rollout Playbook (progressive_rollout.yml):
```yaml
{strategy["progressive_rollout"]}
```

## Automated Rollback (rollback_canary.yml):
```yaml
{strategy["rollback"]}
```

{workflow}"""


def generate_canary_deployment_strategy(
    app_name: str, canary_percentage: int = 10, rollout_steps: str = "10,25,50,100"
) -> str:
    """
    Generate canary deployment with progressive rollout.

    Starts at canary_percentage, progresses through rollout_steps.
    Includes monitoring checks and automatic rollback on failure.

    Args:
        app_name: Name of the application
        canary_percentage: Initial canary traffic percentage (1-100)
        rollout_steps: Comma-separated progressive rollout steps

    Returns:
        Formatted canary deployment strategy with playbooks

    """
    try:
        # Validate inputs
        steps, error = _validate_canary_inputs(
            app_name, canary_percentage, rollout_steps
        )
        if error:
            return error

        assert steps is not None, "steps must be non-None after successful validation"

        # Generate canary strategy
        strategy = _generate_canary_strategy(app_name, canary_percentage, steps)

        # Format output
        return _format_canary_output(
            app_name,
            canary_percentage,
            rollout_steps,
            steps,
            strategy,
        )

    except Exception as e:
        return format_error_with_context(
            e, f"generating canary deployment strategy for {app_name}"
        )


def analyze_chef_application_patterns(
    cookbook_path: str, application_type: str = "web_application"
) -> str:
    """
    Analyze cookbook deployment patterns and recommend Ansible strategies.

    Detects blue/green, canary, rolling, or custom deployment approaches.
    Application type helps tune recommendations for web/database/service workloads.
    """
    try:
        cookbook = validate_cookbook_structure(cookbook_path)

        # Validate application type
        valid_app_types = ["web_application", "database", "service", "batch", "api"]
        if application_type not in valid_app_types:
            return (
                f"Error: Invalid application type '{application_type}'\n\n"
                f"Suggestion: Use one of {', '.join(valid_app_types)}"
            )

        # Analyze cookbook for application patterns
        analysis = _analyze_application_cookbook(cookbook, application_type)

        return f"""# Chef Application Patterns Analysis
# Cookbook: {cookbook.name}
# Application Type: {application_type}

## Detected Patterns:
{_format_deployment_patterns(analysis)}

## Chef Resources Analysis:
{_format_chef_resources_analysis(analysis)}

## Recommended Ansible Strategies:
{_recommend_ansible_strategies(analysis)}

## Migration Complexity:
- Overall: {analysis.get("complexity", "medium")}
- Estimated effort: {analysis.get("effort_estimate", "2-3 weeks")}
- Risk level: {analysis.get("risk_level", "medium")}

## Next Steps:
1. Review detected patterns and validate accuracy
2. Select appropriate deployment strategy
3. Prepare test environment for validation
4. Execute pilot migration with one environment
5. Document lessons learned and iterate
"""
    except Exception as e:
        return format_error_with_context(
            e,
            f"analyzing Chef application patterns for {application_type}",
            cookbook_path,
        )


# AWX Helper Functions


def _analyze_recipes(cookbook_path: Path) -> list[dict[str, Any]]:
    """
    Analyze recipes directory for AWX job steps.

    Args:
        cookbook_path: Path to cookbook root

    Returns:
        List of recipe metadata dicts

    """
    recipes = []
    recipes_dir = _safe_join(cookbook_path, "recipes")
    if recipes_dir.exists():
        for recipe_file in recipes_dir.glob("*.rb"):
            recipes.append(
                {
                    "name": recipe_file.stem,
                    "file": str(recipe_file),
                    "size": recipe_file.stat().st_size,
                }
            )
    return recipes


def _analyze_attributes_for_survey(
    cookbook_path: Path,
) -> tuple[dict[str, Any], list[dict[str, Any]]]:
    """
    Analyze attributes directory for survey field generation.

    Args:
        cookbook_path: Path to cookbook root

    Returns:
        Tuple of (attributes dict, survey fields list)

    """
    attributes = {}
    survey_fields = []
    attributes_dir = _safe_join(cookbook_path, "attributes")

    if attributes_dir.exists():
        for attr_file in attributes_dir.glob("*.rb"):
            try:
                with attr_file.open("r") as f:
                    content = f.read()

                # Extract attribute declarations
                attrs = _extract_cookbook_attributes(content)
                attributes.update(attrs)

                # Generate survey fields
                fields = _generate_survey_fields_from_attributes(attrs)
                survey_fields.extend(fields)

            except Exception:
                # Silently skip malformed attribute files
                pass

    return attributes, survey_fields


def _analyze_metadata_dependencies(cookbook_path: Path) -> list[str]:
    """
    Extract cookbook dependencies from metadata.

    Args:
        cookbook_path: Path to cookbook root

    Returns:
        List of dependency names

    """
    metadata_file = _safe_join(cookbook_path, METADATA_FILENAME)
    if metadata_file.exists():
        try:
            with metadata_file.open("r") as f:
                content = f.read()
            return _extract_cookbook_dependencies(content)
        except Exception:
            pass
    return []


def _collect_static_files(cookbook_path: Path) -> tuple[list[str], list[str]]:
    """
    Collect templates and static files from cookbook.

    Args:
        cookbook_path: Path to cookbook root

    Returns:
        Tuple of (template names list, file names list)

    """
    templates = []
    files = []

    templates_dir = _safe_join(cookbook_path, "templates")
    if templates_dir.exists():
        templates = [f.name for f in templates_dir.rglob("*") if f.is_file()]

    files_dir = _safe_join(cookbook_path, "files")
    if files_dir.exists():
        files = [f.name for f in files_dir.rglob("*") if f.is_file()]

    return templates, files


def _analyze_cookbook_for_awx(cookbook_path: Path, cookbook_name: str) -> dict:
    """
    Analyze Chef cookbook structure for AWX job template generation.

    Orchestrates multiple analysis helpers to build comprehensive cookbook metadata.

    Args:
        cookbook_path: Path to cookbook root
        cookbook_name: Name of the cookbook

    Returns:
        Analysis dict with recipes, attributes, dependencies, templates, files, surveys

    """
    # Analyze each dimension independently
    recipes = _analyze_recipes(cookbook_path)
    attributes, survey_fields = _analyze_attributes_for_survey(cookbook_path)
    dependencies = _analyze_metadata_dependencies(cookbook_path)
    templates, files = _collect_static_files(cookbook_path)

    # Assemble complete analysis
    return {
        "name": cookbook_name,
        "recipes": recipes,
        "attributes": attributes,
        "dependencies": dependencies,
        "templates": templates,
        "files": files,
        "survey_fields": survey_fields,
    }


def _generate_awx_job_template(
    analysis: dict, cookbook_name: str, environment: str, include_survey: bool
) -> dict:
    """Generate AWX job template configuration from cookbook analysis."""
    job_template = {
        "name": f"{cookbook_name}-{environment}",
        "description": f"Deploy {cookbook_name} cookbook to {environment}",
        "job_type": "run",
        "project": f"{cookbook_name}-project",
        "playbook": f"playbooks/{cookbook_name}.yml",
        "inventory": environment,
        "credential": f"{environment}-ssh",
        "verbosity": 1,
        "ask_variables_on_launch": True,
        "ask_limit_on_launch": True,
        "ask_tags_on_launch": False,
        "ask_skip_tags_on_launch": False,
        "ask_job_type_on_launch": False,
        "ask_verbosity_on_launch": False,
        "ask_inventory_on_launch": False,
        "ask_credential_on_launch": False,
        "survey_enabled": include_survey and len(analysis.get("survey_fields", [])) > 0,
        "become_enabled": True,
        "host_config_key": "",
        "auto_run_on_commit": False,
        "timeout": 3600,
    }

    if include_survey and analysis.get("survey_fields"):
        job_template["survey_spec"] = {
            "name": f"{cookbook_name} Configuration",
            "description": f"Configuration parameters for {cookbook_name} cookbook",
            "spec": analysis["survey_fields"],
        }

    return job_template
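
# Example (hypothetical inputs): cookbook "nginx" in environment "production"
# yields name "nginx-production", playbook "playbooks/nginx.yml", and
# credential "production-ssh"; survey_enabled is True only when survey fields
# were extracted and include_survey was requested.
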
def _generate_awx_workflow_template(
    runlist: list, workflow_name: str, environment: str
) -> dict:
    """Generate AWX workflow template from Chef runlist."""
    workflow_template: dict[str, Any] = {
        "name": f"{workflow_name}-{environment}",
        "description": f"Execute {workflow_name} runlist in {environment}",
        "organization": "Default",
        "survey_enabled": True,
        "ask_variables_on_launch": True,
        "ask_limit_on_launch": True,
        "workflow_nodes": [],
    }

    # Generate workflow nodes from runlist
    for index, recipe in enumerate(runlist):
        node_id = index + 1
        node = {
            "id": node_id,
            "unified_job_template": f"{recipe.replace('::', '-')}-{environment}",
            "unified_job_template_type": "job_template",
            "success_nodes": [node_id + 1] if index < len(runlist) - 1 else [],
            "failure_nodes": [],
            "always_nodes": [],
            "inventory": environment,
            "credential": f"{environment}-ssh",
        }
        workflow_template["workflow_nodes"].append(node)

    return workflow_template


def _generate_awx_project_config(
    project_name: str, scm_type: str, scm_url: str
) -> dict:
    """Generate AWX project configuration from cookbooks analysis."""
    project_config = {
        "name": project_name,
        "description": "Ansible playbooks converted from Chef cookbooks",
        "organization": "Default",
        "scm_type": scm_type,
        "scm_url": scm_url,
        "scm_branch": "main",
        "scm_clean": True,
        "scm_delete_on_update": False,
        "credential": f"{scm_type}-credential",
        "timeout": 300,
        "scm_update_on_launch": True,
        "scm_update_cache_timeout": 0,
        "allow_override": False,
        "default_environment": None,
    }

    return project_config


def _generate_chef_inventory_source(chef_server_url: str, sync_schedule: str) -> dict:
    """Generate Chef server inventory source configuration."""
    inventory_source = {
        "name": "Chef Server Inventory",
        "description": f"Dynamic inventory from Chef server: {chef_server_url}",
        "inventory": "Chef Nodes",
        "source": "scm",
        "source_project": "chef-inventory-scripts",
        "source_path": "chef_inventory.py",
        "credential": "chef-server-credential",  # NOSONAR - credential name, not secret
        "overwrite": True,
        "overwrite_vars": True,
        "timeout": 300,
        "verbosity": 1,
        "update_on_launch": True,
        "update_cache_timeout": 86400,  # 24 hours
        "source_vars": json.dumps(
            {
                "chef_server_url": chef_server_url,
                "ssl_verify": True,
                "group_by_environment": True,
                "group_by_roles": True,
                "group_by_platform": True,
            },
            indent=2,
        ),
    }

    # Map sync schedule to update frequency
    schedule_mapping = {"hourly": 3600, "daily": 86400, "weekly": 604800}

    inventory_source["update_cache_timeout"] = schedule_mapping.get(
        sync_schedule, 86400
    )

    return inventory_source
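
# Example: sync_schedule "hourly" maps update_cache_timeout to 3600 seconds;
# unrecognized schedules fall back to the daily value (86400).
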
def _generate_chef_inventory_script(chef_server_url: str) -> str:
    """Generate custom inventory script for Chef server integration."""
    return f'''#!/usr/bin/env python3
"""AWX/AAP Custom Inventory Script for Chef Server.

Connects to Chef server and generates Ansible inventory.
"""
import json
import os
import sys

from chef import ChefAPI


def main():
    """Main inventory generation function."""
    # Chef server configuration
    chef_server_url = os.environ.get('CHEF_SERVER_URL', '{chef_server_url}')
    client_name = os.environ.get('CHEF_NODE_NAME', 'admin')
    client_key = os.environ.get('CHEF_CLIENT_KEY', '/etc/chef/client.pem')

    # Initialize Chef API
    try:
        api = ChefAPI(chef_server_url, client_key, client_name)

        # Build Ansible inventory
        inventory = {{
            '_meta': {{'hostvars': {{}}}},
            'all': {{'children': []}},
            'ungrouped': {{'hosts': []}}
        }}

        # Get all nodes from Chef server
        nodes = api['/nodes']

        for node_name in nodes:
            node = api[f'/nodes/{{node_name}}']

            # Extract node information
            node_data = {{
                'ansible_host': node.get('automatic', {{}}).get(
                    'ipaddress', node_name
                ),
                'chef_environment': node.get('chef_environment', '_default'),
                'chef_roles': node.get('run_list', []),
                'chef_platform': node.get('automatic', {{}}).get('platform'),
                'chef_platform_version': (
                    node.get('automatic', {{}}).get('platform_version')
                )
            }}

            # Add to hostvars
            inventory['_meta']['hostvars'][node_name] = node_data

            # Group by environment
            env_group = f"environment_{{node_data['chef_environment']}}"
            if env_group not in inventory:
                inventory[env_group] = {{'hosts': []}}
                inventory['all']['children'].append(env_group)
            inventory[env_group]['hosts'].append(node_name)

            # Group by roles
            for role in node.get('run_list', []):
                role_name = role.replace('role[', '').replace(']', '')
                if role_name.startswith('recipe['):
                    continue

                role_group = f"role_{{role_name}}"
                if role_group not in inventory:
                    inventory[role_group] = {{'hosts': []}}
                    inventory['all']['children'].append(role_group)
                inventory[role_group]['hosts'].append(node_name)

            # Group by platform
            if node_data['chef_platform']:
                platform_group = f"platform_{{node_data['chef_platform']}}"
                if platform_group not in inventory:
                    inventory[platform_group] = {{'hosts': []}}
                    inventory['all']['children'].append(platform_group)
                inventory[platform_group]['hosts'].append(node_name)

        # Output inventory JSON
        print(json.dumps(inventory, indent=2))

    except Exception as e:
        print(f"Error connecting to Chef server: {{e}}", file=sys.stderr)
        sys.exit(1)


if __name__ == '__main__':
    main()
'''


def _parse_chef_runlist(runlist_content: str) -> list:
    """Parse Chef runlist content into list of recipes/roles."""
    try:
        # Try parsing as JSON first
        if runlist_content.strip().startswith("["):
            runlist = json.loads(runlist_content)
            return [
                item.replace(CHEF_RECIPE_PREFIX, "")
                .replace(CHEF_ROLE_PREFIX, "")
                .replace("]", "")
                for item in runlist
            ]
    except json.JSONDecodeError:
        # Not valid JSON; fall through to parse as comma-separated or single item
        pass

    # Parse as comma-separated list
    if "," in runlist_content:
        items = [item.strip() for item in runlist_content.split(",")]
        return [
            item.replace(CHEF_RECIPE_PREFIX, "")
            .replace(CHEF_ROLE_PREFIX, "")
            .replace("]", "")
            for item in items
        ]

    # Parse single item
    return [
        runlist_content.replace(CHEF_RECIPE_PREFIX, "")
        .replace(CHEF_ROLE_PREFIX, "")
        .replace("]", "")
    ]
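
# Example (assuming CHEF_RECIPE_PREFIX == "recipe[" and CHEF_ROLE_PREFIX ==
# "role[", which is how the prefixes are stripped above):
#     '["recipe[nginx::default]", "role[web]"]' -> ["nginx::default", "web"]
#     "recipe[app::db], recipe[app::web]"       -> ["app::db", "app::web"]
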
def _extract_cookbook_attributes(content: str) -> dict:
    """Extract cookbook attributes for survey generation."""
    attributes = {}

    # Find default attribute declarations
    # Pattern handles multiline values with line continuations, hashes, and arrays
    # Uses bounded quantifier to prevent ReDoS on malformed input
    attr_pattern = (
        r"default\[['\"]([^'\"]+)['\"]\]\s*=\s*"
        rf"(.{{0,{MAX_ATTRIBUTE_VALUE_LENGTH}}}?)"
        r"(?=\n(?!.*\\$)|$|#)"
    )
    for match in re.finditer(attr_pattern, content, re.MULTILINE | re.DOTALL):
        attr_name = match.group(1)
        attr_value = match.group(2).strip()

        # Clean up value - remove trailing backslashes and extra whitespace
        attr_value = re.sub(r"\\\s*\n\s*", " ", attr_value)
        attr_value = attr_value.strip()

        # Clean up quotes
        if attr_value.startswith(("'", '"')) and attr_value.endswith(("'", '"')):
            attr_value = attr_value[1:-1]

        attributes[attr_name] = attr_value

    return attributes
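
# Example: a line such as  default['port'] = '8080'  is captured as
# {"port": "8080"} (surrounding quotes stripped). Nested keys like
# default['app']['port'] do not match this single-key pattern.
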
def _extract_cookbook_dependencies(content: str) -> list:
    """Extract cookbook dependencies from metadata."""
    dependencies = []

    # Find depends declarations
    depends_pattern = r"depends\s+['\"]([^'\"]+)['\"]"
    for match in re.finditer(depends_pattern, content):
        dependencies.append(match.group(1))

    return dependencies
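
# Example: metadata.rb lines like  depends 'apache2'  or  depends "mysql", ">= 8.0"
# each contribute only the cookbook name ("apache2", "mysql"); version
# constraints are not captured.
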
def _generate_survey_fields_from_attributes(attributes: dict) -> list:
    """Generate AWX survey fields from cookbook attributes."""
    survey_fields = []

    for attr_name, attr_value in attributes.items():
        # Determine field type based on value
        field_type = "text"
        if attr_value.lower() in ["true", "false"]:
            field_type = "boolean"
        elif attr_value.isdigit():
            field_type = "integer"

        field = {
            "variable": attr_name.replace(".", "_"),
            "question_name": attr_name.replace(".", " ").title(),
            "question_description": f"Chef attribute: {attr_name}",
            "required": False,
            "type": field_type,
            "default": attr_value,
            "choices": "",
        }

        survey_fields.append(field)

    return survey_fields
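
# Example: {"app.port": "8080", "ssl.enabled": "true"} becomes two survey
# fields: "app_port" (type "integer", default "8080") and "ssl_enabled"
# (type "boolean"); dots become underscores so the variables are valid in
# Ansible.
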
def _analyze_cookbooks_directory(cookbooks_path: Path) -> dict:
    """Analyze entire cookbooks directory structure."""
    analysis: dict[str, Any] = {
        "total_cookbooks": 0,
        "cookbooks": {},
        "total_recipes": 0,
        "total_templates": 0,
        "total_files": 0,
    }

    for cookbook_dir in cookbooks_path.iterdir():
        if not cookbook_dir.is_dir():
            continue

        cookbook_name = cookbook_dir.name
        analysis["total_cookbooks"] += 1

        cookbook_analysis = _analyze_cookbook_for_awx(cookbook_dir, cookbook_name)
        analysis["cookbooks"][cookbook_name] = cookbook_analysis

        # Aggregate stats
        analysis["total_recipes"] += len(cookbook_analysis["recipes"])
        analysis["total_templates"] += len(cookbook_analysis["templates"])
        analysis["total_files"] += len(cookbook_analysis["files"])

    return analysis


# Deployment Strategy Helper Functions


def _analyze_chef_deployment_pattern(cookbook_path: Path) -> dict:
    """Analyze Chef cookbook for deployment patterns."""
    analysis: dict[str, Any] = {
        "deployment_steps": [],
        "health_checks": [],
        "service_management": [],
        "load_balancer_config": {},
        "detected_pattern": "rolling_update",
        "complexity": "medium",
    }

    # Analyze recipes for deployment indicators
    recipes_dir = _safe_join(cookbook_path, "recipes")
    if recipes_dir.exists():
        for recipe_file in recipes_dir.glob("*.rb"):
            try:
                with recipe_file.open("r") as f:
                    content = f.read()

                # Extract deployment steps
                steps = _extract_deployment_steps(content)
                analysis["deployment_steps"].extend(steps)

                # Extract health checks
                health_checks = _extract_health_checks(content)
                analysis["health_checks"].extend(health_checks)

                # Extract service management
                services = _extract_service_management(content)
                analysis["service_management"].extend(services)

                # Detect deployment pattern
                if "blue" in content.lower() or "green" in content.lower():
                    analysis["detected_pattern"] = "blue_green"
                elif "canary" in content.lower():
                    analysis["detected_pattern"] = "canary"
                elif "rolling" in content.lower():
                    analysis["detected_pattern"] = "rolling_update"

            except Exception:
                # Silently skip malformed files
                pass

    return analysis


def _generate_ansible_deployment_strategy(analysis: dict, pattern: str) -> str:
    """Generate Ansible deployment strategy based on pattern."""
    if pattern == "blue_green":
        return _generate_blue_green_conversion_playbook(analysis)
    elif pattern == "canary":
        return _generate_canary_conversion_playbook(analysis)
    else:
        return _generate_rolling_update_playbook(analysis)
+
|
|
1243
|
+
def _generate_blue_green_playbook(app_name: str, health_check_url: str) -> dict:
|
|
1244
|
+
"""
|
|
1245
|
+
Generate blue/green deployment playbook structure.
|
|
1246
|
+
|
|
1247
|
+
Args:
|
|
1248
|
+
app_name: Name of the application.
|
|
1249
|
+
health_check_url: URL for health checks.
|
|
1250
|
+
|
|
1251
|
+
"""
|
|
1252
|
+
main_playbook = f"""---
|
|
1253
|
+
# Blue/Green Deployment for {app_name}
|
|
1254
|
+
- name: Deploy {app_name} (Blue/Green)
|
|
1255
|
+
hosts: "{{{{ target_env }}}}"
|
|
1256
|
+
become: yes
|
|
1257
|
+
vars:
|
|
1258
|
+
app_name: {app_name}
|
|
1259
|
+
health_check_url: {health_check_url}
|
|
1260
|
+
deployment_version: "{{{{ lookup('env', 'VERSION') | default('latest') }}}}"
|
|
1261
|
+
|
|
1262
|
+
tasks:
|
|
1263
|
+
- name: Deploy application to target environment
|
|
1264
|
+
include_tasks: deploy_app.yml
|
|
1265
|
+
|
|
1266
|
+
- name: Run health checks
|
|
1267
|
+
include_tasks: health_check.yml
|
|
1268
|
+
|
|
1269
|
+
- name: Switch load balancer traffic
|
|
1270
|
+
include_tasks: switch_traffic.yml
|
|
1271
|
+
when: health_check_passed
|
|
1272
|
+
"""
|
|
1273
|
+
health_check = """---
|
|
1274
|
+
# Health Check Playbook
|
|
1275
|
+
- name: Verify application health
|
|
1276
|
+
uri:
|
|
1277
|
+
url: "http://{{ ansible_host }}{health_check_url}"
|
|
1278
|
+
method: GET
|
|
1279
|
+
status_code: 200
|
|
1280
|
+
timeout: 10
|
|
1281
|
+
register: health_check_result
|
|
1282
|
+
retries: 5
|
|
1283
|
+
delay: 10
|
|
1284
|
+
until: health_check_result.status == 200
|
|
1285
|
+
|
|
1286
|
+
- name: Set health check status
|
|
1287
|
+
set_fact:
|
|
1288
|
+
health_check_passed: "{{ health_check_result.status == 200 }}"
|
|
1289
|
+
"""
|
|
1290
|
+
rollback = f"""---
|
|
1291
|
+
# Rollback Playbook
|
|
1292
|
+
- name: Rollback {app_name} deployment
|
|
1293
|
+
hosts: load_balancers
|
|
1294
|
+
become: yes
|
|
1295
|
+
tasks:
|
|
1296
|
+
- name: Switch traffic back to previous environment
|
|
1297
|
+
include_tasks: switch_traffic.yml
|
|
1298
|
+
vars:
|
|
1299
|
+
target_env: "{{{{ previous_env }}}}"
|
|
1300
|
+
|
|
1301
|
+
- name: Verify rollback health
|
|
1302
|
+
include_tasks: health_check.yml
|
|
1303
|
+
"""
|
|
1304
|
+
load_balancer_config = """---
|
|
1305
|
+
# Load Balancer Configuration
|
|
1306
|
+
- name: Update load balancer configuration
|
|
1307
|
+
template:
|
|
1308
|
+
src: nginx.conf.j2
|
|
1309
|
+
dest: /etc/nginx/sites-enabled/{{ app_name }}.conf
|
|
1310
|
+
notify: reload nginx
|
|
1311
|
+
|
|
1312
|
+
- name: Reload nginx
|
|
1313
|
+
service:
|
|
1314
|
+
name: nginx
|
|
1315
|
+
state: reloaded
|
|
1316
|
+
"""
|
|
1317
|
+
return {
|
|
1318
|
+
"main_playbook": main_playbook,
|
|
1319
|
+
"health_check": health_check,
|
|
1320
|
+
"rollback": rollback,
|
|
1321
|
+
"load_balancer_config": load_balancer_config,
|
|
1322
|
+
}
|
|
1323
|
+
|
|
1324
|
+
def _generate_canary_strategy(app_name: str, canary_pct: int, steps: list) -> dict:
    """Generate canary deployment strategy structure."""
    canary_playbook = f"""---
# Canary Deployment for {app_name}
- name: Deploy {app_name} (Canary)
  hosts: canary_servers
  become: yes
  vars:
    app_name: {app_name}
    canary_percentage: {canary_pct}
    deployment_version: "{{{{ lookup('env', 'VERSION') }}}}"

  tasks:
    - name: Deploy to canary servers
      include_tasks: deploy_app.yml

    - name: Configure canary traffic routing
      include_tasks: configure_canary_routing.yml

    - name: Monitor canary metrics
      include_tasks: monitor_metrics.yml
"""
    monitoring = """---
# Monitoring Playbook
- name: Collect canary metrics
  uri:
    url: "http://{{ ansible_host }}/metrics"
    method: GET
    return_content: yes
  register: canary_metrics

- name: Compare with stable metrics
  uri:
    url: "http://{{ stable_server }}/metrics"
    method: GET
    return_content: yes
  register: stable_metrics

- name: Evaluate canary performance
  set_fact:
    canary_passed: "{{ canary_metrics.error_rate < stable_metrics.error_rate * 1.05 }}"
"""
    progressive_rollout = _format_canary_workflow(steps)

    rollback = f"""---
# Canary Rollback
- name: Rollback canary deployment for {app_name}
  hosts: canary_servers
  become: yes
  tasks:
    - name: Remove canary traffic routing
      include_tasks: remove_canary_routing.yml

    - name: Restore previous version
      include_tasks: restore_previous_version.yml

    - name: Verify stable operation
      include_tasks: health_check.yml
"""
    return {
        "canary_playbook": canary_playbook,
        "monitoring": monitoring,
        "progressive_rollout": progressive_rollout,
        "rollback": rollback,
    }


def _extract_deployment_steps(content: str) -> list:
    """Extract deployment steps from Chef recipe content."""
    steps = []

    # Look for execute resources with deployment commands
    execute_pattern = r'execute\s+[\'"]([^\'"]+)[\'"]'
    for match in re.finditer(execute_pattern, content):
        command = match.group(1)
        if any(
            keyword in command.lower()
            for keyword in ["deploy", "restart", "reload", "migrate"]
        ):
            steps.append({"type": "execute", "command": command})

    return steps


def _extract_health_checks(content: str) -> list:
    """Extract health check patterns from Chef recipe content."""
    health_checks = []

    # Look for http_request or similar resources
    http_pattern = r'http_request\s+[\'"]([^\'"]+)[\'"]'
    for match in re.finditer(http_pattern, content):
        health_checks.append({"type": "http_check", "url": match.group(1)})

    return health_checks


def _extract_service_management(content: str) -> list:
    """Extract service management patterns from Chef recipe content."""
    services = []

    # Look for service resources
    service_pattern = r'service\s+[\'"]([^\'"]+)[\'"]'
    for match in re.finditer(service_pattern, content):
        services.append({"type": "service", "name": match.group(1)})

    return services


def _detect_deployment_patterns_in_recipe(content: str, recipe_name: str) -> list:
    """Detect deployment patterns in a Chef recipe."""
    patterns: list[dict[str, str]] = []

    pattern_indicators = {
        "blue_green": [
            r"blue.*green|green.*blue",
            r"switch.*traffic|traffic.*switch",
            r"active.*inactive|inactive.*active",
        ],
        "rolling": [
            r"rolling.*update|serial.*update",
            r"batch.*deployment|phased.*rollout",
            r"gradual.*deployment",
        ],
        "canary": [
            r"canary.*deployment|canary.*release",
            r"percentage.*traffic|traffic.*percentage",
            r"A/B.*test|split.*traffic",
        ],
        "immutable": [
            r"immutable.*deployment|replace.*instance",
            r"new.*server|fresh.*deployment",
        ],
    }

    for pattern_type, indicators in pattern_indicators.items():
        for indicator in indicators:
            if re.search(indicator, content, re.IGNORECASE):
                patterns.append(
                    {
                        "type": pattern_type,
                        "recipe": recipe_name,
                        "confidence": "high"
                        if len(
                            [
                                i
                                for i in indicators
                                if re.search(i, content, re.IGNORECASE)
                            ]
                        )
                        > 1
                        else "medium",
                    }
                )
                break

    return patterns
|
|
1483
|
+
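# Illustrative behaviour of _detect_deployment_patterns_in_recipe: one
# matching indicator per pattern type yields "medium" confidence, two or
# more yield "high" (recipe text here is hypothetical):
#
#     >>> _detect_deployment_patterns_in_recipe(
#     ...     "switch traffic to the blue-green pair", "deploy.rb"
#     ... )
#     [{'type': 'blue_green', 'recipe': 'deploy.rb', 'confidence': 'high'}]
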
def _detect_patterns_from_content(content: str) -> list[str]:
    """Detect deployment patterns from recipe content."""
    patterns = []
    if "package" in content:
        patterns.append("package_management")
    if "template" in content:
        patterns.append("configuration_management")
    if "service" in content:
        patterns.append("service_management")
    if "git" in content:
        patterns.append("source_deployment")
    return patterns


def _assess_complexity_from_resource_count(resource_count: int) -> tuple[str, str, str]:
    """Assess complexity, effort, and risk based on resource count."""
    if resource_count > 50:
        return "high", "4-6 weeks", "high"
    elif resource_count < 20:
        return "low", "1-2 weeks", "low"
    return "medium", "2-3 weeks", "medium"


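# The complexity thresholds above in brief (doctest-style sketch):
#
#     >>> _assess_complexity_from_resource_count(10)
#     ('low', '1-2 weeks', 'low')
#     >>> _assess_complexity_from_resource_count(30)
#     ('medium', '2-3 weeks', 'medium')
#     >>> _assess_complexity_from_resource_count(75)
#     ('high', '4-6 weeks', 'high')
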
def _analyze_application_cookbook(cookbook_path: Path, app_type: str) -> dict:
    """Analyze Chef cookbook for application deployment patterns."""
    analysis: dict[str, Any] = {
        "application_type": app_type,
        "deployment_patterns": [],
        "resources": [],
        "complexity": "medium",
        "effort_estimate": "2-3 weeks",
        "risk_level": "medium",
    }

    # Analyze recipes
    recipes_dir = _safe_join(cookbook_path, "recipes")
    if recipes_dir.exists():
        for recipe_file in recipes_dir.glob("*.rb"):
            try:
                with recipe_file.open("r") as f:
                    content = f.read()

                # Count resources
                resource_types = re.findall(r"^(\w+)\s+['\"]", content, re.MULTILINE)
                analysis["resources"].extend(resource_types)

                # Detect patterns
                patterns = _detect_patterns_from_content(content)
                analysis["deployment_patterns"].extend(patterns)

            except Exception:
                # Silently skip malformed files
                pass

    # Assess complexity
    resource_count = len(analysis["resources"])
    complexity, effort, risk = _assess_complexity_from_resource_count(resource_count)
    analysis["complexity"] = complexity
    analysis["effort_estimate"] = effort
    analysis["risk_level"] = risk

    return analysis


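# Shape of the dict returned by _analyze_application_cookbook (values are
# hypothetical; "resources" holds raw resource-type names, so the three
# resources below map to "low" complexity):
#
#     {
#         "application_type": "web_application",
#         "deployment_patterns": ["package_management", "service_management"],
#         "resources": ["package", "service", "template"],
#         "complexity": "low",
#         "effort_estimate": "1-2 weeks",
#         "risk_level": "low",
#     }
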
# Formatting Functions


def _format_cookbook_analysis(analysis: dict) -> str:
    """Format cookbook analysis for display."""
    formatted = [
        f"• Recipes: {len(analysis['recipes'])}",
        f"• Attributes: {len(analysis['attributes'])}",
        f"• Dependencies: {len(analysis['dependencies'])}",
        f"• Templates: {len(analysis['templates'])}",
        f"• Files: {len(analysis['files'])}",
        f"• Survey fields: {len(analysis['survey_fields'])}",
    ]

    return "\n".join(formatted)


def _format_workflow_nodes(nodes: list) -> str:
    """Format workflow nodes for display."""
    if not nodes:
        return "No workflow nodes defined."

    formatted = []
    for node in nodes:
        formatted.append(f"• Node {node['id']}: {node['unified_job_template']}")
        if node.get("success_nodes"):
            formatted.append(f"  → Success: Node {node['success_nodes'][0]}")

    return "\n".join(formatted)


def _format_playbook_structure(analysis: dict) -> str:
    """Format recommended playbook structure."""
    structure_lines = []

    for cookbook_name in analysis.get("cookbooks", {}):
        structure_lines.append(f"│   ├── {cookbook_name}.yml")

    return "\n".join(structure_lines)


def _format_cookbooks_analysis(analysis: dict) -> str:
    """Format cookbooks directory analysis."""
    formatted = [
        f"• Total cookbooks: {analysis['total_cookbooks']}",
        f"• Total recipes: {analysis['total_recipes']}",
        f"• Total templates: {analysis['total_templates']}",
        f"• Total files: {analysis['total_files']}",
    ]

    if analysis["cookbooks"]:
        formatted.append("\n### Cookbook Details:")
        for name, info in list(analysis["cookbooks"].items())[:5]:
            formatted.append(
                f"• {name}: {len(info['recipes'])} recipes, "
                f"{len(info['attributes'])} attributes"
            )

        if len(analysis["cookbooks"]) > 5:
            formatted.append(f"... and {len(analysis['cookbooks']) - 5} more cookbooks")

    return "\n".join(formatted)


def _format_deployment_analysis(analysis: dict) -> str:
    """Format deployment pattern analysis."""
    formatted = [
        f"• Deployment steps: {len(analysis.get('deployment_steps', []))}",
        f"• Health checks: {len(analysis.get('health_checks', []))}",
        f"• Services managed: {len(analysis.get('service_management', []))}",
        f"• Complexity: {analysis.get('complexity', 'unknown')}",
    ]

    return "\n".join(formatted)


def _format_deployment_patterns(analysis: dict) -> str:
    """Format detected deployment patterns."""
    patterns = analysis.get("deployment_patterns", [])
    if not patterns:
        return "No specific deployment patterns detected."

    formatted = []
    for pattern in patterns:
        if isinstance(pattern, dict):
            # Format: {"type": "...", "recipe": "...", "confidence": "..."}
            pattern_type = pattern.get("type", "unknown")
            recipe = pattern.get("recipe", "")
            confidence = pattern.get("confidence", "")
            line = f"• {pattern_type.replace('_', ' ').title()}"
            if recipe:
                line += f" (in {recipe})"
            if confidence:
                line += f" - {confidence} confidence"
            formatted.append(line)
        else:
            # Format: just a string like "package_management"
            formatted.append(f"• {pattern.replace('_', ' ').title()}")

    return "\n".join(formatted)


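# Illustrative output of _format_deployment_patterns for a dict-form pattern
# (input hypothetical):
#
#     >>> print(_format_deployment_patterns({"deployment_patterns": [
#     ...     {"type": "blue_green", "recipe": "deploy.rb", "confidence": "high"}
#     ... ]}))
#     • Blue Green (in deploy.rb) - high confidence
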
def _format_chef_resources_analysis(analysis: dict) -> str:
    """Format Chef resources analysis."""
    # Check for new format first (from _analyze_application_cookbook)
    resources = analysis.get("resources", [])
    if resources:
        # Count resource types
        resource_counts: dict = {}
        for resource_type in resources:
            resource_counts[resource_type] = resource_counts.get(resource_type, 0) + 1

        # Format top resource types
        top_resources = sorted(
            resource_counts.items(), key=lambda x: x[1], reverse=True
        )[:5]

        formatted = []
        for resource_type, count in top_resources:
            formatted.append(f"• {resource_type}: {count}")

        return "\n".join(formatted)

    # Check for legacy format (from tests)
    service_resources = analysis.get("service_resources", [])
    configuration_files = analysis.get("configuration_files", [])
    health_checks = analysis.get("health_checks", [])
    scaling_mechanisms = analysis.get("scaling_mechanisms", [])

    if any([service_resources, configuration_files, health_checks, scaling_mechanisms]):
        formatted = [
            f"• Service Resources: {len(service_resources)}",
            f"• Configuration Files: {len(configuration_files)}",
            f"• Health Checks: {len(health_checks)}",
            f"• Scaling Mechanisms: {len(scaling_mechanisms)}",
        ]
        return "\n".join(formatted)

    return "No Chef resources found."


def _format_canary_workflow(steps: list) -> str:
    """Format canary progressive rollout workflow."""
    workflow = """---
# Progressive Rollout Workflow
- name: Progressive canary rollout
  hosts: localhost
  gather_facts: no
  vars:
    rollout_steps: """
    workflow += str(steps)
    workflow += """
  tasks:
    - name: Execute progressive rollout
      include_tasks: rollout_step.yml
      loop: "{{ rollout_steps }}"
      loop_control:
        loop_var: target_percentage
"""
    return workflow


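# _format_canary_workflow interpolates the steps list via str(), which for a
# list of integers happens to be valid YAML flow syntax, e.g. for [10, 25, 50]
# the rendered playbook contains:
#
#   vars:
#     rollout_steps: [10, 25, 50]
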
def _generate_blue_green_conversion_playbook(_analysis: dict) -> str:
    """Generate blue/green playbook from Chef pattern analysis."""
    return """## Blue/Green Deployment Strategy

Recommended based on detected Chef deployment patterns.

### Playbook Structure:
- Deploy to blue environment
- Health check validation
- Traffic switch to blue
- Monitor blue environment
- Keep green as rollback target

### Implementation:
Use `generate_blue_green_deployment_playbook` tool for complete playbooks.
"""


def _generate_canary_conversion_playbook(_analysis: dict) -> str:
    """Generate canary playbook from Chef pattern analysis."""
    return """## Canary Deployment Strategy

Recommended for gradual rollout with monitoring.

### Playbook Structure:
- Deploy to small canary subset
- Monitor error rates and metrics
- Progressive rollout (10% → 25% → 50% → 100%)
- Automated rollback on failure

### Implementation:
Use `generate_canary_deployment_strategy` tool for complete playbooks.
"""


def _generate_rolling_update_playbook(_analysis: dict) -> str:
    """Generate rolling update playbook from Chef pattern analysis."""
    return """## Rolling Update Strategy

Recommended for standard application deployments.

### Playbook Structure:
- Update servers in batches
- Health check between batches
- Continue if healthy, roll back on failure
- Maintain service availability

### Implementation:
```yaml
- name: Rolling update
  hosts: app_servers
  serial: "25%"
  max_fail_percentage: 10
  tasks:
    - name: Update application
      # ... deployment tasks
    - name: Health check
      # ... validation tasks
```
"""


def _generate_deployment_migration_recommendations(
    patterns: dict, app_type: str = ""
) -> str:
    """
    Generate migration recommendations based on analysis.

    Args:
        patterns: Dictionary containing deployment patterns analysis.
        app_type: Application type (web_application, microservice, database).

    Returns:
        Formatted migration recommendations.

    """
    recommendations: list[str] = []

    deployment_count = len(patterns.get("deployment_patterns", []))

    if deployment_count == 0:
        recommendations.append(
            "• No advanced deployment patterns detected - start with rolling updates"
        )
        recommendations.append("• Implement health checks for reliable deployments")
        recommendations.append("• Add rollback mechanisms for quick recovery")
    else:
        for pattern in patterns.get("deployment_patterns", []):
            # Patterns may be dicts (from recipe analysis) or plain strings.
            pattern_type = pattern["type"] if isinstance(pattern, dict) else pattern
            if pattern_type == "blue_green":
                recommendations.append(
                    "• Convert blue/green logic to Ansible blue/green strategy"
                )
            elif pattern_type == "canary":
                recommendations.append(
                    "• Implement canary deployment with automated metrics validation"
                )
            elif pattern_type == "rolling":
                recommendations.append(
                    "• Use Ansible serial deployment with health checks"
                )

    # Application-specific recommendations
    if app_type == "web_application":
        recommendations.append(
            "• Implement load balancer integration for traffic management"
        )
        recommendations.append("• Add SSL/TLS certificate handling in deployment")
    elif app_type == "microservice":
        recommendations.append(
            "• Consider service mesh integration for traffic splitting"
        )
        recommendations.append("• Implement service discovery updates")
    elif app_type == "database":
        recommendations.append("• Add database migration handling")
        recommendations.append("• Implement backup and restore procedures")

    # If no specific recommendations, add general ones
    if not recommendations:
        recommendations.extend(
            [
                "1. Start with non-production environment for validation",
                "2. Implement health checks before migration",
                "3. Set up monitoring and alerting",
                "4. Document rollback procedures",
                "5. Train operations team on new deployment process",
                "6. Plan for gradual migration (pilot → staging → production)",
            ]
        )

    return "\n".join(recommendations)


def _extract_detected_patterns(patterns: dict) -> list[str]:
    """Extract detected patterns from patterns dictionary."""
    pattern_list: list = patterns.get("deployment_patterns", [])
    if pattern_list and isinstance(pattern_list[0], dict):
        return [p["type"] for p in pattern_list]
    return list(pattern_list)


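# _extract_detected_patterns normalises both pattern shapes (hypothetical
# inputs):
#
#     >>> _extract_detected_patterns(
#     ...     {"deployment_patterns": [{"type": "canary", "recipe": "a.rb"}]}
#     ... )
#     ['canary']
#     >>> _extract_detected_patterns({"deployment_patterns": ["rolling"]})
#     ['rolling']
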
def _build_deployment_strategy_recommendations(
    detected_patterns: list[str],
) -> list[str]:
    """Build deployment strategy recommendations based on detected patterns."""
    strategies: list[str] = []

    if "blue_green" in detected_patterns:
        strategies.append(
            "• Blue/Green: Zero-downtime deployment with instant rollback"
        )
    if "canary" in detected_patterns:
        strategies.append("• Canary: Risk-reduced deployment with gradual rollout")
    if "rolling" in detected_patterns:
        strategies.append(
            "• Rolling Update: Balanced approach with configurable parallelism"
        )

    return strategies


def _build_application_strategy_recommendations(
    detected_patterns: list[str],
) -> list[str]:
    """Build application-pattern specific strategy recommendations."""
    strategies: list[str] = []

    if "package_management" in detected_patterns:
        strategies.append("• Package: Use `package` module for package installation")
    if "configuration_management" in detected_patterns:
        strategies.append("• Config: Use `template` module for configuration files")
    if "service_management" in detected_patterns:
        strategies.append("• Service: Use `service` or `systemd` module for services")
    if "source_deployment" in detected_patterns:
        strategies.append("• Source: Use `git` module for source code deployment")

    return strategies


def _get_default_strategy_recommendations() -> list[str]:
    """Get default strategy recommendations when no patterns detected."""
    return [
        "• Rolling Update: Recommended starting strategy",
        "• Blue/Green: For critical applications requiring zero downtime",
        "• Canary: For high-risk deployments requiring validation",
    ]


def _recommend_ansible_strategies(patterns: dict) -> str:
    """Recommend appropriate Ansible strategies."""
    detected_patterns = _extract_detected_patterns(patterns)

    strategies = _build_deployment_strategy_recommendations(detected_patterns)
    strategies.extend(_build_application_strategy_recommendations(detected_patterns))

    if not strategies:
        strategies = _get_default_strategy_recommendations()

    return "\n".join(strategies)
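
# End-to-end sketch of _recommend_ansible_strategies (hypothetical input):
#
#     >>> print(_recommend_ansible_strategies(
#     ...     {"deployment_patterns": [{"type": "canary", "recipe": "x.rb"}]}
#     ... ))
#     • Canary: Risk-reduced deployment with gradual rollout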