mcp-souschef 2.1.2__py3-none-any.whl → 2.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {mcp_souschef-2.1.2.dist-info → mcp_souschef-2.2.0.dist-info}/METADATA +36 -8
- mcp_souschef-2.2.0.dist-info/RECORD +31 -0
- souschef/assessment.py +448 -180
- souschef/cli.py +90 -0
- souschef/converters/playbook.py +43 -5
- souschef/converters/resource.py +146 -49
- souschef/core/__init__.py +22 -0
- souschef/core/errors.py +275 -0
- souschef/deployment.py +412 -100
- souschef/parsers/habitat.py +35 -6
- souschef/parsers/inspec.py +72 -34
- souschef/parsers/metadata.py +59 -23
- souschef/profiling.py +568 -0
- souschef/server.py +589 -149
- mcp_souschef-2.1.2.dist-info/RECORD +0 -29
- {mcp_souschef-2.1.2.dist-info → mcp_souschef-2.2.0.dist-info}/WHEEL +0 -0
- {mcp_souschef-2.1.2.dist-info → mcp_souschef-2.2.0.dist-info}/entry_points.txt +0 -0
- {mcp_souschef-2.1.2.dist-info → mcp_souschef-2.2.0.dist-info}/licenses/LICENSE +0 -0
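The largest structural addition is the new `souschef/core/errors.py` module (+275 lines), whose helpers (`format_error_with_context`, `validate_cookbook_structure`, `validate_directory_exists`) replace the bare `return` statements in the `souschef/deployment.py` exception handlers shown below. That module's body is not part of this diff; the sketch below only illustrates the call pattern the deployment code relies on, and its bodies, defaults, and message wording are assumptions rather than the released implementation.

```python
# Hypothetical sketch of the helpers imported from souschef.core.errors.
# Only the call signatures are visible in the diff below; the bodies here
# are illustrative assumptions, not the published 2.2.0 code.
from pathlib import Path


def format_error_with_context(
    error: Exception, context: str, path: str | Path | None = None
) -> str:
    """Return a readable error string instead of letting the exception escape."""
    location = f" (path: {path})" if path else ""
    return (
        f"Error: {type(error).__name__} while {context}{location}\n\n"
        f"Details: {error}\n\n"
        "Suggestion: Check the inputs above and retry"
    )


def validate_directory_exists(directory: str, label: str) -> Path:
    """Resolve a directory argument, raising if it does not exist."""
    path = Path(directory).expanduser().resolve()
    if not path.is_dir():
        raise FileNotFoundError(f"{label} not found: {directory}")
    return path


def validate_cookbook_structure(cookbook_path: str) -> Path:
    """Confirm the path looks like a Chef cookbook (contains metadata.rb)."""
    path = validate_directory_exists(cookbook_path, "cookbook directory")
    if not (path / "metadata.rb").exists():
        raise ValueError(f"No metadata.rb found in {cookbook_path}")
    return path
```

With helpers of this shape, every tool in deployment.py can hand a formatted "Error/Suggestion" string back to the MCP client instead of propagating a traceback, which is the pattern visible throughout the hunks below.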
souschef/deployment.py
CHANGED
@@ -16,7 +16,12 @@ from souschef.core.constants import (
     CHEF_ROLE_PREFIX,
     METADATA_FILENAME,
 )
-from souschef.core.
+from souschef.core.errors import (
+    format_error_with_context,
+    validate_cookbook_structure,
+    validate_directory_exists,
+)
+from souschef.core.path_utils import _safe_join
 
 # Maximum length for attribute values in Chef attribute parsing
 # Prevents ReDoS attacks from extremely long attribute declarations
@@ -38,10 +43,14 @@ def generate_awx_job_template_from_cookbook(
     Survey specs auto-generated from cookbook attributes when include_survey=True.
     """
     try:
-
-        if not
-            return
+        # Validate inputs
+        if not cookbook_name or not cookbook_name.strip():
+            return (
+                "Error: Cookbook name cannot be empty\n\n"
+                "Suggestion: Provide a valid cookbook name"
+            )
 
+        cookbook = validate_cookbook_structure(cookbook_path)
         cookbook_analysis = _analyze_cookbook_for_awx(cookbook, cookbook_name)
         job_template = _generate_awx_job_template(
             cookbook_analysis, cookbook_name, target_environment, include_survey
@@ -71,7 +80,9 @@ awx-cli job_templates create \\
 {_format_cookbook_analysis(cookbook_analysis)}
 """
     except Exception as e:
-        return
+        return format_error_with_context(
+            e, f"generating AWX job template for {cookbook_name}", cookbook_path
+        )
 
 
 def generate_awx_workflow_from_chef_runlist(
@@ -84,9 +95,30 @@ def generate_awx_workflow_from_chef_runlist(
     Workflows preserve runlist execution order with success/failure paths.
     """
     try:
+        # Validate inputs
+        if not runlist_content or not runlist_content.strip():
+            return (
+                "Error: Runlist content cannot be empty\n\n"
+                "Suggestion: Provide a valid Chef runlist "
+                "(e.g., 'recipe[cookbook::recipe]' or JSON array)"
+            )
+
+        if not workflow_name or not workflow_name.strip():
+            return (
+                "Error: Workflow name cannot be empty\n\n"
+                "Suggestion: Provide a descriptive name for the AWX workflow"
+            )
+
         # Parse runlist
         runlist = _parse_chef_runlist(runlist_content)
 
+        if not runlist:
+            return (
+                "Error: Runlist parsing resulted in no items\n\n"
+                "Suggestion: Check runlist format. Expected 'recipe[name]' "
+                "or 'role[name]' entries"
+            )
+
         # Generate workflow template
         workflow_template = _generate_awx_workflow_template(
             runlist, workflow_name, environment
@@ -115,7 +147,9 @@ def generate_awx_workflow_from_chef_runlist(
 4. Test execution with survey parameters
 """
     except Exception as e:
-        return
+        return format_error_with_context(
+            e, f"generating AWX workflow from runlist for {workflow_name}"
+        )
 
 
 def generate_awx_project_from_cookbooks(
@@ -138,9 +172,16 @@ def generate_awx_project_from_cookbooks(
 
     """
     try:
-
-        if not
-            return
+        # Validate inputs
+        if not project_name or not project_name.strip():
+            return (
+                "Error: Project name cannot be empty\n\n"
+                "Suggestion: Provide a descriptive name for the AWX project"
+            )
+
+        cookbooks_path = validate_directory_exists(
+            cookbooks_directory, "cookbooks directory"
+        )
 
         # Analyze all cookbooks
         cookbooks_analysis = _analyze_cookbooks_directory(cookbooks_path)
@@ -181,7 +222,11 @@ def generate_awx_project_from_cookbooks(
 5. Set up inventories and credentials
 """
     except Exception as e:
-        return
+        return format_error_with_context(
+            e,
+            f"generating AWX project configuration for {project_name}",
+            cookbooks_directory,
+        )
 
 
 def generate_awx_inventory_source_from_chef(
@@ -200,6 +245,21 @@ def generate_awx_inventory_source_from_chef(
 
     """
     try:
+        # Validate inputs
+        if not chef_server_url or not chef_server_url.strip():
+            return (
+                "Error: Chef server URL cannot be empty\n\n"
+                "Suggestion: Provide a valid Chef server URL "
+                "(e.g., https://chef.example.com)"
+            )
+
+        if not chef_server_url.startswith("https://"):
+            return (
+                f"Error: Invalid Chef server URL: {chef_server_url}\n\n"
+                "Suggestion: URL must use HTTPS protocol for security "
+                "(e.g., https://chef.example.com)"
+            )
+
         # Generate inventory source configuration
         inventory_source = _generate_chef_inventory_source(
             chef_server_url, sync_schedule
@@ -240,7 +300,9 @@ def generate_awx_inventory_source_from_chef(
 - CHEF_CLIENT_KEY: ${{{{chef_client_key}}}}
 """
     except Exception as e:
-        return
+        return format_error_with_context(
+            e, "generating AWX inventory source from Chef server", chef_server_url
+        )
 
 
 # Deployment Strategy Functions
@@ -256,9 +318,15 @@ def convert_chef_deployment_to_ansible_strategy(
     Override auto-detection by specifying explicit pattern.
     """
     try:
-        cookbook =
-
-
+        cookbook = validate_cookbook_structure(cookbook_path)
+
+        # Validate deployment pattern
+        valid_patterns = ["auto", "blue_green", "canary", "rolling_update"]
+        if deployment_pattern not in valid_patterns:
+            return (
+                f"Error: Invalid deployment pattern '{deployment_pattern}'\n\n"
+                f"Suggestion: Use one of {', '.join(valid_patterns)}"
+            )
 
         # Analyze Chef deployment pattern
         pattern_analysis = _analyze_chef_deployment_pattern(cookbook)
@@ -289,7 +357,9 @@ def convert_chef_deployment_to_ansible_strategy(
 {_generate_deployment_migration_recommendations(pattern_analysis)}
 """
     except Exception as e:
-        return
+        return format_error_with_context(
+            e, "converting Chef deployment pattern to Ansible strategy", cookbook_path
+        )
 
 
 def generate_blue_green_deployment_playbook(
@@ -307,6 +377,21 @@ def generate_blue_green_deployment_playbook(
 
     """
     try:
+        # Validate inputs
+        if not app_name or not app_name.strip():
+            return (
+                "Error: Application name cannot be empty\n\n"
+                "Suggestion: Provide a descriptive name for the application "
+                "being deployed"
+            )
+
+        if not health_check_url.startswith("/"):
+            return (
+                f"Error: Health check URL must be a path starting with '/': "
+                f"{health_check_url}\n\n"
+                "Suggestion: Use a relative path like '/health' or '/api/health'"
+            )
+
         # Generate main deployment playbook
         playbook = _generate_blue_green_playbook(app_name, health_check_url)
 
@@ -347,26 +432,125 @@ def generate_blue_green_deployment_playbook(
 - Blue and green environments provisioned
 """
     except Exception as e:
-        return
+        return format_error_with_context(
+            e, f"generating blue/green deployment playbook for {app_name}"
+        )
 
 
-def
-    app_name: str, canary_percentage: int
-) -> str:
+def _validate_canary_inputs(
+    app_name: str, canary_percentage: int, rollout_steps: str
+) -> tuple[list[int] | None, str | None]:
     """
-
+    Validate canary deployment inputs.
+
+    Args:
+        app_name: Application name
+        canary_percentage: Initial canary percentage
+        rollout_steps: Comma-separated rollout steps
+
+    Returns:
+        Tuple of (parsed steps list, error message). If error, steps is None.
 
-    Starts at canary_percentage, progresses through rollout_steps.
-    Includes monitoring checks and automatic rollback on failure.
     """
+    # Validate app name
+    if not app_name or not app_name.strip():
+        return None, (
+            "Error: Application name cannot be empty\n\n"
+            "Suggestion: Provide a descriptive name for the application"
+        )
+
+    # Validate canary percentage
+    if not (1 <= canary_percentage <= 100):
+        return None, (
+            f"Error: Canary percentage must be between 1 and 100, "
+            f"got {canary_percentage}\n\n"
+            "Suggestion: Start with 10% for safety"
+        )
+
+    # Parse and validate rollout steps
     try:
-        # Parse rollout steps
         steps = [int(s.strip()) for s in rollout_steps.split(",")]
+        if not all(1 <= s <= 100 for s in steps):
+            raise ValueError("Steps must be between 1 and 100")
+        if steps != sorted(steps):
+            return None, (
+                f"Error: Rollout steps must be in ascending order: {rollout_steps}\n\n"
+                "Suggestion: Use format like '10,25,50,100'"
+            )
+        return steps, None
+    except ValueError as e:
+        return None, (
+            f"Error: Invalid rollout steps '{rollout_steps}': {e}\n\n"
+            "Suggestion: Use comma-separated percentages like '10,25,50,100'"
+        )
 
-        # Generate canary strategy
-        strategy = _generate_canary_strategy(app_name, canary_percentage, steps)
 
-
+def _build_canary_workflow_guide(canary_percentage: int, steps: list[int]) -> str:
+    """
+    Build deployment workflow guide.
+
+    Args:
+        canary_percentage: Initial canary percentage
+        steps: List of rollout step percentages
+
+    Returns:
+        Formatted workflow guide
+
+    """
+    workflow = f"""## Deployment Workflow:
+1. Deploy canary at {canary_percentage}%: `ansible-playbook deploy_canary.yml`
+2. Monitor metrics: `ansible-playbook monitor_canary.yml`
+3. Progressive rollout: `ansible-playbook progressive_rollout.yml`
+"""
+
+    # Add step details
+    for i, step_pct in enumerate(steps, 1):
+        workflow += f"   - Step {i}: {step_pct}% traffic"
+        if i == len(steps):
+            workflow += " (full rollout)"
+        workflow += "\n"
+
+    workflow += """4. Rollback if issues: `ansible-playbook rollback_canary.yml`
+
+## Monitoring Points:
+- Error rate comparison (canary vs stable)
+- Response time percentiles (p50, p95, p99)
+- Resource utilization (CPU, memory)
+- Custom business metrics
+
+## Rollback Triggers:
+- Error rate increase > 5%
+- Response time degradation > 20%
+- Failed health checks
+- Manual trigger
+"""
+    return workflow
+
+
+def _format_canary_output(
+    app_name: str,
+    canary_percentage: int,
+    rollout_steps: str,
+    steps: list[int],
+    strategy: dict,
+) -> str:
+    """
+    Format complete canary deployment output.
+
+    Args:
+        app_name: Application name
+        canary_percentage: Initial canary percentage
+        rollout_steps: Original rollout steps string
+        steps: Parsed rollout steps
+        strategy: Generated strategy dict
+
+    Returns:
+        Formatted output string
+
+    """
+    workflow = _build_canary_workflow_guide(canary_percentage, steps)
+
+    return f"""# Canary Deployment Strategy
 # Application: {app_name}
 # Initial Canary: {canary_percentage}%
 # Rollout Steps: {rollout_steps}
@@ -391,30 +575,53 @@ def generate_canary_deployment_strategy(
 {strategy["rollback"]}
 ```
 
-
-1. Deploy canary at {canary_percentage}%: `ansible-playbook deploy_canary.yml`
-2. Monitor metrics: `ansible-playbook monitor_canary.yml`
-3. Progressive rollout: `ansible-playbook progressive_rollout.yml`
-   - Step 1: {steps[0]}% traffic
-   - Step 2: {steps[1]}% traffic
-   - Step 3: {steps[2]}% traffic
-   - Step 4: {steps[3]}% traffic (full rollout)
-4. Rollback if issues: `ansible-playbook rollback_canary.yml`
+{workflow}"""
 
-## Monitoring Points:
-- Error rate comparison (canary vs stable)
-- Response time percentiles (p50, p95, p99)
-- Resource utilization (CPU, memory)
-- Custom business metrics
 
-
-
-
-
-
-
+def generate_canary_deployment_strategy(
+    app_name: str, canary_percentage: int = 10, rollout_steps: str = "10,25,50,100"
+) -> str:
+    """
+    Generate canary deployment with progressive rollout.
+
+    Starts at canary_percentage, progresses through rollout_steps.
+    Includes monitoring checks and automatic rollback on failure.
+
+    Args:
+        app_name: Name of the application
+        canary_percentage: Initial canary traffic percentage (1-100)
+        rollout_steps: Comma-separated progressive rollout steps
+
+    Returns:
+        Formatted canary deployment strategy with playbooks
+
+    """
+    try:
+        # Validate inputs
+        steps, error = _validate_canary_inputs(
+            app_name, canary_percentage, rollout_steps
+        )
+        if error:
+            return error
+
+        assert steps is not None, "steps must be non-None after successful validation"
+
+        # Generate canary strategy
+        strategy = _generate_canary_strategy(app_name, canary_percentage, steps)
+
+        # Format output
+        return _format_canary_output(
+            app_name,
+            canary_percentage,
+            rollout_steps,
+            steps,
+            strategy,
+        )
+
     except Exception as e:
-        return
+        return format_error_with_context(
+            e, f"generating canary deployment strategy for {app_name}"
+        )
 
 
 def analyze_chef_application_patterns(
@@ -427,9 +634,15 @@ def analyze_chef_application_patterns(
     Application type helps tune recommendations for web/database/service workloads.
     """
     try:
-        cookbook =
-
-
+        cookbook = validate_cookbook_structure(cookbook_path)
+
+        # Validate application type
+        valid_app_types = ["web_application", "database", "service", "batch", "api"]
+        if application_type not in valid_app_types:
+            return (
+                f"Error: Invalid application type '{application_type}'\n\n"
+                f"Suggestion: Use one of {', '.join(valid_app_types)}"
+            )
 
         # Analyze cookbook for application patterns
         analysis = _analyze_application_cookbook(cookbook, application_type)
@@ -460,83 +673,156 @@ def analyze_chef_application_patterns(
 5. Document lessons learned and iterate
 """
     except Exception as e:
-        return
+        return format_error_with_context(
+            e,
+            f"analyzing Chef application patterns for {application_type}",
+            cookbook_path,
+        )
 
 
 # AWX Helper Functions
 
 
-def
-    """
-
-        "name": cookbook_name,
-        "recipes": [],
-        "attributes": {},
-        "dependencies": [],
-        "templates": [],
-        "files": [],
-        "survey_fields": [],
-    }
+def _analyze_recipes(cookbook_path: Path) -> list[dict[str, Any]]:
+    """
+    Analyze recipes directory for AWX job steps.
 
-
+    Args:
+        cookbook_path: Path to cookbook root
+
+    Returns:
+        List of recipe metadata dicts
+
+    """
+    recipes = []
     recipes_dir = _safe_join(cookbook_path, "recipes")
     if recipes_dir.exists():
         for recipe_file in recipes_dir.glob("*.rb"):
-
-            analysis["recipes"].append(
+            recipes.append(
                 {
-                    "name":
+                    "name": recipe_file.stem,
                     "file": str(recipe_file),
                     "size": recipe_file.stat().st_size,
                 }
             )
+    return recipes
+
+
+def _analyze_attributes_for_survey(
+    cookbook_path: Path,
+) -> tuple[dict[str, Any], list[dict[str, Any]]]:
+    """
+    Analyze attributes directory for survey field generation.
 
-
+    Args:
+        cookbook_path: Path to cookbook root
+
+    Returns:
+        Tuple of (attributes dict, survey fields list)
+
+    """
+    attributes = {}
+    survey_fields = []
     attributes_dir = _safe_join(cookbook_path, "attributes")
+
     if attributes_dir.exists():
         for attr_file in attributes_dir.glob("*.rb"):
             try:
                 with attr_file.open("r") as f:
                     content = f.read()
 
-                # Extract attribute declarations
-
-
+                # Extract attribute declarations
+                attrs = _extract_cookbook_attributes(content)
+                attributes.update(attrs)
 
-                # Generate survey fields
-
-
+                # Generate survey fields
+                fields = _generate_survey_fields_from_attributes(attrs)
+                survey_fields.extend(fields)
 
             except Exception:
                 # Silently skip malformed attribute files
                 pass
 
-
+    return attributes, survey_fields
+
+
+def _analyze_metadata_dependencies(cookbook_path: Path) -> list[str]:
+    """
+    Extract cookbook dependencies from metadata.
+
+    Args:
+        cookbook_path: Path to cookbook root
+
+    Returns:
+        List of dependency names
+
+    """
     metadata_file = _safe_join(cookbook_path, METADATA_FILENAME)
     if metadata_file.exists():
         try:
             with metadata_file.open("r") as f:
                 content = f.read()
-
-            dependencies = _extract_cookbook_dependencies(content)
-            analysis["dependencies"] = dependencies
-
+            return _extract_cookbook_dependencies(content)
         except Exception:
-            # Silently skip malformed metadata
             pass
+    return []
+
+
+def _collect_static_files(cookbook_path: Path) -> tuple[list[str], list[str]]:
+    """
+    Collect templates and static files from cookbook.
+
+    Args:
+        cookbook_path: Path to cookbook root
+
+    Returns:
+        Tuple of (template names list, file names list)
+
+    """
+    templates = []
+    files = []
 
-    # Count templates and files
     templates_dir = _safe_join(cookbook_path, "templates")
     if templates_dir.exists():
-
-            f.name for f in templates_dir.rglob("*") if f.is_file()
-        ]
+        templates = [f.name for f in templates_dir.rglob("*") if f.is_file()]
 
     files_dir = _safe_join(cookbook_path, "files")
     if files_dir.exists():
-
+        files = [f.name for f in files_dir.rglob("*") if f.is_file()]
 
-    return
+    return templates, files
+
+
+def _analyze_cookbook_for_awx(cookbook_path: Path, cookbook_name: str) -> dict:
+    """
+    Analyze Chef cookbook structure for AWX job template generation.
+
+    Orchestrates multiple analysis helpers to build comprehensive cookbook metadata.
+
+    Args:
+        cookbook_path: Path to cookbook root
+        cookbook_name: Name of the cookbook
+
+    Returns:
+        Analysis dict with recipes, attributes, dependencies, templates, files, surveys
+
+    """
+    # Analyze each dimension independently
+    recipes = _analyze_recipes(cookbook_path)
+    attributes, survey_fields = _analyze_attributes_for_survey(cookbook_path)
+    dependencies = _analyze_metadata_dependencies(cookbook_path)
+    templates, files = _collect_static_files(cookbook_path)
+
+    # Assemble complete analysis
+    return {
+        "name": cookbook_name,
+        "recipes": recipes,
+        "attributes": attributes,
+        "dependencies": dependencies,
+        "templates": templates,
+        "files": files,
+        "survey_fields": survey_fields,
+    }
 
 
 def _generate_awx_job_template(
@@ -1552,16 +1838,19 @@ def _generate_deployment_migration_recommendations(
     return "\n".join(recommendations)
 
 
-def
-    """
-
-
-    # Handle both formats: list of dicts with 'type' key or list of strings
-    pattern_list = patterns.get("deployment_patterns", [])
+def _extract_detected_patterns(patterns: dict) -> list[str]:
+    """Extract detected patterns from patterns dictionary."""
+    pattern_list: list = patterns.get("deployment_patterns", [])
     if pattern_list and isinstance(pattern_list[0], dict):
-
-
-
+        return [p["type"] for p in pattern_list]
+    return list(pattern_list)
+
+
+def _build_deployment_strategy_recommendations(
+    detected_patterns: list[str],
+) -> list[str]:
+    """Build deployment strategy recommendations based on detected patterns."""
+    strategies: list[str] = []
 
     if "blue_green" in detected_patterns:
         strategies.append(
@@ -1574,7 +1863,15 @@ def _recommend_ansible_strategies(patterns: dict) -> str:
             "• Rolling Update: Balanced approach with configurable parallelism"
         )
 
-
+    return strategies
+
+
+def _build_application_strategy_recommendations(
+    detected_patterns: list[str],
+) -> list[str]:
+    """Build application-pattern specific strategy recommendations."""
+    strategies: list[str] = []
+
     if "package_management" in detected_patterns:
         strategies.append("• Package: Use `package` module for package installation")
     if "configuration_management" in detected_patterns:
@@ -1584,11 +1881,26 @@ def _recommend_ansible_strategies(patterns: dict) -> str:
     if "source_deployment" in detected_patterns:
         strategies.append("• Source: Use `git` module for source code deployment")
 
+    return strategies
+
+
+def _get_default_strategy_recommendations() -> list[str]:
+    """Get default strategy recommendations when no patterns detected."""
+    return [
+        "• Rolling Update: Recommended starting strategy",
+        "• Blue/Green: For critical applications requiring zero downtime",
+        "• Canary: For high-risk deployments requiring validation",
+    ]
+
+
+def _recommend_ansible_strategies(patterns: dict) -> str:
+    """Recommend appropriate Ansible strategies."""
+    detected_patterns = _extract_detected_patterns(patterns)
+
+    strategies = _build_deployment_strategy_recommendations(detected_patterns)
+    strategies.extend(_build_application_strategy_recommendations(detected_patterns))
+
     if not strategies:
-        strategies =
-            "• Rolling Update: Recommended starting strategy",
-            "• Blue/Green: For critical applications requiring zero downtime",
-            "• Canary: For high-risk deployments requiring validation",
-        ]
+        strategies = _get_default_strategy_recommendations()
 
     return "\n".join(strategies)
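Taken together, these hunks change the failure mode of the deployment tools: invalid inputs now come back as "Error/Suggestion" strings rather than exceptions. A quick sketch of how a caller might observe the new behavior, assuming mcp-souschef 2.2.0 is installed and the functions are importable from `souschef.deployment` as shown in the diff:

```python
# Illustrative only: exercises the validation paths added in 2.2.0.
from souschef.deployment import generate_canary_deployment_strategy

# Out-of-range canary percentage: the function now returns an error string
# with a Suggestion line instead of raising inside the tool.
print(generate_canary_deployment_strategy("payments-api", canary_percentage=150))

# Valid inputs return the formatted strategy, including the workflow guide
# assembled by the new _build_canary_workflow_guide helper.
print(generate_canary_deployment_strategy("payments-api", 10, "10,25,50,100"))
```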