mcp-souschef 2.5.3__py3-none-any.whl → 3.0.0__py3-none-any.whl
This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
- {mcp_souschef-2.5.3.dist-info → mcp_souschef-3.0.0.dist-info}/METADATA +135 -28
- mcp_souschef-3.0.0.dist-info/RECORD +46 -0
- {mcp_souschef-2.5.3.dist-info → mcp_souschef-3.0.0.dist-info}/WHEEL +1 -1
- souschef/__init__.py +43 -3
- souschef/assessment.py +1260 -69
- souschef/ci/common.py +126 -0
- souschef/ci/github_actions.py +4 -93
- souschef/ci/gitlab_ci.py +3 -53
- souschef/ci/jenkins_pipeline.py +3 -60
- souschef/cli.py +129 -20
- souschef/converters/__init__.py +2 -2
- souschef/converters/cookbook_specific.py +125 -0
- souschef/converters/cookbook_specific.py.backup +109 -0
- souschef/converters/playbook.py +1022 -15
- souschef/converters/resource.py +113 -10
- souschef/converters/template.py +177 -0
- souschef/core/constants.py +13 -0
- souschef/core/metrics.py +313 -0
- souschef/core/path_utils.py +12 -9
- souschef/core/validation.py +53 -0
- souschef/deployment.py +85 -33
- souschef/parsers/attributes.py +397 -32
- souschef/parsers/recipe.py +48 -10
- souschef/server.py +715 -37
- souschef/ui/app.py +1658 -379
- souschef/ui/health_check.py +36 -0
- souschef/ui/pages/ai_settings.py +563 -0
- souschef/ui/pages/cookbook_analysis.py +3270 -166
- souschef/ui/pages/validation_reports.py +274 -0
- mcp_souschef-2.5.3.dist-info/RECORD +0 -38
- {mcp_souschef-2.5.3.dist-info → mcp_souschef-3.0.0.dist-info}/entry_points.txt +0 -0
- {mcp_souschef-2.5.3.dist-info → mcp_souschef-3.0.0.dist-info}/licenses/LICENSE +0 -0
souschef/converters/playbook.py
CHANGED
@@ -8,6 +8,9 @@ inventory scripts.

 import json
 import re
+import shutil
+import subprocess
+import tempfile
 from datetime import datetime
 from pathlib import Path
 from typing import Any
@@ -18,6 +21,7 @@ from souschef.converters.resource import (
 )
 from souschef.core.constants import (
     ANSIBLE_SERVICE_MODULE,
+    ATTRIBUTE_PREFIX,
     ERROR_PREFIX,
     JINJA2_VAR_REPLACEMENT,
     NODE_PREFIX,
@@ -25,10 +29,25 @@ from souschef.core.constants import (
     REGEX_RESOURCE_BRACKET,
     REGEX_RUBY_INTERPOLATION,
     REGEX_WHITESPACE_QUOTE,
+    VALUE_PREFIX,
 )
 from souschef.core.path_utils import _normalize_path, _safe_join
+from souschef.parsers.attributes import parse_attributes
 from souschef.parsers.recipe import parse_recipe

+# Optional AI provider imports
+try:
+    import requests  # type: ignore[import-untyped]
+except ImportError:
+    requests = None
+
+try:
+    from ibm_watsonx_ai import (  # type: ignore[import-not-found]
+        APIClient,
+    )
+except ImportError:
+    APIClient = None
+
 # Maximum length for guard condition patterns in regex matching
 MAX_GUARD_LENGTH = 500
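
The guarded imports above let the module import cleanly when the optional AI dependencies are absent; callers then test the None sentinel before use, as the provider functions later in this diff do. A minimal sketch of the same pattern (the fetch_url helper is hypothetical, for illustration only):

try:
    import requests  # optional dependency
except ImportError:
    requests = None


def fetch_url(url: str) -> str:
    # Degrade to an error string instead of raising at import time.
    if requests is None:
        return "Error: requests library not available"
    return requests.get(url, timeout=60).text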

@@ -61,7 +80,7 @@ def generate_playbook_from_recipe(recipe_path: str) -> str:

     # Generate playbook structure
     playbook: str = _generate_playbook_structure(
-        recipe_content, raw_content
+        recipe_content, raw_content, recipe_file
     )

     return playbook
@@ -70,6 +89,618 @@ def generate_playbook_from_recipe(recipe_path: str) -> str:
         return f"Error generating playbook: {e}"


+def generate_playbook_from_recipe_with_ai(
+    recipe_path: str,
+    ai_provider: str = "anthropic",
+    api_key: str = "",
+    model: str = "claude-3-5-sonnet-20241022",
+    temperature: float = 0.7,
+    max_tokens: int = 4000,
+    project_id: str = "",
+    base_url: str = "",
+    project_recommendations: dict | None = None,
+) -> str:
+    """
+    Generate an AI-enhanced Ansible playbook from a Chef recipe.
+
+    Uses AI to intelligently convert Chef recipes to Ansible playbooks,
+    considering context, best practices, and optimization opportunities.
+
+    Args:
+        recipe_path: Path to the Chef recipe (.rb) file.
+        ai_provider: AI provider to use ('anthropic', 'openai', 'watson',
+            'lightspeed'). Note: 'github_copilot' is listed but not supported as
+            GitHub Copilot does not have a public REST API.
+        api_key: API key for the AI provider.
+        model: AI model to use.
+        temperature: Creativity/randomness parameter (0.0-2.0).
+        max_tokens: Maximum tokens to generate.
+        project_id: Project ID for IBM Watsonx (required for watson provider).
+        base_url: Custom base URL for the AI provider.
+        project_recommendations: Dictionary containing project-level analysis
+            and recommendations from cookbook assessment.
+
+    Returns:
+        AI-generated Ansible playbook in YAML format.
+
+    """
+    try:
+        # Parse the recipe file
+        recipe_file = _normalize_path(recipe_path)
+        if not recipe_file.exists():
+            return f"{ERROR_PREFIX} Recipe file does not exist: {recipe_path}"
+
+        raw_content = recipe_file.read_text()
+
+        # Get basic recipe parsing for context
+        parsed_content = parse_recipe(recipe_path)
+        if parsed_content.startswith(ERROR_PREFIX):
+            return parsed_content
+
+        # Use AI to generate the playbook
+        ai_playbook = _generate_playbook_with_ai(
+            raw_content,
+            parsed_content,
+            recipe_file.name,
+            ai_provider,
+            api_key,
+            model,
+            temperature,
+            max_tokens,
+            project_id,
+            base_url,
+            project_recommendations,
+        )
+
+        return ai_playbook
+
+    except Exception as e:
+        return f"Error generating AI-enhanced playbook: {e}"
+
+
+def _generate_playbook_with_ai(
+    raw_content: str,
+    parsed_content: str,
+    recipe_name: str,
+    ai_provider: str,
+    api_key: str,
+    model: str,
+    temperature: float,
+    max_tokens: int,
+    project_id: str = "",
+    base_url: str = "",
+    project_recommendations: dict | None = None,
+) -> str:
+    """Generate Ansible playbook using AI for intelligent conversion."""
+    try:
+        # Initialize AI client based on provider
+        client = _initialize_ai_client(ai_provider, api_key, project_id, base_url)
+        if isinstance(client, str):  # Error message returned
+            return client
+
+        # Create the AI prompt
+        prompt = _create_ai_conversion_prompt(
+            raw_content, parsed_content, recipe_name, project_recommendations
+        )
+
+        # Call the AI API and get response
+        ai_response = _call_ai_api(
+            client, ai_provider, prompt, model, temperature, max_tokens
+        )
+
+        # Clean and validate the AI response
+        cleaned_playbook = _clean_ai_playbook_response(ai_response)
+
+        # Validate with ansible-lint and self-correct if possible
+        cleaned_playbook = _validate_and_fix_playbook(
+            cleaned_playbook, client, ai_provider, model, temperature, max_tokens
+        )
+
+        return cleaned_playbook
+
+    except ImportError as e:
+        return f"{ERROR_PREFIX} AI library not available: {e}"
+    except Exception as e:
+        return f"{ERROR_PREFIX} AI conversion failed: {e}"
+
+
+def _initialize_ai_client(
+    ai_provider: str, api_key: str, project_id: str = "", base_url: str = ""
+) -> Any:
+    """Initialize AI client based on provider."""
+    if ai_provider.lower() == "anthropic":
+        import anthropic
+
+        return anthropic.Anthropic(api_key=api_key)
+    elif ai_provider.lower() == "openai":
+        import openai
+
+        return openai.OpenAI(api_key=api_key)
+    elif ai_provider.lower() == "watson":
+        if APIClient is None:
+            return f"{ERROR_PREFIX} ibm_watsonx_ai library not available"
+
+        return APIClient(
+            api_key=api_key,
+            project_id=project_id,
+            url=base_url or "https://us-south.ml.cloud.ibm.com",
+        )
+    elif ai_provider.lower() == "lightspeed":
+        if requests is None:
+            return f"{ERROR_PREFIX} requests library not available"
+
+        return {
+            "api_key": api_key,
+            "base_url": base_url or "https://api.redhat.com",
+        }
+    elif ai_provider.lower() == "github_copilot":
+        return (
+            f"{ERROR_PREFIX} GitHub Copilot does not have a public REST API. "
+            "GitHub Copilot is only available through IDE integrations and "
+            "cannot be used "
+            "for programmatic API calls. Please use Anthropic Claude, OpenAI, or IBM "
+            "Watsonx instead."
+        )
+    else:
+        return f"{ERROR_PREFIX} Unsupported AI provider: {ai_provider}"
+
+
+def _call_ai_api(
+    client: Any,
+    ai_provider: str,
+    prompt: str,
+    model: str,
+    temperature: float,
+    max_tokens: int,
+) -> str:
+    """Call the appropriate AI API based on provider."""
+    if ai_provider.lower() == "anthropic":
+        response = client.messages.create(
+            model=model,
+            max_tokens=max_tokens,
+            temperature=temperature,
+            messages=[{"role": "user", "content": prompt}],
+        )
+        return str(response.content[0].text)
+    elif ai_provider.lower() == "watson":
+        response = client.generate_text(
+            model_id=model,
+            input=prompt,
+            parameters={
+                "max_new_tokens": max_tokens,
+                "temperature": temperature,
+                "min_new_tokens": 1,
+            },
+        )
+        return str(response["results"][0]["generated_text"])
+    elif ai_provider.lower() == "lightspeed":
+        if requests is None:
+            return f"{ERROR_PREFIX} requests library not available"
+
+        headers = {
+            "Authorization": f"Bearer {client['api_key']}",
+            "Content-Type": "application/json",
+        }
+        payload = {
+            "model": model,
+            "prompt": prompt,
+            "max_tokens": max_tokens,
+            "temperature": temperature,
+        }
+        response = requests.post(
+            f"{client['base_url']}/v1/completions",
+            headers=headers,
+            json=payload,
+            timeout=60,
+        )
+        if response.status_code == 200:
+            return str(response.json()["choices"][0]["text"])
+        else:
+            return (
+                f"{ERROR_PREFIX} Red Hat Lightspeed API error: "
+                f"{response.status_code} - {response.text}"
+            )
+    elif ai_provider.lower() == "github_copilot":
+        if requests is None:
+            return f"{ERROR_PREFIX} requests library not available"
+
+        headers = {
+            "Authorization": f"Bearer {client['api_key']}",
+            "Content-Type": "application/json",
+            "User-Agent": "SousChef/1.0",
+        }
+        payload = {
+            "model": model,
+            "messages": [{"role": "user", "content": prompt}],
+            "max_tokens": max_tokens,
+            "temperature": temperature,
+        }
+        # GitHub Copilot uses OpenAI-compatible chat completions endpoint
+        response = requests.post(
+            f"{client['base_url']}/copilot/chat/completions",
+            headers=headers,
+            json=payload,
+            timeout=60,
+        )
+        if response.status_code == 200:
+            return str(response.json()["choices"][0]["message"]["content"])
+        else:
+            return (
+                f"{ERROR_PREFIX} GitHub Copilot API error: "
+                f"{response.status_code} - {response.text}"
+            )
+    else:  # OpenAI
+        response = client.chat.completions.create(
+            model=model,
+            max_tokens=max_tokens,
+            temperature=temperature,
+            messages=[{"role": "user", "content": prompt}],
+        )
+        return str(response.choices[0].message.content)
+
+
+def _create_ai_conversion_prompt(
+    raw_content: str,
+    parsed_content: str,
+    recipe_name: str,
+    project_recommendations: dict | None = None,
+) -> str:
+    """Create a comprehensive prompt for AI conversion."""
+    prompt_parts = _build_base_prompt_parts(raw_content, parsed_content, recipe_name)
+
+    # Add project context if available
+    if project_recommendations:
+        prompt_parts.extend(
+            _build_project_context_parts(project_recommendations, recipe_name)
+        )
+
+    prompt_parts.extend(_build_conversion_requirements_parts())
+
+    # Add project-specific guidance if available
+    if project_recommendations:
+        prompt_parts.extend(_build_project_guidance_parts(project_recommendations))
+
+    prompt_parts.extend(_build_output_format_parts())
+
+    return "\n".join(prompt_parts)
+
+
+def _build_base_prompt_parts(
+    raw_content: str, parsed_content: str, recipe_name: str
+) -> list[str]:
+    """Build the base prompt parts."""
+    return [
+        "You are an expert at converting Chef recipes to Ansible playbooks.",
+        "Your task is to convert the following Chef recipe into a high-quality,",
+        "production-ready Ansible playbook.",
+        "",
+        "CHEF RECIPE CONTENT:",
+        raw_content,
+        "",
+        "PARSED RECIPE ANALYSIS:",
+        parsed_content,
+        "",
+        f"RECIPE NAME: {recipe_name}",
+    ]
+
+
+def _build_project_context_parts(
+    project_recommendations: dict, recipe_name: str
+) -> list[str]:
+    """Build project context parts."""
+    # Extract values to shorten f-strings
+    complexity = project_recommendations.get("project_complexity", "Unknown")
+    strategy = project_recommendations.get("migration_strategy", "Unknown")
+    effort_days = project_recommendations.get("project_effort_days", 0)
+    density = project_recommendations.get("dependency_density", 0)
+
+    parts = [
+        "",
+        "PROJECT CONTEXT:",
+        f"Project Complexity: {complexity}",
+        f"Migration Strategy: {strategy}",
+        f"Total Project Effort: {effort_days:.1f} days",
+        f"Dependency Density: {density:.2f}",
+    ]
+
+    # Add migration recommendations
+    recommendations = project_recommendations.get("recommendations", [])
+    if recommendations:
+        parts.extend(
+            [
+                "",
+                "PROJECT MIGRATION RECOMMENDATIONS:",
+            ]
+        )
+        for rec in recommendations[:5]:  # Limit to first 5 recommendations
+            parts.append(f"- {rec}")
+
+    # Add dependency information
+    migration_order = project_recommendations.get("migration_order", [])
+    if migration_order:
+        recipe_position = _find_recipe_position_in_migration_order(
+            migration_order, recipe_name
+        )
+        if recipe_position:
+            dependencies = ", ".join(recipe_position.get("dependencies", [])) or "None"
+            parts.extend(
+                [
+                    "",
+                    "MIGRATION CONTEXT FOR THIS RECIPE:",
+                    f"Phase: {recipe_position.get('phase', 'Unknown')}",
+                    f"Complexity: {recipe_position.get('complexity', 'Unknown')}",
+                    f"Dependencies: {dependencies}",
+                    f"Migration Reason: {recipe_position.get('reason', 'Unknown')}",
+                ]
+            )
+
+    return parts
+
+
+def _find_recipe_position_in_migration_order(
+    migration_order: list[dict[str, Any]], recipe_name: str
+) -> dict[str, Any] | None:
+    """Find this recipe's position in migration order."""
+    for item in migration_order:
+        cookbook_name = recipe_name.replace(".rb", "").replace("recipes/", "")
+        if item.get("cookbook") == cookbook_name:
+            return item
+    return None
+
+
+def _build_conversion_requirements_parts() -> list[str]:
+    """Build conversion requirements parts."""
+    return [
+        "",
+        "CONVERSION REQUIREMENTS:",
+        "",
+        "1. **Understand the Intent**: Analyze what this Chef recipe is trying to",
+        "   accomplish. Look at the resources, their properties, and the overall",
+        "   workflow.",
+        "",
+        "2. **Best Practices**: Generate Ansible code that follows Ansible best",
+        "   practices:",
+        "   - Use appropriate modules (ansible.builtin.* when possible)",
+        "   - Include proper error handling and idempotency",
+        "   - Use meaningful variable names",
+        "   - Include comments explaining complex logic",
+        "   - Handle edge cases and failure scenarios",
+        "",
+        "3. **Resource Mapping**: Convert Chef resources to appropriate Ansible",
+        "   modules:",
+        "   - package → ansible.builtin.package or specific package managers",
+        "   - service → ansible.builtin.service",
+        "   - file/directory → ansible.builtin.file",
+        "   - template → ansible.builtin.template (CHANGE .erb to .j2 extension)",
+        "   - execute → ansible.builtin.command/shell",
+        "   - user/group → ansible.builtin.user/group",
+        "   - mount → ansible.builtin.mount",
+        "   - include_recipe → ansible.builtin.include_role (for unknown cookbooks)",
+        "   - include_recipe → specific role imports (for known cookbooks)",
+        "",
+        "4. **Template Conversions**: CRITICAL for template resources:",
+        "   - Change file extension from .erb to .j2 in the 'src' parameter",
+        "   - Example: 'config.erb' becomes 'config.j2'",
+        "   - Add note: Templates need manual ERB→Jinja2 conversion",
+        "   - ERB syntax: <%= variable %> → Jinja2: {{ variable }}",
+        "   - ERB blocks: <% code %> → Jinja2: {% code %}",
+        "",
+        "5. **Chef Data Bags to Ansible Vault**: Convert data bag lookups:",
+        "   - Chef::EncryptedDataBagItem.load('bag', 'item') →",
+        "   - Ansible: Use group_vars/ or host_vars/ with ansible-vault",
+        "   - Store sensitive data in encrypted YAML files, not inline lookups",
+        "   - Example: Define ssh_key in group_vars/all/vault.yml (encrypted)",
+        "",
+        "6. **Variables and Facts**: Convert Chef node attributes to Ansible",
+        "   variables/facts appropriately:",
+        "   - node['attribute'] → {{ ansible_facts.attribute }} or vars",
+        "   - node['platform'] → {{ ansible_distribution }}",
+        "   - node['platform_version'] → {{ ansible_distribution_version }}",
+        "",
+        "7. **Conditionals**: Convert Chef guards (only_if/not_if) to Ansible when",
+        "   conditions.",
+        "",
+        "8. **Notifications**: Convert Chef notifications to Ansible handlers",
+        "   where appropriate.",
+        "",
+        "9. **Idempotency**: Ensure the playbook is idempotent and can be run",
+        "   multiple times safely.",
+        "",
+        "10. **Error Handling**: Include proper error handling and rollback",
+        "    considerations.",
+        "",
+        "11. **Task Ordering**: CRITICAL: Ensure tasks are ordered logically.",
+        "    - Install packages BEFORE configuring them.",
+        "    - Create users/groups BEFORE using them in file permissions.",
+        "    - Place configuration files BEFORE starting/restarting services.",
+        "    - Ensure directories exist BEFORE creating files in them.",
+        "",
+        "12. **Handlers**: Verify that all notified handlers are actually defined",
+        "    in the handlers section.",
+    ]
+
+
+def _build_project_guidance_parts(project_recommendations: dict) -> list[str]:
+    """Build project-specific guidance parts."""
+    strategy = project_recommendations.get("migration_strategy", "").lower()
+    parts = []
+
+    if "parallel" in strategy:
+        parallel_tracks = project_recommendations.get("parallel_tracks", 2)
+        parts.extend(
+            [
+                "",
+                "11. **Parallel Migration Context**: This recipe is part of a",
+                f"    parallel migration with {parallel_tracks} tracks.",
+                "    Ensure this playbook can run independently without",
+                "    dependencies on other cookbooks in the project.",
+            ]
+        )
+    elif "phased" in strategy:
+        parts.extend(
+            [
+                "",
+                "11. **Phased Migration Context**: This recipe is part of a phased",
+                "    migration approach. Consider dependencies and ensure proper",
+                "    ordering within the broader project migration plan.",
+            ]
+        )
+
+    return parts
+
+
+def _build_output_format_parts() -> list[str]:
+    """Build output format parts."""
+    return [
+        "",
+        "OUTPUT FORMAT:",
+        "Return ONLY a valid YAML Ansible playbook. Do not include any",
+        " explanation, markdown formatting, or code blocks. The output should",
+        " be pure YAML that can be directly used as an Ansible playbook.",
+        "",
+        "CRITICAL YAML SYNTAX RULES:",
+        "- Use block mapping style (one key per line) NOT flow mapping style",
+        "- NEVER use { } for mappings with conditionals or complex expressions",
+        "- Correct: Use multi-line format:",
+        "  - name: Include role conditionally",
+        "    ansible.builtin.import_role:",
+        "      name: my_role",
+        "    when: condition_here",
+        "- WRONG: { role: my_role, when: condition }  # This is INVALID",
+        "",
+        "The playbook should include:",
+        "- A proper name",
+        "- Appropriate hosts (default to 'all')",
+        "- Variables section if needed",
+        "- Tasks section with all converted resources",
+        "- Handlers section if notifications are used",
+        "- Any necessary pre_tasks or post_tasks",
+        "",
+        "Example structure:",
+        "---",
+        "- name: Convert of <recipe_name>",
+        "  hosts: all",
+        "  become: true",
+        "  vars:",
+        "    # Variables here",
+        "  tasks:",
+        "    # Tasks here",
+        "  handlers:",
+        "    # Handlers here",
+        "",
+        "Focus on creating a functional, well-structured Ansible playbook that",
+        "achieves the same outcome as the Chef recipe.",
+    ]
+
+
+def _clean_ai_playbook_response(ai_response: str) -> str:
+    """Clean and validate the AI-generated playbook response."""
+    if not ai_response or not ai_response.strip():
+        return f"{ERROR_PREFIX} AI returned empty response"
+
+    # Remove markdown code blocks if present
+    cleaned = re.sub(r"```\w*\n?", "", ai_response)
+    cleaned = cleaned.strip()
+
+    # Basic validation - check if it looks like YAML
+    if not cleaned.startswith("---") and not cleaned.startswith("- name:"):
+        return f"{ERROR_PREFIX} AI response does not appear to be valid YAML playbook"
+
+    # Try to parse as YAML to validate structure
+    try:
+        import yaml
+
+        yaml.safe_load(cleaned)
+    except Exception as e:
+        return f"{ERROR_PREFIX} AI generated invalid YAML: {e}"
+
+    return cleaned
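
Roughly what the cleaner accepts and rejects (illustrative strings):

fenced = "```yaml\n---\n- name: Demo\n  hosts: all\n```"
_clean_ai_playbook_response(fenced)           # fences stripped, YAML kept
_clean_ai_playbook_response("Sure! Here...")  # error: does not look like YAML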
+
+
+def _validate_and_fix_playbook(
+    playbook_content: str,
+    client: Any,
+    ai_provider: str,
+    model: str,
+    temperature: float,
+    max_tokens: int,
+) -> str:
+    """Validate playbook with ansible-lint and attempt AI self-correction."""
+    if playbook_content.startswith(ERROR_PREFIX):
+        return playbook_content
+
+    validation_error = _run_ansible_lint(playbook_content)
+    if not validation_error:
+        return playbook_content
+
+    # Limit simple loops to 1 retry for now to save tokens/time
+    fix_prompt = f"""The Ansible playbook you generated has validation errors.
+Please fix the errors below and return the corrected playbook.
+
+ERRORS:
+{validation_error}
+
+PLAYBOOK:
+{playbook_content}
+
+Ensure the logical ordering of tasks is correct (e.g., packages installed before
+config files, config files before services).
+Return ONLY the corrected YAML playbook.
+Do NOT include any introduction, cleanup text, explanations, or markdown code blocks.
+Just the YAML content.
+"""
+
+    try:
+        fixed_response = _call_ai_api(
+            client, ai_provider, fix_prompt, model, temperature, max_tokens
+        )
+        cleaned_response = _clean_ai_playbook_response(fixed_response)
+
+        # If the cleaner returns an error string, it means the fixed response
+        # was still invalid
+        if cleaned_response.startswith(ERROR_PREFIX):
+            # Fallback to the original (valid-but-lint-failing) playbook
+            # rather than returning an error string
+            return playbook_content
+
+        return cleaned_response
+    except Exception:
+        # If fix fails, return original with warning (or original error)
+        return playbook_content
+
+
+def _run_ansible_lint(playbook_content: str) -> str | None:
+    """Run ansible-lint on the playbook content."""
+    # Check if ansible-lint is available
+    if shutil.which("ansible-lint") is None:
+        return None
+
+    tmp_path = None
+    try:
+        with tempfile.NamedTemporaryFile(mode="w", suffix=".yml", delete=False) as tmp:
+            tmp.write(playbook_content)
+            tmp_path = tmp.name
+
+        # Run ansible-lint
+        # We ignore return code because we want to capture output even on failure
+        result = subprocess.run(
+            ["ansible-lint", "--nocolor", "-p", tmp_path],
+            capture_output=True,
+            text=True,
+            check=False,
+        )
+
+        if result.returncode != 0:
+            return result.stdout + "\n" + result.stderr
+
+        return None
+    except Exception:
+        return None
+    finally:
+        if tmp_path is not None and Path(tmp_path).exists():
+            Path(tmp_path).unlink()
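
Taken together, these two helpers form a single lint-and-retry pass: if ansible-lint is on PATH and reports problems, the errors are fed back to the model once, and the original draft is kept whenever the "fixed" response is worse. A sketch of the flow (hypothetical draft content):

draft = "---\n- name: Demo play\n  hosts: all\n  tasks: []\n"
errors = _run_ansible_lint(draft)  # None if clean or if ansible-lint is missing
if errors:
    print(errors.splitlines()[0])  # first lint finding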
+
+
 def convert_chef_search_to_inventory(search_query: str) -> str:
     """
     Convert a Chef search query to Ansible inventory patterns and groups.
@@ -121,9 +752,9 @@ def generate_dynamic_inventory_script(search_queries: str) -> str:
         return f"Error generating dynamic inventory script: {e}"


-def
+def analyse_chef_search_patterns(recipe_or_cookbook_path: str) -> str:
     """
-
+    Analyse recipes/cookbooks to extract search patterns for inventory planning.

     Args:
         recipe_or_cookbook_path: Path to Chef recipe file or cookbook directory.
@@ -796,9 +1427,23 @@ def _build_playbook_header(recipe_name: str) -> list[str]:
     ]


-def _add_playbook_variables(
+def _add_playbook_variables(
+    playbook_lines: list[str], raw_content: str, recipe_file: Path
+) -> None:
     """Extract and add variables section to playbook."""
     variables = _extract_recipe_variables(raw_content)
+
+    # Try to parse attributes file
+    attributes_path = recipe_file.parent.parent / "attributes" / "default.rb"
+    if attributes_path.exists():
+        attributes_content = parse_attributes(str(attributes_path))
+        if not attributes_content.startswith(
+            "Error:"
+        ) and not attributes_content.startswith("Warning:"):
+            # Parse the resolved attributes
+            attr_vars = _extract_attribute_variables(attributes_content)
+            variables.update(attr_vars)
+
     for var_name, var_value in variables.items():
         playbook_lines.append(f"    {var_name}: {var_value}")

@@ -812,7 +1457,7 @@ def _convert_and_collect_resources(
     parsed_content: str, raw_content: str
 ) -> tuple[list[dict[str, Any]], list[dict[str, Any]]]:
     """Convert Chef resources to Ansible tasks and collect handlers."""
-    resources = _extract_resources_from_parsed_content(parsed_content)
+    resources = _extract_resources_from_parsed_content(parsed_content, raw_content)
     tasks = []
     handlers = []

@@ -829,10 +1474,8 @@ def _format_item_lines(item_yaml: str) -> list[str]:
     """Format a single task/handler's YAML lines with proper indentation."""
     formatted = []
     for i, line in enumerate(item_yaml.split("\n")):
-        if i == 0:  # First line gets 4-space indent
+        if i == 0 or line.strip():  # First line gets 4-space indent
             formatted.append(f"    {line}")
-        elif line.strip():  # Non-empty property lines get 6-space indent
-            formatted.append(f"      {line}")
         else:  # Empty lines preserved as-is
             formatted.append(line)
     return formatted
@@ -855,11 +1498,11 @@ def _add_formatted_items(


 def _generate_playbook_structure(
-    parsed_content: str, raw_content: str,
+    parsed_content: str, raw_content: str, recipe_file: Path
 ) -> str:
     """Generate complete playbook structure from parsed recipe content."""
-    playbook_lines = _build_playbook_header(
-    _add_playbook_variables(playbook_lines, raw_content)
+    playbook_lines = _build_playbook_header(recipe_file.name)
+    _add_playbook_variables(playbook_lines, raw_content, recipe_file)

     # Convert resources to tasks and handlers
     tasks, handlers = _convert_and_collect_resources(parsed_content, raw_content)
@@ -944,6 +1587,301 @@ def _extract_mode_variables(raw_content: str) -> dict[str, str]:
     return {}


+def _convert_ruby_value_to_yaml(ruby_value: str) -> str:  # noqa: C901
+    """Convert Ruby value syntax to YAML-compatible format."""
+    ruby_value = ruby_value.strip()
+
+    # Handle Ruby hash syntax { key => value, ... }
+    if ruby_value.startswith("{") and ruby_value.endswith("}"):
+        return _convert_ruby_hash_to_yaml(ruby_value)
+
+    # Handle arrays [item1, item2, ...]
+    if ruby_value.startswith("[") and ruby_value.endswith("]"):
+        return _convert_ruby_array_to_yaml(ruby_value)
+
+    # Handle primitive values (strings, numbers, booleans, nil)
+    return _convert_primitive_value(ruby_value)
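
Worked examples of the dispatch above (illustrative inputs):

_convert_ruby_value_to_yaml("{ 'port' => 8080, 'ssl' => true }")
# -> "{port: 8080, ssl: true}"
_convert_ruby_value_to_yaml("['nginx', 'curl']")
# -> "['nginx', 'curl']" (already-quoted items pass through unchanged)
_convert_ruby_value_to_yaml("nil")
# -> "null"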
+
+
+def _is_attribute_separator(line: str) -> bool:
+    """Check if a line indicates the start of a new attribute or metadata."""
+    stripped = line.strip()
+    return (
+        stripped.startswith(ATTRIBUTE_PREFIX)
+        or stripped.startswith("Precedence: ")
+        or stripped.startswith("=")
+        or stripped.startswith("Total attributes:")
+        or stripped.startswith("⚠️")
+    )
+
+
+def _collect_value_lines(lines: list[str], start_index: int) -> tuple[list[str], int]:
+    """Collect all lines belonging to a value until the next attribute separator."""
+    value_lines = []
+    i = start_index
+
+    while i < len(lines):
+        next_line = lines[i]
+        if _is_attribute_separator(next_line):
+            break
+        value_lines.append(lines[i])
+        i += 1
+
+    return value_lines, i
+
+
+def _extract_attribute_variables(attributes_content: str) -> dict[str, str]:
+    """Extract Ansible variables from parsed Chef attributes."""
+    variables = {}
+    lines = attributes_content.split("\n")
+    i = 0
+
+    while i < len(lines):
+        line = lines[i].strip()
+        if line.startswith(ATTRIBUTE_PREFIX):
+            current_attr = line.split(ATTRIBUTE_PREFIX)[1]
+            i += 1  # Move to next line to look for Value:
+
+            # Find and collect value lines
+            value_lines, i = _find_and_collect_value_lines(lines, i)
+
+            # Process the collected value lines
+            if current_attr and value_lines:
+                full_value = "\n".join(value_lines).strip()
+                ansible_var = _convert_chef_attr_path_to_ansible_var(current_attr)
+                yaml_value = _convert_ruby_value_to_yaml(full_value)
+                variables[ansible_var] = yaml_value
+        else:
+            i += 1
+
+    return variables
+
+
+def _find_and_collect_value_lines(
+    lines: list[str], start_index: int
+) -> tuple[list[str], int]:
+    """
+    Find the Value: line and collect all value lines until next attribute separator.
+
+    Find the Value: line and collect all value lines until next attribute separator.
+    """
+    value_lines: list[str] = []
+    i = start_index
+
+    # Look for the Value: line
+    while i < len(lines):
+        next_line = lines[i]
+        if next_line.strip().startswith(VALUE_PREFIX):
+            # Found value start, collect all value lines
+            value_start = next_line.split(VALUE_PREFIX, 1)[1]
+            collected_lines, i = _collect_value_lines(lines, i + 1)
+            value_lines = [value_start] + collected_lines
+            break
+        elif _is_attribute_separator(next_line):
+            # Hit another attribute before finding value
+            break
+        i += 1
+    else:
+        # No more lines
+        i += 1
+
+    return value_lines, i
+
+
+def _convert_ruby_hash_to_yaml(ruby_hash: str) -> str:
+    """Convert Ruby hash syntax { key => value, ... } to YAML flow style."""
+    try:
+        # Remove outer braces
+        hash_content = ruby_hash[1:-1].strip()
+        if not hash_content:
+            return "{}"
+
+        # Split by commas, respecting nested structures
+        yaml_pairs = _split_by_commas_with_nesting(hash_content)
+
+        # Convert each pair from Ruby syntax to YAML
+        flow_pairs = []
+        for pair in yaml_pairs:
+            if "=>" in pair:
+                key_part, value_part = pair.split("=>", 1)
+                key = key_part.strip()
+                value = value_part.strip()
+
+                # Remove quotes from key if present
+                key = key.strip("'\"")
+
+                # Convert value recursively if it's complex
+                value = _convert_ruby_value_to_yaml(value)
+                flow_pairs.append(f"{key}: {value}")
+            else:
+                # Malformed pair, keep as comment
+                flow_pairs.append(f"# TODO: Fix malformed pair: {pair}")
+
+        return "{" + ", ".join(flow_pairs) + "}" if flow_pairs else "{}"
+
+    except Exception:
+        # If conversion fails, return as-is with a comment
+        return f"# TODO: Convert Ruby hash: {ruby_hash}"
+
+
+def _convert_ruby_array_to_yaml(ruby_array: str) -> str:
+    """Convert Ruby array syntax [item1, item2, ...] to YAML flow style."""
+    try:
+        # Remove outer brackets
+        array_content = ruby_array[1:-1].strip()
+        if not array_content:
+            return "[]"
+
+        # Split by commas, respecting nested structures
+        items = _split_by_commas_with_nesting(array_content)
+
+        # Convert items recursively if they're complex
+        flow_items = [_convert_ruby_value_to_yaml(item) for item in items]
+
+        return "[" + ", ".join(flow_items) + "]" if flow_items else "[]"
+
+    except Exception:
+        return ruby_array  # Return as-is if parsing fails
+
+
+def _split_by_commas_with_nesting(content: str) -> list[str]:
+    """Split content by commas while respecting nested braces and brackets."""
+    parts = []
+    current_part = ""
+    in_quotes = False
+    quote_char = None
+    brace_depth = 0
+    bracket_depth = 0
+
+    for char in content:
+        if _is_quote_char(char) and brace_depth == 0 and bracket_depth == 0:
+            in_quotes, quote_char = _handle_quote_transition(
+                char, in_quotes, quote_char
+            )
+        elif not in_quotes:
+            brace_depth, bracket_depth = _update_nesting_depths(
+                char, brace_depth, bracket_depth
+            )
+            if _should_split_at_comma(char, in_quotes, brace_depth, bracket_depth):
+                parts.append(current_part.strip())
+                current_part = ""
+                continue
+
+        current_part += char
+
+    # Add the last part
+    if current_part.strip():
+        parts.append(current_part.strip())
+
+    return parts
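
The splitter keeps quoted commas and nested structures intact, e.g. (illustrative call):

_split_by_commas_with_nesting("'a,b', { 'x' => 1 }, [1, 2]")
# -> ["'a,b'", "{ 'x' => 1 }", "[1, 2]"]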
+
+
+def _is_quote_char(char: str) -> bool:
+    """Check if character is a quote."""
+    return char in ['"', "'"]
+
+
+def _handle_quote_transition(
+    char: str, in_quotes: bool, quote_char: str | None
+) -> tuple[bool, str | None]:
+    """Handle quote character transitions."""
+    if not in_quotes:
+        return True, char
+    elif char == quote_char:
+        return False, None
+    return in_quotes, quote_char
+
+
+def _update_nesting_depths(
+    char: str, brace_depth: int, bracket_depth: int
+) -> tuple[int, int]:
+    """Update brace and bracket nesting depths."""
+    if char == "{":
+        brace_depth += 1
+    elif char == "}":
+        brace_depth -= 1
+    elif char == "[":
+        bracket_depth += 1
+    elif char == "]":
+        bracket_depth -= 1
+    return brace_depth, bracket_depth
+
+
+def _should_split_at_comma(
+    char: str, in_quotes: bool, brace_depth: int, bracket_depth: int
+) -> bool:
+    """Determine if we should split at this comma."""
+    return char == "," and not in_quotes and brace_depth == 0 and bracket_depth == 0
+
+
+def _convert_primitive_value(ruby_value: str) -> str:
+    """Convert primitive Ruby values (strings, numbers, booleans, nil)."""
+    # Handle quoted strings
+    if (
+        ruby_value.startswith('"')
+        and ruby_value.endswith('"')
+        or ruby_value.startswith("'")
+        and ruby_value.endswith("'")
+    ):
+        return ruby_value  # Already properly quoted
+
+    # Handle numbers
+    try:
+        int(ruby_value)
+        return ruby_value
+    except ValueError:
+        pass
+
+    try:
+        float(ruby_value)
+        return ruby_value
+    except ValueError:
+        pass
+
+    # Handle booleans
+    if ruby_value.lower() in ["true", "false"]:
+        return ruby_value.lower()
+
+    # Handle nil
+    if ruby_value.lower() == "nil":
+        return "null"
+
+    # For strings that aren't quoted, quote them
+    return f'"{ruby_value}"'
+
+
+def _convert_chef_attr_path_to_ansible_var(attr_path: str) -> str:
+    """Convert Chef attribute path to Ansible variable name."""
+    # Replace dots with underscores, handle special cases
+    parts = attr_path.split(".")
+    if len(parts) >= 2:
+        # For cookbook-specific attrs like '301.version' -> 'threeohone_version'
+        cookbook_name = parts[0]
+        attr_name = "_".join(parts[1:])
+
+        # Convert numbers to words for readability
+        number_words = {
+            "301": "threeohone",
+            "1": "one",
+            "2": "two",
+            "3": "three",
+            "4": "four",
+            "5": "five",
+            "6": "six",
+            "7": "seven",
+            "8": "eight",
+            "9": "nine",
+            "0": "zero",
+        }
+
+        # Replace digits with words
+        readable_cookbook = "".join(number_words.get(c, c) for c in cookbook_name)
+        return f"{readable_cookbook}_{attr_name}"
+
+    # Fallback: just replace dots with underscores
+    return attr_path.replace(".", "_")
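
A couple of illustrative conversions, per the digit-to-word mapping above:

_convert_chef_attr_path_to_ansible_var("nginx.worker_processes")
# -> "nginx_worker_processes"
_convert_chef_attr_path_to_ansible_var("3.version")
# -> "three_version" (digits in the cookbook name are spelled out)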
+
+
 def _extract_recipe_variables(raw_content: str) -> dict[str, str]:
     """Extract variables from Chef recipe content."""
     variables = {}
@@ -961,8 +1899,24 @@ def _extract_recipe_variables(raw_content: str) -> dict[str, str]:


 def _parse_resource_block(block: str) -> dict[str, str] | None:
-    """Parse a single resource block into a dictionary."""
-
+    """Parse a single resource (or include_recipe) block into a dictionary."""
+    trimmed = block.strip()
+    if not trimmed:
+        return None
+
+    # Handle include_recipe blocks emitted by parse_recipe
+    if trimmed.startswith("Include Recipe"):
+        recipe_match = re.search(r"Recipe:\s*([^\n]+)", block)
+        if not recipe_match:
+            return None
+        return {
+            "type": "include_recipe",
+            "name": recipe_match.group(1).strip(),
+            "action": "include",
+            "properties": "",
+        }
+
+    if not trimmed.startswith("Resource"):
         return None

     resource: dict[str, str] = {}
@@ -992,17 +1946,64 @@ def _parse_resource_block(block: str) -> dict[str, str] | None:
     return resource


-def _extract_resources_from_parsed_content(
+def _extract_resources_from_parsed_content(
+    parsed_content: str, raw_content: str
+) -> list[dict[str, str]]:
     """Extract resource information from parsed recipe content."""
-    resource_blocks = re.split(
+    resource_blocks = re.split(
+        r"\n(?=(Resource \d+:|Include Recipe \d+:))", parsed_content
+    )
     resources = []
     for block in resource_blocks:
         resource = _parse_resource_block(block)
         if resource:
+            # Find position in raw_content to preserve order
+            position = _find_resource_position_in_raw(resource, raw_content)
+            resource["_position"] = position  # type: ignore
             resources.append(resource)
+
+    # Sort by position to preserve original order
+    resources.sort(key=lambda r: r.get("_position", 999999))
+
+    # Remove position key
+    for r in resources:
+        r.pop("_position", None)
+
     return resources


+def _find_resource_position_in_raw(resource: dict[str, str], raw_content: str) -> int:
+    """Find the position of a resource in raw content."""
+    if resource["type"] == "include_recipe":
+        pattern = rf'include_recipe\s+[\'"]({re.escape(resource["name"])})[\'"]'
+    elif resource["type"] == "nodejs_npm":
+        pattern = rf'nodejs_npm\s+[\'"]({re.escape(resource["name"])})[\'"]'
+    else:
+        # Generic pattern
+        pattern = rf'{resource["type"]}\s+[\'"]({re.escape(resource["name"])})[\'"]'
+
+    match = re.search(pattern, raw_content)
+    return match.start() if match else 999999
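
The position lookup is what lets the converter re-sort parsed resources back into source order. For example (hypothetical recipe content):

raw = "package 'nginx'\nservice 'nginx' do\n  action :start\nend\n"
_find_resource_position_in_raw({"type": "service", "name": "nginx"}, raw)
# -> 16, the offset of "service 'nginx'"; unmatched resources sort last (999999)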
+
+
+def _extract_nodejs_npm_version(raw_content: str, package_name: str) -> str | None:
+    """Extract version for nodejs_npm resources from raw recipe content."""
+    pattern = (
+        rf"nodejs_npm\s+[\"']{re.escape(package_name)}[\"']\s+do"
+        rf"(?P<body>.{{0,400}}?)^end"
+    )
+    match = re.search(pattern, raw_content, re.DOTALL | re.MULTILINE)
+    if not match:
+        return None
+
+    body = match.group("body")
+    version_match = re.search(r"version\s+([^\n#]+)", body)
+    if not version_match:
+        return None
+
+    return version_match.group(1).strip()
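
An illustration of the version extraction on a typical block (made-up recipe content):

recipe = "nodejs_npm 'pm2' do\n  version '5.2.0'\nend\n"
_extract_nodejs_npm_version(recipe, "pm2")
# -> "'5.2.0'" (quotes preserved; the caller stores it in the resource properties)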
+
+
 # Notification handling


@@ -1101,6 +2102,12 @@ def _convert_resource_to_task_dict(
     resource: dict[str, str], raw_content: str
 ) -> dict[str, Any]:
     """Convert a Chef resource to an Ansible task dictionary with handlers."""
+    # Enrich nodejs_npm resources with version info when it could not be parsed
+    if resource["type"] == "nodejs_npm" and not resource.get("properties"):
+        extracted_version = _extract_nodejs_npm_version(raw_content, resource["name"])
+        if extracted_version is not None:
+            resource["properties"] = str({"version": extracted_version})
+
     # Convert basic resource to task
     task = _convert_chef_resource_to_ansible(
         resource["type"], resource["name"], resource["action"], resource["properties"]