mcp-souschef 2.5.3__py3-none-any.whl → 2.8.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {mcp_souschef-2.5.3.dist-info → mcp_souschef-2.8.0.dist-info}/METADATA +56 -21
- mcp_souschef-2.8.0.dist-info/RECORD +42 -0
- souschef/__init__.py +10 -2
- souschef/assessment.py +14 -14
- souschef/ci/github_actions.py +5 -5
- souschef/ci/gitlab_ci.py +4 -4
- souschef/ci/jenkins_pipeline.py +4 -4
- souschef/cli.py +12 -12
- souschef/converters/__init__.py +2 -2
- souschef/converters/cookbook_specific.py +125 -0
- souschef/converters/cookbook_specific.py.backup +109 -0
- souschef/converters/playbook.py +853 -15
- souschef/converters/resource.py +103 -1
- souschef/core/constants.py +13 -0
- souschef/core/path_utils.py +12 -9
- souschef/deployment.py +24 -24
- souschef/parsers/attributes.py +397 -32
- souschef/parsers/recipe.py +48 -10
- souschef/server.py +35 -37
- souschef/ui/app.py +1413 -252
- souschef/ui/health_check.py +36 -0
- souschef/ui/pages/ai_settings.py +497 -0
- souschef/ui/pages/cookbook_analysis.py +1010 -75
- mcp_souschef-2.5.3.dist-info/RECORD +0 -38
- {mcp_souschef-2.5.3.dist-info → mcp_souschef-2.8.0.dist-info}/WHEEL +0 -0
- {mcp_souschef-2.5.3.dist-info → mcp_souschef-2.8.0.dist-info}/entry_points.txt +0 -0
- {mcp_souschef-2.5.3.dist-info → mcp_souschef-2.8.0.dist-info}/licenses/LICENSE +0 -0
souschef/converters/playbook.py
CHANGED
@@ -8,6 +8,9 @@ inventory scripts.
 
 import json
 import re
+import shutil
+import subprocess
+import tempfile
 from datetime import datetime
 from pathlib import Path
 from typing import Any
@@ -18,6 +21,7 @@ from souschef.converters.resource import (
 )
 from souschef.core.constants import (
     ANSIBLE_SERVICE_MODULE,
+    ATTRIBUTE_PREFIX,
     ERROR_PREFIX,
     JINJA2_VAR_REPLACEMENT,
     NODE_PREFIX,
@@ -25,10 +29,25 @@ from souschef.core.constants import (
     REGEX_RESOURCE_BRACKET,
     REGEX_RUBY_INTERPOLATION,
     REGEX_WHITESPACE_QUOTE,
+    VALUE_PREFIX,
 )
 from souschef.core.path_utils import _normalize_path, _safe_join
+from souschef.parsers.attributes import parse_attributes
 from souschef.parsers.recipe import parse_recipe
 
+# Optional AI provider imports
+try:
+    import requests  # type: ignore[import-untyped]
+except ImportError:
+    requests = None
+
+try:
+    from ibm_watsonx_ai import (  # type: ignore[import-not-found]
+        APIClient,
+    )
+except ImportError:
+    APIClient = None
+
 # Maximum length for guard condition patterns in regex matching
 MAX_GUARD_LENGTH = 500
 
@@ -61,7 +80,7 @@ def generate_playbook_from_recipe(recipe_path: str) -> str:
 
         # Generate playbook structure
         playbook: str = _generate_playbook_structure(
-            recipe_content, raw_content, recipe_file
+            recipe_content, raw_content, recipe_file
         )
 
         return playbook
@@ -70,6 +89,449 @@ def generate_playbook_from_recipe(recipe_path: str) -> str:
         return f"Error generating playbook: {e}"
 
 
+def generate_playbook_from_recipe_with_ai(
+    recipe_path: str,
+    ai_provider: str = "anthropic",
+    api_key: str = "",
+    model: str = "claude-3-5-sonnet-20241022",
+    temperature: float = 0.7,
+    max_tokens: int = 4000,
+    project_id: str = "",
+    base_url: str = "",
+) -> str:
+    """
+    Generate an AI-enhanced Ansible playbook from a Chef recipe.
+
+    Uses AI to intelligently convert Chef recipes to Ansible playbooks,
+    considering context, best practices, and optimization opportunities.
+
+    Args:
+        recipe_path: Path to the Chef recipe (.rb) file.
+        ai_provider: AI provider to use ('anthropic', 'openai', 'watson',
+            'lightspeed'). Note: 'github_copilot' is listed but not supported as
+            GitHub Copilot does not have a public REST API.
+        api_key: API key for the AI provider.
+        model: AI model to use.
+        temperature: Creativity/randomness parameter (0.0-2.0).
+        max_tokens: Maximum tokens to generate.
+        project_id: Project ID for IBM Watsonx (required for watson provider).
+        base_url: Custom base URL for the AI provider.
+
+    Returns:
+        AI-generated Ansible playbook in YAML format.
+
+    """
+    try:
+        # Parse the recipe file
+        recipe_file = _normalize_path(recipe_path)
+        if not recipe_file.exists():
+            return f"{ERROR_PREFIX} Recipe file does not exist: {recipe_path}"
+
+        raw_content = recipe_file.read_text()
+
+        # Get basic recipe parsing for context
+        parsed_content = parse_recipe(recipe_path)
+        if parsed_content.startswith(ERROR_PREFIX):
+            return parsed_content
+
+        # Use AI to generate the playbook
+        ai_playbook = _generate_playbook_with_ai(
+            raw_content,
+            parsed_content,
+            recipe_file.name,
+            ai_provider,
+            api_key,
+            model,
+            temperature,
+            max_tokens,
+            project_id,
+            base_url,
+        )
+
+        return ai_playbook
+
+    except Exception as e:
+        return f"Error generating AI-enhanced playbook: {e}"
+
+
+def _generate_playbook_with_ai(
+    raw_content: str,
+    parsed_content: str,
+    recipe_name: str,
+    ai_provider: str,
+    api_key: str,
+    model: str,
+    temperature: float,
+    max_tokens: int,
+    project_id: str = "",
+    base_url: str = "",
+) -> str:
+    """Generate Ansible playbook using AI for intelligent conversion."""
+    try:
+        # Initialize AI client based on provider
+        client = _initialize_ai_client(ai_provider, api_key, project_id, base_url)
+        if isinstance(client, str):  # Error message returned
+            return client
+
+        # Create the AI prompt
+        prompt = _create_ai_conversion_prompt(raw_content, parsed_content, recipe_name)
+
+        # Call the AI API and get response
+        ai_response = _call_ai_api(
+            client, ai_provider, prompt, model, temperature, max_tokens
+        )
+
+        # Clean and validate the AI response
+        cleaned_playbook = _clean_ai_playbook_response(ai_response)
+
+        # Validate with ansible-lint and self-correct if possible
+        cleaned_playbook = _validate_and_fix_playbook(
+            cleaned_playbook, client, ai_provider, model, temperature, max_tokens
+        )
+
+        return cleaned_playbook
+
+    except ImportError as e:
+        return f"{ERROR_PREFIX} AI library not available: {e}"
+    except Exception as e:
+        return f"{ERROR_PREFIX} AI conversion failed: {e}"
+
+
+def _initialize_ai_client(
+    ai_provider: str, api_key: str, project_id: str = "", base_url: str = ""
+) -> Any:
+    """Initialize AI client based on provider."""
+    if ai_provider.lower() == "anthropic":
+        import anthropic
+
+        return anthropic.Anthropic(api_key=api_key)
+    elif ai_provider.lower() == "openai":
+        import openai
+
+        return openai.OpenAI(api_key=api_key)
+    elif ai_provider.lower() == "watson":
+        if APIClient is None:
+            return f"{ERROR_PREFIX} ibm_watsonx_ai library not available"
+
+        return APIClient(
+            api_key=api_key,
+            project_id=project_id,
+            url=base_url or "https://us-south.ml.cloud.ibm.com",
+        )
+    elif ai_provider.lower() == "lightspeed":
+        if requests is None:
+            return f"{ERROR_PREFIX} requests library not available"
+
+        return {
+            "api_key": api_key,
+            "base_url": base_url or "https://api.redhat.com",
+        }
+    elif ai_provider.lower() == "github_copilot":
+        return (
+            f"{ERROR_PREFIX} GitHub Copilot does not have a public REST API. "
+            "GitHub Copilot is only available through IDE integrations and "
+            "cannot be used "
+            "for programmatic API calls. Please use Anthropic Claude, OpenAI, or IBM "
+            "Watsonx instead."
+        )
+    else:
+        return f"{ERROR_PREFIX} Unsupported AI provider: {ai_provider}"
+
+
+def _call_ai_api(
+    client: Any,
+    ai_provider: str,
+    prompt: str,
+    model: str,
+    temperature: float,
+    max_tokens: int,
+) -> str:
+    """Call the appropriate AI API based on provider."""
+    if ai_provider.lower() == "anthropic":
+        response = client.messages.create(
+            model=model,
+            max_tokens=max_tokens,
+            temperature=temperature,
+            messages=[{"role": "user", "content": prompt}],
+        )
+        return str(response.content[0].text)
+    elif ai_provider.lower() == "watson":
+        response = client.generate_text(
+            model_id=model,
+            input=prompt,
+            parameters={
+                "max_new_tokens": max_tokens,
+                "temperature": temperature,
+                "min_new_tokens": 1,
+            },
+        )
+        return str(response["results"][0]["generated_text"])
+    elif ai_provider.lower() == "lightspeed":
+        if requests is None:
+            return f"{ERROR_PREFIX} requests library not available"
+
+        headers = {
+            "Authorization": f"Bearer {client['api_key']}",
+            "Content-Type": "application/json",
+        }
+        payload = {
+            "model": model,
+            "prompt": prompt,
+            "max_tokens": max_tokens,
+            "temperature": temperature,
+        }
+        response = requests.post(
+            f"{client['base_url']}/v1/completions",
+            headers=headers,
+            json=payload,
+            timeout=60,
+        )
+        if response.status_code == 200:
+            return str(response.json()["choices"][0]["text"])
+        else:
+            return (
+                f"{ERROR_PREFIX} Red Hat Lightspeed API error: "
+                f"{response.status_code} - {response.text}"
+            )
+    elif ai_provider.lower() == "github_copilot":
+        if requests is None:
+            return f"{ERROR_PREFIX} requests library not available"
+
+        headers = {
+            "Authorization": f"Bearer {client['api_key']}",
+            "Content-Type": "application/json",
+            "User-Agent": "SousChef/1.0",
+        }
+        payload = {
+            "model": model,
+            "messages": [{"role": "user", "content": prompt}],
+            "max_tokens": max_tokens,
+            "temperature": temperature,
+        }
+        # GitHub Copilot uses OpenAI-compatible chat completions endpoint
+        response = requests.post(
+            f"{client['base_url']}/copilot/chat/completions",
+            headers=headers,
+            json=payload,
+            timeout=60,
+        )
+        if response.status_code == 200:
+            return str(response.json()["choices"][0]["message"]["content"])
+        else:
+            return (
+                f"{ERROR_PREFIX} GitHub Copilot API error: "
+                f"{response.status_code} - {response.text}"
+            )
+    else:  # OpenAI
+        response = client.chat.completions.create(
+            model=model,
+            max_tokens=max_tokens,
+            temperature=temperature,
+            messages=[{"role": "user", "content": prompt}],
+        )
+        return str(response.choices[0].message.content)
+
+
+def _create_ai_conversion_prompt(
+    raw_content: str, parsed_content: str, recipe_name: str
+) -> str:
+    """Create a comprehensive prompt for AI conversion."""
+    return f"""You are an expert at converting Chef recipes to Ansible playbooks.
+Your task is to convert the following Chef recipe into a high-quality,
+production-ready Ansible playbook.
+
+CHEF RECIPE CONTENT:
+{raw_content}
+
+PARSED RECIPE ANALYSIS:
+{parsed_content}
+
+RECIPE NAME: {recipe_name}
+
+CONVERSION REQUIREMENTS:
+
+1. **Understand the Intent**: Analyze what this Chef recipe is trying to
+   accomplish. Look at the resources, their properties, and the overall
+   workflow.
+
+2. **Best Practices**: Generate Ansible code that follows Ansible best
+   practices:
+   - Use appropriate modules (ansible.builtin.* when possible)
+   - Include proper error handling and idempotency
+   - Use meaningful variable names
+   - Include comments explaining complex logic
+   - Handle edge cases and failure scenarios
+
+3. **Resource Mapping**: Convert Chef resources to appropriate Ansible
+   modules:
+   - package → ansible.builtin.package or specific package managers
+   - service → ansible.builtin.service
+   - file/directory → ansible.builtin.file
+   - template → ansible.builtin.template
+   - execute → ansible.builtin.command/shell
+   - user/group → ansible.builtin.user/group
+   - mount → ansible.builtin.mount
+
+4. **Variables and Facts**: Convert Chef node attributes to Ansible
+   variables/facts appropriately.
+
+5. **Conditionals**: Convert Chef guards (only_if/not_if) to Ansible when
+   conditions.
+
+6. **Notifications**: Convert Chef notifications to Ansible handlers where
+   appropriate.
+
+7. **Idempotency**: Ensure the playbook is idempotent and can be run
+   multiple times safely.
+
+8. **Error Handling**: Include proper error handling and rollback
+   considerations.
+
+9. **Task Ordering**: CRITICAL: Ensure tasks are ordered logically.
+   - Install packages BEFORE configuring them.
+   - create users/groups BEFORE using them in file permissions.
+   - Place configuration files BEFORE starting/restarting services.
+   - Ensure directories exist BEFORE creating files in them.
+
+10. **Handlers**: Verify that all notified handlers are actually defined
+    in the handlers section.
+
+OUTPUT FORMAT:
+Return ONLY a valid YAML Ansible playbook. Do not include any explanation,
+markdown formatting, or code blocks. The output should be pure YAML that can
+be directly used as an Ansible playbook.
+
+The playbook should include:
+- A proper name
+- Appropriate hosts (default to 'all')
+- Variables section if needed
+- Tasks section with all converted resources
+- Handlers section if notifications are used
+- Any necessary pre_tasks or post_tasks
+
+Example structure:
+---
+- name: Convert of {recipe_name}
+  hosts: all
+  become: true
+  vars:
+    # Variables here
+  tasks:
+    # Tasks here
+  handlers:
+    # Handlers here
+
+Focus on creating a functional, well-structured Ansible playbook that achieves
+the same outcome as the Chef recipe."""
+
+
+def _clean_ai_playbook_response(ai_response: str) -> str:
+    """Clean and validate the AI-generated playbook response."""
+    if not ai_response or not ai_response.strip():
+        return f"{ERROR_PREFIX} AI returned empty response"
+
+    # Remove markdown code blocks if present
+    cleaned = re.sub(r"```\w*\n?", "", ai_response)
+    cleaned = cleaned.strip()
+
+    # Basic validation - check if it looks like YAML
+    if not cleaned.startswith("---") and not cleaned.startswith("- name:"):
+        return f"{ERROR_PREFIX} AI response does not appear to be valid YAML playbook"
+
+    # Try to parse as YAML to validate structure
+    try:
+        import yaml
+
+        yaml.safe_load(cleaned)
+    except Exception as e:
+        return f"{ERROR_PREFIX} AI generated invalid YAML: {e}"
+
+    return cleaned
+
+
+def _validate_and_fix_playbook(
+    playbook_content: str,
+    client: Any,
+    ai_provider: str,
+    model: str,
+    temperature: float,
+    max_tokens: int,
+) -> str:
+    """Validate playbook with ansible-lint and attempt AI self-correction."""
+    if playbook_content.startswith(ERROR_PREFIX):
+        return playbook_content
+
+    validation_error = _run_ansible_lint(playbook_content)
+    if not validation_error:
+        return playbook_content
+
+    # Limit simple loops to 1 retry for now to save tokens/time
+    fix_prompt = f"""The Ansible playbook you generated has validation errors.
+Please fix the errors below and return the corrected playbook.
+
+ERRORS:
+{validation_error}
+
+PLAYBOOK:
+{playbook_content}
+
+Ensure the logical ordering of tasks is correct (e.g., packages installed before
+config files, config files before services).
+Return ONLY the corrected YAML playbook.
+Do NOT include any introduction, cleanup text, explanations, or markdown code blocks.
+Just the YAML content.
+"""
+
+    try:
+        fixed_response = _call_ai_api(
+            client, ai_provider, fix_prompt, model, temperature, max_tokens
+        )
+        cleaned_response = _clean_ai_playbook_response(fixed_response)
+
+        # If the cleaner returns an error string, it means the fixed response
+        # was still invalid
+        if cleaned_response.startswith(ERROR_PREFIX):
+            # Fallback to the original (valid-but-lint-failing) playbook
+            # rather than returning an error string
+            return playbook_content
+
+        return cleaned_response
+    except Exception:
+        # If fix fails, return original with warning (or original error)
+        return playbook_content
+
+
+def _run_ansible_lint(playbook_content: str) -> str | None:
+    """Run ansible-lint on the playbook content."""
+    # Check if ansible-lint is available
+    if shutil.which("ansible-lint") is None:
+        return None
+
+    try:
+        with tempfile.NamedTemporaryFile(mode="w", suffix=".yml", delete=False) as tmp:
+            tmp.write(playbook_content)
+            tmp_path = tmp.name
+
+        # Run ansible-lint
+        # We ignore return code because we want to capture output even on failure
+        result = subprocess.run(
+            ["ansible-lint", "--nocolor", "-p", tmp_path],
+            capture_output=True,
+            text=True,
+            check=False,
+        )
+
+        if result.returncode != 0:
+            return result.stdout + "\n" + result.stderr
+
+        return None
+    except Exception:
+        return None
+    finally:
+        if "tmp_path" in locals() and Path(tmp_path).exists():
+            Path(tmp_path).unlink()
+
+
 def convert_chef_search_to_inventory(search_query: str) -> str:
     """
     Convert a Chef search query to Ansible inventory patterns and groups.
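The block above adds a complete AI-assisted conversion pipeline to 2.8.0: prompt construction, per-provider client dispatch, markdown cleanup and YAML validation of the response, and an ansible-lint pass with one AI self-correction retry. A minimal usage sketch of the new public entry point, assuming the wheel is installed; the recipe path and API key below are placeholders, not values from the package:

    from souschef.converters.playbook import generate_playbook_from_recipe_with_ai

    playbook_yaml = generate_playbook_from_recipe_with_ai(
        "cookbooks/example/recipes/default.rb",  # hypothetical recipe path
        ai_provider="anthropic",
        api_key="sk-placeholder",                # placeholder, not a real key
        model="claude-3-5-sonnet-20241022",
    )
    print(playbook_yaml)

Note that the function returns error strings (prefixed with the package's ERROR_PREFIX constant) rather than raising, so callers should check the returned text before writing it to disk.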
@@ -121,9 +583,9 @@ def generate_dynamic_inventory_script(search_queries: str) -> str:
         return f"Error generating dynamic inventory script: {e}"
 
 
-def
+def analyse_chef_search_patterns(recipe_or_cookbook_path: str) -> str:
     """
-
+    Analyse recipes/cookbooks to extract search patterns for inventory planning.
 
     Args:
        recipe_or_cookbook_path: Path to Chef recipe file or cookbook directory.
@@ -796,9 +1258,23 @@ def _build_playbook_header(recipe_name: str) -> list[str]:
     ]
 
 
-def _add_playbook_variables(
+def _add_playbook_variables(
+    playbook_lines: list[str], raw_content: str, recipe_file: Path
+) -> None:
     """Extract and add variables section to playbook."""
     variables = _extract_recipe_variables(raw_content)
+
+    # Try to parse attributes file
+    attributes_path = recipe_file.parent.parent / "attributes" / "default.rb"
+    if attributes_path.exists():
+        attributes_content = parse_attributes(str(attributes_path))
+        if not attributes_content.startswith(
+            "Error:"
+        ) and not attributes_content.startswith("Warning:"):
+            # Parse the resolved attributes
+            attr_vars = _extract_attribute_variables(attributes_content)
+            variables.update(attr_vars)
+
     for var_name, var_value in variables.items():
         playbook_lines.append(f"    {var_name}: {var_value}")
 
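The new attribute lookup assumes the conventional Chef cookbook layout, resolving attributes/default.rb relative to the recipe file. A sketch of the path arithmetic with a hypothetical cookbook:

    from pathlib import Path

    recipe_file = Path("cookbooks/example/recipes/default.rb")
    # recipe_file.parent.parent is the cookbook root, so this resolves to:
    attributes_path = recipe_file.parent.parent / "attributes" / "default.rb"
    print(attributes_path)  # cookbooks/example/attributes/default.rb

Recipes that live outside this layout simply skip the attribute merge, since the code guards on attributes_path.exists().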
@@ -812,7 +1288,7 @@ def _convert_and_collect_resources(
     parsed_content: str, raw_content: str
 ) -> tuple[list[dict[str, Any]], list[dict[str, Any]]]:
     """Convert Chef resources to Ansible tasks and collect handlers."""
-    resources = _extract_resources_from_parsed_content(parsed_content)
+    resources = _extract_resources_from_parsed_content(parsed_content, raw_content)
     tasks = []
     handlers = []
 
@@ -829,10 +1305,8 @@ def _format_item_lines(item_yaml: str) -> list[str]:
     """Format a single task/handler's YAML lines with proper indentation."""
     formatted = []
     for i, line in enumerate(item_yaml.split("\n")):
-        if i == 0:  # First line gets 4-space indent
+        if i == 0 or line.strip():  # First line gets 4-space indent
             formatted.append(f"    {line}")
-        elif line.strip():  # Non-empty property lines get 6-space indent
-            formatted.append(f"      {line}")
         else:  # Empty lines preserved as-is
             formatted.append(line)
     return formatted
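The reworked _format_item_lines drops the special 6-space case: every non-empty line now gets the same 4-space prefix, so nested YAML keeps whatever relative indentation the task text already carries instead of being forced to a fixed depth. An approximate trace with a hypothetical task (this sketch ignores the i == 0 branch, which only matters for an empty first line):

    item = "- name: Install nginx\n  ansible.builtin.package:\n    name: nginx"
    formatted = [
        "    " + line if line.strip() else line  # same rule as the new code
        for line in item.split("\n")
    ]
    # ["    - name: Install nginx",
    #  "      ansible.builtin.package:",
    #  "        name: nginx"]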
@@ -855,11 +1329,11 @@ def _add_formatted_items(
 
 
 def _generate_playbook_structure(
-    parsed_content: str, raw_content: str,
+    parsed_content: str, raw_content: str, recipe_file: Path
 ) -> str:
     """Generate complete playbook structure from parsed recipe content."""
-    playbook_lines = _build_playbook_header(
-    _add_playbook_variables(playbook_lines, raw_content)
+    playbook_lines = _build_playbook_header(recipe_file.name)
+    _add_playbook_variables(playbook_lines, raw_content, recipe_file)
 
     # Convert resources to tasks and handlers
     tasks, handlers = _convert_and_collect_resources(parsed_content, raw_content)
@@ -944,6 +1418,301 @@ def _extract_mode_variables(raw_content: str) -> dict[str, str]:
     return {}
 
 
+def _convert_ruby_value_to_yaml(ruby_value: str) -> str:  # noqa: C901
+    """Convert Ruby value syntax to YAML-compatible format."""
+    ruby_value = ruby_value.strip()
+
+    # Handle Ruby hash syntax { key => value, ... }
+    if ruby_value.startswith("{") and ruby_value.endswith("}"):
+        return _convert_ruby_hash_to_yaml(ruby_value)
+
+    # Handle arrays [item1, item2, ...]
+    if ruby_value.startswith("[") and ruby_value.endswith("]"):
+        return _convert_ruby_array_to_yaml(ruby_value)
+
+    # Handle primitive values (strings, numbers, booleans, nil)
+    return _convert_primitive_value(ruby_value)
+
+
+def _is_attribute_separator(line: str) -> bool:
+    """Check if a line indicates the start of a new attribute or metadata."""
+    stripped = line.strip()
+    return (
+        stripped.startswith(ATTRIBUTE_PREFIX)
+        or stripped.startswith("Precedence: ")
+        or stripped.startswith("=")
+        or stripped.startswith("Total attributes:")
+        or stripped.startswith("⚠️")
+    )
+
+
+def _collect_value_lines(lines: list[str], start_index: int) -> tuple[list[str], int]:
+    """Collect all lines belonging to a value until the next attribute separator."""
+    value_lines = []
+    i = start_index
+
+    while i < len(lines):
+        next_line = lines[i]
+        if _is_attribute_separator(next_line):
+            break
+        value_lines.append(lines[i])
+        i += 1
+
+    return value_lines, i
+
+
+def _extract_attribute_variables(attributes_content: str) -> dict[str, str]:
+    """Extract Ansible variables from parsed Chef attributes."""
+    variables = {}
+    lines = attributes_content.split("\n")
+    i = 0
+
+    while i < len(lines):
+        line = lines[i].strip()
+        if line.startswith(ATTRIBUTE_PREFIX):
+            current_attr = line.split(ATTRIBUTE_PREFIX)[1]
+            i += 1  # Move to next line to look for Value:
+
+            # Find and collect value lines
+            value_lines, i = _find_and_collect_value_lines(lines, i)
+
+            # Process the collected value lines
+            if current_attr and value_lines:
+                full_value = "\n".join(value_lines).strip()
+                ansible_var = _convert_chef_attr_path_to_ansible_var(current_attr)
+                yaml_value = _convert_ruby_value_to_yaml(full_value)
+                variables[ansible_var] = yaml_value
+        else:
+            i += 1
+
+    return variables
+
+
+def _find_and_collect_value_lines(
+    lines: list[str], start_index: int
+) -> tuple[list[str], int]:
+    """
+    Find the Value: line and collect all value lines until next attribute separator.
+
+    Find the Value: line and collect all value lines until next attribute separator.
+    """
+    value_lines: list[str] = []
+    i = start_index
+
+    # Look for the Value: line
+    while i < len(lines):
+        next_line = lines[i]
+        if next_line.strip().startswith(VALUE_PREFIX):
+            # Found value start, collect all value lines
+            value_start = next_line.split(VALUE_PREFIX, 1)[1]
+            collected_lines, i = _collect_value_lines(lines, i + 1)
+            value_lines = [value_start] + collected_lines
+            break
+        elif _is_attribute_separator(next_line):
+            # Hit another attribute before finding value
+            break
+        i += 1
+    else:
+        # No more lines
+        i += 1
+
+    return value_lines, i
+
+
+def _convert_ruby_hash_to_yaml(ruby_hash: str) -> str:
+    """Convert Ruby hash syntax { key => value, ... } to YAML flow style."""
+    try:
+        # Remove outer braces
+        hash_content = ruby_hash[1:-1].strip()
+        if not hash_content:
+            return "{}"
+
+        # Split by commas, respecting nested structures
+        yaml_pairs = _split_by_commas_with_nesting(hash_content)
+
+        # Convert each pair from Ruby syntax to YAML
+        flow_pairs = []
+        for pair in yaml_pairs:
+            if "=>" in pair:
+                key_part, value_part = pair.split("=>", 1)
+                key = key_part.strip()
+                value = value_part.strip()
+
+                # Remove quotes from key if present
+                key = key.strip("'\"")
+
+                # Convert value recursively if it's complex
+                value = _convert_ruby_value_to_yaml(value)
+                flow_pairs.append(f"{key}: {value}")
+            else:
+                # Malformed pair, keep as comment
+                flow_pairs.append(f"# TODO: Fix malformed pair: {pair}")
+
+        return "{" + ", ".join(flow_pairs) + "}" if flow_pairs else "{}"
+
+    except Exception:
+        # If conversion fails, return as-is with a comment
+        return f"# TODO: Convert Ruby hash: {ruby_hash}"
+
+
+def _convert_ruby_array_to_yaml(ruby_array: str) -> str:
+    """Convert Ruby array syntax [item1, item2, ...] to YAML flow style."""
+    try:
+        # Remove outer brackets
+        array_content = ruby_array[1:-1].strip()
+        if not array_content:
+            return "[]"
+
+        # Split by commas, respecting nested structures
+        items = _split_by_commas_with_nesting(array_content)
+
+        # Convert items recursively if they're complex
+        flow_items = [_convert_ruby_value_to_yaml(item) for item in items]
+
+        return "[" + ", ".join(flow_items) + "]" if flow_items else "[]"
+
+    except Exception:
+        return ruby_array  # Return as-is if parsing fails
+
+
+def _split_by_commas_with_nesting(content: str) -> list[str]:
+    """Split content by commas while respecting nested braces and brackets."""
+    parts = []
+    current_part = ""
+    in_quotes = False
+    quote_char = None
+    brace_depth = 0
+    bracket_depth = 0
+
+    for char in content:
+        if _is_quote_char(char) and brace_depth == 0 and bracket_depth == 0:
+            in_quotes, quote_char = _handle_quote_transition(
+                char, in_quotes, quote_char
+            )
+        elif not in_quotes:
+            brace_depth, bracket_depth = _update_nesting_depths(
+                char, brace_depth, bracket_depth
+            )
+            if _should_split_at_comma(char, in_quotes, brace_depth, bracket_depth):
+                parts.append(current_part.strip())
+                current_part = ""
+                continue
+
+        current_part += char
+
+    # Add the last part
+    if current_part.strip():
+        parts.append(current_part.strip())
+
+    return parts
+
+
+def _is_quote_char(char: str) -> bool:
+    """Check if character is a quote."""
+    return char in ['"', "'"]
+
+
+def _handle_quote_transition(
+    char: str, in_quotes: bool, quote_char: str | None
+) -> tuple[bool, str | None]:
+    """Handle quote character transitions."""
+    if not in_quotes:
+        return True, char
+    elif char == quote_char:
+        return False, None
+    return in_quotes, quote_char
+
+
+def _update_nesting_depths(
+    char: str, brace_depth: int, bracket_depth: int
+) -> tuple[int, int]:
+    """Update brace and bracket nesting depths."""
+    if char == "{":
+        brace_depth += 1
+    elif char == "}":
+        brace_depth -= 1
+    elif char == "[":
+        bracket_depth += 1
+    elif char == "]":
+        bracket_depth -= 1
+    return brace_depth, bracket_depth
+
+
+def _should_split_at_comma(
+    char: str, in_quotes: bool, brace_depth: int, bracket_depth: int
+) -> bool:
+    """Determine if we should split at this comma."""
+    return char == "," and not in_quotes and brace_depth == 0 and bracket_depth == 0
+
+
+def _convert_primitive_value(ruby_value: str) -> str:
+    """Convert primitive Ruby values (strings, numbers, booleans, nil)."""
+    # Handle quoted strings
+    if (
+        ruby_value.startswith('"')
+        and ruby_value.endswith('"')
+        or ruby_value.startswith("'")
+        and ruby_value.endswith("'")
+    ):
+        return ruby_value  # Already properly quoted
+
+    # Handle numbers
+    try:
+        int(ruby_value)
+        return ruby_value
+    except ValueError:
+        pass
+
+    try:
+        float(ruby_value)
+        return ruby_value
+    except ValueError:
+        pass
+
+    # Handle booleans
+    if ruby_value.lower() in ["true", "false"]:
+        return ruby_value.lower()
+
+    # Handle nil
+    if ruby_value.lower() == "nil":
+        return "null"
+
+    # For strings that aren't quoted, quote them
+    return f'"{ruby_value}"'
+
+
+def _convert_chef_attr_path_to_ansible_var(attr_path: str) -> str:
+    """Convert Chef attribute path to Ansible variable name."""
+    # Replace dots with underscores, handle special cases
+    parts = attr_path.split(".")
+    if len(parts) >= 2:
+        # For cookbook-specific attrs like '301.version' -> 'threeohone_version'
+        cookbook_name = parts[0]
+        attr_name = "_".join(parts[1:])
+
+        # Convert numbers to words for readability
+        number_words = {
+            "301": "threeohone",
+            "1": "one",
+            "2": "two",
+            "3": "three",
+            "4": "four",
+            "5": "five",
+            "6": "six",
+            "7": "seven",
+            "8": "eight",
+            "9": "nine",
+            "0": "zero",
+        }
+
+        # Replace digits with words
+        readable_cookbook = "".join(number_words.get(c, c) for c in cookbook_name)
+        return f"{readable_cookbook}_{attr_name}"
+
+    # Fallback: just replace dots with underscores
+    return attr_path.replace(".", "_")
+
+
 def _extract_recipe_variables(raw_content: str) -> dict[str, str]:
     """Extract variables from Chef recipe content."""
     variables = {}
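The Ruby-to-YAML helpers above convert parsed attribute values recursively: hashes and arrays become YAML flow style, and primitives are normalised (booleans lowercased, nil mapped to null, bare strings quoted). The behaviour below is traced from the code; these helpers are private, so treat this as illustration rather than stable API:

    from souschef.converters.playbook import _convert_ruby_value_to_yaml

    print(_convert_ruby_value_to_yaml("{'port' => 8080, 'ssl' => true}"))
    # {port: 8080, ssl: true}
    print(_convert_ruby_value_to_yaml("['a', 'b']"))   # ['a', 'b']
    print(_convert_ruby_value_to_yaml("nil"))          # null
    print(_convert_ruby_value_to_yaml("web server"))   # "web server"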
@@ -961,8 +1730,24 @@ def _extract_recipe_variables(raw_content: str) -> dict[str, str]:
 
 
 def _parse_resource_block(block: str) -> dict[str, str] | None:
-    """Parse a single resource block into a dictionary."""
-
+    """Parse a single resource (or include_recipe) block into a dictionary."""
+    trimmed = block.strip()
+    if not trimmed:
+        return None
+
+    # Handle include_recipe blocks emitted by parse_recipe
+    if trimmed.startswith("Include Recipe"):
+        recipe_match = re.search(r"Recipe:\s*([^\n]+)", block)
+        if not recipe_match:
+            return None
+        return {
+            "type": "include_recipe",
+            "name": recipe_match.group(1).strip(),
+            "action": "include",
+            "properties": "",
+        }
+
+    if not trimmed.startswith("Resource"):
         return None
 
     resource: dict[str, str] = {}
@@ -992,17 +1777,64 @@ def _parse_resource_block(block: str) -> dict[str, str] | None:
     return resource
 
 
-def _extract_resources_from_parsed_content(
+def _extract_resources_from_parsed_content(
+    parsed_content: str, raw_content: str
+) -> list[dict[str, str]]:
     """Extract resource information from parsed recipe content."""
-    resource_blocks = re.split(
+    resource_blocks = re.split(
+        r"\n(?=(Resource \d+:|Include Recipe \d+:))", parsed_content
+    )
     resources = []
     for block in resource_blocks:
         resource = _parse_resource_block(block)
         if resource:
+            # Find position in raw_content to preserve order
+            position = _find_resource_position_in_raw(resource, raw_content)
+            resource["_position"] = position  # type: ignore
             resources.append(resource)
+
+    # Sort by position to preserve original order
+    resources.sort(key=lambda r: r.get("_position", 999999))
+
+    # Remove position key
+    for r in resources:
+        r.pop("_position", None)
+
     return resources
 
 
+def _find_resource_position_in_raw(resource: dict[str, str], raw_content: str) -> int:
+    """Find the position of a resource in raw content."""
+    if resource["type"] == "include_recipe":
+        pattern = rf'include_recipe\s+[\'"]({re.escape(resource["name"])})[\'"]'
+    elif resource["type"] == "nodejs_npm":
+        pattern = rf'nodejs_npm\s+[\'"]({re.escape(resource["name"])})[\'"]'
+    else:
+        # Generic pattern
+        pattern = rf'{resource["type"]}\s+[\'"]({re.escape(resource["name"])})[\'"]'
+
+    match = re.search(pattern, raw_content)
+    return match.start() if match else 999999
+
+
+def _extract_nodejs_npm_version(raw_content: str, package_name: str) -> str | None:
+    """Extract version for nodejs_npm resources from raw recipe content."""
+    pattern = (
+        rf"nodejs_npm\s+[\"']{re.escape(package_name)}[\"']\s+do"
+        rf"(?P<body>.{{0,400}}?)^end"
+    )
+    match = re.search(pattern, raw_content, re.DOTALL | re.MULTILINE)
+    if not match:
+        return None
+
+    body = match.group("body")
+    version_match = re.search(r"version\s+([^\n#]+)", body)
+    if not version_match:
+        return None
+
+    return version_match.group(1).strip()
+
+
 # Notification handling
 
 
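Two behaviours worth noting in the block above. First, resources are now sorted by their byte offset in the raw recipe, so generated tasks follow the original recipe order rather than parser output order; resources whose pattern cannot be matched sort last via the 999999 sentinel. Second, _extract_nodejs_npm_version recovers the version property directly from the recipe text when the parser missed it. A trace on a hypothetical recipe snippet:

    # Hypothetical recipe text; the regex limits the block body between
    # "do" and "end" to 400 characters.
    recipe = "nodejs_npm 'express' do\n  version '4.18.2'\nend\n"
    # re.search(r"version\s+([^\n#]+)", body) captures everything after
    # "version" up to a newline or comment, so the result keeps the quotes:
    # _extract_nodejs_npm_version(recipe, "express") -> "'4.18.2'"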
@@ -1101,6 +1933,12 @@ def _convert_resource_to_task_dict(
     resource: dict[str, str], raw_content: str
 ) -> dict[str, Any]:
     """Convert a Chef resource to an Ansible task dictionary with handlers."""
+    # Enrich nodejs_npm resources with version info when it could not be parsed
+    if resource["type"] == "nodejs_npm" and not resource.get("properties"):
+        extracted_version = _extract_nodejs_npm_version(raw_content, resource["name"])
+        if extracted_version is not None:
+            resource["properties"] = str({"version": extracted_version})
+
     # Convert basic resource to task
     task = _convert_chef_resource_to_ansible(
         resource["type"], resource["name"], resource["action"], resource["properties"]