mcp-souschef 2.0.1__py3-none-any.whl → 2.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {mcp_souschef-2.0.1.dist-info → mcp_souschef-2.2.0.dist-info}/METADATA +453 -77
- mcp_souschef-2.2.0.dist-info/RECORD +31 -0
- souschef/__init__.py +17 -0
- souschef/assessment.py +1498 -0
- souschef/cli.py +90 -0
- souschef/converters/__init__.py +23 -0
- souschef/converters/habitat.py +674 -0
- souschef/converters/playbook.py +1736 -0
- souschef/converters/resource.py +325 -0
- souschef/core/__init__.py +80 -0
- souschef/core/constants.py +145 -0
- souschef/core/errors.py +275 -0
- souschef/core/path_utils.py +58 -0
- souschef/core/ruby_utils.py +39 -0
- souschef/core/validation.py +555 -0
- souschef/deployment.py +1906 -0
- souschef/filesystem/__init__.py +5 -0
- souschef/filesystem/operations.py +67 -0
- souschef/parsers/__init__.py +36 -0
- souschef/parsers/attributes.py +257 -0
- souschef/parsers/habitat.py +317 -0
- souschef/parsers/inspec.py +809 -0
- souschef/parsers/metadata.py +211 -0
- souschef/parsers/recipe.py +200 -0
- souschef/parsers/resource.py +170 -0
- souschef/parsers/template.py +342 -0
- souschef/profiling.py +568 -0
- souschef/server.py +1854 -7481
- mcp_souschef-2.0.1.dist-info/RECORD +0 -8
- {mcp_souschef-2.0.1.dist-info → mcp_souschef-2.2.0.dist-info}/WHEEL +0 -0
- {mcp_souschef-2.0.1.dist-info → mcp_souschef-2.2.0.dist-info}/entry_points.txt +0 -0
- {mcp_souschef-2.0.1.dist-info → mcp_souschef-2.2.0.dist-info}/licenses/LICENSE +0 -0
souschef/parsers/inspec.py
@@ -0,0 +1,809 @@
"""InSpec profile parser and converter."""

import json
import re
from pathlib import Path
from typing import Any

from souschef.core.constants import ERROR_PREFIX, INSPEC_END_INDENT, INSPEC_SHOULD_EXIST
from souschef.core.path_utils import _normalize_path, _safe_join


def parse_inspec_profile(path: str) -> str:
    """
    Parse an InSpec profile and extract controls.

    Args:
        path: Path to InSpec profile directory or control file (.rb).

    Returns:
        JSON string with parsed controls, or error message.

    """
    try:
        profile_path = _normalize_path(path)

        if not profile_path.exists():
            return f"Error: Path does not exist: {path}"

        if profile_path.is_dir():
            controls = _parse_controls_from_directory(profile_path)
        elif profile_path.is_file():
            controls = _parse_controls_from_file(profile_path)
        else:
            return f"Error: Invalid path type: {path}"

        return json.dumps(
            {
                "profile_path": str(profile_path),
                "controls_count": len(controls),
                "controls": controls,
            },
            indent=2,
        )

    except (FileNotFoundError, RuntimeError) as e:
        return f"Error: {e}"
    except Exception as e:
        return f"An error occurred while parsing InSpec profile: {e}"


def convert_inspec_to_test(inspec_path: str, output_format: str = "testinfra") -> str:
    """
    Convert InSpec controls to Ansible test format.

    Args:
        inspec_path: Path to InSpec profile or control file.
        output_format: Output format ('testinfra' or 'ansible_assert').

    Returns:
        Converted test code or error message.

    """
    try:
        # First parse the InSpec profile
        parse_result = parse_inspec_profile(inspec_path)

        # Check if parsing failed
        if parse_result.startswith(ERROR_PREFIX):
            return parse_result

        # Parse JSON result
        profile_data = json.loads(parse_result)
        controls = profile_data["controls"]

        if not controls:
            return "Warning: No controls found to convert"

        # Convert each control
        converted = []
        for control in controls:
            if output_format == "testinfra":
                converted.append(_convert_inspec_to_testinfra(control))
            elif output_format == "ansible_assert":
                converted.append(_convert_inspec_to_ansible_assert(control))
            else:
                return f"Error: Unsupported output format: {output_format}"

        return "\n".join(converted)

    except json.JSONDecodeError as e:
        return f"Error parsing InSpec result: {e}"
    except Exception as e:
        return f"An error occurred during conversion: {e}"


def generate_inspec_from_chef(
    resource_type: str, resource_name: str, properties: dict[str, Any]
) -> str:
    """
    Generate InSpec control from Chef resource.

    Args:
        resource_type: Type of Chef resource.
        resource_name: Name of the resource.
        properties: Resource properties.

    Returns:
        InSpec control code.

    """
    return _generate_inspec_from_resource(resource_type, resource_name, properties)


def _parse_controls_from_directory(profile_path: Path) -> list[dict[str, Any]]:
    """
    Parse all control files from an InSpec profile directory.

    Args:
        profile_path: Path to the InSpec profile directory.

    Returns:
        List of parsed controls.

    Raises:
        FileNotFoundError: If controls directory doesn't exist.
        RuntimeError: If error reading control files.

    """
    controls_dir = _safe_join(profile_path, "controls")
    if not controls_dir.exists():
        raise FileNotFoundError(f"No controls directory found in {profile_path}")

    controls = []
    for control_file in controls_dir.glob("*.rb"):
        try:
            content = control_file.read_text()
            file_controls = _parse_inspec_control(content)
            for ctrl in file_controls:
                ctrl["file"] = str(control_file.relative_to(profile_path))
            controls.extend(file_controls)
        except Exception as e:
            raise RuntimeError(f"Error reading {control_file}: {e}") from e

    return controls


def _parse_controls_from_file(profile_path: Path) -> list[dict[str, Any]]:
    """
    Parse controls from a single InSpec control file.

    Args:
        profile_path: Path to the control file.

    Returns:
        List of parsed controls.

    Raises:
        RuntimeError: If error reading the file.

    """
    try:
        content = profile_path.read_text()
        controls = _parse_inspec_control(content)
        for ctrl in controls:
            ctrl["file"] = profile_path.name
        return controls
    except Exception as e:
        raise RuntimeError(f"Error reading file: {e}") from e


def _extract_control_metadata(control_body: str) -> dict[str, Any]:
    """
    Extract title, description, and impact from control body.

    Args:
        control_body: Content of the control block.

    Returns:
        Dictionary with title, desc, and impact.

    """
    metadata = {"title": "", "desc": "", "impact": 1.0}

    # Extract title
    title_match = re.search(r"title\s+['\"]([^'\"]+)['\"]", control_body)
    if title_match:
        metadata["title"] = title_match.group(1)

    # Extract description
    desc_match = re.search(r"desc\s+['\"]([^'\"]+)['\"]", control_body)
    if desc_match:
        metadata["desc"] = desc_match.group(1)

    # Extract impact
    impact_match = re.search(r"impact\s+([\d.]+)", control_body)
    if impact_match:
        metadata["impact"] = float(impact_match.group(1))

    return metadata


def _parse_inspec_control(content: str) -> list[dict[str, Any]]:
    """
    Parse InSpec control blocks from content.

    Args:
        content: InSpec profile content.

    Returns:
        List of parsed control dictionaries with id, title, desc, impact, tests.

    """
    controls = []
    lines = content.split("\n")
    lines_len = len(lines)

    i = 0
    while i < lines_len:
        line = lines[i].strip()

        # Look for control start
        control_match = re.match(r"control\s+['\"]([^'\"]+)['\"]\s+do", line)
        if control_match:
            control_id = control_match.group(1)

            # Find the matching end for this control
            control_body_lines, end_index = _find_nested_block_end(lines, i + 1)
            i = end_index

            # Parse the control body
            control_body = "\n".join(control_body_lines)

            control_data: dict[str, Any] = {
                "id": control_id,
                **_extract_control_metadata(control_body),
                "tests": _extract_inspec_describe_blocks(control_body),
            }

            controls.append(control_data)

        i += 1

    return controls


def _find_nested_block_end(lines: list[str], start_index: int) -> tuple[list[str], int]:
    """
    Find the end of a nested Ruby block (do...end).

    Args:
        lines: All lines of content.
        start_index: Starting line index (after the 'do' line).

    Returns:
        Tuple of (body_lines, ending_index).

    """
    nesting_level = 0
    body_lines = []
    lines_len = len(lines)
    i = start_index

    while i < lines_len:
        current_line = lines[i]
        stripped = current_line.strip()

        if re.search(r"\bdo\s*$", stripped):
            nesting_level += 1
        elif stripped == "end":
            if nesting_level == 0:
                break
            else:
                nesting_level -= 1

        body_lines.append(current_line)
        i += 1

    return body_lines, i


def _extract_it_expectations(describe_body: str) -> list[dict[str, Any]]:
    """
    Extract 'it { should ... }' expectations from describe block.

    Args:
        describe_body: Content of the describe block.

    Returns:
        List of expectation dictionaries.

    """
    expectations = []
    it_pattern = re.compile(r"it\s+\{([^}]+)\}")
    for it_match in it_pattern.finditer(describe_body):
        expectation = it_match.group(1).strip()
        expectations.append({"type": "should", "matcher": expectation})
    return expectations


def _extract_its_expectations(describe_body: str) -> list[dict[str, Any]]:
    """
    Extract 'its(...) { should ... }' expectations from describe block.

    Args:
        describe_body: Content of the describe block.

    Returns:
        List of expectation dictionaries.

    """
    expectations = []
    its_pattern = re.compile(r"its\(['\"]([^'\"]+)['\"]\)\s+\{([^}]+)\}")
    for its_match in its_pattern.finditer(describe_body):
        property_name = its_match.group(1)
        expectation = its_match.group(2).strip()
        expectations.append(
            {"type": "its", "property": property_name, "matcher": expectation}
        )
    return expectations


def _extract_inspec_describe_blocks(content: str) -> list[dict[str, Any]]:
    """
    Extract InSpec describe blocks and their matchers.

    Args:
        content: Content to parse for describe blocks.

    Returns:
        List of test dictionaries with resource type, name, and expectations.

    """
    tests = []
    lines = content.split("\n")
    lines_len = len(lines)

    i = 0
    while i < lines_len:
        line = lines[i].strip()

        # Look for describe start
        describe_match = re.match(
            r"describe\s+(\w+)\(['\"]?([^'\")\n]+)['\"]?\)\s+do", line
        )
        if describe_match:
            resource_type = describe_match.group(1)
            resource_name = describe_match.group(2).strip()

            # Find the matching end for this describe block
            describe_body_lines, end_index = _find_nested_block_end(lines, i + 1)
            i = end_index

            # Parse the describe body
            describe_body = "\n".join(describe_body_lines)

            test_data: dict[str, Any] = {
                "resource_type": resource_type,
                "resource_name": resource_name,
                "expectations": [],
            }

            # Extract expectations
            test_data["expectations"].extend(_extract_it_expectations(describe_body))
            test_data["expectations"].extend(_extract_its_expectations(describe_body))

            if test_data["expectations"]:
                tests.append(test_data)

        i += 1

    return tests


def _convert_package_to_testinfra(
    lines: list[str], resource_name: str, expectations: list[dict[str, Any]]
) -> None:
    """
    Convert package resource to Testinfra assertions.

    Args:
        lines: List to append test lines to.
        resource_name: Name of the package.
        expectations: List of InSpec expectations.

    """
    lines.append(f'    pkg = host.package("{resource_name}")')
    for exp in expectations:
        if "be_installed" in exp["matcher"]:
            lines.append("    assert pkg.is_installed")
        elif exp["type"] == "its" and exp["property"] == "version":
            version_match = re.search(r"match\s+/([^/]+)/", exp["matcher"])
            if version_match:
                version = version_match.group(1)
                lines.append(f'    assert pkg.version.startswith("{version}")')


def _convert_service_to_testinfra(
    lines: list[str], resource_name: str, expectations: list[dict[str, Any]]
) -> None:
    """
    Convert service resource to Testinfra assertions.

    Args:
        lines: List to append test lines to.
        resource_name: Name of the service.
        expectations: List of InSpec expectations.

    """
    lines.append(f'    svc = host.service("{resource_name}")')
    for exp in expectations:
        if "be_running" in exp["matcher"]:
            lines.append("    assert svc.is_running")
        elif "be_enabled" in exp["matcher"]:
            lines.append("    assert svc.is_enabled")


def _convert_file_to_testinfra(
    lines: list[str], resource_name: str, expectations: list[dict[str, Any]]
) -> None:
    """
    Convert file resource to Testinfra assertions.

    Args:
        lines: List to append test lines to.
        resource_name: Path to the file.
        expectations: List of InSpec expectations.

    """
    lines.append(f'    f = host.file("{resource_name}")')
    for exp in expectations:
        if "exist" in exp["matcher"]:
            lines.append("    assert f.exists")
        elif exp["type"] == "its" and exp["property"] == "mode":
            mode_match = re.search(r"cmp\s+'([^']+)'", exp["matcher"])
            if mode_match:
                mode = mode_match.group(1)
                lines.append(f'    assert oct(f.mode) == "{mode}"')
        elif exp["type"] == "its" and exp["property"] == "owner":
            owner_match = re.search(r"eq\s+['\"]([^'\"]+)['\"]", exp["matcher"])
            if owner_match:
                owner = owner_match.group(1)
                lines.append(f'    assert f.user == "{owner}"')


def _convert_port_to_testinfra(
    lines: list[str], resource_name: str, expectations: list[dict[str, Any]]
) -> None:
    """
    Convert port resource to Testinfra assertions.

    Args:
        lines: List to append test lines to.
        resource_name: Port number or address.
        expectations: List of InSpec expectations.

    """
    lines.append(f'    port = host.socket("tcp://{resource_name}")')
    for exp in expectations:
        if "be_listening" in exp["matcher"]:
            lines.append("    assert port.is_listening")


def _convert_inspec_to_testinfra(control: dict[str, Any]) -> str:
    """
    Convert InSpec control to Testinfra test.

    Args:
        control: Parsed InSpec control dictionary.

    Returns:
        Testinfra test code as string.

    """
    lines = []

    # Add test function header
    test_name = control["id"].replace("-", "_")
    lines.append(f"def test_{test_name}(host):")

    if control["desc"]:
        lines.append(f'    """{control["desc"]}"""')

    # Convert each describe block
    for test in control["tests"]:
        resource_type = test["resource_type"]
        resource_name = test["resource_name"]
        expectations = test["expectations"]

        # Map InSpec resources to Testinfra using dedicated converters
        if resource_type == "package":
            _convert_package_to_testinfra(lines, resource_name, expectations)
        elif resource_type == "service":
            _convert_service_to_testinfra(lines, resource_name, expectations)
        elif resource_type == "file":
            _convert_file_to_testinfra(lines, resource_name, expectations)
        elif resource_type == "port":
            _convert_port_to_testinfra(lines, resource_name, expectations)

    lines.append("")
    return "\n".join(lines)


def _convert_package_to_ansible_assert(
    lines: list[str], resource_name: str, expectations: list[dict[str, Any]]
) -> None:
    """
    Convert package expectations to Ansible assert conditions.

    Args:
        lines: List to append assertion lines to.
        resource_name: Name of the package.
        expectations: List of InSpec expectations.

    """
    for exp in expectations:
        if "be_installed" in exp["matcher"]:
            lines.append(
                f"      - ansible_facts.packages['{resource_name}'] is defined"
            )


def _convert_service_to_ansible_assert(
    lines: list[str], resource_name: str, expectations: list[dict[str, Any]]
) -> None:
    """
    Convert service expectations to Ansible assert conditions.

    Args:
        lines: List to append assertion lines to.
        resource_name: Name of the service.
        expectations: List of InSpec expectations.

    """
    for exp in expectations:
        if "be_running" in exp["matcher"]:
            lines.append(f"      - services['{resource_name}'].state == 'running'")
        elif "be_enabled" in exp["matcher"]:
            lines.append(f"      - services['{resource_name}'].status == 'enabled'")


def _convert_file_to_ansible_assert(
    lines: list[str], expectations: list[dict[str, Any]]
) -> None:
    """
    Convert file expectations to Ansible assert conditions.

    Args:
        lines: List to append assertion lines to.
        expectations: List of InSpec expectations.

    """
    for exp in expectations:
        if "exist" in exp["matcher"]:
            lines.append("      - stat_result.stat.exists")


def _convert_inspec_to_ansible_assert(control: dict[str, Any]) -> str:
    """
    Convert InSpec control to Ansible assert task.

    Args:
        control: Parsed InSpec control dictionary.

    Returns:
        Ansible assert task in YAML format.

    """
    lines = [
        f"- name: Verify {control['title'] or control['id']}",
        "  ansible.builtin.assert:",
        "    that:",
    ]

    # Convert each describe block to assertions
    for test in control["tests"]:
        resource_type = test["resource_type"]
        resource_name = test["resource_name"]
        expectations = test["expectations"]

        if resource_type == "package":
            _convert_package_to_ansible_assert(lines, resource_name, expectations)
        elif resource_type == "service":
            _convert_service_to_ansible_assert(lines, resource_name, expectations)
        elif resource_type == "file":
            _convert_file_to_ansible_assert(lines, expectations)

    # Add failure message
    fail_msg = f"{control['desc'] or control['id']} validation failed"
    lines.append(f'    fail_msg: "{fail_msg}"')

    return "\n".join(lines)


def _generate_inspec_package_checks(
    resource_name: str, properties: dict[str, Any]
) -> list[str]:
    """
    Generate InSpec checks for package resource.

    Args:
        resource_name: Name of the package.
        properties: Resource properties.

    Returns:
        List of InSpec check lines.

    """
    lines = [
        f"  describe package('{resource_name}') do",
        "    it { should be_installed }",
    ]
    if "version" in properties:
        version = properties["version"]
        lines.append(f"    its('version') {{ should match /{version}/ }}")
    lines.append(INSPEC_END_INDENT)
    return lines


def _generate_inspec_resource_checks(
    resource_type: str,
    resource_name: str,
    properties: dict[str, Any] | None = None,
    custom_checks: list[str] | None = None,
) -> list[str]:
    """
    Generate InSpec checks for a resource using a generic pattern.

    Args:
        resource_type: InSpec resource type (e.g., 'file', 'user', 'service').
        resource_name: Name/path of the resource.
        properties: Optional resource properties to check.
        custom_checks: Optional list of custom check lines.

    Returns:
        List of InSpec check lines.

    """
    lines = [f"  describe {resource_type}('{resource_name}') do"]

    # Add custom checks if provided
    if custom_checks:
        lines.extend(custom_checks)
    else:
        # Default: should exist
        lines.append(INSPEC_SHOULD_EXIST)

    # Add property checks
    if properties:
        property_map = {
            "mode": lambda v: f"    its('mode') {{ should cmp '{v}' }}",
            "owner": lambda v: f"    its('owner') {{ should eq '{v}' }}",
            "group": lambda v: f"    its('group') {{ should eq '{v}' }}",
            "shell": lambda v: f"    its('shell') {{ should eq '{v}' }}",
        }
        for prop, value in properties.items():
            if prop in property_map:
                lines.append(property_map[prop](value))

    lines.append(INSPEC_END_INDENT)
    return lines


def _generate_inspec_service_checks(resource_name: str) -> list[str]:
    """
    Generate InSpec checks for service resource.

    Args:
        resource_name: Name of the service.

    Returns:
        List of InSpec check lines.

    """
    return _generate_inspec_resource_checks(
        "service",
        resource_name,
        custom_checks=[
            "    it { should be_running }",
            "    it { should be_enabled }",
        ],
    )


def _generate_inspec_file_checks(
    resource_name: str, properties: dict[str, Any]
) -> list[str]:
    """
    Generate InSpec checks for file/template resource.

    Args:
        resource_name: Name/path of the file.
        properties: Resource properties.

    Returns:
        List of InSpec check lines.

    """
    return _generate_inspec_resource_checks(
        "file",
        resource_name,
        properties=properties,
    )


def _generate_inspec_directory_checks(
    resource_name: str, properties: dict[str, Any]
) -> list[str]:
    """
    Generate InSpec checks for directory resource.

    Args:
        resource_name: Path of the directory.
        properties: Resource properties.

    Returns:
        List of InSpec check lines.

    """
    return _generate_inspec_resource_checks(
        "file",
        resource_name,
        properties=properties,
        custom_checks=[
            INSPEC_SHOULD_EXIST,
            "    it { should be_directory }",
        ],
    )


def _generate_inspec_user_checks(
    resource_name: str, properties: dict[str, Any]
) -> list[str]:
    """
    Generate InSpec checks for user resource.

    Args:
        resource_name: Username.
        properties: Resource properties.

    Returns:
        List of InSpec check lines.

    """
    return _generate_inspec_resource_checks(
        "user",
        resource_name,
        properties=properties,
    )


def _generate_inspec_group_checks(resource_name: str) -> list[str]:
    """
    Generate InSpec checks for group resource.

    Args:
        resource_name: Group name.

    Returns:
        List of InSpec check lines.

    """
    return _generate_inspec_resource_checks("group", resource_name)


def _generate_inspec_from_resource(
    resource_type: str, resource_name: str, properties: dict[str, Any]
) -> str:
    """
    Generate InSpec control from Chef resource.

    Args:
        resource_type: Type of Chef resource.
        resource_name: Name of the resource.
        properties: Resource properties.

    Returns:
        InSpec control code as string.

    """
    control_id = f"{resource_type}-{resource_name.replace('/', '-')}"

    lines = [
        f"control '{control_id}' do",
        f"  title 'Verify {resource_type} {resource_name}'",
        f"  desc 'Ensure {resource_type} {resource_name} is properly configured'",
        "  impact 1.0",
        "",
    ]

    # Generate resource-specific checks
    resource_generators = {
        "package": lambda: _generate_inspec_package_checks(resource_name, properties),
        "service": lambda: _generate_inspec_service_checks(resource_name),
        "file": lambda: _generate_inspec_file_checks(resource_name, properties),
        "template": lambda: _generate_inspec_file_checks(resource_name, properties),
        "directory": lambda: _generate_inspec_directory_checks(
            resource_name, properties
        ),
        "user": lambda: _generate_inspec_user_checks(resource_name, properties),
        "group": lambda: _generate_inspec_group_checks(resource_name),
    }

    generator = resource_generators.get(resource_type)
    if generator:
        lines.extend(generator())

    lines.extend(["end", ""])

    return "\n".join(lines)