mcp-souschef 2.2.0__py3-none-any.whl → 2.8.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
souschef/assessment.py CHANGED
@@ -7,6 +7,7 @@ generating migration plans, analyzing dependencies, and validating conversions.
 
 import json
 import re
+from pathlib import Path
 from typing import Any
 
 from souschef.core import METADATA_FILENAME, _normalize_path, _safe_join
@@ -36,18 +37,81 @@ def assess_chef_migration_complexity(
 
     """
     try:
-        # Validate inputs
+        # Validate and parse inputs
         error_msg = _validate_assessment_inputs(
             cookbook_paths, migration_scope, target_platform
         )
         if error_msg:
             return error_msg
 
+        # Process cookbook analysis
+        return _process_cookbook_assessment(
+            cookbook_paths, migration_scope, target_platform
+        )
+
+    except Exception as e:
+        return format_error_with_context(
+            e, "assessing Chef migration complexity", cookbook_paths
+        )
+
+
+def _process_cookbook_assessment(
+    cookbook_paths: str, migration_scope: str, target_platform: str
+) -> str:
+    """Process the cookbook assessment workflow."""
+    # Parse cookbook paths (may be empty if none exist)
+    valid_paths = _parse_cookbook_paths(cookbook_paths)
+
+    # Analyze all cookbooks (handles empty list gracefully)
+    cookbook_assessments, overall_metrics = _analyse_cookbook_metrics(valid_paths)
+
+    # Generate recommendations and reports
+    recommendations = _generate_migration_recommendations_from_assessment(
+        cookbook_assessments, overall_metrics, target_platform
+    )
+    roadmap = _create_migration_roadmap(cookbook_assessments)
+
+    # Format final assessment report
+    return _format_assessment_report(
+        migration_scope,
+        target_platform,
+        overall_metrics,
+        cookbook_assessments,
+        recommendations,
+        roadmap,
+    )
+
+
+def parse_chef_migration_assessment(
+    cookbook_paths: str,
+    migration_scope: str = "full",
+    target_platform: str = "ansible_awx",
+) -> dict[str, Any]:
+    """
+    Parse Chef cookbook migration assessment and return as dictionary.
+
+    Args:
+        cookbook_paths: Comma-separated paths to Chef cookbooks or cookbook directory
+        migration_scope: Scope of migration (full, recipes_only, infrastructure_only)
+        target_platform: Target platform (ansible_awx, ansible_core, ansible_tower)
+
+    Returns:
+        Dictionary containing assessment data with complexity, recommendations, etc.
+
+    """
+    try:
+        # Validate inputs
+        error_msg = _validate_assessment_inputs(
+            cookbook_paths, migration_scope, target_platform
+        )
+        if error_msg:
+            return {"error": error_msg}
+
         # Parse cookbook paths (may be empty if none exist)
         valid_paths = _parse_cookbook_paths(cookbook_paths)
 
         # Analyze all cookbooks (handles empty list gracefully)
-        cookbook_assessments, overall_metrics = _analyze_cookbook_metrics(valid_paths)
+        cookbook_assessments, overall_metrics = _analyse_cookbook_metrics(valid_paths)
 
         # Generate recommendations and reports
         recommendations = _generate_migration_recommendations_from_assessment(
@@ -55,19 +119,35 @@ def assess_chef_migration_complexity(
         )
         roadmap = _create_migration_roadmap(cookbook_assessments)
 
-        # Format final assessment report
-        return _format_assessment_report(
-            migration_scope,
-            target_platform,
-            overall_metrics,
-            cookbook_assessments,
-            recommendations,
-            roadmap,
-        )
+        return {
+            "migration_scope": migration_scope,
+            "target_platform": target_platform,
+            "overall_metrics": overall_metrics,
+            "cookbook_assessments": cookbook_assessments,
+            "recommendations": recommendations,
+            "roadmap": roadmap,
+            "complexity": _get_overall_complexity_level(overall_metrics),
+            "estimated_hours": overall_metrics.get("estimated_effort_days", 0)
+            * 8,  # Convert days to hours
+        }
+
     except Exception as e:
-        return format_error_with_context(
-            e, "assessing Chef migration complexity", cookbook_paths
-        )
+        return {
+            "error": format_error_with_context(
+                e, "assessing Chef migration complexity", cookbook_paths
+            )
+        }
+
+
+def _get_overall_complexity_level(metrics: dict[str, int]) -> str:
+    """Get overall complexity level based on metrics."""
+    avg_complexity = metrics.get("avg_complexity", 0)
+    if avg_complexity < 30:
+        return "Low"
+    elif avg_complexity < 70:
+        return "Medium"
+    else:
+        return "High"
 
 
 def _validate_migration_plan_inputs(
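Note: the new parse_chef_migration_assessment entry point returns structured data instead of the formatted report produced by assess_chef_migration_complexity. A minimal usage sketch (the cookbook paths are illustrative; the key names come from the return statement above):

    from souschef.assessment import parse_chef_migration_assessment

    result = parse_chef_migration_assessment(
        "cookbooks/apache,cookbooks/mysql",  # illustrative comma-separated paths
        migration_scope="full",
        target_platform="ansible_awx",
    )
    if "error" in result:
        print(result["error"])            # errors come back as a dict, not an exception
    else:
        print(result["complexity"])       # "Low", "Medium" or "High"
        print(result["estimated_hours"])  # estimated_effort_days converted to hours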
@@ -214,11 +294,11 @@ def generate_migration_plan(
         return format_error_with_context(e, "generating migration plan", cookbook_paths)
 
 
-def analyze_cookbook_dependencies(
+def analyse_cookbook_dependencies(
     cookbook_path: str, dependency_depth: str = "direct"
 ) -> str:
     """
-    Analyze cookbook dependencies and identify migration order requirements.
+    Analyse cookbook dependencies and identify migration order requirements.
 
     Args:
        cookbook_path: Path to Chef cookbook or cookbooks directory
@@ -245,7 +325,7 @@ def analyze_cookbook_dependencies(
         )
 
         # Analyze dependencies
-        dependency_analysis = _analyze_cookbook_dependencies_detailed(cookbook_path_obj)
+        dependency_analysis = _analyse_cookbook_dependencies_detailed(cookbook_path_obj)
 
         # Determine migration order
         migration_order = _determine_migration_order(dependency_analysis)
@@ -276,7 +356,7 @@ def analyze_cookbook_dependencies(
 {_format_community_cookbooks(dependency_analysis)}
 
 ## Migration Impact Analysis:
-{_analyze_dependency_migration_impact(dependency_analysis)}
+{_analyse_dependency_migration_impact(dependency_analysis)}
 """
     except Exception as e:
         return format_error_with_context(
@@ -459,11 +539,11 @@ def _parse_cookbook_paths(cookbook_paths: str) -> list[Any]:
     return valid_paths
 
 
-def _analyze_cookbook_metrics(
+def _analyse_cookbook_metrics(
     valid_paths: list[Any],
 ) -> tuple[list[Any], dict[str, int]]:
     """
-    Analyze metrics for all cookbooks.
+    Analyse metrics for all cookbooks.
 
     Args:
         valid_paths: List of valid cookbook paths
@@ -576,8 +656,8 @@ def _count_cookbook_artifacts(cookbook_path) -> dict[str, int]:
     }
 
 
-def _analyze_recipe_complexity(cookbook_path) -> dict[str, int]:
-    """Analyze recipe files for resource counts, Ruby blocks, and custom resources."""
+def _analyse_recipe_complexity(cookbook_path) -> dict[str, int]:
+    """Analyse recipe files for resource counts, Ruby blocks, and custom resources."""
     recipes_dir = _safe_join(cookbook_path, "recipes")
     resource_count = 0
     custom_resources = 0
@@ -654,18 +734,21 @@ def _determine_migration_priority(complexity_score: int) -> str:
     return "medium"
 
 
-def _assess_single_cookbook(cookbook_path) -> dict:
+def _assess_single_cookbook(cookbook_path: Path) -> dict:
     """Assess complexity of a single cookbook."""
-    cookbook = _normalize_path(cookbook_path)
+    # cookbook_path is already normalized to a Path object
+    cookbook = cookbook_path
 
     # Collect metrics
     artifact_counts = _count_cookbook_artifacts(cookbook)
-    recipe_complexity = _analyze_recipe_complexity(cookbook)
+    recipe_complexity = _analyse_recipe_complexity(cookbook)
     metrics = {**artifact_counts, **recipe_complexity}
 
     # Calculate complexity and effort
     complexity_score = _calculate_complexity_score(metrics)
-    base_effort = metrics["recipe_count"] * 0.5  # 0.5 days per recipe
+    # More realistic effort: 0.5-2 hours per recipe with AI assistance
+    # Base: 1 hour per recipe = 0.125 days (8-hour day)
+    base_effort = metrics["recipe_count"] * 0.125  # 0.125 days per recipe
     complexity_multiplier = 1 + (complexity_score / 100)
     estimated_effort = round(base_effort * complexity_multiplier, 1)
 
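The per-recipe effort heuristic drops from 0.5 days to 0.125 days per recipe. A worked example under the new numbers (values illustrative):

    recipe_count = 12
    complexity_score = 40
    base_effort = recipe_count * 0.125                      # 1.5 days (was 12 * 0.5 = 6.0)
    multiplier = 1 + (complexity_score / 100)               # 1.4
    estimated_effort = round(base_effort * multiplier, 1)   # 2.1 days (was 8.4)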
@@ -689,7 +772,7 @@ def _format_overall_metrics(metrics: dict) -> str:
 • Total Resources: {metrics["total_resources"]}
 • Average Complexity: {metrics.get("avg_complexity", 0):.1f}/100
 • Estimated Total Effort: {metrics["estimated_effort_days"]:.1f} person-days
-• Estimated Duration: {int(metrics["estimated_effort_days"] / 5)}-{int(metrics["estimated_effort_days"] / 3)} weeks"""
+• Estimated Duration: {max(1, int(metrics["estimated_effort_days"] / 2))}-{max(2, int(metrics["estimated_effort_days"]))} weeks (with 2-4 parallel engineers)"""
 
 
 def _format_cookbook_assessments(assessments: list) -> str:
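The reported duration range changes with it. For example, 6.0 estimated person-days now renders as "3-6 weeks (with 2-4 parallel engineers)" where the old formula gave "1-2 weeks":

    effort_days = 6.0
    low = max(1, int(effort_days / 2))   # 3  (old: int(6.0 / 5) = 1)
    high = max(2, int(effort_days))      # 6  (old: int(6.0 / 3) = 2)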
@@ -942,7 +1025,7 @@ def _estimate_resource_requirements(metrics: dict, target_platform: str) -> str:
 • **Training:** 2-3 days Ansible/AWX training for team"""
 
 
-def _analyze_cookbook_dependencies_detailed(cookbook_path) -> dict:
+def _analyse_cookbook_dependencies_detailed(cookbook_path) -> dict:
     """Analyze cookbook dependencies in detail."""
     analysis = {
         "cookbook_name": cookbook_path.name,
@@ -1281,8 +1364,8 @@ def _format_community_cookbooks(analysis: dict) -> str:
     )
 
 
-def _analyze_dependency_migration_impact(analysis: dict) -> str:
-    """Analyze migration impact of dependencies."""
+def _analyse_dependency_migration_impact(analysis: dict) -> str:
+    """Analyse migration impact of dependencies."""
     impacts = []
 
     if analysis["community_cookbooks"]:
souschef/ci/__init__.py ADDED
@@ -0,0 +1,11 @@
+"""CI/CD pipeline generation from Chef patterns."""
+
+from souschef.ci.github_actions import generate_github_workflow_from_chef_ci
+from souschef.ci.gitlab_ci import generate_gitlab_ci_from_chef_ci
+from souschef.ci.jenkins_pipeline import generate_jenkinsfile_from_chef_ci
+
+__all__ = [
+    "generate_jenkinsfile_from_chef_ci",
+    "generate_gitlab_ci_from_chef_ci",
+    "generate_github_workflow_from_chef_ci",
+]
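The package __init__ re-exports the three generators, so callers can import them from souschef.ci directly; a minimal sketch:

    from souschef.ci import (
        generate_github_workflow_from_chef_ci,
        generate_gitlab_ci_from_chef_ci,
        generate_jenkinsfile_from_chef_ci,
    )

The gitlab_ci and jenkins_pipeline modules referenced here are not shown in this excerpt; the github_actions module follows below.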
souschef/ci/github_actions.py ADDED
@@ -0,0 +1,379 @@
+"""
+GitHub Actions workflow generation from Chef CI/CD patterns.
+
+Analyzes Chef testing tools (Test Kitchen, ChefSpec, Cookstyle) and
+generates equivalent GitHub Actions workflows with proper job
+configuration and caching.
+"""
+
+from pathlib import Path
+from typing import Any
+
+import yaml
+
+# GitHub Actions constants
+ACTION_CHECKOUT = "actions/checkout@v4"
+ACTION_SETUP_RUBY = "ruby/setup-ruby@v1"
+ACTION_CACHE = "actions/cache@v4"
+ACTION_UPLOAD_ARTIFACT = "actions/upload-artifact@v4"
+
+STEP_NAME_CHECKOUT = "Checkout code"
+STEP_NAME_SETUP_RUBY = "Setup Ruby"
+STEP_NAME_CACHE_GEMS = "Cache gems"
+STEP_NAME_INSTALL_DEPS = "Install dependencies"
+
+GEM_BUNDLE_PATH = "vendor/bundle"
+GEM_CACHE_KEY = "gems-${{ runner.os }}-${{ hashFiles('**/Gemfile.lock') }}"
+GEM_CACHE_RESTORE_KEY = "gems-${{ runner.os }}-"
+
+BUNDLE_INSTALL_CMD = "bundle install --jobs 4 --retry 3"
+
+
+def generate_github_workflow_from_chef_ci(
+    cookbook_path: str,
+    workflow_name: str = "Chef Cookbook CI",
+    enable_cache: bool = True,
+    enable_artifacts: bool = True,
+) -> str:
+    """
+    Generate GitHub Actions workflow from Chef cookbook CI/CD patterns.
+
+    Args:
+        cookbook_path: Path to Chef cookbook directory.
+        workflow_name: Name for the GitHub Actions workflow.
+        enable_cache: Enable caching for Chef dependencies.
+        enable_artifacts: Enable artifacts for test results.
+
+    Returns:
+        GitHub Actions workflow YAML content.
+
+    """
+    cookbook_dir = Path(cookbook_path)
+    if not cookbook_dir.exists():
+        raise FileNotFoundError(f"Cookbook directory not found: {cookbook_path}")
+
+    # Analyse Chef CI patterns
+    patterns = _analyse_chef_ci_patterns(cookbook_dir)
+
+    # Build workflow structure
+    workflow = _build_workflow_structure(
+        workflow_name, patterns, enable_cache, enable_artifacts
+    )
+
+    return yaml.dump(workflow, default_flow_style=False, sort_keys=False)
+
+
+def _analyse_chef_ci_patterns(cookbook_dir: Path) -> dict[str, Any]:
+    """
+    Analyse Chef cookbook for CI/CD patterns and testing configurations.
+
+    This function examines a Chef cookbook directory to detect various
+    testing and linting tools, as well as Test Kitchen configurations
+    including suites and platforms.
+
+    Args:
+        cookbook_dir: Path to the Chef cookbook directory to analyse.
+
+    Returns:
+        Dictionary containing detected patterns with the following keys:
+        - has_kitchen (bool): Whether Test Kitchen is configured
+          (.kitchen.yml exists)
+        - has_chefspec (bool): Whether ChefSpec tests are present
+          (spec/**/*_spec.rb files)
+        - has_cookstyle (bool): Whether Cookstyle is configured
+          (.cookstyle.yml exists)
+        - has_foodcritic (bool): Whether Foodcritic (legacy) is
+          configured (.foodcritic exists)
+        - kitchen_suites (list[str]): Names of Test Kitchen suites
+          found in .kitchen.yml
+        - kitchen_platforms (list[str]): Names of Test Kitchen
+          platforms found in .kitchen.yml
+
+    Note:
+        If .kitchen.yml is malformed or cannot be parsed, the function
+        continues with empty suite and platform lists rather than
+        raising an exception.
+
+    Example:
+        >>> patterns = _analyze_chef_ci_patterns(Path("/path/to/cookbook"))
+        >>> patterns["has_kitchen"]
+        True
+        >>> patterns["kitchen_suites"]
+        ['default', 'integration']
+
+    """
+    patterns: dict[str, Any] = {
+        "has_kitchen": False,
+        "has_chefspec": False,
+        "has_cookstyle": False,
+        "has_foodcritic": False,
+        "kitchen_suites": [],
+        "kitchen_platforms": [],
+    }
+
+    # Check for Test Kitchen
+    kitchen_yml = cookbook_dir / ".kitchen.yml"
+    if kitchen_yml.exists():
+        patterns["has_kitchen"] = True
+        try:
+            with kitchen_yml.open() as f:
+                kitchen_config = yaml.safe_load(f)
+                if kitchen_config:
+                    # Extract suites
+                    suites = kitchen_config.get("suites", [])
+                    if suites:
+                        patterns["kitchen_suites"] = [
+                            s.get("name", "default") for s in suites
+                        ]
+                    # Extract platforms
+                    platforms = kitchen_config.get("platforms", [])
+                    if platforms:
+                        patterns["kitchen_platforms"] = [
+                            p.get("name", "unknown") for p in platforms
+                        ]
+        except (yaml.YAMLError, OSError, KeyError, TypeError, AttributeError):
+            # Gracefully handle malformed .kitchen.yml - continue with empty config
+            # Catches: YAML syntax errors, file I/O errors, missing config keys,
+            # type mismatches in config structure, and missing dict attributes
+            pass
+
+    # Check for ChefSpec
+    spec_dir = cookbook_dir / "spec"
+    if spec_dir.exists() and any(spec_dir.glob("**/*_spec.rb")):
+        patterns["has_chefspec"] = True
+
+    # Check for Cookstyle
+    cookstyle_yml = cookbook_dir / ".cookstyle.yml"
+    if cookstyle_yml.exists():
+        patterns["has_cookstyle"] = True
+
+    # Check for Foodcritic (legacy)
+    if (cookbook_dir / ".foodcritic").exists():
+        patterns["has_foodcritic"] = True
+
+    return patterns
+
+
+def _build_workflow_structure(
+    workflow_name: str,
+    patterns: dict[str, Any],
+    enable_cache: bool,
+    enable_artifacts: bool,
+) -> dict[str, Any]:
+    """
+    Build GitHub Actions workflow structure.
+
+    Args:
+        workflow_name: Workflow name.
+        patterns: Detected Chef CI patterns.
+        enable_cache: Enable caching.
+        enable_artifacts: Enable artifacts.
+
+    Returns:
+        Workflow dictionary structure.
+
+    """
+    workflow: dict[str, Any] = {
+        "name": workflow_name,
+        "on": {
+            "push": {"branches": ["main", "develop"]},
+            "pull_request": {"branches": ["main", "develop"]},
+        },
+        "jobs": {},
+    }
+
+    # Add lint job
+    if patterns["has_cookstyle"] or patterns["has_foodcritic"]:
+        workflow["jobs"]["lint"] = _build_lint_job(patterns, enable_cache)
+
+    # Add unit test job
+    if patterns["has_chefspec"]:
+        workflow["jobs"]["unit-test"] = _build_unit_test_job(enable_cache)
+
+    # Add integration test jobs
+    if patterns["has_kitchen"]:
+        workflow["jobs"]["integration-test"] = _build_integration_test_job(
+            patterns, enable_cache, enable_artifacts
+        )
+
+    return workflow
+
+
+def _build_lint_job(patterns: dict[str, Any], enable_cache: bool) -> dict[str, Any]:
+    """
+    Build lint job configuration.
+
+    Args:
+        patterns: Detected CI patterns.
+        enable_cache: Enable caching.
+
+    Returns:
+        Lint job configuration.
+
+    """
+    job: dict[str, Any] = {
+        "name": "Lint Cookbook",
+        "runs-on": "ubuntu-latest",
+        "steps": [
+            {"name": STEP_NAME_CHECKOUT, "uses": ACTION_CHECKOUT},
+            {
+                "name": STEP_NAME_SETUP_RUBY,
+                "uses": ACTION_SETUP_RUBY,
+                "with": {"ruby-version": "3.2"},
+            },
+        ],
+    }
+
+    if enable_cache:
+        job["steps"].append(
+            {
+                "name": STEP_NAME_CACHE_GEMS,
+                "uses": ACTION_CACHE,
+                "with": {
+                    "path": GEM_BUNDLE_PATH,
+                    "key": GEM_CACHE_KEY,
+                    "restore-keys": GEM_CACHE_RESTORE_KEY,
+                },
+            }
+        )
+
+    job["steps"].extend(
+        [
+            {
+                "name": STEP_NAME_INSTALL_DEPS,
+                "run": BUNDLE_INSTALL_CMD,
+            },
+        ]
+    )
+
+    # Add appropriate lint commands
+    if patterns["has_cookstyle"]:
+        job["steps"].append({"name": "Run Cookstyle", "run": "bundle exec cookstyle"})
+
+    if patterns["has_foodcritic"]:
+        job["steps"].append(
+            {"name": "Run Foodcritic", "run": "bundle exec foodcritic ."}
+        )
+
+    return job
+
+
+def _build_unit_test_job(enable_cache: bool) -> dict[str, Any]:
+    """
+    Build unit test job configuration.
+
+    Args:
+        enable_cache: Enable caching.
+
+    Returns:
+        Unit test job configuration.
+
+    """
+    job: dict[str, Any] = {
+        "name": "Unit Tests (ChefSpec)",
+        "runs-on": "ubuntu-latest",
+        "steps": [
+            {"name": STEP_NAME_CHECKOUT, "uses": ACTION_CHECKOUT},
+            {
+                "name": STEP_NAME_SETUP_RUBY,
+                "uses": ACTION_SETUP_RUBY,
+                "with": {"ruby-version": "3.2"},
+            },
+        ],
+    }
+
+    if enable_cache:
+        job["steps"].append(
+            {
+                "name": STEP_NAME_CACHE_GEMS,
+                "uses": ACTION_CACHE,
+                "with": {
+                    "path": GEM_BUNDLE_PATH,
+                    "key": GEM_CACHE_KEY,
+                    "restore-keys": GEM_CACHE_RESTORE_KEY,
+                },
+            }
+        )
+
+    job["steps"].extend(
+        [
+            {
+                "name": STEP_NAME_INSTALL_DEPS,
+                "run": BUNDLE_INSTALL_CMD,
+            },
+            {"name": "Run ChefSpec tests", "run": "bundle exec rspec"},
+        ]
+    )
+
+    return job
+
+
+def _build_integration_test_job(
+    patterns: dict[str, Any], enable_cache: bool, enable_artifacts: bool
+) -> dict[str, Any]:
+    """
+    Build integration test job configuration.
+
+    Args:
+        patterns: Detected CI patterns.
+        enable_cache: Enable caching.
+        enable_artifacts: Enable artifacts.
+
+    Returns:
+        Integration test job configuration.
+
+    """
+    job: dict[str, Any] = {
+        "name": "Integration Tests (Test Kitchen)",
+        "runs-on": "ubuntu-latest",
+        "strategy": {"matrix": {"suite": patterns["kitchen_suites"] or ["default"]}},
+        "steps": [
+            {"name": STEP_NAME_CHECKOUT, "uses": ACTION_CHECKOUT},
+            {
+                "name": STEP_NAME_SETUP_RUBY,
+                "uses": ACTION_SETUP_RUBY,
+                "with": {"ruby-version": "3.2"},
+            },
+        ],
+    }
+
+    if enable_cache:
+        job["steps"].append(
+            {
+                "name": STEP_NAME_CACHE_GEMS,
+                "uses": ACTION_CACHE,
+                "with": {
+                    "path": GEM_BUNDLE_PATH,
+                    "key": GEM_CACHE_KEY,
+                    "restore-keys": GEM_CACHE_RESTORE_KEY,
+                },
+            }
+        )
+
+    job["steps"].extend(
+        [
+            {
+                "name": STEP_NAME_INSTALL_DEPS,
+                "run": BUNDLE_INSTALL_CMD,
+            },
+            {
+                "name": "Run Test Kitchen",
+                "run": "bundle exec kitchen test ${{ matrix.suite }}",
+            },
+        ]
+    )
+
+    if enable_artifacts:
+        job["steps"].append(
+            {
+                "name": "Upload test results",
+                "uses": ACTION_UPLOAD_ARTIFACT,
+                "if": "always()",
+                "with": {
+                    "name": "kitchen-logs-${{ matrix.suite }}",
+                    "path": ".kitchen/logs/",
+                },
+            }
+        )
+
+    return job
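A minimal end-to-end sketch of the new generator, assuming a cookbook that ships a .kitchen.yml, ChefSpec specs, and a .cookstyle.yml (so lint, unit-test, and integration-test jobs are all emitted); the cookbook and output paths are illustrative:

    from pathlib import Path

    from souschef.ci.github_actions import generate_github_workflow_from_chef_ci

    workflow_yaml = generate_github_workflow_from_chef_ci(
        "cookbooks/apache",           # must exist, otherwise FileNotFoundError is raised
        workflow_name="Cookbook CI",
        enable_cache=True,
        enable_artifacts=True,
    )
    # The function returns YAML text; writing it to a workflow file is up to the caller.
    Path(".github/workflows/cookbook-ci.yml").write_text(workflow_yaml)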