mcp-souschef 2.2.0__py3-none-any.whl → 2.8.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,1360 @@
+ """Cookbook Analysis Page for SousChef UI."""
+
+ import io
+ import json
+ import shutil
+ import sys
+ import tarfile
+ import tempfile
+ import zipfile
+ from pathlib import Path
+ from typing import Any
+
+ import pandas as pd  # type: ignore[import-untyped]
+ import streamlit as st
+
+ # Add the parent directory to the path so we can import souschef modules
+ sys.path.insert(0, str(Path(__file__).parent.parent.parent))
+
+ from souschef.assessment import parse_chef_migration_assessment
+ from souschef.converters.playbook import (
+     generate_playbook_from_recipe,
+     generate_playbook_from_recipe_with_ai,
+ )
+ from souschef.core.constants import METADATA_FILENAME
+ from souschef.parsers.metadata import parse_cookbook_metadata
+
+ # AI Settings
+ ANTHROPIC_PROVIDER = "Anthropic (Claude)"
+ OPENAI_PROVIDER = "OpenAI (GPT)"
+ LOCAL_PROVIDER = "Local Model"
+
+
+ def load_ai_settings():
+     """Load AI settings from configuration file."""
+     try:
+         # Use /tmp/.souschef for container compatibility (tmpfs is writable)
+         config_file = Path("/tmp/.souschef/ai_config.json")
+         if config_file.exists():
+             with config_file.open() as f:
+                 return json.load(f)
+     except Exception:
+         pass  # Ignore errors when loading config file; return empty dict as fallback
+     return {}
+
+
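+ # For reference, the keys read from ai_config.json elsewhere in this module
+ # are: provider, api_key, model, temperature, max_tokens, project_id and
+ # base_url. A minimal config might look like this (illustrative values only):
+ #
+ #     {
+ #         "provider": "Anthropic (Claude)",
+ #         "api_key": "sk-...",
+ #         "model": "claude-3-5-sonnet-20241022",
+ #         "temperature": 0.7,
+ #         "max_tokens": 4000
+ #     }
+
+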
+ # Constants for repeated strings
+ METADATA_STATUS_YES = "Yes"
+ METADATA_STATUS_NO = "No"
+ ANALYSIS_STATUS_ANALYSED = "Analysed"
+ ANALYSIS_STATUS_FAILED = "Failed"
+ METADATA_COLUMN_NAME = "Has Metadata"
+
+ # Security limits for archive extraction
+ MAX_ARCHIVE_SIZE = 100 * 1024 * 1024  # 100MB total
+ MAX_FILE_SIZE = 50 * 1024 * 1024  # 50MB per file
+ MAX_FILES = 1000  # Maximum number of files
+ MAX_DEPTH = 10  # Maximum directory depth
+ BLOCKED_EXTENSIONS = {
+     ".exe",
+     ".bat",
+     ".cmd",
+     ".com",
+     ".pif",
+     ".scr",
+     ".vbs",
+     ".js",
+     ".jar",
+     # Note: .sh files are allowed as they are common in Chef cookbooks
+ }
+
+
+ def extract_archive(uploaded_file) -> tuple[Path, Path]:
+     """
+     Extract uploaded archive to a temporary directory with security checks.
+
+     Returns:
+         tuple: (temp_dir_path, cookbook_root_path)
+
+     Implements multiple security measures to prevent:
+     - Zip bombs (size limits, file count limits)
+     - Path traversal attacks (../ validation)
+     - Resource exhaustion (depth limits, size limits)
+     - Malicious files (symlinks, executables blocked)
+
+     """
+     # Check initial file size
+     file_size = len(uploaded_file.getbuffer())
+     if file_size > MAX_ARCHIVE_SIZE:
+         raise ValueError(
+             f"Archive too large: {file_size} bytes (max: {MAX_ARCHIVE_SIZE})"
+         )
+
+     # Create temporary directory (will be cleaned up by caller)
+     temp_dir = Path(tempfile.mkdtemp())
+     temp_path = temp_dir
+
+     # Save uploaded file
+     archive_path = temp_path / uploaded_file.name
+     with archive_path.open("wb") as f:
+         f.write(uploaded_file.getbuffer())
+
+     # Extract archive with security checks
+     extraction_dir = temp_path / "extracted"
+     extraction_dir.mkdir()
+
+     _extract_archive_by_type(archive_path, extraction_dir, uploaded_file.name)
+
+     # Find the root directory (should contain cookbooks)
+     cookbook_root = _determine_cookbook_root(extraction_dir)
+
+     return temp_dir, cookbook_root
+
+
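+ # A minimal usage sketch (assumes an in-memory stand-in for Streamlit's
+ # UploadedFile; only .name and .getbuffer() are required by extract_archive):
+ #
+ #     class _Upload:
+ #         def __init__(self, name: str, data: bytes):
+ #             self.name = name
+ #             self._data = data
+ #
+ #         def getbuffer(self):
+ #             return self._data
+ #
+ #     temp_dir, root = extract_archive(_Upload("cookbooks.zip", zip_bytes))
+ #     try:
+ #         ...  # inspect cookbooks under `root`
+ #     finally:
+ #         shutil.rmtree(temp_dir, ignore_errors=True)  # caller owns cleanup
+
+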
+ def _extract_archive_by_type(
+     archive_path: Path, extraction_dir: Path, filename: str
+ ) -> None:
+     """Extract archive based on file extension."""
+     if filename.endswith(".zip"):
+         _extract_zip_securely(archive_path, extraction_dir)
+     elif filename.endswith((".tar.gz", ".tgz")):
+         _extract_tar_securely(archive_path, extraction_dir, gzipped=True)
+     elif filename.endswith(".tar"):
+         _extract_tar_securely(archive_path, extraction_dir, gzipped=False)
+     else:
+         raise ValueError(f"Unsupported archive format: {filename}")
+
+
+ def _determine_cookbook_root(extraction_dir: Path) -> Path:
+     """Determine the root directory containing cookbooks."""
+     subdirs = [d for d in extraction_dir.iterdir() if d.is_dir()]
+
+     # Check if this looks like a single cookbook archive (contains typical
+     # cookbook dirs)
+     cookbook_dirs = {
+         "recipes",
+         "attributes",
+         "templates",
+         "files",
+         "libraries",
+         "definitions",
+     }
+     extracted_dirs = {d.name for d in subdirs}
+
+     cookbook_root = extraction_dir
+
+     if len(subdirs) > 1 and cookbook_dirs.intersection(extracted_dirs):
+         # Case 1: Multiple cookbook component directories at root level
+         cookbook_root = _handle_multiple_cookbook_dirs(extraction_dir, subdirs)
+     elif len(subdirs) == 1:
+         # Case 2: Single directory - check if it contains cookbook components
+         cookbook_root = _handle_single_cookbook_dir(
+             extraction_dir, subdirs[0], cookbook_dirs
+         )
+     # else: Multiple directories that are not cookbook components - use extraction_dir
+
+     return cookbook_root
+
+
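+ # The layouts handled above, illustrated:
+ #
+ #     extracted/recipes/, extracted/attributes/, ...  -> Case 1: the archive is
+ #         one cookbook exploded at the root; components are moved under a
+ #         synthetic extracted/cookbook/ directory.
+ #     extracted/nginx/recipes/, ...                   -> Case 2: a single
+ #         directory that is itself a cookbook; metadata.rb is synthesised if
+ #         missing and extraction_dir stays the root.
+ #     extracted/bundle/nginx/, extracted/bundle/...   -> Case 2, else branch: a
+ #         single wrapper directory with no cookbook components; descend into it.
+ #     extracted/nginx/, extracted/mysql/, ...         -> fallthrough: already a
+ #         directory of cookbooks; extraction_dir is the root.
+
+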
+ def _handle_multiple_cookbook_dirs(extraction_dir: Path, subdirs: list) -> Path:
+     """Handle case where multiple cookbook directories are at root level."""
+     synthetic_cookbook_dir = extraction_dir / "cookbook"
+     synthetic_cookbook_dir.mkdir(exist_ok=True)
+
+     # Move all extracted cookbook component directories into the synthetic cookbook
+     for subdir in subdirs:
+         if subdir.name in {
+             "recipes",
+             "attributes",
+             "templates",
+             "files",
+             "libraries",
+             "definitions",
+         }:
+             shutil.move(str(subdir), str(synthetic_cookbook_dir / subdir.name))
+
+     # Create a basic metadata.rb file
+     metadata_content = """name 'extracted_cookbook'
+ maintainer 'SousChef'
+ maintainer_email 'souschef@example.com'
+ license 'All rights reserved'
+ description 'Automatically extracted cookbook from archive'
+ version '1.0.0'
+ """
+     (synthetic_cookbook_dir / METADATA_FILENAME).write_text(metadata_content)
+
+     return extraction_dir
+
+
+ def _handle_single_cookbook_dir(
+     extraction_dir: Path, single_dir: Path, cookbook_dirs: set
+ ) -> Path:
+     """Handle case where a single directory contains cookbook components."""
+     single_dir_contents = {d.name for d in single_dir.iterdir() if d.is_dir()}
+
+     if cookbook_dirs.intersection(single_dir_contents):
+         # This single directory contains cookbook components - treat it as a cookbook
+         # Check if it already has metadata.rb
+         if not (single_dir / METADATA_FILENAME).exists():
+             # Create synthetic metadata.rb
+             metadata_content = f"""name '{single_dir.name}'
+ maintainer 'SousChef'
+ maintainer_email 'souschef@example.com'
+ license 'All rights reserved'
+ description 'Automatically extracted cookbook from archive'
+ version '1.0.0'
+ """
+             (single_dir / METADATA_FILENAME).write_text(metadata_content)
+
+         return extraction_dir
+     else:
+         # Single directory that doesn't contain cookbook components
+         return single_dir
+
+
+ def _extract_zip_securely(archive_path: Path, extraction_dir: Path) -> None:
+     """Extract ZIP archive with security checks."""
+     total_size = 0
+
+     with zipfile.ZipFile(archive_path, "r") as zip_ref:
+         # Pre-scan for security issues; accumulate the total before validating
+         # so each entry is checked against a size that includes itself
+         for file_count, info in enumerate(zip_ref.filelist, start=1):
+             total_size += info.file_size
+             _validate_zip_file_security(info, file_count, total_size)
+
+         # Safe extraction with manual path handling
+         for info in zip_ref.filelist:
+             # Construct safe relative path
+             safe_path = _get_safe_extraction_path(info.filename, extraction_dir)
+
+             if info.is_dir():
+                 # Create directory
+                 safe_path.mkdir(parents=True, exist_ok=True)
+             else:
+                 # Create parent directories if needed
+                 safe_path.parent.mkdir(parents=True, exist_ok=True)
+                 # Extract file content manually
+                 with zip_ref.open(info) as source, safe_path.open("wb") as target:
+                     # Read in chunks to control memory usage
+                     while True:
+                         chunk = source.read(8192)
+                         if not chunk:
+                             break
+                         target.write(chunk)
+
+
+ def _validate_zip_file_security(info, file_count: int, total_size: int) -> None:
+     """Validate a single ZIP file entry for security issues."""
+     # file_count and total_size already include the current entry; incrementing
+     # them again here (as the earlier version did) made both checks off by one
+     if file_count > MAX_FILES:
+         raise ValueError(f"Too many files in archive: {file_count} (max: {MAX_FILES})")
+
+     # Check file size
+     if info.file_size > MAX_FILE_SIZE:
+         raise ValueError(f"File too large: {info.filename} ({info.file_size} bytes)")
+
+     # Check accumulated archive size
+     if total_size > MAX_ARCHIVE_SIZE:
+         raise ValueError(f"Total archive size too large: {total_size} bytes")
+
+     # Check for path traversal
+     if _has_path_traversal(info.filename):
+         raise ValueError(f"Path traversal detected: {info.filename}")
+
+     # Check directory depth
+     if _exceeds_depth_limit(info.filename):
+         raise ValueError(f"Directory depth too deep: {info.filename}")
+
+     # Check for blocked file extensions
+     if _is_blocked_extension(info.filename):
+         raise ValueError(f"Blocked file type: {info.filename}")
+
+     # Check for symlinks
+     if _is_symlink(info):
+         raise ValueError(f"Symlinks not allowed: {info.filename}")
+
+
+ def _extract_tar_securely(
+     archive_path: Path, extraction_dir: Path, gzipped: bool
+ ) -> None:
+     """Extract TAR archive with security checks."""
+     mode = "r:gz" if gzipped else "r"
+
+     try:
+         with tarfile.open(str(archive_path), mode=mode) as tar_ref:  # type: ignore[call-overload]
+             members = tar_ref.getmembers()
+             _pre_scan_tar_members(members)
+             _extract_tar_members(tar_ref, members, extraction_dir)
+     except tarfile.TarError as e:
+         raise ValueError(f"Invalid or corrupted TAR archive: {e}") from e
+     except Exception as e:
+         raise ValueError(f"Failed to process TAR archive: {e}") from e
+
+
+ def _pre_scan_tar_members(members):
+     """Pre-scan TAR members for security issues and accumulate totals."""
+     total_size = 0
+     for file_count, member in enumerate(members, start=1):
+         total_size += member.size
+         _validate_tar_file_security(member, file_count, total_size)
+
+
+ def _extract_tar_members(tar_ref, members, extraction_dir):
+     """Extract validated TAR members to the extraction directory."""
+     for member in members:
+         safe_path = _get_safe_extraction_path(member.name, extraction_dir)
+         if member.isdir():
+             safe_path.mkdir(parents=True, exist_ok=True)
+         else:
+             safe_path.parent.mkdir(parents=True, exist_ok=True)
+             _extract_file_content(tar_ref, member, safe_path)
+
+
+ def _extract_file_content(tar_ref, member, safe_path):
+     """Extract the content of a single TAR member to a file."""
+     source = tar_ref.extractfile(member)
+     if source:
+         with source, safe_path.open("wb") as target:
+             while True:
+                 chunk = source.read(8192)
+                 if not chunk:
+                     break
+                 target.write(chunk)
+
+
+ def _validate_tar_file_security(member, file_count: int, total_size: int) -> None:
+     """Validate a single TAR file entry for security issues."""
+     # file_count and total_size already include the current member (the caller
+     # accumulates before calling); adding member.size again here, as the
+     # earlier version did, double-counted the current member
+     if file_count > MAX_FILES:
+         raise ValueError(f"Too many files in archive: {file_count} (max: {MAX_FILES})")
+
+     # Check file size
+     if member.size > MAX_FILE_SIZE:
+         raise ValueError(f"File too large: {member.name} ({member.size} bytes)")
+
+     # Check accumulated archive size
+     if total_size > MAX_ARCHIVE_SIZE:
+         raise ValueError(f"Total archive size too large: {total_size} bytes")
+
+     # Check for path traversal
+     if _has_path_traversal(member.name):
+         raise ValueError(f"Path traversal detected: {member.name}")
+
+     # Check directory depth
+     if _exceeds_depth_limit(member.name):
+         raise ValueError(f"Directory depth too deep: {member.name}")
+
+     # Check for blocked file extensions
+     if _is_blocked_extension(member.name):
+         raise ValueError(f"Blocked file type: {member.name}")
+
+     # Check for symlinks
+     if member.issym() or member.islnk():
+         raise ValueError(f"Symlinks not allowed: {member.name}")
+
+     # Check for device files, fifos, etc.
+     if not member.isfile() and not member.isdir():
+         raise ValueError(f"Unsupported file type: {member.name} (type: {member.type})")
+
+
+ def _has_path_traversal(filename: str) -> bool:
+     """Check if filename contains path traversal attempts."""
+     # Deliberately conservative: also rejects names that merely contain "..",
+     # such as "notes..txt"
+     return ".." in filename
+
+
+ def _exceeds_depth_limit(filename: str) -> bool:
+     """Check if filename exceeds directory depth limit."""
+     return filename.count("/") > MAX_DEPTH or filename.count("\\") > MAX_DEPTH
+
+
+ def _is_blocked_extension(filename: str) -> bool:
+     """Check if filename has a blocked extension."""
+     file_ext = Path(filename).suffix.lower()
+     return file_ext in BLOCKED_EXTENSIONS
+
+
+ def _is_symlink(info) -> bool:
+     """Check if ZIP file info indicates a symlink."""
+     # The Unix file mode lives in the upper 16 bits of external_attr; a
+     # file-type field of 0xA000 (S_IFLNK) marks a symlink. The parentheses
+     # matter: `==` binds tighter than `&`, so the unbracketed form
+     # `attr & 0xA000 == 0xA000` compares first and tests the wrong bit
+     return ((info.external_attr >> 16) & 0xF000) == 0xA000
+
+
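+ # Worked example (illustrative values): a symlink stored by a Unix zip tool
+ # typically carries external_attr == 0xA1FF0000, i.e. mode 0o120777 in the
+ # high 16 bits:
+ #
+ #     >>> attr = 0xA1FF0000
+ #     >>> oct(attr >> 16)
+ #     '0o120777'
+ #     >>> ((attr >> 16) & 0xF000) == 0xA000  # file-type bits == S_IFLNK
+ #     True
+
+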
+ def _get_safe_extraction_path(filename: str, extraction_dir: Path) -> Path:
+     """Get a safe path for extraction that prevents directory traversal."""
+     # Reject paths with directory traversal attempts or absolute paths
+     if (
+         ".." in filename
+         or filename.startswith("/")
+         or "\\" in filename
+         or ":" in filename
+     ):
+         raise ValueError(f"Path traversal or absolute path detected: {filename}")
+
+     # Normalize path separators and remove leading/trailing slashes
+     normalized = filename.replace("\\", "/").strip("/")
+
+     # Split into components and filter out dangerous ones
+     parts: list[str] = []
+     for part in normalized.split("/"):
+         if part == "" or part == ".":
+             continue
+         elif part == "..":
+             # Defence in depth: unreachable after the ".." rejection above,
+             # but kept so the loop stays safe if that check ever changes
+             if parts:
+                 parts.pop()
+         else:
+             parts.append(part)
+
+     # Join parts back and resolve against extraction_dir
+     safe_path = extraction_dir / "/".join(parts)
+
+     # Ensure the final path is still within extraction_dir
+     try:
+         safe_path.resolve().relative_to(extraction_dir.resolve())
+     except ValueError:
+         raise ValueError(f"Path traversal detected: {filename}") from None
+
+     return safe_path
+
+
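+ # Behaviour sketch for _get_safe_extraction_path (extraction_dir = /tmp/x):
+ #
+ #     "cookbook/recipes/default.rb" -> /tmp/x/cookbook/recipes/default.rb
+ #     "./cookbook//readme"          -> /tmp/x/cookbook/readme
+ #     "../etc/passwd"               -> ValueError (".." rejected up front)
+ #     "/etc/passwd"                 -> ValueError (absolute path rejected)
+ #     "C:\\evil.txt"                -> ValueError (backslash/colon rejected)
+
+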
+ def create_results_archive(results: list, cookbook_path: str) -> bytes:
+     """Create a ZIP archive containing analysis results."""
+     zip_buffer = io.BytesIO()
+
+     with zipfile.ZipFile(zip_buffer, "w", zipfile.ZIP_DEFLATED) as zip_file:
+         # Add JSON summary
+         json_data = pd.DataFrame(results).to_json(indent=2)
+         zip_file.writestr("analysis_results.json", json_data)
+
+         # Add individual cookbook reports
+         for result in results:
+             if result["status"] == ANALYSIS_STATUS_ANALYSED:
+                 report_content = f"""# Cookbook Analysis Report: {result["name"]}
+
+ ## Metadata
+ - **Version**: {result["version"]}
+ - **Maintainer**: {result["maintainer"]}
+ - **Dependencies**: {result["dependencies"]}
+ - **Complexity**: {result["complexity"]}
+ - **Estimated Hours**: {result["estimated_hours"]:.1f}
+
+ ## Recommendations
+ {result["recommendations"]}
+
+ ## Source Path
+ {result["path"]}
+ """
+                 zip_file.writestr(f"{result['name']}_report.md", report_content)
+
+         # Add summary report
+         successful = len(
+             [r for r in results if r["status"] == ANALYSIS_STATUS_ANALYSED]
+         )
+         total_hours = sum(r.get("estimated_hours", 0) for r in results)
+
+         summary_content = f"""# SousChef Cookbook Analysis Summary
+
+ ## Overview
+ - **Cookbooks Analysed**: {len(results)}
+ - **Successfully Analysed**: {successful}
+ - **Total Estimated Hours**: {total_hours:.1f}
+ - **Source**: {cookbook_path}
+
+ ## Results Summary
+ """
+         for result in results:
+             status_icon = "✅" if result["status"] == ANALYSIS_STATUS_ANALYSED else "❌"
+             summary_content += f"- {status_icon} {result['name']}: {result['status']}"
+             if result["status"] == ANALYSIS_STATUS_ANALYSED:
+                 summary_content += (
+                     f" ({result['estimated_hours']:.1f} hours, "
+                     f"{result['complexity']} complexity)"
+                 )
+             summary_content += "\n"
+
+         zip_file.writestr("analysis_summary.md", summary_content)
+
+     zip_buffer.seek(0)
+     return zip_buffer.getvalue()
+
+
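+ # A minimal usage sketch (hypothetical values): the returned bytes can be
+ # written to disk or handed straight to st.download_button:
+ #
+ #     archive_bytes = create_results_archive(results, "cookbooks/")
+ #     Path("analysis_results.zip").write_bytes(archive_bytes)
+
+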
+ def show_cookbook_analysis_page():
+     """Show the cookbook analysis page."""
+     _setup_cookbook_analysis_ui()
+
+     # Initialise session state for analysis results
+     if "analysis_results" not in st.session_state:
+         st.session_state.analysis_results = None
+         st.session_state.analysis_cookbook_path = None
+         st.session_state.total_cookbooks = 0
+         st.session_state.temp_dir = None
+
+     # Check if we have analysis results to display
+     if st.session_state.analysis_results is not None:
+         _display_analysis_results(
+             st.session_state.analysis_results,
+             st.session_state.total_cookbooks,
+         )
+         return
+
+     # Check if we have an uploaded file from the dashboard
+     if "uploaded_file_data" in st.session_state:
+         _handle_dashboard_upload()
+         return
+
+     # Input method selection
+     input_method = st.radio(
+         "Choose Input Method",
+         ["Upload Archive", "Directory Path"],
+         horizontal=True,
+         help="Select how to provide cookbooks for analysis",
+     )
+
+     cookbook_path = None
+     temp_dir = None
+     uploaded_file = None
+
+     if input_method == "Directory Path":
+         cookbook_path = _get_cookbook_path_input()
+     else:
+         uploaded_file = _get_archive_upload_input()
+         if uploaded_file:
+             try:
+                 with st.spinner("Extracting archive..."):
+                     temp_dir, cookbook_path = extract_archive(uploaded_file)
+                     # Store temp_dir in session state to prevent premature cleanup
+                     st.session_state.temp_dir = temp_dir
+                 st.success("Archive extracted successfully to temporary location")
+             except Exception as e:
+                 st.error(f"Failed to extract archive: {e}")
+                 return
+
+     try:
+         if cookbook_path:
+             _validate_and_list_cookbooks(cookbook_path)
+
+         _display_instructions()
+     finally:
+         # Only clean up temp_dir if it wasn't stored in session state
+         # (i.e., if we didn't successfully extract an archive)
+         if temp_dir and temp_dir.exists() and st.session_state.temp_dir != temp_dir:
+             shutil.rmtree(temp_dir, ignore_errors=True)
+
+
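+ # Session-state keys used by this page (for reference):
+ #   analysis_results / analysis_cookbook_path / total_cookbooks - results view
+ #   temp_dir - extraction directory kept alive across Streamlit reruns
+ #   uploaded_file_data / uploaded_file_name / uploaded_file_type - handoff
+ #       from the Dashboard page
+ #   converted_playbooks_path - staging dir consumed by the validation page
+ #   current_page - navigation target used by "Back to Dashboard"
+
+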
+ def _setup_cookbook_analysis_ui():
+     """Set up the cookbook analysis page header."""
+     st.title("SousChef - Cookbook Analysis")
+     st.markdown("""
+     Analyse your Chef cookbooks and get detailed migration assessments for
+     converting to Ansible playbooks.
+
+     Upload a cookbook archive or specify a directory path to begin analysis.
+     """)
+
+
+ def _get_cookbook_path_input():
+     """Get the cookbook path input from the user."""
+     return st.text_input(
+         "Cookbook Directory Path",
+         placeholder="cookbooks/ or ./my-cookbooks/",
+         help="Enter a path to your Chef cookbooks directory. "
+         "Relative paths (e.g., 'cookbooks/') and absolute paths inside the workspace "
+         "(e.g., '/workspaces/souschef/cookbooks/') are allowed. "
+         "Paths containing '..' are rejected.",
+     )
+
+
+ def _get_archive_upload_input():
+     """Get archive upload input from the user."""
+     uploaded_file = st.file_uploader(
+         "Upload Cookbook Archive",
+         type=["zip", "tar.gz", "tgz", "tar"],
+         help="Upload a ZIP or TAR archive containing your Chef cookbooks",
+     )
+     return uploaded_file
+
+
+ def _validate_and_list_cookbooks(cookbook_path):
+     """Validate the cookbook path and list available cookbooks."""
+     safe_dir = _get_safe_cookbook_directory(cookbook_path)
+     if safe_dir is None:
+         return
+
+     if safe_dir.exists() and safe_dir.is_dir():
+         _list_and_display_cookbooks(safe_dir)
+     else:
+         st.error(f"Directory not found: {safe_dir}")
+
+
+ def _get_safe_cookbook_directory(cookbook_path):
+     """
+     Resolve the user-provided cookbook path to a safe directory.
+
+     The path is validated and normalized to prevent directory traversal
+     outside the allowed root before any path operations.
+     """
+     try:
+         base_dir = Path.cwd().resolve()
+         temp_dir = Path(tempfile.gettempdir()).resolve()
+
+         path_str = str(cookbook_path).strip()
+
+         # Reject obviously malicious patterns
+         if "\x00" in path_str or ":\\" in path_str or "\\" in path_str:
+             st.error(
+                 "❌ Invalid path: Path contains null bytes or backslashes, "
+                 "which are not allowed."
+             )
+             return None
+
+         # Reject paths with directory traversal attempts
+         if ".." in path_str:
+             st.error(
+                 "❌ Invalid path: Path contains '..' which is not allowed "
+                 "for security reasons."
+             )
+             return None
+
+         user_path = Path(path_str)
+
+         # Resolve the path safely
+         if user_path.is_absolute():
+             resolved_path = user_path.resolve()
+         else:
+             resolved_path = (base_dir / user_path).resolve()
+
+         # Check if the resolved path is within allowed directories
+         try:
+             resolved_path.relative_to(base_dir)
+             return resolved_path
+         except ValueError:
+             pass
+
+         try:
+             resolved_path.relative_to(temp_dir)
+             return resolved_path
+         except ValueError:
+             st.error(
+                 "❌ Invalid path: The resolved path is outside the allowed "
+                 "directories (workspace or temporary directory). Paths cannot go above "
+                 "the workspace root for security reasons."
+             )
+             return None
+
+     except Exception as exc:
+         st.error(f"❌ Invalid path: {exc}. Please enter a valid path.")
+         return None
+
+
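+ # Examples of how _get_safe_cookbook_directory resolves input (assuming the
+ # workspace cwd is /workspaces/souschef):
+ #
+ #     "cookbooks/"                      -> /workspaces/souschef/cookbooks
+ #     "/workspaces/souschef/cookbooks"  -> accepted (inside the workspace)
+ #     "/tmp/abc123/extracted"           -> accepted (inside the temp dir)
+ #     "../shared/cookbooks/"            -> rejected ('..' is not allowed)
+ #     "/etc"                            -> rejected (outside allowed roots)
+
+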
+ def _list_and_display_cookbooks(cookbook_path: Path):
+     """List cookbooks in the directory and display them."""
+     try:
+         cookbooks = [d for d in cookbook_path.iterdir() if d.is_dir()]
+         if cookbooks:
+             st.subheader("Available Cookbooks")
+             cookbook_data = _collect_cookbook_data(cookbooks)
+             _display_cookbook_table(cookbook_data)
+             _handle_cookbook_selection(str(cookbook_path), cookbook_data)
+         else:
+             st.warning(
+                 "No subdirectories found in the specified path. "
+                 "Are these individual cookbooks?"
+             )
+     except Exception as e:
+         st.error(f"Error reading directory: {e}")
+
+
+ def _collect_cookbook_data(cookbooks):
+     """Collect data for all cookbooks."""
+     cookbook_data = []
+     for cookbook in cookbooks:
+         cookbook_info = _analyse_cookbook_metadata(cookbook)
+         cookbook_data.append(cookbook_info)
+     return cookbook_data
+
+
+ def _analyse_cookbook_metadata(cookbook):
+     """Analyse metadata for a single cookbook."""
+     metadata_file = cookbook / METADATA_FILENAME
+     if metadata_file.exists():
+         return _parse_metadata_with_fallback(cookbook, metadata_file)
+     else:
+         return _create_no_metadata_entry(cookbook)
+
+
+ def _parse_metadata_with_fallback(cookbook, metadata_file):
+     """Parse metadata with error handling."""
+     try:
+         metadata = parse_cookbook_metadata(str(metadata_file))
+         return _extract_cookbook_info(metadata, cookbook, METADATA_STATUS_YES)
+     except Exception as e:
+         return _create_error_entry(cookbook, str(e))
+
+
+ def _extract_cookbook_info(metadata, cookbook, metadata_status):
+     """Extract key information from cookbook metadata."""
+     name = metadata.get("name", cookbook.name)
+     version = metadata.get("version", "Unknown")
+     maintainer = metadata.get("maintainer", "Unknown")
+     description = _normalize_description(metadata.get("description", "No description"))
+     dependencies = len(metadata.get("depends", []))
+
+     return {
+         "Name": name,
+         "Version": version,
+         "Maintainer": maintainer,
+         "Description": _truncate_description(description),
+         "Dependencies": dependencies,
+         "Path": str(cookbook),
+         METADATA_COLUMN_NAME: metadata_status,
+     }
+
+
+ def _normalize_description(description: Any) -> str:
+     """
+     Normalize description to string format.
+
+     The metadata parser currently returns a string for the description
+     field, but this helper defensively converts any unexpected value to
+     a string to keep the UI resilient to future changes.
+     """
+     if not isinstance(description, str):
+         return str(description)
+     return description
+
+
+ def _truncate_description(description):
+     """Truncate description if it is too long."""
+     if len(description) > 50:
+         return description[:50] + "..."
+     return description
+
+
+ def _create_error_entry(cookbook, error_message):
+     """Create an entry for cookbooks with parsing errors."""
+     return {
+         "Name": cookbook.name,
+         "Version": "Error",
+         "Maintainer": "Error",
+         "Description": f"Parse error: {error_message[:50]}",
+         "Dependencies": 0,
+         "Path": str(cookbook),
+         METADATA_COLUMN_NAME: METADATA_STATUS_NO,
+     }
+
+
+ def _create_no_metadata_entry(cookbook):
+     """Create an entry for cookbooks without metadata."""
+     return {
+         "Name": cookbook.name,
+         "Version": "No metadata",
+         "Maintainer": "Unknown",
+         "Description": "No metadata.rb found",
+         "Dependencies": 0,
+         "Path": str(cookbook),
+         METADATA_COLUMN_NAME: METADATA_STATUS_NO,
+     }
+
+
+ def _display_cookbook_table(cookbook_data):
+     """Display the cookbook data in a table."""
+     df = pd.DataFrame(cookbook_data)
+     # width="stretch" matches _display_results_table and replaces the
+     # deprecated use_container_width=True
+     st.dataframe(df, width="stretch")
+
+
+ def _handle_cookbook_selection(cookbook_path: str, cookbook_data: list):
+     """Handle selection of cookbooks for analysis."""
+     st.subheader("Select Cookbooks to Analyse")
+
+     # Create a multiselect widget for cookbook selection
+     cookbook_names = [cookbook["Name"] for cookbook in cookbook_data]
+     selected_cookbooks = st.multiselect(
+         "Choose cookbooks to analyse:",
+         options=cookbook_names,
+         default=[],  # No default selection
+         help="Select one or more cookbooks to analyse for migration to Ansible",
+     )
+
+     # Show selection summary
+     if selected_cookbooks:
+         st.info(f"Selected {len(selected_cookbooks)} cookbook(s) for analysis")
+
+         # Analyse button
+         if st.button("Analyse Selected Cookbooks", type="primary"):
+             analyse_selected_cookbooks(cookbook_path, selected_cookbooks)
+     else:
+         st.info("Please select at least one cookbook to analyse")
+
+
+ def _handle_dashboard_upload():
+     """Handle file uploaded from the dashboard."""
+     # Create a file-like object from the stored data
+     file_data = st.session_state.uploaded_file_data
+     file_name = st.session_state.uploaded_file_name
+
+     # Create a file-like object that mimics the UploadedFile interface
+     class MockUploadedFile:
+         def __init__(self, data, name, mime_type):
+             self.data = data
+             self.name = name
+             self.type = mime_type
+
+         def getbuffer(self):
+             return self.data
+
+         def getvalue(self):
+             return self.data
+
+     mock_file = MockUploadedFile(
+         file_data, file_name, st.session_state.uploaded_file_type
+     )
+
+     # Display upload info
+     st.info(f"📁 Using file uploaded from Dashboard: {file_name}")
+
+     # Add option to clear and upload a different file
+     col1, col2 = st.columns([1, 1])
+     with col1:
+         if st.button(
+             "Use Different File", help="Clear this file and upload a different one"
+         ):
+             # Clear the uploaded file from session state
+             del st.session_state.uploaded_file_data
+             del st.session_state.uploaded_file_name
+             del st.session_state.uploaded_file_type
+             st.rerun()
+
+     with col2:
+         if st.button("Back to Dashboard", help="Return to dashboard"):
+             st.session_state.current_page = "Dashboard"
+             st.rerun()
+
+     # Process the file
+     try:
+         with st.spinner("Extracting archive..."):
+             temp_dir, cookbook_path = extract_archive(mock_file)
+             # Store temp_dir in session state to prevent premature cleanup
+             st.session_state.temp_dir = temp_dir
+         st.success("Archive extracted successfully!")
+
+         # Validate and list cookbooks
+         if cookbook_path:
+             _validate_and_list_cookbooks(cookbook_path)
+
+     except Exception as e:
+         st.error(f"Failed to process uploaded file: {e}")
+         # Clear the uploaded file on error
+         if "uploaded_file_data" in st.session_state:
+             del st.session_state.uploaded_file_data
+             del st.session_state.uploaded_file_name
+             del st.session_state.uploaded_file_type
+
+
+ def _display_instructions():
+     """Display usage instructions."""
+     with st.expander("How to Use"):
+         st.markdown("""
+         ## Input Methods
+
+         ### Directory Path
+         1. **Enter Cookbook Path**: Provide a relative path, or an absolute path
+            inside the workspace (paths containing '..' are rejected)
+         2. **Review Cookbooks**: The interface will list all cookbooks with metadata
+         3. **Select Cookbooks**: Choose which cookbooks to analyse
+         4. **Run Analysis**: Click "Analyse Selected Cookbooks" to get detailed insights
+
+         **Path Examples:**
+         - `cookbooks/` - subdirectory in current workspace
+         - `./my-cookbooks/` - explicit current directory
+         - `/workspaces/souschef/cookbooks/` - absolute path inside the workspace
+
+         ### Archive Upload
+         1. **Upload Archive**: Upload a ZIP or TAR archive containing your cookbooks
+         2. **Automatic Extraction**: The system will extract and analyse the archive
+         3. **Review Cookbooks**: Interface will list all cookbooks found in archive
+         4. **Select Cookbooks**: Choose which cookbooks to analyse
+         5. **Run Analysis**: Click "Analyse Selected Cookbooks" to get insights
+
+         ## Expected Structure
+         ```
+         cookbooks/ or archive.zip/
+         ├── nginx/
+         │   ├── metadata.rb
+         │   ├── recipes/
+         │   └── attributes/
+         ├── apache2/
+         │   └── metadata.rb
+         └── mysql/
+             └── metadata.rb
+         ```
+
+         ## Supported Archive Formats
+         - ZIP (.zip)
+         - TAR (.tar)
+         - GZIP-compressed TAR (.tar.gz, .tgz)
+         """)
+
+
+ def analyse_selected_cookbooks(cookbook_path: str, selected_cookbooks: list[str]):
+     """Analyse the selected cookbooks and store results in session state."""
+     st.subheader("Analysis Results")
+
+     progress_bar, status_text = _setup_analysis_progress()
+     results = _perform_cookbook_analysis(
+         cookbook_path, selected_cookbooks, progress_bar, status_text
+     )
+
+     _cleanup_progress_indicators(progress_bar, status_text)
+
+     # Store results in session state
+     st.session_state.analysis_results = results
+     st.session_state.analysis_cookbook_path = cookbook_path
+     st.session_state.total_cookbooks = len(selected_cookbooks)
+
+     # Trigger rerun to display results
+     st.rerun()
+
+
+ def _setup_analysis_progress():
+     """Set up progress tracking for analysis."""
+     progress_bar = st.progress(0)
+     status_text = st.empty()
+     return progress_bar, status_text
+
+
+ def _perform_cookbook_analysis(
+     cookbook_path, selected_cookbooks, progress_bar, status_text
+ ):
+     """Perform analysis on selected cookbooks."""
+     results = []
+     total = len(selected_cookbooks)
+
+     for i, cookbook_name in enumerate(selected_cookbooks):
+         _update_progress(status_text, cookbook_name, i + 1, total)
+         progress_bar.progress((i + 1) / total)
+
+         cookbook_dir = _find_cookbook_directory(cookbook_path, cookbook_name)
+         if cookbook_dir:
+             analysis_result = _analyse_single_cookbook(cookbook_name, cookbook_dir)
+             results.append(analysis_result)
+
+     return results
+
+
+ def _update_progress(status_text, cookbook_name, current, total):
+     """Update progress display."""
+     status_text.text(f"Analysing {cookbook_name}... ({current}/{total})")
+
+
+ def _find_cookbook_directory(cookbook_path, cookbook_name):
+     """Find the directory for a specific cookbook."""
+     for d in Path(cookbook_path).iterdir():
+         if d.is_dir() and d.name == cookbook_name:
+             return d
+     return None
+
+
+ def _analyse_single_cookbook(cookbook_name, cookbook_dir):
+     """Analyse a single cookbook."""
+     try:
+         assessment = parse_chef_migration_assessment(str(cookbook_dir))
+         metadata = parse_cookbook_metadata(str(cookbook_dir / METADATA_FILENAME))
+
+         return _create_successful_analysis(
+             cookbook_name, cookbook_dir, assessment, metadata
+         )
+     except Exception as e:
+         return _create_failed_analysis(cookbook_name, cookbook_dir, str(e))
+
+
+ def _create_successful_analysis(cookbook_name, cookbook_dir, assessment, metadata):
+     """Create analysis result for a successful analysis."""
+     return {
+         "name": cookbook_name,
+         "path": str(cookbook_dir),
+         "version": metadata.get("version", "Unknown"),
+         "maintainer": metadata.get("maintainer", "Unknown"),
+         "description": metadata.get("description", "No description"),
+         "dependencies": len(metadata.get("depends", [])),
+         "complexity": assessment.get("complexity", "Unknown"),
+         "estimated_hours": assessment.get("estimated_hours", 0),
+         "recommendations": assessment.get("recommendations", ""),
+         "status": ANALYSIS_STATUS_ANALYSED,
+     }
+
+
+ def _create_failed_analysis(cookbook_name, cookbook_dir, error_message):
+     """Create analysis result for a failed analysis."""
+     return {
+         "name": cookbook_name,
+         "path": str(cookbook_dir),
+         "version": "Error",
+         "maintainer": "Error",
+         "description": f"Analysis failed: {error_message}",
+         "dependencies": 0,
+         "complexity": "Error",
+         "estimated_hours": 0,
+         "recommendations": f"Error: {error_message}",
+         "status": ANALYSIS_STATUS_FAILED,
+     }
+
+
+ def _cleanup_progress_indicators(progress_bar, status_text):
+     """Clean up progress indicators."""
+     progress_bar.empty()
+     status_text.empty()
+
+
+ def _display_analysis_results(results, total_cookbooks):
+     """Display the complete analysis results."""
+     # Add a back button to return to analysis selection
+     col1, col2 = st.columns([1, 4])
+     with col1:
+         if st.button("⬅️ Analyse More Cookbooks", help="Return to cookbook selection"):
+             # Clear session state to go back to selection
+             st.session_state.analysis_results = None
+             st.session_state.analysis_cookbook_path = None
+             st.session_state.total_cookbooks = 0
+             # Clean up temporary directory when going back
+             if st.session_state.temp_dir and st.session_state.temp_dir.exists():
+                 shutil.rmtree(st.session_state.temp_dir, ignore_errors=True)
+             st.session_state.temp_dir = None
+             st.rerun()
+
+     with col2:
+         st.subheader("Analysis Results")
+
+     _display_analysis_summary(results, total_cookbooks)
+     _display_results_table(results)
+     _display_detailed_analysis(results)
+     _display_download_option(results)
+
+
+ def _display_download_option(results):
+     """Display download options for analysis results."""
+     st.subheader("Download Options")
+
+     successful_results = [r for r in results if r["status"] == ANALYSIS_STATUS_ANALYSED]
+
+     if not successful_results:
+         st.info("No successfully analysed cookbooks available for download.")
+         return
+
+     col1, _col2 = st.columns(2)
+
+     with col1:
+         # Download analysis report
+         analysis_data = _create_analysis_report(results)
+         st.download_button(
+             label="Download Analysis Report",
+             data=analysis_data,
+             file_name="cookbook_analysis_report.json",
+             mime="application/json",
+             help="Download detailed analysis results as JSON",
+         )
+
+     # Convert to Ansible Playbooks button - kept outside the columns for
+     # better reliability
+     if st.button(
+         "Convert to Ansible Playbooks",
+         type="primary",
+         help="Convert analysed cookbooks to Ansible playbooks and download as ZIP",
+     ):
+         # Check AI configuration status
+         ai_config = load_ai_settings()
+         ai_available = (
+             ai_config.get("provider")
+             and ai_config.get("provider") != LOCAL_PROVIDER
+             and ai_config.get("api_key")
+         )
+
+         if ai_available:
+             provider = ai_config.get("provider", "Unknown")
+             model = ai_config.get("model", "Unknown")
+             st.info(f"🤖 Using AI-enhanced conversion with {provider} ({model})")
+         else:
+             st.info(
+                 "⚙️ Using deterministic conversion. Configure AI settings "
+                 "for enhanced results."
+             )
+
+         _convert_and_download_playbooks(results)
+
+
+ def _display_analysis_summary(results, total_cookbooks):
+     """Display summary metrics for the analysis."""
+     col1, col2, col3 = st.columns(3)
+
+     with col1:
+         successful = len(
+             [r for r in results if r["status"] == ANALYSIS_STATUS_ANALYSED]
+         )
+         st.metric("Successfully Analysed", f"{successful}/{total_cookbooks}")
+
+     with col2:
+         total_hours = sum(r.get("estimated_hours", 0) for r in results)
+         st.metric("Total Estimated Hours", f"{total_hours:.1f}")
+
+     with col3:
+         complexities = [r.get("complexity", "Unknown") for r in results]
+         high_complexity = complexities.count("High")
+         st.metric("High Complexity Cookbooks", high_complexity)
+
+
+ def _display_results_table(results):
+     """Display results in a table format."""
+     df = pd.DataFrame(results)
+     st.dataframe(df, width="stretch")
+
+
+ def _display_detailed_analysis(results):
+     """Display detailed analysis for each cookbook."""
+     st.subheader("Detailed Analysis")
+
+     for result in results:
+         if result["status"] == ANALYSIS_STATUS_ANALYSED:
+             _display_single_cookbook_details(result)
+
+
+ def _display_single_cookbook_details(result):
+     """Display detailed analysis for a single cookbook."""
+     with st.expander(f"{result['name']} - {result['complexity']} Complexity"):
+         col1, col2 = st.columns(2)
+
+         with col1:
+             st.write(f"**Version:** {result['version']}")
+             st.write(f"**Maintainer:** {result['maintainer']}")
+             st.write(f"**Dependencies:** {result['dependencies']}")
+
+         with col2:
+             st.write(f"**Estimated Hours:** {result['estimated_hours']:.1f}")
+             st.write(f"**Complexity:** {result['complexity']}")
+
+         st.write(f"**Recommendations:** {result['recommendations']}")
+
+
+ def _convert_and_download_playbooks(results):
+     """Convert analysed cookbooks to Ansible playbooks and provide download."""
+     successful_results = [r for r in results if r["status"] == ANALYSIS_STATUS_ANALYSED]
+
+     if not successful_results:
+         st.warning("No successfully analysed cookbooks to convert.")
+         return
+
+     with st.spinner("Converting cookbooks to Ansible playbooks..."):
+         playbooks = []
+
+         for result in successful_results:
+             playbook_data = _convert_single_cookbook(result)
+             if playbook_data:
+                 playbooks.append(playbook_data)
+
+         if playbooks:
+             # Save converted playbooks to temporary directory for validation
+             try:
+                 output_dir = Path(tempfile.mkdtemp(prefix="souschef_converted_"))
+                 for playbook in playbooks:
+                     # Name each playbook file after its cookbook
+                     filename = f"{playbook['cookbook_name']}.yml"
+                     (output_dir / filename).write_text(playbook["playbook_content"])
+
+                 # Store path in session state for validation page
+                 st.session_state.converted_playbooks_path = str(output_dir)
+                 st.success("Playbooks converted and staged for validation.")
+             except Exception as e:
+                 st.warning(f"Could not stage playbooks for validation: {e}")
+
+         _handle_playbook_download(playbooks)
+
+
+ def _convert_single_cookbook(result):
+     """Convert a single cookbook to an Ansible playbook."""
+     cookbook_dir = Path(result["path"])
+     recipe_file = _find_recipe_file(cookbook_dir, result["name"])
+
+     if not recipe_file:
+         return None
+
+     try:
+         # Check if AI-enhanced conversion is available and enabled
+         ai_config = load_ai_settings()
+         use_ai = (
+             ai_config.get("provider")
+             and ai_config.get("provider") != LOCAL_PROVIDER
+             and ai_config.get("api_key")
+         )
+
+         if use_ai:
+             # Use AI-enhanced conversion
+             # Map provider display names to API provider strings
+             provider_mapping = {
+                 "Anthropic Claude": "anthropic",
+                 "Anthropic (Claude)": "anthropic",
+                 "OpenAI": "openai",
+                 "OpenAI (GPT)": "openai",
+                 "IBM Watsonx": "watson",
+                 "Red Hat Lightspeed": "lightspeed",
+             }
+             provider_name = ai_config.get("provider", "")
+             ai_provider = provider_mapping.get(
+                 provider_name, provider_name.lower().replace(" ", "_")
+             )
+
+             playbook_content = generate_playbook_from_recipe_with_ai(
+                 str(recipe_file),
+                 ai_provider=ai_provider,
+                 api_key=ai_config.get("api_key", ""),
+                 model=ai_config.get("model", "claude-3-5-sonnet-20241022"),
+                 temperature=ai_config.get("temperature", 0.7),
+                 max_tokens=ai_config.get("max_tokens", 4000),
+                 project_id=ai_config.get("project_id", ""),
+                 base_url=ai_config.get("base_url", ""),
+             )
+         else:
+             # Use deterministic conversion
+             playbook_content = generate_playbook_from_recipe(str(recipe_file))
+
+         if not playbook_content.startswith("Error"):
+             return {
+                 "cookbook_name": result["name"],
+                 "playbook_content": playbook_content,
+                 "recipe_file": recipe_file.name,
+                 "conversion_method": "AI-enhanced" if use_ai else "Deterministic",
+             }
+         else:
+             st.warning(f"Failed to convert {result['name']}: {playbook_content}")
+             return None
+     except Exception as e:
+         st.warning(f"Failed to convert {result['name']}: {e}")
+         return None
+
+
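+ # Provider mapping behaviour (illustrative): configured display names map to
+ # API identifiers, and unknown names fall back to a snake_case guess:
+ #
+ #     "Anthropic (Claude)" -> "anthropic"
+ #     "Red Hat Lightspeed" -> "lightspeed"
+ #     "Some New Provider"  -> "some_new_provider"  (fallback path)
+
+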
+ def _find_recipe_file(cookbook_dir, cookbook_name):
+     """Find the appropriate recipe file for a cookbook."""
+     recipes_dir = cookbook_dir / "recipes"
+     if not recipes_dir.exists():
+         st.warning(f"No recipes directory found in {cookbook_name}")
+         return None
+
+     recipe_files = list(recipes_dir.glob("*.rb"))
+     if not recipe_files:
+         st.warning(f"No recipe files found in {cookbook_name}")
+         return None
+
+     # Use the default.rb recipe if available, otherwise the first recipe
+     default_recipe = recipes_dir / "default.rb"
+     return default_recipe if default_recipe.exists() else recipe_files[0]
+
+
+ def _handle_playbook_download(playbooks):
+     """Handle the download of generated playbooks."""
+     if not playbooks:
+         st.error("No playbooks were successfully generated.")
+         return
+
+     # Create ZIP archive with all playbooks
+     playbook_archive = _create_playbook_archive(playbooks)
+
+     st.success(
+         f"Successfully converted {len(playbooks)} cookbooks to Ansible playbooks!"
+     )
+
+     # Provide download button
+     st.download_button(
+         label="Download Ansible Playbooks",
+         data=playbook_archive,
+         file_name="ansible_playbooks.zip",
+         mime="application/zip",
+         help="Download ZIP archive containing all generated Ansible playbooks",
+     )
+
+     # Show preview of generated playbooks
+     with st.expander("Preview Generated Playbooks"):
+         for playbook in playbooks:
+             conversion_badge = (
+                 "🤖 AI-Enhanced"
+                 if playbook.get("conversion_method") == "AI-enhanced"
+                 else "⚙️ Deterministic"
+             )
+             st.subheader(
+                 f"{playbook['cookbook_name']} ({conversion_badge}) - "
+                 f"from {playbook['recipe_file']}"
+             )
+             st.code(
+                 playbook["playbook_content"][:1000] + "..."
+                 if len(playbook["playbook_content"]) > 1000
+                 else playbook["playbook_content"],
+                 language="yaml",
+             )
+             st.divider()
+
+
+ def _create_playbook_archive(playbooks):
+     """Create a ZIP archive containing all generated Ansible playbooks."""
+     zip_buffer = io.BytesIO()
+
+     with zipfile.ZipFile(zip_buffer, "w", zipfile.ZIP_DEFLATED) as zip_file:
+         # Add individual playbook files
+         for playbook in playbooks:
+             playbook_filename = f"{playbook['cookbook_name']}.yml"
+             zip_file.writestr(playbook_filename, playbook["playbook_content"])
+
+         # Add a summary README
+         readme_content = f"""# Ansible Playbooks Generated by SousChef
+
+ This archive contains {len(playbooks)} Ansible playbooks converted from Chef cookbooks.
+
+ ## Contents:
+ """
+
+         for playbook in playbooks:
+             conversion_method = playbook.get("conversion_method", "Deterministic")
+             readme_content += (
+                 f"- {playbook['cookbook_name']}.yml "
+                 f"(converted from {playbook['recipe_file']}, "
+                 f"method: {conversion_method})\n"
+             )
+
+         readme_content += """
+ ## Usage:
+ Run these playbooks with Ansible:
+
+     ansible-playbook <playbook_name>.yml
+
+ ## Notes:
+ - These playbooks were automatically generated from Chef recipes
+ - Review and test the playbooks before using in production
+ - Some manual adjustments may be required for complex recipes
+ """
+
+         zip_file.writestr("README.md", readme_content)
+
+     zip_buffer.seek(0)
+     return zip_buffer.getvalue()
+
+
+ def _create_analysis_report(results):
+     """Create a JSON report of the analysis results."""
+     report = {
+         "analysis_summary": {
+             "total_cookbooks": len(results),
+             "successful_analyses": len(
+                 [r for r in results if r["status"] == ANALYSIS_STATUS_ANALYSED]
+             ),
+             "total_estimated_hours": sum(r.get("estimated_hours", 0) for r in results),
+             "high_complexity_count": len(
+                 [r for r in results if r.get("complexity") == "High"]
+             ),
+         },
+         "cookbook_details": results,
+         "generated_at": str(pd.Timestamp.now()),
+     }
+
+     return json.dumps(report, indent=2)
+
+
+ if __name__ == "__main__":
+     show_cookbook_analysis_page()