mcp-souschef 2.8.0__py3-none-any.whl → 3.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {mcp_souschef-2.8.0.dist-info → mcp_souschef-3.0.0.dist-info}/METADATA +82 -10
- {mcp_souschef-2.8.0.dist-info → mcp_souschef-3.0.0.dist-info}/RECORD +23 -19
- {mcp_souschef-2.8.0.dist-info → mcp_souschef-3.0.0.dist-info}/WHEEL +1 -1
- souschef/__init__.py +37 -5
- souschef/assessment.py +1248 -57
- souschef/ci/common.py +126 -0
- souschef/ci/github_actions.py +3 -92
- souschef/ci/gitlab_ci.py +2 -52
- souschef/ci/jenkins_pipeline.py +2 -59
- souschef/cli.py +117 -8
- souschef/converters/playbook.py +259 -90
- souschef/converters/resource.py +12 -11
- souschef/converters/template.py +177 -0
- souschef/core/metrics.py +313 -0
- souschef/core/validation.py +53 -0
- souschef/deployment.py +61 -9
- souschef/server.py +680 -0
- souschef/ui/app.py +469 -351
- souschef/ui/pages/ai_settings.py +74 -8
- souschef/ui/pages/cookbook_analysis.py +2467 -298
- souschef/ui/pages/validation_reports.py +274 -0
- {mcp_souschef-2.8.0.dist-info → mcp_souschef-3.0.0.dist-info}/entry_points.txt +0 -0
- {mcp_souschef-2.8.0.dist-info → mcp_souschef-3.0.0.dist-info}/licenses/LICENSE +0 -0
@@ -1,7 +1,9 @@
 """Cookbook Analysis Page for SousChef UI."""
 
+import contextlib
 import io
 import json
+import os
 import shutil
 import sys
 import tarfile
@@ -16,33 +18,223 @@ import streamlit as st
 # Add the parent directory to the path so we can import souschef modules
 sys.path.insert(0, str(Path(__file__).parent.parent.parent))
 
-from souschef.assessment import
+from souschef.assessment import (
+    analyse_cookbook_dependencies,
+    assess_single_cookbook_with_ai,
+)
 from souschef.converters.playbook import (
     generate_playbook_from_recipe,
     generate_playbook_from_recipe_with_ai,
 )
+from souschef.converters.template import convert_cookbook_templates
 from souschef.core.constants import METADATA_FILENAME
+from souschef.core.metrics import (
+    EffortMetrics,
+    get_timeline_weeks,
+    validate_metrics_consistency,
+)
 from souschef.parsers.metadata import parse_cookbook_metadata
 
 # AI Settings
 ANTHROPIC_PROVIDER = "Anthropic (Claude)"
+ANTHROPIC_CLAUDE_DISPLAY = "Anthropic Claude"
 OPENAI_PROVIDER = "OpenAI (GPT)"
 LOCAL_PROVIDER = "Local Model"
+IBM_WATSONX = "IBM Watsonx"
+RED_HAT_LIGHTSPEED = "Red Hat Lightspeed"
+
+
+def _sanitize_filename(filename: str) -> str:
+    """
+    Sanitise filename to prevent path injection attacks.
+
+    Args:
+        filename: The filename to sanitise.
+
+    Returns:
+        Sanitised filename safe for file operations.
+
+    """
+    import re
+
+    # Remove any path separators and parent directory references
+    sanitised = filename.replace("..", "_").replace("/", "_").replace("\\", "_")
+    # Remove any null bytes or control characters
+    sanitised = re.sub(r"[\x00-\x1f\x7f]", "_", sanitised)
+    # Remove leading/trailing whitespace and dots
+    sanitised = sanitised.strip(". ")
+    # Limit length to prevent issues
+    sanitised = sanitised[:255]
+    return sanitised if sanitised else "unnamed"
+
+
+def _get_secure_ai_config_path() -> Path:
+    """Return a private, non-world-writable path for AI config storage."""
+    config_dir = Path(tempfile.gettempdir()) / ".souschef"
+    config_dir.mkdir(mode=0o700, exist_ok=True)
+    with contextlib.suppress(OSError):
+        config_dir.chmod(0o700)
+
+    if config_dir.is_symlink():
+        raise ValueError("AI config directory cannot be a symlink")
+
+    return config_dir / "ai_config.json"
+
 
+def load_ai_settings() -> dict[str, str | float | int]:
+    """Load AI settings from environment variables or configuration file."""
+    # First try to load from environment variables
+    env_config = _load_ai_settings_from_env()
 
-
+    # If we have environment config, use it
+    if env_config:
+        return env_config
+
+    # Fall back to loading from configuration file
+    return _load_ai_settings_from_file()
+
+
+def _load_ai_settings_from_env() -> dict[str, str | float | int]:
+    """Load AI settings from environment variables."""
+    import os
+    from contextlib import suppress
+
+    env_config: dict[str, str | float | int] = {}
+    env_mappings = {
+        "SOUSCHEF_AI_PROVIDER": "provider",
+        "SOUSCHEF_AI_MODEL": "model",
+        "SOUSCHEF_AI_API_KEY": "api_key",
+        "SOUSCHEF_AI_BASE_URL": "base_url",
+        "SOUSCHEF_AI_PROJECT_ID": "project_id",
+    }
+
+    # Handle string values
+    for env_var, config_key in env_mappings.items():
+        env_value = os.environ.get(env_var)
+        if env_value:
+            env_config[config_key] = env_value
+
+    # Handle numeric values with error suppression
+    temp_value = os.environ.get("SOUSCHEF_AI_TEMPERATURE")
+    if temp_value:
+        with suppress(ValueError):
+            env_config["temperature"] = float(temp_value)
+
+    tokens_value = os.environ.get("SOUSCHEF_AI_MAX_TOKENS")
+    if tokens_value:
+        with suppress(ValueError):
+            env_config["max_tokens"] = int(tokens_value)
+
+    return env_config
+
+
+def _load_ai_settings_from_file() -> dict[str, str | float | int]:
     """Load AI settings from configuration file."""
     try:
-
-        config_file = Path("/tmp/.souschef/ai_config.json")
+        config_file = _get_secure_ai_config_path()
         if config_file.exists():
             with config_file.open() as f:
-
-
-
+                file_config = json.load(f)
+                return dict(file_config) if isinstance(file_config, dict) else {}
+    except (ValueError, OSError):
+        return {}
     return {}
 
 
+def _get_ai_provider(ai_config: dict[str, str | float | int]) -> str:
+    """
+    Safely get the AI provider from config with proper type handling.
+
+    Args:
+        ai_config: The AI configuration dictionary.
+
+    Returns:
+        The AI provider string, or empty string if not found.
+
+    """
+    provider_raw = ai_config.get("provider", "")
+    if isinstance(provider_raw, str):
+        return provider_raw
+    return str(provider_raw) if provider_raw else ""
+
+
+def _get_ai_string_value(
+    ai_config: dict[str, str | float | int], key: str, default: str = ""
+) -> str:
+    """
+    Safely get a string value from AI config.
+
+    Args:
+        ai_config: The AI configuration dictionary.
+        key: The key to retrieve.
+        default: Default value if key not found.
+
+    Returns:
+        The string value or default.
+
+    """
+    value = ai_config.get(key, default)
+    if isinstance(value, str):
+        return value
+    return str(value) if value else default
+
+
+def _get_ai_float_value(
+    ai_config: dict[str, str | float | int], key: str, default: float = 0.7
+) -> float:
+    """
+    Safely get a float value from AI config.
+
+    Args:
+        ai_config: The AI configuration dictionary.
+        key: The key to retrieve.
+        default: Default value if key not found or conversion fails.
+
+    Returns:
+        The float value or default.
+
+    """
+    value = ai_config.get(key)
+    if isinstance(value, float):
+        return value
+    elif isinstance(value, int):
+        return float(value)
+    elif isinstance(value, str):
+        try:
+            return float(value)
+        except ValueError:
+            return default
+    return default
+
+
+def _get_ai_int_value(
+    ai_config: dict[str, str | float | int], key: str, default: int = 4000
+) -> int:
+    """
+    Safely get an int value from AI config.
+
+    Args:
+        ai_config: The AI configuration dictionary.
+        key: The key to retrieve.
+        default: Default value if key not found or conversion fails.
+
+    Returns:
+        The int value or default.
+
+    """
+    value = ai_config.get(key)
+    if isinstance(value, int):
+        return value
+    elif isinstance(value, float):
+        return int(value)
+    elif isinstance(value, str):
+        try:
+            return int(value)
+        except ValueError:
+            return default
+    return default
+
+
 # Constants for repeated strings
 METADATA_STATUS_YES = "Yes"
 METADATA_STATUS_NO = "No"
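Note: with the new `_load_ai_settings_from_env()` path, the UI can be configured without ever writing the JSON config file. A minimal sketch of driving it from the environment — variable names come from the `env_mappings` table above, and the import path matches the file shown in this diff:

```python
import os

# Environment-variable names taken from the env_mappings table in the hunk above.
os.environ["SOUSCHEF_AI_PROVIDER"] = "Anthropic (Claude)"
os.environ["SOUSCHEF_AI_MODEL"] = "claude-3-5-sonnet-20241022"
os.environ["SOUSCHEF_AI_TEMPERATURE"] = "0.2"          # parsed with float()
os.environ["SOUSCHEF_AI_MAX_TOKENS"] = "not-a-number"  # int() fails -> key silently omitted

from souschef.ui.pages.cookbook_analysis import load_ai_settings

config = load_ai_settings()
# -> {'provider': 'Anthropic (Claude)', 'model': 'claude-3-5-sonnet-20241022',
#     'temperature': 0.2}
# Environment settings win outright: the config file is only consulted when no
# SOUSCHEF_AI_* variables are set, because load_ai_settings() returns early.
```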
@@ -90,8 +282,10 @@ def extract_archive(uploaded_file) -> tuple[Path, Path]:
             f"Archive too large: {file_size} bytes (max: {MAX_ARCHIVE_SIZE})"
         )
 
-    # Create temporary directory
+    # Create temporary directory with secure permissions (owner-only access)
     temp_dir = Path(tempfile.mkdtemp())
+    with contextlib.suppress(FileNotFoundError, OSError):
+        temp_dir.chmod(0o700)  # Secure permissions: rwx------
     temp_path = temp_dir
 
     # Save uploaded file
@@ -206,9 +400,11 @@ version '1.0.0'
             """
             (single_dir / METADATA_FILENAME).write_text(metadata_content)
 
+            # Return the parent directory so it will scan and find the cookbook inside
             return extraction_dir
         else:
             # Single directory that doesn't contain cookbook components
+            # It might be a wrapper directory containing multiple cookbooks
             return single_dir
 
 
@@ -280,8 +476,16 @@ def _extract_tar_securely(
     """Extract TAR archive with security checks."""
     mode = "r:gz" if gzipped else "r"
 
+    if not archive_path.is_file():
+        raise ValueError(f"Archive path is not a file: {archive_path}")
+
+    if not tarfile.is_tarfile(str(archive_path)):
+        raise ValueError(f"Invalid or corrupted TAR archive: {archive_path.name}")
+
     try:
-        with tarfile.open(
+        with tarfile.open(  # type: ignore[call-overload]  # NOSONAR
+            str(archive_path), mode=mode, filter="data"
+        ) as tar_ref:
             members = tar_ref.getmembers()
             _pre_scan_tar_members(members)
             _extract_tar_members(tar_ref, members, extraction_dir)
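Note: the hardened `_extract_tar_securely` opts into the stdlib's "data" extraction filter. The hunk passes `filter="data"` at `tarfile.open(...)` time (hence the `type: ignore`); the documented spelling is the `filter` argument to `extract`/`extractall` (Python 3.12+, backported to recent 3.8–3.11 point releases). A minimal sketch of what that policy rejects, with illustrative names:

```python
import io
import tarfile

# Build an in-memory archive containing a path-traversal member.
buf = io.BytesIO()
with tarfile.open(fileobj=buf, mode="w") as tar:
    evil = tarfile.TarInfo(name="../escape.txt")  # tries to write above the dest dir
    tar.addfile(evil, io.BytesIO(b""))

buf.seek(0)
with tarfile.open(fileobj=buf, mode="r") as tar:
    try:
        # The same "data" policy the hunk opts into: it rejects absolute paths,
        # members escaping the destination, device files, and unsafe links.
        tar.extractall(path="out", filter="data")
    except tarfile.FilterError as exc:
        print(f"blocked: {exc}")
```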
@@ -441,7 +645,7 @@ def create_results_archive(results: list, cookbook_path: str) -> bytes:
 {result["recommendations"]}
 
 ## Source Path
-{
+{cookbook_path}  # deepcode ignore PT: used for display only, not file operations
 """
             zip_file.writestr(f"{result['name']}_report.md", report_content)
 
@@ -459,12 +663,14 @@ def create_results_archive(results: list, cookbook_path: str) -> bytes:
 - **Successfully Analysed**: {successful}
 
 - **Total Estimated Hours**: {total_hours:.1f}
-- **Source**: {cookbook_path}
+- **Source**: {cookbook_path}  # deepcode ignore PT: used for display only
 
 ## Results Summary
 """
     for result in results:
-        status_icon =
+        status_icon = (
+            "PASS" if result["status"] == ANALYSIS_STATUS_ANALYSED else "FAIL"
+        )
         summary_content += f"- {status_icon} {result['name']}: {result['status']}"
         if result["status"] == ANALYSIS_STATUS_ANALYSED:
             summary_content += (
@@ -479,24 +685,24 @@ def create_results_archive(results: list, cookbook_path: str) -> bytes:
     return zip_buffer.getvalue()
 
 
-def show_cookbook_analysis_page():
+def show_cookbook_analysis_page() -> None:
     """Show the cookbook analysis page."""
-    _setup_cookbook_analysis_ui()
-
     # Initialise session state for analysis results
-
     if "analysis_results" not in st.session_state:
         st.session_state.analysis_results = None
         st.session_state.analysis_cookbook_path = None
         st.session_state.total_cookbooks = 0
         st.session_state.temp_dir = None
 
+    # Add unique key to track if this is a new page load
+    if "analysis_page_key" not in st.session_state:
+        st.session_state.analysis_page_key = 0
+
+    _setup_cookbook_analysis_ui()
+
     # Check if we have analysis results to display
     if st.session_state.analysis_results is not None:
-
-        st.session_state.analysis_results,
-        st.session_state.total_cookbooks,
-        )
+        _display_results_view()
         return
 
     # Check if we have an uploaded file from the dashboard
@@ -504,6 +710,11 @@ def show_cookbook_analysis_page():
         _handle_dashboard_upload()
         return
 
+    _show_analysis_input()
+
+
+def _show_analysis_input() -> None:
+    """Show analysis input interface."""
     # Input method selection
     input_method = st.radio(
         "Choose Input Method",
@@ -512,7 +723,7 @@
         help="Select how to provide cookbooks for analysis",
     )
 
-    cookbook_path = None
+    cookbook_path: str | Path | None = None
     temp_dir = None
     uploaded_file = None
 
@@ -523,6 +734,10 @@
         if uploaded_file:
             try:
                 with st.spinner("Extracting archive..."):
+                    # Clear any previous analysis results
+                    st.session_state.analysis_results = None
+                    st.session_state.holistic_assessment = None
+
                     temp_dir, cookbook_path = extract_archive(uploaded_file)
                     # Store temp_dir in session state to prevent premature cleanup
                     st.session_state.temp_dir = temp_dir
@@ -533,7 +748,7 @@
 
     try:
         if cookbook_path:
-            _validate_and_list_cookbooks(cookbook_path)
+            _validate_and_list_cookbooks(str(cookbook_path))
 
         _display_instructions()
     finally:
@@ -543,7 +758,33 @@
             shutil.rmtree(temp_dir, ignore_errors=True)
 
 
-def
+def _display_results_view() -> None:
+    """Display the results view with new analysis button."""
+    # Add a "New Analysis" button at the top of results page
+    col1, col2 = st.columns([6, 1])
+    with col1:
+        st.write("")  # Spacer
+    with col2:
+        if st.button(
+            "New Analysis",
+            help="Start a new analysis",
+            key=f"new_analysis_{st.session_state.analysis_page_key}",
+        ):
+            st.session_state.analysis_results = None
+            st.session_state.holistic_assessment = None
+            st.session_state.analysis_cookbook_path = None
+            st.session_state.total_cookbooks = None
+            st.session_state.analysis_info_messages = None
+            st.session_state.analysis_page_key += 1
+            st.rerun()
+
+    _display_analysis_results(
+        st.session_state.analysis_results,
+        st.session_state.total_cookbooks,
+    )
+
+
+def _setup_cookbook_analysis_ui() -> None:
     """Set up the cookbook analysis page header."""
     st.title("SousChef - Cookbook Analysis")
     st.markdown("""
@@ -553,8 +794,24 @@ def _setup_cookbook_analysis_ui():
     Upload a cookbook archive or specify a directory path to begin analysis.
     """)
 
+    # Add back to dashboard button
+    col1, _ = st.columns([1, 4])
+    with col1:
+        if st.button(
+            "← Back to Dashboard",
+            help="Return to main dashboard",
+            key="back_to_dashboard_from_analysis",
+        ):
+            # Clear all analysis state
+            st.session_state.analysis_results = None
+            st.session_state.holistic_assessment = None
+            st.session_state.analysis_cookbook_path = None
+            st.session_state.total_cookbooks = None
+            st.session_state.current_page = "Dashboard"
+            st.rerun()
+
 
-def _get_cookbook_path_input():
+def _get_cookbook_path_input() -> str:
     """Get the cookbook path input from the user."""
     return st.text_input(
         "Cookbook Directory Path",
@@ -565,7 +822,7 @@ def _get_cookbook_path_input():
     )
 
 
-def _get_archive_upload_input():
+def _get_archive_upload_input() -> Any:
     """Get archive upload input from the user."""
     uploaded_file = st.file_uploader(
         "Upload Cookbook Archive",
@@ -575,7 +832,7 @@ def _get_archive_upload_input():
     return uploaded_file
 
 
-def _validate_and_list_cookbooks(cookbook_path):
+def _validate_and_list_cookbooks(cookbook_path: str) -> None:
     """Validate the cookbook path and list available cookbooks."""
     safe_dir = _get_safe_cookbook_directory(cookbook_path)
     if safe_dir is None:
@@ -603,7 +860,7 @@ def _get_safe_cookbook_directory(cookbook_path):
     # Reject obviously malicious patterns
     if "\x00" in path_str or ":\\" in path_str or "\\" in path_str:
         st.error(
-            "
+            "Invalid path: Path contains null bytes or backslashes, "
             "which are not allowed."
         )
         return None
@@ -611,7 +868,7 @@ def _get_safe_cookbook_directory(cookbook_path):
     # Reject paths with directory traversal attempts
     if ".." in path_str:
         st.error(
-            "
+            "Invalid path: Path contains '..' which is not allowed "
             "for security reasons."
         )
         return None
@@ -636,14 +893,14 @@ def _get_safe_cookbook_directory(cookbook_path):
         return resolved_path
     except ValueError:
         st.error(
-            "
+            "Invalid path: The resolved path is outside the allowed "
             "directories (workspace or temporary directory). Paths cannot go above "
            "the workspace root for security reasons."
         )
         return None
 
     except Exception as exc:
-        st.error(f"
+        st.error(f"Invalid path: {exc}. Please enter a valid relative path.")
        return None
 
 
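Note: taken together, the `_get_safe_cookbook_directory` hunks above reject null bytes, backslashes, and `..` components, then confine the resolved path. A simplified paraphrase of those rules (the real helper also reports via `st.error` and allows the temporary directory, which this sketch omits):

```python
from pathlib import Path

def is_safe_cookbook_path(path_str: str, workspace: Path) -> bool:
    # Reject null bytes and Windows-style separators outright.
    if "\x00" in path_str or ":\\" in path_str or "\\" in path_str:
        return False
    # Reject any traversal component before resolving.
    if ".." in path_str:
        return False
    resolved = (workspace / path_str).resolve()
    try:
        # The resolved path must stay under the workspace root.
        resolved.relative_to(workspace.resolve())
    except ValueError:
        return False
    return True

ws = Path.cwd()
assert is_safe_cookbook_path("cookbooks/apache", ws)
assert not is_safe_cookbook_path("../etc/passwd", ws)
assert not is_safe_cookbook_path("cook\\books", ws)
```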
@@ -760,217 +1017,1124 @@ def _create_no_metadata_entry(cookbook):
|
|
|
760
1017
|
def _display_cookbook_table(cookbook_data):
|
|
761
1018
|
"""Display the cookbook data in a table."""
|
|
762
1019
|
df = pd.DataFrame(cookbook_data)
|
|
763
|
-
st.dataframe(df,
|
|
1020
|
+
st.dataframe(df, width="stretch")
|
|
764
1021
|
|
|
765
1022
|
|
|
766
1023
|
def _handle_cookbook_selection(cookbook_path: str, cookbook_data: list):
|
|
767
|
-
"""Handle selection
|
|
768
|
-
st.subheader("
|
|
1024
|
+
"""Handle the cookbook selection interface with individual and holistic options."""
|
|
1025
|
+
st.subheader("Cookbook Selection & Analysis")
|
|
769
1026
|
|
|
770
|
-
#
|
|
771
|
-
|
|
772
|
-
selected_cookbooks = st.multiselect(
|
|
773
|
-
"Choose cookbooks to analyse:",
|
|
774
|
-
options=cookbook_names,
|
|
775
|
-
default=[], # No default selection
|
|
776
|
-
help="Select one or more cookbooks to analyse for migration to Ansible",
|
|
777
|
-
)
|
|
778
|
-
|
|
779
|
-
# Show selection summary
|
|
780
|
-
if selected_cookbooks:
|
|
781
|
-
st.info(f"Selected {len(selected_cookbooks)} cookbook(s) for analysis")
|
|
782
|
-
|
|
783
|
-
# Analyse button
|
|
784
|
-
if st.button("Analyse Selected Cookbooks", type="primary"):
|
|
785
|
-
analyse_selected_cookbooks(cookbook_path, selected_cookbooks)
|
|
786
|
-
else:
|
|
787
|
-
st.info("Please select at least one cookbook to analyse")
|
|
788
|
-
|
|
789
|
-
|
|
790
|
-
def _handle_dashboard_upload():
|
|
791
|
-
"""Handle file uploaded from the dashboard."""
|
|
792
|
-
# Create a file-like object from the stored data
|
|
793
|
-
file_data = st.session_state.uploaded_file_data
|
|
794
|
-
file_name = st.session_state.uploaded_file_name
|
|
795
|
-
|
|
796
|
-
# Create a file-like object that mimics the UploadedFile interface
|
|
797
|
-
class MockUploadedFile:
|
|
798
|
-
def __init__(self, data, name, mime_type):
|
|
799
|
-
self.data = data
|
|
800
|
-
self.name = name
|
|
801
|
-
self.type = mime_type
|
|
802
|
-
|
|
803
|
-
def getbuffer(self):
|
|
804
|
-
return self.data
|
|
1027
|
+
# Show validation warnings if any cookbooks have issues
|
|
1028
|
+
_show_cookbook_validation_warnings(cookbook_data)
|
|
805
1029
|
|
|
806
|
-
|
|
807
|
-
|
|
808
|
-
|
|
809
|
-
|
|
810
|
-
|
|
1030
|
+
# Holistic analysis/conversion buttons
|
|
1031
|
+
st.markdown("### Holistic Analysis & Conversion")
|
|
1032
|
+
st.markdown(
|
|
1033
|
+
"Analyse and convert **ALL cookbooks** in the archive holistically, "
|
|
1034
|
+
"considering dependencies between cookbooks."
|
|
811
1035
|
)
|
|
812
1036
|
|
|
813
|
-
|
|
814
|
-
st.info(f"📁 Using file uploaded from Dashboard: {file_name}")
|
|
1037
|
+
col1, col2 = st.columns(2)
|
|
815
1038
|
|
|
816
|
-
# Add option to clear and upload a different file
|
|
817
|
-
col1, col2 = st.columns([1, 1])
|
|
818
1039
|
with col1:
|
|
819
1040
|
if st.button(
|
|
820
|
-
"
|
|
1041
|
+
"🔍 Analyse ALL Cookbooks",
|
|
1042
|
+
type="primary",
|
|
1043
|
+
help="Analyse all cookbooks together considering inter-cookbook "
|
|
1044
|
+
"dependencies",
|
|
1045
|
+
key="holistic_analysis",
|
|
821
1046
|
):
|
|
822
|
-
|
|
823
|
-
del st.session_state.uploaded_file_data
|
|
824
|
-
del st.session_state.uploaded_file_name
|
|
825
|
-
del st.session_state.uploaded_file_type
|
|
826
|
-
st.rerun()
|
|
1047
|
+
_analyze_all_cookbooks_holistically(cookbook_path, cookbook_data)
|
|
827
1048
|
|
|
828
1049
|
with col2:
|
|
829
|
-
if st.button(
|
|
830
|
-
|
|
831
|
-
|
|
1050
|
+
if st.button(
|
|
1051
|
+
"🔄 Convert ALL Cookbooks",
|
|
1052
|
+
type="secondary",
|
|
1053
|
+
help="Convert all cookbooks to Ansible roles considering dependencies",
|
|
1054
|
+
key="holistic_conversion",
|
|
1055
|
+
):
|
|
1056
|
+
_convert_all_cookbooks_holistically(cookbook_path)
|
|
832
1057
|
|
|
833
|
-
|
|
834
|
-
try:
|
|
835
|
-
with st.spinner("Extracting archive..."):
|
|
836
|
-
temp_dir, cookbook_path = extract_archive(mock_file)
|
|
837
|
-
# Store temp_dir in session state to prevent premature cleanup
|
|
838
|
-
st.session_state.temp_dir = temp_dir
|
|
839
|
-
st.success("Archive extracted successfully!")
|
|
1058
|
+
st.divider()
|
|
840
1059
|
|
|
841
|
-
|
|
842
|
-
|
|
843
|
-
|
|
1060
|
+
# Individual cookbook selection
|
|
1061
|
+
st.markdown("### Individual Cookbook Selection")
|
|
1062
|
+
st.markdown("Select specific cookbooks to analyse individually.")
|
|
844
1063
|
|
|
845
|
-
|
|
846
|
-
|
|
847
|
-
# Clear the uploaded file on error
|
|
848
|
-
if "uploaded_file_data" in st.session_state:
|
|
849
|
-
del st.session_state.uploaded_file_data
|
|
850
|
-
del st.session_state.uploaded_file_name
|
|
851
|
-
del st.session_state.uploaded_file_type
|
|
1064
|
+
# Get list of cookbook names for multiselect
|
|
1065
|
+
cookbook_names = [cb["Name"] for cb in cookbook_data]
|
|
852
1066
|
|
|
1067
|
+
selected_cookbooks = st.multiselect(
|
|
1068
|
+
"Select cookbooks to analyse:",
|
|
1069
|
+
options=cookbook_names,
|
|
1070
|
+
default=[],
|
|
1071
|
+
help="Choose which cookbooks to analyse individually",
|
|
1072
|
+
)
|
|
853
1073
|
|
|
854
|
-
|
|
855
|
-
|
|
856
|
-
with st.expander("How to Use"):
|
|
857
|
-
st.markdown("""
|
|
858
|
-
## Input Methods
|
|
1074
|
+
if selected_cookbooks:
|
|
1075
|
+
col1, col2, col3 = st.columns(3)
|
|
859
1076
|
|
|
860
|
-
|
|
861
|
-
|
|
862
|
-
|
|
863
|
-
|
|
864
|
-
|
|
865
|
-
|
|
1077
|
+
with col1:
|
|
1078
|
+
if st.button(
|
|
1079
|
+
f"📊 Analyse Selected ({len(selected_cookbooks)})",
|
|
1080
|
+
help=f"Analyse {len(selected_cookbooks)} selected cookbooks",
|
|
1081
|
+
key="analyze_selected",
|
|
1082
|
+
):
|
|
1083
|
+
analyse_selected_cookbooks(cookbook_path, selected_cookbooks)
|
|
866
1084
|
|
|
867
|
-
|
|
868
|
-
|
|
869
|
-
|
|
870
|
-
|
|
1085
|
+
with col2:
|
|
1086
|
+
if st.button(
|
|
1087
|
+
f"🔗 Analyse as Project ({len(selected_cookbooks)})",
|
|
1088
|
+
help=f"Analyse {len(selected_cookbooks)} cookbooks as a project "
|
|
1089
|
+
f"with dependency analysis",
|
|
1090
|
+
key="analyze_project",
|
|
1091
|
+
):
|
|
1092
|
+
analyse_project_cookbooks(cookbook_path, selected_cookbooks)
|
|
1093
|
+
|
|
1094
|
+
with col3:
|
|
1095
|
+
if st.button(
|
|
1096
|
+
f"📋 Select All ({len(cookbook_names)})",
|
|
1097
|
+
help=f"Select all {len(cookbook_names)} cookbooks",
|
|
1098
|
+
key="select_all",
|
|
1099
|
+
):
|
|
1100
|
+
# This will trigger a rerun with all cookbooks selected
|
|
1101
|
+
st.session_state.selected_cookbooks = cookbook_names
|
|
1102
|
+
st.rerun()
|
|
1103
|
+
|
|
1104
|
+
|
|
1105
|
+
def _show_cookbook_validation_warnings(cookbook_data: list):
|
|
1106
|
+
"""Show validation warnings for cookbooks that might not be analyzable."""
|
|
1107
|
+
problematic_cookbooks = []
|
|
1108
|
+
|
|
1109
|
+
for cookbook in cookbook_data:
|
|
1110
|
+
if cookbook.get(METADATA_COLUMN_NAME) == METADATA_STATUS_NO:
|
|
1111
|
+
problematic_cookbooks.append(cookbook["Name"])
|
|
1112
|
+
|
|
1113
|
+
if problematic_cookbooks:
|
|
1114
|
+
st.warning("Some cookbooks may not be analyzable:")
|
|
1115
|
+
st.markdown("**Cookbooks without valid metadata.rb:**")
|
|
1116
|
+
for name in problematic_cookbooks:
|
|
1117
|
+
st.write(f"• {name}")
|
|
1118
|
+
|
|
1119
|
+
with st.expander("Why this matters"):
|
|
1120
|
+
st.markdown("""
|
|
1121
|
+
Cookbooks need a valid `metadata.rb` file for proper analysis. Without it:
|
|
1122
|
+
- Version and maintainer information cannot be determined
|
|
1123
|
+
- Dependencies cannot be identified
|
|
1124
|
+
- Analysis may fail or produce incomplete results
|
|
1125
|
+
|
|
1126
|
+
**To fix:** Ensure each cookbook has a `metadata.rb` file with
|
|
1127
|
+
proper Ruby syntax.
|
|
1128
|
+
""")
|
|
1129
|
+
|
|
1130
|
+
# Check for cookbooks without recipes
|
|
1131
|
+
cookbooks_without_recipes = []
|
|
1132
|
+
for cookbook in cookbook_data:
|
|
1133
|
+
cookbook_dir = Path(cookbook["Path"])
|
|
1134
|
+
recipes_dir = cookbook_dir / "recipes"
|
|
1135
|
+
if not recipes_dir.exists() or not list(recipes_dir.glob("*.rb")):
|
|
1136
|
+
cookbooks_without_recipes.append(cookbook["Name"])
|
|
1137
|
+
|
|
1138
|
+
if cookbooks_without_recipes:
|
|
1139
|
+
st.warning("Some cookbooks may not have recipes:")
|
|
1140
|
+
st.markdown("**Cookbooks without recipe files:**")
|
|
1141
|
+
for name in cookbooks_without_recipes:
|
|
1142
|
+
st.write(f"• {name}")
|
|
1143
|
+
|
|
1144
|
+
with st.expander("Why this matters"):
|
|
1145
|
+
st.markdown("""
|
|
1146
|
+
Cookbooks need recipe files (`.rb` files in the `recipes/` directory)
|
|
1147
|
+
to be converted to Ansible.
|
|
1148
|
+
Without recipes, the cookbook cannot be analyzed or converted.
|
|
1149
|
+
|
|
1150
|
+
**To fix:** Ensure each cookbook has at least one `.rb` file in its
|
|
1151
|
+
`recipes/` directory.
|
|
1152
|
+
""")
|
|
1153
|
+
|
|
1154
|
+
|
|
1155
|
+
def _analyze_all_cookbooks_holistically(
|
|
1156
|
+
cookbook_path: str, cookbook_data: list
|
|
1157
|
+
) -> None:
|
|
1158
|
+
"""Analyse all cookbooks holistically."""
|
|
1159
|
+
st.subheader("Holistic Cookbook Analysis")
|
|
871
1160
|
|
|
872
|
-
|
|
873
|
-
1. **Upload Archive**: Upload a ZIP or TAR archive containing your cookbooks
|
|
874
|
-
2. **Automatic Extraction**: The system will extract and analyse the archive
|
|
1161
|
+
progress_bar, status_text = _setup_analysis_progress()
|
|
875
1162
|
|
|
876
|
-
|
|
877
|
-
|
|
878
|
-
5. **Run Analysis**: Click "Analyse Selected Cookbooks" to get insights
|
|
1163
|
+
try:
|
|
1164
|
+
status_text.text("Performing holistic analysis of all cookbooks...")
|
|
879
1165
|
|
|
1166
|
+
# Check if AI-enhanced analysis is available
|
|
1167
|
+
ai_config = load_ai_settings()
|
|
1168
|
+
provider_name = _get_ai_provider(ai_config)
|
|
1169
|
+
use_ai = (
|
|
1170
|
+
provider_name
|
|
1171
|
+
and provider_name != LOCAL_PROVIDER
|
|
1172
|
+
and ai_config.get("api_key")
|
|
1173
|
+
)
|
|
880
1174
|
|
|
881
|
-
|
|
882
|
-
|
|
883
|
-
|
|
884
|
-
|
|
885
|
-
|
|
886
|
-
|
|
887
|
-
|
|
888
|
-
|
|
889
|
-
|
|
890
|
-
|
|
891
|
-
|
|
892
|
-
|
|
1175
|
+
if use_ai:
|
|
1176
|
+
results = _analyze_with_ai(cookbook_data, provider_name, progress_bar)
|
|
1177
|
+
assessment_result = {
|
|
1178
|
+
"cookbook_assessments": results,
|
|
1179
|
+
"recommendations": "AI-enhanced per-cookbook recommendations above",
|
|
1180
|
+
}
|
|
1181
|
+
st.session_state.analysis_info_messages = [
|
|
1182
|
+
f"Using AI-enhanced analysis with {provider_name} "
|
|
1183
|
+
f"({_get_ai_string_value(ai_config, 'model', 'claude-3-5-sonnet-20241022')})", # noqa: E501
|
|
1184
|
+
f"Detected {len(cookbook_data)} cookbook(s)",
|
|
1185
|
+
]
|
|
1186
|
+
else:
|
|
1187
|
+
results, assessment_result = _analyze_rule_based(cookbook_data)
|
|
893
1188
|
|
|
894
|
-
|
|
895
|
-
|
|
896
|
-
|
|
897
|
-
|
|
898
|
-
""")
|
|
1189
|
+
st.session_state.holistic_assessment = assessment_result
|
|
1190
|
+
st.session_state.analysis_results = results
|
|
1191
|
+
st.session_state.analysis_cookbook_path = cookbook_path
|
|
1192
|
+
st.session_state.total_cookbooks = len(results)
|
|
899
1193
|
|
|
1194
|
+
progress_bar.progress(1.0)
|
|
1195
|
+
st.rerun()
|
|
900
1196
|
|
|
901
|
-
|
|
902
|
-
|
|
903
|
-
|
|
1197
|
+
except Exception as e:
|
|
1198
|
+
progress_bar.empty()
|
|
1199
|
+
status_text.empty()
|
|
1200
|
+
st.error(f"Holistic analysis failed: {e}")
|
|
1201
|
+
finally:
|
|
1202
|
+
progress_bar.empty()
|
|
1203
|
+
status_text.empty()
|
|
904
1204
|
|
|
905
|
-
progress_bar, status_text = _setup_analysis_progress()
|
|
906
|
-
results = _perform_cookbook_analysis(
|
|
907
|
-
cookbook_path, selected_cookbooks, progress_bar, status_text
|
|
908
|
-
)
|
|
909
1205
|
|
|
910
|
-
|
|
1206
|
+
def _analyze_with_ai(
|
|
1207
|
+
cookbook_data: list,
|
|
1208
|
+
provider_name: str,
|
|
1209
|
+
progress_bar,
|
|
1210
|
+
) -> list:
|
|
1211
|
+
"""
|
|
1212
|
+
Analyze cookbooks using AI-enhanced analysis.
|
|
911
1213
|
|
|
912
|
-
|
|
913
|
-
|
|
914
|
-
|
|
915
|
-
|
|
1214
|
+
Args:
|
|
1215
|
+
cookbook_data: List of cookbook data.
|
|
1216
|
+
provider_name: Name of the AI provider.
|
|
1217
|
+
progress_bar: Streamlit progress bar.
|
|
916
1218
|
|
|
917
|
-
|
|
918
|
-
|
|
1219
|
+
Returns:
|
|
1220
|
+
List of analysis results.
|
|
919
1221
|
|
|
1222
|
+
"""
|
|
1223
|
+
from souschef.assessment import assess_single_cookbook_with_ai
|
|
1224
|
+
|
|
1225
|
+
ai_config = load_ai_settings()
|
|
1226
|
+
provider_mapping = {
|
|
1227
|
+
ANTHROPIC_CLAUDE_DISPLAY: "anthropic",
|
|
1228
|
+
ANTHROPIC_PROVIDER: "anthropic",
|
|
1229
|
+
"OpenAI": "openai",
|
|
1230
|
+
OPENAI_PROVIDER: "openai",
|
|
1231
|
+
IBM_WATSONX: "watson",
|
|
1232
|
+
RED_HAT_LIGHTSPEED: "lightspeed",
|
|
1233
|
+
}
|
|
1234
|
+
provider = provider_mapping.get(
|
|
1235
|
+
provider_name,
|
|
1236
|
+
provider_name.lower().replace(" ", "_"),
|
|
1237
|
+
)
|
|
920
1238
|
|
|
921
|
-
|
|
922
|
-
|
|
923
|
-
|
|
924
|
-
|
|
925
|
-
|
|
1239
|
+
model = _get_ai_string_value(ai_config, "model", "claude-3-5-sonnet-20241022")
|
|
1240
|
+
api_key = _get_ai_string_value(ai_config, "api_key", "")
|
|
1241
|
+
temperature = _get_ai_float_value(ai_config, "temperature", 0.7)
|
|
1242
|
+
max_tokens = _get_ai_int_value(ai_config, "max_tokens", 4000)
|
|
1243
|
+
project_id = _get_ai_string_value(ai_config, "project_id", "")
|
|
1244
|
+
base_url = _get_ai_string_value(ai_config, "base_url", "")
|
|
1245
|
+
|
|
1246
|
+
st.info(f"Using AI-enhanced analysis with {provider_name} ({model})")
|
|
1247
|
+
|
|
1248
|
+
# Count total recipes across all cookbooks
|
|
1249
|
+
total_recipes = sum(
|
|
1250
|
+
len(list((Path(cb["Path"]) / "recipes").glob("*.rb")))
|
|
1251
|
+
if (Path(cb["Path"]) / "recipes").exists()
|
|
1252
|
+
else 0
|
|
1253
|
+
for cb in cookbook_data
|
|
1254
|
+
)
|
|
926
1255
|
|
|
1256
|
+
st.info(f"Detected {len(cookbook_data)} cookbook(s) with {total_recipes} recipe(s)")
|
|
927
1257
|
|
|
928
|
-
def _perform_cookbook_analysis(
|
|
929
|
-
cookbook_path, selected_cookbooks, progress_bar, status_text
|
|
930
|
-
):
|
|
931
|
-
"""Perform analysis on selected cookbooks."""
|
|
932
1258
|
results = []
|
|
933
|
-
|
|
1259
|
+
for i, cb_data in enumerate(cookbook_data):
|
|
1260
|
+
# Count recipes in this cookbook
|
|
1261
|
+
recipes_dir = Path(cb_data["Path"]) / "recipes"
|
|
1262
|
+
recipe_count = (
|
|
1263
|
+
len(list(recipes_dir.glob("*.rb"))) if recipes_dir.exists() else 0
|
|
1264
|
+
)
|
|
934
1265
|
|
|
935
|
-
|
|
936
|
-
|
|
937
|
-
|
|
1266
|
+
st.info(
|
|
1267
|
+
f"Analyzing {cb_data['Name']} ({recipe_count} recipes)... "
|
|
1268
|
+
f"({i + 1}/{len(cookbook_data)})"
|
|
1269
|
+
)
|
|
1270
|
+
progress_bar.progress((i + 1) / len(cookbook_data))
|
|
1271
|
+
|
|
1272
|
+
assessment = assess_single_cookbook_with_ai(
|
|
1273
|
+
cb_data["Path"],
|
|
1274
|
+
ai_provider=provider,
|
|
1275
|
+
api_key=api_key,
|
|
1276
|
+
model=model,
|
|
1277
|
+
temperature=temperature,
|
|
1278
|
+
max_tokens=max_tokens,
|
|
1279
|
+
project_id=project_id,
|
|
1280
|
+
base_url=base_url,
|
|
1281
|
+
)
|
|
938
1282
|
|
|
939
|
-
|
|
940
|
-
|
|
941
|
-
analysis_result = _analyse_single_cookbook(cookbook_name, cookbook_dir)
|
|
942
|
-
results.append(analysis_result)
|
|
1283
|
+
result = _build_cookbook_result(cb_data, assessment, ANALYSIS_STATUS_ANALYSED)
|
|
1284
|
+
results.append(result)
|
|
943
1285
|
|
|
944
1286
|
return results
|
|
945
1287
|
|
|
946
1288
|
|
|
947
|
-
def
|
|
948
|
-
|
|
949
|
-
|
|
1289
|
+
def _analyze_rule_based(
|
|
1290
|
+
cookbook_data: list,
|
|
1291
|
+
) -> tuple[list, dict]:
|
|
1292
|
+
"""
|
|
1293
|
+
Analyze cookbooks using rule-based analysis.
|
|
950
1294
|
|
|
1295
|
+
Args:
|
|
1296
|
+
cookbook_data: List of cookbook data.
|
|
951
1297
|
|
|
952
|
-
|
|
953
|
-
|
|
954
|
-
|
|
955
|
-
|
|
956
|
-
|
|
957
|
-
|
|
1298
|
+
Returns:
|
|
1299
|
+
Tuple of (results list, assessment_result dict).
|
|
1300
|
+
|
|
1301
|
+
"""
|
|
1302
|
+
from souschef.assessment import parse_chef_migration_assessment
|
|
1303
|
+
|
|
1304
|
+
cookbook_paths_list = [cb["Path"] for cb in cookbook_data]
|
|
1305
|
+
cookbook_paths_str = ",".join(cookbook_paths_list)
|
|
1306
|
+
|
|
1307
|
+
assessment_result = parse_chef_migration_assessment(cookbook_paths_str)
|
|
1308
|
+
|
|
1309
|
+
if "error" in assessment_result:
|
|
1310
|
+
st.error(f"Holistic analysis failed: {assessment_result['error']}")
|
|
1311
|
+
return [], {}
|
|
1312
|
+
|
|
1313
|
+
results = _process_cookbook_assessments(assessment_result, cookbook_data)
|
|
1314
|
+
return results, assessment_result
|
|
1315
|
+
|
|
1316
|
+
|
|
1317
|
+
def _process_cookbook_assessments(assessment_result: dict, cookbook_data: list) -> list:
|
|
1318
|
+
"""
|
|
1319
|
+
Process cookbook assessments and build results.
|
|
1320
|
+
|
|
1321
|
+
Args:
|
|
1322
|
+
assessment_result: Assessment result dictionary.
|
|
1323
|
+
cookbook_data: List of cookbook data.
|
|
1324
|
+
|
|
1325
|
+
Returns:
|
|
1326
|
+
List of result dictionaries.
|
|
1327
|
+
|
|
1328
|
+
"""
|
|
1329
|
+
results: list[dict] = []
|
|
1330
|
+
if "cookbook_assessments" not in assessment_result:
|
|
1331
|
+
return results
|
|
1332
|
+
|
|
1333
|
+
top_recommendations = assessment_result.get("recommendations", "")
|
|
1334
|
+
|
|
1335
|
+
for cookbook_assessment in assessment_result["cookbook_assessments"]:
|
|
1336
|
+
result = _build_assessment_result(
|
|
1337
|
+
cookbook_assessment, cookbook_data, top_recommendations
|
|
1338
|
+
)
|
|
1339
|
+
results.append(result)
|
|
1340
|
+
|
|
1341
|
+
return results
|
|
1342
|
+
|
|
1343
|
+
|
|
1344
|
+
def _build_assessment_result(
|
|
1345
|
+
cookbook_assessment: dict, cookbook_data: list, top_recommendations: str
|
|
1346
|
+
) -> dict:
|
|
1347
|
+
"""
|
|
1348
|
+
Build result dictionary from cookbook assessment.
|
|
1349
|
+
|
|
1350
|
+
Args:
|
|
1351
|
+
cookbook_assessment: Single cookbook assessment.
|
|
1352
|
+
cookbook_data: List of cookbook data.
|
|
1353
|
+
top_recommendations: Top-level recommendations.
|
|
1354
|
+
|
|
1355
|
+
Returns:
|
|
1356
|
+
Result dictionary.
|
|
1357
|
+
|
|
1358
|
+
"""
|
|
1359
|
+
cookbook_path = cookbook_assessment.get("cookbook_path", "")
|
|
1360
|
+
cookbook_info = _find_cookbook_info(cookbook_data, cookbook_path)
|
|
1361
|
+
|
|
1362
|
+
recommendations = _build_recommendations(cookbook_assessment, top_recommendations)
|
|
1363
|
+
|
|
1364
|
+
estimated_days = cookbook_assessment.get("estimated_effort_days", 0)
|
|
1365
|
+
effort_metrics = EffortMetrics(estimated_days)
|
|
1366
|
+
|
|
1367
|
+
return {
|
|
1368
|
+
"name": (
|
|
1369
|
+
cookbook_info["Name"]
|
|
1370
|
+
if cookbook_info
|
|
1371
|
+
else cookbook_assessment["cookbook_name"]
|
|
1372
|
+
),
|
|
1373
|
+
"path": cookbook_info["Path"] if cookbook_info else cookbook_path,
|
|
1374
|
+
"version": cookbook_info["Version"] if cookbook_info else "Unknown",
|
|
1375
|
+
"maintainer": cookbook_info["Maintainer"] if cookbook_info else "Unknown",
|
|
1376
|
+
"description": (
|
|
1377
|
+
cookbook_info["Description"] if cookbook_info else "Analysed holistically"
|
|
1378
|
+
),
|
|
1379
|
+
"dependencies": int(cookbook_assessment.get("dependencies", 0) or 0),
|
|
1380
|
+
"complexity": cookbook_assessment.get("migration_priority", "Unknown").title(),
|
|
1381
|
+
"estimated_hours": effort_metrics.estimated_hours,
|
|
1382
|
+
"recommendations": recommendations,
|
|
1383
|
+
"status": ANALYSIS_STATUS_ANALYSED,
|
|
1384
|
+
}
|
|
1385
|
+
|
|
1386
|
+
|
|
1387
|
+
def _find_cookbook_info(cookbook_data: list, cookbook_path: str) -> dict | None:
|
|
1388
|
+
"""
|
|
1389
|
+
Find cookbook info matching the given path.
|
|
1390
|
+
|
|
1391
|
+
Args:
|
|
1392
|
+
cookbook_data: List of cookbook data.
|
|
1393
|
+
cookbook_path: Path to match.
|
|
1394
|
+
|
|
1395
|
+
Returns:
|
|
1396
|
+
Matching cookbook info or None.
|
|
1397
|
+
|
|
1398
|
+
"""
|
|
1399
|
+
return next(
|
|
1400
|
+
(cd for cd in cookbook_data if cd["Path"] == cookbook_path),
|
|
1401
|
+
None,
|
|
1402
|
+
)
|
|
1403
|
+
|
|
1404
|
+
|
|
1405
|
+
def _build_cookbook_result(cb_data: dict, assessment: dict, status: str) -> dict:
|
|
1406
|
+
"""
|
|
1407
|
+
Build a cookbook result from assessment data.
|
|
1408
|
+
|
|
1409
|
+
Args:
|
|
1410
|
+
cb_data: Cookbook data.
|
|
1411
|
+
assessment: Assessment dictionary.
|
|
1412
|
+
status: Status of analysis.
|
|
1413
|
+
|
|
1414
|
+
Returns:
|
|
1415
|
+
Result dictionary.
|
|
1416
|
+
|
|
1417
|
+
"""
|
|
1418
|
+
if "error" not in assessment:
|
|
1419
|
+
return {
|
|
1420
|
+
"name": cb_data["Name"],
|
|
1421
|
+
"path": cb_data["Path"],
|
|
1422
|
+
"version": cb_data["Version"],
|
|
1423
|
+
"maintainer": cb_data["Maintainer"],
|
|
1424
|
+
"description": cb_data["Description"],
|
|
1425
|
+
"dependencies": cb_data["Dependencies"],
|
|
1426
|
+
"complexity": assessment.get("complexity", "Unknown"),
|
|
1427
|
+
"estimated_hours": assessment.get("estimated_hours", 0),
|
|
1428
|
+
"recommendations": assessment.get(
|
|
1429
|
+
"recommendations", "No recommendations available"
|
|
1430
|
+
),
|
|
1431
|
+
"status": status,
|
|
1432
|
+
}
|
|
1433
|
+
return {
|
|
1434
|
+
"name": cb_data["Name"],
|
|
1435
|
+
"path": cb_data["Path"],
|
|
1436
|
+
"version": cb_data["Version"],
|
|
1437
|
+
"maintainer": cb_data["Maintainer"],
|
|
1438
|
+
"description": cb_data["Description"],
|
|
1439
|
+
"dependencies": cb_data["Dependencies"],
|
|
1440
|
+
"complexity": "Error",
|
|
1441
|
+
"estimated_hours": 0,
|
|
1442
|
+
"recommendations": f"Analysis failed: {assessment['error']}",
|
|
1443
|
+
"status": ANALYSIS_STATUS_FAILED,
|
|
1444
|
+
}
|
|
1445
|
+
|
|
1446
|
+
|
|
1447
|
+
def _build_recommendations(cookbook_assessment: dict, top_recommendations: str) -> str:
|
|
1448
|
+
"""
|
|
1449
|
+
Build recommendations from cookbook assessment.
|
|
1450
|
+
|
|
1451
|
+
Args:
|
|
1452
|
+
cookbook_assessment: Assessment data for a cookbook.
|
|
1453
|
+
top_recommendations: Top-level recommendations.
|
|
1454
|
+
|
|
1455
|
+
Returns:
|
|
1456
|
+
Formatted recommendations string.
|
|
1457
|
+
|
|
1458
|
+
"""
|
|
1459
|
+
recommendations: list[str] = []
|
|
1460
|
+
if cookbook_assessment.get("challenges"):
|
|
1461
|
+
for challenge in cookbook_assessment["challenges"]:
|
|
1462
|
+
recommendations.append(f"• {challenge}")
|
|
1463
|
+
return "\n".join(recommendations)
|
|
1464
|
+
|
|
1465
|
+
return (
|
|
1466
|
+
top_recommendations
|
|
1467
|
+
if top_recommendations
|
|
1468
|
+
else f"Complexity: {str(cookbook_assessment.get('complexity_score', 0))}/100"
|
|
1469
|
+
)
|
|
1470
|
+
|
|
1471
|
+
|
|
1472
|
+
def _convert_all_cookbooks_holistically(cookbook_path: str):
|
|
1473
|
+
"""Convert all cookbooks to Ansible roles."""
|
|
1474
|
+
st.subheader("Holistic Cookbook Conversion")
|
|
1475
|
+
|
|
1476
|
+
progress_bar, status_text = _setup_analysis_progress()
|
|
1477
|
+
|
|
1478
|
+
try:
|
|
1479
|
+
status_text.text("Converting all cookbooks holistically...")
|
|
1480
|
+
|
|
1481
|
+
# Create temporary output directory with secure permissions
|
|
1482
|
+
import tempfile
|
|
1483
|
+
from pathlib import Path
|
|
1484
|
+
|
|
1485
|
+
output_dir = Path(tempfile.mkdtemp(prefix="souschef_holistic_conversion_"))
|
|
1486
|
+
with contextlib.suppress(FileNotFoundError, OSError):
|
|
1487
|
+
output_dir.chmod(0o700) # Secure permissions: rwx------
|
|
1488
|
+
|
|
1489
|
+
# Get assessment data if available
|
|
1490
|
+
assessment_data = ""
|
|
1491
|
+
if (
|
|
1492
|
+
"holistic_assessment" in st.session_state
|
|
1493
|
+
and st.session_state.holistic_assessment
|
|
1494
|
+
):
|
|
1495
|
+
assessment_data = json.dumps(st.session_state.holistic_assessment)
|
|
1496
|
+
|
|
1497
|
+
# Call the new holistic conversion function
|
|
1498
|
+
from souschef.server import convert_all_cookbooks_comprehensive
|
|
1499
|
+
|
|
1500
|
+
conversion_result = convert_all_cookbooks_comprehensive(
|
|
1501
|
+
cookbooks_path=cookbook_path,
|
|
1502
|
+
output_path=str(output_dir),
|
|
1503
|
+
assessment_data=assessment_data,
|
|
1504
|
+
include_templates=True,
|
|
1505
|
+
include_attributes=True,
|
|
1506
|
+
include_recipes=True,
|
|
1507
|
+
)
|
|
1508
|
+
|
|
1509
|
+
if conversion_result.startswith("Error"):
|
|
1510
|
+
st.error(f"Holistic conversion failed: {conversion_result}")
|
|
1511
|
+
return
|
|
1512
|
+
|
|
1513
|
+
# Store conversion result for display
|
|
1514
|
+
st.session_state.holistic_conversion_result = {
|
|
1515
|
+
"result": conversion_result,
|
|
1516
|
+
"output_path": str(output_dir),
|
|
1517
|
+
}
|
|
1518
|
+
|
|
1519
|
+
progress_bar.progress(1.0)
|
|
1520
|
+
status_text.text("Holistic conversion completed!")
|
|
1521
|
+
st.success("Holistically converted all cookbooks to Ansible roles!")
|
|
1522
|
+
|
|
1523
|
+
# Display conversion results
|
|
1524
|
+
_display_holistic_conversion_results(
|
|
1525
|
+
st.session_state.holistic_conversion_result
|
|
1526
|
+
)
|
|
1527
|
+
|
|
1528
|
+
# Trigger rerun to display results
|
|
1529
|
+
st.rerun()
|
|
1530
|
+
|
|
1531
|
+
except Exception as e:
|
|
1532
|
+
progress_bar.empty()
|
|
1533
|
+
status_text.empty()
|
|
1534
|
+
st.error(f"Holistic conversion failed: {e}")
|
|
1535
|
+
finally:
|
|
1536
|
+
progress_bar.empty()
|
|
1537
|
+
status_text.empty()
|
|
1538
|
+
|
|
1539
|
+
|
|
1540
|
+
def _parse_conversion_result_text(result_text: str) -> dict:
|
|
1541
|
+
"""Parse the conversion result text to extract structured data."""
|
|
1542
|
+
structured: dict[str, Any] = {
|
|
1543
|
+
"summary": {},
|
|
1544
|
+
"cookbook_results": [],
|
|
1545
|
+
"warnings": [],
|
|
1546
|
+
"errors": [],
|
|
1547
|
+
}
|
|
1548
|
+
|
|
1549
|
+
lines = result_text.split("\n")
|
|
1550
|
+
current_section = None
|
|
1551
|
+
|
|
1552
|
+
for line in lines:
|
|
1553
|
+
line = line.strip()
|
|
1554
|
+
|
|
1555
|
+
# Parse summary section
|
|
1556
|
+
if "## Overview:" in line:
|
|
1557
|
+
current_section = "summary"
|
|
1558
|
+
elif current_section == "summary" and "- " in line:
|
|
1559
|
+
_parse_summary_line(line, structured)
|
|
1560
|
+
|
|
1561
|
+
# Parse successfully converted cookbooks
|
|
1562
|
+
elif "## Successfully Converted Cookbooks:" in line:
|
|
1563
|
+
current_section = "converted"
|
|
1564
|
+
elif current_section == "converted" and line.startswith("- **"):
|
|
1565
|
+
_parse_converted_cookbook(line, structured)
|
|
1566
|
+
|
|
1567
|
+
# Parse failed conversions
|
|
1568
|
+
elif "## Failed Conversions:" in line:
|
|
1569
|
+
current_section = "failed"
|
|
1570
|
+
elif current_section == "failed" and line.startswith("- ❌ **"):
|
|
1571
|
+
_parse_failed_cookbook(line, structured)
|
|
1572
|
+
|
|
1573
|
+
# Extract warnings from the result text
|
|
1574
|
+
_extract_warnings_from_text(result_text, structured)
|
|
1575
|
+
|
|
1576
|
+
return structured
|
|
1577
|
+
|
|
1578
|
+
|
|
1579
|
+
def _parse_summary_line(line: str, structured: dict):
|
|
1580
|
+
"""Parse a single summary line."""
|
|
1581
|
+
if "Total cookbooks found:" in line:
|
|
1582
|
+
try:
|
|
1583
|
+
count = int(line.split(":")[-1].strip())
|
|
1584
|
+
structured["summary"]["total_cookbooks"] = count
|
|
1585
|
+
except ValueError:
|
|
1586
|
+
pass
|
|
1587
|
+
elif "Successfully converted:" in line:
|
|
1588
|
+
try:
|
|
1589
|
+
count = int(line.split(":")[-1].strip())
|
|
1590
|
+
structured["summary"]["cookbooks_converted"] = count
|
|
1591
|
+
except ValueError:
|
|
1592
|
+
pass
|
|
1593
|
+
elif "Total files converted:" in line:
|
|
1594
|
+
try:
|
|
1595
|
+
count = int(line.split(":")[-1].strip())
|
|
1596
|
+
structured["summary"]["total_converted_files"] = count
|
|
1597
|
+
except ValueError:
|
|
1598
|
+
pass
|
|
1599
|
+
|
|
1600
|
+
|
|
1601
|
+
def _parse_converted_cookbook(line: str, structured: dict):
|
|
1602
|
+
"""Parse a successfully converted cookbook line."""
|
|
1603
|
+
try:
|
|
1604
|
+
parts = line.split("**")
|
|
1605
|
+
if len(parts) >= 3:
|
|
1606
|
+
cookbook_name = parts[1]
|
|
1607
|
+
role_name = parts[3].strip("`→ ")
|
|
1608
|
+
structured["cookbook_results"].append(
|
|
1609
|
+
{
|
|
1610
|
+
"cookbook_name": cookbook_name,
|
|
1611
|
+
"role_name": role_name,
|
|
1612
|
+
"status": "success",
|
|
1613
|
+
"tasks_count": 0, # Will be updated if more details available
|
|
1614
|
+
"templates_count": 0,
|
|
1615
|
+
"variables_count": 0,
|
|
1616
|
+
"files_count": 0,
|
|
1617
|
+
}
|
|
1618
|
+
)
|
|
1619
|
+
except (IndexError, ValueError):
|
|
1620
|
+
pass
|
|
1621
|
+
|
|
1622
|
+
|
|
1623
|
+
def _parse_failed_cookbook(line: str, structured: dict):
|
|
1624
|
+
"""Parse a failed conversion cookbook line."""
|
|
1625
|
+
try:
|
|
1626
|
+
parts = line.split("**")
|
|
1627
|
+
if len(parts) >= 3:
|
|
1628
|
+
cookbook_name = parts[1]
|
|
1629
|
+
error = parts[3].strip(": ")
|
|
1630
|
+
structured["cookbook_results"].append(
|
|
1631
|
+
{
|
|
1632
|
+
"cookbook_name": cookbook_name,
|
|
1633
|
+
"status": "failed",
|
|
1634
|
+
"error": error,
|
|
1635
|
+
}
|
|
1636
|
+
)
|
|
1637
|
+
except (IndexError, ValueError):
|
|
1638
|
+
pass
|
|
1639
|
+
|
|
1640
|
+
|
|
1641
|
+
def _extract_warnings_from_text(result_text: str, structured: dict):
|
|
1642
|
+
"""Extract warnings from the conversion result text."""
|
|
1643
|
+
# Extract warnings from the result text (look for common warning patterns)
|
|
1644
|
+
if "No recipes directory found" in result_text:
|
|
1645
|
+
structured["warnings"].append(
|
|
1646
|
+
"Some cookbooks are missing recipes directories and cannot be "
|
|
1647
|
+
"converted to Ansible tasks"
|
|
1648
|
+
)
|
|
1649
|
+
if "No recipe files" in result_text.lower():
|
|
1650
|
+
structured["warnings"].append("Some cookbooks have empty recipes directories")
|
|
1651
|
+
|
|
1652
|
+
# If no cookbooks were successfully converted but some were found,
|
|
1653
|
+
# add a general warning
|
|
1654
|
+
total_found = structured["summary"].get("total_cookbooks", 0)
|
|
1655
|
+
converted = structured["summary"].get("cookbooks_converted", 0)
|
|
1656
|
+
if total_found > 0 and converted == 0:
|
|
1657
|
+
structured["warnings"].append(
|
|
1658
|
+
"No cookbooks were successfully converted. Check that cookbooks "
|
|
1659
|
+
"contain recipes directories with .rb files."
|
|
1660
|
+
)
|
|
1661
|
+
|
|
1662
|
+
|
|
1663
|
+
def _display_holistic_conversion_results(conversion_result: dict):
|
|
1664
|
+
"""Display the results of holistic cookbook conversion."""
|
|
1665
|
+
st.subheader("Holistic Conversion Results")
|
|
1666
|
+
|
|
1667
|
+
# Parse the conversion result string to extract structured data
|
|
1668
|
+
result_text = conversion_result.get("result", "")
|
|
1669
|
+
structured_result = _parse_conversion_result_text(result_text)
|
|
1670
|
+
|
|
1671
|
+
_display_conversion_summary(structured_result)
|
|
1672
|
+
_display_conversion_warnings_errors(structured_result)
|
|
1673
|
+
_display_conversion_details(structured_result)
|
|
1674
|
+
_display_conversion_report(result_text)
|
|
1675
|
+
_display_conversion_download_options(conversion_result)
|
|
1676
|
+
|
|
1677
|
+
|
|
1678
|
+
def _display_conversion_summary(structured_result: dict):
|
|
1679
|
+
"""Display the conversion summary metrics."""
|
|
1680
|
+
if "summary" in structured_result:
|
|
1681
|
+
summary = structured_result["summary"]
|
|
1682
|
+
col1, col2, col3, col4 = st.columns(4)
|
|
1683
|
+
|
|
1684
|
+
with col1:
|
|
1685
|
+
st.metric("Cookbooks Converted", summary.get("cookbooks_converted", 0))
|
|
1686
|
+
|
|
1687
|
+
with col2:
|
|
1688
|
+
st.metric("Roles Created", summary.get("roles_created", 0))
|
|
1689
|
+
|
|
1690
|
+
with col3:
|
|
1691
|
+
st.metric("Tasks Generated", summary.get("tasks_generated", 0))
|
|
1692
|
+
|
|
1693
|
+
with col4:
|
|
1694
|
+
st.metric("Templates Converted", summary.get("templates_converted", 0))
|
|
1695
|
+
|
|
1696
|
+
|
|
1697
|
+
def _display_conversion_warnings_errors(structured_result: dict):
|
|
1698
|
+
"""Display conversion warnings and errors."""
|
|
1699
|
+
if "warnings" in structured_result and structured_result["warnings"]:
|
|
1700
|
+
st.warning("⚠️ Conversion Warnings")
|
|
1701
|
+
for warning in structured_result["warnings"]:
|
|
1702
|
+
st.write(f"• {warning}")
|
|
1703
|
+
|
|
1704
|
+
if "errors" in structured_result and structured_result["errors"]:
|
|
1705
|
+
st.error("❌ Conversion Errors")
|
|
1706
|
+
for error in structured_result["errors"]:
|
|
1707
|
+
st.write(f"• {error}")
|
|
1708
|
+
|
|
1709
|
+
|
|
1710
|
+
def _display_conversion_details(structured_result: dict):
|
|
1711
|
+
"""Display detailed conversion results."""
|
|
1712
|
+
if "cookbook_results" in structured_result:
|
|
1713
|
+
st.subheader("Conversion Details")
|
|
1714
|
+
|
|
1715
|
+
for cookbook_result in structured_result["cookbook_results"]:
|
|
1716
|
+
with st.expander(
|
|
1717
|
+
f"📁 {cookbook_result.get('cookbook_name', 'Unknown')}", expanded=False
|
|
1718
|
+
):
|
|
1719
|
+
col1, col2 = st.columns(2)
|
|
1720
|
+
|
|
1721
|
+
with col1:
|
|
1722
|
+
st.metric("Tasks", cookbook_result.get("tasks_count", 0))
|
|
1723
|
+
st.metric("Templates", cookbook_result.get("templates_count", 0))
|
|
1724
|
+
|
|
1725
|
+
with col2:
|
|
1726
|
+
st.metric("Variables", cookbook_result.get("variables_count", 0))
|
|
1727
|
+
st.metric("Files", cookbook_result.get("files_count", 0))
|
|
1728
|
+
|
|
1729
|
+
if cookbook_result.get("status") == "success":
|
|
1730
|
+
st.success("✅ Conversion successful")
|
|
1731
|
+
else:
|
|
1732
|
+
error_msg = cookbook_result.get("error", "Unknown error")
|
|
1733
|
+
st.error(f"❌ Conversion failed: {error_msg}")
|
|
1734
|
+
|
|
1735
|
+
|
|
1736
|
+
def _display_conversion_report(result_text: str):
|
|
1737
|
+
"""Display the raw conversion report."""
|
|
1738
|
+
with st.expander("Full Conversion Report"):
|
|
1739
|
+
st.code(result_text, language="markdown")
|
|
1740
|
+
|
|
1741
|
+
|
|
1742
|
+
def _display_conversion_download_options(conversion_result: dict):
|
|
1743
|
+
"""Display download options for converted roles."""
|
|
1744
|
+
if "output_path" in conversion_result:
|
|
1745
|
+
st.subheader("Download Converted Roles")
|
|
1746
|
+
|
|
1747
|
+
# Validate output_path before use
|
|
1748
|
+
output_path = conversion_result["output_path"]
|
|
1749
|
+
try:
|
|
1750
|
+
from souschef.core.path_utils import _normalize_path
|
|
1751
|
+
|
|
1752
|
+
# nosemgrep: python.lang.security.audit.dynamic-urllib-use-detected
|
|
1753
|
+
safe_output_path = _normalize_path(str(output_path))
|
|
1754
|
+
except ValueError:
|
|
1755
|
+
st.error("Invalid output path")
|
|
1756
|
+
return
|
|
1757
|
+
|
|
1758
|
+
if safe_output_path.exists():
|
|
1759
|
+
# Create ZIP archive of all converted roles
|
|
1760
|
+
zip_buffer = io.BytesIO()
|
|
1761
|
+
with zipfile.ZipFile(zip_buffer, "w", zipfile.ZIP_DEFLATED) as zip_file:
|
|
1762
|
+
# nosemgrep: python.lang.security.audit.dynamic-urllib-use-detected
|
|
1763
|
+
for root, _dirs, files in os.walk(str(safe_output_path)):
|
|
1764
|
+
for file in files:
|
|
1765
|
+
file_path = Path(root) / file
|
|
1766
|
+
arcname = file_path.relative_to(safe_output_path)
|
|
1767
|
+
zip_file.write(str(file_path), str(arcname))
|
|
1768
|
+
|
|
1769
|
+
zip_buffer.seek(0)
|
|
1770
|
+
|
|
1771
|
+
st.download_button(
|
|
1772
|
+
label="📦 Download All Ansible Roles",
|
|
1773
|
+
data=zip_buffer.getvalue(),
|
|
1774
|
+
file_name="ansible_roles_holistic.zip",
|
|
1775
|
+
mime="application/zip",
|
|
1776
|
+
help="Download ZIP archive containing all converted Ansible roles",
|
|
1777
|
+
key="download_holistic_roles",
|
|
1778
|
+
)
|
|
1779
|
+
|
|
1780
|
+
st.info(f"📂 Roles saved to: {output_path}")
|
|
1781
|
+
else:
|
|
1782
|
+
st.warning("Output directory not found for download")
|
|
1783
|
+
|
|
1784
|
+
|
+def _handle_dashboard_upload():
+    """Handle file uploaded from the dashboard."""
+    # Create a file-like object from the stored data
+    file_data = st.session_state.uploaded_file_data
+    file_name = st.session_state.uploaded_file_name
+
+    # Create a file-like object that mimics the UploadedFile interface
+    class MockUploadedFile:
+        def __init__(self, data, name, mime_type):
+            self.data = data
+            self.name = name
+            self.type = mime_type
+
+        def getbuffer(self):
+            return self.data
+
+        def getvalue(self):
+            return self.data
+
+    mock_file = MockUploadedFile(
+        file_data, file_name, st.session_state.uploaded_file_type
+    )
+
+    # Display upload info
+    st.info(f"Using file uploaded from Dashboard: {file_name}")
+
+    # Add option to clear and upload a different file
+    col1, col2 = st.columns([1, 1])
+    with col1:
+        if st.button(
+            "Use Different File",
+            help="Clear this file and upload a different one",
+            key="use_different_file",
+        ):
+            # Clear the uploaded file from session state
+            del st.session_state.uploaded_file_data
+            del st.session_state.uploaded_file_name
+            del st.session_state.uploaded_file_type
+            st.rerun()
+
+    with col2:
+        if st.button(
+            "Back to Dashboard", help="Return to dashboard", key="back_to_dashboard"
+        ):
+            st.session_state.current_page = "Dashboard"
+            st.rerun()
+
+    # Process the file
+    try:
+        with st.spinner("Extracting archive..."):
+            temp_dir, cookbook_path = extract_archive(mock_file)
+            # Store temp_dir in session state to prevent premature cleanup
+            st.session_state.temp_dir = temp_dir
+            st.success("Archive extracted successfully!")
+
+        # Validate and list cookbooks
+        if cookbook_path:
+            _validate_and_list_cookbooks(str(cookbook_path))
+
+    except Exception as e:
+        st.error(f"Failed to process uploaded file: {e}")
+        # Clear the uploaded file on error
+        if "uploaded_file_data" in st.session_state:
+            del st.session_state.uploaded_file_data
+            del st.session_state.uploaded_file_name
+            del st.session_state.uploaded_file_type
+
+
+def _display_instructions():
+    """Display usage instructions."""
+    with st.expander("How to Use"):
+        st.markdown("""
+        ## Input Methods
+
+        ### Directory Path
+        1. **Enter Cookbook Path**: Provide a **relative path** to your cookbooks
+           (absolute paths not allowed)
+        2. **Review Cookbooks**: The interface will list all cookbooks with metadata
+        3. **Select Cookbooks**: Choose which cookbooks to analyse
+        4. **Run Analysis**: Click "Analyse Selected Cookbooks" to get detailed insights
+
+        **Path Examples:**
+        - `cookbooks/` - subdirectory in current workspace
+        - `../shared/cookbooks/` - parent directory
+        - `./my-cookbooks/` - explicit current directory
+
+        ### Archive Upload
+        1. **Upload Archive**: Upload a ZIP or TAR archive containing your cookbooks
+        2. **Automatic Extraction**: The system will extract and analyse the archive
+
+        3. **Review Cookbooks**: Interface will list all cookbooks found in archive
+        4. **Select Cookbooks**: Choose which cookbooks to analyse
+        5. **Run Analysis**: Click "Analyse Selected Cookbooks" to get insights
+
+
+        ## Expected Structure
+        ```
+        cookbooks/ or archive.zip/
+        ├── nginx/
+        │   ├── metadata.rb
+        │   ├── recipes/
+        │   └── attributes/
+        ├── apache2/
+        │   └── metadata.rb
+        └── mysql/
+            └── metadata.rb
+        ```
+
+        ## Supported Archive Formats
+        - ZIP (.zip)
+        - TAR (.tar)
+        - GZIP-compressed TAR (.tar.gz, .tgz)
+        """)
+
+
+def analyse_selected_cookbooks(cookbook_path: str, selected_cookbooks: list[str]):
+    """Analyse the selected cookbooks and store results in session state."""
+    st.subheader("Analysis Results")
+
+    progress_bar, status_text = _setup_analysis_progress()
+    results = _perform_cookbook_analysis(
+        cookbook_path, selected_cookbooks, progress_bar, status_text
+    )
+
+    _cleanup_progress_indicators(progress_bar, status_text)
+
+    # Store results in session state
+    st.session_state.analysis_results = results
+    st.session_state.analysis_cookbook_path = cookbook_path
+    st.session_state.total_cookbooks = len(selected_cookbooks)
+
+    # Trigger rerun to display results
+    st.rerun()
+
+
+def _setup_analysis_progress():
+    """Set up progress tracking for analysis."""
+    progress_bar = st.progress(0)
+    status_text = st.empty()
+    return progress_bar, status_text
+
+
+def _perform_cookbook_analysis(
+    cookbook_path, selected_cookbooks, progress_bar, status_text
+):
+    """Perform analysis on selected cookbooks."""
+    results = []
+    total = len(selected_cookbooks)
+
+    for i, cookbook_name in enumerate(selected_cookbooks):
+        _update_progress(status_text, cookbook_name, i + 1, total)
+        progress_bar.progress((i + 1) / total)
+
+        cookbook_dir = _find_cookbook_directory(cookbook_path, cookbook_name)
+        if cookbook_dir:
+            analysis_result = _analyse_single_cookbook(cookbook_name, cookbook_dir)
+            results.append(analysis_result)
+
+    return results
+
+
+def _update_progress(status_text, cookbook_name, current, total):
+    """Update progress display."""
+    # Check if AI is configured
+    ai_config = load_ai_settings()
+    ai_available = (
+        ai_config.get("provider")
+        and ai_config.get("provider") != LOCAL_PROVIDER
+        and ai_config.get("api_key")
+    )
+
+    ai_indicator = " [AI-ENHANCED]" if ai_available else " [RULE-BASED]"
+    status_text.text(f"Analyzing {cookbook_name}{ai_indicator}... ({current}/{total})")
+
+
+def _find_cookbook_directory(cookbook_path, cookbook_name):
+    """Find the directory for a specific cookbook by checking metadata."""
+    for d in Path(cookbook_path).iterdir():
+        if d.is_dir():
+            # Check if this directory contains a cookbook with the matching name
+            metadata_file = d / METADATA_FILENAME
+            if metadata_file.exists():
+                try:
+                    metadata = parse_cookbook_metadata(str(metadata_file))
+                    if metadata.get("name") == cookbook_name:
+                        return d
+                except Exception:
+                    # If metadata parsing fails, skip this directory
+                    continue
+    return None
 
 
 def _analyse_single_cookbook(cookbook_name, cookbook_dir):
     """Analyse a single cookbook."""
     try:
-
-
+        metadata = _load_cookbook_metadata(cookbook_name, cookbook_dir)
+        if "error" in metadata:
+            return metadata  # Return error result
+
+        ai_config = load_ai_settings()
+        use_ai = _should_use_ai(ai_config)
+
+        if use_ai:
+            assessment = _run_ai_analysis(cookbook_dir, ai_config)
+        else:
+            assessment = _run_rule_based_analysis(cookbook_dir)
+
+        if isinstance(assessment, dict) and "error" in assessment:
+            return _create_failed_analysis(
+                cookbook_name, cookbook_dir, assessment["error"]
+            )
 
         return _create_successful_analysis(
             cookbook_name, cookbook_dir, assessment, metadata
         )
     except Exception as e:
-
+        import traceback
+
+        error_details = f"{str(e)}\n\nTraceback:\n{traceback.format_exc()}"
+        return _create_failed_analysis(cookbook_name, cookbook_dir, error_details)
+
+
+def _load_cookbook_metadata(cookbook_name: str, cookbook_dir: Path) -> dict[str, Any]:
+    """
+    Load and parse cookbook metadata.
+
+    Args:
+        cookbook_name: Name of the cookbook.
+        cookbook_dir: Directory containing the cookbook.
+
+    Returns:
+        Metadata dictionary or error result.
+
+    """
+    metadata_file = cookbook_dir / METADATA_FILENAME
+    if not metadata_file.exists():
+        return _create_failed_analysis(  # type: ignore[no-any-return]
+            cookbook_name,
+            cookbook_dir,
+            f"No {METADATA_FILENAME} found in {cookbook_dir}",
+        )
+
+    try:
+        return parse_cookbook_metadata(str(metadata_file))
+    except Exception as e:
+        return _create_failed_analysis(  # type: ignore[no-any-return]
+            cookbook_name, cookbook_dir, f"Failed to parse metadata: {e}"
+        )
+
+
+def _should_use_ai(ai_config: dict) -> bool:
+    """
+    Check if AI-enhanced analysis should be used.
+
+    Args:
+        ai_config: AI configuration dictionary.
+
+    Returns:
+        True if AI analysis should be used.
+
+    """
+    return bool(
+        ai_config.get("provider")
+        and ai_config.get("provider") != LOCAL_PROVIDER
+        and ai_config.get("api_key")
+    )
+
+
+def _run_ai_analysis(cookbook_dir: Path, ai_config: dict) -> dict:
+    """
+    Run AI-enhanced cookbook analysis.
+
+    Args:
+        cookbook_dir: Directory containing the cookbook.
+        ai_config: AI configuration dictionary.
+
+    Returns:
+        Assessment dictionary.
+
+    """
+    ai_provider = _determine_ai_provider(ai_config)
+
+    return assess_single_cookbook_with_ai(
+        str(cookbook_dir),
+        ai_provider=ai_provider or "anthropic",
+        api_key=str(ai_config.get("api_key", "")),
+        model=str(ai_config.get("model", "claude-3-5-sonnet-20241022")),
+        temperature=float(ai_config.get("temperature", 0.7)),
+        max_tokens=int(ai_config.get("max_tokens", 4000)),
+        project_id=str(ai_config.get("project_id", "")),
+        base_url=str(ai_config.get("base_url", "")),
+    )
+
+
+def _determine_ai_provider(ai_config: dict) -> str:
+    """
+    Determine AI provider name from config.
+
+    Args:
+        ai_config: AI configuration dictionary.
+
+    Returns:
+        Provider string.
+
+    """
+    provider_mapping = {
+        ANTHROPIC_CLAUDE_DISPLAY: "anthropic",
+        ANTHROPIC_PROVIDER: "anthropic",
+        "OpenAI": "openai",
+        OPENAI_PROVIDER: "openai",
+        IBM_WATSONX: "watson",
+        RED_HAT_LIGHTSPEED: "lightspeed",
+    }
+    provider_name_raw = ai_config.get("provider", "")
+    provider_name = str(provider_name_raw) if provider_name_raw else ""
+    return provider_mapping.get(
+        provider_name,
+        provider_name.lower().replace(" ", "_") if provider_name else "anthropic",
+    )
+
+
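
For reference, the mapping above normalises UI display names to backend identifiers, and anything unrecognised is slugified. A small illustration of the fallback behaviour (the mapping values are restated from the constants in this file; the unknown label is hypothetical):

```python
provider_mapping = {
    "Anthropic Claude": "anthropic",
    "Anthropic (Claude)": "anthropic",
    "OpenAI": "openai",
    "OpenAI (GPT)": "openai",
    "IBM Watsonx": "watson",
    "Red Hat Lightspeed": "lightspeed",
}

def determine_provider(provider_name: str) -> str:
    # Same lookup-with-slug-fallback shape as _determine_ai_provider above
    return provider_mapping.get(
        provider_name,
        provider_name.lower().replace(" ", "_") if provider_name else "anthropic",
    )

assert determine_provider("OpenAI (GPT)") == "openai"
assert determine_provider("") == "anthropic"                     # unset -> default
assert determine_provider("Some Local LLM") == "some_local_llm"  # unknown -> slug
```
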
+def _run_rule_based_analysis(cookbook_dir: Path) -> dict:
+    """
+    Run rule-based cookbook analysis.
+
+    Args:
+        cookbook_dir: Directory containing the cookbook.
+
+    Returns:
+        Assessment dictionary.
+
+    """
+    from souschef.assessment import parse_chef_migration_assessment
+
+    assessment = parse_chef_migration_assessment(str(cookbook_dir))
+
+    # Extract single cookbook assessment if multi-cookbook structure returned
+    if "cookbook_assessments" in assessment and assessment["cookbook_assessments"]:
+        cookbook_assessment = assessment["cookbook_assessments"][0]
+        return {
+            "complexity": assessment.get("complexity", "Unknown"),
+            "estimated_hours": assessment.get("estimated_hours", 0),
+            "recommendations": _format_recommendations_from_assessment(
+                cookbook_assessment, assessment
+            ),
+        }
 
+    return assessment
 
-
+
+def _create_successful_analysis(
+    cookbook_name: str, cookbook_dir: Path, assessment: dict, metadata: dict
+) -> dict:
     """Create analysis result for successful analysis."""
     return {
         "name": cookbook_name,
@@ -986,18 +2150,150 @@ def _create_successful_analysis(cookbook_name, cookbook_dir, assessment, metadat
     }
 
 
+def _format_recommendations_from_assessment(
+    cookbook_assessment: dict, overall_assessment: dict
+) -> str:
+    """Format recommendations from the detailed assessment structure."""
+    recommendations: list[str] = []
+
+    # Add cookbook-specific details
+    _add_complexity_score(recommendations, cookbook_assessment)
+    _add_effort_estimate(recommendations, cookbook_assessment)
+    _add_migration_priority(recommendations, cookbook_assessment)
+    _add_key_findings(recommendations, cookbook_assessment)
+    _add_overall_recommendations(recommendations, overall_assessment)
+
+    return "\n".join(recommendations) if recommendations else "Analysis completed"
+
+
+def _add_complexity_score(recommendations: list[str], assessment: dict) -> None:
+    """Add complexity score to recommendations."""
+    if "complexity_score" in assessment:
+        recommendations.append(
+            f"Complexity Score: {assessment['complexity_score']}/100"
+        )
+
+
+def _add_effort_estimate(recommendations: list[str], assessment: dict) -> None:
+    """Add effort estimate to recommendations."""
+    if "estimated_effort_days" not in assessment:
+        return
+
+    estimated_days = assessment["estimated_effort_days"]
+    effort_metrics = EffortMetrics(estimated_days)
+    complexity = assessment.get("complexity", "Medium")
+    is_valid, _ = validate_metrics_consistency(
+        days=effort_metrics.estimated_days,
+        weeks=effort_metrics.estimated_weeks_range,
+        hours=effort_metrics.estimated_hours,
+        complexity=complexity,
+    )
+    if is_valid:
+        recommendations.append(
+            f"Estimated Effort: {effort_metrics.estimated_days_formatted}"
+        )
+    else:
+        recommendations.append(f"Estimated Effort: {estimated_days} days")
+
+
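
`validate_metrics_consistency` acts as a guard here: the formatted estimate is only shown when the derived day/week/hour figures agree with the stated complexity, otherwise the raw day count is used. A minimal sketch of that decision in isolation, assuming `EffortMetrics` exposes the attributes referenced above (`estimated_days`, `estimated_weeks_range`, `estimated_hours`, `estimated_days_formatted`):

```python
from souschef.core.metrics import EffortMetrics, validate_metrics_consistency

def format_effort(estimated_days: float, complexity: str = "Medium") -> str:
    metrics = EffortMetrics(estimated_days)
    is_valid, _ = validate_metrics_consistency(
        days=metrics.estimated_days,
        weeks=metrics.estimated_weeks_range,
        hours=metrics.estimated_hours,
        complexity=complexity,
    )
    # Prefer the human-readable form; fall back to the raw figure on mismatch
    if is_valid:
        return f"Estimated Effort: {metrics.estimated_days_formatted}"
    return f"Estimated Effort: {estimated_days} days"
```
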
+def _add_migration_priority(recommendations: list[str], assessment: dict) -> None:
+    """Add migration priority to recommendations."""
+    if "migration_priority" in assessment:
+        recommendations.append(
+            f"Migration Priority: {assessment['migration_priority']}"
+        )
+
+
+def _add_key_findings(recommendations: list[str], assessment: dict) -> None:
+    """Add key findings to recommendations."""
+    if not assessment.get("key_findings"):
+        return
+
+    recommendations.append("\nKey Findings:")
+    for finding in assessment["key_findings"]:
+        recommendations.append(f"  - {finding}")
+
+
+def _add_overall_recommendations(
+    recommendations: list[str], overall_assessment: dict
+) -> None:
+    """Add overall recommendations to recommendations."""
+    rec_data = overall_assessment.get("recommendations")
+    if not rec_data:
+        return
+
+    recommendations.append("\nRecommendations:")
+    if isinstance(rec_data, list):
+        for rec in rec_data:
+            if isinstance(rec, dict) and "recommendation" in rec:
+                recommendations.append(f"  - {rec['recommendation']}")
+            elif isinstance(rec, str):
+                recommendations.append(f"  - {rec}")
+    elif isinstance(rec_data, str):
+        recommendations.append(f"  - {rec_data}")
+
+
+def _get_error_context(cookbook_dir: Path) -> str:
+    """Get context information about why analysis might have failed."""
+    context_parts = []
+
+    # Check basic structure
+    validation = _validate_cookbook_structure(cookbook_dir)
+
+    missing_items = [check for check, valid in validation.items() if not valid]
+    if missing_items:
+        context_parts.append(f"Missing: {', '.join(missing_items)}")
+
+    # Check if metadata parsing failed
+    metadata_file = cookbook_dir / METADATA_FILENAME
+    if metadata_file.exists():
+        try:
+            parse_cookbook_metadata(str(metadata_file))
+            context_parts.append("metadata.rb exists and parses successfully")
+        except Exception as e:
+            context_parts.append(f"metadata.rb parsing error: {str(e)[:100]}")
+
+    # Check AI configuration if using AI
+    ai_config = load_ai_settings()
+    use_ai = (
+        ai_config.get("provider")
+        and ai_config.get("provider") != LOCAL_PROVIDER
+        and ai_config.get("api_key")
+    )
+
+    if use_ai:
+        context_parts.append(
+            f"Using AI analysis with {ai_config.get('provider', 'Unknown')}"
+        )
+        if not ai_config.get("api_key"):
+            context_parts.append("AI configured but no API key provided")
+    else:
+        context_parts.append("Using rule-based analysis (AI not configured)")
+
+    return (
+        "; ".join(context_parts) if context_parts else "No additional context available"
+    )
+
+
 def _create_failed_analysis(cookbook_name, cookbook_dir, error_message):
     """Create analysis result for failed analysis."""
+    # Add context to the error message
+    context_info = _get_error_context(cookbook_dir)
+    full_error = f"{error_message}\n\nContext: {context_info}"
+
     return {
         "name": cookbook_name,
         "path": str(cookbook_dir),
         "version": "Error",
         "maintainer": "Error",
-        "description":
+        "description": (
+            f"Analysis failed: {error_message[:100]}"
+            f"{'...' if len(error_message) > 100 else ''}"
+        ),
         "dependencies": 0,
         "complexity": "Error",
         "estimated_hours": 0,
-        "recommendations":
+        "recommendations": full_error,
         "status": ANALYSIS_STATUS_FAILED,
     }
 
@@ -1008,31 +2304,570 @@ def _cleanup_progress_indicators(progress_bar, status_text)
     status_text.empty()
 
 
+def analyse_project_cookbooks(cookbook_path: str, selected_cookbooks: list[str]):
+    """Analyse cookbooks as a project with dependency analysis."""
+    st.subheader("Project-Level Analysis Results")
+
+    progress_bar, status_text = _setup_analysis_progress()
+    results = _perform_cookbook_analysis(
+        cookbook_path, selected_cookbooks, progress_bar, status_text
+    )
+
+    # Perform project-level dependency analysis
+    status_text.text("Analyzing project dependencies...")
+    project_analysis = _analyse_project_dependencies(
+        cookbook_path, selected_cookbooks, results
+    )
+
+    _cleanup_progress_indicators(progress_bar, status_text)
+
+    # Store results in session state
+    st.session_state.analysis_results = results
+    st.session_state.analysis_cookbook_path = cookbook_path
+    st.session_state.total_cookbooks = len(selected_cookbooks)
+    st.session_state.project_analysis = project_analysis
+
+    # Trigger rerun to display results
+    st.rerun()
+
+
+def _analyse_project_dependencies(
+    cookbook_path: str, selected_cookbooks: list[str], individual_results: list
+) -> dict:
+    """Analyze dependencies across all cookbooks in the project."""
+    project_analysis = {
+        "dependency_graph": {},
+        "migration_order": [],
+        "circular_dependencies": [],
+        "project_complexity": "Low",
+        "project_effort_days": 0,
+        "migration_strategy": "phased",
+        "risks": [],
+        "recommendations": [],
+    }
+
+    try:
+        # Build dependency graph
+        dependency_graph = _build_dependency_graph(cookbook_path, selected_cookbooks)
+        project_analysis["dependency_graph"] = dependency_graph
+
+        # Determine migration order using topological sort
+        migration_order = _calculate_migration_order(
+            dependency_graph, individual_results
+        )
+        project_analysis["migration_order"] = migration_order
+
+        # Identify circular dependencies
+        circular_deps = _find_circular_dependencies(dependency_graph)
+        project_analysis["circular_dependencies"] = circular_deps
+
+        # Calculate project-level metrics
+        project_metrics = _calculate_project_metrics(
+            individual_results, dependency_graph
+        )
+        project_analysis.update(project_metrics)
+
+        # Generate project recommendations
+        recommendations = _generate_project_recommendations(
+            project_analysis, individual_results
+        )
+        project_analysis["recommendations"] = recommendations
+
+    except Exception as e:
+        st.warning(f"Project dependency analysis failed: {e}")
+        # Continue with basic analysis
+
+    return project_analysis
+
+
+def _build_dependency_graph(cookbook_path: str, selected_cookbooks: list[str]) -> dict:
+    """Build a dependency graph for all cookbooks in the project."""
+    dependency_graph = {}
+
+    for cookbook_name in selected_cookbooks:
+        cookbook_dir = _find_cookbook_directory(cookbook_path, cookbook_name)
+        if cookbook_dir:
+            try:
+                # Use the existing dependency analysis function
+                dep_analysis = analyse_cookbook_dependencies(str(cookbook_dir))
+                # Parse the markdown response to extract dependencies
+                dependencies = _extract_dependencies_from_markdown(dep_analysis)
+                dependency_graph[cookbook_name] = dependencies
+            except Exception:
+                # If dependency analysis fails, assume no dependencies
+                dependency_graph[cookbook_name] = []
+
+    return dependency_graph
+
+
+def _extract_dependencies_from_markdown(markdown_text: str) -> list[str]:
+    """Extract dependencies from markdown output of analyse_cookbook_dependencies."""
+    dependencies = []
+
+    # Look for the dependency graph section
+    lines = markdown_text.split("\n")
+    in_graph_section = False
+
+    for line in lines:
+        if "## Dependency Graph:" in line:
+            in_graph_section = True
+        elif in_graph_section and line.startswith("##"):
+            break
+        elif in_graph_section and "├──" in line:
+            # Extract dependency name
+            dep_line = line.strip()
+            if "├──" in dep_line:
+                dep_name = dep_line.split("├──")[-1].strip()
+                if dep_name and dep_name != "External dependencies:":
+                    dependencies.append(dep_name)
+
+    return dependencies
+
+
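
The extractor above is a line-oriented scan: it arms itself at the `## Dependency Graph:` heading, disarms at the next `##` heading, and collects the text after each `├──` tree glyph, skipping the literal `External dependencies:` label. A hypothetical report fragment and the names that scan would yield:

```python
sample_report = """\
## Dependency Graph:
├── External dependencies:
├── apt
├── openssl

## Complexity
├── not-collected (the graph section ended at the heading above)
"""

# Walking sample_report line by line as the function does:
#   "├── External dependencies:" matches the glyph but is filtered by name,
#   "apt" and "openssl" are collected,
#   "## Complexity" terminates the section before the last entry.
expected = ["apt", "openssl"]
```
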
+def _calculate_migration_order(
+    dependency_graph: dict, individual_results: list
+) -> list[dict]:
+    """Calculate optimal migration order using topological sort."""
+    order = _perform_topological_sort(dependency_graph)
+
+    # If topological sort failed due to cycles, fall back to complexity-based ordering
+    if len(order) != len(dependency_graph):
+        order = _fallback_migration_order(individual_results)
+
+    # Convert to detailed order with metadata
+    return _build_detailed_migration_order(order, dependency_graph, individual_results)
+
+
+def _perform_topological_sort(dependency_graph: dict) -> list[str]:
+    """Perform topological sort on dependency graph."""
+    visited = set()
+    temp_visited = set()
+    order = []
+
+    def visit(cookbook_name: str) -> bool:
+        if cookbook_name in temp_visited:
+            return False  # Circular dependency detected
+        if cookbook_name in visited:
+            return True
+
+        temp_visited.add(cookbook_name)
+
+        # Visit all dependencies first
+        for dep in dependency_graph.get(cookbook_name, []):
+            if dep in dependency_graph and not visit(dep):
+                return False
+
+        temp_visited.remove(cookbook_name)
+        visited.add(cookbook_name)
+        order.append(cookbook_name)
+        return True
+
+    # Visit all cookbooks
+    for cookbook_name in dependency_graph:
+        if cookbook_name not in visited and not visit(cookbook_name):
+            break  # Circular dependency detected
+
+    return order
+
+
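
`_perform_topological_sort` is a depth-first topological sort in which `temp_visited` tracks the current DFS path, so re-entering a node on that path signals a cycle. Nodes are appended post-order, so dependencies land in `order` before their dependents, and a cycle leaves `order` shorter than the graph, which is exactly the condition `_calculate_migration_order` tests. A self-contained illustration with hypothetical cookbook names:

```python
def topo_sort(graph: dict[str, list[str]]) -> list[str]:
    """Mirror of the sort above: graph maps a cookbook to its dependencies."""
    visited: set[str] = set()
    temp: set[str] = set()
    order: list[str] = []

    def visit(node: str) -> bool:
        if node in temp:
            return False  # back-edge on the current path: cycle
        if node in visited:
            return True
        temp.add(node)
        for dep in graph.get(node, []):
            if dep in graph and not visit(dep):
                return False
        temp.remove(node)
        visited.add(node)
        order.append(node)  # post-order: dependencies first
        return True

    for node in graph:
        if node not in visited and not visit(node):
            break
    return order

acyclic = {"app": ["db", "nginx"], "db": [], "nginx": ["ssl"], "ssl": []}
print(topo_sort(acyclic))  # ['db', 'ssl', 'nginx', 'app']
assert len(topo_sort({"a": ["b"], "b": ["a"]})) < 2  # cycle -> truncated order
```
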
+def _build_detailed_migration_order(
+    order: list[str], dependency_graph: dict, individual_results: list
+) -> list[dict]:
+    """Build detailed migration order with metadata."""
+    detailed_order = []
+    for i, cookbook_name in enumerate(reversed(order), 1):
+        cookbook_result = next(
+            (r for r in individual_results if r["name"] == cookbook_name), None
+        )
+        if cookbook_result:
+            detailed_order.append(
+                {
+                    "phase": i,
+                    "cookbook": cookbook_name,
+                    "complexity": cookbook_result.get("complexity", "Unknown"),
+                    "effort_days": cookbook_result.get("estimated_hours", 0) / 8,
+                    "dependencies": dependency_graph.get(cookbook_name, []),
+                    "reason": _get_migration_reason(cookbook_name, dependency_graph, i),
+                }
+            )
+
+    return detailed_order
+
+
+def _fallback_migration_order(individual_results: list) -> list[str]:
+    """Fallback migration order based on complexity (low to high)."""
+    # Sort by complexity score (ascending) and then by dependencies (fewer first)
+    sorted_results = sorted(
+        individual_results,
+        key=lambda x: (
+            {"Low": 0, "Medium": 1, "High": 2}.get(x.get("complexity", "Medium"), 1),
+            x.get("dependencies", 0),
+        ),
+    )
+    return [r["name"] for r in sorted_results]
+
+
+def _get_migration_reason(
+    cookbook_name: str, dependency_graph: dict, phase: int
+) -> str:
+    """Get the reason for migrating a cookbook at this phase."""
+    dependencies = dependency_graph.get(cookbook_name, [])
+
+    if not dependencies:
+        return "No dependencies - can be migrated early"
+    elif phase == 1:
+        return "Foundation cookbook with minimal dependencies"
+    else:
+        dep_names = ", ".join(dependencies[:3])  # Show first 3 dependencies
+        if len(dependencies) > 3:
+            dep_names += f" and {len(dependencies) - 3} more"
+        return f"Depends on: {dep_names}"
+
+
+def _detect_cycle_dependency(
+    dependency_graph: dict, start: str, current: str, path: list[str]
+) -> list[str] | None:
+    """Detect a cycle in the dependency graph starting from current node."""
+    if current in path:
+        # Found a cycle
+        cycle_start = path.index(current)
+        return path[cycle_start:] + [current]
+
+    path.append(current)
+
+    for dep in dependency_graph.get(current, []):
+        if dep in dependency_graph:  # Only check cookbooks in our project
+            cycle = _detect_cycle_dependency(dependency_graph, start, dep, path)
+            if cycle:
+                return cycle
+
+    path.pop()
+    return None
+
+
+def _find_circular_dependencies(dependency_graph: dict) -> list[dict]:
+    """Find circular dependencies in the dependency graph."""
+    circular_deps = []
+    visited = set()
+
+    for cookbook in dependency_graph:
+        if cookbook not in visited:
+            cycle = _detect_cycle_dependency(dependency_graph, cookbook, cookbook, [])
+            if cycle:
+                circular_deps.append(
+                    {
+                        "cookbooks": cycle,
+                        "type": "circular_dependency",
+                        "severity": "high",
+                    }
+                )
+                # Mark all cycle members as visited to avoid duplicate detection
+                visited.update(cycle)
+
+    return circular_deps
+
+
+def _calculate_project_metrics(
+    individual_results: list, dependency_graph: dict
+) -> dict:
+    """Calculate project-level complexity and effort metrics."""
+    total_effort = sum(
+        r.get("estimated_hours", 0) / 8 for r in individual_results
+    )  # Convert hours to days
+    avg_complexity = (
+        sum(
+            {"Low": 30, "Medium": 50, "High": 80}.get(r.get("complexity", "Medium"), 50)
+            for r in individual_results
+        )
+        / len(individual_results)
+        if individual_results
+        else 50
+    )
+
+    # Determine project complexity
+    if avg_complexity > 70:
+        project_complexity = "High"
+    elif avg_complexity > 40:
+        project_complexity = "Medium"
+    else:
+        project_complexity = "Low"
+
+    # Determine migration strategy based on dependencies and complexity
+    total_dependencies = sum(len(deps) for deps in dependency_graph.values())
+    has_circular_deps = any(
+        len(dependency_graph.get(cb, [])) > 0 for cb in dependency_graph
+    )
+
+    if project_complexity == "High" or total_dependencies > len(individual_results) * 2:
+        migration_strategy = "phased"
+    elif has_circular_deps:
+        migration_strategy = "parallel"
+    else:
+        migration_strategy = "big_bang"
+
+    # Calculate parallel tracks if needed
+    parallel_tracks = 1
+    if migration_strategy == "parallel":
+        parallel_tracks = min(3, max(2, len(individual_results) // 5))
+
+    # Calculate calendar timeline based on strategy
+    # This applies strategy multipliers (phased +10%, big_bang -10%, parallel +5%)
+    timeline_weeks = get_timeline_weeks(total_effort, strategy=migration_strategy)
+
+    return {
+        "project_complexity": project_complexity,
+        "project_effort_days": round(total_effort, 1),
+        "project_timeline_weeks": timeline_weeks,
+        "migration_strategy": migration_strategy,
+        "parallel_tracks": parallel_tracks,
+        "total_dependencies": total_dependencies,
+        "dependency_density": round(total_dependencies / len(individual_results), 2)
+        if individual_results
+        else 0,
+    }
+
+
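
To make the roll-up above concrete: three cookbooks estimated at 16, 24 and 40 hours give a total effort of (16 + 24 + 40) / 8 = 10 person-days, and complexities Low/Medium/High average to (30 + 50 + 80) / 3 ≈ 53.3, which falls in the Medium band (above 40, at most 70). The calendar figure then comes from `get_timeline_weeks`; a rough sketch of the arithmetic its comment describes, assuming a nominal five-day working week (the real implementation lives in `souschef.core.metrics`):

```python
# Illustrative approximation only - not the shipped get_timeline_weeks.
STRATEGY_MULTIPLIER = {"phased": 1.10, "big_bang": 0.90, "parallel": 1.05}

def timeline_weeks_sketch(effort_days: float, strategy: str = "phased") -> float:
    base_weeks = effort_days / 5  # assumed 5 working days per calendar week
    return round(base_weeks * STRATEGY_MULTIPLIER.get(strategy, 1.0), 1)

print(timeline_weeks_sketch(10, "phased"))    # 2.2
print(timeline_weeks_sketch(10, "big_bang"))  # 1.8
```
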
+def _generate_project_recommendations(
+    project_analysis: dict, individual_results: list
+) -> list[str]:
+    """Generate project-level recommendations."""
+    recommendations = []
+
+    strategy = project_analysis.get("migration_strategy", "phased")
+    complexity = project_analysis.get("project_complexity", "Medium")
+    effort_days = project_analysis.get("project_effort_days", 0)
+    circular_deps = project_analysis.get("circular_dependencies", [])
+
+    # Strategy recommendations
+    if strategy == "phased":
+        recommendations.append(
+            "• Use phased migration approach due to project complexity and dependencies"
+        )
+        recommendations.append(
+            "• Start with foundation cookbooks (minimal dependencies) in Phase 1"
+        )
+        recommendations.append("• Migrate dependent cookbooks in subsequent phases")
+    elif strategy == "parallel":
+        tracks = project_analysis.get("parallel_tracks", 2)
+        recommendations.append(
+            f"• Use parallel migration with {tracks} tracks to handle complexity"
+        )
+        recommendations.append("• Assign dedicated teams to each migration track")
+    else:
+        recommendations.append("• Big-bang migration suitable for this project scope")
+
+    # Complexity-based recommendations
+    if complexity == "High":
+        recommendations.append(
+            "• Allocate senior Ansible engineers for complex cookbook conversions"
+        )
+        recommendations.append("• Plan for extensive testing and validation phases")
+    elif complexity == "Medium":
+        recommendations.append(
+            "• Standard engineering team with Ansible experience sufficient"
+        )
+        recommendations.append("• Include peer reviews for quality assurance")
+
+    # Effort-based recommendations
+    if effort_days > 30:
+        recommendations.append("• Consider extending timeline to reduce team pressure")
+        recommendations.append(
+            "• Break migration into 2-week sprints with deliverables"
+        )
+    else:
+        recommendations.append("• Timeline suitable for focused migration effort")
+
+    # Dependency recommendations
+    dependency_density = project_analysis.get("dependency_density", 0)
+    if dependency_density > 2:
+        recommendations.append(
+            "• High dependency density - prioritize dependency resolution"
+        )
+        recommendations.append("• Create shared Ansible roles for common dependencies")
+
+    # Circular dependency warnings
+    if circular_deps:
+        recommendations.append(
+            f"• {len(circular_deps)} circular dependency groups detected"
+        )
+        recommendations.append(
+            "• Resolve circular dependencies before migration begins"
+        )
+        recommendations.append("• Consider refactoring interdependent cookbooks")
+
+    # Team and resource recommendations
+    total_cookbooks = len(individual_results)
+    if total_cookbooks > 10:
+        recommendations.append(
+            "• Large project scope - consider dedicated migration team"
+        )
+    else:
+        recommendations.append("• Project size manageable with existing team capacity")
+
+    return recommendations
+
+
 def _display_analysis_results(results, total_cookbooks):
     """Display the complete analysis results."""
+    # Display stored analysis info messages if available
+    if "analysis_info_messages" in st.session_state:
+        for message in st.session_state.analysis_info_messages:
+            st.info(message)
+    st.success(
+        f"✓ Analysis completed! Analysed {len(results)} cookbook(s) with "
+        f"detailed AI insights."
+    )
+
     # Add a back button to return to analysis selection
-    col1,
+    col1, _ = st.columns([1, 4])
     with col1:
-        if st.button(
+        if st.button(
+            "Analyse More Cookbooks",
+            help="Return to cookbook selection",
+            key="analyse_more",
+        ):
             # Clear session state to go back to selection
             st.session_state.analysis_results = None
             st.session_state.analysis_cookbook_path = None
             st.session_state.total_cookbooks = 0
+            st.session_state.project_analysis = None
             # Clean up temporary directory when going back
             if st.session_state.temp_dir and st.session_state.temp_dir.exists():
                 shutil.rmtree(st.session_state.temp_dir, ignore_errors=True)
                 st.session_state.temp_dir = None
             st.rerun()
 
-
-    st.subheader("Analysis Results")
+    st.subheader("Analysis Results")
 
     _display_analysis_summary(results, total_cookbooks)
+
+    # Display project-level analysis if available
+    if "project_analysis" in st.session_state and st.session_state.project_analysis:
+        _display_project_analysis(st.session_state.project_analysis)
+
     _display_results_table(results)
     _display_detailed_analysis(results)
     _display_download_option(results)
 
 
+def _display_project_analysis(project_analysis: dict):
+    """Display project-level analysis results."""
+    st.subheader("Project-Level Analysis")
+
+    # Project metrics
+    col1, col2, col3, col4 = st.columns(4)
+
+    with col1:
+        st.metric(
+            "Project Complexity", project_analysis.get("project_complexity", "Unknown")
+        )
+
+    with col2:
+        effort_days = project_analysis.get("project_effort_days", 0)
+        timeline_weeks = project_analysis.get("project_timeline_weeks", 2)
+        effort_hours = effort_days * 8
+        st.metric(
+            "Total Effort",
+            f"{effort_hours:.0f} hours ({timeline_weeks} weeks calendar time)",
+        )
+
+    with col3:
+        strategy = (
+            project_analysis.get("migration_strategy", "phased")
+            .replace("_", " ")
+            .title()
+        )
+        st.metric("Migration Strategy", strategy)
+
+    with col4:
+        dependencies = project_analysis.get("total_dependencies", 0)
+        st.metric("Total Dependencies", dependencies)
+
+    # Migration order
+    if project_analysis.get("migration_order"):
+        st.subheader("Recommended Migration Order")
+
+        migration_df = pd.DataFrame(project_analysis["migration_order"])
+        migration_df = migration_df.rename(
+            columns={
+                "phase": "Phase",
+                "cookbook": "Cookbook",
+                "complexity": "Complexity",
+                "effort_days": "Effort (Days)",
+                "dependencies": "Dependencies",
+                "reason": "Migration Reason",
+            }
+        )
+
+        st.dataframe(migration_df, width="stretch")
+
+    # Dependency graph visualization
+    if project_analysis.get("dependency_graph"):
+        with st.expander("Dependency Graph"):
+            _display_dependency_graph(project_analysis["dependency_graph"])
+
+    # Circular dependencies warning
+    if project_analysis.get("circular_dependencies"):
+        st.warning("Circular Dependencies Detected")
+        for circ in project_analysis["circular_dependencies"]:
+            cookbooks = " → ".join(circ["cookbooks"])
+            st.write(f"**Cycle:** {cookbooks}")
+
+    # Effort explanation
+    with st.expander("Effort vs Timeline"):
+        effort_days = project_analysis.get("project_effort_days", 0)
+        effort_hours = effort_days * 8
+        timeline_weeks = project_analysis.get("project_timeline_weeks", 2)
+        strategy = (
+            project_analysis.get("migration_strategy", "phased")
+            .replace("_", " ")
+            .title()
+        )
+        explanation = (
+            f"**Effort**: {effort_hours:.0f} hours ({effort_days:.1f} person-days) "
+            f"of actual work\n\n"
+            f"**Calendar Timeline**: {timeline_weeks} weeks\n\n"
+            f"**Strategy**: {strategy}\n\n"
+            f"The difference between effort and timeline accounts for:\n"
+            f"• Phased approach adds ~10% overhead for testing between phases\n"
+            f"• Parallel execution allows some tasks to overlap\n"
+            f"• Dependency constraints may extend the critical path\n"
+            f"• Team coordination and integration time"
+        )
+        st.write(explanation)
+
+    # Project recommendations
+    if project_analysis.get("recommendations"):
+        with st.expander("Project Recommendations"):
+            for rec in project_analysis["recommendations"]:
+                st.write(rec)
+
+
+def _display_dependency_graph(dependency_graph: dict):
+    """Display a visual representation of the dependency graph."""
+    st.write("**Cookbook Dependencies:**")
+
+    for cookbook, deps in dependency_graph.items():
+        if deps:
+            deps_str = ", ".join(deps)
+            st.write(f"• **{cookbook}** depends on: {deps_str}")
+        else:
+            st.write(f"• **{cookbook}** (no dependencies)")
+
+    # Show dependency statistics
+    total_deps = sum(len(deps) for deps in dependency_graph.values())
+    cookbooks_with_deps = sum(1 for deps in dependency_graph.values() if deps)
+    isolated_cookbooks = len(dependency_graph) - cookbooks_with_deps
+
+    st.write(f"""
+    **Dependency Statistics:**
+    - Total dependencies: {total_deps}
+    - Cookbooks with dependencies: {cookbooks_with_deps}
+    - Independent cookbooks: {isolated_cookbooks}
+    - Average dependencies per cookbook: {total_deps / len(dependency_graph):.1f}
+    """)
+
+
 def _display_download_option(results):
     """Display download options for analysis results."""
     st.subheader("Download Options")
@@ -1062,6 +2897,7 @@ def _display_download_option(results)
         "Convert to Ansible Playbooks",
         type="primary",
         help="Convert analysed cookbooks to Ansible playbooks and download as ZIP",
+        key="convert_to_ansible_playbooks",
     ):
         # Check AI configuration status
         ai_config = load_ai_settings()
@@ -1074,10 +2910,10 @@ def _display_download_option(results)
     if ai_available:
         provider = ai_config.get("provider", "Unknown")
         model = ai_config.get("model", "Unknown")
-        st.info(f"
+        st.info(f"Using AI-enhanced conversion with {provider} ({model})")
     else:
         st.info(
-            "
+            "Using deterministic conversion. Configure AI settings "
             "for enhanced results."
         )
 
@@ -1114,26 +2950,127 @@ def _display_detailed_analysis(results)
     """Display detailed analysis for each cookbook."""
     st.subheader("Detailed Analysis")
 
-    for
-
+    successful_results = [r for r in results if r["status"] == ANALYSIS_STATUS_ANALYSED]
+    failed_results = [r for r in results if r["status"] == ANALYSIS_STATUS_FAILED]
+
+    if successful_results:
+        st.markdown("### Successfully Analysed Cookbooks")
+        for result in successful_results:
             _display_single_cookbook_details(result)
 
+    if failed_results:
+        st.markdown("### Failed Analysis Cookbooks")
+        for result in failed_results:
+            _display_failed_cookbook_details(result)
 
-def _display_single_cookbook_details(result):
-    """Display detailed analysis for a single cookbook."""
-    with st.expander(f"{result['name']} - {result['complexity']} Complexity"):
-        col1, col2 = st.columns(2)
 
+def _validate_cookbook_structure(cookbook_dir: Path) -> dict:
+    """Validate the basic structure of a cookbook for analysis."""
+    validation = {}
+
+    # Check if directory exists
+    validation["Cookbook directory exists"] = (
+        cookbook_dir.exists() and cookbook_dir.is_dir()
+    )
+
+    if not validation["Cookbook directory exists"]:
+        return validation
+
+    # Check metadata.rb
+    metadata_file = cookbook_dir / METADATA_FILENAME
+    validation["metadata.rb exists"] = metadata_file.exists()
+
+    # Check recipes directory
+    recipes_dir = cookbook_dir / "recipes"
+    validation["recipes/ directory exists"] = (
+        recipes_dir.exists() and recipes_dir.is_dir()
+    )
+
+    if validation["recipes/ directory exists"]:
+        recipe_files = list(recipes_dir.glob("*.rb"))
+        validation["Has recipe files"] = len(recipe_files) > 0
+        validation["Has default.rb recipe"] = (recipes_dir / "default.rb").exists()
+    else:
+        validation["Has recipe files"] = False
+        validation["Has default.rb recipe"] = False
+
+    # Check for common cookbook directories
+    common_dirs = ["attributes", "templates", "files", "libraries", "definitions"]
+    for dir_name in common_dirs:
+        dir_path = cookbook_dir / dir_name
+        validation[f"{dir_name}/ directory exists"] = (
+            dir_path.exists() and dir_path.is_dir()
+        )
+
+    return validation
+
+
+def _display_single_cookbook_details(result):
+    """Display detailed information for a successfully analysed cookbook."""
+    with st.expander(f"{result['name']} - Analysis Complete", expanded=True):
+        # Basic information
+        col1, col2, col3 = st.columns(3)
         with col1:
-            st.
-
-            st.
+            st.metric("Version", result.get("version", "Unknown"))
+        with col2:
+            st.metric("Maintainer", result.get("maintainer", "Unknown"))
+        with col3:
+            st.metric("Dependencies", result.get("dependencies", 0))
 
+        # Complexity and effort
+        col1, col2 = st.columns(2)
+        with col1:
+            complexity = result.get("complexity", "Unknown")
+            if complexity == "High":
+                st.metric("Complexity", complexity, delta="High")
+            elif complexity == "Medium":
+                st.metric("Complexity", complexity, delta="Medium")
+            else:
+                st.metric("Complexity", complexity, delta="Low")
        with col2:
-
-            st.
+            hours = result.get("estimated_hours", 0)
+            st.metric("Estimated Hours", f"{hours:.1f}")
+
+        # Path
+        st.write(f"**Cookbook Path:** {result['path']}")
+
+        # Recommendations
+        if result.get("recommendations"):
+            st.markdown("**Analysis Recommendations:**")
+            st.info(result["recommendations"])
+
+
+def _display_failed_cookbook_details(result):
+    """Display detailed failure information for a cookbook."""
+    with st.expander(f"{result['name']} - Analysis Failed", expanded=True):
+        st.error(f"**Analysis Error:** {result['recommendations']}")
+
+        # Show cookbook path
+        st.write(f"**Cookbook Path:** {result['path']}")
+
+        # Try to show some basic validation info
+        cookbook_dir = Path(result["path"])
+        validation_info = _validate_cookbook_structure(cookbook_dir)
+
+        if validation_info:
+            st.markdown("**Cookbook Structure Validation:**")
+            for check, status in validation_info.items():
+                icon = "✓" if status else "✗"
+                st.write(f"{icon} {check}")
+
+        # Suggest fixes
+        st.markdown("**Suggested Fixes:**")
+        st.markdown("""
+        - Check if `metadata.rb` exists and is valid Ruby syntax
+        - Ensure `recipes/` directory exists with at least one `.rb` file
+        - Verify cookbook dependencies are properly declared
+        - Check for syntax errors in recipe files
+        - Ensure the cookbook follows standard Chef structure
+        """)
 
-
+        # Show raw error details in a collapsible section
+        with st.expander("Technical Error Details"):
+            st.code(result["recommendations"], language="text")
 
 
 def _convert_and_download_playbooks(results):
@@ -1144,21 +3081,42 @@ def _convert_and_download_playbooks(results)
         st.warning("No successfully analysed cookbooks to convert.")
         return
 
+    # Get project recommendations from session state
+    project_recommendations = None
+    if "project_analysis" in st.session_state and st.session_state.project_analysis:
+        project_recommendations = st.session_state.project_analysis
+
     with st.spinner("Converting cookbooks to Ansible playbooks..."):
         playbooks = []
+        templates = []
 
         for result in successful_results:
-
-
-
+            # _convert_single_cookbook now returns tuple of (playbooks, templates)
+            cookbook_playbooks, cookbook_templates = _convert_single_cookbook(
+                result, project_recommendations
+            )
+            if cookbook_playbooks:
+                playbooks.extend(cookbook_playbooks)
+            if cookbook_templates:
+                templates.extend(cookbook_templates)
+
+        st.info(
+            f"Total: {len(playbooks)} playbook(s) and {len(templates)} "
+            f"template(s) ready for download"
+        )
 
         if playbooks:
             # Save converted playbooks to temporary directory for validation
             try:
                 output_dir = Path(tempfile.mkdtemp(prefix="souschef_converted_"))
-
-                #
-
+                with contextlib.suppress(FileNotFoundError, OSError):
+                    output_dir.chmod(0o700)  # Secure permissions: rwx------
+                for _i, playbook in enumerate(playbooks):
+                    # Sanitize filename - include recipe name to avoid conflicts
+                    recipe_name = playbook["recipe_file"].replace(".rb", "")
+                    cookbook_name = _sanitize_filename(playbook["cookbook_name"])
+                    recipe_name = _sanitize_filename(recipe_name)
+                    filename = f"{cookbook_name}_{recipe_name}.yml"
                     (output_dir / filename).write_text(playbook["playbook_content"])
 
                 # Store path in session state for validation page
@@ -1167,72 +3125,165 @@ def _convert_and_download_playbooks(results):
        except Exception as e:
            st.warning(f"Could not stage playbooks for validation: {e}")

-    _handle_playbook_download(playbooks)
+    _handle_playbook_download(playbooks, templates)
+
+
+def _convert_single_cookbook(
+    result: dict, project_recommendations: dict | None = None
+) -> tuple[list, list]:
+    """
+    Convert entire cookbook (all recipes) to Ansible playbooks.
+
+    Args:
+        result: Cookbook analysis result.
+        project_recommendations: Optional project recommendations.

+    Returns:
+        Tuple of (playbooks list, templates list).

-
-    """Convert a single cookbook to Ansible playbook."""
+    """
    cookbook_dir = Path(result["path"])
-
+    recipes_dir = cookbook_dir / "recipes"

-    if not
-
+    if not recipes_dir.exists():
+        st.warning(f"No recipes directory found in {result['name']}")
+        return [], []

-
-
-
-
-        ai_config.get("provider")
-        and ai_config.get("provider") != LOCAL_PROVIDER
-        and ai_config.get("api_key")
-    )
+    recipe_files = list(recipes_dir.glob("*.rb"))
+    if not recipe_files:
+        st.warning(f"No recipe files found in {result['name']}")
+        return [], []

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    # Convert recipes
+    converted_playbooks = _convert_recipes(
+        result["name"], recipe_files, project_recommendations
+    )
+
+    # Convert templates
+    converted_templates = _convert_templates(result["name"], cookbook_dir)
+
+    return converted_playbooks, converted_templates
+
+
|
+
def _convert_recipes(
|
|
3169
|
+
cookbook_name: str, recipe_files: list, project_recommendations: dict | None
|
|
3170
|
+
) -> list:
|
|
3171
|
+
"""
|
|
3172
|
+
Convert all recipes in a cookbook.
|
|
1205
3173
|
|
|
1206
|
-
|
|
1207
|
-
|
|
1208
|
-
|
|
1209
|
-
|
|
1210
|
-
|
|
1211
|
-
|
|
1212
|
-
|
|
1213
|
-
|
|
1214
|
-
|
|
3174
|
+
Args:
|
|
3175
|
+
cookbook_name: Name of the cookbook.
|
|
3176
|
+
recipe_files: List of recipe file paths.
|
|
3177
|
+
project_recommendations: Optional project recommendations.
|
|
3178
|
+
|
|
3179
|
+
Returns:
|
|
3180
|
+
List of converted playbooks.
|
|
3181
|
+
|
|
3182
|
+
"""
|
|
3183
|
+
ai_config = load_ai_settings()
|
|
3184
|
+
provider_name = _get_ai_provider(ai_config)
|
|
3185
|
+
use_ai = (
|
|
3186
|
+
provider_name and provider_name != LOCAL_PROVIDER and ai_config.get("api_key")
|
|
3187
|
+
)
|
|
3188
|
+
|
|
3189
|
+
provider_mapping = {
|
|
3190
|
+
ANTHROPIC_CLAUDE_DISPLAY: "anthropic",
|
|
3191
|
+
ANTHROPIC_PROVIDER: "anthropic",
|
|
3192
|
+
"OpenAI": "openai",
|
|
3193
|
+
OPENAI_PROVIDER: "openai",
|
|
3194
|
+
IBM_WATSONX: "watson",
|
|
3195
|
+
RED_HAT_LIGHTSPEED: "lightspeed",
|
|
3196
|
+
}
|
|
3197
|
+
ai_provider = provider_mapping.get(
|
|
3198
|
+
provider_name,
|
|
3199
|
+
provider_name.lower().replace(" ", "_") if provider_name else "anthropic",
|
|
3200
|
+
)
|
|
3201
|
+
|
|
3202
|
+
converted_playbooks = []
|
|
3203
|
+
api_key = _get_ai_string_value(ai_config, "api_key", "")
|
|
3204
|
+
model = _get_ai_string_value(ai_config, "model", "claude-3-5-sonnet-20241022")
|
|
3205
|
+
temperature = _get_ai_float_value(ai_config, "temperature", 0.7)
|
|
3206
|
+
max_tokens = _get_ai_int_value(ai_config, "max_tokens", 4000)
|
|
3207
|
+
project_id = _get_ai_string_value(ai_config, "project_id", "")
|
|
3208
|
+
base_url = _get_ai_string_value(ai_config, "base_url", "")
|
|
3209
|
+
|
|
3210
|
+
for recipe_file in recipe_files:
|
|
3211
|
+
try:
|
|
3212
|
+
if use_ai:
|
|
3213
|
+
playbook_content = generate_playbook_from_recipe_with_ai(
|
|
3214
|
+
str(recipe_file),
|
|
3215
|
+
ai_provider=ai_provider,
|
|
3216
|
+
api_key=api_key,
|
|
3217
|
+
model=model,
|
|
3218
|
+
temperature=temperature,
|
|
3219
|
+
max_tokens=max_tokens,
|
|
3220
|
+
project_id=project_id,
|
|
3221
|
+
base_url=base_url,
|
|
3222
|
+
project_recommendations=project_recommendations,
|
|
3223
|
+
)
|
|
3224
|
+
else:
|
|
3225
|
+
playbook_content = generate_playbook_from_recipe(str(recipe_file))
|
|
3226
|
+
|
|
3227
|
+
if not playbook_content.startswith("Error"):
|
|
3228
|
+
converted_playbooks.append(
|
|
3229
|
+
{
|
|
3230
|
+
"cookbook_name": cookbook_name,
|
|
3231
|
+
"playbook_content": playbook_content,
|
|
3232
|
+
"recipe_file": recipe_file.name,
|
|
3233
|
+
"conversion_method": "AI-enhanced"
|
|
3234
|
+
if use_ai
|
|
3235
|
+
else "Deterministic",
|
|
3236
|
+
}
|
|
3237
|
+
)
|
|
3238
|
+
else:
|
|
3239
|
+
st.warning(f"Failed to convert {recipe_file.name}: {playbook_content}")
|
|
3240
|
+
except Exception as e:
|
|
3241
|
+
st.warning(f"Failed to convert {recipe_file.name}: {e}")
|
|
3242
|
+
|
|
3243
|
+
return converted_playbooks
|
|
3244
|
+
|
|
3245
|
+
|
|
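Reviewer note: the provider lookup tolerates display names missing from the table by lowercasing and underscore-joining them, and defaults to "anthropic" when no provider is configured. A minimal standalone sketch of just that fallback expression (the display name is hypothetical):

    provider_mapping = {"Anthropic (Claude)": "anthropic", "OpenAI (GPT)": "openai"}
    provider_name = "Some New Provider"  # hypothetical unmapped display name
    ai_provider = provider_mapping.get(
        provider_name,
        provider_name.lower().replace(" ", "_") if provider_name else "anthropic",
    )
    # -> "some_new_provider"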
+def _convert_templates(cookbook_name: str, cookbook_dir: Path) -> list:
+    """
+    Convert all templates in a cookbook.
+
+    Args:
+        cookbook_name: Name of the cookbook.
+        cookbook_dir: Path to cookbook directory.
+
+    Returns:
+        List of converted templates.
+
+    """
+    converted_templates = []
+    template_results = convert_cookbook_templates(str(cookbook_dir))
+
+    if template_results.get("success"):
+        for template_result in template_results.get("results", []):
+            if template_result["success"]:
+                converted_templates.append(
+                    {
+                        "cookbook_name": cookbook_name,
+                        "template_content": template_result["jinja2_content"],
+                        "template_file": Path(template_result["jinja2_file"]).name,
+                        "original_file": Path(template_result["original_file"]).name,
+                        "variables": template_result["variables"],
+                    }
+                )
+        if converted_templates:
+            st.info(
+                f"Converted {len(converted_templates)} template(s) from {cookbook_name}"
            )
-
-
-
-
-
-        return {
-            "cookbook_name": result["name"],
-            "playbook_content": playbook_content,
-            "recipe_file": recipe_file.name,
-            "conversion_method": "AI-enhanced" if use_ai else "Deterministic",
-        }
-    else:
-        st.warning(f"Failed to convert {result['name']}: {playbook_content}")
-        return None
-    except Exception as e:
-        st.warning(f"Failed to convert {result['name']}: {e}")
-        return None
+    elif not template_results.get("message"):
+        st.warning(
+            f"Template conversion failed for {cookbook_name}: "
+            f"{template_results.get('error', 'Unknown error')}"
+        )

+    return converted_templates

-
+
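Reviewer note: judging only by the keys this function reads, `convert_cookbook_templates` appears to return a dict shaped roughly as below. This shape is inferred from the diff, not confirmed against the converter's own documentation, and all values shown are hypothetical:

    template_results = {
        "success": True,
        "results": [
            {
                "success": True,
                "jinja2_content": "listen {{ port }};",
                "jinja2_file": "templates/nginx.conf.j2",
                "original_file": "templates/default/nginx.conf.erb",
                "variables": ["port"],
            }
        ],
        # on failure, inferred keys: "success": False plus "error" and/or "message"
    }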
+
+def _find_recipe_file(cookbook_dir: Path, cookbook_name: str) -> Path | None:
    """Find the appropriate recipe file for a cookbook."""
    recipes_dir = cookbook_dir / "recipes"
    if not recipes_dir.exists():
@@ -1249,85 +3300,207 @@ def _find_recipe_file(cookbook_dir, cookbook_name):
    return default_recipe if default_recipe.exists() else recipe_files[0]


-def _handle_playbook_download(playbooks):
+def _handle_playbook_download(playbooks: list, templates: list | None = None) -> None:
    """Handle the download of generated playbooks."""
    if not playbooks:
        st.error("No playbooks were successfully generated.")
        return

-
-    playbook_archive = _create_playbook_archive(playbooks)
+    templates = templates or []
+    playbook_archive = _create_playbook_archive(playbooks, templates)

+    # Display success and statistics
+    unique_cookbooks = len({p["cookbook_name"] for p in playbooks})
+    template_count = len(templates)
    st.success(
-        f"Successfully converted {
+        f"Successfully converted {unique_cookbooks} cookbook(s) with "
+        f"{len(playbooks)} recipe(s) and {template_count} template(s) to Ansible!"
    )

+    # Show summary
+    _display_playbook_summary(len(playbooks), template_count)
+
    # Provide download button
+    _display_download_button(len(playbooks), template_count, playbook_archive)
+
+    # Show previews
+    _display_playbook_previews(playbooks)
+    _display_template_previews(templates)
+
+
+def _display_playbook_summary(playbook_count: int, template_count: int) -> None:
+    """Display summary of archive contents."""
+    if template_count > 0:
+        st.info(
+            f"Archive includes:\n"
+            f"- {playbook_count} playbook files (.yml)\n"
+            f"- {template_count} template files (.j2)\n"
+            f"- README.md with conversion details"
+        )
+    else:
+        st.info(
+            f"Archive includes:\n"
+            f"- {playbook_count} playbook files (.yml)\n"
+            f"- README.md with conversion details\n"
+            f"- Note: No templates were found in the converted cookbooks"
+        )
+
+
+def _display_download_button(
+    playbook_count: int, template_count: int, archive_data: bytes
+) -> None:
+    """Display the download button for the archive."""
+    download_label = f"Download Ansible Playbooks ({playbook_count} playbooks"
+    if template_count > 0:
+        download_label += f", {template_count} templates"
+    download_label += ")"
+
    st.download_button(
-        label=
-        data=
+        label=download_label,
+        data=archive_data,
        file_name="ansible_playbooks.zip",
        mime="application/zip",
-        help="Download ZIP archive containing
+        help=f"Download ZIP archive containing {playbook_count} playbooks "
+        f"and {template_count} templates",
    )

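Reviewer note: the download label is built incrementally, so the template clause only appears when templates exist. A sketch of the same composition with hypothetical counts:

    playbook_count, template_count = 3, 2  # hypothetical
    label = f"Download Ansible Playbooks ({playbook_count} playbooks"
    if template_count > 0:
        label += f", {template_count} templates"
    label += ")"
    # -> "Download Ansible Playbooks (3 playbooks, 2 templates)"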
-
-
+
+def _display_playbook_previews(playbooks: list) -> None:
+    """Display preview of generated playbooks."""
+    with st.expander("Preview Generated Playbooks", expanded=True):
        for playbook in playbooks:
            conversion_badge = (
-                "
+                "AI-Enhanced"
                if playbook.get("conversion_method") == "AI-enhanced"
-                else "
+                else "Deterministic"
            )
            st.subheader(
                f"{playbook['cookbook_name']} ({conversion_badge}) - "
                f"from {playbook['recipe_file']}"
            )
-
-
-
-
-
+            content = playbook["playbook_content"]
+            preview = content[:1000] + "..." if len(content) > 1000 else content
+            st.code(preview, language="yaml")
+            st.divider()
+
+
+def _display_template_previews(templates: list) -> None:
+    """Display preview of converted templates."""
+    if not templates:
+        return
+
+    with st.expander(
+        f"Preview Converted Templates ({len(templates)} templates)", expanded=True
+    ):
+        for template in templates:
+            st.subheader(
+                f"{template['cookbook_name']}/templates/{template['template_file']}"
            )
+            st.caption(f"Converted from: {template['original_file']}")
+
+            # Show extracted variables
+            if template.get("variables"):
+                with st.container():
+                    st.write("**Variables used in template:**")
+                    st.code(", ".join(template["variables"]), language="text")
+
+            # Show template content preview
+            content = template["template_content"]
+            preview = content[:500] + "..." if len(content) > 500 else content
+            st.code(preview, language="jinja2")
            st.divider()


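Reviewer note: both preview helpers truncate content before handing it to `st.code` (1000 characters for playbooks, 500 for templates). The truncation expression in isolation, with a hypothetical body:

    content = "x" * 1200  # hypothetical playbook body
    preview = content[:1000] + "..." if len(content) > 1000 else content
    assert len(preview) == 1003  # 1000 characters plus the "..." marker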
-def _create_playbook_archive(playbooks):
-    """Create a ZIP archive containing all generated Ansible playbooks."""
+def _create_playbook_archive(playbooks, templates=None):
+    """Create a ZIP archive containing all generated Ansible playbooks and templates."""
    zip_buffer = io.BytesIO()
+    templates = templates or []

    with zipfile.ZipFile(zip_buffer, "w", zipfile.ZIP_DEFLATED) as zip_file:
-        #
+        # Organize playbooks by cookbook in subdirectories
        for playbook in playbooks:
-
+            # Create cookbook directory structure with sanitised names
+            cookbook_name = _sanitize_filename(playbook["cookbook_name"])
+            recipe_name = _sanitize_filename(playbook["recipe_file"].replace(".rb", ""))
+            playbook_filename = f"{cookbook_name}/{recipe_name}.yml"
            zip_file.writestr(playbook_filename, playbook["playbook_content"])

+        # Add converted templates
+        for template in templates:
+            cookbook_name = _sanitize_filename(template["cookbook_name"])
+            template_filename = _sanitize_filename(template["template_file"])
+            archive_path = f"{cookbook_name}/templates/{template_filename}"
+            zip_file.writestr(archive_path, template["template_content"])
+
+        # Count unique cookbooks
+        unique_cookbooks = len({p["cookbook_name"] for p in playbooks})
+        template_count = len(templates)
+
        # Add a summary README
        readme_content = f"""# Ansible Playbooks Generated by SousChef

-This archive contains {len(playbooks)} Ansible playbooks
+This archive contains {len(playbooks)} Ansible playbooks and {template_count} """
+        readme_content += f"templates from {unique_cookbooks} cookbook(s) "
+        readme_content += "converted from Chef."
+
+        readme_content += """

## Contents:
"""

+        # Group by cookbook for README
+        from collections import defaultdict
+
+        by_cookbook = defaultdict(list)
        for playbook in playbooks:
-
+            by_cookbook[playbook["cookbook_name"]].append(playbook)
+
+        # Group templates by cookbook
+        by_cookbook_templates = defaultdict(list)
+        for template in templates:
+            by_cookbook_templates[template["cookbook_name"]].append(template)
+
+        for cookbook_name, cookbook_playbooks in sorted(by_cookbook.items()):
+            cookbook_templates = by_cookbook_templates.get(cookbook_name, [])
+            # Sanitise cookbook name for display in README
+            safe_cookbook_name = _sanitize_filename(cookbook_name)
            readme_content += (
-                f"
-                f"(
-                f"
+                f"\n### {safe_cookbook_name}/ "
+                f"({len(cookbook_playbooks)} recipes, "
+                f"{len(cookbook_templates)} templates)\n"
            )
+            for playbook in cookbook_playbooks:
+                conversion_method = playbook.get("conversion_method", "Deterministic")
+                recipe_name = playbook["recipe_file"].replace(".rb", "")
+                safe_recipe_name = _sanitize_filename(recipe_name)
+                readme_content += (
+                    f"  - {safe_recipe_name}.yml "
+                    f"(from {playbook['recipe_file']}, "
+                    f"{conversion_method})\n"
+                )
+            if cookbook_templates:
+                readme_content += "  - templates/\n"
+                for template in cookbook_templates:
+                    safe_template_name = _sanitize_filename(template["template_file"])
+                    readme_content += (
+                        f"    - {safe_template_name} "
+                        f"(from {template['original_file']})\n"
+                    )

        readme_content += """

## Usage:
Run these playbooks with Ansible:
-ansible-playbook <
+ansible-playbook <cookbook_name>/<recipe_name>.yml

## Notes:
- These playbooks were automatically generated from Chef recipes
+- Templates have been converted from ERB to Jinja2 format
+- Each cookbook's recipes and templates are organized in separate directories
+- Review and test before deploying to production
- Review and test the playbooks before using in production
-- Some manual adjustments may be required for complex recipes
+- Some manual adjustments may be required for complex recipes or templates
+- Verify that template variables are correctly mapped from Chef to Ansible
"""

        zip_file.writestr("README.md", readme_content)
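Reviewer note: a hedged end-to-end sketch of the resulting archive layout. It assumes `_create_playbook_archive` returns the ZIP as bytes (the return statement sits outside this hunk) and uses hypothetical playbook/template dicts carrying only the keys the function reads:

    import io
    import zipfile

    playbooks = [{
        "cookbook_name": "nginx",
        "recipe_file": "default.rb",
        "playbook_content": "---\n- hosts: all\n",
        "conversion_method": "Deterministic",
    }]  # hypothetical
    templates = [{
        "cookbook_name": "nginx",
        "template_file": "nginx.conf.j2",
        "original_file": "nginx.conf.erb",
        "template_content": "listen {{ port }};",
    }]  # hypothetical
    archive = _create_playbook_archive(playbooks, templates)
    with zipfile.ZipFile(io.BytesIO(archive)) as zf:
        print(zf.namelist())
    # expected: ['nginx/default.yml', 'nginx/templates/nginx.conf.j2', 'README.md']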
@@ -1354,7 +3527,3 @@ def _create_analysis_report(results):
    }

    return json.dumps(report, indent=2)
-
-
-if __name__ == "__main__":
-    show_cookbook_analysis_page()