mcp-souschef 2.2.0__py3-none-any.whl → 2.8.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {mcp_souschef-2.2.0.dist-info → mcp_souschef-2.8.0.dist-info}/METADATA +226 -38
- mcp_souschef-2.8.0.dist-info/RECORD +42 -0
- mcp_souschef-2.8.0.dist-info/entry_points.txt +4 -0
- souschef/__init__.py +10 -2
- souschef/assessment.py +113 -30
- souschef/ci/__init__.py +11 -0
- souschef/ci/github_actions.py +379 -0
- souschef/ci/gitlab_ci.py +299 -0
- souschef/ci/jenkins_pipeline.py +343 -0
- souschef/cli.py +605 -5
- souschef/converters/__init__.py +2 -2
- souschef/converters/cookbook_specific.py +125 -0
- souschef/converters/cookbook_specific.py.backup +109 -0
- souschef/converters/playbook.py +853 -15
- souschef/converters/resource.py +103 -1
- souschef/core/constants.py +13 -0
- souschef/core/path_utils.py +12 -9
- souschef/core/validation.py +35 -2
- souschef/deployment.py +29 -27
- souschef/filesystem/operations.py +0 -7
- souschef/parsers/__init__.py +6 -1
- souschef/parsers/attributes.py +397 -32
- souschef/parsers/inspec.py +343 -18
- souschef/parsers/metadata.py +30 -0
- souschef/parsers/recipe.py +48 -10
- souschef/server.py +429 -178
- souschef/ui/__init__.py +8 -0
- souschef/ui/app.py +2998 -0
- souschef/ui/health_check.py +36 -0
- souschef/ui/pages/ai_settings.py +497 -0
- souschef/ui/pages/cookbook_analysis.py +1360 -0
- mcp_souschef-2.2.0.dist-info/RECORD +0 -31
- mcp_souschef-2.2.0.dist-info/entry_points.txt +0 -4
- {mcp_souschef-2.2.0.dist-info → mcp_souschef-2.8.0.dist-info}/WHEEL +0 -0
- {mcp_souschef-2.2.0.dist-info → mcp_souschef-2.8.0.dist-info}/licenses/LICENSE +0 -0
souschef/ui/app.py
ADDED
|
@@ -0,0 +1,2998 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Visual Migration Planning Interface for SousChef.
|
|
3
|
+
|
|
4
|
+
A Streamlit-based web interface for Chef to Ansible migration planning,
|
|
5
|
+
assessment, and visualization.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
import contextlib
|
|
9
|
+
import logging
|
|
10
|
+
import sys
|
|
11
|
+
from pathlib import Path
|
|
12
|
+
|
|
13
|
+
import streamlit as st
|
|
14
|
+
|
|
15
|
+
# Configure logging to stdout for Docker visibility
|
|
16
|
+
logging.basicConfig(
|
|
17
|
+
level=logging.INFO,
|
|
18
|
+
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
|
|
19
|
+
stream=sys.stdout,
|
|
20
|
+
force=True, # Override any existing configuration
|
|
21
|
+
)
|
|
22
|
+
|
|
23
|
+
logger = logging.getLogger(__name__)
|
|
24
|
+
logger.info("Starting SousChef UI application")
|
|
25
|
+
|
|
26
|
+
# Add the parent directory to the path so we can import souschef modules
|
|
27
|
+
app_path = Path(__file__).parent.parent
|
|
28
|
+
if str(app_path) not in sys.path:
|
|
29
|
+
sys.path.insert(0, str(app_path))
|
|
30
|
+
|
|
31
|
+
# Import page modules
|
|
32
|
+
from souschef.ui.pages.ai_settings import show_ai_settings_page # noqa: E402
|
|
33
|
+
from souschef.ui.pages.cookbook_analysis import ( # noqa: E402
|
|
34
|
+
show_cookbook_analysis_page,
|
|
35
|
+
)
|
|
36
|
+
|
|
37
|
+
# Constants for repeated strings
|
|
38
|
+
NAV_MIGRATION_PLANNING = "Migration Planning"
|
|
39
|
+
NAV_DEPENDENCY_MAPPING = "Dependency Mapping"
|
|
40
|
+
NAV_VALIDATION_REPORTS = "Validation Reports"
|
|
41
|
+
MIME_TEXT_MARKDOWN = "text/markdown"
|
|
42
|
+
MIME_APPLICATION_JSON = "application/json"
|
|
43
|
+
SECTION_CIRCULAR_DEPENDENCIES = "Circular Dependencies"
|
|
44
|
+
NAV_COOKBOOK_ANALYSIS = "Cookbook Analysis"
|
|
45
|
+
NAV_AI_SETTINGS = "AI Settings"
|
|
46
|
+
BUTTON_ANALYSE_DEPENDENCIES = "Analyse Dependencies"
|
|
47
|
+
SECTION_COMMUNITY_COOKBOOKS = "Community Cookbooks"
|
|
48
|
+
SECTION_COMMUNITY_COOKBOOKS_HEADER = "Community Cookbooks:"
|
|
49
|
+
INPUT_METHOD_DIRECTORY_PATH = "Directory Path"
|
|
50
|
+
SCOPE_BEST_PRACTICES = "Best Practices"
|
|
51
|
+
ERROR_MSG_ENTER_PATH = "Please enter a path to validate."
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
def health_check():
    """Return simple health check endpoint for Docker."""
    status_payload = {"status": "healthy", "service": "souschef-ui"}
    return status_payload
|
|
57
|
+
|
|
58
|
+
|
|
59
|
+
class ProgressTracker:
    """Track progress for long-running operations."""

    def __init__(self, total_steps=100, description="Processing..."):
        self.total_steps = total_steps
        self.current_step = 0
        self.description = description
        self.progress_bar = st.progress(0)
        self.status_text = st.empty()

    def update(self, step=None, description=None):
        """Update progress."""
        # Absolute step if given, otherwise advance by one; never exceed total.
        proposed = self.current_step + 1 if step is None else step
        self.current_step = min(proposed, self.total_steps)

        if description:
            self.description = description

        fraction = min(self.current_step / self.total_steps, 1.0)
        self.progress_bar.progress(fraction)
        label = f"{self.description} ({self.current_step}/{self.total_steps})"
        self.status_text.text(label)

    def complete(self, message="Completed!"):
        """Mark progress as complete."""
        self.progress_bar.progress(1.0)
        self.status_text.text(message)
        import time

        time.sleep(0.5)  # Brief pause to show completion

    def close(self):
        """Clean up progress indicators."""
        self.progress_bar.empty()
        self.status_text.empty()
|
|
97
|
+
|
|
98
|
+
|
|
99
|
+
def with_progress_tracking(
    operation_func, description="Processing...", total_steps=100
):
    """Add progress tracking to operations.

    Wraps ``operation_func`` so that it receives a :class:`ProgressTracker`
    as its first positional argument. The tracker is marked complete on
    success and always cleaned up, including when the operation raises.

    Fixes vs. previous revision: the tracker was closed twice on error
    (once in ``except``, once in ``finally``), and ``raise e`` was used
    instead of letting the exception propagate naturally.
    """

    def wrapper(*args, **kwargs):
        tracker = ProgressTracker(total_steps, description)
        try:
            result = operation_func(tracker, *args, **kwargs)
            tracker.complete()
            return result
        finally:
            # Runs on both success and failure; any exception from
            # operation_func propagates unchanged with its original traceback.
            tracker.close()

    return wrapper
|
|
117
|
+
|
|
118
|
+
|
|
119
|
+
def _setup_sidebar_navigation():
    """Set up the sidebar navigation with buttons."""
    st.sidebar.title("Navigation")

    # One (page name, tooltip) pair per sidebar button, in display order.
    # The page name is stored verbatim in st.session_state.current_page.
    navigation_entries = (
        ("Dashboard", "View migration overview and quick actions"),
        (
            NAV_COOKBOOK_ANALYSIS,
            "Analyse Chef cookbooks and assess migration complexity",
        ),
        (
            NAV_DEPENDENCY_MAPPING,
            "Visualise cookbook dependencies and migration order",
        ),
        (
            NAV_MIGRATION_PLANNING,
            "Plan your Chef to Ansible migration with detailed timelines",
        ),
        (
            NAV_VALIDATION_REPORTS,
            "Validate conversions and generate quality assurance reports",
        ),
        (
            NAV_AI_SETTINGS,
            "Configure AI provider settings for intelligent conversions",
        ),
    )

    for page_name, tooltip in navigation_entries:
        if st.sidebar.button(page_name, help=tooltip, width="stretch"):
            st.session_state.current_page = page_name
            st.rerun()
|
|
176
|
+
|
|
177
|
+
|
|
178
|
+
def main():
    """Run the main Streamlit application.

    Configures the page, hides Streamlit chrome, renders the sidebar
    navigation, and dispatches to the page selected in session state.
    """
    st.set_page_config(
        page_title="SousChef - Chef to Ansible Migration",
        layout="wide",
        initial_sidebar_state="expanded",
    )

    # Hide Streamlit's default header elements and sidebar navigation
    st.markdown(
        """
    <style>
    #MainMenu {visibility: hidden;}
    .stDeployButton {display:none;}
    [data-testid="stSidebarNavLink"] {display: none;}
    </style>
    """,
        unsafe_allow_html=True,
    )

    # Set up sidebar navigation
    _setup_sidebar_navigation()

    # Get current page from session state, default to Dashboard
    page = st.session_state.get("current_page", "Dashboard")

    # Main content area: dispatch to the handler for the selected page.
    # An unrecognised page value renders nothing (no else branch).
    if page == "Dashboard":
        show_dashboard()
    elif page == NAV_COOKBOOK_ANALYSIS:
        show_cookbook_analysis_page()
    elif page == NAV_AI_SETTINGS:
        show_ai_settings_page()
    elif page == NAV_MIGRATION_PLANNING:
        show_migration_planning()
    elif page == NAV_DEPENDENCY_MAPPING:
        show_dependency_mapping()
    elif page == NAV_VALIDATION_REPORTS:
        show_validation_reports()
|
|
217
|
+
|
|
218
|
+
|
|
219
|
+
def _calculate_dashboard_metrics():
    """Calculate and return dashboard metrics."""
    complexity_counts = {"High": 0, "Medium": 0, "Low": 0}
    cookbooks_analysed = 0
    successful_analyses = 0

    has_results = (
        "analysis_results" in st.session_state
        and st.session_state.analysis_results
    )
    if has_results:
        analysis_entries = st.session_state.analysis_results
        cookbooks_analysed = len(analysis_entries)
        successful_analyses = sum(
            1 for entry in analysis_entries if entry.get("status") == "Analysed"
        )

        for entry in analysis_entries:
            level = entry.get("complexity", "Unknown")
            if level in complexity_counts:
                complexity_counts[level] += 1

    # Overall complexity is the worst level observed, High > Medium > Low.
    overall_complexity = "Unknown"
    if cookbooks_analysed > 0:
        for level in ("High", "Medium", "Low"):
            if complexity_counts[level] > 0:
                overall_complexity = level
                break

    conversion_rate = (
        int((successful_analyses / cookbooks_analysed) * 100)
        if cookbooks_analysed > 0
        else 0
    )

    return cookbooks_analysed, overall_complexity, conversion_rate, successful_analyses
|
|
250
|
+
|
|
251
|
+
|
|
252
|
+
def _display_dashboard_metrics(
    cookbooks_analysed, overall_complexity, conversion_rate, successful_analyses
):
    """Display the dashboard metrics."""
    analysed_delta = (
        f"{cookbooks_analysed} processed"
        if cookbooks_analysed > 0
        else "Ready to analyse"
    )
    complexity_delta = (
        "Based on analysis"
        if overall_complexity != "Unknown"
        else "Assessment needed"
    )
    success_delta = (
        f"{successful_analyses} successful"
        if cookbooks_analysed > 0
        else "Start migration"
    )

    # (label, value, delta, caption) per column, in display order.
    metric_specs = (
        (
            "Cookbooks Analysed",
            str(cookbooks_analysed),
            analysed_delta,
            "Total cookbooks processed",
        ),
        (
            "Migration Complexity",
            overall_complexity,
            complexity_delta,
            "Overall migration effort",
        ),
        (
            "Success Rate",
            f"{conversion_rate}%",
            success_delta,
            "Successful analyses",
        ),
    )

    for column, (label, value, delta, caption) in zip(st.columns(3), metric_specs):
        with column:
            st.metric(label, value, delta)
            st.caption(caption)
|
|
287
|
+
|
|
288
|
+
|
|
289
|
+
def _display_quick_upload_section():
    """Display the quick upload section.

    Offers an archive uploader whose payload is persisted into session
    state (so other pages can pick it up), plus quick-navigation buttons.

    Fix: the "Analyse Cookbooks" button previously assigned the bare
    string literal "Cookbook Analysis" to ``current_page`` instead of the
    shared ``NAV_COOKBOOK_ANALYSIS`` constant used everywhere else; using
    the constant keeps the page identifier consistent with the sidebar.
    """
    st.subheader("Quick Start")

    col1, col2 = st.columns([2, 1])

    with col1:
        uploaded_file = st.file_uploader(
            "Upload Cookbook Archive",
            type=["zip", "tar.gz", "tgz", "tar"],
            help="Upload a ZIP or TAR archive containing your Chef "
            "cookbooks for quick analysis",
            key="dashboard_upload",
        )

        if uploaded_file:
            # Store the uploaded file in session state for persistence across pages
            st.session_state.uploaded_file_data = uploaded_file.getvalue()
            st.session_state.uploaded_file_name = uploaded_file.name
            st.session_state.uploaded_file_type = uploaded_file.type

            st.success(f"File {uploaded_file.name} uploaded successfully!")
            st.info(
                "Navigate to Cookbook Analysis to process this file, "
                "or upload another file to replace it."
            )

    with col2:
        st.markdown("**Or choose your workflow:**")

        # Quick actions
        if st.button("Analyse Cookbooks", type="primary", width="stretch"):
            st.session_state.current_page = NAV_COOKBOOK_ANALYSIS
            st.rerun()

        if st.button("Generate Migration Plan", width="stretch"):
            st.session_state.current_page = NAV_MIGRATION_PLANNING
            st.rerun()

        if st.button(BUTTON_ANALYSE_DEPENDENCIES, width="stretch"):
            st.session_state.current_page = NAV_DEPENDENCY_MAPPING
            st.rerun()
|
|
331
|
+
|
|
332
|
+
|
|
333
|
+
def _display_recent_activity():
    """Display the recent activity section.

    Currently a static placeholder: there is no activity feed yet, so it
    always shows the "no recent activity" notice plus a getting-started
    guide inside an expander.
    """
    st.subheader("Recent Activity")
    st.info(
        "No recent migration activity. Start by uploading cookbooks "
        "above or using the Cookbook Analysis page!"
    )

    # Getting started guide
    with st.expander("How to Get Started"):
        st.markdown("""
        **New to SousChef? Here's how to begin:**

        1. **Upload Cookbooks**: Use the uploader above or go to Cookbook Analysis
        2. **Analyse Complexity**: Get detailed migration assessments
        3. **Plan Migration**: Generate timelines and resource requirements
        4. **Convert to Ansible**: Download converted playbooks

        **Supported Formats:**
        - ZIP archives (.zip)
        - TAR archives (.tar, .tar.gz, .tgz)
        - Directory paths (in Cookbook Analysis)

        **Expected Structure:**
        ```
        your-cookbooks/
        ├── nginx/
        │   ├── metadata.rb
        │   ├── recipes/
        │   └── attributes/
        └── apache2/
            └── metadata.rb
        ```
        """)
|
|
367
|
+
|
|
368
|
+
|
|
369
|
+
def show_dashboard():
    """Show the main dashboard with migration overview."""
    st.header("Migration Dashboard")

    # Compute the headline numbers once, then render each section in order.
    dashboard_metrics = _calculate_dashboard_metrics()
    _display_dashboard_metrics(*dashboard_metrics)

    st.divider()

    _display_quick_upload_section()
    _display_recent_activity()
|
|
390
|
+
|
|
391
|
+
|
|
392
|
+
def show_migration_planning():
    """Show migration planning interface.

    Three-step wizard: (1) collect cookbook paths, (2) choose a strategy
    and timeline, (3) generate a plan via souschef.assessment and persist
    it in session state for display/export.
    """
    st.header(NAV_MIGRATION_PLANNING)

    # Import assessment functions
    from souschef.assessment import generate_migration_plan

    # Migration planning wizard
    st.markdown("""
    Plan your Chef-to-Ansible migration with this interactive wizard.
    Get detailed timelines, effort estimates, and risk assessments.
    """)

    # Step 1: Cookbook Selection
    st.subheader("Step 1: Cookbook Selection")

    # Check for previously analyzed cookbooks
    uploaded_plan_context = None
    if (
        "analysis_cookbook_path" in st.session_state
        and st.session_state.analysis_cookbook_path
    ):
        uploaded_plan_context = st.session_state.analysis_cookbook_path
        st.info(f"Using analyzed cookbooks from: {uploaded_plan_context}")

    col1, col2 = st.columns([3, 1])

    with col1:
        # Default to analyzed path if available
        default_paths = uploaded_plan_context if uploaded_plan_context else ""

        cookbook_paths = st.text_area(
            "Cookbook Paths",
            value=default_paths,
            placeholder="/path/to/cookbooks/nginx,/path/to/cookbooks/apache2,/path/to/cookbooks/mysql",
            help="Enter comma-separated paths to your Chef cookbooks. If you uploaded "
            "an archive in the Analysis tab, that path is pre-filled.",
            height=100,
        )

    with col2:
        quick_select = st.selectbox(
            "Quick Examples",
            ["", "Single Cookbook", "Multiple Cookbooks", "Full Migration"],
            help="Load example cookbook configurations",
        )

    # Load example configurations.
    # NOTE(review): choosing an example overrides whatever the user typed
    # into the text area above for this run.
    if quick_select == "Single Cookbook":
        cookbook_paths = "/path/to/cookbooks/nginx"
    elif quick_select == "Multiple Cookbooks":
        cookbook_paths = (
            "/path/to/cookbooks/nginx,/path/to/cookbooks/apache2,"
            "/path/to/cookbooks/mysql"
        )
    elif quick_select == "Full Migration":
        cookbook_paths = (
            "/path/to/cookbooks/nginx,/path/to/cookbooks/apache2,"
            "/path/to/cookbooks/mysql,/path/to/cookbooks/postgresql,"
            "/path/to/cookbooks/redis"
        )

    # Step 2: Migration Strategy
    st.subheader("Step 2: Migration Strategy")

    col1, col2 = st.columns(2)

    with col1:
        migration_strategy = st.selectbox(
            "Migration Approach",
            ["phased", "big_bang", "parallel"],
            help="Choose your migration strategy",
            format_func=lambda x: {
                "phased": "Phased Migration (Recommended)",
                "big_bang": "Big Bang Migration",
                "parallel": "Parallel Migration",
            }.get(x, str(x)),
        )

    with col2:
        timeline_weeks = st.slider(
            "Timeline (Weeks)",
            min_value=4,
            max_value=24,
            value=12,
            help="Target timeline for migration completion",
        )

    # Strategy descriptions
    strategy_descriptions = {
        "phased": """
        **Phased Migration** - Migrate cookbooks in stages based on complexity
        and dependencies.
        - Lower risk with incremental progress
        - Easier rollback if issues occur
        - Longer timeline but more controlled
        - Recommended for most organizations
        """,
        "big_bang": """
        **Big Bang Migration** - Convert all cookbooks simultaneously and deploy
        at once.
        - Faster overall timeline
        - Higher risk and coordination required
        - Requires comprehensive testing
        - Best for small, well-understood environments
        """,
        "parallel": """
        **Parallel Migration** - Run Chef and Ansible side-by-side during transition.
        - Zero downtime possible
        - Most complex to manage
        - Requires dual maintenance
        - Best for critical production systems
        """,
    }

    with st.expander("Strategy Details"):
        st.markdown(strategy_descriptions.get(migration_strategy, ""))

    # Step 3: Generate Plan
    st.subheader("Step 3: Generate Migration Plan")

    if st.button("Generate Migration Plan", type="primary", width="stretch"):
        if not cookbook_paths.strip():
            st.error("Please enter cookbook paths to generate a migration plan.")
            return

        # Create progress tracker
        progress_tracker = ProgressTracker(
            total_steps=7, description="Generating migration plan..."
        )

        try:
            progress_tracker.update(1, "Scanning cookbook directories...")

            # Generate migration plan
            plan_result = generate_migration_plan(
                cookbook_paths.strip(), migration_strategy, timeline_weeks
            )

            # NOTE(review): the remaining progress updates are cosmetic —
            # the plan above is already generated synchronously.
            progress_tracker.update(2, "Analyzing cookbook complexity...")
            progress_tracker.update(3, "Assessing migration risks...")
            progress_tracker.update(4, "Calculating resource requirements...")
            progress_tracker.update(5, "Generating timeline estimates...")
            progress_tracker.update(6, "Creating migration phases...")

            # Store results in session state for persistence
            st.session_state.migration_plan = plan_result
            st.session_state.cookbook_paths = cookbook_paths.strip()
            st.session_state.strategy = migration_strategy
            st.session_state.timeline = timeline_weeks

            progress_tracker.complete("Migration plan generated!")
            st.success("Migration plan generated successfully!")
            st.rerun()

        except Exception as e:
            progress_tracker.close()
            st.error(f"Error generating migration plan: {e}")
            return

    # Display results if available
    if "migration_plan" in st.session_state:
        display_migration_plan_results()
|
|
555
|
+
|
|
556
|
+
|
|
557
|
+
def _display_migration_summary_metrics(cookbook_paths, strategy, timeline):
    """Display migration overview summary metrics."""
    st.subheader("Migration Overview")

    # One (label, value) pair per metric column, in display order.
    summary_values = (
        ("Cookbooks", len(cookbook_paths.split(","))),
        ("Strategy", strategy.replace("_", " ").title()),
        ("Timeline", f"{timeline} weeks"),
        ("Status", "Plan Generated"),
    )

    for column, (label, value) in zip(st.columns(4), summary_values):
        with column:
            st.metric(label, value)
|
|
575
|
+
|
|
576
|
+
|
|
577
|
+
def _display_migration_plan_details(plan_result):
    """Display the detailed migration plan sections."""
    st.subheader("Migration Plan Details")

    # Headings demoted one level so they nest under the page subheader.
    # Order matters only in that it mirrors the original replace sequence.
    demoted_titles = (
        "Executive Summary",
        "Migration Phases",
        "Timeline",
        "Team Requirements",
    )

    # Split the plan into sections and display
    for raw_section in plan_result.split("\n## "):
        if not raw_section.strip():
            continue

        section = raw_section if raw_section.startswith("#") else "## " + raw_section

        # Clean up section headers
        for title in demoted_titles:
            section = section.replace("## " + title, "### " + title)

        st.markdown(section)
|
|
596
|
+
|
|
597
|
+
|
|
598
|
+
def _display_migration_action_buttons(cookbook_paths):
    """Display action buttons for next steps.

    Renders three follow-up actions for a generated plan: a detailed
    report, a single-cookbook dependency analysis, and a Markdown export.
    """
    st.subheader("Next Steps")

    col1, col2, col3 = st.columns(3)

    with col1:
        if st.button("📊 Generate Detailed Report", width="stretch"):
            with st.spinner("Generating detailed migration report..."):
                try:
                    from souschef.assessment import generate_migration_report

                    report = generate_migration_report(
                        "assessment_complete", "executive", "yes"
                    )
                    st.session_state.detailed_report = report
                    st.success("Detailed report generated!")
                except Exception as e:
                    st.error(f"Error generating report: {e}")

    with col2:
        if st.button("🔍 Analyse Dependencies", width="stretch"):
            if len(cookbook_paths.split(",")) == 1:
                # Single cookbook dependency analysis
                cookbook_path = cookbook_paths.split(",")[0].strip()
                with st.spinner(f"Analysing dependencies for {cookbook_path}..."):
                    try:
                        from souschef.assessment import analyse_cookbook_dependencies

                        dep_analysis = analyse_cookbook_dependencies(cookbook_path)
                        st.session_state.dep_analysis = dep_analysis
                        st.success("Dependency analysis complete!")
                    except Exception as e:
                        st.error(f"Error analyzing dependencies: {e}")
            else:
                st.info(
                    "Dependency analysis is optimised for single cookbooks. "
                    "Select one cookbook path for detailed analysis."
                )

    with col3:
        # NOTE(review): a download_button rendered inside a button's if-branch
        # only appears for the single rerun triggered by the click — confirm
        # this is the intended UX.
        if st.button("📥 Export Plan", width="stretch"):
            # Create downloadable plan
            plan_content = f"""# Chef to Ansible Migration Plan
Generated: {st.session_state.get("timestamp", "Unknown")}

## Configuration
- Cookbook Paths: {cookbook_paths}
- Strategy: {st.session_state.strategy}
- Timeline: {st.session_state.timeline} weeks

## Migration Plan
{st.session_state.migration_plan}
"""

            st.download_button(
                label="Download Migration Plan",
                data=plan_content,
                file_name="migration_plan.md",
                mime=MIME_TEXT_MARKDOWN,
                help="Download the complete migration plan as Markdown",
            )
|
|
660
|
+
|
|
661
|
+
|
|
662
|
+
def _display_additional_reports():
    """Display detailed report and dependency analysis if available."""
    # Each optional section: (session-state key, expander title).
    optional_sections = (
        ("detailed_report", "📊 Detailed Migration Report"),
        ("dep_analysis", "🔍 Dependency Analysis"),
    )

    for state_key, expander_title in optional_sections:
        if state_key in st.session_state:
            with st.expander(expander_title):
                st.markdown(st.session_state[state_key])
|
|
673
|
+
|
|
674
|
+
|
|
675
|
+
def display_migration_plan_results():
    """Display the generated migration plan results."""
    state = st.session_state

    _display_migration_summary_metrics(
        state.cookbook_paths, state.strategy, state.timeline
    )
    _display_migration_plan_details(state.migration_plan)
    _display_migration_action_buttons(state.cookbook_paths)
    _display_additional_reports()
|
|
686
|
+
|
|
687
|
+
|
|
688
|
+
def show_dependency_mapping():
    """Show dependency mapping visualization.

    Lets the user supply cookbooks (archive upload or directory path),
    runs souschef.assessment.analyse_cookbook_dependencies, and persists
    the result in session state for rendering below.
    """
    st.header(NAV_DEPENDENCY_MAPPING)

    # Import assessment functions
    from souschef.assessment import analyse_cookbook_dependencies

    st.markdown("""
    Visualise and analyse cookbook dependencies to understand migration order
    and identify potential circular dependencies.
    """)

    # Input method selection
    input_method = st.radio(
        "Choose Input Method",
        ["Upload Archive", INPUT_METHOD_DIRECTORY_PATH],
        horizontal=True,
        help="Select how to provide cookbooks for dependency analysis",
        key="dep_input_method",
    )

    cookbook_path = None
    uploaded_file = None

    if input_method == INPUT_METHOD_DIRECTORY_PATH:
        cookbook_path = st.text_input(
            "Cookbook Directory Path",
            placeholder="/path/to/your/cookbooks",
            help="Enter the path to your cookbooks directory for dependency analysis",
        )
    else:
        uploaded_file = st.file_uploader(
            "Upload Cookbook Archive",
            type=["zip", "tar.gz", "tgz", "tar"],
            help="Upload a ZIP or TAR archive containing your Chef cookbooks",
            key="dep_archive_upload",
        )
        if uploaded_file:
            try:
                with st.spinner("Extracting archive..."):
                    # Import the extract function from cookbook_analysis
                    from souschef.ui.pages.cookbook_analysis import extract_archive

                    cookbook_path = str(extract_archive(uploaded_file))
                    st.success("Archive extracted successfully")
            except Exception as e:
                st.error(f"Failed to extract archive: {e}")
                return

    # Analysis options
    col1, col2 = st.columns(2)

    with col1:
        dependency_depth = st.selectbox(
            "Analysis Depth",
            ["direct", "transitive", "full"],
            help="How deep to analyse dependencies",
            format_func=lambda x: {
                "direct": "Direct Dependencies Only",
                "transitive": "Include Transitive Dependencies",
                "full": "Full Dependency Graph",
            }.get(x, str(x)),
        )

    with col2:
        visualization_type = st.selectbox(
            "Visualization",
            ["text", "graph", "interactive"],
            help="How to display dependency information",
            format_func=lambda x: {
                "text": "Text Summary",
                "graph": "Static Graph View",
                "interactive": "Interactive Graph",
            }.get(x, str(x)),
        )

    # Analysis button
    if st.button(BUTTON_ANALYSE_DEPENDENCIES, type="primary", width="stretch"):
        if not cookbook_path or not cookbook_path.strip():
            st.error("Please enter a cookbook directory path.")
            return

        # Create progress tracker
        progress_tracker = ProgressTracker(
            total_steps=5, description="Analysing cookbook dependencies..."
        )

        try:
            progress_tracker.update(1, "Scanning cookbook directory...")

            # Analyse dependencies
            analysis_result = analyse_cookbook_dependencies(
                cookbook_path.strip(), dependency_depth
            )

            # NOTE(review): these updates are cosmetic — the analysis above
            # completed synchronously before they run.
            progress_tracker.update(2, "Parsing dependency relationships...")
            progress_tracker.update(3, "Detecting circular dependencies...")
            progress_tracker.update(4, "Generating migration recommendations...")

            # Store results
            st.session_state.dep_analysis_result = analysis_result
            st.session_state.dep_cookbook_path = cookbook_path.strip()
            st.session_state.dep_depth = dependency_depth
            st.session_state.dep_viz_type = visualization_type

            progress_tracker.complete("Dependency analysis completed!")
            st.success("Analysis completed successfully!")
            st.rerun()

        except Exception as e:
            progress_tracker.close()
            st.error(f"Error analyzing dependencies: {e}")
            return

    # Display results if available
    if "dep_analysis_result" in st.session_state:
        display_dependency_analysis_results()
|
|
805
|
+
|
|
806
|
+
|
|
807
|
+
def _setup_dependency_mapping_ui():
    """Set up the dependency mapping UI header and description.

    Renders the page header (using the shared NAV_DEPENDENCY_MAPPING label)
    and a short explanatory blurb. Produces Streamlit output only; takes no
    arguments and returns nothing.
    """
    st.header(NAV_DEPENDENCY_MAPPING)

    st.markdown("""
    Visualise and analyse cookbook dependencies to understand migration order
    and identify potential circular dependencies.
    """)
|
|
815
|
+
|
|
816
|
+
|
|
817
|
+
def _get_dependency_mapping_inputs():
    """Collect user inputs for dependency analysis.

    Renders a path text input plus two side-by-side select boxes and returns
    a tuple ``(cookbook_path, dependency_depth, visualization_type)`` where
    ``dependency_depth`` is one of "direct"/"transitive"/"full" and
    ``visualization_type`` is one of "text"/"graph"/"interactive".
    """
    # Cookbook path input
    cookbook_path = st.text_input(
        "Cookbook Directory Path",
        placeholder="/path/to/your/cookbooks",
        help="Enter the path to your cookbooks directory for dependency analysis",
    )

    # Analysis options
    col1, col2 = st.columns(2)

    with col1:
        dependency_depth = st.selectbox(
            "Analysis Depth",
            ["direct", "transitive", "full"],
            help="How deep to analyse dependencies",
            # format_func maps the internal option value to a friendly label;
            # str(x) is the fallback for unexpected values.
            format_func=lambda x: {
                "direct": "Direct Dependencies Only",
                "transitive": "Include Transitive Dependencies",
                "full": "Full Dependency Graph",
            }.get(x, str(x)),
        )

    with col2:
        visualization_type = st.selectbox(
            "Visualization",
            ["text", "graph", "interactive"],
            help="How to display dependency information",
            format_func=lambda x: {
                "text": "Text Summary",
                "graph": "Static Graph View",
                "interactive": "Interactive Graph",
            }.get(x, str(x)),
        )

    return cookbook_path, dependency_depth, visualization_type
|
|
854
|
+
|
|
855
|
+
|
|
856
|
+
def _handle_dependency_analysis_execution(
    cookbook_path, dependency_depth, visualization_type
):
    """Handle the dependency analysis execution when button is clicked.

    Validates that a non-blank cookbook path was supplied before delegating
    to ``_perform_dependency_analysis``. Returns nothing; on a blank path an
    error toast is shown and the analysis is skipped.
    """
    # Analysis button
    if st.button(BUTTON_ANALYSE_DEPENDENCIES, type="primary", width="stretch"):
        if not cookbook_path or not cookbook_path.strip():
            st.error("Please enter a cookbook directory path.")
            return

        _perform_dependency_analysis(
            cookbook_path.strip(), dependency_depth, visualization_type
        )
|
|
869
|
+
|
|
870
|
+
|
|
871
|
+
def _perform_dependency_analysis(cookbook_path, dependency_depth, visualization_type):
    """Perform the actual dependency analysis.

    Runs ``analyse_cookbook_dependencies`` under a 5-step progress tracker,
    stashes the raw result and the input parameters in ``st.session_state``
    (keys ``dep_analysis_result``, ``dep_cookbook_path``, ``dep_depth``,
    ``dep_viz_type``) and triggers ``st.rerun()`` so the results section can
    render on the next pass. Any exception is reported via ``st.error``.
    """
    # Import assessment functions
    from souschef.assessment import analyse_cookbook_dependencies

    # Create progress tracker
    progress_tracker = ProgressTracker(
        total_steps=5, description="Analysing cookbook dependencies..."
    )

    try:
        progress_tracker.update(1, "Scanning cookbook directory...")

        # Analyse dependencies
        analysis_result = analyse_cookbook_dependencies(cookbook_path, dependency_depth)

        # Steps 2-4 are reported after the fact — the analysis call above is
        # a single blocking operation, so these updates are cosmetic.
        progress_tracker.update(2, "Parsing dependency relationships...")
        progress_tracker.update(3, "Detecting circular dependencies...")
        progress_tracker.update(4, "Generating migration recommendations...")

        # Store results
        st.session_state.dep_analysis_result = analysis_result
        st.session_state.dep_cookbook_path = cookbook_path
        st.session_state.dep_depth = dependency_depth
        st.session_state.dep_viz_type = visualization_type

        progress_tracker.complete("Dependency analysis completed!")
        st.success("Analysis completed successfully!")
        st.rerun()

    except Exception as e:
        progress_tracker.close()
        # NOTE(review): "analyzing" here vs "Analysing" above mixes US/UK
        # spelling — consider unifying in a future change.
        st.error(f"Error analyzing dependencies: {e}")
|
|
904
|
+
|
|
905
|
+
|
|
906
|
+
def _display_dependency_analysis_results_if_available():
    """Display dependency analysis results if they exist in session state.

    Checks for the ``dep_analysis_result`` key populated by
    ``_perform_dependency_analysis`` and, when present, renders the full
    results section. No-op otherwise.
    """
    # Display results if available
    if "dep_analysis_result" in st.session_state:
        display_dependency_analysis_results()
|
|
911
|
+
|
|
912
|
+
|
|
913
|
+
def _extract_dependency_relationships(lines):
|
|
914
|
+
"""Extract dependency relationships from analysis lines."""
|
|
915
|
+
dependencies = {}
|
|
916
|
+
current_section = None
|
|
917
|
+
|
|
918
|
+
for line in lines:
|
|
919
|
+
line = line.strip()
|
|
920
|
+
if "Direct Dependencies:" in line:
|
|
921
|
+
current_section = "direct"
|
|
922
|
+
elif "Transitive Dependencies:" in line:
|
|
923
|
+
current_section = "transitive"
|
|
924
|
+
elif line.startswith("- ") and current_section == "direct":
|
|
925
|
+
# Regular dependencies
|
|
926
|
+
dep_text = line[2:].strip()
|
|
927
|
+
if ":" in dep_text:
|
|
928
|
+
parts = dep_text.split(":", 1)
|
|
929
|
+
cookbook = parts[0].strip()
|
|
930
|
+
deps = parts[1].strip()
|
|
931
|
+
if deps and deps != "None":
|
|
932
|
+
dep_list = [d.strip() for d in deps.split(",")]
|
|
933
|
+
dependencies[cookbook] = dep_list
|
|
934
|
+
|
|
935
|
+
return dependencies
|
|
936
|
+
|
|
937
|
+
|
|
938
|
+
def _extract_circular_and_community_deps(lines):
    """Collect circular-dependency pairs and community cookbook names.

    Walks the analysis lines once, tracking which section is active via
    ``_update_current_section``, and routes every bullet item to the
    active section's handler. Returns ``(circular_deps, community_cookbooks)``.
    """
    circular: list[tuple[str, str]] = []
    community: list[str] = []
    section = None

    for raw_line in lines:
        section = _update_current_section(raw_line, section)
        if section and _is_list_item(raw_line):
            _process_list_item(raw_line, section, circular, community)

    return circular, community
|
|
952
|
+
|
|
953
|
+
|
|
954
|
+
def _update_current_section(line, current_section):
|
|
955
|
+
"""Update the current section based on the line content."""
|
|
956
|
+
line = line.strip()
|
|
957
|
+
if "Circular Dependencies:" in line:
|
|
958
|
+
return "circular"
|
|
959
|
+
elif SECTION_COMMUNITY_COOKBOOKS_HEADER in line:
|
|
960
|
+
return "community"
|
|
961
|
+
return current_section
|
|
962
|
+
|
|
963
|
+
|
|
964
|
+
def _is_list_item(line):
|
|
965
|
+
"""Check if the line is a list item."""
|
|
966
|
+
return line.strip().startswith("- ")
|
|
967
|
+
|
|
968
|
+
|
|
969
|
+
def _process_list_item(line, current_section, circular_deps, community_cookbooks):
    """Dispatch a bullet item to the handler for the active section.

    Items outside the "circular" / "community" sections are ignored. The
    handlers mutate their accumulator list in place; nothing is returned.
    """
    handlers = {
        "circular": lambda: _process_circular_dependency_item(line, circular_deps),
        "community": lambda: _process_community_cookbook_item(
            line, community_cookbooks
        ),
    }
    handler = handlers.get(current_section)
    if handler is not None:
        handler()
|
|
975
|
+
|
|
976
|
+
|
|
977
|
+
def _process_circular_dependency_item(line, circular_deps):
|
|
978
|
+
"""Process a circular dependency list item."""
|
|
979
|
+
dep_text = line[2:].strip()
|
|
980
|
+
if "->" in dep_text:
|
|
981
|
+
parts = dep_text.split("->")
|
|
982
|
+
if len(parts) >= 2:
|
|
983
|
+
circular_deps.append((parts[0].strip(), parts[1].strip()))
|
|
984
|
+
|
|
985
|
+
|
|
986
|
+
def _process_community_cookbook_item(line, community_cookbooks):
|
|
987
|
+
"""Process a community cookbook list item."""
|
|
988
|
+
cookbook = line[2:].strip()
|
|
989
|
+
if cookbook:
|
|
990
|
+
community_cookbooks.append(cookbook)
|
|
991
|
+
|
|
992
|
+
|
|
993
|
+
def _parse_dependency_analysis(analysis_result):
    """Split the raw analysis text and extract structured dependency data.

    Returns a tuple ``(dependencies, circular_deps, community_cookbooks)``
    where ``dependencies`` maps cookbook name to its direct dependency list.
    """
    analysis_lines = analysis_result.split("\n")
    deps_map = _extract_dependency_relationships(analysis_lines)
    circular, community = _extract_circular_and_community_deps(analysis_lines)
    return deps_map, circular, community
|
|
1001
|
+
|
|
1002
|
+
|
|
1003
|
+
def _create_networkx_graph(dependencies, circular_deps, community_cookbooks):
    """Build a directed NetworkX graph from parsed dependency data.

    Nodes carry a ``node_type`` attribute ("cookbook" or "dependency"),
    circular edges are tagged ``circular=True``, and nodes named in
    ``community_cookbooks`` get a ``community=True`` attribute.
    """
    import networkx as nx

    graph: nx.DiGraph = nx.DiGraph()

    # Cookbook -> dependency edges, tagging each node with its role.
    for owner, dep_list in dependencies.items():
        graph.add_node(owner, node_type="cookbook")
        for dependency in dep_list:
            graph.add_node(dependency, node_type="dependency")
            graph.add_edge(owner, dependency)

    # Edges closing a cycle are marked so renderers can highlight them.
    for src, dst in circular_deps:
        graph.add_edge(src, dst, circular=True)

    # Flag community-maintained cookbooks that exist in the graph.
    for name in community_cookbooks:
        if name in graph.nodes:
            graph.nodes[name]["community"] = True

    return graph
|
|
1026
|
+
|
|
1027
|
+
|
|
1028
|
+
def _calculate_graph_positions(graph, layout_algorithm):
    """Resolve the layout algorithm and compute node positions.

    Args:
        graph: NetworkX graph object.
        layout_algorithm: Requested algorithm name; "auto" picks one based
            on the number of nodes.

    Returns:
        tuple: (positions_dict, algorithm_used)

    """
    chosen = layout_algorithm
    if chosen == "auto":
        # Pick a sensible default for the graph's size.
        chosen = _choose_auto_layout_algorithm(len(graph.nodes))

    return _calculate_positions_with_algorithm(graph, chosen), chosen
|
|
1049
|
+
|
|
1050
|
+
|
|
1051
|
+
def _choose_auto_layout_algorithm(num_nodes):
|
|
1052
|
+
"""Choose the best layout algorithm based on graph size."""
|
|
1053
|
+
if num_nodes <= 10:
|
|
1054
|
+
return "circular"
|
|
1055
|
+
elif num_nodes <= 50:
|
|
1056
|
+
return "spring"
|
|
1057
|
+
else:
|
|
1058
|
+
return "kamada_kawai"
|
|
1059
|
+
|
|
1060
|
+
|
|
1061
|
+
def _calculate_positions_with_algorithm(graph, layout_algorithm):
    """Calculate node positions with the named layout algorithm.

    Unknown names fall back to the default seeded spring layout; if the
    chosen algorithm raises, a warning is shown and the same spring layout
    is used instead. Seeds are fixed so reruns render identically.
    """
    import networkx as nx

    def default_layout():
        return nx.spring_layout(graph, k=2, iterations=50, seed=42)

    dispatch = {
        "spring": default_layout,
        "circular": lambda: nx.circular_layout(graph),
        "kamada_kawai": lambda: nx.kamada_kawai_layout(graph),
        "shell": lambda: _calculate_shell_layout_positions(graph),
        "random": lambda: nx.random_layout(graph, seed=42),
        "spectral": lambda: nx.spectral_layout(graph),
        "force_directed": lambda: nx.spring_layout(
            graph, k=3, iterations=100, seed=42, scale=2
        ),
    }

    try:
        return dispatch.get(layout_algorithm, default_layout)()
    except Exception as e:
        # Fallback to spring layout if algorithm fails
        st.warning(
            f"Layout algorithm '{layout_algorithm}' failed, using spring layout: {e}"
        )
        return nx.spring_layout(graph, k=2, iterations=50, seed=42)
|
|
1088
|
+
|
|
1089
|
+
|
|
1090
|
+
def _calculate_shell_layout_positions(graph):
    """Arrange nodes in concentric shells: roots, then middle, then leaves.

    Roots are nodes with no incoming edges, leaves have no outgoing edges,
    and everything else forms the middle shell. Falls back to a seeded
    spring layout when the graph yields no shells.
    """
    import networkx as nx

    leaves = [n for n in graph.nodes() if graph.out_degree(n) == 0]
    roots = [n for n in graph.nodes() if graph.in_degree(n) == 0]
    middle = [n for n in graph.nodes() if n not in leaves and n not in roots]

    # Keep only non-empty shells, outermost (roots) first.
    shells = [group for group in (roots, middle, leaves) if group]

    if not shells:
        return nx.spring_layout(graph, k=2, iterations=50, seed=42)
    return nx.shell_layout(graph, shells)
|
|
1115
|
+
|
|
1116
|
+
|
|
1117
|
+
def _create_plotly_edge_traces(graph, pos):
    """Create edge traces for Plotly graph.

    Builds up to two Scatter line traces: one grey trace for regular
    dependency edges and one red trace for edges tagged ``circular=True``.
    Edge segments are encoded Plotly-style as (x0, x1, None) triples so a
    single trace can draw many disconnected lines.

    Args:
        graph: NetworkX DiGraph whose edges may carry a "circular" flag.
        pos: Mapping of node -> (x, y) position.

    Returns:
        list of plotly Scatter traces (possibly empty).
    """
    import plotly.graph_objects as go  # type: ignore[import-untyped]

    edge_traces = []

    # Regular edges
    edge_x = []
    edge_y = []
    for edge in graph.edges():
        if not graph.edges[edge].get("circular", False):
            x0, y0 = pos[edge[0]]
            x1, y1 = pos[edge[1]]
            # None breaks the line between segments.
            edge_x.extend([x0, x1, None])
            edge_y.extend([y0, y1, None])

    if edge_x:
        edge_traces.append(
            go.Scatter(
                x=edge_x,
                y=edge_y,
                line={"width": 2, "color": "#888"},
                hoverinfo="none",
                mode="lines",
                name="Dependencies",
            )
        )

    # Circular dependency edges (red)
    circ_edge_x = []
    circ_edge_y = []
    for edge in graph.edges():
        if graph.edges[edge].get("circular", False):
            x0, y0 = pos[edge[0]]
            x1, y1 = pos[edge[1]]
            circ_edge_x.extend([x0, x1, None])
            circ_edge_y.extend([y0, y1, None])

    if circ_edge_x:
        edge_traces.append(
            go.Scatter(
                x=circ_edge_x,
                y=circ_edge_y,
                line={"width": 3, "color": "red"},
                hoverinfo="none",
                mode="lines",
                name=SECTION_CIRCULAR_DEPENDENCIES,
            )
        )

    return edge_traces
|
|
1168
|
+
|
|
1169
|
+
|
|
1170
|
+
def _create_plotly_node_trace(graph, pos):
    """Create node trace for Plotly graph.

    Builds one markers+text Scatter trace with per-node size scaled by
    degree (clamped to 15..30) and colour coded by role: green for
    community cookbooks, red for nodes on a circular edge, light blue for
    nodes with incoming edges, grey for leaf dependencies.

    Args:
        graph: NetworkX DiGraph with optional "community"/"circular" attrs.
        pos: Mapping of node -> (x, y) position.

    Returns:
        plotly Scatter trace for all nodes.
    """
    import plotly.graph_objects as go

    node_x = []
    node_y = []
    node_text = []
    node_colors = []
    node_sizes = []

    for node in graph.nodes():
        x, y = pos[node]
        node_x.append(x)
        node_y.append(y)
        node_text.append(node)

        # Dynamic node sizing based on connectivity
        degree = graph.degree(node)
        node_sizes.append(max(15, min(30, 15 + degree * 2)))

        # Color coding
        if graph.nodes[node].get("community", False):
            node_colors.append("lightgreen")  # Community cookbooks
        elif any(
            graph.edges[edge].get("circular", False)
            for edge in graph.in_edges(node)
            # NOTE(review): in_edges(node) already yields edges whose target
            # is node, so this guard looks redundant — confirm before removal.
            if edge[1] == node
        ):
            node_colors.append("red")  # Involved in circular deps
        elif graph.in_degree(node) > 0:
            node_colors.append("lightblue")  # Has dependencies
        else:
            node_colors.append("lightgray")  # Leaf dependencies

    node_trace = go.Scatter(
        x=node_x,
        y=node_y,
        mode="markers+text",
        hoverinfo="text",
        text=node_text,
        textposition="top center",
        marker={
            "size": node_sizes,
            "color": node_colors,
            "line_width": 2,
            "line_color": "darkgray",
        },
        name="Cookbooks",
    )

    return node_trace
|
|
1221
|
+
|
|
1222
|
+
|
|
1223
|
+
def _create_plotly_figure_layout(num_nodes, layout_algorithm):
    """Create Plotly figure layout.

    Produces a white-background layout with both axes fully hidden (no
    grid, zero line, or tick labels) and a title reporting the node count
    and layout algorithm used.

    Args:
        num_nodes: Number of nodes shown in the graph (for the title).
        layout_algorithm: Name of the layout algorithm (for the title).

    Returns:
        plotly go.Layout instance.
    """
    import plotly.graph_objects as go

    # NOTE(review): `titlefont_size` was removed in Plotly 5 in favour of
    # `title_font_size` — confirm the pinned plotly version accepts this.
    layout: go.Layout = go.Layout(
        title=f"Cookbook Dependency Graph ({num_nodes} nodes, "
        f"{layout_algorithm} layout)",
        titlefont_size=16,
        showlegend=True,
        hovermode="closest",
        margin={"b": 20, "l": 5, "r": 5, "t": 40},
        xaxis={
            "showgrid": False,
            "zeroline": False,
            "showticklabels": False,
        },
        yaxis={
            "showgrid": False,
            "zeroline": False,
            "showticklabels": False,
        },
        plot_bgcolor="white",
    )

    return layout
|
|
1248
|
+
|
|
1249
|
+
|
|
1250
|
+
def _create_interactive_plotly_graph(graph, pos, num_nodes, layout_algorithm):
    """Assemble the interactive Plotly figure from its component parts.

    Combines the edge traces, the node trace, and the figure layout into a
    single ``go.Figure``; the node trace is drawn last so markers sit on
    top of the edges.
    """
    import plotly.graph_objects as go

    traces = _create_plotly_edge_traces(graph, pos)
    traces.append(_create_plotly_node_trace(graph, pos))
    figure_layout = _create_plotly_figure_layout(num_nodes, layout_algorithm)

    return go.Figure(data=traces, layout=figure_layout)
|
|
1262
|
+
|
|
1263
|
+
|
|
1264
|
+
def _create_static_matplotlib_graph(graph, pos, num_nodes, layout_algorithm):
    """Create static matplotlib graph visualization.

    Draws regular edges (grey, solid), circular edges (red, dashed), nodes
    sized by degree (300..1200) and coloured by role (green community, red
    circular, blue dependent, grey leaf), plus bold labels and a title.
    Draw order matters: edges first, then nodes, then labels, so later
    artists render on top.

    Returns:
        The current matplotlib Figure (``plt.gcf()``).
    """
    import matplotlib.pyplot as plt

    plt.figure(figsize=(12, 8))

    # Draw regular edges
    regular_edges = [
        (u, v) for u, v, d in graph.edges(data=True) if not d.get("circular", False)
    ]
    if regular_edges:
        import networkx as nx

        nx.draw_networkx_edges(
            graph,
            pos,
            edgelist=regular_edges,
            edge_color="gray",
            arrows=True,
            arrowsize=20,
            width=2,
            alpha=0.7,
        )

    # Draw circular dependency edges
    circular_edges = [
        (u, v) for u, v, d in graph.edges(data=True) if d.get("circular", False)
    ]
    if circular_edges:
        import networkx as nx

        nx.draw_networkx_edges(
            graph,
            pos,
            edgelist=circular_edges,
            edge_color="red",
            arrows=True,
            arrowsize=25,
            width=3,
            alpha=0.9,
            style="dashed",
        )

    # Color nodes
    node_colors = []
    for node in graph.nodes():
        if graph.nodes[node].get("community", False):
            node_colors.append("lightgreen")  # Community cookbooks
        elif any(
            graph.edges[edge].get("circular", False)
            for edge in graph.in_edges(node)
            if edge[1] == node
        ):
            node_colors.append("red")  # Involved in circular deps
        elif graph.in_degree(node) > 0:
            node_colors.append("lightblue")  # Has dependencies
        else:
            node_colors.append("lightgray")  # Leaf dependencies

    # Draw nodes with size based on connectivity
    node_sizes = [
        max(300, min(1200, 300 + graph.degree(node) * 100)) for node in graph.nodes()
    ]

    # Draw nodes
    import networkx as nx

    nx.draw_networkx_nodes(
        graph,
        pos,
        node_color=node_colors,
        node_size=node_sizes,
        alpha=0.8,
        linewidths=2,
        edgecolors="darkgray",
    )

    # Draw labels
    nx.draw_networkx_labels(graph, pos, font_size=8, font_weight="bold")

    plt.title(
        f"Cookbook Dependency Graph ({num_nodes} nodes, {layout_algorithm} layout)",
        fontsize=16,
        pad=20,
    )
    plt.axis("off")
    plt.tight_layout()

    return plt.gcf()
|
|
1353
|
+
|
|
1354
|
+
|
|
1355
|
+
def create_dependency_graph(
    analysis_result, viz_type, layout_algorithm="auto", filters=None
):
    """
    Create a dependency graph visualization with optional filtering.

    Orchestrates the full pipeline: parse the analysis text, build the
    NetworkX graph, apply optional filters, compute positions, then render
    either an interactive Plotly figure or a static matplotlib figure.

    Args:
        analysis_result: Text analysis result from dependency analysis
        viz_type: Visualization type ("interactive" or "static")
        layout_algorithm: Layout algorithm to use
        filters: Dictionary of filter options

    Returns:
        Plotly figure for interactive graphs, matplotlib figure for static
        graphs; None when filtering leaves no nodes or an error occurs (the
        error is reported via st.error).

    """
    try:
        # Parse the analysis result to extract dependencies
        dependencies, circular_deps, community_cookbooks = _parse_dependency_analysis(
            analysis_result
        )

        # Create NetworkX graph
        graph = _create_networkx_graph(dependencies, circular_deps, community_cookbooks)

        # Apply filters if provided
        if filters:
            graph = _apply_graph_filters(graph, filters)

        # Nothing left to draw after filtering.
        if len(graph.nodes) == 0:
            return None

        # Calculate positions
        pos, final_layout = _calculate_graph_positions(graph, layout_algorithm)

        if viz_type == "interactive":
            return _create_interactive_plotly_graph(
                graph, pos, len(graph.nodes), final_layout
            )
        else:
            return _create_static_matplotlib_graph(
                graph, pos, len(graph.nodes), final_layout
            )

    except Exception as e:
        st.error(f"Error creating dependency graph: {e}")
        return None
|
|
1402
|
+
|
|
1403
|
+
|
|
1404
|
+
def _apply_graph_filters(graph, filters):
    """Return a filtered copy of the graph with each filter applied in turn.

    The original graph is never mutated; the filters run in a fixed order
    (circular-only, community-only, minimum connections) on a copy.
    """
    result = graph.copy()

    for filter_fn in (
        _filter_circular_dependencies_only,
        _filter_community_cookbooks_only,
        _filter_minimum_connections,
    ):
        result = filter_fn(result, filters)

    return result
|
|
1414
|
+
|
|
1415
|
+
|
|
1416
|
+
def _filter_circular_dependencies_only(graph, filters):
    """Prune the graph to nodes that take part in a circular dependency.

    No-op unless ``filters["circular_only"]`` is truthy; the set of
    involved nodes is read from ``filters["circular_deps"]`` (source,
    target) pairs. Mutates and returns the given graph.
    """
    if not filters.get("circular_only", False):
        return graph

    # Every endpoint of a circular pair stays; everything else goes.
    involved = set()
    for source, target in filters.get("circular_deps", []):
        involved.update((source, target))

    graph.remove_nodes_from([n for n in graph.nodes() if n not in involved])
    return graph
|
|
1432
|
+
|
|
1433
|
+
|
|
1434
|
+
def _filter_community_cookbooks_only(graph, filters):
    """Prune the graph to community cookbooks and their direct dependencies.

    No-op unless ``filters["community_only"]`` is truthy. Nodes flagged
    ``community=True`` are kept along with their immediate successors.
    Mutates and returns the given graph.
    """
    if not filters.get("community_only", False):
        return graph

    keep = set()
    for node in graph.nodes():
        if graph.nodes[node].get("community", False):
            keep.add(node)
            # Also include dependencies of community cookbooks
            keep.update(graph.successors(node))

    graph.remove_nodes_from([n for n in graph.nodes() if n not in keep])
    return graph
|
|
1452
|
+
|
|
1453
|
+
|
|
1454
|
+
def _filter_minimum_connections(graph, filters):
    """Prune nodes whose total degree is below the configured minimum.

    No-op when ``filters["min_connections"]`` is absent or non-positive.
    Mutates and returns the given graph.
    """
    threshold = filters.get("min_connections", 0)
    if threshold <= 0:
        return graph

    too_sparse = [node for node in graph.nodes() if graph.degree(node) < threshold]
    graph.remove_nodes_from(too_sparse)

    return graph
|
|
1468
|
+
|
|
1469
|
+
|
|
1470
|
+
def _parse_dependency_metrics_from_result(analysis_result):
|
|
1471
|
+
"""Parse dependency analysis result to extract key metrics."""
|
|
1472
|
+
lines = analysis_result.split("\n")
|
|
1473
|
+
|
|
1474
|
+
# Extract key metrics from the analysis
|
|
1475
|
+
direct_deps = 0
|
|
1476
|
+
transitive_deps = 0
|
|
1477
|
+
circular_deps = 0
|
|
1478
|
+
community_cookbooks = 0
|
|
1479
|
+
|
|
1480
|
+
for line in lines:
|
|
1481
|
+
if "Direct Dependencies:" in line:
|
|
1482
|
+
with contextlib.suppress(ValueError):
|
|
1483
|
+
direct_deps = int(line.split(":")[1].strip())
|
|
1484
|
+
elif "Transitive Dependencies:" in line:
|
|
1485
|
+
with contextlib.suppress(ValueError):
|
|
1486
|
+
transitive_deps = int(line.split(":")[1].strip())
|
|
1487
|
+
elif "Circular Dependencies:" in line:
|
|
1488
|
+
with contextlib.suppress(ValueError):
|
|
1489
|
+
circular_deps = int(line.split(":")[1].strip())
|
|
1490
|
+
elif "Community Cookbooks:" in line:
|
|
1491
|
+
with contextlib.suppress(ValueError):
|
|
1492
|
+
community_cookbooks = int(line.split(":")[1].strip())
|
|
1493
|
+
|
|
1494
|
+
return direct_deps, transitive_deps, circular_deps, community_cookbooks
|
|
1495
|
+
|
|
1496
|
+
|
|
1497
|
+
def _display_dependency_summary_metrics(
    direct_deps, transitive_deps, circular_deps, community_cookbooks
):
    """Display dependency analysis summary metrics.

    Lays out four Streamlit metric widgets in a row; the circular-
    dependencies metric carries a warning/OK delta badge depending on
    whether any cycles were found. Returns nothing.
    """
    col1, col2, col3, col4 = st.columns(4)

    with col1:
        st.metric("Direct Dependencies", direct_deps)

    with col2:
        st.metric("Transitive Dependencies", transitive_deps)

    with col3:
        st.metric(
            SECTION_CIRCULAR_DEPENDENCIES,
            circular_deps,
            # Any cycle at all is worth flagging to the user.
            delta="⚠️ Check" if circular_deps > 0 else "✅ OK",
        )

    with col4:
        st.metric(SECTION_COMMUNITY_COOKBOOKS, community_cookbooks)
|
|
1518
|
+
|
|
1519
|
+
|
|
1520
|
+
def _calculate_migration_impact(dependencies, circular_deps, community_cookbooks):
    """Calculate migration impact analysis based on dependency structure.

    Combines weighted heuristics into a single impact dict:

    - ``risk_score``: weighted sum (circular x2.0, total deps x0.1,
      community x0.5, longest chain x0.3), capped at 10.0.
    - ``complexity_level`` / ``timeline_impact_weeks``: High/+4w above 7,
      Medium/+2w above 4, otherwise Low/+0w.
    - ``parallel_streams``: 3 for >20 cookbooks, 2 for >10, else 1.
    - ``critical_path`` / ``bottlenecks`` / ``recommendations``: delegated
      to the sibling helpers.

    Args:
        dependencies: Mapping of cookbook -> list of dependency names.
        circular_deps: List of (source, target) circular pairs.
        community_cookbooks: List of community cookbook names.

    Returns:
        dict with the keys described above.
    """
    from typing import Any

    impact: dict[str, Any] = {
        "risk_score": 0.0,
        "timeline_impact_weeks": 0,
        "complexity_level": "Low",
        "parallel_streams": 1,
        "critical_path": [],
        "bottlenecks": [],
        "recommendations": [],
    }

    # Calculate risk score based on various factors
    risk_factors = {
        "circular_deps": len(circular_deps)
        * 2.0,  # Each circular dep adds significant risk
        "total_deps": len(dependencies) * 0.1,  # More dependencies = higher complexity
        "community_cookbooks": len(community_cookbooks)
        * 0.5,  # Community cookbooks need evaluation
        "max_chain_length": _calculate_max_dependency_chain(dependencies)
        * 0.3,  # Long chains are risky
    }

    impact["risk_score"] = min(10.0, sum(risk_factors.values()))

    # Determine complexity level
    if impact["risk_score"] > 7:
        impact["complexity_level"] = "High"
        impact["timeline_impact_weeks"] = 4
    elif impact["risk_score"] > 4:
        impact["complexity_level"] = "Medium"
        impact["timeline_impact_weeks"] = 2
    else:
        impact["complexity_level"] = "Low"
        impact["timeline_impact_weeks"] = 0

    # Calculate parallel migration streams
    if len(dependencies) > 20:
        impact["parallel_streams"] = 3
    elif len(dependencies) > 10:
        impact["parallel_streams"] = 2
    else:
        impact["parallel_streams"] = 1

    # Identify critical path (longest dependency chain)
    impact["critical_path"] = _find_critical_path(dependencies)

    # Identify bottlenecks (highly depended-upon cookbooks)
    impact["bottlenecks"] = _identify_bottlenecks(dependencies)

    # Generate recommendations
    impact["recommendations"] = _generate_impact_recommendations(
        impact, circular_deps, community_cookbooks
    )

    return impact
|
|
1578
|
+
|
|
1579
|
+
|
|
1580
|
+
def _calculate_max_dependency_chain(dependencies):
|
|
1581
|
+
"""Calculate the maximum dependency chain length."""
|
|
1582
|
+
max_length = 0
|
|
1583
|
+
|
|
1584
|
+
def get_chain_length(cookbook, visited=None):
|
|
1585
|
+
if visited is None:
|
|
1586
|
+
visited = set()
|
|
1587
|
+
|
|
1588
|
+
if cookbook in visited:
|
|
1589
|
+
return 0 # Circular dependency detected
|
|
1590
|
+
|
|
1591
|
+
visited.add(cookbook)
|
|
1592
|
+
deps = dependencies.get(cookbook, [])
|
|
1593
|
+
|
|
1594
|
+
if not deps:
|
|
1595
|
+
return 1
|
|
1596
|
+
|
|
1597
|
+
max_child_length = 0
|
|
1598
|
+
for dep in deps:
|
|
1599
|
+
child_length = get_chain_length(dep, visited.copy())
|
|
1600
|
+
max_child_length = max(max_child_length, child_length)
|
|
1601
|
+
|
|
1602
|
+
return 1 + max_child_length
|
|
1603
|
+
|
|
1604
|
+
for cookbook in dependencies:
|
|
1605
|
+
length = get_chain_length(cookbook)
|
|
1606
|
+
max_length = max(max_length, length)
|
|
1607
|
+
|
|
1608
|
+
return max_length
|
|
1609
|
+
|
|
1610
|
+
|
|
1611
|
+
def _find_critical_path(dependencies):
|
|
1612
|
+
"""Find the critical path (longest dependency chain)."""
|
|
1613
|
+
longest_chain: list[str] = []
|
|
1614
|
+
|
|
1615
|
+
def find_longest_chain(cookbook, visited=None):
|
|
1616
|
+
if visited is None:
|
|
1617
|
+
visited = set()
|
|
1618
|
+
|
|
1619
|
+
if cookbook in visited:
|
|
1620
|
+
return [] # Circular dependency
|
|
1621
|
+
|
|
1622
|
+
visited.add(cookbook)
|
|
1623
|
+
deps = dependencies.get(cookbook, [])
|
|
1624
|
+
|
|
1625
|
+
if not deps:
|
|
1626
|
+
return [cookbook]
|
|
1627
|
+
|
|
1628
|
+
longest_child_chain: list[str] = []
|
|
1629
|
+
for dep in deps:
|
|
1630
|
+
child_chain = find_longest_chain(dep, visited.copy())
|
|
1631
|
+
if len(child_chain) > len(longest_child_chain):
|
|
1632
|
+
longest_child_chain = child_chain
|
|
1633
|
+
|
|
1634
|
+
return [cookbook] + longest_child_chain
|
|
1635
|
+
|
|
1636
|
+
for cookbook in dependencies:
|
|
1637
|
+
chain = find_longest_chain(cookbook)
|
|
1638
|
+
if len(chain) > len(longest_chain):
|
|
1639
|
+
longest_chain = chain
|
|
1640
|
+
|
|
1641
|
+
return longest_chain
|
|
1642
|
+
|
|
1643
|
+
|
|
1644
|
+
def _identify_bottlenecks(dependencies: dict[str, list[str]]):
|
|
1645
|
+
"""Identify bottleneck cookbooks (highly depended upon)."""
|
|
1646
|
+
# Count how many times each cookbook is depended upon
|
|
1647
|
+
dependency_counts: dict[str, int] = {}
|
|
1648
|
+
|
|
1649
|
+
for deps in dependencies.values():
|
|
1650
|
+
for dep in deps:
|
|
1651
|
+
dependency_counts[dep] = dependency_counts.get(dep, 0) + 1
|
|
1652
|
+
|
|
1653
|
+
# Find cookbooks with high dependency counts
|
|
1654
|
+
bottlenecks = []
|
|
1655
|
+
max_count: int = max(dependency_counts.values()) if dependency_counts else 0
|
|
1656
|
+
|
|
1657
|
+
for cookbook, count in dependency_counts.items():
|
|
1658
|
+
if count >= 5:
|
|
1659
|
+
risk_level = "High"
|
|
1660
|
+
elif count >= 3:
|
|
1661
|
+
risk_level = "Medium"
|
|
1662
|
+
else:
|
|
1663
|
+
risk_level = "Low"
|
|
1664
|
+
|
|
1665
|
+
if count >= 3 or (max_count > 1 and count == max_count):
|
|
1666
|
+
bottlenecks.append(
|
|
1667
|
+
{
|
|
1668
|
+
"cookbook": cookbook,
|
|
1669
|
+
"dependent_count": count,
|
|
1670
|
+
"risk_level": risk_level,
|
|
1671
|
+
}
|
|
1672
|
+
)
|
|
1673
|
+
|
|
1674
|
+
return sorted(bottlenecks, key=lambda x: x["dependent_count"], reverse=True)
|
|
1675
|
+
|
|
1676
|
+
|
|
1677
|
+
def _generate_impact_recommendations(impact, circular_deps, community_cookbooks):
|
|
1678
|
+
"""Generate recommendations based on impact analysis."""
|
|
1679
|
+
recommendations = []
|
|
1680
|
+
|
|
1681
|
+
if circular_deps:
|
|
1682
|
+
recommendations.append(
|
|
1683
|
+
{
|
|
1684
|
+
"priority": "Critical",
|
|
1685
|
+
"action": (
|
|
1686
|
+
f"Resolve {len(circular_deps)} circular dependencies "
|
|
1687
|
+
"before migration"
|
|
1688
|
+
),
|
|
1689
|
+
"impact": "Prevents successful migration",
|
|
1690
|
+
}
|
|
1691
|
+
)
|
|
1692
|
+
|
|
1693
|
+
if impact["parallel_streams"] > 1:
|
|
1694
|
+
recommendations.append(
|
|
1695
|
+
{
|
|
1696
|
+
"priority": "High",
|
|
1697
|
+
"action": (
|
|
1698
|
+
f"Plan {impact['parallel_streams']} parallel migration streams"
|
|
1699
|
+
),
|
|
1700
|
+
"impact": (
|
|
1701
|
+
f"Reduces timeline by ~{impact['parallel_streams'] * 2} weeks"
|
|
1702
|
+
),
|
|
1703
|
+
}
|
|
1704
|
+
)
|
|
1705
|
+
|
|
1706
|
+
if community_cookbooks:
|
|
1707
|
+
recommendations.append(
|
|
1708
|
+
{
|
|
1709
|
+
"priority": "Medium",
|
|
1710
|
+
"action": (
|
|
1711
|
+
f"Evaluate {len(community_cookbooks)} community cookbooks "
|
|
1712
|
+
"for Ansible Galaxy alternatives"
|
|
1713
|
+
),
|
|
1714
|
+
"impact": "Reduces custom development effort",
|
|
1715
|
+
}
|
|
1716
|
+
)
|
|
1717
|
+
|
|
1718
|
+
if impact["bottlenecks"]:
|
|
1719
|
+
bottleneck_names = [b["cookbook"] for b in impact["bottlenecks"][:3]]
|
|
1720
|
+
recommendations.append(
|
|
1721
|
+
{
|
|
1722
|
+
"priority": "Medium",
|
|
1723
|
+
"action": (
|
|
1724
|
+
f"Migrate bottleneck cookbooks first: {', '.join(bottleneck_names)}"
|
|
1725
|
+
),
|
|
1726
|
+
"impact": "Unblocks dependent cookbook migrations",
|
|
1727
|
+
}
|
|
1728
|
+
)
|
|
1729
|
+
|
|
1730
|
+
if impact["timeline_impact_weeks"] > 0:
|
|
1731
|
+
recommendations.append(
|
|
1732
|
+
{
|
|
1733
|
+
"priority": "Low",
|
|
1734
|
+
"action": (
|
|
1735
|
+
f"Allocate additional {impact['timeline_impact_weeks']} "
|
|
1736
|
+
"weeks for complexity"
|
|
1737
|
+
),
|
|
1738
|
+
"impact": "Ensures successful migration completion",
|
|
1739
|
+
}
|
|
1740
|
+
)
|
|
1741
|
+
|
|
1742
|
+
return recommendations
|
|
1743
|
+
|
|
1744
|
+
|
|
1745
|
+
def _display_detailed_impact_analysis(
    impact_analysis, dependencies, circular_deps, community_cookbooks
):
    """Display detailed impact analysis breakdown.

    Renders four sub-sections in a fixed order: risk-factor scores,
    critical-path chain, bottleneck cookbooks, and strategic
    recommendations.

    Args:
        impact_analysis: Computed impact metrics (includes at least
            ``critical_path``, ``bottlenecks`` and ``recommendations``,
            which the sub-displays read).
        dependencies: Mapping of cookbook name -> list of dependencies.
        circular_deps: Circular-dependency findings.
        community_cookbooks: Community cookbook findings.
    """
    _display_risk_assessment_breakdown(dependencies, circular_deps, community_cookbooks)
    _display_critical_path_analysis(impact_analysis)
    _display_migration_bottlenecks(impact_analysis)
    _display_strategic_recommendations(impact_analysis)
|
|
1753
|
+
|
|
1754
|
+
|
|
1755
|
+
def _display_risk_assessment_breakdown(
    dependencies, circular_deps, community_cookbooks
):
    """Display risk assessment breakdown.

    Lists each risk driver with its weighted point contribution;
    factors that contribute nothing are omitted.
    """
    st.markdown("### Risk Assessment Breakdown")

    # Weighted contribution of each risk driver to the overall score.
    contributions = (
        ("Circular Dependencies", len(circular_deps) * 2.0),
        ("Total Dependencies", len(dependencies) * 0.1),
        ("Community Cookbooks", len(community_cookbooks) * 0.5),
        ("Dependency Chain Length", _calculate_max_dependency_chain(dependencies) * 0.3),
    )

    # Only surface the factors that actually add risk.
    for label, points in contributions:
        if points > 0:
            st.write(f"• **{label}**: {points:.1f} points")
|
|
1772
|
+
|
|
1773
|
+
|
|
1774
|
+
def _display_critical_path_analysis(impact_analysis):
    """Display critical path analysis.

    Shows the longest dependency chain as an arrow-joined code block,
    or a placeholder message when no chain exists.
    """
    st.markdown("### Critical Path Analysis")
    path = impact_analysis["critical_path"]
    if not path:
        st.write("No dependency chains identified.")
        return
    st.write("**Longest dependency chain:**")
    st.code(" → ".join(path), language="text")
|
|
1782
|
+
|
|
1783
|
+
|
|
1784
|
+
def _display_migration_bottlenecks(impact_analysis):
    """Display migration bottlenecks.

    Each bottleneck cookbook is shown with a colour-coded risk marker
    (red = High, yellow = Medium, green = Low) and its dependent count.
    """
    st.markdown("### Migration Bottlenecks")

    entries = impact_analysis["bottlenecks"]
    if not entries:
        st.write("✅ No significant bottlenecks identified.")
        return

    # Marker per risk level; anything other than High/Medium is green.
    markers = {"High": "🔴", "Medium": "🟡"}
    for entry in entries:
        level = entry["risk_level"]
        marker = markers.get(level, "🟢")
        st.write(
            f"• {marker} **{entry['cookbook']}**: "
            f"{entry['dependent_count']} dependents "
            f"({level} risk)"
        )
|
|
1803
|
+
|
|
1804
|
+
|
|
1805
|
+
def _display_strategic_recommendations(impact_analysis):
    """Display strategic recommendations.

    Each recommendation is prefixed with a priority marker
    (red = Critical, yellow = High, green = everything else) and
    followed by its expected impact.
    """
    st.markdown("### Strategic Recommendations")

    # Marker per priority; priorities below High all render green.
    markers = {"Critical": "🔴", "High": "🟡"}
    for rec in impact_analysis["recommendations"]:
        level = rec["priority"]
        marker = markers.get(level, "🟢")
        st.write(f"• {marker} **{level}**: {rec['action']}")
        st.write(f" *Impact*: {rec['impact']}")
|
|
1818
|
+
|
|
1819
|
+
|
|
1820
|
+
def _handle_graph_caching():
    """Render graph-cache controls: enable toggle, clear button, statistics.

    Cached figures live in ``st.session_state`` under keys prefixed with
    ``"graph_"``.  The preference flag ``graph_cache_enabled`` shares that
    prefix, so it must be excluded when enumerating cache entries —
    otherwise the statistics over-count by one and "Clear Cache" also
    wipes the user's preference.
    """
    st.subheader("💾 Graph Cache Management")

    def _cached_graph_keys():
        # Session-state keys that hold cached figures.  The preference
        # flag shares the "graph_" prefix but is not a cache entry.
        return [
            key
            for key in st.session_state
            if key.startswith("graph_") and key != "graph_cache_enabled"
        ]

    col1, col2, col3 = st.columns([1, 1, 2])

    with col1:
        # Toggle caching on/off, persisting the choice in session state.
        cache_enabled = st.checkbox(
            "Enable Graph Caching",
            value=st.session_state.get("graph_cache_enabled", True),
            help="Cache graph visualizations to improve performance for repeated views",
        )
        st.session_state.graph_cache_enabled = cache_enabled

    with col2:
        # Clear cache button: drop every cached figure (but keep the
        # preference flag), then rerun so the stats column refreshes.
        if st.button("🗑️ Clear Cache", help="Clear all cached graph data"):
            cache_keys = _cached_graph_keys()
            for key in cache_keys:
                del st.session_state[key]
            st.success(f"✅ Cleared {len(cache_keys)} cached graphs")
            st.rerun()

    with col3:
        # Cache statistics.
        cache_count = len(_cached_graph_keys())

        if cache_count > 0:
            # Estimate memory usage (rough approximation: 50KB per cached graph).
            estimated_memory = cache_count * 50
            st.metric(
                "Cached Graphs",
                f"{cache_count} items",
                f"~{estimated_memory}KB estimated",
            )
        else:
            st.info("📭 No graphs currently cached")

    # Cache status indicator
    if cache_enabled:
        st.success(
            "✅ Graph caching is enabled - visualizations will be "
            "cached for faster loading"
        )
    else:
        st.warning(
            "⚠️ Graph caching is disabled - each visualization will be recalculated"
        )
|
|
1871
|
+
|
|
1872
|
+
|
|
1873
|
+
def _display_dependency_graph_visualization(
    analysis_result,
    viz_type,
    selected_layout,
    show_circular_only,
    show_community_only,
    min_connections,
):
    """Display the dependency graph visualization section with filtering.

    Builds (or fetches from the session cache) a dependency graph for the
    given analysis text and renders it with export options; any failure
    falls back to a text summary via the error handler.

    Args:
        analysis_result: Raw textual dependency-analysis output.
        viz_type: Rendering mode forwarded to the graph builder
            (``"interactive"`` selects the Plotly path downstream).
        selected_layout: Layout algorithm name for the graph builder.
        show_circular_only: Restrict graph to circular-dependency nodes.
        show_community_only: Restrict graph to community cookbooks.
        min_connections: Minimum node connection count to include.
    """
    try:
        # Parse dependencies for filtering; only the circular-dependency
        # findings are needed in this function.
        _, circular_deps, _ = _parse_dependency_analysis(analysis_result)

        # Bundle the UI filter selections; this dict is also part of the
        # cache key, so identical filter settings hit the cache.
        filters = {
            "circular_only": show_circular_only,
            "community_only": show_community_only,
            "min_connections": min_connections,
            "circular_deps": circular_deps,
        }

        # Try to get cached graph data first to avoid recomputation.
        graph_data = _get_cached_graph_data(
            analysis_result, viz_type, selected_layout, filters
        )

        if graph_data is None:
            # Cache miss: build the graph with the chosen filters, then
            # store it for subsequent reruns.
            graph_data = create_dependency_graph(
                analysis_result, viz_type, selected_layout, filters
            )
            _cache_graph_data(
                analysis_result, viz_type, selected_layout, filters, graph_data
            )

        if graph_data:
            _display_graph_with_export_options(graph_data, viz_type)
        else:
            # The filters removed everything — nothing left to draw.
            st.info(
                "No dependency relationships found to visualise after applying filters."
            )

    except Exception as e:
        # Broad catch is deliberate: any rendering failure degrades to a
        # text-based summary rather than crashing the page.
        _handle_graph_visualization_error(e, analysis_result)
|
|
1918
|
+
|
|
1919
|
+
|
|
1920
|
+
def _get_cached_graph_data(analysis_result, viz_type, selected_layout, filters):
    """Get cached graph data if available.

    Returns the cached figure for this exact combination of analysis
    text, view type, layout and filters, or ``None`` when caching is
    disabled or nothing is cached yet.
    """
    key = f"graph_{hash(analysis_result)}_{viz_type}_{selected_layout}_{str(filters)}"

    caching_on = st.session_state.get("graph_cache_enabled", True)
    if not caching_on or key not in st.session_state:
        return None

    # Cache hit — tell the user we skipped the rebuild.
    st.info("📋 Using cached graph data")
    return st.session_state[key]
|
|
1934
|
+
|
|
1935
|
+
|
|
1936
|
+
def _cache_graph_data(analysis_result, viz_type, selected_layout, filters, graph_data):
    """Cache graph data if caching is enabled.

    Stores ``graph_data`` in session state under a key derived from the
    analysis text, view type, layout and filter settings.  No-op when
    the data is ``None`` or caching is turned off.
    """
    if graph_data is None:
        return
    if not st.session_state.get("graph_cache_enabled", True):
        return
    key = f"graph_{hash(analysis_result)}_{viz_type}_{selected_layout}_{str(filters)}"
    st.session_state[key] = graph_data
|
|
1943
|
+
|
|
1944
|
+
|
|
1945
|
+
def _display_graph_with_export_options(graph_data, viz_type):
    """Display graph and provide export options.

    Renders either an interactive Plotly figure (``viz_type ==
    "interactive"``) or a static matplotlib figure, each followed by a
    row of four download buttons in matching formats.

    Args:
        graph_data: The figure to render — a Plotly figure in the
            interactive branch, a matplotlib figure otherwise (it is
            passed to ``st.plotly_chart`` / ``st.pyplot`` and its
            ``to_html``/``savefig`` methods respectively).
        viz_type: Visualization mode selected by the user.
    """
    if viz_type == "interactive":
        # Interactive Plotly graph
        st.plotly_chart(graph_data, width="stretch")

        # Export options for interactive graph
        st.subheader("Export Graph")
        col1, col2, col3, col4 = st.columns(4)

        with col1:
            # Export as HTML (self-contained page, Plotly.js from CDN).
            html_content = graph_data.to_html(full_html=False, include_plotlyjs="cdn")
            st.download_button(
                label="🌐 HTML",
                data=html_content,
                file_name="dependency_graph.html",
                mime="text/html",
                help="Download interactive graph as HTML file",
            )

        with col2:
            # Export as JSON (raw Plotly figure spec).
            json_data = graph_data.to_json()
            st.download_button(
                label="📊 JSON",
                data=json_data,
                file_name="dependency_graph.json",
                mime=MIME_APPLICATION_JSON,
                help="Download graph data as JSON",
            )

        with col3:
            # Export as PNG (requires kaleido); degrade gracefully when
            # the optional image backend is not installed.
            try:
                import plotly.io as pio  # type: ignore[import-untyped]

                png_data = pio.to_image(graph_data, format="png", scale=2)
                st.download_button(
                    label="🖼️ PNG (High-res)",
                    data=png_data,
                    file_name="dependency_graph.png",
                    mime="image/png",
                    help="Download graph as high-resolution PNG",
                )
            except ImportError:
                st.info("PNG export requires additional dependencies")

        with col4:
            # Export as PDF (same optional backend as PNG).
            try:
                import plotly.io as pio

                pdf_data = pio.to_image(graph_data, format="pdf")
                st.download_button(
                    label="📄 PDF",
                    data=pdf_data,
                    file_name="dependency_graph.pdf",
                    mime="application/pdf",
                    help="Download graph as PDF document",
                )
            except ImportError:
                st.info("PDF export requires additional dependencies")

    else:
        # Static matplotlib graph
        st.pyplot(graph_data)

        # Export options for static graph
        st.subheader("Export Graph")
        col1, col2, col3, col4 = st.columns(4)

        with col1:
            # Export as PNG via an in-memory buffer (no temp files).
            import io

            buf = io.BytesIO()
            graph_data.savefig(buf, format="png", dpi=300, bbox_inches="tight")
            buf.seek(0)
            st.download_button(
                label="🖼️ PNG (High-res)",
                data=buf.getvalue(),
                file_name="dependency_graph.png",
                mime="image/png",
                help="Download graph as high-resolution PNG",
            )

        with col2:
            # Export as SVG (scalable vector output).
            buf_svg = io.BytesIO()
            graph_data.savefig(buf_svg, format="svg", bbox_inches="tight")
            buf_svg.seek(0)
            st.download_button(
                label="📈 SVG",
                data=buf_svg.getvalue(),
                file_name="dependency_graph.svg",
                mime="image/svg+xml",
                help="Download graph as scalable SVG",
            )

        with col3:
            # Export as PDF.
            buf_pdf = io.BytesIO()
            graph_data.savefig(buf_pdf, format="pdf", bbox_inches="tight")
            buf_pdf.seek(0)
            st.download_button(
                label="📄 PDF",
                data=buf_pdf.getvalue(),
                file_name="dependency_graph.pdf",
                mime="application/pdf",
                help="Download graph as PDF document",
            )

        with col4:
            # Export as EPS (PostScript vector format).
            buf_eps = io.BytesIO()
            graph_data.savefig(buf_eps, format="eps", bbox_inches="tight")
            buf_eps.seek(0)
            st.download_button(
                label="🔧 EPS",
                data=buf_eps.getvalue(),
                file_name="dependency_graph.eps",
                mime="application/postscript",
                help="Download graph as EPS vector format",
            )
|
|
2070
|
+
|
|
2071
|
+
|
|
2072
|
+
def _handle_graph_visualization_error(error, analysis_result):
    """Handle graph visualization errors with fallback display.

    Shows the error message inside an expander with troubleshooting
    hints, then falls back to the raw analysis text so the user still
    sees their results.

    Args:
        error: The exception raised while building/rendering the graph.
        analysis_result: Raw analysis text shown as the fallback.
    """
    st.error("❌ **Graph Visualization Error**")
    with st.expander("Error Details"):
        st.code(str(error), language="text")
        st.markdown("""
        **Possible causes:**
        - Invalid dependency analysis data
        - Graph layout algorithm failed for this data
        - Memory constraints for large graphs

        **Suggestions:**
        - Try a different layout algorithm
        - Reduce the scope of your dependency analysis
        - Check the dependency analysis output for issues
        """)

    # Fallback: show text summary
    st.info("📄 Showing text-based dependency summary instead:")
    st.text_area(
        "Dependency Analysis Text",
        analysis_result,
        height=300,
        help="Raw dependency analysis output",
    )
|
|
2097
|
+
|
|
2098
|
+
|
|
2099
|
+
def _display_dependency_analysis_sections(analysis_result):
    """Display dependency analysis results in expandable sections.

    Splits the markdown report on ``"\\n## "`` headings and routes each
    section to a themed expander; unrecognised sections are rendered
    inline.

    Args:
        analysis_result: Markdown-formatted analysis text.
    """
    # Split analysis into sections
    sections = analysis_result.split("\n## ")

    for section in sections:
        if section.strip():
            # split() strips the "## " from every section but the first;
            # restore it so heading-based matching below works uniformly.
            if not section.startswith("#"):
                section = "## " + section

            # Add expanders for different sections
            if "Migration Order Recommendations" in section:
                with st.expander("📋 Migration Order Recommendations"):
                    st.markdown(
                        section.replace("## Migration Order Recommendations", "")
                    )
            elif "Dependency Graph" in section:
                # NOTE(review): the same section body is rendered under
                # all three expanders below, each stripping a different
                # heading — confirm this duplication is intentional.
                with st.expander("🔗 Dependency Graph"):
                    st.markdown(section.replace("## Dependency Graph", ""))
                with st.expander(f"⚠️ {SECTION_CIRCULAR_DEPENDENCIES}"):
                    st.markdown(
                        section.replace(f"## {SECTION_CIRCULAR_DEPENDENCIES}", "")
                    )
                with st.expander(f"🌐 {SECTION_COMMUNITY_COOKBOOKS}"):
                    st.markdown(
                        section.replace(f"## {SECTION_COMMUNITY_COOKBOOKS}", "")
                    )
            elif "Migration Impact Analysis" in section:
                with st.expander("📊 Migration Impact Analysis"):
                    st.markdown(section.replace("## Migration Impact Analysis", ""))
            else:
                st.markdown(section)
|
|
2131
|
+
|
|
2132
|
+
|
|
2133
|
+
def _display_migration_recommendations(circular_deps, community_cookbooks, direct_deps):
    """Display migration recommendations based on analysis results.

    Args:
        circular_deps: Number of circular dependencies, or the parsed
            collection of them (coerced to its length).
        community_cookbooks: Number of community cookbooks, or the parsed
            collection of them (coerced to its length).
        direct_deps: Number of direct dependencies.

    Note:
        Callers in this module pass the raw collections returned by
        ``_parse_dependency_analysis``; comparing a list with ``> 0``
        raises TypeError in Python 3, so both count arguments are
        coerced defensively here.
    """

    def _as_count(value):
        # Accept either an int count or a sized collection.
        return value if isinstance(value, int) else len(value)

    circular_count = _as_count(circular_deps)
    community_count = _as_count(community_cookbooks)

    st.subheader("Migration Recommendations")

    if circular_count > 0:
        st.error(
            "⚠️ **Critical Issue**: Circular dependencies detected. "
            "Resolve before migration."
        )
        st.markdown("""
        **Resolution Steps:**
        1. Review the circular dependency pairs
        2. Refactor cookbooks to break circular references
        3. Consider combining tightly coupled cookbooks
        4. Update dependency declarations
        """)

    if community_count > 0:
        st.success(
            f"✅ **Good News**: {community_count} community cookbooks identified."
        )
        st.markdown("""
        **Recommendations:**
        - Replace with Ansible Galaxy roles where possible
        - Review community cookbook versions and security
        - Consider forking and maintaining custom versions if needed
        """)

    if direct_deps > 10:
        st.warning("⚠️ **Complex Dependencies**: High dependency count detected.")
        st.markdown("""
        **Consider:**
        - Breaking down monolithic cookbooks
        - Implementing proper dependency injection
        - Planning migration in smaller phases
        """)
|
|
2169
|
+
|
|
2170
|
+
|
|
2171
|
+
def _display_dependency_export_options(
    analysis_result,
    cookbook_path,
    depth,
    direct_deps,
    transitive_deps,
    circular_deps,
    community_cookbooks,
):
    """Display export options for dependency analysis.

    Offers two downloads side by side: the full markdown report, and a
    compact JSON summary bundling the headline metrics with the report.
    """
    import json

    st.subheader("Export Analysis")

    markdown_col, json_col = st.columns(2)

    with markdown_col:
        # Full report, verbatim, as a markdown file.
        st.download_button(
            label="📥 Download Full Analysis",
            data=analysis_result,
            file_name="dependency_analysis.md",
            mime=MIME_TEXT_MARKDOWN,
            help="Download complete dependency analysis",
        )

    with json_col:
        # Machine-readable summary: inputs, metric counts, full text.
        summary = {
            "cookbook_path": cookbook_path,
            "analysis_depth": depth,
            "metrics": {
                "direct_dependencies": direct_deps,
                "transitive_dependencies": transitive_deps,
                "circular_dependencies": circular_deps,
                "community_cookbooks": community_cookbooks,
            },
            "full_analysis": analysis_result,
        }
        st.download_button(
            label="📊 Download JSON Summary",
            data=json.dumps(summary, indent=2),
            file_name="dependency_analysis.json",
            mime=MIME_APPLICATION_JSON,
            help="Download analysis summary as JSON",
        )
|
|
2217
|
+
|
|
2218
|
+
|
|
2219
|
+
def _display_dependency_analysis_summary(analysis_result, cookbook_path, depth):
    """Display dependency analysis summary section.

    Shows the headline metric tiles parsed from the analysis text plus a
    reminder of which cookbook path and depth produced them.
    """
    st.subheader("Dependency Analysis Summary")

    # Pull the metric counts out of the textual analysis report:
    # (direct, transitive, circular, community) in that order.
    metrics = _parse_dependency_metrics_from_result(analysis_result)

    # Forward the four counts positionally to the metric tiles.
    _display_dependency_summary_metrics(*metrics)

    # Analysis depth indicator.
    st.info(f"Analysis performed with **{depth}** depth on: `{cookbook_path}`")
|
|
2237
|
+
|
|
2238
|
+
|
|
2239
|
+
def _display_graph_visualization_section(analysis_result, viz_type):
    """Display graph visualization section.

    Renders the layout selector, cache controls and filter widgets, then
    delegates the actual drawing to
    ``_display_dependency_graph_visualization``.  Does nothing unless the
    user chose a graph-based view type.

    Args:
        analysis_result: Raw textual dependency-analysis output.
        viz_type: Selected view type; only "graph" and "interactive"
            trigger rendering.
    """
    if viz_type not in ["graph", "interactive"]:
        return

    st.subheader("📊 Dependency Graph Visualization")

    # Parse dependencies for filtering and analysis.
    # NOTE(review): the result is discarded here — presumably called for
    # parse-time side effects or validation; the parsed values are
    # re-derived downstream.  Confirm this call is still needed.
    _ = _parse_dependency_analysis(analysis_result)

    # Layout algorithm selector
    layout_options = [
        "auto",
        "spring",
        "circular",
        "kamada_kawai",
        "shell",
        "spectral",
        "force_directed",
        "random",
    ]
    selected_layout = st.selectbox(
        "Layout Algorithm",
        layout_options,
        help="Choose graph layout algorithm. 'auto' selects best "
        "algorithm based on graph size.",
        # Map the internal option IDs to human-readable labels.
        format_func=lambda x: {
            "auto": "Auto (recommended)",
            "spring": "Spring Layout",
            "circular": "Circular Layout",
            "kamada_kawai": "Kamada-Kawai Layout",
            "shell": "Shell Layout (hierarchical)",
            "spectral": "Spectral Layout",
            "force_directed": "Force Directed",
            "random": "Random Layout",
        }.get(x, str(x)),
    )

    # Graph cache management
    _handle_graph_caching()

    # Graph Filtering Options
    st.subheader("🔍 Graph Filtering & Analysis")

    col1, col2, col3 = st.columns(3)

    with col1:
        show_circular_only = st.checkbox(
            "Show Circular Dependencies Only",
            help=("Filter graph to show only nodes involved in circular dependencies"),
        )

    with col2:
        show_community_only = st.checkbox(
            "Show Community Cookbooks Only",
            help=(
                "Filter graph to show only community cookbooks and their dependencies"
            ),
        )

    with col3:
        min_connections = st.slider(
            "Minimum Connections",
            min_value=0,
            max_value=10,
            value=0,
            help="Show only nodes with at least this many connections",
        )

    # Hand everything to the renderer (which also handles caching/errors).
    _display_dependency_graph_visualization(
        analysis_result,
        viz_type,
        selected_layout,
        show_circular_only,
        show_community_only,
        min_connections,
    )
|
|
2316
|
+
|
|
2317
|
+
|
|
2318
|
+
def _display_impact_analysis_section(analysis_result):
    """Display migration impact analysis section.

    Parses the analysis text, computes the migration impact metrics and
    renders them as four metric tiles (risk score, timeline, complexity,
    parallel streams) plus an expandable detailed breakdown.

    Args:
        analysis_result: Raw textual dependency-analysis output.
    """
    # Parse dependencies for impact analysis
    dependencies, circular_deps, community_cookbooks = _parse_dependency_analysis(
        analysis_result
    )

    # Impact Analysis Section
    st.subheader("📊 Migration Impact Analysis")

    # Without any dependencies there is nothing to score.
    if not dependencies:
        st.info("No dependencies found for impact analysis.")
        return

    impact_analysis = _calculate_migration_impact(
        dependencies, circular_deps, community_cookbooks
    )

    # Calculate risk score delta: >7 high, >4 medium, otherwise low.
    risk_score = impact_analysis["risk_score"]
    if risk_score > 7:
        risk_delta = "🔴 High"
    elif risk_score > 4:
        risk_delta = "🟡 Medium"
    else:
        risk_delta = "🟢 Low"

    col1, col2, col3, col4 = st.columns(4)

    with col1:
        st.metric(
            "Migration Risk Score",
            f"{impact_analysis['risk_score']:.1f}/10",
            delta=risk_delta,
        )

    with col2:
        timeline_weeks = impact_analysis["timeline_impact_weeks"]
        timeline_delta = "↗️" if timeline_weeks > 0 else "→"
        st.metric(
            "Estimated Timeline Impact",
            f"{timeline_weeks} weeks",
            delta=timeline_delta,
        )

    with col3:
        complexity_level = impact_analysis["complexity_level"]
        complexity_delta = "⚠️ High" if complexity_level == "High" else "✅ Low"
        st.metric(
            "Dependency Complexity",
            complexity_level,
            delta=complexity_delta,
        )

    with col4:
        parallel_streams = impact_analysis["parallel_streams"]
        parallel_delta = "🔀 Multiple" if parallel_streams > 1 else "➡️ Single"
        st.metric(
            "Parallel Migration Streams",
            parallel_streams,
            delta=parallel_delta,
        )

    # Detailed impact breakdown
    with st.expander("📈 Detailed Impact Analysis"):
        _display_detailed_impact_analysis(
            impact_analysis, dependencies, circular_deps, community_cookbooks
        )
|
|
2386
|
+
|
|
2387
|
+
|
|
2388
|
+
def _display_analysis_details_section(
    analysis_result, circular_deps, community_cookbooks, direct_deps
):
    """Display analysis details section.

    Shows the raw analysis split into expandable sections, followed by
    migration recommendations derived from the headline figures.

    NOTE(review): ``circular_deps`` and ``community_cookbooks`` are
    forwarded to ``_display_migration_recommendations``, which compares
    them with ``> 0`` — confirm callers pass numeric counts rather than
    the parsed collections.
    """
    # Display analysis results
    st.subheader("Dependency Analysis Details")

    _display_dependency_analysis_sections(analysis_result)

    # Migration recommendations
    _display_migration_recommendations(circular_deps, community_cookbooks, direct_deps)
|
|
2399
|
+
|
|
2400
|
+
|
|
2401
|
+
def display_dependency_analysis_results():
    """Display dependency analysis results.

    Orchestrates the full results page: summary metrics, optional graph
    visualization, impact analysis, expandable details, and export
    buttons.  All inputs come from session state set by the analysis run.
    """
    state = st.session_state
    analysis_result = state.dep_analysis_result
    cookbook_path = state.dep_cookbook_path
    depth = state.dep_depth
    viz_type = state.get("dep_viz_type", "text")

    # Headline metrics and the analysed path/depth.
    _display_dependency_analysis_summary(analysis_result, cookbook_path, depth)

    # Graph rendering (no-op unless a graph view type was chosen).
    _display_graph_visualization_section(analysis_result, viz_type)

    # Risk / timeline impact tiles.
    _display_impact_analysis_section(analysis_result)

    # Expandable details and recommendations.
    dependencies, circular_deps, community_cookbooks = _parse_dependency_analysis(
        analysis_result
    )
    direct_count = len(dependencies) if dependencies else 0
    _display_analysis_details_section(
        analysis_result, circular_deps, community_cookbooks, direct_count
    )

    # Download buttons (markdown report + JSON summary).
    _display_dependency_export_options(
        analysis_result,
        cookbook_path,
        depth,
        direct_count,
        direct_count,  # transitive count approximated by the direct count
        circular_deps,
        community_cookbooks,
    )
|
|
2436
|
+
|
|
2437
|
+
|
|
2438
|
+
def _collect_files_to_validate(input_path: str) -> list[Path]:
    """Collect YAML playbook files to validate from *input_path*.

    Args:
        input_path: User-supplied file or directory path.

    Returns:
        Candidate ``.yml``/``.yaml`` files, excluding well-known
        non-playbook files (Test Kitchen and docker-compose configs).
        Empty list on any validation failure — the error has already
        been reported to the UI by this function or its path helper.
    """
    validated_path = _normalize_and_validate_input_path(input_path)
    if validated_path is None:
        # Error already reported by _normalize_and_validate_input_path
        return []

    if not validated_path.exists():
        st.error(f"Path does not exist: {validated_path}")
        return []

    # Well-known YAML files that are never Ansible playbooks.  One shared
    # set so the file and directory branches cannot drift apart (the
    # original duplicated this list in both branches).
    excluded_files = {".kitchen.yml", "kitchen.yml", "docker-compose.yml"}
    yaml_suffixes = {".yml", ".yaml"}

    if validated_path.is_file():
        if (
            validated_path.suffix in yaml_suffixes
            and validated_path.name not in excluded_files
        ):
            return [validated_path]
        return []

    if validated_path.is_dir():
        # Recursive search for both extensions (.yml first, as before),
        # minus the exclusions.
        candidates = list(validated_path.rglob("*.yml")) + list(
            validated_path.rglob("*.yaml")
        )
        return [f for f in candidates if f.name not in excluded_files]

    # Exists but is neither file nor directory (e.g. special file).
    return []
|
|
2470
|
+
|
|
2471
|
+
|
|
2472
|
+
def _run_validation_engine(files_to_validate):
    """Run validation engine on a list of files.

    Each file is validated as a converted playbook ("recipe" conversion
    type).  Files that raise are reported with a warning and skipped, so
    one bad file never aborts the batch.

    Args:
        files_to_validate: Iterable of ``Path`` objects to validate.

    Returns:
        Flat list of ``ValidationResult`` records, one or more per file.
    """
    from souschef.core.validation import (
        ValidationCategory,
        ValidationEngine,
        ValidationLevel,
        ValidationResult,
    )

    engine = ValidationEngine()

    def _validate_one(path):
        # Validate a single file; content is treated as a playbook.
        text = path.read_text()
        results = engine.validate_conversion("recipe", text)
        if not results:
            # No findings — record an explicit pass so the file still
            # appears in the report.
            results = [
                ValidationResult(
                    ValidationLevel.INFO,
                    ValidationCategory.SYNTAX,
                    "File passed all validation checks",
                    location=path.name,
                )
            ]
        # Make sure every record points back to its source file.
        for item in results:
            if not item.location:
                item.location = path.name
        return results

    collected = []
    for candidate in files_to_validate:
        try:
            collected.extend(_validate_one(candidate))
        except Exception as file_err:
            # Deliberately broad: report and continue with the rest.
            st.warning(f"Could not read/validate {candidate.name}: {file_err}")
    return collected
|
|
2511
|
+
|
|
2512
|
+
|
|
2513
|
+
def _get_default_validation_path():
    """Pick a default validation path from earlier conversion/analysis steps.

    Returns the converted-playbooks path when one is recorded in session
    state, otherwise the analysed cookbook path (with a caveat shown to the
    user), otherwise an empty string.
    """
    state = st.session_state

    # A completed conversion is the strongest hint for what to validate.
    if "converted_playbooks_path" in state:
        path = state.converted_playbooks_path
        st.info(f"Pre-filled path from conversion: {path}")
        return path

    # Fall back to the analysed cookbook path, warning that raw cookbooks
    # are not what this tool validates.
    if "analysis_cookbook_path" in state and state.analysis_cookbook_path:
        path = state.analysis_cookbook_path
        st.info(f"Pre-filled path from analysis: {path}")
        st.caption(
            "Note: This tool validates Ansible playbooks (.yml). If you're using a raw "
            "Chef cookbook path, please ensure you've performed the conversion first."
        )
        return path

    return ""
|
|
2530
|
+
|
|
2531
|
+
|
|
2532
|
+
def _render_validation_options_ui():
    """Render the scope and output-format pickers; return both selections."""
    scope_col, format_col = st.columns(2)

    # Scopes mirror the categories understood by _filter_results_by_scope.
    scope_choices = [
        "Full Suite",
        "Syntax Only",
        "Logic/Semantic",
        "Security",
        SCOPE_BEST_PRACTICES,
    ]

    # Human-readable labels for the raw format identifiers.
    format_labels = {
        "text": "Text Report",
        "json": "JSON Data",
        "html": "HTML Report",
    }

    with scope_col:
        sub_scope = st.selectbox(
            "Validation Scope",
            scope_choices,
            help="Filter which validation checks to run",
        )

    with format_col:
        sub_format = st.selectbox(
            "Output Format",
            ["text", "json", "html"],
            help="Format for validation reports",
            format_func=lambda fmt: format_labels.get(fmt, str(fmt)),
        )

    return sub_scope, sub_format
|
|
2561
|
+
|
|
2562
|
+
|
|
2563
|
+
def _render_validation_input_ui(default_path):
    """Render the input-source picker and return the chosen path string."""
    st.subheader("Input Source")

    chosen_type = st.radio(
        "Input Type",
        ["Directory", "Single File"],
        horizontal=True,
        help="Validate a directory of files or a single file",
    )

    if chosen_type == "Directory":
        return st.text_input(
            "Directory Path",
            value=default_path,
            placeholder="/path/to/ansible/playbooks",
            help="Path to directory containing Ansible playbooks to validate",
        )

    # Single-file mode: only pre-fill when the default actually looks like YAML.
    is_yaml_default = bool(default_path) and default_path.endswith((".yml", ".yaml"))
    return st.text_input(
        "File Path",
        value=default_path if is_yaml_default else "",
        placeholder="/path/to/playbook.yml",
        help="Path to single Ansible playbook file to validate",
    )
|
|
2591
|
+
|
|
2592
|
+
|
|
2593
|
+
def _render_validation_settings_ui():
    """Render strict-mode and related toggles; return the three flags."""
    st.subheader("Validation Options")

    strict_col, practices_col, recs_col = st.columns(3)

    with strict_col:
        strict_mode = st.checkbox(
            "Strict Mode", help="Fail on warnings, not just errors"
        )

    with practices_col:
        include_best_practices = st.checkbox(
            f"Include {SCOPE_BEST_PRACTICES}",
            value=True,
            help="Check for Ansible best practices",
        )

    with recs_col:
        generate_recommendations = st.checkbox(
            "Generate Recommendations",
            value=True,
            help="Provide improvement suggestions",
        )

    return strict_mode, include_best_practices, generate_recommendations
|
|
2619
|
+
|
|
2620
|
+
|
|
2621
|
+
def _normalize_and_validate_input_path(input_path: str) -> Path | None:
    """
    Normalize and validate a user-provided filesystem path.

    Expands ``~``, resolves to an absolute path, and rejects paths outside
    the application root as a traversal guard.

    Returns a resolved Path object if valid, otherwise reports an error
    via Streamlit and returns None.
    """
    if not input_path:
        st.error(ERROR_MSG_ENTER_PATH)
        return None

    raw = input_path.strip()
    if not raw:
        # Whitespace-only input is treated the same as empty input.
        st.error(ERROR_MSG_ENTER_PATH)
        return None

    try:
        # Expand user home and resolve to an absolute, normalized path
        path_obj = Path(raw).expanduser().resolve()
    except Exception:
        # resolve()/expanduser() can fail on malformed paths (platform-dependent).
        st.error(f"Invalid path: {raw}")
        return None

    # Optional safety: constrain to the application root directory.
    # relative_to() raises ValueError when path_obj is outside app_root,
    # which the broad except turns into a user-facing rejection.
    # NOTE(review): `app_path` is a module-level value defined elsewhere in
    # this file — presumably the SousChef project root; confirm.
    try:
        app_root = Path(app_path).resolve()
        path_obj.relative_to(app_root)
    except Exception:
        st.error("Path must be within the SousChef project directory.")
        return None

    return path_obj
|
|
2653
|
+
|
|
2654
|
+
|
|
2655
|
+
def _handle_validation_execution(input_path, options):
    """Execute the validation process with progress tracking.

    Collects files, runs the validation engine, filters by the selected
    scope, formats a text report, and stores everything in Streamlit
    session state before triggering a rerun so the results page renders.

    Args:
        input_path: User-supplied file or directory path string.
        options: Dict with keys ``strict``, ``best_practices``,
            ``recommendations``, ``scope``, and ``format``.
    """
    progress_tracker = ProgressTracker(
        total_steps=6, description="Running validation..."
    )

    try:
        progress_tracker.update(1, "Preparing validation environment...")

        progress_tracker.update(2, "Scanning input files...")

        files_to_validate = _collect_files_to_validate(input_path)

        if not files_to_validate:
            # Error is handled inside _collect_files_to_validate
            # if path doesn't exist or is invalid; only warn when the path
            # itself is fine but simply contains no YAML files.
            validated_path = _normalize_and_validate_input_path(input_path)
            if validated_path is not None and validated_path.exists():
                st.warning(f"No YAML files found in {validated_path}")
            return

        progress_tracker.update(3, f"Validating {len(files_to_validate)} files...")

        all_results = _run_validation_engine(files_to_validate)

        # Filter results based on scope
        filtered_results = _filter_results_by_scope(all_results, options["scope"])

        # Format the results as text, one "[LEVEL] location: message" per line
        # (the same shape _parse_validation_metrics later counts).
        validation_result = "\n".join(
            [
                f"[{result.level.value.upper()}] {result.location}: {result.message}"
                for result in filtered_results
            ]
        )

        if not validation_result:
            validation_result = "No issues found matching the selected scope."

        progress_tracker.update(5, "Generating validation report...")

        # Store results in session state so display_validation_results()
        # can render them after the rerun.
        st.session_state.validation_result = validation_result
        st.session_state.validation_path = input_path.strip()
        st.session_state.validation_type = options["scope"]
        st.session_state.validation_options = options

        progress_tracker.complete("Validation completed!")
        st.success(f"Validation completed! Scanned {len(files_to_validate)} files.")
        # NOTE(review): st.rerun() works by raising a control-flow exception;
        # if that exception derives from Exception, the handler below would
        # swallow it and report a spurious error — confirm against the
        # Streamlit version in use.
        st.rerun()

    except Exception as e:
        progress_tracker.close()
        st.error(f"Error during validation: {e}")
|
|
2709
|
+
|
|
2710
|
+
|
|
2711
|
+
def show_validation_reports():
    """Show validation reports and conversion validation.

    Top-level page renderer: builds the input/options UI, kicks off
    validation on demand, and re-displays any stored results.
    """
    st.header(NAV_VALIDATION_REPORTS)

    st.markdown("""
    Validate Chef to Ansible conversions and generate comprehensive
    validation reports for migration quality assurance.
    """)

    # Check for previously analyzed path to pre-fill
    default_path = _get_default_validation_path()

    # UI Components
    validation_scope, output_format = _render_validation_options_ui()
    input_path = _render_validation_input_ui(default_path)
    strict_mode, include_best_practices, generate_recommendations = (
        _render_validation_settings_ui()
    )

    # Validation button
    if st.button("Run Validation", type="primary", width="stretch"):
        if not input_path or not input_path.strip():
            st.error("Please enter a path to validate.")
            return

        # Bundle the UI selections into the options dict consumed by
        # _handle_validation_execution (and stored in session state).
        options = {
            "strict": strict_mode,
            "best_practices": include_best_practices,
            "recommendations": generate_recommendations,
            "scope": validation_scope,
            "format": output_format,
        }

        _handle_validation_execution(input_path, options)

    # Display results if available (persists across reruns via session state)
    if "validation_result" in st.session_state:
        display_validation_results()
|
|
2749
|
+
|
|
2750
|
+
|
|
2751
|
+
def _filter_results_by_scope(results, scope):
    """Return only the validation results matching the selected scope.

    "Full Suite" (or any unrecognised scope) returns the results unchanged.
    """
    from souschef.core.validation import ValidationCategory

    if scope == "Full Suite":
        return results

    # UI scope label -> engine category.
    category_by_scope = {
        "Syntax Only": ValidationCategory.SYNTAX,
        "Logic/Semantic": ValidationCategory.SEMANTIC,
        "Security": ValidationCategory.SECURITY,
        SCOPE_BEST_PRACTICES: ValidationCategory.BEST_PRACTICE,
    }

    wanted = category_by_scope.get(scope)
    if not wanted:
        return results

    return [item for item in results if item.category == wanted]
|
|
2770
|
+
|
|
2771
|
+
|
|
2772
|
+
def _parse_validation_metrics(validation_result):
|
|
2773
|
+
"""Parse validation result to extract key metrics."""
|
|
2774
|
+
lines = validation_result.split("\n")
|
|
2775
|
+
|
|
2776
|
+
errors = 0
|
|
2777
|
+
warnings = 0
|
|
2778
|
+
passed = 0
|
|
2779
|
+
total_checks = 0
|
|
2780
|
+
|
|
2781
|
+
for line in lines:
|
|
2782
|
+
line_upper = line.upper()
|
|
2783
|
+
# Match both old format "ERROR:" and new format "[ERROR]"
|
|
2784
|
+
if "ERROR:" in line_upper or "[ERROR]" in line_upper:
|
|
2785
|
+
errors += 1
|
|
2786
|
+
elif "WARNING:" in line_upper or "[WARNING]" in line_upper:
|
|
2787
|
+
warnings += 1
|
|
2788
|
+
# Match explicit passed check or INFO level (which we use for success now)
|
|
2789
|
+
elif (
|
|
2790
|
+
"PASSED:" in line_upper
|
|
2791
|
+
or "PASSED" in line_upper
|
|
2792
|
+
or "✓" in line
|
|
2793
|
+
or "[INFO]" in line_upper
|
|
2794
|
+
):
|
|
2795
|
+
passed += 1
|
|
2796
|
+
if "Total checks:" in line.lower():
|
|
2797
|
+
with contextlib.suppress(ValueError):
|
|
2798
|
+
total_checks = int(line.split(":")[1].strip())
|
|
2799
|
+
|
|
2800
|
+
# If we found errors/warnings but no explicit "checks" count (legacy log parsing),
|
|
2801
|
+
# infer total checks from line items
|
|
2802
|
+
if total_checks == 0 and (errors > 0 or warnings > 0 or passed > 0):
|
|
2803
|
+
total_checks = errors + warnings + passed
|
|
2804
|
+
|
|
2805
|
+
return errors, warnings, passed, total_checks
|
|
2806
|
+
|
|
2807
|
+
|
|
2808
|
+
def _display_validation_summary_metrics(errors, warnings, passed, total_checks):
    """Render the four headline validation metrics side by side."""
    columns = st.columns(4)

    # One metric per column; delta doubles as a status glyph when non-zero.
    with columns[0]:
        st.metric("Total Checks", total_checks)
    with columns[1]:
        st.metric("Passed", passed, delta="✅" if passed > 0 else "")
    with columns[2]:
        st.metric("Warnings", warnings, delta="⚠️" if warnings > 0 else "")
    with columns[3]:
        st.metric("Errors", errors, delta="❌" if errors > 0 else "")
|
|
2823
|
+
|
|
2824
|
+
|
|
2825
|
+
def _display_validation_status(errors, warnings):
    """Show a single banner summarising the overall validation outcome."""
    if errors > 0:
        st.error("❌ **Validation Failed**: Critical issues found that need attention.")
        return
    if warnings > 0:
        st.warning(
            "⚠️ **Validation Passed with Warnings**: Review warnings before proceeding."
        )
        return
    st.success("✅ **Validation Passed**: All checks successful!")
|
|
2835
|
+
|
|
2836
|
+
|
|
2837
|
+
def _display_validation_sections(validation_result):
    """Render the report inside collapsible expanders keyed by section title."""
    # Ordered (marker, expander label) pairs; the first match wins, which
    # mirrors the original if/elif chain.
    known_sections = [
        ("Syntax Validation", "🔍 Syntax Validation"),
        ("Logic Validation", "🧠 Logic Validation"),
        ("Security Validation", "🔒 Security Validation"),
        ("Performance Validation", "⚡ Performance Validation"),
        (SCOPE_BEST_PRACTICES, f"📋 {SCOPE_BEST_PRACTICES}"),
        ("Recommendations", "💡 Recommendations"),
    ]

    # Split the markdown report on its "## " section headers.
    for raw_section in validation_result.split("\n## "):
        if not raw_section.strip():
            continue
        # Restore the header prefix lost by the split.
        section = raw_section if raw_section.startswith("#") else "## " + raw_section

        for marker, label in known_sections:
            if marker in section:
                with st.expander(label):
                    st.markdown(section.replace(f"## {marker}", ""))
                break
        else:
            # Unrecognised sections are rendered inline, un-collapsed.
            st.markdown(section)
|
|
2868
|
+
|
|
2869
|
+
|
|
2870
|
+
def _display_validation_action_items(errors, warnings):
    """Display action items based on validation results.

    Renders nothing when the run is clean; otherwise shows a checklist
    for errors and/or warnings.
    """
    if errors > 0 or warnings > 0:
        st.subheader("Action Items")

        if errors > 0:
            st.error("**Critical Issues to Fix:**")
            st.markdown("""
            - Review error messages above
            - Fix syntax and logic errors
            - Re-run validation after fixes
            - Consider impact on migration timeline
            """)

        if warnings > 0:
            st.warning("**Warnings to Review:**")
            st.markdown("""
            - Address security warnings
            - Review performance suggestions
            - Consider best practice recommendations
            - Document any intentional deviations
            """)
|
|
2892
|
+
|
|
2893
|
+
|
|
2894
|
+
def _display_validation_export_options(
    validation_result,
    input_path,
    validation_type,
    options,
    errors,
    warnings,
    passed,
    total_checks,
):
    """Display export options for validation results.

    Offers the full markdown report and a JSON summary (metrics plus an
    overall status derived from the error/warning counts) as downloads.
    """
    st.subheader("Export Report")

    col1, col2 = st.columns(2)

    with col1:
        st.download_button(
            label="📥 Download Full Report",
            data=validation_result,
            file_name="validation_report.md",
            mime=MIME_TEXT_MARKDOWN,
            help="Download complete validation report",
        )

    with col2:
        # Create JSON summary; status mirrors _display_validation_status:
        # any error -> failed, else any warning -> warning, else passed.
        if errors > 0:
            status = "failed"
        elif warnings > 0:
            status = "warning"
        else:
            status = "passed"
        report_json = {
            "input_path": input_path,
            "validation_type": validation_type,
            "options": options,
            "metrics": {
                "total_checks": total_checks,
                "passed": passed,
                "warnings": warnings,
                "errors": errors,
            },
            "status": status,
            "full_report": validation_result,
        }

        import json

        st.download_button(
            label="📊 Download JSON Summary",
            data=json.dumps(report_json, indent=2),
            file_name="validation_report.json",
            mime=MIME_APPLICATION_JSON,
            help="Download validation summary as JSON",
        )
|
|
2949
|
+
|
|
2950
|
+
|
|
2951
|
+
def display_validation_results():
    """Render the stored validation report: summary, details, and exports.

    Reads the report and its metadata from Streamlit session state (set by
    _handle_validation_execution) and delegates each section to a helper.
    """
    state = st.session_state
    report = state.validation_result
    path = state.validation_path
    scope = state.validation_type
    opts = state.validation_options

    st.subheader("Validation Summary")

    # Derive headline counts from the report text.
    errors, warnings, passed, total_checks = _parse_validation_metrics(report)

    _display_validation_summary_metrics(errors, warnings, passed, total_checks)
    _display_validation_status(errors, warnings)

    st.info(f"Validation type: **{scope}** | Path: `{path}`")

    st.subheader("Validation Details")
    _display_validation_sections(report)

    _display_validation_action_items(errors, warnings)

    _display_validation_export_options(
        report,
        path,
        scope,
        opts,
        errors,
        warnings,
        passed,
        total_checks,
    )
|
|
2995
|
+
|
|
2996
|
+
|
|
2997
|
+
if __name__ == "__main__":
    # Allow running this Streamlit page module directly as a script.
    main()
|