mcp-souschef 2.5.3__py3-none-any.whl → 2.8.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
souschef/ui/app.py CHANGED
@@ -6,16 +6,33 @@ assessment, and visualization.
 """
 
 import contextlib
+import logging
 import sys
 from pathlib import Path
 
 import streamlit as st
 
+# Configure logging to stdout for Docker visibility
+logging.basicConfig(
+    level=logging.INFO,
+    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
+    stream=sys.stdout,
+    force=True,  # Override any existing configuration
+)
+
+logger = logging.getLogger(__name__)
+logger.info("Starting SousChef UI application")
+
 # Add the parent directory to the path so we can import souschef modules
-sys.path.insert(0, str(Path(__file__).parent.parent))
+app_path = Path(__file__).parent.parent
+if str(app_path) not in sys.path:
+    sys.path.insert(0, str(app_path))
 
 # Import page modules
-from souschef.ui.pages.cookbook_analysis import show_cookbook_analysis_page
+from souschef.ui.pages.ai_settings import show_ai_settings_page  # noqa: E402
+from souschef.ui.pages.cookbook_analysis import (  # noqa: E402
+    show_cookbook_analysis_page,
+)
 
 # Constants for repeated strings
 NAV_MIGRATION_PLANNING = "Migration Planning"
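Note on the new logging block: `force=True` is what makes it reliable inside a container, because `logging.basicConfig()` is otherwise a no-op once any handler is installed (and a framework such as Streamlit typically installs one before user code runs). A minimal, self-contained sketch of the behaviour, independent of this package:

```python
# Demonstrates why force=True matters: the second basicConfig call would
# normally be ignored because a handler already exists.
import logging
import sys

logging.basicConfig(level=logging.WARNING)  # stands in for a framework's earlier setup

logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
    stream=sys.stdout,
    force=True,  # remove existing handlers so this configuration wins
)

logging.getLogger("souschef.ui").info("now visible on stdout, e.g. via `docker logs`")
```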
@@ -24,6 +41,19 @@ NAV_VALIDATION_REPORTS = "Validation Reports"
 MIME_TEXT_MARKDOWN = "text/markdown"
 MIME_APPLICATION_JSON = "application/json"
 SECTION_CIRCULAR_DEPENDENCIES = "Circular Dependencies"
+NAV_COOKBOOK_ANALYSIS = "Cookbook Analysis"
+NAV_AI_SETTINGS = "AI Settings"
+BUTTON_ANALYSE_DEPENDENCIES = "Analyse Dependencies"
+SECTION_COMMUNITY_COOKBOOKS = "Community Cookbooks"
+SECTION_COMMUNITY_COOKBOOKS_HEADER = "Community Cookbooks:"
+INPUT_METHOD_DIRECTORY_PATH = "Directory Path"
+SCOPE_BEST_PRACTICES = "Best Practices"
+ERROR_MSG_ENTER_PATH = "Please enter a path to validate."
+
+
+def health_check():
+    """Return simple health check endpoint for Docker."""
+    return {"status": "healthy", "service": "souschef-ui"}
 
 
 class ProgressTracker:
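`health_check()` only returns a dict; nothing in this hunk registers it as an HTTP route. A container health probe would more likely target Streamlit's own endpoint. A hypothetical probe script, assuming the default port 8501 and Streamlit's built-in `/_stcore/health` path (neither is confirmed by this diff):

```python
# Hypothetical Docker HEALTHCHECK helper; the URL and port are assumptions.
import urllib.request

def probe(url: str = "http://localhost:8501/_stcore/health", timeout: float = 3.0) -> bool:
    """Return True if the Streamlit server answers its health endpoint."""
    try:
        with urllib.request.urlopen(url, timeout=timeout) as resp:
            return resp.status == 200
    except OSError:
        return False

if __name__ == "__main__":
    raise SystemExit(0 if probe() else 1)  # exit code 1 marks the container unhealthy
```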
@@ -86,6 +116,65 @@ def with_progress_tracking(
     return wrapper
 
 
+def _setup_sidebar_navigation():
+    """Set up the sidebar navigation with buttons."""
+    st.sidebar.title("Navigation")
+
+    # Dashboard button
+    if st.sidebar.button(
+        "Dashboard",
+        help="View migration overview and quick actions",
+        width="stretch",
+    ):
+        st.session_state.current_page = "Dashboard"
+        st.rerun()
+
+    # Cookbook Analysis button
+    if st.sidebar.button(
+        NAV_COOKBOOK_ANALYSIS,
+        help="Analyse Chef cookbooks and assess migration complexity",
+        width="stretch",
+    ):
+        st.session_state.current_page = NAV_COOKBOOK_ANALYSIS
+        st.rerun()
+
+    # Dependency Mapping button
+    if st.sidebar.button(
+        NAV_DEPENDENCY_MAPPING,
+        help="Visualise cookbook dependencies and migration order",
+        width="stretch",
+    ):
+        st.session_state.current_page = NAV_DEPENDENCY_MAPPING
+        st.rerun()
+
+    # Migration Planning button
+    if st.sidebar.button(
+        NAV_MIGRATION_PLANNING,
+        help="Plan your Chef to Ansible migration with detailed timelines",
+        width="stretch",
+    ):
+        st.session_state.current_page = NAV_MIGRATION_PLANNING
+        st.rerun()
+
+    # Validation Reports button
+    if st.sidebar.button(
+        NAV_VALIDATION_REPORTS,
+        help="Validate conversions and generate quality assurance reports",
+        width="stretch",
+    ):
+        st.session_state.current_page = NAV_VALIDATION_REPORTS
+        st.rerun()
+
+    # AI Settings button
+    if st.sidebar.button(
+        NAV_AI_SETTINGS,
+        help="Configure AI provider settings for intelligent conversions",
+        width="stretch",
+    ):
+        st.session_state.current_page = NAV_AI_SETTINGS
+        st.rerun()
+
+
 def main():
     """Run the main Streamlit application."""
     st.set_page_config(
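The sidebar buttons above all follow one pattern: a press writes `st.session_state.current_page` and forces a rerun, and `main()` routes on that value. The pattern in isolation — a sketch using the same `width="stretch"` API the diff itself adopts:

```python
# Minimal session-state router - the same mechanism _setup_sidebar_navigation uses.
import streamlit as st

PAGES = ["Dashboard", "Cookbook Analysis", "AI Settings"]

for name in PAGES:
    if st.sidebar.button(name, width="stretch"):
        st.session_state.current_page = name
        st.rerun()  # rerun the script so the router below sees the new value

page = st.session_state.get("current_page", "Dashboard")  # default on first load
st.header(page)
```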
@@ -94,29 +183,31 @@ def main():
         initial_sidebar_state="expanded",
     )
 
-    st.title("SousChef - Visual Migration Planning")
-    st.markdown("*AI-powered Chef to Ansible migration planning interface*")
-
-    # Sidebar navigation
-    page = st.sidebar.selectbox(
-        "Navigation",
-        [
-            "Dashboard",
-            "Cookbook Analysis",
-            NAV_MIGRATION_PLANNING,
-            NAV_DEPENDENCY_MAPPING,
-            NAV_VALIDATION_REPORTS,
-        ],
-        help="Choose the section you want to work with. "
-        "Use arrow keys to navigate options.",
-        key="main_navigation",
+    # Hide Streamlit's default header elements and sidebar navigation
+    st.markdown(
+        """
+        <style>
+        #MainMenu {visibility: hidden;}
+        .stDeployButton {display:none;}
+        [data-testid="stSidebarNavLink"] {display: none;}
+        </style>
+        """,
+        unsafe_allow_html=True,
     )
 
+    # Set up sidebar navigation
+    _setup_sidebar_navigation()
+
+    # Get current page from session state, default to Dashboard
+    page = st.session_state.get("current_page", "Dashboard")
+
     # Main content area
     if page == "Dashboard":
         show_dashboard()
-    elif page == "Cookbook Analysis":
+    elif page == NAV_COOKBOOK_ANALYSIS:
         show_cookbook_analysis_page()
+    elif page == NAV_AI_SETTINGS:
+        show_ai_settings_page()
     elif page == NAV_MIGRATION_PLANNING:
         show_migration_planning()
     elif page == NAV_DEPENDENCY_MAPPING:
@@ -125,51 +216,182 @@ def main():
         show_validation_reports()
 
 
-def show_dashboard():
-    """Show the main dashboard with migration overview."""
-    st.header("Migration Dashboard")
+def _calculate_dashboard_metrics():
+    """Calculate and return dashboard metrics."""
+    cookbooks_analysed = 0
+    complexity_counts = {"High": 0, "Medium": 0, "Low": 0}
+    successful_analyses = 0
+
+    if "analysis_results" in st.session_state and st.session_state.analysis_results:
+        results = st.session_state.analysis_results
+        cookbooks_analysed = len(results)
+        successful_analyses = len([r for r in results if r.get("status") == "Analysed"])
+
+        for r in results:
+            comp = r.get("complexity", "Unknown")
+            if comp in complexity_counts:
+                complexity_counts[comp] += 1
+
+    # Determine overall complexity
+    overall_complexity = "Unknown"
+    if cookbooks_analysed > 0:
+        if complexity_counts["High"] > 0:
+            overall_complexity = "High"
+        elif complexity_counts["Medium"] > 0:
+            overall_complexity = "Medium"
+        elif complexity_counts["Low"] > 0:
+            overall_complexity = "Low"
+
+    conversion_rate = 0
+    if cookbooks_analysed > 0:
+        conversion_rate = int((successful_analyses / cookbooks_analysed) * 100)
+
+    return cookbooks_analysed, overall_complexity, conversion_rate, successful_analyses
 
+
+def _display_dashboard_metrics(
+    cookbooks_analysed, overall_complexity, conversion_rate, successful_analyses
+):
+    """Display the dashboard metrics."""
     col1, col2, col3 = st.columns(3)
 
     with col1:
-        st.metric("Cookbooks Analyzed", "0", "Ready to analyze")
+        st.metric(
+            "Cookbooks Analysed",
+            str(cookbooks_analysed),
+            f"{cookbooks_analysed} processed"
+            if cookbooks_analysed > 0
+            else "Ready to analyse",
+        )
         st.caption("Total cookbooks processed")
 
     with col2:
-        st.metric("Migration Complexity", "Unknown", "Assessment needed")
+        st.metric(
+            "Migration Complexity",
+            overall_complexity,
+            "Based on analysis"
+            if overall_complexity != "Unknown"
+            else "Assessment needed",
+        )
         st.caption("Overall migration effort")
 
     with col3:
-        st.metric("Conversion Rate", "0%", "Start migration")
-        st.caption("Successful conversions")
+        st.metric(
+            "Success Rate",
+            f"{conversion_rate}%",
+            f"{successful_analyses} successful"
+            if cookbooks_analysed > 0
+            else "Start migration",
+        )
+        st.caption("Successful analyses")
 
-    st.divider()
 
-    # Quick actions
-    st.subheader("Quick Actions")
+def _display_quick_upload_section():
+    """Display the quick upload section."""
+    st.subheader("Quick Start")
 
-    col1, col2 = st.columns(2)
+    col1, col2 = st.columns([2, 1])
 
     with col1:
-        if st.button(
-            "Analyze Cookbook Directory", type="primary", use_container_width=True
-        ):
-            st.rerun()  # This will trigger navigation to cookbook analysis
+        uploaded_file = st.file_uploader(
+            "Upload Cookbook Archive",
+            type=["zip", "tar.gz", "tgz", "tar"],
+            help="Upload a ZIP or TAR archive containing your Chef "
+            "cookbooks for quick analysis",
+            key="dashboard_upload",
+        )
+
+        if uploaded_file:
+            # Store the uploaded file in session state for persistence across pages
+            st.session_state.uploaded_file_data = uploaded_file.getvalue()
+            st.session_state.uploaded_file_name = uploaded_file.name
+            st.session_state.uploaded_file_type = uploaded_file.type
+
+            st.success(f"File {uploaded_file.name} uploaded successfully!")
+            st.info(
+                "Navigate to Cookbook Analysis to process this file, "
+                "or upload another file to replace it."
+            )
 
     with col2:
-        if st.button(
-            "Generate Migration Plan", type="secondary", use_container_width=True
-        ):
-            st.rerun()  # This will trigger navigation to migration planning
+        st.markdown("**Or choose your workflow:**")
 
-    # Recent activity
+        # Quick actions
+        if st.button("Analyse Cookbooks", type="primary", width="stretch"):
+            st.session_state.current_page = "Cookbook Analysis"
+            st.rerun()
+
+        if st.button("Generate Migration Plan", width="stretch"):
+            st.session_state.current_page = NAV_MIGRATION_PLANNING
+            st.rerun()
+
+        if st.button(BUTTON_ANALYSE_DEPENDENCIES, width="stretch"):
+            st.session_state.current_page = NAV_DEPENDENCY_MAPPING
+            st.rerun()
+
+
+def _display_recent_activity():
+    """Display the recent activity section."""
     st.subheader("Recent Activity")
-    st.info("No recent migration activity. Start by analyzing your cookbooks!")
+    st.info(
+        "No recent migration activity. Start by uploading cookbooks "
+        "above or using the Cookbook Analysis page!"
+    )
+
+    # Getting started guide
+    with st.expander("How to Get Started"):
+        st.markdown("""
+        **New to SousChef? Here's how to begin:**
+
+        1. **Upload Cookbooks**: Use the uploader above or go to Cookbook Analysis
+        2. **Analyse Complexity**: Get detailed migration assessments
+        3. **Plan Migration**: Generate timelines and resource requirements
+        4. **Convert to Ansible**: Download converted playbooks
+
+        **Supported Formats:**
+        - ZIP archives (.zip)
+        - TAR archives (.tar, .tar.gz, .tgz)
+        - Directory paths (in Cookbook Analysis)
+
+        **Expected Structure:**
+        ```
+        your-cookbooks/
+        ├── nginx/
+        │   ├── metadata.rb
+        │   ├── recipes/
+        │   └── attributes/
+        └── apache2/
+            └── metadata.rb
+        ```
+        """)
+
+
+def show_dashboard():
+    """Show the main dashboard with migration overview."""
+    st.header("Migration Dashboard")
+
+    # Metrics calculation
+    cookbooks_analysed, overall_complexity, conversion_rate, successful_analyses = (
+        _calculate_dashboard_metrics()
+    )
+
+    # Display metrics
+    _display_dashboard_metrics(
+        cookbooks_analysed, overall_complexity, conversion_rate, successful_analyses
+    )
+
+    st.divider()
+
+    # Quick upload section
+    _display_quick_upload_section()
+
+    # Recent activity
+    _display_recent_activity()
 
 
 def show_migration_planning():
     """Show migration planning interface."""
-    st.header("Migration Planning")
+    st.header(NAV_MIGRATION_PLANNING)
 
     # Import assessment functions
     from souschef.assessment import generate_migration_plan
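The metric helpers above reduce to simple counting over `st.session_state.analysis_results`. Worked through on made-up data (field names as used in the diff):

```python
# How _calculate_dashboard_metrics derives its three headline numbers.
results = [
    {"status": "Analysed", "complexity": "Low"},
    {"status": "Analysed", "complexity": "High"},
    {"status": "Failed", "complexity": "Unknown"},
]

cookbooks_analysed = len(results)                                      # 3
successful = sum(1 for r in results if r.get("status") == "Analysed")  # 2
conversion_rate = int(successful / cookbooks_analysed * 100)           # 66

# Overall complexity is the worst level present: High beats Medium beats Low.
overall = next(
    (level for level in ("High", "Medium", "Low")
     if any(r.get("complexity") == level for r in results)),
    "Unknown",
)
print(cookbooks_analysed, overall, conversion_rate)  # 3 High 66
```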
@@ -183,13 +405,27 @@ def show_migration_planning():
     # Step 1: Cookbook Selection
     st.subheader("Step 1: Cookbook Selection")
 
+    # Check for previously analyzed cookbooks
+    uploaded_plan_context = None
+    if (
+        "analysis_cookbook_path" in st.session_state
+        and st.session_state.analysis_cookbook_path
+    ):
+        uploaded_plan_context = st.session_state.analysis_cookbook_path
+        st.info(f"Using analyzed cookbooks from: {uploaded_plan_context}")
+
     col1, col2 = st.columns([3, 1])
 
     with col1:
+        # Default to analyzed path if available
+        default_paths = uploaded_plan_context if uploaded_plan_context else ""
+
         cookbook_paths = st.text_area(
             "Cookbook Paths",
+            value=default_paths,
             placeholder="/path/to/cookbooks/nginx,/path/to/cookbooks/apache2,/path/to/cookbooks/mysql",
-            help="Enter comma-separated paths to your Chef cookbooks",
+            help="Enter comma-separated paths to your Chef cookbooks. If you uploaded "
+            "an archive in the Analysis tab, that path is pre-filled.",
             height=100,
         )
 
@@ -274,7 +510,7 @@ def show_migration_planning():
     # Step 3: Generate Plan
     st.subheader("Step 3: Generate Migration Plan")
 
-    if st.button("Generate Migration Plan", type="primary", use_container_width=True):
+    if st.button("Generate Migration Plan", type="primary", width="stretch"):
         if not cookbook_paths.strip():
             st.error("Please enter cookbook paths to generate a migration plan.")
             return
@@ -366,7 +602,7 @@ def _display_migration_action_buttons(cookbook_paths):
     col1, col2, col3 = st.columns(3)
 
     with col1:
-        if st.button("📊 Generate Detailed Report", use_container_width=True):
+        if st.button("📊 Generate Detailed Report", width="stretch"):
             with st.spinner("Generating detailed migration report..."):
                 try:
                     from souschef.assessment import generate_migration_report
@@ -380,27 +616,27 @@ def _display_migration_action_buttons(cookbook_paths):
                     st.error(f"Error generating report: {e}")
 
     with col2:
-        if st.button("🔍 Analyze Dependencies", use_container_width=True):
+        if st.button("🔍 Analyse Dependencies", width="stretch"):
             if len(cookbook_paths.split(",")) == 1:
                 # Single cookbook dependency analysis
                 cookbook_path = cookbook_paths.split(",")[0].strip()
-                with st.spinner(f"Analyzing dependencies for {cookbook_path}..."):
+                with st.spinner(f"Analysing dependencies for {cookbook_path}..."):
                     try:
-                        from souschef.assessment import analyze_cookbook_dependencies
+                        from souschef.assessment import analyse_cookbook_dependencies
 
-                        dep_analysis = analyze_cookbook_dependencies(cookbook_path)
+                        dep_analysis = analyse_cookbook_dependencies(cookbook_path)
                         st.session_state.dep_analysis = dep_analysis
                         st.success("Dependency analysis complete!")
                     except Exception as e:
                         st.error(f"Error analyzing dependencies: {e}")
             else:
                 st.info(
-                    "Dependency analysis is optimized for single cookbooks. "
+                    "Dependency analysis is optimised for single cookbooks. "
                     "Select one cookbook path for detailed analysis."
                 )
 
     with col3:
-        if st.button("📥 Export Plan", use_container_width=True):
+        if st.button("📥 Export Plan", width="stretch"):
             # Create downloadable plan
             plan_content = f"""# Chef to Ansible Migration Plan
 Generated: {st.session_state.get("timestamp", "Unknown")}
@@ -454,20 +690,50 @@ def show_dependency_mapping():
     st.header(NAV_DEPENDENCY_MAPPING)
 
     # Import assessment functions
-    from souschef.assessment import analyze_cookbook_dependencies
+    from souschef.assessment import analyse_cookbook_dependencies
 
     st.markdown("""
-    Visualize and analyze cookbook dependencies to understand migration order
+    Visualise and analyse cookbook dependencies to understand migration order
     and identify potential circular dependencies.
     """)
 
-    # Cookbook path input
-    cookbook_path = st.text_input(
-        "Cookbook Directory Path",
-        placeholder="/path/to/your/cookbooks",
-        help="Enter the path to your cookbooks directory for dependency analysis",
+    # Input method selection
+    input_method = st.radio(
+        "Choose Input Method",
+        ["Upload Archive", INPUT_METHOD_DIRECTORY_PATH],
+        horizontal=True,
+        help="Select how to provide cookbooks for dependency analysis",
+        key="dep_input_method",
     )
 
+    cookbook_path = None
+    uploaded_file = None
+
+    if input_method == INPUT_METHOD_DIRECTORY_PATH:
+        cookbook_path = st.text_input(
+            "Cookbook Directory Path",
+            placeholder="/path/to/your/cookbooks",
+            help="Enter the path to your cookbooks directory for dependency analysis",
+        )
+    else:
+        uploaded_file = st.file_uploader(
+            "Upload Cookbook Archive",
+            type=["zip", "tar.gz", "tgz", "tar"],
+            help="Upload a ZIP or TAR archive containing your Chef cookbooks",
+            key="dep_archive_upload",
+        )
+        if uploaded_file:
+            try:
+                with st.spinner("Extracting archive..."):
+                    # Import the extract function from cookbook_analysis
+                    from souschef.ui.pages.cookbook_analysis import extract_archive
+
+                    cookbook_path = str(extract_archive(uploaded_file))
+                st.success("Archive extracted successfully")
+            except Exception as e:
+                st.error(f"Failed to extract archive: {e}")
+                return
+
     # Analysis options
     col1, col2 = st.columns(2)
 
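`extract_archive` is imported from `souschef.ui.pages.cookbook_analysis`, whose implementation is not part of this diff. A stand-in with the same shape (uploaded file-like object in, extraction directory out) might look like the sketch below; the real helper may differ:

```python
# Hypothetical stand-in for extract_archive; the actual implementation lives
# in souschef/ui/pages/cookbook_analysis.py and is not shown in this diff.
import tarfile
import tempfile
import zipfile
from pathlib import Path

def extract_archive_sketch(uploaded_file) -> Path:
    target = Path(tempfile.mkdtemp(prefix="souschef_"))
    if uploaded_file.name.lower().endswith(".zip"):
        with zipfile.ZipFile(uploaded_file) as zf:
            zf.extractall(target)
    else:  # .tar, .tar.gz, .tgz
        with tarfile.open(fileobj=uploaded_file, mode="r:*") as tf:
            tf.extractall(target)  # real code should vet member paths first
    return target
```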
@@ -475,7 +741,7 @@ def show_dependency_mapping():
         dependency_depth = st.selectbox(
             "Analysis Depth",
             ["direct", "transitive", "full"],
-            help="How deep to analyze dependencies",
+            help="How deep to analyse dependencies",
             format_func=lambda x: {
                 "direct": "Direct Dependencies Only",
                 "transitive": "Include Transitive Dependencies",
@@ -496,21 +762,21 @@ def show_dependency_mapping():
         )
 
     # Analysis button
-    if st.button("Analyze Dependencies", type="primary", use_container_width=True):
-        if not cookbook_path.strip():
+    if st.button(BUTTON_ANALYSE_DEPENDENCIES, type="primary", width="stretch"):
+        if not cookbook_path or not cookbook_path.strip():
             st.error("Please enter a cookbook directory path.")
             return
 
         # Create progress tracker
         progress_tracker = ProgressTracker(
-            total_steps=5, description="Analyzing cookbook dependencies..."
+            total_steps=5, description="Analysing cookbook dependencies..."
         )
 
         try:
             progress_tracker.update(1, "Scanning cookbook directory...")
 
-            # Analyze dependencies
-            analysis_result = analyze_cookbook_dependencies(
+            # Analyse dependencies
+            analysis_result = analyse_cookbook_dependencies(
                 cookbook_path.strip(), dependency_depth
             )
 
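The changed guard (`not cookbook_path or not cookbook_path.strip()`) is not cosmetic: with the new upload flow, `cookbook_path` can be `None`, and `None.strip()` would raise `AttributeError`. The short-circuit covers all the empty cases:

```python
# Why the None check comes first in the new button guard.
for cookbook_path in (None, "", "   ", "/path/to/cookbooks"):
    invalid = not cookbook_path or not cookbook_path.strip()
    print(repr(cookbook_path), "->", "show error" if invalid else "run analysis")
# None, "" and "   " all hit the error branch without ever calling .strip() on None
```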
@@ -543,7 +809,7 @@ def _setup_dependency_mapping_ui():
     st.header(NAV_DEPENDENCY_MAPPING)
 
     st.markdown("""
-    Visualize and analyze cookbook dependencies to understand migration order
+    Visualise and analyse cookbook dependencies to understand migration order
     and identify potential circular dependencies.
     """)
 
@@ -564,7 +830,7 @@ def _get_dependency_mapping_inputs():
         dependency_depth = st.selectbox(
             "Analysis Depth",
             ["direct", "transitive", "full"],
-            help="How deep to analyze dependencies",
+            help="How deep to analyse dependencies",
             format_func=lambda x: {
                 "direct": "Direct Dependencies Only",
                 "transitive": "Include Transitive Dependencies",
@@ -592,8 +858,8 @@ def _handle_dependency_analysis_execution(
 ):
     """Handle the dependency analysis execution when button is clicked."""
     # Analysis button
-    if st.button("Analyze Dependencies", type="primary", use_container_width=True):
-        if not cookbook_path.strip():
+    if st.button(BUTTON_ANALYSE_DEPENDENCIES, type="primary", width="stretch"):
+        if not cookbook_path or not cookbook_path.strip():
             st.error("Please enter a cookbook directory path.")
             return
 
@@ -605,18 +871,18 @@ def _handle_dependency_analysis_execution(
 def _perform_dependency_analysis(cookbook_path, dependency_depth, visualization_type):
     """Perform the actual dependency analysis."""
     # Import assessment functions
-    from souschef.assessment import analyze_cookbook_dependencies
+    from souschef.assessment import analyse_cookbook_dependencies
 
     # Create progress tracker
     progress_tracker = ProgressTracker(
-        total_steps=5, description="Analyzing cookbook dependencies..."
+        total_steps=5, description="Analysing cookbook dependencies..."
     )
 
     try:
         progress_tracker.update(1, "Scanning cookbook directory...")
 
-        # Analyze dependencies
-        analysis_result = analyze_cookbook_dependencies(cookbook_path, dependency_depth)
+        # Analyse dependencies
+        analysis_result = analyse_cookbook_dependencies(cookbook_path, dependency_depth)
 
         progress_tracker.update(2, "Parsing dependency relationships...")
         progress_tracker.update(3, "Detecting circular dependencies...")
@@ -655,7 +921,7 @@ def _extract_dependency_relationships(lines):
             current_section = "direct"
         elif "Transitive Dependencies:" in line:
             current_section = "transitive"
-        elif line.startswith("- ") and current_section in ["direct", "transitive"]:
+        elif line.startswith("- ") and current_section == "direct":
             # Regular dependencies
             dep_text = line[2:].strip()
             if ":" in dep_text:
@@ -690,7 +956,7 @@ def _update_current_section(line, current_section):
     line = line.strip()
     if "Circular Dependencies:" in line:
         return "circular"
-    elif "Community Cookbooks:" in line:
+    elif SECTION_COMMUNITY_COOKBOOKS_HEADER in line:
         return "community"
     return current_section
 
@@ -760,36 +1026,94 @@ def _create_networkx_graph(dependencies, circular_deps, community_cookbooks):
 
 
 def _calculate_graph_positions(graph, layout_algorithm):
-    """Calculate node positions using specified layout algorithm."""
-    import networkx as nx
+    """
+    Calculate positions for graph nodes using the specified layout algorithm.
+
+    Args:
+        graph: NetworkX graph object
+        layout_algorithm: String specifying the layout algorithm to use
 
+    Returns:
+        tuple: (positions_dict, algorithm_used)
+
+    """
     # Choose layout algorithm based on graph size and user preference
     num_nodes = len(graph.nodes)
     if layout_algorithm == "auto":
-        if num_nodes < 10:
-            layout_algorithm = "spring"
-        elif num_nodes < 50:
-            layout_algorithm = "kamada_kawai"
-        else:
-            layout_algorithm = "circular"
+        layout_algorithm = _choose_auto_layout_algorithm(num_nodes)
 
     # Calculate positions using selected layout algorithm
-    if layout_algorithm == "spring":
-        pos = nx.spring_layout(graph, k=2, iterations=50)
-    elif layout_algorithm == "circular":
-        pos = nx.circular_layout(graph)
-    elif layout_algorithm == "kamada_kawai":
-        try:
-            pos = nx.kamada_kawai_layout(graph)
-        except Exception:
-            # Fallback to spring layout if kamada_kawai fails
-            pos = nx.spring_layout(graph, k=2, iterations=50)
-    else:
-        pos = nx.spring_layout(graph, k=2, iterations=50)
+    pos = _calculate_positions_with_algorithm(graph, layout_algorithm)
 
     return pos, layout_algorithm
 
 
+def _choose_auto_layout_algorithm(num_nodes):
+    """Choose the best layout algorithm based on graph size."""
+    if num_nodes <= 10:
+        return "circular"
+    elif num_nodes <= 50:
+        return "spring"
+    else:
+        return "kamada_kawai"
+
+
+def _calculate_positions_with_algorithm(graph, layout_algorithm):
+    """Calculate node positions using the specified algorithm."""
+    import networkx as nx
+
+    try:
+        if layout_algorithm == "spring":
+            return nx.spring_layout(graph, k=2, iterations=50, seed=42)
+        elif layout_algorithm == "circular":
+            return nx.circular_layout(graph)
+        elif layout_algorithm == "kamada_kawai":
+            return nx.kamada_kawai_layout(graph)
+        elif layout_algorithm == "shell":
+            return _calculate_shell_layout_positions(graph)
+        elif layout_algorithm == "random":
+            return nx.random_layout(graph, seed=42)
+        elif layout_algorithm == "spectral":
+            return nx.spectral_layout(graph)
+        elif layout_algorithm == "force_directed":
+            return nx.spring_layout(graph, k=3, iterations=100, seed=42, scale=2)
+        else:
+            return nx.spring_layout(graph, k=2, iterations=50, seed=42)
+    except Exception as e:
+        # Fallback to spring layout if algorithm fails
+        st.warning(
+            f"Layout algorithm '{layout_algorithm}' failed, using spring layout: {e}"
+        )
+        return nx.spring_layout(graph, k=2, iterations=50, seed=42)
+
+
+def _calculate_shell_layout_positions(graph):
+    """Calculate shell layout positions for hierarchical organization."""
+    import networkx as nx
+
+    # Identify leaf nodes (no outgoing edges)
+    leaf_nodes = [n for n in graph.nodes() if graph.out_degree(n) == 0]
+    # Identify root nodes (no incoming edges)
+    root_nodes = [n for n in graph.nodes() if graph.in_degree(n) == 0]
+    # Middle nodes
+    middle_nodes = [
+        n for n in graph.nodes() if n not in leaf_nodes and n not in root_nodes
+    ]
+
+    shells = []
+    if root_nodes:
+        shells.append(root_nodes)
+    if middle_nodes:
+        shells.append(middle_nodes)
+    if leaf_nodes:
+        shells.append(leaf_nodes)
+
+    if shells:
+        return nx.shell_layout(graph, shells)
+    else:
+        return nx.spring_layout(graph, k=2, iterations=50, seed=42)
+
+
 def _create_plotly_edge_traces(graph, pos):
     """Create edge traces for Plotly graph."""
     import plotly.graph_objects as go  # type: ignore[import-untyped]
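The layout helpers wrap plain networkx calls, with seeded `spring_layout` as the universal fallback so repeated renders are reproducible. The dispatch-plus-fallback shape, on a toy graph:

```python
# Same call pattern as _calculate_positions_with_algorithm, minus Streamlit.
import networkx as nx

g = nx.DiGraph([("nginx", "openssl"), ("apache2", "openssl")])

try:
    pos = nx.kamada_kawai_layout(g)  # may raise, e.g. if scipy is unavailable
except Exception:
    pos = nx.spring_layout(g, k=2, iterations=50, seed=42)  # deterministic fallback

print({node: tuple(round(c, 2) for c in xy) for node, xy in pos.items()})
```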
@@ -900,7 +1224,7 @@ def _create_plotly_figure_layout(num_nodes, layout_algorithm):
     """Create Plotly figure layout."""
     import plotly.graph_objects as go
 
-    return go.Layout(
+    layout: go.Layout = go.Layout(
         title=f"Cookbook Dependency Graph ({num_nodes} nodes, "
         f"{layout_algorithm} layout)",
         titlefont_size=16,
@@ -920,6 +1244,8 @@ def _create_plotly_figure_layout(num_nodes, layout_algorithm):
         plot_bgcolor="white",
     )
 
+    return layout
+
 
 def _create_interactive_plotly_graph(graph, pos, num_nodes, layout_algorithm):
     """Create interactive Plotly graph visualization."""
@@ -1026,15 +1352,17 @@ def _create_static_matplotlib_graph(graph, pos, num_nodes, layout_algorithm):
     return plt.gcf()
 
 
-def create_dependency_graph(analysis_result, viz_type, layout_algorithm="auto"):
+def create_dependency_graph(
+    analysis_result, viz_type, layout_algorithm="auto", filters=None
+):
     """
-    Create a dependency graph visualization.
+    Create a dependency graph visualization with optional filtering.
 
     Args:
         analysis_result: Text analysis result from dependency analysis
         viz_type: Visualization type ("interactive" or "static")
-        layout_algorithm: Layout algorithm to use ("auto", "spring",
-            "circular", "kamada_kawai")
+        layout_algorithm: Layout algorithm to use
+        filters: Dictionary of filter options
 
     Returns:
         Plotly figure for interactive graphs, matplotlib figure for static graphs
@@ -1049,6 +1377,10 @@ def create_dependency_graph(analysis_result, viz_type, layout_algorithm="auto"):
     # Create NetworkX graph
     graph = _create_networkx_graph(dependencies, circular_deps, community_cookbooks)
 
+    # Apply filters if provided
+    if filters:
+        graph = _apply_graph_filters(graph, filters)
+
     if len(graph.nodes) == 0:
         return None
 
@@ -1069,6 +1401,72 @@ def create_dependency_graph(analysis_result, viz_type, layout_algorithm="auto"):
         return None
 
 
+def _apply_graph_filters(graph, filters):
+    """Apply filters to the NetworkX graph."""
+    filtered_graph = graph.copy()
+
+    # Apply each filter type
+    filtered_graph = _filter_circular_dependencies_only(filtered_graph, filters)
+    filtered_graph = _filter_community_cookbooks_only(filtered_graph, filters)
+    filtered_graph = _filter_minimum_connections(filtered_graph, filters)
+
+    return filtered_graph
+
+
+def _filter_circular_dependencies_only(graph, filters):
+    """Filter graph to show only nodes involved in circular dependencies."""
+    if not filters.get("circular_only", False):
+        return graph
+
+    # Find nodes involved in circular dependencies
+    circular_nodes = set()
+    for source, target in filters.get("circular_deps", []):
+        circular_nodes.add(source)
+        circular_nodes.add(target)
+
+    # Remove nodes not involved in circular dependencies
+    nodes_to_remove = [n for n in graph.nodes() if n not in circular_nodes]
+    graph.remove_nodes_from(nodes_to_remove)
+
+    return graph
+
+
+def _filter_community_cookbooks_only(graph, filters):
+    """Filter graph to show only community cookbooks and their dependencies."""
+    if not filters.get("community_only", False):
+        return graph
+
+    community_nodes = set()
+    for node in graph.nodes():
+        if graph.nodes[node].get("community", False):
+            community_nodes.add(node)
+            # Also include dependencies of community cookbooks
+            for successor in graph.successors(node):
+                community_nodes.add(successor)
+
+    # Remove nodes not related to community cookbooks
+    nodes_to_remove = [n for n in graph.nodes() if n not in community_nodes]
+    graph.remove_nodes_from(nodes_to_remove)
+
+    return graph
+
+
+def _filter_minimum_connections(graph, filters):
+    """Filter graph to show only nodes with minimum connection count."""
+    min_connections = filters.get("min_connections", 0)
+    if min_connections <= 0:
+        return graph
+
+    nodes_to_remove = []
+    for node in graph.nodes():
+        degree = graph.degree(node)
+        if degree < min_connections:
+            nodes_to_remove.append(node)
+    graph.remove_nodes_from(nodes_to_remove)
+
+    return graph
+
+
 def _parse_dependency_metrics_from_result(analysis_result):
     """Parse dependency analysis result to extract key metrics."""
     lines = analysis_result.split("\n")
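The three filters all reduce to "compute a keep-set, drop the rest". The minimum-connections variant, exercised on a toy graph:

```python
# _filter_minimum_connections in miniature: drop nodes below the degree threshold.
import networkx as nx

g = nx.DiGraph([("a", "b"), ("a", "c"), ("b", "c"), ("d", "a")])
min_connections = 2

keep = [n for n in g.nodes() if g.degree(n) >= min_connections]
filtered = g.subgraph(keep).copy()
print(sorted(filtered.nodes()))  # ['a', 'b', 'c'] - 'd' has only one edge
```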
@@ -1116,81 +1514,449 @@ def _display_dependency_summary_metrics(
     )
 
     with col4:
-        st.metric("Community Cookbooks", community_cookbooks)
+        st.metric(SECTION_COMMUNITY_COOKBOOKS, community_cookbooks)
+
+
+def _calculate_migration_impact(dependencies, circular_deps, community_cookbooks):
+    """Calculate migration impact analysis based on dependency structure."""
+    from typing import Any
+
+    impact: dict[str, Any] = {
+        "risk_score": 0.0,
+        "timeline_impact_weeks": 0,
+        "complexity_level": "Low",
+        "parallel_streams": 1,
+        "critical_path": [],
+        "bottlenecks": [],
+        "recommendations": [],
+    }
+
+    # Calculate risk score based on various factors
+    risk_factors = {
+        "circular_deps": len(circular_deps)
+        * 2.0,  # Each circular dep adds significant risk
+        "total_deps": len(dependencies) * 0.1,  # More dependencies = higher complexity
+        "community_cookbooks": len(community_cookbooks)
+        * 0.5,  # Community cookbooks need evaluation
+        "max_chain_length": _calculate_max_dependency_chain(dependencies)
+        * 0.3,  # Long chains are risky
+    }
+
+    impact["risk_score"] = min(10.0, sum(risk_factors.values()))
+
+    # Determine complexity level
+    if impact["risk_score"] > 7:
+        impact["complexity_level"] = "High"
+        impact["timeline_impact_weeks"] = 4
+    elif impact["risk_score"] > 4:
+        impact["complexity_level"] = "Medium"
+        impact["timeline_impact_weeks"] = 2
+    else:
+        impact["complexity_level"] = "Low"
+        impact["timeline_impact_weeks"] = 0
+
+    # Calculate parallel migration streams
+    if len(dependencies) > 20:
+        impact["parallel_streams"] = 3
+    elif len(dependencies) > 10:
+        impact["parallel_streams"] = 2
+    else:
+        impact["parallel_streams"] = 1
+
+    # Identify critical path (longest dependency chain)
+    impact["critical_path"] = _find_critical_path(dependencies)
+
+    # Identify bottlenecks (highly depended-upon cookbooks)
+    impact["bottlenecks"] = _identify_bottlenecks(dependencies)
+
+    # Generate recommendations
+    impact["recommendations"] = _generate_impact_recommendations(
+        impact, circular_deps, community_cookbooks
+    )
+
+    return impact
+
+
+def _calculate_max_dependency_chain(dependencies):
+    """Calculate the maximum dependency chain length."""
+    max_length = 0
+
+    def get_chain_length(cookbook, visited=None):
+        if visited is None:
+            visited = set()
+
+        if cookbook in visited:
+            return 0  # Circular dependency detected
+
+        visited.add(cookbook)
+        deps = dependencies.get(cookbook, [])
+
+        if not deps:
+            return 1
+
+        max_child_length = 0
+        for dep in deps:
+            child_length = get_chain_length(dep, visited.copy())
+            max_child_length = max(max_child_length, child_length)
+
+        return 1 + max_child_length
+
+    for cookbook in dependencies:
+        length = get_chain_length(cookbook)
+        max_length = max(max_length, length)
+
+    return max_length
+
+
+def _find_critical_path(dependencies):
+    """Find the critical path (longest dependency chain)."""
+    longest_chain: list[str] = []
+
+    def find_longest_chain(cookbook, visited=None):
+        if visited is None:
+            visited = set()
+
+        if cookbook in visited:
+            return []  # Circular dependency
+
+        visited.add(cookbook)
+        deps = dependencies.get(cookbook, [])
+
+        if not deps:
+            return [cookbook]
+
+        longest_child_chain: list[str] = []
+        for dep in deps:
+            child_chain = find_longest_chain(dep, visited.copy())
+            if len(child_chain) > len(longest_child_chain):
+                longest_child_chain = child_chain
+
+        return [cookbook] + longest_child_chain
+
+    for cookbook in dependencies:
+        chain = find_longest_chain(cookbook)
+        if len(chain) > len(longest_chain):
+            longest_chain = chain
+
+    return longest_chain
+
+
+def _identify_bottlenecks(dependencies: dict[str, list[str]]):
+    """Identify bottleneck cookbooks (highly depended upon)."""
+    # Count how many times each cookbook is depended upon
+    dependency_counts: dict[str, int] = {}
+
+    for deps in dependencies.values():
+        for dep in deps:
+            dependency_counts[dep] = dependency_counts.get(dep, 0) + 1
+
+    # Find cookbooks with high dependency counts
+    bottlenecks = []
+    max_count: int = max(dependency_counts.values()) if dependency_counts else 0
+
+    for cookbook, count in dependency_counts.items():
+        if count >= 5:
+            risk_level = "High"
+        elif count >= 3:
+            risk_level = "Medium"
+        else:
+            risk_level = "Low"
+
+        if count >= 3 or (max_count > 1 and count == max_count):
+            bottlenecks.append(
+                {
+                    "cookbook": cookbook,
+                    "dependent_count": count,
+                    "risk_level": risk_level,
+                }
+            )
+
+    return sorted(bottlenecks, key=lambda x: x["dependent_count"], reverse=True)
+
+
+def _generate_impact_recommendations(impact, circular_deps, community_cookbooks):
+    """Generate recommendations based on impact analysis."""
+    recommendations = []
+
+    if circular_deps:
+        recommendations.append(
+            {
+                "priority": "Critical",
+                "action": (
+                    f"Resolve {len(circular_deps)} circular dependencies "
+                    "before migration"
+                ),
+                "impact": "Prevents successful migration",
+            }
+        )
+
+    if impact["parallel_streams"] > 1:
+        recommendations.append(
+            {
+                "priority": "High",
+                "action": (
+                    f"Plan {impact['parallel_streams']} parallel migration streams"
+                ),
+                "impact": (
+                    f"Reduces timeline by ~{impact['parallel_streams'] * 2} weeks"
+                ),
+            }
+        )
+
+    if community_cookbooks:
+        recommendations.append(
+            {
+                "priority": "Medium",
+                "action": (
+                    f"Evaluate {len(community_cookbooks)} community cookbooks "
+                    "for Ansible Galaxy alternatives"
+                ),
+                "impact": "Reduces custom development effort",
+            }
+        )
+
+    if impact["bottlenecks"]:
+        bottleneck_names = [b["cookbook"] for b in impact["bottlenecks"][:3]]
+        recommendations.append(
+            {
+                "priority": "Medium",
+                "action": (
+                    f"Migrate bottleneck cookbooks first: {', '.join(bottleneck_names)}"
+                ),
+                "impact": "Unblocks dependent cookbook migrations",
+            }
+        )
+
+    if impact["timeline_impact_weeks"] > 0:
+        recommendations.append(
+            {
+                "priority": "Low",
+                "action": (
+                    f"Allocate additional {impact['timeline_impact_weeks']} "
+                    "weeks for complexity"
+                ),
+                "impact": "Ensures successful migration completion",
+            }
+        )
+
+    return recommendations
+
+
+def _display_detailed_impact_analysis(
+    impact_analysis, dependencies, circular_deps, community_cookbooks
+):
+    """Display detailed impact analysis breakdown."""
+    _display_risk_assessment_breakdown(dependencies, circular_deps, community_cookbooks)
+    _display_critical_path_analysis(impact_analysis)
+    _display_migration_bottlenecks(impact_analysis)
+    _display_strategic_recommendations(impact_analysis)
+
+
+def _display_risk_assessment_breakdown(
+    dependencies, circular_deps, community_cookbooks
+):
+    """Display risk assessment breakdown."""
+    st.markdown("### Risk Assessment Breakdown")
+
+    # Risk factors
+    risk_factors = {
+        "Circular Dependencies": len(circular_deps) * 2.0,
+        "Total Dependencies": len(dependencies) * 0.1,
+        "Community Cookbooks": len(community_cookbooks) * 0.5,
+        "Dependency Chain Length": _calculate_max_dependency_chain(dependencies) * 0.3,
+    }
+
+    for factor, score in risk_factors.items():
+        if score > 0:
+            st.write(f"• **{factor}**: {score:.1f} points")
+
+
+def _display_critical_path_analysis(impact_analysis):
+    """Display critical path analysis."""
+    st.markdown("### Critical Path Analysis")
+    if impact_analysis["critical_path"]:
+        st.write("**Longest dependency chain:**")
+        st.code(" → ".join(impact_analysis["critical_path"]), language="text")
+    else:
+        st.write("No dependency chains identified.")
+
+
+def _display_migration_bottlenecks(impact_analysis):
+    """Display migration bottlenecks."""
+    st.markdown("### Migration Bottlenecks")
+    if impact_analysis["bottlenecks"]:
+        for bottleneck in impact_analysis["bottlenecks"]:
+            risk_level = bottleneck["risk_level"]
+            if risk_level == "High":
+                risk_icon = "🔴"
+            elif risk_level == "Medium":
+                risk_icon = "🟡"
+            else:
+                risk_icon = "🟢"
+            st.write(
+                f"• {risk_icon} **{bottleneck['cookbook']}**: "
+                f"{bottleneck['dependent_count']} dependents "
+                f"({risk_level} risk)"
+            )
+    else:
+        st.write("✅ No significant bottlenecks identified.")
+
+
+def _display_strategic_recommendations(impact_analysis):
+    """Display strategic recommendations."""
+    st.markdown("### Strategic Recommendations")
+    for rec in impact_analysis["recommendations"]:
+        priority = rec["priority"]
+        if priority == "Critical":
+            priority_icon = "🔴"
+        elif priority == "High":
+            priority_icon = "🟡"
+        else:
+            priority_icon = "🟢"
+        st.write(f"• {priority_icon} **{priority}**: {rec['action']}")
+        st.write(f"  *Impact*: {rec['impact']}")
 
 
 def _handle_graph_caching():
-    """Handle graph data caching logic."""
-    # Cache control
-    with st.expander("⚙️ Graph Settings"):
+    """Handle graph caching controls and cleanup."""
+    st.subheader("💾 Graph Cache Management")
+
+    col1, col2, col3 = st.columns([1, 1, 2])
+
+    with col1:
+        # Toggle caching on/off
         cache_enabled = st.checkbox(
             "Enable Graph Caching",
             value=st.session_state.get("graph_cache_enabled", True),
-            help="Cache graph data to improve performance for repeated views",
+            help="Cache graph visualizations to improve performance for repeated views",
         )
-        st.session_state["graph_cache_enabled"] = cache_enabled
+        st.session_state.graph_cache_enabled = cache_enabled
 
-        if st.button(
-            "🗑️ Clear Graph Cache", help="Clear cached graph data to free memory"
-        ):
-            # Clear all cached graphs
-            keys_to_remove = [k for k in st.session_state if k.startswith("graph_")]
-            for key in keys_to_remove:
+    with col2:
+        # Clear cache button
+        if st.button("🗑️ Clear Cache", help="Clear all cached graph data"):
+            # Find and remove all graph cache keys
+            cache_keys = [key for key in st.session_state if key.startswith("graph_")]
+            for key in cache_keys:
                 del st.session_state[key]
-            st.success("Graph cache cleared!")
+            st.success(f" Cleared {len(cache_keys)} cached graphs")
             st.rerun()
 
+    with col3:
+        # Cache statistics
+        cache_keys = [key for key in st.session_state if key.startswith("graph_")]
+        cache_count = len(cache_keys)
+
+        if cache_count > 0:
+            # Estimate memory usage (rough approximation)
+            estimated_memory = cache_count * 50  # Rough estimate: 50KB per cached graph
+            st.metric(
+                "Cached Graphs",
+                f"{cache_count} items",
+                f"~{estimated_memory}KB estimated",
+            )
+        else:
+            st.info("📭 No graphs currently cached")
+
+    # Cache status indicator
+    if cache_enabled:
+        st.success(
+            "✅ Graph caching is enabled - visualizations will be "
+            "cached for faster loading"
+        )
+    else:
+        st.warning(
+            "⚠️ Graph caching is disabled - each visualization will be recalculated"
+        )
+
 
-def _display_dependency_graph_visualization(analysis_result, viz_type, selected_layout):
-    """Display the dependency graph visualization section."""
+def _display_dependency_graph_visualization(
+    analysis_result,
+    viz_type,
+    selected_layout,
+    show_circular_only,
+    show_community_only,
+    min_connections,
+):
+    """Display the dependency graph visualization section with filtering."""
     try:
-        # Create cache key for graph data
-        cache_key = f"graph_{hash(analysis_result)}_{viz_type}_{selected_layout}"
+        # Parse dependencies for filtering
+        _, circular_deps, _ = _parse_dependency_analysis(analysis_result)
+
+        # Prepare filters
+        filters = {
+            "circular_only": show_circular_only,
+            "community_only": show_community_only,
+            "min_connections": min_connections,
+            "circular_deps": circular_deps,
+        }
 
-        # Check if we have cached graph data
-        if cache_key in st.session_state and st.session_state.get(
-            "graph_cache_enabled", True
-        ):
-            graph_data = st.session_state[cache_key]
-            st.info("📋 Using cached graph data")
-        else:
-            # Create dependency graph
+        # Try to get cached graph data
+        graph_data = _get_cached_graph_data(
+            analysis_result, viz_type, selected_layout, filters
+        )
+
+        if graph_data is None:
+            # Create dependency graph with filters
             graph_data = create_dependency_graph(
-                analysis_result, viz_type, selected_layout
+                analysis_result, viz_type, selected_layout, filters
             )
-
             # Cache the result
-            if graph_data is not None and st.session_state.get(
-                "graph_cache_enabled", True
-            ):
-                st.session_state[cache_key] = graph_data
-
-        _handle_graph_caching()
+            _cache_graph_data(
+                analysis_result, viz_type, selected_layout, filters, graph_data
+            )
 
         if graph_data:
             _display_graph_with_export_options(graph_data, viz_type)
         else:
-            st.info("No dependency relationships found to visualize.")
+            st.info(
+                "No dependency relationships found to visualise after applying filters."
+            )
 
     except Exception as e:
         _handle_graph_visualization_error(e, analysis_result)
 
 
+def _get_cached_graph_data(analysis_result, viz_type, selected_layout, filters):
+    """Get cached graph data if available."""
+    cache_key = (
+        f"graph_{hash(analysis_result)}_{viz_type}_{selected_layout}_{str(filters)}"
+    )
+
+    if cache_key in st.session_state and st.session_state.get(
+        "graph_cache_enabled", True
+    ):
+        graph_data = st.session_state[cache_key]
+        st.info("📋 Using cached graph data")
+        return graph_data
+
+    return None
+
+
+def _cache_graph_data(analysis_result, viz_type, selected_layout, filters, graph_data):
+    """Cache graph data if caching is enabled."""
+    if graph_data is not None and st.session_state.get("graph_cache_enabled", True):
+        cache_key = (
+            f"graph_{hash(analysis_result)}_{viz_type}_{selected_layout}_{str(filters)}"
+        )
+        st.session_state[cache_key] = graph_data
+
+
 def _display_graph_with_export_options(graph_data, viz_type):
     """Display graph and provide export options."""
     if viz_type == "interactive":
         # Interactive Plotly graph
-        st.plotly_chart(graph_data, use_container_width=True)
+        st.plotly_chart(graph_data, width="stretch")
 
         # Export options for interactive graph
         st.subheader("Export Graph")
-        col1, col2, col3 = st.columns(3)
+        col1, col2, col3, col4 = st.columns(4)
 
         with col1:
             # Export as HTML
             html_content = graph_data.to_html(full_html=False, include_plotlyjs="cdn")
             st.download_button(
-                label="📄 Export as HTML",
+                label="🌐 HTML",
                 data=html_content,
                 file_name="dependency_graph.html",
                 mime="text/html",
@@ -1201,7 +1967,7 @@ def _display_graph_with_export_options(graph_data, viz_type):
             # Export as JSON
             json_data = graph_data.to_json()
             st.download_button(
-                label="📊 Export as JSON",
+                label="📊 JSON",
                 data=json_data,
                 file_name="dependency_graph.json",
                 mime=MIME_APPLICATION_JSON,
1213
1979
  try:
1214
1980
  import plotly.io as pio # type: ignore[import-untyped]
1215
1981
 
1216
- png_data = pio.to_image(graph_data, format="png")
1982
+ png_data = pio.to_image(graph_data, format="png", scale=2)
1217
1983
  st.download_button(
1218
- label="🖼️ Export as PNG",
1984
+ label="🖼️ PNG (High-res)",
1219
1985
  data=png_data,
1220
1986
  file_name="dependency_graph.png",
1221
1987
  mime="image/png",
1222
- help="Download graph as PNG image",
1988
+ help="Download graph as high-resolution PNG",
1223
1989
  )
1224
1990
  except ImportError:
1225
1991
  st.info("PNG export requires additional dependencies")
1226
1992
 
1993
+ with col4:
1994
+ # Export as PDF
1995
+ try:
1996
+ import plotly.io as pio
1997
+
1998
+ pdf_data = pio.to_image(graph_data, format="pdf")
1999
+ st.download_button(
2000
+ label="📄 PDF",
2001
+ data=pdf_data,
2002
+ file_name="dependency_graph.pdf",
2003
+ mime="application/pdf",
2004
+ help="Download graph as PDF document",
2005
+ )
2006
+ except ImportError:
2007
+ st.info("PDF export requires additional dependencies")
2008
+
1227
2009
  else:
1228
2010
  # Static matplotlib graph
1229
2011
  st.pyplot(graph_data)
1230
2012
 
1231
2013
  # Export options for static graph
1232
2014
  st.subheader("Export Graph")
1233
- col1, col2 = st.columns(2)
2015
+ col1, col2, col3, col4 = st.columns(4)
1234
2016
 
1235
2017
  with col1:
1236
2018
  # Export as PNG
@@ -1240,7 +2022,7 @@ def _display_graph_with_export_options(graph_data, viz_type):
             graph_data.savefig(buf, format="png", dpi=300, bbox_inches="tight")
             buf.seek(0)
             st.download_button(
-                label="🖼️ Export as PNG",
+                label="🖼️ PNG (High-res)",
                 data=buf.getvalue(),
                 file_name="dependency_graph.png",
                 mime="image/png",
@@ -1253,13 +2035,39 @@ def _display_graph_with_export_options(graph_data, viz_type):
             graph_data.savefig(buf_svg, format="svg", bbox_inches="tight")
             buf_svg.seek(0)
             st.download_button(
-                label="📈 Export as SVG",
+                label="📈 SVG",
                 data=buf_svg.getvalue(),
                 file_name="dependency_graph.svg",
                 mime="image/svg+xml",
                 help="Download graph as scalable SVG",
             )
 
+        with col3:
+            # Export as PDF
+            buf_pdf = io.BytesIO()
+            graph_data.savefig(buf_pdf, format="pdf", bbox_inches="tight")
+            buf_pdf.seek(0)
+            st.download_button(
+                label="📄 PDF",
+                data=buf_pdf.getvalue(),
+                file_name="dependency_graph.pdf",
+                mime="application/pdf",
+                help="Download graph as PDF document",
+            )
+
+        with col4:
+            # Export as EPS
+            buf_eps = io.BytesIO()
+            graph_data.savefig(buf_eps, format="eps", bbox_inches="tight")
+            buf_eps.seek(0)
+            st.download_button(
+                label="🔧 EPS",
+                data=buf_eps.getvalue(),
+                file_name="dependency_graph.eps",
+                mime="application/postscript",
+                help="Download graph as EPS vector format",
+            )
+
 
 def _handle_graph_visualization_error(error, analysis_result):
     """Handle graph visualization errors with fallback display."""
@@ -1307,12 +2115,14 @@ def _display_dependency_analysis_sections(analysis_result):
         elif "Dependency Graph" in section:
             with st.expander("🔗 Dependency Graph"):
                 st.markdown(section.replace("## Dependency Graph", ""))
-        elif "Circular Dependencies" in section:
             with st.expander(f"⚠️ {SECTION_CIRCULAR_DEPENDENCIES}"):
-                st.markdown(section.replace("## Circular Dependencies", ""))
-        elif "Community Cookbooks" in section:
-            with st.expander("🌐 Community Cookbooks"):
-                st.markdown(section.replace("## Community Cookbooks", ""))
+                st.markdown(
+                    section.replace(f"## {SECTION_CIRCULAR_DEPENDENCIES}", "")
+                )
+            with st.expander(f"🌐 {SECTION_COMMUNITY_COOKBOOKS}"):
+                st.markdown(
+                    section.replace(f"## {SECTION_COMMUNITY_COOKBOOKS}", "")
+                )
         elif "Migration Impact Analysis" in section:
             with st.expander("📊 Migration Impact Analysis"):
                 st.markdown(section.replace("## Migration Impact Analysis", ""))
@@ -1406,13 +2216,8 @@ def _display_dependency_export_options(
     )
 
 
-def display_dependency_analysis_results():
-    """Display dependency analysis results."""
-    analysis_result = st.session_state.dep_analysis_result
-    cookbook_path = st.session_state.dep_cookbook_path
-    depth = st.session_state.dep_depth
-    viz_type = st.session_state.get("dep_viz_type", "text")
-
+def _display_dependency_analysis_summary(analysis_result, cookbook_path, depth):
+    """Display dependency analysis summary section."""
     # Summary metrics
     st.subheader("Dependency Analysis Summary")
 
@@ -1427,31 +2232,163 @@ def display_dependency_analysis_results():
1427
2232
  )
1428
2233
 
1429
2234
  # Analysis depth indicator
1430
- st.info(f"Analysis performed with **{depth}** depth on: `{cookbook_path}`")
1431
-
1432
- # Graph Visualization Section
1433
- if viz_type in ["graph", "interactive"]:
1434
- st.subheader("📊 Dependency Graph Visualization")
1435
-
1436
- # Layout algorithm selector
1437
- layout_options = ["auto", "spring", "circular", "kamada_kawai"]
1438
- selected_layout = st.selectbox(
1439
- "Layout Algorithm",
1440
- layout_options,
1441
- help="Choose graph layout algorithm. 'auto' selects best "
1442
- "algorithm based on graph size.",
1443
- format_func=lambda x: {
1444
- "auto": "Auto (recommended)",
1445
- "spring": "Spring Layout",
1446
- "circular": "Circular Layout",
1447
- "kamada_kawai": "Kamada-Kawai Layout",
1448
- }.get(x, str(x)),
2235
+ analysis_msg = f"Analysis performed with **{depth}** depth on: `{cookbook_path}`"
2236
+ st.info(analysis_msg)
2237
+
2238
+
+ def _display_graph_visualization_section(analysis_result, viz_type):
+     """Display graph visualization section."""
+     if viz_type not in ["graph", "interactive"]:
+         return
+
+     st.subheader("📊 Dependency Graph Visualization")
+
+     # Parse dependencies for filtering and analysis
+     _ = _parse_dependency_analysis(analysis_result)
+
+     # Layout algorithm selector
+     layout_options = [
+         "auto",
+         "spring",
+         "circular",
+         "kamada_kawai",
+         "shell",
+         "spectral",
+         "force_directed",
+         "random",
+     ]
+     selected_layout = st.selectbox(
+         "Layout Algorithm",
+         layout_options,
+         help="Choose graph layout algorithm. 'auto' selects best "
+         "algorithm based on graph size.",
+         format_func=lambda x: {
+             "auto": "Auto (recommended)",
+             "spring": "Spring Layout",
+             "circular": "Circular Layout",
+             "kamada_kawai": "Kamada-Kawai Layout",
+             "shell": "Shell Layout (hierarchical)",
+             "spectral": "Spectral Layout",
+             "force_directed": "Force Directed",
+             "random": "Random Layout",
+         }.get(x, str(x)),
+     )
+
+     # Graph cache management
+     _handle_graph_caching()
+
+     # Graph Filtering Options
+     st.subheader("🔍 Graph Filtering & Analysis")
+
+     col1, col2, col3 = st.columns(3)
+
+     with col1:
+         show_circular_only = st.checkbox(
+             "Show Circular Dependencies Only",
+             help=("Filter graph to show only nodes involved in circular dependencies"),
+         )
+
+     with col2:
+         show_community_only = st.checkbox(
+             "Show Community Cookbooks Only",
+             help=(
+                 "Filter graph to show only community cookbooks and their dependencies"
+             ),
+         )
+
+     with col3:
+         min_connections = st.slider(
+             "Minimum Connections",
+             min_value=0,
+             max_value=10,
+             value=0,
+             help="Show only nodes with at least this many connections",
+         )
+
+     _display_dependency_graph_visualization(
+         analysis_result,
+         viz_type,
+         selected_layout,
+         show_circular_only,
+         show_community_only,
+         min_connections,
+     )
+
+
+ def _display_impact_analysis_section(analysis_result):
+     """Display migration impact analysis section."""
+     # Parse dependencies for impact analysis
+     dependencies, circular_deps, community_cookbooks = _parse_dependency_analysis(
+         analysis_result
+     )
+
+     # Impact Analysis Section
+     st.subheader("📊 Migration Impact Analysis")
+
+     if not dependencies:
+         st.info("No dependencies found for impact analysis.")
+         return
+
+     impact_analysis = _calculate_migration_impact(
+         dependencies, circular_deps, community_cookbooks
+     )
+
+     # Calculate risk score delta
+     risk_score = impact_analysis["risk_score"]
+     if risk_score > 7:
+         risk_delta = "🔴 High"
+     elif risk_score > 4:
+         risk_delta = "🟡 Medium"
+     else:
+         risk_delta = "🟢 Low"
+
+     col1, col2, col3, col4 = st.columns(4)
+
+     with col1:
+         st.metric(
+             "Migration Risk Score",
+             f"{impact_analysis['risk_score']:.1f}/10",
+             delta=risk_delta,
+         )
+
+     with col2:
+         timeline_weeks = impact_analysis["timeline_impact_weeks"]
+         timeline_delta = "↗️" if timeline_weeks > 0 else "→"
+         st.metric(
+             "Estimated Timeline Impact",
+             f"{timeline_weeks} weeks",
+             delta=timeline_delta,
+         )
+
+     with col3:
+         complexity_level = impact_analysis["complexity_level"]
+         complexity_delta = "⚠️ High" if complexity_level == "High" else "✅ Low"
+         st.metric(
+             "Dependency Complexity",
+             complexity_level,
+             delta=complexity_delta,
+         )
+
+     with col4:
+         parallel_streams = impact_analysis["parallel_streams"]
+         parallel_delta = "🔀 Multiple" if parallel_streams > 1 else "➡️ Single"
+         st.metric(
+             "Parallel Migration Streams",
+             parallel_streams,
+             delta=parallel_delta,
          )
 
-         _display_dependency_graph_visualization(
-             analysis_result, viz_type, selected_layout
+     # Detailed impact breakdown
+     with st.expander("📈 Detailed Impact Analysis"):
+         _display_detailed_impact_analysis(
+             impact_analysis, dependencies, circular_deps, community_cookbooks
          )
 
+
+ def _display_analysis_details_section(
+     analysis_result, circular_deps, community_cookbooks, direct_deps
+ ):
+     """Display analysis details section."""
      # Display analysis results
      st.subheader("Dependency Analysis Details")
 
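The impact section added above arranges four `st.metric` widgets across `st.columns` and buckets the risk score into colour-coded deltas. A cut-down sketch of the same layout with made-up numbers:

import streamlit as st

risk_score = 6.5  # illustrative value, not from the package
risk_delta = "🔴 High" if risk_score > 7 else "🟡 Medium" if risk_score > 4 else "🟢 Low"

col1, col2 = st.columns(2)
with col1:
    st.metric("Migration Risk Score", f"{risk_score:.1f}/10", delta=risk_delta)
with col2:
    st.metric("Estimated Timeline Impact", "3 weeks", delta="↗️")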
@@ -1460,49 +2397,157 @@ def display_dependency_analysis_results():
      # Migration recommendations
      _display_migration_recommendations(circular_deps, community_cookbooks, direct_deps)
 
+
+ def display_dependency_analysis_results():
+     """Display dependency analysis results."""
+     analysis_result = st.session_state.dep_analysis_result
+     cookbook_path = st.session_state.dep_cookbook_path
+     depth = st.session_state.dep_depth
+     viz_type = st.session_state.get("dep_viz_type", "text")
+
+     # Display summary section
+     _display_dependency_analysis_summary(analysis_result, cookbook_path, depth)
+
+     # Display graph visualization section
+     _display_graph_visualization_section(analysis_result, viz_type)
+
+     # Display impact analysis section
+     _display_impact_analysis_section(analysis_result)
+
+     # Display analysis details section
+     dependencies, circular_deps, community_cookbooks = _parse_dependency_analysis(
+         analysis_result
+     )
+     direct_deps = len(dependencies) if dependencies else 0
+     _display_analysis_details_section(
+         analysis_result, circular_deps, community_cookbooks, direct_deps
+     )
+
      # Export options
      _display_dependency_export_options(
          analysis_result,
          cookbook_path,
          depth,
          direct_deps,
-         transitive_deps,
+         len(dependencies) if dependencies else 0,  # transitive_deps approximation
          circular_deps,
          community_cookbooks,
      )
 
 
- def show_validation_reports():
-     """Show validation reports and conversion validation."""
-     st.header("Validation Reports")
+ def _collect_files_to_validate(input_path: str) -> list[Path]:
+     """Collect valid YAML files from input path."""
+     validated_path = _normalize_and_validate_input_path(input_path)
+     if validated_path is None:
+         # Error already reported by _normalize_and_validate_input_path
+         return []
 
-     # Import validation functions
-     from souschef.core.validation import ValidationEngine
+     path_obj = validated_path
+     files_to_validate = []
 
-     st.markdown("""
-     Validate Chef to Ansible conversions and generate comprehensive
-     validation reports for migration quality assurance.
-     """)
+     if not path_obj.exists():
+         st.error(f"Path does not exist: {path_obj}")
+         return []
+
+     if path_obj.is_file():
+         if path_obj.suffix in [".yml", ".yaml"] and path_obj.name not in [
+             ".kitchen.yml",
+             "kitchen.yml",
+             "docker-compose.yml",
+         ]:
+             files_to_validate.append(path_obj)
+     elif path_obj.is_dir():
+         # Filter out obvious non-playbook files
+         excluded_files = {".kitchen.yml", "kitchen.yml", "docker-compose.yml"}
+
+         yml_files = list(path_obj.glob("**/*.yml"))
+         yaml_files = list(path_obj.glob("**/*.yaml"))
+
+         raw_files = yml_files + yaml_files
+         files_to_validate.extend([f for f in raw_files if f.name not in excluded_files])
 
-     # Validation options
+     return files_to_validate
+
+
+ def _run_validation_engine(files_to_validate):
+     """Run validation engine on a list of files."""
+     from souschef.core.validation import (
+         ValidationCategory,
+         ValidationEngine,
+         ValidationLevel,
+         ValidationResult,
+     )
+
+     engine = ValidationEngine()
+     all_results = []
+
+     for file_path in files_to_validate:
+         try:
+             content = file_path.read_text()
+             # We assume 'recipe' (Playbook) conversion type for .yml files found
+             file_results = engine.validate_conversion("recipe", content)
+
+             # If no issues found, explicitly add a success record
+             if not file_results:
+                 file_results = [
+                     ValidationResult(
+                         ValidationLevel.INFO,
+                         ValidationCategory.SYNTAX,
+                         "File passed all validation checks",
+                         location=file_path.name,
+                     )
+                 ]
+
+             # Annotate results with location if missing
+             for res in file_results:
+                 if not res.location:
+                     res.location = file_path.name
+
+             all_results.extend(file_results)
+         except Exception as file_err:
+             st.warning(f"Could not read/validate {file_path.name}: {file_err}")
+
+     return all_results
+
+
+ def _get_default_validation_path():
+     """Determine the default path for validation from session state."""
+     default_path = ""
+     if "converted_playbooks_path" in st.session_state:
+         default_path = st.session_state.converted_playbooks_path
+         st.info(f"Pre-filled path from conversion: {default_path}")
+     elif (
+         "analysis_cookbook_path" in st.session_state
+         and st.session_state.analysis_cookbook_path
+     ):
+         default_path = st.session_state.analysis_cookbook_path
+         st.info(f"Pre-filled path from analysis: {default_path}")
+         st.caption(
+             "Note: This tool validates Ansible playbooks (.yml). If you're using a raw "
+             "Chef cookbook path, please ensure you've performed the conversion first."
+         )
+     return default_path
+
+
+ def _render_validation_options_ui():
+     """Render validation scope and format options."""
      col1, col2 = st.columns(2)
 
      with col1:
-         validation_type = st.selectbox(
-             "Validation Type",
-             ["syntax", "logic", "security", "performance", "full"],
-             help="Type of validation to perform",
-             format_func=lambda x: {
-                 "syntax": "Syntax Validation",
-                 "logic": "Logic & Structure Validation",
-                 "security": "Security Best Practices",
-                 "performance": "Performance Analysis",
-                 "full": "Complete Validation Suite",
-             }.get(x, str(x)),
+         sub_scope = st.selectbox(
+             "Validation Scope",
+             [
+                 "Full Suite",
+                 "Syntax Only",
+                 "Logic/Semantic",
+                 "Security",
+                 SCOPE_BEST_PRACTICES,
+             ],
+             help="Filter which validation checks to run",
          )
 
      with col2:
-         output_format = st.selectbox(
+         sub_format = st.selectbox(
              "Output Format",
              ["text", "json", "html"],
              help="Format for validation reports",
@@ -1512,8 +2557,11 @@ def show_validation_reports():
              "html": "HTML Report",
          }.get(x, str(x)),
      )
+     return sub_scope, sub_format
 
-     # File/Directory input
+
+ def _render_validation_input_ui(default_path):
+     """Render input source selection UI."""
      st.subheader("Input Source")
 
      input_type = st.radio(
@@ -1526,17 +2574,24 @@ def show_validation_reports():
      if input_type == "Directory":
          input_path = st.text_input(
              "Directory Path",
+             value=default_path,
              placeholder="/path/to/ansible/playbooks",
              help="Path to directory containing Ansible playbooks to validate",
          )
      else:
          input_path = st.text_input(
              "File Path",
+             value=default_path
+             if default_path and default_path.endswith((".yml", ".yaml"))
+             else "",
              placeholder="/path/to/playbook.yml",
              help="Path to single Ansible playbook file to validate",
          )
+     return input_path
+
 
-     # Validation options
+ def _render_validation_settings_ui():
+     """Render strict mode and other validation settings."""
      st.subheader("Validation Options")
 
      col1, col2, col3 = st.columns(3)
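Note how the file-path variant only reuses the pre-filled `default_path` when it already names a YAML file, falling back to an empty field otherwise. Sketched in isolation (the default shown is hypothetical):

import streamlit as st

default_path = "/tmp/playbooks/site.yml"  # illustrative, not from the package
file_path = st.text_input(
    "File Path",
    value=default_path if default_path.endswith((".yml", ".yaml")) else "",
    placeholder="/path/to/playbook.yml",
)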
@@ -1548,7 +2603,7 @@ def show_validation_reports():
 
      with col2:
          include_best_practices = st.checkbox(
-             "Include Best Practices",
+             f"Include {SCOPE_BEST_PRACTICES}",
              value=True,
              help="Check for Ansible best practices",
          )
@@ -1560,68 +2615,160 @@ def show_validation_reports():
              help="Provide improvement suggestions",
          )
 
-     # Validation button
-     if st.button("Run Validation", type="primary", use_container_width=True):
-         if not input_path.strip():
-             st.error("Please enter a path to validate.")
+     return strict_mode, include_best_practices, generate_recommendations
+
+
+ def _normalize_and_validate_input_path(input_path: str) -> Path | None:
+     """
+     Normalize and validate a user-provided filesystem path.
+
+     Returns a resolved Path object if valid, otherwise reports an error
+     via Streamlit and returns None.
+     """
+     if not input_path:
+         st.error(ERROR_MSG_ENTER_PATH)
+         return None
+
+     raw = input_path.strip()
+     if not raw:
+         st.error(ERROR_MSG_ENTER_PATH)
+         return None
+
+     try:
+         # Expand user home and resolve to an absolute, normalized path
+         path_obj = Path(raw).expanduser().resolve()
+     except Exception:
+         st.error(f"Invalid path: {raw}")
+         return None
+
+     # Optional safety: constrain to the application root directory
+     try:
+         app_root = Path(app_path).resolve()
+         path_obj.relative_to(app_root)
+     except Exception:
+         st.error("Path must be within the SousChef project directory.")
+         return None
+
+     return path_obj
+
+
+ def _handle_validation_execution(input_path, options):
+     """Execute the validation process with progress tracking."""
+     progress_tracker = ProgressTracker(
+         total_steps=6, description="Running validation..."
+     )
+
+     try:
+         progress_tracker.update(1, "Preparing validation environment...")
+
+         progress_tracker.update(2, "Scanning input files...")
+
+         files_to_validate = _collect_files_to_validate(input_path)
+
+         if not files_to_validate:
+             # Error is handled inside _collect_files_to_validate
+             # if path doesn't exist or is invalid
+             validated_path = _normalize_and_validate_input_path(input_path)
+             if validated_path is not None and validated_path.exists():
+                 st.warning(f"No YAML files found in {validated_path}")
              return
 
-         # Create progress tracker
-         progress_tracker = ProgressTracker(
-             total_steps=6, description="Running validation..."
+         progress_tracker.update(3, f"Validating {len(files_to_validate)} files...")
+
+         all_results = _run_validation_engine(files_to_validate)
+
+         # Filter results based on scope
+         filtered_results = _filter_results_by_scope(all_results, options["scope"])
+
+         # Format the results as text
+         validation_result = "\n".join(
+             [
+                 f"[{result.level.value.upper()}] {result.location}: {result.message}"
+                 for result in filtered_results
+             ]
          )
 
-         try:
-             progress_tracker.update(1, "Preparing validation environment...")
-
-             # Prepare validation options
-             options = {
-                 "strict": strict_mode,
-                 "best_practices": include_best_practices,
-                 "recommendations": generate_recommendations,
-                 "format": output_format,
-             }
+         if not validation_result:
+             validation_result = "No issues found matching the selected scope."
 
-             progress_tracker.update(2, "Scanning input files...")
-             progress_tracker.update(3, "Running syntax validation...")
-             progress_tracker.update(4, "Performing logic checks...")
+         progress_tracker.update(5, "Generating validation report...")
 
-             # Run validation
-             engine = ValidationEngine()
-             validation_results = engine.validate_conversion(
-                 validation_type, input_path.strip()
-             )
+         # Store results
+         st.session_state.validation_result = validation_result
+         st.session_state.validation_path = input_path.strip()
+         st.session_state.validation_type = options["scope"]
+         st.session_state.validation_options = options
 
-             # Format the results as text
-             validation_result = "\n".join(
-                 [
-                     f"{result.level.value.upper()}: {result.message}"
-                     for result in validation_results
-                 ]
-             )
+         progress_tracker.complete("Validation completed!")
+         st.success(f"Validation completed! Scanned {len(files_to_validate)} files.")
+         st.rerun()
 
-             progress_tracker.update(5, "Generating validation report...")
+     except Exception as e:
+         progress_tracker.close()
+         st.error(f"Error during validation: {e}")
 
-             # Store results
-             st.session_state.validation_result = validation_result
-             st.session_state.validation_path = input_path.strip()
-             st.session_state.validation_type = validation_type
-             st.session_state.validation_options = options
 
-             progress_tracker.complete("Validation completed!")
-             st.success("Validation completed successfully!")
-             st.rerun()
+ def show_validation_reports():
+     """Show validation reports and conversion validation."""
+     st.header(NAV_VALIDATION_REPORTS)
 
-         except Exception as e:
-             progress_tracker.close()
-             st.error(f"Error during validation: {e}")
+     st.markdown("""
+     Validate Chef to Ansible conversions and generate comprehensive
+     validation reports for migration quality assurance.
+     """)
+
+     # Check for previously analyzed path to pre-fill
+     default_path = _get_default_validation_path()
+
+     # UI Components
+     validation_scope, output_format = _render_validation_options_ui()
+     input_path = _render_validation_input_ui(default_path)
+     strict_mode, include_best_practices, generate_recommendations = (
+         _render_validation_settings_ui()
+     )
+
+     # Validation button
+     if st.button("Run Validation", type="primary", width="stretch"):
+         if not input_path or not input_path.strip():
+             st.error("Please enter a path to validate.")
              return
 
+         options = {
+             "strict": strict_mode,
+             "best_practices": include_best_practices,
+             "recommendations": generate_recommendations,
+             "scope": validation_scope,
+             "format": output_format,
+         }
+
+         _handle_validation_execution(input_path, options)
+
      # Display results if available
      if "validation_result" in st.session_state:
          display_validation_results()
 
 
+ def _filter_results_by_scope(results, scope):
+     """Filter validation results based on selected scope."""
+     from souschef.core.validation import ValidationCategory
+
+     if scope == "Full Suite":
+         return results
+
+     scope_map = {
+         "Syntax Only": ValidationCategory.SYNTAX,
+         "Logic/Semantic": ValidationCategory.SEMANTIC,
+         "Security": ValidationCategory.SECURITY,
+         SCOPE_BEST_PRACTICES: ValidationCategory.BEST_PRACTICE,
+     }
+
+     target_category = scope_map.get(scope)
+     if not target_category:
+         return results
+
+     return [r for r in results if r.category == target_category]
+
+
  def _parse_validation_metrics(validation_result):
      """Parse validation result to extract key metrics."""
      lines = validation_result.split("\n")
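The containment check in `_normalize_and_validate_input_path` above works because `Path.resolve()` collapses `..` segments before `relative_to()` is applied, so relative-path tricks cannot escape the allowed root. A standalone sketch of the same check:

from pathlib import Path

def is_within(root: Path, candidate: str) -> bool:
    # resolve() normalizes ".." segments; relative_to() raises ValueError
    # when the resolved candidate lies outside the root directory.
    try:
        Path(candidate).expanduser().resolve().relative_to(root.resolve())
        return True
    except ValueError:
        return False

print(is_within(Path.cwd(), "./playbooks/site.yml"))  # True
print(is_within(Path.cwd(), "../../etc/passwd"))      # False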
@@ -1632,16 +2779,29 @@ def _parse_validation_metrics(validation_result):
      total_checks = 0
 
      for line in lines:
-         if "ERROR:" in line.upper():
+         line_upper = line.upper()
+         # Match both old format "ERROR:" and new format "[ERROR]"
+         if "ERROR:" in line_upper or "[ERROR]" in line_upper:
              errors += 1
-         elif "WARNING:" in line.upper():
+         elif "WARNING:" in line_upper or "[WARNING]" in line_upper:
              warnings += 1
-         elif "PASSED:" in line.upper() or "✓" in line:
+         # Match explicit passed check or INFO level (which we use for success now)
+         elif (
+             "PASSED:" in line_upper
+             or "PASSED" in line_upper
+             or "✓" in line
+             or "[INFO]" in line_upper
+         ):
              passed += 1
          if "Total checks:" in line.lower():
              with contextlib.suppress(ValueError):
                  total_checks = int(line.split(":")[1].strip())
 
+     # If we found errors/warnings but no explicit "checks" count (legacy log parsing),
+     # infer total checks from line items
+     if total_checks == 0 and (errors > 0 or warnings > 0 or passed > 0):
+         total_checks = errors + warnings + passed
+
      return errors, warnings, passed, total_checks
 
 
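The widened matching above accepts both the legacy `ERROR:` prefix and the new bracketed `[ERROR]` format produced by the rewritten report formatter. A minimal sketch of the counting logic:

def count_levels(report: str) -> tuple[int, int]:
    errors = warnings = 0
    for line in report.splitlines():
        upper = line.upper()
        if "ERROR:" in upper or "[ERROR]" in upper:
            errors += 1
        elif "WARNING:" in upper or "[WARNING]" in upper:
            warnings += 1
    return errors, warnings

# Mixed old- and new-format lines are both counted:
print(count_levels("[ERROR] site.yml: bad task\nWARNING: deprecated module"))  # (1, 1)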
@@ -1697,9 +2857,9 @@ def _display_validation_sections(validation_result):
          elif "Performance Validation" in section:
              with st.expander("⚡ Performance Validation"):
                  st.markdown(section.replace("## Performance Validation", ""))
-         elif "Best Practices" in section:
-             with st.expander("📋 Best Practices"):
-                 st.markdown(section.replace("## Best Practices", ""))
+         elif SCOPE_BEST_PRACTICES in section:
+             with st.expander(f"📋 {SCOPE_BEST_PRACTICES}"):
+                 st.markdown(section.replace(f"## {SCOPE_BEST_PRACTICES}", ""))
          elif "Recommendations" in section:
              with st.expander("💡 Recommendations"):
                  st.markdown(section.replace("## Recommendations", ""))
@@ -1810,7 +2970,8 @@ def display_validation_results():
      _display_validation_status(errors, warnings)
 
      # Validation details
-     st.info(f"Validation type: **{validation_type}** | Path: `{input_path}`")
+     validation_msg = f"Validation type: **{validation_type}** | Path: `{input_path}`"
+     st.info(validation_msg)
 
      # Display validation results
      st.subheader("Validation Details")