mcp-souschef 2.5.3__py3-none-any.whl → 3.0.0__py3-none-any.whl

This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.
souschef/ui/app.py CHANGED
@@ -1,42 +1,66 @@
-"""
-Visual Migration Planning Interface for SousChef.
+# Add the parent directory to the path so we can import souschef modules
+import sys
+from pathlib import Path
 
-A Streamlit-based web interface for Chef to Ansible migration planning,
-assessment, and visualization.
-"""
+app_path = Path(__file__).parent.parent
+if str(app_path) not in sys.path:
+    sys.path.insert(0, str(app_path))
 
 import contextlib
-import sys
-from pathlib import Path
+import os
+from collections.abc import Callable, Iterable, Mapping, Sequence
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    Concatenate,
+    ParamSpec,
+    TypeVar,
+)
 
 import streamlit as st
 
-# Add the parent directory to the path so we can import souschef modules
-sys.path.insert(0, str(Path(__file__).parent.parent))
+if TYPE_CHECKING:
+    import networkx as nx
+    import plotly.graph_objects as go
+    from matplotlib.figure import Figure
+
+P = ParamSpec("P")
+R = TypeVar("R")
 
-# Import page modules
+from souschef.ui.pages.ai_settings import show_ai_settings_page
 from souschef.ui.pages.cookbook_analysis import show_cookbook_analysis_page
 
-# Constants for repeated strings
+# Constants
+SECTION_COMMUNITY_COOKBOOKS_HEADER = "Community Cookbooks:"
+SECTION_COMMUNITY_COOKBOOKS = "Community Cookbooks"
+SECTION_CIRCULAR_DEPENDENCIES = "Circular Dependencies"
+SECTION_MIGRATION_IMPACT_ANALYSIS = "Migration Impact Analysis"
 NAV_MIGRATION_PLANNING = "Migration Planning"
 NAV_DEPENDENCY_MAPPING = "Dependency Mapping"
 NAV_VALIDATION_REPORTS = "Validation Reports"
+NAV_AI_SETTINGS = "AI Settings"
+NAV_COOKBOOK_ANALYSIS = "Cookbook Analysis"
+BUTTON_ANALYSE_DEPENDENCIES = "Analyse Dependencies"
+INPUT_METHOD_DIRECTORY_PATH = "Directory Path"
 MIME_TEXT_MARKDOWN = "text/markdown"
 MIME_APPLICATION_JSON = "application/json"
-SECTION_CIRCULAR_DEPENDENCIES = "Circular Dependencies"
+ERROR_MSG_ENTER_PATH = "Please enter a path."
+SCOPE_BEST_PRACTICES = "Best Practices"
 
 
 class ProgressTracker:
     """Track progress for long-running operations."""
 
-    def __init__(self, total_steps=100, description="Processing..."):
-        self.total_steps = total_steps
-        self.current_step = 0
-        self.description = description
+    def __init__(
+        self, total_steps: int = 100, description: str = "Processing..."
+    ) -> None:
+        self.total_steps: int = total_steps
+        self.current_step: int = 0
+        self.description: str = description
         self.progress_bar = st.progress(0)
         self.status_text = st.empty()
 
-    def update(self, step=None, description=None):
+    def update(self, step: int | None = None, description: str | None = None) -> None:
         """Update progress."""
         if step is not None:
             self.current_step = min(step, self.total_steps)
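The new import block in this hunk hides the graph libraries behind `typing.TYPE_CHECKING`, so they are only imported while type checking, never at runtime. A minimal sketch of that pattern (not package code), assuming networkx is only needed for the annotation:

```python
# Sketch of the TYPE_CHECKING import pattern used in the hunk above.
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # Imported only during static type checking, never at runtime.
    import networkx as nx


def node_count(graph: "nx.DiGraph") -> int:
    # The quoted annotation is never evaluated at runtime, so this module
    # imports cleanly even when networkx is not installed.
    return len(graph.nodes)
```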
@@ -52,7 +76,7 @@ class ProgressTracker:
             f"{self.description} ({self.current_step}/{self.total_steps})"
         )
 
-    def complete(self, message="Completed!"):
+    def complete(self, message: str = "Completed!") -> None:
         """Mark progress as complete."""
         self.progress_bar.progress(1.0)
         self.status_text.text(message)
@@ -60,24 +84,26 @@ class ProgressTracker:
 
         time.sleep(0.5)  # Brief pause to show completion
 
-    def close(self):
+    def close(self) -> None:
         """Clean up progress indicators."""
         self.progress_bar.empty()
         self.status_text.empty()
 
 
 def with_progress_tracking(
-    operation_func, description="Processing...", total_steps=100
-):
+    operation_func: Callable[Concatenate[ProgressTracker, P], R],
+    description: str = "Processing...",
+    total_steps: int = 100,
+) -> Callable[P, R]:
     """Add progress tracking to operations."""
 
-    def wrapper(*args, **kwargs):
+    def wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
         tracker = ProgressTracker(total_steps, description)
         try:
-            result = operation_func(tracker, *args, **kwargs)
+            result: R = operation_func(tracker, *args, **kwargs)
             tracker.complete()
             return result
-        except Exception as e:
+        except Exception as e:  # noqa: BLE001
            tracker.close()
            raise e
        finally:
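The new `Callable[Concatenate[ProgressTracker, P], R]` signature says the wrapped operation takes the tracker as its first argument, while the returned `Callable[P, R]` says callers supply only the remaining arguments. A hedged sketch of how such a wrapper is consumed, relying on the definitions above; `fetch_data` and its parameters are hypothetical, not part of the package:

```python
# Hypothetical operation: the tracker is injected as the first argument.
def fetch_data(tracker: ProgressTracker, url: str) -> str:
    tracker.update(50, f"Fetching {url}...")
    return "payload"


# The wrapper drops the tracker from the visible signature, so a type
# checker sees fetch as Callable[[str], str].
fetch = with_progress_tracking(fetch_data, description="Fetching...")
result = fetch("https://example.com")
```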
@@ -86,90 +112,273 @@ def with_progress_tracking(
     return wrapper
 
 
-def main():
+def main() -> None:
     """Run the main Streamlit application."""
     st.set_page_config(
         page_title="SousChef - Chef to Ansible Migration",
         layout="wide",
-        initial_sidebar_state="expanded",
+        initial_sidebar_state="collapsed",
     )
 
     st.title("SousChef - Visual Migration Planning")
     st.markdown("*AI-powered Chef to Ansible migration planning interface*")
 
-    # Sidebar navigation
-    page = st.sidebar.selectbox(
-        "Navigation",
-        [
-            "Dashboard",
+    # Main content area - default to dashboard
+    page = st.session_state.get("current_page", "Dashboard")
+
+    # Navigation section
+    st.subheader("Navigation")
+
+    col1, col2, col3, col4, col5 = st.columns(5)
+
+    with col1:
+        if st.button(
             "Cookbook Analysis",
-            NAV_MIGRATION_PLANNING,
-            NAV_DEPENDENCY_MAPPING,
-            NAV_VALIDATION_REPORTS,
-        ],
-        help="Choose the section you want to work with. "
-        "Use arrow keys to navigate options.",
-        key="main_navigation",
-    )
+            type="primary" if page == NAV_COOKBOOK_ANALYSIS else "secondary",
+            width="stretch",
+            key="nav_cookbook_analysis",
+        ):
+            st.session_state.current_page = NAV_COOKBOOK_ANALYSIS
+            st.rerun()
+
+    with col2:
+        if st.button(
+            "Migration Planning",
+            type="primary" if page == NAV_MIGRATION_PLANNING else "secondary",
+            width="stretch",
+            key="nav_migration_planning",
+        ):
+            st.session_state.current_page = NAV_MIGRATION_PLANNING
+            st.rerun()
 
-    # Main content area
-    if page == "Dashboard":
-        show_dashboard()
-    elif page == "Cookbook Analysis":
-        show_cookbook_analysis_page()
-    elif page == NAV_MIGRATION_PLANNING:
-        show_migration_planning()
-    elif page == NAV_DEPENDENCY_MAPPING:
-        show_dependency_mapping()
-    elif page == NAV_VALIDATION_REPORTS:
-        show_validation_reports()
+    with col3:
+        if st.button(
+            "Dependency Mapping",
+            type="primary" if page == NAV_DEPENDENCY_MAPPING else "secondary",
+            width="stretch",
+            key="nav_dependency_mapping",
+        ):
+            st.session_state.current_page = NAV_DEPENDENCY_MAPPING
+            st.rerun()
+
+    with col4:
+        if st.button(
+            "Validation Reports",
+            type="primary" if page == NAV_VALIDATION_REPORTS else "secondary",
+            width="stretch",
+            key="nav_validation_reports",
+        ):
+            st.session_state.current_page = NAV_VALIDATION_REPORTS
+            st.rerun()
 
+    with col5:
+        if st.button(
+            "AI Settings",
+            type="primary" if page == NAV_AI_SETTINGS else "secondary",
+            width="stretch",
+            key="nav_ai_settings",
+        ):
+            st.session_state.current_page = NAV_AI_SETTINGS
+            st.rerun()
+
+    st.divider()
+
+    # Page routing
+    _route_to_page(page)
+
+
+def _route_to_page(page: str) -> None:
+    """Route to the appropriate page based on the current page state."""
+    page_routes = {
+        "Dashboard": show_dashboard,
+        NAV_COOKBOOK_ANALYSIS: show_cookbook_analysis_page,
+        NAV_MIGRATION_PLANNING: show_migration_planning,
+        NAV_DEPENDENCY_MAPPING: show_dependency_mapping,
+        NAV_VALIDATION_REPORTS: show_validation_reports,
+        NAV_AI_SETTINGS: show_ai_settings_page,
+    }
+
+    route_func = page_routes.get(page)
+    if route_func:
+        route_func()
+    else:
+        show_dashboard()  # Default fallback
+
+
+def _calculate_dashboard_metrics() -> tuple[int, str, int, int]:
+    """Calculate and return dashboard metrics."""
+    complexity_counts = {"High": 0, "Medium": 0, "Low": 0}
+    successful_analyses = 0
+    cookbooks_analysed = 0
+
+    if "analysis_results" in st.session_state and st.session_state.analysis_results:
+        results = st.session_state.analysis_results
+        cookbooks_analysed = len(results)
+        successful_analyses = len([r for r in results if r.get("status") == "Analysed"])
+
+        for r in results:
+            comp = r.get("complexity", "Unknown")
+            if comp in complexity_counts:
+                complexity_counts[comp] += 1
+
+    # Determine overall complexity
+    overall_complexity = "Unknown"
+    if cookbooks_analysed > 0:
+        if complexity_counts["High"] > 0:
+            overall_complexity = "High"
+        elif complexity_counts["Medium"] > 0:
+            overall_complexity = "Medium"
+        elif complexity_counts["Low"] > 0:
+            overall_complexity = "Low"
+
+    conversion_rate = 0
+    if cookbooks_analysed > 0:
+        conversion_rate = int((successful_analyses / cookbooks_analysed) * 100)
+
+    return (
+        cookbooks_analysed,
+        overall_complexity,
+        conversion_rate,
+        successful_analyses,
+    )
 
-def show_dashboard():
-    """Show the main dashboard with migration overview."""
-    st.header("Migration Dashboard")
 
+def _display_dashboard_metrics(
+    cookbooks_analysed: int,
+    overall_complexity: str,
+    conversion_rate: int,
+    successful_analyses: int,
+) -> None:
+    """Display the dashboard metrics."""
     col1, col2, col3 = st.columns(3)
 
     with col1:
-        st.metric("Cookbooks Analyzed", "0", "Ready to analyze")
+        st.metric(
+            "Cookbooks Analysed",
+            str(cookbooks_analysed),
+            f"{cookbooks_analysed} processed"
+            if cookbooks_analysed > 0
+            else "Ready to analyse",
+        )
         st.caption("Total cookbooks processed")
 
     with col2:
-        st.metric("Migration Complexity", "Unknown", "Assessment needed")
+        st.metric(
+            "Migration Complexity",
+            overall_complexity,
+            "Based on analysis"
+            if overall_complexity != "Unknown"
+            else "Assessment needed",
+        )
         st.caption("Overall migration effort")
 
     with col3:
-        st.metric("Conversion Rate", "0%", "Start migration")
-        st.caption("Successful conversions")
+        st.metric(
+            "Success Rate",
+            f"{conversion_rate}%",
+            f"{successful_analyses} successful"
+            if cookbooks_analysed > 0
+            else "Start migration",
+        )
+        st.caption("Successful analyses")
 
-    st.divider()
 
-    # Quick actions
-    st.subheader("Quick Actions")
+def _display_quick_upload_section() -> None:
+    """Display the quick upload section."""
+    st.subheader("Quick Start - Upload Cookbooks")
 
-    col1, col2 = st.columns(2)
+    col1, col2 = st.columns([2, 1])
 
     with col1:
-        if st.button(
-            "Analyze Cookbook Directory", type="primary", use_container_width=True
-        ):
-            st.rerun()  # This will trigger navigation to cookbook analysis
+        uploaded_file = st.file_uploader(
+            "Upload Cookbook Archive",
+            type=["zip", "tar.gz", "tgz", "tar"],
+            help="Upload a ZIP or TAR archive containing your Chef "
+            "cookbooks for quick analysis",
+            key="dashboard_upload",
+        )
+
+        if uploaded_file:
+            # Store the uploaded file in session state for persistence across pages
+            st.session_state.uploaded_file_data = uploaded_file.getvalue()
+            st.session_state.uploaded_file_name = uploaded_file.name
+            st.session_state.uploaded_file_type = uploaded_file.type
+
+            st.success(f"File {uploaded_file.name} uploaded successfully!")
+            st.info(
+                "Navigate to Cookbook Analysis above to process this file, "
+                "or upload another file to replace it."
+            )
 
     with col2:
-        if st.button(
-            "Generate Migration Plan", type="secondary", use_container_width=True
-        ):
-            st.rerun()  # This will trigger navigation to migration planning
+        st.markdown("**Upload Options:**")
+        st.markdown("- ZIP archives (.zip)")
+        st.markdown("- TAR archives (.tar, .tar.gz, .tgz)")
+        st.markdown("- Process uploaded files using the navigation buttons above")
 
-    # Recent activity
+
+def _display_recent_activity() -> None:
+    """Display the recent activity section."""
     st.subheader("Recent Activity")
-    st.info("No recent migration activity. Start by analyzing your cookbooks!")
+    st.info(
+        "No recent migration activity. Start by uploading cookbooks "
+        f"above or using the {NAV_COOKBOOK_ANALYSIS} page!"
+    )
+
+    # Getting started guide
+    with st.expander("How to Get Started"):
+        st.markdown("""
+        **New to SousChef? Here's how to begin:**
+
+        1. **Upload Cookbooks**: Use the uploader above or go to {NAV_COOKBOOK_ANALYSIS}
+        2. **Analyse Complexity**: Get detailed migration assessments
+        3. **Plan Migration**: Generate timelines and resource requirements
+        4. **Convert to Ansible**: Download converted playbooks
+
+        **Supported Formats:**
+        - ZIP archives (.zip)
+        - TAR archives (.tar, .tar.gz, .tgz)
+        - Directory paths (in {NAV_COOKBOOK_ANALYSIS})
+
+        **Expected Structure:**
+        ```
+        your-cookbooks/
+        ├── nginx/
+        │   ├── metadata.rb
+        │   ├── recipes/
+        │   └── attributes/
+        └── apache2/
+            └── metadata.rb
+        ```
+        """)
+
+
+def show_dashboard() -> None:
+    """Show the main dashboard with migration overview."""
+    st.header("Migration Dashboard")
+
+    # Metrics calculation
+    cookbooks_analysed, overall_complexity, conversion_rate, successful_analyses = (
+        _calculate_dashboard_metrics()
+    )
+
+    # Display metrics
+    _display_dashboard_metrics(
+        cookbooks_analysed, overall_complexity, conversion_rate, successful_analyses
+    )
+
+    st.divider()
+
+    # Quick upload section
+    _display_quick_upload_section()
+
+    # Recent activity
+    _display_recent_activity()
 
 
-def show_migration_planning():
+def show_migration_planning() -> None:
     """Show migration planning interface."""
-    st.header("Migration Planning")
+    st.header(NAV_MIGRATION_PLANNING)
 
     # Import assessment functions
     from souschef.assessment import generate_migration_plan
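The sidebar selectbox is replaced here by a row of buttons driving `st.session_state` plus `st.rerun()`. A condensed sketch of that navigation pattern in isolation; the page name is shortened for illustration:

```python
import streamlit as st

# Read the current page; default to the dashboard on first run.
page = st.session_state.get("current_page", "Dashboard")

# A click stores the target page and forces an immediate rerun, so the
# routing code below the buttons sees the new value on the next pass.
if st.button("Planning", type="primary" if page == "Planning" else "secondary"):
    st.session_state.current_page = "Planning"
    st.rerun()
```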
@@ -183,13 +392,27 @@ def show_migration_planning():
     # Step 1: Cookbook Selection
     st.subheader("Step 1: Cookbook Selection")
 
+    # Check for previously analyzed cookbooks
+    uploaded_plan_context = None
+    if (
+        "analysis_cookbook_path" in st.session_state
+        and st.session_state.analysis_cookbook_path
+    ):
+        uploaded_plan_context = st.session_state.analysis_cookbook_path
+        st.info(f"Using analyzed cookbooks from: {uploaded_plan_context}")
+
     col1, col2 = st.columns([3, 1])
 
     with col1:
+        # Default to analyzed path if available
+        default_paths = uploaded_plan_context if uploaded_plan_context else ""
+
         cookbook_paths = st.text_area(
             "Cookbook Paths",
+            value=default_paths,
             placeholder="/path/to/cookbooks/nginx,/path/to/cookbooks/apache2,/path/to/cookbooks/mysql",
-            help="Enter comma-separated paths to your Chef cookbooks",
+            help="Enter comma-separated paths to your Chef cookbooks. If you uploaded "
+            "an archive in the Analysis tab, that path is pre-filled.",
             height=100,
         )
 
@@ -274,7 +497,12 @@ def show_migration_planning():
     # Step 3: Generate Plan
     st.subheader("Step 3: Generate Migration Plan")
 
-    if st.button("Generate Migration Plan", type="primary", use_container_width=True):
+    if st.button(
+        "Generate Migration Plan",
+        type="primary",
+        width="stretch",
+        key="migration_plan_generate",
+    ):
         if not cookbook_paths.strip():
             st.error("Please enter cookbook paths to generate a migration plan.")
             return
@@ -318,7 +546,9 @@ def show_migration_planning():
     display_migration_plan_results()
 
 
-def _display_migration_summary_metrics(cookbook_paths, strategy, timeline):
+def _display_migration_summary_metrics(
+    cookbook_paths: str, strategy: str, timeline: int
+) -> None:
     """Display migration overview summary metrics."""
     st.subheader("Migration Overview")
 
@@ -338,7 +568,7 @@ def _display_migration_summary_metrics(cookbook_paths, strategy, timeline):
         st.metric("Status", "Plan Generated")
 
 
-def _display_migration_plan_details(plan_result):
+def _display_migration_plan_details(plan_result: str) -> None:
     """Display the detailed migration plan sections."""
     st.subheader("Migration Plan Details")
 
@@ -359,14 +589,18 @@ def _display_migration_plan_details(plan_result):
         st.markdown(section)
 
 
-def _display_migration_action_buttons(cookbook_paths):
+def _display_migration_action_buttons(cookbook_paths: str) -> None:
     """Display action buttons for next steps."""
     st.subheader("Next Steps")
 
     col1, col2, col3 = st.columns(3)
 
     with col1:
-        if st.button("📊 Generate Detailed Report", use_container_width=True):
+        if st.button(
+            "Generate Detailed Report",
+            width="stretch",
+            key="migration_detailed_report",
+        ):
             with st.spinner("Generating detailed migration report..."):
                 try:
                     from souschef.assessment import generate_migration_report
@@ -380,27 +614,31 @@ def _display_migration_action_buttons(cookbook_paths):
                     st.error(f"Error generating report: {e}")
 
     with col2:
-        if st.button("🔍 Analyze Dependencies", use_container_width=True):
+        if st.button(
+            "Analyse Dependencies",
+            width="stretch",
+            key="migration_analyse_dependencies",
+        ):
             if len(cookbook_paths.split(",")) == 1:
                 # Single cookbook dependency analysis
                 cookbook_path = cookbook_paths.split(",")[0].strip()
-                with st.spinner(f"Analyzing dependencies for {cookbook_path}..."):
+                with st.spinner(f"Analysing dependencies for {cookbook_path}..."):
                     try:
-                        from souschef.assessment import analyze_cookbook_dependencies
+                        from souschef.assessment import analyse_cookbook_dependencies
 
-                        dep_analysis = analyze_cookbook_dependencies(cookbook_path)
+                        dep_analysis = analyse_cookbook_dependencies(cookbook_path)
                         st.session_state.dep_analysis = dep_analysis
                         st.success("Dependency analysis complete!")
                     except Exception as e:
                         st.error(f"Error analyzing dependencies: {e}")
             else:
                 st.info(
-                    "Dependency analysis is optimized for single cookbooks. "
+                    "Dependency analysis is optimised for single cookbooks. "
                     "Select one cookbook path for detailed analysis."
                 )
 
     with col3:
-        if st.button("📥 Export Plan", use_container_width=True):
+        if st.button("Export Plan", width="stretch", key="migration_export_plan"):
            # Create downloadable plan
            plan_content = f"""# Chef to Ansible Migration Plan
 Generated: {st.session_state.get("timestamp", "Unknown")}
@@ -423,20 +661,20 @@ Generated: {st.session_state.get("timestamp", "Unknown")}
         )
 
 
-def _display_additional_reports():
+def _display_additional_reports() -> None:
     """Display detailed report and dependency analysis if available."""
     # Display detailed report if generated
     if "detailed_report" in st.session_state:
-        with st.expander("📊 Detailed Migration Report"):
+        with st.expander("Detailed Migration Report"):
             st.markdown(st.session_state.detailed_report)
 
     # Display dependency analysis if generated
     if "dep_analysis" in st.session_state:
-        with st.expander("🔍 Dependency Analysis"):
+        with st.expander("Dependency Analysis"):
             st.markdown(st.session_state.dep_analysis)
 
 
-def display_migration_plan_results():
+def display_migration_plan_results() -> None:
     """Display the generated migration plan results."""
     plan_result = st.session_state.migration_plan
     cookbook_paths = st.session_state.cookbook_paths
@@ -449,25 +687,55 @@ def display_migration_plan_results():
     _display_additional_reports()
 
 
-def show_dependency_mapping():
+def show_dependency_mapping() -> None:
     """Show dependency mapping visualization."""
     st.header(NAV_DEPENDENCY_MAPPING)
 
     # Import assessment functions
-    from souschef.assessment import analyze_cookbook_dependencies
+    from souschef.assessment import analyse_cookbook_dependencies
 
     st.markdown("""
-    Visualize and analyze cookbook dependencies to understand migration order
+    Visualise and analyse cookbook dependencies to understand migration order
     and identify potential circular dependencies.
     """)
 
-    # Cookbook path input
-    cookbook_path = st.text_input(
-        "Cookbook Directory Path",
-        placeholder="/path/to/your/cookbooks",
-        help="Enter the path to your cookbooks directory for dependency analysis",
+    # Input method selection
+    input_method = st.radio(
+        "Choose Input Method",
+        ["Upload Archive", INPUT_METHOD_DIRECTORY_PATH],
+        horizontal=True,
+        help="Select how to provide cookbooks for dependency analysis",
+        key="dep_input_method",
     )
 
+    cookbook_path = None
+    uploaded_file = None
+
+    if input_method == INPUT_METHOD_DIRECTORY_PATH:
+        cookbook_path = st.text_input(
+            "Cookbook Directory Path",
+            placeholder="/path/to/your/cookbooks",
+            help="Enter the path to your cookbooks directory for dependency analysis",
+        )
+    else:
+        uploaded_file = st.file_uploader(
+            "Upload Cookbook Archive",
+            type=["zip", "tar.gz", "tgz", "tar"],
+            help="Upload a ZIP or TAR archive containing your Chef cookbooks",
+            key="dep_archive_upload",
+        )
+        if uploaded_file:
+            try:
+                with st.spinner("Extracting archive..."):
+                    # Import the extract function from cookbook_analysis
+                    from souschef.ui.pages.cookbook_analysis import extract_archive
+
+                    cookbook_path = str(extract_archive(uploaded_file))
+                    st.success("Archive extracted successfully")
+            except Exception as e:
+                st.error(f"Failed to extract archive: {e}")
+                return
+
     # Analysis options
     col1, col2 = st.columns(2)
 
@@ -475,7 +743,7 @@ def show_dependency_mapping():
         dependency_depth = st.selectbox(
             "Analysis Depth",
             ["direct", "transitive", "full"],
-            help="How deep to analyze dependencies",
+            help="How deep to analyse dependencies",
             format_func=lambda x: {
                 "direct": "Direct Dependencies Only",
                 "transitive": "Include Transitive Dependencies",
@@ -496,21 +764,26 @@ def show_dependency_mapping():
         )
 
     # Analysis button
-    if st.button("Analyze Dependencies", type="primary", use_container_width=True):
-        if not cookbook_path.strip():
+    if st.button(
+        BUTTON_ANALYSE_DEPENDENCIES,
+        type="primary",
+        width="stretch",
+        key="dep_analyse_dependencies",
+    ):
+        if not cookbook_path or not cookbook_path.strip():
             st.error("Please enter a cookbook directory path.")
             return
 
         # Create progress tracker
         progress_tracker = ProgressTracker(
-            total_steps=5, description="Analyzing cookbook dependencies..."
+            total_steps=5, description="Analysing cookbook dependencies..."
         )
 
         try:
             progress_tracker.update(1, "Scanning cookbook directory...")
 
-            # Analyze dependencies
-            analysis_result = analyze_cookbook_dependencies(
+            # Analyse dependencies
+            analysis_result = analyse_cookbook_dependencies(
                 cookbook_path.strip(), dependency_depth
             )
 
@@ -538,17 +811,17 @@ def show_dependency_mapping():
         display_dependency_analysis_results()
 
 
-def _setup_dependency_mapping_ui():
+def _setup_dependency_mapping_ui() -> None:
     """Set up the dependency mapping UI header and description."""
     st.header(NAV_DEPENDENCY_MAPPING)
 
     st.markdown("""
-    Visualize and analyze cookbook dependencies to understand migration order
+    Visualise and analyse cookbook dependencies to understand migration order
     and identify potential circular dependencies.
     """)
 
 
-def _get_dependency_mapping_inputs():
+def _get_dependency_mapping_inputs() -> tuple[str, str, str]:
     """Collect user inputs for dependency analysis."""
     # Cookbook path input
     cookbook_path = st.text_input(
@@ -564,7 +837,7 @@ def _get_dependency_mapping_inputs():
         dependency_depth = st.selectbox(
             "Analysis Depth",
             ["direct", "transitive", "full"],
-            help="How deep to analyze dependencies",
+            help="How deep to analyse dependencies",
             format_func=lambda x: {
                 "direct": "Direct Dependencies Only",
                 "transitive": "Include Transitive Dependencies",
@@ -588,12 +861,14 @@ def _get_dependency_mapping_inputs():
 
 
 def _handle_dependency_analysis_execution(
-    cookbook_path, dependency_depth, visualization_type
-):
+    cookbook_path: str, dependency_depth: str, visualization_type: str
+) -> None:
     """Handle the dependency analysis execution when button is clicked."""
     # Analysis button
-    if st.button("Analyze Dependencies", type="primary", use_container_width=True):
-        if not cookbook_path.strip():
+    if st.button(
+        BUTTON_ANALYSE_DEPENDENCIES, type="primary", width="stretch", key="dep_analyze"
+    ):
+        if not cookbook_path or not cookbook_path.strip():
             st.error("Please enter a cookbook directory path.")
             return
 
@@ -602,21 +877,23 @@ def _handle_dependency_analysis_execution(
         )
 
 
-def _perform_dependency_analysis(cookbook_path, dependency_depth, visualization_type):
+def _perform_dependency_analysis(
+    cookbook_path: str, dependency_depth: str, visualization_type: str
+) -> None:
     """Perform the actual dependency analysis."""
     # Import assessment functions
-    from souschef.assessment import analyze_cookbook_dependencies
+    from souschef.assessment import analyse_cookbook_dependencies
 
     # Create progress tracker
     progress_tracker = ProgressTracker(
-        total_steps=5, description="Analyzing cookbook dependencies..."
+        total_steps=5, description="Analysing cookbook dependencies..."
     )
 
     try:
         progress_tracker.update(1, "Scanning cookbook directory...")
 
-        # Analyze dependencies
-        analysis_result = analyze_cookbook_dependencies(cookbook_path, dependency_depth)
+        # Analyse dependencies
+        analysis_result = analyse_cookbook_dependencies(cookbook_path, dependency_depth)
 
         progress_tracker.update(2, "Parsing dependency relationships...")
         progress_tracker.update(3, "Detecting circular dependencies...")
@@ -637,16 +914,16 @@ def _perform_dependency_analysis(cookbook_path, dependency_depth, visualization_
         st.error(f"Error analyzing dependencies: {e}")
 
 
-def _display_dependency_analysis_results_if_available():
+def _display_dependency_analysis_results_if_available() -> None:
     """Display dependency analysis results if they exist in session state."""
     # Display results if available
     if "dep_analysis_result" in st.session_state:
         display_dependency_analysis_results()
 
 
-def _extract_dependency_relationships(lines):
+def _extract_dependency_relationships(lines: Iterable[str]) -> dict[str, list[str]]:
     """Extract dependency relationships from analysis lines."""
-    dependencies = {}
+    dependencies: dict[str, list[str]] = {}
     current_section = None
 
     for line in lines:
@@ -655,7 +932,7 @@ def _extract_dependency_relationships(lines):
             current_section = "direct"
         elif "Transitive Dependencies:" in line:
             current_section = "transitive"
-        elif line.startswith("- ") and current_section in ["direct", "transitive"]:
+        elif line.startswith("- ") and current_section == "direct":
             # Regular dependencies
             dep_text = line[2:].strip()
             if ":" in dep_text:
@@ -669,11 +946,13 @@ def _extract_dependency_relationships(lines):
     return dependencies
 
 
-def _extract_circular_and_community_deps(lines):
+def _extract_circular_and_community_deps(
+    lines: Iterable[str],
+) -> tuple[list[tuple[str, str]], list[str]]:
     """Extract circular dependencies and community cookbooks."""
     circular_deps: list[tuple[str, str]] = []
     community_cookbooks: list[str] = []
-    current_section = None
+    current_section: str | None = None
 
     for line in lines:
         current_section = _update_current_section(line, current_section)
@@ -685,22 +964,27 @@ def _extract_circular_and_community_deps(lines):
     return circular_deps, community_cookbooks
 
 
-def _update_current_section(line, current_section):
+def _update_current_section(line: str, current_section: str | None) -> str | None:
     """Update the current section based on the line content."""
     line = line.strip()
     if "Circular Dependencies:" in line:
         return "circular"
-    elif "Community Cookbooks:" in line:
+    elif SECTION_COMMUNITY_COOKBOOKS_HEADER in line:
         return "community"
     return current_section
 
 
-def _is_list_item(line):
+def _is_list_item(line: str) -> bool:
     """Check if the line is a list item."""
     return line.strip().startswith("- ")
 
 
-def _process_list_item(line, current_section, circular_deps, community_cookbooks):
+def _process_list_item(
+    line: str,
+    current_section: str | None,
+    circular_deps: list[tuple[str, str]],
+    community_cookbooks: list[str],
+) -> None:
     """Process a list item based on the current section."""
     if current_section == "circular":
         _process_circular_dependency_item(line, circular_deps)
@@ -708,7 +992,9 @@ def _process_list_item(line, current_section, circular_deps, community_cookbooks
         _process_community_cookbook_item(line, community_cookbooks)
 
 
-def _process_circular_dependency_item(line, circular_deps):
+def _process_circular_dependency_item(
+    line: str, circular_deps: list[tuple[str, str]]
+) -> None:
     """Process a circular dependency list item."""
     dep_text = line[2:].strip()
     if "->" in dep_text:
@@ -717,14 +1003,16 @@ def _process_circular_dependency_item(line, circular_deps):
             circular_deps.append((parts[0].strip(), parts[1].strip()))
 
 
-def _process_community_cookbook_item(line, community_cookbooks):
+def _process_community_cookbook_item(line: str, community_cookbooks: list[str]) -> None:
     """Process a community cookbook list item."""
     cookbook = line[2:].strip()
     if cookbook:
         community_cookbooks.append(cookbook)
 
 
-def _parse_dependency_analysis(analysis_result):
+def _parse_dependency_analysis(
+    analysis_result: str,
+) -> tuple[dict[str, list[str]], list[tuple[str, str]], list[str]]:
     """Parse dependency analysis result into structured data."""
     lines = analysis_result.split("\n")
 
@@ -734,7 +1022,11 @@ def _parse_dependency_analysis(analysis_result):
     return dependencies, circular_deps, community_cookbooks
 
 
-def _create_networkx_graph(dependencies, circular_deps, community_cookbooks):
+def _create_networkx_graph(
+    dependencies: Mapping[str, Sequence[str]],
+    circular_deps: Sequence[tuple[str, str]],
+    community_cookbooks: Sequence[str],
+) -> "nx.DiGraph":
     """Create NetworkX graph from dependency data."""
     import networkx as nx
 
@@ -759,40 +1051,106 @@ def _create_networkx_graph(dependencies, circular_deps, community_cookbooks):
     return graph
 
 
-def _calculate_graph_positions(graph, layout_algorithm):
-    """Calculate node positions using specified layout algorithm."""
-    import networkx as nx
+def _calculate_graph_positions(
+    graph: "nx.DiGraph", layout_algorithm: str
+) -> tuple[dict[Any, tuple[float, float]], str]:
+    """
+    Calculate positions for graph nodes using the specified layout algorithm.
+
+    Args:
+        graph: NetworkX graph object
+        layout_algorithm: String specifying the layout algorithm to use
+
+    Returns:
+        tuple: (positions_dict, algorithm_used)
 
+    """
     # Choose layout algorithm based on graph size and user preference
     num_nodes = len(graph.nodes)
     if layout_algorithm == "auto":
-        if num_nodes < 10:
-            layout_algorithm = "spring"
-        elif num_nodes < 50:
-            layout_algorithm = "kamada_kawai"
-        else:
-            layout_algorithm = "circular"
+        layout_algorithm = _choose_auto_layout_algorithm(num_nodes)
 
     # Calculate positions using selected layout algorithm
-    if layout_algorithm == "spring":
-        pos = nx.spring_layout(graph, k=2, iterations=50)
-    elif layout_algorithm == "circular":
-        pos = nx.circular_layout(graph)
-    elif layout_algorithm == "kamada_kawai":
-        try:
-            pos = nx.kamada_kawai_layout(graph)
-        except Exception:
-            # Fallback to spring layout if kamada_kawai fails
-            pos = nx.spring_layout(graph, k=2, iterations=50)
-    else:
-        pos = nx.spring_layout(graph, k=2, iterations=50)
+    pos = _calculate_positions_with_algorithm(graph, layout_algorithm)
 
     return pos, layout_algorithm
 
 
-def _create_plotly_edge_traces(graph, pos):
+def _choose_auto_layout_algorithm(num_nodes: int) -> str:
+    """Choose the best layout algorithm based on graph size."""
+    if num_nodes <= 10:
+        return "circular"
+    elif num_nodes <= 50:
+        return "spring"
+    else:
+        return "kamada_kawai"
+
+
+def _calculate_positions_with_algorithm(
+    graph: "nx.DiGraph", layout_algorithm: str
+) -> Any:
+    """Calculate node positions using the specified algorithm."""
+    import networkx as nx
+
+    try:
+        if layout_algorithm == "spring":
+            return nx.spring_layout(graph, k=2, iterations=50, seed=42)
+        elif layout_algorithm == "circular":
+            return nx.circular_layout(graph)
+        elif layout_algorithm == "kamada_kawai":
+            return nx.kamada_kawai_layout(graph)
+        elif layout_algorithm == "shell":
+            return _calculate_shell_layout_positions(graph)
+        elif layout_algorithm == "random":
+            return nx.random_layout(graph, seed=42)
+        elif layout_algorithm == "spectral":
+            return nx.spectral_layout(graph)
+        elif layout_algorithm == "force_directed":
+            return nx.spring_layout(graph, k=3, iterations=100, seed=42, scale=2)
+        else:
+            return nx.spring_layout(graph, k=2, iterations=50, seed=42)
+    except Exception as e:
+        # Fallback to spring layout if algorithm fails
+        st.warning(
+            f"Layout algorithm '{layout_algorithm}' failed, using spring layout: {e}"
+        )
+        return nx.spring_layout(graph, k=2, iterations=50, seed=42)
+
+
+def _calculate_shell_layout_positions(
+    graph: "nx.DiGraph",
+) -> Any:
+    """Calculate shell layout positions for hierarchical organization."""
+    import networkx as nx
+
+    # Identify leaf nodes (no outgoing edges)
+    leaf_nodes = [n for n in graph.nodes() if graph.out_degree(n) == 0]
+    # Identify root nodes (no incoming edges)
+    root_nodes = [n for n in graph.nodes() if graph.in_degree(n) == 0]
+    # Middle nodes
+    middle_nodes = [
+        n for n in graph.nodes() if n not in leaf_nodes and n not in root_nodes
+    ]
+
+    shells = []
+    if root_nodes:
+        shells.append(root_nodes)
+    if middle_nodes:
+        shells.append(middle_nodes)
+    if leaf_nodes:
+        shells.append(leaf_nodes)
+
+    if shells:
+        return nx.shell_layout(graph, shells)
+    else:
+        return nx.spring_layout(graph, k=2, iterations=50, seed=42)
+
+
+def _create_plotly_edge_traces(
+    graph: "nx.DiGraph", pos: Mapping[Any, tuple[float, float]]
+) -> list["go.Scatter"]:
     """Create edge traces for Plotly graph."""
-    import plotly.graph_objects as go  # type: ignore[import-untyped]
+    import plotly.graph_objects as go
 
     edge_traces = []
 
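Whatever layout algorithm wins, the result is a dict mapping each node to an (x, y) pair, which the Plotly trace builders in this file consume. A self-contained sketch, assuming networkx and plotly are installed; the two-edge graph is made up for illustration:

```python
import networkx as nx
import plotly.graph_objects as go

graph = nx.DiGraph([("nginx", "openssl"), ("apache2", "openssl")])
pos = nx.spring_layout(graph, k=2, iterations=50, seed=42)  # node -> (x, y)

# Same shape of trace as _create_plotly_node_trace builds: coordinates
# pulled out of the position dict, node names used as labels.
node_trace = go.Scatter(
    x=[pos[n][0] for n in graph.nodes()],
    y=[pos[n][1] for n in graph.nodes()],
    text=list(graph.nodes()),
    mode="markers+text",
)
fig = go.Figure(data=[node_trace])
```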
@@ -843,7 +1201,9 @@ def _create_plotly_edge_traces(graph, pos):
     return edge_traces
 
 
-def _create_plotly_node_trace(graph, pos):
+def _create_plotly_node_trace(
+    graph: "nx.DiGraph", pos: Mapping[Any, tuple[float, float]]
+) -> "go.Scatter":
     """Create node trace for Plotly graph."""
     import plotly.graph_objects as go
 
@@ -896,14 +1256,16 @@ def _create_plotly_node_trace(graph, pos):
     return node_trace
 
 
-def _create_plotly_figure_layout(num_nodes, layout_algorithm):
+def _create_plotly_figure_layout(num_nodes: int, layout_algorithm: str) -> "go.Layout":
     """Create Plotly figure layout."""
     import plotly.graph_objects as go
 
-    return go.Layout(
-        title=f"Cookbook Dependency Graph ({num_nodes} nodes, "
-        f"{layout_algorithm} layout)",
-        titlefont_size=16,
+    layout: go.Layout = go.Layout(
+        title={
+            "text": f"Cookbook Dependency Graph ({num_nodes} nodes, "
+            f"{layout_algorithm} layout)",
+            "font": {"size": 16},
+        },
         showlegend=True,
         hovermode="closest",
         margin={"b": 20, "l": 5, "r": 5, "t": 40},
@@ -920,8 +1282,15 @@ def _create_plotly_figure_layout(num_nodes, layout_algorithm):
         plot_bgcolor="white",
     )
 
+    return layout
 
-def _create_interactive_plotly_graph(graph, pos, num_nodes, layout_algorithm):
+
+def _create_interactive_plotly_graph(
+    graph: "nx.DiGraph",
+    pos: Mapping[Any, tuple[float, float]],
+    num_nodes: int,
+    layout_algorithm: str,
+) -> "go.Figure":
     """Create interactive Plotly graph visualization."""
     import plotly.graph_objects as go
 
@@ -935,7 +1304,12 @@ def _create_interactive_plotly_graph(graph, pos, num_nodes, layout_algorithm):
     return fig
 
 
-def _create_static_matplotlib_graph(graph, pos, num_nodes, layout_algorithm):
+def _create_static_matplotlib_graph(
+    graph: "nx.DiGraph",
+    pos: Mapping[Any, tuple[float, float]],
+    num_nodes: int,
+    layout_algorithm: str,
+) -> "Figure":
     """Create static matplotlib graph visualization."""
     import matplotlib.pyplot as plt
 
@@ -1026,15 +1400,20 @@ def _create_static_matplotlib_graph(graph, pos, num_nodes, layout_algorithm):
     return plt.gcf()
 
 
-def create_dependency_graph(analysis_result, viz_type, layout_algorithm="auto"):
+def create_dependency_graph(
+    analysis_result: str,
+    viz_type: str,
+    layout_algorithm: str = "auto",
+    filters: Mapping[str, Any] | None = None,
+) -> "go.Figure | Figure | None":
     """
-    Create a dependency graph visualization.
+    Create a dependency graph visualization with optional filtering.
 
     Args:
         analysis_result: Text analysis result from dependency analysis
         viz_type: Visualization type ("interactive" or "static")
-        layout_algorithm: Layout algorithm to use ("auto", "spring",
-            "circular", "kamada_kawai")
+        layout_algorithm: Layout algorithm to use
+        filters: Dictionary of filter options
 
     Returns:
         Plotly figure for interactive graphs, matplotlib figure for static graphs
@@ -1049,6 +1428,10 @@ def create_dependency_graph(analysis_result, viz_type, layout_algorithm="auto"):
     # Create NetworkX graph
     graph = _create_networkx_graph(dependencies, circular_deps, community_cookbooks)
 
+    # Apply filters if provided
+    if filters:
+        graph = _apply_graph_filters(graph, filters)
+
     if len(graph.nodes) == 0:
         return None
 
@@ -1069,7 +1452,83 @@ def create_dependency_graph(analysis_result, viz_type, layout_algorithm="auto"):
         return None
 
 
-def _parse_dependency_metrics_from_result(analysis_result):
+def _apply_graph_filters(
+    graph: "nx.DiGraph", filters: Mapping[str, Any]
+) -> "nx.DiGraph":
+    """Apply filters to the NetworkX graph."""
+    filtered_graph = graph.copy()
+
+    # Apply each filter type
+    filtered_graph = _filter_circular_dependencies_only(filtered_graph, filters)
+    filtered_graph = _filter_community_cookbooks_only(filtered_graph, filters)
+    filtered_graph = _filter_minimum_connections(filtered_graph, filters)
+
+    return filtered_graph
+
+
+def _filter_circular_dependencies_only(
+    graph: "nx.DiGraph", filters: Mapping[str, Any]
+) -> "nx.DiGraph":
+    """Filter graph to show only nodes involved in circular dependencies."""
+    if not filters.get("circular_only", False):
+        return graph
+
+    # Find nodes involved in circular dependencies
+    circular_nodes = set()
+    for source, target in filters.get("circular_deps", []):
+        circular_nodes.add(source)
+        circular_nodes.add(target)
+
+    # Remove nodes not involved in circular dependencies
+    nodes_to_remove = [n for n in graph.nodes() if n not in circular_nodes]
+    graph.remove_nodes_from(nodes_to_remove)
+
+    return graph
+
+
+def _filter_community_cookbooks_only(
+    graph: "nx.DiGraph", filters: Mapping[str, Any]
+) -> "nx.DiGraph":
+    """Filter graph to show only community cookbooks and their dependencies."""
+    if not filters.get("community_only", False):
+        return graph
+
+    community_nodes = set()
+    for node in graph.nodes():
+        if graph.nodes[node].get("community", False):
+            community_nodes.add(node)
+            # Also include dependencies of community cookbooks
+            for successor in graph.successors(node):
+                community_nodes.add(successor)
+
+    # Remove nodes not related to community cookbooks
+    nodes_to_remove = [n for n in graph.nodes() if n not in community_nodes]
+    graph.remove_nodes_from(nodes_to_remove)
+
+    return graph
+
+
+def _filter_minimum_connections(
+    graph: "nx.DiGraph", filters: Mapping[str, Any]
+) -> "nx.DiGraph":
+    """Filter graph to show only nodes with minimum connection count."""
+    min_connections = filters.get("min_connections", 0)
+    if min_connections <= 0:
+        return graph
+
+    nodes_to_remove = []
+    for node in graph.nodes():
+        degree = graph.degree(node)
+        if degree < min_connections:
+            nodes_to_remove.append(node)
+    graph.remove_nodes_from(nodes_to_remove)
+
+    return graph
+
+
+def _parse_dependency_metrics_from_result(
+    analysis_result: str,
+) -> tuple[int, int, int, int]:
     """Parse dependency analysis result to extract key metrics."""
     lines = analysis_result.split("\n")
 
@@ -1097,8 +1556,8 @@ def _parse_dependency_metrics_from_result(analysis_result):
 
 
 def _display_dependency_summary_metrics(
-    direct_deps, transitive_deps, circular_deps, community_cookbooks
-):
+    direct_deps: int, transitive_deps: int, circular_deps: int, community_cookbooks: int
+) -> None:
     """Display dependency analysis summary metrics."""
     col1, col2, col3, col4 = st.columns(4)
 
@@ -1112,85 +1571,479 @@ def _display_dependency_summary_metrics(
1112
1571
  st.metric(
1113
1572
  SECTION_CIRCULAR_DEPENDENCIES,
1114
1573
  circular_deps,
1115
- delta="⚠️ Check" if circular_deps > 0 else "OK",
1574
+ delta="Check" if circular_deps > 0 else "OK",
1116
1575
  )
1117
1576
 
1118
1577
  with col4:
1119
- st.metric("Community Cookbooks", community_cookbooks)
1578
+ st.metric(SECTION_COMMUNITY_COOKBOOKS, community_cookbooks)
1579
+
1580
+
1581
+ def _calculate_migration_impact(
1582
+ dependencies: Mapping[str, Sequence[str]],
1583
+ circular_deps: Sequence[tuple[str, str]],
1584
+ community_cookbooks: Sequence[str],
1585
+ ) -> dict[str, Any]:
1586
+ """Calculate migration impact analysis based on dependency structure."""
1587
+ impact: dict[str, Any] = {
1588
+ "risk_score": 0.0,
1589
+ "timeline_impact_weeks": 0,
1590
+ "complexity_level": "Low",
1591
+ "parallel_streams": 1,
1592
+ "critical_path": [],
1593
+ "bottlenecks": [],
1594
+ "recommendations": [],
1595
+ }
1596
+
1597
+ # Calculate risk score based on various factors
1598
+ risk_factors = {
1599
+ "circular_deps": len(circular_deps)
1600
+ * 2.0, # Each circular dep adds significant risk
1601
+ "total_deps": len(dependencies) * 0.1, # More dependencies = higher complexity
1602
+ "community_cookbooks": len(community_cookbooks)
1603
+ * 0.5, # Community cookbooks need evaluation
1604
+ "max_chain_length": _calculate_max_dependency_chain(dependencies)
1605
+ * 0.3, # Long chains are risky
1606
+ }
1607
+
1608
+ impact["risk_score"] = min(10.0, sum(risk_factors.values()))
1609
+
1610
+ # Determine complexity level
1611
+ if impact["risk_score"] > 7:
1612
+ impact["complexity_level"] = "High"
1613
+ impact["timeline_impact_weeks"] = 4
1614
+ elif impact["risk_score"] > 4:
1615
+ impact["complexity_level"] = "Medium"
1616
+ impact["timeline_impact_weeks"] = 2
1617
+ else:
1618
+ impact["complexity_level"] = "Low"
1619
+ impact["timeline_impact_weeks"] = 0
1620
+
1621
+ # Calculate parallel migration streams
1622
+ if len(dependencies) > 20:
1623
+ impact["parallel_streams"] = 3
1624
+ elif len(dependencies) > 10:
1625
+ impact["parallel_streams"] = 2
1626
+ else:
1627
+ impact["parallel_streams"] = 1
1628
+
1629
+ # Identify critical path (longest dependency chain)
1630
+ impact["critical_path"] = _find_critical_path(dependencies)
1631
+
1632
+ # Identify bottlenecks (highly depended-upon cookbooks)
1633
+ impact["bottlenecks"] = _identify_bottlenecks(dependencies)
1634
+
1635
+ # Generate recommendations
1636
+ impact["recommendations"] = _generate_impact_recommendations(
1637
+ impact, circular_deps, community_cookbooks
1638
+ )
1639
+
1640
+ return impact
1641
+
1642
+
1643
+ def _calculate_max_dependency_chain(dependencies: Mapping[str, Sequence[str]]) -> int:
1644
+ """Calculate the maximum dependency chain length."""
1645
+ max_length = 0
1646
+
1647
+ def get_chain_length(cookbook, visited=None):
1648
+ if visited is None:
1649
+ visited = set()
1650
+
1651
+ if cookbook in visited:
1652
+ return 0 # Circular dependency detected
1653
+
1654
+ visited.add(cookbook)
1655
+ deps = dependencies.get(cookbook, [])
1656
+
1657
+ if not deps:
1658
+ return 1
1659
+
1660
+ max_child_length = 0
1661
+ for dep in deps:
1662
+ child_length = get_chain_length(dep, visited.copy())
1663
+ max_child_length = max(max_child_length, child_length)
1664
+
1665
+ return 1 + max_child_length
1666
+
1667
+ for cookbook in dependencies:
1668
+ length = get_chain_length(cookbook)
1669
+ max_length = max(max_length, length)
1670
+
1671
+ return max_length
1672
+
1673
+
1674
+ def _find_critical_path(dependencies: Mapping[str, Sequence[str]]) -> list[str]:
1675
+ """Find the critical path (longest dependency chain)."""
1676
+ longest_chain: list[str] = []
1677
+
1678
+ def find_longest_chain(cookbook, visited=None):
1679
+ if visited is None:
1680
+ visited = set()
1681
+
1682
+ if cookbook in visited:
1683
+ return [] # Circular dependency
1684
+
1685
+ visited.add(cookbook)
1686
+ deps = dependencies.get(cookbook, [])
1120
1687
 
1688
+ if not deps:
1689
+ return [cookbook]
1690
+
1691
+ longest_child_chain: list[str] = []
1692
+ for dep in deps:
1693
+ child_chain = find_longest_chain(dep, visited.copy())
1694
+ if len(child_chain) > len(longest_child_chain):
1695
+ longest_child_chain = child_chain
1696
+
1697
+ return [cookbook] + longest_child_chain
1698
+
1699
+ for cookbook in dependencies:
1700
+ chain = find_longest_chain(cookbook)
1701
+ if len(chain) > len(longest_chain):
1702
+ longest_chain = chain
1703
+
1704
+ return longest_chain
1705
+
1706
+
1707
+ def _identify_bottlenecks(
1708
+ dependencies: Mapping[str, Sequence[str]],
1709
+ ) -> list[dict[str, Any]]:
1710
+ """Identify bottleneck cookbooks (highly depended upon)."""
1711
+ # Count how many times each cookbook is depended upon
1712
+ dependency_counts: dict[str, int] = {}
1713
+
1714
+ for deps in dependencies.values():
1715
+ for dep in deps:
1716
+ dependency_counts[dep] = dependency_counts.get(dep, 0) + 1
1717
+
1718
+ # Find cookbooks with high dependency counts
1719
+ bottlenecks = []
1720
+ max_count: int = max(dependency_counts.values()) if dependency_counts else 0
1721
+
1722
+ for cookbook, count in dependency_counts.items():
1723
+ if count >= 5:
1724
+ risk_level = "High"
1725
+ elif count >= 3:
1726
+ risk_level = "Medium"
1727
+ else:
1728
+ risk_level = "Low"
1729
+
1730
+ if count >= 3 or (max_count > 1 and count == max_count):
1731
+ bottlenecks.append(
1732
+ {
1733
+ "cookbook": cookbook,
1734
+ "dependent_count": count,
1735
+ "risk_level": risk_level,
1736
+ }
1737
+ )
1738
+
1739
+ return sorted(bottlenecks, key=lambda x: x["dependent_count"], reverse=True)
1740
+
1741
+
1742
+ def _generate_impact_recommendations(
1743
+ impact: Mapping[str, Any],
1744
+ circular_deps: Sequence[tuple[str, str]],
1745
+ community_cookbooks: Sequence[str],
1746
+ ) -> list[dict[str, Any]]:
1747
+ """Generate recommendations based on impact analysis."""
1748
+ recommendations = []
1749
+
1750
+ if circular_deps:
1751
+ recommendations.append(
1752
+ {
1753
+ "priority": "Critical",
1754
+ "action": (
1755
+ f"Resolve {len(circular_deps)} circular dependencies "
1756
+ "before migration"
1757
+ ),
1758
+ "impact": "Prevents successful migration",
1759
+ }
1760
+ )
1761
+
1762
+ if impact["parallel_streams"] > 1:
1763
+ recommendations.append(
1764
+ {
1765
+ "priority": "High",
1766
+ "action": (
1767
+ f"Plan {impact['parallel_streams']} parallel migration streams"
1768
+ ),
1769
+ "impact": (
1770
+ f"Reduces timeline by ~{impact['parallel_streams'] * 2} weeks"
1771
+ ),
1772
+ }
1773
+ )
1774
+
1775
+ if community_cookbooks:
1776
+ recommendations.append(
1777
+ {
1778
+ "priority": "Medium",
1779
+ "action": (
1780
+ f"Evaluate {len(community_cookbooks)} community cookbooks "
1781
+ "for Ansible Galaxy alternatives"
1782
+ ),
1783
+ "impact": "Reduces custom development effort",
1784
+ }
1785
+ )
1786
+
1787
+ if impact["bottlenecks"]:
1788
+ bottleneck_names = [b["cookbook"] for b in impact["bottlenecks"][:3]]
1789
+ recommendations.append(
1790
+ {
1791
+ "priority": "Medium",
1792
+ "action": (
1793
+ f"Migrate bottleneck cookbooks first: {', '.join(bottleneck_names)}"
1794
+ ),
1795
+ "impact": "Unblocks dependent cookbook migrations",
1796
+ }
1797
+ )
1798
+
1799
+ if impact["timeline_impact_weeks"] > 0:
1800
+ recommendations.append(
1801
+ {
1802
+ "priority": "Low",
1803
+ "action": (
1804
+ f"Allocate additional {impact['timeline_impact_weeks']} "
1805
+ "weeks for complexity"
1806
+ ),
1807
+ "impact": "Ensures successful migration completion",
1808
+ }
1809
+ )
1121
1810
 
1122
- def _handle_graph_caching():
1123
- """Handle graph data caching logic."""
1124
- # Cache control
1125
- with st.expander("⚙️ Graph Settings"):
1811
+ return recommendations
1812
+
1813
+
1814
+ def _display_detailed_impact_analysis(
1815
+ impact_analysis: Mapping[str, Any],
1816
+ dependencies: Mapping[str, Sequence[str]],
1817
+ circular_deps: Sequence[tuple[str, str]],
1818
+ community_cookbooks: Sequence[str],
1819
+ ) -> None:
1820
+ """Display detailed impact analysis breakdown."""
1821
+ _display_risk_assessment_breakdown(dependencies, circular_deps, community_cookbooks)
1822
+ _display_critical_path_analysis(impact_analysis)
1823
+ _display_migration_bottlenecks(impact_analysis)
1824
+ _display_strategic_recommendations(impact_analysis)
1825
+
1826
+
1827
+ def _display_risk_assessment_breakdown(
1828
+ dependencies: Mapping[str, Sequence[str]],
1829
+ circular_deps: Sequence[tuple[str, str]],
1830
+ community_cookbooks: Sequence[str],
1831
+ ) -> None:
1832
+ """Display risk assessment breakdown."""
1833
+ st.markdown("### Risk Assessment Breakdown")
1834
+
1835
+ # Risk factors
1836
+ risk_factors = {
1837
+ "Circular Dependencies": len(circular_deps) * 2.0,
1838
+ "Total Dependencies": len(dependencies) * 0.1,
1839
+ "Community Cookbooks": len(community_cookbooks) * 0.5,
1840
+ "Dependency Chain Length": _calculate_max_dependency_chain(dependencies) * 0.3,
1841
+ }
1842
+
1843
+ for factor, score in risk_factors.items():
1844
+ if score > 0:
1845
+ st.write(f"• **{factor}**: {score:.1f} points")
1846
+
1847
+
1848
+ def _display_critical_path_analysis(impact_analysis: Mapping[str, Any]) -> None:
1849
+ """Display critical path analysis."""
1850
+ st.markdown("### Critical Path Analysis")
1851
+ if impact_analysis["critical_path"]:
1852
+ st.write("**Longest dependency chain:**")
1853
+ st.code(" → ".join(impact_analysis["critical_path"]), language="text")
1854
+ else:
1855
+ st.write("No dependency chains identified.")
1856
+
1857
+
1858
+ def _display_migration_bottlenecks(impact_analysis: Mapping[str, Any]) -> None:
1859
+ """Display migration bottlenecks."""
1860
+ st.markdown("### Migration Bottlenecks")
1861
+ if impact_analysis["bottlenecks"]:
1862
+ for bottleneck in impact_analysis["bottlenecks"]:
1863
+ risk_level = bottleneck["risk_level"]
1864
+ if risk_level == "High":
1865
+ risk_icon = "HIGH"
1866
+ elif risk_level == "Medium":
1867
+ risk_icon = "MEDIUM"
1868
+ else:
1869
+ risk_icon = "LOW"
1870
+ st.write(
1871
+ f"• {risk_icon} **{bottleneck['cookbook']}**: "
1872
+ f"{bottleneck['dependent_count']} dependents "
1873
+ f"({risk_level} risk)"
1874
+ )
1875
+ else:
1876
+ st.write("No significant bottlenecks identified.")
1877
+
1878
+
1879
+ def _display_strategic_recommendations(impact_analysis: Mapping[str, Any]) -> None:
1880
+ """Display strategic recommendations."""
1881
+ st.markdown("### Strategic Recommendations")
1882
+ for rec in impact_analysis["recommendations"]:
1883
+ priority = rec["priority"]
1884
+ if priority == "Critical":
1885
+ priority_icon = "CRITICAL"
1886
+ elif priority == "High":
1887
+ priority_icon = "HIGH"
1888
+ else:
1889
+ priority_icon = "MEDIUM"
1890
+ st.write(f"• {priority_icon} **{priority}**: {rec['action']}")
1891
+ st.write(f" *Impact*: {rec['impact']}")
1892
+
1893
+
1894
+ def _handle_graph_caching() -> None:
1895
+ """Handle graph caching controls and cleanup."""
1896
+ st.subheader("Graph Cache Management")
1897
+
1898
+ col1, col2, col3 = st.columns([1, 1, 2])
1899
+
1900
+ with col1:
1901
+ # Toggle caching on/off
1126
1902
  cache_enabled = st.checkbox(
1127
1903
  "Enable Graph Caching",
1128
1904
  value=st.session_state.get("graph_cache_enabled", True),
1129
- help="Cache graph data to improve performance for repeated views",
1905
+ help="Cache graph visualizations to improve performance for repeated views",
1130
1906
  )
1131
- st.session_state["graph_cache_enabled"] = cache_enabled
1907
+ st.session_state.graph_cache_enabled = cache_enabled
1132
1908
 
1909
+ with col2:
1910
+ # Clear cache button
1133
1911
  if st.button(
1134
- "🗑️ Clear Graph Cache", help="Clear cached graph data to free memory"
1912
+ "Clear Cache", help="Clear all cached graph data", key="clear_cache"
1135
1913
  ):
1136
- # Clear all cached graphs
1137
- keys_to_remove = [k for k in st.session_state if k.startswith("graph_")]
1138
- for key in keys_to_remove:
1914
+ # Find and remove all graph cache keys
1915
+ cache_keys = [key for key in st.session_state if key.startswith("graph_")]
1916
+ for key in cache_keys:
1139
1917
  del st.session_state[key]
1140
- st.success("Graph cache cleared!")
1918
+ st.success(f"Cleared {len(cache_keys)} cached graphs")
1141
1919
  st.rerun()
1142
1920
 
1921
+ with col3:
1922
+ # Cache statistics
1923
+ cache_keys = [key for key in st.session_state if key.startswith("graph_")]
1924
+ cache_count = len(cache_keys)
1925
+
1926
+ if cache_count > 0:
1927
+ # Estimate memory usage (rough approximation)
1928
+ estimated_memory = cache_count * 50 # Rough estimate: 50KB per cached graph
1929
+ st.metric(
1930
+ "Cached Graphs",
1931
+ f"{cache_count} items",
1932
+ f"~{estimated_memory}KB estimated",
1933
+ )
1934
+ else:
1935
+ st.info("📭 No graphs currently cached")
1936
+
1937
+ # Cache status indicator
1938
+ if cache_enabled:
1939
+ st.success(
1940
+ "Graph caching is enabled - visualizations will be "
1941
+ "cached for faster loading"
1942
+ )
1943
+ else:
1944
+ st.warning(
1945
+ "Graph caching is disabled - each visualization will be recalculated"
1946
+ )
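The controls above work because `st.session_state` persists across Streamlit's script reruns for the lifetime of a browser session, so the `graph_`-prefixed entries behave like a per-session memo table that survives `st.rerun()`. The same pattern in miniature, with a plain dict standing in for `st.session_state` so the sketch runs outside Streamlit:

```python
# Per-session memoization pattern, as used by the graph cache above.
session_state: dict = {}  # stand-in for st.session_state


def cached(key: str, compute):
    """Return the cached value for key, computing it on first use."""
    if key not in session_state:
        session_state[key] = compute()
    return session_state[key]


first = cached("graph_demo", lambda: object())
second = cached("graph_demo", lambda: object())
assert first is second  # the second call is served from the cache
```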
1947
+
1143
1948
 
1144
- def _display_dependency_graph_visualization(analysis_result, viz_type, selected_layout):
1145
- """Display the dependency graph visualization section."""
1949
+ def _display_dependency_graph_visualization(
1950
+ analysis_result: str,
1951
+ viz_type: str,
1952
+ selected_layout: str,
1953
+ show_circular_only: bool,
1954
+ show_community_only: bool,
1955
+ min_connections: int,
1956
+ ) -> None:
1957
+ """Display the dependency graph visualization section with filtering."""
1146
1958
  try:
1147
- # Create cache key for graph data
1148
- cache_key = f"graph_{hash(analysis_result)}_{viz_type}_{selected_layout}"
1959
+ # Parse dependencies for filtering
1960
+ _, circular_deps, _ = _parse_dependency_analysis(analysis_result)
1961
+
1962
+ # Prepare filters
1963
+ filters = {
1964
+ "circular_only": show_circular_only,
1965
+ "community_only": show_community_only,
1966
+ "min_connections": min_connections,
1967
+ "circular_deps": circular_deps,
1968
+ }
1149
1969
 
1150
- # Check if we have cached graph data
1151
- if cache_key in st.session_state and st.session_state.get(
1152
- "graph_cache_enabled", True
1153
- ):
1154
- graph_data = st.session_state[cache_key]
1155
- st.info("📋 Using cached graph data")
1156
- else:
1157
- # Create dependency graph
1970
+ # Try to get cached graph data
1971
+ graph_data = _get_cached_graph_data(
1972
+ analysis_result, viz_type, selected_layout, filters
1973
+ )
1974
+
1975
+ if graph_data is None:
1976
+ # Create dependency graph with filters
1158
1977
  graph_data = create_dependency_graph(
1159
- analysis_result, viz_type, selected_layout
1978
+ analysis_result, viz_type, selected_layout, filters
1160
1979
  )
1161
-
1162
1980
  # Cache the result
1163
- if graph_data is not None and st.session_state.get(
1164
- "graph_cache_enabled", True
1165
- ):
1166
- st.session_state[cache_key] = graph_data
1167
-
1168
- _handle_graph_caching()
1981
+ _cache_graph_data(
1982
+ analysis_result, viz_type, selected_layout, filters, graph_data
1983
+ )
1169
1984
 
1170
1985
  if graph_data:
1171
1986
  _display_graph_with_export_options(graph_data, viz_type)
1172
1987
  else:
1173
- st.info("No dependency relationships found to visualize.")
1988
+ st.info(
1989
+ "No dependency relationships found to visualise after applying filters."
1990
+ )
1174
1991
 
1175
1992
  except Exception as e:
1176
1993
  _handle_graph_visualization_error(e, analysis_result)
1177
1994
 
1178
1995
 
1179
- def _display_graph_with_export_options(graph_data, viz_type):
1996
+ def _get_cached_graph_data(
1997
+ analysis_result: str,
1998
+ viz_type: str,
1999
+ selected_layout: str,
2000
+ filters: Mapping[str, Any],
2001
+ ) -> Any | None:
2002
+ """Get cached graph data if available."""
2003
+ cache_key = (
2004
+ f"graph_{hash(analysis_result)}_{viz_type}_{selected_layout}_{str(filters)}"
2005
+ )
2006
+
2007
+ if cache_key in st.session_state and st.session_state.get(
2008
+ "graph_cache_enabled", True
2009
+ ):
2010
+ graph_data = st.session_state[cache_key]
2011
+ st.info("Using cached graph data")
2012
+ return graph_data
2013
+
2014
+ return None
2015
+
2016
+
2017
+ def _cache_graph_data(
2018
+ analysis_result: str,
2019
+ viz_type: str,
2020
+ selected_layout: str,
2021
+ filters: Mapping[str, Any],
2022
+ graph_data: Any,
2023
+ ) -> None:
2024
+ """Cache graph data if caching is enabled."""
2025
+ if graph_data is not None and st.session_state.get("graph_cache_enabled", True):
2026
+ cache_key = (
2027
+ f"graph_{hash(analysis_result)}_{viz_type}_{selected_layout}_{str(filters)}"
2028
+ )
2029
+ st.session_state[cache_key] = graph_data
2030
+
2031
+
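Both `_get_cached_graph_data` and `_cache_graph_data` rebuild the same key string, and both rely on Python's built-in `hash()`, which is salted per process for `str` inputs: keys are stable within one running app but change across restarts. That is harmless for `st.session_state`, which lives and dies with the process, but it would break any persistent cache. A hedged sketch of a single, deterministic key builder; `_make_graph_cache_key` is a hypothetical name, not part of the released module:

```python
import hashlib
from typing import Any


def _make_graph_cache_key(
    analysis_result: str, viz_type: str, layout: str, filters: dict[str, Any]
) -> str:
    """Deterministic variant of the cache key assembled inline above."""
    digest = hashlib.sha256(analysis_result.encode("utf-8")).hexdigest()[:16]
    return f"graph_{digest}_{viz_type}_{layout}_{sorted(filters.items())!r}"
```

Factoring the key into one helper would also keep the two call sites from drifting apart.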
2032
+ def _display_graph_with_export_options(graph_data: Any, viz_type: str) -> None:
1180
2033
  """Display graph and provide export options."""
1181
2034
  if viz_type == "interactive":
1182
2035
  # Interactive Plotly graph
1183
- st.plotly_chart(graph_data, use_container_width=True)
2036
+ st.plotly_chart(graph_data, width="stretch")
1184
2037
 
1185
2038
  # Export options for interactive graph
1186
2039
  st.subheader("Export Graph")
1187
- col1, col2, col3 = st.columns(3)
2040
+ col1, col2, col3, col4 = st.columns(4)
1188
2041
 
1189
2042
  with col1:
1190
2043
  # Export as HTML
1191
2044
  html_content = graph_data.to_html(full_html=False, include_plotlyjs="cdn")
1192
2045
  st.download_button(
1193
- label="📄 Export as HTML",
2046
+ label="HTML",
1194
2047
  data=html_content,
1195
2048
  file_name="dependency_graph.html",
1196
2049
  mime="text/html",
@@ -1201,7 +2054,7 @@ def _display_graph_with_export_options(graph_data, viz_type):
1201
2054
  # Export as JSON
1202
2055
  json_data = graph_data.to_json()
1203
2056
  st.download_button(
1204
- label="📊 Export as JSON",
2057
+ label="JSON",
1205
2058
  data=json_data,
1206
2059
  file_name="dependency_graph.json",
1207
2060
  mime=MIME_APPLICATION_JSON,
@@ -1211,26 +2064,42 @@ def _display_graph_with_export_options(graph_data, viz_type):
1211
2064
  with col3:
1212
2065
  # Export as PNG (requires kaleido)
1213
2066
  try:
1214
- import plotly.io as pio # type: ignore[import-untyped]
2067
+ import plotly.io as pio
1215
2068
 
1216
- png_data = pio.to_image(graph_data, format="png")
2069
+ png_data = pio.to_image(graph_data, format="png", scale=2)
1217
2070
  st.download_button(
1218
- label="🖼️ Export as PNG",
2071
+ label="PNG (High-res)",
1219
2072
  data=png_data,
1220
2073
  file_name="dependency_graph.png",
1221
2074
  mime="image/png",
1222
- help="Download graph as PNG image",
2075
+ help="Download graph as high-resolution PNG",
1223
2076
  )
1224
2077
  except ImportError:
1225
2078
  st.info("PNG export requires additional dependencies")
1226
2079
 
2080
+ with col4:
2081
+ # Export as PDF
2082
+ try:
2083
+ import plotly.io as pio
2084
+
2085
+ pdf_data = pio.to_image(graph_data, format="pdf")
2086
+ st.download_button(
2087
+ label="PDF",
2088
+ data=pdf_data,
2089
+ file_name="dependency_graph.pdf",
2090
+ mime="application/pdf",
2091
+ help="Download graph as PDF document",
2092
+ )
2093
+ except ImportError:
2094
+ st.info("PDF export requires additional dependencies")
2095
+
1227
2096
  else:
1228
2097
  # Static matplotlib graph
1229
2098
  st.pyplot(graph_data)
1230
2099
 
1231
2100
  # Export options for static graph
1232
2101
  st.subheader("Export Graph")
1233
- col1, col2 = st.columns(2)
2102
+ col1, col2, col3, col4 = st.columns(4)
1234
2103
 
1235
2104
  with col1:
1236
2105
  # Export as PNG
@@ -1240,7 +2109,7 @@ def _display_graph_with_export_options(graph_data, viz_type):
1240
2109
  graph_data.savefig(buf, format="png", dpi=300, bbox_inches="tight")
1241
2110
  buf.seek(0)
1242
2111
  st.download_button(
1243
- label="🖼️ Export as PNG",
2112
+ label="PNG (High-res)",
1244
2113
  data=buf.getvalue(),
1245
2114
  file_name="dependency_graph.png",
1246
2115
  mime="image/png",
@@ -1253,17 +2122,43 @@ def _display_graph_with_export_options(graph_data, viz_type):
1253
2122
  graph_data.savefig(buf_svg, format="svg", bbox_inches="tight")
1254
2123
  buf_svg.seek(0)
1255
2124
  st.download_button(
1256
- label="📈 Export as SVG",
2125
+ label="SVG",
1257
2126
  data=buf_svg.getvalue(),
1258
2127
  file_name="dependency_graph.svg",
1259
2128
  mime="image/svg+xml",
1260
2129
  help="Download graph as scalable SVG",
1261
2130
  )
1262
2131
 
2132
+ with col3:
2133
+ # Export as PDF
2134
+ buf_pdf = io.BytesIO()
2135
+ graph_data.savefig(buf_pdf, format="pdf", bbox_inches="tight")
2136
+ buf_pdf.seek(0)
2137
+ st.download_button(
2138
+ label="PDF",
2139
+ data=buf_pdf.getvalue(),
2140
+ file_name="dependency_graph.pdf",
2141
+ mime="application/pdf",
2142
+ help="Download graph as PDF document",
2143
+ )
2144
+
2145
+ with col4:
2146
+ # Export as EPS
2147
+ buf_eps = io.BytesIO()
2148
+ graph_data.savefig(buf_eps, format="eps", bbox_inches="tight")
2149
+ buf_eps.seek(0)
2150
+ st.download_button(
2151
+ label="EPS",
2152
+ data=buf_eps.getvalue(),
2153
+ file_name="dependency_graph.eps",
2154
+ mime="application/postscript",
2155
+ help="Download graph as EPS vector format",
2156
+ )
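One caveat on the export fallbacks: `pio.to_image` delegates static rendering to the optional kaleido package, and when kaleido is missing Plotly typically raises `ValueError` rather than `ImportError`, so the `except ImportError` guards above may not catch that case. The matplotlib branch needs no extra backend for png/svg/pdf/eps. A sketch of a single guarded exporter, assuming nothing beyond plotly and matplotlib themselves; `try_export` is a hypothetical helper:

```python
import io


def try_export(figure, fmt: str) -> bytes | None:
    """Best-effort static export; returns None when a backend is missing."""
    try:
        if hasattr(figure, "savefig"):  # matplotlib Figure
            buf = io.BytesIO()
            figure.savefig(buf, format=fmt, bbox_inches="tight")
            return buf.getvalue()
        import plotly.io as pio  # Plotly figure path (needs kaleido)

        return pio.to_image(figure, format=fmt)
    except (ImportError, ValueError) as exc:
        print(f"{fmt} export unavailable: {exc}")
        return None
```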
1263
2157
 
1264
- def _handle_graph_visualization_error(error, analysis_result):
2158
+
2159
+ def _handle_graph_visualization_error(error: Exception, analysis_result: str) -> None:
1265
2160
  """Handle graph visualization errors with fallback display."""
1266
- st.error("**Graph Visualization Error**")
2161
+ st.error("**Graph Visualization Error**")
1267
2162
  with st.expander("Error Details"):
1268
2163
  st.code(str(error), language="text")
1269
2164
  st.markdown("""
@@ -1279,7 +2174,7 @@ def _handle_graph_visualization_error(error, analysis_result):
1279
2174
  """)
1280
2175
 
1281
2176
  # Fallback: show text summary
1282
- st.info("📄 Showing text-based dependency summary instead:")
2177
+ st.info("Showing text-based dependency summary instead:")
1283
2178
  st.text_area(
1284
2179
  "Dependency Analysis Text",
1285
2180
  analysis_result,
@@ -1288,7 +2183,7 @@ def _handle_graph_visualization_error(error, analysis_result):
1288
2183
  )
1289
2184
 
1290
2185
 
1291
- def _display_dependency_analysis_sections(analysis_result):
2186
+ def _display_dependency_analysis_sections(analysis_result: str) -> None:
1292
2187
  """Display dependency analysis results in expandable sections."""
1293
2188
  # Split analysis into sections
1294
2189
  sections = analysis_result.split("\n## ")
@@ -1300,33 +2195,38 @@ def _display_dependency_analysis_sections(analysis_result):
1300
2195
 
1301
2196
  # Add expanders for different sections
1302
2197
  if "Migration Order Recommendations" in section:
1303
- with st.expander("📋 Migration Order Recommendations"):
2198
+ with st.expander("Migration Order Recommendations"):
1304
2199
  st.markdown(
1305
2200
  section.replace("## Migration Order Recommendations", "")
1306
2201
  )
1307
2202
  elif "Dependency Graph" in section:
1308
- with st.expander("🔗 Dependency Graph"):
2203
+ with st.expander("Dependency Graph"):
1309
2204
  st.markdown(section.replace("## Dependency Graph", ""))
1310
- elif "Circular Dependencies" in section:
1311
- with st.expander(f"⚠️ {SECTION_CIRCULAR_DEPENDENCIES}"):
1312
- st.markdown(section.replace("## Circular Dependencies", ""))
1313
- elif "Community Cookbooks" in section:
1314
- with st.expander("🌐 Community Cookbooks"):
1315
- st.markdown(section.replace("## Community Cookbooks", ""))
1316
- elif "Migration Impact Analysis" in section:
1317
- with st.expander("📊 Migration Impact Analysis"):
1318
- st.markdown(section.replace("## Migration Impact Analysis", ""))
2205
+ with st.expander(f"{SECTION_CIRCULAR_DEPENDENCIES}"):
2206
+ st.markdown(
2207
+ section.replace(f"## {SECTION_CIRCULAR_DEPENDENCIES}", "")
2208
+ )
2209
+ with st.expander(f"{SECTION_COMMUNITY_COOKBOOKS}"):
2210
+ st.markdown(
2211
+ section.replace(f"## {SECTION_COMMUNITY_COOKBOOKS}", "")
2212
+ )
2213
+ elif SECTION_MIGRATION_IMPACT_ANALYSIS in section:
2214
+ with st.expander(SECTION_MIGRATION_IMPACT_ANALYSIS):
2215
+ header_text = f"## {SECTION_MIGRATION_IMPACT_ANALYSIS}"
2216
+ st.markdown(section.replace(header_text, ""))
1319
2217
  else:
1320
2218
  st.markdown(section)
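The `split("\n## ")` trick isolates each second-level markdown section, and only the first fragment can still carry a literal `## ` prefix, which is why the `replace("## ...", "")` calls above are effectively defensive. For example:

```python
report = (
    "# Dependency Analysis\n"
    "Intro text\n"
    "## Dependency Graph\n"
    "graph body\n"
    "## Circular Dependencies\n"
    "cycle body\n"
)
sections = report.split("\n## ")
assert sections[0].startswith("# Dependency Analysis")
assert sections[1] == "Dependency Graph\ngraph body"
assert sections[2] == "Circular Dependencies\ncycle body\n"
```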
1321
2219
 
1322
2220
 
1323
- def _display_migration_recommendations(circular_deps, community_cookbooks, direct_deps):
2221
+ def _display_migration_recommendations(
2222
+ circular_deps: int, community_cookbooks: int, direct_deps: int
2223
+ ) -> None:
1324
2224
  """Display migration recommendations based on analysis results."""
1325
2225
  st.subheader("Migration Recommendations")
1326
2226
 
1327
2227
  if circular_deps > 0:
1328
2228
  st.error(
1329
- "⚠️ **Critical Issue**: Circular dependencies detected. "
2229
+ "**Critical Issue**: Circular dependencies detected. "
1330
2230
  "Resolve before migration."
1331
2231
  )
1332
2232
  st.markdown("""
@@ -1339,7 +2239,7 @@ def _display_migration_recommendations(circular_deps, community_cookbooks, direc
1339
2239
 
1340
2240
  if community_cookbooks > 0:
1341
2241
  st.success(
1342
- f"**Good News**: {community_cookbooks} community cookbooks identified."
2242
+ f"**Good News**: {community_cookbooks} community cookbooks identified."
1343
2243
  )
1344
2244
  st.markdown("""
1345
2245
  **Recommendations:**
@@ -1349,7 +2249,7 @@ def _display_migration_recommendations(circular_deps, community_cookbooks, direc
1349
2249
  """)
1350
2250
 
1351
2251
  if direct_deps > 10:
1352
- st.warning("⚠️ **Complex Dependencies**: High dependency count detected.")
2252
+ st.warning("**Complex Dependencies**: High dependency count detected.")
1353
2253
  st.markdown("""
1354
2254
  **Consider:**
1355
2255
  - Breaking down monolithic cookbooks
@@ -1358,15 +2258,26 @@ def _display_migration_recommendations(circular_deps, community_cookbooks, direc
1358
2258
  """)
1359
2259
 
1360
2260
 
2261
+ def health_check() -> dict[str, str]:
2262
+ """Return health check information for the SousChef UI service."""
2263
+ from souschef.core.constants import VERSION
2264
+
2265
+ return {
2266
+ "status": "healthy",
2267
+ "service": "souschef-ui",
2268
+ "version": VERSION,
2269
+ }
2270
+
2271
+
1361
2272
  def _display_dependency_export_options(
1362
- analysis_result,
1363
- cookbook_path,
1364
- depth,
1365
- direct_deps,
1366
- transitive_deps,
1367
- circular_deps,
1368
- community_cookbooks,
1369
- ):
2273
+ analysis_result: str,
2274
+ cookbook_path: str,
2275
+ depth: str,
2276
+ direct_deps: int,
2277
+ transitive_deps: int,
2278
+ circular_deps: int,
2279
+ community_cookbooks: int,
2280
+ ) -> None:
1370
2281
  """Display export options for dependency analysis."""
1371
2282
  st.subheader("Export Analysis")
1372
2283
 
@@ -1374,7 +2285,7 @@ def _display_dependency_export_options(
1374
2285
 
1375
2286
  with col1:
1376
2287
  st.download_button(
1377
- label="📥 Download Full Analysis",
2288
+ label="Download Full Analysis",
1378
2289
  data=analysis_result,
1379
2290
  file_name="dependency_analysis.md",
1380
2291
  mime=MIME_TEXT_MARKDOWN,
@@ -1398,7 +2309,7 @@ def _display_dependency_export_options(
1398
2309
  import json
1399
2310
 
1400
2311
  st.download_button(
1401
- label="📊 Download JSON Summary",
2312
+ label="Download JSON Summary",
1402
2313
  data=json.dumps(analysis_json, indent=2),
1403
2314
  file_name="dependency_analysis.json",
1404
2315
  mime=MIME_APPLICATION_JSON,
@@ -1406,13 +2317,10 @@ def _display_dependency_export_options(
1406
2317
  )
1407
2318
 
1408
2319
 
1409
- def display_dependency_analysis_results():
1410
- """Display dependency analysis results."""
1411
- analysis_result = st.session_state.dep_analysis_result
1412
- cookbook_path = st.session_state.dep_cookbook_path
1413
- depth = st.session_state.dep_depth
1414
- viz_type = st.session_state.get("dep_viz_type", "text")
1415
-
2320
+ def _display_dependency_analysis_summary(
2321
+ analysis_result: str, cookbook_path: str, depth: str
2322
+ ) -> None:
2323
+ """Display dependency analysis summary section."""
1416
2324
  # Summary metrics
1417
2325
  st.subheader("Dependency Analysis Summary")
1418
2326
 
@@ -1427,38 +2335,201 @@ def display_dependency_analysis_results():
1427
2335
  )
1428
2336
 
1429
2337
  # Analysis depth indicator
1430
- st.info(f"Analysis performed with **{depth}** depth on: `{cookbook_path}`")
1431
-
1432
- # Graph Visualization Section
1433
- if viz_type in ["graph", "interactive"]:
1434
- st.subheader("📊 Dependency Graph Visualization")
1435
-
1436
- # Layout algorithm selector
1437
- layout_options = ["auto", "spring", "circular", "kamada_kawai"]
1438
- selected_layout = st.selectbox(
1439
- "Layout Algorithm",
1440
- layout_options,
1441
- help="Choose graph layout algorithm. 'auto' selects best "
1442
- "algorithm based on graph size.",
1443
- format_func=lambda x: {
1444
- "auto": "Auto (recommended)",
1445
- "spring": "Spring Layout",
1446
- "circular": "Circular Layout",
1447
- "kamada_kawai": "Kamada-Kawai Layout",
1448
- }.get(x, str(x)),
2338
+ analysis_msg = f"Analysis performed with **{depth}** depth on: `{cookbook_path}`"
2339
+ st.info(analysis_msg)
2340
+
2341
+
2342
+ def _display_graph_visualization_section(analysis_result: str, viz_type: str) -> None:
2343
+ """Display graph visualization section."""
2344
+ if viz_type not in ["graph", "interactive"]:
2345
+ return
2346
+
2347
+ st.subheader("Dependency Graph Visualization")
2348
+
2349
+ # Parse dependencies for filtering and analysis
2350
+ _ = _parse_dependency_analysis(analysis_result)
2351
+
2352
+ # Layout algorithm selector
2353
+ layout_options = [
2354
+ "auto",
2355
+ "spring",
2356
+ "circular",
2357
+ "kamada_kawai",
2358
+ "shell",
2359
+ "spectral",
2360
+ "force_directed",
2361
+ "random",
2362
+ ]
2363
+ selected_layout = st.selectbox(
2364
+ "Layout Algorithm",
2365
+ layout_options,
2366
+ help="Choose graph layout algorithm. 'auto' selects best "
2367
+ "algorithm based on graph size.",
2368
+ format_func=lambda x: {
2369
+ "auto": "Auto (recommended)",
2370
+ "spring": "Spring Layout",
2371
+ "circular": "Circular Layout",
2372
+ "kamada_kawai": "Kamada-Kawai Layout",
2373
+ "shell": "Shell Layout (hierarchical)",
2374
+ "spectral": "Spectral Layout",
2375
+ "force_directed": "Force Directed",
2376
+ "random": "Random Layout",
2377
+ }.get(x, str(x)),
2378
+ )
2379
+
2380
+ # Graph cache management
2381
+ _handle_graph_caching()
2382
+
2383
+ # Graph Filtering Options
2384
+ st.subheader("Graph Filtering & Analysis")
2385
+
2386
+ col1, col2, col3 = st.columns(3)
2387
+
2388
+ with col1:
2389
+ show_circular_only = st.checkbox(
2390
+ "Show Circular Dependencies Only",
2391
+ help=("Filter graph to show only nodes involved in circular dependencies"),
2392
+ )
2393
+
2394
+ with col2:
2395
+ show_community_only = st.checkbox(
2396
+ "Show Community Cookbooks Only",
2397
+ help=(
2398
+ "Filter graph to show only community cookbooks and their dependencies"
2399
+ ),
2400
+ )
2401
+
2402
+ with col3:
2403
+ min_connections = st.slider(
2404
+ "Minimum Connections",
2405
+ min_value=0,
2406
+ max_value=10,
2407
+ value=0,
2408
+ help="Show only nodes with at least this many connections",
2409
+ )
2410
+
2411
+ _display_dependency_graph_visualization(
2412
+ analysis_result,
2413
+ viz_type,
2414
+ selected_layout,
2415
+ show_circular_only,
2416
+ show_community_only,
2417
+ min_connections,
2418
+ )
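The checkbox and slider values collected above reach `create_dependency_graph` as a plain `filters` mapping. How the graph builder applies them is not shown in this diff; as one plausible reading, a `min_connections` filter over a networkx graph could look like this (a sketch under that assumption, not the shipped implementation):

```python
import networkx as nx


def filter_by_connections(graph: nx.DiGraph, min_connections: int) -> nx.DiGraph:
    """Keep only nodes whose total degree meets the threshold."""
    keep = [node for node in graph if graph.degree(node) >= min_connections]
    return graph.subgraph(keep).copy()


g = nx.DiGraph([("app", "nginx"), ("app", "postgres"), ("nginx", "ssl")])
filtered = filter_by_connections(g, min_connections=2)
assert set(filtered) == {"app", "nginx"}
```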
2419
+
2420
+
2421
+ def _display_impact_analysis_section(analysis_result: str) -> None:
2422
+ """Display migration impact analysis section."""
2423
+ # Parse dependencies for impact analysis
2424
+ dependencies, circular_deps, community_cookbooks = _parse_dependency_analysis(
2425
+ analysis_result
2426
+ )
2427
+
2428
+ # Impact Analysis Section
2429
+ st.subheader("Migration Impact Analysis")
2430
+
2431
+ if not dependencies:
2432
+ st.info("No dependencies found for impact analysis.")
2433
+ return
2434
+
2435
+ impact_analysis = _calculate_migration_impact(
2436
+ dependencies, circular_deps, community_cookbooks
2437
+ )
2438
+
2439
+ # Calculate risk score delta
2440
+ risk_score = impact_analysis["risk_score"]
2441
+ if risk_score > 7:
2442
+ risk_delta = "High"
2443
+ elif risk_score > 4:
2444
+ risk_delta = "Medium"
2445
+ else:
2446
+ risk_delta = "Low"
2447
+
2448
+ col1, col2, col3, col4 = st.columns(4)
2449
+
2450
+ with col1:
2451
+ st.metric(
2452
+ "Migration Risk Score",
2453
+ f"{impact_analysis['risk_score']:.1f}/10",
2454
+ delta=risk_delta,
2455
+ )
2456
+
2457
+ with col2:
2458
+ timeline_weeks = impact_analysis["timeline_impact_weeks"]
2459
+ timeline_delta = "Increase" if timeline_weeks > 0 else "Unchanged"
2460
+ st.metric(
2461
+ "Estimated Timeline Impact",
2462
+ f"{timeline_weeks} weeks",
2463
+ delta=timeline_delta,
1449
2464
  )
1450
2465
 
1451
- _display_dependency_graph_visualization(
1452
- analysis_result, viz_type, selected_layout
2466
+ with col3:
2467
+ complexity_level = impact_analysis["complexity_level"]
2468
+ complexity_delta = "High" if complexity_level == "High" else "Low"
2469
+ st.metric(
2470
+ "Dependency Complexity",
2471
+ complexity_level,
2472
+ delta=complexity_delta,
1453
2473
  )
1454
2474
 
2475
+ with col4:
2476
+ parallel_streams = impact_analysis["parallel_streams"]
2477
+ parallel_delta = "Multiple" if parallel_streams > 1 else "Single"
2478
+ st.metric(
2479
+ "Parallel Migration Streams",
2480
+ parallel_streams,
2481
+ delta=parallel_delta,
2482
+ )
2483
+
2484
+ # Detailed impact breakdown
2485
+ with st.expander("Detailed Impact Analysis"):
2486
+ _display_detailed_impact_analysis(
2487
+ impact_analysis, dependencies, circular_deps, community_cookbooks
2488
+ )
2489
+
2490
+
2491
+ def _display_analysis_details_section(
2492
+ analysis_result: str,
2493
+ circular_deps: list[tuple[str, str]],
2494
+ community_cookbooks: list[str],
2495
+ direct_deps: int,
2496
+ ) -> None:
2497
+ """Display analysis details section."""
1455
2498
  # Display analysis results
1456
2499
  st.subheader("Dependency Analysis Details")
1457
2500
 
1458
2501
  _display_dependency_analysis_sections(analysis_result)
1459
2502
 
1460
2503
  # Migration recommendations
1461
- _display_migration_recommendations(circular_deps, community_cookbooks, direct_deps)
2504
+ _display_migration_recommendations(
2505
+ len(circular_deps), len(community_cookbooks), direct_deps
2506
+ )
2507
+
2508
+
2509
+ def display_dependency_analysis_results() -> None:
2510
+ """Display dependency analysis results."""
2511
+ analysis_result = st.session_state.dep_analysis_result
2512
+ cookbook_path = st.session_state.dep_cookbook_path
2513
+ depth = st.session_state.dep_depth
2514
+ viz_type = st.session_state.get("dep_viz_type", "text")
2515
+
2516
+ # Display summary section
2517
+ _display_dependency_analysis_summary(analysis_result, cookbook_path, depth)
2518
+
2519
+ # Display graph visualization section
2520
+ _display_graph_visualization_section(analysis_result, viz_type)
2521
+
2522
+ # Display impact analysis section
2523
+ _display_impact_analysis_section(analysis_result)
2524
+
2525
+ # Display analysis details section
2526
+ dependencies, circular_deps, community_cookbooks = _parse_dependency_analysis(
2527
+ analysis_result
2528
+ )
2529
+ direct_deps = len(dependencies) if dependencies else 0
2530
+ _display_analysis_details_section(
2531
+ analysis_result, circular_deps, community_cookbooks, direct_deps
2532
+ )
1462
2533
 
1463
2534
  # Export options
1464
2535
  _display_dependency_export_options(
@@ -1466,43 +2537,125 @@ def display_dependency_analysis_results():
1466
2537
  cookbook_path,
1467
2538
  depth,
1468
2539
  direct_deps,
1469
- transitive_deps,
1470
- circular_deps,
1471
- community_cookbooks,
2540
+ len(dependencies) if dependencies else 0, # transitive_deps approximation
2541
+ len(circular_deps),
2542
+ len(community_cookbooks),
1472
2543
  )
1473
2544
 
1474
2545
 
1475
- def show_validation_reports():
1476
- """Show validation reports and conversion validation."""
1477
- st.header("Validation Reports")
2546
+ def _collect_files_to_validate(input_path: str) -> list[Path]:
2547
+ """Collect valid YAML files from input path."""
2548
+ validated_path = _normalize_and_validate_input_path(input_path)
2549
+ if validated_path is None:
2550
+ # Error already reported by _normalize_and_validate_input_path
2551
+ return []
1478
2552
 
1479
- # Import validation functions
1480
- from souschef.core.validation import ValidationEngine
2553
+ path_obj = validated_path
2554
+ files_to_validate = []
1481
2555
 
1482
- st.markdown("""
1483
- Validate Chef to Ansible conversions and generate comprehensive
1484
- validation reports for migration quality assurance.
1485
- """)
2556
+ if not path_obj.exists():
2557
+ st.error(f"Path does not exist: {path_obj}")
2558
+ return []
2559
+
2560
+ if path_obj.is_file():
2561
+ if path_obj.suffix in [".yml", ".yaml"] and path_obj.name not in [
2562
+ ".kitchen.yml",
2563
+ "kitchen.yml",
2564
+ "docker-compose.yml",
2565
+ ]:
2566
+ files_to_validate.append(path_obj)
2567
+ elif path_obj.is_dir():
2568
+ # Filter out obvious non-playbook files
2569
+ excluded_files = {".kitchen.yml", "kitchen.yml", "docker-compose.yml"}
2570
+
2571
+ yml_files = list(path_obj.glob("**/*.yml"))
2572
+ yaml_files = list(path_obj.glob("**/*.yaml"))
2573
+
2574
+ raw_files = yml_files + yaml_files
2575
+ files_to_validate.extend([f for f in raw_files if f.name not in excluded_files])
2576
+
2577
+ return files_to_validate
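Note that `glob("**/*.yml")` recurses into subdirectories, and the kitchen/compose files are screened out purely by file name. The collection logic in isolation (the containment check from `_normalize_and_validate_input_path` is skipped here):

```python
import tempfile
from pathlib import Path

root = Path(tempfile.mkdtemp())
(root / "roles").mkdir()
(root / "site.yml").write_text("- hosts: all\n")
(root / "roles" / "web.yaml").write_text("- hosts: web\n")
(root / "kitchen.yml").write_text("driver: docker\n")  # excluded by name

excluded = {".kitchen.yml", "kitchen.yml", "docker-compose.yml"}
found = [
    f
    for f in list(root.glob("**/*.yml")) + list(root.glob("**/*.yaml"))
    if f.name not in excluded
]
assert {f.name for f in found} == {"site.yml", "web.yaml"}
```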
2578
+
2579
+
2580
+ def _run_validation_engine(files_to_validate: Sequence[Path]) -> list[Any]:
2581
+ """Run validation engine on a list of files."""
2582
+ from souschef.core.validation import (
2583
+ ValidationCategory,
2584
+ ValidationEngine,
2585
+ ValidationLevel,
2586
+ ValidationResult,
2587
+ )
1486
2588
 
1487
- # Validation options
2589
+ engine = ValidationEngine()
2590
+ all_results = []
2591
+
2592
+ for file_path in files_to_validate:
2593
+ try:
2594
+ content = file_path.read_text()
2595
+ # We assume 'recipe' (Playbook) conversion type for .yml files found
2596
+ file_results = engine.validate_conversion("recipe", content)
2597
+
2598
+ # If no issues found, explicitly add a success record
2599
+ if not file_results:
2600
+ file_results = [
2601
+ ValidationResult(
2602
+ ValidationLevel.INFO,
2603
+ ValidationCategory.SYNTAX,
2604
+ "File passed all validation checks",
2605
+ location=file_path.name,
2606
+ )
2607
+ ]
2608
+
2609
+ # Annotate results with location if missing
2610
+ for res in file_results:
2611
+ if not res.location:
2612
+ res.location = file_path.name
2613
+
2614
+ all_results.extend(file_results)
2615
+ except Exception as file_err:
2616
+ st.warning(f"Could not read/validate {file_path.name}: {file_err}")
2617
+
2618
+ return all_results
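Every file therefore yields at least one record: genuine findings when the engine reports them, otherwise the synthetic INFO success entry, each stamped with its file name as the location. Downstream formatting follows the `[LEVEL] location: message` convention; a stand-in sketch (the real `ValidationResult` lives in souschef.core.validation):

```python
from dataclasses import dataclass


@dataclass
class Result:  # stand-in for souschef.core.validation.ValidationResult
    level: str
    location: str
    message: str


results = [
    Result("info", "site.yml", "File passed all validation checks"),
    Result("error", "web.yml", "Unknown module referenced"),
]
report = "\n".join(f"[{r.level.upper()}] {r.location}: {r.message}" for r in results)
assert report.splitlines()[1] == "[ERROR] web.yml: Unknown module referenced"
```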
2619
+
2620
+
2621
+ def _get_default_validation_path() -> str:
2622
+ """Determine the default path for validation from session state."""
2623
+ default_path = ""
2624
+ if "converted_playbooks_path" in st.session_state:
2625
+ default_path = st.session_state.converted_playbooks_path
2626
+ st.info(f"Pre-filled path from conversion: {default_path}")
2627
+ elif (
2628
+ "analysis_cookbook_path" in st.session_state
2629
+ and st.session_state.analysis_cookbook_path
2630
+ ):
2631
+ default_path = st.session_state.analysis_cookbook_path
2632
+ st.info(f"Pre-filled path from analysis: {default_path}")
2633
+ st.caption(
2634
+ "Note: This tool validates Ansible playbooks (.yml). If you're using a raw "
2635
+ "Chef cookbook path, please ensure you've performed the conversion first."
2636
+ )
2637
+ return default_path
2638
+
2639
+
2640
+ def _render_validation_options_ui() -> tuple[str, str]:
2641
+ """Render validation scope and format options."""
1488
2642
  col1, col2 = st.columns(2)
1489
2643
 
1490
2644
  with col1:
1491
- validation_type = st.selectbox(
1492
- "Validation Type",
1493
- ["syntax", "logic", "security", "performance", "full"],
1494
- help="Type of validation to perform",
1495
- format_func=lambda x: {
1496
- "syntax": "Syntax Validation",
1497
- "logic": "Logic & Structure Validation",
1498
- "security": "Security Best Practices",
1499
- "performance": "Performance Analysis",
1500
- "full": "Complete Validation Suite",
1501
- }.get(x, str(x)),
2645
+ sub_scope = st.selectbox(
2646
+ "Validation Scope",
2647
+ [
2648
+ "Full Suite",
2649
+ "Syntax Only",
2650
+ "Logic/Semantic",
2651
+ "Security",
2652
+ SCOPE_BEST_PRACTICES,
2653
+ ],
2654
+ help="Filter which validation checks to run",
1502
2655
  )
1503
2656
 
1504
2657
  with col2:
1505
- output_format = st.selectbox(
2658
+ sub_format = st.selectbox(
1506
2659
  "Output Format",
1507
2660
  ["text", "json", "html"],
1508
2661
  help="Format for validation reports",
@@ -1512,8 +2665,11 @@ def show_validation_reports():
1512
2665
  "html": "HTML Report",
1513
2666
  }.get(x, str(x)),
1514
2667
  )
2668
+ return sub_scope, sub_format
2669
+
1515
2670
 
1516
- # File/Directory input
2671
+ def _render_validation_input_ui(default_path: str) -> str:
2672
+ """Render input source selection UI."""
1517
2673
  st.subheader("Input Source")
1518
2674
 
1519
2675
  input_type = st.radio(
@@ -1526,17 +2682,24 @@ def show_validation_reports():
1526
2682
  if input_type == "Directory":
1527
2683
  input_path = st.text_input(
1528
2684
  "Directory Path",
2685
+ value=default_path,
1529
2686
  placeholder="/path/to/ansible/playbooks",
1530
2687
  help="Path to directory containing Ansible playbooks to validate",
1531
2688
  )
1532
2689
  else:
1533
2690
  input_path = st.text_input(
1534
2691
  "File Path",
2692
+ value=default_path
2693
+ if default_path and default_path.endswith((".yml", ".yaml"))
2694
+ else "",
1535
2695
  placeholder="/path/to/playbook.yml",
1536
2696
  help="Path to single Ansible playbook file to validate",
1537
2697
  )
2698
+ return input_path
1538
2699
 
1539
- # Validation options
2700
+
2701
+ def _render_validation_settings_ui() -> tuple[bool, bool, bool]:
2702
+ """Render strict mode and other validation settings."""
1540
2703
  st.subheader("Validation Options")
1541
2704
 
1542
2705
  col1, col2, col3 = st.columns(3)
@@ -1548,7 +2711,7 @@ def show_validation_reports():
1548
2711
 
1549
2712
  with col2:
1550
2713
  include_best_practices = st.checkbox(
1551
- "Include Best Practices",
2714
+ f"Include {SCOPE_BEST_PRACTICES}",
1552
2715
  value=True,
1553
2716
  help="Check for Ansible best practices",
1554
2717
  )
@@ -1560,69 +2723,163 @@ def show_validation_reports():
1560
2723
  help="Provide improvement suggestions",
1561
2724
  )
1562
2725
 
1563
- # Validation button
1564
- if st.button("Run Validation", type="primary", use_container_width=True):
1565
- if not input_path.strip():
1566
- st.error("Please enter a path to validate.")
2726
+ return strict_mode, include_best_practices, generate_recommendations
2727
+
2728
+
2729
+ def _normalize_and_validate_input_path(input_path: str) -> Path | None:
2730
+ """
2731
+ Normalize and validate a user-provided filesystem path.
2732
+
2733
+ Returns a resolved Path object if valid, otherwise reports an error
2734
+ via Streamlit and returns None.
2735
+ """
2736
+ if not input_path:
2737
+ st.error(ERROR_MSG_ENTER_PATH)
2738
+ return None
2739
+
2740
+ raw = input_path.strip()
2741
+ if not raw:
2742
+ st.error(ERROR_MSG_ENTER_PATH)
2743
+ return None
2744
+
2745
+ try:
2746
+ # Expand user home and resolve to an absolute, normalized path
2747
+ path_obj = Path(raw).expanduser().resolve()
2748
+ except Exception:
2749
+ st.error(f"Invalid path: {raw}")
2750
+ return None
2751
+
2752
+ # Optional safety: constrain to the application root directory
2753
+ try:
2754
+ app_root = Path(app_path).resolve()
2755
+ path_obj.relative_to(app_root)
2756
+ except Exception:
2757
+ st.error("Path must be within the SousChef project directory.")
2758
+ return None
2759
+
2760
+ return path_obj
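Here `relative_to` is used for its side effect only: it raises `ValueError` when the resolved path escapes the application root, and the broad `except` converts that into a rejection. On Python 3.9+ the same containment test reads more directly; a sketch, not the shipped code:

```python
from pathlib import Path


def is_contained(candidate: str, root: Path) -> bool:
    """True when candidate resolves inside root (symlinks followed)."""
    resolved = Path(candidate).expanduser().resolve()
    return resolved.is_relative_to(root.resolve())


assert is_contained("/tmp/logs/../app.yml", Path("/tmp")) is True
assert is_contained("/etc/passwd", Path("/tmp")) is False
```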
2761
+
2762
+
2763
+ def _handle_validation_execution(input_path: str, options: Mapping[str, Any]) -> None:
2764
+ """Execute the validation process with progress tracking."""
2765
+ progress_tracker = ProgressTracker(
2766
+ total_steps=6, description="Running validation..."
2767
+ )
2768
+
2769
+ try:
2770
+ progress_tracker.update(1, "Preparing validation environment...")
2771
+
2772
+ progress_tracker.update(2, "Scanning input files...")
2773
+
2774
+ files_to_validate = _collect_files_to_validate(input_path)
2775
+
2776
+ if not files_to_validate:
2777
+ # Error is handled inside _collect_files_to_validate
2778
+ # if path doesn't exist or is invalid
2779
+ validated_path = _normalize_and_validate_input_path(input_path)
2780
+ if validated_path is not None and validated_path.exists():
2781
+ st.warning(f"No YAML files found in {validated_path}")
1567
2782
  return
1568
2783
 
1569
- # Create progress tracker
1570
- progress_tracker = ProgressTracker(
1571
- total_steps=6, description="Running validation..."
2784
+ progress_tracker.update(3, f"Validating {len(files_to_validate)} files...")
2785
+
2786
+ all_results = _run_validation_engine(files_to_validate)
2787
+
2788
+ progress_tracker.update(4, "Filtering results by scope...")
+
+ # Filter results based on scope
2789
+ filtered_results = _filter_results_by_scope(all_results, options["scope"])
2790
+
2791
+ # Format the results as text
2792
+ validation_result = "\n".join(
2793
+ [
2794
+ f"[{result.level.value.upper()}] {result.location}: {result.message}"
2795
+ for result in filtered_results
2796
+ ]
1572
2797
  )
1573
2798
 
1574
- try:
1575
- progress_tracker.update(1, "Preparing validation environment...")
1576
-
1577
- # Prepare validation options
1578
- options = {
1579
- "strict": strict_mode,
1580
- "best_practices": include_best_practices,
1581
- "recommendations": generate_recommendations,
1582
- "format": output_format,
1583
- }
2799
+ if not validation_result:
2800
+ validation_result = "No issues found matching the selected scope."
1584
2801
 
1585
- progress_tracker.update(2, "Scanning input files...")
1586
- progress_tracker.update(3, "Running syntax validation...")
1587
- progress_tracker.update(4, "Performing logic checks...")
2802
+ progress_tracker.update(5, "Generating validation report...")
1588
2803
 
1589
- # Run validation
1590
- engine = ValidationEngine()
1591
- validation_results = engine.validate_conversion(
1592
- validation_type, input_path.strip()
1593
- )
2804
+ # Store results
2805
+ st.session_state.validation_result = validation_result
2806
+ st.session_state.validation_path = input_path.strip()
2807
+ st.session_state.validation_type = options["scope"]
2808
+ st.session_state.validation_options = options
1594
2809
 
1595
- # Format the results as text
1596
- validation_result = "\n".join(
1597
- [
1598
- f"{result.level.value.upper()}: {result.message}"
1599
- for result in validation_results
1600
- ]
1601
- )
2810
+ progress_tracker.complete("Validation completed!")
2811
+ st.success(f"Validation completed! Scanned {len(files_to_validate)} files.")
2812
+ st.rerun()
1602
2813
 
1603
- progress_tracker.update(5, "Generating validation report...")
2814
+ except Exception as e:
2815
+ progress_tracker.close()
2816
+ st.error(f"Error during validation: {e}")
1604
2817
 
1605
- # Store results
1606
- st.session_state.validation_result = validation_result
1607
- st.session_state.validation_path = input_path.strip()
1608
- st.session_state.validation_type = validation_type
1609
- st.session_state.validation_options = options
1610
2818
 
1611
- progress_tracker.complete("Validation completed!")
1612
- st.success("Validation completed successfully!")
1613
- st.rerun()
2819
+ def show_validation_reports() -> None:
2820
+ """Show validation reports and conversion validation."""
2821
+ st.header(NAV_VALIDATION_REPORTS)
1614
2822
 
1615
- except Exception as e:
1616
- progress_tracker.close()
1617
- st.error(f"Error during validation: {e}")
2823
+ st.markdown("""
2824
+ Validate Chef to Ansible conversions and generate comprehensive
2825
+ validation reports for migration quality assurance.
2826
+ """)
2827
+
2828
+ # Check for previously analyzed path to pre-fill
2829
+ default_path = _get_default_validation_path()
2830
+
2831
+ # UI Components
2832
+ validation_scope, output_format = _render_validation_options_ui()
2833
+ input_path = _render_validation_input_ui(default_path)
2834
+ strict_mode, include_best_practices, generate_recommendations = (
2835
+ _render_validation_settings_ui()
2836
+ )
2837
+
2838
+ # Validation button
2839
+ if st.button(
2840
+ "Run Validation", type="primary", width="stretch", key="run_validation"
2841
+ ):
2842
+ if not input_path or not input_path.strip():
2843
+ st.error("Please enter a path to validate.")
1618
2844
  return
1619
2845
 
2846
+ options = {
2847
+ "strict": strict_mode,
2848
+ "best_practices": include_best_practices,
2849
+ "recommendations": generate_recommendations,
2850
+ "scope": validation_scope,
2851
+ "format": output_format,
2852
+ }
2853
+
2854
+ _handle_validation_execution(input_path, options)
2855
+
1620
2856
  # Display results if available
1621
2857
  if "validation_result" in st.session_state:
1622
2858
  display_validation_results()
1623
2859
 
1624
2860
 
1625
- def _parse_validation_metrics(validation_result):
2861
+ def _filter_results_by_scope(results: list[Any], scope: str) -> list[Any]:
2862
+ """Filter validation results based on selected scope."""
2863
+ from souschef.core.validation import ValidationCategory
2864
+
2865
+ if scope == "Full Suite":
2866
+ return results
2867
+
2868
+ scope_map = {
2869
+ "Syntax Only": ValidationCategory.SYNTAX,
2870
+ "Logic/Semantic": ValidationCategory.SEMANTIC,
2871
+ "Security": ValidationCategory.SECURITY,
2872
+ SCOPE_BEST_PRACTICES: ValidationCategory.BEST_PRACTICE,
2873
+ }
2874
+
2875
+ target_category = scope_map.get(scope)
2876
+ if not target_category:
2877
+ return results
2878
+
2879
+ return [r for r in results if r.category == target_category]
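The scope labels map one-to-one onto `ValidationCategory` members, and any unmapped scope falls through `scope_map.get(...)` as None, which the `if not target_category` guard turns into "return everything". In miniature, with a stand-in enum:

```python
from enum import Enum


class Category(Enum):  # stand-in for ValidationCategory
    SYNTAX = "syntax"
    SECURITY = "security"


scope_map = {"Syntax Only": Category.SYNTAX, "Security": Category.SECURITY}
results = [("a.yml", Category.SYNTAX), ("b.yml", Category.SECURITY)]

target = scope_map.get("Syntax Only")
assert [r for r in results if r[1] == target] == [("a.yml", Category.SYNTAX)]
assert scope_map.get("Performance") is None  # unknown scope: return all results
```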
2880
+
2881
+
2882
+ def _parse_validation_metrics(validation_result: str) -> tuple[int, int, int, int]:
1626
2883
  """Parse validation result to extract key metrics."""
1627
2884
  lines = validation_result.split("\n")
1628
2885
 
@@ -1632,20 +2889,35 @@ def _parse_validation_metrics(validation_result):
1632
2889
  total_checks = 0
1633
2890
 
1634
2891
  for line in lines:
1635
- if "ERROR:" in line.upper():
2892
+ line_upper = line.upper()
2893
+ # Match both old format "ERROR:" and new format "[ERROR]"
2894
+ if "ERROR:" in line_upper or "[ERROR]" in line_upper:
1636
2895
  errors += 1
1637
- elif "WARNING:" in line.upper():
2896
+ elif "WARNING:" in line_upper or "[WARNING]" in line_upper:
1638
2897
  warnings += 1
1639
- elif "PASSED:" in line.upper() or "✓" in line:
2898
+ # Match explicit passed check or INFO level (which we use for success now)
2899
+ elif (
2900
+ "PASSED:" in line_upper
2901
+ or "PASSED" in line_upper
2902
+ or "✓" in line
2903
+ or "[INFO]" in line_upper
2904
+ ):
1640
2905
  passed += 1
1641
2906
  if "Total checks:" in line.lower():
1642
2907
  with contextlib.suppress(ValueError):
1643
2908
  total_checks = int(line.split(":")[1].strip())
1644
2909
 
2910
+ # If we found errors/warnings but no explicit "checks" count (legacy log parsing),
2911
+ # infer total checks from line items
2912
+ if total_checks == 0 and (errors > 0 or warnings > 0 or passed > 0):
2913
+ total_checks = errors + warnings + passed
2914
+
1645
2915
  return errors, warnings, passed, total_checks
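The parser tolerates both the legacy `LEVEL:` prefixes and the bracketed `[LEVEL]` tags, and when no explicit `Total checks:` line exists it infers the total from the counted lines. Hand-tracing the new format:

```python
# Hand-traced expectation for _parse_validation_metrics on [LEVEL] output
# (re-implemented inline here rather than imported).
report = "\n".join(
    [
        "[ERROR] web.yml: Unknown module referenced",
        "[WARNING] site.yml: Deprecated syntax",
        "[INFO] db.yml: File passed all validation checks",
    ]
)

errors = warnings = passed = 0
for line in report.split("\n"):
    upper = line.upper()
    if "[ERROR]" in upper:
        errors += 1
    elif "[WARNING]" in upper:
        warnings += 1
    elif "PASSED" in upper or "[INFO]" in upper:
        passed += 1

total = errors + warnings + passed  # the inferred-total fallback above
assert (errors, warnings, passed, total) == (1, 1, 1, 3)
```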
1646
2916
 
1647
2917
 
1648
- def _display_validation_summary_metrics(errors, warnings, passed, total_checks):
2918
+ def _display_validation_summary_metrics(
2919
+ errors: int, warnings: int, passed: int, total_checks: int
2920
+ ) -> None:
1649
2921
  """Display validation summary metrics."""
1650
2922
  col1, col2, col3, col4 = st.columns(4)
1651
2923
 
@@ -1653,28 +2925,28 @@ def _display_validation_summary_metrics(errors, warnings, passed, total_checks):
1653
2925
  st.metric("Total Checks", total_checks)
1654
2926
 
1655
2927
  with col2:
1656
- st.metric("Passed", passed, delta="" if passed > 0 else "")
2928
+ st.metric("Passed", passed, delta="Pass" if passed > 0 else "")
1657
2929
 
1658
2930
  with col3:
1659
- st.metric("Warnings", warnings, delta="⚠️" if warnings > 0 else "")
2931
+ st.metric("Warnings", warnings, delta="Warning" if warnings > 0 else "")
1660
2932
 
1661
2933
  with col4:
1662
- st.metric("Errors", errors, delta="" if errors > 0 else "")
2934
+ st.metric("Errors", errors, delta="Error" if errors > 0 else "")
1663
2935
 
1664
2936
 
1665
- def _display_validation_status(errors, warnings):
2937
+ def _display_validation_status(errors: int, warnings: int) -> None:
1666
2938
  """Display overall validation status."""
1667
2939
  if errors > 0:
1668
- st.error("**Validation Failed**: Critical issues found that need attention.")
2940
+ st.error("**Validation Failed**: Critical issues found that need attention.")
1669
2941
  elif warnings > 0:
1670
2942
  st.warning(
1671
- "⚠️ **Validation Passed with Warnings**: Review warnings before proceeding."
2943
+ "**Validation Passed with Warnings**: Review warnings before proceeding."
1672
2944
  )
1673
2945
  else:
1674
- st.success("**Validation Passed**: All checks successful!")
2946
+ st.success("**Validation Passed**: All checks successful!")
1675
2947
 
1676
2948
 
1677
- def _display_validation_sections(validation_result):
2949
+ def _display_validation_sections(validation_result: str) -> None:
1678
2950
  """Display validation results in expandable sections."""
1679
2951
  # Split results into sections
1680
2952
  sections = validation_result.split("\n## ")
@@ -1686,28 +2958,28 @@ def _display_validation_sections(validation_result):
1686
2958
 
1687
2959
  # Add expanders for different sections
1688
2960
  if "Syntax Validation" in section:
1689
- with st.expander("🔍 Syntax Validation"):
2961
+ with st.expander("Syntax Validation"):
1690
2962
  st.markdown(section.replace("## Syntax Validation", ""))
1691
2963
  elif "Logic Validation" in section:
1692
- with st.expander("🧠 Logic Validation"):
2964
+ with st.expander("Logic Validation"):
1693
2965
  st.markdown(section.replace("## Logic Validation", ""))
1694
2966
  elif "Security Validation" in section:
1695
- with st.expander("🔒 Security Validation"):
2967
+ with st.expander("Security Validation"):
1696
2968
  st.markdown(section.replace("## Security Validation", ""))
1697
2969
  elif "Performance Validation" in section:
1698
- with st.expander("Performance Validation"):
2970
+ with st.expander("Performance Validation"):
1699
2971
  st.markdown(section.replace("## Performance Validation", ""))
1700
- elif "Best Practices" in section:
1701
- with st.expander("📋 Best Practices"):
1702
- st.markdown(section.replace("## Best Practices", ""))
2972
+ elif SCOPE_BEST_PRACTICES in section:
2973
+ with st.expander(f"{SCOPE_BEST_PRACTICES}"):
2974
+ st.markdown(section.replace(f"## {SCOPE_BEST_PRACTICES}", ""))
1703
2975
  elif "Recommendations" in section:
1704
- with st.expander("💡 Recommendations"):
2976
+ with st.expander("Recommendations"):
1705
2977
  st.markdown(section.replace("## Recommendations", ""))
1706
2978
  else:
1707
2979
  st.markdown(section)
1708
2980
 
1709
2981
 
1710
- def _display_validation_action_items(errors, warnings):
2982
+ def _display_validation_action_items(errors: int, warnings: int) -> None:
1711
2983
  """Display action items based on validation results."""
1712
2984
  if errors > 0 or warnings > 0:
1713
2985
  st.subheader("Action Items")
@@ -1732,15 +3004,15 @@ def _display_validation_action_items(errors, warnings):
1732
3004
 
1733
3005
 
1734
3006
  def _display_validation_export_options(
1735
- validation_result,
1736
- input_path,
1737
- validation_type,
1738
- options,
1739
- errors,
1740
- warnings,
1741
- passed,
1742
- total_checks,
1743
- ):
3007
+ validation_result: str,
3008
+ input_path: str,
3009
+ validation_type: str,
3010
+ options: Mapping[str, Any],
3011
+ errors: int,
3012
+ warnings: int,
3013
+ passed: int,
3014
+ total_checks: int,
3015
+ ) -> None:
1744
3016
  """Display export options for validation results."""
1745
3017
  st.subheader("Export Report")
1746
3018
 
@@ -1748,7 +3020,7 @@ def _display_validation_export_options(
1748
3020
 
1749
3021
  with col1:
1750
3022
  st.download_button(
1751
- label="📥 Download Full Report",
3023
+ label="Download Full Report",
1752
3024
  data=validation_result,
1753
3025
  file_name="validation_report.md",
1754
3026
  mime=MIME_TEXT_MARKDOWN,
@@ -1780,7 +3052,7 @@ def _display_validation_export_options(
1780
3052
  import json
1781
3053
 
1782
3054
  st.download_button(
1783
- label="📊 Download JSON Summary",
3055
+ label="Download JSON Summary",
1784
3056
  data=json.dumps(report_json, indent=2),
1785
3057
  file_name="validation_report.json",
1786
3058
  mime=MIME_APPLICATION_JSON,
@@ -1788,7 +3060,7 @@ def _display_validation_export_options(
1788
3060
  )
1789
3061
 
1790
3062
 
1791
- def display_validation_results():
3063
+ def display_validation_results() -> None:
1792
3064
  """Display validation results."""
1793
3065
  validation_result = st.session_state.validation_result
1794
3066
  input_path = st.session_state.validation_path
@@ -1810,7 +3082,8 @@ def display_validation_results():
1810
3082
  _display_validation_status(errors, warnings)
1811
3083
 
1812
3084
  # Validation details
1813
- st.info(f"Validation type: **{validation_type}** | Path: `{input_path}`")
3085
+ validation_msg = f"Validation type: **{validation_type}** | Path: `{input_path}`"
3086
+ st.info(validation_msg)
1814
3087
 
1815
3088
  # Display validation results
1816
3089
  st.subheader("Validation Details")
@@ -1833,5 +3106,11 @@ def display_validation_results():
1833
3106
  )
1834
3107
 
1835
3108
 
3109
+ # Launch the UI only when actually running under a Streamlit server,
+ # so that importing this module (e.g. in tests) does not start the app
3110
+ if os.environ.get("STREAMLIT_SERVER_PORT") or os.environ.get(
3111
+ "STREAMLIT_SERVER_HEADLESS"
3112
+ ):
3113
+ main()
3114
+
1836
3115
  if __name__ == "__main__":
1837
3116
  main()