mcp-souschef 3.0.0-py3-none-any.whl → 3.5.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
souschef/ui/app.py CHANGED
@@ -9,13 +9,7 @@ if str(app_path) not in sys.path:
  import contextlib
  import os
  from collections.abc import Callable, Iterable, Mapping, Sequence
- from typing import (
-     TYPE_CHECKING,
-     Any,
-     Concatenate,
-     ParamSpec,
-     TypeVar,
- )
+ from typing import TYPE_CHECKING, Any, Concatenate, ParamSpec, TypeVar

  import streamlit as st

@@ -27,7 +21,10 @@ if TYPE_CHECKING:
  P = ParamSpec("P")
  R = TypeVar("R")

+ from souschef.core import _ensure_within_base_path, _normalize_path
+ from souschef.core.path_utils import safe_exists, safe_glob, safe_is_dir, safe_is_file
  from souschef.ui.pages.ai_settings import show_ai_settings_page
+ from souschef.ui.pages.chef_server_settings import show_chef_server_settings_page
  from souschef.ui.pages.cookbook_analysis import show_cookbook_analysis_page

  # Constants
@@ -39,6 +36,7 @@ NAV_MIGRATION_PLANNING = "Migration Planning"
  NAV_DEPENDENCY_MAPPING = "Dependency Mapping"
  NAV_VALIDATION_REPORTS = "Validation Reports"
  NAV_AI_SETTINGS = "AI Settings"
+ NAV_CHEF_SERVER_SETTINGS = "Chef Server Settings"
  NAV_COOKBOOK_ANALYSIS = "Cookbook Analysis"
  BUTTON_ANALYSE_DEPENDENCIES = "Analyse Dependencies"
  INPUT_METHOD_DIRECTORY_PATH = "Directory Path"
@@ -129,13 +127,14 @@ def main() -> None:
      # Navigation section
      st.subheader("Navigation")

-     col1, col2, col3, col4, col5 = st.columns(5)
+     col1, col2, col3 = st.columns(3)
+     col4, col5, col6 = st.columns(3)

      with col1:
          if st.button(
              "Cookbook Analysis",
              type="primary" if page == NAV_COOKBOOK_ANALYSIS else "secondary",
-             width="stretch",
+             use_container_width=True,
              key="nav_cookbook_analysis",
          ):
              st.session_state.current_page = NAV_COOKBOOK_ANALYSIS
@@ -145,7 +144,7 @@ def main() -> None:
          if st.button(
              "Migration Planning",
              type="primary" if page == NAV_MIGRATION_PLANNING else "secondary",
-             width="stretch",
+             use_container_width=True,
              key="nav_migration_planning",
          ):
              st.session_state.current_page = NAV_MIGRATION_PLANNING
@@ -155,7 +154,7 @@ def main() -> None:
          if st.button(
              "Dependency Mapping",
              type="primary" if page == NAV_DEPENDENCY_MAPPING else "secondary",
-             width="stretch",
+             use_container_width=True,
              key="nav_dependency_mapping",
          ):
              st.session_state.current_page = NAV_DEPENDENCY_MAPPING
@@ -165,7 +164,7 @@ def main() -> None:
          if st.button(
              "Validation Reports",
              type="primary" if page == NAV_VALIDATION_REPORTS else "secondary",
-             width="stretch",
+             use_container_width=True,
              key="nav_validation_reports",
          ):
              st.session_state.current_page = NAV_VALIDATION_REPORTS
@@ -175,12 +174,22 @@ def main() -> None:
          if st.button(
              "AI Settings",
              type="primary" if page == NAV_AI_SETTINGS else "secondary",
-             width="stretch",
+             use_container_width=True,
              key="nav_ai_settings",
          ):
              st.session_state.current_page = NAV_AI_SETTINGS
              st.rerun()

+     with col6:
+         if st.button(
+             "Chef Server",
+             type="primary" if page == NAV_CHEF_SERVER_SETTINGS else "secondary",
+             use_container_width=True,
+             key="nav_chef_server_settings",
+         ):
+             st.session_state.current_page = NAV_CHEF_SERVER_SETTINGS
+             st.rerun()
+
      st.divider()

      # Page routing
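
Note: the layout change above swaps a single five-column row for two three-column rows so the new Chef Server button fits. A minimal standalone sketch of the resulting grid (the key name here is hypothetical):

    import streamlit as st

    # Two rows of three columns replace the previous single five-column row.
    col1, col2, col3 = st.columns(3)
    col4, col5, col6 = st.columns(3)

    with col6:
        # use_container_width=True replaces width="stretch", presumably for
        # compatibility with Streamlit releases that predate the width parameter.
        st.button("Chef Server", use_container_width=True, key="nav_demo")
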
@@ -196,6 +205,7 @@ def _route_to_page(page: str) -> None:
          NAV_DEPENDENCY_MAPPING: show_dependency_mapping,
          NAV_VALIDATION_REPORTS: show_validation_reports,
          NAV_AI_SETTINGS: show_ai_settings_page,
+         NAV_CHEF_SERVER_SETTINGS: show_chef_server_settings_page,
      }

      route_func = page_routes.get(page)
@@ -2550,28 +2560,31 @@ def _collect_files_to_validate(input_path: str) -> list[Path]:
          # Error already reported by _normalize_and_validate_input_path
          return []

-     path_obj = validated_path
-     files_to_validate = []
+     # Path is normalized and validated to be within app root
+     path_obj: Path = validated_path
+     files_to_validate: list[Path] = []

-     if not path_obj.exists():
+     # Check if path exists using safe function
+     if not safe_exists(path_obj, Path.cwd()):
          st.error(f"Path does not exist: {path_obj}")
          return []

-     if path_obj.is_file():
+     # Determine if it's a file or directory
+     if safe_is_file(path_obj, Path.cwd()):
          if path_obj.suffix in [".yml", ".yaml"] and path_obj.name not in [
              ".kitchen.yml",
              "kitchen.yml",
              "docker-compose.yml",
          ]:
              files_to_validate.append(path_obj)
-     elif path_obj.is_dir():
+     elif safe_is_dir(path_obj, Path.cwd()):
          # Filter out obvious non-playbook files
          excluded_files = {".kitchen.yml", "kitchen.yml", "docker-compose.yml"}

-         yml_files = list(path_obj.glob("**/*.yml"))
-         yaml_files = list(path_obj.glob("**/*.yaml"))
+         yml_files: list[Path] = safe_glob(path_obj, "**/*.yml", Path.cwd())
+         yaml_files: list[Path] = safe_glob(path_obj, "**/*.yaml", Path.cwd())

-         raw_files = yml_files + yaml_files
+         raw_files: list[Path] = yml_files + yaml_files
          files_to_validate.extend([f for f in raw_files if f.name not in excluded_files])

      return files_to_validate
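
Note: the safe_exists/safe_is_file/safe_is_dir/safe_glob helpers live in souschef.core.path_utils and are not shown in this diff. A hypothetical sketch of their semantics, with signatures inferred from the call sites above (each takes the path plus a base directory the result must stay inside):

    from pathlib import Path

    def safe_exists(path: Path, base: Path) -> bool:
        # Only report existence for paths that resolve inside the base directory.
        resolved = path.resolve()
        return resolved.is_relative_to(base.resolve()) and resolved.exists()

    def safe_glob(path: Path, pattern: str, base: Path) -> list[Path]:
        # Drop matches that escape the base directory (e.g. via symlinks).
        resolved_base = base.resolve()
        return [p for p in path.glob(pattern) if p.resolve().is_relative_to(resolved_base)]
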
@@ -2743,22 +2756,14 @@ def _normalize_and_validate_input_path(input_path: str) -> Path | None:
          return None

      try:
-         # Expand user home and resolve to an absolute, normalized path
-         path_obj = Path(raw).expanduser().resolve()
-     except Exception:
-         st.error(f"Invalid path: {raw}")
-         return None
-
-     # Optional safety: constrain to the application root directory
-     try:
+         path_obj = _normalize_path(raw)
          app_root = Path(app_path).resolve()
-         path_obj.relative_to(app_root)
-     except Exception:
-         st.error("Path must be within the SousChef project directory.")
+         # Use centralised containment validation
+         return _ensure_within_base_path(path_obj, app_root)
+     except (ValueError, OSError) as e:
+         st.error(f"Invalid path: {e}")
          return None

-     return path_obj
-

  def _handle_validation_execution(input_path: str, options: Mapping[str, Any]) -> None:
      """Execute the validation process with progress tracking."""
@@ -2777,8 +2782,11 @@ def _handle_validation_execution(input_path: str, options: Mapping[str, Any]) -> None:
          # Error is handled inside _collect_files_to_validate
          # if path doesn't exist or is invalid
          validated_path = _normalize_and_validate_input_path(input_path)
-         if validated_path is not None and validated_path.exists():
-             st.warning(f"No YAML files found in {validated_path}")
+         if validated_path is not None:
+             # Check if the validated path exists
+             path_exists: bool = safe_exists(validated_path, Path.cwd())
+             if path_exists:
+                 st.warning(f"No YAML files found in {validated_path}")
          return

      progress_tracker.update(3, f"Validating {len(files_to_validate)} files...")

souschef/ui/pages/ai_settings.py CHANGED
@@ -8,10 +8,11 @@ import json
  import os
  from pathlib import Path
  from typing import Any
- from urllib.parse import urlparse, urlunparse

  import streamlit as st

+ from souschef.core.url_validation import validate_user_provided_url
+
  # AI Provider Constants
  ANTHROPIC_PROVIDER = "Anthropic (Claude)"
  OPENAI_PROVIDER = "OpenAI (GPT)"
@@ -21,6 +22,7 @@ LOCAL_PROVIDER = "Local Model"

  # UI Constants
  API_KEY_LABEL = "API Key"
+ REQUESTS_NOT_INSTALLED_MESSAGE = "requests library not installed"

  # Import AI libraries (optional dependencies)
  try:
@@ -34,9 +36,9 @@ except ImportError:
      APIClient = None

  try:
-     import requests  # type: ignore[import-untyped]
+     import requests
  except ImportError:
-     requests = None
+     requests = None  # type: ignore[assignment]

  try:
      import openai
@@ -70,8 +72,26 @@ def _get_model_options(provider):
  def _render_api_configuration(provider):
      """Render API configuration UI and return config values."""
      if provider == LOCAL_PROVIDER:
-         st.info("Local model configuration will be added in a future update.")
-         return "", "", ""
+         col1, col2 = st.columns(2)
+         with col1:
+             base_url = st.text_input(
+                 "Local Server URL",
+                 help=(
+                     "HTTPS URL of your local model server (Ollama, llama.cpp, vLLM, "
+                     "etc.). Add non-public hosts to SOUSCHEF_ALLOWED_HOSTNAMES."
+                 ),
+                 key="base_url_input",
+                 placeholder="https://localhost:11434 (for Ollama)",
+                 value="https://localhost:11434",
+             )
+         with col2:
+             model = st.text_input(
+                 "Model Name",
+                 help="Name of the model to use from your local server",
+                 key="model_input",
+                 placeholder="e.g., llama2, mistral, neural-chat",
+             )
+         return model, base_url, ""
      elif provider == WATSON_PROVIDER:
          col1, col2, col3 = st.columns(3)
          with col1:
@@ -258,7 +278,7 @@ def show_ai_settings_page():

  def validate_ai_configuration(provider, api_key, model, base_url="", project_id=""):
      """Validate the AI configuration by making a test API call."""
-     if not api_key and provider != "Local Model":
+     if not api_key and provider != LOCAL_PROVIDER:
          st.error("API key is required for validation.")
          return

@@ -276,8 +296,10 @@ def validate_ai_configuration(provider, api_key, model, base_url="", project_id=""):
          success, message = validate_watson_config(api_key, project_id, base_url)
      elif provider == LIGHTSPEED_PROVIDER:
          success, message = validate_lightspeed_config(api_key, model, base_url)
+     elif provider == LOCAL_PROVIDER:
+         success, message = validate_local_model_config(base_url, model)
      else:
-         st.info("Local model validation not implemented yet.")
+         st.error("Unknown provider selected.")
          return

      if success:
@@ -299,29 +321,116 @@ def _sanitize_lightspeed_base_url(base_url: str) -> str:
      - Strip any user-supplied path, query, or fragment.
      """
      default_url = "https://api.redhat.com"
-     allowed_hosts = {
-         "api.redhat.com",
-     }
+     allowed_hosts = {"api.redhat.com"}
+
+     return validate_user_provided_url(
+         base_url,
+         default_url=default_url,
+         allowed_hosts=allowed_hosts,
+         strip_path=True,
+     )
+
+
+ def _check_ollama_server(base_url: str, model: str) -> tuple[bool, str]:
+     """Check Ollama server availability and models."""
+     if requests is None:
+         return False, REQUESTS_NOT_INSTALLED_MESSAGE
+
+     response = requests.get(f"{base_url}/api/tags", timeout=5)
+     if response.status_code == 200:
+         models_data = response.json()
+         available = [m.get("name", "") for m in models_data.get("models", [])]
+
+         if model and model in available:
+             return True, f"Model '{model}' found on Ollama server"
+         elif available:
+             models_str = ", ".join(available[:3])
+             return True, f"Ollama server with models: {models_str}"
+         else:
+             return False, "Ollama server found but no models available"
+     return False, "Ollama API not responding"
+
+
+ def _check_openai_compatible_server(base_url: str, model: str) -> tuple[bool, str]:
+     """Check OpenAI-compatible server availability and models."""
+     if requests is None:
+         return False, REQUESTS_NOT_INSTALLED_MESSAGE
+
+     response = requests.get(f"{base_url}/v1/models", timeout=5)
+     if response.status_code == 200:
+         models_data = response.json()
+         available = [m.get("id", "") for m in models_data.get("data", [])]
+
+         if model and model in available:
+             return True, f"Model '{model}' found on server"
+         elif available:
+             return True, f"OpenAI-compatible server running at {base_url}"
+         else:
+             return False, "Server found but no models available"
+     return False, "OpenAI API not responding"
+

-     if not base_url or not str(base_url).strip():
-         return default_url
+ def validate_local_model_config(base_url="", model=""):
+     """
+     Validate local model server configuration.

-     parsed = urlparse(base_url)
+     Supports multiple local model servers:
+     - Ollama (default: https://localhost:11434)
+     - llama.cpp server (default: https://localhost:8000)
+     - vLLM (default: https://localhost:8000)
+     - LM Studio (default: https://localhost:1234)

-     # If scheme is missing, assume https
-     if not parsed.scheme:
-         parsed = parsed._replace(scheme="https")
+     Args:
+         base_url: Base URL of local model server
+         model: Model name to check availability

-     if parsed.scheme.lower() != "https":
-         raise ValueError("Base URL must use HTTPS.")
+     Returns:
+         Tuple of (success: bool, message: str)

-     hostname = (parsed.hostname or "").lower()
-     if hostname not in allowed_hosts:
-         raise ValueError("Base URL host must be a supported Red Hat domain.")
+     """
+     if requests is None:
+         return False, REQUESTS_NOT_INSTALLED_MESSAGE

-     # Normalize to scheme + netloc only; drop path/query/fragment.
-     cleaned = parsed._replace(path="", params="", query="", fragment="")
-     return urlunparse(cleaned)
+     # Default to Ollama if no URL provided
+     if not base_url:
+         base_url = "https://localhost:11434"
+
+     try:
+         base_url = validate_user_provided_url(base_url)
+     except ValueError as exc:
+         return False, f"Invalid local model server URL: {exc}"
+
+     base_url = base_url.rstrip("/")
+
+     try:
+         # Try Ollama API first
+         success, message = _check_ollama_server(base_url, model)
+         if success or "Ollama server" in message:
+             return success, f"{message} at {base_url}"
+
+         # Try OpenAI-compatible API
+         success, message = _check_openai_compatible_server(base_url, model)
+         if success:
+             return success, message
+
+         # If neither endpoint works, server might not be running
+         return False, (
+             f"Cannot connect to local model server at {base_url}. "
+             "Make sure it's running."
+         )
+
+     except requests.exceptions.Timeout:
+         return (
+             False,
+             f"Connection timed out. Is server running at {base_url}?",
+         )
+     except requests.exceptions.ConnectionError:
+         return (
+             False,
+             f"Cannot reach {base_url}. Ensure local model server is running.",
+         )
+     except Exception as e:
+         return False, f"Error validating local model server: {e}"


  def validate_anthropic_config(api_key, model):
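
Note: taken together, validate_local_model_config probes the Ollama /api/tags endpoint first and falls back to the OpenAI-compatible /v1/models endpoint. An illustrative call, assuming an Ollama instance at the default URL (per the help text above, non-public hostnames may need adding to SOUSCHEF_ALLOWED_HOSTNAMES):

    success, message = validate_local_model_config(
        base_url="https://localhost:11434",
        model="llama2",
    )
    # e.g. (True, "Model 'llama2' found on Ollama server at https://localhost:11434")
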
@@ -351,7 +460,12 @@ def validate_openai_config(api_key, model, base_url=""):
      try:
          client_kwargs = {"api_key": api_key}
          if base_url:
-             client_kwargs["base_url"] = base_url
+             try:
+                 validated_url = validate_user_provided_url(base_url)
+             except ValueError as exc:
+                 return False, f"Invalid base URL: {exc}"
+
+             client_kwargs["base_url"] = validated_url

          client = openai.OpenAI(**client_kwargs)

@@ -415,11 +529,18 @@ def validate_watson_config(api_key, project_id, base_url=""):
          )

      try:
+         validated_url = "https://us-south.ml.cloud.ibm.com"
+         if base_url:
+             try:
+                 validated_url = validate_user_provided_url(base_url)
+             except ValueError as exc:
+                 return False, f"Invalid base URL: {exc}"
+
          # Initialize Watsonx client
          client = APIClient(
              api_key=api_key,
              project_id=project_id,
-             url=base_url or "https://us-south.ml.cloud.ibm.com",
+             url=validated_url,
          )

          # Test connection by listing available models
@@ -441,9 +562,9 @@ def save_ai_settings(
  ):
      """Save AI settings to configuration file."""
      try:
-         # Use /tmp/.souschef for container compatibility (tmpfs is writable)
-         config_dir = Path("/tmp/.souschef")
-         config_dir.mkdir(exist_ok=True)
+         # Use user-specific directory with secure permissions
+         config_dir = Path.home() / ".souschef"
+         config_dir.mkdir(mode=0o700, parents=True, exist_ok=True)
          config_file = config_dir / "ai_config.json"

          config = {
@@ -552,7 +673,7 @@ def _load_ai_settings_from_env() -> dict[str, str | float | int]:
  def _load_ai_settings_from_file() -> dict[str, Any]:
      """Load AI settings from configuration file."""
      try:
-         config_file = Path("/tmp/.souschef/ai_config.json")
+         config_file = Path.home() / ".souschef" / "ai_config.json"
          if config_file.exists():
              with config_file.open() as f:
                  result = json.load(f)
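
Note: the last two hunks move the settings file from the world-readable /tmp/.souschef to an owner-only directory under the user's home. A minimal round-trip sketch of the new location:

    import json
    from pathlib import Path

    config_dir = Path.home() / ".souschef"
    config_dir.mkdir(mode=0o700, parents=True, exist_ok=True)  # owner-only directory
    config_file = config_dir / "ai_config.json"

    config_file.write_text(json.dumps({"provider": "Local Model"}))
    if config_file.exists():
        print(json.loads(config_file.read_text())["provider"])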