mcp-souschef 3.2.0-py3-none-any.whl → 3.5.2-py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as they appear in their public registry, and is provided for informational purposes only.
souschef/server.py CHANGED
@@ -82,6 +82,7 @@ from souschef.converters.playbook import (
 from souschef.converters.playbook import (
     generate_dynamic_inventory_script as _generate_dynamic_inventory_script,
 )
+from souschef.converters.playbook import get_chef_nodes as _get_chef_nodes
 from souschef.converters.resource import (  # noqa: F401, codeql[py/unused-import]
     _convert_chef_resource_to_ansible,
     _format_ansible_task,
@@ -104,6 +105,7 @@ from souschef.core.constants import (  # noqa: F401, codeql[py/unused-import]
 
 # Import core utilities
 from souschef.core.errors import format_error_with_context
+from souschef.core.logging import configure_logging
 from souschef.core.path_utils import (  # noqa: F401, codeql[py/unused-import]
     _ensure_within_base_path,
     _normalize_path,
@@ -144,6 +146,10 @@ __all__ = [
 # Re-exports of deployment internal functions for backward compatibility (tests)
 # Public re-exports of deployment functions for test backward compatibility
 # Note: MCP tool wrappers exist for some of these, but tests import directly
+# Import converters.template functions
+from souschef.converters.template import (
+    convert_template_with_ai as _convert_template_with_ai,
+)
 from souschef.deployment import (  # noqa: F401, codeql[py/unused-import]
     _analyse_cookbook_for_awx,
     _analyse_cookbooks_directory,
@@ -256,6 +262,11 @@ from souschef.parsers.template import (  # noqa: F401, codeql[py/unused-import]
 )
 from souschef.parsers.template import parse_template as _parse_template
 
+# Import UI helper functions for MCP exposure
+from souschef.ui.pages.chef_server_settings import (
+    _validate_chef_server_connection,
+)
+
 # Backward compatibility re-exports without underscore prefix (for tests)
 # noinspection PyUnusedLocal
 convert_chef_deployment_to_ansible_strategy = (  # noqa: F401
@@ -2543,6 +2554,124 @@ def validate_conversion(
     return _validate_conversion(conversion_type, result_content, output_format)
 
 
+# Chef Server Integration Tools
+
+
+@mcp.tool()
+def validate_chef_server_connection(
+    server_url: str,
+    node_name: str,
+) -> str:
+    """
+    Validate Chef Server connectivity and configuration.
+
+    Tests the Chef Server REST API connection to ensure the server is
+    reachable and properly configured.
+
+    Args:
+        server_url: Base URL of the Chef Server (e.g., https://chef.example.com).
+        node_name: Chef node name for authentication.
+
+    Returns:
+        Success/failure message indicating the connection status.
+
+    """
+    try:
+        success, message = _validate_chef_server_connection(server_url, node_name)
+        result = "✅ Success" if success else "❌ Failed"
+        return f"{result}: {message}"
+    except Exception as e:
+        return f"❌ Error validating Chef Server connection: {e}"
+
+
+@mcp.tool()
+def get_chef_nodes(search_query: str = "*:*") -> str:
+    """
+    Query Chef Server for nodes matching search criteria.
+
+    Retrieves nodes from Chef Server that match the provided search query,
+    extracting role assignments, environment, platform, and IP address
+    information for dynamic inventory generation.
+
+    Args:
+        search_query: Chef search query (default: '*:*' for all nodes).
+
+    Returns:
+        JSON string containing list of matching nodes with their attributes.
+
+    """
+    try:
+        nodes = _get_chef_nodes(search_query)
+        if not nodes:
+            return json.dumps(
+                {
+                    "status": "no_nodes",
+                    "message": "No nodes found matching the search query",
+                    "nodes": [],
+                }
+            )
+        return json.dumps(
+            {
+                "status": "success",
+                "count": len(nodes),
+                "nodes": nodes,
+            }
+        )
+    except Exception as e:
+        return json.dumps(
+            {
+                "status": "error",
+                "message": f"Error querying Chef Server: {str(e)}",
+                "nodes": [],
+            }
+        )
+
+
+# Template Conversion Tools
+
+
+@mcp.tool()
+def convert_template_with_ai(
+    erb_path: str,
+    use_ai_enhancement: bool = True,
+) -> str:
+    """
+    Convert an ERB template to Jinja2 with optional AI assistance.
+
+    Converts Chef ERB templates to Ansible Jinja2 format with optional
+    AI-based validation and improvement for complex Ruby logic that cannot
+    be automatically converted.
+
+    Args:
+        erb_path: Path to the ERB template file.
+        use_ai_enhancement: Whether to use AI for validation (default: True).
+
+    Returns:
+        JSON string with conversion results including success status,
+        Jinja2 output, warnings, and conversion method used.
+
+    """
+    try:
+        if use_ai_enhancement:
+            result = _convert_template_with_ai(erb_path, ai_service=None)
+        else:
+            # Fall back to rule-based conversion
+            from souschef.converters.template import convert_template_file
+
+            result = convert_template_file(erb_path)
+            result["conversion_method"] = "rule-based"
+        return json.dumps(result, indent=2)
+    except Exception as e:
+        return json.dumps(
+            {
+                "success": False,
+                "error": f"Error converting template: {str(e)}",
+                "template": erb_path,
+                "jinja2_output": "",
+            }
+        )
+
+
 # Habitat Parsing Tool
 
 
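Worth noting: `get_chef_nodes` always returns a JSON envelope with a `status` field, an optional `count`, and a `nodes` list, so callers can branch on `status` instead of catching exceptions. A minimal consumer sketch follows; the per-node keys (`name`, `environment`) are illustrative assumptions, since the diff does not show the node schema itself:

```python
import json


def group_nodes_by_environment(tool_output: str) -> dict[str, list[str]]:
    """Group node names by environment from get_chef_nodes output.

    Assumes each node dict carries "name" and "environment" keys;
    those field names are illustrative, not confirmed by the diff.
    """
    payload = json.loads(tool_output)
    if payload.get("status") != "success":
        # "no_nodes" and "error" statuses both carry an empty "nodes" list.
        return {}
    groups: dict[str, list[str]] = {}
    for node in payload["nodes"]:
        env = node.get("environment", "unknown")
        groups.setdefault(env, []).append(node.get("name", ""))
    return groups


# Example using the envelope shape shown in the diff:
sample = json.dumps(
    {"status": "success", "count": 1,
     "nodes": [{"name": "web01", "environment": "prod"}]}
)
print(group_nodes_by_environment(sample))  # {'prod': ['web01']}
```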
@@ -3734,6 +3863,7 @@ def main() -> None:
 
     This is the main entry point for running the server.
     """
+    configure_logging()
     mcp.run()
 
 
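The only behavioural change to `main()` is that logging is configured before the server starts, so handlers exist before any tool runs. A minimal sketch of the equivalent ordering; this stand-in `configure_logging` is an assumption, since the diff does not show the body of `souschef.core.logging`:

```python
import logging


def configure_logging() -> None:
    # Hypothetical stand-in for souschef.core.logging.configure_logging;
    # the real implementation is not shown in this diff.
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s %(name)s %(levelname)s: %(message)s",
    )


def main() -> None:
    configure_logging()  # install handlers before the server blocks
    logging.getLogger("souschef").info("starting MCP server")
    # mcp.run() would block here in the real entry point
```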
souschef/ui/app.py CHANGED
@@ -24,6 +24,7 @@ R = TypeVar("R")
 from souschef.core import _ensure_within_base_path, _normalize_path
 from souschef.core.path_utils import safe_exists, safe_glob, safe_is_dir, safe_is_file
 from souschef.ui.pages.ai_settings import show_ai_settings_page
+from souschef.ui.pages.chef_server_settings import show_chef_server_settings_page
 from souschef.ui.pages.cookbook_analysis import show_cookbook_analysis_page
 
 # Constants
@@ -35,6 +36,7 @@ NAV_MIGRATION_PLANNING = "Migration Planning"
 NAV_DEPENDENCY_MAPPING = "Dependency Mapping"
 NAV_VALIDATION_REPORTS = "Validation Reports"
 NAV_AI_SETTINGS = "AI Settings"
+NAV_CHEF_SERVER_SETTINGS = "Chef Server Settings"
 NAV_COOKBOOK_ANALYSIS = "Cookbook Analysis"
 BUTTON_ANALYSE_DEPENDENCIES = "Analyse Dependencies"
 INPUT_METHOD_DIRECTORY_PATH = "Directory Path"
@@ -125,13 +127,14 @@ def main() -> None:
     # Navigation section
     st.subheader("Navigation")
 
-    col1, col2, col3, col4, col5 = st.columns(5)
+    col1, col2, col3 = st.columns(3)
+    col4, col5, col6 = st.columns(3)
 
     with col1:
         if st.button(
             "Cookbook Analysis",
             type="primary" if page == NAV_COOKBOOK_ANALYSIS else "secondary",
-            width="stretch",
+            use_container_width=True,
             key="nav_cookbook_analysis",
         ):
             st.session_state.current_page = NAV_COOKBOOK_ANALYSIS
@@ -141,7 +144,7 @@ def main() -> None:
         if st.button(
             "Migration Planning",
             type="primary" if page == NAV_MIGRATION_PLANNING else "secondary",
-            width="stretch",
+            use_container_width=True,
             key="nav_migration_planning",
         ):
             st.session_state.current_page = NAV_MIGRATION_PLANNING
@@ -151,7 +154,7 @@ def main() -> None:
         if st.button(
             "Dependency Mapping",
             type="primary" if page == NAV_DEPENDENCY_MAPPING else "secondary",
-            width="stretch",
+            use_container_width=True,
             key="nav_dependency_mapping",
        ):
             st.session_state.current_page = NAV_DEPENDENCY_MAPPING
@@ -161,7 +164,7 @@ def main() -> None:
         if st.button(
             "Validation Reports",
             type="primary" if page == NAV_VALIDATION_REPORTS else "secondary",
-            width="stretch",
+            use_container_width=True,
             key="nav_validation_reports",
         ):
             st.session_state.current_page = NAV_VALIDATION_REPORTS
@@ -171,12 +174,22 @@ def main() -> None:
         if st.button(
             "AI Settings",
             type="primary" if page == NAV_AI_SETTINGS else "secondary",
-            width="stretch",
+            use_container_width=True,
             key="nav_ai_settings",
         ):
             st.session_state.current_page = NAV_AI_SETTINGS
             st.rerun()
 
+    with col6:
+        if st.button(
+            "Chef Server",
+            type="primary" if page == NAV_CHEF_SERVER_SETTINGS else "secondary",
+            use_container_width=True,
+            key="nav_chef_server_settings",
+        ):
+            st.session_state.current_page = NAV_CHEF_SERVER_SETTINGS
+            st.rerun()
+
     st.divider()
 
     # Page routing
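Each navigation button repeats the same `st.button` pattern with only the label, target page, and widget key varying. A hypothetical table-driven refactor of those six blocks (not how the package actually structures them), keeping the same 3 + 3 column layout:

```python
import streamlit as st

# Hypothetical refactor: drive the six nav buttons from one table.
NAV_BUTTONS = [
    ("Cookbook Analysis", "Cookbook Analysis", "nav_cookbook_analysis"),
    ("Migration Planning", "Migration Planning", "nav_migration_planning"),
    ("Dependency Mapping", "Dependency Mapping", "nav_dependency_mapping"),
    ("Validation Reports", "Validation Reports", "nav_validation_reports"),
    ("AI Settings", "AI Settings", "nav_ai_settings"),
    ("Chef Server", "Chef Server Settings", "nav_chef_server_settings"),
]


def render_nav(page: str) -> None:
    # Two rows of three columns, matching the 3 + 3 layout in the diff.
    columns = st.columns(3) + st.columns(3)
    for col, (label, target, key) in zip(columns, NAV_BUTTONS):
        with col:
            if st.button(
                label,
                type="primary" if page == target else "secondary",
                use_container_width=True,
                key=key,
            ):
                st.session_state.current_page = target
                st.rerun()
```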
@@ -192,6 +205,7 @@ def _route_to_page(page: str) -> None:
         NAV_DEPENDENCY_MAPPING: show_dependency_mapping,
         NAV_VALIDATION_REPORTS: show_validation_reports,
         NAV_AI_SETTINGS: show_ai_settings_page,
+        NAV_CHEF_SERVER_SETTINGS: show_chef_server_settings_page,
     }
 
     route_func = page_routes.get(page)
souschef/ui/pages/ai_settings.py CHANGED
@@ -8,10 +8,11 @@ import json
 import os
 from pathlib import Path
 from typing import Any
-from urllib.parse import urlparse, urlunparse
 
 import streamlit as st
 
+from souschef.core.url_validation import validate_user_provided_url
+
 # AI Provider Constants
 ANTHROPIC_PROVIDER = "Anthropic (Claude)"
 OPENAI_PROVIDER = "OpenAI (GPT)"
@@ -21,6 +22,7 @@ LOCAL_PROVIDER = "Local Model"
 
 # UI Constants
 API_KEY_LABEL = "API Key"
+REQUESTS_NOT_INSTALLED_MESSAGE = "requests library not installed"
 
 # Import AI libraries (optional dependencies)
 try:
@@ -34,9 +36,9 @@ except ImportError:
     APIClient = None
 
 try:
-    import requests  # type: ignore[import-untyped]
+    import requests
 except ImportError:
-    requests = None
+    requests = None  # type: ignore[assignment]
 
 try:
     import openai
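This hunk keeps the optional-dependency pattern used throughout the module: import at module scope, fall back to a `None` sentinel, and move the type-checker suppression from the import to the sentinel assignment. A minimal standalone sketch of the same pattern:

```python
# Optional-dependency pattern, as used in the diff: the sentinel lets
# callers degrade gracefully instead of crashing at import time.
try:
    import requests
except ImportError:
    requests = None  # type: ignore[assignment]


def fetch_tags(base_url: str) -> tuple[bool, str]:
    """Probe an endpoint, failing softly when requests is missing."""
    if requests is None:
        return False, "requests library not installed"
    response = requests.get(f"{base_url}/api/tags", timeout=5)
    return response.ok, f"HTTP {response.status_code}"
```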
@@ -70,8 +72,26 @@ def _get_model_options(provider):
 def _render_api_configuration(provider):
     """Render API configuration UI and return config values."""
     if provider == LOCAL_PROVIDER:
-        st.info("Local model configuration will be added in a future update.")
-        return "", "", ""
+        col1, col2 = st.columns(2)
+        with col1:
+            base_url = st.text_input(
+                "Local Server URL",
+                help=(
+                    "HTTPS URL of your local model server (Ollama, llama.cpp, vLLM, "
+                    "etc.). Add non-public hosts to SOUSCHEF_ALLOWED_HOSTNAMES."
+                ),
+                key="base_url_input",
+                placeholder="https://localhost:11434 (for Ollama)",
+                value="https://localhost:11434",
+            )
+        with col2:
+            model = st.text_input(
+                "Model Name",
+                help="Name of the model to use from your local server",
+                key="model_input",
+                placeholder="e.g., llama2, mistral, neural-chat",
+            )
+        return model, base_url, ""
     elif provider == WATSON_PROVIDER:
         col1, col2, col3 = st.columns(3)
         with col1:
@@ -258,7 +278,7 @@ def show_ai_settings_page():
 
 def validate_ai_configuration(provider, api_key, model, base_url="", project_id=""):
     """Validate the AI configuration by making a test API call."""
-    if not api_key and provider != "Local Model":
+    if not api_key and provider != LOCAL_PROVIDER:
         st.error("API key is required for validation.")
         return
 
@@ -276,8 +296,10 @@ def validate_ai_configuration(provider, api_key, model, base_url="", project_id=
         success, message = validate_watson_config(api_key, project_id, base_url)
     elif provider == LIGHTSPEED_PROVIDER:
         success, message = validate_lightspeed_config(api_key, model, base_url)
+    elif provider == LOCAL_PROVIDER:
+        success, message = validate_local_model_config(base_url, model)
     else:
-        st.info("Local model validation not implemented yet.")
+        st.error("Unknown provider selected.")
         return
 
     if success:
@@ -299,29 +321,116 @@ def _sanitize_lightspeed_base_url(base_url: str) -> str:
     - Strip any user-supplied path, query, or fragment.
     """
    default_url = "https://api.redhat.com"
-    allowed_hosts = {
-        "api.redhat.com",
-    }
+    allowed_hosts = {"api.redhat.com"}
+
+    return validate_user_provided_url(
+        base_url,
+        default_url=default_url,
+        allowed_hosts=allowed_hosts,
+        strip_path=True,
+    )
+
+
+def _check_ollama_server(base_url: str, model: str) -> tuple[bool, str]:
+    """Check Ollama server availability and models."""
+    if requests is None:
+        return False, REQUESTS_NOT_INSTALLED_MESSAGE
+
+    response = requests.get(f"{base_url}/api/tags", timeout=5)
+    if response.status_code == 200:
+        models_data = response.json()
+        available = [m.get("name", "") for m in models_data.get("models", [])]
+
+        if model and model in available:
+            return True, f"Model '{model}' found on Ollama server"
+        elif available:
+            models_str = ", ".join(available[:3])
+            return True, f"Ollama server with models: {models_str}"
+        else:
+            return False, "Ollama server found but no models available"
+    return False, "Ollama API not responding"
+
+
+def _check_openai_compatible_server(base_url: str, model: str) -> tuple[bool, str]:
+    """Check OpenAI-compatible server availability and models."""
+    if requests is None:
+        return False, REQUESTS_NOT_INSTALLED_MESSAGE
+
+    response = requests.get(f"{base_url}/v1/models", timeout=5)
+    if response.status_code == 200:
+        models_data = response.json()
+        available = [m.get("id", "") for m in models_data.get("data", [])]
+
+        if model and model in available:
+            return True, f"Model '{model}' found on server"
+        elif available:
+            return True, f"OpenAI-compatible server running at {base_url}"
+        else:
+            return False, "Server found but no models available"
+    return False, "OpenAI API not responding"
+
 
-    if not base_url or not str(base_url).strip():
-        return default_url
+def validate_local_model_config(base_url="", model=""):
+    """
+    Validate local model server configuration.
 
-    parsed = urlparse(base_url)
+    Supports multiple local model servers:
+    - Ollama (default: https://localhost:11434)
+    - llama.cpp server (default: https://localhost:8000)
+    - vLLM (default: https://localhost:8000)
+    - LM Studio (default: https://localhost:1234)
 
-    # If scheme is missing, assume https
-    if not parsed.scheme:
-        parsed = parsed._replace(scheme="https")
+    Args:
+        base_url: Base URL of local model server
+        model: Model name to check availability
 
-    if parsed.scheme.lower() != "https":
-        raise ValueError("Base URL must use HTTPS.")
+    Returns:
+        Tuple of (success: bool, message: str)
 
-    hostname = (parsed.hostname or "").lower()
-    if hostname not in allowed_hosts:
-        raise ValueError("Base URL host must be a supported Red Hat domain.")
+    """
+    if requests is None:
+        return False, REQUESTS_NOT_INSTALLED_MESSAGE
 
-    # Normalize to scheme + netloc only; drop path/query/fragment.
-    cleaned = parsed._replace(path="", params="", query="", fragment="")
-    return urlunparse(cleaned)
+    # Default to Ollama if no URL provided
+    if not base_url:
+        base_url = "https://localhost:11434"
+
+    try:
+        base_url = validate_user_provided_url(base_url)
+    except ValueError as exc:
+        return False, f"Invalid local model server URL: {exc}"
+
+    base_url = base_url.rstrip("/")
+
+    try:
+        # Try Ollama API first
+        success, message = _check_ollama_server(base_url, model)
+        if success or "Ollama server" in message:
+            return success, f"{message} at {base_url}"
+
+        # Try OpenAI-compatible API
+        success, message = _check_openai_compatible_server(base_url, model)
+        if success:
+            return success, message
+
+        # If neither endpoint works, server might not be running
+        return False, (
+            f"Cannot connect to local model server at {base_url}. "
+            "Make sure it's running."
+        )
+
+    except requests.exceptions.Timeout:
+        return (
+            False,
+            f"Connection timed out. Is server running at {base_url}?",
+        )
+    except requests.exceptions.ConnectionError:
+        return (
+            False,
+            f"Cannot reach {base_url}. Ensure local model server is running.",
+        )
+    except Exception as e:
+        return False, f"Error validating local model server: {e}"
 
 
 def validate_anthropic_config(api_key, model):
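Taken together, the new validator probes the Ollama tags endpoint first and falls back to the OpenAI-compatible `/v1/models` route before reporting a connection failure. A rough usage sketch, assuming a local Ollama instance reachable over HTTPS (per the module's `validate_user_provided_url` policy); the import path is inferred from app.py's `souschef.ui.pages` imports rather than stated in the diff:

```python
# Hypothetical usage of the new local-model validator.
from souschef.ui.pages.ai_settings import validate_local_model_config

ok, message = validate_local_model_config(
    base_url="https://localhost:11434",  # Ollama default shown in the diff
    model="mistral",
)
if ok:
    print(f"Local model ready: {message}")
else:
    print(f"Validation failed: {message}")
```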
@@ -351,7 +460,12 @@ def validate_openai_config(api_key, model, base_url=""):
     try:
         client_kwargs = {"api_key": api_key}
         if base_url:
-            client_kwargs["base_url"] = base_url
+            try:
+                validated_url = validate_user_provided_url(base_url)
+            except ValueError as exc:
+                return False, f"Invalid base URL: {exc}"
+
+            client_kwargs["base_url"] = validated_url
 
         client = openai.OpenAI(**client_kwargs)
 
@@ -415,11 +529,18 @@ def validate_watson_config(api_key, project_id, base_url=""):
         )
 
     try:
+        validated_url = "https://us-south.ml.cloud.ibm.com"
+        if base_url:
+            try:
+                validated_url = validate_user_provided_url(base_url)
+            except ValueError as exc:
+                return False, f"Invalid base URL: {exc}"
+
         # Initialize Watsonx client
         client = APIClient(
             api_key=api_key,
             project_id=project_id,
-            url=base_url or "https://us-south.ml.cloud.ibm.com",
+            url=validated_url,
         )
 
         # Test connection by listing available models
@@ -441,9 +562,9 @@ def save_ai_settings(
 ):
     """Save AI settings to configuration file."""
     try:
-        # Use /tmp/.souschef for container compatibility (tmpfs is writable)
-        config_dir = Path("/tmp/.souschef")
-        config_dir.mkdir(exist_ok=True)
+        # Use user-specific directory with secure permissions
+        config_dir = Path.home() / ".souschef"
+        config_dir.mkdir(mode=0o700, parents=True, exist_ok=True)
         config_file = config_dir / "ai_config.json"
 
         config = {
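The settings file moves from world-readable `/tmp/.souschef` to a per-user directory created with mode `0o700`, so other local users can no longer read saved API keys. A small standard-library sketch of what that permission change looks like in practice:

```python
import json
import stat
from pathlib import Path

# Mirrors the diff's directory handling: owner-only permissions.
config_dir = Path.home() / ".souschef"
config_dir.mkdir(mode=0o700, parents=True, exist_ok=True)

config_file = config_dir / "ai_config.json"
config_file.write_text(json.dumps({"provider": "Local Model"}))

# On POSIX, the mode bits should be rwx------ for a freshly created
# directory (mkdir does not re-chmod one that already exists).
print(oct(stat.S_IMODE(config_dir.stat().st_mode)))
```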
@@ -552,7 +673,7 @@ def _load_ai_settings_from_env() -> dict[str, str | float | int]:
 def _load_ai_settings_from_file() -> dict[str, Any]:
     """Load AI settings from configuration file."""
     try:
-        config_file = Path("/tmp/.souschef/ai_config.json")
+        config_file = Path.home() / ".souschef" / "ai_config.json"
         if config_file.exists():
             with config_file.open() as f:
                 result = json.load(f)