mcp-souschef 2.5.3__py3-none-any.whl → 3.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,36 @@
1
#!/usr/bin/env python3
"""Health check endpoint for SousChef UI Docker container."""

import json
import sys
from pathlib import Path

# Ensure the application root (one level above this script) is importable
# no matter which working directory the container launches us from.
app_path = Path(__file__).parent.parent
app_dir = str(app_path)
if app_dir not in sys.path:
    sys.path.insert(0, app_dir)
14
def main():
    """Run health check.

    Emits a one-line JSON status report on stdout and exits 0 (healthy)
    or 1 (unhealthy) so a Docker HEALTHCHECK can interpret the result.
    """
    try:
        # Importing a core module proves the Python environment is intact.
        from souschef.core.constants import VERSION

        report = {"status": "healthy", "service": "souschef-ui", "version": VERSION}
        sys.stdout.write(json.dumps(report))
        sys.exit(0)
    except Exception as e:
        report = {"status": "unhealthy", "service": "souschef-ui", "error": str(e)}
        sys.stdout.write(json.dumps(report))
        sys.exit(1)
33
+
34
+
35
# Entry point: run the probe only when executed directly (e.g. by a
# Docker HEALTHCHECK command), never on import.
if __name__ == "__main__":
    main()
@@ -0,0 +1,563 @@
1
+ """
2
+ AI Settings Page for SousChef UI.
3
+
4
+ Configure and validate AI provider settings for the SousChef MCP server.
5
+ """
6
+
7
+ import json
8
+ import os
9
+ from pathlib import Path
10
+ from typing import Any
11
+ from urllib.parse import urlparse, urlunparse
12
+
13
+ import streamlit as st
14
+
15
# Human-readable provider labels; these exact strings appear in the provider
# selectbox and are compared against throughout the module.
ANTHROPIC_PROVIDER = "Anthropic (Claude)"
OPENAI_PROVIDER = "OpenAI (GPT)"
WATSON_PROVIDER = "IBM Watsonx"
LIGHTSPEED_PROVIDER = "Red Hat Lightspeed"
LOCAL_PROVIDER = "Local Model"

# Shared label reused by every provider's API-key text input.
API_KEY_LABEL = "API Key"
24
+
25
# Optional AI/client libraries: each is bound to None when unavailable so
# callers can feature-detect with a simple `is None` check.
try:
    import anthropic
except ImportError:
    anthropic = None  # type: ignore[assignment]

try:
    import openai
except ImportError:
    openai = None  # type: ignore[assignment]

try:
    import requests  # type: ignore[import-untyped]
except ImportError:
    requests = None

try:
    from ibm_watsonx_ai import APIClient  # type: ignore[import-not-found]
except ImportError:
    APIClient = None
45
+
46
+
47
def _get_model_options(provider):
    """Return the list of model identifiers selectable for *provider*."""
    catalog = {
        ANTHROPIC_PROVIDER: [
            "claude-3-5-sonnet-20241022",
            "claude-3-5-haiku-20241022",
            "claude-3-opus-20240229",
        ],
        OPENAI_PROVIDER: ["gpt-4o", "gpt-4o-mini", "gpt-4-turbo", "gpt-3.5-turbo"],
        WATSON_PROVIDER: [
            "meta-llama/llama-3-70b-instruct",
            "meta-llama/llama-3-8b-instruct",
            "ibm/granite-13b-instruct-v2",
            "ibm/granite-13b-chat-v2",
        ],
        LIGHTSPEED_PROVIDER: ["codellama/CodeLlama-34b-Instruct-hf"],
    }
    # Any unknown provider (including LOCAL_PROVIDER) gets the local stub.
    return catalog.get(provider, ["local-model"])
68
+
69
+
70
def _render_api_configuration(provider):
    """Render the API credential inputs for *provider*.

    Returns an ``(api_key, base_url, project_id)`` tuple; fields that do
    not apply to the chosen provider come back as empty strings.
    """
    if provider == LOCAL_PROVIDER:
        st.info("Local model configuration will be added in a future update.")
        return "", "", ""

    if provider == WATSON_PROVIDER:
        # Watsonx needs three credentials: key, project, endpoint.
        key_col, project_col, url_col = st.columns(3)
        with key_col:
            api_key = st.text_input(
                API_KEY_LABEL,
                type="password",
                help="Enter your IBM Watsonx API key",
                key="api_key_input",
                placeholder="your-watsonx-api-key",
            )
        with project_col:
            project_id = st.text_input(
                "Project ID",
                type="password",
                help="Enter your IBM Watsonx Project ID",
                key="project_id_input",
                placeholder="your-project-id",
            )
        with url_col:
            base_url = st.text_input(
                "Base URL",
                help="IBM Watsonx API base URL",
                key="base_url_input",
                placeholder="https://us-south.ml.cloud.ibm.com",
            )
        return api_key, base_url, project_id

    if provider == LIGHTSPEED_PROVIDER:
        key_col, url_col = st.columns(2)
        with key_col:
            api_key = st.text_input(
                API_KEY_LABEL,
                type="password",
                help="Enter your Red Hat Lightspeed API key",
                key="api_key_input",
                placeholder="your-lightspeed-api-key",
            )
        with url_col:
            base_url = st.text_input(
                "Base URL",
                help="Red Hat Lightspeed API base URL",
                key="base_url_input",
                placeholder="https://api.redhat.com",
            )
        return api_key, base_url, ""

    # Anthropic / OpenAI share a two-column layout; only OpenAI exposes an
    # optional base-URL override.
    short_name = provider.split(" ")[0]
    key_col, url_col = st.columns(2)
    with key_col:
        api_key = st.text_input(
            API_KEY_LABEL,
            type="password",
            help=f"Enter your {short_name} API key",
            key="api_key_input",
            placeholder=f"sk-... (for {short_name})",
        )
    with url_col:
        base_url = ""
        if provider == OPENAI_PROVIDER:
            base_url = st.text_input(
                "Base URL (Optional)",
                help="Custom OpenAI API base URL",
                key="base_url_input",
                placeholder="https://api.openai.com/v1",
            )
    return api_key, base_url, ""
140
+
141
+
142
def _render_advanced_settings():
    """Render the advanced tuning controls.

    Returns a ``(temperature, max_tokens)`` tuple from the slider and the
    number input inside the "Advanced Settings" expander.
    """
    with st.expander("Advanced Settings"):
        temp_col, tokens_col = st.columns(2)
        with temp_col:
            temperature = st.slider(
                "Temperature",
                min_value=0.0,
                max_value=2.0,
                value=0.7,
                step=0.1,
                help="Controls randomness in AI responses "
                "(0.0 = deterministic, 2.0 = very random)",
                key="temperature_slider",
            )
        with tokens_col:
            max_tokens = st.number_input(
                "Max Tokens",
                min_value=100,
                max_value=100000,
                value=4000,
                help="Maximum number of tokens to generate",
                key="max_tokens_input",
            )
    return temperature, max_tokens
167
+
168
+
169
def _render_validation_section(
    provider, api_key, model, base_url, project_id, temperature, max_tokens
):
    """Render the side-by-side validate / save buttons for the current form."""
    st.subheader("Configuration Validation")
    validate_col, save_col = st.columns([1, 1])

    with validate_col:
        clicked = st.button(
            "Validate Configuration",
            type="primary",
            width="stretch",
            key="validate_ai_config",
        )
        if clicked:
            validate_ai_configuration(provider, api_key, model, base_url, project_id)

    with save_col:
        if st.button("Save Settings", width="stretch", key="save_ai_settings"):
            save_ai_settings(
                provider, api_key, model, base_url, temperature, max_tokens, project_id
            )
188
+
189
+
190
def show_ai_settings_page():
    """Show the AI settings configuration page."""
    # Navigation row: small left column hosts the back button.
    back_col, _ = st.columns([1, 4])
    with back_col:
        if st.button(
            "← Back to Dashboard",
            help="Return to main dashboard",
            key="back_to_dashboard_from_ai",
        ):
            st.session_state.current_page = "Dashboard"
            st.rerun()

    st.markdown("""
    Configure your AI provider settings for the SousChef MCP server.
    These settings determine which AI model will be used for Chef to Ansible
    conversions.
    """)

    # Provider / model selection.
    st.subheader("AI Provider Configuration")

    provider_col, model_col = st.columns([1, 2])

    with provider_col:
        ai_provider = st.selectbox(
            "AI Provider",
            [
                ANTHROPIC_PROVIDER,
                OPENAI_PROVIDER,
                WATSON_PROVIDER,
                LIGHTSPEED_PROVIDER,
                LOCAL_PROVIDER,
            ],
            help="Select your preferred AI provider",
            key="ai_provider_select",
        )

    with model_col:
        selected_model = st.selectbox(
            "Model",
            _get_model_options(ai_provider),
            help="Select the AI model to use",
            key="ai_model_select",
        )

    # Credentials, then tuning, then the validate/save row.
    st.subheader("API Configuration")
    api_key, base_url, project_id = _render_api_configuration(ai_provider)

    temperature, max_tokens = _render_advanced_settings()

    _render_validation_section(
        ai_provider,
        api_key,
        selected_model,
        base_url,
        project_id,
        temperature,
        max_tokens,
    )

    # Show whatever configuration is currently persisted.
    display_current_settings()
257
+
258
+
259
def validate_ai_configuration(provider, api_key, model, base_url="", project_id=""):
    """Validate the AI configuration by making a test API call.

    Dispatches to the provider-specific validator and reports the outcome
    through Streamlit success/error widgets. Short-circuits with an error
    when required credentials are missing.
    """
    # Fix: compare against the LOCAL_PROVIDER constant instead of the
    # duplicated string literal "Local Model", so this check cannot drift
    # from the label used everywhere else in the module.
    if not api_key and provider != LOCAL_PROVIDER:
        st.error("API key is required for validation.")
        return

    if provider == WATSON_PROVIDER and not project_id:
        st.error("Project ID is required for IBM Watsonx validation.")
        return

    with st.spinner("Validating AI configuration..."):
        try:
            if provider == ANTHROPIC_PROVIDER:
                success, message = validate_anthropic_config(api_key, model)
            elif provider == OPENAI_PROVIDER:
                success, message = validate_openai_config(api_key, model, base_url)
            elif provider == WATSON_PROVIDER:
                success, message = validate_watson_config(api_key, project_id, base_url)
            elif provider == LIGHTSPEED_PROVIDER:
                success, message = validate_lightspeed_config(api_key, model, base_url)
            else:
                # Local models have no remote endpoint to probe yet.
                st.info("Local model validation not implemented yet.")
                return

            if success:
                st.success(f"Configuration validated successfully! {message}")
            else:
                st.error(f"Validation failed: {message}")

        except Exception as e:
            # Validators normally return (False, msg); this guards anything
            # unexpected so the UI never shows a raw traceback.
            st.error(f"Validation error: {str(e)}")
290
+
291
+
292
+ def _sanitize_lightspeed_base_url(base_url: str) -> str:
293
+ """
294
+ Sanitize and validate the Red Hat Lightspeed base URL to prevent SSRF.
295
+
296
+ - Default to the standard Lightspeed endpoint if no URL is provided.
297
+ - Only allow HTTPS scheme.
298
+ - Restrict host to known Red Hat-owned Lightspeed domains.
299
+ - Strip any user-supplied path, query, or fragment.
300
+ """
301
+ default_url = "https://api.redhat.com"
302
+ allowed_hosts = {
303
+ "api.redhat.com",
304
+ }
305
+
306
+ if not base_url or not str(base_url).strip():
307
+ return default_url
308
+
309
+ parsed = urlparse(base_url)
310
+
311
+ # If scheme is missing, assume https
312
+ if not parsed.scheme:
313
+ parsed = parsed._replace(scheme="https")
314
+
315
+ if parsed.scheme.lower() != "https":
316
+ raise ValueError("Base URL must use HTTPS.")
317
+
318
+ hostname = (parsed.hostname or "").lower()
319
+ if hostname not in allowed_hosts:
320
+ raise ValueError("Base URL host must be a supported Red Hat domain.")
321
+
322
+ # Normalize to scheme + netloc only; drop path/query/fragment.
323
+ cleaned = parsed._replace(path="", params="", query="", fragment="")
324
+ return urlunparse(cleaned)
325
+
326
+
327
def validate_anthropic_config(api_key, model):
    """Validate Anthropic API configuration.

    Returns a ``(success, message)`` tuple instead of raising, so the UI
    layer can render the outcome directly.
    """
    if anthropic is None:
        return False, "Anthropic library not installed"

    try:
        # A minimal one-message request is enough to prove key and model.
        client = anthropic.Anthropic(api_key=api_key)
        client.messages.create(
            model=model, max_tokens=10, messages=[{"role": "user", "content": "Hello"}]
        )
    except Exception as e:
        return False, f"Connection failed: {e}"

    return True, f"Successfully connected to {model}"
344
+
345
+
346
def validate_openai_config(api_key, model, base_url=""):
    """Validate OpenAI API configuration.

    Returns a ``(success, message)`` tuple; never raises. ``base_url``
    optionally points the client at a compatible custom endpoint.
    """
    if openai is None:
        return False, "OpenAI library not installed. Run: pip install openai"

    try:
        kwargs = {"api_key": api_key}
        if base_url:
            kwargs["base_url"] = base_url
        client = openai.OpenAI(**kwargs)

        # Cheapest possible round-trip to confirm credentials and model.
        client.chat.completions.create(
            model=model, messages=[{"role": "user", "content": "Hello"}], max_tokens=5
        )
    except Exception as e:
        return False, f"Connection failed: {e}"

    return True, f"Successfully connected to {model}"
367
+
368
+
369
def validate_lightspeed_config(api_key, model, base_url=""):
    """Validate Red Hat Lightspeed API configuration.

    Returns a ``(success, message)`` tuple; never raises.
    """
    if requests is None:
        return False, "Requests library not installed. Run: pip install requests"

    try:
        # Reject non-Red-Hat hosts before any network traffic (SSRF guard).
        sanitized_url = _sanitize_lightspeed_base_url(base_url)

        headers = {
            "Authorization": f"Bearer {api_key}",
            "Content-Type": "application/json",
        }
        # A tiny completion request is enough to exercise the API key.
        probe = {
            "model": model,
            "prompt": "Hello",
            "max_tokens": 5,
        }

        response = requests.post(
            f"{sanitized_url}/v1/completions",
            headers=headers,
            json=probe,
            timeout=10,
        )

        if response.status_code != 200:
            return False, (
                f"API request failed with status {response.status_code}: "
                f"{response.text}"
            )
        return True, f"Successfully connected to Red Hat Lightspeed {model}"

    except Exception as e:
        return False, f"Connection failed: {e}"
408
+
409
+
410
def validate_watson_config(api_key, project_id, base_url=""):
    """Validate IBM Watsonx API configuration.

    Returns a ``(success, message)`` tuple; never raises.
    """
    if APIClient is None:
        return False, (
            "IBM Watsonx AI library not installed. Run: pip install ibm-watsonx-ai"
        )

    try:
        # Default to the us-south regional endpoint when none is supplied.
        client = APIClient(
            api_key=api_key,
            project_id=project_id,
            url=base_url or "https://us-south.ml.cloud.ibm.com",
        )

        # Listing model specs both exercises auth and confirms availability.
        models = client.foundation_models.get_model_specs()
        if not models:
            return False, "Connected to IBM Watsonx but no models available."
        return True, (
            f"Successfully connected to IBM Watsonx. "
            f"Found {len(models)} available models."
        )

    except Exception as e:
        return False, f"Connection failed: {e}"
437
+
438
+
439
def save_ai_settings(
    provider, api_key, model, base_url, temperature, max_tokens, project_id=""
):
    """Save AI settings to configuration file.

    Persists the configuration to ``/tmp/.souschef/ai_config.json`` (tmpfs is
    writable inside the container) and mirrors it into ``st.session_state``
    for immediate use. Reports success or failure via Streamlit widgets.
    """
    try:
        # Use /tmp/.souschef for container compatibility (tmpfs is writable).
        # Security fix: the file may hold an API key and /tmp is shared, so
        # restrict the directory to 0o700 and the file to 0o600 (owner-only).
        config_dir = Path("/tmp/.souschef")
        config_dir.mkdir(mode=0o700, exist_ok=True)
        config_file = config_dir / "ai_config.json"

        config = {
            "provider": provider,
            "model": model,
            # Empty strings are normalized to None so consumers can rely on
            # truthiness checks.
            "api_key": api_key if api_key else None,
            "base_url": base_url if base_url else None,
            "project_id": project_id if project_id else None,
            "temperature": temperature,
            "max_tokens": max_tokens,
            "last_updated": str(st.session_state.get("timestamp", "Unknown")),
        }

        with config_file.open("w") as f:
            json.dump(config, f, indent=2)
        # Tighten permissions even if the file pre-existed with a wider mode.
        config_file.chmod(0o600)

        # Store in session state for immediate use.
        st.session_state.ai_config = config

        st.success("Settings saved successfully!")

    except Exception as e:
        st.error(f"Failed to save settings: {str(e)}")
470
+
471
+
472
def display_current_settings():
    """Display current AI settings."""
    st.subheader("Current Configuration")

    # Loader checks file, then session state, then environment variables.
    config = load_ai_settings()

    if not config:
        st.info("No AI configuration found. Please configure your settings above.")
        return

    left, right = st.columns(2)
    with left:
        st.metric("Provider", config.get("provider", "Not configured"))
        st.metric("Model", config.get("model", "Not configured"))
    with right:
        st.metric("Temperature", config.get("temperature", "Not set"))
        st.metric("Max Tokens", config.get("max_tokens", "Not set"))

    if config.get("last_updated"):
        st.caption(f"Last updated: {config['last_updated']}")

    # Never echo the key itself; only report whether one is present.
    if config.get("api_key"):
        st.info("API key is configured and stored securely.")
    else:
        st.warning("No API key configured.")
500
+
501
+
502
def load_ai_settings() -> dict[str, Any]:
    """Load AI settings from file, session state, or environment variables.

    Precedence: saved config file first, then ``st.session_state``, then
    ``SOUSCHEF_AI_*`` environment variables. Returns an empty dict when no
    source yields a non-empty mapping.
    """
    loaders = (
        _load_ai_settings_from_file,
        lambda: st.session_state.get("ai_config", {}),
        _load_ai_settings_from_env,
    )
    for loader in loaders:
        candidate = loader()
        if isinstance(candidate, dict) and candidate:
            return candidate
    return {}
517
+
518
+
519
+ def _load_ai_settings_from_env() -> dict[str, str | float | int]:
520
+ """Load AI settings from environment variables."""
521
+ from contextlib import suppress
522
+
523
+ env_config: dict[str, str | float | int] = {}
524
+ env_mappings = {
525
+ "SOUSCHEF_AI_PROVIDER": "provider",
526
+ "SOUSCHEF_AI_MODEL": "model",
527
+ "SOUSCHEF_AI_API_KEY": "api_key",
528
+ "SOUSCHEF_AI_BASE_URL": "base_url",
529
+ "SOUSCHEF_AI_PROJECT_ID": "project_id",
530
+ }
531
+
532
+ # Handle string values
533
+ for env_var, config_key in env_mappings.items():
534
+ env_value = os.environ.get(env_var)
535
+ if env_value:
536
+ env_config[config_key] = env_value
537
+
538
+ # Handle numeric values with error suppression
539
+ temp_value = os.environ.get("SOUSCHEF_AI_TEMPERATURE")
540
+ if temp_value:
541
+ with suppress(ValueError):
542
+ env_config["temperature"] = float(temp_value)
543
+
544
+ tokens_value = os.environ.get("SOUSCHEF_AI_MAX_TOKENS")
545
+ if tokens_value:
546
+ with suppress(ValueError):
547
+ env_config["max_tokens"] = int(tokens_value)
548
+
549
+ return env_config
550
+
551
+
552
+ def _load_ai_settings_from_file() -> dict[str, Any]:
553
+ """Load AI settings from configuration file."""
554
+ try:
555
+ config_file = Path("/tmp/.souschef/ai_config.json")
556
+ if config_file.exists():
557
+ with config_file.open() as f:
558
+ result = json.load(f)
559
+ return result if isinstance(result, dict) else {}
560
+ except Exception as e:
561
+ st.warning(f"Unable to load saved AI settings: {e}")
562
+
563
+ return {}