mcp-souschef 2.2.0__py3-none-any.whl → 2.8.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,36 @@
1
+ #!/usr/bin/env python3
2
+ """Health check endpoint for SousChef UI Docker container."""
3
+
4
+ import json
5
+ import sys
6
+ from pathlib import Path
7
+
8
# Add the app directory to the path for proper imports
# (the grandparent of this file, so `souschef.*` resolves when the container
# runs this script directly instead of via an installed package).
app_path = Path(__file__).parent.parent
if str(app_path) not in sys.path:
    sys.path.insert(0, str(app_path))
12
+
13
+
14
def main():
    """Run the container health check.

    Writes a one-line JSON status report to stdout and exits with code 0
    (healthy) or 1 (unhealthy).
    """
    service = "souschef-ui"
    try:
        # Importing a core module proves the Python environment is intact.
        from souschef.core.constants import VERSION

        report = {"status": "healthy", "service": service, "version": VERSION}
        sys.stdout.write(json.dumps(report))
        # SystemExit is not an Exception subclass, so this is not caught below.
        sys.exit(0)
    except Exception as exc:
        report = {"status": "unhealthy", "service": service, "error": str(exc)}
        sys.stdout.write(json.dumps(report))
        sys.exit(1)
33
+
34
+
35
# Entry point when executed directly (e.g. as a Docker HEALTHCHECK command).
if __name__ == "__main__":
    main()
@@ -0,0 +1,497 @@
1
+ """
2
+ AI Settings Page for SousChef UI.
3
+
4
+ Configure and validate AI provider settings for the SousChef MCP server.
5
+ """
6
+
7
+ import json
8
+ from pathlib import Path
9
+ from urllib.parse import urlparse, urlunparse
10
+
11
+ import streamlit as st
12
+
13
+ # AI Provider Constants
14
+ ANTHROPIC_PROVIDER = "Anthropic (Claude)"
15
+ OPENAI_PROVIDER = "OpenAI (GPT)"
16
+ WATSON_PROVIDER = "IBM Watsonx"
17
+ LIGHTSPEED_PROVIDER = "Red Hat Lightspeed"
18
+ LOCAL_PROVIDER = "Local Model"
19
+
20
+ # UI Constants
21
+ API_KEY_LABEL = "API Key"
22
+
23
+ # Import AI libraries (optional dependencies)
24
+ try:
25
+ import anthropic
26
+ except ImportError:
27
+ anthropic = None # type: ignore[assignment]
28
+
29
+ try:
30
+ from ibm_watsonx_ai import APIClient # type: ignore[import-not-found]
31
+ except ImportError:
32
+ APIClient = None
33
+
34
+ try:
35
+ import requests # type: ignore[import-untyped]
36
+ except ImportError:
37
+ requests = None
38
+
39
+ try:
40
+ import openai
41
+ except ImportError:
42
+ openai = None # type: ignore[assignment]
43
+
44
+
45
def _get_model_options(provider):
    """Return the selectable model IDs for *provider*.

    Any provider without an explicit entry (i.e. the local-model option)
    falls back to a single generic "local-model" choice.
    """
    options_by_provider = {
        ANTHROPIC_PROVIDER: [
            "claude-3-5-sonnet-20241022",
            "claude-3-5-haiku-20241022",
            "claude-3-opus-20240229",
        ],
        OPENAI_PROVIDER: ["gpt-4o", "gpt-4o-mini", "gpt-4-turbo", "gpt-3.5-turbo"],
        WATSON_PROVIDER: [
            "meta-llama/llama-3-70b-instruct",
            "meta-llama/llama-3-8b-instruct",
            "ibm/granite-13b-instruct-v2",
            "ibm/granite-13b-chat-v2",
        ],
        LIGHTSPEED_PROVIDER: ["codellama/CodeLlama-34b-Instruct-hf"],
    }
    return options_by_provider.get(provider, ["local-model"])
66
+
67
+
68
def _render_api_configuration(provider):
    """Render API configuration UI and return config values.

    Returns:
        tuple: (api_key, base_url, project_id) as entered by the user.
        Fields that do not apply to the selected provider are returned
        as empty strings.
    """
    if provider == LOCAL_PROVIDER:
        # No credentials are needed (or supported) for local models yet.
        st.info("Local model configuration will be added in a future update.")
        return "", "", ""
    elif provider == WATSON_PROVIDER:
        # Watsonx needs three values: API key, project ID, and regional URL.
        col1, col2, col3 = st.columns(3)
        with col1:
            api_key = st.text_input(
                API_KEY_LABEL,
                type="password",
                help="Enter your IBM Watsonx API key",
                key="api_key_input",
                placeholder="your-watsonx-api-key",
            )
        with col2:
            # Masked like the API key because the project ID is also sensitive.
            project_id = st.text_input(
                "Project ID",
                type="password",
                help="Enter your IBM Watsonx Project ID",
                key="project_id_input",
                placeholder="your-project-id",
            )
        with col3:
            base_url = st.text_input(
                "Base URL",
                help="IBM Watsonx API base URL",
                key="base_url_input",
                placeholder="https://us-south.ml.cloud.ibm.com",
            )
        return api_key, base_url, project_id
    elif provider == LIGHTSPEED_PROVIDER:
        col1, col2 = st.columns(2)
        with col1:
            api_key = st.text_input(
                API_KEY_LABEL,
                type="password",
                help="Enter your Red Hat Lightspeed API key",
                key="api_key_input",
                placeholder="your-lightspeed-api-key",
            )
        with col2:
            base_url = st.text_input(
                "Base URL",
                help="Red Hat Lightspeed API base URL",
                key="base_url_input",
                placeholder="https://api.redhat.com",
            )
        return api_key, base_url, ""
    else:
        # Anthropic / OpenAI: always an API key; base-URL override for OpenAI only.
        col1, col2 = st.columns(2)
        with col1:
            api_key = st.text_input(
                API_KEY_LABEL,
                type="password",
                help=f"Enter your {provider.split(' ')[0]} API key",
                key="api_key_input",
                placeholder=f"sk-... (for {provider.split(' ')[0]})",
            )
        with col2:
            if provider == OPENAI_PROVIDER:
                base_url = st.text_input(
                    "Base URL (Optional)",
                    help="Custom OpenAI API base URL",
                    key="base_url_input",
                    placeholder="https://api.openai.com/v1",
                )
            else:
                base_url = ""
        return api_key, base_url, ""
138
+
139
+
140
def _render_advanced_settings():
    """Render advanced settings UI and return values.

    Returns:
        tuple: (temperature, max_tokens) as selected in the expander.
    """
    with st.expander("Advanced Settings"):
        col1, col2 = st.columns(2)
        with col1:
            temperature = st.slider(
                "Temperature",
                min_value=0.0,
                max_value=2.0,
                value=0.7,
                step=0.1,
                help="Controls randomness in AI responses "
                "(0.0 = deterministic, 2.0 = very random)",
                key="temperature_slider",
            )
        with col2:
            max_tokens = st.number_input(
                "Max Tokens",
                min_value=100,
                max_value=100000,
                value=4000,
                help="Maximum number of tokens to generate",
                key="max_tokens_input",
            )
    return temperature, max_tokens
165
+
166
+
167
def _render_validation_section(
    provider, api_key, model, base_url, project_id, temperature, max_tokens
):
    """Render validation and save buttons.

    Validation performs a live test call against the selected provider;
    saving persists the settings regardless of validation outcome.
    """
    st.subheader("Configuration Validation")
    col1, col2 = st.columns([1, 1])
    with col1:
        # NOTE(review): width="stretch" is the newer Streamlit button API;
        # confirm the pinned Streamlit version supports it (older releases
        # used use_container_width=True).
        if st.button("Validate Configuration", type="primary", width="stretch"):
            validate_ai_configuration(provider, api_key, model, base_url, project_id)
    with col2:
        if st.button("Save Settings", width="stretch"):
            save_ai_settings(
                provider, api_key, model, base_url, temperature, max_tokens, project_id
            )
181
+
182
+
183
def show_ai_settings_page():
    """Show the AI settings configuration page.

    Orchestrates the whole page: provider/model selection, credential
    inputs, advanced generation settings, validate/save buttons, and a
    read-only summary of the currently saved configuration.
    """
    st.markdown("""
    Configure your AI provider settings for the SousChef MCP server.
    These settings determine which AI model will be used for Chef to Ansible
    conversions.
    """)

    # AI Provider Selection
    st.subheader("AI Provider Configuration")

    col1, col2 = st.columns([1, 2])

    with col1:
        ai_provider = st.selectbox(
            "AI Provider",
            [
                ANTHROPIC_PROVIDER,
                OPENAI_PROVIDER,
                WATSON_PROVIDER,
                LIGHTSPEED_PROVIDER,
                LOCAL_PROVIDER,
            ],
            help="Select your preferred AI provider",
            key="ai_provider_select",
        )

    with col2:
        # Model choices depend on the provider selected in the first column.
        model_options = _get_model_options(ai_provider)
        selected_model = st.selectbox(
            "Model",
            model_options,
            help="Select the AI model to use",
            key="ai_model_select",
        )

    # API Configuration (credentials layout varies per provider)
    st.subheader("API Configuration")
    api_key, base_url, project_id = _render_api_configuration(ai_provider)

    # Advanced Settings (temperature / max tokens)
    temperature, max_tokens = _render_advanced_settings()

    # Validation Section (validate + save buttons)
    _render_validation_section(
        ai_provider,
        api_key,
        selected_model,
        base_url,
        project_id,
        temperature,
        max_tokens,
    )

    # Current Settings Display (whatever was last saved)
    display_current_settings()
239
+
240
+
241
def validate_ai_configuration(provider, api_key, model, base_url="", project_id=""):
    """Validate the AI configuration by making a test API call.

    Dispatches to the provider-specific validator and reports the outcome
    through Streamlit success/error/info widgets; never raises.

    Args:
        provider: One of the *_PROVIDER display labels.
        api_key: Credential for the selected provider (unused for local).
        model: Model identifier to probe.
        base_url: Optional custom endpoint (OpenAI/Watsonx/Lightspeed).
        project_id: Required for IBM Watsonx only.
    """
    # Compare against the shared constant rather than a duplicated string
    # literal, so a relabelled provider cannot silently break this guard.
    if not api_key and provider != LOCAL_PROVIDER:
        st.error("API key is required for validation.")
        return

    if provider == WATSON_PROVIDER and not project_id:
        st.error("Project ID is required for IBM Watsonx validation.")
        return

    with st.spinner("Validating AI configuration..."):
        try:
            if provider == ANTHROPIC_PROVIDER:
                success, message = validate_anthropic_config(api_key, model)
            elif provider == OPENAI_PROVIDER:
                success, message = validate_openai_config(api_key, model, base_url)
            elif provider == WATSON_PROVIDER:
                success, message = validate_watson_config(api_key, project_id, base_url)
            elif provider == LIGHTSPEED_PROVIDER:
                success, message = validate_lightspeed_config(api_key, model, base_url)
            else:
                st.info("Local model validation not implemented yet.")
                return

            if success:
                st.success(f"Configuration validated successfully! {message}")
            else:
                st.error(f"Validation failed: {message}")

        except Exception as e:
            st.error(f"Validation error: {str(e)}")
272
+
273
+
274
+ def _sanitize_lightspeed_base_url(base_url: str) -> str:
275
+ """
276
+ Sanitize and validate the Red Hat Lightspeed base URL to prevent SSRF.
277
+
278
+ - Default to the standard Lightspeed endpoint if no URL is provided.
279
+ - Only allow HTTPS scheme.
280
+ - Restrict host to known Red Hat-owned Lightspeed domains.
281
+ - Strip any user-supplied path, query, or fragment.
282
+ """
283
+ default_url = "https://api.redhat.com"
284
+ allowed_hosts = {
285
+ "api.redhat.com",
286
+ }
287
+
288
+ if not base_url or not str(base_url).strip():
289
+ return default_url
290
+
291
+ parsed = urlparse(base_url)
292
+
293
+ # If scheme is missing, assume https
294
+ if not parsed.scheme:
295
+ parsed = parsed._replace(scheme="https")
296
+
297
+ if parsed.scheme.lower() != "https":
298
+ raise ValueError("Base URL must use HTTPS.")
299
+
300
+ hostname = (parsed.hostname or "").lower()
301
+ if hostname not in allowed_hosts:
302
+ raise ValueError("Base URL host must be a supported Red Hat domain.")
303
+
304
+ # Normalize to scheme + netloc only; drop path/query/fragment.
305
+ cleaned = parsed._replace(path="", params="", query="", fragment="")
306
+ return urlunparse(cleaned)
307
+
308
+
309
def validate_anthropic_config(api_key, model):
    """Validate Anthropic API configuration.

    Returns:
        tuple: (success, message); never raises.
    """
    if anthropic is None:
        return False, "Anthropic library not installed"

    try:
        # A minimal one-shot request is enough to prove the key and model
        # name are accepted by the API.
        anthropic.Anthropic(api_key=api_key).messages.create(
            model=model,
            max_tokens=10,
            messages=[{"role": "user", "content": "Hello"}],
        )
    except Exception as exc:
        return False, f"Connection failed: {exc}"
    return True, f"Successfully connected to {model}"
326
+
327
+
328
def validate_openai_config(api_key, model, base_url=""):
    """Validate OpenAI API configuration.

    Returns:
        tuple: (success, message); never raises.
    """
    if openai is None:
        return False, "OpenAI library not installed. Run: pip install openai"

    try:
        # Only pass base_url when the caller supplied one, so the SDK
        # default endpoint is used otherwise.
        if base_url:
            client = openai.OpenAI(api_key=api_key, base_url=base_url)
        else:
            client = openai.OpenAI(api_key=api_key)

        # A tiny five-token completion confirms credentials and model name.
        client.chat.completions.create(
            model=model,
            messages=[{"role": "user", "content": "Hello"}],
            max_tokens=5,
        )
    except Exception as exc:
        return False, f"Connection failed: {exc}"
    return True, f"Successfully connected to {model}"
349
+
350
+
351
def validate_lightspeed_config(api_key, model, base_url=""):
    """Validate Red Hat Lightspeed API configuration.

    Returns:
        tuple: (success, message); never raises.
    """
    if requests is None:
        return False, "Requests library not installed. Run: pip install requests"

    try:
        # Normalise/reject the endpoint before any network traffic (SSRF guard).
        endpoint = _sanitize_lightspeed_base_url(base_url)

        # A tiny completion request is enough to prove the key is accepted.
        response = requests.post(
            f"{endpoint}/v1/completions",
            headers={
                "Authorization": f"Bearer {api_key}",
                "Content-Type": "application/json",
            },
            json={"model": model, "prompt": "Hello", "max_tokens": 5},
            timeout=10,
        )

        if response.status_code == 200:
            return True, f"Successfully connected to Red Hat Lightspeed {model}"
        return False, (
            f"API request failed with status {response.status_code}: "
            f"{response.text}"
        )

    except Exception as exc:
        return False, f"Connection failed: {exc}"
390
+
391
+
392
def validate_watson_config(api_key, project_id, base_url=""):
    """Validate IBM Watsonx API configuration.

    Returns:
        tuple: (success, message); never raises.
    """
    if APIClient is None:
        return False, (
            "IBM Watsonx AI library not installed. Run: pip install ibm-watsonx-ai"
        )

    try:
        # Initialize Watsonx client
        # NOTE(review): recent ibm-watsonx-ai releases expect a Credentials
        # object rather than bare api_key/url kwargs — confirm this call
        # matches the SDK version pinned for this project.
        client = APIClient(
            api_key=api_key,
            project_id=project_id,
            url=base_url or "https://us-south.ml.cloud.ibm.com",
        )

        # Test connection by listing available models
        models = client.foundation_models.get_model_specs()
        if models:
            return True, (
                f"Successfully connected to IBM Watsonx. "
                f"Found {len(models)} available models."
            )
        else:
            return False, "Connected to IBM Watsonx but no models available."

    except Exception as e:
        return False, f"Connection failed: {e}"
419
+
420
+
421
def save_ai_settings(
    provider, api_key, model, base_url, temperature, max_tokens, project_id=""
):
    """Save AI settings to configuration file.

    Persists the configuration (including the API key, when provided) to
    /tmp/.souschef/ai_config.json and mirrors it into Streamlit session
    state for immediate use. Errors are reported via st.error, not raised.
    """
    try:
        # Use /tmp/.souschef for container compatibility (tmpfs is writable).
        # The file stores the API key in plaintext, so restrict both the
        # directory and the file to the current user (modes apply only at
        # creation time; pre-existing paths keep their permissions).
        config_dir = Path("/tmp/.souschef")
        config_dir.mkdir(mode=0o700, exist_ok=True)
        config_file = config_dir / "ai_config.json"
        # Create the file owner-read/write only BEFORE the secret is written.
        config_file.touch(mode=0o600, exist_ok=True)

        config = {
            "provider": provider,
            "model": model,
            "api_key": api_key if api_key else None,
            "base_url": base_url if base_url else None,
            "project_id": project_id if project_id else None,
            "temperature": temperature,
            "max_tokens": max_tokens,
            "last_updated": str(st.session_state.get("timestamp", "Unknown")),
        }

        with config_file.open("w") as f:
            json.dump(config, f, indent=2)

        # Store in session state for immediate use
        st.session_state.ai_config = config

        st.success("Settings saved successfully!")

    except Exception as e:
        st.error(f"Failed to save settings: {str(e)}")
452
+
453
+
454
def display_current_settings():
    """Display the currently saved AI settings, if any."""
    st.subheader("Current Configuration")

    # Prefer the on-disk config; load_ai_settings falls back to session state.
    config = load_ai_settings()

    if not config:
        st.info("No AI configuration found. Please configure your settings above.")
        return

    left, right = st.columns(2)
    with left:
        st.metric("Provider", config.get("provider", "Not configured"))
        st.metric("Model", config.get("model", "Not configured"))
    with right:
        st.metric("Temperature", config.get("temperature", "Not set"))
        st.metric("Max Tokens", config.get("max_tokens", "Not set"))

    if config.get("last_updated"):
        st.caption(f"Last updated: {config['last_updated']}")

    # Security note
    if config.get("api_key"):
        st.info("API key is configured and stored securely.")
    else:
        st.warning("No API key configured.")
482
+
483
+
484
def load_ai_settings():
    """Load AI settings from the config file, falling back to session state."""
    # Use /tmp/.souschef for container compatibility (tmpfs is writable)
    config_file = Path("/tmp/.souschef/ai_config.json")
    try:
        if config_file.exists():
            with config_file.open() as f:
                return json.load(f)
    except Exception as exc:
        # Failed to load config from file; fall back to session state/defaults
        st.warning(f"Unable to load saved AI settings: {exc}")

    # Fallback to session state or return empty dict
    return st.session_state.get("ai_config", {})