alchemist-nrel 0.3.0__py3-none-any.whl → 0.3.2__py3-none-any.whl

This diff shows the changes between publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (37)
  1. alchemist_core/__init__.py +2 -2
  2. alchemist_core/acquisition/botorch_acquisition.py +84 -126
  3. alchemist_core/data/experiment_manager.py +196 -20
  4. alchemist_core/models/botorch_model.py +292 -63
  5. alchemist_core/models/sklearn_model.py +175 -15
  6. alchemist_core/session.py +3532 -76
  7. alchemist_core/utils/__init__.py +3 -1
  8. alchemist_core/utils/acquisition_utils.py +60 -0
  9. alchemist_core/visualization/__init__.py +45 -0
  10. alchemist_core/visualization/helpers.py +130 -0
  11. alchemist_core/visualization/plots.py +1449 -0
  12. alchemist_nrel-0.3.2.dist-info/METADATA +185 -0
  13. {alchemist_nrel-0.3.0.dist-info → alchemist_nrel-0.3.2.dist-info}/RECORD +34 -29
  14. {alchemist_nrel-0.3.0.dist-info → alchemist_nrel-0.3.2.dist-info}/WHEEL +1 -1
  15. {alchemist_nrel-0.3.0.dist-info → alchemist_nrel-0.3.2.dist-info}/entry_points.txt +1 -1
  16. {alchemist_nrel-0.3.0.dist-info → alchemist_nrel-0.3.2.dist-info}/top_level.txt +0 -1
  17. api/example_client.py +7 -2
  18. api/main.py +3 -2
  19. api/models/requests.py +76 -1
  20. api/models/responses.py +102 -2
  21. api/routers/acquisition.py +25 -0
  22. api/routers/experiments.py +352 -11
  23. api/routers/sessions.py +195 -11
  24. api/routers/visualizations.py +6 -4
  25. api/routers/websocket.py +132 -0
  26. run_api.py → api/run_api.py +8 -7
  27. api/services/session_store.py +370 -71
  28. api/static/assets/index-B6Cf6s_b.css +1 -0
  29. api/static/assets/{index-C0_glioA.js → index-B7njvc9r.js} +223 -208
  30. api/static/index.html +2 -2
  31. ui/gpr_panel.py +11 -5
  32. ui/target_column_dialog.py +299 -0
  33. ui/ui.py +52 -5
  34. alchemist_core/models/ax_model.py +0 -159
  35. alchemist_nrel-0.3.0.dist-info/METADATA +0 -223
  36. api/static/assets/index-CB4V1LI5.css +0 -1
  37. {alchemist_nrel-0.3.0.dist-info → alchemist_nrel-0.3.2.dist-info}/licenses/LICENSE +0 -0
alchemist_core/utils/__init__.py
@@ -4,4 +4,6 @@ Utility functions for ALchemist Core.
 Will be populated in future branches.
 """
 
-__all__ = []
+from .acquisition_utils import evaluate_acquisition
+
+__all__ = ['evaluate_acquisition']
alchemist_core/utils/acquisition_utils.py (new file)
@@ -0,0 +1,60 @@
+"""
+Utility functions for acquisition function evaluation.
+
+These are internal helper functions used by visualization methods.
+Users should use the high-level plotting APIs in OptimizationSession instead.
+"""
+
+import numpy as np
+import pandas as pd
+from typing import Union, Tuple, Optional, Dict, Any
+
+
+def evaluate_acquisition(
+    model,
+    X: Union[pd.DataFrame, np.ndarray],
+    acq_func: str = 'ucb',
+    acq_func_kwargs: Optional[Dict[str, Any]] = None,
+    goal: str = 'maximize'
+) -> Tuple[np.ndarray, Optional[np.ndarray]]:
+    """
+    Evaluate acquisition function at given points using the model's backend.
+
+    This is an internal utility function used by visualization methods.
+    Users should call plot_acquisition_slice() or plot_acquisition_contour() instead.
+
+    Args:
+        model: Trained model instance (SklearnModel or BoTorchModel)
+        X: Points to evaluate (DataFrame or array with shape (n, d))
+        acq_func: Acquisition function name:
+            - 'ei': Expected Improvement
+            - 'pi': Probability of Improvement
+            - 'ucb'/'lcb': Upper/Lower Confidence Bound
+            - 'logei', 'logpi': Log variants (BoTorch only)
+        acq_func_kwargs: Additional parameters:
+            - 'xi' (float): Exploration parameter for EI/PI (default: 0.01)
+            - 'kappa' (float): Exploration parameter for UCB (default: 1.96)
+            - 'beta' (float): Exploration parameter for UCB (BoTorch, default: 0.5)
+        goal: 'maximize' or 'minimize' - optimization direction
+
+    Returns:
+        Tuple of (acq_values, None) - None because acquisition functions are deterministic
+
+    Example:
+        >>> from alchemist_core.utils.acquisition_utils import evaluate_acquisition
+        >>> acq_vals, _ = evaluate_acquisition(
+        ...     session.model, points, acq_func='ei', goal='maximize'
+        ... )
+
+    Note:
+        - Requires a trained model
+        - Acquisition values are relative - only their ordering matters
+        - Higher values indicate better candidates for the next experiment
+    """
+    if model is None:
+        raise ValueError("Model must be trained before evaluating acquisition functions")
+
+    maximize = (goal.lower() == 'maximize')
+
+    # Delegate to the model's evaluate_acquisition method
+    return model.evaluate_acquisition(X, acq_func, acq_func_kwargs, maximize)
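
Because the helper simply delegates to the model backend, it can be sanity-checked outside the plotting APIs. A minimal sketch, assuming "session" is an OptimizationSession whose model has already been trained (matching the docstring example above); the two-input grid, its bounds, and the variable names are illustrative only:

import numpy as np
from alchemist_core.utils import evaluate_acquisition

# 'session' is assumed to be an already-trained OptimizationSession,
# as in the docstring example; the 2-D evaluation grid is illustrative.
grid = np.column_stack([
    np.linspace(0.0, 1.0, 50).repeat(50),    # first input, 2500 points
    np.tile(np.linspace(0.0, 1.0, 50), 50),  # second input
])

acq_vals, _ = evaluate_acquisition(
    session.model, grid,
    acq_func='ei',
    acq_func_kwargs={'xi': 0.01},
    goal='maximize',
)

# Only the ordering of acquisition values matters: the argmax is the
# most promising candidate for the next experiment.
best_point = grid[np.argmax(acq_vals)]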
alchemist_core/visualization/__init__.py (new file)
@@ -0,0 +1,45 @@
+"""
+Visualization module for ALchemist.
+
+Pure plotting functions with no session or UI dependencies.
+All functions return matplotlib Figure/Axes objects for maximum flexibility.
+"""
+
+from alchemist_core.visualization.plots import (
+    create_parity_plot,
+    create_contour_plot,
+    create_slice_plot,
+    create_voxel_plot,
+    create_metrics_plot,
+    create_qq_plot,
+    create_calibration_plot,
+    create_regret_plot,
+    create_probability_of_improvement_plot,
+    create_uncertainty_contour_plot,
+    create_uncertainty_voxel_plot,
+    create_acquisition_voxel_plot,
+)
+
+from alchemist_core.visualization.helpers import (
+    check_matplotlib,
+    compute_z_scores,
+    compute_calibration_metrics,
+)
+
+__all__ = [
+    'create_parity_plot',
+    'create_contour_plot',
+    'create_slice_plot',
+    'create_voxel_plot',
+    'create_metrics_plot',
+    'create_qq_plot',
+    'create_calibration_plot',
+    'create_regret_plot',
+    'create_probability_of_improvement_plot',
+    'create_uncertainty_contour_plot',
+    'create_uncertainty_voxel_plot',
+    'create_acquisition_voxel_plot',
+    'check_matplotlib',
+    'compute_z_scores',
+    'compute_calibration_metrics',
+]
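
Everything in __all__ is re-exported at the subpackage root, so downstream code can import the plotting helpers without reaching into plots.py or helpers.py. A small sketch of the import surface; only check_matplotlib (whose signature appears in the next hunk) is actually called, since the plot-function signatures are not part of this diff:

# Import names confirmed by the __all__ list above; plot-function
# signatures are not shown in this diff, so none are called here.
from alchemist_core.visualization import (
    create_parity_plot,
    create_calibration_plot,
    check_matplotlib,
)

# Fails fast with an install hint if matplotlib is missing.
check_matplotlib()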
alchemist_core/visualization/helpers.py (new file)
@@ -0,0 +1,130 @@
+"""
+Helper functions for visualization module.
+
+Utilities for data preparation, validation, and computation.
+"""
+
+import numpy as np
+from typing import Optional, Tuple
+
+
+def check_matplotlib() -> None:
+    """
+    Check if matplotlib is available for plotting.
+
+    Raises:
+        ImportError: If matplotlib is not installed
+    """
+    try:
+        import matplotlib.pyplot as plt
+    except ImportError:
+        raise ImportError(
+            "matplotlib is required for visualization. "
+            "Install with: pip install matplotlib"
+        )
+
+
+def compute_z_scores(
+    y_true: np.ndarray,
+    y_pred: np.ndarray,
+    y_std: np.ndarray
+) -> np.ndarray:
+    """
+    Compute standardized residuals (z-scores).
+
+    z = (y_true - y_pred) / y_std
+
+    Args:
+        y_true: Actual experimental values
+        y_pred: Model predicted values
+        y_std: Prediction standard deviations
+
+    Returns:
+        Array of z-scores (standardized residuals)
+
+    Note:
+        Small epsilon (1e-10) added to denominator to avoid division by zero.
+    """
+    return (y_true - y_pred) / (y_std + 1e-10)
+
+
+def compute_calibration_metrics(
+    y_true: np.ndarray,
+    y_pred: np.ndarray,
+    y_std: np.ndarray,
+    prob_levels: Optional[np.ndarray] = None
+) -> Tuple[np.ndarray, np.ndarray]:
+    """
+    Compute nominal vs empirical coverage for calibration curves.
+
+    For each nominal probability level, computes the empirical fraction of
+    observations that fall within the predicted confidence interval.
+
+    Args:
+        y_true: Actual experimental values
+        y_pred: Model predicted values
+        y_std: Prediction standard deviations
+        prob_levels: Nominal coverage probabilities to evaluate.
+            Default: np.arange(0.10, 1.00, 0.05)
+
+    Returns:
+        Tuple of (nominal_probs, empirical_coverage)
+        - nominal_probs: The requested probability levels
+        - empirical_coverage: Observed coverage fractions
+
+    Example:
+        >>> nominal, empirical = compute_calibration_metrics(y_true, y_pred, y_std)
+        >>> # nominal[i] is the expected coverage (e.g., 0.68 for ±1σ)
+        >>> # empirical[i] is the observed coverage fraction
+    """
+    from scipy import stats
+
+    if prob_levels is None:
+        prob_levels = np.arange(0.10, 1.00, 0.05)
+
+    empirical_coverage = []
+
+    for prob in prob_levels:
+        # Convert probability to sigma multiplier
+        # For symmetric interval: P(|Z| < z) = prob → z = Φ^(-1)((1+prob)/2)
+        sigma = stats.norm.ppf((1 + prob) / 2)
+
+        # Compute empirical coverage at this sigma level
+        lower_bound = y_pred - sigma * y_std
+        upper_bound = y_pred + sigma * y_std
+        within_interval = (y_true >= lower_bound) & (y_true <= upper_bound)
+        empirical_coverage.append(np.mean(within_interval))
+
+    return prob_levels, np.array(empirical_coverage)
+
+
+def sort_legend_items(labels: list) -> list:
+    """
+    Sort legend labels for consistent ordering.
+
+    Preferred order: Prediction, uncertainty bands (small to large), Experiments
+
+    Args:
+        labels: List of legend label strings
+
+    Returns:
+        List of indices for sorted order
+    """
+    def sort_key(lbl):
+        if 'Prediction' in lbl:
+            return (0, 0)
+        elif 'σ' in lbl:
+            # Extract sigma value for sorting bands
+            import re
+            match = re.search(r'±([\d.]+)σ', lbl)
+            if match:
+                return (1, float(match.group(1)))
+            return (1, 999)
+        elif 'Experiment' in lbl:
+            return (2, 0)
+        else:
+            return (3, 0)
+
+    indices = list(range(len(labels)))
+    indices.sort(key=lambda i: sort_key(labels[i]))
+    return indices
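
Because compute_calibration_metrics compares nominal to empirical coverage, a synthetic, perfectly calibrated predictor makes a good smoke test: the empirical fractions should track the nominal levels. A self-contained sketch using only the signatures shown above (the random data and the 0.5 noise scale are illustrative):

import numpy as np
from alchemist_core.visualization import compute_z_scores, compute_calibration_metrics

rng = np.random.default_rng(0)
y_pred = rng.uniform(0.0, 10.0, size=500)
y_std = np.full_like(y_pred, 0.5)
# A perfectly calibrated model: the truth really is N(prediction, std^2).
y_true = y_pred + rng.normal(0.0, 0.5, size=y_pred.shape)

z = compute_z_scores(y_true, y_pred, y_std)
print(f"std of z-scores: {z.std():.2f}")  # ~1.0 when calibration is good

nominal, empirical = compute_calibration_metrics(y_true, y_pred, y_std)
for n, e in zip(nominal, empirical):
    print(f"nominal {n:.2f} -> empirical {e:.2f}")  # empirical should track nominal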