alchemist-nrel 0.3.1__py3-none-any.whl → 0.3.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (32)
  1. alchemist_core/__init__.py +2 -2
  2. alchemist_core/acquisition/botorch_acquisition.py +83 -126
  3. alchemist_core/data/experiment_manager.py +181 -12
  4. alchemist_core/models/botorch_model.py +292 -63
  5. alchemist_core/models/sklearn_model.py +145 -13
  6. alchemist_core/session.py +3330 -31
  7. alchemist_core/utils/__init__.py +3 -1
  8. alchemist_core/utils/acquisition_utils.py +60 -0
  9. alchemist_core/visualization/__init__.py +45 -0
  10. alchemist_core/visualization/helpers.py +130 -0
  11. alchemist_core/visualization/plots.py +1449 -0
  12. {alchemist_nrel-0.3.1.dist-info → alchemist_nrel-0.3.2.dist-info}/METADATA +13 -13
  13. {alchemist_nrel-0.3.1.dist-info → alchemist_nrel-0.3.2.dist-info}/RECORD +31 -26
  14. {alchemist_nrel-0.3.1.dist-info → alchemist_nrel-0.3.2.dist-info}/WHEEL +1 -1
  15. api/main.py +1 -1
  16. api/models/requests.py +52 -0
  17. api/models/responses.py +79 -2
  18. api/routers/experiments.py +333 -8
  19. api/routers/sessions.py +84 -9
  20. api/routers/visualizations.py +6 -4
  21. api/routers/websocket.py +2 -2
  22. api/services/session_store.py +295 -71
  23. api/static/assets/index-B6Cf6s_b.css +1 -0
  24. api/static/assets/{index-DWfIKU9j.js → index-B7njvc9r.js} +201 -196
  25. api/static/index.html +2 -2
  26. ui/gpr_panel.py +11 -5
  27. ui/target_column_dialog.py +299 -0
  28. ui/ui.py +52 -5
  29. api/static/assets/index-sMIa_1hV.css +0 -1
  30. {alchemist_nrel-0.3.1.dist-info → alchemist_nrel-0.3.2.dist-info}/entry_points.txt +0 -0
  31. {alchemist_nrel-0.3.1.dist-info → alchemist_nrel-0.3.2.dist-info}/licenses/LICENSE +0 -0
  32. {alchemist_nrel-0.3.1.dist-info → alchemist_nrel-0.3.2.dist-info}/top_level.txt +0 -0
@@ -1,10 +1,12 @@
  import torch
  import numpy as np
  import pandas as pd
+ from typing import Union, Tuple, Optional
  from botorch.models import SingleTaskGP
  from botorch.models.gp_regression_mixed import MixedSingleTaskGP
  from botorch.models.transforms import Normalize, Standardize
  from botorch.fit import fit_gpytorch_mll
+ from botorch.exceptions import OptimizationWarning
  from gpytorch.mlls import ExactMarginalLogLikelihood
  from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
  from sklearn.model_selection import KFold
@@ -15,6 +17,8 @@ from botorch.models.utils.assorted import InputDataWarning
 
  # Import necessary kernels from GPyTorch
  from gpytorch.kernels import MaternKernel, RBFKernel
+ import gpytorch
+ gpytorch.settings.cholesky_jitter(1e-2)
 
  logger = get_logger(__name__)
 
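A note on the added module-level jitter call: in GPyTorch, `cholesky_jitter` is designed as a context manager, and whether a bare call at import time changes the global default varies across GPyTorch versions. A minimal sketch of the scoped form, under the assumption of a recent GPyTorch and BoTorch release:

```python
import torch
import gpytorch
from botorch.models import SingleTaskGP
from botorch.fit import fit_gpytorch_mll
from gpytorch.mlls import ExactMarginalLogLikelihood

train_X = torch.rand(10, 2, dtype=torch.float64)
train_Y = train_X.sum(dim=-1, keepdim=True)
model = SingleTaskGP(train_X, train_Y)
mll = ExactMarginalLogLikelihood(model.likelihood, model)

# Scoped form: the enlarged jitter (1e-2) is added to the covariance
# diagonal only for Cholesky factorizations performed inside this block.
with gpytorch.settings.cholesky_jitter(1e-2):
    fit_gpytorch_mll(mll)
```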
@@ -61,15 +65,25 @@ class BoTorchModel(BaseModel):
 
      def _get_cont_kernel_factory(self):
          """Returns a factory function for the continuous kernel."""
+         # Validate kernel type before creating factory
+         valid_kernels = ["matern", "rbf"]
+         kernel_lower = self.cont_kernel_type.lower()
+
+         if kernel_lower not in valid_kernels:
+             raise ValueError(
+                 f"Unknown kernel type: '{self.cont_kernel_type}'. "
+                 f"Valid options for BoTorch are: {valid_kernels}"
+             )
+
          def factory(batch_shape, ard_num_dims, active_dims):
-             if self.cont_kernel_type.lower() == "matern":
+             if kernel_lower == "matern":
                  return MaternKernel(
                      nu=self.matern_nu,
                      ard_num_dims=ard_num_dims,
                      active_dims=active_dims,
                      batch_shape=batch_shape
                  )
-             else: # Default to RBF
+             else: # RBF
                  return RBFKernel(
                      ard_num_dims=ard_num_dims,
                      active_dims=active_dims,
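With the validation hoisted out of the nested `factory`, a misspelled kernel name now fails as soon as the factory is requested rather than at the first covariance evaluation. A hypothetical illustration (assuming `model` is an existing `BoTorchModel` instance whose `cont_kernel_type` attribute can be set directly):

```python
# Hypothetical: `model` is an existing BoTorchModel instance.
model.cont_kernel_type = "mattern"  # note the typo

try:
    model._get_cont_kernel_factory()
except ValueError as err:
    print(err)
    # -> Unknown kernel type: 'mattern'. Valid options for BoTorch are: ['matern', 'rbf']
```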
@@ -154,12 +168,12 @@ class BoTorchModel(BaseModel):
          X_encoded = self._encode_categorical_data(X)
 
          # Convert to tensors
-         train_X = torch.tensor(X_encoded.values, dtype=torch.double)
-         train_Y = torch.tensor(y.values, dtype=torch.double).unsqueeze(-1)
+         train_X = torch.tensor(X_encoded.values, dtype=torch.float64)
+         train_Y = torch.tensor(y.values, dtype=torch.float64).unsqueeze(-1)
 
          # Convert noise values to tensor if available
          if noise is not None:
-             train_Yvar = torch.tensor(noise.values, dtype=torch.double).unsqueeze(-1)
+             train_Yvar = torch.tensor(noise.values, dtype=torch.float64).unsqueeze(-1)
              logger.info(f"Using provided noise values for BoTorch model regularization.")
          else:
              train_Yvar = None
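The `torch.double` to `torch.float64` edits in this and the later hunks are purely cosmetic: the two names refer to the same dtype object, so tensor behavior is unchanged.

```python
import torch

# torch.double is an alias for torch.float64; the rename changes nothing at runtime.
assert torch.double is torch.float64
assert torch.tensor([1.0], dtype=torch.double).dtype == torch.float64
```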
@@ -208,12 +222,25 @@ class BoTorchModel(BaseModel):
                  outcome_transform=outcome_transform
              )
          else:
-             # For continuous-only models
+             # For continuous-only models, we need to manually construct the covariance module
+             # SingleTaskGP doesn't accept cont_kernel_factory, so we create it and set it manually
+             from gpytorch.kernels import ScaleKernel
+
+             # Get the kernel from our factory
+             num_dims = train_X.shape[-1]
+             base_kernel = cont_kernel_factory(
+                 batch_shape=torch.Size([]),
+                 ard_num_dims=num_dims,
+                 active_dims=list(range(num_dims))
+             )
+             covar_module = ScaleKernel(base_kernel)
+
              if noise is not None:
                  self.model = SingleTaskGP(
                      train_X=train_X,
                      train_Y=train_Y,
                      train_Yvar=train_Yvar,
+                     covar_module=covar_module,
                      input_transform=input_transform,
                      outcome_transform=outcome_transform
                  )
@@ -222,6 +249,7 @@ class BoTorchModel(BaseModel):
              self.model = SingleTaskGP(
                  train_X=train_X,
                  train_Y=train_Y,
+                 covar_module=covar_module,
                  input_transform=input_transform,
                  outcome_transform=outcome_transform
              )
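The manual construction above reflects a real API asymmetry in BoTorch: `MixedSingleTaskGP` accepts a `cont_kernel_factory`, while `SingleTaskGP` takes a ready-made `covar_module`. A standalone sketch of the same pattern on synthetic data:

```python
import torch
from botorch.models import SingleTaskGP
from gpytorch.kernels import MaternKernel, ScaleKernel

train_X = torch.rand(20, 3, dtype=torch.float64)
train_Y = train_X.sum(dim=-1, keepdim=True)

# ARD Matern base kernel (one lengthscale per input dimension),
# wrapped in ScaleKernel so an outputscale is learned as well.
base_kernel = MaternKernel(nu=2.5, ard_num_dims=train_X.shape[-1])
gp = SingleTaskGP(train_X, train_Y, covar_module=ScaleKernel(base_kernel))
```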
@@ -259,10 +287,10 @@ class BoTorchModel(BaseModel):
 
          # Convert to tensor - handle both DataFrame and numpy array inputs
          if isinstance(X_encoded, pd.DataFrame):
-             test_X = torch.tensor(X_encoded.values, dtype=torch.double)
+             test_X = torch.tensor(X_encoded.values, dtype=torch.float64)
          else:
              # If X_encoded is already a numpy array
-             test_X = torch.tensor(X_encoded, dtype=torch.double)
+             test_X = torch.tensor(X_encoded, dtype=torch.float64)
 
          # Set model to evaluation mode
          self.model.eval()
@@ -302,10 +330,10 @@ class BoTorchModel(BaseModel):
 
          # Convert to tensor
          if isinstance(X_encoded, pd.DataFrame):
-             X_tensor = torch.tensor(X_encoded.values, dtype=torch.double)
+             X_tensor = torch.tensor(X_encoded.values, dtype=torch.float64)
          else:
              # If X_encoded is already a numpy array
-             X_tensor = torch.tensor(X_encoded, dtype=torch.double)
+             X_tensor = torch.tensor(X_encoded, dtype=torch.float64)
 
          # Set model to evaluation mode
          self.model.eval()
@@ -415,20 +443,23 @@ class BoTorchModel(BaseModel):
          if self.model is None or self.fitted_state_dict is None:
              self.train(experiment_manager)
 
+         # Get target column name from experiment manager
+         target_col = experiment_manager.target_columns[0]
+
          # Get data - handle noise column if present
          if 'Noise' in exp_df.columns:
-             X = exp_df.drop(columns=["Output", "Noise"])
+             X = exp_df.drop(columns=[target_col, "Noise"])
          else:
-             X = exp_df.drop(columns=["Output"])
+             X = exp_df.drop(columns=[target_col])
 
-         y = exp_df["Output"]
+         y = exp_df[target_col]
 
          # Encode categorical variables
          X_encoded = self._encode_categorical_data(X)
 
          # Convert to tensors
-         full_X = torch.tensor(X_encoded.values, dtype=torch.double)
-         full_Y = torch.tensor(y.values, dtype=torch.double).unsqueeze(-1)
+         full_X = torch.tensor(X_encoded.values, dtype=torch.float64)
+         full_Y = torch.tensor(y.values, dtype=torch.float64).unsqueeze(-1)
 
          # Metrics storage
          rmse_values = []
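Replacing the hard-coded "Output" column with `experiment_manager.target_columns[0]` lets the learning-curve evaluation work with arbitrarily named targets. A minimal pandas sketch of the same split (the column names here are illustrative):

```python
import pandas as pd

exp_df = pd.DataFrame({
    "temp": [300, 350, 400],
    "yield_pct": [0.62, 0.71, 0.68],  # illustrative target column
    "Noise": [0.01, 0.02, 0.01],
})
target_col = "yield_pct"  # would come from experiment_manager.target_columns[0]

drop_cols = [target_col, "Noise"] if "Noise" in exp_df.columns else [target_col]
X = exp_df.drop(columns=drop_cols)
y = exp_df[target_col]
```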
@@ -438,11 +469,11 @@ class BoTorchModel(BaseModel):
          n_obs = []
 
          # Calculate total steps for progress
-         total_steps = len(range(max(cv_splits+1, 5), len(full_X) + 1))
+         total_steps = len(range(5, len(full_X) + 1))
          current_step = 0
 
-         # Evaluate on increasing subsets of data
-         for i in range(max(cv_splits+1, 5), len(full_X) + 1):
+         # Evaluate on increasing subsets of data (starting at 5 for minimum CV size)
+         for i in range(5, len(full_X) + 1):
              if debug:
                  logger.info(f"Evaluating with {i} observations")
 
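Since the subset loop now always starts at 5, the progress denominator reduces to a simple count:

```python
# For n observations with n >= 5, the loop runs n - 4 times.
n = 12
assert len(range(5, n + 1)) == n - 4
```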
@@ -459,48 +490,68 @@ class BoTorchModel(BaseModel):
 
              # Perform cross-validation for this subset size
              for train_idx, test_idx in kf.split(subset_np_X):
-                 # Split data
-                 X_train = subset_X[train_idx]
-                 y_train = subset_Y[train_idx]
-                 X_test = subset_X[test_idx]
-                 y_test = subset_Y[test_idx]
-
-                 # Create a new model with this fold's training data
-                 # Need to recreate transforms with the same parameters as the main model
-                 fold_input_transform, fold_outcome_transform = self._create_transforms(X_train, y_train)
-
-                 cont_kernel_factory = self._get_cont_kernel_factory()
-                 if self.cat_dims and len(self.cat_dims) > 0:
-                     fold_model = MixedSingleTaskGP(
-                         X_train, y_train,
-                         cat_dims=self.cat_dims,
-                         cont_kernel_factory=cont_kernel_factory,
-                         input_transform=fold_input_transform,
-                         outcome_transform=fold_outcome_transform
-                     )
-                 else:
-                     fold_model = SingleTaskGP(
-                         X_train, y_train,
-                         input_transform=fold_input_transform,
-                         outcome_transform=fold_outcome_transform
-                     )
-
-                 # Train the fold model from scratch (don't load state_dict to avoid dimension mismatches)
-                 # This is necessary because folds may have different categorical values or data shapes
-                 mll = ExactMarginalLogLikelihood(fold_model.likelihood, fold_model)
-                 fit_gpytorch_mll(mll)
-
-                 # Make predictions on test fold
-                 fold_model.eval()
-                 fold_model.likelihood.eval()
-
-                 with torch.no_grad():
-                     posterior = fold_model.posterior(X_test)
-                     preds = posterior.mean.squeeze(-1)
+                 try:
+                     # Split data
+                     X_train = subset_X[train_idx]
+                     y_train = subset_Y[train_idx]
+                     X_test = subset_X[test_idx]
+                     y_test = subset_Y[test_idx]
+
+                     # Create a new model with this fold's training data
+                     # Need to recreate transforms with the same parameters as the main model
+                     fold_input_transform, fold_outcome_transform = self._create_transforms(X_train, y_train)
+
+                     cont_kernel_factory = self._get_cont_kernel_factory()
+                     if self.cat_dims and len(self.cat_dims) > 0:
+                         fold_model = MixedSingleTaskGP(
+                             X_train, y_train,
+                             cat_dims=self.cat_dims,
+                             cont_kernel_factory=cont_kernel_factory,
+                             input_transform=fold_input_transform,
+                             outcome_transform=fold_outcome_transform
+                         )
+                     else:
+                         fold_model = SingleTaskGP(
+                             X_train, y_train,
+                             input_transform=fold_input_transform,
+                             outcome_transform=fold_outcome_transform
+                         )
+
+                     # Train the fold model from scratch (don't load state_dict to avoid dimension mismatches)
+                     # This is necessary because folds may have different categorical values or data shapes
+                     mll = ExactMarginalLogLikelihood(fold_model.likelihood, fold_model)
+
+                     # Suppress optimization warnings for small folds where convergence may be difficult
+                     import warnings
+                     with warnings.catch_warnings():
+                         warnings.filterwarnings('ignore', category=OptimizationWarning)
+                         # Use fit_gpytorch_mll with options that improve convergence for small datasets
+                         fit_gpytorch_mll(
+                             mll,
+                             options={
+                                 "maxiter": 50,  # Reduce iterations for speed
+                                 "ftol": 1e-6,   # Slightly relaxed tolerance
+                                 "gtol": 1e-5,   # Slightly relaxed gradient tolerance
+                             }
+                         )
+
+                     # Make predictions on test fold
+                     fold_model.eval()
+                     fold_model.likelihood.eval()
 
-                 # Store this fold's results
-                 fold_y_trues.append(y_test.squeeze(-1))
-                 fold_y_preds.append(preds)
+                     with torch.no_grad():
+                         posterior = fold_model.posterior(X_test)
+                         preds = posterior.mean.squeeze(-1)
+
+                     # Store this fold's results
+                     fold_y_trues.append(y_test.squeeze(-1))
+                     fold_y_preds.append(preds)
+
+                 except Exception as e:
+                     # Skip this fold if optimization fails (can happen with small/difficult training sets)
+                     if debug:
+                         logger.warning(f"Skipping fold for subset size {i} due to error: {e}")
+                     continue
 
              # Combine all fold results for this subset size
              all_y_true = torch.cat(fold_y_trues).cpu().numpy()
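The warning suppression in the fold loop uses the standard `warnings.catch_warnings()` idiom, which restores the previous filter state on exit, so the `ignore` rule cannot leak past the fold. A reduced sketch mirroring the diff's call (the `options` argument follows the diff; the helper name is hypothetical):

```python
import warnings
from botorch.exceptions import OptimizationWarning
from botorch.fit import fit_gpytorch_mll

def fit_fold_quietly(mll):
    """Fit a fold's MLL while ignoring OptimizationWarning (scoped to this call)."""
    with warnings.catch_warnings():
        warnings.filterwarnings('ignore', category=OptimizationWarning)
        # Same relaxed optimizer options as in the diff above.
        fit_gpytorch_mll(mll, options={"maxiter": 50, "ftol": 1e-6, "gtol": 1e-5})
```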
@@ -795,14 +846,14 @@ class BoTorchModel(BaseModel):
          # Convert pandas/numpy data to tensors if needed
          if isinstance(X, pd.DataFrame):
              X_encoded = self._encode_categorical_data(X)
-             X_tensor = torch.tensor(X_encoded.values, dtype=torch.double)
+             X_tensor = torch.tensor(X_encoded.values, dtype=torch.float64)
          elif isinstance(X, np.ndarray):
-             X_tensor = torch.tensor(X, dtype=torch.double)
+             X_tensor = torch.tensor(X, dtype=torch.float64)
          else:
              X_tensor = X # Assume it's already a tensor
 
          if isinstance(y, pd.Series) or isinstance(y, np.ndarray):
-             y_tensor = torch.tensor(y, dtype=torch.double).unsqueeze(-1)
+             y_tensor = torch.tensor(y, dtype=torch.float64).unsqueeze(-1)
          else:
              y_tensor = y # Assume it's already a tensor
 
@@ -921,4 +972,182 @@ class BoTorchModel(BaseModel):
          else:
              logger.info(" ✓ Uncertainty appears well-calibrated")
 
-         logger.info(f"{'='*60}\n")
+         logger.info(f"{'='*60}\n")
+
+     def evaluate_acquisition(
+         self,
+         X: Union[pd.DataFrame, np.ndarray],
+         acq_func: str = 'ucb',
+         acq_func_kwargs: Optional[dict] = None,
+         maximize: bool = True
+     ) -> Tuple[np.ndarray, None]:
+         """
+         Evaluate acquisition function at given points using BoTorch functions.
+
+         Args:
+             X: Points to evaluate (DataFrame or array with shape (n, d))
+             acq_func: Acquisition function name
+                 Analytic: 'ei', 'logei', 'pi', 'logpi', 'ucb'
+                 Batch: 'qei', 'qucb', 'qnipv'
+             acq_func_kwargs: Additional parameters (e.g., {'beta': 0.5, 'mc_samples': 128})
+             maximize: Whether we're maximizing (True) or minimizing (False)
+
+         Returns:
+             Tuple of (acq_values, None) - None because acq functions are deterministic
+
+         Example:
+             >>> points = pd.DataFrame({'temp': [300, 350, 400], 'pressure': [1, 2, 3]})
+             >>> acq_vals, _ = model.evaluate_acquisition(points, acq_func='ei', maximize=True)
+         """
+         from botorch.acquisition.analytic import (
+             ExpectedImprovement,
+             LogExpectedImprovement,
+             ProbabilityOfImprovement,
+             LogProbabilityOfImprovement,
+             UpperConfidenceBound,
+         )
+         from botorch.acquisition.monte_carlo import (
+             qExpectedImprovement,
+             qUpperConfidenceBound,
+         )
+         from botorch.acquisition.active_learning import qNegIntegratedPosteriorVariance
+         from botorch.sampling import SobolQMCNormalSampler
+
+         if not self.is_trained:
+             raise ValueError("Model must be trained before evaluating acquisition functions.")
+
+         # Encode categorical variables (same preprocessing as predict())
+         X_encoded = self._encode_categorical_data(X)
+
+         # Convert to torch tensor
+         if isinstance(X_encoded, pd.DataFrame):
+             X_tensor = torch.tensor(X_encoded.values, dtype=torch.float64)
+         else:
+             # If X_encoded is already a numpy array
+             X_tensor = torch.tensor(X_encoded, dtype=torch.float64)
+
+         # Add q=1 dimension if not present (batch_size, d) -> (batch_size, 1, d)
+         if X_tensor.ndim == 2:
+             X_tensor = X_tensor.unsqueeze(-2)
+
+         # Calculate best_f from ORIGINAL (untransformed) training data
+         # When using Standardize transform, best_f must be in original scale
+         if hasattr(self, 'Y_orig') and self.Y_orig is not None:
+             y_train_tensor = self.Y_orig
+         else:
+             # Fallback: use model's train_targets (may be transformed)
+             y_train_tensor = self.model.train_targets
+
+         if maximize:
+             best_f = torch.max(y_train_tensor)
+         else:
+             best_f = torch.min(y_train_tensor)
+
+         # Map acquisition function names
+         acq_func_lower = acq_func.lower()
+
+         # Parse kwargs with defaults
+         if acq_func_kwargs is None:
+             acq_func_kwargs = {}
+
+         beta = acq_func_kwargs.get('beta', 0.5)
+         mc_samples = acq_func_kwargs.get('mc_samples', 128)
+
+         # Determine if this is a batch (q) acquisition function
+         is_batch_acq = acq_func_lower.startswith('q')
+
+         # Create acquisition function
+         try:
+             if acq_func_lower in ['ei', 'expectedimprovement']:
+                 acq_fn = ExpectedImprovement(
+                     model=self.model,
+                     best_f=best_f,
+                     maximize=maximize
+                 )
+             elif acq_func_lower in ['logei', 'logexpectedimprovement']:
+                 acq_fn = LogExpectedImprovement(
+                     model=self.model,
+                     best_f=best_f,
+                     maximize=maximize
+                 )
+             elif acq_func_lower in ['pi', 'probabilityofimprovement']:
+                 acq_fn = ProbabilityOfImprovement(
+                     model=self.model,
+                     best_f=best_f,
+                     maximize=maximize
+                 )
+             elif acq_func_lower in ['logpi', 'logprobabilityofimprovement']:
+                 acq_fn = LogProbabilityOfImprovement(
+                     model=self.model,
+                     best_f=best_f,
+                     maximize=maximize
+                 )
+             elif acq_func_lower in ['ucb', 'upperconfidencebound']:
+                 acq_fn = UpperConfidenceBound(
+                     model=self.model,
+                     beta=beta,
+                     maximize=maximize
+                 )
+             elif acq_func_lower in ['qei', 'qexpectedimprovement']:
+                 sampler = SobolQMCNormalSampler(sample_shape=torch.Size([mc_samples]))
+                 acq_fn = qExpectedImprovement(
+                     model=self.model,
+                     best_f=best_f,
+                     sampler=sampler
+                 )
+             elif acq_func_lower in ['qucb', 'qupperconfidencebound']:
+                 sampler = SobolQMCNormalSampler(sample_shape=torch.Size([mc_samples]))
+                 acq_fn = qUpperConfidenceBound(
+                     model=self.model,
+                     beta=beta,
+                     sampler=sampler
+                 )
+             elif acq_func_lower in ['qnipv', 'qnegintegratedposteriorvariance', 'qipv']:
+                 # qNIPV requires mc_points for integration over the search space
+                 n_mc_points = acq_func_kwargs.get('n_mc_points', 500)
+
+                 # Generate MC points uniformly over the input space
+                 # Get bounds from the encoded input space
+                 if hasattr(self, 'X_train') and self.X_train is not None:
+                     # Use training data bounds
+                     lower_bounds = self.X_train.min(dim=0)[0]
+                     upper_bounds = self.X_train.max(dim=0)[0]
+                 else:
+                     # Fallback: assume normalized space [0, 1]
+                     n_dims = X_tensor.shape[-1]
+                     lower_bounds = torch.zeros(n_dims, dtype=torch.float64)
+                     upper_bounds = torch.ones(n_dims, dtype=torch.float64)
+
+                 # Generate random points
+                 mc_points = torch.rand(n_mc_points, len(lower_bounds), dtype=torch.float64)
+                 mc_points = mc_points * (upper_bounds - lower_bounds) + lower_bounds
+
+                 acq_fn = qNegIntegratedPosteriorVariance(
+                     model=self.model,
+                     mc_points=mc_points
+                 )
+             else:
+                 raise ValueError(
+                     f"Unknown acquisition function '{acq_func}' for BoTorch backend. "
+                     f"Valid options are: 'ei', 'logei', 'pi', 'logpi', 'ucb', 'qei', 'qucb', 'qnipv'"
+                 )
+
+             # Evaluate acquisition function
+             with torch.no_grad():
+                 if is_batch_acq:
+                     # For batch acquisitions, evaluate each point as q=1
+                     # X_tensor is already (batch_size, 1, d)
+                     acq_values = acq_fn(X_tensor).cpu().numpy()
+                 else:
+                     # For analytic acquisitions
+                     acq_values = acq_fn(X_tensor).cpu().numpy()
+
+             # Ensure output is 1D array
+             if acq_values.ndim > 1:
+                 acq_values = acq_values.ravel()
+
+             return acq_values, None
+
+         except Exception as e:
+             logger.error(f"Error evaluating acquisition function: {e}")
+             raise
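A usage sketch for the new method, assuming `model` is a trained `BoTorchModel` over the two features from the docstring example:

```python
import pandas as pd

points = pd.DataFrame({'temp': [300, 350, 400], 'pressure': [1, 2, 3]})

# Analytic UCB; beta trades off exploration vs. exploitation.
ucb_vals, _ = model.evaluate_acquisition(
    points, acq_func='ucb', acq_func_kwargs={'beta': 0.5}
)

# Monte Carlo qEI; mc_samples sets the Sobol QMC sample count.
qei_vals, _ = model.evaluate_acquisition(
    points, acq_func='qei', acq_func_kwargs={'mc_samples': 128}
)
```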