pymodaq 5.0.5__py3-none-any.whl → 5.1.0a0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of pymodaq might be problematic.
- pymodaq/control_modules/daq_move.py +77 -64
- pymodaq/control_modules/daq_move_ui.py +16 -15
- pymodaq/control_modules/daq_viewer.py +95 -87
- pymodaq/control_modules/daq_viewer_ui.py +22 -23
- pymodaq/control_modules/mocks.py +2 -2
- pymodaq/control_modules/move_utility_classes.py +28 -19
- pymodaq/control_modules/thread_commands.py +138 -0
- pymodaq/control_modules/utils.py +88 -20
- pymodaq/control_modules/viewer_utility_classes.py +8 -17
- pymodaq/dashboard.py +90 -27
- pymodaq/examples/qt_less_standalone_module.py +48 -11
- pymodaq/extensions/__init__.py +7 -3
- pymodaq/extensions/adaptive/__init__.py +2 -0
- pymodaq/extensions/adaptive/adaptive_optimization.py +159 -0
- pymodaq/extensions/adaptive/loss_function/_1d_loss_functions.py +73 -0
- pymodaq/extensions/adaptive/loss_function/_2d_loss_functions.py +86 -0
- pymodaq/extensions/adaptive/loss_function/__init__.py +3 -0
- pymodaq/extensions/adaptive/loss_function/loss_factory.py +106 -0
- pymodaq/extensions/adaptive/utils.py +97 -0
- pymodaq/extensions/bayesian/__init__.py +1 -1
- pymodaq/extensions/bayesian/acquisition/__init__.py +2 -0
- pymodaq/extensions/bayesian/acquisition/acquisition_function_factory.py +71 -0
- pymodaq/extensions/bayesian/acquisition/base_acquisition_function.py +86 -0
- pymodaq/extensions/bayesian/bayesian_optimization.py +121 -0
- pymodaq/extensions/bayesian/utils.py +27 -286
- pymodaq/extensions/daq_logger/daq_logger.py +7 -12
- pymodaq/extensions/daq_logger/h5logging.py +1 -1
- pymodaq/extensions/daq_scan.py +18 -47
- pymodaq/extensions/h5browser.py +3 -34
- pymodaq/extensions/optimizers_base/__init__.py +0 -0
- pymodaq/extensions/{bayesian/bayesian_optimisation.py → optimizers_base/optimizer.py} +441 -334
- pymodaq/extensions/optimizers_base/thread_commands.py +20 -0
- pymodaq/extensions/optimizers_base/utils.py +378 -0
- pymodaq/extensions/pid/pid_controller.py +6 -10
- pymodaq/extensions/utils.py +12 -0
- pymodaq/utils/data.py +1 -0
- pymodaq/utils/gui_utils/loader_utils.py +2 -0
- pymodaq/utils/h5modules/module_saving.py +134 -22
- pymodaq/utils/leco/daq_move_LECODirector.py +73 -73
- pymodaq/utils/leco/daq_xDviewer_LECODirector.py +36 -84
- pymodaq/utils/leco/director_utils.py +25 -10
- pymodaq/utils/leco/leco_director.py +65 -26
- pymodaq/utils/leco/pymodaq_listener.py +118 -68
- pymodaq/utils/leco/utils.py +24 -24
- pymodaq/utils/managers/modules_manager.py +37 -8
- pymodaq/utils/scanner/scanners/_1d_scanners.py +0 -38
- pymodaq/utils/scanner/scanners/_2d_scanners.py +0 -58
- {pymodaq-5.0.5.dist-info → pymodaq-5.1.0a0.dist-info}/METADATA +4 -3
- {pymodaq-5.0.5.dist-info → pymodaq-5.1.0a0.dist-info}/RECORD +52 -38
- {pymodaq-5.0.5.dist-info → pymodaq-5.1.0a0.dist-info}/entry_points.txt +0 -2
- pymodaq/utils/leco/desktop.ini +0 -2
- {pymodaq-5.0.5.dist-info → pymodaq-5.1.0a0.dist-info}/WHEEL +0 -0
- {pymodaq-5.0.5.dist-info → pymodaq-5.1.0a0.dist-info}/licenses/LICENSE +0 -0
pymodaq/extensions/adaptive/loss_function/loss_factory.py (new file)
@@ -0,0 +1,106 @@
+from abc import ABCMeta, abstractmethod
+from typing import Callable, Type, Union, Sequence
+from pymodaq_utils.enums import StrEnum
+
+
+from adaptive.learner import Learner1D, Learner2D, LearnerND, BaseLearner
+
+
+from pymodaq_utils.logger import set_logger, get_module_name
+
+logger = set_logger(get_module_name(__file__))
+
+
+class LossDim(StrEnum):
+
+    LOSS_1D = 'Loss1D'
+    LOSS_2D = 'Loss2D'
+    LOSS_ND = 'LossND'
+
+    @staticmethod
+    def get_enum_from_dim_as_int(dim: int):
+        if dim == 1:
+            return LossDim.LOSS_1D
+        elif dim == 2:
+            return LossDim.LOSS_2D
+        elif dim > 2:
+            return LossDim.LOSS_ND
+        else:
+            raise ValueError(f'No Loss with dim={dim} is known')
+
+    def get_learner_from_enum(self, bounds: Sequence[tuple[float, float]],
+                              loss_function: 'LossFunctionBase') -> Union[Learner1D, Learner2D, LearnerND]:
+        """ Return an instance of a Learner given the enum value
+
+        Parameters
+        ----------
+        bounds: type depends on the learner, could be a tuple of real numbers (Learner1D) or a tuple of tuples of real
+            numbers
+        loss_function: one of the LossFunction classes as given by the LossFunctionFactory
+
+        See Also
+        --------
+        :class:`LossFunctionFactory`
+        """
+        if self == self.LOSS_1D:
+            bounds = bounds[0]
+            return Learner1D(None, bounds, loss_per_interval=loss_function)
+        elif self == self.LOSS_2D:
+            return Learner2D(None, bounds, loss_per_triangle=loss_function)
+        elif self == self.LOSS_ND:
+            return LearnerND(None, bounds, loss_per_simplex=loss_function)
+        else:
+            raise ValueError(f'No learner for this enum: {self}')
+
+
+class LossFunctionBase(metaclass=ABCMeta):
+    _loss: Callable
+    dim: LossDim
+    usual_name: str
+    params: list[dict] = []
+
+    def __call__(self, *args, **kwargs):
+        return self._loss(**kwargs)
+
+
+class LossFunctionFactory:
+    _builders = {}
+
+    @classmethod
+    def register(cls) -> Callable:
+        """ To be used as a decorator
+
+        Register in the class registry a new LossFunction class using its 2 identifiers: LossDim and usual_name
+        """
+
+        def inner_wrapper(wrapped_class: LossFunctionBase) -> Callable:
+            key = wrapped_class.usual_name
+            dim = wrapped_class.dim
+            if dim not in cls._builders:
+                cls._builders[dim] = {}
+            if key not in cls._builders[dim]:
+                cls._builders[dim][key] = wrapped_class
+            else:
+                logger.warning(f'The {key} builder is already registered. Replacing it')
+            return wrapped_class
+
+        return inner_wrapper
+
+    @classmethod
+    def get(cls, dim: LossDim, key: str) -> Type[LossFunctionBase]:
+        loss = cls._builders.get(dim).get(key)
+        if not loss:
+            raise ValueError(f'Unknown Loss function with dim={dim} and key={key}')
+        return loss
+
+    @classmethod
+    def create(cls, dim: LossDim, key: str, **kwargs) -> LossFunctionBase:
+        return cls.get(dim, key)()(**kwargs)
+
+    @classmethod
+    def dims(cls) -> list[LossDim]:
+        return list(cls._builders.keys())
+
+    @classmethod
+    def keys(cls, dim: LossDim) -> list[str]:
+        return list(cls._builders.get(dim).keys())
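For orientation, here is a minimal sketch of how this registry is meant to be used, assuming the concrete wrappers added in _1d_loss_functions.py follow this pattern. The class name CurvatureLoss1D, its 'Curvature' key, and the params entry are illustrative and not part of this diff; curvature_loss_function is the loss factory shipped with the adaptive package.

from adaptive.learner.learner1D import curvature_loss_function

from pymodaq.extensions.adaptive.loss_function.loss_factory import (
    LossDim, LossFunctionBase, LossFunctionFactory)


@LossFunctionFactory.register()
class CurvatureLoss1D(LossFunctionBase):
    # staticmethod keeps the adaptive loss factory unbound when accessed via self
    _loss = staticmethod(curvature_loss_function)
    dim = LossDim.LOSS_1D
    usual_name = 'Curvature'
    params = [{'title': 'Area factor:', 'name': 'area_factor', 'type': 'float', 'value': 1.}]


# create() instantiates the wrapper and then calls it, so the returned object is
# the actual per-interval loss callable handed over to the adaptive Learner
loss = LossFunctionFactory.create(LossDim.LOSS_1D, 'Curvature', area_factor=1.)
learner = LossDim.LOSS_1D.get_learner_from_enum(bounds=[(0., 1.)], loss_function=loss)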
pymodaq/extensions/adaptive/utils.py (new file)
@@ -0,0 +1,97 @@
+# -*- coding: utf-8 -*-
+"""
+Created the 31/08/2023
+
+@author: Sebastien Weber
+"""
+
+from typing import List, TYPE_CHECKING, Union, Dict, Tuple, Iterable
+
+import numpy as np
+from collections import OrderedDict
+from collections.abc import Iterable as IterableClass
+
+
+from pymodaq_utils.logger import set_logger, get_module_name
+
+from pymodaq_data.data import (DataCalculated, DataRaw, Axis)
+
+
+from pymodaq.extensions.optimizers_base.utils import (
+    GenericAlgorithm, OptimizerModelDefault, StopType, StoppingParameters,
+    OptimizerConfig)
+from pymodaq.extensions.adaptive.loss_function.loss_factory import LossDim, LossFunctionBase, LossFunctionFactory
+
+logger = set_logger(get_module_name(__file__))
+
+
+class AdaptiveConfig(OptimizerConfig):
+    config_name = f"adaptive_settings"
+
+
+class AdaptiveAlgorithm(GenericAlgorithm):
+
+    def __init__(self, ini_random: int, bounds: list[tuple[float, float]],
+                 loss_type: LossDim, kind: str, **kwargs):
+        super().__init__(ini_random)
+        self._algo = loss_type.get_learner_from_enum(
+            bounds=bounds,
+            loss_function=LossFunctionFactory.create(loss_type, kind, **kwargs))
+        self._best = 1
+
+    def set_prediction_function(self, loss_type=LossDim.LOSS_1D, kind='', **kwargs):
+        self._prediction = LossFunctionFactory.create(loss_type, kind, **kwargs)
+
+    def update_prediction_function(self):
+        pass
+
+    @property
+    def tradeoff(self) -> float:
+        return 0.
+
+    @property
+    def bounds(self) -> List[np.ndarray]:
+        return [np.array(bound) if isinstance(bound, IterableClass) else np.array([bound]) for bound in self._algo.bounds]
+
+    @bounds.setter
+    def bounds(self, bounds: Union[Tuple[float, float], Iterable[np.ndarray]]):
+        # todo: check the type
+        self._algo.bounds = bounds
+
+    def prediction_ask(self) -> np.ndarray:
+        """ Ask the prediction function or algo to provide the next point to probe"""
+        return np.atleast_1d(self._algo.ask(1)[0][0])
+
+    def tell(self, function_value: float):
+        next_point = tuple(self._next_point)
+        if len(next_point) == 1:
+            next_point = next_point[0]  # the Learners don't all share the same tell signature
+        self._algo.tell(next_point, function_value)
+
+    @property
+    def best_fitness(self) -> float:
+        """ For adaptive optimization this is only used as a stopping criterion"""
+        if 1 / self._algo.loss() > self._best:
+            self._best = 1 / self._algo.loss()
+        return self._best
+
+    @property
+    def best_individual(self) -> Union[np.ndarray, None]:
+        """ For adaptive optimization this doesn't mean anything"""
+        return np.atleast_1d(self.bounds[0])
+
+    def stopping(self, ind_iter: int, stopping_parameters: StoppingParameters):
+        if ind_iter >= stopping_parameters.niter:
+            return True
+        if ind_iter > stopping_parameters.npoints and stopping_parameters.stop_type == 'Predict':
+            try:
+                return self.best_fitness < stopping_parameters.tolerance
+            except IndexError:
+                return False
+        return False
+
+
+
+
+
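Below, a hedged sketch of the ask/tell cycle this class supports. It assumes the GenericAlgorithm base class (added in optimizers_base/utils.py, not shown in this diff) keeps the suggested coordinates in _next_point between ask and tell, and it reuses the hypothetical 'Curvature' loss registered in the previous sketch.

import numpy as np

from pymodaq.extensions.adaptive.loss_function.loss_factory import LossDim
from pymodaq.extensions.adaptive.utils import AdaptiveAlgorithm

algo = AdaptiveAlgorithm(ini_random=0, bounds=[(-1., 1.)],
                         loss_type=LossDim.LOSS_1D, kind='Curvature')

for _ in range(20):
    point = algo.prediction_ask()         # coordinates of the next point to probe
    algo._next_point = point              # bookkeeping normally done by the base class
    algo.tell(float(np.cos(point[0])))    # stand-in for a real detector reading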
pymodaq/extensions/bayesian/__init__.py
@@ -1,2 +1,2 @@
-from . import bayesian_optimisation
+from . import bayesian_optimization
 from . import utils
pymodaq/extensions/bayesian/acquisition/acquisition_function_factory.py (new file)
@@ -0,0 +1,71 @@
+from abc import ABCMeta, abstractmethod
+from typing import Callable
+
+from numpy.random import RandomState
+from bayes_opt.acquisition import AcquisitionFunction
+
+from pymodaq_gui.managers.parameter_manager import ParameterManager
+from pymodaq_utils.logger import set_logger, get_module_name
+
+logger = set_logger(get_module_name(__file__))
+
+
+class GenericAcquisitionFunctionBase(metaclass=ABCMeta):
+    _function: AcquisitionFunction
+    _usual_name: str
+    params: property(abstractmethod)
+
+    def base_acq(self, mean, std):
+        return self._function.base_acq(mean, std)
+
+    def decay_exploration(self):
+        self._function.decay_exploration()
+
+    @property
+    def tradeoff(self):
+        raise NotImplementedError
+
+    @tradeoff.setter
+    def tradeoff(self, tradeoff):
+        raise NotImplementedError
+
+    def suggest(self, gaussian_process, target_space, n_random=1000, n_l_bfgs_b=10, fit_gp=True):
+        return self._function.suggest(gaussian_process, target_space, n_random, n_l_bfgs_b, fit_gp)
+
+
+class GenericAcquisitionFunctionFactory:
+    _builders = {}
+
+    @classmethod
+    def register(cls) -> Callable:
+        """ To be used as a decorator
+
+        Register in the class registry a new acquisition function class using its identifier: _usual_name
+        """
+
+        def inner_wrapper(wrapped_class: GenericAcquisitionFunctionBase) -> Callable:
+            key = wrapped_class._usual_name
+
+            if key not in cls._builders:
+                cls._builders[key] = wrapped_class
+            else:
+                logger.warning(f'The {key} builder is already registered. Replacing it')
+            return wrapped_class
+
+        return inner_wrapper
+
+    @classmethod
+    def get(cls, key: str) -> GenericAcquisitionFunctionBase:
+        builder = cls._builders.get(key)
+        if not builder:
+            raise ValueError(key)
+        return builder
+
+    @classmethod
+    def create(cls, key, **kwargs) -> GenericAcquisitionFunctionBase:
+        return cls.get(key)(**kwargs)
+
+    @classmethod
+    def keys(cls) -> list[str]:
+        return list(cls._builders.keys())
pymodaq/extensions/bayesian/acquisition/base_acquisition_function.py (new file)
@@ -0,0 +1,86 @@
+from bayes_opt.acquisition import UpperConfidenceBound, ExpectedImprovement, ProbabilityOfImprovement
+from pymodaq.extensions.bayesian.acquisition import GenericAcquisitionFunctionFactory, GenericAcquisitionFunctionBase
+
+
+@GenericAcquisitionFunctionFactory.register()
+class GenericUpperConfidenceBound(GenericAcquisitionFunctionBase):
+    _usual_name = "Upper Confidence Bound"
+    params = [
+        {'title': 'Kappa:', 'name': 'kappa', 'type': 'slide', 'value': 2.576,
+         'min': 0.001, 'max': 100, 'subtype': 'log',
+         'tip': 'Parameter governing how the next points are sampled. '
+                'Higher value = favors spaces that are least explored. '
+                'Lower value = favors spaces where the regression function is the '
+                'highest.'},
+        {'title': 'Kappa actual:', 'name': 'tradeoff_actual', 'type': 'float', 'value': 2.576,
+         'tip': 'Current value of the kappa parameter', 'readonly': True},
+        {'title': 'Exploration decay:', 'name': 'exploration_decay', 'type': 'float', 'value': 0.9,
+         'tip': 'Kappa is multiplied by this factor every iteration.'},
+        {'title': 'Exploration decay delay:', 'name': 'exploration_decay_delay', 'type': 'int', 'value': 20,
+         'tip': 'Number of iterations that must have passed before applying the decay to kappa.'}
+    ]
+
+    def __init__(self, **kwargs):
+        super().__init__()
+        self._function = UpperConfidenceBound(**kwargs)
+
+    @property
+    def tradeoff(self):
+        return self._function.kappa
+
+    @tradeoff.setter
+    def tradeoff(self, tradeoff):
+        self._function.kappa = tradeoff
+
+
+@GenericAcquisitionFunctionFactory.register()
+class GenericProbabilityOfImprovement(GenericAcquisitionFunctionBase):
+    _usual_name = "Probability of Improvement"
+    params = [
+        {'title': 'Xi:', 'name': 'xi', 'type': 'slide', 'value': 0,
+         'tip': 'Governs the exploration/exploitation tradeoff. '
+                'Lower prefers exploitation, higher prefers exploration.'},
+        {'title': 'Xi actual:', 'name': 'tradeoff_actual', 'type': 'float', 'value': 2.576,
+         'tip': 'Current value of the xi parameter', 'readonly': True},
+        {'title': 'Exploration decay:', 'name': 'exploration_decay', 'type': 'float', 'value': 0.9,
+         'tip': 'Xi is multiplied by this factor every iteration.'},
+        {'title': 'Exploration decay delay:', 'name': 'exploration_decay_delay', 'type': 'int', 'value': 20,
+         'tip': 'Number of iterations that must have passed before applying the decay to xi.'}
+    ]
+
+    def __init__(self, **kwargs):
+        super().__init__()
+        self._function = ProbabilityOfImprovement(**kwargs)
+
+    @property
+    def tradeoff(self):
+        return self._function.xi
+
+    @tradeoff.setter
+    def tradeoff(self, tradeoff):
+        self._function.xi = tradeoff
+
+
+@GenericAcquisitionFunctionFactory.register()
+class GenericExpectedImprovement(GenericAcquisitionFunctionBase):
+    _usual_name = "Expected Improvement"
+    params = [
+        {'title': 'Xi:', 'name': 'xi', 'type': 'slide', 'value': 0,
+         'tip': 'Governs the exploration/exploitation tradeoff. '
+                'Lower prefers exploitation, higher prefers exploration.'},
+        {'title': 'Xi actual:', 'name': 'tradeoff_actual', 'type': 'float', 'value': 2.576,
+         'tip': 'Current value of the xi parameter', 'readonly': True},
+        {'title': 'Exploration decay:', 'name': 'exploration_decay', 'type': 'float', 'value': 0.9,
+         'tip': 'Xi is multiplied by this factor every iteration.'},
+        {'title': 'Exploration decay delay:', 'name': 'exploration_decay_delay', 'type': 'int', 'value': 20,
+         'tip': 'Number of iterations that must have passed before applying the decay to xi.'}
+    ]
+
+    def __init__(self, **kwargs):
+        super().__init__()
+        self._function = ExpectedImprovement(**kwargs)
+
+    @property
+    def tradeoff(self):
+        return self._function.xi
+
+    @tradeoff.setter
+    def tradeoff(self, tradeoff):
+        self._function.xi = tradeoff
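A short sketch of how these registered wrappers are retrieved and driven; the constructor keywords mirror the params declared above and are forwarded to bayes_opt's UpperConfidenceBound (the registrations run when the acquisition package is imported, as the extension module below relies on).

from pymodaq.extensions.bayesian.acquisition import GenericAcquisitionFunctionFactory

print(GenericAcquisitionFunctionFactory.keys())
# ['Upper Confidence Bound', 'Probability of Improvement', 'Expected Improvement']

acq = GenericAcquisitionFunctionFactory.create(
    'Upper Confidence Bound',
    kappa=2.576, exploration_decay=0.9, exploration_decay_delay=20)
acq.decay_exploration()  # delegates to bayes_opt's decay schedule
print(acq.tradeoff)      # current kappa value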
pymodaq/extensions/bayesian/bayesian_optimization.py (new file)
@@ -0,0 +1,121 @@
+
+
+from pymodaq_utils import config as config_mod
+from pymodaq_utils.logger import set_logger, get_module_name
+from pymodaq_utils.utils import ThreadCommand
+
+
+from pymodaq.extensions.bayesian.utils import BayesianAlgorithm, BayesianConfig
+
+from pymodaq.extensions.bayesian.acquisition import GenericAcquisitionFunctionFactory
+
+from pymodaq.extensions.optimizers_base.optimizer import (
+    GenericOptimization, OptimizationRunner, optimizer_params)
+from pymodaq.extensions.optimizers_base.utils import OptimizerModelDefault, find_key_in_nested_dict
+from pymodaq.extensions.optimizers_base.thread_commands import OptimizerToRunner
+
+logger = set_logger(get_module_name(__file__))
+config = config_mod.Config()
+
+
+EXTENSION_NAME = 'BayesianOptimization'
+CLASS_NAME = 'BayesianOptimization'
+
+PREDICTION_NAMES = list(GenericAcquisitionFunctionFactory.keys())
+PREDICTION_PARAMS = [{'title': 'Kind', 'name': 'kind', 'type': 'list',
+                      'value': PREDICTION_NAMES[0],
+                      'limits': PREDICTION_NAMES}
+                     ] + GenericAcquisitionFunctionFactory.get(
+    PREDICTION_NAMES[0]).params
+
+
+class BayesianOptimizationRunner(OptimizationRunner):
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+
+    def queue_command(self, command: ThreadCommand):
+        """ Handle commands sent to the runner; a PREDICTION command rebuilds the acquisition function
+        """
+        if command.command == OptimizerToRunner.PREDICTION:
+            utility_params = {k: v for k, v in command.attribute.items() if k != "kind" and k != "tradeoff_actual"}
+            self.optimization_algorithm.set_acquisition_function(
+                command.attribute['kind'],
+                **utility_params)
+        else:
+            super().queue_command(command)
+
+
+class BayesianOptimization(GenericOptimization):
+    """ PyMoDAQ extension of the DashBoard to perform the optimization of a target signal
+    taken from the detectors as a function of one or more parameters controlled by the actuators.
+    """
+
+    runner = BayesianOptimizationRunner
+    params = optimizer_params(PREDICTION_PARAMS)
+    config_saver = BayesianConfig
+
+    def ini_custom_attributes(self):
+        """ Here you can reimplement specific attributes"""
+        self._base_name: str = 'Bayesian'
+
+    def update_after_actuators_changed(self, actuators: list[str]):
+        """ Actions to do after the actuators have been updated
+        """
+        pass
+
+    def validate_config(self) -> bool:
+        utility = find_key_in_nested_dict(self.optimizer_config.to_dict(), 'prediction')
+        if utility:
+            try:
+                utility_params = {k: v for k, v in utility.items()
+                                  if k != "kind" and k != "tradeoff_actual"}
+                GenericAcquisitionFunctionFactory.create(utility['kind'], **utility_params)
+            except ValueError:
+                return False
+
+        return True
+
+    def value_changed(self, param):
+        """ to be subclassed for actions to perform when one of the parameters' values in self.settings is changed
+
+        For instance:
+        if param.name() == 'do_something':
+            if param.value():
+                print('Do something')
+                self.settings.child('main_settings', 'something_done').setValue(False)
+
+        Parameters
+        ----------
+        param: (Parameter) the parameter whose value just changed
+        """
+        super().value_changed(param)
+        if param.name() == 'kind':
+            utility_settings = self.settings.child('main_settings', 'prediction')
+            old_children = utility_settings.children()[1:]
+            for child in old_children:
+                utility_settings.removeChild(child)
+            utility_settings.addChildren(GenericAcquisitionFunctionFactory.get(param.value()).params)
+
+    def set_algorithm(self):
+        self.algorithm = BayesianAlgorithm(
+            ini_random=self.settings['main_settings', 'ini_random'],
+            bounds=self.format_bounds())
+
+
+def main():
+    from pymodaq_gui.utils.utils import mkQApp
+    from pymodaq.utils.gui_utils.loader_utils import load_dashboard_with_preset
+
+    app = mkQApp('Bayesian Optimiser')
+    preset_file_name = config('presets', f'default_preset_for_scan')
+
+    dashboard, extension, win = load_dashboard_with_preset(preset_file_name, 'Bayesian')
+
+    app.exec()
+
+    return dashboard, extension, win
+
+
+if __name__ == '__main__':
+    main()
+
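Finally, a hedged sketch of the message that triggers the PREDICTION branch above. The helper function is illustrative: runner stands for a live BayesianOptimizationRunner, and the attribute dict mirrors PREDICTION_PARAMS, with 'kind' and 'tradeoff_actual' stripped by queue_command before the acquisition function is rebuilt.

from pymodaq_utils.utils import ThreadCommand
from pymodaq.extensions.optimizers_base.thread_commands import OptimizerToRunner


def send_prediction_update(runner):
    # runner: a running BayesianOptimizationRunner instance
    cmd = ThreadCommand(OptimizerToRunner.PREDICTION,
                        {'kind': 'Upper Confidence Bound',
                         'kappa': 2.576,
                         'tradeoff_actual': 2.576,
                         'exploration_decay': 0.9,
                         'exploration_decay_delay': 20})
    runner.queue_command(cmd)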