ChessAnalysisPipeline 0.0.17.dev3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- CHAP/TaskManager.py +216 -0
- CHAP/__init__.py +27 -0
- CHAP/common/__init__.py +57 -0
- CHAP/common/models/__init__.py +8 -0
- CHAP/common/models/common.py +124 -0
- CHAP/common/models/integration.py +659 -0
- CHAP/common/models/map.py +1291 -0
- CHAP/common/processor.py +2869 -0
- CHAP/common/reader.py +658 -0
- CHAP/common/utils.py +110 -0
- CHAP/common/writer.py +730 -0
- CHAP/edd/__init__.py +23 -0
- CHAP/edd/models.py +876 -0
- CHAP/edd/processor.py +3069 -0
- CHAP/edd/reader.py +1023 -0
- CHAP/edd/select_material_params_gui.py +348 -0
- CHAP/edd/utils.py +1572 -0
- CHAP/edd/writer.py +26 -0
- CHAP/foxden/__init__.py +19 -0
- CHAP/foxden/models.py +71 -0
- CHAP/foxden/processor.py +124 -0
- CHAP/foxden/reader.py +224 -0
- CHAP/foxden/utils.py +80 -0
- CHAP/foxden/writer.py +168 -0
- CHAP/giwaxs/__init__.py +11 -0
- CHAP/giwaxs/models.py +491 -0
- CHAP/giwaxs/processor.py +776 -0
- CHAP/giwaxs/reader.py +8 -0
- CHAP/giwaxs/writer.py +8 -0
- CHAP/inference/__init__.py +7 -0
- CHAP/inference/processor.py +69 -0
- CHAP/inference/reader.py +8 -0
- CHAP/inference/writer.py +8 -0
- CHAP/models.py +227 -0
- CHAP/pipeline.py +479 -0
- CHAP/processor.py +125 -0
- CHAP/reader.py +124 -0
- CHAP/runner.py +277 -0
- CHAP/saxswaxs/__init__.py +7 -0
- CHAP/saxswaxs/processor.py +8 -0
- CHAP/saxswaxs/reader.py +8 -0
- CHAP/saxswaxs/writer.py +8 -0
- CHAP/server.py +125 -0
- CHAP/sin2psi/__init__.py +7 -0
- CHAP/sin2psi/processor.py +8 -0
- CHAP/sin2psi/reader.py +8 -0
- CHAP/sin2psi/writer.py +8 -0
- CHAP/tomo/__init__.py +15 -0
- CHAP/tomo/models.py +210 -0
- CHAP/tomo/processor.py +3862 -0
- CHAP/tomo/reader.py +9 -0
- CHAP/tomo/writer.py +59 -0
- CHAP/utils/__init__.py +6 -0
- CHAP/utils/converters.py +188 -0
- CHAP/utils/fit.py +2947 -0
- CHAP/utils/general.py +2655 -0
- CHAP/utils/material.py +274 -0
- CHAP/utils/models.py +595 -0
- CHAP/utils/parfile.py +224 -0
- CHAP/writer.py +122 -0
- MLaaS/__init__.py +0 -0
- MLaaS/ktrain.py +205 -0
- MLaaS/mnist_img.py +83 -0
- MLaaS/tfaas_client.py +371 -0
- chessanalysispipeline-0.0.17.dev3.dist-info/LICENSE +60 -0
- chessanalysispipeline-0.0.17.dev3.dist-info/METADATA +29 -0
- chessanalysispipeline-0.0.17.dev3.dist-info/RECORD +70 -0
- chessanalysispipeline-0.0.17.dev3.dist-info/WHEEL +5 -0
- chessanalysispipeline-0.0.17.dev3.dist-info/entry_points.txt +2 -0
- chessanalysispipeline-0.0.17.dev3.dist-info/top_level.txt +2 -0
CHAP/utils/fit.py
ADDED
|
@@ -0,0 +1,2947 @@
|
|
|
1
|
+
#!/usr/bin/env python
|
|
2
|
+
#-*- coding: utf-8 -*-
|
|
3
|
+
"""
|
|
4
|
+
File : fit.py
|
|
5
|
+
Author : Rolf Verberg <rolfverberg AT gmail dot com>
|
|
6
|
+
Description: General curve fitting module
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
# System modules
|
|
10
|
+
from collections import Counter
|
|
11
|
+
from copy import deepcopy
|
|
12
|
+
from os import (
|
|
13
|
+
cpu_count,
|
|
14
|
+
mkdir,
|
|
15
|
+
path,
|
|
16
|
+
)
|
|
17
|
+
from re import sub
|
|
18
|
+
from shutil import rmtree
|
|
19
|
+
from sys import float_info
|
|
20
|
+
#from time import time
|
|
21
|
+
|
|
22
|
+
# Third party modules
|
|
23
|
+
try:
|
|
24
|
+
from joblib import (
|
|
25
|
+
Parallel,
|
|
26
|
+
delayed,
|
|
27
|
+
)
|
|
28
|
+
HAVE_JOBLIB = True
|
|
29
|
+
except ImportError:
|
|
30
|
+
HAVE_JOBLIB = False
|
|
31
|
+
from nexusformat.nexus import NXdata
|
|
32
|
+
import numpy as np
|
|
33
|
+
|
|
34
|
+
# Local modules
|
|
35
|
+
from CHAP.processor import Processor
|
|
36
|
+
from CHAP.utils.general import (
|
|
37
|
+
# is_int,
|
|
38
|
+
is_index,
|
|
39
|
+
index_nearest,
|
|
40
|
+
quick_plot,
|
|
41
|
+
)
|
|
42
|
+
|
|
43
|
+
FLOAT_MIN = float_info.min
|
|
44
|
+
FLOAT_MAX = float_info.max
|
|
45
|
+
FLOAT_EPS = float_info.epsilon
|
|
46
|
+
|
|
47
|
+
# sigma = fwhm_factor*fwhm
|
|
48
|
+
fwhm_factor = {
|
|
49
|
+
'gaussian': 'fwhm/(2*sqrt(2*log(2)))',
|
|
50
|
+
'lorentzian': '0.5*fwhm',
|
|
51
|
+
'splitlorentzian': '0.5*fwhm', # sigma = sigma_r
|
|
52
|
+
'voight': '0.2776*fwhm', # sigma = gamma
|
|
53
|
+
'pseudovoight': '0.5*fwhm', # fraction = 0.5
|
|
54
|
+
}
|
|
55
|
+
|
|
56
|
+
# amplitude = height_factor*height*fwhm
|
|
57
|
+
height_factor = {
|
|
58
|
+
'gaussian': 'height*fwhm*0.5*sqrt(pi/log(2))',
|
|
59
|
+
'lorentzian': 'height*fwhm*0.5*pi',
|
|
60
|
+
'splitlorentzian': 'height*fwhm*0.5*pi', # sigma = sigma_r
|
|
61
|
+
'voight': '3.334*height*fwhm', # sigma = gamma
|
|
62
|
+
'pseudovoight': '1.268*height*fwhm', # fraction = 0.5
|
|
63
|
+
}
|
|
64
|
+
|
|
65
|
+
|
|
66
|
+
class FitProcessor(Processor):
    """A processor to perform a fit on a data set or data map."""
    def process(self, data, config=None):
        """Fit the data and return a CHAP.utils.fit.Fit or
        CHAP.utils.fit.FitMap object depending on the dimensionality
        of the input data. The input data should contain a NeXus NXdata
        object, with properly defined signal and axis.

        :param data: Input data containing the
            nexusformat.nexus.NXdata object to fit.
        :type data: list[PipelineData]
        :param config: Fit configuration.
        :type config: dict, optional
        :raises ValueError: Invalid input or configuration parameter.
        :return: The fitted data object.
        :rtype: Union[CHAP.utils.fit.Fit, CHAP.utils.fit.FitMap]
        """
        # Local modules
        from CHAP.utils.models import (
            FitConfig,
            Multipeak,
        )

        # Unwrap the PipelineData if called as a Pipeline Processor
        if not isinstance(data, (Fit, FitMap, NXdata)):
            data = self.unwrap_pipelinedata(data)[0]

        if isinstance(data, (Fit, FitMap)):

            # Refit/continue the fit with possibly updated parameters
            fit = data
            fit_config = None
            if config is not None:
                try:
                    fit_config = FitConfig(**config)
                except Exception as exc:
                    # Include the offending configuration in the error
                    # (the original raised a bare RuntimeError)
                    raise RuntimeError(
                        f'Unable to validate the fit configuration '
                        f'({config})') from exc

            # Both Fit and FitMap are refit the same way; report and
            # plot only for a single (non-map) fit
            fit.fit(config=fit_config)
            if not isinstance(fit, FitMap) and fit_config is not None:
                if fit_config.print_report:
                    fit.print_fit_report()
                if fit_config.plot:
                    fit.plot(skip_init=True)

        else:

            # Get the default NXdata object
            # (nxdata is pre-initialized so the except clause cannot
            # raise a NameError when data.get_default() itself fails)
            nxdata = None
            try:
                nxdata = data.get_default()
                assert nxdata is not None
            except Exception as exc:
                if nxdata is None or nxdata.nxclass != 'NXdata':
                    raise ValueError(
                        'Invalid default pathway to an NXdata '
                        f'object in ({data})') from exc

            # Get the validated fit configuration
            fit_config = self.get_config(
                data=data, config=config, schema='utils.models.FitConfig')

            # Expand multipeak model if present (at most one allowed)
            found_multipeak = False
            for i, model in enumerate(deepcopy(fit_config.models)):
                if isinstance(model, Multipeak):
                    if found_multipeak:
                        raise ValueError(
                            f'Invalid parameter models ({fit_config.models}) '
                            '(multiple instances of multipeak not allowed)')
                    parameters, models = self.create_multipeak_model(model)
                    if parameters:
                        fit_config.parameters += parameters
                    fit_config.models += models
                    # Remove the expanded Multipeak entry; index i is
                    # still valid since new models were appended at the end
                    fit_config.models.pop(i)
                    found_multipeak = True

            # Instantiate the Fit or FitMap object and fit the data
            if np.squeeze(nxdata.nxsignal).ndim == 1:
                fit = Fit(nxdata, fit_config, self.logger)
                fit.fit()
                if fit_config.print_report:
                    fit.print_fit_report()
                if fit_config.plot:
                    fit.plot(skip_init=True)
            else:
                fit = FitMap(nxdata, fit_config, self.logger)
                fit.fit(
                    rel_height_cutoff=fit_config.rel_height_cutoff,
                    num_proc=fit_config.num_proc, plot=fit_config.plot,
                    print_report=fit_config.print_report)

        return fit

    @staticmethod
    def create_multipeak_model(model_config):
        """Expand a multipeak model configuration into individual
        Gaussian peak models plus any shared fit parameters.

        :param model_config: The multipeak model configuration.
        :type model_config: CHAP.utils.models.Multipeak
        :return: A tuple of the list of shared fit parameters and the
            list of single-peak models.
        :rtype: tuple[list, list]
        """
        # Local modules
        from CHAP.utils.models import (
            FitParameter,
            Gaussian,
        )

        parameters = []
        models = []
        num_peak = len(model_config.centers)
        # A uniform (common scale factor) fit is meaningless for a
        # single peak; fall back to an unconstrained fit
        if num_peak == 1 and model_config.fit_type == 'uniform':
            model_config.fit_type = 'unconstrained'

        # Translate the FWHM bounds into sigma bounds for the
        # configured peak shape using the fwhm_factor expressions
        sig_min = FLOAT_MIN
        sig_max = np.inf
        if (model_config.fwhm_min is not None
                or model_config.fwhm_max is not None):
            # Third party modules
            from asteval import Interpreter
            ast = Interpreter()

            if model_config.fwhm_min is not None:
                ast(f'fwhm = {model_config.fwhm_min}')
                sig_min = ast(fwhm_factor[model_config.peak_models])
            if model_config.fwhm_max is not None:
                ast(f'fwhm = {model_config.fwhm_max}')
                sig_max = ast(fwhm_factor[model_config.peak_models])

        # NOTE(review): only Gaussian shapes are instantiated below,
        # although the sigma bounds honor model_config.peak_models --
        # confirm other peak shapes are intentionally unsupported here
        prefix = ''
        if model_config.fit_type == 'uniform':
            # All peak centers scale with one shared free parameter
            parameters.append(FitParameter(
                name='scale_factor', value=1.0, min=FLOAT_MIN))
            for i, cen in enumerate(model_config.centers):
                if num_peak > 1:
                    prefix = f'peak{i+1}_'
                models.append(Gaussian(
                    model='gaussian',
                    prefix=prefix,
                    parameters=[
                        {'name': 'amplitude', 'min': FLOAT_MIN},
                        {'name': 'center', 'expr': f'scale_factor*{cen}'},
                        {'name': 'sigma', 'min': sig_min, 'max': sig_max}]))
        else:
            for i, cen in enumerate(model_config.centers):
                if num_peak > 1:
                    prefix = f'peak{i+1}_'
                if model_config.centers_range == 0:
                    # Peak centers are fixed at their configured values
                    models.append(Gaussian(
                        model='gaussian',
                        prefix=prefix,
                        parameters=[
                            {'name': 'amplitude', 'min': FLOAT_MIN},
                            {'name': 'center', 'value': cen, 'vary': False},
                            {'name': 'sigma', 'min': sig_min, 'max': sig_max}
                        ]))
                else:
                    # Peak centers may vary within centers_range of the
                    # configured value (unbounded when range is None)
                    if model_config.centers_range is None:
                        cen_min = None
                        cen_max = None
                    else:
                        cen_min = cen - model_config.centers_range
                        cen_max = cen + model_config.centers_range
                    models.append(Gaussian(
                        model='gaussian',
                        prefix=prefix,
                        parameters=[
                            {'name': 'amplitude', 'min': FLOAT_MIN},
                            {'name': 'center', 'value': cen, 'min': cen_min,
                             'max': cen_max},
                            {'name': 'sigma', 'min': sig_min, 'max': sig_max}
                        ]))

        return parameters, models
|
|
238
|
+
|
|
239
|
+
|
|
240
|
+
class Component():
    """A single fit-model component: the model's evaluation function
    together with its (prefixed) parameter names.
    """
    def __init__(self, model, prefix=''):
        # Local modules
        from CHAP.utils.models import models

        self.prefix = prefix
        self._name = model.model
        # Resolve the callable implementing this model shape
        self.func = models[self._name]
        # Parameter names carry the component prefix so they can be
        # looked up in the shared Parameters dictionary
        self.param_names = [prefix + par.name for par in model.parameters]
|
|
250
|
+
|
|
251
|
+
|
|
252
|
+
class Components(dict):
    """The dictionary of model fit components, keyed by the prefixed
    model name.
    """
    def __init__(self):
        super().__init__(self)

    def __setitem__(self, key, value):
        """Store a Component under a string key; the component's
        `name` attribute is kept in sync with the key.
        """
        # Only new keys are type-checked (existing keys were already
        # validated when first inserted)
        if key not in self and not isinstance(key, str):
            raise KeyError(f'Invalid component name ({key})')
        if not isinstance(value, Component):
            raise ValueError(f'Invalid component ({value})')
        super().__setitem__(key, value)
        value.name = key

    @property
    def components(self):
        """Return the model fit component dictionary."""
        return self.values()

    def add(self, model, prefix=''):
        """Wrap a model in a Component and register it under the key
        `f'{prefix}{model.model}'`.
        """
        # Local modules
        from CHAP.utils.models import model_classes

        if not isinstance(model, model_classes):
            raise ValueError(f'Invalid parameter model ({model})')
        if not isinstance(prefix, str):
            raise ValueError(f'Invalid parameter prefix ({prefix})')
        self[f'{prefix}{model.model}'] = Component(model, prefix)
|
|
281
|
+
|
|
282
|
+
|
|
283
|
+
class Parameters(dict):
    """A dictionary of FitParameter objects, mimicking the
    functionality of a similarly named class in the lmfit library.
    """
    def __init__(self):
        super().__init__(self)

    def __setitem__(self, key, value):
        """Store a FitParameter under a unique string key and keep the
        parameter's `name` attribute in sync with the key.

        :raises KeyError: If the key is a duplicate or not a string.
        :raises ValueError: If the value is not a FitParameter.
        """
        # Local modules
        from CHAP.utils.models import FitParameter

        if key in self:
            raise KeyError(f'Duplicate name for FitParameter ({key})')
        # The key is guaranteed absent here, so the original compound
        # `key not in self and ...` check reduces to a plain type check
        if not isinstance(key, str):
            raise KeyError(f'Invalid FitParameter name ({key})')
        if value is not None and not isinstance(value, FitParameter):
            raise ValueError(f'Invalid FitParameter ({value})')
        dict.__setitem__(self, key, value)
        # Guard the name sync: the checks above explicitly permit
        # value=None, which would previously raise AttributeError here
        if value is not None:
            value.name = key

    def add(self, parameter, prefix=''):
        """Add a fit parameter.

        :param parameter: The fit parameter to add to the dictionary.
        :type parameter: FitParameter
        :param prefix: The prefix for the model to which this
            parameter belongs, defaults to `''`.
        :type prefix: str, optional
        :raises RuntimeError: If parameter is not a FitParameter (the
            bare-name path is not implemented; the original code after
            this raise was unreachable and has been removed).
        """
        # Local modules
        from CHAP.utils.models import FitParameter

        if not isinstance(parameter, FitParameter):
            raise RuntimeError('Must test')
        name = f'{prefix}{parameter.name}'
        self.__setitem__(name, parameter)
        # __setitem__ synced parameter.name to name, so this matches
        # the original setattr(self[parameter.name], ...)
        setattr(self[name], '_prefix', prefix)
|
|
325
|
+
|
|
326
|
+
|
|
327
|
+
class ModelResult():
    """The result of a model fit, mimicking the functionality of a
    similarly named class in the lmfit library.
    """
    def __init__(
            self, model, parameters, x=None, y=None, method=None, ast=None,
            res_par_exprs=None, res_par_indices=None, res_par_names=None,
            result=None):
        # model: the fitted model (supplies .components)
        # parameters: Parameters dict; deep-copied so the caller's
        #     copy is not mutated by the updates below
        # result: for method 'leastsq' the full_output tuple of
        #     scipy.optimize.leastsq; otherwise an OptimizeResult-like
        #     object -- TODO confirm against the caller
        self.components = model.components
        self.params = deepcopy(parameters)
        # No independent variable means no fit was performed
        if x is None:
            self.success = False
            return
        if method == 'leastsq':
            # Unpack the scipy.optimize.leastsq full_output tuple:
            # (x, cov_x, infodict, mesg, ier)
            best_pars = result[0]
            self.ier = result[4]
            self.message = result[3]
            self.nfev = result[2]['nfev']
            self.residual = result[2]['fvec']
            # leastsq signals success with ier in 1..4
            self.success = 1 <= result[4] <= 4
        else:
            # OptimizeResult-style attributes
            best_pars = result.x
            self.ier = result.status
            self.message = result.message
            self.nfev = result.nfev
            self.residual = result.fun
            self.success = result.success
        # residual is stored as (fit - data), so data + residual = fit
        self.best_fit = y + self.residual
        self.method = method
        self.ndata = len(self.residual)
        self.nvarys = len(res_par_indices)
        self.x = x
        self._ast = ast
        self._expr_pars = {}

        # Get the covarience matrix
        self.chisqr = (self.residual**2).sum()
        self.redchi = self.chisqr / (self.ndata-self.nvarys)
        self.covar = None
        if method == 'leastsq':
            # cov_x from leastsq must be scaled by the reduced
            # chi-square to obtain the parameter covariance
            if result[1] is not None:
                self.covar = result[1]*self.redchi
        else:
            # Estimate the covariance from the Jacobian: redchi*(J^T J)^-1
            try:
                self.covar = self.redchi * np.linalg.inv(
                    np.dot(result.jac.T, result.jac))
            except Exception:
                # Singular Jacobian: leave the covariance undefined
                self.covar = None

        # Update the fit parameters with the fit result
        par_names = list(self.params.keys())
        self.var_names = []
        for i, (value, index) in enumerate(zip(best_pars, res_par_indices)):
            par = self.params[par_names[index]]
            par.set(value=value)
            # stderr is the square root of the covariance diagonal
            stderr = None
            if self.covar is not None:
                stderr = self.covar[i,i]
            if stderr is not None:
                if stderr < 0.0:
                    stderr = None
                else:
                    stderr = np.sqrt(stderr)
            # NOTE(review): stderr is computed here but never stored
            # on par (no setattr as in the expression loop below) --
            # looks like a dropped assignment; confirm
            self.var_names.append(par.name)
        if res_par_exprs:
            # Third party modules
            from sympy import diff

            # Seed the asteval symbol table with the best-fit values
            # so the expressions evaluate at the fitted point
            for value, name in zip(best_pars, res_par_names):
                self._ast.symtable[name] = value
            for par_expr in res_par_exprs:
                name = par_names[par_expr['index']]
                expr = par_expr['expr']
                par = self.params[name]
                par.set(value=self._ast.eval(expr))
                self._expr_pars[name] = expr
                # Propagate the uncertainty through the expression:
                # sum over d(expr)/dp_i * d(expr)/dp_j * covar[i,j]
                stderr = None
                if self.covar is not None:
                    stderr = 0
                    for i, name in enumerate(self.var_names):
                        d = diff(expr, name)
                        if not d:
                            continue
                        for ii, nname in enumerate(self.var_names):
                            dd = diff(expr, nname)
                            if not dd:
                                continue
                            stderr += (self._ast.eval(str(d))
                                       * self._ast.eval(str(dd))
                                       * self.covar[i,ii])
                    stderr = np.sqrt(stderr)
                setattr(par, '_stderr', stderr)

    def eval_components(self, x=None, parameters=None):
        """Evaluate each component of a composite model function.

        :param x: Independent variable, defaults to `None`, in which
            case the class variable x is used.
        :type x: Union[list, np.ndarray], optional
        :param parameters: Composite model parameters, defaults to
            None, in which case the class variable params is used.
        :type parameters: Parameters, optional
        :return: A dictionary with component name and evealuated
            function values key, value pairs.
        :rtype: dict
        """
        if x is None:
            x = self.x
        if parameters is None:
            parameters = self.params
        result = {}
        for component in self.components:
            # Skip the internal normalization-offset component
            if 'tmp_normalization_offset_c' in component.param_names:
                continue
            # Evaluate the component at the current parameter values;
            # positional order must match the component's signature
            par_values = tuple(
                parameters[par].value for par in component.param_names)
            if component.prefix == '':
                name = component._name
            else:
                name = component.prefix
            result[name] = component.func(x, *par_values)
        return result

    def fit_report(self, show_correl=False):
        """Generates a report of the fitting results with their best
        parameter values and uncertainties.

        :param show_correl: Whether to show list of correlations,
            defaults to `False`.
        :type show_correl: bool, optional
        :return: The formatted multi-line report.
        :rtype: str
        """
        # FIX add show_correl option
        # Local modules
        from CHAP.utils.general import (
            getfloat_attr,
            gformat,
        )

        buff = []
        add = buff.append
        parnames = list(self.params.keys())
        # Column width for the aligned parameter names
        namelen = max(len(n) for n in parnames)

        add("[[Fit Statistics]]")
        add(f"    # fitting method   = {self.method}")
        add(f"    # function evals   = {getfloat_attr(self, 'nfev')}")
        add(f"    # data points      = {getfloat_attr(self, 'ndata')}")
        add(f"    # variables        = {getfloat_attr(self, 'nvarys')}")
        add(f"    chi-square         = {getfloat_attr(self, 'chisqr')}")
        add(f"    reduced chi-square = {getfloat_attr(self, 'redchi')}")
#        add(f"    Akaike info crit   = {getfloat_attr(self, 'aic')}")
#        add(f"    Bayesian info crit = {getfloat_attr(self, 'bic')}")
#        if hasattr(self, 'rsquared'):
#            add(f"    R-squared          = {getfloat_attr(self, 'rsquared')}")

        add("[[Variables]]")
        for name in parnames:
            par = self.params[name]
            space = ' '*(namelen-len(name))
            nout = f'{name}:{space}'
            inval = '(init = ?)'
            if par.init_value is not None:
                inval = f'(init = {par.init_value:.7g})'
            # Expression parameters resolved during the fit take
            # precedence over the parameter's own expr attribute
            expr = self._expr_pars.get(name, par.expr)
            if expr is not None:
                val = self._ast.eval(expr)
            else:
                val = par.value
            # NOTE(review): val computed above is unconditionally
            # overwritten by gformat(par.value) below -- possibly
            # gformat(val) was intended; confirm
            try:
                val = gformat(par.value)
            except (TypeError, ValueError):
                val = ' Non Numeric Value?'
            if par.stderr is not None:
                serr = gformat(par.stderr)
                try:
                    spercent = f'({abs(par.stderr/par.value):.2%})'
                except ZeroDivisionError:
                    spercent = ''
                val = f'{val} +/-{serr} {spercent}'
            if par.vary:
                add(f'    {nout} {val} {inval}')
            elif expr is not None:
                add(f"    {nout} {val} == '{expr}'")
            else:
                add(f'    {nout} {par.value:.7g} (fixed)')

        return '\n'.join(buff)
|
|
513
|
+
|
|
514
|
+
|
|
515
|
+
class Fit:
|
|
516
|
+
"""
|
|
517
|
+
Wrapper class for scipy/lmfit.
|
|
518
|
+
"""
|
|
519
|
+
    def __init__(self, nxdata, config, logger):
        """Initialize Fit.

        :param nxdata: The NeXus NXdata object holding the signal and
            axis to fit; may be None for a bare (unbound) Fit.
        :type nxdata: nexusformat.nexus.NXdata, optional
        :param config: Validated fit configuration (supplies code,
            method, parameters and models).
        :type config: CHAP.utils.models.FitConfig
        :param logger: Logger used for fit diagnostics.
        """
        # Select the fitting backend; expression models are only
        # supported by lmfit, so force it in that case
        self._code = config.code
        for model in config.models:
            if model.model == 'expression' and self._code != 'lmfit':
                self._code = 'lmfit'
                logger.warning('Using lmfit instead of scipy with '
                               'an expression model')
        # Use the backend-matching Parameters implementation
        if self._code == 'scipy':
            # Local modules
            from CHAP.utils.fit import Parameters
        else:
            # Third party modules
            from lmfit import Parameters
        self._logger = logger
        self._mask = None
        self._method = config.method
        self._model = None
        # (y_min, y_range) normalization tuple, set below when the
        # data has a nonzero range
        self._norm = None
        self._normalized = False
        self._free_parameters = []
        self._parameters = Parameters()
        # Bookkeeping used only by the scipy backend
        if self._code == 'scipy':
            self._ast = None
            self._res_num_pars = []
            self._res_par_exprs = []
            self._res_par_indices = []
            self._res_par_names = []
            self._res_par_values = []
            self._parameter_bounds = None
            self._linear_parameters = []
            self._nonlinear_parameters = []
        self._result = None
        # self._try_linear_fit = True
        # self._fwhm_min = None
        # self._fwhm_max = None
        # self._sigma_min = None
        # self._sigma_max = None
        self._x = None
        self._y = None
        self._y_norm = None
        self._y_range = None
        # if 'try_linear_fit' in kwargs:
        #     self._try_linear_fit = kwargs.pop('try_linear_fit')
        #     if not isinstance(self._try_linear_fit, bool):
        #         raise ValueError(
        #             'Invalid value of keyword argument try_linear_fit '
        #             f'({self._try_linear_fit})')
        if nxdata is not None:
            # The last entry of the axes attribute names the
            # independent (fit) dimension
            if isinstance(nxdata.attrs['axes'], str):
                dim_x = nxdata.attrs['axes']
            else:
                dim_x = nxdata.attrs['axes'][-1]
            self._x = np.asarray(nxdata[dim_x])
            self._y = np.squeeze(nxdata.nxsignal)
            if self._x.ndim != 1:
                raise ValueError(
                    f'Invalid x dimension ({self._x.ndim})')
            if self._x.size != self._y.size:
                raise ValueError(
                    f'Inconsistent x and y dimensions ({self._x.size} vs '
                    f'{self._y.size})')
            # if 'mask' in kwargs:
            #     self._mask = kwargs.pop('mask')
            # Masked branch currently disabled; always use full data
            if True: #self._mask is None:
                y_min = float(self._y.min())
                self._y_range = float(self._y.max())-y_min
                if self._y_range > 0.0:
                    self._norm = (y_min, self._y_range)
            # else:
            #     self._mask = np.asarray(self._mask).astype(bool)
            #     if self._x.size != self._mask.size:
            #         raise ValueError(
            #             f'Inconsistent x and mask dimensions ({self._x.size} '
            #             f'vs {self._mask.size})')
            #     y_masked = np.asarray(self._y)[~self._mask]
            #     y_min = float(y_masked.min())
            #     self._y_range = float(y_masked.max())-y_min
            #     if self._y_range > 0.0:
            #         self._norm = (y_min, self._y_range)

            # Setup fit model
            self._setup_fit_model(config.parameters, config.models)
|
|
602
|
+
|
|
603
|
+
@property
|
|
604
|
+
def best_errors(self):
|
|
605
|
+
"""Return errors in the best fit parameters."""
|
|
606
|
+
if self._result is None:
|
|
607
|
+
return None
|
|
608
|
+
return {name:self._result.params[name].stderr
|
|
609
|
+
for name in sorted(self._result.params)
|
|
610
|
+
if name != 'tmp_normalization_offset_c'}
|
|
611
|
+
|
|
612
|
+
@property
|
|
613
|
+
def best_fit(self):
|
|
614
|
+
"""Return the best fit."""
|
|
615
|
+
if self._result is None:
|
|
616
|
+
return None
|
|
617
|
+
return self._result.best_fit
|
|
618
|
+
|
|
619
|
+
@property
|
|
620
|
+
def best_parameters(self):
|
|
621
|
+
"""Return the best fit parameters."""
|
|
622
|
+
if self._result is None:
|
|
623
|
+
return None
|
|
624
|
+
parameters = {}
|
|
625
|
+
for name in sorted(self._result.params):
|
|
626
|
+
if name != 'tmp_normalization_offset_c':
|
|
627
|
+
par = self._result.params[name]
|
|
628
|
+
parameters[name] = {
|
|
629
|
+
'value': par.value,
|
|
630
|
+
'error': par.stderr,
|
|
631
|
+
'init_value': par.init_value,
|
|
632
|
+
'min': par.min,
|
|
633
|
+
'max': par.max,
|
|
634
|
+
'vary': par.vary, 'expr': par.expr
|
|
635
|
+
}
|
|
636
|
+
return parameters
|
|
637
|
+
|
|
638
|
+
@property
|
|
639
|
+
def best_values(self):
|
|
640
|
+
"""Return values of the best fit parameters."""
|
|
641
|
+
if self._result is None:
|
|
642
|
+
return None
|
|
643
|
+
return {name:self._result.params[name].value
|
|
644
|
+
for name in sorted(self._result.params)
|
|
645
|
+
if name != 'tmp_normalization_offset_c'}
|
|
646
|
+
|
|
647
|
+
@property
|
|
648
|
+
def chisqr(self):
|
|
649
|
+
"""Return the chisqr value of the best fit."""
|
|
650
|
+
if self._result is None:
|
|
651
|
+
return None
|
|
652
|
+
return self._result.chisqr
|
|
653
|
+
|
|
654
|
+
    @property
    def components(self):
        """Return the fit model components info.

        :return: A dictionary keyed by component display name, each
            value holding the component's parameters (and expression
            for expression models).
        :rtype: dict
        """
        # Third party modules
        from lmfit.models import ExpressionModel

        components = {}
        if self._result is None:
            self._logger.warning(
                'Unable to collect components in Fit.components')
            return components
        for component in self._result.components:
            # Skip the internal normalization-offset component
            if 'tmp_normalization_offset_c' in component.param_names:
                continue
            parameters = {}
            for name in component.param_names:
                par = self._parameters[name]
                parameters[name] = {
                    'free': par.vary,
                    'value': self._result.params[name].value,
                }
                if par.expr is not None:
                    parameters[name]['expr'] = par.expr
            expr = None
            if isinstance(component, ExpressionModel):
                # Expression models: display name is the component
                # name with any trailing underscore stripped
                name = component._name
                if name[-1] == '_':
                    name = name[:-1]
                expr = component.expr
            else:
                # Regular models: display name combines the stripped
                # prefix with the model name
                prefix = component.prefix
                if prefix:
                    if prefix[-1] == '_':
                        prefix = prefix[:-1]
                    name = f'{prefix} ({component._name})'
                else:
                    name = f'{component._name}'
            if expr is None:
                components[name] = {
                    'parameters': parameters,
                }
            else:
                components[name] = {
                    'expr': expr,
                    'parameters': parameters,
                }
        return components
|
|
701
|
+
|
|
702
|
+
@property
|
|
703
|
+
def covar(self):
|
|
704
|
+
"""Return the covarience matrix of the best fit parameters."""
|
|
705
|
+
if self._result is None:
|
|
706
|
+
return None
|
|
707
|
+
return self._result.covar
|
|
708
|
+
|
|
709
|
+
@property
|
|
710
|
+
def init_parameters(self):
|
|
711
|
+
"""Return the initial parameters for the fit model."""
|
|
712
|
+
if self._result is None or self._result.init_params is None:
|
|
713
|
+
return None
|
|
714
|
+
parameters = {}
|
|
715
|
+
for name in sorted(self._result.init_params):
|
|
716
|
+
if name != 'tmp_normalization_offset_c':
|
|
717
|
+
par = self._result.init_params[name]
|
|
718
|
+
parameters[name] = {
|
|
719
|
+
'value': par.value,
|
|
720
|
+
'min': par.min,
|
|
721
|
+
'max': par.max,
|
|
722
|
+
'vary': par.vary,
|
|
723
|
+
'expr': par.expr,
|
|
724
|
+
}
|
|
725
|
+
return parameters
|
|
726
|
+
|
|
727
|
+
@property
|
|
728
|
+
def init_values(self):
|
|
729
|
+
"""Return the initial values for the fit parameters."""
|
|
730
|
+
if self._result is None or self._result.init_params is None:
|
|
731
|
+
return None
|
|
732
|
+
return {name:self._result.init_params[name].value
|
|
733
|
+
for name in sorted(self._result.init_params)
|
|
734
|
+
if name != 'tmp_normalization_offset_c'}
|
|
735
|
+
|
|
736
|
+
@property
|
|
737
|
+
def normalization_offset(self):
|
|
738
|
+
"""Return the normalization_offset for the fit model."""
|
|
739
|
+
if self._result is None:
|
|
740
|
+
return None
|
|
741
|
+
if self._norm is None:
|
|
742
|
+
return 0.0
|
|
743
|
+
if self._result.init_params is not None:
|
|
744
|
+
normalization_offset = float(
|
|
745
|
+
self._result.init_params['tmp_normalization_offset_c'].value)
|
|
746
|
+
else:
|
|
747
|
+
normalization_offset = float(
|
|
748
|
+
self._result.params['tmp_normalization_offset_c'].value)
|
|
749
|
+
return normalization_offset
|
|
750
|
+
|
|
751
|
+
@property
|
|
752
|
+
def num_func_eval(self):
|
|
753
|
+
"""Return the number of function evaluations for the best fit.
|
|
754
|
+
"""
|
|
755
|
+
if self._result is None:
|
|
756
|
+
return None
|
|
757
|
+
return self._result.nfev
|
|
758
|
+
|
|
759
|
+
@property
|
|
760
|
+
def parameters(self):
|
|
761
|
+
"""Return the fit parameter info."""
|
|
762
|
+
return {name:{'min': par.min, 'max': par.max, 'vary': par.vary,
|
|
763
|
+
'expr': par.expr} for name, par in self._parameters.items()
|
|
764
|
+
if name != 'tmp_normalization_offset_c'}
|
|
765
|
+
|
|
766
|
+
@property
|
|
767
|
+
def redchi(self):
|
|
768
|
+
"""Return the redchi value of the best fit."""
|
|
769
|
+
if self._result is None:
|
|
770
|
+
return None
|
|
771
|
+
return self._result.redchi
|
|
772
|
+
|
|
773
|
+
@property
def residual(self):
    """Return the residual in the best fit."""
    result = self._result
    if result is None:
        return None
    # lmfit stores the negative of the residual in its common
    # definition as (data - fit), so flip the sign here
    return -result.residual
|
|
781
|
+
|
|
782
|
+
@property
def success(self):
    """Return the success value for the fit."""
    if self._result is None:
        return None
    if self._result.success:
        return self._result.success
    # Log the failure diagnostics before deciding
    self._logger.warning(
        f'ier = {self._result.ier}: {self._result.message}')
    # For lmfit, any nonzero ier other than 5 is still treated as a
    # usable fit
    if (self._code == 'lmfit' and self._result.ier
            and self._result.ier != 5):
        return True
    return self._result.success
|
|
794
|
+
|
|
795
|
+
@property
def var_names(self):
    """Return the variable names for the covariance matrix
    property.
    """
    result = self._result
    return None if result is None else getattr(result, 'var_names', None)
|
|
803
|
+
|
|
804
|
+
@property
def x(self):
    """Return the input x-array."""
    # Read-only accessor; returns the stored array itself (no copy)
    return self._x
|
|
808
|
+
|
|
809
|
+
@property
def y(self):
    """Return the input y-array."""
    # Read-only accessor; returns the stored array itself (no copy)
    return self._y
|
|
813
|
+
|
|
814
|
+
def print_fit_report(self, result=None, show_correl=False):
    """Print a fit report."""
    # Fall back to the internally stored result; stay silent when
    # neither is available
    report_source = self._result if result is None else result
    if report_source is not None:
        print(report_source.fit_report(show_correl=show_correl))
|
|
820
|
+
|
|
821
|
+
def add_parameter(self, parameter):
    """Add a fit parameter to the fit model.

    :param parameter: Dictionary describing the parameter; must
        contain at least 'name' and 'vary' keys, must not contain
        a non-None 'expr' key.
    :raises KeyError: If the parameter carries an 'expr' key.
    """
    # Local modules
    from CHAP.utils.models import FitParameter

    # Free parameters may not be expressions
    if parameter.get('expr') is not None:
        raise KeyError(f'Invalid "expr" key in parameter {parameter}')
    name = parameter['name']
    if not parameter['vary']:
        # Fixed parameters get unbounded limits; warn that any
        # user-supplied bounds are discarded
        self._logger.warning(
            f'Ignoring min in parameter {name} in '
            f'Fit.add_parameter (vary = {parameter["vary"]})')
        parameter['min'] = -np.inf
        self._logger.warning(
            f'Ignoring max in parameter {name} in '
            f'Fit.add_parameter (vary = {parameter["vary"]})')
        parameter['max'] = np.inf
    # The scipy backend wraps parameters in FitParameter; lmfit's
    # Parameters.add takes the fields as keyword arguments
    if self._code == 'scipy':
        self._parameters.add(FitParameter(**parameter))
    else:
        self._parameters.add(**parameter)
    self._free_parameters.append(name)
|
|
843
|
+
|
|
844
|
+
def add_model(self, model, prefix):
    """Add a model component to the fit model.

    :param model: Model configuration; its `.model` field selects
        the component type ('constant', 'linear', 'quadratic',
        'exponential', 'gaussian', 'lorentzian', 'rectangle' or
        'expression').
    :param prefix: Parameter-name prefix for this component, or
        None for no prefix.
    :raises ValueError: For an unknown model type or an invalid
        rectangle form.
    :raises KeyError: For an 'expr' on an expression-model parameter.
    """
    # pylint: disable=possibly-used-before-assignment
    if self._code == 'lmfit':
        # Third party modules
        from lmfit.models import (
            ConstantModel,
            LinearModel,
            QuadraticModel,
            # PolynomialModel,
            ExponentialModel,
            GaussianModel,
            LorentzianModel,
            ExpressionModel,
            # StepModel,
            RectangleModel,
        )

    if model.model == 'expression':
        expr = model.expr
    else:
        expr = None
    parameters = model.parameters
    model_name = model.model

    # Use an empty string for parameter-name bookkeeping when no
    # prefix was supplied
    if prefix is None:
        pprefix = ''
    else:
        pprefix = prefix
    if self._code == 'scipy':
        # The scipy backend tracks the component parameters itself
        new_parameters = []
        for par in deepcopy(parameters):
            self._parameters.add(par, pprefix)
            if self._parameters[par.name].expr is None:
                self._parameters[par.name].set(value=par.default)
            new_parameters.append(par.name)
        self._res_num_pars += [len(parameters)]

    # Instantiate the lmfit component (lmfit backend only) and
    # classify each component parameter as linear or nonlinear
    if model_name == 'constant':
        # Par: c
        if self._code == 'lmfit':
            newmodel = ConstantModel(prefix=prefix)
        self._linear_parameters.append(f'{pprefix}c')
    elif model_name == 'linear':
        # Par: slope, intercept
        if self._code == 'lmfit':
            newmodel = LinearModel(prefix=prefix)
        self._linear_parameters.append(f'{pprefix}slope')
        self._linear_parameters.append(f'{pprefix}intercept')
    elif model_name == 'quadratic':
        # Par: a, b, c
        if self._code == 'lmfit':
            newmodel = QuadraticModel(prefix=prefix)
        self._linear_parameters.append(f'{pprefix}a')
        self._linear_parameters.append(f'{pprefix}b')
        self._linear_parameters.append(f'{pprefix}c')
    # elif model_name == 'polynomial':
    #     # Par: c0, c1,..., c7
    #     degree = kwargs.get('degree')
    #     if degree is not None:
    #         kwargs.pop('degree')
    #     if degree is None or not is_int(degree, ge=0, le=7):
    #         raise ValueError(
    #             'Invalid parameter degree for build-in step model '
    #             f'({degree})')
    #     if self._code == 'lmfit':
    #         newmodel = PolynomialModel(degree=degree, prefix=prefix)
    #     for i in range(degree+1):
    #         self._linear_parameters.append(f'{pprefix}c{i}')
    elif model_name == 'exponential':
        # Par: amplitude, decay
        if self._code == 'lmfit':
            newmodel = ExponentialModel(prefix=prefix)
        self._linear_parameters.append(f'{pprefix}amplitude')
        self._nonlinear_parameters.append(f'{pprefix}decay')
    elif model_name == 'gaussian':
        # Par: amplitude, center, sigma (fwhm, height)
        if self._code == 'lmfit':
            newmodel = GaussianModel(prefix=prefix)
        # parameter norms for height and fwhm are needed to
        # get correct errors
        self._linear_parameters.append(f'{pprefix}amplitude')
        self._nonlinear_parameters.append(f'{pprefix}center')
        self._nonlinear_parameters.append(f'{pprefix}sigma')
    elif model_name == 'lorentzian':
        # Par: amplitude, center, sigma (fwhm, height)
        if self._code == 'lmfit':
            newmodel = LorentzianModel(prefix=prefix)
        # parameter norms for height and fwhm are needed to
        # get correct errors
        self._linear_parameters.append(f'{pprefix}amplitude')
        self._nonlinear_parameters.append(f'{pprefix}center')
        self._nonlinear_parameters.append(f'{pprefix}sigma')
    # elif model_name == 'step':
    #     # Par: amplitude, center, sigma
    #     form = kwargs.get('form')
    #     if form is not None:
    #         kwargs.pop('form')
    #     if (form is None or form not in
    #             ('linear', 'atan', 'arctan', 'erf', 'logistic')):
    #         raise ValueError(
    #             'Invalid parameter form for build-in step model '
    #             f'({form})')
    #     if self._code == 'lmfit':
    #         newmodel = StepModel(prefix=prefix, form=form)
    #     self._linear_parameters.append(f'{pprefix}amplitude')
    #     self._nonlinear_parameters.append(f'{pprefix}center')
    #     self._nonlinear_parameters.append(f'{pprefix}sigma')
    elif model_name == 'rectangle':
        # Par: amplitude, center1, center2, sigma1, sigma2
        form = 'atan' #kwargs.get('form')
        #if form is not None:
        #    kwargs.pop('form')
        # RV: Implement and test other forms when needed
        if (form is None or form not in
                ('linear', 'atan', 'arctan', 'erf', 'logistic')):
            raise ValueError(
                'Invalid parameter form for build-in rectangle model '
                f'({form})')
        if self._code == 'lmfit':
            newmodel = RectangleModel(prefix=prefix, form=form)
        self._linear_parameters.append(f'{pprefix}amplitude')
        self._nonlinear_parameters.append(f'{pprefix}center1')
        self._nonlinear_parameters.append(f'{pprefix}center2')
        self._nonlinear_parameters.append(f'{pprefix}sigma1')
        self._nonlinear_parameters.append(f'{pprefix}sigma2')
    elif model_name == 'expression' and self._code == 'lmfit':
        # Third party modules
        from asteval import (
            Interpreter,
            get_ast_names,
        )
        for par in parameters:
            if par.expr is not None:
                raise KeyError(
                    f'Invalid "expr" key ({par.expr}) in '
                    f'parameter ({par}) for an expression model')
        # Names appearing in the expression that are not 'x', not
        # already known parameters, and not asteval builtins become
        # the expression model's parameters
        ast = Interpreter()
        expr_parameters = [
            name for name in get_ast_names(ast.parse(expr))
            if (name != 'x' and name not in self._parameters
                and name not in ast.symtable)]
        if prefix is None:
            newmodel = ExpressionModel(expr=expr)
        else:
            # Prefix each free name inside the expression itself
            for name in expr_parameters:
                expr = sub(rf'\b{name}\b', f'{prefix}{name}', expr)
            expr_parameters = [
                f'{prefix}{name}' for name in expr_parameters]
            newmodel = ExpressionModel(expr=expr, name=model_name)
        # Remove already existing names
        for name in newmodel.param_names.copy():
            if name not in expr_parameters:
                newmodel._func_allargs.remove(name)
                newmodel._param_names.remove(name)
    else:
        raise ValueError(f'Unknown fit model ({model_name})')

    # Add the new model to the current one
    if self._code == 'scipy':
        if self._model is None:
            self._model = Components()
        self._model.add(model, prefix)
    else:
        if self._model is None:
            self._model = newmodel
        else:
            self._model += newmodel
        new_parameters = newmodel.make_params()
        self._parameters += new_parameters

    # Check linearity of expression model parameters
    if self._code == 'lmfit' and isinstance(newmodel, ExpressionModel):
        # Third party modules
        from sympy import diff
        for name in newmodel.param_names:
            # A zero second derivative w.r.t. the parameter means the
            # expression is linear in that parameter
            if not diff(newmodel.expr, name, name):
                if name not in self._linear_parameters:
                    self._linear_parameters.append(name)
            else:
                if name not in self._nonlinear_parameters:
                    self._nonlinear_parameters.append(name)

    # Scale the default initial model parameters
    if self._norm is not None:
        for name in new_parameters:
            if name in self._linear_parameters:
                par = self._parameters.get(name)
                if par.expr is None:
                    if self._code == 'scipy':
                        value = par.default
                    else:
                        value = None
                    if value is None:
                        value = par.value
                    if value is not None:
                        value *= self._norm[1]
                    _min = par.min
                    _max = par.max
                    # Leave infinite or sentinel bounds untouched
                    if not np.isinf(_min) and abs(_min) != FLOAT_MIN:
                        _min *= self._norm[1]
                    if not np.isinf(_max) and abs(_max) != FLOAT_MIN:
                        _max *= self._norm[1]
                    par.set(value=value, min=_min, max=_max)

    # Initialize the model parameters
    for parameter in deepcopy(parameters):
        name = parameter.name
        if name not in new_parameters:
            # Retry with the component prefix prepended
            name = pprefix+name
            if name not in new_parameters:
                raise ValueError(
                    f'Unable to match parameter {name}')
        if parameter.expr is None:
            self._parameters[name].set(
                value=parameter.value, min=parameter.min,
                max=parameter.max, vary=parameter.vary)
        else:
            # Expression parameters ignore value/min/max inputs
            if parameter.value is not None:
                self._logger.warning(
                    'Ignoring input "value" for expression parameter'
                    f'{name} = {parameter.expr}')
            if not np.isinf(parameter.min):
                self._logger.warning(
                    'Ignoring input "min" for expression parameter'
                    f'{name} = {parameter.expr}')
            if not np.isinf(parameter.max):
                self._logger.warning(
                    'Ignoring input "max" for expression parameter'
                    f'{name} = {parameter.expr}')
            self._parameters[name].set(
                value=None, min=-np.inf, max=np.inf, expr=parameter.expr)
|
|
1075
|
+
|
|
1076
|
+
def eval(self, x, result=None):
    """Evaluate the best fit."""
    chosen = self._result if result is None else result
    if chosen is None:
        return None
    # Remove the internal normalization offset from the evaluation
    return chosen.eval(x=np.asarray(x)) - self.normalization_offset
|
|
1083
|
+
|
|
1084
|
+
def fit(self, config=None, **kwargs):
    """Fit the model to the input data.

    :param config: Optional fit configuration used to adjust
        existing parameters for a refit.
    :param kwargs: Optional keyword arguments; 'mask', 'guess' and
        'check_only_linearity' are consumed here, the rest is
        forwarded to the nonlinear solver.
    :return: None, or the linearity flag when
        'check_only_linearity' is supplied.
    """

    # Check input parameters
    if self._model is None:
        self._logger.error('Undefined fit model')
        return None
    self._mask = kwargs.pop('mask', None)
    guess = kwargs.pop('guess', False)
    if not isinstance(guess, bool):
        raise ValueError(
            f'Invalid value of keyword argument guess ({guess})')
    if self._result is not None:
        # Refit: initial-guess estimation is not applicable
        if guess:
            self._logger.warning(
                'Ignoring input parameter guess during refitting')
            guess = False
    # if 'try_linear_fit' in kwargs:
    #     raise RuntimeError('try_linear_fit needs testing')
    #     try_linear_fit = kwargs.pop('try_linear_fit')
    #     if not isinstance(try_linear_fit, bool):
    #         raise ValueError(
    #             'Invalid value of keyword argument try_linear_fit '
    #             f'({try_linear_fit})')
    #     if not self._try_linear_fit:
    #         self._logger.warning(
    #             'Ignore superfluous keyword argument "try_linear_fit" '
    #             '(not yet supported for callable models)')
    #     else:
    #         self._try_linear_fit = try_linear_fit

    # Setup the fit
    self._setup_fit(config, guess)

    # Check if model is linear
    try:
        linear_model = self._check_linearity_model()
    except Exception:
        linear_model = False
    if kwargs.get('check_only_linearity') is not None:
        return linear_model

    # Normalize the data and initial parameters
    self._normalize()

    if linear_model:
        # NOTE: guard raise — the linear-solver branch below is
        # intentionally unreachable until it has been tested
        raise RuntimeError('linear solver needs testing')
        # Perform a linear fit by direct matrix solution with numpy
        try:
            if self._mask is None:
                self._fit_linear_model(self._x, self._y_norm)
            else:
                self._fit_linear_model(
                    self._x[~self._mask],
                    np.asarray(self._y_norm)[~self._mask])
        except Exception:
            linear_model = False
    if not linear_model:
        self._result = self._fit_nonlinear_model(
            self._x, self._y_norm, **kwargs)

    # Set internal parameter values to fit results upon success
    if self.success:
        for name, par in self._parameters.items():
            if par.expr is None and par.vary:
                par.set(value=self._result.params[name].value)

    # Renormalize the data and results
    self._renormalize()

    return None
|
|
1155
|
+
|
|
1156
|
+
def plot(
        self, y=None, y_title=None, title=None, result=None,
        skip_init=False, plot_comp=True, plot_comp_legends=False,
        plot_residual=False, plot_masked_data=True, **kwargs):
    """Plot the best fit.

    :param y: Optional extra data array to overlay on the plot.
    :param y_title: Legend label for `y`, defaults to `'data'`.
    :param title: Plot title.
    :param result: Fit result to plot, defaults to the internally
        stored one.
    :param skip_init: Skip plotting the initial fit.
    :param plot_comp: Plot the individual model components.
    :param plot_comp_legends: Add legend entries for components.
    :param plot_residual: Plot the fit residual.
    :param plot_masked_data: Mark masked data points (only when a
        mask is present).
    :param kwargs: Forwarded to `quick_plot`.
    """
    if result is None:
        result = self._result
    if result is None:
        return
    plots = []
    legend = []
    if self._mask is None:
        mask = np.zeros(self._x.size).astype(bool)
        plot_masked_data = False
    else:
        mask = self._mask
    if y is not None:
        if not isinstance(y, (tuple, list, np.ndarray)):
            # Fix: original message was not an f-string (the value of
            # y never appeared), contained a typo and an unbalanced
            # parenthesis; also drop the invalid y here instead of
            # passing it on to len() below (possible TypeError)
            self._logger.warning(f'Ignoring invalid parameter y ({y})')
            y = None
        elif len(y) != len(self._x):
            self._logger.warning(
                'Ignoring parameter y in plot (wrong dimension)')
            y = None
    if y is not None:
        if y_title is None or not isinstance(y_title, str):
            y_title = 'data'
        plots += [(self._x, y, '.')]
        legend += [y_title]
    if self._y is not None:
        plots += [(self._x, np.asarray(self._y), 'b.')]
        legend += ['data']
        if plot_masked_data:
            plots += [(self._x[mask], np.asarray(self._y)[mask], 'bx')]
            legend += ['masked data']
    if isinstance(plot_residual, bool) and plot_residual:
        plots += [(self._x[~mask], result.residual, 'r-')]
        legend += ['residual']
    plots += [(self._x[~mask], result.best_fit, 'k-')]
    legend += ['best fit']
    if not skip_init and hasattr(result, 'init_fit'):
        plots += [(self._x[~mask], result.init_fit, 'g-')]
        legend += ['init']
    if plot_comp:
        components = result.eval_components(x=self._x[~mask])
        num_components = len(components)
        # The internal normalization offset is not a real component
        if 'tmp_normalization_offset_' in components:
            num_components -= 1
        if num_components > 1:
            eval_index = 0
            for modelname, y_comp in components.items():
                if modelname == 'tmp_normalization_offset_':
                    continue
                if modelname == '_eval':
                    modelname = f'eval{eval_index}'
                # Truncate long component names for the legend
                if len(modelname) > 20:
                    modelname = f'{modelname[0:16]} ...'
                if isinstance(y_comp, (int, float)):
                    y_comp *= np.ones(self._x[~mask].size)
                plots += [(self._x[~mask], y_comp, '--')]
                if plot_comp_legends:
                    if modelname[-1] == '_':
                        legend.append(modelname[:-1])
                    else:
                        legend.append(modelname)
    quick_plot(
        tuple(plots), legend=legend, title=title, block=True, **kwargs)
|
|
1222
|
+
|
|
1223
|
+
@staticmethod
def guess_init_peak(
        x, y, *args, center_guess=None, use_max_for_center=True):
    """Return a guess for the initial height, center and fwhm for a
    single peak.

    :param x: Independent data values.
    :param y: Dependent data values (same length as x).
    :param args: When center_guess holds several candidate centers,
        exactly one positional argument selects the peak index.
    :param center_guess: A single center value or a sequence of
        candidate centers, defaults to None.
    :param use_max_for_center: Prefer the data maximum as the center
        estimate, defaults to True.
    :return: Tuple (height, center, fwhm), or (None, None, None) on
        inconsistent input lengths.
    """
    center_guesses = None
    x = np.asarray(x)
    y = np.asarray(y)
    if len(x) != len(y):
        print(
            f'Invalid x and y lengths ({len(x)}, {len(y)}), '
            'skip initial guess')
        return None, None, None
    # Normalize center_guess input: scalar, single-element sequence,
    # or multi-element sequence with a peak-selection argument
    if isinstance(center_guess, (int, float)):
        if args:
            print(
                'Ignoring additional arguments for single center_guess '
                'value')
    elif isinstance(center_guess, (tuple, list, np.ndarray)):
        if len(center_guess) == 1:
            print(
                'Ignoring additional arguments for single center_guess '
                'value')
            if not isinstance(center_guess[0], (int, float)):
                raise ValueError(
                    'Invalid parameter center_guess '
                    f'({type(center_guess[0])})')
            center_guess = center_guess[0]
        else:
            if len(args) != 1:
                raise ValueError(
                    f'Invalid number of arguments ({len(args)})')
            n = args[0]
            if not is_index(n, 0, len(center_guess)):
                raise ValueError('Invalid argument')
            center_guesses = center_guess
            center_guess = center_guesses[n]
    elif center_guess is not None:
        raise ValueError(
            f'Invalid center_guess type ({type(center_guess)})')

    # Sort the inputs
    index = np.argsort(x)
    x = x[index]
    y = y[index]
    miny = y.min()

    # Set range for current peak
    if center_guesses is not None:
        if len(center_guesses) > 1:
            # Keep track of the selected peak after sorting centers
            index = np.argsort(center_guesses)
            n = list(index).index(n)
            center_guesses = np.asarray(center_guesses)[index]
        # Restrict x/y to the window between neighboring centers
        if n == 0:
            low = 0
            upp = index_nearest(
                x, (center_guesses[0]+center_guesses[1]) / 2)
        elif n == len(center_guesses)-1:
            low = index_nearest(
                x, (center_guesses[n-1]+center_guesses[n]) / 2)
            upp = len(x)
        else:
            low = index_nearest(
                x, (center_guesses[n-1]+center_guesses[n]) / 2)
            upp = index_nearest(
                x, (center_guesses[n]+center_guesses[n+1]) / 2)
        x = x[low:upp]
        y = y[low:upp]

    # Estimate FWHM
    maxy = y.max()
    if center_guess is None:
        center_index = np.argmax(y)
        center = x[center_index]
        height = maxy-miny
    else:
        if use_max_for_center:
            center_index = np.argmax(y)
            center = x[center_index]
            # Fall back to the supplied guess when the maximum sits
            # suspiciously close to a window edge
            if center_index < 0.1*len(x) or center_index > 0.9*len(x):
                center_index = index_nearest(x, center_guess)
                center = center_guess
        else:
            center_index = index_nearest(x, center_guess)
            center = center_guess
        height = y[center_index]-miny
    half_height = miny + 0.5*height
    # Scan outward from the center for the half-height crossings
    fwhm_index1 = 0
    for i in range(center_index, fwhm_index1, -1):
        if y[i] < half_height:
            fwhm_index1 = i
            break
    fwhm_index2 = len(x)-1
    for i in range(center_index, fwhm_index2):
        if y[i] < half_height:
            fwhm_index2 = i
            break
    # When only one crossing was found, mirror it around the center
    if fwhm_index1 == 0 and fwhm_index2 < len(x)-1:
        fwhm = 2 * (x[fwhm_index2]-center)
    elif fwhm_index1 > 0 and fwhm_index2 == len(x)-1:
        fwhm = 2 * (center-x[fwhm_index1])
    else:
        fwhm = x[fwhm_index2]-x[fwhm_index1]

    # Refine center/height to the maximum inside the FWHM window
    if center_guess is not None and not use_max_for_center:
        index = fwhm_index1+np.argmax(y[fwhm_index1:fwhm_index2])
        center = x[index]
        height = y[index]-miny

    return height, center, fwhm
|
|
1334
|
+
|
|
1335
|
+
def _create_prefixes(self, models):
    """Create model prefixes."""
    # Combine each model's own prefix with its type name; duplicated
    # combinations get a numbered prefix appended
    names = [f'{m.prefix}{m.model}' for m in models]
    prefixes = [m.prefix for m in models]
    for dup, count in Counter(names).items():
        if count < 2:
            continue
        n = 0
        for i, name in enumerate(names):
            if name == dup:
                n += 1
                prefixes[i] = f'{name}{n}_'
    return prefixes
|
|
1353
|
+
|
|
1354
|
+
def _setup_fit_model(self, parameters, models):
    """Setup the fit model.

    :param parameters: Free fit parameter configurations.
    :param models: Model component configurations.
    """
    # Check for duplicate model names and create prefixes
    prefixes = self._create_prefixes(models)

    # Add the free fit parameters
    for par in parameters:
        self.add_parameter(par.model_dump())

    # Add the model functions
    for prefix, model in zip(prefixes, models):
        self.add_model(model, prefix)

    # Check linearity of free fit parameters:
    known_parameters = (
        self._linear_parameters + self._nonlinear_parameters)
    for name in reversed(self._parameters):
        if name not in known_parameters:
            # Classify the unknown parameter by inspecting every
            # expression parameter that may depend on it
            for nname, par in self._parameters.items():
                if par.expr is not None:
                    # Third party modules
                    from sympy import diff

                    if nname in self._nonlinear_parameters:
                        self._nonlinear_parameters.insert(0, name)
                    elif diff(par.expr, name, name):
                        # Nonzero second derivative: nonlinear
                        self._nonlinear_parameters.insert(0, name)
                    else:
                        self._linear_parameters.insert(0, name)
|
|
1383
|
+
|
|
1384
|
+
def _setup_fit(self, config, guess=False):
    """Setup the fit.

    :param config: Optional fit configuration used to adjust the
        existing parameters for a refit.
    :param guess: Estimate initial parameter values from the data,
        defaults to False.
    """
    # Apply mask if supplied:
    if self._mask is not None:
        # NOTE: guard raise — masked setup below is intentionally
        # unreachable until it has been tested
        raise RuntimeError('mask needs testing')
        self._mask = np.asarray(self._mask).astype(bool)
        if self._x.size != self._mask.size:
            raise ValueError(
                f'Inconsistent x and mask dimensions ({self._x.size} vs '
                f'{self._mask.size})')

    # Estimate initial parameters
    if guess and not isinstance(self, FitMap):
        # NOTE: guard raise — guessing below is intentionally
        # unreachable until it has been tested
        raise RuntimeError('Estimate initial parameters needs testing')
        if self._mask is None:
            xx = self._x
            yy = self._y
        else:
            xx = self._x[~self._mask]
            yy = np.asarray(self._y)[~self._mask]
        try:
            # Try with the build-in lmfit guess method
            # (only implemented for a single model)
            self._parameters = self._model.guess(yy, x=xx)
        except Exception:
            # Third party modules
            from asteval import Interpreter
            from lmfit.models import GaussianModel

            ast = Interpreter()
            # Should work for other peak-like models,
            # but will need tests first
            for component in self._model.components:
                if isinstance(component, GaussianModel):
                    center = self._parameters[
                        f"{component.prefix}center"].value
                    height_init, cen_init, fwhm_init = \
                        self.guess_init_peak(
                            xx, yy, center_guess=center,
                            use_max_for_center=False)
                    # if (self._fwhm_min is not None
                    #         and fwhm_init < self._fwhm_min):
                    #     fwhm_init = self._fwhm_min
                    # elif (self._fwhm_max is not None
                    #         and fwhm_init > self._fwhm_max):
                    #     fwhm_init = self._fwhm_max
                    ast(f'fwhm = {fwhm_init}')
                    ast(f'height = {height_init}')
                    sig_init = ast(fwhm_factor[component._name])
                    amp_init = ast(height_factor[component._name])
                    par = self._parameters[
                        f"{component.prefix}amplitude"]
                    if par.vary:
                        par.set(value=amp_init)
                    par = self._parameters[
                        f"{component.prefix}center"]
                    if par.vary:
                        par.set(value=cen_init)
                    par = self._parameters[
                        f"{component.prefix}sigma"]
                    if par.vary:
                        par.set(value=sig_init)

    # Add constant offset for a normalized model
    if self._result is None and self._norm is not None and self._norm[0]:
        from CHAP.utils.models import Constant
        model = Constant(
            model='constant',
            parameters=[{
                'name': 'c',
                'value': -self._norm[0],
                'vary': False,
            }])
        self.add_model(model, 'tmp_normalization_offset_')

    # Adjust existing parameters for refit:
    if config is not None:
        # Local modules
        from CHAP.utils.models import (
            FitConfig,
            Multipeak,
        )

        # Expand multipeak model if present
        scale_factor = None
        for i, model in enumerate(deepcopy(config.models)):
            found_multipeak = False
            if isinstance(model, Multipeak):
                if found_multipeak:
                    raise ValueError(
                        f'Invalid parameter models ({config.models}) '
                        '(multiple instances of multipeak not allowed)')
                if (model.fit_type == 'uniform'
                        and 'scale_factor' not in self._free_parameters):
                    raise ValueError(
                        f'Invalid parameter models ({config.models}) '
                        '(uniform multipeak fit after unconstrained fit)')
                parameters, models = FitProcessor.create_multipeak_model(
                    model)
                if (model.fit_type == 'unconstrained'
                        and 'scale_factor' in self._free_parameters):
                    # Third party modules
                    from asteval import Interpreter

                    # Moving from a uniform to an unconstrained fit:
                    # bake the scale factor into the center values
                    scale_factor = self._parameters['scale_factor'].value
                    self._parameters.pop('scale_factor')
                    self._free_parameters.remove('scale_factor')
                    ast = Interpreter()
                    ast(f'scale_factor = {scale_factor}')
                if parameters:
                    config.parameters += parameters
                config.models += models
                config.models.remove(model)
                found_multipeak = True

        # Check for duplicate model names and create prefixes
        prefixes = self._create_prefixes(config.models)
        if not isinstance(config, FitConfig):
            raise ValueError(f'Invalid parameter config ({config})')
        parameters = config.parameters
        for prefix, model in zip(prefixes, config.models):
            for par in model.parameters:
                par.name = f'{prefix}{par.name}'
            parameters += model.parameters

        # Adjust parameters for refit as needed
        if isinstance(self, FitMap):
            scale_factor_index = \
                self._best_parameters.index('scale_factor')
            self._best_parameters.pop(scale_factor_index)
            self._best_values = np.delete(
                self._best_values, scale_factor_index, 0)
            self._best_errors = np.delete(
                self._best_errors, scale_factor_index, 0)
        for par in parameters:
            name = par.name
            if name not in self._parameters:
                raise ValueError(
                    f'Unable to match {name} parameter {par} to an '
                    'existing one')
            ppar = self._parameters[name]
            if ppar.expr is not None:
                # Only scale-factor-dependent center expressions may
                # be converted to plain values
                if (scale_factor is not None and 'center' in name
                        and 'scale_factor' in ppar.expr):
                    ppar.set(value=ast(ppar.expr), expr='')
                    value = ppar.value
                else:
                    raise ValueError(
                        f'Unable to modify {name} parameter {par} '
                        '(currently an expression)')
            else:
                value = par.value
                if par.expr is not None:
                    raise KeyError(
                        f'Invalid "expr" key in {name} parameter {par}')
            ppar.set(
                value=value, min=par.min, max=par.max, vary=par.vary)

    # Set parameters configuration
    if self._code == 'scipy':
        # Precompute the bookkeeping lists used by the scipy residual
        self._res_par_exprs = []
        self._res_par_indices = []
        self._res_par_names = []
        self._res_par_values = []
        for i, (name, par) in enumerate(self._parameters.items()):
            self._res_par_values.append(par.value)
            if par.expr:
                self._res_par_exprs.append(
                    {'expr': par.expr, 'index': i})
            else:
                if par.vary:
                    self._res_par_indices.append(i)
                    self._res_par_names.append(name)

    # Check for uninitialized parameters
    for name, par in self._parameters.items():
        if par.expr is None:
            value = par.value
            if value is None or np.isinf(value) or np.isnan(value):
                # Default linear parameters to the norm, others to 1
                if (self._norm is None
                        or name in self._nonlinear_parameters):
                    self._parameters[name].set(value=1.0)
                else:
                    self._parameters[name].set(value=self._norm[1])
|
|
1568
|
+
|
|
1569
|
+
def _check_linearity_model(self):
|
|
1570
|
+
"""Identify the linearity of all model parameters and check if
|
|
1571
|
+
the model is linear or not.
|
|
1572
|
+
"""
|
|
1573
|
+
# Third party modules
|
|
1574
|
+
from lmfit.models import ExpressionModel
|
|
1575
|
+
from sympy import diff
|
|
1576
|
+
|
|
1577
|
+
# if not self._try_linear_fit:
|
|
1578
|
+
# self._logger.info(
|
|
1579
|
+
# 'Skip linearity check (not yet supported for callable models)')
|
|
1580
|
+
# return False
|
|
1581
|
+
free_parameters = \
|
|
1582
|
+
[name for name, par in self._parameters.items() if par.vary]
|
|
1583
|
+
for component in self._model.components:
|
|
1584
|
+
if 'tmp_normalization_offset_c' in component.param_names:
|
|
1585
|
+
continue
|
|
1586
|
+
if isinstance(component, ExpressionModel):
|
|
1587
|
+
for name in free_parameters:
|
|
1588
|
+
if diff(component.expr, name, name):
|
|
1589
|
+
self._nonlinear_parameters.append(name)
|
|
1590
|
+
if name in self._linear_parameters:
|
|
1591
|
+
self._linear_parameters.remove(name)
|
|
1592
|
+
else:
|
|
1593
|
+
model_parameters = component.param_names.copy()
|
|
1594
|
+
for basename, hint in component.param_hints.items():
|
|
1595
|
+
name = f'{component.prefix}{basename}'
|
|
1596
|
+
if hint.get('expr') is not None:
|
|
1597
|
+
model_parameters.remove(name)
|
|
1598
|
+
for name in model_parameters:
|
|
1599
|
+
expr = self._parameters[name].expr
|
|
1600
|
+
if expr is not None:
|
|
1601
|
+
for nname in free_parameters:
|
|
1602
|
+
if name in self._nonlinear_parameters:
|
|
1603
|
+
if diff(expr, nname):
|
|
1604
|
+
self._nonlinear_parameters.append(nname)
|
|
1605
|
+
if nname in self._linear_parameters:
|
|
1606
|
+
self._linear_parameters.remove(nname)
|
|
1607
|
+
else:
|
|
1608
|
+
assert name in self._linear_parameters
|
|
1609
|
+
if diff(expr, nname, nname):
|
|
1610
|
+
self._nonlinear_parameters.append(nname)
|
|
1611
|
+
if nname in self._linear_parameters:
|
|
1612
|
+
self._linear_parameters.remove(nname)
|
|
1613
|
+
if any(True for name in self._nonlinear_parameters
|
|
1614
|
+
if self._parameters[name].vary):
|
|
1615
|
+
return False
|
|
1616
|
+
return True
|
|
1617
|
+
|
|
1618
|
+
    def _fit_linear_model(self, x, y):
        """Perform a linear fit by direct matrix solution with numpy.

        Builds the design matrix for the free (linear) parameters of
        each model component, solves the least-squares system with
        ``np.linalg.lstsq``, and assembles an lmfit ``ModelResult``.

        NOTE(review): this method is currently disabled — the
        unconditional ``raise RuntimeError`` below (marked
        "FIX self._parameter_norms") makes everything after it dead
        code. Remove the raise only after the parameter-norm handling
        is fixed.

        :param x: Independent variable values.
        :param y: Dependent data to fit.
        """
        # Third party modules
        from asteval import Interpreter
        from lmfit.model import ModelResult
        from lmfit.models import (
            ConstantModel,
            LinearModel,
            QuadraticModel,
            ExpressionModel,
        )
        # Third party modules
        from sympy import (
            diff,
            simplify,
        )

        # FIX self._parameter_norms
        # pylint: disable=no-member
        raise RuntimeError
        # Construct the matrix and the free parameter vector
        free_parameters = \
            [name for name, par in self._parameters.items() if par.vary]
        expr_parameters = {
            name:par.expr for name, par in self._parameters.items()
            if par.expr is not None}
        model_parameters = []
        for component in self._model.components:
            if 'tmp_normalization_offset_c' in component.param_names:
                continue
            model_parameters += component.param_names
            for basename, hint in component.param_hints.items():
                name = f'{component.prefix}{basename}'
                if hint.get('expr') is not None:
                    expr_parameters.pop(name)
                    model_parameters.remove(name)
        norm = 1.0
        if self._normalized:
            norm = self._norm[1]
        # Add expression parameters to asteval
        ast = Interpreter()
        for name, expr in expr_parameters.items():
            ast.symtable[name] = expr
        # Add constant parameters to asteval
        # (renormalize to use correctly in evaluation of expression
        # models)
        for name, par in self._parameters.items():
            if par.expr is None and not par.vary:
                if self._parameter_norms[name]:
                    ast.symtable[name] = par.value*norm
                else:
                    ast.symtable[name] = par.value
        mat_a = np.zeros((len(x), len(free_parameters)), dtype='float64')
        y_const = np.zeros(len(x), dtype='float64')
        have_expression_model = False
        for component in self._model.components:
            if isinstance(component, ConstantModel):
                # Constant: either a column of ones (free) or a fixed
                # contribution to the constant part of the data
                name = component.param_names[0]
                if name in free_parameters:
                    mat_a[:,free_parameters.index(name)] = 1.0
                else:
                    if self._parameter_norms[name]:
                        delta_y_const = \
                            self._parameters[name] * np.ones(len(x))
                    else:
                        delta_y_const = \
                            (self._parameters[name]*norm) * np.ones(len(x))
                    y_const += delta_y_const
            elif isinstance(component, ExpressionModel):
                # Expression model: differentiate symbolically w.r.t.
                # each free parameter to get that parameter's column;
                # whatever is left over is a constant contribution
                have_expression_model = True
                const_expr = component.expr
                for name in free_parameters:
                    dexpr_dname = diff(component.expr, name)
                    if dexpr_dname:
                        const_expr = \
                            f'{const_expr}-({str(dexpr_dname)})*{name}'
                        if not self._parameter_norms[name]:
                            dexpr_dname = f'({dexpr_dname})/{norm}'
                        y_expr = [(lambda _: ast.eval(str(dexpr_dname)))
                                  (ast(f'x={v}')) for v in x]
                        if ast.error:
                            raise ValueError(
                                f'Unable to evaluate {dexpr_dname}')
                        mat_a[:,free_parameters.index(name)] += y_expr
                const_expr = str(simplify(f'({const_expr})/{norm}'))
                delta_y_const = [(lambda _: ast.eval(const_expr))
                                 (ast(f'x = {v}')) for v in x]
                y_const += delta_y_const
                if ast.error:
                    raise ValueError(f'Unable to evaluate {const_expr}')
            else:
                free_model_parameters = [
                    name for name in component.param_names
                    if name in free_parameters or name in expr_parameters]
                if not free_model_parameters:
                    # Fully fixed component: evaluate once into y_const
                    y_const += component.eval(params=self._parameters, x=x)
                elif isinstance(component, LinearModel):
                    name = f'{component.prefix}slope'
                    if name in free_model_parameters:
                        mat_a[:,free_parameters.index(name)] = x
                    else:
                        y_const += self._parameters[name].value * x
                    name = f'{component.prefix}intercept'
                    if name in free_model_parameters:
                        mat_a[:,free_parameters.index(name)] = 1.0
                    else:
                        y_const += self._parameters[name].value \
                            * np.ones(len(x))
                elif isinstance(component, QuadraticModel):
                    name = f'{component.prefix}a'
                    if name in free_model_parameters:
                        mat_a[:,free_parameters.index(name)] = x**2
                    else:
                        y_const += self._parameters[name].value * x**2
                    name = f'{component.prefix}b'
                    if name in free_model_parameters:
                        mat_a[:,free_parameters.index(name)] = x
                    else:
                        y_const += self._parameters[name].value * x
                    name = f'{component.prefix}c'
                    if name in free_model_parameters:
                        mat_a[:,free_parameters.index(name)] = 1.0
                    else:
                        y_const += self._parameters[name].value \
                            * np.ones(len(x))
                else:
                    # At this point each build-in model must be
                    # strictly proportional to each linear model
                    # parameter. Without this assumption, the model
                    # equation is needed
                    # For the current build-in lmfit models, this can
                    # only ever be the amplitude
                    assert len(free_model_parameters) == 1
                    name = f'{component.prefix}amplitude'
                    assert free_model_parameters[0] == name
                    assert self._parameter_norms[name]
                    expr = self._parameters[name].expr
                    if expr is None:
                        # Evaluate at amplitude 1 to get the column shape
                        parameters = deepcopy(self._parameters)
                        parameters[name].set(value=1.0)
                        mat_a[:,free_parameters.index(name)] += component.eval(
                            params=parameters, x=x)
                    else:
                        const_expr = expr
                        parameters = deepcopy(self._parameters)
                        parameters[name].set(value=1.0)
                        dcomp_dname = component.eval(params=parameters, x=x)
                        for nname in free_parameters:
                            dexpr_dnname = diff(expr, nname)
                            if dexpr_dnname:
                                assert self._parameter_norms[name]
                                y_expr = np.asarray(
                                    dexpr_dnname*dcomp_dname, dtype='float64')
                                if self._parameter_norms[nname]:
                                    mat_a[:,free_parameters.index(nname)] += \
                                        y_expr
                                else:
                                    mat_a[:,free_parameters.index(nname)] += \
                                        y_expr/norm
                                const_expr = \
                                    f'{const_expr}-({dexpr_dnname})*{nname}'
                        const_expr = str(simplify(f'({const_expr})/{norm}'))
                        y_expr = [
                            (lambda _: ast.eval(const_expr))(ast(f'x = {v}'))
                            for v in x]
                        delta_y_const = np.multiply(y_expr, dcomp_dname)
                        y_const += delta_y_const
        solution, _, _, _ = np.linalg.lstsq(
            mat_a, y-y_const, rcond=None)

        # Assemble result
        # (compensate for normalization in expression models)
        for name, value in zip(free_parameters, solution):
            self._parameters[name].set(value=value)
        if (self._normalized
                and (have_expression_model or expr_parameters)):
            for name, norm in self._parameter_norms.items():
                par = self._parameters[name]
                if par.expr is None and norm:
                    self._parameters[name].set(value=par.value*self._norm[1])
        #RV FIX
        self._result = ModelResult(
            self._model, deepcopy(self._parameters), 'linear')
        self._result.best_fit = self._model.eval(params=self._parameters, x=x)
        if (self._normalized
                and (have_expression_model or expr_parameters)):
            if 'tmp_normalization_offset_c' in self._parameters:
                offset = self._parameters['tmp_normalization_offset_c']
            else:
                offset = 0.0
            self._result.best_fit = \
                (self._result.best_fit-offset-self._norm[0]) / self._norm[1]
        if self._normalized:
            for name, norm in self._parameter_norms.items():
                par = self._parameters[name]
                if par.expr is None and norm:
                    value = par.value/self._norm[1]
                    self._parameters[name].set(value=value)
                    self._result.params[name].set(value=value)
        self._result.residual = y-self._result.best_fit
        self._result.components = self._model.components
        self._result.init_params = None
    def _fit_nonlinear_model(self, x, y, **kwargs):
        """Perform a nonlinear fit with scipy or lmfit.

        When ``self._code == 'scipy'``, the residual is minimized
        directly with ``scipy.optimize.leastsq``/``least_squares`` and
        the outcome is wrapped in this module's ``ModelResult``;
        otherwise the fit is delegated to ``lmfit.Model.fit``.

        :param x: Independent variable values.
        :param y: Dependent data to fit.
        :param kwargs: Extra keyword arguments forwarded to
            ``lmfit.Model.fit`` (lmfit code path only).
        :return: The fit result object.
        """
        # Check bounds and prevent initial values at boundaries
        have_bounds = False
        self._parameter_bounds = {}
        for name, par in self._parameters.items():
            if par.vary:
                self._parameter_bounds[name] = {
                    'min': par.min, 'max': par.max}
                if not have_bounds and (
                        not np.isinf(par.min) or not np.isinf(par.max)):
                    have_bounds = True
        if have_bounds:
            self._reset_par_at_boundary()

        # Perform the fit
        if self._mask is not None:
            x = x[~self._mask]
            y = np.asarray(y)[~self._mask]
        if self._code == 'scipy':
            # Third party modules
            from asteval import Interpreter
            from scipy.optimize import (
                leastsq,
                least_squares,
            )

            # Masking is not supported on the scipy code path
            assert self._mask is None
            self._ast = Interpreter()
            self._ast.basesymtable = dict(self._ast.symtable.items())
            pars_init = []
            for i, (name, par) in enumerate(self._parameters.items()):
                setattr(par, '_init_value', par.value)
                self._res_par_values[i] = par.value
                if par.expr is None:
                    self._ast.symtable[name] = par.value
                    if par.vary:
                        pars_init.append(par.value)
            if have_bounds:
                bounds = (
                    [v['min'] for v in self._parameter_bounds.values()],
                    [v['max'] for v in self._parameter_bounds.values()])
                # leastsq/'lm' does not support bounds; switch to 'trf'
                if self._method in ('lm', 'leastsq'):
                    self._method = 'trf'
                    self._logger.debug(
                        f'Fit method changed to {self._method} for fit with '
                        'bounds')
            else:
                bounds = (-np.inf, np.inf)
            init_params = deepcopy(self._parameters)
            # t0 = time()
            lskws = {
                'ftol': 1.49012e-08,
                'xtol': 1.49012e-08,
                'gtol': 10*FLOAT_EPS,
            }
            if self._method == 'leastsq':
                lskws['maxfev'] = 64000
                result = leastsq(
                    self._residual, pars_init, args=(x, y), full_output=True,
                    **lskws)
            else:
                lskws['max_nfev'] = 64000
                result = least_squares(
                    self._residual, pars_init, bounds=bounds,
                    method=self._method, args=(x, y), **lskws)
            # t1 = time()
            # print(f'\n\nFitting took {1000*(t1-t0):.3f} ms\n\n')
            model_result = ModelResult(
                self._model, self._parameters, x, y, self._method, self._ast,
                self._res_par_exprs, self._res_par_indices,
                self._res_par_names, result)
            model_result.init_params = init_params
            model_result.init_values = {}
            for name, par in init_params.items():
                model_result.init_values[name] = par.value
            # None on the least_squares path (only leastsq sets maxfev)
            model_result.max_nfev = lskws.get('maxfev')
        else:
            fit_kws = {}
            # if 'Dfun' in kwargs:
            #     fit_kws['Dfun'] = kwargs.pop('Dfun')
            # t0 = time()
            model_result = self._model.fit(
                y, self._parameters, x=x, method=self._method, fit_kws=fit_kws,
                **kwargs)
            # t1 = time()
            # print(f'\n\nFitting took {1000*(t1-t0):.3f} ms\n\n')

        return model_result
    def _normalize(self):
        """Normalize the data and initial parameters.

        Maps the data to ``(y - norm[0]) / norm[1]`` and rescales the
        values/bounds of all linear parameters accordingly. A no-op if
        already normalized. When ``self._norm`` is None only the raw
        data array is cached.
        """
        if self._normalized:
            return
        if self._norm is None:
            if self._y is not None and self._y_norm is None:
                self._y_norm = np.asarray(self._y)
        else:
            if self._y is not None and self._y_norm is None:
                self._y_norm = \
                    (np.asarray(self._y)-self._norm[0]) / self._norm[1]
            self._y_range = 1.0
            for name in self._linear_parameters:
                par = self._parameters[name]
                # Parameters tied by an expression follow automatically
                if par.expr is None:
                    value = par.value/self._norm[1]
                    _min = par.min
                    _max = par.max
                    # FLOAT_MIN-magnitude bounds act as sentinels and
                    # are left untouched
                    if not np.isinf(_min) and abs(_min) != FLOAT_MIN:
                        _min /= self._norm[1]
                    if not np.isinf(_max) and abs(_max) != FLOAT_MIN:
                        _max /= self._norm[1]
                    par.set(value=value, min=_min, max=_max)
            self._normalized = True
    def _renormalize(self):
        """Renormalize the data and results.

        Inverse of :meth:`_normalize`: scales linear-parameter values,
        bounds, stderrs, the best fit, initial values/fit, covariance
        and residual back to physical units. A no-op when there is no
        norm or the state is not normalized.
        """
        if self._norm is None or not self._normalized:
            return
        self._normalized = False
        for name in self._linear_parameters:
            par = self._parameters[name]
            if par.expr is None:
                value = par.value*self._norm[1]
                _min = par.min
                _max = par.max
                # FLOAT_MIN-magnitude bounds act as sentinels
                if not np.isinf(_min) and abs(_min) != FLOAT_MIN:
                    _min *= self._norm[1]
                if not np.isinf(_max) and abs(_max) != FLOAT_MIN:
                    _max *= self._norm[1]
                par.set(value=value, min=_min, max=_max)
        if self._result is None:
            return
        self._result.best_fit = (
            self._result.best_fit*self._norm[1] + self._norm[0])
        for name, par in self._result.params.items():
            if name in self._linear_parameters:
                if par.stderr is not None:
                    # scipy-path Parameter objects expose stderr as a
                    # read-only property backed by _stderr
                    if self._code == 'scipy':
                        setattr(par, '_stderr', par.stderr*self._norm[1])
                    else:
                        par.stderr *= self._norm[1]
                if par.expr is None:
                    _min = par.min
                    _max = par.max
                    value = par.value*self._norm[1]
                    if par.init_value is not None:
                        if self._code == 'scipy':
                            setattr(par, '_init_value',
                                    par.init_value*self._norm[1])
                        else:
                            par.init_value *= self._norm[1]
                    if not np.isinf(_min) and abs(_min) != FLOAT_MIN:
                        _min *= self._norm[1]
                    if not np.isinf(_max) and abs(_max) != FLOAT_MIN:
                        _max *= self._norm[1]
                    par.set(value=value, min=_min, max=_max)
        if hasattr(self._result, 'init_fit'):
            self._result.init_fit = (
                self._result.init_fit*self._norm[1] + self._norm[0])
        if hasattr(self._result, 'init_values'):
            init_values = {}
            for name, value in self._result.init_values.items():
                if name in self._linear_parameters:
                    init_values[name] = value*self._norm[1]
                else:
                    init_values[name] = value
            self._result.init_values = init_values
        if (hasattr(self._result, 'init_params')
                and self._result.init_params is not None):
            for name, par in self._result.init_params.items():
                if par.expr is None and name in self._linear_parameters:
                    value = par.value
                    _min = par.min
                    _max = par.max
                    value *= self._norm[1]
                    if not np.isinf(_min) and abs(_min) != FLOAT_MIN:
                        _min *= self._norm[1]
                    if not np.isinf(_max) and abs(_max) != FLOAT_MIN:
                        _max *= self._norm[1]
                    par.set(value=value, min=_min, max=_max)
                if self._code == 'scipy':
                    setattr(par, '_init_value', par.value)
                else:
                    par.init_value = par.value
        # Don't renormalize chisqr, it has no useful meaning in
        # physical units
        # self._result.chisqr *= self._norm[1]*self._norm[1]
        if self._result.covar is not None:
            norm_sq = self._norm[1]*self._norm[1]
            # NOTE(review): when BOTH var_names[i] and var_names[j] are
            # linear, covar[i,j] is multiplied by norm_sq twice (once in
            # the i pass and once in the j pass), i.e. by norm**4 in
            # total; the commented-out per-pass factor of norm[1] gave
            # norm**2 for such pairs. Verify which scaling is intended.
            for i, name in enumerate(self._result.var_names):
                if name in self._linear_parameters:
                    for j in range(len(self._result.var_names)):
                        if self._result.covar[i,j] is not None:
                            #self._result.covar[i,j] *= self._norm[1]
                            self._result.covar[i,j] *= norm_sq
                        if self._result.covar[j,i] is not None:
                            #self._result.covar[j,i] *= self._norm[1]
                            self._result.covar[j,i] *= norm_sq
        # Don't renormalize redchi, it has no useful meaning in
        # physical units
        # self._result.redchi *= self._norm[1]*self._norm[1]
        if self._result.residual is not None:
            self._result.residual *= self._norm[1]
    def _reset_par_at_boundary(self):
        """Nudge free parameters away from their bounds.

        Fitting algorithms can stall when an initial value sits on a
        bound; any varying parameter within 2% of a bound is moved
        inward by that fraction (of the data range for linear
        parameters, of the bound magnitude otherwise).
        """
        fraction = 0.02
        for name, par in self._parameters.items():
            if par.vary:
                value = par.value
                _min = self._parameter_bounds[name]['min']
                _max = self._parameter_bounds[name]['max']
                if np.isinf(_min):
                    if not np.isinf(_max):
                        # Only an upper bound: clamp from above
                        if name in self._linear_parameters:
                            upp = _max - fraction*self._y_range
                        elif _max == 0.0:
                            upp = _max - fraction
                        else:
                            upp = _max - fraction*abs(_max)
                        if value >= upp:
                            par.set(value=upp)
                else:
                    if np.isinf(_max):
                        # Only a lower bound: clamp from below
                        if name in self._linear_parameters:
                            low = _min + fraction*self._y_range
                        elif _min == 0.0:
                            low = _min + fraction
                        else:
                            low = _min + fraction*abs(_min)
                        if value <= low:
                            par.set(value=low)
                    else:
                        # Two-sided bounds: keep value in the inner
                        # (1-2*fraction) portion of the interval
                        low = (1.0-fraction)*_min + fraction*_max
                        upp = fraction*_min + (1.0-fraction)*_max
                        if value <= low:
                            par.set(value=low)
                        if value >= upp:
                            par.set(value=upp)
    def _residual(self, pars, x, y):
        """Residual function for the scipy fit code path.

        :param pars: Current values of the free parameters, in the
            order of ``self._res_par_indices``.
        :param x: Independent variable values.
        :param y: Dependent data being fitted.
        :return: Model evaluation minus the data.
        """
        res = np.zeros((x.size))
        # Free parameters are assumed to occupy the first
        # len(self._free_parameters) slots of self._res_par_values;
        # component parameters follow (TODO confirm — _free_parameters
        # and _res_num_pars are set outside this view)
        n_par = len(self._free_parameters)
        for par, index in zip(pars, self._res_par_indices):
            self._res_par_values[index] = par
        if self._res_par_exprs:
            # Re-evaluate expression-tied parameters with the current
            # free-parameter values loaded into the asteval symtable
            for par, name in zip(pars, self._res_par_names):
                self._ast.symtable[name] = par
            for expr in self._res_par_exprs:
                self._res_par_values[expr['index']] = \
                    self._ast.eval(expr['expr'])
        for component, num_par in zip(
                self._model.components, self._res_num_pars):
            res += component.func(
                x, *tuple(self._res_par_values[n_par:n_par+num_par]))
            n_par += num_par
        return res - y
class FitMap(Fit):
|
|
2082
|
+
"""Wrapper to the Fit class to fit data on a N-dimensional map."""
|
|
2083
|
+
    def __init__(self, nxdata, config, logger):
        """Initialize FitMap.

        :param nxdata: NXdata group holding the map; the last axis is
            the signal (fit) dimension, the leading axes are the map
            dimensions.
        :param config: Fit configuration (supplies ``memfolder``,
            ``parameters`` and ``models``).
        :param logger: Logger instance.
        :raises ValueError: If the fit axis is not one-dimensional or
            its size does not match the signal's last dimension.
        """
        super().__init__(None, config, logger)
        self._best_errors = None
        self._best_fit = None
        self._best_parameters = None
        self._best_values = None
        self._inv_transpose = None
        self._max_nfev = None
        self._memfolder = config.memfolder
        self._new_parameters = None
        self._num_func_eval = None
        self._out_of_bounds = None
        self._plot = False
        self._print_report = False
        self._redchi = None
        self._redchi_cutoff = 0.1
        self._rel_height_cutoff = None
        self._skip_init = True
        self._success = None
        self._try_no_bounds = True

        # At this point the fastest index should always be the signal
        # dimension so that the slowest ndim-1 dimensions are the
        # map dimensions
        self._x = np.asarray(nxdata[nxdata.attrs['axes'][-1]])
        self._ymap = np.asarray(nxdata.nxsignal)

        # Check input parameters
        if self._x.ndim != 1:
            raise ValueError(f'Invalid x dimension ({self._x.ndim})')
        if self._x.size != self._ymap.shape[-1]:
            raise ValueError(
                f'Inconsistent x and y dimensions ({self._x.size} vs '
                f'{self._ymap.shape[-1]})')

        # Flatten the map
        # Store the flattened map in self._ymap_norm
        self._map_dim = int(self._ymap.size/self._x.size)
        self._map_shape = self._ymap.shape[:-1]
        self._ymap_norm = np.reshape(
            self._ymap, (self._map_dim, self._x.size))

        # Check if a mask is provided
        # if 'mask' in kwargs:
        #     self._mask = kwargs.pop('mask')
        if True: #self._mask is None:
            ymap_min = float(self._ymap_norm.min())
            ymap_max = float(self._ymap_norm.max())
        else:
            ymap_min = None
            ymap_max = None
            # self._mask = np.asarray(self._mask).astype(bool)
            # if self._x.size != self._mask.size:
            #     raise ValueError(
            #         f'Inconsistent mask dimension ({self._x.size} vs '
            #         f'{self._mask.size})')
            # ymap_masked = np.asarray(self._ymap_norm)[:,~self._mask]
            # ymap_min = float(ymap_masked.min())
            # ymap_max = float(ymap_masked.max())

        # Normalize the data
        self._y_range = ymap_max-ymap_min
        if self._y_range > 0.0:
            self._norm = (ymap_min, self._y_range)
            self._ymap_norm = (self._ymap_norm-self._norm[0]) / self._norm[1]
        else:
            # NOTE(review): for a constant map _y_range is 0.0, so this
            # zeroes the redchi cutoff — confirm that is intended
            self._redchi_cutoff *= self._y_range**2

        # Setup fit model
        self._setup_fit_model(config.parameters, config.models)
    @property
    def best_errors(self):
        """Return errors in the best fit parameters (per map point)."""
        return self._best_errors
    @property
    def best_fit(self):
        """Return the best fits (per map point)."""
        return self._best_fit
    @property
    def best_values(self):
        """Return values of the best fit parameters (per map point)."""
        return self._best_values
    @property
    def chisqr(self):
        """Return the chisqr value of each best fit.

        Not implemented for maps: logs a warning and returns None.
        """
        self._logger.warning('Undefined property chisqr')
    @property
    def components(self):
        """Return the fit model components info.

        :return: Mapping of a human-readable component name to a dict
            with its (optional) expression and a per-parameter dict
            describing whether each parameter is free, expression-tied,
            or fixed (with its initial value). Empty (with a warning)
            when no fit result is available.
        :rtype: dict
        """
        # Third party modules
        from lmfit.models import ExpressionModel

        components = {}
        if self._result is None:
            self._logger.warning(
                'Unable to collect components in FitMap.components')
            return components
        for component in self._result.components:
            # Skip the internal normalization-offset component
            if 'tmp_normalization_offset_c' in component.param_names:
                continue
            parameters = {}
            for name in component.param_names:
                if self._parameters[name].vary:
                    parameters[name] = {'free': True}
                elif self._parameters[name].expr is not None:
                    parameters[name] = {
                        'free': False,
                        'expr': self._parameters[name].expr,
                    }
                else:
                    parameters[name] = {
                        'free': False,
                        'value': self.init_parameters[name]['value'],
                    }
            expr = None
            if isinstance(component, ExpressionModel):
                # Strip a trailing underscore from the generated name
                name = component._name
                if name[-1] == '_':
                    name = name[:-1]
                expr = component.expr
            else:
                prefix = component.prefix
                if prefix:
                    if prefix[-1] == '_':
                        prefix = prefix[:-1]
                    name = f'{prefix} ({component._name})'
                else:
                    name = f'{component._name}'
            if expr is None:
                components[name] = {'parameters': parameters}
            else:
                components[name] = {'expr': expr, 'parameters': parameters}
        return components
    @property
    def covar(self):
        """Return the covarience matrices of the best fit parameters.

        Not implemented for maps: logs a warning and returns None.
        """
        self._logger.warning('Undefined property covar')
    @property
    def max_nfev(self):
        """Return if the maximum number of function evaluations is
        reached for each fit.
        """
        return self._max_nfev
    @property
    def num_func_eval(self):
        """Return the number of function evaluations for each best fit.
        """
        return self._num_func_eval
    @property
    def out_of_bounds(self):
        """Return the out_of_bounds value of each best fit."""
        return self._out_of_bounds
    @property
    def redchi(self):
        """Return the redchi value of each best fit."""
        return self._redchi
    @property
    def residual(self):
        """Return the residual in each best fit.

        Data minus best fit, per map point; masked points are excluded
        when a mask is set. Returns None when no best fit exists yet.
        """
        if self.best_fit is None:
            return None
        if self._mask is None:
            residual = np.asarray(self._ymap)-self.best_fit
        else:
            # Apply the mask on the flattened map, then restore the
            # map shape with the reduced signal dimension
            ymap_flat = np.reshape(
                np.asarray(self._ymap), (self._map_dim, self._x.size))
            ymap_flat_masked = ymap_flat[:,~self._mask]
            ymap_masked = np.reshape(
                ymap_flat_masked,
                list(self._map_shape) + [ymap_flat_masked.shape[-1]])
            residual = ymap_masked-self.best_fit
        return residual
    @property
    def success(self):
        """Return the success value for each fit."""
        return self._success
    @property
    def var_names(self):
        """Return the variable names for the covarience matrix
        property.

        Not implemented for maps: logs a warning and returns None.
        """
        self._logger.warning('Undefined property var_names')
    @property
    def y(self):
        """Return the input y-array.

        Not defined for maps (use :attr:`ymap`): logs a warning and
        returns None.
        """
        self._logger.warning('Undefined property y')
    @property
    def ymap(self):
        """Return the input y-array map."""
        return self._ymap
def best_parameters(self, dims=None):
|
|
2292
|
+
"""Return the best fit parameters."""
|
|
2293
|
+
if dims is None:
|
|
2294
|
+
return self._best_parameters
|
|
2295
|
+
if (not isinstance(dims, (list, tuple))
|
|
2296
|
+
or len(dims) != len(self._map_shape)):
|
|
2297
|
+
raise ValueError('Invalid parameter dims ({dims})')
|
|
2298
|
+
if self.best_values is None or self.best_errors is None:
|
|
2299
|
+
self._logger.warning(
|
|
2300
|
+
f'Unable to obtain best parameter values for dims = {dims}')
|
|
2301
|
+
return {}
|
|
2302
|
+
# Create current parameters
|
|
2303
|
+
parameters = deepcopy(self._parameters)
|
|
2304
|
+
for n, name in enumerate(self._best_parameters):
|
|
2305
|
+
if self._parameters[name].vary:
|
|
2306
|
+
parameters[name].set(value=self.best_values[n][dims])
|
|
2307
|
+
parameters[name].stderr = self.best_errors[n][dims]
|
|
2308
|
+
parameters_dict = {}
|
|
2309
|
+
for name in sorted(parameters):
|
|
2310
|
+
if name != 'tmp_normalization_offset_c':
|
|
2311
|
+
par = parameters[name]
|
|
2312
|
+
parameters_dict[name] = {
|
|
2313
|
+
'value': par.value,
|
|
2314
|
+
'error': par.stderr,
|
|
2315
|
+
'init_value': self.init_parameters[name]['value'],
|
|
2316
|
+
'min': par.min,
|
|
2317
|
+
'max': par.max,
|
|
2318
|
+
'vary': par.vary,
|
|
2319
|
+
'expr': par.expr,
|
|
2320
|
+
}
|
|
2321
|
+
return parameters_dict
|
|
2322
|
+
|
|
2323
|
+
def freemem(self):
|
|
2324
|
+
"""Free memory allocated for parallel processing."""
|
|
2325
|
+
if self._memfolder is None:
|
|
2326
|
+
return
|
|
2327
|
+
try:
|
|
2328
|
+
rmtree(self._memfolder)
|
|
2329
|
+
except Exception:
|
|
2330
|
+
self._logger.warning('Could not clean-up automatically.')
|
|
2331
|
+
|
|
2332
|
+
def plot(
        self, dims=None, y_title=None, plot_residual=False,
        plot_comp_legends=False, plot_masked_data=True, **kwargs):
    """Plot the best fit for one map point.

    :param dims: Map coordinates of the fit to plot; defaults to
        the origin of the map.
    :type dims: list, tuple, optional
    :param y_title: Legend label for the data curve, defaults to
        `'data'`.
    :type y_title: str, optional
    :param plot_residual: Also plot the fit residual,
        defaults to `False`.
    :type plot_residual: bool, optional
    :param plot_comp_legends: Add a legend entry per model
        component, defaults to `False`.
    :type plot_comp_legends: bool, optional
    :param plot_masked_data: Mark the masked data points,
        defaults to `True` (forced to `False` when there is no mask).
    :type plot_masked_data: bool, optional
    :raises ValueError: If `dims` does not match the map dimensions.
    """
    # Third party modules
    from lmfit.models import ExpressionModel

    if dims is None:
        dims = [0]*len(self._map_shape)
    if (not isinstance(dims, (list, tuple))
            or len(dims) != len(self._map_shape)):
        # FIX: was a plain string '...({dims})' — missing f-prefix,
        # so the message printed the literal text '{dims}'
        raise ValueError(f'Invalid parameter dims ({dims})')
    dims = tuple(dims)
    if (self._result is None or self.best_fit is None
            or self.best_values is None):
        self._logger.warning(
            f'Unable to plot fit for dims = {dims}')
        return
    if y_title is None or not isinstance(y_title, str):
        y_title = 'data'
    if self._mask is None:
        # No mask: use an all-False mask and skip masked-data markers
        mask = np.zeros(self._x.size).astype(bool)
        plot_masked_data = False
    else:
        mask = self._mask
    plots = [(self._x, np.asarray(self._ymap[dims]), 'b.')]
    legend = [y_title]
    if plot_masked_data:
        plots += \
            [(self._x[mask], np.asarray(self._ymap)[(*dims,mask)], 'bx')]
        legend += ['masked data']
    plots += [(self._x[~mask], self.best_fit[dims], 'k-')]
    legend += ['best fit']
    if plot_residual:
        plots += [(self._x[~mask], self.residual[dims], 'r--')]
        legend += ['residual']
    # Create current parameters loaded with this point's best values
    # (FIX: enumerate instead of list.index inside the loop — the
    # repeated index() lookup was O(n^2) and enumerate gives the same
    # position for the unique parameter names)
    parameters = deepcopy(self._parameters)
    for i, name in enumerate(self._best_parameters):
        if self._parameters[name].vary:
            parameters[name].set(value=self.best_values[i][dims])
    # Evaluate and plot each model component over the unmasked points
    for component in self._result.components:
        if 'tmp_normalization_offset_c' in component.param_names:
            # Internal normalization helper, not shown
            continue
        if isinstance(component, ExpressionModel):
            prefix = component._name
            if prefix[-1] == '_':
                prefix = prefix[:-1]
            modelname = f'{prefix}: {component.expr}'
        else:
            prefix = component.prefix
            if prefix:
                if prefix[-1] == '_':
                    prefix = prefix[:-1]
                modelname = f'{prefix} ({component._name})'
            else:
                modelname = f'{component._name}'
        if len(modelname) > 20:
            # Keep legend entries compact
            modelname = f'{modelname[0:16]} ...'
        y = component.eval(params=parameters, x=self._x[~mask])
        if isinstance(y, (int, float)):
            # A constant component evaluates to a scalar: broadcast it
            y *= np.ones(self._x[~mask].size)
        plots += [(self._x[~mask], y, '--')]
        if plot_comp_legends:
            legend.append(modelname)
    quick_plot(
        tuple(plots), legend=legend, title=str(dims), block=True, **kwargs)
|
|
2401
|
+
|
|
2402
|
+
def fit(self, config=None, **kwargs):
    """Fit the model to the input data over the entire map.

    :param config: Optional fit configuration object supplying
        `num_proc`, `rel_height_cutoff`, `print_report` and `plot`;
        when omitted the same settings are read from `kwargs`.
    :param kwargs: Fit settings when `config is None` —
        `num_proc` (int, defaults to the CPU count),
        `rel_height_cutoff` (float or None, defaults to None),
        `try_no_bounds` (bool, defaults to False),
        `redchi_cutoff` (float, defaults to 0.1),
        `print_report` (bool, defaults to False),
        `plot` (bool, defaults to False),
        `skip_init` (bool, defaults to True);
        remaining kwargs are forwarded to the per-point fits.
    """
    # Check input parameters
    if self._model is None:
        self._logger.error('Undefined fit model')
    num_proc_max = max(1, cpu_count())
    if config is None:
        num_proc = kwargs.pop('num_proc', num_proc_max)
        # FIX: pop() had no default, so omitting rel_height_cutoff
        # raised KeyError; downstream code explicitly handles None
        # (see the `is not None` checks in _fit), so default to None
        self._rel_height_cutoff = kwargs.pop('rel_height_cutoff', None)
        self._try_no_bounds = kwargs.pop('try_no_bounds', False)
        self._redchi_cutoff = kwargs.pop('redchi_cutoff', 0.1)
        self._print_report = kwargs.pop('print_report', False)
        self._plot = kwargs.pop('plot', False)
        self._skip_init = kwargs.pop('skip_init', True)
    else:
        num_proc = config.num_proc
        self._rel_height_cutoff = config.rel_height_cutoff
        # NOTE(review): try_no_bounds/redchi_cutoff/skip_init are not
        # taken from config — they keep their prior values here
        # self._try_no_bounds = config.try_no_bounds
        # self._redchi_cutoff = config.redchi_cutoff
        self._print_report = config.print_report
        self._plot = config.plot
        # self._skip_init = config.skip_init
    if num_proc > 1 and not HAVE_JOBLIB:
        self._logger.warning(
            'Missing joblib in the conda environment, running serially')
        num_proc = 1
    if num_proc > num_proc_max:
        self._logger.warning(
            f'The requested number of processors ({num_proc}) exceeds the '
            'maximum allowed number of processors, num_proc reduced to '
            f'{num_proc_max}')
        num_proc = num_proc_max
    self._logger.debug(f'Using {num_proc} processors to fit the data')
    # Scale the reduced-chi-square cutoff to the data range
    self._redchi_cutoff *= self._y_range**2

    # Setup the fit
    self._setup_fit(config)

    # Create the best parameter list, consisting of all varying
    # parameters plus the expression parameters in order to
    # collect their errors
    if self._result is None:
        # Initial fit
        assert self._best_parameters is None
        self._best_parameters = [
            name for name, par in self._parameters.items()
            if par.vary or par.expr is not None]
        num_new_parameters = 0
    else:
        # Refit: only parameters not already tracked are "new"
        assert self._best_parameters
        self._new_parameters = [
            name for name, par in self._parameters.items()
            if name != 'tmp_normalization_offset_c'
            and name not in self._best_parameters
            and (par.vary or par.expr is not None)]
        num_new_parameters = len(self._new_parameters)
    num_best_parameters = len(self._best_parameters)

    # Flatten and normalize the best values of the previous fit,
    # remove the remaining results of the previous fit
    if self._result is not None:
        self._out_of_bounds = None
        self._max_nfev = None
        self._num_func_eval = None
        self._redchi = None
        self._success = None
        self._best_fit = None
        self._best_errors = None
        assert self._best_values is not None
        assert self._best_values.shape[0] == num_best_parameters
        assert self._best_values.shape[1:] == self._map_shape
        self._best_values = [
            np.reshape(self._best_values[i], self._map_dim)
            for i in range(num_best_parameters)]
        if self._norm is not None:
            for i, name in enumerate(self._best_parameters):
                if name in self._linear_parameters:
                    self._best_values[i] /= self._norm[1]

    # Normalize the initial parameters
    # (and best values for a refit)
    self._normalize()

    # Prevent initial values from sitting at boundaries
    self._parameter_bounds = {
        name:{'min': par.min, 'max': par.max}
        for name, par in self._parameters.items() if par.vary}
    self._reset_par_at_boundary()

    # Set parameter bounds to unbound
    # (only use bounds when fit fails)
    if self._try_no_bounds:
        for name in self._parameter_bounds:
            self._parameters[name].set(min=-np.inf, max=np.inf)

    # Allocate memory to store fit results
    if self._mask is None:
        x_size = self._x.size
    else:
        x_size = self._x[~self._mask].size
    if num_proc == 1:
        # Serial run: plain in-memory arrays
        self._out_of_bounds_flat = np.zeros(self._map_dim, dtype=bool)
        self._max_nfev_flat = np.zeros(self._map_dim, dtype=bool)
        self._num_func_eval_flat = np.zeros(self._map_dim, dtype=np.intc)
        self._redchi_flat = np.zeros(self._map_dim, dtype=np.float64)
        self._success_flat = np.zeros(self._map_dim, dtype=bool)
        self._best_fit_flat = np.zeros(
            (self._map_dim, x_size), dtype=self._ymap_norm.dtype)
        self._best_errors_flat = [
            np.zeros(self._map_dim, dtype=np.float64)
            for _ in range(num_best_parameters+num_new_parameters)]
        if self._result is None:
            self._best_values_flat = [
                np.zeros(self._map_dim, dtype=np.float64)
                for _ in range(num_best_parameters)]
        else:
            self._best_values_flat = self._best_values
            self._best_values_flat += [
                np.zeros(self._map_dim, dtype=np.float64)
                for _ in range(num_new_parameters)]
    else:
        # Parallel run: memory-mapped scratch files shared by workers
        try:
            mkdir(self._memfolder)
        except FileExistsError:
            pass
        filename_memmap = path.join(
            self._memfolder, 'out_of_bounds_memmap')
        self._out_of_bounds_flat = np.memmap(
            filename_memmap, dtype=bool, shape=(self._map_dim), mode='w+')
        filename_memmap = path.join(self._memfolder, 'max_nfev_memmap')
        self._max_nfev_flat = np.memmap(
            filename_memmap, dtype=bool, shape=(self._map_dim), mode='w+')
        filename_memmap = path.join(
            self._memfolder, 'num_func_eval_memmap')
        self._num_func_eval_flat = np.memmap(
            filename_memmap, dtype=np.intc, shape=(self._map_dim),
            mode='w+')
        filename_memmap = path.join(self._memfolder, 'redchi_memmap')
        self._redchi_flat = np.memmap(
            filename_memmap, dtype=np.float64, shape=(self._map_dim),
            mode='w+')
        filename_memmap = path.join(self._memfolder, 'success_memmap')
        self._success_flat = np.memmap(
            filename_memmap, dtype=bool, shape=(self._map_dim), mode='w+')
        filename_memmap = path.join(self._memfolder, 'best_fit_memmap')
        self._best_fit_flat = np.memmap(
            filename_memmap, dtype=self._ymap_norm.dtype,
            shape=(self._map_dim, x_size), mode='w+')
        self._best_errors_flat = []
        for i in range(num_best_parameters+num_new_parameters):
            filename_memmap = path.join(
                self._memfolder, f'best_errors_memmap_{i}')
            self._best_errors_flat.append(
                np.memmap(filename_memmap, dtype=np.float64,
                          shape=self._map_dim, mode='w+'))
        self._best_values_flat = []
        for i in range(num_best_parameters):
            filename_memmap = path.join(
                self._memfolder, f'best_values_memmap_{i}')
            self._best_values_flat.append(
                np.memmap(filename_memmap, dtype=np.float64,
                          shape=self._map_dim, mode='w+'))
            if self._result is not None:
                # Seed the refit with the previous best values
                self._best_values_flat[i][:] = self._best_values[i][:]
        for i in range(num_new_parameters):
            filename_memmap = path.join(
                self._memfolder,
                f'best_values_memmap_{i+num_best_parameters}')
            self._best_values_flat.append(
                np.memmap(filename_memmap, dtype=np.float64,
                          shape=self._map_dim, mode='w+'))

    # Update the best parameter list
    if num_new_parameters:
        self._best_parameters += self._new_parameters

    # Perform the first fit to get model component info and
    # initial parameters
    current_best_values = {}
    self._result = self._fit(
        0, current_best_values, return_result=True, **kwargs)

    # Remove all irrelevant content from self._result
    # (FIX: removed the duplicate 'calc_covar' entry)
    for attr in (
            '_abort', 'aborted', 'aic', 'best_fit', 'best_values', 'bic',
            'calc_covar', 'call_kws', 'chisqr', 'ci_out', 'col_deriv',
            'covar', 'data', 'errorbars', 'flatchain', 'ier', 'init_vals',
            'init_fit', 'iter_cb', 'jacfcn', 'kws', 'last_internal_values',
            'lmdif_message', 'message', 'method', 'nan_policy', 'ndata',
            'nfev', 'nfree', 'params', 'redchi', 'reduce_fcn', 'residual',
            'result', 'scale_covar', 'show_candidates',
            'success', 'userargs', 'userfcn', 'userkws', 'values',
            'var_names', 'weights', 'user_options'):
        try:
            delattr(self._result, attr)
        except AttributeError:
            pass

    if self._map_dim > 1:
        if num_proc == 1:
            # Perform the remaining fits serially
            for n in range(1, self._map_dim):
                self._fit(n, current_best_values, **kwargs)
        else:
            # Perform the remaining fits in parallel
            num_fit = self._map_dim-1
            if num_proc > num_fit:
                self._logger.warning(
                    f'The requested number of processors ({num_proc}) '
                    'exceeds the number of fits, num_proc reduced to '
                    f'{num_fit}')
                num_proc = num_fit
                num_fit_per_proc = 1
            else:
                num_fit_per_proc = round((num_fit)/num_proc)
                if num_proc*num_fit_per_proc < num_fit:
                    num_fit_per_proc += 1
            num_fit_batch = min(num_fit_per_proc, 40)
            with Parallel(n_jobs=num_proc) as parallel:
                parallel(
                    delayed(self._fit_parallel)
                    (current_best_values, num_fit_batch, n_start,
                     **kwargs)
                    for n_start in range(1, self._map_dim, num_fit_batch))

    # Renormalize the initial parameters for external use
    if self._norm is not None and self._normalized:
        if hasattr(self._result, 'init_values'):
            init_values = {}
            for name, value in self._result.init_values.items():
                if (name in self._nonlinear_parameters
                        or self._parameters[name].expr is not None):
                    init_values[name] = value
                else:
                    init_values[name] = value*self._norm[1]
            self._result.init_values = init_values
        if (hasattr(self._result, 'init_params')
                and self._result.init_params is not None):
            for name, par in self._result.init_params.items():
                if par.expr is None and name in self._linear_parameters:
                    _min = par.min
                    _max = par.max
                    value = par.value*self._norm[1]
                    # FLOAT_MIN-magnitude bounds are sentinels — leave
                    # them (and infinities) unscaled
                    if not np.isinf(_min) and abs(_min) != FLOAT_MIN:
                        _min *= self._norm[1]
                    if not np.isinf(_max) and abs(_max) != FLOAT_MIN:
                        _max *= self._norm[1]
                    par.set(value=value, min=_min, max=_max)
                    if self._code == 'scipy':
                        # scipy-backed Parameter exposes init_value via
                        # a private attribute
                        setattr(par, '_init_value', par.value)
                    else:
                        par.init_value = par.value

    # Remap the best results
    self._out_of_bounds = np.copy(np.reshape(
        self._out_of_bounds_flat, self._map_shape))
    self._max_nfev = np.copy(np.reshape(
        self._max_nfev_flat, self._map_shape))
    self._num_func_eval = np.copy(np.reshape(
        self._num_func_eval_flat, self._map_shape))
    self._redchi = np.copy(np.reshape(self._redchi_flat, self._map_shape))
    self._success = np.copy(np.reshape(
        self._success_flat, self._map_shape))
    self._best_fit = np.copy(np.reshape(
        self._best_fit_flat, list(self._map_shape)+[x_size]))
    self._best_values = np.asarray([np.reshape(
        par, list(self._map_shape)) for par in self._best_values_flat])
    self._best_errors = np.asarray([np.reshape(
        par, list(self._map_shape)) for par in self._best_errors_flat])
    if self._inv_transpose is not None:
        # Undo the axis transpose applied to the map before fitting
        self._out_of_bounds = np.transpose(
            self._out_of_bounds, self._inv_transpose)
        self._max_nfev = np.transpose(self._max_nfev, self._inv_transpose)
        self._num_func_eval = np.transpose(
            self._num_func_eval, self._inv_transpose)
        self._redchi = np.transpose(self._redchi, self._inv_transpose)
        self._success = np.transpose(self._success, self._inv_transpose)
        self._best_fit = np.transpose(
            self._best_fit,
            list(self._inv_transpose) + [len(self._inv_transpose)])
        self._best_values = np.transpose(
            self._best_values, [0] + [i+1 for i in self._inv_transpose])
        self._best_errors = np.transpose(
            self._best_errors, [0] + [i+1 for i in self._inv_transpose])
    del self._out_of_bounds_flat
    del self._max_nfev_flat
    del self._num_func_eval_flat
    del self._redchi_flat
    del self._success_flat
    del self._best_fit_flat
    del self._best_values_flat
    del self._best_errors_flat

    # Restore parameter bounds and renormalize the parameters
    for name, par in self._parameter_bounds.items():
        self._parameters[name].set(min=par['min'], max=par['max'])
    self._normalized = False
    if self._norm is not None:
        for name in self._linear_parameters:
            par = self._parameters[name]
            if par.expr is None:
                value = par.value*self._norm[1]
                _min = par.min
                _max = par.max
                if not np.isinf(_min) and abs(_min) != FLOAT_MIN:
                    _min *= self._norm[1]
                if not np.isinf(_max) and abs(_max) != FLOAT_MIN:
                    _max *= self._norm[1]
                par.set(value=value, min=_min, max=_max)

    if num_proc > 1:
        # Free the shared memory
        self.freemem()
|
|
2716
|
+
|
|
2717
|
+
def _fit_parallel(self, current_best_values, num, n_start, **kwargs):
|
|
2718
|
+
num = min(num, self._map_dim-n_start)
|
|
2719
|
+
for n in range(num):
|
|
2720
|
+
self._fit(n_start+n, current_best_values, **kwargs)
|
|
2721
|
+
|
|
2722
|
+
def _fit(self, n, current_best_values, return_result=False, **kwargs):
    """Fit a single (flattened) map point `n` and record the results.

    Updates the `*_flat` result arrays in place via `_renormalize`
    and mutates `current_best_values` with the successful parameter
    values so later points start from them.
    """
    # Do not attempt a fit if the data is zero or entirely below
    # the cutoff
    y_max = self._ymap_norm[n].max()
    if (y_max == 0.0
            or (self._rel_height_cutoff is not None
                and y_max < self._rel_height_cutoff)):
        self._logger.debug(
            f'Skipping fit for n = {n} (rel norm = {y_max:.5f})')
        # Build an empty (unfitted) result object from whichever
        # backend is in use
        if self._code == 'scipy':
            from CHAP.utils.fit import ModelResult

            result = ModelResult(self._model, deepcopy(self._parameters))
        else:
            from lmfit.model import ModelResult

            result = ModelResult(self._model, deepcopy(self._parameters))
        result.success = False
        # Renormalize the data and results
        self._renormalize(n, result)
        return result

    # Regular full fit
    result = self._fit_with_bounds_check(n, current_best_values, **kwargs)

    if self._rel_height_cutoff is not None:
        # Third party modules
        from lmfit.models import (
            GaussianModel,
            LorentzianModel,
        )

        # Check for low heights peaks and refit without them
        heights = []
        names = []
        for component in result.components:
            if isinstance(component, (GaussianModel, LorentzianModel)):
                for name in component.param_names:
                    if 'height' in name:
                        heights.append(result.params[name].value)
                        names.append(name)
        if heights:
            refit = False
            max_height = max(heights)
            # Keep a pristine copy so the fixed peaks can be restored
            parameters_save = deepcopy(self._parameters)
            for i, (name, height) in enumerate(zip(names, heights)):
                if height < self._rel_height_cutoff*max_height:
                    # Pin the weak peak to zero amplitude/width and
                    # freeze its center for the refit
                    self._parameters[
                        name.replace('height', 'amplitude')].set(
                            value=0.0, min=0.0, vary=False)
                    self._parameters[
                        name.replace('height', 'center')].set(
                            vary=False)
                    self._parameters[
                        name.replace('height', 'sigma')].set(
                            value=0.0, min=0.0, vary=False)
                    refit = True
            if refit:
                result = self._fit_with_bounds_check(
                    n, current_best_values, **kwargs)
                # Reset fixed amplitudes back to default
                self._parameters = deepcopy(parameters_save)

    # A reduced chi-square above the cutoff counts as a failure
    if result.redchi >= self._redchi_cutoff:
        result.success = False
    self._num_func_eval_flat[n] = result.nfev
    if result.nfev == result.max_nfev:
        # Hit the evaluation budget: still accept the fit when the
        # residual is below the cutoff, but flag it in max_nfev
        if result.redchi < self._redchi_cutoff:
            result.success = True
        self._max_nfev_flat[n] = True
    if result.success:
        # NOTE(review): this assert is vacuously True as written
        # (every element of the generator is the literal True)
        assert all(
            True for par in current_best_values
            if par in result.params.values())
        # Carry the successful values forward as the starting point
        # for the next map point
        for par in result.params.values():
            if par.vary:
                current_best_values[par.name] = par.value
    else:
        errortxt = f'Fit for n = {n} failed'
        if hasattr(result, 'lmdif_message'):
            errortxt += f'\n\t{result.lmdif_message}'
        if hasattr(result, 'message'):
            errortxt += f'\n\t{result.message}'
        self._logger.warning(f'{errortxt}')

    # Renormalize the data and results
    self._renormalize(n, result)

    if self._print_report:
        print(result.fit_report(show_correl=False))
    if self._plot:
        # Map the flat index back to (possibly transposed) map dims
        dims = np.unravel_index(n, self._map_shape)
        if self._inv_transpose is not None:
            dims = tuple(
                dims[self._inv_transpose[i]] for i in range(len(dims)))
        super().plot(
            result=result, y=np.asarray(self._ymap[dims]),
            plot_comp_legends=True, skip_init=self._skip_init,
            title=str(dims))

    if return_result:
        return result
    return None
|
|
2825
|
+
|
|
2826
|
+
def _fit_with_bounds_check(self, n, current_best_values, **kwargs):
    """Fit map point `n`, retrying with bounds if the unbounded fit
    leaves a parameter outside its recorded bounds.

    The first pass runs with whatever bounds are currently set on
    `self._parameters` (unbounded when `self._try_no_bounds`); if any
    varying parameter ends up outside the saved bounds in
    `self._parameter_bounds`, the fit is rerun with those bounds
    re-applied.
    """
    # Set parameters to current best values, but prevent them from
    # sitting at boundaries
    if self._new_parameters is None:
        # Initial fit
        for name, value in current_best_values.items():
            par = self._parameters[name]
            if par.vary:
                par.set(value=value)
    else:
        # Refit: new parameters start from the running best values,
        # previously-tracked ones from the prior fit's best values
        for i, name in enumerate(self._best_parameters):
            par = self._parameters[name]
            if par.vary:
                if name in self._new_parameters:
                    if name in current_best_values:
                        par.set(value=current_best_values[name])
                elif par.expr is None:
                    par.set(value=self._best_values[i][n])
    self._reset_par_at_boundary()
    result = self._fit_nonlinear_model(
        self._x, self._ymap_norm[n], **kwargs)
    # Check whether any varying parameter escaped its saved bounds
    out_of_bounds = False
    for name, par in self._parameter_bounds.items():
        if self._parameters[name].vary:
            value = result.params[name].value
            if not np.isinf(par['min']) and value < par['min']:
                out_of_bounds = True
                break
            if not np.isinf(par['max']) and value > par['max']:
                out_of_bounds = True
                break
    self._out_of_bounds_flat[n] = out_of_bounds
    if self._try_no_bounds and out_of_bounds:
        # Rerun fit with parameter bounds in place
        for name, par in self._parameter_bounds.items():
            if self._parameters[name].vary:
                self._parameters[name].set(min=par['min'], max=par['max'])
        # Set parameters to current best values, but prevent them
        # from sitting at boundaries
        if self._new_parameters is None:
            # Initial fit
            for name, value in current_best_values.items():
                par = self._parameters[name]
                if par.vary:
                    par.set(value=value)
        else:
            # Refit
            for i, name in enumerate(self._best_parameters):
                par = self._parameters[name]
                if par.vary:
                    if name in self._new_parameters:
                        if name in current_best_values:
                            par.set(value=current_best_values[name])
                    elif par.expr is None:
                        par.set(value=self._best_values[i][n])
        self._reset_par_at_boundary()
        result = self._fit_nonlinear_model(
            self._x, self._ymap_norm[n], **kwargs)
        out_of_bounds = False
        for name, par in self._parameter_bounds.items():
            if self._parameters[name].vary:
                value = result.params[name].value
                if not np.isinf(par['min']) and value < par['min']:
                    out_of_bounds = True
                    break
                if not np.isinf(par['max']) and value > par['max']:
                    out_of_bounds = True
                    break
            # Reset parameters back to unbound
            # NOTE(review): because of the `break`s above, parameters
            # after an out-of-bounds one are not reset here — but the
            # assert below fires in that case anyway; confirm intent
            self._parameters[name].set(min=-np.inf, max=np.inf)
        # The bounded fit must respect its own bounds
        assert not out_of_bounds
    return result
|
|
2899
|
+
|
|
2900
|
+
def _renormalize(self, n, result):
|
|
2901
|
+
self._success_flat[n] = result.success
|
|
2902
|
+
if result.success:
|
|
2903
|
+
self._redchi_flat[n] = np.float64(result.redchi)
|
|
2904
|
+
if self._norm is None or not self._normalized:
|
|
2905
|
+
for i, name in enumerate(self._best_parameters):
|
|
2906
|
+
self._best_values_flat[i][n] = np.float64(
|
|
2907
|
+
result.params[name].value)
|
|
2908
|
+
self._best_errors_flat[i][n] = np.float64(
|
|
2909
|
+
result.params[name].stderr)
|
|
2910
|
+
if result.success:
|
|
2911
|
+
self._best_fit_flat[n] = result.best_fit
|
|
2912
|
+
else:
|
|
2913
|
+
for name, par in result.params.items():
|
|
2914
|
+
if name in self._linear_parameters:
|
|
2915
|
+
if par.stderr is not None:
|
|
2916
|
+
if self._code == 'scipy':
|
|
2917
|
+
setattr(par, '_stderr', par.stderr*self._norm[1])
|
|
2918
|
+
else:
|
|
2919
|
+
par.stderr *= self._norm[1]
|
|
2920
|
+
if par.expr is None:
|
|
2921
|
+
par.value *= self._norm[1]
|
|
2922
|
+
if self._print_report:
|
|
2923
|
+
if par.init_value is not None:
|
|
2924
|
+
if self._code == 'scipy':
|
|
2925
|
+
setattr(par, '_init_value',
|
|
2926
|
+
par.init_value*self._norm[1])
|
|
2927
|
+
else:
|
|
2928
|
+
par.init_value *= self._norm[1]
|
|
2929
|
+
if (not np.isinf(par.min)
|
|
2930
|
+
and abs(par.min) != FLOAT_MIN):
|
|
2931
|
+
par.min *= self._norm[1]
|
|
2932
|
+
if (not np.isinf(par.max)
|
|
2933
|
+
and abs(par.max) != FLOAT_MIN):
|
|
2934
|
+
par.max *= self._norm[1]
|
|
2935
|
+
for i, name in enumerate(self._best_parameters):
|
|
2936
|
+
self._best_values_flat[i][n] = np.float64(
|
|
2937
|
+
result.params[name].value)
|
|
2938
|
+
self._best_errors_flat[i][n] = np.float64(
|
|
2939
|
+
result.params[name].stderr)
|
|
2940
|
+
if result.success:
|
|
2941
|
+
self._best_fit_flat[n] = (
|
|
2942
|
+
result.best_fit*self._norm[1] + self._norm[0])
|
|
2943
|
+
if self._plot:
|
|
2944
|
+
if not self._skip_init:
|
|
2945
|
+
result.init_fit = (
|
|
2946
|
+
result.init_fit*self._norm[1] + self._norm[0])
|
|
2947
|
+
result.best_fit = np.copy(self._best_fit_flat[n])
|