ChessAnalysisPipeline-0.0.2-py3-none-any.whl → ChessAnalysisPipeline-0.0.4-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of ChessAnalysisPipeline might be problematic.
- CHAP/__init__.py +3 -0
- CHAP/common/__init__.py +19 -0
- CHAP/common/models/__init__.py +2 -0
- CHAP/common/models/integration.py +515 -0
- CHAP/common/models/map.py +535 -0
- CHAP/common/processor.py +644 -0
- CHAP/common/reader.py +119 -0
- CHAP/common/utils/__init__.py +37 -0
- CHAP/common/utils/fit.py +2613 -0
- CHAP/common/utils/general.py +1225 -0
- CHAP/common/utils/material.py +231 -0
- CHAP/common/utils/scanparsers.py +785 -0
- CHAP/common/writer.py +96 -0
- CHAP/edd/__init__.py +7 -0
- CHAP/edd/models.py +215 -0
- CHAP/edd/processor.py +321 -0
- CHAP/edd/reader.py +5 -0
- CHAP/edd/writer.py +5 -0
- CHAP/inference/__init__.py +3 -0
- CHAP/inference/processor.py +68 -0
- CHAP/inference/reader.py +5 -0
- CHAP/inference/writer.py +5 -0
- CHAP/pipeline.py +1 -1
- CHAP/processor.py +11 -818
- CHAP/reader.py +18 -113
- CHAP/saxswaxs/__init__.py +6 -0
- CHAP/saxswaxs/processor.py +5 -0
- CHAP/saxswaxs/reader.py +5 -0
- CHAP/saxswaxs/writer.py +5 -0
- CHAP/sin2psi/__init__.py +7 -0
- CHAP/sin2psi/processor.py +5 -0
- CHAP/sin2psi/reader.py +5 -0
- CHAP/sin2psi/writer.py +5 -0
- CHAP/tomo/__init__.py +5 -0
- CHAP/tomo/models.py +125 -0
- CHAP/tomo/processor.py +2009 -0
- CHAP/tomo/reader.py +5 -0
- CHAP/tomo/writer.py +5 -0
- CHAP/writer.py +17 -167
- {ChessAnalysisPipeline-0.0.2.dist-info → ChessAnalysisPipeline-0.0.4.dist-info}/METADATA +1 -1
- ChessAnalysisPipeline-0.0.4.dist-info/RECORD +50 -0
- CHAP/async.py +0 -56
- ChessAnalysisPipeline-0.0.2.dist-info/RECORD +0 -17
- {ChessAnalysisPipeline-0.0.2.dist-info → ChessAnalysisPipeline-0.0.4.dist-info}/LICENSE +0 -0
- {ChessAnalysisPipeline-0.0.2.dist-info → ChessAnalysisPipeline-0.0.4.dist-info}/WHEEL +0 -0
- {ChessAnalysisPipeline-0.0.2.dist-info → ChessAnalysisPipeline-0.0.4.dist-info}/entry_points.txt +0 -0
- {ChessAnalysisPipeline-0.0.2.dist-info → ChessAnalysisPipeline-0.0.4.dist-info}/top_level.txt +0 -0
CHAP/common/utils/fit.py
ADDED
@@ -0,0 +1,2613 @@
+#!/usr/bin/env python3
+
+# -*- coding: utf-8 -*-
+"""
+Created on Mon Dec 6 15:36:22 2021
+
+@author: rv43
+"""
+
+import logging
+
+try:
+    from asteval import Interpreter, get_ast_names
+except:
+    pass
+from copy import deepcopy
+try:
+    from lmfit import Model, Parameters
+    from lmfit.model import ModelResult
+    from lmfit.models import ConstantModel, LinearModel, QuadraticModel, PolynomialModel,\
+        ExponentialModel, StepModel, RectangleModel, ExpressionModel, GaussianModel,\
+        LorentzianModel
+except:
+    pass
+import numpy as np
+from os import cpu_count, getpid, listdir, mkdir, path
+from re import compile, sub
+from shutil import rmtree
+try:
+    from sympy import diff, simplify
+except:
+    pass
+try:
+    from joblib import Parallel, delayed
+    have_joblib = True
+except:
+    have_joblib = False
+try:
+    import xarray as xr
+    have_xarray = True
+except:
+    have_xarray = False
+
+try:
+    from .general import illegal_value, is_int, is_dict_series, is_index, index_nearest, \
+        almost_equal, quick_plot #, eval_expr
+except:
+    try:
+        from sys import path as syspath
+        syspath.append(f'/nfs/chess/user/rv43/msnctools/msnctools')
+        from general import illegal_value, is_int, is_dict_series, is_index, index_nearest, \
+            almost_equal, quick_plot #, eval_expr
+    except:
+        from general import illegal_value, is_int, is_dict_series, is_index, index_nearest, \
+            almost_equal, quick_plot #, eval_expr
+
+from sys import float_info
+float_min = float_info.min
+float_max = float_info.max
+
+# sigma = fwhm_factor*fwhm
+fwhm_factor = {
+    'gaussian': f'fwhm/(2*sqrt(2*log(2)))',
+    'lorentzian': f'0.5*fwhm',
+    'splitlorentzian': f'0.5*fwhm', # sigma = sigma_r
+    'voight': f'0.2776*fwhm', # sigma = gamma
+    'pseudovoight': f'0.5*fwhm'} # fraction = 0.5
+
+# amplitude = height_factor*height*fwhm
+height_factor = {
+    'gaussian': f'height*fwhm*0.5*sqrt(pi/log(2))',
+    'lorentzian': f'height*fwhm*0.5*pi',
+    'splitlorentzian': f'height*fwhm*0.5*pi', # sigma = sigma_r
+    'voight': f'3.334*height*fwhm', # sigma = gamma
+    'pseudovoight': f'1.268*height*fwhm'} # fraction = 0.5
+
+class Fit:
+    """Wrapper class for lmfit
+    """
+    def __init__(self, y, x=None, models=None, normalize=True, **kwargs):
+        if not isinstance(normalize, bool):
+            raise ValueError(f'Invalid parameter normalize ({normalize})')
+        self._mask = None
+        self._model = None
+        self._norm = None
+        self._normalized = False
+        self._parameters = Parameters()
+        self._parameter_bounds = None
+        self._parameter_norms = {}
+        self._linear_parameters = []
+        self._nonlinear_parameters = []
+        self._result = None
+        self._try_linear_fit = True
+        self._y = None
+        self._y_norm = None
+        self._y_range = None
+        if 'try_linear_fit' in kwargs:
+            try_linear_fit = kwargs.pop('try_linear_fit')
+            if not isinstance(try_linear_fit, bool):
+                illegal_value(try_linear_fit, 'try_linear_fit', 'Fit.fit', raise_error=True)
+            self._try_linear_fit = try_linear_fit
+        if y is not None:
+            if isinstance(y, (tuple, list, np.ndarray)):
+                self._x = np.asarray(x)
+                self._y = np.asarray(y)
+            elif have_xarray and isinstance(y, xr.DataArray):
+                if x is not None:
+                    logging.warning('Ignoring superfluous input x ({x}) in Fit.__init__')
+                if y.ndim != 1:
+                    illegal_value(y.ndim, 'DataArray dimensions', 'Fit:__init__', raise_error=True)
+                self._x = np.asarray(y[y.dims[0]])
+                self._y = y
+            else:
+                illegal_value(y, 'y', 'Fit:__init__', raise_error=True)
+            if self._x.ndim != 1:
+                raise ValueError(f'Invalid dimension for input x ({self._x.ndim})')
+            if self._x.size != self._y.size:
+                raise ValueError(f'Inconsistent x and y dimensions ({self._x.size} vs '+
+                        f'{self._y.size})')
+            if 'mask' in kwargs:
+                self._mask = kwargs.pop('mask')
+            if self._mask is None:
+                y_min = float(self._y.min())
+                self._y_range = float(self._y.max())-y_min
+                if normalize and self._y_range > 0.0:
+                    self._norm = (y_min, self._y_range)
+            else:
+                self._mask = np.asarray(self._mask).astype(bool)
+                if self._x.size != self._mask.size:
+                    raise ValueError(f'Inconsistent x and mask dimensions ({self._x.size} vs '+
+                            f'{self._mask.size})')
+                y_masked = np.asarray(self._y)[~self._mask]
+                y_min = float(y_masked.min())
+                self._y_range = float(y_masked.max())-y_min
+                if normalize and self._y_range > 0.0:
+                    if normalize and self._y_range > 0.0:
+                        self._norm = (y_min, self._y_range)
+        if models is not None:
+            if callable(models) or isinstance(models, str):
+                kwargs = self.add_model(models, **kwargs)
+            elif isinstance(models, (tuple, list)):
+                for model in models:
+                    kwargs = self.add_model(model, **kwargs)
+            self.fit(**kwargs)
+
+    @classmethod
+    def fit_data(cls, y, models, x=None, normalize=True, **kwargs):
+        return(cls(y, x=x, models=models, normalize=normalize, **kwargs))
+
+    @property
+    def best_errors(self):
+        if self._result is None:
+            return(None)
+        return({name:self._result.params[name].stderr for name in sorted(self._result.params)
+                if name != 'tmp_normalization_offset_c'})
+
+    @property
+    def best_fit(self):
+        if self._result is None:
+            return(None)
+        return(self._result.best_fit)
+
+    @property
+    def best_parameters(self):
+        if self._result is None:
+            return(None)
+        parameters = {}
+        for name in sorted(self._result.params):
+            if name != 'tmp_normalization_offset_c':
+                par = self._result.params[name]
+                parameters[name] = {'value': par.value, 'error': par.stderr,
+                        'init_value': par.init_value, 'min': par.min, 'max': par.max,
+                        'vary': par.vary, 'expr': par.expr}
+        return(parameters)
+
+    @property
+    def best_results(self):
+        """Convert the input data array to a data set and add the fit results.
+        """
+        if self._result is None:
+            return(None)
+        if not have_xarray:
+            logging.warning('fit.best_results requires xarray in the conda environment')
+            return(None)
+        if isinstance(self._y, xr.DataArray):
+            best_results = self._y.to_dataset()
+            dims = self._y.dims
+            fit_name = f'{self._y.name}_fit'
+        else:
+            coords = {'x': (['x'], self._x)}
+            dims = ('x')
+            best_results = xr.Dataset(coords=coords)
+            best_results['y'] = (dims, self._y)
+            fit_name = 'y_fit'
+        best_results[fit_name] = (dims, self.best_fit)
+        if self._mask is not None:
+            best_results['mask'] = self._mask
+        best_results.coords['par_names'] = ('peak', [name for name in self.best_values.keys()])
+        best_results['best_values'] = (['par_names'], [v for v in self.best_values.values()])
+        best_results['best_errors'] = (['par_names'], [v for v in self.best_errors.values()])
+        best_results.attrs['components'] = self.components
+        return(best_results)
+
+    @property
+    def best_values(self):
+        if self._result is None:
+            return(None)
+        return({name:self._result.params[name].value for name in sorted(self._result.params)
+                if name != 'tmp_normalization_offset_c'})
+
+    @property
+    def chisqr(self):
+        if self._result is None:
+            return(None)
+        return(self._result.chisqr)
+
+    @property
+    def components(self):
+        components = {}
+        if self._result is None:
+            logging.warning('Unable to collect components in Fit.components')
+            return(components)
+        for component in self._result.components:
+            if 'tmp_normalization_offset_c' in component.param_names:
+                continue
+            parameters = {}
+            for name in component.param_names:
+                par = self._parameters[name]
+                parameters[name] = {'free': par.vary, 'value': self._result.params[name].value}
+                if par.expr is not None:
+                    parameters[name]['expr'] = par.expr
+            expr = None
+            if isinstance(component, ExpressionModel):
+                name = component._name
+                if name[-1] == '_':
+                    name = name[:-1]
+                expr = component.expr
+            else:
+                prefix = component.prefix
+                if len(prefix):
+                    if prefix[-1] == '_':
+                        prefix = prefix[:-1]
+                    name = f'{prefix} ({component._name})'
+                else:
+                    name = f'{component._name}'
+            if expr is None:
+                components[name] = {'parameters': parameters}
+            else:
+                components[name] = {'expr': expr, 'parameters': parameters}
+        return(components)
+
+    @property
+    def covar(self):
+        if self._result is None:
+            return(None)
+        return(self._result.covar)
+
+    @property
+    def init_parameters(self):
+        if self._result is None or self._result.init_params is None:
+            return(None)
+        parameters = {}
+        for name in sorted(self._result.init_params):
+            if name != 'tmp_normalization_offset_c':
+                par = self._result.init_params[name]
+                parameters[name] = {'value': par.value, 'min': par.min, 'max': par.max,
+                        'vary': par.vary, 'expr': par.expr}
+        return(parameters)
+
+    @property
+    def init_values(self):
+        if self._result is None or self._result.init_params is None:
+            return(None)
+        return({name:self._result.init_params[name].value for name in
+                sorted(self._result.init_params) if name != 'tmp_normalization_offset_c'})
+
+    @property
+    def normalization_offset(self):
+        if self._result is None:
+            return(None)
+        if self._norm is None:
+            return(0.0)
+        else:
+            if self._result.init_params is not None:
+                normalization_offset = float(self._result.init_params['tmp_normalization_offset_c'])
+            else:
+                normalization_offset = float(self._result.params['tmp_normalization_offset_c'])
+            return(normalization_offset)
+
+    @property
+    def num_func_eval(self):
+        if self._result is None:
+            return(None)
+        return(self._result.nfev)
+
+    @property
+    def parameters(self):
+        return({name:{'min': par.min, 'max': par.max, 'vary': par.vary, 'expr': par.expr}
+                for name, par in self._parameters.items() if name != 'tmp_normalization_offset_c'})
+
+    @property
+    def redchi(self):
+        if self._result is None:
+            return(None)
+        return(self._result.redchi)
+
+    @property
+    def residual(self):
+        if self._result is None:
+            return(None)
+        return(self._result.residual)
+
+    @property
+    def success(self):
+        if self._result is None:
+            return(None)
+        if not self._result.success:
+            # print(f'ier = {self._result.ier}')
+            # print(f'lmdif_message = {self._result.lmdif_message}')
+            # print(f'message = {self._result.message}')
+            # print(f'nfev = {self._result.nfev}')
+            # print(f'redchi = {self._result.redchi}')
+            # print(f'success = {self._result.success}')
+            if self._result.ier == 0 or self._result.ier == 5:
+                logging.warning(f'ier = {self._result.ier}: {self._result.message}')
+            else:
+                logging.warning(f'ier = {self._result.ier}: {self._result.message}')
+                return(True)
+            # self.print_fit_report()
+            # self.plot()
+        return(self._result.success)
+
+    @property
+    def var_names(self):
+        """Intended to be used with covar
+        """
+        if self._result is None:
+            return(None)
+        return(getattr(self._result, 'var_names', None))
+
+    @property
+    def x(self):
+        return(self._x)
+
+    @property
+    def y(self):
+        return(self._y)
+
+    def print_fit_report(self, result=None, show_correl=False):
+        if result is None:
+            result = self._result
+        if result is not None:
+            print(result.fit_report(show_correl=show_correl))
+
+    def add_parameter(self, **parameter):
+        if not isinstance(parameter, dict):
+            raise ValueError(f'Invalid parameter ({parameter})')
+        if parameter.get('expr') is not None:
+            raise KeyError(f'Invalid "expr" key in parameter {parameter}')
+        name = parameter['name']
+        if not isinstance(name, str):
+            raise ValueError(f'Invalid "name" value ({name}) in parameter {parameter}')
+        if parameter.get('norm') is None:
+            self._parameter_norms[name] = False
+        else:
+            norm = parameter.pop('norm')
+            if self._norm is None:
+                logging.warning(f'Ignoring norm in parameter {name} in '+
+                        f'Fit.add_parameter (normalization is turned off)')
+                self._parameter_norms[name] = False
+            else:
+                if not isinstance(norm, bool):
+                    raise ValueError(f'Invalid "norm" value ({norm}) in parameter {parameter}')
+                self._parameter_norms[name] = norm
+        vary = parameter.get('vary')
+        if vary is not None:
+            if not isinstance(vary, bool):
+                raise ValueError(f'Invalid "vary" value ({vary}) in parameter {parameter}')
+            if not vary:
+                if 'min' in parameter:
+                    logging.warning(f'Ignoring min in parameter {name} in '+
+                            f'Fit.add_parameter (vary = {vary})')
+                    parameter.pop('min')
+                if 'max' in parameter:
+                    logging.warning(f'Ignoring max in parameter {name} in '+
+                            f'Fit.add_parameter (vary = {vary})')
+                    parameter.pop('max')
+        if self._norm is not None and name not in self._parameter_norms:
+            raise ValueError(f'Missing parameter normalization type for paremeter {name}')
+        self._parameters.add(**parameter)
+
+    def add_model(self, model, prefix=None, parameters=None, parameter_norms=None, **kwargs):
+        # Create the new model
+        # print(f'at start add_model:\nself._parameters:\n{self._parameters}')
+        # print(f'at start add_model: kwargs = {kwargs}')
+        # print(f'parameters = {parameters}')
+        # print(f'parameter_norms = {parameter_norms}')
+        # if len(self._parameters.keys()):
+        # print('\nAt start adding model:')
+        # self._parameters.pretty_print()
+        # print(f'parameter_norms:\n{self._parameter_norms}')
+        if prefix is not None and not isinstance(prefix, str):
+            logging.warning('Ignoring illegal prefix: {model} {type(model)}')
+            prefix = None
+        if prefix is None:
+            pprefix = ''
+        else:
+            pprefix = prefix
+        if parameters is not None:
+            if isinstance(parameters, dict):
+                parameters = (parameters, )
+            elif not is_dict_series(parameters):
+                illegal_value(parameters, 'parameters', 'Fit.add_model', raise_error=True)
+            parameters = deepcopy(parameters)
+        if parameter_norms is not None:
+            if isinstance(parameter_norms, dict):
+                parameter_norms = (parameter_norms, )
+            if not is_dict_series(parameter_norms):
+                illegal_value(parameter_norms, 'parameter_norms', 'Fit.add_model', raise_error=True)
+        new_parameter_norms = {}
+        if callable(model):
+            # Linear fit not yet implemented for callable models
+            self._try_linear_fit = False
+            if parameter_norms is None:
+                if parameters is None:
+                    raise ValueError('Either "parameters" or "parameter_norms" is required in '+
+                            f'{model}')
+                for par in parameters:
+                    name = par['name']
+                    if not isinstance(name, str):
+                        raise ValueError(f'Invalid "name" value ({name}) in input parameters')
+                    if par.get('norm') is not None:
+                        norm = par.pop('norm')
+                        if not isinstance(norm, bool):
+                            raise ValueError(f'Invalid "norm" value ({norm}) in input parameters')
+                        new_parameter_norms[f'{pprefix}{name}'] = norm
+            else:
+                for par in parameter_norms:
+                    name = par['name']
+                    if not isinstance(name, str):
+                        raise ValueError(f'Invalid "name" value ({name}) in input parameters')
+                    norm = par.get('norm')
+                    if norm is None or not isinstance(norm, bool):
+                        raise ValueError(f'Invalid "norm" value ({norm}) in input parameters')
+                    new_parameter_norms[f'{pprefix}{name}'] = norm
+            if parameters is not None:
+                for par in parameters:
+                    if par.get('expr') is not None:
+                        raise KeyError(f'Invalid "expr" key ({par.get("expr")}) in parameter '+
+                                f'{name} for a callable model {model}')
+                    name = par['name']
+                    if not isinstance(name, str):
+                        raise ValueError(f'Invalid "name" value ({name}) in input parameters')
+            # RV FIX callable model will need partial deriv functions for any linear pars to get the linearized matrix, so for now skip linear solution option
+            newmodel = Model(model, prefix=prefix)
+        elif isinstance(model, str):
+            if model == 'constant': # Par: c
+                newmodel = ConstantModel(prefix=prefix)
+                new_parameter_norms[f'{pprefix}c'] = True
+                self._linear_parameters.append(f'{pprefix}c')
+            elif model == 'linear': # Par: slope, intercept
+                newmodel = LinearModel(prefix=prefix)
+                new_parameter_norms[f'{pprefix}slope'] = True
+                new_parameter_norms[f'{pprefix}intercept'] = True
+                self._linear_parameters.append(f'{pprefix}slope')
+                self._linear_parameters.append(f'{pprefix}intercept')
+            elif model == 'quadratic': # Par: a, b, c
+                newmodel = QuadraticModel(prefix=prefix)
+                new_parameter_norms[f'{pprefix}a'] = True
+                new_parameter_norms[f'{pprefix}b'] = True
+                new_parameter_norms[f'{pprefix}c'] = True
+                self._linear_parameters.append(f'{pprefix}a')
+                self._linear_parameters.append(f'{pprefix}b')
+                self._linear_parameters.append(f'{pprefix}c')
+            elif model == 'polynomial': # Par: c0, c1,..., c7
+                degree = kwargs.get('degree')
+                if degree is not None:
+                    kwargs.pop('degree')
+                if degree is None or not is_int(degree, ge=0, le=7):
+                    raise ValueError(f'Invalid parameter degree for build-in step model ({degree})')
+                newmodel = PolynomialModel(degree=degree, prefix=prefix)
+                for i in range(degree+1):
+                    new_parameter_norms[f'{pprefix}c{i}'] = True
+                    self._linear_parameters.append(f'{pprefix}c{i}')
+            elif model == 'gaussian': # Par: amplitude, center, sigma (fwhm, height)
+                newmodel = GaussianModel(prefix=prefix)
+                new_parameter_norms[f'{pprefix}amplitude'] = True
+                new_parameter_norms[f'{pprefix}center'] = False
+                new_parameter_norms[f'{pprefix}sigma'] = False
+                self._linear_parameters.append(f'{pprefix}amplitude')
+                self._nonlinear_parameters.append(f'{pprefix}center')
+                self._nonlinear_parameters.append(f'{pprefix}sigma')
+                # parameter norms for height and fwhm are needed to get correct errors
+                new_parameter_norms[f'{pprefix}height'] = True
+                new_parameter_norms[f'{pprefix}fwhm'] = False
+            elif model == 'lorentzian': # Par: amplitude, center, sigma (fwhm, height)
+                newmodel = LorentzianModel(prefix=prefix)
+                new_parameter_norms[f'{pprefix}amplitude'] = True
+                new_parameter_norms[f'{pprefix}center'] = False
+                new_parameter_norms[f'{pprefix}sigma'] = False
+                self._linear_parameters.append(f'{pprefix}amplitude')
+                self._nonlinear_parameters.append(f'{pprefix}center')
+                self._nonlinear_parameters.append(f'{pprefix}sigma')
+                # parameter norms for height and fwhm are needed to get correct errors
+                new_parameter_norms[f'{pprefix}height'] = True
+                new_parameter_norms[f'{pprefix}fwhm'] = False
+            elif model == 'exponential': # Par: amplitude, decay
+                newmodel = ExponentialModel(prefix=prefix)
+                new_parameter_norms[f'{pprefix}amplitude'] = True
+                new_parameter_norms[f'{pprefix}decay'] = False
+                self._linear_parameters.append(f'{pprefix}amplitude')
+                self._nonlinear_parameters.append(f'{pprefix}decay')
+            elif model == 'step': # Par: amplitude, center, sigma
+                form = kwargs.get('form')
+                if form is not None:
+                    kwargs.pop('form')
+                if form is None or form not in ('linear', 'atan', 'arctan', 'erf', 'logistic'):
+                    raise ValueError(f'Invalid parameter form for build-in step model ({form})')
+                newmodel = StepModel(prefix=prefix, form=form)
+                new_parameter_norms[f'{pprefix}amplitude'] = True
+                new_parameter_norms[f'{pprefix}center'] = False
+                new_parameter_norms[f'{pprefix}sigma'] = False
+                self._linear_parameters.append(f'{pprefix}amplitude')
+                self._nonlinear_parameters.append(f'{pprefix}center')
+                self._nonlinear_parameters.append(f'{pprefix}sigma')
+            elif model == 'rectangle': # Par: amplitude, center1, center2, sigma1, sigma2
+                form = kwargs.get('form')
+                if form is not None:
+                    kwargs.pop('form')
+                if form is None or form not in ('linear', 'atan', 'arctan', 'erf', 'logistic'):
+                    raise ValueError('Invalid parameter form for build-in rectangle model '+
+                            f'({form})')
+                newmodel = RectangleModel(prefix=prefix, form=form)
+                new_parameter_norms[f'{pprefix}amplitude'] = True
+                new_parameter_norms[f'{pprefix}center1'] = False
+                new_parameter_norms[f'{pprefix}center2'] = False
+                new_parameter_norms[f'{pprefix}sigma1'] = False
+                new_parameter_norms[f'{pprefix}sigma2'] = False
+                self._linear_parameters.append(f'{pprefix}amplitude')
+                self._nonlinear_parameters.append(f'{pprefix}center1')
+                self._nonlinear_parameters.append(f'{pprefix}center2')
+                self._nonlinear_parameters.append(f'{pprefix}sigma1')
+                self._nonlinear_parameters.append(f'{pprefix}sigma2')
+            elif model == 'expression': # Par: by expression
+                expr = kwargs['expr']
+                if not isinstance(expr, str):
+                    raise ValueError(f'Invalid "expr" value ({expr}) in {model}')
+                kwargs.pop('expr')
+                if parameter_norms is not None:
+                    logging.warning('Ignoring parameter_norms (normalization determined from '+
+                            'linearity)}')
+                if parameters is not None:
+                    for par in parameters:
+                        if par.get('expr') is not None:
+                            raise KeyError(f'Invalid "expr" key ({par.get("expr")}) in parameter '+
+                                    f'({par}) for an expression model')
+                        if par.get('norm') is not None:
+                            logging.warning(f'Ignoring "norm" key in parameter ({par}) '+
+                                    '(normalization determined from linearity)}')
+                            par.pop('norm')
+                        name = par['name']
+                        if not isinstance(name, str):
+                            raise ValueError(f'Invalid "name" value ({name}) in input parameters')
+                ast = Interpreter()
+                expr_parameters = [name for name in get_ast_names(ast.parse(expr))
+                        if name != 'x' and name not in self._parameters
+                        and name not in ast.symtable]
+                # print(f'\nexpr_parameters: {expr_parameters}')
+                # print(f'expr = {expr}')
+                if prefix is None:
+                    newmodel = ExpressionModel(expr=expr)
+                else:
+                    for name in expr_parameters:
+                        expr = sub(rf'\b{name}\b', f'{prefix}{name}', expr)
+                    expr_parameters = [f'{prefix}{name}' for name in expr_parameters]
+                    # print(f'\nexpr_parameters: {expr_parameters}')
+                    # print(f'expr = {expr}')
+                    newmodel = ExpressionModel(expr=expr, name=name)
+                # print(f'\nnewmodel = {newmodel.__dict__}')
+                # print(f'params_names = {newmodel._param_names}')
+                # print(f'params_names = {newmodel.param_names}')
+                # Remove already existing names
+                for name in newmodel.param_names.copy():
+                    if name not in expr_parameters:
+                        newmodel._func_allargs.remove(name)
+                        newmodel._param_names.remove(name)
+                # print(f'params_names = {newmodel._param_names}')
+                # print(f'params_names = {newmodel.param_names}')
+            else:
+                raise ValueError(f'Unknown build-in fit model ({model})')
+        else:
+            illegal_value(model, 'model', 'Fit.add_model', raise_error=True)
+
+        # Add the new model to the current one
+        # print('\nBefore adding model:')
+        # print(f'\nnewmodel = {newmodel.__dict__}')
+        # if len(self._parameters):
+        # self._parameters.pretty_print()
+        if self._model is None:
+            self._model = newmodel
+        else:
+            self._model += newmodel
+        new_parameters = newmodel.make_params()
+        self._parameters += new_parameters
+        # print('\nAfter adding model:')
+        # print(f'\nnewmodel = {newmodel.__dict__}')
+        # print(f'\nnew_parameters = {new_parameters}')
+        # self._parameters.pretty_print()
+
+        # Check linearity of expression model paremeters
+        if isinstance(newmodel, ExpressionModel):
+            for name in newmodel.param_names:
+                if not diff(newmodel.expr, name, name):
+                    if name not in self._linear_parameters:
+                        self._linear_parameters.append(name)
+                        new_parameter_norms[name] = True
+                        # print(f'\nADDING {name} TO LINEAR')
+                else:
+                    if name not in self._nonlinear_parameters:
+                        self._nonlinear_parameters.append(name)
+                        new_parameter_norms[name] = False
+                        # print(f'\nADDING {name} TO NONLINEAR')
+            # print(f'new_parameter_norms:\n{new_parameter_norms}')
+
+        # Scale the default initial model parameters
+        if self._norm is not None:
+            for name, norm in new_parameter_norms.copy().items():
+                par = self._parameters.get(name)
+                if par is None:
+                    new_parameter_norms.pop(name)
+                    continue
+                if par.expr is None and norm:
+                    value = par.value*self._norm[1]
+                    _min = par.min
+                    _max = par.max
+                    if not np.isinf(_min) and abs(_min) != float_min:
+                        _min *= self._norm[1]
+                    if not np.isinf(_max) and abs(_max) != float_min:
+                        _max *= self._norm[1]
+                    par.set(value=value, min=_min, max=_max)
+        # print('\nAfter norm defaults:')
+        # self._parameters.pretty_print()
+        # print(f'parameters:\n{parameters}')
+        # print(f'all_parameters:\n{list(self.parameters)}')
+        # print(f'new_parameter_norms:\n{new_parameter_norms}')
+        # print(f'parameter_norms:\n{self._parameter_norms}')
+
+        # Initialize the model parameters from parameters
+        if prefix is None:
+            prefix = ""
+        if parameters is not None:
+            for parameter in parameters:
+                name = parameter['name']
+                if not isinstance(name, str):
+                    raise ValueError(f'Invalid "name" value ({name}) in input parameters')
+                if name not in new_parameters:
+                    name = prefix+name
+                    parameter['name'] = name
+                if name not in new_parameters:
+                    logging.warning(f'Ignoring superfluous parameter info for {name}')
+                    continue
+                if name in self._parameters:
+                    parameter.pop('name')
+                    if 'norm' in parameter:
+                        if not isinstance(parameter['norm'], bool):
+                            illegal_value(parameter['norm'], 'norm', 'Fit.add_model',
+                                    raise_error=True)
+                        new_parameter_norms[name] = parameter['norm']
+                        parameter.pop('norm')
+                    if parameter.get('expr') is not None:
+                        if 'value' in parameter:
+                            logging.warning(f'Ignoring value in parameter {name} '+
+                                    f'(set by expression: {parameter["expr"]})')
+                            parameter.pop('value')
+                        if 'vary' in parameter:
+                            logging.warning(f'Ignoring vary in parameter {name} '+
+                                    f'(set by expression: {parameter["expr"]})')
+                            parameter.pop('vary')
+                        if 'min' in parameter:
+                            logging.warning(f'Ignoring min in parameter {name} '+
+                                    f'(set by expression: {parameter["expr"]})')
+                            parameter.pop('min')
+                        if 'max' in parameter:
+                            logging.warning(f'Ignoring max in parameter {name} '+
+                                    f'(set by expression: {parameter["expr"]})')
+                            parameter.pop('max')
+                    if 'vary' in parameter:
+                        if not isinstance(parameter['vary'], bool):
+                            illegal_value(parameter['vary'], 'vary', 'Fit.add_model',
+                                    raise_error=True)
+                        if not parameter['vary']:
+                            if 'min' in parameter:
+                                logging.warning(f'Ignoring min in parameter {name} in '+
+                                        f'Fit.add_model (vary = {parameter["vary"]})')
+                                parameter.pop('min')
+                            if 'max' in parameter:
+                                logging.warning(f'Ignoring max in parameter {name} in '+
+                                        f'Fit.add_model (vary = {parameter["vary"]})')
+                                parameter.pop('max')
+                    self._parameters[name].set(**parameter)
+                    parameter['name'] = name
+                else:
+                    illegal_value(parameter, 'parameter name', 'Fit.model', raise_error=True)
+        self._parameter_norms = {**self._parameter_norms, **new_parameter_norms}
+        # print('\nAfter parameter init:')
+        # self._parameters.pretty_print()
+        # print(f'parameters:\n{parameters}')
+        # print(f'new_parameter_norms:\n{new_parameter_norms}')
+        # print(f'parameter_norms:\n{self._parameter_norms}')
+        # print(f'kwargs:\n{kwargs}')
+
+        # Initialize the model parameters from kwargs
+        for name, value in {**kwargs}.items():
+            full_name = f'{pprefix}{name}'
+            if full_name in new_parameter_norms and isinstance(value, (int, float)):
+                kwargs.pop(name)
+                if self._parameters[full_name].expr is None:
+                    self._parameters[full_name].set(value=value)
+                else:
+                    logging.warning(f'Ignoring parameter {name} in Fit.fit (set by expression: '+
+                            f'{self._parameters[full_name].expr})')
+        # print('\nAfter kwargs init:')
+        # self._parameters.pretty_print()
+        # print(f'parameter_norms:\n{self._parameter_norms}')
+        # print(f'kwargs:\n{kwargs}')
+
+        # Check parameter norms (also need it for expressions to renormalize the errors)
+        if self._norm is not None and (callable(model) or model == 'expression'):
+            missing_norm = False
+            for name in new_parameters.valuesdict():
+                if name not in self._parameter_norms:
+                    print(f'new_parameters:\n{new_parameters.valuesdict()}')
+                    print(f'self._parameter_norms:\n{self._parameter_norms}')
+                    logging.error(f'Missing parameter normalization type for {name} in {model}')
+                    missing_norm = True
+            if missing_norm:
+                raise ValueError
+
+        # print(f'at end add_model:\nself._parameters:\n{list(self.parameters)}')
+        # print(f'at end add_model: kwargs = {kwargs}')
+        # print(f'\nat end add_model: newmodel:\n{newmodel.__dict__}\n')
+        return(kwargs)
+
+    def eval(self, x, result=None):
+        if result is None:
+            result = self._result
+        if result is None:
+            return
+        return(result.eval(x=np.asarray(x))-self.normalization_offset)
+
+    def fit(self, interactive=False, guess=False, **kwargs):
+        # Check inputs
+        if self._model is None:
+            logging.error('Undefined fit model')
+            return
+        if not isinstance(interactive, bool):
+            illegal_value(interactive, 'interactive', 'Fit.fit', raise_error=True)
+        if not isinstance(guess, bool):
+            illegal_value(guess, 'guess', 'Fit.fit', raise_error=True)
+        if 'try_linear_fit' in kwargs:
+            try_linear_fit = kwargs.pop('try_linear_fit')
+            if not isinstance(try_linear_fit, bool):
+                illegal_value(try_linear_fit, 'try_linear_fit', 'Fit.fit', raise_error=True)
+            if not self._try_linear_fit:
+                logging.warning('Ignore superfluous keyword argument "try_linear_fit" (not '+
+                        'yet supported for callable models)')
+            else:
+                self._try_linear_fit = try_linear_fit
+        # if self._result is None:
+        # if 'parameters' in kwargs:
+        # raise ValueError('Invalid parameter parameters ({kwargs["parameters"]})')
+        # else:
+        if self._result is not None:
+            if guess:
+                logging.warning('Ignoring input parameter guess in Fit.fit during refitting')
+                guess = False
+
+        # Check for circular expressions
+        # FIX TODO
+        # for name1, par1 in self._parameters.items():
+        # if par1.expr is not None:
+
+        # Apply mask if supplied:
+        if 'mask' in kwargs:
+            self._mask = kwargs.pop('mask')
+        if self._mask is not None:
+            self._mask = np.asarray(self._mask).astype(bool)
+            if self._x.size != self._mask.size:
+                raise ValueError(f'Inconsistent x and mask dimensions ({self._x.size} vs '+
+                        f'{self._mask.size})')
+
+        # Estimate initial parameters with build-in lmfit guess method (only for a single model)
+        # print(f'\nat start fit: kwargs = {kwargs}')
+        #RV print('\nAt start of fit:')
+        #RV self._parameters.pretty_print()
+        # print(f'parameter_norms:\n{self._parameter_norms}')
+        if guess:
+            if self._mask is None:
+                self._parameters = self._model.guess(self._y, x=self._x)
+            else:
+                self._parameters = self._model.guess(np.asarray(self._y)[~self._mask],
+                        x=self._x[~self._mask])
+        # print('\nAfter guess:')
+        # self._parameters.pretty_print()
+
+        # Add constant offset for a normalized model
+        if self._result is None and self._norm is not None and self._norm[0]:
+            self.add_model('constant', prefix='tmp_normalization_offset_', parameters={'name': 'c',
+                    'value': -self._norm[0], 'vary': False, 'norm': True})
+                    #'value': -self._norm[0]/self._norm[1], 'vary': False, 'norm': False})
+
+        # Adjust existing parameters for refit:
+        if 'parameters' in kwargs:
+            parameters = kwargs.pop('parameters')
+            if isinstance(parameters, dict):
+                parameters = (parameters, )
+            elif not is_dict_series(parameters):
+                illegal_value(parameters, 'parameters', 'Fit.fit', raise_error=True)
+            for par in parameters:
+                name = par['name']
+                if name not in self._parameters:
+                    raise ValueError(f'Unable to match {name} parameter {par} to an existing one')
+                if self._parameters[name].expr is not None:
+                    raise ValueError(f'Unable to modify {name} parameter {par} (currently an '+
+                            'expression)')
+                if par.get('expr') is not None:
+                    raise KeyError(f'Invalid "expr" key in {name} parameter {par}')
+                self._parameters[name].set(vary=par.get('vary'))
+                self._parameters[name].set(min=par.get('min'))
+                self._parameters[name].set(max=par.get('max'))
+                self._parameters[name].set(value=par.get('value'))
+        #RV print('\nAfter adjust:')
+        #RV self._parameters.pretty_print()
+
+        # Apply parameter updates through keyword arguments
+        # print(f'kwargs = {kwargs}')
+        # print(f'parameter_norms = {self._parameter_norms}')
+        for name in set(self._parameters) & set(kwargs):
+            value = kwargs.pop(name)
+            if self._parameters[name].expr is None:
+                self._parameters[name].set(value=value)
+            else:
+                logging.warning(f'Ignoring parameter {name} in Fit.fit (set by expression: '+
+                        f'{self._parameters[name].expr})')
+
+        # Check for uninitialized parameters
+        for name, par in self._parameters.items():
+            if par.expr is None:
+                value = par.value
+                if value is None or np.isinf(value) or np.isnan(value):
+                    if interactive:
+                        value = input_num(f'Enter an initial value for {name}', default=1.0)
+                    else:
+                        value = 1.0
+                    if self._norm is None or name not in self._parameter_norms:
+                        self._parameters[name].set(value=value)
+                    elif self._parameter_norms[name]:
+                        self._parameters[name].set(value=value*self._norm[1])
+
+        # Check if model is linear
+        try:
+            linear_model = self._check_linearity_model()
+        except:
+            linear_model = False
+        # print(f'\n\n--------> linear_model = {linear_model}\n')
+        if kwargs.get('check_only_linearity') is not None:
+            return(linear_model)
+
+        # Normalize the data and initial parameters
+        #RV print('\nBefore normalization:')
+        #RV self._parameters.pretty_print()
+        # print(f'parameter_norms:\n{self._parameter_norms}')
+        self._normalize()
+        # print(f'norm = {self._norm}')
+        #RV print('\nAfter normalization:')
+        #RV self._parameters.pretty_print()
+        # self.print_fit_report()
+        # print(f'parameter_norms:\n{self._parameter_norms}')
+
+        if linear_model:
+            # Perform a linear fit by direct matrix solution with numpy
+            try:
+                if self._mask is None:
+                    self._fit_linear_model(self._x, self._y_norm)
+                else:
+                    self._fit_linear_model(self._x[~self._mask],
+                            np.asarray(self._y_norm)[~self._mask])
+            except:
+                linear_model = False
+        if not linear_model:
+            # Perform a non-linear fit with lmfit
+            # Prevent initial values from sitting at boundaries
+            self._parameter_bounds = {name:{'min': par.min, 'max': par.max} for name, par in
+                    self._parameters.items() if par.vary}
+            for par in self._parameters.values():
+                if par.vary:
+                    par.set(value=self._reset_par_at_boundary(par, par.value))
+            # print('\nAfter checking boundaries:')
+            # self._parameters.pretty_print()
+
+            # Perform the fit
+            # fit_kws = None
+            # if 'Dfun' in kwargs:
+            # fit_kws = {'Dfun': kwargs.pop('Dfun')}
+            # self._result = self._model.fit(self._y_norm, self._parameters, x=self._x,
+            # fit_kws=fit_kws, **kwargs)
+            if self._mask is None:
+                self._result = self._model.fit(self._y_norm, self._parameters, x=self._x, **kwargs)
+            else:
+                self._result = self._model.fit(np.asarray(self._y_norm)[~self._mask],
+                        self._parameters, x=self._x[~self._mask], **kwargs)
+            #RV print('\nAfter fit:')
+            # print(f'\nself._result ({self._result}):\n\t{self._result.__dict__}')
+            #RV self._parameters.pretty_print()
+            # self.print_fit_report()
+
+        # Set internal parameter values to fit results upon success
+        if self.success:
+            for name, par in self._parameters.items():
+                if par.expr is None and par.vary:
+                    par.set(value=self._result.params[name].value)
+            # print('\nAfter update parameter values:')
+            # self._parameters.pretty_print()
+
+        # Renormalize the data and results
+        self._renormalize()
+        #RV print('\nAfter renormalization:')
+        #RV self._parameters.pretty_print()
+        # self.print_fit_report()
+
+    def plot(self, y=None, y_title=None, result=None, skip_init=False, plot_comp=True,
+            plot_comp_legends=False, plot_residual=False, plot_masked_data=True, **kwargs):
+        if result is None:
+            result = self._result
+        if result is None:
+            return
+        plots = []
+        legend = []
+        if self._mask is None:
+            mask = np.zeros(self._x.size).astype(bool)
+            plot_masked_data = False
+        else:
+            mask = self._mask
+        if y is not None:
+            if not isinstance(y, (tuple, list, np.ndarray)):
+                illegal_value(y, 'y', 'Fit.plot')
+            if len(y) != len(self._x):
+                logging.warning('Ignoring parameter y in Fit.plot (wrong dimension)')
+                y = None
+        if y is not None:
+            if y_title is None or not isinstance(y_title, str):
+                y_title = 'data'
+            plots += [(self._x, y, '.')]
+            legend += [y_title]
+        if self._y is not None:
+            plots += [(self._x, np.asarray(self._y), 'b.')]
+            legend += ['data']
+            if plot_masked_data:
+                plots += [(self._x[mask], np.asarray(self._y)[mask], 'bx')]
+                legend += ['masked data']
+        if isinstance(plot_residual, bool) and plot_residual:
+            plots += [(self._x[~mask], result.residual, 'r-')]
+            legend += ['residual']
+        plots += [(self._x[~mask], result.best_fit, 'k-')]
+        legend += ['best fit']
+        if not skip_init and hasattr(result, 'init_fit'):
+            plots += [(self._x[~mask], result.init_fit, 'g-')]
+            legend += ['init']
+        if plot_comp:
+            components = result.eval_components(x=self._x[~mask])
+            num_components = len(components)
+            if 'tmp_normalization_offset_' in components:
+                num_components -= 1
+            if num_components > 1:
+                eval_index = 0
+                for modelname, y in components.items():
+                    if modelname == 'tmp_normalization_offset_':
+                        continue
+                    if modelname == '_eval':
+                        modelname = f'eval{eval_index}'
+                    if len(modelname) > 20:
+                        modelname = f'{modelname[0:16]} ...'
+                    if isinstance(y, (int, float)):
+                        y *= np.ones(self._x[~mask].size)
+                    plots += [(self._x[~mask], y, '--')]
+                    if plot_comp_legends:
+                        if modelname[-1] == '_':
+                            legend.append(modelname[:-1])
+                        else:
+                            legend.append(modelname)
+        title = kwargs.get('title')
+        if title is not None:
+            kwargs.pop('title')
+        quick_plot(tuple(plots), legend=legend, title=title, block=True, **kwargs)
+
+    @staticmethod
+    def guess_init_peak(x, y, *args, center_guess=None, use_max_for_center=True):
+        """ Return a guess for the initial height, center and fwhm for a peak
+        """
+        # print(f'\n\nargs = {args}')
+        # print(f'center_guess = {center_guess}')
+        # quick_plot(x, y, vlines=center_guess, block=True)
+        center_guesses = None
+        x = np.asarray(x)
+        y = np.asarray(y)
+        if len(x) != len(y):
+            logging.error(f'Invalid x and y lengths ({len(x)}, {len(y)}), skip initial guess')
+            return(None, None, None)
+        if isinstance(center_guess, (int, float)):
+            if len(args):
+                logging.warning('Ignoring additional arguments for single center_guess value')
+            center_guesses = [center_guess]
+        elif isinstance(center_guess, (tuple, list, np.ndarray)):
+            if len(center_guess) == 1:
+                logging.warning('Ignoring additional arguments for single center_guess value')
+                if not isinstance(center_guess[0], (int, float)):
+                    raise ValueError(f'Invalid parameter center_guess ({type(center_guess[0])})')
+                center_guess = center_guess[0]
+            else:
+                if len(args) != 1:
+                    raise ValueError(f'Invalid number of arguments ({len(args)})')
+                n = args[0]
+                if not is_index(n, 0, len(center_guess)):
+                    raise ValueError('Invalid argument')
+                center_guesses = center_guess
+                center_guess = center_guesses[n]
+        elif center_guess is not None:
+            raise ValueError(f'Invalid center_guess type ({type(center_guess)})')
+        # print(f'x = {x}')
+        # print(f'y = {y}')
+        # print(f'center_guess = {center_guess}')
+
+        # Sort the inputs
+        index = np.argsort(x)
+        x = x[index]
+        y = y[index]
+        miny = y.min()
+        # print(f'miny = {miny}')
+        # print(f'x_range = {x[0]} {x[-1]} {len(x)}')
+        # print(f'y_range = {y[0]} {y[-1]} {len(y)}')
+        # quick_plot(x, y, vlines=center_guess, block=True)
+
+        # xx = x
+        # yy = y
+        # Set range for current peak
+        # print(f'n = {n}')
+        # print(f'center_guesses = {center_guesses}')
+        if center_guesses is not None:
+            if len(center_guesses) > 1:
+                index = np.argsort(center_guesses)
+                n = list(index).index(n)
+                # print(f'n = {n}')
+                # print(f'index = {index}')
+                center_guesses = np.asarray(center_guesses)[index]
+                # print(f'center_guesses = {center_guesses}')
+                if n == 0:
+                    low = 0
+                    upp = index_nearest(x, (center_guesses[0]+center_guesses[1])/2)
+                elif n == len(center_guesses)-1:
+                    low = index_nearest(x, (center_guesses[n-1]+center_guesses[n])/2)
+                    upp = len(x)
+                else:
+                    low = index_nearest(x, (center_guesses[n-1]+center_guesses[n])/2)
+                    upp = index_nearest(x, (center_guesses[n]+center_guesses[n+1])/2)
+                # print(f'low = {low}')
+                # print(f'upp = {upp}')
+                x = x[low:upp]
+                y = y[low:upp]
+                # quick_plot(x, y, vlines=(x[0], center_guess, x[-1]), block=True)
+
+        # Estimate FHHM
+        maxy = y.max()
+        # print(f'x_range = {x[0]} {x[-1]} {len(x)}')
+        # print(f'y_range = {y[0]} {y[-1]} {len(y)} {miny} {maxy}')
+        # print(f'center_guess = {center_guess}')
+        if center_guess is None:
+            center_index = np.argmax(y)
+            center = x[center_index]
+            height = maxy-miny
+        else:
+            if use_max_for_center:
+                center_index = np.argmax(y)
+                center = x[center_index]
+                if center_index < 0.1*len(x) or center_index > 0.9*len(x):
+                    center_index = index_nearest(x, center_guess)
+                    center = center_guess
+            else:
+                center_index = index_nearest(x, center_guess)
+                center = center_guess
+            height = y[center_index]-miny
+        # print(f'center_index = {center_index}')
+        # print(f'center = {center}')
+        # print(f'height = {height}')
+        half_height = miny+0.5*height
+        # print(f'half_height = {half_height}')
+        fwhm_index1 = 0
+        for i in range(center_index, fwhm_index1, -1):
+            if y[i] < half_height:
+                fwhm_index1 = i
+                break
+        # print(f'fwhm_index1 = {fwhm_index1} {x[fwhm_index1]}')
+        fwhm_index2 = len(x)-1
+        for i in range(center_index, fwhm_index2):
+            if y[i] < half_height:
+                fwhm_index2 = i
+                break
+        # print(f'fwhm_index2 = {fwhm_index2} {x[fwhm_index2]}')
+        # quick_plot((x,y,'o'), vlines=(x[fwhm_index1], center, x[fwhm_index2]), block=True)
+        if fwhm_index1 == 0 and fwhm_index2 < len(x)-1:
+            fwhm = 2*(x[fwhm_index2]-center)
+        elif fwhm_index1 > 0 and fwhm_index2 == len(x)-1:
+            fwhm = 2*(center-x[fwhm_index1])
+        else:
+            fwhm = x[fwhm_index2]-x[fwhm_index1]
+        # print(f'fwhm_index1 = {fwhm_index1} {x[fwhm_index1]}')
+        # print(f'fwhm_index2 = {fwhm_index2} {x[fwhm_index2]}')
+        # print(f'fwhm = {fwhm}')
+
+        # Return height, center and FWHM
+        # quick_plot((x,y,'o'), (xx,yy), vlines=(x[fwhm_index1], center, x[fwhm_index2]), block=True)
+        return(height, center, fwhm)
+
+    def _check_linearity_model(self):
+        """Identify the linearity of all model parameters and check if the model is linear or not
+        """
+        if not self._try_linear_fit:
+            logging.info('Skip linearity check (not yet supported for callable models)')
+            return(False)
+        free_parameters = [name for name, par in self._parameters.items() if par.vary]
+        for component in self._model.components:
+            if 'tmp_normalization_offset_c' in component.param_names:
+                continue
+            if isinstance(component, ExpressionModel):
+                for name in free_parameters:
+                    if diff(component.expr, name, name):
+                        # print(f'\t\t{component.expr} is non-linear in {name}')
+                        self._nonlinear_parameters.append(name)
+                        if name in self._linear_parameters:
+                            self._linear_parameters.remove(name)
+            else:
+                model_parameters = component.param_names.copy()
+                for basename, hint in component.param_hints.items():
+                    name = f'{component.prefix}{basename}'
+                    if hint.get('expr') is not None:
+                        model_parameters.remove(name)
+                for name in model_parameters:
+                    expr = self._parameters[name].expr
+                    if expr is not None:
+                        for nname in free_parameters:
+                            if name in self._nonlinear_parameters:
+                                if diff(expr, nname):
+                                    # print(f'\t\t{component} is non-linear in {nname} (through {name} = "{expr}")')
+                                    self._nonlinear_parameters.append(nname)
+                                    if nname in self._linear_parameters:
+                                        self._linear_parameters.remove(nname)
+                            else:
+                                assert(name in self._linear_parameters)
+                                # print(f'\n\nexpr ({type(expr)}) = {expr}\nnname ({type(nname)}) = {nname}\n\n')
+                                if diff(expr, nname, nname):
+                                    # print(f'\t\t{component} is non-linear in {nname} (through {name} = "{expr}")')
+                                    self._nonlinear_parameters.append(nname)
+                                    if nname in self._linear_parameters:
+                                        self._linear_parameters.remove(nname)
+        # print(f'\nfree parameters:\n\t{free_parameters}')
+        # print(f'linear parameters:\n\t{self._linear_parameters}')
+        # print(f'nonlinear parameters:\n\t{self._nonlinear_parameters}\n')
+        if any(True for name in self._nonlinear_parameters if self._parameters[name].vary):
+            return(False)
+        return(True)
+
+    def _fit_linear_model(self, x, y):
+        """Perform a linear fit by direct matrix solution with numpy
+        """
+        # Construct the matrix and the free parameter vector
+        # print(f'\nparameters:')
+        # self._parameters.pretty_print()
+        # print(f'\nparameter_norms:\n\t{self._parameter_norms}')
+        # print(f'\nlinear_parameters:\n\t{self._linear_parameters}')
+        # print(f'nonlinear_parameters:\n\t{self._nonlinear_parameters}')
+        free_parameters = [name for name, par in self._parameters.items() if par.vary]
+        # print(f'free parameters:\n\t{free_parameters}\n')
+        expr_parameters = {name:par.expr for name, par in self._parameters.items()
+                if par.expr is not None}
+        model_parameters = []
+        for component in self._model.components:
+            if 'tmp_normalization_offset_c' in component.param_names:
+                continue
+            model_parameters += component.param_names
+            for basename, hint in component.param_hints.items():
+                name = f'{component.prefix}{basename}'
+                if hint.get('expr') is not None:
+                    expr_parameters.pop(name)
+                    model_parameters.remove(name)
+        # print(f'expr parameters:\n{expr_parameters}')
+        # print(f'model parameters:\n\t{model_parameters}\n')
+        norm = 1.0
+        if self._normalized:
+            norm = self._norm[1]
+        # print(f'\n\nself._normalized = {self._normalized}\nnorm = {norm}\nself._norm = {self._norm}\n')
+        # Add expression parameters to asteval
+        ast = Interpreter()
+        # print(f'Adding to asteval sym table:')
+        for name, expr in expr_parameters.items():
+            # print(f'\tadding {name} {expr}')
+            ast.symtable[name] = expr
+        # Add constant parameters to asteval
+        # (renormalize to use correctly in evaluation of expression models)
+        for name, par in self._parameters.items():
+            if par.expr is None and not par.vary:
+                if self._parameter_norms[name]:
+                    # print(f'\tadding {name} {par.value*norm}')
+                    ast.symtable[name] = par.value*norm
+                else:
+                    # print(f'\tadding {name} {par.value}')
+                    ast.symtable[name] = par.value
+        A = np.zeros((len(x), len(free_parameters)), dtype='float64')
+        y_const = np.zeros(len(x), dtype='float64')
+        have_expression_model = False
+        for component in self._model.components:
+            if isinstance(component, ConstantModel):
+                name = component.param_names[0]
+                # print(f'\nConstant model: {name} {self._parameters[name]}\n')
+                if name in free_parameters:
+                    # print(f'\t\t{name} is a free constant set matrix column {free_parameters.index(name)} to 1.0')
+                    A[:,free_parameters.index(name)] = 1.0
+                else:
+                    if self._parameter_norms[name]:
+                        delta_y_const = self._parameters[name]*np.ones(len(x))
+                    else:
+                        delta_y_const = (self._parameters[name]*norm)*np.ones(len(x))
+                    y_const += delta_y_const
+                # print(f'\ndelta_y_const ({type(delta_y_const)}):\n{delta_y_const}\n')
+            elif isinstance(component, ExpressionModel):
+                have_expression_model = True
+                const_expr = component.expr
+                # print(f'\nExpression model:\nconst_expr: {const_expr}\n')
+                for name in free_parameters:
+                    dexpr_dname = diff(component.expr, name)
+                    if dexpr_dname:
+                        const_expr = f'{const_expr}-({str(dexpr_dname)})*{name}'
+                        # print(f'\tconst_expr: {const_expr}')
+                        if not self._parameter_norms[name]:
+                            dexpr_dname = f'({dexpr_dname})/{norm}'
dexpr_dname = f'({dexpr_dname})/{norm}'
|
|
1244
|
+
# print(f'\t{component.expr} is linear in {name}\n\t\tadd "{str(dexpr_dname)}" to matrix as column {free_parameters.index(name)}')
|
|
1245
|
+
fx = [(lambda _: ast.eval(str(dexpr_dname)))(ast(f'x={v}')) for v in x]
|
|
1246
|
+
# print(f'\tfx:\n{fx}')
|
|
1247
|
+
if len(ast.error):
|
|
1248
|
+
raise ValueError(f'Unable to evaluate {dexpr_dname}')
|
|
1249
|
+
A[:,free_parameters.index(name)] += fx
|
|
1250
|
+
# if self._parameter_norms[name]:
|
|
1251
|
+
# print(f'\t\t{component.expr} is linear in {name} add "{str(dexpr_dname)}" to matrix as column {free_parameters.index(name)}')
|
|
1252
|
+
# A[:,free_parameters.index(name)] += fx
|
|
1253
|
+
# else:
|
|
1254
|
+
# print(f'\t\t{component.expr} is linear in {name} add "({str(dexpr_dname)})/{norm}" to matrix as column {free_parameters.index(name)}')
|
|
1255
|
+
# A[:,free_parameters.index(name)] += np.asarray(fx)/norm
|
|
1256
|
+
# FIX: find another solution if expr not supported by simplify
|
|
1257
|
+
const_expr = str(simplify(f'({const_expr})/{norm}'))
|
|
1258
|
+
# print(f'\nconst_expr: {const_expr}')
|
|
1259
|
+
delta_y_const = [(lambda _: ast.eval(const_expr))(ast(f'x = {v}')) for v in x]
|
|
1260
|
+
y_const += delta_y_const
|
|
1261
|
+
# print(f'\ndelta_y_const ({type(delta_y_const)}):\n{delta_y_const}\n')
|
|
1262
|
+
if len(ast.error):
|
|
1263
|
+
raise ValueError(f'Unable to evaluate {const_expr}')
|
|
1264
|
+
else:
|
|
1265
|
+
free_model_parameters = [name for name in component.param_names
|
|
1266
|
+
if name in free_parameters or name in expr_parameters]
|
|
1267
|
+
# print(f'\nBuild-in model ({component}):\nfree_model_parameters: {free_model_parameters}\n')
|
|
1268
|
+
if not len(free_model_parameters):
|
|
1269
|
+
y_const += component.eval(params=self._parameters, x=x)
|
|
1270
|
+
elif isinstance(component, LinearModel):
|
|
1271
|
+
if f'{component.prefix}slope' in free_model_parameters:
|
|
1272
|
+
A[:,free_parameters.index(f'{component.prefix}slope')] = x
|
|
1273
|
+
else:
|
|
1274
|
+
y_const += self._parameters[f'{component.prefix}slope'].value*x
|
|
1275
|
+
if f'{component.prefix}intercept' in free_model_parameters:
|
|
1276
|
+
A[:,free_parameters.index(f'{component.prefix}intercept')] = 1.0
|
|
1277
|
+
else:
|
|
1278
|
+
y_const += self._parameters[f'{component.prefix}intercept'].value* \
|
|
1279
|
+
np.ones(len(x))
|
|
1280
|
+
elif isinstance(component, QuadraticModel):
|
|
1281
|
+
if f'{component.prefix}a' in free_model_parameters:
|
|
1282
|
+
A[:,free_parameters.index(f'{component.prefix}a')] = x**2
|
|
1283
|
+
else:
|
|
1284
|
+
y_const += self._parameters[f'{component.prefix}a'].value*x**2
|
|
1285
|
+
if f'{component.prefix}b' in free_model_parameters:
|
|
1286
|
+
A[:,free_parameters.index(f'{component.prefix}b')] = x
|
|
1287
|
+
else:
|
|
1288
|
+
y_const += self._parameters[f'{component.prefix}b'].value*x
|
|
1289
|
+
if f'{component.prefix}c' in free_model_parameters:
|
|
1290
|
+
A[:,free_parameters.index(f'{component.prefix}c')] = 1.0
|
|
1291
|
+
else:
|
|
1292
|
+
y_const += self._parameters[f'{component.prefix}c'].value*np.ones(len(x))
|
|
1293
|
+
else:
|
|
1294
|
+
# At this point each build-in model must be strictly proportional to each linear
|
|
1295
|
+
# model parameter. Without this assumption, the model equation is needed
|
|
1296
|
+
# For the current build-in lmfit models, this can only ever be the amplitude
|
|
1297
|
+
assert(len(free_model_parameters) == 1)
|
|
1298
|
+
name = f'{component.prefix}amplitude'
|
|
1299
|
+
assert(free_model_parameters[0] == name)
|
|
1300
|
+
assert(self._parameter_norms[name])
|
|
1301
|
+
expr = self._parameters[name].expr
|
|
1302
|
+
if expr is None:
|
|
1303
|
+
# print(f'\t{component} is linear in {name} add to matrix as column {free_parameters.index(name)}')
|
|
1304
|
+
parameters = deepcopy(self._parameters)
|
|
1305
|
+
parameters[name].set(value=1.0)
|
|
1306
|
+
index = free_parameters.index(name)
|
|
1307
|
+
A[:,free_parameters.index(name)] += component.eval(params=parameters, x=x)
|
|
1308
|
+
else:
|
|
1309
|
+
const_expr = expr
|
|
1310
|
+
# print(f'\tconst_expr: {const_expr}')
|
|
1311
|
+
parameters = deepcopy(self._parameters)
|
|
1312
|
+
parameters[name].set(value=1.0)
|
|
1313
|
+
dcomp_dname = component.eval(params=parameters, x=x)
|
|
1314
|
+
# print(f'\tdcomp_dname ({type(dcomp_dname)}):\n{dcomp_dname}')
|
|
1315
|
+
for nname in free_parameters:
|
|
1316
|
+
dexpr_dnname = diff(expr, nname)
|
|
1317
|
+
if dexpr_dnname:
|
|
1318
|
+
assert(self._parameter_norms[name])
|
|
1319
|
+
# print(f'\t\td({expr})/d{nname} = {dexpr_dnname}')
|
|
1320
|
+
# print(f'\t\t{component} is linear in {nname} (through {name} = "{expr}", add to matrix as column {free_parameters.index(nname)})')
|
|
1321
|
+
fx = np.asarray(dexpr_dnname*dcomp_dname, dtype='float64')
|
|
1322
|
+
# print(f'\t\tfx ({type(fx)}): {fx}')
|
|
1323
|
+
# print(f'free_parameters.index({nname}): {free_parameters.index(nname)}')
|
|
1324
|
+
if self._parameter_norms[nname]:
|
|
1325
|
+
A[:,free_parameters.index(nname)] += fx
|
|
1326
|
+
else:
|
|
1327
|
+
A[:,free_parameters.index(nname)] += fx/norm
|
|
1328
|
+
const_expr = f'{const_expr}-({dexpr_dnname})*{nname}'
|
|
1329
|
+
# print(f'\t\tconst_expr: {const_expr}')
|
|
1330
|
+
const_expr = str(simplify(f'({const_expr})/{norm}'))
|
|
1331
|
+
# print(f'\tconst_expr: {const_expr}')
|
|
1332
|
+
fx = [(lambda _: ast.eval(const_expr))(ast(f'x = {v}')) for v in x]
|
|
1333
|
+
# print(f'\tfx: {fx}')
|
|
1334
|
+
delta_y_const = np.multiply(fx, dcomp_dname)
|
|
1335
|
+
y_const += delta_y_const
|
|
1336
|
+
# print(f'\ndelta_y_const ({type(delta_y_const)}):\n{delta_y_const}\n')
|
|
1337
|
+
# print(A)
|
|
1338
|
+
# print(y_const)
|
|
1339
|
+
solution, residual, rank, s = np.linalg.lstsq(A, y-y_const, rcond=None)
|
|
1340
|
+
# print(f'\nsolution ({type(solution)} {solution.shape}):\n\t{solution}')
|
|
1341
|
+
# print(f'\nresidual ({type(residual)} {residual.shape}):\n\t{residual}')
|
|
1342
|
+
# print(f'\nrank ({type(rank)} {rank.shape}):\n\t{rank}')
|
|
1343
|
+
# print(f'\ns ({type(s)} {s.shape}):\n\t{s}\n')
|
|
1344
|
+
|
|
1345
|
+
# Assemble result (compensate for normalization in expression models)
|
|
1346
|
+
for name, value in zip(free_parameters, solution):
|
|
1347
|
+
self._parameters[name].set(value=value)
|
|
1348
|
+
if self._normalized and (have_expression_model or len(expr_parameters)):
|
|
1349
|
+
for name, norm in self._parameter_norms.items():
|
|
1350
|
+
par = self._parameters[name]
|
|
1351
|
+
if par.expr is None and norm:
|
|
1352
|
+
self._parameters[name].set(value=par.value*self._norm[1])
|
|
1353
|
+
# self._parameters.pretty_print()
|
|
1354
|
+
# print(f'\nself._parameter_norms:\n\t{self._parameter_norms}')
|
|
1355
|
+
self._result = ModelResult(self._model, deepcopy(self._parameters))
|
|
1356
|
+
self._result.best_fit = self._model.eval(params=self._parameters, x=x)
|
|
1357
|
+
if self._normalized and (have_expression_model or len(expr_parameters)):
|
|
1358
|
+
if 'tmp_normalization_offset_c' in self._parameters:
|
|
1359
|
+
offset = self._parameters['tmp_normalization_offset_c']
|
|
1360
|
+
else:
|
|
1361
|
+
offset = 0.0
|
|
1362
|
+
self._result.best_fit = (self._result.best_fit-offset-self._norm[0])/self._norm[1]
|
|
1363
|
+
if self._normalized:
|
|
1364
|
+
for name, norm in self._parameter_norms.items():
|
|
1365
|
+
par = self._parameters[name]
|
|
1366
|
+
if par.expr is None and norm:
|
|
1367
|
+
value = par.value/self._norm[1]
|
|
1368
|
+
self._parameters[name].set(value=value)
|
|
1369
|
+
self._result.params[name].set(value=value)
|
|
1370
|
+
# self._parameters.pretty_print()
|
|
1371
|
+
self._result.residual = self._result.best_fit-y
|
|
1372
|
+
self._result.components = self._model.components
|
|
1373
|
+
self._result.init_params = None
|
|
1374
|
+
# quick_plot((x, y, '.'), (x, y_const, 'g'), (x, self._result.best_fit, 'k'), (x, self._result.residual, 'r'), block=True)
|
|
1375
|
+
|
|
1376
|
+
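# A standalone sketch of the direct linear solve performed above: stack one
# column per free parameter in a design matrix A, move every fixed contribution
# into y_const, and solve A @ p = y - y_const in the least-squares sense.
import numpy as np

x = np.linspace(0.0, 1.0, 50)
rng = np.random.default_rng(7)
y = 2.5 + 1.3*x + rng.normal(scale=0.01, size=x.size)

A = np.stack([np.ones_like(x), x], axis=1)   # columns: intercept, slope
y_const = np.zeros_like(x)                   # no fixed model terms in this toy case
solution, residual, rank, s = np.linalg.lstsq(A, y-y_const, rcond=None)
print(solution)  # close to [2.5, 1.3]
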
    def _normalize(self):
        """Normalize the data and initial parameters
        """
        if self._normalized:
            return
        if self._norm is None:
            if self._y is not None and self._y_norm is None:
                self._y_norm = np.asarray(self._y)
        else:
            if self._y is not None and self._y_norm is None:
                self._y_norm = (np.asarray(self._y)-self._norm[0])/self._norm[1]
            self._y_range = 1.0
            for name, norm in self._parameter_norms.items():
                par = self._parameters[name]
                if par.expr is None and norm:
                    value = par.value/self._norm[1]
                    _min = par.min
                    _max = par.max
                    if not np.isinf(_min) and abs(_min) != float_min:
                        _min /= self._norm[1]
                    if not np.isinf(_max) and abs(_max) != float_min:
                        _max /= self._norm[1]
                    par.set(value=value, min=_min, max=_max)
            self._normalized = True

    def _renormalize(self):
        """Renormalize the data and results
        """
        if self._norm is None or not self._normalized:
            return
        self._normalized = False
        for name, norm in self._parameter_norms.items():
            par = self._parameters[name]
            if par.expr is None and norm:
                value = par.value*self._norm[1]
                _min = par.min
                _max = par.max
                if not np.isinf(_min) and abs(_min) != float_min:
                    _min *= self._norm[1]
                if not np.isinf(_max) and abs(_max) != float_min:
                    _max *= self._norm[1]
                par.set(value=value, min=_min, max=_max)
        if self._result is None:
            return
        self._result.best_fit = self._result.best_fit*self._norm[1]+self._norm[0]
        for name, par in self._result.params.items():
            if self._parameter_norms.get(name, False):
                if par.stderr is not None:
                    par.stderr *= self._norm[1]
                if par.expr is None:
                    _min = par.min
                    _max = par.max
                    value = par.value*self._norm[1]
                    if par.init_value is not None:
                        par.init_value *= self._norm[1]
                    if not np.isinf(_min) and abs(_min) != float_min:
                        _min *= self._norm[1]
                    if not np.isinf(_max) and abs(_max) != float_min:
                        _max *= self._norm[1]
                    par.set(value=value, min=_min, max=_max)
        if hasattr(self._result, 'init_fit'):
            self._result.init_fit = self._result.init_fit*self._norm[1]+self._norm[0]
        if hasattr(self._result, 'init_values'):
            init_values = {}
            for name, value in self._result.init_values.items():
                if name not in self._parameter_norms or self._parameters[name].expr is not None:
                    init_values[name] = value
                elif self._parameter_norms[name]:
                    init_values[name] = value*self._norm[1]
            self._result.init_values = init_values
            for name, par in self._result.init_params.items():
                if par.expr is None and self._parameter_norms.get(name, False):
                    value = par.value
                    _min = par.min
                    _max = par.max
                    value *= self._norm[1]
                    if not np.isinf(_min) and abs(_min) != float_min:
                        _min *= self._norm[1]
                    if not np.isinf(_max) and abs(_max) != float_min:
                        _max *= self._norm[1]
                    par.set(value=value, min=_min, max=_max)
                    par.init_value = par.value
        # Don't renormalize chisqr, it has no useful meaning in physical units
        # self._result.chisqr *= self._norm[1]*self._norm[1]
        if self._result.covar is not None:
            for i, name in enumerate(self._result.var_names):
                if self._parameter_norms.get(name, False):
                    for j in range(len(self._result.var_names)):
                        if self._result.covar[i,j] is not None:
                            self._result.covar[i,j] *= self._norm[1]
                        if self._result.covar[j,i] is not None:
                            self._result.covar[j,i] *= self._norm[1]
        # Don't renormalize redchi, it has no useful meaning in physical units
        # self._result.redchi *= self._norm[1]*self._norm[1]
        if self._result.residual is not None:
            self._result.residual *= self._norm[1]

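# A toy sketch of the normalization round trip handled by _normalize/_renormalize:
# data are mapped to (y - min)/range for the fit, and amplitude-like parameter
# values (with their bounds and errors) are scaled back by the same range after.
import numpy as np

y = np.array([10.0, 14.0, 30.0])
norm = (y.min(), np.ptp(y))                  # (offset, range) = (10.0, 20.0)
y_norm = (y-norm[0])/norm[1]                 # the fit runs in this unit scale
amplitude_fitted = 0.75                      # a normalized fit result
amplitude_physical = amplitude_fitted*norm[1]
assert np.allclose(y_norm*norm[1]+norm[0], y)
print(amplitude_physical)                    # 15.0
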
    def _reset_par_at_boundary(self, par, value):
        assert(par.vary)
        name = par.name
        _min = self._parameter_bounds[name]['min']
        _max = self._parameter_bounds[name]['max']
        if np.isinf(_min):
            if not np.isinf(_max):
                if self._parameter_norms.get(name, False):
                    upp = _max-0.1*self._y_range
                elif _max == 0.0:
                    upp = _max-0.1
                else:
                    upp = _max-0.1*abs(_max)
                if value >= upp:
                    return(upp)
        else:
            if np.isinf(_max):
                if self._parameter_norms.get(name, False):
                    low = _min+0.1*self._y_range
                elif _min == 0.0:
                    low = _min+0.1
                else:
                    low = _min+0.1*abs(_min)
                if value <= low:
                    return(low)
            else:
                low = 0.9*_min+0.1*_max
                upp = 0.1*_min+0.9*_max
                if value <= low:
                    return(low)
                elif value >= upp:
                    return(upp)
        return(value)


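# A standalone sketch of the two-finite-bounds branch of _reset_par_at_boundary:
# starting values are pulled at least 10% inside the [min, max] interval so the
# optimizer never starts exactly on a bound.
def nudge_inside(value, lo, hi):
    low = 0.9*lo+0.1*hi
    upp = 0.1*lo+0.9*hi
    if value <= low:
        return low
    if value >= upp:
        return upp
    return value

print(nudge_inside(0.0, 0.0, 10.0))   # 1.0
print(nudge_inside(10.0, 0.0, 10.0))  # 9.0
print(nudge_inside(4.2, 0.0, 10.0))   # 4.2 (already well inside)
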
class FitMultipeak(Fit):
    """Fit data with multiple peaks
    """
    def __init__(self, y, x=None, normalize=True):
        super().__init__(y, x=x, normalize=normalize)
        self._fwhm_max = None
        self._sigma_max = None

    @classmethod
    def fit_multipeak(cls, y, centers, x=None, normalize=True, peak_models='gaussian',
            center_exprs=None, fit_type=None, background=None, fwhm_max=None,
            print_report=False, plot=False, x_eval=None):
        """Make sure that centers and fwhm_max are in the correct units and consistent with expr
        for a uniform fit (fit_type == 'uniform')
        """
        if x_eval is not None and not isinstance(x_eval, (tuple, list, np.ndarray)):
            raise ValueError(f'Invalid parameter x_eval ({x_eval})')
        fit = cls(y, x=x, normalize=normalize)
        success = fit.fit(centers, fit_type=fit_type, peak_models=peak_models, fwhm_max=fwhm_max,
                center_exprs=center_exprs, background=background, print_report=print_report,
                plot=plot)
        if x_eval is None:
            best_fit = fit.best_fit
        else:
            best_fit = fit.eval(x_eval)
        if success:
            return(best_fit, fit.residual, fit.best_values, fit.best_errors, fit.redchi, \
                    fit.success)
        else:
            return(np.array([]), np.array([]), {}, {}, float_max, False)

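# A hedged usage sketch for the classmethod above, on synthetic data; the
# argument values are illustrative, not taken from the package documentation,
# and the import path follows this wheel's layout.
import numpy as np
from CHAP.common.utils.fit import FitMultipeak

x = np.linspace(0.0, 10.0, 500)
y = (100.0*np.exp(-0.5*((x-3.0)/0.15)**2)
     + 60.0*np.exp(-0.5*((x-7.0)/0.2)**2)
     + 5.0)
best_fit, residual, best_values, best_errors, redchi, success = \
        FitMultipeak.fit_multipeak(y, (3.0, 7.0), x=x, background='constant',
                fwhm_max=1.0, fit_type='unconstrained')
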
    def fit(self, centers, fit_type=None, peak_models=None, center_exprs=None, fwhm_max=None,
            background=None, print_report=False, plot=True, param_constraint=False):
        self._fwhm_max = fwhm_max
        # Create the multipeak model
        self._create_model(centers, fit_type, peak_models, center_exprs, background,
                param_constraint)

        # RV: Obsolete Normalize the data and results
        # print('\nBefore fit before normalization in FitMultipeak:')
        # self._parameters.pretty_print()
        # self._normalize()
        # print('\nBefore fit after normalization in FitMultipeak:')
        # self._parameters.pretty_print()

        # Perform the fit
        try:
            if param_constraint:
                super().fit(fit_kws={'xtol': 1.e-5, 'ftol': 1.e-5, 'gtol': 1.e-5})
            else:
                super().fit()
        except:
            return(False)

        # Check for valid fit parameter results
        fit_failure = self._check_validity()
        success = True
        if fit_failure:
            if param_constraint:
                logging.warning(' -> Should not happen with param_constraint set, fail the fit')
                success = False
            else:
                logging.info(' -> Retry fitting with constraints')
                self.fit(centers, fit_type, peak_models, center_exprs, fwhm_max=fwhm_max,
                        background=background, print_report=print_report, plot=plot,
                        param_constraint=True)
        else:
            # RV: Obsolete Renormalize the data and results
            # print('\nAfter fit before renormalization in FitMultipeak:')
            # self._parameters.pretty_print()
            # self.print_fit_report()
            # self._renormalize()
            # print('\nAfter fit after renormalization in FitMultipeak:')
            # self._parameters.pretty_print()
            # self.print_fit_report()

            # Print report and plot components if requested
            if print_report:
                self.print_fit_report()
            if plot:
                self.plot(skip_init=True, plot_comp=True, plot_comp_legends=True,
                        plot_residual=True)

        return(success)

    def _create_model(self, centers, fit_type=None, peak_models=None, center_exprs=None,
            background=None, param_constraint=False):
        """Create the multipeak model
        """
        if isinstance(centers, (int, float)):
            centers = [centers]
        num_peaks = len(centers)
        if peak_models is None:
            peak_models = num_peaks*['gaussian']
        elif isinstance(peak_models, str) and peak_models in ('gaussian', 'lorentzian'):
            peak_models = num_peaks*[peak_models]
        else:
            raise ValueError(f'Invalid peak model parameter ({peak_models})')
        if len(peak_models) != num_peaks:
            raise ValueError(f'Inconsistent number of peaks in peak_models ({len(peak_models)} vs '+
                    f'{num_peaks})')
        if num_peaks == 1:
            if fit_type is not None:
                logging.debug('Ignoring fit_type input for fitting one peak')
                fit_type = None
            if center_exprs is not None:
                logging.debug('Ignoring center_exprs input for fitting one peak')
                center_exprs = None
        else:
            if fit_type == 'uniform':
                if center_exprs is None:
                    center_exprs = [f'scale_factor*{cen}' for cen in centers]
                if len(center_exprs) != num_peaks:
                    raise ValueError(f'Inconsistent number of peaks in center_exprs '+
                            f'({len(center_exprs)} vs {num_peaks})')
            elif fit_type == 'unconstrained' or fit_type is None:
                if center_exprs is not None:
                    logging.warning('Ignoring center_exprs input for unconstrained fit')
                    center_exprs = None
            else:
                raise ValueError(f'Invalid fit_type in fit_multigaussian {fit_type}')
        self._sigma_max = None
        if param_constraint:
            min_value = float_min
            if self._fwhm_max is not None:
                self._sigma_max = np.zeros(num_peaks)
        else:
            min_value = None

        # Reset the fit
        self._model = None
        self._parameters = Parameters()
        self._result = None

        # Add background model(s)
        if background is not None:
            if isinstance(background, dict):
                background = [background]
            if isinstance(background, str):
                self.add_model(background, prefix='bkgd_')
            elif is_dict_series(background):
                for model in deepcopy(background):
                    if 'model' not in model:
                        raise KeyError(f'Missing keyword "model" in model in background ({model})')
                    name = model.pop('model')
                    parameters = model.pop('parameters', None)
                    self.add_model(name, prefix=f'bkgd_{name}_', parameters=parameters, **model)
            else:
                raise ValueError(f'Invalid parameter background ({background})')

        # Add peaks and guess initial fit parameters
        ast = Interpreter()
        if num_peaks == 1:
            height_init, cen_init, fwhm_init = self.guess_init_peak(self._x, self._y)
            if self._fwhm_max is not None and fwhm_init > self._fwhm_max:
                fwhm_init = self._fwhm_max
            ast(f'fwhm = {fwhm_init}')
            ast(f'height = {height_init}')
            sig_init = ast(fwhm_factor[peak_models[0]])
            amp_init = ast(height_factor[peak_models[0]])
            sig_max = None
            if self._sigma_max is not None:
                ast(f'fwhm = {self._fwhm_max}')
                sig_max = ast(fwhm_factor[peak_models[0]])
                self._sigma_max[0] = sig_max
            self.add_model(peak_models[0], parameters=(
                    {'name': 'amplitude', 'value': amp_init, 'min': min_value},
                    {'name': 'center', 'value': cen_init, 'min': min_value},
                    {'name': 'sigma', 'value': sig_init, 'min': min_value, 'max': sig_max}))
        else:
            if fit_type == 'uniform':
                self.add_parameter(name='scale_factor', value=1.0)
            for i in range(num_peaks):
                height_init, cen_init, fwhm_init = self.guess_init_peak(self._x, self._y, i,
                        center_guess=centers)
                if self._fwhm_max is not None and fwhm_init > self._fwhm_max:
                    fwhm_init = self._fwhm_max
                ast(f'fwhm = {fwhm_init}')
                ast(f'height = {height_init}')
                sig_init = ast(fwhm_factor[peak_models[i]])
                amp_init = ast(height_factor[peak_models[i]])
                sig_max = None
                if self._sigma_max is not None:
                    ast(f'fwhm = {self._fwhm_max}')
                    sig_max = ast(fwhm_factor[peak_models[i]])
                    self._sigma_max[i] = sig_max
                if fit_type == 'uniform':
                    self.add_model(peak_models[i], prefix=f'peak{i+1}_', parameters=(
                            {'name': 'amplitude', 'value': amp_init, 'min': min_value},
                            {'name': 'center', 'expr': center_exprs[i]},
                            {'name': 'sigma', 'value': sig_init, 'min': min_value,
                             'max': sig_max}))
                else:
                    self.add_model('gaussian', prefix=f'peak{i+1}_', parameters=(
                            {'name': 'amplitude', 'value': amp_init, 'min': min_value},
                            {'name': 'center', 'value': cen_init, 'min': min_value},
                            {'name': 'sigma', 'value': sig_init, 'min': min_value,
                             'max': sig_max}))

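# The fwhm_factor/height_factor lookups used above are defined elsewhere in this
# module; for a gaussian peak they are assumed to encode the standard relations,
# sketched here with the lmfit convention that amplitude is the peak area.
import numpy as np

fwhm = 1.0
height = 100.0
sigma = fwhm/(2*np.sqrt(2*np.log(2)))      # fwhm = 2.3548...*sigma for a gaussian
amplitude = height*sigma*np.sqrt(2*np.pi)  # area from height and width
print(sigma, amplitude)
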
    def _check_validity(self):
        """Check for valid fit parameter results
        """
        fit_failure = False
        index = compile(r'\d+')
        for name, par in self.best_parameters.items():
            if 'bkgd' in name:
                # if ((name == 'bkgd_c' and par['value'] <= 0.0) or
                #         (name.endswith('amplitude') and par['value'] <= 0.0) or
                if ((name.endswith('amplitude') and par['value'] <= 0.0) or
                        (name.endswith('decay') and par['value'] <= 0.0)):
                    logging.info(f'Invalid fit result for {name} ({par["value"]})')
                    fit_failure = True
            elif (((name.endswith('amplitude') or name.endswith('height')) and
                        par['value'] <= 0.0) or
                    ((name.endswith('sigma') or name.endswith('fwhm')) and par['value'] <= 0.0) or
                    (name.endswith('center') and par['value'] <= 0.0) or
                    (name == 'scale_factor' and par['value'] <= 0.0)):
                logging.info(f'Invalid fit result for {name} ({par["value"]})')
                fit_failure = True
            if 'bkgd' not in name and name.endswith('sigma') and self._sigma_max is not None:
                if name == 'sigma':
                    sigma_max = self._sigma_max[0]
                else:
                    sigma_max = self._sigma_max[int(index.search(name).group())-1]
                if par['value'] > sigma_max:
                    logging.info(f'Invalid fit result for {name} ({par["value"]})')
                    fit_failure = True
                elif par['value'] == sigma_max:
                    logging.warning(f'Edge result for {name} ({par["value"]})')
            if 'bkgd' not in name and name.endswith('fwhm') and self._fwhm_max is not None:
                if par['value'] > self._fwhm_max:
                    logging.info(f'Invalid fit result for {name} ({par["value"]})')
                    fit_failure = True
                elif par['value'] == self._fwhm_max:
                    logging.warning(f'Edge result for {name} ({par["value"]})')
        return(fit_failure)


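# A minimal sketch of the regex lookup _check_validity uses above to map a
# parameter name like 'peak3_sigma' back to its per-peak sigma_max entry.
from re import compile

index = compile(r'\d+')
name = 'peak3_sigma'
print(int(index.search(name).group())-1)  # -> 2, the 0-based peak index
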
class FitMap(Fit):
    """Fit a map of data
    """
    def __init__(self, ymap, x=None, models=None, normalize=True, transpose=None, **kwargs):
        super().__init__(None)
        self._best_errors = None
        self._best_fit = None
        self._best_parameters = None
        self._best_values = None
        self._inv_transpose = None
        self._max_nfev = None
        self._memfolder = None
        self._new_parameters = None
        self._out_of_bounds = None
        self._plot = False
        self._print_report = False
        self._redchi = None
        self._redchi_cutoff = 0.1
        self._skip_init = True
        self._success = None
        self._transpose = None
        self._try_no_bounds = True

        # At this point the fastest index should always be the signal dimension so that the slowest
        # ndim-1 dimensions are the map dimensions
        if isinstance(ymap, (tuple, list, np.ndarray)):
            self._x = np.asarray(x)
        elif have_xarray and isinstance(ymap, xr.DataArray):
            if x is not None:
                logging.warning(f'Ignoring superfluous input x ({x}) in Fit.__init__')
            self._x = np.asarray(ymap[ymap.dims[-1]])
        else:
            illegal_value(ymap, 'ymap', 'FitMap:__init__', raise_error=True)
        self._ymap = ymap

        # Verify the input parameters
        if self._x.ndim != 1:
            raise ValueError(f'Invalid dimension for input x {self._x.ndim}')
        if self._ymap.ndim < 2:
            raise ValueError('Invalid number of dimension of the input dataset '+
                    f'{self._ymap.ndim}')
        if self._x.size != self._ymap.shape[-1]:
            raise ValueError(f'Inconsistent x and y dimensions ({self._x.size} vs '+
                    f'{self._ymap.shape[-1]})')
        if not isinstance(normalize, bool):
            logging.warning(f'Invalid value for normalize ({normalize}) in Fit.__init__: '+
                    'setting normalize to True')
            normalize = True
        if isinstance(transpose, bool) and not transpose:
            transpose = None
        if transpose is not None and self._ymap.ndim < 3:
            logging.warning(f'Transpose meaningless for {self._ymap.ndim-1}D data maps: ignoring '+
                    'transpose')
        if transpose is not None:
            if self._ymap.ndim == 3 and isinstance(transpose, bool) and transpose:
                self._transpose = (1, 0)
            elif not isinstance(transpose, (tuple, list)):
                logging.warning(f'Invalid data type for transpose ({transpose}, '+
                        f'{type(transpose)}) in Fit.__init__: setting transpose to False')
            elif len(transpose) != self._ymap.ndim-1:
                logging.warning(f'Invalid dimension for transpose ({transpose}, must be equal to '+
                        f'{self._ymap.ndim-1}) in Fit.__init__: setting transpose to False')
            elif any(i not in transpose for i in range(len(transpose))):
                logging.warning(f'Invalid index in transpose ({transpose}) '+
                        f'in Fit.__init__: setting transpose to False')
            elif not all(i==transpose[i] for i in range(self._ymap.ndim-1)):
                self._transpose = transpose
        if self._transpose is not None:
            self._inv_transpose = tuple(self._transpose.index(i)
                    for i in range(len(self._transpose)))

        # Flatten the map (transpose if requested)
        # Store the flattened map in self._ymap_norm, whether normalized or not
        if self._transpose is not None:
            self._ymap_norm = np.transpose(np.asarray(self._ymap), list(self._transpose)+
                    [len(self._transpose)])
        else:
            self._ymap_norm = np.asarray(self._ymap)
        self._map_dim = int(self._ymap_norm.size/self._x.size)
        self._map_shape = self._ymap_norm.shape[:-1]
        self._ymap_norm = np.reshape(self._ymap_norm, (self._map_dim, self._x.size))

        # Check if a mask is provided
        if 'mask' in kwargs:
            self._mask = kwargs.pop('mask')
        if self._mask is None:
            ymap_min = float(self._ymap_norm.min())
            ymap_max = float(self._ymap_norm.max())
        else:
            self._mask = np.asarray(self._mask).astype(bool)
            if self._x.size != self._mask.size:
                raise ValueError(f'Inconsistent mask dimension ({self._x.size} vs '+
                        f'{self._mask.size})')
            ymap_masked = np.asarray(self._ymap_norm)[:,~self._mask]
            ymap_min = float(ymap_masked.min())
            ymap_max = float(ymap_masked.max())

        # Normalize the data
        self._y_range = ymap_max-ymap_min
        if normalize and self._y_range > 0.0:
            self._norm = (ymap_min, self._y_range)
            self._ymap_norm = (self._ymap_norm-self._norm[0])/self._norm[1]
        else:
            self._redchi_cutoff *= self._y_range**2
        if models is not None:
            if callable(models) or isinstance(models, str):
                kwargs = self.add_model(models, **kwargs)
            elif isinstance(models, (tuple, list)):
                for model in models:
                    kwargs = self.add_model(model, **kwargs)
            self.fit(**kwargs)

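# A standalone sketch of the map flattening done above: with the signal along
# the last axis, an (n0, n1, nx) stack becomes (n0*n1, nx) so every spectrum
# can be fit in a flat loop, optionally transposing the map axes first.
import numpy as np

ymap = np.arange(2*3*4).reshape(2, 3, 4)       # 2x3 map, 4-point signal
transpose = (1, 0)                             # swap the two map dimensions
flat = np.transpose(ymap, list(transpose)+[len(transpose)])
flat = np.reshape(flat, (flat.shape[0]*flat.shape[1], ymap.shape[-1]))
print(flat.shape)                              # (6, 4)
inv_transpose = tuple(transpose.index(i) for i in range(len(transpose)))
print(inv_transpose)                           # undoes the swap when reshaping back
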
    @classmethod
    def fit_map(cls, ymap, models, x=None, normalize=True, **kwargs):
        return(cls(ymap, x=x, models=models, normalize=normalize, **kwargs))

    @property
    def best_errors(self):
        return(self._best_errors)

    @property
    def best_fit(self):
        return(self._best_fit)

    @property
    def best_results(self):
        """Convert the input data array to a data set and add the fit results.
        """
        if self.best_values is None or self.best_errors is None or self.best_fit is None:
            return(None)
        if not have_xarray:
            logging.warning('Unable to load xarray module')
            return(None)
        best_values = self.best_values
        best_errors = self.best_errors
        if isinstance(self._ymap, xr.DataArray):
            best_results = self._ymap.to_dataset()
            dims = self._ymap.dims
            fit_name = f'{self._ymap.name}_fit'
        else:
            coords = {f'dim{n}_index':([f'dim{n}_index'], range(self._ymap.shape[n]))
                    for n in range(self._ymap.ndim-1)}
            coords['x'] = (['x'], self._x)
            dims = list(coords.keys())
            best_results = xr.Dataset(coords=coords)
            best_results['y'] = (dims, self._ymap)
            fit_name = 'y_fit'
        best_results[fit_name] = (dims, self.best_fit)
        if self._mask is not None:
            best_results['mask'] = self._mask
        for n in range(best_values.shape[0]):
            best_results[f'{self._best_parameters[n]}_values'] = (dims[:-1], best_values[n])
            best_results[f'{self._best_parameters[n]}_errors'] = (dims[:-1], best_errors[n])
        best_results.attrs['components'] = self.components
        return(best_results)

    @property
    def best_values(self):
        return(self._best_values)

    @property
    def chisqr(self):
        logging.warning('property chisqr not defined for fit.FitMap')
        return(None)

    @property
    def components(self):
        components = {}
        if self._result is None:
            logging.warning('Unable to collect components in FitMap.components')
            return(components)
        for component in self._result.components:
            if 'tmp_normalization_offset_c' in component.param_names:
                continue
            parameters = {}
            for name in component.param_names:
                if self._parameters[name].vary:
                    parameters[name] = {'free': True}
                elif self._parameters[name].expr is not None:
                    parameters[name] = {'free': False, 'expr': self._parameters[name].expr}
                else:
                    parameters[name] = {'free': False, 'value': self.init_parameters[name]['value']}
            expr = None
            if isinstance(component, ExpressionModel):
                name = component._name
                if name[-1] == '_':
                    name = name[:-1]
                expr = component.expr
            else:
                prefix = component.prefix
                if len(prefix):
                    if prefix[-1] == '_':
                        prefix = prefix[:-1]
                    name = f'{prefix} ({component._name})'
                else:
                    name = f'{component._name}'
            if expr is None:
                components[name] = {'parameters': parameters}
            else:
                components[name] = {'expr': expr, 'parameters': parameters}
        return(components)

    @property
    def covar(self):
        logging.warning('property covar not defined for fit.FitMap')
        return(None)

    @property
    def max_nfev(self):
        return(self._max_nfev)

    @property
    def num_func_eval(self):
        logging.warning('property num_func_eval not defined for fit.FitMap')
        return(None)

    @property
    def out_of_bounds(self):
        return(self._out_of_bounds)

    @property
    def redchi(self):
        return(self._redchi)

    @property
    def residual(self):
        if self.best_fit is None:
            return(None)
        if self._mask is None:
            return(np.asarray(self._ymap)-self.best_fit)
        else:
            ymap_flat = np.reshape(np.asarray(self._ymap), (self._map_dim, self._x.size))
            ymap_flat_masked = ymap_flat[:,~self._mask]
            ymap_masked = np.reshape(ymap_flat_masked,
                    list(self._map_shape)+[ymap_flat_masked.shape[-1]])
            return(ymap_masked-self.best_fit)

    @property
    def success(self):
        return(self._success)

    @property
    def var_names(self):
        logging.warning('property var_names not defined for fit.FitMap')
        return(None)

    @property
    def y(self):
        logging.warning('property y not defined for fit.FitMap')
        return(None)

    @property
    def ymap(self):
        return(self._ymap)

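# A hedged sketch (assuming xarray is available) of the Dataset layout that
# best_results builds for a plain ndarray input: one index coordinate per map
# dimension plus the signal coordinate 'x'.
import numpy as np
import xarray as xr

ymap = np.zeros((2, 3, 4))
x = np.linspace(0.0, 1.0, 4)
coords = {f'dim{n}_index': ([f'dim{n}_index'], range(ymap.shape[n]))
        for n in range(ymap.ndim-1)}
coords['x'] = (['x'], x)
ds = xr.Dataset(coords=coords)
ds['y'] = (list(coords.keys()), ymap)
print(ds)
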
    def best_parameters(self, dims=None):
        if dims is None:
            return(self._best_parameters)
        if not isinstance(dims, (list, tuple)) or len(dims) != len(self._map_shape):
            illegal_value(dims, 'dims', 'FitMap.best_parameters', raise_error=True)
        if self.best_values is None or self.best_errors is None:
            logging.warning(f'Unable to obtain best parameter values for dims = {dims} in '+
                    'FitMap.best_parameters')
            return({})
        # Create current parameters
        parameters = deepcopy(self._parameters)
        for n, name in enumerate(self._best_parameters):
            if self._parameters[name].vary:
                parameters[name].set(value=self.best_values[n][dims])
                parameters[name].stderr = self.best_errors[n][dims]
        parameters_dict = {}
        for name in sorted(parameters):
            if name != 'tmp_normalization_offset_c':
                par = parameters[name]
                parameters_dict[name] = {'value': par.value, 'error': par.stderr,
                        'init_value': self.init_parameters[name]['value'], 'min': par.min,
                        'max': par.max, 'vary': par.vary, 'expr': par.expr}
        return(parameters_dict)

    def freemem(self):
        if self._memfolder is None:
            return
        try:
            rmtree(self._memfolder)
            self._memfolder = None
        except:
            logging.warning('Could not clean-up automatically.')

    def plot(self, dims, y_title=None, plot_residual=False, plot_comp_legends=False,
            plot_masked_data=True):
        if not isinstance(dims, (list, tuple)) or len(dims) != len(self._map_shape):
            illegal_value(dims, 'dims', 'FitMap.plot', raise_error=True)
        if self._result is None or self.best_fit is None or self.best_values is None:
            logging.warning(f'Unable to plot fit for dims = {dims} in FitMap.plot')
            return
        if y_title is None or not isinstance(y_title, str):
            y_title = 'data'
        if self._mask is None:
            mask = np.zeros(self._x.size).astype(bool)
            plot_masked_data = False
        else:
            mask = self._mask
        plots = [(self._x, np.asarray(self._ymap[dims]), 'b.')]
        legend = [y_title]
        if plot_masked_data:
            plots += [(self._x[mask], np.asarray(self._ymap)[(*dims,mask)], 'bx')]
            legend += ['masked data']
        plots += [(self._x[~mask], self.best_fit[dims], 'k-')]
        legend += ['best fit']
        if plot_residual:
            plots += [(self._x[~mask], self.residual[dims], 'r--')]
            legend += ['residual']
        # Create current parameters
        parameters = deepcopy(self._parameters)
        for name in self._best_parameters:
            if self._parameters[name].vary:
                parameters[name].set(value=
                        self.best_values[self._best_parameters.index(name)][dims])
        for component in self._result.components:
            if 'tmp_normalization_offset_c' in component.param_names:
                continue
            if isinstance(component, ExpressionModel):
                prefix = component._name
                if prefix[-1] == '_':
                    prefix = prefix[:-1]
                modelname = f'{prefix}: {component.expr}'
            else:
                prefix = component.prefix
                if len(prefix):
                    if prefix[-1] == '_':
                        prefix = prefix[:-1]
                    modelname = f'{prefix} ({component._name})'
                else:
                    modelname = f'{component._name}'
            if len(modelname) > 20:
                modelname = f'{modelname[0:16]} ...'
            y = component.eval(params=parameters, x=self._x[~mask])
            if isinstance(y, (int, float)):
                y *= np.ones(self._x[~mask].size)
            plots += [(self._x[~mask], y, '--')]
            if plot_comp_legends:
                legend.append(modelname)
        quick_plot(tuple(plots), legend=legend, title=str(dims), block=True)

def fit(self, **kwargs):
|
|
2091
|
+
# t0 = time()
|
|
2092
|
+
# Check input parameters
|
|
2093
|
+
if self._model is None:
|
|
2094
|
+
logging.error('Undefined fit model')
|
|
2095
|
+
if 'num_proc' in kwargs:
|
|
2096
|
+
num_proc = kwargs.pop('num_proc')
|
|
2097
|
+
if not is_int(num_proc, ge=1):
|
|
2098
|
+
illegal_value(num_proc, 'num_proc', 'FitMap.fit', raise_error=True)
|
|
2099
|
+
else:
|
|
2100
|
+
num_proc = cpu_count()
|
|
2101
|
+
if num_proc > 1 and not have_joblib:
|
|
2102
|
+
logging.warning(f'Missing joblib in the conda environment, running FitMap serially')
|
|
2103
|
+
num_proc = 1
|
|
2104
|
+
if num_proc > cpu_count():
|
|
2105
|
+
logging.warning(f'The requested number of processors ({num_proc}) exceeds the maximum '+
|
|
2106
|
+
f'number of processors, num_proc reduced to ({cpu_count()})')
|
|
2107
|
+
num_proc = cpu_count()
|
|
2108
|
+
if 'try_no_bounds' in kwargs:
|
|
2109
|
+
self._try_no_bounds = kwargs.pop('try_no_bounds')
|
|
2110
|
+
if not isinstance(self._try_no_bounds, bool):
|
|
2111
|
+
illegal_value(self._try_no_bounds, 'try_no_bounds', 'FitMap.fit', raise_error=True)
|
|
2112
|
+
if 'redchi_cutoff' in kwargs:
|
|
2113
|
+
self._redchi_cutoff = kwargs.pop('redchi_cutoff')
|
|
2114
|
+
if not is_num(self._redchi_cutoff, gt=0):
|
|
2115
|
+
illegal_value(self._redchi_cutoff, 'redchi_cutoff', 'FitMap.fit', raise_error=True)
|
|
2116
|
+
if 'print_report' in kwargs:
|
|
2117
|
+
self._print_report = kwargs.pop('print_report')
|
|
2118
|
+
if not isinstance(self._print_report, bool):
|
|
2119
|
+
illegal_value(self._print_report, 'print_report', 'FitMap.fit', raise_error=True)
|
|
2120
|
+
if 'plot' in kwargs:
|
|
2121
|
+
self._plot = kwargs.pop('plot')
|
|
2122
|
+
if not isinstance(self._plot, bool):
|
|
2123
|
+
illegal_value(self._plot, 'plot', 'FitMap.fit', raise_error=True)
|
|
2124
|
+
if 'skip_init' in kwargs:
|
|
2125
|
+
self._skip_init = kwargs.pop('skip_init')
|
|
2126
|
+
if not isinstance(self._skip_init, bool):
|
|
2127
|
+
illegal_value(self._skip_init, 'skip_init', 'FitMap.fit', raise_error=True)
|
|
2128
|
+
|
|
2129
|
+
# Apply mask if supplied:
|
|
2130
|
+
if 'mask' in kwargs:
|
|
2131
|
+
self._mask = kwargs.pop('mask')
|
|
2132
|
+
if self._mask is not None:
|
|
2133
|
+
self._mask = np.asarray(self._mask).astype(bool)
|
|
2134
|
+
if self._x.size != self._mask.size:
|
|
2135
|
+
raise ValueError(f'Inconsistent x and mask dimensions ({self._x.size} vs '+
|
|
2136
|
+
f'{self._mask.size})')
|
|
2137
|
+
|
|
2138
|
+
# Add constant offset for a normalized single component model
|
|
2139
|
+
if self._result is None and self._norm is not None and self._norm[0]:
|
|
2140
|
+
self.add_model('constant', prefix='tmp_normalization_offset_', parameters={'name': 'c',
|
|
2141
|
+
'value': -self._norm[0], 'vary': False, 'norm': True})
|
|
2142
|
+
#'value': -self._norm[0]/self._norm[1], 'vary': False, 'norm': False})
|
|
2143
|
+
|
|
2144
|
+
# Adjust existing parameters for refit:
|
|
2145
|
+
if 'parameters' in kwargs:
|
|
2146
|
+
# print('\nIn FitMap before adjusting existing parameters for refit:')
|
|
2147
|
+
# self._parameters.pretty_print()
|
|
2148
|
+
# if self._result is None:
|
|
2149
|
+
# raise ValueError('Invalid parameter parameters ({parameters})')
|
|
2150
|
+
# if self._best_values is None:
|
|
2151
|
+
# raise ValueError('Valid self._best_values required for refitting in FitMap.fit')
|
|
2152
|
+
parameters = kwargs.pop('parameters')
|
|
2153
|
+
# print(f'\nparameters:\n{parameters}')
|
|
2154
|
+
if isinstance(parameters, dict):
|
|
2155
|
+
parameters = (parameters, )
|
|
2156
|
+
elif not is_dict_series(parameters):
|
|
2157
|
+
illegal_value(parameters, 'parameters', 'Fit.fit', raise_error=True)
|
|
2158
|
+
for par in parameters:
|
|
2159
|
+
name = par['name']
|
|
2160
|
+
if name not in self._parameters:
|
|
2161
|
+
raise ValueError(f'Unable to match {name} parameter {par} to an existing one')
|
|
2162
|
+
if self._parameters[name].expr is not None:
|
|
2163
|
+
raise ValueError(f'Unable to modify {name} parameter {par} (currently an '+
|
|
2164
|
+
'expression)')
|
|
2165
|
+
value = par.get('value')
|
|
2166
|
+
vary = par.get('vary')
|
|
2167
|
+
if par.get('expr') is not None:
|
|
2168
|
+
raise KeyError(f'Invalid "expr" key in {name} parameter {par}')
|
|
2169
|
+
self._parameters[name].set(value=value, vary=vary, min=par.get('min'),
|
|
2170
|
+
max=par.get('max'))
|
|
2171
|
+
# Overwrite existing best values for fixed parameters when a value is specified
|
|
2172
|
+
# print(f'best values befored resetting:\n{self._best_values}')
|
|
2173
|
+
if isinstance(value, (int, float)) and vary is False:
|
|
2174
|
+
for i, nname in enumerate(self._best_parameters):
|
|
2175
|
+
if nname == name:
|
|
2176
|
+
self._best_values[i] = value
|
|
2177
|
+
# print(f'best values after resetting (value={value}, vary={vary}):\n{self._best_values}')
|
|
2178
|
+
#RV print('\nIn FitMap after adjusting existing parameters for refit:')
|
|
2179
|
+
#RV self._parameters.pretty_print()
|
|
2180
|
+
|
|
2181
|
+
# Check for uninitialized parameters
|
|
2182
|
+
for name, par in self._parameters.items():
|
|
2183
|
+
if par.expr is None:
|
|
2184
|
+
value = par.value
|
|
2185
|
+
if value is None or np.isinf(value) or np.isnan(value):
|
|
2186
|
+
value = 1.0
|
|
2187
|
+
if self._norm is None or name not in self._parameter_norms:
|
|
2188
|
+
self._parameters[name].set(value=value)
|
|
2189
|
+
elif self._parameter_norms[name]:
|
|
2190
|
+
self._parameters[name].set(value=value*self._norm[1])
|
|
2191
|
+
|
|
2192
|
+
# Create the best parameter list, consisting of all varying parameters plus the expression
|
|
2193
|
+
# parameters in order to collect their errors
|
|
2194
|
+
if self._result is None:
|
|
2195
|
+
# Initial fit
|
|
2196
|
+
assert(self._best_parameters is None)
|
|
2197
|
+
self._best_parameters = [name for name, par in self._parameters.items()
|
|
2198
|
+
if par.vary or par.expr is not None]
|
|
2199
|
+
num_new_parameters = 0
|
|
2200
|
+
else:
|
|
2201
|
+
# Refit
|
|
2202
|
+
assert(len(self._best_parameters))
|
|
2203
|
+
self._new_parameters = [name for name, par in self._parameters.items()
|
|
2204
|
+
if name != 'tmp_normalization_offset_c' and name not in self._best_parameters and
|
|
2205
|
+
(par.vary or par.expr is not None)]
|
|
2206
|
+
num_new_parameters = len(self._new_parameters)
|
|
2207
|
+
num_best_parameters = len(self._best_parameters)
|
|
2208
|
+
|
|
2209
|
+
# Flatten and normalize the best values of the previous fit, remove the remaining results
|
|
2210
|
+
# of the previous fit
|
|
2211
|
+
if self._result is not None:
|
|
2212
|
+
# print('\nBefore flatten and normalize:')
|
|
2213
|
+
# print(f'self._best_values:\n{self._best_values}')
|
|
2214
|
+
self._out_of_bounds = None
|
|
2215
|
+
self._max_nfev = None
|
|
2216
|
+
self._redchi = None
|
|
2217
|
+
self._success = None
|
|
2218
|
+
self._best_fit = None
|
|
2219
|
+
self._best_errors = None
|
|
2220
|
+
assert(self._best_values is not None)
|
|
2221
|
+
assert(self._best_values.shape[0] == num_best_parameters)
|
|
2222
|
+
assert(self._best_values.shape[1:] == self._map_shape)
|
|
2223
|
+
if self._transpose is not None:
|
|
2224
|
+
self._best_values = np.transpose(self._best_values,
|
|
2225
|
+
[0]+[i+1 for i in self._transpose])
|
|
2226
|
+
self._best_values = [np.reshape(self._best_values[i], self._map_dim)
|
|
2227
|
+
for i in range(num_best_parameters)]
|
|
2228
|
+
if self._norm is not None:
|
|
2229
|
+
for i, name in enumerate(self._best_parameters):
|
|
2230
|
+
if self._parameter_norms.get(name, False):
|
|
2231
|
+
self._best_values[i] /= self._norm[1]
|
|
2232
|
+
#RV print('\nAfter flatten and normalize:')
|
|
2233
|
+
#RV print(f'self._best_values:\n{self._best_values}')
|
|
2234
|
+
|
|
2235
|
+
# Normalize the initial parameters (and best values for a refit)
|
|
2236
|
+
# print('\nIn FitMap before normalize:')
|
|
2237
|
+
# self._parameters.pretty_print()
|
|
2238
|
+
# print(f'\nparameter_norms:\n{self._parameter_norms}\n')
|
|
2239
|
+
self._normalize()
|
|
2240
|
+
# print('\nIn FitMap after normalize:')
|
|
2241
|
+
# self._parameters.pretty_print()
|
|
2242
|
+
# print(f'\nparameter_norms:\n{self._parameter_norms}\n')
|
|
2243
|
+
|
|
2244
|
+
# Prevent initial values from sitting at boundaries
|
|
2245
|
+
self._parameter_bounds = {name:{'min': par.min, 'max': par.max}
|
|
2246
|
+
for name, par in self._parameters.items() if par.vary}
|
|
2247
|
+
for name, par in self._parameters.items():
|
|
2248
|
+
if par.vary:
|
|
2249
|
+
par.set(value=self._reset_par_at_boundary(par, par.value))
|
|
2250
|
+
# print('\nAfter checking boundaries:')
|
|
2251
|
+
# self._parameters.pretty_print()
|
|
2252
|
+
|
|
2253
|
+
# Set parameter bounds to unbound (only use bounds when fit fails)
|
|
2254
|
+
if self._try_no_bounds:
|
|
2255
|
+
for name in self._parameter_bounds.keys():
|
|
2256
|
+
self._parameters[name].set(min=-np.inf, max=np.inf)
|
|
2257
|
+
|
|
2258
|
+
# Allocate memory to store fit results
|
|
2259
|
+
if self._mask is None:
|
|
2260
|
+
x_size = self._x.size
|
|
2261
|
+
else:
|
|
2262
|
+
x_size = self._x[~self._mask].size
|
|
2263
|
+
if num_proc == 1:
|
|
2264
|
+
self._out_of_bounds_flat = np.zeros(self._map_dim, dtype=bool)
|
|
2265
|
+
self._max_nfev_flat = np.zeros(self._map_dim, dtype=bool)
|
|
2266
|
+
self._redchi_flat = np.zeros(self._map_dim, dtype=np.float64)
|
|
2267
|
+
self._success_flat = np.zeros(self._map_dim, dtype=bool)
|
|
2268
|
+
self._best_fit_flat = np.zeros((self._map_dim, x_size),
|
|
2269
|
+
dtype=self._ymap_norm.dtype)
|
|
2270
|
+
self._best_errors_flat = [np.zeros(self._map_dim, dtype=np.float64)
|
|
2271
|
+
for _ in range(num_best_parameters+num_new_parameters)]
|
|
2272
|
+
if self._result is None:
|
|
2273
|
+
self._best_values_flat = [np.zeros(self._map_dim, dtype=np.float64)
|
|
2274
|
+
for _ in range(num_best_parameters)]
|
|
2275
|
+
else:
|
|
2276
|
+
self._best_values_flat = self._best_values
|
|
2277
|
+
self._best_values_flat += [np.zeros(self._map_dim, dtype=np.float64)
|
|
2278
|
+
for _ in range(num_new_parameters)]
|
|
2279
|
+
else:
|
|
2280
|
+
self._memfolder = './joblib_memmap'
|
|
2281
|
+
try:
|
|
2282
|
+
mkdir(self._memfolder)
|
|
2283
|
+
except FileExistsError:
|
|
2284
|
+
pass
|
|
2285
|
+
filename_memmap = path.join(self._memfolder, 'out_of_bounds_memmap')
|
|
2286
|
+
self._out_of_bounds_flat = np.memmap(filename_memmap, dtype=bool,
|
|
2287
|
+
shape=(self._map_dim), mode='w+')
|
|
2288
|
+
filename_memmap = path.join(self._memfolder, 'max_nfev_memmap')
|
|
2289
|
+
self._max_nfev_flat = np.memmap(filename_memmap, dtype=bool,
|
|
2290
|
+
shape=(self._map_dim), mode='w+')
|
|
2291
|
+
filename_memmap = path.join(self._memfolder, 'redchi_memmap')
|
|
2292
|
+
self._redchi_flat = np.memmap(filename_memmap, dtype=np.float64,
|
|
2293
|
+
shape=(self._map_dim), mode='w+')
|
|
2294
|
+
filename_memmap = path.join(self._memfolder, 'success_memmap')
|
|
2295
|
+
self._success_flat = np.memmap(filename_memmap, dtype=bool,
|
|
2296
|
+
shape=(self._map_dim), mode='w+')
|
|
2297
|
+
filename_memmap = path.join(self._memfolder, 'best_fit_memmap')
|
|
2298
|
+
self._best_fit_flat = np.memmap(filename_memmap, dtype=self._ymap_norm.dtype,
|
|
2299
|
+
shape=(self._map_dim, x_size), mode='w+')
|
|
2300
|
+
self._best_errors_flat = []
|
|
2301
|
+
for i in range(num_best_parameters+num_new_parameters):
|
|
2302
|
+
filename_memmap = path.join(self._memfolder, f'best_errors_memmap_{i}')
|
|
2303
|
+
self._best_errors_flat.append(np.memmap(filename_memmap, dtype=np.float64,
|
|
2304
|
+
shape=self._map_dim, mode='w+'))
|
|
2305
|
+
self._best_values_flat = []
|
|
2306
|
+
for i in range(num_best_parameters):
|
|
2307
|
+
filename_memmap = path.join(self._memfolder, f'best_values_memmap_{i}')
|
|
2308
|
+
self._best_values_flat.append(np.memmap(filename_memmap, dtype=np.float64,
|
|
2309
|
+
shape=self._map_dim, mode='w+'))
|
|
2310
|
+
if self._result is not None:
|
|
2311
|
+
self._best_values_flat[i][:] = self._best_values[i][:]
|
|
2312
|
+
for i in range(num_new_parameters):
|
|
2313
|
+
filename_memmap = path.join(self._memfolder,
|
|
2314
|
+
f'best_values_memmap_{i+num_best_parameters}')
|
|
2315
|
+
self._best_values_flat.append(np.memmap(filename_memmap, dtype=np.float64,
|
|
2316
|
+
shape=self._map_dim, mode='w+'))
|
|
2317
|
+
|
|
2318
|
+
        # Update the best parameter list
        if num_new_parameters:
            self._best_parameters += self._new_parameters

        # Perform the first fit to get model component info and initial parameters
        current_best_values = {}
        # print(f'0 before:\n{current_best_values}')
        # t1 = time()
        self._result = self._fit(0, current_best_values, return_result=True, **kwargs)
        # t2 = time()
        # print(f'0 after:\n{current_best_values}')
        # print('\nAfter the first fit:')
        # self._parameters.pretty_print()
        # print(self._result.fit_report(show_correl=False))

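        # Only a stripped-down ModelResult is kept for the whole map: the
        # loop below deletes everything except the metadata that is still
        # needed later (e.g. init_params and init_values), presumably to
        # keep the retained result object small.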
        # Remove all irrelevant content from self._result
        for attr in ('_abort', 'aborted', 'aic', 'best_fit', 'best_values', 'bic', 'calc_covar',
                'call_kws', 'chisqr', 'ci_out', 'col_deriv', 'covar', 'data', 'errorbars',
                'flatchain', 'ier', 'init_vals', 'init_fit', 'iter_cb', 'jacfcn', 'kws',
                'last_internal_values', 'lmdif_message', 'message', 'method', 'nan_policy',
                'ndata', 'nfev', 'nfree', 'params', 'redchi', 'reduce_fcn', 'residual', 'result',
                'scale_covar', 'show_candidates', 'success', 'userargs', 'userfcn',
                'userkws', 'values', 'var_names', 'weights', 'user_options'):
            try:
                delattr(self._result, attr)
            except AttributeError:
                # logging.warning(f'Unknown attribute {attr} in fit.FitMap._cleanup_result')
                pass

        # t3 = time()
        if num_proc == 1:
            # Perform the remaining fits serially
            for n in range(1, self._map_dim):
                # print(f'{n} before:\n{current_best_values}')
                self._fit(n, current_best_values, **kwargs)
                # print(f'{n} after:\n{current_best_values}')
        else:
            # Perform the remaining fits in parallel
            num_fit = self._map_dim-1
            # print(f'num_fit = {num_fit}')
            if num_proc > num_fit:
                logging.warning(f'The requested number of processors ({num_proc}) exceeds '
                        f'the number of fits, num_proc reduced to ({num_fit})')
                num_proc = num_fit
                num_fit_per_proc = 1
            else:
                num_fit_per_proc = round(num_fit/num_proc)
                if num_proc*num_fit_per_proc < num_fit:
                    num_fit_per_proc += 1
            # print(f'num_fit_per_proc = {num_fit_per_proc}')
            num_fit_batch = min(num_fit_per_proc, 40)
            # print(f'num_fit_batch = {num_fit_batch}')
            with Parallel(n_jobs=num_proc) as parallel:
                parallel(delayed(self._fit_parallel)(current_best_values, num_fit_batch,
                        n_start, **kwargs) for n_start in range(1, self._map_dim, num_fit_batch))
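        # Batching example (hypothetical numbers): for a 101-point map with
        # num_proc=4, num_fit=100 and num_fit_per_proc=25, so each delayed
        # call above fits a batch of 25 points, starting at n_start = 1, 26,
        # 51 and 76.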
        # t4 = time()

        # Renormalize the initial parameters for external use
        if self._norm is not None and self._normalized:
            init_values = {}
            for name, value in self._result.init_values.items():
                if name not in self._parameter_norms or self._parameters[name].expr is not None:
                    init_values[name] = value
                elif self._parameter_norms[name]:
                    init_values[name] = value*self._norm[1]
            self._result.init_values = init_values
            for name, par in self._result.init_params.items():
                if par.expr is None and self._parameter_norms.get(name, False):
                    _min = par.min
                    _max = par.max
                    value = par.value*self._norm[1]
                    if not np.isinf(_min) and abs(_min) != float_min:
                        _min *= self._norm[1]
                    if not np.isinf(_max) and abs(_max) != float_min:
                        _max *= self._norm[1]
                    par.set(value=value, min=_min, max=_max)
                    par.init_value = par.value
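        # Here self._norm holds the (offset, scale) pair used to normalize
        # the data, so parameters flagged in self._parameter_norms are mapped
        # back to the raw-data scale by multiplying with self._norm[1].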

        # Remap the best results
        # t5 = time()
        self._out_of_bounds = np.copy(np.reshape(self._out_of_bounds_flat, self._map_shape))
        self._max_nfev = np.copy(np.reshape(self._max_nfev_flat, self._map_shape))
        self._redchi = np.copy(np.reshape(self._redchi_flat, self._map_shape))
        self._success = np.copy(np.reshape(self._success_flat, self._map_shape))
        self._best_fit = np.copy(np.reshape(self._best_fit_flat,
                list(self._map_shape)+[x_size]))
        self._best_values = np.asarray([np.reshape(par, list(self._map_shape))
                for par in self._best_values_flat])
        self._best_errors = np.asarray([np.reshape(par, list(self._map_shape))
                for par in self._best_errors_flat])
        if self._inv_transpose is not None:
            self._out_of_bounds = np.transpose(self._out_of_bounds, self._inv_transpose)
            self._max_nfev = np.transpose(self._max_nfev, self._inv_transpose)
            self._redchi = np.transpose(self._redchi, self._inv_transpose)
            self._success = np.transpose(self._success, self._inv_transpose)
            self._best_fit = np.transpose(self._best_fit,
                    list(self._inv_transpose)+[len(self._inv_transpose)])
            self._best_values = np.transpose(self._best_values,
                    [0]+[i+1 for i in self._inv_transpose])
            self._best_errors = np.transpose(self._best_errors,
                    [0]+[i+1 for i in self._inv_transpose])
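        # With the results reshaped (and transposed back to the original map
        # axis order), the flat working arrays can be dropped.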
        del self._out_of_bounds_flat
        del self._max_nfev_flat
        del self._redchi_flat
        del self._success_flat
        del self._best_fit_flat
        del self._best_values_flat
        del self._best_errors_flat
        # t6 = time()

        # Restore parameter bounds and renormalize the parameters
        for name, par in self._parameter_bounds.items():
            self._parameters[name].set(min=par['min'], max=par['max'])
        self._normalized = False
        if self._norm is not None:
            for name, norm in self._parameter_norms.items():
                par = self._parameters[name]
                if par.expr is None and norm:
                    value = par.value*self._norm[1]
                    _min = par.min
                    _max = par.max
                    if not np.isinf(_min) and abs(_min) != float_min:
                        _min *= self._norm[1]
                    if not np.isinf(_max) and abs(_max) != float_min:
                        _max *= self._norm[1]
                    par.set(value=value, min=_min, max=_max)
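        # From here on self._parameters is expressed in raw-data units again,
        # with the user-supplied bounds restored.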
        # t7 = time()
        # print(f'total run time in fit: {t7-t0:.2f} seconds')
        # print(f'run time first fit: {t2-t1:.2f} seconds')
        # print(f'run time remaining fits: {t4-t3:.2f} seconds')
        # print(f'run time remapping results: {t6-t5:.2f} seconds')

        # print('\n\nAt end fit:')
        # self._parameters.pretty_print()
        # print(f'self._best_values:\n{self._best_values}\n\n')

        # Free the shared memory
        self.freemem()

    def _fit_parallel(self, current_best_values, num, n_start, **kwargs):
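        # Fit a contiguous batch of up to `num` map points starting at
        # `n_start`; this is the unit of work handed to each joblib task.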
        num = min(num, self._map_dim-n_start)
        for n in range(num):
            # print(f'{n_start+n} before:\n{current_best_values}')
            self._fit(n_start+n, current_best_values, **kwargs)
            # print(f'{n_start+n} after:\n{current_best_values}')

    def _fit(self, n, current_best_values, return_result=False, **kwargs):
        #RV print(f'\n\nstart FitMap._fit {n}\n')
        #RV print(f'current_best_values = {current_best_values}')
        #RV print(f'self._best_parameters = {self._best_parameters}')
        #RV print(f'self._new_parameters = {self._new_parameters}\n\n')
        # self._parameters.pretty_print()
        # Set parameters to current best values, but prevent them from sitting at boundaries
        if self._new_parameters is None:
            # Initial fit
            for name, value in current_best_values.items():
                par = self._parameters[name]
                par.set(value=self._reset_par_at_boundary(par, value))
        else:
            # Refit
            for i, name in enumerate(self._best_parameters):
                par = self._parameters[name]
                if name in self._new_parameters:
                    if name in current_best_values:
                        par.set(value=self._reset_par_at_boundary(par, current_best_values[name]))
                elif par.expr is None:
                    par.set(value=self._best_values[i][n])
        #RV print(f'\nbefore fit {n}')
        #RV self._parameters.pretty_print()
        if self._mask is None:
            result = self._model.fit(self._ymap_norm[n], self._parameters, x=self._x, **kwargs)
        else:
            result = self._model.fit(self._ymap_norm[n][~self._mask], self._parameters,
                    x=self._x[~self._mask], **kwargs)
        # print(f'\nafter fit {n}')
        # self._parameters.pretty_print()
        # print(result.fit_report(show_correl=False))
        out_of_bounds = False
        for name, par in self._parameter_bounds.items():
            value = result.params[name].value
            if not np.isinf(par['min']) and value < par['min']:
                out_of_bounds = True
                break
            if not np.isinf(par['max']) and value > par['max']:
                out_of_bounds = True
                break
        self._out_of_bounds_flat[n] = out_of_bounds
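        # When self._try_no_bounds is set, the fit above runs with the
        # parameter bounds lifted; only a point whose parameters stray
        # outside the nominal bounds is refit below with the bounds
        # actually enforced.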
        if self._try_no_bounds and out_of_bounds:
            # Rerun fit with parameter bounds in place
            for name, par in self._parameter_bounds.items():
                self._parameters[name].set(min=par['min'], max=par['max'])
            # Set parameters to current best values, but prevent them from sitting at boundaries
            if self._new_parameters is None:
                # Initial fit
                for name, value in current_best_values.items():
                    par = self._parameters[name]
                    par.set(value=self._reset_par_at_boundary(par, value))
            else:
                # Refit
                for i, name in enumerate(self._best_parameters):
                    par = self._parameters[name]
                    if name in self._new_parameters:
                        if name in current_best_values:
                            par.set(value=self._reset_par_at_boundary(par,
                                    current_best_values[name]))
                    elif par.expr is None:
                        par.set(value=self._best_values[i][n])
            # print('\nbefore fit')
            # self._parameters.pretty_print()
            # print(result.fit_report(show_correl=False))
            if self._mask is None:
                result = self._model.fit(self._ymap_norm[n], self._parameters, x=self._x,
                        **kwargs)
            else:
                result = self._model.fit(self._ymap_norm[n][~self._mask], self._parameters,
                        x=self._x[~self._mask], **kwargs)
            # print(f'\nafter fit {n}')
            # self._parameters.pretty_print()
            # print(result.fit_report(show_correl=False))
            out_of_bounds = False
            for name, par in self._parameter_bounds.items():
                value = result.params[name].value
                if not np.isinf(par['min']) and value < par['min']:
                    out_of_bounds = True
                    break
                if not np.isinf(par['max']) and value > par['max']:
                    out_of_bounds = True
                    break
            # print(f'{n} redchi < redchi_cutoff = {result.redchi < self._redchi_cutoff} '
            #         f'success = {result.success} out_of_bounds = {out_of_bounds}')
            # Reset parameters back to unbound
            for name in self._parameter_bounds.keys():
                self._parameters[name].set(min=-np.inf, max=np.inf)
            assert not out_of_bounds
        if result.redchi >= self._redchi_cutoff:
            result.success = False
        if result.nfev == result.max_nfev:
            # print(f'Maximum number of function evaluations reached for n = {n}')
            # logging.warning(f'Maximum number of function evaluations reached for n = {n}')
            if result.redchi < self._redchi_cutoff:
                result.success = True
            self._max_nfev_flat[n] = True
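        # A fit only counts as successful if its reduced chi-square stays
        # below self._redchi_cutoff, even when lmfit stopped because the
        # maximum number of function evaluations was reached.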
        if result.success:
            assert all(name in result.params for name in current_best_values)
            for par in result.params.values():
                if par.vary:
                    current_best_values[par.name] = par.value
        else:
            logging.warning(f'Fit for n = {n} failed: {result.lmdif_message}')
        # Renormalize the data and results
        self._renormalize(n, result)
        if self._print_report:
            print(result.fit_report(show_correl=False))
        if self._plot:
            dims = np.unravel_index(n, self._map_shape)
            if self._inv_transpose is not None:
                dims = tuple(dims[self._inv_transpose[i]] for i in range(len(dims)))
            super().plot(result=result, y=np.asarray(self._ymap[dims]), plot_comp_legends=True,
                    skip_init=self._skip_init, title=str(dims))
        #RV print(f'\n\nend FitMap._fit {n}\n')
        #RV print(f'current_best_values = {current_best_values}')
        # self._parameters.pretty_print()
        # print(result.fit_report(show_correl=False))
        #RV print(f'\nself._best_values_flat:\n{self._best_values_flat}\n\n')
        if return_result:
            return result

    def _renormalize(self, n, result):
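        # Record the per-point results in the flat arrays, mapping values,
        # errors and the best fit back to the raw-data scale when the fit
        # was performed on normalized data.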
        self._redchi_flat[n] = np.float64(result.redchi)
        self._success_flat[n] = result.success
        if self._norm is None or not self._normalized:
            self._best_fit_flat[n] = result.best_fit
            for i, name in enumerate(self._best_parameters):
                self._best_values_flat[i][n] = np.float64(result.params[name].value)
                self._best_errors_flat[i][n] = np.float64(result.params[name].stderr)
        else:
            pars = set(self._parameter_norms) & set(self._best_parameters)
            for name, par in result.params.items():
                if name in pars and self._parameter_norms[name]:
                    if par.stderr is not None:
                        par.stderr *= self._norm[1]
                    if par.expr is None:
                        par.value *= self._norm[1]
                        if self._print_report:
                            if par.init_value is not None:
                                par.init_value *= self._norm[1]
                            if not np.isinf(par.min) and abs(par.min) != float_min:
                                par.min *= self._norm[1]
                            if not np.isinf(par.max) and abs(par.max) != float_min:
                                par.max *= self._norm[1]
            self._best_fit_flat[n] = result.best_fit*self._norm[1]+self._norm[0]
            for i, name in enumerate(self._best_parameters):
                self._best_values_flat[i][n] = np.float64(result.params[name].value)
                self._best_errors_flat[i][n] = np.float64(result.params[name].stderr)
            if self._plot:
                if not self._skip_init:
                    result.init_fit = result.init_fit*self._norm[1]+self._norm[0]
                result.best_fit = np.copy(self._best_fit_flat[n])