ChessAnalysisPipeline 0.0.13__py3-none-any.whl → 0.0.15__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of ChessAnalysisPipeline has been flagged as potentially problematic; see the registry's advisory page for more details.

CHAP/utils/fit.py CHANGED
@@ -8,6 +8,7 @@ Description: General curve fitting module
8
8
  """
9
9
 
10
10
  # System modules
11
+ from collections import Counter
11
12
  from copy import deepcopy
12
13
  from logging import getLogger
13
14
  from os import (
@@ -15,10 +16,10 @@ from os import (
15
16
  mkdir,
16
17
  path,
17
18
  )
18
- from re import compile as re_compile
19
19
  from re import sub
20
20
  from shutil import rmtree
21
21
  from sys import float_info
22
+ from time import time
22
23
 
23
24
  # Third party modules
24
25
  try:
@@ -29,53 +30,24 @@ try:
29
30
  HAVE_JOBLIB = True
30
31
  except ImportError:
31
32
  HAVE_JOBLIB = False
32
- from lmfit import (
33
- Parameters,
34
- Model,
35
- )
36
- from lmfit.model import ModelResult
37
- from lmfit.models import (
38
- ConstantModel,
39
- LinearModel,
40
- QuadraticModel,
41
- PolynomialModel,
42
- ExponentialModel,
43
- StepModel,
44
- RectangleModel,
45
- ExpressionModel,
46
- GaussianModel,
47
- LorentzianModel,
48
- )
33
+ from nexusformat.nexus import NXdata
49
34
  import numpy as np
50
- try:
51
- from sympy import (
52
- diff,
53
- simplify,
54
- )
55
- except ImportError:
56
- pass
57
- try:
58
- import xarray as xr
59
- HAVE_XARRAY = True
60
- except ImportError:
61
- HAVE_XARRAY = False
62
35
 
63
36
  # Local modules
37
+ from CHAP.processor import Processor
64
38
  from CHAP.utils.general import (
65
39
  is_int,
66
40
  is_num,
67
- is_str_series,
68
41
  is_dict_series,
69
42
  is_index,
70
43
  index_nearest,
71
- input_num,
72
44
  quick_plot,
73
45
  )
74
- # eval_expr,
75
46
 
76
47
  logger = getLogger(__name__)
77
48
  FLOAT_MIN = float_info.min
78
49
  FLOAT_MAX = float_info.max
50
+ FLOAT_EPS = float_info.epsilon
79
51
 
80
52
  # sigma = fwhm_factor*fwhm
81
53
  fwhm_factor = {
@@ -96,94 +68,540 @@ height_factor = {
96
68
  }
97
69
 
98
70
 
71
class FitProcessor(Processor):
    """
    A processor to perform a fit on a data set or data map.
    """
    def process(self, data, config=None):
        """
        Fit the data and return a CHAP.utils.fit.Fit or
        CHAP.utils.fit.FitMap object depending on the dimensionality
        of the input data. The input data should contain a NeXus NXdata
        object, with properly defined signal and axis.

        :param data: Input data containing the
            nexusformat.nexus.NXdata object to fit.
        :type data: list[PipelineData]
        :param config: Fit configuration parameters, defaults to None.
        :type config: dict, optional
        :raises ValueError: Invalid input or configuration parameter.
        :raises RuntimeError: Invalid fit configuration dictionary.
        :return: The fitted data object.
        :rtype: Union[CHAP.utils.fit.Fit, CHAP.utils.fit.FitMap]
        """
        # Local modules
        from CHAP.utils.models import (
            FitConfig,
            Multipeak,
        )

        # Unwrap the PipelineData if called as a Pipeline Processor
        if not isinstance(data, (Fit, FitMap, NXdata)):
            data = self.unwrap_pipelinedata(data)[0]

        if isinstance(data, (Fit, FitMap)):

            # Refit/continue the fit with possibly updated parameters
            fit = data
            fit_config = None
            if config is not None:
                try:
                    fit_config = FitConfig(**config)
                except Exception as dict_exc:
                    raise RuntimeError(
                        f'Invalid fit configuration ({config})') from dict_exc

            fit.fit(config=fit_config)
            # Report/plot only for a single (non-map) fit; FitMap
            # handles its own reporting inside FitMap.fit()
            if not isinstance(data, FitMap) and fit_config is not None:
                if fit_config.print_report:
                    fit.print_fit_report()
                if fit_config.plot:
                    fit.plot(skip_init=True)

        else:

            # Get the default NXdata object
            try:
                nxdata = data.get_default()
            except Exception as data_exc:
                raise ValueError(
                    'Unable to obtain a default NXdata object from '
                    f'({data})') from data_exc
            if nxdata is None or nxdata.nxclass != 'NXdata':
                raise ValueError('Invalid default pathway to an NXdata '
                                 f'object in ({data})')

            # Get the fit configuration
            try:
                fit_config = self.get_config(data, 'utils.models.FitConfig')
            except Exception:
                logger.info('No valid fit config in input pipeline '
                            'data, using config parameter instead.')
                try:
                    fit_config = FitConfig(**config)
                except Exception as dict_exc:
                    raise RuntimeError(
                        f'Invalid fit configuration ({config})') from dict_exc

            # Expand multipeak model if present (at most one allowed)
            found_multipeak = False
            for i, model in enumerate(deepcopy(fit_config.models)):
                if not isinstance(model, Multipeak):
                    continue
                if found_multipeak:
                    raise ValueError(
                        f'Invalid parameter models ({fit_config.models}) '
                        '(multiple instances of multipeak not allowed)')
                parameters, models = self.create_multipeak_model(model)
                if parameters:
                    fit_config.parameters += parameters
                # Append the expanded peak models, then drop the
                # original Multipeak placeholder at its old index
                fit_config.models += models
                fit_config.models.pop(i)
                found_multipeak = True

            # Instantiate the Fit or FitMap object and fit the data
            if np.squeeze(nxdata.nxsignal).ndim == 1:
                fit = Fit(nxdata, fit_config)
                fit.fit()
                if fit_config.print_report:
                    fit.print_fit_report()
                if fit_config.plot:
                    fit.plot(skip_init=True)
            else:
                fit = FitMap(nxdata, fit_config)
                fit.fit(
                    rel_height_cutoff=fit_config.rel_height_cutoff,
                    num_proc=fit_config.num_proc, plot=fit_config.plot,
                    print_report=fit_config.print_report)

        return fit

    @staticmethod
    def create_multipeak_model(model_config):
        """
        Expand a Multipeak model configuration into a list of
        individual Gaussian peak models.

        :param model_config: The multipeak model configuration; its
            fit_type may be downgraded in place to 'unconstrained'
            for a single peak.
        :type model_config: CHAP.utils.models.Multipeak
        :return: Extra shared fit parameters and one Gaussian model
            per peak center.
        :rtype: tuple[list, list]
        """
        # Local modules
        from CHAP.utils.models import (
            FitParameter,
            Gaussian,
        )

        parameters = []
        models = []
        num_peak = len(model_config.centers)
        if num_peak == 1 and model_config.fit_type == 'uniform':
            logger.debug('Ignoring fit_type input for fitting one peak')
            model_config.fit_type = 'unconstrained'

        # Translate the FWHM bounds into sigma bounds for the chosen
        # peak shape using the module-level fwhm_factor expressions
        sig_min = FLOAT_MIN
        sig_max = np.inf
        if (model_config.fwhm_min is not None
                or model_config.fwhm_max is not None):
            # Third party modules
            from asteval import Interpreter
            ast = Interpreter()

            if model_config.fwhm_min is not None:
                ast(f'fwhm = {model_config.fwhm_min}')
                sig_min = ast(fwhm_factor[model_config.peak_models])
            if model_config.fwhm_max is not None:
                ast(f'fwhm = {model_config.fwhm_max}')
                sig_max = ast(fwhm_factor[model_config.peak_models])

        uniform = model_config.fit_type == 'uniform'
        if uniform:
            # One common scale factor ties all peak centers together
            parameters.append(FitParameter(
                name='scale_factor', value=1.0, min=FLOAT_MIN))
        for i, cen in enumerate(model_config.centers):
            prefix = f'peak{i+1}_' if num_peak > 1 else ''
            if uniform:
                center_par = {
                    'name': 'center', 'expr': f'scale_factor*{cen}'}
            else:
                if model_config.centers_range is None:
                    cen_min = None
                    cen_max = None
                else:
                    cen_min = cen - model_config.centers_range
                    cen_max = cen + model_config.centers_range
                center_par = {
                    'name': 'center', 'value': cen, 'min': cen_min,
                    'max': cen_max}
            models.append(Gaussian(
                model='gaussian',
                prefix=prefix,
                parameters=[
                    {'name': 'amplitude', 'min': FLOAT_MIN},
                    center_par,
                    {'name': 'sigma', 'min': sig_min, 'max': sig_max}]))

        return parameters, models
242
+
243
+
244
class Component():
    """
    A single callable component of a composite fit model, binding the
    model function to its prefixed parameter names.
    """
    def __init__(self, model, prefix=''):
        # Local modules
        from CHAP.utils.models import models

        self.prefix = prefix
        self._name = model.model
        # Look up the model's evaluation function by its model name
        self.func = models[self._name]
        self.param_names = []
        for par in model.parameters:
            self.param_names.append(f'{prefix}{par.name}')
253
+
254
+
255
class Components(dict):
    """
    A dictionary of Component objects, keyed by component name.
    """
    def __init__(self):
        # Note: dict.__init__ must not receive self as the mapping
        # argument; start out empty
        super().__init__()

    def __setitem__(self, key, value):
        """
        Store a Component under a validated string key and rename the
        component to match its key.

        :raises KeyError: If the key is not a string.
        :raises ValueError: If the value is not a Component.
        """
        if not isinstance(key, str):
            raise KeyError(f'Invalid component name ({key})')
        if not isinstance(value, Component):
            raise ValueError(f'Invalid component ({value})')
        dict.__setitem__(self, key, value)
        # Keep the component's own name in sync with its key
        value.name = key

    def add(self, model, prefix=''):
        """
        Create a Component from a model configuration and register it
        under the prefixed model name.

        :raises ValueError: Invalid model or prefix.
        """
        # Local modules
        from CHAP.utils.models import model_classes

        if not isinstance(model, model_classes):
            raise ValueError(f'Invalid parameter model ({model})')
        if not isinstance(prefix, str):
            raise ValueError(f'Invalid parameter prefix ({prefix})')
        name = f'{prefix}{model.model}'
        self.__setitem__(name, Component(model, prefix))

    @property
    def components(self):
        """Return the registered Component objects."""
        return self.values()
281
+
282
+
283
class Parameters(dict):
    """
    A dictionary of FitParameter objects, mimicking the functionality
    of a similarly named class in the lmfit library.
    """
    def __init__(self):
        super().__init__()

    def __setitem__(self, key, value):
        """
        Store a FitParameter under a validated, unique string key and
        rename the parameter to match its key.

        :raises KeyError: If the key is not a string or is already
            present.
        :raises ValueError: If the value is not a FitParameter.
        """
        # Local modules
        from CHAP.utils.models import FitParameter

        if not isinstance(key, str):
            raise KeyError(f'Invalid FitParameter name ({key})')
        if key in self:
            raise KeyError(f'Duplicate name for FitParameter ({key})')
        if not isinstance(value, FitParameter):
            raise ValueError(f'Invalid FitParameter ({value})')
        dict.__setitem__(self, key, value)
        # Keep the parameter's own name in sync with its key
        value.name = key

    def add(self, parameter, prefix=''):
        """
        Add a fit parameter.

        :param parameter: The fit parameter to add to the dictionary,
            either as a FitParameter instance or as a bare parameter
            name.
        :type parameter: Union[str, FitParameter]
        :param prefix: The prefix for the model to which this
            parameter belongs, defaults to `''`.
        :type prefix: str, optional
        :raises ValueError: If parameter is neither a string nor a
            FitParameter.
        """
        # Local modules
        from CHAP.utils.models import FitParameter

        if isinstance(parameter, FitParameter):
            name = f'{prefix}{parameter.name}'
            self.__setitem__(name, parameter)
        elif isinstance(parameter, str):
            name = f'{prefix}{parameter}'
            self.__setitem__(name, FitParameter(name=name))
        else:
            raise ValueError(f'Invalid parameter ({parameter})')
        # Record which model prefix the parameter belongs to
        setattr(self[name], '_prefix', prefix)
327
+
328
+
329
class ModelResult():
    """
    The result of a model fit, mimicking the functionality of a
    similarly named class in the lmfit library.
    """
    def __init__(
            self, model, parameters, x=None, y=None, method=None, ast=None,
            res_par_exprs=None, res_par_indices=None, res_par_names=None,
            result=None):
        # model: composite model whose .components are kept for later
        #     evaluation
        # parameters: Parameters dict; deep-copied so the caller's
        #     parameters are not mutated by the result bookkeeping
        # x, y: independent/dependent data of the fit; x is None for a
        #     placeholder (unfitted) result
        # method: 'leastsq' for a scipy.optimize.leastsq-style tuple
        #     result; any other value assumes an OptimizeResult-like
        #     object with .x/.status/.message/.nfev/.fun/.success
        # ast: expression interpreter (presumably asteval — TODO
        #     confirm) used to evaluate parameter expressions
        # res_par_exprs, res_par_indices, res_par_names: bookkeeping
        #     for expression-bound and free parameters in the result
        # result: the raw optimizer output
        self.components = model.components
        self.params = deepcopy(parameters)
        if x is None:
            # No data: mark as an unsuccessful placeholder and stop
            self.success = False
            return
        if method == 'leastsq':
            # scipy.optimize.leastsq(full_output=1) returns the tuple
            # (x, cov_x, infodict, mesg, ier)
            best_pars = result[0]
            self.ier = result[4]
            self.message = result[3]
            self.nfev = result[2]['nfev']
            self.residual = result[2]['fvec']
            # leastsq signals success with ier values 1 through 4
            self.success = 1 <= result[4] <= 4
        else:
            best_pars = result.x
            self.ier = result.status
            self.message = result.message
            self.nfev = result.nfev
            self.residual = result.fun
            self.success = result.success
        # residual is defined as (fit - data), so data + residual
        # reconstructs the fitted curve
        self.best_fit = y + self.residual
        self.method = method
        self.ndata = len(self.residual)
        self.nvarys = len(res_par_indices)
        self.x = x
        self._ast = ast
        self._expr_pars = {}

        # Get the covariance matrix
        self.chisqr = (self.residual**2).sum()
        self.redchi = self.chisqr / (self.ndata-self.nvarys)
        self.covar = None
        if method == 'leastsq':
            # cov_x from leastsq must be scaled by the reduced
            # chi-square to get the parameter covariance
            if result[1] is not None:
                self.covar = result[1]*self.redchi
        else:
            try:
                # Approximate the covariance from the Jacobian as
                # redchi * (J^T J)^-1; may fail for singular J
                self.covar = self.redchi * np.linalg.inv(
                    np.dot(result.jac.T, result.jac))
            except:
                self.covar = None

        # Update the fit parameters with the fit result
        par_names = list(self.params.keys())
        self.var_names = []
        for i, (value, index) in enumerate(zip(best_pars, res_par_indices)):
            par = self.params[par_names[index]]
            par.set(value=value)
            stderr = None
            if self.covar is not None:
                # Variance of the i-th free parameter
                stderr = self.covar[i,i]
            if stderr is not None:
                if stderr < 0.0:
                    # Negative variance is unphysical; discard
                    stderr = None
                else:
                    stderr = np.sqrt(stderr)
            # NOTE(review): stderr is computed here but never stored
            # on par (unlike the expression-parameter loop below) —
            # looks like a dropped setattr; confirm intent
            self.var_names.append(par.name)
        if res_par_exprs:
            # Third party modules
            from sympy import diff
            # Make the best-fit values visible to the expression
            # interpreter before evaluating dependent parameters
            for value, name in zip(best_pars, res_par_names):
                self._ast.symtable[name] = value
            for par_expr in res_par_exprs:
                name = par_names[par_expr['index']]
                expr = par_expr['expr']
                par = self.params[name]
                par.set(value=self._ast.eval(expr))
                self._expr_pars[name] = expr
                stderr = None
                if self.covar is not None:
                    # First-order error propagation:
                    # var = sum_ij (de/dp_i)(de/dp_j) cov_ij
                    stderr = 0
                    for i, name in enumerate(self.var_names):
                        d = diff(expr, name)
                        if not d:
                            continue
                        for ii, nname in enumerate(self.var_names):
                            dd = diff(expr, nname)
                            if not dd:
                                continue
                            stderr += (self._ast.eval(str(d))
                                       * self._ast.eval(str(dd))
                                       * self.covar[i,ii])
                    stderr = np.sqrt(stderr)
                setattr(par, '_stderr', stderr)

    def eval_components(self, x=None, parameters=None):
        """
        Evaluate each component of a composite model function.

        :param x: Independent variable, defaults to `None`, in which
            case the class variable x is used.
        :type x: Union[list, np.ndarray], optional
        :param parameters: Composite model parameters, defaults to
            None, in which case the class variable params is used.
        :type parameters: Parameters, optional
        :return: A dictionary with component name and evaluated
            function values key, value pairs.
        :rtype: dict
        """
        if x is None:
            x = self.x
        if parameters is None:
            parameters = self.params
        result = {}
        for component in self.components:
            # Skip the internal normalization-offset component; it is
            # bookkeeping, not part of the physical model
            if 'tmp_normalization_offset_c' in component.param_names:
                continue
            par_values = tuple(
                parameters[par].value for par in component.param_names)
            # Prefer the prefix as the key; fall back to the model
            # name for an unprefixed component
            if component.prefix == '':
                name = component._name
            else:
                name = component.prefix
            result[name] = component.func(x, *par_values)
        return result

    def fit_report(self, show_correl=False):
        """
        Generates a report of the fitting results with their best
        parameter values and uncertainties.

        :param show_correl: Whether to show list of correlations,
            defaults to `False`.
        :type show_correl: bool, optional
        :return: The formatted, multi-line fit report.
        :rtype: str
        """
        # Local modules
        from CHAP.utils.general import (
            getfloat_attr,
            gformat,
        )

        buff = []
        add = buff.append
        parnames = list(self.params.keys())
        # Pad parameter names so the value columns line up
        namelen = max(len(n) for n in parnames)

        add("[[Fit Statistics]]")
        add(f"    # fitting method    = {self.method}")
        add(f"    # function evals    = {getfloat_attr(self, 'nfev')}")
        add(f"    # data points       = {getfloat_attr(self, 'ndata')}")
        add(f"    # variables         = {getfloat_attr(self, 'nvarys')}")
        add(f"    chi-square          = {getfloat_attr(self, 'chisqr')}")
        add(f"    reduced chi-square  = {getfloat_attr(self, 'redchi')}")
        # add(f"    Akaike info crit    = {getfloat_attr(self, 'aic')}")
        # add(f"    Bayesian info crit  = {getfloat_attr(self, 'bic')}")
        # if hasattr(self, 'rsquared'):
        #     add(f"    R-squared           = {getfloat_attr(self, 'rsquared')}")

        add("[[Variables]]")
        for name in parnames:
            par = self.params[name]
            space = ' '*(namelen-len(name))
            nout = f'{name}:{space}'
            inval = '(init = ?)'
            if par.init_value is not None:
                inval = f'(init = {par.init_value:.7g})'
            # Expression parameters report their evaluated expression
            expr = self._expr_pars.get(name, par.expr)
            if expr is not None:
                val = self._ast.eval(expr)
            else:
                val = par.value
            # NOTE(review): the gformat call below overwrites the
            # expression-evaluated val with par.value — confirm
            # whether expression values should be formatted instead
            try:
                val = gformat(par.value)
            except (TypeError, ValueError):
                val = ' Non Numeric Value?'
            if par.stderr is not None:
                serr = gformat(par.stderr)
                try:
                    spercent = f'({abs(par.stderr/par.value):.2%})'
                except ZeroDivisionError:
                    spercent = ''
                val = f'{val} +/-{serr} {spercent}'
            if par.vary:
                add(f'    {nout} {val} {inval}')
            elif expr is not None:
                add(f"    {nout} {val} == '{expr}'")
            else:
                add(f'    {nout} {par.value:.7g} (fixed)')

        return '\n'.join(buff)
517
+
518
+
99
519
  class Fit:
100
520
  """
101
- Wrapper class for lmfit.
521
+ Wrapper class for scipy/lmfit.
102
522
  """
103
- def __init__(self, y, x=None, models=None, normalize=True, **kwargs):
523
+ def __init__(self, nxdata, config):
104
524
  """Initialize Fit."""
105
- # Third party modules
106
- if not isinstance(normalize, bool):
107
- raise ValueError(f'Invalid parameter normalize ({normalize})')
108
- self._fit_type = None
525
+ self._code = config.code
526
+ for model in config.models:
527
+ if model.model == 'expression' and self._code != 'lmfit':
528
+ self._code = 'lmfit'
529
+ logger.warning('Using lmfit instead of scipy with '
530
+ 'an expression model')
531
+ if self._code == 'scipy':
532
+ # Local modules
533
+ from CHAP.utils.fit import Parameters
534
+ else:
535
+ # Third party modules
536
+ from lmfit import Parameters
109
537
  self._mask = None
538
+ self._method = config.method
110
539
  self._model = None
111
540
  self._norm = None
112
541
  self._normalized = False
542
+ self._free_parameters = []
113
543
  self._parameters = Parameters()
544
+ if self._code == 'scipy':
545
+ self._ast = None
546
+ self._res_num_pars = []
547
+ self._res_par_exprs = []
548
+ self._res_par_indices = []
549
+ self._res_par_names = []
550
+ self._res_par_values = []
114
551
  self._parameter_bounds = None
115
- self._parameter_norms = {}
116
552
  self._linear_parameters = []
117
553
  self._nonlinear_parameters = []
118
554
  self._result = None
119
- self._try_linear_fit = True
120
- self._param_constraint = None
121
- self._fwhm_min = None
122
- self._fwhm_max = None
123
- self._sigma_min = None
124
- self._sigma_max = None
555
+ # self._try_linear_fit = True
556
+ # self._fwhm_min = None
557
+ # self._fwhm_max = None
558
+ # self._sigma_min = None
559
+ # self._sigma_max = None
560
+ self._x = None
125
561
  self._y = None
126
562
  self._y_norm = None
127
563
  self._y_range = None
128
- if 'try_linear_fit' in kwargs:
129
- self._try_linear_fit = kwargs.pop('try_linear_fit')
130
- if not isinstance(self._try_linear_fit, bool):
131
- raise ValueError(
132
- 'Invalid value of keyword argument try_linear_fit '
133
- f'({self._try_linear_fit})')
134
- if y is not None:
135
- if isinstance(y, (tuple, list, np.ndarray)):
136
- self._x = np.asarray(x)
137
- self._y = np.asarray(y)
138
- elif HAVE_XARRAY and isinstance(y, xr.DataArray):
139
- if x is not None:
140
- logger.warning('Ignoring superfluous input x ({x})')
141
- if y.ndim != 1:
142
- raise ValueError(
143
- 'Invalid DataArray dimensions for parameter y '
144
- f'({y.ndim})')
145
- self._x = np.asarray(y[y.dims[0]])
146
- self._y = y
564
+ # if 'try_linear_fit' in kwargs:
565
+ # self._try_linear_fit = kwargs.pop('try_linear_fit')
566
+ # if not isinstance(self._try_linear_fit, bool):
567
+ # raise ValueError(
568
+ # 'Invalid value of keyword argument try_linear_fit '
569
+ # f'({self._try_linear_fit})')
570
+ if nxdata is not None:
571
+ if isinstance(nxdata.attrs['axes'], str):
572
+ dim_x = nxdata.attrs['axes']
147
573
  else:
148
- raise ValueError(f'Invalid parameter y ({y})')
574
+ dim_x = nxdata.attrs['axes'][-1]
575
+ self._x = np.asarray(nxdata[dim_x])
576
+ self._y = np.squeeze(nxdata.nxsignal)
149
577
  if self._x.ndim != 1:
150
578
  raise ValueError(
151
- f'Invalid dimension for input x ({self._x.ndim})')
579
+ f'Invalid x dimension ({self._x.ndim})')
152
580
  if self._x.size != self._y.size:
153
581
  raise ValueError(
154
582
  f'Inconsistent x and y dimensions ({self._x.size} vs '
155
583
  f'{self._y.size})')
156
- if 'mask' in kwargs:
157
- self._mask = kwargs.pop('mask')
158
- if self._mask is None:
584
+ # if 'mask' in kwargs:
585
+ # self._mask = kwargs.pop('mask')
586
+ if True: #self._mask is None:
159
587
  y_min = float(self._y.min())
160
588
  self._y_range = float(self._y.max())-y_min
161
- if normalize and self._y_range > 0.0:
589
+ if self._y_range > 0.0:
162
590
  self._norm = (y_min, self._y_range)
163
- else:
164
- self._mask = np.asarray(self._mask).astype(bool)
165
- if self._x.size != self._mask.size:
166
- raise ValueError(
167
- f'Inconsistent x and mask dimensions ({self._x.size} '
168
- f'vs {self._mask.size})')
169
- y_masked = np.asarray(self._y)[~self._mask]
170
- y_min = float(y_masked.min())
171
- self._y_range = float(y_masked.max())-y_min
172
- if normalize and self._y_range > 0.0:
173
- if normalize and self._y_range > 0.0:
174
- self._norm = (y_min, self._y_range)
175
- if models is not None:
176
- if callable(models) or isinstance(models, str):
177
- kwargs = self.add_model(models, **kwargs)
178
- elif isinstance(models, (tuple, list)):
179
- for model in models:
180
- kwargs = self.add_model(model, **kwargs)
181
- self.fit(**kwargs)
182
-
183
- @classmethod
184
- def fit_data(cls, y, models, x=None, normalize=True, **kwargs):
185
- """Class method for Fit."""
186
- return cls(y, x=x, models=models, normalize=normalize, **kwargs)
591
+ # else:
592
+ # self._mask = np.asarray(self._mask).astype(bool)
593
+ # if self._x.size != self._mask.size:
594
+ # raise ValueError(
595
+ # f'Inconsistent x and mask dimensions ({self._x.size} '
596
+ # f'vs {self._mask.size})')
597
+ # y_masked = np.asarray(self._y)[~self._mask]
598
+ # y_min = float(y_masked.min())
599
+ # self._y_range = float(y_masked.max())-y_min
600
+ # if self._y_range > 0.0:
601
+ # self._norm = (y_min, self._y_range)
602
+
603
+ # Setup fit model
604
+ self._setup_fit_model(config.parameters, config.models)
187
605
 
188
606
  @property
189
607
  def best_errors(self):
@@ -201,6 +619,7 @@ class Fit:
201
619
  return None
202
620
  return self._result.best_fit
203
621
 
622
+ @property
204
623
  def best_parameters(self):
205
624
  """Return the best fit parameters."""
206
625
  if self._result is None:
@@ -219,39 +638,6 @@ class Fit:
219
638
  }
220
639
  return parameters
221
640
 
222
- @property
223
- def best_results(self):
224
- """
225
- Convert the input DataArray to a data set and add the fit
226
- results.
227
- """
228
- if self._result is None:
229
- return None
230
- if not HAVE_XARRAY:
231
- logger.warning(
232
- 'fit.best_results requires xarray in the conda environment')
233
- return None
234
- if isinstance(self._y, xr.DataArray):
235
- best_results = self._y.to_dataset()
236
- dims = self._y.dims
237
- fit_name = f'{self._y.name}_fit'
238
- else:
239
- coords = {'x': (['x'], self._x)}
240
- dims = ('x',)
241
- best_results = xr.Dataset(coords=coords)
242
- best_results['y'] = (dims, self._y)
243
- fit_name = 'y_fit'
244
- best_results[fit_name] = (dims, self.best_fit)
245
- if self._mask is not None:
246
- best_results['mask'] = self._mask
247
- best_results.coords['par_names'] = ('peak', self.best_values.keys())
248
- best_results['best_values'] = \
249
- (['par_names'], self.best_values.values())
250
- best_results['best_errors'] = \
251
- (['par_names'], self.best_errors.values())
252
- best_results.attrs['components'] = self.components
253
- return best_results
254
-
255
641
  @property
256
642
  def best_values(self):
257
643
  """Return values of the best fit parameters."""
@@ -270,6 +656,9 @@ class Fit:
270
656
 
271
657
  @property
272
658
  def components(self):
659
+ # Third party modules
660
+ from lmfit.models import ExpressionModel
661
+
273
662
  """Return the fit model components info."""
274
663
  components = {}
275
664
  if self._result is None:
@@ -355,10 +744,10 @@ class Fit:
355
744
  return 0.0
356
745
  if self._result.init_params is not None:
357
746
  normalization_offset = float(
358
- self._result.init_params['tmp_normalization_offset_c'])
747
+ self._result.init_params['tmp_normalization_offset_c'].value)
359
748
  else:
360
749
  normalization_offset = float(
361
- self._result.params['tmp_normalization_offset_c'])
750
+ self._result.params['tmp_normalization_offset_c'].value)
362
751
  return normalization_offset
363
752
 
364
753
  @property
@@ -389,7 +778,9 @@ class Fit:
389
778
  """Return the residual in the best fit."""
390
779
  if self._result is None:
391
780
  return None
392
- return self._result.residual
781
+ # lmfit return the negative of the residual in its common
782
+ # definition as (data - fit)
783
+ return -self._result.residual
393
784
 
394
785
  @property
395
786
  def success(self):
@@ -399,7 +790,8 @@ class Fit:
399
790
  if not self._result.success:
400
791
  logger.warning(
401
792
  f'ier = {self._result.ier}: {self._result.message}')
402
- if self._result.ier and self._result.ier != 5:
793
+ if (self._code == 'lmfit' and self._result.ier
794
+ and self._result.ier != 5):
403
795
  return True
404
796
  return self._result.success
405
797
 
@@ -429,692 +821,259 @@ class Fit:
429
821
  if result is not None:
430
822
  print(result.fit_report(show_correl=show_correl))
431
823
 
432
- def add_parameter(self, **parameter):
433
- """Add a fit fit parameter to the fit model."""
434
- if not isinstance(parameter, dict):
435
- raise ValueError(f'Invalid parameter ({parameter})')
824
+ def add_parameter(self, parameter):
825
+ # Local modules
826
+ from CHAP.utils.models import FitParameter
827
+
828
+ """Add a fit parameter to the fit model."""
436
829
  if parameter.get('expr') is not None:
437
830
  raise KeyError(f'Invalid "expr" key in parameter {parameter}')
438
831
  name = parameter['name']
439
- if not isinstance(name, str):
440
- raise ValueError(
441
- f'Invalid "name" value ({name}) in parameter {parameter}')
442
- if parameter.get('norm') is None:
443
- self._parameter_norms[name] = False
832
+ if not parameter['vary']:
833
+ logger.warning(
834
+ f'Ignoring min in parameter {name} in '
835
+ f'Fit.add_parameter (vary = {parameter["vary"]})')
836
+ parameter['min'] = -np.inf
837
+ logger.warning(
838
+ f'Ignoring max in parameter {name} in '
839
+ f'Fit.add_parameter (vary = {parameter["vary"]})')
840
+ parameter['max'] = np.inf
841
+ if self._code == 'scipy':
842
+ self._parameters.add(FitParameter(**parameter))
444
843
  else:
445
- norm = parameter.pop('norm')
446
- if self._norm is None:
447
- logger.warning(
448
- f'Ignoring norm in parameter {name} in Fit.add_parameter '
449
- '(normalization is turned off)')
450
- self._parameter_norms[name] = False
451
- else:
452
- if not isinstance(norm, bool):
453
- raise ValueError(
454
- f'Invalid "norm" value ({norm}) in parameter '
455
- f'{parameter}')
456
- self._parameter_norms[name] = norm
457
- vary = parameter.get('vary')
458
- if vary is not None:
459
- if not isinstance(vary, bool):
460
- raise ValueError(
461
- f'Invalid "vary" value ({vary}) in parameter {parameter}')
462
- if not vary:
463
- if 'min' in parameter:
464
- logger.warning(
465
- f'Ignoring min in parameter {name} in '
466
- f'Fit.add_parameter (vary = {vary})')
467
- parameter.pop('min')
468
- if 'max' in parameter:
469
- logger.warning(
470
- f'Ignoring max in parameter {name} in '
471
- f'Fit.add_parameter (vary = {vary})')
472
- parameter.pop('max')
473
- if self._norm is not None and name not in self._parameter_norms:
474
- raise ValueError(
475
- f'Missing parameter normalization type for parameter {name}')
476
- self._parameters.add(**parameter)
844
+ self._parameters.add(**parameter)
845
+ self._free_parameters.append(name)
477
846
 
478
- def add_model(
479
- self, model, prefix=None, parameters=None, parameter_norms=None,
480
- **kwargs):
847
+ def add_model(self, model, prefix):
481
848
  """Add a model component to the fit model."""
482
- # Third party modules
483
- from asteval import (
484
- Interpreter,
485
- get_ast_names,
849
+ if self._code == 'lmfit':
850
+ from lmfit.models import (
851
+ ConstantModel,
852
+ LinearModel,
853
+ QuadraticModel,
854
+ # PolynomialModel,
855
+ ExponentialModel,
856
+ GaussianModel,
857
+ LorentzianModel,
858
+ ExpressionModel,
859
+ # StepModel,
860
+ RectangleModel,
486
861
  )
487
862
 
488
- if prefix is not None and not isinstance(prefix, str):
489
- logger.warning('Ignoring illegal prefix: {model} {type(model)}')
490
- prefix = None
863
+ if model.model == 'expression':
864
+ expr = model.expr
865
+ else:
866
+ expr = None
867
+ parameters = model.parameters
868
+ model_name = model.model
869
+
491
870
  if prefix is None:
492
871
  pprefix = ''
493
872
  else:
494
873
  pprefix = prefix
495
- if parameters is not None:
496
- if isinstance(parameters, dict):
497
- parameters = (parameters, )
498
- elif is_dict_series(parameters):
499
- parameters = deepcopy(parameters)
500
- else:
501
- raise ValueError('Invalid parameter parameters ({parameters})')
502
- if parameter_norms is not None:
503
- if isinstance(parameter_norms, dict):
504
- parameter_norms = (parameter_norms, )
505
- if not is_dict_series(parameter_norms):
506
- raise ValueError(
507
- 'Invalid parameter parameters_norms ({parameters_norms})')
508
- new_parameter_norms = {}
509
- if callable(model):
510
- # Linear fit not yet implemented for callable models
511
- self._try_linear_fit = False
512
- if parameter_norms is None:
513
- if parameters is None:
514
- raise ValueError(
515
- 'Either parameters or parameter_norms is required in '
516
- f'{model}')
517
- for par in parameters:
518
- name = par['name']
519
- if not isinstance(name, str):
520
- raise ValueError(
521
- f'Invalid "name" value ({name}) in input '
522
- 'parameters')
523
- if par.get('norm') is not None:
524
- norm = par.pop('norm')
525
- if not isinstance(norm, bool):
526
- raise ValueError(
527
- f'Invalid "norm" value ({norm}) in input '
528
- 'parameters')
529
- new_parameter_norms[f'{pprefix}{name}'] = norm
530
- else:
531
- for par in parameter_norms:
532
- name = par['name']
533
- if not isinstance(name, str):
534
- raise ValueError(
535
- f'Invalid "name" value ({name}) in input '
536
- 'parameters')
537
- norm = par.get('norm')
538
- if norm is None or not isinstance(norm, bool):
539
- raise ValueError(
540
- f'Invalid "norm" value ({norm}) in input '
541
- 'parameters')
542
- new_parameter_norms[f'{pprefix}{name}'] = norm
543
- if parameters is not None:
544
- for par in parameters:
545
- if par.get('expr') is not None:
546
- raise KeyError(
547
- f'Invalid "expr" key ({par.get("expr")}) in '
548
- f'parameter {name} for a callable model {model}')
549
- name = par['name']
550
- if not isinstance(name, str):
551
- raise ValueError(
552
- f'Invalid "name" value ({name}) in input '
553
- 'parameters')
554
- # RV callable model will need partial deriv functions for any linear
555
- # parameter to get the linearized matrix, so for now skip linear
556
- # solution option
557
- newmodel = Model(model, prefix=prefix)
558
- elif isinstance(model, str):
559
- if model == 'constant':
560
- # Par: c
874
+ if self._code == 'scipy':
875
+ new_parameters = []
876
+ for par in deepcopy(parameters):
877
+ self._parameters.add(par, pprefix)
878
+ if self._parameters[par.name].expr is None:
879
+ self._parameters[par.name].set(value=par.default)
880
+ new_parameters.append(par.name)
881
+ self._res_num_pars += [len(parameters)]
882
+
883
+ if model_name == 'constant':
884
+ # Par: c
885
+ if self._code == 'lmfit':
561
886
  newmodel = ConstantModel(prefix=prefix)
562
- new_parameter_norms[f'{pprefix}c'] = True
563
- self._linear_parameters.append(f'{pprefix}c')
564
- elif model == 'linear':
565
- # Par: slope, intercept
887
+ self._linear_parameters.append(f'{pprefix}c')
888
+ elif model_name == 'linear':
889
+ # Par: slope, intercept
890
+ if self._code == 'lmfit':
566
891
  newmodel = LinearModel(prefix=prefix)
567
- new_parameter_norms[f'{pprefix}slope'] = True
568
- new_parameter_norms[f'{pprefix}intercept'] = True
569
- self._linear_parameters.append(f'{pprefix}slope')
570
- self._linear_parameters.append(f'{pprefix}intercept')
571
- elif model == 'quadratic':
572
- # Par: a, b, c
892
+ self._linear_parameters.append(f'{pprefix}slope')
893
+ self._linear_parameters.append(f'{pprefix}intercept')
894
+ elif model_name == 'quadratic':
895
+ # Par: a, b, c
896
+ if self._code == 'lmfit':
573
897
  newmodel = QuadraticModel(prefix=prefix)
574
- new_parameter_norms[f'{pprefix}a'] = True
575
- new_parameter_norms[f'{pprefix}b'] = True
576
- new_parameter_norms[f'{pprefix}c'] = True
577
- self._linear_parameters.append(f'{pprefix}a')
578
- self._linear_parameters.append(f'{pprefix}b')
579
- self._linear_parameters.append(f'{pprefix}c')
580
- elif model == 'polynomial':
581
- # Par: c0, c1,..., c7
582
- degree = kwargs.get('degree')
583
- if degree is not None:
584
- kwargs.pop('degree')
585
- if degree is None or not is_int(degree, ge=0, le=7):
586
- raise ValueError(
587
- 'Invalid parameter degree for build-in step model '
588
- f'({degree})')
589
- newmodel = PolynomialModel(degree=degree, prefix=prefix)
590
- for i in range(degree+1):
591
- new_parameter_norms[f'{pprefix}c{i}'] = True
592
- self._linear_parameters.append(f'{pprefix}c{i}')
593
- elif model == 'gaussian':
594
- # Par: amplitude, center, sigma (fwhm, height)
898
+ self._linear_parameters.append(f'{pprefix}a')
899
+ self._linear_parameters.append(f'{pprefix}b')
900
+ self._linear_parameters.append(f'{pprefix}c')
901
+ # elif model_name == 'polynomial':
902
+ # # Par: c0, c1,..., c7
903
+ # degree = kwargs.get('degree')
904
+ # if degree is not None:
905
+ # kwargs.pop('degree')
906
+ # if degree is None or not is_int(degree, ge=0, le=7):
907
+ # raise ValueError(
908
+ # 'Invalid parameter degree for build-in step model '
909
+ # f'({degree})')
910
+ # if self._code == 'lmfit':
911
+ # newmodel = PolynomialModel(degree=degree, prefix=prefix)
912
+ # for i in range(degree+1):
913
+ # self._linear_parameters.append(f'{pprefix}c{i}')
914
+ elif model_name == 'exponential':
915
+ # Par: amplitude, decay
916
+ if self._code == 'lmfit':
917
+ newmodel = ExponentialModel(prefix=prefix)
918
+ self._linear_parameters.append(f'{pprefix}amplitude')
919
+ self._nonlinear_parameters.append(f'{pprefix}decay')
920
+ elif model_name == 'gaussian':
921
+ # Par: amplitude, center, sigma (fwhm, height)
922
+ if self._code == 'lmfit':
595
923
  newmodel = GaussianModel(prefix=prefix)
596
- new_parameter_norms[f'{pprefix}amplitude'] = True
597
- new_parameter_norms[f'{pprefix}center'] = False
598
- new_parameter_norms[f'{pprefix}sigma'] = False
599
- self._linear_parameters.append(f'{pprefix}amplitude')
600
- self._nonlinear_parameters.append(f'{pprefix}center')
601
- self._nonlinear_parameters.append(f'{pprefix}sigma')
602
924
  # parameter norms for height and fwhm are needed to
603
925
  # get correct errors
604
- new_parameter_norms[f'{pprefix}height'] = True
605
- new_parameter_norms[f'{pprefix}fwhm'] = False
606
- elif model == 'lorentzian':
607
- # Par: amplitude, center, sigma (fwhm, height)
926
+ self._linear_parameters.append(f'{pprefix}amplitude')
927
+ self._nonlinear_parameters.append(f'{pprefix}center')
928
+ self._nonlinear_parameters.append(f'{pprefix}sigma')
929
+ elif model_name == 'lorentzian':
930
+ # Par: amplitude, center, sigma (fwhm, height)
931
+ if self._code == 'lmfit':
608
932
  newmodel = LorentzianModel(prefix=prefix)
609
- new_parameter_norms[f'{pprefix}amplitude'] = True
610
- new_parameter_norms[f'{pprefix}center'] = False
611
- new_parameter_norms[f'{pprefix}sigma'] = False
612
- self._linear_parameters.append(f'{pprefix}amplitude')
613
- self._nonlinear_parameters.append(f'{pprefix}center')
614
- self._nonlinear_parameters.append(f'{pprefix}sigma')
615
933
  # parameter norms for height and fwhm are needed to
616
934
  # get correct errors
617
- new_parameter_norms[f'{pprefix}height'] = True
618
- new_parameter_norms[f'{pprefix}fwhm'] = False
619
- elif model == 'exponential':
620
- # Par: amplitude, decay
621
- newmodel = ExponentialModel(prefix=prefix)
622
- new_parameter_norms[f'{pprefix}amplitude'] = True
623
- new_parameter_norms[f'{pprefix}decay'] = False
624
- self._linear_parameters.append(f'{pprefix}amplitude')
625
- self._nonlinear_parameters.append(f'{pprefix}decay')
626
- elif model == 'step':
627
- # Par: amplitude, center, sigma
628
- form = kwargs.get('form')
629
- if form is not None:
630
- kwargs.pop('form')
631
- if (form is None or form not in
632
- ('linear', 'atan', 'arctan', 'erf', 'logistic')):
633
- raise ValueError(
634
- 'Invalid parameter form for build-in step model '
635
- f'({form})')
636
- newmodel = StepModel(prefix=prefix, form=form)
637
- new_parameter_norms[f'{pprefix}amplitude'] = True
638
- new_parameter_norms[f'{pprefix}center'] = False
639
- new_parameter_norms[f'{pprefix}sigma'] = False
640
- self._linear_parameters.append(f'{pprefix}amplitude')
641
- self._nonlinear_parameters.append(f'{pprefix}center')
642
- self._nonlinear_parameters.append(f'{pprefix}sigma')
643
- elif model == 'rectangle':
644
- # Par: amplitude, center1, center2, sigma1, sigma2
645
- form = kwargs.get('form')
646
- if form is not None:
647
- kwargs.pop('form')
648
- if (form is None or form not in
649
- ('linear', 'atan', 'arctan', 'erf', 'logistic')):
650
- raise ValueError(
651
- 'Invalid parameter form for build-in rectangle model '
652
- f'({form})')
935
+ self._linear_parameters.append(f'{pprefix}amplitude')
936
+ self._nonlinear_parameters.append(f'{pprefix}center')
937
+ self._nonlinear_parameters.append(f'{pprefix}sigma')
938
+ # elif model_name == 'step':
939
+ # # Par: amplitude, center, sigma
940
+ # form = kwargs.get('form')
941
+ # if form is not None:
942
+ # kwargs.pop('form')
943
+ # if (form is None or form not in
944
+ # ('linear', 'atan', 'arctan', 'erf', 'logistic')):
945
+ # raise ValueError(
946
+ # 'Invalid parameter form for build-in step model '
947
+ # f'({form})')
948
+ # if self._code == 'lmfit':
949
+ # newmodel = StepModel(prefix=prefix, form=form)
950
+ # self._linear_parameters.append(f'{pprefix}amplitude')
951
+ # self._nonlinear_parameters.append(f'{pprefix}center')
952
+ # self._nonlinear_parameters.append(f'{pprefix}sigma')
953
+ elif model_name == 'rectangle':
954
+ # Par: amplitude, center1, center2, sigma1, sigma2
955
+ form = 'atan' #kwargs.get('form')
956
+ #if form is not None:
957
+ # kwargs.pop('form')
958
+ # RV: Implement and test other forms when needed
959
+ if (form is None or form not in
960
+ ('linear', 'atan', 'arctan', 'erf', 'logistic')):
961
+ raise ValueError(
962
+ 'Invalid parameter form for build-in rectangle model '
963
+ f'({form})')
964
+ if self._code == 'lmfit':
653
965
  newmodel = RectangleModel(prefix=prefix, form=form)
654
- new_parameter_norms[f'{pprefix}amplitude'] = True
655
- new_parameter_norms[f'{pprefix}center1'] = False
656
- new_parameter_norms[f'{pprefix}center2'] = False
657
- new_parameter_norms[f'{pprefix}sigma1'] = False
658
- new_parameter_norms[f'{pprefix}sigma2'] = False
659
- self._linear_parameters.append(f'{pprefix}amplitude')
660
- self._nonlinear_parameters.append(f'{pprefix}center1')
661
- self._nonlinear_parameters.append(f'{pprefix}center2')
662
- self._nonlinear_parameters.append(f'{pprefix}sigma1')
663
- self._nonlinear_parameters.append(f'{pprefix}sigma2')
664
- elif model == 'expression':
665
- # Par: by expression
666
- expr = kwargs['expr']
667
- if not isinstance(expr, str):
668
- raise ValueError(
669
- f'Invalid "expr" value ({expr}) in {model}')
670
- kwargs.pop('expr')
671
- if parameter_norms is not None:
672
- logger.warning(
673
- 'Ignoring parameter_norms (normalization '
674
- 'determined from linearity)}')
675
- if parameters is not None:
676
- for par in parameters:
677
- if par.get('expr') is not None:
678
- raise KeyError(
679
- f'Invalid "expr" key ({par.get("expr")}) in '
680
- f'parameter ({par}) for an expression model')
681
- if par.get('norm') is not None:
682
- logger.warning(
683
- f'Ignoring "norm" key in parameter ({par}) '
684
- '(normalization determined from linearity)')
685
- par.pop('norm')
686
- name = par['name']
687
- if not isinstance(name, str):
688
- raise ValueError(
689
- f'Invalid "name" value ({name}) in input '
690
- 'parameters')
691
- ast = Interpreter()
692
- expr_parameters = [
693
- name for name in get_ast_names(ast.parse(expr))
694
- if (name != 'x' and name not in self._parameters
695
- and name not in ast.symtable)]
696
- if prefix is None:
697
- newmodel = ExpressionModel(expr=expr)
698
- else:
699
- for name in expr_parameters:
700
- expr = sub(rf'\b{name}\b', f'{prefix}{name}', expr)
701
- expr_parameters = [
702
- f'{prefix}{name}' for name in expr_parameters]
703
- newmodel = ExpressionModel(expr=expr, name=name)
704
- # Remove already existing names
705
- for name in newmodel.param_names.copy():
706
- if name not in expr_parameters:
707
- newmodel._func_allargs.remove(name)
708
- newmodel._param_names.remove(name)
966
+ self._linear_parameters.append(f'{pprefix}amplitude')
967
+ self._nonlinear_parameters.append(f'{pprefix}center1')
968
+ self._nonlinear_parameters.append(f'{pprefix}center2')
969
+ self._nonlinear_parameters.append(f'{pprefix}sigma1')
970
+ self._nonlinear_parameters.append(f'{pprefix}sigma2')
971
+ elif model_name == 'expression' and self._code == 'lmfit':
972
+ # Third party modules
973
+ from asteval import (
974
+ Interpreter,
975
+ get_ast_names,
976
+ )
977
+ for par in parameters:
978
+ if par.expr is not None:
979
+ raise KeyError(
980
+ f'Invalid "expr" key ({par.expr}) in '
981
+ f'parameter ({par}) for an expression model')
982
+ ast = Interpreter()
983
+ expr_parameters = [
984
+ name for name in get_ast_names(ast.parse(expr))
985
+ if (name != 'x' and name not in self._parameters
986
+ and name not in ast.symtable)]
987
+ if prefix is None:
988
+ newmodel = ExpressionModel(expr=expr)
709
989
  else:
710
- raise ValueError(f'Unknown build-in fit model ({model})')
990
+ for name in expr_parameters:
991
+ expr = sub(rf'\b{name}\b', f'{prefix}{name}', expr)
992
+ expr_parameters = [
993
+ f'{prefix}{name}' for name in expr_parameters]
994
+ newmodel = ExpressionModel(expr=expr, name=model_name)
995
+ # Remove already existing names
996
+ for name in newmodel.param_names.copy():
997
+ if name not in expr_parameters:
998
+ newmodel._func_allargs.remove(name)
999
+ newmodel._param_names.remove(name)
711
1000
  else:
712
- raise ValueError('Invalid parameter model ({model})')
1001
+ raise ValueError(f'Unknown fit model ({model_name})')
713
1002
 
714
1003
  # Add the new model to the current one
715
- if self._model is None:
716
- self._model = newmodel
1004
+ if self._code == 'scipy':
1005
+ if self._model is None:
1006
+ self._model = Components()
1007
+ self._model.add(model, prefix)
717
1008
  else:
718
- self._model += newmodel
719
- new_parameters = newmodel.make_params()
720
- self._parameters += new_parameters
1009
+ if self._model is None:
1010
+ self._model = newmodel
1011
+ else:
1012
+ self._model += newmodel
1013
+ new_parameters = newmodel.make_params()
1014
+ self._parameters += new_parameters
721
1015
 
722
1016
  # Check linearity of expression model parameters
723
- if isinstance(newmodel, ExpressionModel):
1017
+ if self._code == 'lmfit' and isinstance(newmodel, ExpressionModel):
1018
+ # Third party modules
1019
+ from sympy import diff
724
1020
  for name in newmodel.param_names:
725
1021
  if not diff(newmodel.expr, name, name):
726
1022
  if name not in self._linear_parameters:
727
1023
  self._linear_parameters.append(name)
728
- new_parameter_norms[name] = True
729
1024
  else:
730
1025
  if name not in self._nonlinear_parameters:
731
1026
  self._nonlinear_parameters.append(name)
732
- new_parameter_norms[name] = False
733
1027
 
734
1028
  # Scale the default initial model parameters
735
1029
  if self._norm is not None:
736
- for name, norm in new_parameter_norms.copy().items():
737
- par = self._parameters.get(name)
738
- if par is None:
739
- new_parameter_norms.pop(name)
740
- continue
741
- if par.expr is None and norm:
742
- value = par.value*self._norm[1]
743
- _min = par.min
744
- _max = par.max
745
- if not np.isinf(_min) and abs(_min) != FLOAT_MIN:
746
- _min *= self._norm[1]
747
- if not np.isinf(_max) and abs(_max) != FLOAT_MIN:
748
- _max *= self._norm[1]
749
- par.set(value=value, min=_min, max=_max)
750
-
751
- # Initialize the model parameters from parameters
752
- if prefix is None:
753
- prefix = ''
754
- if parameters is not None:
755
- for parameter in parameters:
756
- name = parameter['name']
757
- if not isinstance(name, str):
758
- raise ValueError(
759
- f'Invalid "name" value ({name}) in input parameters')
760
- if name not in new_parameters:
761
- name = prefix+name
762
- parameter['name'] = name
1030
+ for name in new_parameters:
1031
+ if name in self._linear_parameters:
1032
+ par = self._parameters.get(name)
1033
+ if par.expr is None:
1034
+ if self._code == 'scipy':
1035
+ value = par.default
1036
+ else:
1037
+ value = None
1038
+ if value is None:
1039
+ value = par.value
1040
+ if value is not None:
1041
+ value *= self._norm[1]
1042
+ _min = par.min
1043
+ _max = par.max
1044
+ if not np.isinf(_min) and abs(_min) != FLOAT_MIN:
1045
+ _min *= self._norm[1]
1046
+ if not np.isinf(_max) and abs(_max) != FLOAT_MIN:
1047
+ _max *= self._norm[1]
1048
+ par.set(value=value, min=_min, max=_max)
1049
+
1050
+ # Initialize the model parameters
1051
+ for parameter in deepcopy(parameters):
1052
+ name = parameter.name
1053
+ if name not in new_parameters:
1054
+ name = pprefix+name
763
1055
  if name not in new_parameters:
764
- logger.warning(
765
- f'Ignoring superfluous parameter info for {name}')
766
- continue
767
- if name in self._parameters:
768
- parameter.pop('name')
769
- if 'norm' in parameter:
770
- if not isinstance(parameter['norm'], bool):
771
- raise ValueError(
772
- f'Invalid "norm" value ({norm}) in the '
773
- f'input parameter {name}')
774
- new_parameter_norms[name] = parameter['norm']
775
- parameter.pop('norm')
776
- if parameter.get('expr') is not None:
777
- if 'value' in parameter:
778
- logger.warning(
779
- f'Ignoring value in parameter {name} '
780
- f'(set by expression: {parameter["expr"]})')
781
- parameter.pop('value')
782
- if 'vary' in parameter:
783
- logger.warning(
784
- f'Ignoring vary in parameter {name} '
785
- f'(set by expression: {parameter["expr"]})')
786
- parameter.pop('vary')
787
- if 'min' in parameter:
788
- logger.warning(
789
- f'Ignoring min in parameter {name} '
790
- f'(set by expression: {parameter["expr"]})')
791
- parameter.pop('min')
792
- if 'max' in parameter:
793
- logger.warning(
794
- f'Ignoring max in parameter {name} '
795
- f'(set by expression: {parameter["expr"]})')
796
- parameter.pop('max')
797
- if 'vary' in parameter:
798
- if not isinstance(parameter['vary'], bool):
799
- raise ValueError(
800
- f'Invalid "vary" value ({parameter["vary"]}) '
801
- f'in the input parameter {name}')
802
- if not parameter['vary']:
803
- if 'min' in parameter:
804
- logger.warning(
805
- f'Ignoring min in parameter {name} '
806
- f'(vary = {parameter["vary"]})')
807
- parameter.pop('min')
808
- if 'max' in parameter:
809
- logger.warning(
810
- f'Ignoring max in parameter {name} '
811
- f'(vary = {parameter["vary"]})')
812
- parameter.pop('max')
813
- self._parameters[name].set(**parameter)
814
- parameter['name'] = name
815
- else:
816
1056
  raise ValueError(
817
- 'Invalid parameter name in parameters ({name})')
818
- self._parameter_norms = {
819
- **self._parameter_norms,
820
- **new_parameter_norms,
821
- }
822
-
823
- # Initialize the model parameters from kwargs
824
- for name, value in {**kwargs}.items():
825
- full_name = f'{pprefix}{name}'
826
- if (full_name in new_parameter_norms
827
- and isinstance(value, (int, float))):
828
- kwargs.pop(name)
829
- if self._parameters[full_name].expr is None:
830
- self._parameters[full_name].set(value=value)
831
- else:
832
- logger.warning(
833
- f'Ignoring parameter {name} (set by expression: '
834
- f'{self._parameters[full_name].expr})')
835
-
836
- # Check parameter norms
837
- # (also need it for expressions to renormalize the errors)
838
- if (self._norm is not None
839
- and (callable(model) or model == 'expression')):
840
- missing_norm = False
841
- for name in new_parameters.valuesdict():
842
- if name not in self._parameter_norms:
843
- print(f'new_parameters:\n{new_parameters.valuesdict()}')
844
- print(f'self._parameter_norms:\n{self._parameter_norms}')
845
- logger.error(
846
- f'Missing parameter normalization type for {name} in '
847
- f'{model}')
848
- missing_norm = True
849
- if missing_norm:
850
- raise ValueError
851
-
852
- return kwargs
853
-
854
- def create_multipeak_model(
855
- self, centers=None, fit_type=None, peak_models=None,
856
- center_exprs=None, background=None, param_constraint=True,
857
- fwhm_min=None, fwhm_max=None, centers_range=None):
858
- """Create a multipeak model."""
859
- # System modules
860
- from re import search as re_search
861
-
862
- # Third party modules
863
- from asteval import Interpreter
864
-
865
- if centers_range is None:
866
- centers_range = (self._x[0], self._x[-1])
867
- elif not is_index_range(centers_range, ge=self._x[0], le=self._x[-1]):
868
- raise ValueError(
869
- f'Invalid parameter centers_range ({centers_range})')
870
- if self._model is not None:
871
- if self._fit_type == 'uniform' and fit_type != 'uniform':
872
- logger.info('Use the existing multipeak model to refit a '
873
- 'uniform model with an unconstrained model')
874
- min_value = FLOAT_MIN if self._param_constraint else None
875
- if isinstance(self, FitMap):
876
- scale_factor_index = \
877
- self._best_parameters.index('scale_factor')
878
- self._best_parameters.pop(scale_factor_index)
879
- self._best_values = np.delete(
880
- self._best_values, scale_factor_index, 0)
881
- self._best_errors = np.delete(
882
- self._best_errors, scale_factor_index, 0)
883
- for name, par in self._parameters.items():
884
- if re_search('peak\d+_center', name) is not None:
885
- par.set(
886
- min=centers_range[0], max=centers_range[1],
887
- vary=True, expr=None)
888
- self._parameter_bounds[name] = {
889
- 'min': centers_range[0],
890
- 'max': centers_range[1],
891
- }
892
- else:
893
- for name, par in self._parameters.items():
894
- if re_search('peak\d+_center', name) is not None:
895
- par.set(
896
- value=self._result.params[name].value,
897
- min=min_value, vary=True, expr=None)
898
- self._parameter_bounds[name] = {
899
- 'min': min_value,
900
- 'max': np.inf,
901
- }
902
- self._parameters.pop('scale_factor')
903
- self._parameter_bounds.pop('scale_factor')
904
- self._parameter_norms.pop('scale_factor')
905
- return
1057
+ f'Unable to match parameter {name}')
1058
+ if parameter.expr is None:
1059
+ self._parameters[name].set(
1060
+ value=parameter.value, min=parameter.min,
1061
+ max=parameter.max, vary=parameter.vary)
906
1062
  else:
907
- logger.warning('Existing model cleared before creating a new '
908
- 'multipeak model')
909
- self._model = None
910
-
911
- if self._model is None and len(self._parameters):
912
- logger.warning('Existing fit parameters cleared before creating a '
913
- 'new multipeak model')
914
- self._parameters = Parameters()
915
- if isinstance(centers, (int, float)):
916
- centers = [centers]
917
- elif not isinstance(centers, (tuple, list, np.ndarray)):
918
- raise ValueError(f'Invalid parameter centers ({centers})')
919
- num_peaks = len(centers)
920
- if peak_models is None:
921
- peak_models = num_peaks*['gaussian']
922
- elif (isinstance(peak_models, str)
923
- and peak_models in ('gaussian', 'lorentzian')):
924
- peak_models = num_peaks*[peak_models]
925
- else:
926
- raise ValueError(f'Invalid parameter peak model ({peak_models})')
927
- if len(peak_models) != num_peaks:
928
- raise ValueError(
929
- 'Inconsistent number of peaks in peak_models '
930
- f'({len(peak_models)} vs {num_peaks})')
931
- if num_peaks == 1:
932
- if fit_type is not None:
933
- logger.debug('Ignoring fit_type input for fitting one peak')
934
- fit_type = None
935
- if center_exprs is not None:
936
- logger.debug(
937
- 'Ignoring center_exprs input for fitting one peak')
938
- center_exprs = None
939
- else:
940
- if fit_type == 'uniform':
941
- if center_exprs is None:
942
- center_exprs = [f'scale_factor*{cen}' for cen in centers]
943
- if len(center_exprs) != num_peaks:
944
- raise ValueError(
945
- 'Inconsistent number of peaks in center_exprs '
946
- f'({len(center_exprs)} vs {num_peaks})')
947
- elif fit_type == 'unconstrained' or fit_type is None:
948
- fit_type = 'unconstrained'
949
- if center_exprs is not None:
1063
+ if parameter.value is not None:
950
1064
  logger.warning(
951
- 'Ignoring center_exprs input for unconstrained fit')
952
- center_exprs = None
953
- else:
954
- raise ValueError(
955
- f'Invalid parameter fit_type ({fit_type})')
956
- self._fit_type = fit_type
957
- self._fwhm_min = fwhm_min
958
- self._fwhm_max = fwhm_max
959
- self._sigma_min = None
960
- self._sigma_max = None
961
- if param_constraint:
962
- self._param_constraint = True
963
- min_value = FLOAT_MIN
964
- if self._fwhm_min is not None:
965
- self._sigma_min = np.zeros(num_peaks)
966
- if self._fwhm_max is not None:
967
- self._sigma_max = np.zeros(num_peaks)
968
- else:
969
- min_value = None
970
-
971
- # Reset the fit
972
- self._result = None
973
- self._parameter_norms = {}
974
- self._linear_parameters = []
975
- self._nonlinear_parameters = []
976
- if hasattr(self, "_best_parameters"):
977
- self._best_parameters = None
978
-
979
- # Add background model(s)
980
- if background is not None:
981
- if isinstance(background, str):
982
- background = [{'model': background}]
983
- elif isinstance(background, dict):
984
- background = [background]
985
- elif is_str_series(background):
986
- background = [{'model': model}
987
- for model in deepcopy(background)]
988
- if is_dict_series(background):
989
- num_background = len(background)
990
- for model in deepcopy(background):
991
- if 'model' not in model:
992
- raise KeyError(
993
- 'Missing keyword "model" in model in background '
994
- f'({model})')
995
- name = model.pop('model')
996
- if num_background == 1:
997
- prefix = f'bkgd_'
998
- else:
999
- prefix = f'bkgd_{name}_'
1000
- parameters = model.pop('parameters', None)
1001
- if parameters is not None:
1002
- if isinstance(parameters, dict):
1003
- parameters = [parameters, ]
1004
- elif is_dict_series(parameters):
1005
- parameters = list(parameters)
1006
- else:
1007
- raise ValueError('Invalid parameters value in '
1008
- f'background model {name} ({parameters})')
1009
- if min_value is not None and name == 'exponential':
1010
- if parameters is None:
1011
- parameters = (
1012
- {'name': 'amplitude', 'min': min_value},
1013
- {'name': 'decay', 'min': min_value},
1014
- )
1015
- else:
1016
- for par_name in ('amplitude', 'decay'):
1017
- index = [i for i, par in enumerate(parameters)
1018
- if par['name'] == par_name]
1019
- if not len(index):
1020
- parameters.append(
1021
- {'name': par_name, 'min': min_value})
1022
- elif len(index) == 1:
1023
- parameter = parameters[index[0]]
1024
- _min = parameter.get('min', None)
1025
- if _min is None or _min < min_value:
1026
- parameter['min'] = min_value
1027
- else:
1028
- raise ValueError(
1029
- 'Invalid parameters value in '
1030
- f'background model {name} '
1031
- f'({parameters})')
1032
- if min_value is not None and name == 'gaussian':
1033
- if parameters is None:
1034
- parameters = (
1035
- {'name': 'amplitude', 'min': min_value},
1036
- {'name': 'center', 'min': min_value},
1037
- {'name': 'sigma', 'min': min_value},
1038
- )
1039
- else:
1040
- for par_name in ('amplitude', 'center', 'sigma'):
1041
- index = [i for i, par in enumerate(parameters)
1042
- if par['name'] == par_name]
1043
- if not len(index):
1044
- parameters.append(
1045
- {'name': par_name, 'min': min_value})
1046
- elif len(index) == 1:
1047
- parameter = parameters[index[0]]
1048
- _min = parameter.get('min', None)
1049
- if _min is None or _min < min_value:
1050
- parameter['min'] = min_value
1051
- else:
1052
- raise ValueError(
1053
- 'Invalid parameters value in '
1054
- f'background model {name} '
1055
- f'({parameters})')
1056
- self.add_model(
1057
- name, prefix=prefix, parameters=parameters,
1058
- **model)
1059
- else:
1060
- raise ValueError(
1061
- f'Invalid parameter background ({background})')
1062
-
1063
- # Add peaks and set initial fit parameters
1064
- ast = Interpreter()
1065
- if num_peaks == 1:
1066
- sig_min = None
1067
- if self._sigma_min is not None:
1068
- ast(f'fwhm = {self._fwhm_min}')
1069
- sig_min = ast(fwhm_factor[peak_models[0]])
1070
- self._sigma_min[0] = sig_min
1071
- sig_max = None
1072
- if self._sigma_max is not None:
1073
- ast(f'fwhm = {self._fwhm_max}')
1074
- sig_max = ast(fwhm_factor[peak_models[0]])
1075
- self._sigma_max[0] = sig_max
1076
- self.add_model(
1077
- peak_models[0],
1078
- parameters=(
1079
- {'name': 'amplitude', 'min': min_value},
1080
- {'name': 'center', 'value': centers[0],
1081
- 'min': centers_range[0], 'max': centers_range[1]},
1082
- {'name': 'sigma', 'min': sig_min, 'max': sig_max},
1083
- ))
1084
- else:
1085
- if fit_type == 'uniform':
1086
- self.add_parameter(
1087
- name='scale_factor', value=1.0, min=min_value)
1088
- for i in range(num_peaks):
1089
- sig_min = None
1090
- if self._sigma_min is not None:
1091
- ast(f'fwhm = {self._fwhm_min}')
1092
- sig_min = ast(fwhm_factor[peak_models[i]])
1093
- self._sigma_min[i] = sig_min
1094
- sig_max = None
1095
- if self._sigma_max is not None:
1096
- ast(f'fwhm = {self._fwhm_max}')
1097
- sig_max = ast(fwhm_factor[peak_models[i]])
1098
- self._sigma_max[i] = sig_max
1099
- if fit_type == 'uniform':
1100
- self.add_model(
1101
- peak_models[i], prefix=f'peak{i+1}_',
1102
- parameters=(
1103
- {'name': 'amplitude', 'min': min_value},
1104
- {'name': 'center', 'expr': center_exprs[i]},
1105
- {'name': 'sigma', 'min': sig_min, 'max': sig_max},
1106
- ))
1107
- else:
1108
- self.add_model(
1109
- 'gaussian',
1110
- prefix=f'peak{i+1}_',
1111
- parameters=(
1112
- {'name': 'amplitude', 'min': min_value},
1113
- {'name': 'center', 'value': centers[i],
1114
- 'min': centers_range[0], 'max': centers_range[1]},
1115
- {'name': 'sigma', 'min': min_value,
1116
- 'max': sig_max},
1117
- ))
1065
+ 'Ignoring input "value" for expression parameter'
1066
+ f'{name} = {parameter.expr}')
1067
+ if not np.isinf(parameter.min):
1068
+ logger.warning(
1069
+ 'Ignoring input "min" for expression parameter'
1070
+ f'{name} = {parameter.expr}')
1071
+ if not np.isinf(parameter.max):
1072
+ logger.warning(
1073
+ 'Ignoring input "max" for expression parameter'
1074
+ f'{name} = {parameter.expr}')
1075
+ self._parameters[name].set(
1076
+ value=None, min=-np.inf, max=np.inf, expr=parameter.expr)
1118
1077
 
1119
1078
  def eval(self, x, result=None):
1120
1079
  """Evaluate the best fit."""
@@ -1124,36 +1083,25 @@ class Fit:
1124
1083
  return None
1125
1084
  return result.eval(x=np.asarray(x))-self.normalization_offset
1126
1085
 
1127
- def fit(self, **kwargs):
1086
+ def fit(self, config=None, **kwargs):
1128
1087
  """Fit the model to the input data."""
1129
- # Third party modules
1130
- from asteval import Interpreter
1131
1088
 
1132
1089
  # Check input parameters
1133
1090
  if self._model is None:
1134
1091
  logger.error('Undefined fit model')
1135
1092
  return None
1136
- if 'interactive' in kwargs:
1137
- interactive = kwargs.pop('interactive')
1138
- if not isinstance(interactive, bool):
1139
- raise ValueError(
1140
- 'Invalid value of keyword argument interactive '
1141
- f'({interactive})')
1142
- else:
1143
- interactive = False
1144
- if 'guess' in kwargs:
1145
- guess = kwargs.pop('guess')
1146
- if not isinstance(guess, bool):
1147
- raise ValueError(
1148
- f'Invalid value of keyword argument guess ({guess})')
1149
- else:
1150
- guess = False
1093
+ self._mask = kwargs.pop('mask', None)
1094
+ guess = kwargs.pop('guess', False)
1095
+ if not isinstance(guess, bool):
1096
+ raise ValueError(
1097
+ f'Invalid value of keyword argument guess ({guess})')
1151
1098
  if self._result is not None:
1152
1099
  if guess:
1153
1100
  logger.warning(
1154
1101
  'Ignoring input parameter guess during refitting')
1155
1102
  guess = False
1156
1103
  if 'try_linear_fit' in kwargs:
1104
+ raise RuntimeError('try_linear_fit needs testing')
1157
1105
  try_linear_fit = kwargs.pop('try_linear_fit')
1158
1106
  if not isinstance(try_linear_fit, bool):
1159
1107
  raise ValueError(
@@ -1164,130 +1112,10 @@ class Fit:
1164
1112
  'Ignore superfluous keyword argument "try_linear_fit" '
1165
1113
  '(not yet supported for callable models)')
1166
1114
  else:
1167
- self._try_linear_fit = try_linear_fit
1168
-
1169
- # Apply mask if supplied:
1170
- if 'mask' in kwargs:
1171
- self._mask = kwargs.pop('mask')
1172
- if self._mask is not None:
1173
- self._mask = np.asarray(self._mask).astype(bool)
1174
- if self._x.size != self._mask.size:
1175
- raise ValueError(
1176
- f'Inconsistent x and mask dimensions ({self._x.size} vs '
1177
- f'{self._mask.size})')
1178
-
1179
- # Estimate initial parameters
1180
- if guess:
1181
- if self._mask is None:
1182
- xx = self._x
1183
- yy = self._y
1184
- else:
1185
- xx = self._x[~self._mask]
1186
- yy = np.asarray(self._y)[~self._mask]
1187
- try:
1188
- # Try with the build-in lmfit guess method
1189
- # (only implemented for a single model)
1190
- self._parameters = self._model.guess(yy, x=xx)
1191
- except:
1192
- ast = Interpreter()
1193
- # Should work for other peak-like models,
1194
- # but will need tests first
1195
- for component in self._model.components:
1196
- if isinstance(component, GaussianModel):
1197
- center = self._parameters[
1198
- f"{component.prefix}center"].value
1199
- height_init, cen_init, fwhm_init = \
1200
- self.guess_init_peak(
1201
- xx, yy, center_guess=center,
1202
- use_max_for_center=False)
1203
- if (self._fwhm_min is not None
1204
- and fwhm_init < self._fwhm_min):
1205
- fwhm_init = self._fwhm_min
1206
- elif (self._fwhm_max is not None
1207
- and fwhm_init > self._fwhm_max):
1208
- fwhm_init = self._fwhm_max
1209
- ast(f'fwhm = {fwhm_init}')
1210
- ast(f'height = {height_init}')
1211
- sig_init = ast(fwhm_factor[component._name])
1212
- amp_init = ast(height_factor[component._name])
1213
- par = self._parameters[
1214
- f"{component.prefix}amplitude"]
1215
- if par.vary:
1216
- par.set(value=amp_init)
1217
- par = self._parameters[
1218
- f"{component.prefix}center"]
1219
- if par.vary:
1220
- par.set(value=cen_init)
1221
- par = self._parameters[
1222
- f"{component.prefix}sigma"]
1223
- if par.vary:
1224
- par.set(value=sig_init)
1225
-
1226
- # Add constant offset for a normalized model
1227
- if self._result is None and self._norm is not None and self._norm[0]:
1228
- self.add_model(
1229
- 'constant', prefix='tmp_normalization_offset_',
1230
- parameters={
1231
- 'name': 'c',
1232
- 'value': -self._norm[0],
1233
- 'vary': False,
1234
- 'norm': True,
1235
- })
1236
- # 'value': -self._norm[0]/self._norm[1],
1237
- # 'vary': False,
1238
- # 'norm': False,
1239
-
1240
- # Adjust existing parameters for refit:
1241
- if 'parameters' in kwargs:
1242
- parameters = kwargs.pop('parameters')
1243
- if isinstance(parameters, dict):
1244
- parameters = (parameters, )
1245
- elif not is_dict_series(parameters):
1246
- raise ValueError(
1247
- 'Invalid value of keyword argument parameters '
1248
- f'({parameters})')
1249
- for par in parameters:
1250
- name = par['name']
1251
- if name not in self._parameters:
1252
- raise ValueError(
1253
- f'Unable to match {name} parameter {par} to an '
1254
- 'existing one')
1255
- if self._parameters[name].expr is not None:
1256
- raise ValueError(
1257
- f'Unable to modify {name} parameter {par} '
1258
- '(currently an expression)')
1259
- if par.get('expr') is not None:
1260
- raise KeyError(
1261
- f'Invalid "expr" key in {name} parameter {par}')
1262
- self._parameters[name].set(vary=par.get('vary'))
1263
- self._parameters[name].set(min=par.get('min'))
1264
- self._parameters[name].set(max=par.get('max'))
1265
- self._parameters[name].set(value=par.get('value'))
1266
-
1267
- # Apply parameter updates through keyword arguments
1268
- for name in set(self._parameters) & set(kwargs):
1269
- value = kwargs.pop(name)
1270
- if self._parameters[name].expr is None:
1271
- self._parameters[name].set(value=value)
1272
- else:
1273
- logger.warning(
1274
- f'Ignoring parameter {name} (set by expression: '
1275
- f'{self._parameters[name].expr})')
1115
+ self._try_linear_fit = try_linear_fit
1276
1116
 
1277
- # Check for uninitialized parameters
1278
- for name, par in self._parameters.items():
1279
- if par.expr is None:
1280
- value = par.value
1281
- if value is None or np.isinf(value) or np.isnan(value):
1282
- if interactive:
1283
- value = input_num(
1284
- f'Enter an initial value for {name}', default=1.0)
1285
- else:
1286
- value = 1.0
1287
- if self._norm is None or name not in self._parameter_norms:
1288
- self._parameters[name].set(value=value)
1289
- elif self._parameter_norms[name]:
1290
- self._parameters[name].set(value=value*self._norm[1])
1117
+ # Setup the fit
1118
+ self._setup_fit(config, guess)
1291
1119
 
1292
1120
  # Check if model is linear
1293
1121
  try:
@@ -1301,6 +1129,7 @@ class Fit:
1301
1129
  self._normalize()
1302
1130
 
1303
1131
  if linear_model:
1132
+ raise RuntimeError('linear solver needs testing')
1304
1133
  # Perform a linear fit by direct matrix solution with numpy
1305
1134
  try:
1306
1135
  if self._mask is None:
@@ -1312,30 +1141,8 @@ class Fit:
1312
1141
  except:
1313
1142
  linear_model = False
1314
1143
  if not linear_model:
1315
- # Perform a non-linear fit with lmfit
1316
- # Prevent initial values from sitting at boundaries
1317
- self._parameter_bounds = {
1318
- name:{'min': par.min, 'max': par.max}
1319
- for name, par in self._parameters.items() if par.vary}
1320
- self._reset_par_at_boundary()
1321
-
1322
- # Perform the fit
1323
- fit_kws = None
1324
- # if 'Dfun' in kwargs:
1325
- # fit_kws = {'Dfun': kwargs.pop('Dfun')}
1326
- # self._result = self._model.fit(
1327
- # self._y_norm, self._parameters, x=self._x, fit_kws=fit_kws,
1328
- # **kwargs)
1329
- if self._param_constraint:
1330
- fit_kws = {'xtol': 1.e-5, 'ftol': 1.e-5, 'gtol': 1.e-5}
1331
- if self._mask is None:
1332
- self._result = self._model.fit(
1333
- self._y_norm, self._parameters, x=self._x, fit_kws=fit_kws,
1334
- **kwargs)
1335
- else:
1336
- self._result = self._model.fit(
1337
- np.asarray(self._y_norm)[~self._mask], self._parameters,
1338
- x=self._x[~self._mask], fit_kws=fit_kws, **kwargs)
1144
+ self._result = self._fit_nonlinear_model(
1145
+ self._x, self._y_norm, **kwargs)
1339
1146
 
1340
1147
  # Set internal parameter values to fit results upon success
1341
1148
  if self.success:
@@ -1528,11 +1335,248 @@ class Fit:
1528
1335
 
1529
1336
  return height, center, fwhm
1530
1337
 
1338
+ def _create_prefixes(self, models):
1339
+ # Check for duplicate model names and create prefixes
1340
+ names = []
1341
+ prefixes = []
1342
+ for model in models:
1343
+ names.append(f'{model.prefix}{model.model}')
1344
+ prefixes.append(model.prefix)
1345
+ counts = Counter(names)
1346
+ for model, count in counts.items():
1347
+ if count > 1:
1348
+ n = 0
1349
+ for i, name in enumerate(names):
1350
+ if name == model:
1351
+ n += 1
1352
+ prefixes[i] = f'{name}{n}_'
1353
+
1354
+ return prefixes
1355
+
1356
+ def _setup_fit_model(self, parameters, models):
1357
+ """Setup the fit model."""
1358
+ # Check for duplicate model names and create prefixes
1359
+ prefixes = self._create_prefixes(models)
1360
+
1361
+ # Add the free fit parameters
1362
+ for par in parameters:
1363
+ self.add_parameter(par.dict())
1364
+
1365
+ # Add the model functions
1366
+ for prefix, model in zip(prefixes, models):
1367
+ self.add_model(model, prefix)
1368
+
1369
+ # Check linearity of free fit parameters:
1370
+ known_parameters = (
1371
+ self._linear_parameters + self._nonlinear_parameters)
1372
+ for name in reversed(self._parameters):
1373
+ if name not in known_parameters:
1374
+ for nname, par in self._parameters.items():
1375
+ if par.expr is not None:
1376
+ # Third party modules
1377
+ from sympy import diff
1378
+
1379
+ if nname in self._nonlinear_parameters:
1380
+ self._nonlinear_parameters.insert(0, name)
1381
+ elif diff(par.expr, name, name):
1382
+ self._nonlinear_parameters.insert(0, name)
1383
+ else:
1384
+ self._linear_parameters.insert(0, name)
1385
+
1386
+ def _setup_fit(self, config, guess=False):
1387
+ """Setup the fit."""
1388
+ # Apply mask if supplied:
1389
+ if self._mask is not None:
1390
+ raise RuntimeError('mask needs testing')
1391
+ self._mask = np.asarray(self._mask).astype(bool)
1392
+ if self._x.size != self._mask.size:
1393
+ raise ValueError(
1394
+ f'Inconsistent x and mask dimensions ({self._x.size} vs '
1395
+ f'{self._mask.size})')
1396
+
1397
+ # Estimate initial parameters
1398
+ if guess and not isinstance(self, FitMap):
1399
+ raise RuntimeError('Estimate initial parameters needs testing')
1400
+ if self._mask is None:
1401
+ xx = self._x
1402
+ yy = self._y
1403
+ else:
1404
+ xx = self._x[~self._mask]
1405
+ yy = np.asarray(self._y)[~self._mask]
1406
+ try:
1407
+ # Try with the build-in lmfit guess method
1408
+ # (only implemented for a single model)
1409
+ self._parameters = self._model.guess(yy, x=xx)
1410
+ except:
1411
+ # Third party modules
1412
+ from asteval import Interpreter
1413
+ from lmfit.models import GaussianModel
1414
+
1415
+ ast = Interpreter()
1416
+ # Should work for other peak-like models,
1417
+ # but will need tests first
1418
+ for component in self._model.components:
1419
+ if isinstance(component, GaussianModel):
1420
+ center = self._parameters[
1421
+ f"{component.prefix}center"].value
1422
+ height_init, cen_init, fwhm_init = \
1423
+ self.guess_init_peak(
1424
+ xx, yy, center_guess=center,
1425
+ use_max_for_center=False)
1426
+ if (self._fwhm_min is not None
1427
+ and fwhm_init < self._fwhm_min):
1428
+ fwhm_init = self._fwhm_min
1429
+ elif (self._fwhm_max is not None
1430
+ and fwhm_init > self._fwhm_max):
1431
+ fwhm_init = self._fwhm_max
1432
+ ast(f'fwhm = {fwhm_init}')
1433
+ ast(f'height = {height_init}')
1434
+ sig_init = ast(fwhm_factor[component._name])
1435
+ amp_init = ast(height_factor[component._name])
1436
+ par = self._parameters[
1437
+ f"{component.prefix}amplitude"]
1438
+ if par.vary:
1439
+ par.set(value=amp_init)
1440
+ par = self._parameters[
1441
+ f"{component.prefix}center"]
1442
+ if par.vary:
1443
+ par.set(value=cen_init)
1444
+ par = self._parameters[
1445
+ f"{component.prefix}sigma"]
1446
+ if par.vary:
1447
+ par.set(value=sig_init)
1448
+
1449
+ # Add constant offset for a normalized model
1450
+ if self._result is None and self._norm is not None and self._norm[0]:
1451
+ from CHAP.utils.models import Constant
1452
+ model = Constant(
1453
+ model='constant',
1454
+ parameters=[{
1455
+ 'name': 'c',
1456
+ 'value': -self._norm[0],
1457
+ 'vary': False,
1458
+ }])
1459
+ self.add_model(model, 'tmp_normalization_offset_')
1460
+
1461
+ # Adjust existing parameters for refit:
1462
+ if config is not None:
1463
+ # Local modules
1464
+ from CHAP.utils.models import (
1465
+ FitConfig,
1466
+ Multipeak,
1467
+ )
1468
+
1469
+ # Expand multipeak model if present
1470
+ scale_factor = None
1471
+ for i, model in enumerate(deepcopy(config.models)):
1472
+ found_multipeak = False
1473
+ if isinstance(model, Multipeak):
1474
+ if found_multipeak:
1475
+ raise ValueError(
1476
+ f'Invalid parameter models ({config.models}) '
1477
+ '(multiple instances of multipeak not allowed)')
1478
+ if (model.fit_type == 'uniform'
1479
+ and 'scale_factor' not in self._free_parameters):
1480
+ raise ValueError(
1481
+ f'Invalid parameter models ({config.models}) '
1482
+ '(uniform multipeak fit after unconstrained fit)')
1483
+ parameters, models = FitProcessor.create_multipeak_model(
1484
+ model)
1485
+ if (model.fit_type == 'unconstrained'
1486
+ and 'scale_factor' in self._free_parameters):
1487
+ # Third party modules
1488
+ from asteval import Interpreter
1489
+
1490
+ scale_factor = self._parameters['scale_factor'].value
1491
+ self._parameters.pop('scale_factor')
1492
+ self._free_parameters.remove('scale_factor')
1493
+ ast = Interpreter()
1494
+ ast(f'scale_factor = {scale_factor}')
1495
+ if parameters:
1496
+ config.parameters += parameters
1497
+ config.models += models
1498
+ config.models.remove(model)
1499
+ found_multipeak = True
1500
+
1501
+ # Check for duplicate model names and create prefixes
1502
+ prefixes = self._create_prefixes(config.models)
1503
+ if not isinstance(config, FitConfig):
1504
+ raise ValueError(f'Invalid parameter config ({config})')
1505
+ parameters = config.parameters
1506
+ for prefix, model in zip(prefixes, config.models):
1507
+ for par in model.parameters:
1508
+ par.name = f'{prefix}{par.name}'
1509
+ parameters += model.parameters
1510
+
1511
+ # Adjust parameters for refit as needed
1512
+ if isinstance(self, FitMap):
1513
+ scale_factor_index = \
1514
+ self._best_parameters.index('scale_factor')
1515
+ self._best_parameters.pop(scale_factor_index)
1516
+ self._best_values = np.delete(
1517
+ self._best_values, scale_factor_index, 0)
1518
+ self._best_errors = np.delete(
1519
+ self._best_errors, scale_factor_index, 0)
1520
+ for par in parameters:
1521
+ name = par.name
1522
+ if name not in self._parameters:
1523
+ raise ValueError(
1524
+ f'Unable to match {name} parameter {par} to an '
1525
+ 'existing one')
1526
+ ppar = self._parameters[name]
1527
+ if ppar.expr is not None:
1528
+ if (scale_factor is not None and 'center' in name
1529
+ and 'scale_factor' in ppar.expr):
1530
+ ppar.set(value=ast(ppar.expr), expr='')
1531
+ value = ppar.value
1532
+ else:
1533
+ raise ValueError(
1534
+ f'Unable to modify {name} parameter {par} '
1535
+ '(currently an expression)')
1536
+ else:
1537
+ value = par.value
1538
+ if par.expr is not None:
1539
+ raise KeyError(
1540
+ f'Invalid "expr" key in {name} parameter {par}')
1541
+ ppar.set(
1542
+ value=value, min=par.min, max=par.max, vary=par.vary)
1543
+
1544
+ # Set parameters configuration
1545
+ if self._code == 'scipy':
1546
+ self._res_par_exprs = []
1547
+ self._res_par_indices = []
1548
+ self._res_par_names = []
1549
+ self._res_par_values = []
1550
+ for i, (name, par) in enumerate(self._parameters.items()):
1551
+ self._res_par_values.append(par.value)
1552
+ if par.expr:
1553
+ self._res_par_exprs.append(
1554
+ {'expr': par.expr, 'index': i})
1555
+ else:
1556
+ if par.vary:
1557
+ self._res_par_indices.append(i)
1558
+ self._res_par_names.append(name)
1559
+
1560
+ # Check for uninitialized parameters
1561
+ for name, par in self._parameters.items():
1562
+ if par.expr is None:
1563
+ value = par.value
1564
+ if value is None or np.isinf(value) or np.isnan(value):
1565
+ if (self._norm is None
1566
+ or name in self._nonlinear_parameters):
1567
+ self._parameters[name].set(value=1.0)
1568
+ else:
1569
+ self._parameters[name].set(value=self._norm[1])
1570
+
1531
1571
  def _check_linearity_model(self):
1532
1572
  """
1533
1573
  Identify the linearity of all model parameters and check if
1534
1574
  the model is linear or not.
1535
1575
  """
1576
+ # Third party modules
1577
+ from lmfit.models import ExpressionModel
1578
+ from sympy import diff
1579
+
1536
1580
  if not self._try_linear_fit:
1537
1581
  logger.info(
1538
1582
  'Skip linearity check (not yet supported for callable models)')
@@ -1580,6 +1624,18 @@ class Fit:
1580
1624
  """
1581
1625
  # Third party modules
1582
1626
  from asteval import Interpreter
1627
+ from lmfit.model import ModelResult
1628
+ from lmfit.models import (
1629
+ ConstantModel,
1630
+ LinearModel,
1631
+ QuadraticModel,
1632
+ ExpressionModel,
1633
+ )
1634
+ # Third party modules
1635
+ from sympy import (
1636
+ diff,
1637
+ simplify,
1638
+ )
1583
1639
 
1584
1640
  # Construct the matrix and the free parameter vector
1585
1641
  free_parameters = \
@@ -1645,8 +1701,6 @@ class Fit:
1645
1701
  raise ValueError(
1646
1702
  f'Unable to evaluate {dexpr_dname}')
1647
1703
  mat_a[:,free_parameters.index(name)] += y_expr
1648
- # RV find another solution if expr not supported by
1649
- # simplify
1650
1704
  const_expr = str(simplify(f'({const_expr})/{norm}'))
1651
1705
  delta_y_const = [(lambda _: ast.eval(const_expr))
1652
1706
  (ast(f'x = {v}')) for v in x]
@@ -1743,7 +1797,9 @@ class Fit:
1743
1797
  par = self._parameters[name]
1744
1798
  if par.expr is None and norm:
1745
1799
  self._parameters[name].set(value=par.value*self._norm[1])
1746
- self._result = ModelResult(self._model, deepcopy(self._parameters))
1800
+ #RV FIX
1801
+ self._result = ModelResult(
1802
+ self._model, deepcopy(self._parameters), 'linear')
1747
1803
  self._result.best_fit = self._model.eval(params=self._parameters, x=x)
1748
1804
  if (self._normalized
1749
1805
  and (have_expression_model or expr_parameters)):
@@ -1760,10 +1816,103 @@ class Fit:
1760
1816
  value = par.value/self._norm[1]
1761
1817
  self._parameters[name].set(value=value)
1762
1818
  self._result.params[name].set(value=value)
1763
- self._result.residual = self._result.best_fit-y
1819
+ self._result.residual = y-self._result.best_fit
1764
1820
  self._result.components = self._model.components
1765
1821
  self._result.init_params = None
1766
1822
 
1823
+ def _fit_nonlinear_model(self, x, y, **kwargs):
1824
+ """
1825
+ Perform a nonlinear fit with spipy or lmfit
1826
+ """
1827
+ # Check bounds and prevent initial values at boundaries
1828
+ have_bounds = False
1829
+ self._parameter_bounds = {}
1830
+ for name, par in self._parameters.items():
1831
+ if par.vary:
1832
+ self._parameter_bounds[name] = {
1833
+ 'min': par.min, 'max': par.max}
1834
+ if not have_bounds and (
1835
+ not np.isinf(par.min) or not np.isinf(par.max)):
1836
+ have_bounds = True
1837
+ if have_bounds:
1838
+ self._reset_par_at_boundary()
1839
+
1840
+ # Perform the fit
1841
+ if self._mask is not None:
1842
+ x = x[~self._mask]
1843
+ y = np.asarray(y)[~self._mask]
1844
+ if self._code == 'scipy':
1845
+ # Third party modules
1846
+ from asteval import Interpreter
1847
+ from scipy.optimize import (
1848
+ leastsq,
1849
+ least_squares,
1850
+ )
1851
+
1852
+ assert self._mask is None
1853
+ self._ast = Interpreter()
1854
+ self._ast.basesymtable = {
1855
+ k:v for k, v in self._ast.symtable.items()}
1856
+ pars_init = []
1857
+ for i, (name, par) in enumerate(self._parameters.items()):
1858
+ setattr(par, '_init_value', par.value)
1859
+ self._res_par_values[i] = par.value
1860
+ if par.expr is None:
1861
+ self._ast.symtable[name] = par.value
1862
+ if par.vary:
1863
+ pars_init.append(par.value)
1864
+ if have_bounds:
1865
+ bounds = (
1866
+ [v['min'] for v in self._parameter_bounds.values()],
1867
+ [v['max'] for v in self._parameter_bounds.values()])
1868
+ if self._method in ('lm', 'leastsq'):
1869
+ self._method = 'trf'
1870
+ logger.warning(
1871
+ f'Fit method changed to {self._method} for fit with '
1872
+ 'bounds')
1873
+ else:
1874
+ bounds = (-np.inf, np.inf)
1875
+ init_params = deepcopy(self._parameters)
1876
+ # t0 = time()
1877
+ lskws = {
1878
+ 'ftol': 1.49012e-08,
1879
+ 'xtol': 1.49012e-08,
1880
+ 'gtol': 10*FLOAT_EPS,
1881
+ }
1882
+ if self._method == 'leastsq':
1883
+ lskws['maxfev'] = 64000
1884
+ result = leastsq(
1885
+ self._residual, pars_init, args=(x, y), full_output=True,
1886
+ **lskws)
1887
+ else:
1888
+ lskws['max_nfev'] = 64000
1889
+ result = least_squares(
1890
+ self._residual, pars_init, bounds=bounds,
1891
+ method=self._method, args=(x, y), **lskws)
1892
+ # t1 = time()
1893
+ # print(f'\n\nFitting took {1000*(t1-t0):.3f} ms\n\n')
1894
+ model_result = ModelResult(
1895
+ self._model, self._parameters, x, y, self._method, self._ast,
1896
+ self._res_par_exprs, self._res_par_indices,
1897
+ self._res_par_names, result)
1898
+ model_result.init_params = init_params
1899
+ model_result.init_values = {}
1900
+ for name, par in init_params.items():
1901
+ model_result.init_values[name] = par.value
1902
+ model_result.max_nfev = lskws.get('maxfev')
1903
+ else:
1904
+ fit_kws = {}
1905
+ # if 'Dfun' in kwargs:
1906
+ # fit_kws['Dfun'] = kwargs.pop('Dfun')
1907
+ # t0 = time()
1908
+ model_result = self._model.fit(
1909
+ y, self._parameters, x=x, method=self._method, fit_kws=fit_kws,
1910
+ **kwargs)
1911
+ # t1 = time()
1912
+ # print(f'\n\nFitting took {1000*(t1-t0):.3f} ms\n\n')
1913
+
1914
+ return model_result
1915
+
1767
1916
  def _normalize(self):
1768
1917
  """Normalize the data and initial parameters."""
1769
1918
  if self._normalized:
@@ -1776,9 +1925,9 @@ class Fit:
1776
1925
  self._y_norm = \
1777
1926
  (np.asarray(self._y)-self._norm[0]) / self._norm[1]
1778
1927
  self._y_range = 1.0
1779
- for name, norm in self._parameter_norms.items():
1928
+ for name in self._linear_parameters:
1780
1929
  par = self._parameters[name]
1781
- if par.expr is None and norm:
1930
+ if par.expr is None:
1782
1931
  value = par.value/self._norm[1]
1783
1932
  _min = par.min
1784
1933
  _max = par.max
@@ -1794,9 +1943,9 @@ class Fit:
1794
1943
  if self._norm is None or not self._normalized:
1795
1944
  return
1796
1945
  self._normalized = False
1797
- for name, norm in self._parameter_norms.items():
1946
+ for name in self._linear_parameters:
1798
1947
  par = self._parameters[name]
1799
- if par.expr is None and norm:
1948
+ if par.expr is None:
1800
1949
  value = par.value*self._norm[1]
1801
1950
  _min = par.min
1802
1951
  _max = par.max
@@ -1810,15 +1959,22 @@ class Fit:
1810
1959
  self._result.best_fit = (
1811
1960
  self._result.best_fit*self._norm[1] + self._norm[0])
1812
1961
  for name, par in self._result.params.items():
1813
- if self._parameter_norms.get(name, False):
1962
+ if name in self._linear_parameters:
1814
1963
  if par.stderr is not None:
1815
- par.stderr *= self._norm[1]
1964
+ if self._code == 'scipy':
1965
+ setattr(par, '_stderr', par.stderr*self._norm[1])
1966
+ else:
1967
+ par.stderr *= self._norm[1]
1816
1968
  if par.expr is None:
1817
1969
  _min = par.min
1818
1970
  _max = par.max
1819
1971
  value = par.value*self._norm[1]
1820
1972
  if par.init_value is not None:
1821
- par.init_value *= self._norm[1]
1973
+ if self._code == 'scipy':
1974
+ setattr(par, '_init_value',
1975
+ par.init_value*self._norm[1])
1976
+ else:
1977
+ par.init_value *= self._norm[1]
1822
1978
  if not np.isinf(_min) and abs(_min) != FLOAT_MIN:
1823
1979
  _min *= self._norm[1]
1824
1980
  if not np.isinf(_max) and abs(_max) != FLOAT_MIN:
@@ -1830,14 +1986,15 @@ class Fit:
1830
1986
  if hasattr(self._result, 'init_values'):
1831
1987
  init_values = {}
1832
1988
  for name, value in self._result.init_values.items():
1833
- if (name not in self._parameter_norms
1834
- or self._parameters[name].expr is not None):
1835
- init_values[name] = value
1836
- elif self._parameter_norms[name]:
1989
+ if name in self._linear_parameters:
1837
1990
  init_values[name] = value*self._norm[1]
1991
+ else:
1992
+ init_values[name] = value
1838
1993
  self._result.init_values = init_values
1994
+ if (hasattr(self._result, 'init_params')
1995
+ and self._result.init_params is not None):
1839
1996
  for name, par in self._result.init_params.items():
1840
- if par.expr is None and self._parameter_norms.get(name, False):
1997
+ if par.expr is None and name in self._linear_parameters:
1841
1998
  value = par.value
1842
1999
  _min = par.min
1843
2000
  _max = par.max
@@ -1847,18 +2004,24 @@ class Fit:
1847
2004
  if not np.isinf(_max) and abs(_max) != FLOAT_MIN:
1848
2005
  _max *= self._norm[1]
1849
2006
  par.set(value=value, min=_min, max=_max)
1850
- par.init_value = par.value
2007
+ if self._code == 'scipy':
2008
+ setattr(par, '_init_value', par.value)
2009
+ else:
2010
+ par.init_value = par.value
1851
2011
  # Don't renormalize chisqr, it has no useful meaning in
1852
2012
  # physical units
1853
2013
  # self._result.chisqr *= self._norm[1]*self._norm[1]
1854
2014
  if self._result.covar is not None:
2015
+ norm_sq = self._norm[1]*self._norm[1]
1855
2016
  for i, name in enumerate(self._result.var_names):
1856
- if self._parameter_norms.get(name, False):
2017
+ if name in self._linear_parameters:
1857
2018
  for j in range(len(self._result.var_names)):
1858
2019
  if self._result.covar[i,j] is not None:
1859
- self._result.covar[i,j] *= self._norm[1]
2020
+ #self._result.covar[i,j] *= self._norm[1]
2021
+ self._result.covar[i,j] *= norm_sq
1860
2022
  if self._result.covar[j,i] is not None:
1861
- self._result.covar[j,i] *= self._norm[1]
2023
+ #self._result.covar[j,i] *= self._norm[1]
2024
+ self._result.covar[j,i] *= norm_sq
1862
2025
  # Don't renormalize redchi, it has no useful meaning in
1863
2026
  # physical units
1864
2027
  # self._result.redchi *= self._norm[1]*self._norm[1]
@@ -1866,6 +2029,7 @@ class Fit:
1866
2029
  self._result.residual *= self._norm[1]
1867
2030
 
1868
2031
  def _reset_par_at_boundary(self):
2032
+ fraction = 0.02
1869
2033
  for name, par in self._parameters.items():
1870
2034
  if par.vary:
1871
2035
  value = par.value
@@ -1873,42 +2037,58 @@ class Fit:
1873
2037
  _max = self._parameter_bounds[name]['max']
1874
2038
  if np.isinf(_min):
1875
2039
  if not np.isinf(_max):
1876
- if self._parameter_norms.get(name, False):
1877
- upp = _max-0.1*self._y_range
2040
+ if name in self._linear_parameters:
2041
+ upp = _max - fraction*self._y_range
1878
2042
  elif _max == 0.0:
1879
- upp = _max-0.1
2043
+ upp = _max - fraction
1880
2044
  else:
1881
- upp = _max-0.1*abs(_max)
2045
+ upp = _max - fraction*abs(_max)
1882
2046
  if value >= upp:
1883
2047
  par.set(value=upp)
1884
2048
  else:
1885
2049
  if np.isinf(_max):
1886
- if self._parameter_norms.get(name, False):
1887
- low = _min + 0.1*self._y_range
2050
+ if name in self._linear_parameters:
2051
+ low = _min + fraction*self._y_range
1888
2052
  elif _min == 0.0:
1889
- low = _min+0.1
2053
+ low = _min + fraction
1890
2054
  else:
1891
- low = _min + 0.1*abs(_min)
2055
+ low = _min + fraction*abs(_min)
1892
2056
  if value <= low:
1893
2057
  par.set(value=low)
1894
2058
  else:
1895
- low = 0.9*_min + 0.1*_max
1896
- upp = 0.1*_min + 0.9*_max
2059
+ low = (1.0-fraction)*_min + fraction*_max
2060
+ upp = fraction*_min + (1.0-fraction)*_max
1897
2061
  if value <= low:
1898
2062
  par.set(value=low)
1899
2063
  if value >= upp:
1900
2064
  par.set(value=upp)
1901
2065
 
2066
+ def _residual(self, pars, x, y):
2067
+ res = np.zeros((x.size))
2068
+ n_par = len(self._free_parameters)
2069
+ for par, index in zip(pars, self._res_par_indices):
2070
+ self._res_par_values[index] = par
2071
+ if self._res_par_exprs:
2072
+ for par, name in zip(pars, self._res_par_names):
2073
+ self._ast.symtable[name] = par
2074
+ for expr in self._res_par_exprs:
2075
+ self._res_par_values[expr['index']] = \
2076
+ self._ast.eval(expr['expr'])
2077
+ for component, num_par in zip(
2078
+ self._model.components, self._res_num_pars):
2079
+ res += component.func(
2080
+ x, *tuple(self._res_par_values[n_par:n_par+num_par]))
2081
+ n_par += num_par
2082
+ return res - y
2083
+
1902
2084
 
1903
2085
  class FitMap(Fit):
1904
2086
  """
1905
- Wrapper to the Fit class to fit dat on a N-dimensional map
2087
+ Wrapper to the Fit class to fit data on a N-dimensional map
1906
2088
  """
1907
- def __init__(
1908
- self, ymap, x=None, models=None, normalize=True, transpose=None,
1909
- **kwargs):
2089
+ def __init__(self, nxdata, config):
1910
2090
  """Initialize FitMap."""
1911
- super().__init__(None)
2091
+ super().__init__(None, config)
1912
2092
  self._best_errors = None
1913
2093
  self._best_fit = None
1914
2094
  self._best_parameters = None
@@ -1917,124 +2097,64 @@ class FitMap(Fit):
1917
2097
  self._max_nfev = None
1918
2098
  self._memfolder = None
1919
2099
  self._new_parameters = None
2100
+ self._num_func_eval = None
1920
2101
  self._out_of_bounds = None
1921
2102
  self._plot = False
1922
2103
  self._print_report = False
1923
2104
  self._redchi = None
1924
2105
  self._redchi_cutoff = 0.1
2106
+ self._rel_height_cutoff = None
1925
2107
  self._skip_init = True
1926
2108
  self._success = None
1927
- self._transpose = None
1928
2109
  self._try_no_bounds = True
1929
2110
 
1930
2111
  # At this point the fastest index should always be the signal
1931
2112
  # dimension so that the slowest ndim-1 dimensions are the
1932
2113
  # map dimensions
1933
- if isinstance(ymap, (tuple, list, np.ndarray)):
1934
- self._x = np.asarray(x)
1935
- elif HAVE_XARRAY and isinstance(ymap, xr.DataArray):
1936
- if x is not None:
1937
- logger.warning('Ignoring superfluous input x ({x})')
1938
- self._x = np.asarray(ymap[ymap.dims[-1]])
1939
- else:
1940
- raise ValueError('Invalid parameter ymap ({ymap})')
1941
- self._ymap = ymap
2114
+ self._x = np.asarray(nxdata[nxdata.attrs['axes'][-1]])
2115
+ self._ymap = np.asarray(nxdata.nxsignal)
1942
2116
 
1943
2117
  # Check input parameters
1944
2118
  if self._x.ndim != 1:
1945
- raise ValueError(f'Invalid dimension for input x {self._x.ndim}')
1946
- if self._ymap.ndim < 2:
1947
- raise ValueError(
1948
- 'Invalid number of dimension of the input dataset '
1949
- f'{self._ymap.ndim}')
2119
+ raise ValueError(f'Invalid x dimension ({self._x.ndim})')
1950
2120
  if self._x.size != self._ymap.shape[-1]:
1951
2121
  raise ValueError(
1952
2122
  f'Inconsistent x and y dimensions ({self._x.size} vs '
1953
2123
  f'{self._ymap.shape[-1]})')
1954
- if not isinstance(normalize, bool):
1955
- logger.warning(
1956
- f'Invalid value for normalize ({normalize}) in Fit.__init__: '
1957
- 'setting normalize to True')
1958
- normalize = True
1959
- if isinstance(transpose, bool) and not transpose:
1960
- transpose = None
1961
- if transpose is not None and self._ymap.ndim < 3:
1962
- logger.warning(
1963
- f'Transpose meaningless for {self._ymap.ndim-1}D data maps: '
1964
- 'ignoring transpose')
1965
- if transpose is not None:
1966
- if (self._ymap.ndim == 3 and isinstance(transpose, bool)
1967
- and transpose):
1968
- self._transpose = (1, 0)
1969
- elif not isinstance(transpose, (tuple, list)):
1970
- logger.warning(
1971
- f'Invalid data type for transpose ({transpose}, '
1972
- f'{type(transpose)}): setting transpose to False')
1973
- elif transpose != self._ymap.ndim-1:
1974
- logger.warning(
1975
- f'Invalid dimension for transpose ({transpose}, must be '
1976
- f'equal to {self._ymap.ndim-1}): '
1977
- 'setting transpose to False')
1978
- elif any(i not in transpose for i in range(len(transpose))):
1979
- logger.warning(
1980
- f'Invalid index in transpose ({transpose}): '
1981
- 'setting transpose to False')
1982
- elif not all(i == transpose[i] for i in range(self._ymap.ndim-1)):
1983
- self._transpose = transpose
1984
- if self._transpose is not None:
1985
- self._inv_transpose = tuple(
1986
- self._transpose.index(i)
1987
- for i in range(len(self._transpose)))
1988
-
1989
- # Flatten the map (transpose if requested)
1990
- # Store the flattened map in self._ymap_norm, whether
1991
- # normalized or not
1992
- if self._transpose is not None:
1993
- self._ymap_norm = np.transpose(
1994
- np.asarray(self._ymap),
1995
- list(self._transpose) + [len(self._transpose)])
1996
- else:
1997
- self._ymap_norm = np.asarray(self._ymap)
1998
- self._map_dim = int(self._ymap_norm.size/self._x.size)
1999
- self._map_shape = self._ymap_norm.shape[:-1]
2124
+
2125
+ # Flatten the map
2126
+ # Store the flattened map in self._ymap_norm
2127
+ self._map_dim = int(self._ymap.size/self._x.size)
2128
+ self._map_shape = self._ymap.shape[:-1]
2000
2129
  self._ymap_norm = np.reshape(
2001
- self._ymap_norm, (self._map_dim, self._x.size))
2130
+ self._ymap, (self._map_dim, self._x.size))
2002
2131
 
2003
2132
  # Check if a mask is provided
2004
- if 'mask' in kwargs:
2005
- self._mask = kwargs.pop('mask')
2006
- if self._mask is None:
2133
+ # if 'mask' in kwargs:
2134
+ # self._mask = kwargs.pop('mask')
2135
+ if True: #self._mask is None:
2007
2136
  ymap_min = float(self._ymap_norm.min())
2008
2137
  ymap_max = float(self._ymap_norm.max())
2009
- else:
2010
- self._mask = np.asarray(self._mask).astype(bool)
2011
- if self._x.size != self._mask.size:
2012
- raise ValueError(
2013
- f'Inconsistent mask dimension ({self._x.size} vs '
2014
- f'{self._mask.size})')
2015
- ymap_masked = np.asarray(self._ymap_norm)[:,~self._mask]
2016
- ymap_min = float(ymap_masked.min())
2017
- ymap_max = float(ymap_masked.max())
2138
+ # else:
2139
+ # self._mask = np.asarray(self._mask).astype(bool)
2140
+ # if self._x.size != self._mask.size:
2141
+ # raise ValueError(
2142
+ # f'Inconsistent mask dimension ({self._x.size} vs '
2143
+ # f'{self._mask.size})')
2144
+ # ymap_masked = np.asarray(self._ymap_norm)[:,~self._mask]
2145
+ # ymap_min = float(ymap_masked.min())
2146
+ # ymap_max = float(ymap_masked.max())
2018
2147
 
2019
2148
  # Normalize the data
2020
2149
  self._y_range = ymap_max-ymap_min
2021
- if normalize and self._y_range > 0.0:
2150
+ if self._y_range > 0.0:
2022
2151
  self._norm = (ymap_min, self._y_range)
2023
2152
  self._ymap_norm = (self._ymap_norm-self._norm[0]) / self._norm[1]
2024
2153
  else:
2025
2154
  self._redchi_cutoff *= self._y_range**2
2026
- if models is not None:
2027
- if callable(models) or isinstance(models, str):
2028
- kwargs = self.add_model(models, **kwargs)
2029
- elif isinstance(models, (tuple, list)):
2030
- for model in models:
2031
- kwargs = self.add_model(model, **kwargs)
2032
- self.fit(**kwargs)
2033
-
2034
- @classmethod
2035
- def fit_map(cls, ymap, models, x=None, normalize=True, **kwargs):
2036
- """Class method for FitMap."""
2037
- return cls(ymap, x=x, models=models, normalize=normalize, **kwargs)
2155
+
2156
+ # Setup fit model
2157
+ self._setup_fit_model(config.parameters, config.models)
2038
2158
 
2039
2159
  @property
2040
2160
  def best_errors(self):
@@ -2046,44 +2166,6 @@ class FitMap(Fit):
2046
2166
  """Return the best fits."""
2047
2167
  return self._best_fit
2048
2168
 
2049
- @property
2050
- def best_results(self):
2051
- """
2052
- Convert the input DataArray to a data set and add the fit
2053
- results.
2054
- """
2055
- if (self.best_values is None or self.best_errors is None
2056
- or self.best_fit is None):
2057
- return None
2058
- if not HAVE_XARRAY:
2059
- logger.warning('Unable to load xarray module')
2060
- return None
2061
- best_values = self.best_values
2062
- best_errors = self.best_errors
2063
- if isinstance(self._ymap, xr.DataArray):
2064
- best_results = self._ymap.to_dataset()
2065
- dims = self._ymap.dims
2066
- fit_name = f'{self._ymap.name}_fit'
2067
- else:
2068
- coords = {
2069
- f'dim{n}_index':([f'dim{n}_index'], range(self._ymap.shape[n]))
2070
- for n in range(self._ymap.ndim-1)}
2071
- coords['x'] = (['x'], self._x)
2072
- dims = list(coords.keys())
2073
- best_results = xr.Dataset(coords=coords)
2074
- best_results['y'] = (dims, self._ymap)
2075
- fit_name = 'y_fit'
2076
- best_results[fit_name] = (dims, self.best_fit)
2077
- if self._mask is not None:
2078
- best_results['mask'] = self._mask
2079
- for n in range(best_values.shape[0]):
2080
- best_results[f'{self._best_parameters[n]}_values'] = \
2081
- (dims[:-1], best_values[n])
2082
- best_results[f'{self._best_parameters[n]}_errors'] = \
2083
- (dims[:-1], best_errors[n])
2084
- best_results.attrs['components'] = self.components
2085
- return best_results
2086
-
2087
2169
  @property
2088
2170
  def best_values(self):
2089
2171
  """Return values of the best fit parameters."""
@@ -2097,6 +2179,9 @@ class FitMap(Fit):
2097
2179
  @property
2098
2180
  def components(self):
2099
2181
  """Return the fit model components info."""
2182
+ # Third party modules
2183
+ from lmfit.models import ExpressionModel
2184
+
2100
2185
  components = {}
2101
2186
  if self._result is None:
2102
2187
  logger.warning(
@@ -2149,7 +2234,8 @@ class FitMap(Fit):
2149
2234
  @property
2150
2235
  def max_nfev(self):
2151
2236
  """
2152
- Return the maximum number of function evaluations for each fit.
2237
+ Return if the maximum number of function evaluations is reached
2238
+ for each fit.
2153
2239
  """
2154
2240
  return self._max_nfev
2155
2241
 
@@ -2158,7 +2244,7 @@ class FitMap(Fit):
2158
2244
  """
2159
2245
  Return the number of function evaluations for each best fit.
2160
2246
  """
2161
- logger.warning('Undefined property num_func_eval')
2247
+ return self._num_func_eval
2162
2248
 
2163
2249
  @property
2164
2250
  def out_of_bounds(self):
@@ -2255,11 +2341,15 @@ class FitMap(Fit):
2255
2341
  self, dims=None, y_title=None, plot_residual=False,
2256
2342
  plot_comp_legends=False, plot_masked_data=True, **kwargs):
2257
2343
  """Plot the best fits."""
2344
+ # Third party modules
2345
+ from lmfit.models import ExpressionModel
2346
+
2258
2347
  if dims is None:
2259
2348
  dims = [0]*len(self._map_shape)
2260
2349
  if (not isinstance(dims, (list, tuple))
2261
2350
  or len(dims) != len(self._map_shape)):
2262
2351
  raise ValueError('Invalid parameter dims ({dims})')
2352
+ dims = tuple(dims)
2263
2353
  if (self._result is None or self.best_fit is None
2264
2354
  or self.best_values is None):
2265
2355
  logger.warning(
@@ -2317,18 +2407,28 @@ class FitMap(Fit):
2317
2407
  quick_plot(
2318
2408
  tuple(plots), legend=legend, title=str(dims), block=True, **kwargs)
2319
2409
 
2320
- def fit(self, **kwargs):
2410
+ def fit(self, config=None, **kwargs):
2321
2411
  """Fit the model to the input data."""
2412
+
2322
2413
  # Check input parameters
2323
2414
  if self._model is None:
2324
2415
  logger.error('Undefined fit model')
2325
- if 'num_proc' in kwargs:
2326
- num_proc = kwargs.pop('num_proc')
2327
- if not is_int(num_proc, ge=1):
2328
- raise ValueError(
2329
- 'Invalid value for keyword argument num_proc ({num_proc})')
2416
+ if config is None:
2417
+ num_proc = kwargs.pop('num_proc', cpu_count())
2418
+ self._rel_height_cutoff = kwargs.pop('rel_height_cutoff')
2419
+ self._try_no_bounds = kwargs.pop('try_no_bounds', False)
2420
+ self._redchi_cutoff = kwargs.pop('redchi_cutoff', 0.1)
2421
+ self._print_report = kwargs.pop('print_report', False)
2422
+ self._plot = kwargs.pop('plot', False)
2423
+ self._skip_init = kwargs.pop('skip_init', True)
2330
2424
  else:
2331
- num_proc = cpu_count()
2425
+ num_proc = config.num_proc
2426
+ self._rel_height_cutoff = config.rel_height_cutoff
2427
+ # self._try_no_bounds = config.try_no_bounds
2428
+ # self._redchi_cutoff = config.redchi_cutoff
2429
+ self._print_report = config.print_report
2430
+ self._plot = config.plot
2431
+ # self._skip_init = config.skip_init
2332
2432
  if num_proc > 1 and not HAVE_JOBLIB:
2333
2433
  logger.warning(
2334
2434
  'Missing joblib in the conda environment, running serially')
@@ -2339,106 +2439,10 @@ class FitMap(Fit):
2339
2439
  'maximum number of processors, num_proc reduced to '
2340
2440
  f'{cpu_count()}')
2341
2441
  num_proc = cpu_count()
2342
- if 'try_no_bounds' in kwargs:
2343
- self._try_no_bounds = kwargs.pop('try_no_bounds')
2344
- if not isinstance(self._try_no_bounds, bool):
2345
- raise ValueError(
2346
- 'Invalid value for keyword argument try_no_bounds '
2347
- f'({self._try_no_bounds})')
2348
- if 'redchi_cutoff' in kwargs:
2349
- self._redchi_cutoff = kwargs.pop('redchi_cutoff')
2350
- if not is_num(self._redchi_cutoff, gt=0):
2351
- raise ValueError(
2352
- 'Invalid value for keyword argument redchi_cutoff'
2353
- f'({self._redchi_cutoff})')
2354
- if 'print_report' in kwargs:
2355
- self._print_report = kwargs.pop('print_report')
2356
- if not isinstance(self._print_report, bool):
2357
- raise ValueError(
2358
- 'Invalid value for keyword argument print_report'
2359
- f'({self._print_report})')
2360
- if 'plot' in kwargs:
2361
- self._plot = kwargs.pop('plot')
2362
- if not isinstance(self._plot, bool):
2363
- raise ValueError(
2364
- 'Invalid value for keyword argument plot'
2365
- f'({self._plot})')
2366
- if 'skip_init' in kwargs:
2367
- self._skip_init = kwargs.pop('skip_init')
2368
- if not isinstance(self._skip_init, bool):
2369
- raise ValueError(
2370
- 'Invalid value for keyword argument skip_init'
2371
- f'({self._skip_init})')
2372
-
2373
- # Apply mask if supplied:
2374
- if 'mask' in kwargs:
2375
- self._mask = kwargs.pop('mask')
2376
- if self._mask is not None:
2377
- self._mask = np.asarray(self._mask).astype(bool)
2378
- if self._x.size != self._mask.size:
2379
- raise ValueError(
2380
- f'Inconsistent x and mask dimensions ({self._x.size} vs '
2381
- f'{self._mask.size})')
2382
-
2383
- # Add constant offset for a normalized single component model
2384
- if self._result is None and self._norm is not None and self._norm[0]:
2385
- self.add_model(
2386
- 'constant',
2387
- prefix='tmp_normalization_offset_',
2388
- parameters={
2389
- 'name': 'c',
2390
- 'value': -self._norm[0],
2391
- 'vary': False,
2392
- 'norm': True,
2393
- })
2394
- # 'value': -self._norm[0]/self._norm[1],
2395
- # 'vary': False,
2396
- # 'norm': False,
2442
+ self._redchi_cutoff *= self._y_range**2
2397
2443
 
2398
- # Adjust existing parameters for refit:
2399
- if 'parameters' in kwargs:
2400
- parameters = kwargs.pop('parameters')
2401
- if isinstance(parameters, dict):
2402
- parameters = (parameters, )
2403
- elif not is_dict_series(parameters):
2404
- raise ValueError(
2405
- 'Invalid value for keyword argument parameters'
2406
- f'({parameters})')
2407
- for par in parameters:
2408
- name = par['name']
2409
- if name not in self._parameters:
2410
- raise ValueError(
2411
- f'Unable to match {name} parameter {par} to an '
2412
- 'existing one')
2413
- if self._parameters[name].expr is not None:
2414
- raise ValueError(
2415
- f'Unable to modify {name} parameter {par} '
2416
- '(currently an expression)')
2417
- value = par.get('value')
2418
- vary = par.get('vary')
2419
- if par.get('expr') is not None:
2420
- raise KeyError(
2421
- f'Invalid "expr" key in {name} parameter {par}')
2422
- self._parameters[name].set(
2423
- value=value, vary=vary, min=par.get('min'),
2424
- max=par.get('max'))
2425
- # Overwrite existing best values for fixed parameters
2426
- # when a value is specified
2427
- if isinstance(value, (int, float)) and vary is False:
2428
- for i, nname in enumerate(self._best_parameters):
2429
- if nname == name:
2430
- self._best_values[i] = value
2431
-
2432
- # Check for uninitialized parameters
2433
- for name, par in self._parameters.items():
2434
- if par.expr is None:
2435
- value = par.value
2436
- if value is None or np.isinf(value) or np.isnan(value):
2437
- value = 1.0
2438
- if self._norm is None or name not in self._parameter_norms:
2439
- self._parameters[name].set(value=value)
2440
- elif self._parameter_norms[name]:
2441
- self._parameters[name].set(value=value*self._norm[1])
2444
+ # Setup the fit
2445
+ self._setup_fit(config)
2442
2446
 
2443
2447
  # Create the best parameter list, consisting of all varying
2444
2448
  # parameters plus the expression parameters in order to
@@ -2466,6 +2470,7 @@ class FitMap(Fit):
2466
2470
  if self._result is not None:
2467
2471
  self._out_of_bounds = None
2468
2472
  self._max_nfev = None
2473
+ self._num_func_eval = None
2469
2474
  self._redchi = None
2470
2475
  self._success = None
2471
2476
  self._best_fit = None
@@ -2473,15 +2478,12 @@ class FitMap(Fit):
2473
2478
  assert self._best_values is not None
2474
2479
  assert self._best_values.shape[0] == num_best_parameters
2475
2480
  assert self._best_values.shape[1:] == self._map_shape
2476
- if self._transpose is not None:
2477
- self._best_values = np.transpose(
2478
- self._best_values, [0]+[i+1 for i in self._transpose])
2479
2481
  self._best_values = [
2480
2482
  np.reshape(self._best_values[i], self._map_dim)
2481
2483
  for i in range(num_best_parameters)]
2482
2484
  if self._norm is not None:
2483
2485
  for i, name in enumerate(self._best_parameters):
2484
- if self._parameter_norms.get(name, False):
2486
+ if name in self._linear_parameters:
2485
2487
  self._best_values[i] /= self._norm[1]
2486
2488
 
2487
2489
  # Normalize the initial parameters
@@ -2508,6 +2510,7 @@ class FitMap(Fit):
2508
2510
  if num_proc == 1:
2509
2511
  self._out_of_bounds_flat = np.zeros(self._map_dim, dtype=bool)
2510
2512
  self._max_nfev_flat = np.zeros(self._map_dim, dtype=bool)
2513
+ self._num_func_eval_flat = np.zeros(self._map_dim, dtype=np.intc)
2511
2514
  self._redchi_flat = np.zeros(self._map_dim, dtype=np.float64)
2512
2515
  self._success_flat = np.zeros(self._map_dim, dtype=bool)
2513
2516
  self._best_fit_flat = np.zeros(
@@ -2525,7 +2528,7 @@ class FitMap(Fit):
2525
2528
  np.zeros(self._map_dim, dtype=np.float64)
2526
2529
  for _ in range(num_new_parameters)]
2527
2530
  else:
2528
- self._memfolder = './joblib_memmap'
2531
+ self._memfolder = 'joblib_memmap'
2529
2532
  try:
2530
2533
  mkdir(self._memfolder)
2531
2534
  except FileExistsError:
@@ -2537,6 +2540,11 @@ class FitMap(Fit):
2537
2540
  filename_memmap = path.join(self._memfolder, 'max_nfev_memmap')
2538
2541
  self._max_nfev_flat = np.memmap(
2539
2542
  filename_memmap, dtype=bool, shape=(self._map_dim), mode='w+')
2543
+ filename_memmap = path.join(
2544
+ self._memfolder, 'num_func_eval_memmap')
2545
+ self._num_func_eval_flat = np.memmap(
2546
+ filename_memmap, dtype=np.intc, shape=(self._map_dim),
2547
+ mode='w+')
2540
2548
  filename_memmap = path.join(self._memfolder, 'redchi_memmap')
2541
2549
  self._redchi_flat = np.memmap(
2542
2550
  filename_memmap, dtype=np.float64, shape=(self._map_dim),
@@ -2598,57 +2606,68 @@ class FitMap(Fit):
2598
2606
  except AttributeError:
2599
2607
  pass
2600
2608
 
2601
- if num_proc == 1:
2602
- # Perform the remaining fits serially
2603
- for n in range(1, self._map_dim):
2604
- self._fit(n, current_best_values, **kwargs)
2605
- else:
2606
- # Perform the remaining fits in parallel
2607
- num_fit = self._map_dim-1
2608
- if num_proc > num_fit:
2609
- logger.warning(
2610
- f'The requested number of processors ({num_proc}) exceeds '
2611
- f'the number of fits, num_proc reduced to {num_fit}')
2612
- num_proc = num_fit
2613
- num_fit_per_proc = 1
2609
+ if self._map_dim > 1:
2610
+ if num_proc == 1:
2611
+ # Perform the remaining fits serially
2612
+ for n in range(1, self._map_dim):
2613
+ self._fit(n, current_best_values, **kwargs)
2614
2614
  else:
2615
- num_fit_per_proc = round((num_fit)/num_proc)
2616
- if num_proc*num_fit_per_proc < num_fit:
2617
- num_fit_per_proc += 1
2618
- num_fit_batch = min(num_fit_per_proc, 40)
2619
- with Parallel(n_jobs=num_proc) as parallel:
2620
- parallel(
2621
- delayed(self._fit_parallel)
2622
- (current_best_values, num_fit_batch, n_start, **kwargs)
2623
- for n_start in range(1, self._map_dim, num_fit_batch))
2615
+ # Perform the remaining fits in parallel
2616
+ num_fit = self._map_dim-1
2617
+ if num_proc > num_fit:
2618
+ logger.warning(
2619
+ f'The requested number of processors ({num_proc}) '
2620
+ 'exceeds the number of fits, num_proc reduced to '
2621
+ f'{num_fit}')
2622
+ num_proc = num_fit
2623
+ num_fit_per_proc = 1
2624
+ else:
2625
+ num_fit_per_proc = round((num_fit)/num_proc)
2626
+ if num_proc*num_fit_per_proc < num_fit:
2627
+ num_fit_per_proc += 1
2628
+ num_fit_batch = min(num_fit_per_proc, 40)
2629
+ with Parallel(n_jobs=num_proc) as parallel:
2630
+ parallel(
2631
+ delayed(self._fit_parallel)
2632
+ (current_best_values, num_fit_batch, n_start,
2633
+ **kwargs)
2634
+ for n_start in range(1, self._map_dim, num_fit_batch))
2624
2635
 
2625
2636
  # Renormalize the initial parameters for external use
2626
2637
  if self._norm is not None and self._normalized:
2627
- init_values = {}
2628
- for name, value in self._result.init_values.items():
2629
- if (name not in self._parameter_norms
2630
- or self._parameters[name].expr is not None):
2631
- init_values[name] = value
2632
- elif self._parameter_norms[name]:
2633
- init_values[name] = value*self._norm[1]
2634
- self._result.init_values = init_values
2635
- for name, par in self._result.init_params.items():
2636
- if par.expr is None and self._parameter_norms.get(name, False):
2637
- _min = par.min
2638
- _max = par.max
2639
- value = par.value*self._norm[1]
2640
- if not np.isinf(_min) and abs(_min) != FLOAT_MIN:
2641
- _min *= self._norm[1]
2642
- if not np.isinf(_max) and abs(_max) != FLOAT_MIN:
2643
- _max *= self._norm[1]
2644
- par.set(value=value, min=_min, max=_max)
2645
- par.init_value = par.value
2638
+ if hasattr(self._result, 'init_values'):
2639
+ init_values = {}
2640
+ for name, value in self._result.init_values.items():
2641
+ if (name in self._nonlinear_parameters
2642
+ or self._parameters[name].expr is not None):
2643
+ init_values[name] = value
2644
+ else:
2645
+ init_values[name] = value*self._norm[1]
2646
+ self._result.init_values = init_values
2647
+ if (hasattr(self._result, 'init_params')
2648
+ and self._result.init_params is not None):
2649
+ for name, par in self._result.init_params.items():
2650
+ if par.expr is None and name in self._linear_parameters:
2651
+ _min = par.min
2652
+ _max = par.max
2653
+ value = par.value*self._norm[1]
2654
+ if not np.isinf(_min) and abs(_min) != FLOAT_MIN:
2655
+ _min *= self._norm[1]
2656
+ if not np.isinf(_max) and abs(_max) != FLOAT_MIN:
2657
+ _max *= self._norm[1]
2658
+ par.set(value=value, min=_min, max=_max)
2659
+ if self._code == 'scipy':
2660
+ setattr(par, '_init_value', par.value)
2661
+ else:
2662
+ par.init_value = par.value
2646
2663
 
2647
2664
  # Remap the best results
2648
2665
  self._out_of_bounds = np.copy(np.reshape(
2649
2666
  self._out_of_bounds_flat, self._map_shape))
2650
2667
  self._max_nfev = np.copy(np.reshape(
2651
2668
  self._max_nfev_flat, self._map_shape))
2669
+ self._num_func_eval = np.copy(np.reshape(
2670
+ self._num_func_eval_flat, self._map_shape))
2652
2671
  self._redchi = np.copy(np.reshape(self._redchi_flat, self._map_shape))
2653
2672
  self._success = np.copy(np.reshape(
2654
2673
  self._success_flat, self._map_shape))
@@ -2662,6 +2681,8 @@ class FitMap(Fit):
2662
2681
  self._out_of_bounds = np.transpose(
2663
2682
  self._out_of_bounds, self._inv_transpose)
2664
2683
  self._max_nfev = np.transpose(self._max_nfev, self._inv_transpose)
2684
+ self._num_func_eval = np.transpose(
2685
+ self._num_func_eval, self._inv_transpose)
2665
2686
  self._redchi = np.transpose(self._redchi, self._inv_transpose)
2666
2687
  self._success = np.transpose(self._success, self._inv_transpose)
2667
2688
  self._best_fit = np.transpose(
@@ -2673,6 +2694,7 @@ class FitMap(Fit):
2673
2694
  self._best_errors, [0] + [i+1 for i in self._inv_transpose])
2674
2695
  del self._out_of_bounds_flat
2675
2696
  del self._max_nfev_flat
2697
+ del self._num_func_eval_flat
2676
2698
  del self._redchi_flat
2677
2699
  del self._success_flat
2678
2700
  del self._best_fit_flat
@@ -2684,9 +2706,9 @@ class FitMap(Fit):
2684
2706
  self._parameters[name].set(min=par['min'], max=par['max'])
2685
2707
  self._normalized = False
2686
2708
  if self._norm is not None:
2687
- for name, norm in self._parameter_norms.items():
2709
+ for name in self._linear_parameters:
2688
2710
  par = self._parameters[name]
2689
- if par.expr is None and norm:
2711
+ if par.expr is None:
2690
2712
  value = par.value*self._norm[1]
2691
2713
  _min = par.min
2692
2714
  _max = par.max
@@ -2705,62 +2727,68 @@ class FitMap(Fit):
2705
2727
  self._fit(n_start+n, current_best_values, **kwargs)
2706
2728
 
2707
2729
  def _fit(self, n, current_best_values, return_result=False, **kwargs):
2708
- # Check input parameters
2709
- if 'rel_amplitude_cutoff' in kwargs:
2710
- rel_amplitude_cutoff = kwargs.pop('rel_amplitude_cutoff')
2711
- if (rel_amplitude_cutoff is not None
2712
- and not is_num(rel_amplitude_cutoff, gt=0.0, lt=1.0)):
2713
- logger.warning(
2714
- 'Ignoring invalid parameter rel_amplitude_cutoff '
2715
- f'in FitMap._fit() ({rel_amplitude_cutoff})')
2716
- rel_amplitude_cutoff = None
2717
- else:
2718
- rel_amplitude_cutoff = None
2730
+ # Do not attempt a fit if the data is entirely below the cutoff
2731
+ if (self._rel_height_cutoff is not None
2732
+ and self._ymap_norm[n].max() < self._rel_height_cutoff):
2733
+ logger.debug(f'Skipping fit for n = {n} (rel norm = '
2734
+ f'{self._ymap_norm[n].max():.5f})')
2735
+ if self._code == 'scipy':
2736
+ from CHAP.utils.fit import ModelResult
2737
+
2738
+ result = ModelResult(self._model, deepcopy(self._parameters))
2739
+ else:
2740
+ from lmfit.model import ModelResult
2741
+
2742
+ result = ModelResult(self._model, deepcopy(self._parameters))
2743
+ result.success = False
2744
+ # Renormalize the data and results
2745
+ self._renormalize(n, result)
2746
+ return result
2719
2747
 
2720
2748
  # Regular full fit
2721
2749
  result = self._fit_with_bounds_check(n, current_best_values, **kwargs)
2722
2750
 
2723
- if rel_amplitude_cutoff is not None:
2751
+ if self._rel_height_cutoff is not None:
2724
2752
  # Third party modules
2725
2753
  from lmfit.models import (
2726
2754
  GaussianModel,
2727
2755
  LorentzianModel,
2728
2756
  )
2729
2757
 
2730
- # Check for low amplitude peaks and refit without them
2731
- amplitudes = []
2758
+ # Check for low heights peaks and refit without them
2759
+ heights = []
2732
2760
  names = []
2733
2761
  for component in result.components:
2734
2762
  if isinstance(component, (GaussianModel, LorentzianModel)):
2735
2763
  for name in component.param_names:
2736
- if 'amplitude' in name:
2737
- amplitudes.append(result.params[name].value)
2764
+ if 'height' in name:
2765
+ heights.append(result.params[name].value)
2738
2766
  names.append(name)
2739
- if amplitudes:
2767
+ if heights:
2740
2768
  refit = False
2741
- amplitudes = np.asarray(amplitudes)/sum(amplitudes)
2769
+ max_height = max(heights)
2742
2770
  parameters_save = deepcopy(self._parameters)
2743
- for i, (name, amp) in enumerate(zip(names, amplitudes)):
2744
- if abs(amp) < rel_amplitude_cutoff:
2745
- self._parameters[name].set(
2746
- value=0.0, min=0.0, vary=False)
2771
+ for i, (name, height) in enumerate(zip(names, heights)):
2772
+ if height < self._rel_height_cutoff*max_height:
2773
+ self._parameters[
2774
+ name.replace('height', 'amplitude')].set(
2775
+ value=0.0, min=0.0, vary=False)
2747
2776
  self._parameters[
2748
- name.replace('amplitude', 'center')].set(
2777
+ name.replace('height', 'center')].set(
2749
2778
  vary=False)
2750
2779
  self._parameters[
2751
- name.replace('amplitude', 'sigma')].set(
2780
+ name.replace('height', 'sigma')].set(
2752
2781
  value=0.0, min=0.0, vary=False)
2753
2782
  refit = True
2754
2783
  if refit:
2755
2784
  result = self._fit_with_bounds_check(
2756
2785
  n, current_best_values, **kwargs)
2757
- # for name in names:
2758
- # result.params[name].error = 0.0
2759
2786
  # Reset fixed amplitudes back to default
2760
2787
  self._parameters = deepcopy(parameters_save)
2761
2788
 
2762
2789
  if result.redchi >= self._redchi_cutoff:
2763
2790
  result.success = False
2791
+ self._num_func_eval_flat[n] = result.nfev
2764
2792
  if result.nfev == result.max_nfev:
2765
2793
  if result.redchi < self._redchi_cutoff:
2766
2794
  result.success = True
@@ -2774,8 +2802,10 @@ class FitMap(Fit):
2774
2802
  current_best_values[par.name] = par.value
2775
2803
  else:
2776
2804
  logger.warning(f'Fit for n = {n} failed: {result.lmdif_message}')
2805
+
2777
2806
  # Renormalize the data and results
2778
2807
  self._renormalize(n, result)
2808
+
2779
2809
  if self._print_report:
2780
2810
  print(result.fit_report(show_correl=False))
2781
2811
  if self._plot:
@@ -2787,6 +2817,7 @@ class FitMap(Fit):
2787
2817
  result=result, y=np.asarray(self._ymap[dims]),
2788
2818
  plot_comp_legends=True, skip_init=self._skip_init,
2789
2819
  title=str(dims))
2820
+
2790
2821
  if return_result:
2791
2822
  return result
2792
2823
  return None
@@ -2811,13 +2842,8 @@ class FitMap(Fit):
2811
2842
  elif par.expr is None:
2812
2843
  par.set(value=self._best_values[i][n])
2813
2844
  self._reset_par_at_boundary()
2814
- if self._mask is None:
2815
- result = self._model.fit(
2816
- self._ymap_norm[n], self._parameters, x=self._x, **kwargs)
2817
- else:
2818
- result = self._model.fit(
2819
- self._ymap_norm[n][~self._mask], self._parameters,
2820
- x=self._x[~self._mask], **kwargs)
2845
+ result = self._fit_nonlinear_model(
2846
+ self._x, self._ymap_norm[n], **kwargs)
2821
2847
  out_of_bounds = False
2822
2848
  for name, par in self._parameter_bounds.items():
2823
2849
  if self._parameters[name].vary:
@@ -2853,13 +2879,8 @@ class FitMap(Fit):
2853
2879
  elif par.expr is None:
2854
2880
  par.set(value=self._best_values[i][n])
2855
2881
  self._reset_par_at_boundary()
2856
- if self._mask is None:
2857
- result = self._model.fit(
2858
- self._ymap_norm[n], self._parameters, x=self._x, **kwargs)
2859
- else:
2860
- result = self._model.fit(
2861
- self._ymap_norm[n][~self._mask], self._parameters,
2862
- x=self._x[~self._mask], **kwargs)
2882
+ result = self._fit_nonlinear_model(
2883
+ self._x, self._ymap_norm[n], **kwargs)
2863
2884
  out_of_bounds = False
2864
2885
  for name, par in self._parameter_bounds.items():
2865
2886
  if self._parameters[name].vary:
@@ -2876,41 +2897,50 @@ class FitMap(Fit):
2876
2897
  return result
2877
2898
 
2878
2899
  def _renormalize(self, n, result):
2879
- self._redchi_flat[n] = np.float64(result.redchi)
2880
2900
  self._success_flat[n] = result.success
2901
+ if result.success:
2902
+ self._redchi_flat[n] = np.float64(result.redchi)
2881
2903
  if self._norm is None or not self._normalized:
2882
- self._best_fit_flat[n] = result.best_fit
2883
2904
  for i, name in enumerate(self._best_parameters):
2884
2905
  self._best_values_flat[i][n] = np.float64(
2885
2906
  result.params[name].value)
2886
2907
  self._best_errors_flat[i][n] = np.float64(
2887
2908
  result.params[name].stderr)
2909
+ if result.success:
2910
+ self._best_fit_flat[n] = result.best_fit
2888
2911
  else:
2889
- pars = set(self._parameter_norms) & set(self._best_parameters)
2890
2912
  for name, par in result.params.items():
2891
- if name in pars and self._parameter_norms[name]:
2913
+ if name in self._linear_parameters:
2892
2914
  if par.stderr is not None:
2893
- par.stderr *= self._norm[1]
2915
+ if self._code == 'scipy':
2916
+ setattr(par, '_stderr', par.stderr*self._norm[1])
2917
+ else:
2918
+ par.stderr *= self._norm[1]
2894
2919
  if par.expr is None:
2895
2920
  par.value *= self._norm[1]
2896
2921
  if self._print_report:
2897
2922
  if par.init_value is not None:
2898
- par.init_value *= self._norm[1]
2923
+ if self._code == 'scipy':
2924
+ setattr(par, '_init_value',
2925
+ par.init_value*self._norm[1])
2926
+ else:
2927
+ par.init_value *= self._norm[1]
2899
2928
  if (not np.isinf(par.min)
2900
2929
  and abs(par.min) != FLOAT_MIN):
2901
2930
  par.min *= self._norm[1]
2902
2931
  if (not np.isinf(par.max)
2903
2932
  and abs(par.max) != FLOAT_MIN):
2904
2933
  par.max *= self._norm[1]
2905
- self._best_fit_flat[n] = (
2906
- result.best_fit*self._norm[1] + self._norm[0])
2907
2934
  for i, name in enumerate(self._best_parameters):
2908
2935
  self._best_values_flat[i][n] = np.float64(
2909
2936
  result.params[name].value)
2910
2937
  self._best_errors_flat[i][n] = np.float64(
2911
2938
  result.params[name].stderr)
2912
- if self._plot:
2913
- if not self._skip_init:
2914
- result.init_fit = (
2915
- result.init_fit*self._norm[1] + self._norm[0])
2916
- result.best_fit = np.copy(self._best_fit_flat[n])
2939
+ if result.success:
2940
+ self._best_fit_flat[n] = (
2941
+ result.best_fit*self._norm[1] + self._norm[0])
2942
+ if self._plot:
2943
+ if not self._skip_init:
2944
+ result.init_fit = (
2945
+ result.init_fit*self._norm[1] + self._norm[0])
2946
+ result.best_fit = np.copy(self._best_fit_flat[n])