rapidtide 3.0.9__py3-none-any.whl → 3.0.11__py3-none-any.whl

rapidtide/fit.py CHANGED
@@ -254,16 +254,38 @@ def risetime_eval_loop(x, p):
 @conditionaljit()
 def trapezoid_eval(x, toplength, p):
     """
+    Evaluates the trapezoidal function at a given point.
+
+    The trapezoidal function is defined as:
+
+        f(x) = A * (1 - exp(-x / tau))
+
+    if 0 <= x < L
+
+    and
+
+        f(x) = A * exp(-(x - L) / gamma)
+
+    if x >= L
+
+    where A, tau, and gamma are parameters.
 
     Parameters
     ----------
-    x
-    toplength
-    p
-
+    x: float or array-like
+        The point or vector at which to evaluate the trapezoidal function.
+    toplength: float
+        The length of the top plateau of the trapezoid.
+    p: list or tuple of floats
+        A list of four values [A, tau, gamma, L].
     Returns
     -------
+    float or array-like
+        The value of the trapezoidal function at x.
 
+    Notes
+    -----
+    This function is vectorized and can handle arrays of input points.
     """
     corrx = x - p[0]
     if corrx < 0.0:
@@ -277,15 +299,28 @@ def trapezoid_eval(x, toplength, p):
 @conditionaljit()
 def risetime_eval(x, p):
     """
+    Evaluates the rise time function at a given point.
+
+    The rise time function is defined as:
+
+        f(x) = A * (1 - exp(-x / tau))
+
+    where A and tau are parameters.
 
     Parameters
     ----------
-    x
-    p
-
+    x: float or array-like
+        The point at which to evaluate the rise time function.
+    p: list or tuple of floats
+        A list of two values [A, tau].
     Returns
     -------
+    float or array-like
+        The value of the rise time function at x.
 
+    Notes
+    -----
+    This function is vectorized and can handle arrays of input points.
     """
     corrx = x - p[0]
     if corrx < 0.0:
@@ -310,17 +345,39 @@ def gasboxcar(
 # generate the polynomial fit timecourse from the coefficients
 @conditionaljit()
 def trendgen(thexvals, thefitcoffs, demean):
-    """
+    """Generates a polynomial trend based on input x-values and coefficients.
+
+    This function constructs a polynomial trend using the provided x-values and
+    a set of polynomial coefficients. The order of the polynomial is determined
+    from the shape of the `thefitcoffs` array. Optionally, a constant term
+    (the highest order coefficient) can be included or excluded from the trend.
 
     Parameters
     ----------
-    thexvals
-    thefitcoffs
-    demean
+    thexvals : array_like
+        The x-values (independent variable) at which to evaluate the polynomial trend.
+        Expected to be a numpy array or similar.
+    thefitcoffs : array_like
+        A 1D array of polynomial coefficients. The length of this array minus one
+        determines the order of the polynomial. Coefficients are expected to be
+        ordered from the highest power of x down to the constant term (e.g.,
+        [a_n, a_n-1, ..., a_1, a_0] for a polynomial a_n*x^n + ... + a_0).
+    demean : bool
+        If True, the constant term (thefitcoffs[order]) is added to the generated
+        trend. If False, the constant term is excluded, effectively generating
+        a trend that is "demeaned" or centered around zero (assuming the constant
+        term represents the mean or offset).
 
     Returns
     -------
+    numpy.ndarray
+        A numpy array containing the calculated polynomial trend, with the same
+        shape as `thexvals`.
 
+    Notes
+    -----
+    This function implicitly assumes that `thexvals` is a numpy array or
+    behaves similarly for element-wise multiplication (`np.multiply`).
     """
     theshape = thefitcoffs.shape
     order = theshape[0] - 1
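The `demean` behavior documented above is easy to misread, so here is a minimal NumPy sketch of the evaluation the new docstring describes (coefficients ordered highest power first, constant term applied only when `demean` is True). This mirrors the documented behavior rather than rapidtide's own loop over `np.multiply`:

```python
import numpy as np

def trendgen_sketch(thexvals, thefitcoffs, demean):
    # Coefficients ordered [a_n, ..., a_1, a_0]; np.polyval expects the same order.
    full = np.polyval(thefitcoffs, thexvals)   # a_n*x^n + ... + a_1*x + a_0
    if demean:
        return full                            # constant term included, per the docstring
    return full - thefitcoffs[-1]              # constant (offset) term excluded
```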
@@ -337,18 +394,34 @@ def trendgen(thexvals, thefitcoffs, demean):
 
 # @conditionaljit()
 def detrend(inputdata, order=1, demean=False):
-    """
-
-    Parameters
-    ----------
-    inputdata
-    order
-    demean
-
-    Returns
-    -------
-
-    """
+    """Estimates and removes a polynomial trend timecourse.
+
+    This routine calculates a polynomial defined by a set of coefficients
+    at specified time points to create a trend timecourse, and subtracts it
+    from the input signal. Optionally, it can remove the mean of the input
+    data as well.
+
+    Parameters
+    ----------
+    thetimepoints : numpy.ndarray
+        A 1D NumPy array of time points at which to evaluate the polynomial.
+    thecoffs : list or numpy.ndarray
+        A list or 1D NumPy array of polynomial coefficients, typically in
+        decreasing order of power (e.g., `[a, b, c]` for `ax^2 + bx + c`).
+    demean : bool
+        If True, the mean of the generated trend timecourse will be subtracted,
+        effectively centering the trend around zero.
+
+    Returns
+    -------
+    numpy.ndarray
+        A 1D NumPy array representing the generated polynomial trend timecourse.
+
+    Notes
+    -----
+    - This function utilizes `numpy.polyval` to evaluate the polynomial.
+    - Requires the `numpy` library.
+    """
     thetimepoints = np.arange(0.0, len(inputdata), 1.0) - len(inputdata) / 2.0
     try:
         thecoffs = Polynomial.fit(thetimepoints, inputdata, order).convert().coef[::-1]
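A hedged usage sketch for the signature shown in this hunk, `detrend(inputdata, order=1, demean=False)`; the synthetic signal and the import path (`rapidtide.fit`) are illustrative assumptions:

```python
import numpy as np
from rapidtide.fit import detrend

# Synthetic signal: a slow oscillation riding on a linear drift.
t = np.linspace(0.0, 10.0, 500)
signal = np.sin(2.0 * np.pi * 0.5 * t) + 0.3 * t

# Remove a first-order (linear) trend; demean=True also removes the constant offset.
cleaned = detrend(signal, order=1, demean=True)
```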
@@ -361,15 +434,30 @@ def detrend(inputdata, order=1, demean=False):
 @conditionaljit()
 def findfirstabove(theyvals, thevalue):
     """
+    Find the index of the first element in an array that is greater than or equal to a specified value.
+
+    This function iterates through the input array `theyvals` and returns the index of the
+    first element that is greater than or equal to `thevalue`. If no such element exists,
+    it returns the length of the array.
 
     Parameters
     ----------
-    theyvals
-    thevalue
-
+    theyvals : array_like
+        A 1D array of numeric values to be searched.
+    thevalue : float or int
+        The threshold value to compare against elements in `theyvals`.
     Returns
     -------
+    int
+        The index of the first element in `theyvals` that is greater than or equal to `thevalue`.
+        If no such element exists, returns the length of `theyvals`.
 
+    Examples
+    --------
+    >>> findfirstabove([1, 2, 3, 4], 3)
+    2
+    >>> findfirstabove([1, 2, 3, 4], 5)
+    4
     """
     for i in range(0, len(theyvals)):
         if theyvals[i] >= thevalue:
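The doctest examples above pin down the edge case (the array length is returned when nothing qualifies). A rough vectorized equivalent, offered only as a sketch of the documented behavior:

```python
import numpy as np

def findfirstabove_sketch(theyvals, thevalue):
    # np.argmax returns the first True index, but returns 0 when no element
    # qualifies, so fall back to len(theyvals) to match the documented behavior.
    hits = np.asarray(theyvals) >= thevalue
    return int(np.argmax(hits)) if hits.any() else len(theyvals)
```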
@@ -393,26 +481,47 @@ def findtrapezoidfunc(
     displayplots=False,
 ):
     """
+    Find the best-fitting trapezoidal function parameters to a data set.
+
+    This function uses least-squares optimization to fit a trapezoidal function
+    defined by `trapezoid_eval` to the input data (`theyvals`), using `thexvals`
+    as the independent variable. The shape of the trapezoid is fixed by `thetoplength`.
 
     Parameters
     ----------
-    thexvals
-    theyvals
-    thetoplength
-    initguess
-    debug
-    minrise
-    maxrise
-    minfall
-    maxfall
-    minstart
-    maxstart
-    refine
-    displayplots
-
+    thexvals : array_like
+        Independent variable values (time points) for the data.
+    theyvals : array_like
+        Dependent variable values (signal intensity) corresponding to `thexvals`.
+    thetoplength : float
+        The length of the top plateau of the trapezoid function.
+    initguess : array_like, optional
+        Initial guess for [start, amplitude, risetime, falltime].
+        If None, uses defaults based on data statistics.
+    debug : bool, optional
+        If True, print intermediate values during computation (default: False).
+    minrise : float, optional
+        Minimum allowed rise time parameter (default: 0.0).
+    maxrise : float, optional
+        Maximum allowed rise time parameter (default: 200.0).
+    minfall : float, optional
+        Minimum allowed fall time parameter (default: 0.0).
+    maxfall : float, optional
+        Maximum allowed fall time parameter (default: 200.0).
+    minstart : float, optional
+        Minimum allowed start time parameter (default: -100.0).
+    maxstart : float, optional
+        Maximum allowed start time parameter (default: 100.0).
+    refine : bool, optional
+        If True, perform additional refinement steps (not implemented in this version).
+    displayplots : bool, optional
+        If True, display plots during computation (not implemented in this version).
     Returns
     -------
-
+    tuple of floats
+        The fitted parameters [start, amplitude, risetime, falltime] if successful,
+        or [0.0, 0.0, 0.0, 0.0] if the solution is outside the valid parameter bounds.
+        A fifth value (integer) indicating success (1) or failure (0).
     """
     # guess at parameters: risestart, riseamplitude, risetime
     if initguess is None:
@@ -505,18 +614,50 @@ def territorydecomp(
     inputmap, template, atlas, inputmask=None, intercept=True, fitorder=1, debug=False
 ):
     """
+    Decompose an input map into territories defined by an atlas using polynomial regression.
+
+    This function performs a decomposition of an input map (e.g., a brain image) into
+    distinct regions (territories) as defined by an atlas. For each territory, it fits
+    a polynomial model to the template values and the corresponding data in that region.
+    The resulting coefficients are used to project the model back onto the original map.
 
     Parameters
     ----------
-    inputmap
-    atlas
-    inputmask
-    fitorder
-    debug
-
+    inputmap : numpy.ndarray
+        Input data to be decomposed. Can be 3D or 4D (e.g., time series).
+    template : numpy.ndarray
+        Template values corresponding to the spatial locations in `inputmap`.
+        Should have the same shape as `inputmap` (or be broadcastable).
+    atlas : numpy.ndarray
+        Atlas defining the territories. Each unique integer value represents a distinct region.
+        Must have the same shape as `inputmap`.
+    inputmask : numpy.ndarray, optional
+        Mask to define valid voxels in `inputmap`. If None, all voxels are considered valid.
+        Should have the same shape as `inputmap`.
+    intercept : bool, optional
+        If True, include an intercept term in the polynomial fit (default: True).
+    fitorder : int, optional
+        The order of the polynomial to fit for each territory (default: 1).
+    debug : bool, optional
+        If True, print debugging information during computation (default: False).
     Returns
     -------
-
+    tuple of numpy.ndarray
+        A tuple containing:
+        - fitmap : numpy.ndarray
+            The decomposed map with fitted values projected back onto the original spatial locations.
+        - thecoffs : numpy.ndarray
+            Array of polynomial coefficients for each territory and map. Shape is (nummaps, numterritories, fitorder+1)
+            if `intercept` is True, or (nummaps, numterritories, fitorder) otherwise.
+        - theR2s : numpy.ndarray
+            R-squared values for the fits for each territory and map. Shape is (nummaps, numterritories).
+
+    Notes
+    -----
+    - The function assumes that `inputmap` and `template` are aligned in space.
+    - If `inputmask` is not provided, all voxels are considered valid.
+    - The number of territories is determined by the maximum value in `atlas`.
+    - For each territory, a polynomial regression is performed using the template values as predictors.
     """
     datadims = len(inputmap.shape)
     if datadims > 3:
@@ -684,6 +825,48 @@ def territorystats(
 
 @conditionaljit()
 def refinepeak_quad(x, y, peakindex, stride=1):
+    """
+    Refine the location and properties of a peak using quadratic interpolation.
+
+    This function takes a peak index and a set of data points to perform
+    quadratic interpolation around the peak to estimate its precise location,
+    value, and width. It also determines whether the point is a local maximum or minimum.
+
+    Parameters
+    ----------
+    x : array-like
+        Independent variable values (e.g., time points).
+    y : array-like
+        Dependent variable values (e.g., signal intensity) corresponding to `x`.
+    peakindex : int
+        Index of the peak in the arrays `x` and `y`.
+    stride : int, optional
+        Number of data points to use on either side of the peak for interpolation.
+        Default is 1.
+
+    Returns
+    -------
+    tuple
+        A tuple containing:
+        - peakloc : float
+            The refined location of the peak.
+        - peakval : float
+            The refined value at the peak.
+        - peakwidth : float
+            The estimated width of the peak.
+        - ismax : bool or None
+            True if the point is a local maximum, False if it's a local minimum,
+            and None if the point cannot be determined (e.g., at boundaries).
+        - badfit : bool
+            True if the fit could not be performed due to invalid conditions,
+            such as being at the boundary or having equal values on both sides.
+
+    Notes
+    -----
+    The function uses a quadratic fit to estimate peak properties. It checks for
+    valid conditions before performing the fit, including ensuring that the peak
+    is not at the edge of the data and that it's either a local maximum or minimum.
+    """
     # first make sure this actually is a peak
     ismax = None
     badfit = False
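A usage sketch based on the return order given in the new docstring (peakloc, peakval, peakwidth, ismax, badfit); the sample data and import path are assumptions for illustration:

```python
import numpy as np
from rapidtide.fit import refinepeak_quad

# Coarsely sampled peak whose true maximum (x = 1.03) falls between samples.
x = np.linspace(0.0, 2.0, 21)
y = np.exp(-((x - 1.03) ** 2) / 0.1)

peakindex = int(np.argmax(y))
peakloc, peakval, peakwidth, ismax, badfit = refinepeak_quad(x, y, peakindex)
# peakloc should land closer to 1.03 than x[peakindex] does, with ismax True and badfit False.
```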
@@ -737,32 +920,114 @@ def findmaxlag_gauss(
     displayplots=False,
 ):
     """
+    Find the maximum lag in a cross-correlation function by fitting a Gaussian curve to the peak.
+
+    This function locates the peak in a cross-correlation function and optionally fits a Gaussian
+    curve to determine the precise lag time, amplitude, and width. It includes extensive error
+    checking and validation to ensure robust results.
 
     Parameters
     ----------
-    thexcorr_x
-    thexcorr_y
-    lagmin
-    lagmax
-    widthmax
-    edgebufferfrac
-    threshval
-    uthreshval
-    debug
-    tweaklims
-    zerooutbadfit
-    refine
-    maxguess
-    useguess
-    searchfrac
-    fastgauss
-    lagmod
-    enforcethresh
-    displayplots
+    thexcorr_x : array_like
+        X-axis values (lag times) of the cross-correlation function.
+    thexcorr_y : array_like
+        Y-axis values (correlation coefficients) of the cross-correlation function.
+    lagmin : float
+        Minimum allowable lag value in seconds.
+    lagmax : float
+        Maximum allowable lag value in seconds.
+    widthmax : float
+        Maximum allowable width of the Gaussian peak in seconds.
+    edgebufferfrac : float, optional
+        Fraction of array length to exclude from each edge during search. Default is 0.0.
+    threshval : float, optional
+        Minimum correlation threshold for a valid peak. Default is 0.0.
+    uthreshval : float, optional
+        Upper threshold value (currently unused). Default is 30.0.
+    debug : bool, optional
+        Enable debug output showing initial vs final parameter values. Default is False.
+    tweaklims : bool, optional
+        Automatically adjust search limits to avoid edge artifacts. Default is True.
+    zerooutbadfit : bool, optional
+        Set output to zero when fit fails rather than using initial guess. Default is True.
+    refine : bool, optional
+        Perform least-squares refinement of the Gaussian fit. Default is False.
+    maxguess : float, optional
+        Initial guess for maximum lag position. Used when useguess=True. Default is 0.0.
+    useguess : bool, optional
+        Use the provided maxguess instead of finding peak automatically. Default is False.
+    searchfrac : float, optional
+        Fraction of peak height used to determine initial width estimate. Default is 0.5.
+    fastgauss : bool, optional
+        Use fast non-iterative Gaussian fitting (less accurate). Default is False.
+    lagmod : float, optional
+        Modulus for lag values to handle wraparound. Default is 1000.0.
+    enforcethresh : bool, optional
+        Enforce minimum threshold requirements. Default is True.
+    absmaxsigma : float, optional
+        Absolute maximum allowed sigma (width) value. Default is 1000.0.
+    absminsigma : float, optional
+        Absolute minimum allowed sigma (width) value. Default is 0.1.
+    displayplots : bool, optional
+        Show matplotlib plots of data and fitted curve. Default is False.
 
     Returns
     -------
-
+    maxindex : int
+        Array index of the maximum correlation value.
+    maxlag : numpy.float64
+        Time lag at maximum correlation in seconds.
+    maxval : numpy.float64
+        Maximum correlation coefficient value.
+    maxsigma : numpy.float64
+        Width (sigma) of the fitted Gaussian peak.
+    maskval : numpy.uint16
+        Validity mask (1 = valid fit, 0 = invalid fit).
+    failreason : numpy.uint16
+        Bitwise failure reason code. Possible values:
+        - 0x01: Correlation amplitude below threshold
+        - 0x02: Correlation amplitude above maximum (>1.0)
+        - 0x04: Search window too narrow (<3 points)
+        - 0x08: Fitted width exceeds widthmax
+        - 0x10: Fitted lag outside [lagmin, lagmax] range
+        - 0x20: Peak found at edge of search range
+        - 0x40: Fitting procedure failed
+        - 0x80: Initial parameter estimation failed
+    fitstart : int
+        Starting index used for fitting.
+    fitend : int
+        Ending index used for fitting.
+
+    Notes
+    -----
+    - The function assumes cross-correlation data where Y-values represent correlation
+      coefficients (typically in range [-1, 1]).
+    - When refine=False, uses simple peak-finding based on maximum value.
+    - When refine=True, performs least-squares Gaussian fit for sub-bin precision.
+    - All time-related parameters (lagmin, lagmax, widthmax) should be in the same
+      units as thexcorr_x.
+    - The fastgauss option provides faster but less accurate non-iterative fitting.
+
+    Examples
+    --------
+    Basic usage without refinement:
+
+    >>> maxindex, maxlag, maxval, maxsigma, maskval, failreason, fitstart, fitend = \\
+    ...     findmaxlag_gauss(lag_times, correlations, -10.0, 10.0, 5.0)
+    >>> if maskval == 1:
+    ...     print(f"Peak found at lag: {maxlag:.3f} s, correlation: {maxval:.3f}")
+
+    Advanced usage with refinement:
+
+    >>> maxindex, maxlag, maxval, maxsigma, maskval, failreason, fitstart, fitend = \\
+    ...     findmaxlag_gauss(lag_times, correlations, -5.0, 5.0, 2.0,
+    ...                      refine=True, threshval=0.1, displayplots=True)
+
+    Using an initial guess:
+
+    >>> maxindex, maxlag, maxval, maxsigma, maskval, failreason, fitstart, fitend = \\
+    ...     findmaxlag_gauss(lag_times, correlations, -10.0, 10.0, 3.0,
+    ...                      useguess=True, maxguess=2.5, refine=True)
     """
     # set initial parameters
     # widthmax is in seconds
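Since `failreason` is returned as a bitmask, a small hypothetical helper (using the bit values listed in the docstring above; the real module defines its own failure-code constants) can make the result readable:

```python
# Hypothetical decoder for the failreason bitmask documented above.
FAIL_REASONS = {
    0x01: "correlation amplitude below threshold",
    0x02: "correlation amplitude above 1.0",
    0x04: "search window too narrow",
    0x08: "fitted width exceeds widthmax",
    0x10: "fitted lag outside [lagmin, lagmax]",
    0x20: "peak at edge of search range",
    0x40: "fitting procedure failed",
    0x80: "initial parameter estimation failed",
}

def describe_failure(failreason):
    reasons = [text for bit, text in FAIL_REASONS.items() if int(failreason) & bit]
    return reasons or ["no failure"]
```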
@@ -782,7 +1047,7 @@ def findmaxlag_gauss(
     if tweaklims:
         lowerlim = 0
         upperlim = numlagbins - 1
-        while (thexcorr_y[lowerlim + 1] < thexcorr_y[lowerlim]) and (lowerlim + 1) < upperlim:
+        while (thexcorr_y[lowerlim + 1] < thexcorr_y[lowerlim]) and (lowerlim + 1) <= upperlim:
             lowerlim += 1
         while (thexcorr_y[upperlim - 1] < thexcorr_y[upperlim]) and (upperlim - 1) > lowerlim:
             upperlim -= 1
@@ -837,7 +1102,9 @@ def findmaxlag_gauss(
         if (maxindex - j < lowerlimit) or (j > searchbins):
             j -= 1
         # This is calculated from first principles, but it's always big by a factor or ~1.4.
-        # Which makes me think I dropped a factor if sqrt(2). So fix that with a final division
+        # Which makes me think I dropped a factor if sqrt(2). So fix that with a final division.
+        if searchfrac <= 0 or searchfrac >= 1:
+            raise ValueError("searchfrac must be between 0 and 1 (exclusive)")
         maxsigma_init = np.float64(
             ((i + j + 1) * binwidth / (2.0 * np.sqrt(-np.log(searchfrac)))) / np.sqrt(2.0)
         )
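For context on the width estimate and the new searchfrac guard: for a Gaussian A * exp(-x^2 / (2 * sigma^2)), the half-width at a fraction f of the peak gives the worked relation below, which reproduces the expression in this hunk (with W ≈ (i + j + 1) * binwidth), including the trailing division by sqrt(2) that the comment refers to, and shows why f must lie strictly between 0 and 1:

```text
A * exp(-x^2 / (2 * sigma^2)) = f * A   =>   x = sigma * sqrt(-2 * ln(f))
full width at fraction f:  W = 2 * x = 2 * sqrt(2) * sigma * sqrt(-ln(f))
solve for sigma:           sigma = (W / (2 * sqrt(-ln(f)))) / sqrt(2)
guard: f <= 0 makes ln(f) undefined; f = 1 makes sqrt(-ln(f)) zero (division by zero);
       f > 1 makes -ln(f) negative (square root of a negative number).
```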
@@ -893,12 +1160,22 @@ def findmaxlag_gauss(
         p0 = np.array([maxval_init, maxlag_init, maxsigma_init], dtype="float64")
 
         if fitend - fitstart >= 3:
-            plsq, dummy = sp.optimize.leastsq(
-                gaussresiduals, p0, args=(data, X), maxfev=5000
-            )
-            maxval = plsq[0]
-            maxlag = np.fmod((1.0 * plsq[1]), lagmod)
-            maxsigma = plsq[2]
+            try:
+                plsq, ier = sp.optimize.leastsq(
+                    gaussresiduals, p0, args=(data, X), maxfev=5000
+                )
+                if ier not in [1, 2, 3, 4]: # Check for successful convergence
+                    maxval = np.float64(0.0)
+                    maxlag = np.float64(0.0)
+                    maxsigma = np.float64(0.0)
+                else:
+                    maxval = plsq[0]
+                    maxlag = np.fmod((1.0 * plsq[1]), lagmod)
+                    maxsigma = plsq[2]
+            except:
+                maxval = np.float64(0.0)
+                maxlag = np.float64(0.0)
+                maxsigma = np.float64(0.0)
         # if maxval > 1.0, fit failed catastrophically, zero out or reset to initial value
         # corrected logic for 1.1.6
         if (np.fabs(maxval)) > 1.0 or (lagmin > maxlag) or (maxlag > lagmax):
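The new code checks the second return value of scipy.optimize.leastsq; per SciPy's documentation, `ier` is an integer flag and values 1 through 4 indicate that a solution was found. A self-contained sketch of the same pattern, with a hypothetical residual function standing in for rapidtide's `gaussresiduals`:

```python
import numpy as np
import scipy.optimize

def gauss_residuals(p, y, x):
    # p = [height, center, sigma]; residuals between data and the Gaussian model.
    return y - p[0] * np.exp(-((x - p[1]) ** 2) / (2.0 * p[2] ** 2))

x = np.linspace(-5.0, 5.0, 101)
y = 0.8 * np.exp(-((x - 1.0) ** 2) / (2.0 * 1.5**2))

p0 = np.array([1.0, 0.0, 1.0])
plsq, ier = scipy.optimize.leastsq(gauss_residuals, p0, args=(y, x), maxfev=5000)
if ier in (1, 2, 3, 4):  # SciPy: 1-4 mean a solution was found
    height, center, sigma = plsq
else:
    height = center = sigma = 0.0
```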
@@ -906,7 +1183,7 @@ def findmaxlag_gauss(
             maxval = np.float64(0.0)
             maxlag = np.float64(0.0)
             maxsigma = np.float64(0.0)
-            maskval = np.int16(0)
+            maskval = np.uint16(0)
         else:
             maxval = np.float64(maxval_init)
             maxlag = np.float64(maxlag_init)
@@ -916,7 +1193,7 @@ def findmaxlag_gauss(
             maxval = np.float64(0.0)
             maxlag = np.float64(0.0)
             maxsigma = np.float64(0.0)
-            maskval = np.int16(0)
+            maskval = np.uint16(0)
         else:
             if maxsigma > absmaxsigma:
                 maxsigma = absmaxsigma
@@ -1053,20 +1330,43 @@ def sincfit(height, loc, width, baseline, xvals, yvals):
 
 
 def gaussfit(height, loc, width, xvals, yvals):
-    """
-
-    Parameters
-    ----------
-    height
-    loc
-    width
-    xvals
-    yvals
-
-    Returns
-    -------
-
-    """
+    """Performs a non-linear least squares fit of a Gaussian function to data.
+
+    This routine uses `scipy.optimize.leastsq` to find the optimal parameters
+    (height, location, and width) that best describe a Gaussian curve fitted
+    to the provided `yvals` data against `xvals`. It requires an external
+    `gaussresiduals` function to compute the residuals.
+
+    Parameters
+    ----------
+    height : float
+        Initial guess for the amplitude or peak height of the Gaussian.
+    loc : float
+        Initial guess for the mean (center) of the Gaussian.
+    width : float
+        Initial guess for the standard deviation (width) of the Gaussian.
+    xvals : numpy.ndarray or list
+        The independent variable data points.
+    yvals : numpy.ndarray or list
+        The dependent variable data points to which the Gaussian will be fitted.
+
+    Returns
+    -------
+    tuple
+        A tuple containing the fitted parameters:
+        - float: The fitted height of the Gaussian.
+        - float: The fitted location (mean) of the Gaussian.
+        - float: The fitted width (standard deviation) of the Gaussian.
+
+    Notes
+    -----
+    - This function relies on an external function `gaussresiduals(params, y, x)`
+      which should calculate the difference between the observed `y` values and
+      the Gaussian function evaluated at `x` with the given `params` (height, loc, width).
+    - `scipy.optimize.leastsq` is used for the optimization, which requires
+      `scipy` and `numpy` to be imported (e.g., `import scipy.optimize as sp`
+      and `import numpy as np`).
+    """
     plsq, dummy = sp.optimize.leastsq(
         gaussresiduals, np.array([height, loc, width]), args=(yvals, xvals), maxfev=5000
     )
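A usage sketch assuming the call signature and three-value return described in the docstring above (the noisy test data and import path are illustrative):

```python
import numpy as np
from rapidtide.fit import gaussfit

rng = np.random.default_rng(0)
xvals = np.linspace(-10.0, 10.0, 201)
yvals = 2.0 * np.exp(-((xvals - 1.5) ** 2) / (2.0 * 0.8**2)) + 0.05 * rng.standard_normal(len(xvals))

# Initial guesses for height, location, and width seed the least-squares fit.
height, loc, width = gaussfit(2.5, 0.0, 1.0, xvals, yvals)
```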
@@ -1074,6 +1374,32 @@ def gaussfit(height, loc, width, xvals, yvals):
 
 
 def gram_schmidt(theregressors, debug=False):
+    r"""Performs Gram-Schmidt orthogonalization on a set of vectors.
+
+    This routine takes a set of input vectors (rows of a 2D array) and
+    transforms them into an orthonormal basis using the Gram-Schmidt process.
+    It ensures that the resulting vectors are mutually orthogonal and
+    have a unit norm. Linearly dependent vectors are effectively skipped
+    if their orthogonal component is negligible.
+
+    Args:
+        theregressors (numpy.ndarray): A 2D NumPy array where each row
+            represents a vector to be orthogonalized.
+        debug (bool, optional): If True, prints debug information about
+            input and output dimensions. Defaults to False.
+
+    Returns:
+        numpy.ndarray: A 2D NumPy array representing the orthonormal basis.
+            Each row is an orthonormal vector. The number of rows may be
+            less than the input if some vectors were linearly dependent.
+
+    Notes:
+        - The function normalizes each orthogonalized vector to unit length.
+        - A small tolerance (1e-10) is used to check if a vector's orthogonal
+          component is effectively zero, indicating linear dependence.
+        - Requires the `numpy` library for array operations and linear algebra.
+    """
+
     if debug:
         print("gram_schmidt, input dimensions:", theregressors.shape)
     basis = []
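A quick check of the documented behavior (orthonormal rows, linearly dependent inputs dropped); this assumes the function is importable from rapidtide.fit and is offered only as a sketch:

```python
import numpy as np
from rapidtide.fit import gram_schmidt

rng = np.random.default_rng(42)
v1 = rng.standard_normal(5)
v2 = rng.standard_normal(5)
regressors = np.vstack([v1, v2, v1 + 2.0 * v2])  # third row is linearly dependent

basis = gram_schmidt(regressors)
# Expect two rows back, and basis @ basis.T should be close to the identity matrix.
print(basis.shape, np.round(basis @ basis.T, 6))
```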
@@ -1088,6 +1414,37 @@ def gram_schmidt(theregressors, debug=False):
 
 
 def mlproject(thefit, theevs, intercept):
+    r"""Calculates a linear combination (weighted sum) of explanatory variables.
+
+    This routine computes a predicted output by multiplying a set of
+    explanatory variables by corresponding coefficients and summing the results.
+    It can optionally include an intercept term. This is a common operation
+    in linear regression and other statistical models.
+
+    Args:
+        thefit (numpy.ndarray or list): A 1D array or list of coefficients
+            (weights) to be applied to the explanatory variables. If `intercept`
+            is True, the first element of `thefit` is treated as the intercept.
+        theevs (list of numpy.ndarray): A list where each element is a 1D NumPy
+            array representing an explanatory variable (feature time series).
+            The length of `theevs` should match the number of non-intercept
+            coefficients in `thefit`.
+        intercept (bool): If True, the first element of `thefit` is used as
+            an intercept term, and the remaining elements of `thefit` are
+            applied to `theevs`. If False, no intercept is added, and all
+            elements of `thefit` are applied to `theevs` starting from the
+            first element.
+
+    Returns:
+        numpy.ndarray: A 1D NumPy array representing the calculated linear
+            combination. Its length will be the same as the explanatory variables.
+
+    Notes:
+        The calculation performed is conceptually equivalent to:
+        `output = intercept_term + (coefficient_1 * ev_1) + (coefficient_2 * ev_2) + ...`
+        where `intercept_term` is `thefit[0]` if `intercept` is True, otherwise 0.
+    """
+
     thedest = theevs[0] * 0.0
     if intercept:
         thedest[:] = thefit[0]
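A minimal sketch of the weighted-sum formula spelled out in the Notes section above; it mirrors the documented behavior rather than the rapidtide implementation:

```python
import numpy as np

def mlproject_sketch(thefit, theevs, intercept):
    # output = intercept_term + sum_i coefficient_i * ev_i
    thefit = np.asarray(thefit, dtype=float)
    offset, weights = (thefit[0], thefit[1:]) if intercept else (0.0, thefit)
    return offset + sum(w * ev for w, ev in zip(weights, theevs))
```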
@@ -1187,18 +1544,49 @@ def mlregress(X, y, intercept=True, debug=False):
 def calcexpandedregressors(
     confounddict, labels=None, start=0, end=-1, deriv=True, order=1, debug=False
 ):
-    r"""Calculates various motion related timecourses from motion data dict, and returns an array
-
-    Parameters
-    ----------
-    confounddict: dict
-        A dictionary of the confound vectors
-
-    Returns
-    -------
-    motionregressors: array
-        All the derivative timecourses to use in a numpy array
-
+    r"""Calculates expanded regressors from a dictionary of confound vectors.
+
+    This routine generates a comprehensive set of motion-related regressors by
+    including higher-order polynomial terms and derivatives of the original
+    confound timecourses. It is commonly used in neuroimaging analysis to
+    account for subject movement.
+
+    Args:
+        confounddict (dict): A dictionary where keys are labels (e.g., 'rot_x',
+            'trans_y') and values are the corresponding 1D time series (NumPy
+            arrays or lists).
+        labels (list, optional): A list of specific confound labels from
+            `confounddict` to process. If None, all labels in `confounddict`
+            will be used. Defaults to None.
+        start (int, optional): The starting index (inclusive) for slicing the
+            timecourses. Defaults to 0.
+        end (int, optional): The ending index (exclusive) for slicing the
+            timecourses. If None, slicing continues to the end of the timecourse.
+            Defaults to None.
+        deriv (bool, optional): If True, the first derivative of each selected
+            timecourse (and its polynomial expansions) is calculated and
+            included as a regressor. Defaults to False.
+        order (int, optional): The polynomial order for expansion. If `order > 1`,
+            terms like `label^2`, `label^3`, up to `label^order` will be
+            included. Defaults to 1 (no polynomial expansion).
+        debug (bool, optional): If True, prints debug information during
+            processing. Defaults to False.
+
+    Returns:
+        tuple: A tuple containing:
+            - outputregressors (numpy.ndarray): A 2D NumPy array where each row
+              represents a generated regressor (original, polynomial, or derivative)
+              and columns represent time points.
+            - outlabels (list): A list of strings, providing the labels for each
+              row in `outputregressors`, indicating what each regressor represents
+              (e.g., 'rot_x', 'rot_x^2', 'rot_x_deriv').
+
+    Notes:
+        - The derivatives are calculated using `numpy.gradient`.
+        - The function handles slicing of the timecourses based on `start` and `end`
+          parameters.
+        - The output regressors are concatenated horizontally to form the final
+          `outputregressors` array.
     """
     if labels is None:
         localconfounddict = confounddict.copy()
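A hedged usage sketch based on the signature in this hunk; the confound names and shapes are illustrative assumptions, and the output is described by the new docstring as the expanded regressor array together with matching labels:

```python
import numpy as np
from rapidtide.fit import calcexpandedregressors

t = np.linspace(0.0, 100.0, 250)
confounds = {
    "trans_x": 0.1 * np.sin(0.2 * t),
    "rot_z": 0.05 * np.cos(0.1 * t),
}

# Quadratic expansion plus first derivatives of each confound timecourse.
expanded = calcexpandedregressors(confounds, deriv=True, order=2)
print(expanded)
```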
@@ -2051,10 +2439,15 @@ def simfuncpeakfit(
     if debug:
         print("fit input array:", p0)
     try:
-        plsq, dummy = sp.optimize.leastsq(gaussresiduals, p0, args=(data, X), maxfev=5000)
-        maxval = plsq[0] + baseline
-        maxlag = np.fmod((1.0 * plsq[1]), lagmod)
-        maxsigma = plsq[2]
+        plsq, ier = sp.optimize.leastsq(gaussresiduals, p0, args=(data, X), maxfev=5000)
+        if ier not in [1, 2, 3, 4]: # Check for successful convergence
+            maxval = np.float64(0.0)
+            maxlag = np.float64(0.0)
+            maxsigma = np.float64(0.0)
+        else:
+            maxval = plsq[0] + baseline
+            maxlag = np.fmod((1.0 * plsq[1]), lagmod)
+            maxsigma = plsq[2]
     except:
         maxval = np.float64(0.0)
         maxlag = np.float64(0.0)