rapidtide 3.1-py3-none-any.whl → 3.1.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- rapidtide/OrthoImageItem.py +4 -4
- rapidtide/_version.py +3 -3
- rapidtide/calccoherence.py +4 -4
- rapidtide/calcnullsimfunc.py +2 -5
- rapidtide/calcsimfunc.py +1 -4
- rapidtide/correlate.py +130 -127
- rapidtide/data/examples/src/testfmri +41 -9
- rapidtide/data/examples/src/testhappy +8 -8
- rapidtide/dlfilter.py +21 -22
- rapidtide/dlfiltertorch.py +18 -19
- rapidtide/filter.py +4 -4
- rapidtide/fit.py +18 -18
- rapidtide/happy_supportfuncs.py +84 -82
- rapidtide/helper_classes.py +2 -2
- rapidtide/io.py +88 -83
- rapidtide/linfitfiltpass.py +30 -49
- rapidtide/makelaggedtcs.py +11 -16
- rapidtide/maskutil.py +30 -14
- rapidtide/miscmath.py +2 -2
- rapidtide/patchmatch.py +10 -11
- rapidtide/peakeval.py +1 -3
- rapidtide/ppgproc.py +3 -3
- rapidtide/qualitycheck.py +2 -2
- rapidtide/refinedelay.py +12 -3
- rapidtide/refineregressor.py +20 -29
- rapidtide/scripts/showxcorr_legacy.py +7 -7
- rapidtide/scripts/stupidramtricks.py +15 -17
- rapidtide/simFuncClasses.py +2 -2
- rapidtide/simfuncfit.py +27 -41
- rapidtide/tests/test_cleanregressor.py +1 -2
- rapidtide/tests/test_fullrunhappy_v3.py +11 -5
- rapidtide/tests/test_fullrunhappy_v4.py +9 -1
- rapidtide/tests/test_getparsers.py +11 -3
- rapidtide/tests/test_refinedelay.py +0 -1
- rapidtide/tests/test_simroundtrip.py +8 -0
- rapidtide/tests/test_stcorrelate.py +3 -1
- rapidtide/util.py +6 -6
- rapidtide/voxelData.py +1 -1
- rapidtide/wiener.py +122 -16
- rapidtide/wiener2.py +3 -3
- rapidtide/workflows/applyppgproc.py +33 -15
- rapidtide/workflows/calcSimFuncMap.py +11 -22
- rapidtide/workflows/ccorrica.py +4 -2
- rapidtide/workflows/cleanregressor.py +6 -11
- rapidtide/workflows/delayvar.py +8 -13
- rapidtide/workflows/fitSimFuncMap.py +2 -9
- rapidtide/workflows/happy.py +6 -6
- rapidtide/workflows/happy_parser.py +36 -25
- rapidtide/workflows/pairproc.py +10 -2
- rapidtide/workflows/pixelcomp.py +1 -2
- rapidtide/workflows/rankimage.py +1 -1
- rapidtide/workflows/rapidtide.py +98 -63
- rapidtide/workflows/refineDelayMap.py +7 -6
- rapidtide/workflows/refineRegressor.py +6 -16
- rapidtide/workflows/regressfrommaps.py +9 -6
- rapidtide/workflows/retrolagtcs.py +5 -7
- rapidtide/workflows/retroregress.py +11 -17
- rapidtide/workflows/roisummarize.py +11 -10
- rapidtide/workflows/showarbcorr.py +2 -2
- rapidtide/workflows/showxcorrx.py +6 -6
- rapidtide/workflows/simdata.py +31 -31
- rapidtide/workflows/spatialmi.py +0 -1
- rapidtide/workflows/tidepool.py +6 -4
- {rapidtide-3.1.dist-info → rapidtide-3.1.1.dist-info}/METADATA +8 -7
- {rapidtide-3.1.dist-info → rapidtide-3.1.1.dist-info}/RECORD +69 -70
- rapidtide/wiener_doc.py +0 -255
- {rapidtide-3.1.dist-info → rapidtide-3.1.1.dist-info}/WHEEL +0 -0
- {rapidtide-3.1.dist-info → rapidtide-3.1.1.dist-info}/entry_points.txt +0 -0
- {rapidtide-3.1.dist-info → rapidtide-3.1.1.dist-info}/licenses/LICENSE +0 -0
- {rapidtide-3.1.dist-info → rapidtide-3.1.1.dist-info}/top_level.txt +0 -0
rapidtide/dlfilter.py
CHANGED
@@ -26,6 +26,7 @@ import warnings
 import matplotlib as mpl
 import matplotlib.pyplot as plt
 import numpy as np
+from numpy.typing import NDArray

 with warnings.catch_warnings():
     warnings.simplefilter("ignore")

@@ -495,20 +496,20 @@ class DeepLearningFilter:
         )

     @tf.function
-    def predict_model(self, X:
+    def predict_model(self, X: NDArray) -> NDArray:
         """
         Make predictions using the trained model.

         Parameters
         ----------
-        X :
+        X : NDArray
             Input features for prediction. Shape should be (n_samples, n_features)
             where n_samples is the number of samples and n_features is the number
             of features expected by the model.

         Returns
         -------
-
+        NDArray
             Model predictions. Shape will depend on the specific model type but
             typically follows (n_samples,) for regression or (n_samples, n_classes)
             for classification.

@@ -964,7 +965,7 @@ class DeepLearningFilter:
         self.savemodel()
         self.trained = True

-    def apply(self, inputdata:
+    def apply(self, inputdata: NDArray, badpts: NDArray | None = None) -> NDArray:
         """
         Apply a sliding-window prediction model to the input data, optionally incorporating bad points.

@@ -975,15 +976,15 @@ class DeepLearningFilter:

         Parameters
         ----------
-        inputdata :
+        inputdata : NDArray
             Input data array of shape (N,) to be processed.
-        badpts :
+        badpts : NDArray | None, optional
             Array of same shape as `inputdata` indicating bad or invalid points. If None, no bad points
             are considered. Default is None.

         Returns
         -------
-
+        NDArray
             Predicted data array of the same shape as `inputdata`, with predictions aggregated and
             weighted across overlapping windows.

@@ -2813,14 +2814,14 @@ class HybridDLFilter(DeepLearningFilter):


 def filtscale(
-    data:
+    data: NDArray,
     scalefac: float = 1.0,
     reverse: bool = False,
     hybrid: bool = False,
     lognormalize: bool = True,
     epsilon: float = 1e-10,
     numorders: int = 6,
-) -> tuple[
+) -> tuple[NDArray, float] | NDArray:
     """
     Apply or reverse a frequency-domain scaling and normalization to input data.

@@ -2831,7 +2832,7 @@ def filtscale(

     Parameters
     ----------
-    data :
+    data : NDArray
         Input signal or transformed data (depending on `reverse` flag).
     scalefac : float, optional
         Scaling factor used for normalization. Default is 1.0.

@@ -2851,7 +2852,7 @@ def filtscale(

     Returns
     -------
-    tuple[
+    tuple[NDArray, float] or NDArray
         - If `reverse=False`: A tuple of (transformed data, scale factor).
           The transformed data is a stacked array of magnitude and phase components
           (or original data in hybrid mode).

@@ -3025,7 +3026,7 @@ def getmatchedtcs(
     >>> matched_files, length = getmatchedtcs("sub-*/func/*cardiacfromfmri_25.0Hz*")
     >>> print(f"Found {len(matched_files)} files with {length} timepoints each.")
     """
-    # list all
+    # list all the target files
     fromfile = sorted(glob.glob(searchstring))
     if debug:
         print(f"searchstring: {searchstring} -> {fromfile}")

@@ -3073,9 +3074,7 @@ def readindata(
     readlim: int | None = None,
     readskip: int | None = None,
     debug: bool = False,
-) ->
-    tuple[np.ndarray, np.ndarray, list[str]] | tuple[np.ndarray, np.ndarray, list[str], np.ndarray]
-):
+) -> tuple[NDArray, NDArray, list[str]] | tuple[NDArray, NDArray, list[str], NDArray]:
     """
     Read and process time-series data from a list of matched files.

@@ -3112,7 +3111,7 @@ def readindata(

     Returns
     -------
-    tuple of (
+    tuple of (NDArray, NDArray, list[str]) or (NDArray, NDArray, list[str], NDArray)
         - `x1[startskip:-endskip, :count]`: Array of x-axis time series data.
         - `y1[startskip:-endskip, :count]`: Array of y-axis time series data.
         - `names[:count]`: List of file names that passed quality checks.

@@ -3174,7 +3173,7 @@ def readindata(

     Returns
     -------
-    tuple of (
+    tuple of (NDArray, NDArray, list[str]) or (NDArray, NDArray, list[str], NDArray)
         - `x1[startskip:-endskip, :count]`: Array of x-axis time series data.
         - `y1[startskip:-endskip, :count]`: Array of y-axis time series data.
         - `names[:count]`: List of file names that passed quality checks.

@@ -3366,8 +3365,8 @@ def prep(
     countlim: int | None = None,
     debug: bool = False,
 ) -> (
-    tuple[
-    | tuple[
+    tuple[NDArray, NDArray, NDArray, NDArray, int, int, int]
+    | tuple[NDArray, NDArray, NDArray, NDArray, int, int, int, NDArray, NDArray]
 ):
     """
     Prepare time-series data for training and validation by reading, normalizing,

@@ -3422,7 +3421,7 @@ def prep(

     Returns
     -------
-    tuple of (
+    tuple of (NDArray, NDArray, NDArray, NDArray, int, int, int)
         If `dofft` is False:
         - train_x : ndarray of shape (n_train, window_size, 1)
         - train_y : ndarray of shape (n_train, window_size, 1)

@@ -3432,8 +3431,8 @@ def prep(
         - tclen : int
         - batchsize : int

-    tuple of (
-
+    tuple of (NDArray, NDArray, NDArray, NDArray, int, int, int,
+              NDArray, NDArray)
         If `dofft` is True:
         - train_x : ndarray of shape (n_train, window_size, 2)
         - train_y : ndarray of shape (n_train, window_size, 2)
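The recurring change in this file is the new `from numpy.typing import NDArray` import and the use of `NDArray` in signatures and NumPy-style docstrings. Below is a minimal, self-contained sketch of that annotation pattern; the function is illustrative only (it is not part of rapidtide) and the `X | None` union syntax assumes Python 3.10+, which matches the annotations shown in the diff.

```python
import numpy as np
from numpy.typing import NDArray


def apply_sketch(inputdata: NDArray, badpts: NDArray | None = None) -> NDArray:
    """Zero out flagged samples; mirrors the annotated apply() signature above."""
    if badpts is None:
        return inputdata.copy()
    # badpts has the same shape as inputdata; nonzero entries mark bad samples.
    return np.where(badpts != 0, 0.0, inputdata)


cleaned = apply_sketch(np.arange(5, dtype=np.float64), badpts=np.array([0, 1, 0, 0, 1]))
```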
rapidtide/dlfiltertorch.py
CHANGED
@@ -26,6 +26,7 @@ import matplotlib as mpl
 import matplotlib.pyplot as plt
 import numpy as np
 import tqdm
+from numpy.typing import NDArray

 with warnings.catch_warnings():
     warnings.simplefilter("ignore")

@@ -395,20 +396,20 @@ class DeepLearningFilter:
             countlim=self.countlim,
         )

-    def predict_model(self, X:
+    def predict_model(self, X: NDArray) -> NDArray:
         """
         Make predictions using the trained model.

         Parameters
         ----------
-        X :
+        X : NDArray
             Input features for prediction. Shape should be (n_samples, n_features)
             where n_samples is the number of samples and n_features is the number
             of features expected by the model.

         Returns
         -------
-
+        NDArray
             Model predictions. Shape will depend on the specific model type but
             typically follows (n_samples,) for regression or (n_samples, n_classes)
             for classification.

@@ -1067,7 +1068,7 @@ class DeepLearningFilter:
         self.savemodel()
         self.trained = True

-    def apply(self, inputdata:
+    def apply(self, inputdata: NDArray, badpts: NDArray | None = None) -> NDArray:
         """
         Apply a sliding-window prediction model to the input data, optionally incorporating bad points.

@@ -1078,15 +1079,15 @@ class DeepLearningFilter:

         Parameters
         ----------
-        inputdata :
+        inputdata : NDArray
             Input data array of shape (N,) to be processed.
-        badpts :
+        badpts : NDArray | None, optional
             Array of same shape as `inputdata` indicating bad or invalid points. If None, no bad points
             are considered. Default is None.

         Returns
         -------
-
+        NDArray
             Predicted data array of the same shape as `inputdata`, with predictions aggregated and
             weighted across overlapping windows.

@@ -3958,14 +3959,14 @@ class HybridDLFilter(DeepLearningFilter):


 def filtscale(
-    data:
+    data: NDArray,
     scalefac: float = 1.0,
     reverse: bool = False,
     hybrid: bool = False,
     lognormalize: bool = True,
     epsilon: float = 1e-10,
     numorders: int = 6,
-) -> tuple[
+) -> tuple[NDArray, float] | NDArray:
     """
     Apply or reverse a scaling transformation to spectral data.

@@ -3977,7 +3978,7 @@ def filtscale(

     Parameters
     ----------
-    data :
+    data : NDArray
         Input time-domain signal or scaled spectral data depending on `reverse` flag.
     scalefac : float, optional
         Scaling factor used in normalization. Default is 1.0.

@@ -3996,7 +3997,7 @@ def filtscale(

     Returns
     -------
-    tuple[
+    tuple[NDArray, float] or NDArray
         - If `reverse` is False: Returns a tuple of (scaled_data, scalefac).
           `scaled_data` is a stacked array of magnitude and phase (or original signal
           and magnitude in hybrid mode).

@@ -4217,9 +4218,7 @@ def readindata(
     readlim: int | None = None,
     readskip: int | None = None,
     debug: bool = False,
-) ->
-    tuple[np.ndarray, np.ndarray, list[str]] | tuple[np.ndarray, np.ndarray, list[str], np.ndarray]
-):
+) -> tuple[NDArray, NDArray, list[str]] | tuple[NDArray, NDArray, list[str], NDArray]:
     """
     Read and process time-series data from a list of matched files.

@@ -4258,7 +4257,7 @@ def readindata(

     Returns
     -------
-    tuple of (
+    tuple of (NDArray, NDArray, list[str]) or (NDArray, NDArray, list[str], NDArray)
         - `x1`: Array of shape `(tclen, count)` containing x-time series data.
         - `y1`: Array of shape `(tclen, count)` containing y-time series data.
         - `names`: List of file names that passed quality checks.

@@ -4441,8 +4440,8 @@ def prep(
     countlim: int | None = None,
     debug: bool = False,
 ) -> (
-    tuple[
-    | tuple[
+    tuple[NDArray, NDArray, NDArray, NDArray, int, int, int]
+    | tuple[NDArray, NDArray, NDArray, NDArray, int, int, int, NDArray, NDArray]
 ):
     """
     Prepare time-series data for training and validation by reading, normalizing,

@@ -4497,7 +4496,7 @@ def prep(

     Returns
     -------
-    tuple of (
+    tuple of (NDArray, NDArray, NDArray, NDArray, int, int, int)
         If `dofft` is False:
         - train_x : Training input data (shape: [n_windows, window_size, 1])
         - train_y : Training target data (shape: [n_windows, window_size, 1])

@@ -4507,7 +4506,7 @@ def prep(
         - tclen : Total time points after skipping
         - batchsize : Number of windows per subject

-    tuple of (
+    tuple of (NDArray, NDArray, NDArray, NDArray, int, int, int, NDArray, NDArray)
         If `dofft` is True:
         - train_x : Training input data (shape: [n_windows, window_size, 2])
         - train_y : Training target data (shape: [n_windows, window_size, 2])
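The `filtscale()` signature above now declares a union return, `tuple[NDArray, float] | NDArray`: a (data, scale factor) pair in the forward direction, a bare array when `reverse=True`. A stand-in sketch of how a caller can branch on that dual return (this assumes nothing about rapidtide's internals and uses a hypothetical `filtscale_like` function):

```python
import numpy as np
from numpy.typing import NDArray


def filtscale_like(
    data: NDArray, scalefac: float = 1.0, reverse: bool = False
) -> tuple[NDArray, float] | NDArray:
    if reverse:
        # Reverse path: undo the scaling and return just the array.
        return data * scalefac
    # Forward path: normalize and return the data together with the scale factor.
    scalefac = float(np.max(np.abs(data))) or 1.0
    return data / scalefac, scalefac


scaled, fac = filtscale_like(np.array([1.0, -4.0, 2.0]))       # forward call unpacks a tuple
restored = filtscale_like(scaled, scalefac=fac, reverse=True)  # reverse call returns one array
```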
rapidtide/filter.py
CHANGED
@@ -1513,9 +1513,9 @@ def wiener_deconvolution(signal: NDArray, kernel: NDArray, lambd: float) -> NDArray:

     Parameters
     ----------
-    signal :
+    signal : NDArray
         Input signal to be deconvolved, 1D array.
-    kernel :
+    kernel : NDArray
         Convolution kernel (point spread function), 1D array.
     lambd : float
         Regularization parameter representing the signal-to-noise ratio in

@@ -1523,7 +1523,7 @@ def wiener_deconvolution(signal: NDArray, kernel: NDArray, lambd: float) -> NDArray:

     Returns
     -------
-
+    NDArray
         Deconvolved signal, same length as input signal.

     Notes

@@ -1601,7 +1601,7 @@ def spectrum(

     Parameters
     ----------
-    spectrum :
+    spectrum : NDArray
         Input spectrum array. Should contain non-negative values.

     Returns
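For context on the `wiener_deconvolution(signal, kernel, lambd)` signature documented above, here is a minimal sketch of the classic frequency-domain Wiener deconvolution formula, H*/(|H|² + λ) applied to the FFT of the signal. It illustrates the general technique only and is not rapidtide's implementation.

```python
import numpy as np
from numpy.typing import NDArray


def wiener_deconvolution_sketch(signal: NDArray, kernel: NDArray, lambd: float) -> NDArray:
    # Zero-pad the kernel to the signal length so both FFTs have the same size.
    H = np.fft.fft(np.pad(kernel, (0, len(signal) - len(kernel))))
    G = np.fft.fft(signal)
    # Wiener filter: conj(H) / (|H|^2 + lambd) acts as a regularized inverse of H.
    return np.real(np.fft.ifft(G * np.conj(H) / (np.abs(H) ** 2 + lambd)))


# Example: approximately recover an impulse train blurred by a small boxcar kernel
# (recovery is up to a small shift from the "same"-mode convolution alignment).
kernel = np.ones(5) / 5.0
x = np.zeros(100)
x[20] = x[60] = 1.0
blurred = np.convolve(x, kernel, mode="same")
recovered = wiener_deconvolution_sketch(blurred, kernel, lambd=0.01)
```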
rapidtide/fit.py
CHANGED
@@ -1056,13 +1056,13 @@ def territorydecomp(
     ----------
     inputmap : NDArray[np.floating[Any]]
         Input data to be decomposed. Can be 3D or 4D (e.g., time series).
-    template :
+    template : NDArray
         Template values corresponding to the spatial locations in `inputmap`.
         Should have the same shape as `inputmap` (or be broadcastable).
-    atlas :
+    atlas : NDArray
         Atlas defining the territories. Each unique integer value represents a distinct region.
         Must have the same shape as `inputmap`.
-    inputmask :
+    inputmask : NDArray, optional
         Mask to define valid voxels in `inputmap`. If None, all voxels are considered valid.
         Should have the same shape as `inputmap`.
     intercept : bool, optional

@@ -1074,14 +1074,14 @@ def territorydecomp(

     Returns
     -------
-    tuple of
+    tuple of NDArray
         A tuple containing:
-        - fitmap :
+        - fitmap : NDArray
             The decomposed map with fitted values projected back onto the original spatial locations.
-        - thecoffs :
+        - thecoffs : NDArray
             Array of polynomial coefficients for each territory and map. Shape is (nummaps, numterritories, fitorder+1)
             if `intercept` is True, or (nummaps, numterritories, fitorder) otherwise.
-        - theR2s :
+        - theR2s : NDArray
             R-squared values for the fits for each territory and map. Shape is (nummaps, numterritories).

     Notes

@@ -2232,7 +2232,7 @@ def gram_schmidt(theregressors: NDArray, debug: bool = False) -> NDArray:

     Parameters
     ----------
-    theregressors :
+    theregressors : NDArray
         A 2D NumPy array where each row represents a vector to be orthogonalized.
     debug : bool, optional
         If True, prints debug information about input and output dimensions.

@@ -2240,7 +2240,7 @@ def gram_schmidt(theregressors: NDArray, debug: bool = False) -> NDArray:

     Returns
     -------
-
+    NDArray
         A 2D NumPy array representing the orthonormal basis. Each row is an
         orthonormal vector. The number of rows may be less than the input if
         some vectors were linearly dependent.

@@ -2292,7 +2292,7 @@ def mlproject(thefit: NDArray, theevs: list, intercept: bool) -> NDArray:
         A 1D array or list of coefficients (weights) to be applied to the
         explanatory variables. If `intercept` is True, the first element of
         `thefit` is treated as the intercept.
-    theevs : list of
+    theevs : list of NDArray
         A list where each element is a 1D NumPy array representing an
         explanatory variable (feature time series). The length of `theevs`
         should match the number of non-intercept coefficients in `thefit`.

@@ -2510,9 +2510,9 @@ def calcexpandedregressors(

     Returns
     -------
-    tuple of (
+    tuple of (NDArray, list)
         A tuple containing:
-        - outputregressors :
+        - outputregressors : NDArray
             A 2D NumPy array where each row represents a generated regressor
             (original, polynomial, or derivative) and columns represent time points.
         - outlabels : list of str

@@ -2587,6 +2587,7 @@ def calcexpandedregressors(
             activecolumn += 1
     return outputregressors, outlabels

+
 @conditionaljit()
 def derivativelinfitfilt(
     thedata: NDArray, theevs: NDArray, nderivs: int = 1, debug: bool = False

@@ -2670,6 +2671,7 @@ def derivativelinfitfilt(

     return filtered, thenewevs, datatoremove, R, coffs

+
 @conditionaljit()
 def expandedlinfitfilt(
     thedata: NDArray, theevs: NDArray, ncomps: int = 1, debug: bool = False

@@ -2752,6 +2754,7 @@ def expandedlinfitfilt(

     return filtered, thenewevs, datatoremove, R, coffs

+
 @conditionaljit()
 def linfitfilt(
     thedata: NDArray, theevs: NDArray, debug: bool = False

@@ -2843,8 +2846,7 @@ def confoundregress(
     regressors: NDArray,
     debug: bool = False,
     showprogressbar: bool = True,
-
-    rt_floattype: str = "float64",
+    rt_floattype: np.dtype = np.float64,
 ) -> Tuple[NDArray, NDArray]:
     """
     Filters multiple regressors out of an array of data using linear regression.

@@ -2864,10 +2866,8 @@ def confoundregress(
         Print additional diagnostic information if True. Default is False.
     showprogressbar : bool, optional
         Show progress bar during processing. Default is True.
-
+    rt_floattype : np.dtype, optional
         The data type used for floating-point calculations. Default is np.float64.
-    rt_floattype : str, optional
-        The string representation of the floating-point data type. Default is "float64".

     Returns
     -------

@@ -2905,7 +2905,7 @@ def confoundregress(
         if i == 0 and debug:
             print("fit shape:", thefit.shape)
         for j in range(regressors.shape[0]):
-            datatoremove +=
+            datatoremove += (thefit[0, 1 + j] * regressors[j, :]).astype(rt_floattype)
         filtereddata[i, :] = data[i, :] - datatoremove
         r2value[i] = R2
     return filtereddata, r2value
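The last hunks change `rt_floattype` from the string `"float64"` to the dtype object `np.float64`. For the `.astype(rt_floattype)` call shown above, either form selects the same dtype, so the change tightens the declared type rather than altering behavior; a quick illustrative check:

```python
import numpy as np

x = np.arange(4, dtype=np.int32)
# Both the dtype object and its string name select the same target type.
assert x.astype(np.float64).dtype == x.astype("float64").dtype == np.dtype("float64")
```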