rapidtide 3.1-py3-none-any.whl → 3.1.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (70)
  1. rapidtide/OrthoImageItem.py +4 -4
  2. rapidtide/_version.py +3 -3
  3. rapidtide/calccoherence.py +4 -4
  4. rapidtide/calcnullsimfunc.py +2 -5
  5. rapidtide/calcsimfunc.py +1 -4
  6. rapidtide/correlate.py +130 -127
  7. rapidtide/data/examples/src/testfmri +41 -9
  8. rapidtide/data/examples/src/testhappy +8 -8
  9. rapidtide/dlfilter.py +21 -22
  10. rapidtide/dlfiltertorch.py +18 -19
  11. rapidtide/filter.py +4 -4
  12. rapidtide/fit.py +18 -18
  13. rapidtide/happy_supportfuncs.py +84 -82
  14. rapidtide/helper_classes.py +2 -2
  15. rapidtide/io.py +88 -83
  16. rapidtide/linfitfiltpass.py +30 -49
  17. rapidtide/makelaggedtcs.py +11 -16
  18. rapidtide/maskutil.py +30 -14
  19. rapidtide/miscmath.py +2 -2
  20. rapidtide/patchmatch.py +10 -11
  21. rapidtide/peakeval.py +1 -3
  22. rapidtide/ppgproc.py +3 -3
  23. rapidtide/qualitycheck.py +2 -2
  24. rapidtide/refinedelay.py +12 -3
  25. rapidtide/refineregressor.py +20 -29
  26. rapidtide/scripts/showxcorr_legacy.py +7 -7
  27. rapidtide/scripts/stupidramtricks.py +15 -17
  28. rapidtide/simFuncClasses.py +2 -2
  29. rapidtide/simfuncfit.py +27 -41
  30. rapidtide/tests/test_cleanregressor.py +1 -2
  31. rapidtide/tests/test_fullrunhappy_v3.py +11 -5
  32. rapidtide/tests/test_fullrunhappy_v4.py +9 -1
  33. rapidtide/tests/test_getparsers.py +11 -3
  34. rapidtide/tests/test_refinedelay.py +0 -1
  35. rapidtide/tests/test_simroundtrip.py +8 -0
  36. rapidtide/tests/test_stcorrelate.py +3 -1
  37. rapidtide/util.py +6 -6
  38. rapidtide/voxelData.py +1 -1
  39. rapidtide/wiener.py +122 -16
  40. rapidtide/wiener2.py +3 -3
  41. rapidtide/workflows/applyppgproc.py +33 -15
  42. rapidtide/workflows/calcSimFuncMap.py +11 -22
  43. rapidtide/workflows/ccorrica.py +4 -2
  44. rapidtide/workflows/cleanregressor.py +6 -11
  45. rapidtide/workflows/delayvar.py +8 -13
  46. rapidtide/workflows/fitSimFuncMap.py +2 -9
  47. rapidtide/workflows/happy.py +6 -6
  48. rapidtide/workflows/happy_parser.py +36 -25
  49. rapidtide/workflows/pairproc.py +10 -2
  50. rapidtide/workflows/pixelcomp.py +1 -2
  51. rapidtide/workflows/rankimage.py +1 -1
  52. rapidtide/workflows/rapidtide.py +98 -63
  53. rapidtide/workflows/refineDelayMap.py +7 -6
  54. rapidtide/workflows/refineRegressor.py +6 -16
  55. rapidtide/workflows/regressfrommaps.py +9 -6
  56. rapidtide/workflows/retrolagtcs.py +5 -7
  57. rapidtide/workflows/retroregress.py +11 -17
  58. rapidtide/workflows/roisummarize.py +11 -10
  59. rapidtide/workflows/showarbcorr.py +2 -2
  60. rapidtide/workflows/showxcorrx.py +6 -6
  61. rapidtide/workflows/simdata.py +31 -31
  62. rapidtide/workflows/spatialmi.py +0 -1
  63. rapidtide/workflows/tidepool.py +6 -4
  64. {rapidtide-3.1.dist-info → rapidtide-3.1.1.dist-info}/METADATA +8 -7
  65. {rapidtide-3.1.dist-info → rapidtide-3.1.1.dist-info}/RECORD +69 -70
  66. rapidtide/wiener_doc.py +0 -255
  67. {rapidtide-3.1.dist-info → rapidtide-3.1.1.dist-info}/WHEEL +0 -0
  68. {rapidtide-3.1.dist-info → rapidtide-3.1.1.dist-info}/entry_points.txt +0 -0
  69. {rapidtide-3.1.dist-info → rapidtide-3.1.1.dist-info}/licenses/LICENSE +0 -0
  70. {rapidtide-3.1.dist-info → rapidtide-3.1.1.dist-info}/top_level.txt +0 -0
@@ -19,7 +19,7 @@
  import gc
  import logging
  import sys
- from typing import Any, Callable
+ from typing import Any

  import numpy as np
  import statsmodels as sm
@@ -48,7 +48,7 @@ def _procOneVoxelTimeShift(
  "debug": False,
  }
  options.update(kwargs)
- detrendorder = options["detrendorder"]
+ detrendorder = int(options["detrendorder"])
  offsettime = options["offsettime"]
  debug = options["debug"]
  if debug:
@@ -234,8 +234,7 @@ def alignvoxels(
  chunksize: int = 1000,
  padtrs: int = 60,
  debug: bool = False,
- rt_floatset: type = np.float64,
- rt_floattype: str = "float64",
+ rt_floattype: np.dtype = np.float64,
  ) -> int:
  """
  Apply temporal alignment (timeshift) to all voxels in fMRI data based on correlation peaks.
@@ -246,21 +245,21 @@ def alignvoxels(

  Parameters
  ----------
- fmridata : 4D numpy.ndarray
+ fmridata : 4D NDArray
  fMRI data, filtered to the passband, with shape (nx, ny, nz, nt)
  fmritr : float
  Data repetition time (TR), in seconds
- shiftedtcs : 4D numpy.ndarray
+ shiftedtcs : 4D NDArray
  Destination array for time-aligned voxel timecourses, shape (nx, ny, nz, nt)
- weights : 4D numpy.ndarray
+ weights : 4D NDArray
  Weights for each timepoint in the final regressor, shape (nx, ny, nz, nt)
- paddedshiftedtcs : 4D numpy.ndarray
+ paddedshiftedtcs : 4D NDArray
  Time-aligned voxel timecourses with padding, shape (nx, ny, nz, nt + 2*padtrs)
- paddedweights : 4D numpy.ndarray
+ paddedweights : 4D NDArray
  Weights for each timepoint in the padded regressor, shape (nx, ny, nz, nt + 2*padtrs)
- lagtimes : 3D numpy.ndarray
+ lagtimes : 3D NDArray
  Time delay of maximum crosscorrelation in seconds, shape (nx, ny, nz)
- lagmask : 3D numpy.ndarray
+ lagmask : 3D NDArray
  Mask of voxels with successful correlation fits, shape (nx, ny, nz)
  detrendorder : int, optional
  Order of polynomial used to detrend the data (default is 1)
@@ -278,10 +277,8 @@ def alignvoxels(
  Number of timepoints to pad on each end of the timecourses (default is 60)
  debug : bool, optional
  If True, enable additional debugging output (default is False)
- rt_floatset : type, optional
+ rt_floattype : np.dtype, optional
  Function to coerce variable types (default is np.float64)
- rt_floattype : str, optional
- Data type for internal variables ('float32' or 'float64') (default is 'float64')

  Returns
  -------
@@ -378,8 +375,7 @@ def makerefinemask(
  excludemask: NDArray | None = None,
  fixdelay: bool = False,
  debug: bool = False,
- rt_floatset: type = np.float64,
- rt_floattype: str = "float64",
+ rt_floattype: np.dtype = np.float64,
  ) -> tuple[int, NDArray | None, int, int, int, int, int]:
  """
  Determine which voxels should be used for regressor refinement based on correlation strength,
@@ -426,10 +422,8 @@ def makerefinemask(
  If True, uses the raw `lagmask` without applying delay thresholds. Default is False.
  debug : bool, optional
  Enable additional debugging output. Default is False.
- rt_floatset : callable, optional
- Function to coerce variable types. Default is `np.float64`.
- rt_floattype : str, optional
- Data type for internal variables. Must be 'float32' or 'float64'. Default is 'float64'.
+ rt_floattype : np.dtype, optional
+ Data type for internal arrays. Default is `np.float64`.

  Returns
  -------
@@ -693,8 +687,7 @@ def dorefine(
  cleanrefined: bool = False,
  bipolar: bool = False,
  debug: bool = False,
- rt_floatset: type = np.float64,
- rt_floattype: str = "float64",
+ rt_floattype: np.dtype = np.float64,
  ) -> tuple[int, NDArray]:
  """
  Refine timecourses using specified method (ICA, PCA, weighted average, or unweighted average).
@@ -748,10 +741,8 @@ def dorefine(
  If True, flip sign of negative lag strengths (default is False).
  debug : bool, optional
  If True, print debug information (default is False).
- rt_floatset : type, optional
+ rt_floattype : np.dtype, optional
  Data type for floating-point numbers (default is np.float64).
- rt_floattype : str, optional
- String representation of floating-point data type (default is "float64").

  Returns
  -------
@@ -883,7 +874,7 @@ def dorefine(
  theprefilter.apply(fmrifreq, icadata),
  detrendorder=detrendorder,
  )
- thepxcorr = pearsonr(filteredavg, filteredica)[0]
+ thepxcorr = pearsonr(filteredavg, filteredica).statistic
  LGR.info(f"ica/avg correlation = {thepxcorr}")
  if thepxcorr > 0.0:
  outputdata = 1.0 * icadata
@@ -918,7 +909,7 @@ def dorefine(
  theprefilter.apply(fmrifreq, pcadata),
  detrendorder=detrendorder,
  )
- thepxcorr = pearsonr(filteredavg, filteredpca)[0]
+ thepxcorr = pearsonr(filteredavg, filteredpca).statistic
  LGR.info(f"pca/avg correlation = {thepxcorr}")
  if thepxcorr > 0.0:
  outputdata = 1.0 * pcadata
@@ -934,8 +925,8 @@ def dorefine(
  if cleanrefined:
  thefit, R2 = tide_fit.mlregress(averagediscard, averagedata)

- fitcoff = rt_floatset(thefit[0, 1])
- datatoremove = rt_floatset(fitcoff * averagediscard)
+ fitcoff = thefit[0, 1]
+ datatoremove = (fitcoff * averagediscard).astype(rt_floattype)
  outputdata -= datatoremove

  # garbage collect
@@ -79,7 +79,7 @@ def getNullDistributionData(
  corrlist[i] = theshuffledxcorr_trim[argmax(theshuffledxcorr_trim)]

  # find and tabulate correlation coefficient at 0 lag
- corrlist_pear[i] = pearsonr(filteredindata, filteredshuffleddata)[0]
+ corrlist_pear[i] = pearsonr(filteredindata, filteredshuffleddata).statistic

  # progress
  # tide_util.progressbar(i + 1, numreps, label='Percent complete')
@@ -455,7 +455,7 @@ def main():

  # report the pearson correlation
  if showpearson and verbose:
- print("Pearson_R:\t", thepxcorr[0])
+ print("Pearson_R:\t", thepxcorr.statistic)
  if estimate_significance:
  for idx, percentile in enumerate(thepercentiles):
  print(
@@ -467,7 +467,7 @@ def main():
  print("")

  if debug:
- print(thepxcorr)
+ print(thepxcorr.statistic, thepxcorr.pvalue)

  if verbose:
  if uselabel:
@@ -497,7 +497,7 @@ def main():
  "xcorr_R(P=0.05)",
  "xcorr_maxdelay",
  )
- print(thelabel, thepxcorr[0], pearpcts_fit[0], R, pcts_fit[0], -maxdelay)
+ print(thelabel, thepxcorr.statistic, pearpcts_fit[0], R, pcts_fit[0], -maxdelay)
  else:
  if labelline:
  print(
@@ -507,7 +507,7 @@ def main():
  "xcorr_R(P=0.05)",
  "xcorr_maxdelay",
  )
- print(thepxcorr[0], pearpcts_fit[0], R, pcts_fit[0], -maxdelay)
+ print(thepxcorr.statistic, pearpcts_fit[0], R, pcts_fit[0], -maxdelay)
  else:
  if uselabel:
  if labelline:
@@ -518,11 +518,11 @@ def main():
  "xcorr_R",
  "xcorr_maxdelay",
  )
- print(thelabel, thepxcorr[0], thepxcorr[1], R, -maxdelay)
+ print(thelabel, thepxcorr.statistic, thepxcorr.pvalue, R, -maxdelay)
  else:
  if labelline:
  print("pearson_r\tpearson_p\txcorr_R\txcorr_t\txcorr_maxdelay")
- print(thepxcorr[0], "\t", thepxcorr[1], "\t", R, "\t", -maxdelay)
+ print(thepxcorr.statistic, "\t", thepxcorr.pvalue, "\t", R, "\t", -maxdelay)

  if displayplots:
  fig = figure()
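Note: the hunks above replace tuple indexing on the value returned by scipy.stats.pearsonr with attribute access. A minimal sketch of the SciPy behavior this relies on (assuming SciPy >= 1.9, where pearsonr returns a PearsonRResult object; the snippet is illustrative, not rapidtide code):

    import numpy as np
    from scipy.stats import pearsonr

    rng = np.random.default_rng(42)
    x = rng.normal(size=100)
    y = x + rng.normal(scale=0.5, size=100)

    result = pearsonr(x, y)
    # Named fields replace positional indexing of the old (r, p) return value...
    print(result.statistic, result.pvalue)
    # ...and tuple-style unpacking still works, so both spellings agree.
    r, p = result
    assert r == result.statistic and p == result.pvalue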
@@ -102,19 +102,15 @@ def stupidramtricks(args):
  args.nprocs = tide_multiproc.maxcpus()

  if args.internalprecision == "double":
- rt_floattype = "float64"
- rt_floatset = np.float64
+ rt_floattype = np.float64
  else:
- rt_floattype = "float32"
- rt_floatset = np.float32
+ rt_floattype = np.float32

  # set the output precision
  if args.outputprecision == "double":
- rt_outfloattype = "float64"
- rt_outfloatset = np.float64
+ rt_outfloattype = np.float64
  else:
- rt_outfloattype = "float32"
- rt_outfloatset = np.float32
+ rt_outfloattype = np.float32

  # select the voxels in the mask
  print("setting sizes")
@@ -139,23 +135,25 @@ def stupidramtricks(args):
  print("allocating shared memory")
  # first move fmridata into shared memory
  fmridata, fmridata_shm = tide_util.numpy2shared(
- fmridata, rt_floatset, name=f"fmridata_{args.pid}"
+ fmridata, rt_floattype, name=f"fmridata_{args.pid}"
  )
  sLFOfitmean, sLFOfitmean_shm = tide_util.allocshared(
- internalvalidspaceshape, rt_outfloatset
+ internalvalidspaceshape, rt_outfloattype
+ )
+ rvalue, rvalue_shm = tide_util.allocshared(internalvalidspaceshape, rt_outfloattype)
+ r2value, r2value_shm = tide_util.allocshared(internalvalidspaceshape, rt_outfloattype)
+ fitNorm, fitNorm_shm = tide_util.allocshared(
+ internalvalidspaceshapederivs, rt_outfloattype
  )
- rvalue, rvalue_shm = tide_util.allocshared(internalvalidspaceshape, rt_outfloatset)
- r2value, r2value_shm = tide_util.allocshared(internalvalidspaceshape, rt_outfloatset)
- fitNorm, fitNorm_shm = tide_util.allocshared(internalvalidspaceshapederivs, rt_outfloatset)
  fitcoeff, fitcoeff_shm = tide_util.allocshared(
- internalvalidspaceshapederivs, rt_outfloatset
+ internalvalidspaceshapederivs, rt_outfloattype
  )
  movingsignal, movingsignal_shm = tide_util.allocshared(
- internalvalidfmrishape, rt_outfloatset
+ internalvalidfmrishape, rt_outfloattype
  )
- lagtc, lagtc_shm = tide_util.allocshared(internalvalidfmrishape, rt_floatset)
+ lagtc, lagtc_shm = tide_util.allocshared(internalvalidfmrishape, rt_floattype)
  filtereddata, filtereddata_shm = tide_util.allocshared(
- internalvalidfmrishape, rt_outfloatset
+ internalvalidfmrishape, rt_outfloattype
  )

  location = "in shared memory"
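Note: throughout this release the paired rt_floatset (a coercion callable) and rt_floattype (a dtype string) arguments are collapsed into a single numpy dtype argument. A small illustration of why one object can cover both roles (names mirror the diff, but the snippet is illustrative, not rapidtide code):

    import numpy as np

    rt_floattype = np.float32

    # Serves as the dtype of a fresh allocation (the old rt_floattype string role)...
    buf = np.zeros((4, 4), dtype=rt_floattype)

    # ...and coerces existing values (the old rt_floatset callable role).
    coerced = np.asarray([1.0, 2.0, 3.0], dtype=rt_floattype)
    scalar = rt_floattype(3.14)

    assert buf.dtype == coerced.dtype == np.dtype(rt_floattype)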
@@ -158,7 +158,7 @@ class SimilarityFunctionator:

  Parameters
  ----------
- thetc : numpy.ndarray
+ thetc : NDArray
  Input timecourse data to be prepared
  isreftc : bool, optional
  Flag indicating whether the input is a reference timecourse. If True, the timecourse
@@ -166,7 +166,7 @@ class SimilarityFunctionator:

  Returns
  -------
- numpy.ndarray
+ NDArray
  Prepared and normalized timecourse data after filtering, normalization, detrending,
  and window function application

rapidtide/simfuncfit.py CHANGED
@@ -40,8 +40,7 @@ def onesimfuncfit(
  lthreshval: float = 0.0,
  fixdelay: bool = False,
  initialdelayvalue: float = 0.0,
- rt_floatset: type = np.float64,
- rt_floattype: str = "float64",
+ rt_floattype: np.dtype = np.float64,
  ) -> Tuple[int, float, float, float, int, int, int, int]:
  """
  Perform a single fit on a correlation function using the provided fitter.
@@ -70,10 +69,8 @@ def onesimfuncfit(
  Default is False.
  initialdelayvalue : float, optional
  The fixed delay value to use when `fixdelay=True`. Default is 0.0.
- rt_floatset : type, optional
+ rt_floattype : np.dtype, optional
  The data type to use for floating-point values. Default is `np.float64`.
- rt_floattype : str, optional
- String representation of the floating-point type. Default is "float64".

  Returns
  -------
@@ -96,7 +93,6 @@ def onesimfuncfit(
  Examples
  --------
  >>> import numpy as np
- >>> from some_module import some_fitter_class
  >>> corr_func = np.random.rand(100)
  >>> fitter = some_fitter_class()
  >>> result = onesimfuncfit(corr_func, fitter)
@@ -128,10 +124,10 @@ def onesimfuncfit(
  else:
  # do something different
  failreason = np.uint32(0)
- maxlag = rt_floatset(initialdelayvalue)
+ maxlag = initialdelayvalue
  maxindex = np.int16(bisect.bisect_left(thefitter.corrtimeaxis, initialdelayvalue))
- maxval = rt_floatset(correlationfunc[maxindex])
- maxsigma = rt_floatset(1.0)
+ maxval = correlationfunc[maxindex]
+ maxsigma = 1.0
  maskval = np.uint16(1)
  peakstart = maxindex
  peakend = maxindex
@@ -148,8 +144,7 @@ def _procOneVoxelFitcorr(
  initiallag: Optional[float] = None,
  fixdelay: bool = False,
  initialdelayvalue: float = 0.0,
- rt_floatset: type = np.float64,
- rt_floattype: str = "float64",
+ rt_floattype: np.dtype = np.float64,
  ) -> Tuple[int, int, float, float, float, NDArray, NDArray, float, int, int]:
  """
  Process a single voxel for correlation fitting.
@@ -176,10 +171,8 @@ def _procOneVoxelFitcorr(
  If True, fixes the delay during fitting. Default is False.
  initialdelayvalue : float, optional
  Initial delay value if `fixdelay` is True. Default is 0.0.
- rt_floatset : type, optional
+ rt_floattype : np.dtype, optional
  Type to use for real-valued floating-point arrays. Default is `np.float64`.
- rt_floattype : str, optional
- String representation of the floating-point type. Default is "float64".

  Returns
  -------
@@ -212,8 +205,7 @@ def _procOneVoxelFitcorr(
  ... despeckle_thresh=5.0,
  ... fixdelay=False,
  ... initialdelayvalue=0.0,
- ... rt_floatset=np.float64,
- ... rt_floattype="float64"
+ ... rt_floattype=np.float64,
  ... )
  >>> print(result)
  (10, 1, 1.23, 0.95, 0.12, array([...]), array([...]), 0.90, 1, 0)
@@ -235,7 +227,6 @@ def _procOneVoxelFitcorr(
  fixdelay=fixdelay,
  initialdelayvalue=initialdelayvalue,
  initiallag=initiallag,
- rt_floatset=rt_floatset,
  rt_floattype=rt_floattype,
  )

@@ -244,29 +235,29 @@ def _procOneVoxelFitcorr(

  # now tuck everything away in the appropriate output array
  volumetotalinc = 0
- thewindowout = rt_floatset(0.0 * corr_y)
+ thewindowout = np.zeros_like(corr_y, rt_floattype)
  thewindowout[peakstart : peakend + 1] = 1.0
  if (maskval == 0) and thefitter.zerooutbadfit:
- thetime = rt_floatset(0.0)
- thestrength = rt_floatset(0.0)
- thesigma = rt_floatset(0.0)
- thegaussout = 0.0 * corr_y
- theR2 = rt_floatset(0.0)
+ thetime = 0.0
+ thestrength = 0.0
+ thesigma = 0.0
+ thegaussout = np.zeros_like(corr_y, rt_floattype)
+ theR2 = 0.0
  else:
  volumetotalinc = 1
- thetime = rt_floatset(np.fmod(maxlag, thefitter.lagmod))
- thestrength = rt_floatset(maxval)
- thesigma = rt_floatset(maxsigma)
- thegaussout = rt_floatset(0.0 * corr_y)
- thewindowout = rt_floatset(0.0 * corr_y)
+ thetime = np.fmod(maxlag, thefitter.lagmod)
+ thestrength = maxval
+ thesigma = maxsigma
+ thegaussout = np.zeros_like(corr_y, rt_floattype)
+ thewindowout = np.zeros_like(corr_y, rt_floattype)
  if (not fixdelay) and (maxsigma != 0.0):
- thegaussout = rt_floatset(
- tide_fit.gauss_eval(thefitter.corrtimeaxis, [maxval, maxlag, maxsigma])
- )
+ thegaussout = tide_fit.gauss_eval(
+ thefitter.corrtimeaxis, [maxval, maxlag, maxsigma]
+ ).astype(rt_floattype)
  else:
- thegaussout = rt_floatset(0.0)
- thewindowout = rt_floatset(0.0)
- theR2 = rt_floatset(thestrength * thestrength)
+ thegaussout = 0.0
+ thewindowout = 0.0
+ theR2 = thestrength * thestrength

  return (
  vox,
@@ -304,8 +295,7 @@ def fitcorr(
  chunksize: int = 1000,
  despeckle_thresh: float = 5.0,
  initiallags: Optional[NDArray] = None,
- rt_floatset: type = np.float64,
- rt_floattype: str = "float64",
+ rt_floattype: np.dtype = np.float64,
  ) -> int:
  """
  Fit correlation data to extract lag parameters and related statistics for each voxel.
@@ -358,10 +348,8 @@ def fitcorr(
  Threshold for despeckling, by default 5.0.
  initiallags : NDArray, optional
  Initial lag values for each voxel, by default None.
- rt_floatset : type, optional
+ rt_floattype : np.dtype, optional
  Floating-point type for runtime, by default np.float64.
- rt_floattype : str, optional
- String representation of floating-point type, by default "float64".

  Returns
  -------
@@ -443,7 +431,6 @@ def fitcorr(
  initiallag=thislag,
  fixdelay=fixdelay,
  initialdelayvalue=thisinitialdelayvalue,
- rt_floatset=rt_floatset,
  rt_floattype=rt_floattype,
  )
  )
@@ -528,7 +515,6 @@ def fitcorr(
  initiallag=thislag,
  fixdelay=fixdelay,
  initialdelayvalue=thisinitialdelayvalue,
- rt_floatset=rt_floatset,
  rt_floattype=rt_floattype,
  )
  if (
@@ -150,8 +150,7 @@ def test_cleanregressor(debug=False, local=False, displayplots=False):
  respdelete=False,
  displayplots=displayplots,
  debug=debug,
- rt_floattype="float64",
- rt_floatset=np.float64,
+ rt_floattype=np.float64,
  )
  print(f"\t{len(referencetc)=}")
  print(f"\t{len(resampref_y)=}")
@@ -24,6 +24,13 @@ import rapidtide.workflows.happy as happy_workflow
  import rapidtide.workflows.happy_parser as happy_parser
  from rapidtide.tests.utils import get_examples_path, get_test_temp_path

+ try:
+ import tensorflow as tf
+
+ tensorflowexists = True
+ except ImportError:
+ tensorflowexists = False
+

  def test_fullrunhappy_v3(debug=False, local=False, displayplots=False):
  # set input and output directories
@@ -45,13 +52,12 @@ def test_fullrunhappy_v3(debug=False, local=False, displayplots=False):
  os.path.join(exampleroot, "sub-HAPPYTEST_smallmask.nii.gz"),
  "--mklthreads",
  "-1",
- "--usetensorflow",
- "--model",
- "model_revised_tf2",
  "--cardcalconly",
  ]
- # "--motionfile",
- # os.path.join(exampleroot, "sub-HAPPYTEST_mcf.par"),
+ if tensorflowexists:
+ inputargs.append("--usetensorflow")
+ inputargs.append("--model")
+ inputargs.append("model_revised_tf2")
  happy_workflow.happy_main(happy_parser.process_args(inputargs=inputargs))

@@ -24,7 +24,14 @@ import rapidtide.workflows.happy as happy_workflow
  import rapidtide.workflows.happy_parser as happy_parser
  from rapidtide.tests.utils import get_examples_path, get_test_temp_path

+ try:
+ import tensorflow as tf

+ tensorflowexists = True
+ except ImportError:
+ tensorflowexists = False
+
+
  def test_fullrunhappy_v4(debug=False, local=False, displayplots=False):
  # set input and output directories
  if local:
@@ -45,12 +52,13 @@ def test_fullrunhappy_v4(debug=False, local=False, displayplots=False):
  os.path.join(exampleroot, "sub-HAPPYTEST_smallmask.nii.gz"),
  "--mklthreads",
  "-1",
- "--usetensorflow",
  "--usenewvesselmethod",
  "--motionfile",
  os.path.join(exampleroot, "sub-HAPPYTEST_mcf.par"),
  "--aliasedcorrelation",
  ]
+ if tensorflowexists:
+ inputargs.append("--usetensorflow")
  happy_workflow.happy_main(happy_parser.process_args(inputargs=inputargs))

@@ -20,7 +20,6 @@ import numpy as np

  from rapidtide.workflows.adjustoffset import _get_parser as adjustoffset_getparser
  from rapidtide.workflows.aligntcs import _get_parser as aligntcs_getparser
- from rapidtide.workflows.applydlfilter import _get_parser as applydlfilter_getparser
  from rapidtide.workflows.atlasaverage import _get_parser as atlasaverage_getparser
  from rapidtide.workflows.atlastool import _get_parser as atlastool_getparser
  from rapidtide.workflows.calctexticc import _get_parser as calctexticc_getparser
@@ -84,7 +83,6 @@ def test_parsers(debug=False):
  parserlist = [
  adjustoffset_getparser,
  aligntcs_getparser,
- applydlfilter_getparser,
  atlasaverage_getparser,
  atlastool_getparser,
  calctexticc_getparser,
@@ -137,7 +135,17 @@ def test_parsers(debug=False):
  tcfrom3col_getparser,
  variabilityizer_getparser,
  ]
-
+ try:
+ import tensorflow as tf
+ dlfilterloads = True
+ except ImportError:
+ dlfilterloads = False
+ if dlfilterloads:
+ from rapidtide.workflows.applydlfilter import (
+ _get_parser as applydlfilter_getparser,
+ )
+ parserlist.append(applydlfilter_getparser)
+
  for thegetparser in parserlist:
  theusage = thegetparser().format_help()
  if debug:
@@ -144,7 +144,6 @@ def eval_refinedelay(
  theheader["pixdim"][4] = 1.0

  rt_floattype = "float64"
- rt_floatset = np.float64
  sLFOfitmean = np.zeros(numlags, dtype=rt_floattype)
  rvalue = np.zeros(numlags, dtype=rt_floattype)
  r2value = np.zeros(numlags, dtype=rt_floattype)
@@ -41,6 +41,10 @@ def test_simroundtrip(debug=False, local=False, displayplots=False):
  inputargs = [
  os.path.join(exampleroot, "sub-RAPIDTIDETEST.nii.gz"),
  os.path.join(testtemproot, "sub-RAPIDTIDETESTSIM"),
+ "--corrmask",
+ os.path.join(exampleroot, "sub-RAPIDTIDETEST_restrictedmask.nii.gz"),
+ "--globalmeaninclude",
+ os.path.join(exampleroot, "sub-RAPIDTIDETEST_brainmask.nii.gz"),
  "--spatialfilt",
  "2",
  "--simcalcrange",
@@ -57,6 +61,8 @@ def test_simroundtrip(debug=False, local=False, displayplots=False):
  ]
  rapidtide_workflow.rapidtide_main(rapidtide_parser.process_args(inputargs=inputargs))

+ print("initial rapidtide run complete")
+
  # now simulate data from maps
  print(testtemproot)
  inputargs = [
@@ -77,6 +83,7 @@ def test_simroundtrip(debug=False, local=False, displayplots=False):
  ]

  pf.generic_init(rapidtide_simdata._get_parser, rapidtide_simdata.simdata, inputargs=inputargs)
+ print("simulated dataset generated")

  # run repeat rapidtide
  inputargs = [
@@ -97,6 +104,7 @@ def test_simroundtrip(debug=False, local=False, displayplots=False):
  "4.0",
  ]
  rapidtide_workflow.rapidtide_main(rapidtide_parser.process_args(inputargs=inputargs))
+ print("repeat rapidtide completed")

  absthresh = 1e-10
  msethresh = 1e-12
@@ -58,6 +58,7 @@ def test_stcorrelate(debug=False):
  times, corrpertime, ppertime = shorttermcorr_1D(
  sig1, sig2, tr, windowtime, samplestep=int(stepsize // tr), detrendorder=0
  )
+ print(f"1D correlation: {corrpertime=}, {ppertime=}")
  # plength = len(times)
  times, xcorrpertime, Rvals, delayvals, valid = shorttermcorr_2D(
  sig1,
@@ -67,8 +68,9 @@ def test_stcorrelate(debug=False):
  samplestep=int(stepsize // tr),
  weighting=corrweighting,
  detrendorder=0,
- displayplots=False,
+ displayplots=debug,
  )
+ print(f"2D correlation: {Rvals=}, {delayvals=}, {valid=}")
  # xlength = len(times)
  writenpvecs(corrpertime, outfilename + "_pearson.txt")
  writenpvecs(ppertime, outfilename + "_pvalue.txt")
rapidtide/util.py CHANGED
@@ -2022,7 +2022,7 @@ def comparehappyruns(root1: str, root2: str, debug: bool = False) -> dict[str, A

  # shared memory routines
  def numpy2shared(
- inarray: NDArray, theouttype: type, name: str | None = None
+ inarray: NDArray, theouttype: np.dtype, name: str | None = None
  ) -> tuple[NDArray, shared_memory.SharedMemory]:
  """
  Convert a numpy array to a shared memory array.
@@ -2033,9 +2033,9 @@ def numpy2shared(

  Parameters
  ----------
- inarray : numpy.ndarray
+ inarray : NDArray
  Input numpy array to be converted to shared memory.
- theouttype : type
+ theouttype : dtype
  Data type of the output shared memory array.
  name : str, optional
  Name of the shared memory block. If None, an anonymous shared memory
@@ -2043,7 +2043,7 @@ def numpy2shared(

  Returns
  -------
- tuple[numpy.ndarray, multiprocessing.shared_memory.SharedMemory]
+ tuple[NDArray, multiprocessing.shared_memory.SharedMemory]
  A tuple containing:
  - The shared memory array with the same shape as input array
  - The shared memory object that manages the memory block
@@ -2078,7 +2078,7 @@ def numpy2shared(


  def allocshared(
- theshape: tuple[int, ...], thetype: type, name: str | None = None
+ theshape: tuple[int, ...], thetype: np.dtype, name: str | None = None
  ) -> tuple[NDArray, shared_memory.SharedMemory]:
  """
  Allocate shared memory for a numpy array.
@@ -2137,7 +2137,7 @@ def allocshared(


  def allocarray(
- theshape: tuple[int, ...], thetype: type, shared: bool = False, name: str | None = None
+ theshape: tuple[int, ...], thetype: np.dtype, shared: bool = False, name: str | None = None
  ) -> tuple[NDArray, shared_memory.SharedMemory | None]:
  """
  Allocate and return a numpy array with specified shape and type.
rapidtide/voxelData.py CHANGED
@@ -681,7 +681,7 @@ class VoxelData:

  Parameters
  ----------
- validvoxels : numpy.ndarray
+ validvoxels : NDArray
  Array containing the valid voxel coordinates. The first dimension
  represents the number of valid spatial locations.