rapidtide 3.0a12-py3-none-any.whl → 3.0a14-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (70)
  1. cloud/gmscalc-HCPYA +1 -1
  2. cloud/rapidtide-HCPYA +3 -3
  3. rapidtide/Colortables.py +10 -10
  4. rapidtide/DerivativeDelay.py +213 -0
  5. rapidtide/{Refiner.py → RegressorRefiner.py} +1 -1
  6. rapidtide/__init__.py +2 -1
  7. rapidtide/_version.py +1 -1
  8. rapidtide/data/examples/src/test_mlregressallt.py +32 -17
  9. rapidtide/data/examples/src/testalign +1 -1
  10. rapidtide/data/examples/src/testboth +1 -1
  11. rapidtide/data/examples/src/testcifti +11 -0
  12. rapidtide/data/examples/src/testdelayvar +14 -0
  13. rapidtide/data/examples/src/testfmri +1 -0
  14. rapidtide/data/examples/src/testglmfilt +8 -6
  15. rapidtide/data/examples/src/testhappy +1 -1
  16. rapidtide/data/examples/src/testnewrefine +11 -11
  17. rapidtide/data/examples/src/testnoiseamp +2 -2
  18. rapidtide/data/examples/src/testretro +16 -7
  19. rapidtide/data/examples/src/testretrolagtcs +1 -1
  20. rapidtide/dlfilter.py +0 -1
  21. rapidtide/fit.py +41 -9
  22. rapidtide/happy_supportfuncs.py +5 -0
  23. rapidtide/io.py +13 -2
  24. rapidtide/{glmpass.py → linfitfiltpass.py} +23 -19
  25. rapidtide/makelaggedtcs.py +8 -5
  26. rapidtide/multiproc.py +8 -11
  27. rapidtide/refinedelay.py +234 -109
  28. rapidtide/resample.py +3 -0
  29. rapidtide/scripts/{retroglm.py → delayvar.py} +2 -2
  30. rapidtide/scripts/{glmfilt.py → linfitfilt.py} +2 -2
  31. rapidtide/scripts/retroregress.py +28 -0
  32. rapidtide/scripts/stupidramtricks.py +9 -7
  33. rapidtide/simfuncfit.py +1 -1
  34. rapidtide/tests/cleanposttest +21 -0
  35. rapidtide/tests/test_delayestimation.py +3 -3
  36. rapidtide/tests/test_fastresampler.py +1 -2
  37. rapidtide/tests/test_fullrunhappy_v1.py +14 -6
  38. rapidtide/tests/test_fullrunhappy_v2.py +17 -9
  39. rapidtide/tests/test_fullrunhappy_v3.py +16 -8
  40. rapidtide/tests/test_fullrunhappy_v4.py +16 -8
  41. rapidtide/tests/test_fullrunhappy_v5.py +14 -6
  42. rapidtide/tests/test_fullrunrapidtide_v1.py +20 -12
  43. rapidtide/tests/test_fullrunrapidtide_v2.py +21 -13
  44. rapidtide/tests/test_fullrunrapidtide_v3.py +15 -7
  45. rapidtide/tests/test_fullrunrapidtide_v4.py +14 -7
  46. rapidtide/tests/test_fullrunrapidtide_v5.py +13 -5
  47. rapidtide/tests/test_fullrunrapidtide_v6.py +34 -26
  48. rapidtide/tests/{test_glmpass.py → test_linfitfiltpass.py} +9 -9
  49. rapidtide/tests/test_motionregress.py +3 -3
  50. rapidtide/tests/test_refinedelay.py +14 -12
  51. rapidtide/tidepoolTemplate_alt_qt6.py +172 -45
  52. rapidtide/tidepoolTemplate_big_qt6.py +196 -53
  53. rapidtide/tidepoolTemplate_qt6.py +150 -39
  54. rapidtide/workflows/atlasaverage.py +40 -12
  55. rapidtide/workflows/delayvar.py +1136 -0
  56. rapidtide/workflows/happy.py +37 -11
  57. rapidtide/workflows/happy_parser.py +4 -4
  58. rapidtide/workflows/{glmfilt.py → linfitfilt.py} +4 -4
  59. rapidtide/workflows/rapidtide.py +246 -178
  60. rapidtide/workflows/rapidtide_parser.py +116 -101
  61. rapidtide/workflows/{glmfrommaps.py → regressfrommaps.py} +30 -26
  62. rapidtide/workflows/retrolagtcs.py +13 -12
  63. rapidtide/workflows/{retroglm.py → retroregress.py} +182 -141
  64. {rapidtide-3.0a12.dist-info → rapidtide-3.0a14.dist-info}/METADATA +3 -2
  65. {rapidtide-3.0a12.dist-info → rapidtide-3.0a14.dist-info}/RECORD +69 -64
  66. {rapidtide-3.0a12.dist-info → rapidtide-3.0a14.dist-info}/WHEEL +1 -1
  67. {rapidtide-3.0a12.dist-info → rapidtide-3.0a14.dist-info}/entry_points.txt +3 -2
  68. rapidtide/data/examples/src/testoutputsize +0 -45
  69. {rapidtide-3.0a12.dist-info → rapidtide-3.0a14.dist-info/licenses}/LICENSE +0 -0
  70. {rapidtide-3.0a12.dist-info → rapidtide-3.0a14.dist-info}/top_level.txt +0 -0
rapidtide/workflows/delayvar.py (new file)
@@ -0,0 +1,1136 @@
1
+ #!/usr/bin/env python
2
+ # -*- coding: utf-8 -*-
3
+ #
4
+ # Copyright 2016-2024 Blaise Frederick
5
+ #
6
+ # Licensed under the Apache License, Version 2.0 (the "License");
7
+ # you may not use this file except in compliance with the License.
8
+ # You may obtain a copy of the License at
9
+ #
10
+ # http://www.apache.org/licenses/LICENSE-2.0
11
+ #
12
+ # Unless required by applicable law or agreed to in writing, software
13
+ # distributed under the License is distributed on an "AS IS" BASIS,
14
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15
+ # See the License for the specific language governing permissions and
16
+ # limitations under the License.
17
+ #
18
+ #
19
+ import argparse
20
+ import copy
21
+ import logging
22
+ import os
23
+ import sys
24
+ import time
25
+ from pathlib import Path
26
+
27
+ import numpy as np
28
+ from scipy.stats import pearsonr
29
+ from sklearn.decomposition import PCA
31
+
32
+ import rapidtide.filter as tide_filt
33
+ import rapidtide.io as tide_io
34
+ import rapidtide.multiproc as tide_multiproc
35
+ import rapidtide.refinedelay as tide_refinedelay
36
+ import rapidtide.resample as tide_resample
37
+ import rapidtide.stats as tide_stats
38
+ import rapidtide.util as tide_util
39
+ import rapidtide.workflows.parser_funcs as pf
40
+ import rapidtide.workflows.regressfrommaps as tide_regressfrommaps
41
+
42
+ from .utils import setup_logger
43
+
44
+
45
+ # Create a sentinel.
46
+ # from https://stackoverflow.com/questions/58594956/find-out-which-arguments-were-passed-explicitly-in-argparse
47
+ class _Sentinel:
48
+ pass
49
+
50
+
51
+ sentinel = _Sentinel()
52
+ LGR = logging.getLogger(__name__)
53
+ ErrorLGR = logging.getLogger("ERROR")
54
+ TimingLGR = logging.getLogger("TIMING")
55
+
56
+ DEFAULT_REGRESSIONFILTDERIVS = 0
57
+ DEFAULT_PATCHTHRESH = 3.0
58
+ DEFAULT_REFINEDELAYMINDELAY = -2.5
59
+ DEFAULT_REFINEDELAYMAXDELAY = 2.5
60
+ DEFAULT_REFINEDELAYNUMPOINTS = 201
61
+ DEFAULT_DELAYOFFSETSPATIALFILT = -1
62
+ DEFAULT_WINDOWSIZE = 30.0
63
+ DEFAULT_SYSTEMICFITTYPE = "pca"
64
+ DEFAULT_PCACOMPONENTS = 1
65
+ DEFAULT_LAGMIN = 0.0
66
+ DEFAULT_LAGMAX = 0.0
67
+ DEFAULT_TRAINSTEP = 0.5
68
+
69
+
70
+ def _get_parser():
71
+ """
72
+ Argument parser for delayvar
73
+ """
74
+ parser = argparse.ArgumentParser(
75
+ prog="delayvar",
76
+ description="Calculate variation in delay time over the course of an acquisition.",
77
+ allow_abbrev=False,
78
+ )
79
+
80
+ # Required arguments
81
+ parser.add_argument(
82
+ "fmrifile",
83
+ type=lambda x: pf.is_valid_file(parser, x),
84
+ help="The name of 4D nifti fmri file to filter.",
85
+ )
86
+ parser.add_argument(
87
+ "datafileroot",
88
+ type=str,
89
+ help="The root name of the previously run rapidtide dataset (everything up to but not including the underscore).",
90
+ )
91
+ parser.add_argument(
92
+ "--alternateoutput",
93
+ dest="alternateoutput",
94
+ type=str,
95
+ help="Alternate output root (if not specified, will use the same root as the previous dataset).",
96
+ default=None,
97
+ )
98
+ parser.add_argument(
99
+ "--nprocs",
100
+ dest="nprocs",
101
+ action="store",
102
+ type=int,
103
+ metavar="NPROCS",
104
+ help=(
105
+ "Use NPROCS worker processes for multiprocessing. "
106
+ "Setting NPROCS to less than 1 sets the number of "
107
+ "worker processes to n_cpus."
108
+ ),
109
+ default=1,
110
+ )
111
+ parser.add_argument(
112
+ "--numskip",
113
+ dest="numskip",
114
+ action="store",
115
+ type=lambda x: pf.is_int(parser, x, minval=0),
116
+ metavar="NUMSKIP",
117
+ help=("Skip NUMSKIP points at the beginning of the fmri file."),
118
+ default=0,
119
+ )
120
+ parser.add_argument(
121
+ "--outputlevel",
122
+ dest="outputlevel",
123
+ action="store",
124
+ type=str,
125
+ choices=["min", "less", "normal", "more", "max"],
126
+ help=(
127
+ "The level of file output produced. 'min' produces only absolutely essential files, 'less' adds in "
128
+ "the sLFO filtered data (rather than just filter efficacy metrics), 'normal' saves what you "
129
+ "would typically want around for interactive data exploration, "
130
+ "'more' adds files that are sometimes useful, and 'max' outputs anything you might possibly want. "
131
+ "Selecting 'max' will produce ~3x your input datafile size as output. "
132
+ f'Default is "normal".'
133
+ ),
134
+ default="normal",
135
+ )
136
+ parser.add_argument(
137
+ "--noprogressbar",
138
+ dest="showprogressbar",
139
+ action="store_false",
140
+ help=("Will disable showing progress bars (helpful if stdout is going to a file)."),
141
+ default=True,
142
+ )
143
+ parser.add_argument(
144
+ "--nohpfilter",
145
+ dest="hpf",
146
+ action="store_false",
147
+ help=("Disable highpass filtering on data and regressor."),
148
+ default=True,
149
+ )
150
+ parser.add_argument(
151
+ "--trainrange",
152
+ dest="lag_extrema",
153
+ action=pf.IndicateSpecifiedAction,
154
+ nargs=2,
155
+ type=float,
156
+ metavar=("LAGMIN", "LAGMAX"),
157
+ help=(
158
+ "Set the range of delay offset training center values to span LAGMIN to LAGMAX. The derivative "
159
+ "ratio calculation only works over a narrow range, so if the static offset is large, "
160
+ "you need to train the ratio calculation with a central delay close to that value. "
161
+ f"Default is {DEFAULT_LAGMIN} to {DEFAULT_LAGMAX} seconds. "
162
+ ),
163
+ default=(DEFAULT_LAGMIN, DEFAULT_LAGMAX),
164
+ )
165
+ parser.add_argument(
166
+ "--trainstep",
167
+ dest="trainstep",
168
+ action="store",
169
+ type=float,
170
+ metavar="STEP",
171
+ help=(
172
+ "Use this step size (in seconds) to span the training width. The derivative "
173
+ "ratio calculation only works over a narrow range, so if the static offset is large, "
174
+ "you need to train the ratio calculation with a central delay close to that value. "
175
+ f"Default is {DEFAULT_TRAINSTEP}"
176
+ ),
177
+ default=DEFAULT_TRAINSTEP,
178
+ )
179
+ parser.add_argument(
180
+ "--delaypatchthresh",
181
+ dest="delaypatchthresh",
182
+ action="store",
183
+ type=float,
184
+ metavar="NUMMADs",
185
+ help=(
186
+ "Maximum number of robust standard deviations to permit in the offset delay refine map. "
187
+ f"Default is {DEFAULT_PATCHTHRESH}"
188
+ ),
189
+ default=DEFAULT_PATCHTHRESH,
190
+ )
191
+ parser.add_argument(
192
+ "--systemicfittype",
193
+ dest="systemicfittype",
194
+ action="store",
195
+ type=str,
196
+ choices=[
197
+ "mean",
198
+ "pca",
199
+ ],
200
+ help=(
201
+ f"Use mean or pca to fit the systemic variation in delay offset. "
202
+ f'Default is "{DEFAULT_SYSTEMICFITTYPE}".'
203
+ ),
204
+ default=DEFAULT_SYSTEMICFITTYPE,
205
+ )
206
+ parser.add_argument(
207
+ "--pcacomponents",
208
+ metavar="NCOMP",
209
+ dest="pcacomponents",
210
+ type=float,
211
+ help="Use NCOMP components for PCA fit of delay offset.",
212
+ default=DEFAULT_PCACOMPONENTS,
213
+ )
214
+ parser.add_argument(
215
+ "--verbose",
216
+ dest="verbose",
217
+ action="store_true",
218
+ help=("Be wicked chatty."),
219
+ default=False,
220
+ )
221
+ parser.add_argument(
222
+ "--debug",
223
+ dest="debug",
224
+ action="store_true",
225
+ help=("Output lots of helpful information."),
226
+ default=False,
227
+ )
228
+ parser.add_argument(
229
+ "--focaldebug",
230
+ dest="focaldebug",
231
+ action="store_true",
232
+ help=("Output lots of helpful information on a limited subset of operations."),
233
+ default=False,
234
+ )
235
+ experimental = parser.add_argument_group(
236
+ "Experimental options (not fully tested, or not tested at all, may not work). Beware!"
237
+ )
238
+ experimental.add_argument(
239
+ "--windowsize",
240
+ dest="windowsize",
241
+ action="store",
242
+ type=lambda x: pf.is_float(parser, x, minval=10.0),
243
+ metavar="SIZE",
244
+ help=(
245
+ f"Set segmented delay analysis window size to SIZE seconds. Default is {DEFAULT_WINDOWSIZE}."
246
+ ),
247
+ default=DEFAULT_WINDOWSIZE,
248
+ )
249
+ experimental.add_argument(
250
+ "--windelayoffsetspatialfilt",
251
+ dest="windelayoffsetgausssigma",
252
+ action="store",
253
+ type=float,
254
+ metavar="GAUSSSIGMA",
255
+ help=(
256
+ "Spatially filter fMRI data prior to calculating windowed delay offsets "
257
+ "using GAUSSSIGMA in mm. Set GAUSSSIGMA negative "
258
+ "to have rapidtide set it to half the mean voxel "
259
+ "dimension (a rule of thumb for a good value)."
260
+ ),
261
+ default=DEFAULT_DELAYOFFSETSPATIALFILT,
262
+ )
263
+
264
+ return parser
265
+
266
+
267
+ def delayvar(args):
268
+ # get the pid of the parent process
269
+ args.pid = os.getpid()
270
+
271
+ args.lagmin = args.lag_extrema[0]
272
+ args.lagmax = args.lag_extrema[1]
273
+
274
+ # specify the output name
275
+ if args.alternateoutput is None:
276
+ outputname = args.datafileroot
277
+ else:
278
+ outputname = args.alternateoutput
279
+
280
+ # start the loggers now that we know the output name
281
+ sh = logging.StreamHandler()
282
+ if args.debug:
283
+ logging.basicConfig(level=logging.DEBUG, handlers=[sh])
284
+ else:
285
+ logging.basicConfig(level=logging.INFO, handlers=[sh])
286
+ # Set up loggers for workflow
287
+ setup_logger(
288
+ logger_filename=f"{outputname}_retrolog.txt",
289
+ timing_filename=f"{outputname}_retroruntimings.tsv",
290
+ error_filename=f"{outputname}_retroerrorlog.txt",
291
+ verbose=False,
292
+ debug=args.debug,
293
+ )
294
+ TimingLGR.info("Start")
295
+ LGR.info(f"starting delayvar")
296
+
297
+ # set some global values
298
+ args.mindelay = DEFAULT_REFINEDELAYMINDELAY
299
+ args.maxdelay = DEFAULT_REFINEDELAYMAXDELAY
300
+ args.numpoints = DEFAULT_REFINEDELAYNUMPOINTS
301
+
302
+ if args.outputlevel == "min":
303
+ args.saveminimumsLFOfiltfiles = False
304
+ args.savenormalsLFOfiltfiles = False
305
+ args.savemovingsignal = False
306
+ args.saveallsLFOfiltfiles = False
307
+ elif args.outputlevel == "less":
308
+ args.saveminimumsLFOfiltfiles = True
309
+ args.savenormalsLFOfiltfiles = False
310
+ args.savemovingsignal = False
311
+ args.saveallsLFOfiltfiles = False
312
+ elif args.outputlevel == "normal":
313
+ args.saveminimumsLFOfiltfiles = True
314
+ args.savenormalsLFOfiltfiles = True
315
+ args.savemovingsignal = False
316
+ args.saveallsLFOfiltfiles = False
317
+ elif args.outputlevel == "more":
318
+ args.saveminimumsLFOfiltfiles = True
319
+ args.savenormalsLFOfiltfiles = True
320
+ args.savemovingsignal = True
321
+ args.saveallsLFOfiltfiles = False
322
+ elif args.outputlevel == "max":
323
+ args.saveminimumsLFOfiltfiles = True
324
+ args.savenormalsLFOfiltfiles = True
325
+ args.savemovingsignal = True
326
+ args.saveallsLFOfiltfiles = True
327
+ else:
328
+ print(f"illegal output level {args.outputlevel}")
329
+ sys.exit()
330
+
331
+ thecommandline = " ".join(sys.argv[1:])
332
+
333
+ if args.nprocs < 1:
334
+ args.nprocs = tide_multiproc.maxcpus()
335
+ # don't use shared memory if there is only one process
336
+ if args.nprocs == 1:
337
+ usesharedmem = False
338
+ else:
339
+ usesharedmem = True
340
+
341
+ # read the runoptions file, update if necessary
342
+ print("reading runoptions")
343
+ runoptionsfile = f"{args.datafileroot}_desc-runoptions_info"
344
+ therunoptions = tide_io.readoptionsfile(runoptionsfile)
345
+ sublist = (
346
+ ("retroglmcompatible", "retroregresscompatible"),
347
+ ("glmthreshval", "regressfiltthreshval"),
348
+ )
349
+ therunoptions["singleproc_regressionfilt"] = False
350
+ therunoptions["nprocs_regressionfilt"] = args.nprocs
351
+ for subpair in sublist:
352
+ try:
353
+ therunoptions[subpair[1]] = therunoptions[subpair[0]]
354
+ print(f"substituting {subpair[1]} for {subpair[0]} in runoptions")
355
+ except KeyError:
356
+ pass
357
+
358
+ try:
359
+ candoretroregress = therunoptions["retroregresscompatible"]
360
+ except KeyError:
361
+ print(
362
+ f"based on {runoptionsfile}, this rapidtide dataset does not support retrospective GLM calculation"
363
+ )
364
+ sys.exit()
365
+
366
+ if therunoptions["internalprecision"] == "double":
367
+ rt_floattype = "float64"
368
+ rt_floatset = np.float64
369
+ else:
370
+ rt_floattype = "float32"
371
+ rt_floatset = np.float32
372
+
373
+ # set the output precision
374
+ if therunoptions["outputprecision"] == "double":
375
+ rt_outfloattype = "float64"
376
+ rt_outfloatset = np.float64
377
+ else:
378
+ rt_outfloattype = "float32"
379
+ rt_outfloatset = np.float32
380
+ therunoptions["saveminimumsLFOfiltfiles"] = args.saveminimumsLFOfiltfiles
381
+
382
+ # read the fmri input files
383
+ print("reading fmrifile")
384
+ fmri_input, fmri_data, fmri_header, fmri_dims, fmri_sizes = tide_io.readfromnifti(
385
+ args.fmrifile
386
+ )
387
+
388
+ # create the canary file
389
+ Path(f"{outputname}_DELAYVARISRUNNING.txt").touch()
390
+
391
+ if args.debug:
392
+ print(f"{fmri_data.shape=}")
393
+ xdim, ydim, slicedim, fmritr = tide_io.parseniftisizes(fmri_sizes)
394
+ xsize, ysize, numslices, timepoints = tide_io.parseniftidims(fmri_dims)
395
+ numspatiallocs = int(xsize) * int(ysize) * int(numslices)
396
+ fmri_data_spacebytime = fmri_data.reshape((numspatiallocs, timepoints))
397
+ if args.debug:
398
+ print(f"{fmri_data_spacebytime.shape=}")
399
+
400
+ # read the processed mask
401
+ print("reading procfit maskfile")
402
+ procmaskfile = f"{args.datafileroot}_desc-processed_mask.nii.gz"
403
+ (
404
+ procmask_input,
405
+ procmask,
406
+ procmask_header,
407
+ procmask_dims,
408
+ procmask_sizes,
409
+ ) = tide_io.readfromnifti(procmaskfile)
410
+ if not tide_io.checkspacematch(fmri_header, procmask_header):
411
+ raise ValueError("procmask dimensions do not match fmri dimensions")
412
+ procmask_spacebytime = procmask.reshape((numspatiallocs))
413
+ if args.debug:
414
+ print(f"{procmask_spacebytime.shape=}")
415
+ print(f"{tide_stats.getmasksize(procmask_spacebytime)=}")
416
+
417
+ # read the corrfit mask
418
+ print("reading corrfit maskfile")
419
+ corrmaskfile = f"{args.datafileroot}_desc-corrfit_mask.nii.gz"
420
+ (
421
+ corrmask_input,
422
+ corrmask,
423
+ corrmask_header,
424
+ corrmask_dims,
425
+ corrmask_sizes,
426
+ ) = tide_io.readfromnifti(corrmaskfile)
427
+ if not tide_io.checkspacematch(fmri_header, corrmask_header):
428
+ raise ValueError("corrmask dimensions do not match fmri dimensions")
429
+ corrmask_spacebytime = corrmask.reshape((numspatiallocs))
430
+ if args.debug:
431
+ print(f"{corrmask_spacebytime.shape=}")
432
+ print(f"{tide_stats.getmasksize(corrmask_spacebytime)=}")
433
+
434
+ print("reading lagtimes")
435
+ lagtimesfile = f"{args.datafileroot}_desc-maxtimerefined_map.nii.gz"
436
+ if not os.path.exists(lagtimesfile):
437
+ lagtimesfile = f"{args.datafileroot}_desc-maxtime_map.nii.gz"
438
+ (
439
+ lagtimes_input,
440
+ lagtimes,
441
+ lagtimes_header,
442
+ lagtimes_dims,
443
+ lagtimes_sizes,
444
+ ) = tide_io.readfromnifti(lagtimesfile)
445
+ if not tide_io.checkspacematch(fmri_header, lagtimes_header):
446
+ raise ValueError("lagtimes dimensions do not match fmri dimensions")
447
+ if args.debug:
448
+ print(f"{lagtimes.shape=}")
449
+ lagtimes_spacebytime = lagtimes.reshape((numspatiallocs))
450
+ if args.debug:
451
+ print(f"{lagtimes_spacebytime.shape=}")
452
+
453
+ startpt = args.numskip
454
+ endpt = timepoints - 1
455
+ validtimepoints = endpt - startpt + 1
456
+ skiptime = startpt * fmritr
457
+ initial_fmri_x = (
458
+ np.linspace(0.0, validtimepoints * fmritr, num=validtimepoints, endpoint=False) + skiptime
459
+ )
460
+
461
+ # read the lagtc generator file
462
+ print("reading lagtc generator")
463
+ lagtcgeneratorfile = f"{args.datafileroot}_desc-lagtcgenerator_timeseries"
464
+ thepadtime = therunoptions["padseconds"]
465
+ genlagtc = tide_resample.FastResamplerFromFile(lagtcgeneratorfile, padtime=thepadtime)
466
+
467
+ # select the voxels in the mask
468
+ print("figuring out valid voxels")
469
+ validvoxels = np.where(procmask_spacebytime > 0)[0]
470
+ numvalidspatiallocs = np.shape(validvoxels)[0]
471
+ if args.debug:
472
+ print(f"{numvalidspatiallocs=}")
473
+
474
+ # slicing to valid voxels
475
+ print("selecting valid voxels")
476
+ fmri_data_valid = fmri_data_spacebytime[validvoxels, :]
477
+ lagtimes_valid = lagtimes_spacebytime[validvoxels]
478
+ corrmask_valid = corrmask_spacebytime[validvoxels]
479
+ procmask_valid = procmask_spacebytime[validvoxels]
480
+ if args.debug:
481
+ print(f"{fmri_data_valid.shape=}")
482
+
483
+ oversampfactor = int(therunoptions["oversampfactor"])
484
+ if args.debug:
485
+ print(f"{outputname=}")
486
+ oversamptr = fmritr / oversampfactor
487
+ try:
488
+ threshval = therunoptions["regressfiltthreshval"]
489
+ except KeyError:
490
+ threshval = 0.0
491
+ therunoptions["regressfiltthreshval"] = threshval
492
+ mode = "glm"
493
+
494
+ if args.debug:
495
+ print(f"{validvoxels.shape=}")
496
+ np.savetxt(f"{outputname}_validvoxels.txt", validvoxels)
497
+
498
+ outputpath = os.path.dirname(outputname)
499
+ rawsources = [
500
+ os.path.relpath(args.fmrifile, start=outputpath),
501
+ os.path.relpath(lagtimesfile, start=outputpath),
502
+ os.path.relpath(corrmaskfile, start=outputpath),
503
+ os.path.relpath(procmaskfile, start=outputpath),
504
+ os.path.relpath(runoptionsfile, start=outputpath),
505
+ os.path.relpath(lagtcgeneratorfile, start=outputpath),
506
+ ]
507
+
508
+ bidsbasedict = {
509
+ "RawSources": rawsources,
510
+ "Units": "arbitrary",
511
+ "CommandLineArgs": thecommandline,
512
+ }
513
+
514
+ # windowed delay deviation estimation
515
+ lagstouse_valid = lagtimes_valid
516
+
517
+ # find the robust range of the static delays
518
+ (
519
+ pct02,
520
+ pct98,
521
+ ) = tide_stats.getfracvals(lagstouse_valid, [0.02, 0.98], debug=args.debug)
522
+ if args.lagmin == -999:
523
+ args.lagmin = np.round(pct02 / args.trainstep, 0) * args.trainstep
524
+ if args.lagmax == -999:
525
+ args.lagmax = np.round(pct98 / args.trainstep, 0) * args.trainstep
526
+
527
+ print("\n\nWindowed delay estimation")
528
+ TimingLGR.info("Windowed delay estimation start")
529
+ LGR.info("\n\nWindowed delay estimation")
530
+
531
+ if args.windelayoffsetgausssigma < 0.0:
532
+ # set gausssigma automatically
533
+ args.windelayoffsetgausssigma = np.mean([xdim, ydim, slicedim]) / 2.0
534
+
535
+ wintrs = int(np.round(args.windowsize / fmritr, 0))
536
+ wintrs += wintrs % 2
537
+ winskip = wintrs // 2
538
+ numtrs = fmri_data_valid.shape[1]
539
+ numwins = (numtrs // winskip) - 2
540
+ winspace = winskip * fmritr
541
+ winwidth = wintrs * fmritr
542
+
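A worked illustration of the window bookkeeping above, not part of the packaged file: the TR of 1.5 s and the 300-TR run length are assumed values chosen only to make the arithmetic concrete.

import numpy as np

# Assumed example values: TR = 1.5 s, default 30.0 s window, 300-TR acquisition.
fmritr, windowsize, numtrs = 1.5, 30.0, 300
wintrs = int(np.round(windowsize / fmritr, 0))  # 20 TRs per window
wintrs += wintrs % 2                            # still 20 (already even)
winskip = wintrs // 2                           # 10: windows advance by half a window
numwins = (numtrs // winskip) - 2               # 28 analysis windows
winspace = winskip * fmritr                     # 15.0 s between window starts
winwidth = wintrs * fmritr                      # 30.0 s window width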
543
+ # make a highpass filter
544
+ if args.hpf:
545
+ hpfcutoff = 1.0 / winwidth
546
+ thehpf = tide_filt.NoncausalFilter(
547
+ "arb",
548
+ transferfunc="trapezoidal",
549
+ padtime=30.0,
550
+ padtype="reflect",
551
+ )
552
+ thehpf.setfreqs(hpfcutoff * 0.95, hpfcutoff, 0.15, 0.15)
553
+
554
+ # make a filtered lagtc generator if necessary
555
+ if args.hpf:
556
+ reference_x, reference_y, dummy, dummy, genlagsamplerate = genlagtc.getdata()
557
+ genlagtc = tide_resample.FastResampler(
558
+ reference_x,
559
+ thehpf.apply(genlagsamplerate, reference_y),
560
+ padtime=thepadtime,
561
+ )
562
+ genlagtc.save(f"{outputname}_desc-hpflagtcgenerator_timeseries")
563
+
564
+ # and filter the data if necessary
565
+ if args.hpf:
566
+ Fs = 1.0 / fmritr
567
+ print("highpass filtering fmri data")
568
+ themean = fmri_data_valid.mean(axis=1)
569
+ for vox in range(fmri_data_valid.shape[0]):
570
+ fmri_data_valid[vox, :] = thehpf.apply(Fs, fmri_data_valid[vox, :]) + themean[vox]
571
+ if args.focaldebug:
572
+ # dump the filtered fmri input file
573
+ theheader = copy.deepcopy(fmri_header)
574
+ theheader["dim"][4] = validtimepoints
575
+ theheader["pixdim"][4] = fmritr
576
+
577
+ maplist = [
578
+ (
579
+ fmri_data_valid,
580
+ "hpfinputdata",
581
+ "bold",
582
+ None,
583
+ "fMRI data after highpass filtering",
584
+ ),
585
+ ]
586
+ tide_io.savemaplist(
587
+ outputname,
588
+ maplist,
589
+ validvoxels,
590
+ (xsize, ysize, numslices, validtimepoints),
591
+ theheader,
592
+ bidsbasedict,
593
+ textio=therunoptions["textio"],
594
+ fileiscifti=False,
595
+ rt_floattype=rt_floattype,
596
+ cifti_hdr=None,
597
+ )
598
+
599
+ # allocate destination arrays
600
+ internalwinspaceshape = (numvalidspatiallocs, numwins)
601
+ internalwinspaceshapederivs = (
602
+ numvalidspatiallocs,
603
+ 2,
604
+ numwins,
605
+ )
606
+ internalwinfmrishape = (numvalidspatiallocs, wintrs)
607
+ if args.debug:
608
+ print(f"window space shape = {internalwinspaceshape}")
609
+ print(f"internalwindowfmrishape shape = {internalwinfmrishape}")
610
+
611
+ windowedregressderivratios = np.zeros(internalwinspaceshape, dtype=float)
612
+ windowedregressrvalues = np.zeros(internalwinspaceshape, dtype=float)
613
+ windowedmedfiltregressderivratios = np.zeros(internalwinspaceshape, dtype=float)
614
+ windowedfilteredregressderivratios = np.zeros(internalwinspaceshape, dtype=float)
615
+ windoweddelayoffset = np.zeros(internalwinspaceshape, dtype=float)
616
+ windowedclosestoffset = np.zeros(internalwinspaceshape, dtype=float)
617
+ if usesharedmem:
618
+ if args.debug:
619
+ print("allocating shared memory")
620
+ winsLFOfitmean, winsLFOfitmean_shm = tide_util.allocshared(
621
+ internalwinspaceshape, rt_outfloatset
622
+ )
623
+ winrvalue, winrvalue_shm = tide_util.allocshared(internalwinspaceshape, rt_outfloatset)
624
+ winr2value, winr2value_shm = tide_util.allocshared(internalwinspaceshape, rt_outfloatset)
625
+ winfitNorm, winfitNorm_shm = tide_util.allocshared(
626
+ internalwinspaceshapederivs, rt_outfloatset
627
+ )
628
+ winfitcoeff, winitcoeff_shm = tide_util.allocshared(
629
+ internalwinspaceshapederivs, rt_outfloatset
630
+ )
631
+ winmovingsignal, winmovingsignal_shm = tide_util.allocshared(
632
+ internalwinfmrishape, rt_outfloatset
633
+ )
634
+ winlagtc, winlagtc_shm = tide_util.allocshared(internalwinfmrishape, rt_floatset)
635
+ winfiltereddata, winfiltereddata_shm = tide_util.allocshared(
636
+ internalwinfmrishape, rt_outfloatset
637
+ )
638
+ else:
639
+ if args.debug:
640
+ print("allocating memory")
641
+ winsLFOfitmean = np.zeros(internalwinspaceshape, dtype=rt_outfloattype)
642
+ winrvalue = np.zeros(internalwinspaceshape, dtype=rt_outfloattype)
643
+ winr2value = np.zeros(internalwinspaceshape, dtype=rt_outfloattype)
644
+ winfitNorm = np.zeros(internalwinspaceshapederivs, dtype=rt_outfloattype)
645
+ winfitcoeff = np.zeros(internalwinspaceshapederivs, dtype=rt_outfloattype)
646
+ winmovingsignal = np.zeros(internalwinfmrishape, dtype=rt_outfloattype)
647
+ winlagtc = np.zeros(internalwinfmrishape, dtype=rt_floattype)
648
+ winfiltereddata = np.zeros(internalwinfmrishape, dtype=rt_outfloattype)
649
+ if args.debug:
650
+ print(f"wintrs={wintrs}, winskip={winskip}, numtrs={numtrs}, numwins={numwins}")
651
+ thewindowprocoptions = therunoptions
652
+ if args.verbose:
653
+ thewindowprocoptions["showprogressbar"] = True
654
+ else:
655
+ thewindowprocoptions["showprogressbar"] = False
656
+ if args.focaldebug:
657
+ thewindowprocoptions["saveminimumsLFOfiltfiles"] = True
658
+ winoutputlevel = "max"
659
+ else:
660
+ thewindowprocoptions["saveminimumsLFOfiltfiles"] = False
661
+ winoutputlevel = "min"
662
+
663
+ # Now get the derivative ratios for the individual windows
664
+ print("Finding derivative ratios:")
665
+ for thewin in range(numwins):
666
+ print(f"\tProcessing window {thewin + 1} of {numwins}")
667
+ starttr = thewin * winskip
668
+ endtr = starttr + wintrs
669
+ winlabel = f"_win-{str(thewin + 1).zfill(3)}"
670
+ if args.verbose:
671
+ thisLGR = LGR
672
+ thisTimingLGR = TimingLGR
673
+ else:
674
+ thisLGR = None
675
+ thisTimingLGR = None
676
+
677
+ windowedregressderivratios[:, thewin], windowedregressrvalues[:, thewin] = (
678
+ tide_refinedelay.getderivratios(
679
+ fmri_data_valid,
680
+ validvoxels,
681
+ initial_fmri_x,
682
+ lagstouse_valid,
683
+ corrmask_valid,
684
+ genlagtc,
685
+ mode,
686
+ outputname + winlabel,
687
+ oversamptr,
688
+ winsLFOfitmean[:, thewin],
689
+ winrvalue[:, thewin],
690
+ winr2value[:, thewin],
691
+ winfitNorm[:, :, thewin],
692
+ winfitcoeff[:, :, thewin],
693
+ winmovingsignal,
694
+ winlagtc,
695
+ winfiltereddata,
696
+ thisLGR,
697
+ thisTimingLGR,
698
+ thewindowprocoptions,
699
+ regressderivs=1,
700
+ starttr=starttr,
701
+ endtr=endtr,
702
+ debug=args.debug,
703
+ )
704
+ )
705
+ if args.focaldebug:
706
+ theheader = copy.deepcopy(fmri_header)
707
+ theheader["dim"][4] = wintrs
708
+ theheader["toffset"] = winwidth / 2.0
709
+ maplist = [
710
+ (
711
+ winlagtc,
712
+ "windowedlagtcs",
713
+ "bold",
714
+ None,
715
+ f"Lagtcs in each {winspace} second window",
716
+ ),
717
+ ]
718
+ tide_io.savemaplist(
719
+ outputname + winlabel,
720
+ maplist,
721
+ validvoxels,
722
+ (xsize, ysize, numslices, wintrs),
723
+ theheader,
724
+ bidsbasedict,
725
+ debug=args.debug,
726
+ )
727
+
728
+ # Filter the derivative ratios
729
+ print("Filtering derivative ratios:")
730
+ for thewin in range(numwins):
731
+ print(f"\tProcessing window {thewin + 1} of {numwins}")
732
+ (
733
+ windowedmedfiltregressderivratios[:, thewin],
734
+ windowedfilteredregressderivratios[:, thewin],
735
+ windoweddelayoffsetMAD,
736
+ ) = tide_refinedelay.filterderivratios(
737
+ windowedregressderivratios[:, thewin],
738
+ (xsize, ysize, numslices),
739
+ validvoxels,
740
+ (xdim, ydim, slicedim),
741
+ gausssigma=args.windelayoffsetgausssigma,
742
+ patchthresh=args.delaypatchthresh,
743
+ fileiscifti=False,
744
+ textio=False,
745
+ rt_floattype=rt_floattype,
746
+ verbose=args.verbose,
747
+ debug=args.debug,
748
+ )
749
+
750
+ # Train the ratio offsets
751
+ print("Training ratio offsets:")
752
+ for thewin in range(numwins):
753
+ print(f"\tProcessing window {thewin + 1} of {numwins}")
754
+ starttr = thewin * winskip
755
+ endtr = starttr + wintrs
756
+ winlabel = f"_win-{str(thewin + 1).zfill(3)}"
757
+ # find the mapping of glm ratios to delays
758
+ tide_refinedelay.trainratiotooffset(
759
+ genlagtc,
760
+ initial_fmri_x[starttr:endtr],
761
+ outputname + winlabel,
762
+ winoutputlevel,
763
+ trainlagmin=args.lagmin,
764
+ trainlagmax=args.lagmax,
765
+ trainlagstep=args.trainstep,
766
+ mindelay=args.mindelay,
767
+ maxdelay=args.maxdelay,
768
+ numpoints=args.numpoints,
769
+ verbose=args.verbose,
770
+ debug=args.focaldebug,
771
+ )
772
+ TimingLGR.info("Refinement calibration end")
773
+
774
+ # now calculate the delay offsets
775
+ print("Calculating delay offsets:")
776
+ for thewin in range(numwins):
777
+ print(f"\tProcessing window {thewin + 1} of {numwins}")
778
+ winlabel = f"_win-{str(thewin + 1).zfill(3)}"
779
+ TimingLGR.info("Calculating delay offsets")
780
+ if args.debug:
781
+ print(
782
+ f"calculating delayoffsets for {windowedfilteredregressderivratios.shape[0]} voxels"
783
+ )
784
+ for i in range(windowedfilteredregressderivratios.shape[0]):
785
+ (windoweddelayoffset[i, thewin], windowedclosestoffset[i, thewin]) = (
786
+ tide_refinedelay.ratiotodelay(
787
+ windowedfilteredregressderivratios[i, thewin],
788
+ offset=lagstouse_valid[i],
789
+ debug=args.focaldebug,
790
+ )
791
+ )
792
+ namesuffix = "_desc-delayoffset_hist"
793
+ tide_stats.makeandsavehistogram(
794
+ windoweddelayoffset[:, thewin],
795
+ therunoptions["histlen"],
796
+ 1,
797
+ outputname + winlabel + namesuffix,
798
+ displaytitle="Histogram of delay offsets calculated from GLM",
799
+ dictvarname="delayoffsethist",
800
+ thedict=None,
801
+ )
802
+
803
+ # now see if there are common timecourses in the delay offsets
804
+ themean = np.mean(windoweddelayoffset, axis=1)
805
+ thevar = np.var(windoweddelayoffset, axis=1)
806
+ scaledvoxels = windoweddelayoffset * 0.0
807
+ for vox in range(0, windoweddelayoffset.shape[0]):
808
+ scaledvoxels[vox, :] = windoweddelayoffset[vox, :] - themean[vox]
809
+ if thevar[vox] > 0.0:
810
+ scaledvoxels[vox, :] = scaledvoxels[vox, :] / thevar[vox]
811
+ if args.systemicfittype == "pca":
812
+ if args.pcacomponents < 0.0:
813
+ pcacomponents = "mle"
814
+ elif args.pcacomponents >= 1.0:
815
+ pcacomponents = int(np.round(args.pcacomponents))
816
+ elif args.pcacomponents == 0.0:
817
+ print("0.0 is not an allowed value for pcacomponents")
818
+ sys.exit()
819
+ else:
820
+ pcacomponents = args.pcacomponents
821
+
822
+ # use the method of "A novel perspective to calibrate temporal delays in cerebrovascular reactivity
823
+ # using hypercapnic and hyperoxic respiratory challenges". NeuroImage 187, 154–165 (2019).
824
+ print(f"performing pca refinement with pcacomponents set to {pcacomponents}")
825
+ try:
826
+ thefit = PCA(n_components=pcacomponents).fit(scaledvoxels)
827
+ except ValueError:
828
+ if pcacomponents == "mle":
829
+ print("mle estimation failed - falling back to pcacomponents=0.8")
830
+ thefit = PCA(n_components=0.8).fit(scaledvoxels)
831
+ else:
832
+ print("unhandled math exception in PCA refinement - exiting")
833
+ sys.exit()
834
+ print(
835
+ f"Using {len(thefit.components_)} component(s), accounting for "
836
+ + f"{100.0 * np.cumsum(thefit.explained_variance_ratio_)[len(thefit.components_) - 1]}% of the variance"
837
+ )
838
+ reduceddata = thefit.inverse_transform(thefit.transform(scaledvoxels))
839
+ # unscale the PCA cleaned data
840
+ for vox in range(0, windoweddelayoffset.shape[0]):
841
+ reduceddata[vox, :] = reduceddata[vox, :] * thevar[vox] + themean[vox]
842
+ if args.debug:
843
+ print("reduceddata.shape =", reduceddata.shape)
844
+ # pcadata = np.mean(reduceddata, axis=0)
845
+ pcadata = thefit.components_[0]
846
+ averagedata = np.mean(windoweddelayoffset, axis=0)
847
+ thepxcorr = pearsonr(averagedata, pcadata)[0]
848
+ LGR.info(f"pca/avg correlation = {thepxcorr}")
849
+ if thepxcorr > 0.0:
850
+ systemiccomp = 1.0 * pcadata
851
+ else:
852
+ systemiccomp = -1.0 * pcadata
853
+ thecomponents = thefit.components_[:]
854
+ tide_io.writebidstsv(
855
+ f"{outputname}_desc-pcacomponents_timeseries",
856
+ thecomponents,
857
+ 1.0 / winspace,
858
+ )
859
+ tide_io.writevec(
860
+ 100.0 * thefit.explained_variance_ratio_,
861
+ f"{outputname}_desc-pcaexplainedvarianceratio_info.tsv",
862
+ )
863
+ elif args.systemicfittype == "mean":
864
+ systemiccomp = np.mean(scaledvoxels, axis=0)
865
+ reduceddata = None
866
+ else:
867
+ print("unhandled systemic filter type")
868
+ sys.exit(0)
869
+ tide_io.writebidstsv(
870
+ f"{outputname}_desc-systemiccomponent_timeseries",
871
+ systemiccomp,
872
+ 1.0 / winspace,
873
+ )
874
+
875
+ doregress = False
876
+ if doregress:
877
+ if usesharedmem:
878
+ if args.debug:
879
+ print("allocating shared memory")
880
+ systemicsLFOfitmean, systemicsLFOfitmean_shm = tide_util.allocshared(
881
+ internalwinspaceshape, rt_outfloatset
882
+ )
883
+ systemicrvalue, systemicrvalue_shm = tide_util.allocshared(
884
+ internalwinspaceshape, rt_outfloatset
885
+ )
886
+ systemicr2value, systemicr2value_shm = tide_util.allocshared(
887
+ internalwinspaceshape, rt_outfloatset
888
+ )
889
+ systemicfitNorm, systemicfitNorm_shm = tide_util.allocshared(
890
+ internalwinspaceshapederivs, rt_outfloatset
891
+ )
892
+ systemicfitcoeff, systemicitcoeff_shm = tide_util.allocshared(
893
+ internalwinspaceshapederivs, rt_outfloatset
894
+ )
895
+ systemicmovingsignal, systemicmovingsignal_shm = tide_util.allocshared(
896
+ internalwinspaceshape, rt_outfloatset
897
+ )
898
+ systemiclagtc, systemiclagtc_shm = tide_util.allocshared(
899
+ internalwinspaceshape, rt_floatset
900
+ )
901
+ systemicfiltereddata, systemicfiltereddata_shm = tide_util.allocshared(
902
+ internalwinspaceshape, rt_outfloatset
903
+ )
904
+ else:
905
+ if args.debug:
906
+ print("allocating memory")
907
+ systemicsLFOfitmean = np.zeros(internalwinspaceshape, dtype=rt_outfloattype)
908
+ systemicrvalue = np.zeros(internalwinspaceshape, dtype=rt_outfloattype)
909
+ systemicr2value = np.zeros(internalwinspaceshape, dtype=rt_outfloattype)
910
+ systemicfitNorm = np.zeros(internalwinspaceshapederivs, dtype=rt_outfloattype)
911
+ systemicfitcoeff = np.zeros(internalwinspaceshapederivs, dtype=rt_outfloattype)
912
+ systemicmovingsignal = np.zeros(internalwinspaceshape, dtype=rt_outfloattype)
913
+ systemiclagtc = np.zeros(internalwinspaceshape, dtype=rt_floattype)
914
+ systemicfiltereddata = np.zeros(internalwinspaceshape, dtype=rt_outfloattype)
915
+
916
+ windowlocs = np.linspace(0.0, winspace * numwins, num=numwins, endpoint=False) + skiptime
917
+ voxelsprocessed_regressionfilt, regressorset, evset = tide_regressfrommaps.regressfrommaps(
918
+ windoweddelayoffset,
919
+ validvoxels,
920
+ windowlocs,
921
+ 0.0 * lagstouse_valid,
922
+ corrmask_valid,
923
+ genlagtc,
924
+ mode,
925
+ outputname,
926
+ oversamptr,
927
+ systemicsLFOfitmean,
928
+ systemicrvalue,
929
+ systemicr2value,
930
+ systemicfitNorm[:, :],
931
+ systemicfitcoeff[:, :],
932
+ systemicmovingsignal,
933
+ systemiclagtc,
934
+ systemicfiltereddata,
935
+ LGR,
936
+ TimingLGR,
937
+ threshval,
938
+ False,
939
+ nprocs_makelaggedtcs=args.nprocs,
940
+ nprocs_regressionfilt=args.nprocs,
941
+ regressderivs=1,
942
+ showprogressbar=args.showprogressbar,
943
+ debug=args.debug,
944
+ )
945
+
946
+ theheader = copy.deepcopy(fmri_header)
947
+ theheader["dim"][4] = numwins
948
+ theheader["pixdim"][4] = winspace
949
+ theheader["toffset"] = winwidth / 2.0
950
+ maplist = [
951
+ (
952
+ windoweddelayoffset,
953
+ "windoweddelayoffset",
954
+ "info",
955
+ None,
956
+ f"Delay offsets in each {winspace} second window",
957
+ ),
958
+ (
959
+ windowedclosestoffset,
960
+ "windowedclosestoffset",
961
+ "info",
962
+ None,
963
+ f"Closest delay offsets in each {winspace} second window",
964
+ ),
965
+ (
966
+ np.square(windowedregressrvalues),
967
+ "windowedregressr2values",
968
+ "info",
969
+ None,
970
+ f"R2 values for regression in each {winspace} second window",
971
+ ),
972
+ ]
973
+ if doregress:
974
+ maplist += [
975
+ (
976
+ systemicfiltereddata,
977
+ "systemicfiltereddata",
978
+ "info",
979
+ None,
980
+ f"Systemic filtered delay offsets in each {winspace} second window",
981
+ ),
982
+ (
983
+ np.square(systemicr2value),
984
+ "systemicr2value",
985
+ "info",
986
+ None,
987
+ f"R2 values for systemic regression in each {winspace} second window",
988
+ ),
989
+ ]
990
+ if args.focaldebug:
991
+ maplist += [
992
+ (
993
+ systemicsLFOfitmean,
994
+ "systemicsLFOfitmean",
995
+ "info",
996
+ None,
997
+ f"Constant coefficient for systemic filter",
998
+ ),
999
+ (
1000
+ systemicfitcoeff[:, 0],
1001
+ "systemiccoffEV0",
1002
+ "info",
1003
+ None,
1004
+ f"Coefficient 0 for systemic filter",
1005
+ ),
1006
+ (
1007
+ systemicfitcoeff[:, 1],
1008
+ "systemiccoffEV1",
1009
+ "info",
1010
+ None,
1011
+ f"Coefficient 1 for systemic filter",
1012
+ ),
1013
+ ]
1014
+ if reduceddata is not None:
1015
+ maplist += (
1016
+ (
1017
+ reduceddata,
1018
+ "windoweddelayoffsetPCA",
1019
+ "info",
1020
+ None,
1021
+ f"PCA cleaned delay offsets in each {winspace} second window",
1022
+ ),
1023
+ )
1024
+
1025
+ """(
1026
+ filtwindoweddelayoffset,
1027
+ "filtwindoweddelayoffset",
1028
+ "info",
1029
+ None,
1030
+ f"Delay offsets in each {winspace} second window with the systemic component removed",
1031
+ ),"""
1032
+ if args.focaldebug:
1033
+ maplist += [
1034
+ (
1035
+ windowedmedfiltregressderivratios,
1036
+ "windowedmedfiltregressderivratios",
1037
+ "info",
1038
+ None,
1039
+ f"Median filtered derivative ratios in each {winspace} second window",
1040
+ ),
1041
+ (
1042
+ windowedfilteredregressderivratios,
1043
+ "windowedfilteredregressderivratios",
1044
+ "info",
1045
+ None,
1046
+ f"Filtered derivative ratios in each {winspace} second window",
1047
+ ),
1048
+ (
1049
+ windowedregressderivratios,
1050
+ "windowedregressderivratios",
1051
+ "info",
1052
+ None,
1053
+ f"Raw derivative ratios in each {winspace} second window",
1054
+ ),
1055
+ ]
1056
+ tide_io.savemaplist(
1057
+ outputname,
1058
+ maplist,
1059
+ validvoxels,
1060
+ (xsize, ysize, numslices, numwins),
1061
+ theheader,
1062
+ bidsbasedict,
1063
+ debug=args.debug,
1064
+ )
1065
+ #########################
1066
+ # End window processing
1067
+ #########################
1068
+
1069
+ # save outputs
1070
+ TimingLGR.info("Starting output save")
1071
+ bidsdict = bidsbasedict.copy()
1072
+
1073
+ # update the runoptions before writing them back out
1074
+ print("writing runoptions")
1075
+ therunoptions["delayvar_runtime"] = time.strftime(
1076
+ "%a, %d %b %Y %H:%M:%S %Z", time.localtime(time.time())
1077
+ )
1078
+
1079
+ # clean up shared memory
1080
+ if usesharedmem:
1081
+ tide_util.cleanup_shm(winsLFOfitmean_shm)
1082
+ tide_util.cleanup_shm(winrvalue_shm)
1083
+ tide_util.cleanup_shm(winr2value_shm)
1084
+ tide_util.cleanup_shm(winfitNorm_shm)
1085
+ tide_util.cleanup_shm(winitcoeff_shm)
1086
+ tide_util.cleanup_shm(winmovingsignal_shm)
1087
+ tide_util.cleanup_shm(winlagtc_shm)
1088
+ tide_util.cleanup_shm(winfiltereddata_shm)
1089
+ if doregress:
1090
+ tide_util.cleanup_shm(systemicsLFOfitmean_shm)
1091
+ tide_util.cleanup_shm(systemicrvalue_shm)
1092
+ tide_util.cleanup_shm(systemicr2value_shm)
1093
+ tide_util.cleanup_shm(systemicfitNorm_shm)
1094
+ tide_util.cleanup_shm(systemicitcoeff_shm)
1095
+ tide_util.cleanup_shm(systemicmovingsignal_shm)
1096
+ tide_util.cleanup_shm(systemiclagtc_shm)
1097
+ tide_util.cleanup_shm(systemicfiltereddata_shm)
1098
+ TimingLGR.info("Shared memory cleanup complete")
1099
+
1100
+ # shut down logging
1101
+ TimingLGR.info("Done")
1102
+ logging.shutdown()
1103
+
1104
+ # reformat timing information and delete the unformatted version
1105
+ timingdata, therunoptions["totalretroruntime"] = tide_util.proctiminglogfile(
1106
+ f"{outputname}_retroruntimings.tsv"
1107
+ )
1108
+ tide_io.writevec(
1109
+ timingdata,
1110
+ f"{outputname}_desc-formattedretroruntimings_info.tsv",
1111
+ )
1112
+ Path(f"{outputname}_retroruntimings.tsv").unlink(missing_ok=True)
1113
+
1114
+ # save the modified runoptions file
1115
+ tide_io.writedicttojson(therunoptions, f"{outputname}_desc-runoptions_info.json")
1116
+
1117
+ # shut down the loggers
1118
+ for thelogger in [LGR, ErrorLGR, TimingLGR]:
1119
+ handlers = thelogger.handlers[:]
1120
+ for handler in handlers:
1121
+ thelogger.removeHandler(handler)
1122
+ handler.close()
1123
+
1124
+ # delete the canary file
1125
+ Path(f"{outputname}_DELAYVARISRUNNING.txt").unlink()
1126
+
1127
+ # create the finished file
1128
+ Path(f"{outputname}_DELAYVARDONE.txt").touch()
1129
+
1130
+
1131
+ def process_args(inputargs=None):
1132
+ """
1133
+ Compile arguments for delayvar workflow.
1134
+ """
1135
+ args, argstowrite = pf.setargs(_get_parser, inputargs=inputargs)
1136
+ return args
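A minimal sketch of how the new delayvar workflow might be driven programmatically once this wheel is installed, using only the process_args() and delayvar() entry points shown in the listing above. The file names are placeholders, and the sketch assumes the datafileroot points at the outputs of a completed rapidtide run (the masks, maxtime map, lagtc generator, and runoptions files read by the workflow).

from rapidtide.workflows import delayvar as tide_delayvar

# Placeholder inputs: a 4D fMRI NIfTI file and the output root of a prior rapidtide run.
theargs = tide_delayvar.process_args(
    inputargs=[
        "sub-01_task-rest_bold.nii.gz",
        "sub-01_rapidtide",
        "--nprocs", "4",          # use 4 worker processes
        "--windowsize", "45.0",   # widen the segmented delay analysis window (seconds)
    ]
)
tide_delayvar.delayvar(theargs)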