rapidtide: 3.0a12-py3-none-any.whl → 3.0a13-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (62)
  1. cloud/gmscalc-HCPYA +1 -1
  2. cloud/rapidtide-HCPYA +3 -3
  3. rapidtide/Colortables.py +10 -10
  4. rapidtide/DerivativeDelay.py +211 -0
  5. rapidtide/{Refiner.py → RegressorRefiner.py} +1 -1
  6. rapidtide/__init__.py +2 -1
  7. rapidtide/_version.py +1 -1
  8. rapidtide/data/examples/src/test_mlregressallt.py +32 -17
  9. rapidtide/data/examples/src/testalign +1 -1
  10. rapidtide/data/examples/src/testboth +1 -1
  11. rapidtide/data/examples/src/testcifti +11 -0
  12. rapidtide/data/examples/src/testdelayvar +13 -0
  13. rapidtide/data/examples/src/testfmri +1 -0
  14. rapidtide/data/examples/src/testglmfilt +8 -6
  15. rapidtide/data/examples/src/testhappy +1 -1
  16. rapidtide/data/examples/src/testnewrefine +11 -11
  17. rapidtide/data/examples/src/testnoiseamp +2 -2
  18. rapidtide/data/examples/src/testretro +16 -7
  19. rapidtide/data/examples/src/testretrolagtcs +1 -1
  20. rapidtide/dlfilter.py +0 -1
  21. rapidtide/fit.py +41 -9
  22. rapidtide/happy_supportfuncs.py +5 -0
  23. rapidtide/io.py +13 -2
  24. rapidtide/{glmpass.py → linfitfiltpass.py} +21 -19
  25. rapidtide/refinedelay.py +96 -58
  26. rapidtide/resample.py +3 -0
  27. rapidtide/scripts/{retroglm.py → delayvar.py} +2 -2
  28. rapidtide/scripts/{glmfilt.py → linfitfilt.py} +2 -2
  29. rapidtide/scripts/retroregress.py +28 -0
  30. rapidtide/scripts/stupidramtricks.py +9 -7
  31. rapidtide/simfuncfit.py +1 -1
  32. rapidtide/tests/cleanposttest +21 -0
  33. rapidtide/tests/test_delayestimation.py +3 -3
  34. rapidtide/tests/test_fastresampler.py +1 -2
  35. rapidtide/tests/test_fullrunhappy_v1.py +1 -1
  36. rapidtide/tests/test_fullrunhappy_v2.py +1 -1
  37. rapidtide/tests/test_fullrunrapidtide_v1.py +2 -2
  38. rapidtide/tests/test_fullrunrapidtide_v3.py +1 -1
  39. rapidtide/tests/test_fullrunrapidtide_v5.py +1 -1
  40. rapidtide/tests/test_fullrunrapidtide_v6.py +11 -11
  41. rapidtide/tests/{test_glmpass.py → test_linfitfiltpass.py} +9 -9
  42. rapidtide/tests/test_motionregress.py +3 -3
  43. rapidtide/tests/test_refinedelay.py +12 -12
  44. rapidtide/tidepoolTemplate_alt_qt6.py +172 -45
  45. rapidtide/tidepoolTemplate_big_qt6.py +196 -53
  46. rapidtide/tidepoolTemplate_qt6.py +150 -39
  47. rapidtide/workflows/delayvar.py +1048 -0
  48. rapidtide/workflows/happy.py +37 -11
  49. rapidtide/workflows/happy_parser.py +4 -4
  50. rapidtide/workflows/{glmfilt.py → linfitfilt.py} +4 -4
  51. rapidtide/workflows/rapidtide.py +235 -171
  52. rapidtide/workflows/rapidtide_parser.py +103 -86
  53. rapidtide/workflows/{glmfrommaps.py → regressfrommaps.py} +28 -26
  54. rapidtide/workflows/retrolagtcs.py +12 -12
  55. rapidtide/workflows/{retroglm.py → retroregress.py} +158 -141
  56. {rapidtide-3.0a12.dist-info → rapidtide-3.0a13.dist-info}/METADATA +3 -2
  57. {rapidtide-3.0a12.dist-info → rapidtide-3.0a13.dist-info}/RECORD +61 -56
  58. {rapidtide-3.0a12.dist-info → rapidtide-3.0a13.dist-info}/WHEEL +1 -1
  59. {rapidtide-3.0a12.dist-info → rapidtide-3.0a13.dist-info}/entry_points.txt +3 -2
  60. rapidtide/data/examples/src/testoutputsize +0 -45
  61. {rapidtide-3.0a12.dist-info → rapidtide-3.0a13.dist-info/licenses}/LICENSE +0 -0
  62. {rapidtide-3.0a12.dist-info → rapidtide-3.0a13.dist-info}/top_level.txt +0 -0
rapidtide/workflows/delayvar.py (new file)
@@ -0,0 +1,1048 @@
1
+ #!/usr/bin/env python
2
+ # -*- coding: utf-8 -*-
3
+ #
4
+ # Copyright 2016-2024 Blaise Frederick
5
+ #
6
+ # Licensed under the Apache License, Version 2.0 (the "License");
7
+ # you may not use this file except in compliance with the License.
8
+ # You may obtain a copy of the License at
9
+ #
10
+ # http://www.apache.org/licenses/LICENSE-2.0
11
+ #
12
+ # Unless required by applicable law or agreed to in writing, software
13
+ # distributed under the License is distributed on an "AS IS" BASIS,
14
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15
+ # See the License for the specific language governing permissions and
16
+ # limitations under the License.
17
+ #
18
+ #
19
+ import argparse
20
+ import copy
21
+ import logging
22
+ import os
23
+ import sys
24
+ import time
25
+ from pathlib import Path
26
+
27
+ import numpy as np
28
+ from scipy.stats import pearsonr
29
+ from sklearn.decomposition import PCA
30
+
31
+ import rapidtide.filter as tide_filt
32
+ import rapidtide.io as tide_io
33
+ import rapidtide.multiproc as tide_multiproc
34
+ import rapidtide.refinedelay as tide_refinedelay
35
+ import rapidtide.resample as tide_resample
36
+ import rapidtide.stats as tide_stats
37
+ import rapidtide.util as tide_util
38
+ import rapidtide.workflows.parser_funcs as pf
39
+ import rapidtide.workflows.regressfrommaps as tide_regressfrommaps
40
+
41
+ from .utils import setup_logger
42
+
43
+
44
+ # Create a sentinel.
45
+ # from https://stackoverflow.com/questions/58594956/find-out-which-arguments-were-passed-explicitly-in-argparse
46
+ class _Sentinel:
47
+ pass
48
+
49
+
50
+ sentinel = _Sentinel()
51
+ LGR = logging.getLogger(__name__)
52
+ ErrorLGR = logging.getLogger("ERROR")
53
+ TimingLGR = logging.getLogger("TIMING")
54
+
55
+ DEFAULT_REGRESSIONFILTDERIVS = 0
56
+ DEFAULT_PATCHTHRESH = 3.0
57
+ DEFAULT_REFINEDELAYMINDELAY = -2.5
58
+ DEFAULT_REFINEDELAYMAXDELAY = 2.5
59
+ DEFAULT_REFINEDELAYNUMPOINTS = 201
60
+ DEFAULT_DELAYOFFSETSPATIALFILT = -1
61
+ DEFAULT_WINDOWSIZE = 30.0
62
+ DEFAULT_SYSTEMICFITTYPE = "pca"
63
+ DEFAULT_PCACOMPONENTS = 1
64
+ DEFAULT_TRAINWIDTH = 0.0
65
+ DEFAULT_TRAINSTEP = 0.5
66
+
67
+
68
+ def _get_parser():
69
+ """
70
+ Argument parser for delayvar
71
+ """
72
+ parser = argparse.ArgumentParser(
73
+ prog="delayvar",
74
+ description="Calculate variation in delay time over the course of an acquisition.",
75
+ allow_abbrev=False,
76
+ )
77
+
78
+ # Required arguments
79
+ parser.add_argument(
80
+ "fmrifile",
81
+ type=lambda x: pf.is_valid_file(parser, x),
82
+ help="The name of 4D nifti fmri file to filter.",
83
+ )
84
+ parser.add_argument(
85
+ "datafileroot",
86
+ type=str,
87
+ help="The root name of the previously run rapidtide dataset (everything up to but not including the underscore.)",
88
+ )
89
+ parser.add_argument(
90
+ "--alternateoutput",
91
+ dest="alternateoutput",
92
+ type=str,
93
+ help="Alternate output root (if not specified, will use the same root as the previous dataset).",
94
+ default=None,
95
+ )
96
+ parser.add_argument(
97
+ "--nprocs",
98
+ dest="nprocs",
99
+ action="store",
100
+ type=int,
101
+ metavar="NPROCS",
102
+ help=(
103
+ "Use NPROCS worker processes for multiprocessing. "
104
+ "Setting NPROCS to less than 1 sets the number of "
105
+ "worker processes to n_cpus."
106
+ ),
107
+ default=1,
108
+ )
109
+ parser.add_argument(
110
+ "--numskip",
111
+ dest="numskip",
112
+ action="store",
113
+ type=lambda x: pf.is_int(parser, x, minval=0),
114
+ metavar="NUMSKIP",
115
+ help=("Skip NUMSKIP points at the beginning of the fmri file."),
116
+ default=0,
117
+ )
118
+ parser.add_argument(
119
+ "--outputlevel",
120
+ dest="outputlevel",
121
+ action="store",
122
+ type=str,
123
+ choices=["min", "less", "normal", "more", "max"],
124
+ help=(
125
+ "The level of file output produced. 'min' produces only absolutely essential files, 'less' adds in "
126
+ "the sLFO filtered data (rather than just filter efficacy metrics), 'normal' saves what you "
127
+ "would typically want around for interactive data exploration, "
128
+ "'more' adds files that are sometimes useful, and 'max' outputs anything you might possibly want. "
129
+ "Selecting 'max' will produce ~3x your input datafile size as output. "
130
+ 'Default is "normal".'
131
+ ),
132
+ default="normal",
133
+ )
134
+ parser.add_argument(
135
+ "--noprogressbar",
136
+ dest="showprogressbar",
137
+ action="store_false",
138
+ help=("Will disable showing progress bars (helpful if stdout is going to a file)."),
139
+ default=True,
140
+ )
141
+ parser.add_argument(
142
+ "--nohpfilter",
143
+ dest="hpf",
144
+ action="store_false",
145
+ help=("Disable highpass filtering on data and regressor."),
146
+ default=True,
147
+ )
148
+ parser.add_argument(
149
+ "--trainwidth",
150
+ dest="trainwidth",
151
+ action="store",
152
+ type=float,
153
+ metavar="WIDTH",
154
+ help=(
155
+ "Train the ratio offset function over this range of central delays (in seconds). The derivative "
156
+ "ratio calculation only works over a narrow range, so if the static offset is large, "
157
+ "you need to train the ratio calculation with a central delay close to that value. "
158
+ "Set negative to select the width automatically. "
159
+ f"Default is {DEFAULT_TRAINWIDTH}"
160
+ ),
161
+ default=DEFAULT_TRAINWIDTH,
162
+ )
163
+ parser.add_argument(
164
+ "--trainstep",
165
+ dest="trainstep",
166
+ action="store",
167
+ type=float,
168
+ metavar="STEP",
169
+ help=(
170
+ "Use this step size (in seconds) to span the training width. The derivative "
171
+ "ratio calculation only works over a narrow range, so if the static offset is large, "
172
+ "you need to train the ratio calculation with a central delay close to that value. "
173
+ f"Default is {DEFAULT_TRAINSTEP}"
174
+ ),
175
+ default=DEFAULT_TRAINSTEP,
176
+ )
177
+ parser.add_argument(
178
+ "--delaypatchthresh",
179
+ dest="delaypatchthresh",
180
+ action="store",
181
+ type=float,
182
+ metavar="NUMMADs",
183
+ help=(
184
+ "Maximum number of robust standard deviations to permit in the offset delay refine map. "
185
+ f"Default is {DEFAULT_PATCHTHRESH}"
186
+ ),
187
+ default=DEFAULT_PATCHTHRESH,
188
+ )
189
+ parser.add_argument(
190
+ "--systemicfittype",
191
+ dest="systemicfittype",
192
+ action="store",
193
+ type=str,
194
+ choices=[
195
+ "mean",
196
+ "pca",
197
+ ],
198
+ help=(
199
+ f"Use mean or pca to fit the systemic variation in delay offset. "
200
+ f'Default is "{DEFAULT_SYSTEMICFITTYPE}".'
201
+ ),
202
+ default=DEFAULT_SYSTEMICFITTYPE,
203
+ )
204
+ parser.add_argument(
205
+ "--pcacomponents",
206
+ metavar="NCOMP",
207
+ dest="pcacomponents",
208
+ type=float,
209
+ help="Use NCOMP components for PCA fit of delay offset.",
210
+ default=DEFAULT_PCACOMPONENTS,
211
+ )
212
+ parser.add_argument(
213
+ "--debug",
214
+ dest="debug",
215
+ action="store_true",
216
+ help=("Output lots of helpful information."),
217
+ default=False,
218
+ )
219
+ parser.add_argument(
220
+ "--focaldebug",
221
+ dest="focaldebug",
222
+ action="store_true",
223
+ help=("Output lots of helpful information on a limited subset of operations."),
224
+ default=False,
225
+ )
226
+ experimental = parser.add_argument_group(
227
+ "Experimental options (not fully tested, or not tested at all, may not work). Beware!"
228
+ )
229
+ experimental.add_argument(
230
+ "--windowsize",
231
+ dest="windowsize",
232
+ action="store",
233
+ type=lambda x: pf.is_float(parser, x, minval=10.0),
234
+ metavar="SIZE",
235
+ help=(
236
+ f"Set segmented delay analysis window size to SIZE seconds. Default is {DEFAULT_WINDOWSIZE}."
237
+ ),
238
+ default=DEFAULT_WINDOWSIZE,
239
+ )
240
+ experimental.add_argument(
241
+ "--windelayoffsetspatialfilt",
242
+ dest="windelayoffsetgausssigma",
243
+ action="store",
244
+ type=float,
245
+ metavar="GAUSSSIGMA",
246
+ help=(
247
+ "Spatially filter fMRI data prior to calculating windowed delay offsets "
248
+ "using GAUSSSIGMA in mm. Set GAUSSSIGMA negative "
249
+ "to have rapidtide set it to half the mean voxel "
250
+ "dimension (a rule of thumb for a good value)."
251
+ ),
252
+ default=DEFAULT_DELAYOFFSETSPATIALFILT,
253
+ )
254
+
255
+ return parser
256
+
257
+
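As a quick illustration of the CLI surface defined above, here is a minimal sketch of exercising this parser from Python. The file names are hypothetical, and fmrifile must point at a file that actually exists because of the is_valid_file check:

    # hypothetical invocation of the delayvar argument parser
    from rapidtide.workflows.delayvar import _get_parser

    args = _get_parser().parse_args(
        [
            "sub-01_task-rest_bold.nii.gz",   # fmrifile (hypothetical)
            "derivatives/rapidtide/sub-01",   # datafileroot (hypothetical)
            "--nprocs", "4",
            "--outputlevel", "more",
            "--windowsize", "45.0",
        ]
    )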
258
+ def delayvar(args):
259
+ # get the pid of the parent process
260
+ args.pid = os.getpid()
261
+
262
+ # specify the output name
263
+ if args.alternateoutput is None:
264
+ outputname = args.datafileroot
265
+ else:
266
+ outputname = args.alternateoutput
267
+
268
+ # start the loggers now that we know the output name
269
+ sh = logging.StreamHandler()
270
+ if args.debug:
271
+ logging.basicConfig(level=logging.DEBUG, handlers=[sh])
272
+ else:
273
+ logging.basicConfig(level=logging.INFO, handlers=[sh])
274
+ # Set up loggers for workflow
275
+ setup_logger(
276
+ logger_filename=f"{outputname}_retrolog.txt",
277
+ timing_filename=f"{outputname}_retroruntimings.tsv",
278
+ error_filename=f"{outputname}_retroerrorlog.txt",
279
+ verbose=False,
280
+ debug=args.debug,
281
+ )
282
+ TimingLGR.info("Start")
283
+ LGR.info(f"starting delayvar")
284
+
285
+ # set some global values
286
+ args.mindelay = DEFAULT_REFINEDELAYMINDELAY
287
+ args.maxdelay = DEFAULT_REFINEDELAYMAXDELAY
288
+ args.numpoints = DEFAULT_REFINEDELAYNUMPOINTS
289
+
290
+ if args.outputlevel == "min":
291
+ args.saveminimumsLFOfiltfiles = False
292
+ args.savenormalsLFOfiltfiles = False
293
+ args.savemovingsignal = False
294
+ args.saveallsLFOfiltfiles = False
295
+ elif args.outputlevel == "less":
296
+ args.saveminimumsLFOfiltfiles = True
297
+ args.savenormalsLFOfiltfiles = False
298
+ args.savemovingsignal = False
299
+ args.saveallsLFOfiltfiles = False
300
+ elif args.outputlevel == "normal":
301
+ args.saveminimumsLFOfiltfiles = True
302
+ args.savenormalsLFOfiltfiles = True
303
+ args.savemovingsignal = False
304
+ args.saveallsLFOfiltfiles = False
305
+ elif args.outputlevel == "more":
306
+ args.saveminimumsLFOfiltfiles = True
307
+ args.savenormalsLFOfiltfiles = True
308
+ args.savemovingsignal = True
309
+ args.saveallsLFOfiltfiles = False
310
+ elif args.outputlevel == "max":
311
+ args.saveminimumsLFOfiltfiles = True
312
+ args.savenormalsLFOfiltfiles = True
313
+ args.savemovingsignal = True
314
+ args.saveallsLFOfiltfiles = True
315
+ else:
316
+ print(f"illegal output level {args['outputlevel']}")
317
+ sys.exit()
318
+
319
+ thecommandline = " ".join(sys.argv[1:])
320
+
321
+ if args.nprocs < 1:
322
+ args.nprocs = tide_multiproc.maxcpus()
323
+ # don't use shared memory if there is only one process
324
+ if args.nprocs == 1:
325
+ usesharedmem = False
326
+ else:
327
+ usesharedmem = True
328
+
329
+ # read the runoptions file, update if necessary
330
+ print("reading runoptions")
331
+ runoptionsfile = f"{args.datafileroot}_desc-runoptions_info"
332
+ therunoptions = tide_io.readoptionsfile(runoptionsfile)
333
+ sublist = (
334
+ ("retroglmcompatible", "retroregresscompatible"),
335
+ ("glmthreshval", "regressfiltthreshval"),
336
+ )
337
+ therunoptions["singleproc_regressionfilt"] = False
338
+ therunoptions["nprocs_regressionfilt"] = args.nprocs
339
+ for subpair in sublist:
340
+ try:
341
+ therunoptions[subpair[1]] = therunoptions[subpair[0]]
342
+ print(f"substituting {subpair[1]} for {subpair[0]} in runoptions")
343
+ except KeyError:
344
+ pass
345
+
346
+ try:
347
+ candoretroregress = therunoptions["retroregresscompatible"]
348
+ except KeyError:
349
+ print(
350
+ f"based on {runoptionsfile}, this rapidtide dataset does not support retrospective GLM calculation"
351
+ )
352
+ sys.exit()
353
+
354
+ if therunoptions["internalprecision"] == "double":
355
+ rt_floattype = "float64"
356
+ rt_floatset = np.float64
357
+ else:
358
+ rt_floattype = "float32"
359
+ rt_floatset = np.float32
360
+
361
+ # set the output precision
362
+ if therunoptions["outputprecision"] == "double":
363
+ rt_outfloattype = "float64"
364
+ rt_outfloatset = np.float64
365
+ else:
366
+ rt_outfloattype = "float32"
367
+ rt_outfloatset = np.float32
368
+ therunoptions["saveminimumsLFOfiltfiles"] = args.saveminimumsLFOfiltfiles
369
+
370
+ # read the fmri input files
371
+ print("reading fmrifile")
372
+ fmri_input, fmri_data, fmri_header, fmri_dims, fmri_sizes = tide_io.readfromnifti(
373
+ args.fmrifile
374
+ )
375
+
376
+ # create the canary file
377
+ Path(f"{outputname}_DELAYVARISRUNNING.txt").touch()
378
+
379
+ if args.debug:
380
+ print(f"{fmri_data.shape=}")
381
+ xdim, ydim, slicedim, fmritr = tide_io.parseniftisizes(fmri_sizes)
382
+ xsize, ysize, numslices, timepoints = tide_io.parseniftidims(fmri_dims)
383
+ numspatiallocs = int(xsize) * int(ysize) * int(numslices)
384
+ fmri_data_spacebytime = fmri_data.reshape((numspatiallocs, timepoints))
385
+ if args.debug:
386
+ print(f"{fmri_data_spacebytime.shape=}")
387
+
388
+ # read the processed mask
389
+ print("reading procfit maskfile")
390
+ procmaskfile = f"{args.datafileroot}_desc-processed_mask.nii.gz"
391
+ (
392
+ procmask_input,
393
+ procmask,
394
+ procmask_header,
395
+ procmask_dims,
396
+ procmask_sizes,
397
+ ) = tide_io.readfromnifti(procmaskfile)
398
+ if not tide_io.checkspacematch(fmri_header, procmask_header):
399
+ raise ValueError("procmask dimensions do not match fmri dimensions")
400
+ procmask_spacebytime = procmask.reshape((numspatiallocs))
401
+ if args.debug:
402
+ print(f"{procmask_spacebytime.shape=}")
403
+ print(f"{tide_stats.getmasksize(procmask_spacebytime)=}")
404
+
405
+ # read the corrfit mask
406
+ print("reading corrfit maskfile")
407
+ corrmaskfile = f"{args.datafileroot}_desc-corrfit_mask.nii.gz"
408
+ (
409
+ corrmask_input,
410
+ corrmask,
411
+ corrmask_header,
412
+ corrmask_dims,
413
+ corrmask_sizes,
414
+ ) = tide_io.readfromnifti(corrmaskfile)
415
+ if not tide_io.checkspacematch(fmri_header, corrmask_header):
416
+ raise ValueError("corrmask dimensions do not match fmri dimensions")
417
+ corrmask_spacebytime = corrmask.reshape((numspatiallocs))
418
+ if args.debug:
419
+ print(f"{corrmask_spacebytime.shape=}")
420
+ print(f"{tide_stats.getmasksize(corrmask_spacebytime)=}")
421
+
422
+ print("reading lagtimes")
423
+ lagtimesfile = f"{args.datafileroot}_desc-maxtimerefined_map.nii.gz"
424
+ if not os.path.exists(lagtimesfile):
425
+ lagtimesfile = f"{args.datafileroot}_desc-maxtime_map.nii.gz"
426
+ (
427
+ lagtimes_input,
428
+ lagtimes,
429
+ lagtimes_header,
430
+ lagtimes_dims,
431
+ lagtimes_sizes,
432
+ ) = tide_io.readfromnifti(lagtimesfile)
433
+ if not tide_io.checkspacematch(fmri_header, lagtimes_header):
434
+ raise ValueError("lagtimes dimensions do not match fmri dimensions")
435
+ if args.debug:
436
+ print(f"{lagtimes.shape=}")
437
+ lagtimes_spacebytime = lagtimes.reshape((numspatiallocs))
438
+ if args.debug:
439
+ print(f"{lagtimes_spacebytime.shape=}")
440
+
441
+ startpt = args.numskip
442
+ endpt = timepoints - 1
443
+ validtimepoints = endpt - startpt + 1
444
+ skiptime = startpt * fmritr
445
+ initial_fmri_x = (
446
+ np.linspace(0.0, validtimepoints * fmritr, num=validtimepoints, endpoint=False) + skiptime
447
+ )
448
+
449
+ # read the lagtc generator file
450
+ print("reading lagtc generator")
451
+ lagtcgeneratorfile = f"{args.datafileroot}_desc-lagtcgenerator_timeseries"
452
+ thepadtime = therunoptions["padseconds"]
453
+ genlagtc = tide_resample.FastResamplerFromFile(lagtcgeneratorfile, padtime=thepadtime)
454
+
455
+ # select the voxels in the mask
456
+ print("figuring out valid voxels")
457
+ validvoxels = np.where(procmask_spacebytime > 0)[0]
458
+ numvalidspatiallocs = np.shape(validvoxels)[0]
459
+ if args.debug:
460
+ print(f"{numvalidspatiallocs=}")
461
+
462
+ # slicing to valid voxels
463
+ print("selecting valid voxels")
464
+ fmri_data_valid = fmri_data_spacebytime[validvoxels, :]
465
+ lagtimes_valid = lagtimes_spacebytime[validvoxels]
466
+ corrmask_valid = corrmask_spacebytime[validvoxels]
467
+ procmask_valid = procmask_spacebytime[validvoxels]
468
+ if args.debug:
469
+ print(f"{fmri_data_valid.shape=}")
470
+
471
+ oversampfactor = int(therunoptions["oversampfactor"])
472
+ if args.debug:
473
+ print(f"{outputname=}")
474
+ oversamptr = fmritr / oversampfactor
475
+ try:
476
+ threshval = therunoptions["regressfiltthreshval"]
477
+ except KeyError:
478
+ threshval = 0.0
479
+ therunoptions["regressfiltthreshval"] = threshval
480
+ mode = "glm"
481
+
482
+ if args.debug:
483
+ print(f"{validvoxels.shape=}")
484
+ np.savetxt(f"{outputname}_validvoxels.txt", validvoxels)
485
+
486
+ outputpath = os.path.dirname(outputname)
487
+ rawsources = [
488
+ os.path.relpath(args.fmrifile, start=outputpath),
489
+ os.path.relpath(lagtimesfile, start=outputpath),
490
+ os.path.relpath(corrmaskfile, start=outputpath),
491
+ os.path.relpath(procmaskfile, start=outputpath),
492
+ os.path.relpath(runoptionsfile, start=outputpath),
493
+ os.path.relpath(lagtcgeneratorfile, start=outputpath),
494
+ ]
495
+
496
+ bidsbasedict = {
497
+ "RawSources": rawsources,
498
+ "Units": "arbitrary",
499
+ "CommandLineArgs": thecommandline,
500
+ }
501
+
502
+ # windowed delay deviation estimation
503
+ lagstouse_valid = lagtimes_valid
504
+
505
+ print("\n\nWindowed delay estimation")
506
+ TimingLGR.info("Windowed delay estimation start")
507
+ LGR.info("\n\nWindowed delay estimation")
508
+
509
+ if args.windelayoffsetgausssigma < 0.0:
510
+ # set gausssigma automatically
511
+ args.windelayoffsetgausssigma = np.mean([xdim, ydim, slicedim]) / 2.0
512
+
513
+ wintrs = int(np.round(args.windowsize / fmritr, 0))
514
+ wintrs += wintrs % 2
515
+ winskip = wintrs // 2
516
+ numtrs = fmri_data_valid.shape[1]
517
+ numwins = (numtrs // winskip) - 2
518
+ winspace = winskip * fmritr
519
+ winwidth = wintrs * fmritr
520
+
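A worked example of the window geometry computed above, under assumed values (a hypothetical TR of 1.5 s, 300 retained timepoints, and the default 30 s window):

    # hypothetical values, for illustration only
    fmritr, numtrs, windowsize = 1.5, 300, 30.0
    wintrs = int(round(windowsize / fmritr))   # 20 TRs per window
    wintrs += wintrs % 2                       # force an even window length (already even here)
    winskip = wintrs // 2                      # 10 TRs: successive windows overlap by half
    numwins = (numtrs // winskip) - 2          # 28 half-overlapping windows
    winspace, winwidth = winskip * fmritr, wintrs * fmritr   # 15.0 s apart, 30.0 s wide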
521
+ # make a highpass filter
522
+ if args.hpf:
523
+ hpfcutoff = 1.0 / winwidth
524
+ thehpf = tide_filt.NoncausalFilter(
525
+ "arb",
526
+ transferfunc="trapezoidal",
527
+ padtime=30.0,
528
+ padtype="reflect",
529
+ )
530
+ thehpf.setfreqs(hpfcutoff * 0.95, hpfcutoff, 0.15, 0.15)
531
+
532
+ # make a filtered lagtc generator if necessary
533
+ if args.hpf:
534
+ reference_x, reference_y, dummy, dummy, genlagsamplerate = genlagtc.getdata()
535
+ genlagtc = tide_resample.FastResampler(
536
+ reference_x,
537
+ thehpf.apply(genlagsamplerate, reference_y),
538
+ padtime=thepadtime,
539
+ )
540
+ genlagtc.save(f"{outputname}_desc-lagtcgenerator_timeseries")
541
+
542
+ # and filter the data if necessary
543
+ if args.hpf:
544
+ Fs = 1.0 / fmritr
545
+ print("highpass filtering fmri data")
546
+ themean = fmri_data_valid.mean(axis=1)
547
+ for vox in range(fmri_data_valid.shape[0]):
548
+ fmri_data_valid[vox, :] = thehpf.apply(Fs, fmri_data_valid[vox, :]) + themean[vox]
549
+ if args.focaldebug:
550
+ # dump the filtered fmri input file
551
+ theheader = copy.deepcopy(fmri_header)
552
+ theheader["dim"][4] = validtimepoints
553
+ theheader["pixdim"][4] = fmritr
554
+
555
+ maplist = [
556
+ (
557
+ fmri_data_valid,
558
+ "hpfinputdata",
559
+ "bold",
560
+ None,
561
+ "fMRI data after highpass filtering",
562
+ ),
563
+ ]
564
+ tide_io.savemaplist(
565
+ outputname,
566
+ maplist,
567
+ validvoxels,
568
+ (xsize, ysize, numslices, validtimepoints),
569
+ theheader,
570
+ bidsbasedict,
571
+ textio=therunoptions["textio"],
572
+ fileiscifti=False,
573
+ rt_floattype=rt_floattype,
574
+ cifti_hdr=None,
575
+ )
576
+
577
+ # allocate destination arrays
578
+ internalwinspaceshape = (numvalidspatiallocs, numwins)
579
+ internalwinspaceshapederivs = (
580
+ numvalidspatiallocs,
581
+ 2,
582
+ numwins,
583
+ )
584
+ internalwinfmrishape = (numvalidspatiallocs, wintrs)
585
+ if args.debug or args.focaldebug:
586
+ print(f"window space shape = {internalwinspaceshape}")
587
+ print(f"internalwindowfmrishape shape = {internalwinfmrishape}")
588
+
589
+ windowedregressderivratios = np.zeros(internalwinspaceshape, dtype=float)
590
+ windowedregressrvalues = np.zeros(internalwinspaceshape, dtype=float)
591
+ windowedmedfiltregressderivratios = np.zeros(internalwinspaceshape, dtype=float)
592
+ windowedfilteredregressderivratios = np.zeros(internalwinspaceshape, dtype=float)
593
+ windoweddelayoffset = np.zeros(internalwinspaceshape, dtype=float)
594
+ if usesharedmem:
595
+ if args.debug:
596
+ print("allocating shared memory")
597
+ winsLFOfitmean, winsLFOfitmean_shm = tide_util.allocshared(
598
+ internalwinspaceshape, rt_outfloatset
599
+ )
600
+ winrvalue, winrvalue_shm = tide_util.allocshared(internalwinspaceshape, rt_outfloatset)
601
+ winr2value, winr2value_shm = tide_util.allocshared(internalwinspaceshape, rt_outfloatset)
602
+ winfitNorm, winfitNorm_shm = tide_util.allocshared(
603
+ internalwinspaceshapederivs, rt_outfloatset
604
+ )
605
+ winfitcoeff, winitcoeff_shm = tide_util.allocshared(
606
+ internalwinspaceshapederivs, rt_outfloatset
607
+ )
608
+ winmovingsignal, winmovingsignal_shm = tide_util.allocshared(
609
+ internalwinfmrishape, rt_outfloatset
610
+ )
611
+ winlagtc, winlagtc_shm = tide_util.allocshared(internalwinfmrishape, rt_floatset)
612
+ winfiltereddata, winfiltereddata_shm = tide_util.allocshared(
613
+ internalwinfmrishape, rt_outfloatset
614
+ )
615
+ else:
616
+ if args.debug:
617
+ print("allocating memory")
618
+ winsLFOfitmean = np.zeros(internalwinspaceshape, dtype=rt_outfloattype)
619
+ winrvalue = np.zeros(internalwinspaceshape, dtype=rt_outfloattype)
620
+ winr2value = np.zeros(internalwinspaceshape, dtype=rt_outfloattype)
621
+ winfitNorm = np.zeros(internalwinspaceshapederivs, dtype=rt_outfloattype)
622
+ winfitcoeff = np.zeros(internalwinspaceshapederivs, dtype=rt_outfloattype)
623
+ winmovingsignal = np.zeros(internalwinfmrishape, dtype=rt_outfloattype)
624
+ winlagtc = np.zeros(internalwinfmrishape, dtype=rt_floattype)
625
+ winfiltereddata = np.zeros(internalwinfmrishape, dtype=rt_outfloattype)
626
+ if args.debug:
627
+ print(f"wintrs={wintrs}, winskip={winskip}, numtrs={numtrs}, numwins={numwins}")
628
+ thewindowprocoptions = therunoptions
629
+ if args.focaldebug:
630
+ thewindowprocoptions["saveminimumsLFOfiltfiles"] = True
631
+ winoutputlevel = "max"
632
+ else:
633
+ thewindowprocoptions["saveminimumsLFOfiltfiles"] = False
634
+ winoutputlevel = "min"
635
+ for thewin in range(numwins):
636
+ print(f"Processing window {thewin + 1} of {numwins}")
637
+ starttr = thewin * winskip
638
+ endtr = starttr + wintrs
639
+ winlabel = f"_win-{str(thewin + 1).zfill(3)}"
640
+
641
+ windowedregressderivratios[:, thewin], windowedregressrvalues[:, thewin] = (
642
+ tide_refinedelay.getderivratios(
643
+ fmri_data_valid,
644
+ validvoxels,
645
+ initial_fmri_x,
646
+ lagstouse_valid,
647
+ corrmask_valid,
648
+ genlagtc,
649
+ mode,
650
+ outputname + winlabel,
651
+ oversamptr,
652
+ winsLFOfitmean[:, thewin],
653
+ winrvalue[:, thewin],
654
+ winr2value[:, thewin],
655
+ winfitNorm[:, :, thewin],
656
+ winfitcoeff[:, :, thewin],
657
+ winmovingsignal,
658
+ winlagtc,
659
+ winfiltereddata,
660
+ LGR,
661
+ TimingLGR,
662
+ thewindowprocoptions,
663
+ regressderivs=1,
664
+ starttr=starttr,
665
+ endtr=endtr,
666
+ debug=args.debug,
667
+ )
668
+ )
669
+
670
+ (
671
+ windowedmedfiltregressderivratios[:, thewin],
672
+ windowedfilteredregressderivratios[:, thewin],
673
+ windoweddelayoffsetMAD,
674
+ ) = tide_refinedelay.filterderivratios(
675
+ windowedregressderivratios[:, thewin],
676
+ (xsize, ysize, numslices),
677
+ validvoxels,
678
+ (xdim, ydim, slicedim),
679
+ gausssigma=args.windelayoffsetgausssigma,
680
+ patchthresh=args.delaypatchthresh,
681
+ fileiscifti=False,
682
+ textio=False,
683
+ rt_floattype=rt_floattype,
684
+ debug=args.debug,
685
+ )
686
+
687
+ # find the mapping of glm ratios to delays
688
+ tide_refinedelay.trainratiotooffset(
689
+ genlagtc,
690
+ initial_fmri_x[starttr:endtr],
691
+ outputname + winlabel,
692
+ winoutputlevel,
693
+ trainwidth=args.trainwidth,
694
+ trainstep=args.trainstep,
695
+ mindelay=args.mindelay,
696
+ maxdelay=args.maxdelay,
697
+ numpoints=args.numpoints,
698
+ debug=args.debug,
699
+ )
700
+ TimingLGR.info("Refinement calibration end")
701
+
702
+ # now calculate the delay offsets
703
+ TimingLGR.info("Calculating delay offsets")
704
+ if args.focaldebug:
705
+ print(
706
+ f"calculating delayoffsets for {windowedfilteredregressderivratios.shape[0]} voxels"
707
+ )
708
+ for i in range(windowedfilteredregressderivratios.shape[0]):
709
+ windoweddelayoffset[i, thewin] = tide_refinedelay.ratiotodelay(
710
+ windowedfilteredregressderivratios[i, thewin]
711
+ )
712
+
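The derivative-ratio step in this loop rests on a first-order Taylor argument: a slightly delayed copy of the regressor, s(t - δ), is approximately s(t) - δ·s'(t), so if a fit returns coefficient a for s and b for s', the ratio b/a tracks the residual delay δ. The workflow maps that ratio to seconds numerically through trainratiotooffset()/ratiotodelay() rather than using the linear approximation directly; the sketch below (synthetic data, toy regressor) only illustrates the small-delay limit:

    import numpy as np

    t = np.arange(0.0, 300.0, 1.5)                  # hypothetical 1.5 s TR
    s = np.sin(2.0 * np.pi * 0.05 * t)              # toy sLFO regressor
    sprime = np.gradient(s, t)                      # its time derivative
    delta = 0.8                                     # true delay offset, in seconds
    y = np.sin(2.0 * np.pi * 0.05 * (t - delta))    # "voxel" timecourse: delayed copy of s

    # fit y ~ a*s + b*s'; since s(t - delta) ~ s(t) - delta*s'(t), b/a ~ -delta for small delta
    a, b = np.linalg.lstsq(np.column_stack([s, sprime]), y, rcond=None)[0]
    print(b / a)                                    # approximately -0.8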
713
+ # now see if there are common timecourses in the delay offsets
714
+ themean = np.mean(windoweddelayoffset, axis=1)
715
+ thevar = np.var(windoweddelayoffset, axis=1)
716
+ scaledvoxels = windoweddelayoffset * 0.0
717
+ for vox in range(0, windoweddelayoffset.shape[0]):
718
+ scaledvoxels[vox, :] = windoweddelayoffset[vox, :] - themean[vox]
719
+ if thevar[vox] > 0.0:
720
+ scaledvoxels[vox, :] = scaledvoxels[vox, :] / thevar[vox]
721
+ if args.systemicfittype == "pca":
722
+ if args.pcacomponents < 0.0:
723
+ pcacomponents = "mle"
724
+ elif args.pcacomponents >= 1.0:
725
+ pcacomponents = int(np.round(args.pcacomponents))
726
+ elif args.pcacomponents == 0.0:
727
+ print("0.0 is not an allowed value for pcacomponents")
728
+ sys.exit()
729
+ else:
730
+ pcacomponents = args.pcacomponents
731
+
732
+ # use the method of "A novel perspective to calibrate temporal delays in cerebrovascular reactivity
733
+ # using hypercapnic and hyperoxic respiratory challenges". NeuroImage 187, 154–165 (2019).
734
+ print(f"performing pca refinement with pcacomponents set to {pcacomponents}")
735
+ try:
736
+ thefit = PCA(n_components=pcacomponents).fit(scaledvoxels)
737
+ except ValueError:
738
+ if pcacomponents == "mle":
739
+ print("mle estimation failed - falling back to pcacomponents=0.8")
740
+ thefit = PCA(n_components=0.8).fit(scaledvoxels)
741
+ else:
742
+ print("unhandled math exception in PCA refinement - exiting")
743
+ sys.exit()
744
+ print(
745
+ f"Using {len(thefit.components_)} component(s), accounting for "
746
+ + f"{100.0 * np.cumsum(thefit.explained_variance_ratio_)[len(thefit.components_) - 1]}% of the variance"
747
+ )
748
+ reduceddata = thefit.inverse_transform(thefit.transform(scaledvoxels))
749
+ # unscale the PCA cleaned data
750
+ for vox in range(0, windoweddelayoffset.shape[0]):
751
+ reduceddata[vox, :] = reduceddata[vox, :] * thevar[vox] + themean[vox]
752
+ if args.focaldebug:
753
+ print("complex processing: reduceddata.shape =", scaledvoxels.shape)
754
+ # pcadata = np.mean(reduceddata, axis=0)
755
+ pcadata = thefit.components_[0]
756
+ averagedata = np.mean(windoweddelayoffset, axis=0)
757
+ thepxcorr = pearsonr(averagedata, pcadata)[0]
758
+ LGR.info(f"pca/avg correlation = {thepxcorr}")
759
+ if thepxcorr > 0.0:
760
+ systemiccomp = 1.0 * pcadata
761
+ else:
762
+ systemiccomp = -1.0 * pcadata
763
+ thecomponents = thefit.components_[:]
764
+ tide_io.writebidstsv(
765
+ f"{outputname}_desc-pcacomponents_timeseries",
766
+ thecomponents,
767
+ 1.0 / winspace,
768
+ )
769
+ tide_io.writevec(
770
+ 100.0 * thefit.explained_variance_ratio_,
771
+ f"{outputname}_desc-pcaexplainedvarianceratio_info.tsv",
772
+ )
773
+ elif args.systemicfittype == "mean":
774
+ systemiccomp = np.mean(scaledvoxels, axis=0)
775
+ reduceddata = None
776
+ else:
777
+ print("unhandled systemic filter type")
778
+ sys.exit(0)
779
+ tide_io.writebidstsv(
780
+ f"{outputname}_desc-systemiccomponent_timeseries",
781
+ systemiccomp,
782
+ 1.0 / winspace,
783
+ )
784
+
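A minimal, self-contained sketch of the scale → PCA → inverse-transform → unscale round trip performed above, on synthetic data (the array sizes and the single shared timecourse are assumptions for illustration, not values from this dataset):

    import numpy as np
    from sklearn.decomposition import PCA

    rng = np.random.default_rng(0)
    nvox, nwins = 500, 28                                      # hypothetical sizes
    shared = np.sin(np.linspace(0.0, 4.0 * np.pi, nwins))      # one common timecourse
    offsets = np.outer(rng.normal(1.0, 0.3, nvox), shared)     # per-voxel scaling of it
    offsets += rng.normal(0.0, 0.2, (nvox, nwins))             # plus voxelwise noise

    # demean and variance-normalize each voxel, mirroring the loop above
    themean, thevar = offsets.mean(axis=1), offsets.var(axis=1)
    scaled = (offsets - themean[:, None]) / np.where(thevar > 0.0, thevar, 1.0)[:, None]

    thefit = PCA(n_components=1).fit(scaled)
    reduced = thefit.inverse_transform(thefit.transform(scaled))
    reduced = reduced * thevar[:, None] + themean[:, None]     # undo the scaling
    print(thefit.explained_variance_ratio_)                    # bulk of the variance in one component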
785
+ doregress = False
786
+ if doregress:
787
+ if usesharedmem:
788
+ if args.debug:
789
+ print("allocating shared memory")
790
+ systemicsLFOfitmean, systemicsLFOfitmean_shm = tide_util.allocshared(
791
+ internalwinspaceshape, rt_outfloatset
792
+ )
793
+ systemicrvalue, systemicrvalue_shm = tide_util.allocshared(
794
+ internalwinspaceshape, rt_outfloatset
795
+ )
796
+ systemicr2value, systemicr2value_shm = tide_util.allocshared(
797
+ internalwinspaceshape, rt_outfloatset
798
+ )
799
+ systemicfitNorm, systemicfitNorm_shm = tide_util.allocshared(
800
+ internalwinspaceshapederivs, rt_outfloatset
801
+ )
802
+ systemicfitcoeff, systemicitcoeff_shm = tide_util.allocshared(
803
+ internalwinspaceshapederivs, rt_outfloatset
804
+ )
805
+ systemicmovingsignal, systemicmovingsignal_shm = tide_util.allocshared(
806
+ internalwinspaceshape, rt_outfloatset
807
+ )
808
+ systemiclagtc, systemiclagtc_shm = tide_util.allocshared(
809
+ internalwinspaceshape, rt_floatset
810
+ )
811
+ systemicfiltereddata, systemicfiltereddata_shm = tide_util.allocshared(
812
+ internalwinspaceshape, rt_outfloatset
813
+ )
814
+ else:
815
+ if args.debug:
816
+ print("allocating memory")
817
+ systemicsLFOfitmean = np.zeros(internalwinspaceshape, dtype=rt_outfloattype)
818
+ systemicrvalue = np.zeros(internalwinspaceshape, dtype=rt_outfloattype)
819
+ systemicr2value = np.zeros(internalwinspaceshape, dtype=rt_outfloattype)
820
+ systemicfitNorm = np.zeros(internalwinspaceshapederivs, dtype=rt_outfloattype)
821
+ systemicfitcoeff = np.zeros(internalwinspaceshapederivs, dtype=rt_outfloattype)
822
+ systemicmovingsignal = np.zeros(internalwinspaceshape, dtype=rt_outfloattype)
823
+ systemiclagtc = np.zeros(internalwinspaceshape, dtype=rt_floattype)
824
+ systemicfiltereddata = np.zeros(internalwinspaceshape, dtype=rt_outfloattype)
825
+
826
+ windowlocs = np.linspace(0.0, winspace * numwins, num=numwins, endpoint=False) + skiptime
827
+ voxelsprocessed_regressionfilt, regressorset, evset = tide_regressfrommaps.regressfrommaps(
828
+ windoweddelayoffset,
829
+ validvoxels,
830
+ windowlocs,
831
+ 0.0 * lagstouse_valid,
832
+ corrmask_valid,
833
+ genlagtc,
834
+ mode,
835
+ outputname,
836
+ oversamptr,
837
+ systemicsLFOfitmean,
838
+ systemicrvalue,
839
+ systemicr2value,
840
+ systemicfitNorm[:, :],
841
+ systemicfitcoeff[:, :],
842
+ systemicmovingsignal,
843
+ systemiclagtc,
844
+ systemicfiltereddata,
845
+ LGR,
846
+ TimingLGR,
847
+ threshval,
848
+ False,
849
+ nprocs_makelaggedtcs=args.nprocs,
850
+ nprocs_regressionfilt=args.nprocs,
851
+ regressderivs=1,
852
+ showprogressbar=args.showprogressbar,
853
+ debug=args.focaldebug,
854
+ )
855
+
856
+ namesuffix = f"_desc-delayoffsetwin{thewin}_hist"
857
+ tide_stats.makeandsavehistogram(
858
+ windoweddelayoffset[:, thewin],
859
+ therunoptions["histlen"],
860
+ 1,
861
+ outputname + namesuffix,
862
+ displaytitle="Histogram of delay offsets calculated from GLM",
863
+ dictvarname="delayoffsethist",
864
+ thedict=None,
865
+ )
866
+ theheader = copy.deepcopy(fmri_header)
867
+ theheader["dim"][4] = numwins
868
+ theheader["pixdim"][4] = winspace
869
+ maplist = [
870
+ (
871
+ windoweddelayoffset,
872
+ "windoweddelayoffset",
873
+ "info",
874
+ None,
875
+ f"Delay offsets in each {winspace} second window",
876
+ ),
877
+ (
878
+ np.square(windowedregressrvalues),
879
+ "windowedregressr2values",
880
+ "info",
881
+ None,
882
+ f"R2 values for regression in each {winspace} second window",
883
+ ),
884
+ ]
885
+ if doregress:
886
+ maplist += [
887
+ (
888
+ systemicfiltereddata,
889
+ "systemicfiltereddata",
890
+ "info",
891
+ None,
892
+ f"Systemic filtered delay offsets in each {winspace} second window",
893
+ ),
894
+ (
895
+ np.square(systemicr2value),
896
+ "systemicr2value",
897
+ "info",
898
+ None,
899
+ f"R2 values for systemic regression in each {winspace} second window",
900
+ ),
901
+ ]
902
+ if args.focaldebug:
903
+ maplist += [
904
+ (
905
+ systemicsLFOfitmean,
906
+ "systemicsLFOfitmean",
907
+ "info",
908
+ None,
909
+ f"Constant coefficient for systemic filter",
910
+ ),
911
+ (
912
+ systemicfitcoeff[:, 0],
913
+ "systemiccoffEV0",
914
+ "info",
915
+ None,
916
+ f"Coefficient 0 for systemic filter",
917
+ ),
918
+ (
919
+ systemicfitcoeff[:, 1],
920
+ "systemiccoffEV1",
921
+ "info",
922
+ None,
923
+ f"Coefficient 1 for systemic filter",
924
+ ),
925
+ ]
926
+ if reduceddata is not None:
927
+ maplist += (
928
+ (
929
+ reduceddata,
930
+ "windoweddelayoffsetPCA",
931
+ "info",
932
+ None,
933
+ f"PCA cleaned delay offsets in each {winspace} second window",
934
+ ),
935
+ )
936
+
937
+ """(
938
+ filtwindoweddelayoffset,
939
+ "filtwindoweddelayoffset",
940
+ "info",
941
+ None,
942
+ f"Delay offsets in each {winspace} second window with the systemic component removed",
943
+ ),"""
944
+ if args.focaldebug:
945
+ maplist += [
946
+ (
947
+ windowedmedfiltregressderivratios,
948
+ "windowedmedfiltregressderivratios",
949
+ "info",
950
+ None,
951
+ f"Mediean filtered derivative ratios in each {winspace} second window",
952
+ ),
953
+ (
954
+ windowedfilteredregressderivratios,
955
+ "windowedfilteredregressderivratios",
956
+ "info",
957
+ None,
958
+ f"Filtered derivative ratios in each {winspace} second window",
959
+ ),
960
+ (
961
+ windowedregressderivratios,
962
+ "windowedregressderivratios",
963
+ "info",
964
+ None,
965
+ f"Raw derivative ratios in each {winspace} second window",
966
+ ),
967
+ ]
968
+ tide_io.savemaplist(
969
+ outputname,
970
+ maplist,
971
+ validvoxels,
972
+ (xsize, ysize, numslices, numwins),
973
+ theheader,
974
+ bidsbasedict,
975
+ debug=args.debug,
976
+ )
977
+ #########################
978
+ # End window processing
979
+ #########################
980
+
981
+ # save outputs
982
+ TimingLGR.info("Starting output save")
983
+ bidsdict = bidsbasedict.copy()
984
+
985
+ # update the runoptions file
986
+ print("writing runoptions")
987
+ therunoptions["delayvar_runtime"] = time.strftime(
988
+ "%a, %d %b %Y %H:%M:%S %Z", time.localtime(time.time())
989
+ )
990
+
991
+ # clean up shared memory
992
+ if usesharedmem:
993
+ tide_util.cleanup_shm(winsLFOfitmean_shm)
994
+ tide_util.cleanup_shm(winrvalue_shm)
995
+ tide_util.cleanup_shm(winr2value_shm)
996
+ tide_util.cleanup_shm(winfitNorm_shm)
997
+ tide_util.cleanup_shm(winitcoeff_shm)
998
+ tide_util.cleanup_shm(winmovingsignal_shm)
999
+ tide_util.cleanup_shm(winlagtc_shm)
1000
+ tide_util.cleanup_shm(winfiltereddata_shm)
1001
+ if doregress:
1002
+ tide_util.cleanup_shm(systemicsLFOfitmean_shm)
1003
+ tide_util.cleanup_shm(systemicrvalue_shm)
1004
+ tide_util.cleanup_shm(systemicr2value_shm)
1005
+ tide_util.cleanup_shm(systemicfitNorm_shm)
1006
+ tide_util.cleanup_shm(systemicitcoeff_shm)
1007
+ tide_util.cleanup_shm(systemicmovingsignal_shm)
1008
+ tide_util.cleanup_shm(systemiclagtc_shm)
1009
+ tide_util.cleanup_shm(systemicfiltereddata_shm)
1010
+ TimingLGR.info("Shared memory cleanup complete")
1011
+
1012
+ # shut down logging
1013
+ TimingLGR.info("Done")
1014
+ logging.shutdown()
1015
+
1016
+ # reformat timing information and delete the unformatted version
1017
+ timingdata, therunoptions["totalretroruntime"] = tide_util.proctiminglogfile(
1018
+ f"{outputname}_retroruntimings.tsv"
1019
+ )
1020
+ tide_io.writevec(
1021
+ timingdata,
1022
+ f"{outputname}_desc-formattedretroruntimings_info.tsv",
1023
+ )
1024
+ Path(f"{outputname}_retroruntimings.tsv").unlink(missing_ok=True)
1025
+
1026
+ # save the modified runoptions file
1027
+ tide_io.writedicttojson(therunoptions, f"{outputname}_desc-runoptions_info.json")
1028
+
1029
+ # shut down the loggers
1030
+ for thelogger in [LGR, ErrorLGR, TimingLGR]:
1031
+ handlers = thelogger.handlers[:]
1032
+ for handler in handlers:
1033
+ thelogger.removeHandler(handler)
1034
+ handler.close()
1035
+
1036
+ # delete the canary file
1037
+ Path(f"{outputname}_DELAYVARISRUNNING.txt").unlink()
1038
+
1039
+ # create the finished file
1040
+ Path(f"{outputname}_DELAYVARDONE.txt").touch()
1041
+
1042
+
1043
+ def process_args(inputargs=None):
1044
+ """
1045
+ Compile arguments for delayvar workflow.
1046
+ """
1047
+ args, argstowrite = pf.setargs(_get_parser, inputargs=inputargs)
1048
+ return args
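
process_args() returns the populated namespace, so the console entry point (rapidtide/scripts/delayvar.py in the file list above, whose contents are not shown in this diff) presumably wires the two functions together roughly as follows; this is a sketch of that assumption, not the script's actual code:

    from rapidtide.workflows.delayvar import delayvar, process_args

    def main():
        # parse sys.argv via the workflow's own parser, then run the workflow
        delayvar(process_args())

    if __name__ == "__main__":
        main()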