rapidtide-3.0a14-py3-none-any.whl → rapidtide-3.0.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (231)
  1. rapidtide/Colortables.py +1 -1
  2. rapidtide/DerivativeDelay.py +1 -1
  3. rapidtide/OrthoImageItem.py +1 -1
  4. rapidtide/RapidtideDataset.py +3 -1
  5. rapidtide/RegressorRefiner.py +1 -1
  6. rapidtide/calcandfitcorrpairs.py +1 -1
  7. rapidtide/calccoherence.py +1 -1
  8. rapidtide/calcnullsimfunc.py +1 -1
  9. rapidtide/calcsimfunc.py +1 -1
  10. rapidtide/correlate.py +1 -1
  11. rapidtide/data/examples/src/test_findmaxlag.py +1 -1
  12. rapidtide/data/examples/src/testfmri +3 -2
  13. rapidtide/data/examples/src/testfuncs +1 -1
  14. rapidtide/data/examples/src/testhappy +12 -1
  15. rapidtide/data/examples/src/testmodels +33 -0
  16. rapidtide/data/models/model_cnn_w064_l13_fn20_fl08/loss.png +0 -0
  17. rapidtide/data/models/model_cnn_w064_l13_fn20_fl08/loss.txt +1 -0
  18. rapidtide/data/models/model_cnn_w064_l13_fn20_fl08/model.keras +0 -0
  19. rapidtide/data/models/model_cnn_w064_l13_fn20_fl08/model_meta.json +167 -0
  20. rapidtide/data/models/model_revised_tf2/model.keras +0 -0
  21. rapidtide/data/models/{model_serdar → model_revised_tf2}/model_meta.json +1 -1
  22. rapidtide/data/models/model_serdar2_tf2/model.keras +0 -0
  23. rapidtide/data/models/{model_serdar2 → model_serdar2_tf2}/model_meta.json +1 -1
  24. rapidtide/data/models/model_serdar_tf2/model.keras +0 -0
  25. rapidtide/data/models/{model_revised → model_serdar_tf2}/model_meta.json +1 -1
  26. rapidtide/dlfilter.py +325 -241
  27. rapidtide/externaltools.py +1 -1
  28. rapidtide/fMRIData_class.py +1 -1
  29. rapidtide/filter.py +1 -8
  30. rapidtide/fit.py +1 -8
  31. rapidtide/happy_supportfuncs.py +30 -1
  32. rapidtide/helper_classes.py +1 -1
  33. rapidtide/io.py +1 -1
  34. rapidtide/linfitfiltpass.py +1 -1
  35. rapidtide/makelaggedtcs.py +1 -1
  36. rapidtide/maskutil.py +1 -1
  37. rapidtide/miscmath.py +1 -8
  38. rapidtide/multiproc.py +1 -1
  39. rapidtide/patchmatch.py +1 -1
  40. rapidtide/peakeval.py +1 -1
  41. rapidtide/qualitycheck.py +1 -1
  42. rapidtide/refinedelay.py +1 -3
  43. rapidtide/refineregressor.py +1 -1
  44. rapidtide/resample.py +1 -1
  45. rapidtide/scripts/adjustoffset.py +1 -1
  46. rapidtide/scripts/aligntcs.py +1 -1
  47. rapidtide/scripts/applydlfilter.py +1 -1
  48. rapidtide/scripts/atlasaverage.py +1 -1
  49. rapidtide/scripts/atlastool.py +1 -1
  50. rapidtide/scripts/calcicc.py +1 -1
  51. rapidtide/scripts/calctexticc.py +1 -1
  52. rapidtide/scripts/calcttest.py +1 -1
  53. rapidtide/scripts/ccorrica.py +1 -1
  54. rapidtide/scripts/delayvar.py +2 -2
  55. rapidtide/scripts/diffrois.py +1 -1
  56. rapidtide/scripts/endtidalproc.py +1 -1
  57. rapidtide/scripts/fdica.py +1 -1
  58. rapidtide/scripts/filtnifti.py +1 -1
  59. rapidtide/scripts/filttc.py +1 -1
  60. rapidtide/scripts/fingerprint.py +1 -1
  61. rapidtide/scripts/fixtr.py +1 -1
  62. rapidtide/scripts/gmscalc.py +1 -1
  63. rapidtide/scripts/happy.py +1 -1
  64. rapidtide/scripts/happy2std.py +1 -1
  65. rapidtide/scripts/happywarp.py +1 -1
  66. rapidtide/scripts/histnifti.py +1 -1
  67. rapidtide/scripts/histtc.py +1 -1
  68. rapidtide/scripts/linfitfilt.py +1 -1
  69. rapidtide/scripts/localflow.py +1 -1
  70. rapidtide/scripts/mergequality.py +1 -1
  71. rapidtide/scripts/pairproc.py +1 -1
  72. rapidtide/scripts/pairwisemergenifti.py +1 -1
  73. rapidtide/scripts/physiofreq.py +1 -1
  74. rapidtide/scripts/pixelcomp.py +1 -1
  75. rapidtide/scripts/plethquality.py +1 -1
  76. rapidtide/scripts/polyfitim.py +1 -1
  77. rapidtide/scripts/proj2flow.py +1 -1
  78. rapidtide/scripts/rankimage.py +1 -1
  79. rapidtide/scripts/rapidtide.py +1 -1
  80. rapidtide/scripts/rapidtide2std.py +1 -1
  81. rapidtide/scripts/resamplenifti.py +1 -1
  82. rapidtide/scripts/resampletc.py +1 -1
  83. rapidtide/scripts/retrolagtcs.py +1 -1
  84. rapidtide/scripts/retroregress.py +1 -1
  85. rapidtide/scripts/roisummarize.py +1 -1
  86. rapidtide/scripts/runqualitycheck.py +1 -1
  87. rapidtide/scripts/showarbcorr.py +1 -1
  88. rapidtide/scripts/showhist.py +1 -1
  89. rapidtide/scripts/showstxcorr.py +1 -1
  90. rapidtide/scripts/showtc.py +1 -1
  91. rapidtide/scripts/showxcorr_legacy.py +1 -1
  92. rapidtide/scripts/showxcorrx.py +1 -1
  93. rapidtide/scripts/showxy.py +1 -1
  94. rapidtide/scripts/simdata.py +1 -1
  95. rapidtide/scripts/spatialdecomp.py +1 -1
  96. rapidtide/scripts/spatialfit.py +1 -1
  97. rapidtide/scripts/spatialmi.py +1 -1
  98. rapidtide/scripts/spectrogram.py +1 -1
  99. rapidtide/scripts/stupidramtricks.py +1 -1
  100. rapidtide/scripts/synthASL.py +1 -1
  101. rapidtide/scripts/tcfrom2col.py +1 -1
  102. rapidtide/scripts/tcfrom3col.py +1 -1
  103. rapidtide/scripts/temporaldecomp.py +1 -1
  104. rapidtide/scripts/testhrv.py +1 -1
  105. rapidtide/scripts/threeD.py +1 -1
  106. rapidtide/scripts/tidepool.py +1 -1
  107. rapidtide/scripts/variabilityizer.py +1 -1
  108. rapidtide/simfuncfit.py +1 -1
  109. rapidtide/stats.py +1 -8
  110. rapidtide/tests/cleanposttest +1 -1
  111. rapidtide/tests/resethappytargets +1 -1
  112. rapidtide/tests/resetrapidtidetargets +1 -1
  113. rapidtide/tests/resettargets +1 -1
  114. rapidtide/tests/runlocaltest +1 -1
  115. rapidtide/tests/showkernels +1 -1
  116. rapidtide/tests/test_aliasedcorrelate.py +1 -1
  117. rapidtide/tests/test_aligntcs.py +1 -1
  118. rapidtide/tests/test_calcicc.py +1 -1
  119. rapidtide/tests/test_congrid.py +1 -1
  120. rapidtide/tests/test_correlate.py +1 -1
  121. rapidtide/tests/test_corrpass.py +1 -1
  122. rapidtide/tests/test_delayestimation.py +1 -1
  123. rapidtide/tests/test_doresample.py +1 -1
  124. rapidtide/tests/test_fastresampler.py +1 -1
  125. rapidtide/tests/test_findmaxlag.py +1 -1
  126. rapidtide/tests/test_fullrunhappy_v1.py +2 -2
  127. rapidtide/tests/test_fullrunhappy_v2.py +2 -2
  128. rapidtide/tests/test_fullrunhappy_v3.py +2 -2
  129. rapidtide/tests/test_fullrunhappy_v4.py +2 -2
  130. rapidtide/tests/test_fullrunhappy_v5.py +1 -3
  131. rapidtide/tests/test_fullrunrapidtide_v1.py +1 -1
  132. rapidtide/tests/test_fullrunrapidtide_v2.py +3 -1
  133. rapidtide/tests/test_fullrunrapidtide_v3.py +1 -1
  134. rapidtide/tests/test_fullrunrapidtide_v4.py +1 -1
  135. rapidtide/tests/test_fullrunrapidtide_v5.py +1 -1
  136. rapidtide/tests/test_fullrunrapidtide_v6.py +1 -1
  137. rapidtide/tests/test_io.py +1 -1
  138. rapidtide/tests/test_linfitfiltpass.py +1 -1
  139. rapidtide/tests/test_mi.py +1 -1
  140. rapidtide/tests/test_miscmath.py +1 -1
  141. rapidtide/tests/test_motionregress.py +1 -1
  142. rapidtide/tests/test_nullcorr.py +1 -1
  143. rapidtide/tests/test_padvec.py +1 -1
  144. rapidtide/tests/test_phaseanalysis.py +1 -1
  145. rapidtide/tests/test_rapidtideparser.py +1 -1
  146. rapidtide/tests/test_refinedelay.py +1 -2
  147. rapidtide/tests/test_runmisc.py +1 -1
  148. rapidtide/tests/test_sharedmem.py +1 -1
  149. rapidtide/tests/test_simulate.py +1 -1
  150. rapidtide/tests/test_stcorrelate.py +1 -1
  151. rapidtide/tests/test_timeshift.py +1 -1
  152. rapidtide/tests/test_valtoindex.py +1 -1
  153. rapidtide/tests/test_zRapidtideDataset.py +1 -1
  154. rapidtide/tests/utils.py +1 -1
  155. rapidtide/transformerdlfilter.py +2 -4
  156. rapidtide/util.py +37 -14
  157. rapidtide/voxelData.py +278 -0
  158. rapidtide/wiener.py +1 -1
  159. rapidtide/wiener2.py +1 -1
  160. rapidtide/workflows/adjustoffset.py +1 -1
  161. rapidtide/workflows/aligntcs.py +1 -1
  162. rapidtide/workflows/applydlfilter.py +20 -65
  163. rapidtide/workflows/atlasaverage.py +1 -1
  164. rapidtide/workflows/atlastool.py +1 -1
  165. rapidtide/workflows/calctexticc.py +1 -1
  166. rapidtide/workflows/ccorrica.py +1 -1
  167. rapidtide/workflows/cleanregressor.py +243 -0
  168. rapidtide/workflows/delayestimation.py +488 -0
  169. rapidtide/workflows/delayvar.py +1 -1
  170. rapidtide/workflows/diffrois.py +1 -1
  171. rapidtide/workflows/endtidalproc.py +1 -1
  172. rapidtide/workflows/fdica.py +1 -1
  173. rapidtide/workflows/filtnifti.py +1 -1
  174. rapidtide/workflows/filttc.py +1 -1
  175. rapidtide/workflows/fixtr.py +1 -1
  176. rapidtide/workflows/gmscalc.py +1 -1
  177. rapidtide/workflows/happy.py +49 -3
  178. rapidtide/workflows/happy2std.py +1 -1
  179. rapidtide/workflows/happy_parser.py +30 -6
  180. rapidtide/workflows/histnifti.py +1 -1
  181. rapidtide/workflows/histtc.py +1 -1
  182. rapidtide/workflows/linfitfilt.py +1 -1
  183. rapidtide/workflows/localflow.py +1 -1
  184. rapidtide/workflows/mergequality.py +1 -1
  185. rapidtide/workflows/niftidecomp.py +1 -1
  186. rapidtide/workflows/niftistats.py +1 -1
  187. rapidtide/workflows/pairproc.py +1 -1
  188. rapidtide/workflows/pairwisemergenifti.py +1 -1
  189. rapidtide/workflows/parser_funcs.py +1 -1
  190. rapidtide/workflows/physiofreq.py +1 -1
  191. rapidtide/workflows/pixelcomp.py +1 -1
  192. rapidtide/workflows/plethquality.py +1 -1
  193. rapidtide/workflows/polyfitim.py +1 -1
  194. rapidtide/workflows/proj2flow.py +1 -1
  195. rapidtide/workflows/rankimage.py +1 -1
  196. rapidtide/workflows/rapidtide.py +163 -679
  197. rapidtide/workflows/rapidtide2std.py +1 -1
  198. rapidtide/workflows/rapidtide_parser.py +1 -98
  199. rapidtide/workflows/regressfrommaps.py +4 -48
  200. rapidtide/workflows/resamplenifti.py +1 -1
  201. rapidtide/workflows/resampletc.py +1 -1
  202. rapidtide/workflows/retrolagtcs.py +1 -1
  203. rapidtide/workflows/retroregress.py +1 -1
  204. rapidtide/workflows/roisummarize.py +1 -1
  205. rapidtide/workflows/runqualitycheck.py +1 -1
  206. rapidtide/workflows/showarbcorr.py +1 -1
  207. rapidtide/workflows/showhist.py +1 -1
  208. rapidtide/workflows/showstxcorr.py +1 -1
  209. rapidtide/workflows/showtc.py +1 -1
  210. rapidtide/workflows/showxcorrx.py +1 -1
  211. rapidtide/workflows/showxy.py +1 -1
  212. rapidtide/workflows/simdata.py +1 -1
  213. rapidtide/workflows/spatialfit.py +1 -1
  214. rapidtide/workflows/spatialmi.py +1 -1
  215. rapidtide/workflows/spectrogram.py +1 -1
  216. rapidtide/workflows/synthASL.py +1 -1
  217. rapidtide/workflows/tcfrom2col.py +1 -1
  218. rapidtide/workflows/tcfrom3col.py +1 -1
  219. rapidtide/workflows/tidepool.py +2 -1
  220. rapidtide/workflows/utils.py +1 -1
  221. rapidtide/workflows/variabilityizer.py +1 -1
  222. {rapidtide-3.0a14.dist-info → rapidtide-3.0.1.dist-info}/METADATA +7 -9
  223. rapidtide-3.0.1.dist-info/RECORD +353 -0
  224. {rapidtide-3.0a14.dist-info → rapidtide-3.0.1.dist-info}/WHEEL +1 -1
  225. rapidtide/data/models/model_revised/model.h5 +0 -0
  226. rapidtide/data/models/model_serdar/model.h5 +0 -0
  227. rapidtide/data/models/model_serdar2/model.h5 +0 -0
  228. rapidtide-3.0a14.dist-info/RECORD +0 -345
  229. {rapidtide-3.0a14.dist-info → rapidtide-3.0.1.dist-info}/entry_points.txt +0 -0
  230. {rapidtide-3.0a14.dist-info → rapidtide-3.0.1.dist-info}/licenses/LICENSE +0 -0
  231. {rapidtide-3.0a14.dist-info → rapidtide-3.0.1.dist-info}/top_level.txt +0 -0
rapidtide/dlfilter.py CHANGED
@@ -1,7 +1,7 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 #
-# Copyright 2016-2024 Blaise Frederick
+# Copyright 2016-2025 Blaise Frederick
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -16,11 +16,6 @@
 # limitations under the License.
 #
 #
-"""
-Created on Sat Jul 28 23:01:07 2018
-
-@author: neuro
-"""
 import glob
 import logging
 import os
@@ -41,7 +36,6 @@ with warnings.catch_warnings():
 else:
     pyfftwpresent = True
 
-
 from scipy import fftpack
 from statsmodels.robust.scale import mad
 
@@ -49,36 +43,50 @@ if pyfftwpresent:
     fftpack = pyfftw.interfaces.scipy_fftpack
     pyfftw.interfaces.cache.enable()
 
-import rapidtide.io as tide_io
-
-LGR = logging.getLogger("GENERAL")
-LGR.debug("setting backend to Agg")
-mpl.use("Agg")
-
-os.environ["TF_USE_LEGACY_KERAS"] = "1"
-os.environ["TF_ENABLE_ONEDNN_OPTS"] = "0"
-os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
-
-import tensorflow.compat.v1 as tf
-
-LGR.debug("using tensorflow v2x")
-# tf.disable_v2_behavior()
-from tensorflow.keras.callbacks import ModelCheckpoint, TerminateOnNaN
-from tensorflow.keras.layers import (
+import tensorflow as tf
+import tf_keras.backend as K
+from tf_keras.callbacks import (
+    EarlyStopping,
+    ModelCheckpoint,
+    TensorBoard,
+    TerminateOnNaN,
+)
+from tf_keras.layers import (
     LSTM,
     Activation,
     BatchNormalization,
     Bidirectional,
+    Concatenate,
     Convolution1D,
     Dense,
     Dropout,
+    Flatten,
     GlobalMaxPool1D,
+    Input,
     MaxPooling1D,
+    Reshape,
     TimeDistributed,
     UpSampling1D,
 )
-from tensorflow.keras.models import Sequential, load_model
-from tensorflow.keras.optimizers import RMSprop
+from tf_keras.models import Model, Sequential, load_model
+from tf_keras.optimizers.legacy import RMSprop
+
+import rapidtide.io as tide_io
+
+os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
+
+LGR = logging.getLogger("GENERAL")
+LGR.debug("setting backend to Agg")
+mpl.use("Agg")
+
+# Disable GPU if desired
+# figure out what sorts of devices we have
+physical_devices = tf.config.list_physical_devices()
+print(physical_devices)
+# try:
+#     tf.config.set_visible_devices([], "GPU")
+# except Exception as e:
+#     LGR.warning(f"Failed to disable GPU: {e}")
 
 LGR.debug(f"tensorflow version: >>>{tf.__version__}<<<")
 
@@ -108,7 +116,6 @@ class DeepLearningFilter:
     model = None
     modelpath = None
     inputsize = None
-    usehdf = True
     infodict = {}
 
     def __init__(
@@ -125,10 +132,10 @@ class DeepLearningFilter:
         usebadpts=False,
         thesuffix="25.0Hz",
         modelpath=".",
-        usehdf=True,
         thedatadir="/Users/frederic/Documents/MR_data/physioconn/timecourses",
         inputfrag="abc",
         targetfrag="xyz",
+        corrthresh=0.5,
         excludebysubject=True,
         startskip=200,
         endskip=200,
@@ -151,12 +158,12 @@
         self.inputsize = 1
         self.activation = activation
         self.modelroot = modelroot
-        self.usehdf = usehdf
         self.dofft = dofft
         self.thesuffix = thesuffix
         self.thedatadir = thedatadir
         self.modelpath = modelpath
         LGR.info(f"modeldir from DeepLearningFilter: {self.modelpath}")
+        self.corrthresh = corrthresh
         self.excludethresh = excludethresh
         self.readlim = readlim
         self.readskip = readskip
@@ -177,6 +184,7 @@
         self.infodict["window_size"] = self.window_size
         self.infodict["usebadpts"] = self.usebadpts
         self.infodict["dofft"] = self.dofft
+        self.infodict["corrthresh"] = self.corrthresh
         self.infodict["excludethresh"] = self.excludethresh
         self.infodict["num_pretrain_epochs"] = self.num_pretrain_epochs
         self.infodict["num_epochs"] = self.num_epochs
@@ -210,6 +218,7 @@
             targetfrag=self.targetfrag,
             startskip=self.startskip,
             endskip=self.endskip,
+            corrthresh=self.corrthresh,
             step=self.step,
             dofft=self.dofft,
             usebadpts=self.usebadpts,
@@ -236,6 +245,7 @@
             targetfrag=self.targetfrag,
             startskip=self.startskip,
             endskip=self.endskip,
+            corrthresh=self.corrthresh,
             step=self.step,
             dofft=self.dofft,
             usebadpts=self.usebadpts,
@@ -246,11 +256,15 @@
             countlim=self.countlim,
         )
 
+    @tf.function
+    def predict_model(self, X):
+        return self.model(X, training=False)
+
     def evaluate(self):
         self.lossfilename = os.path.join(self.modelname, "loss.png")
         LGR.info(f"lossfilename: {self.lossfilename}")
 
-        YPred = self.model.predict(self.val_x, verbose=0)
+        YPred = self.predict_model(self.val_x).numpy()
 
         error = self.val_y - YPred
         self.pred_error = np.mean(np.square(error))
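A note on the inference change above: `Model.predict` sets up a fresh execution loop on every call, which is costly when `evaluate()` and `apply()` invoke it repeatedly; calling the model directly inside a `tf.function` traces the forward pass once per input signature and then reuses the compiled graph. A minimal standalone sketch of the same pattern (the toy model and data here are illustrative, not from rapidtide):

```python
import numpy as np
import tensorflow as tf
from tf_keras.layers import Dense, Input
from tf_keras.models import Model

# toy model standing in for a trained DeepLearningFilter model
inp = Input(shape=(8,))
model = Model(inputs=inp, outputs=Dense(8)(inp))


@tf.function
def predict_model(X):
    # direct call in inference mode; traced once per input signature, then reused
    return model(X, training=False)


X = np.random.randn(4, 8).astype(np.float32)
Y = predict_model(X).numpy()  # EagerTensor -> ndarray, as in evaluate() and apply()
print(Y.shape)  # (4, 8)
```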
@@ -307,30 +321,29 @@
         self.infodict["prediction_error"] = self.pred_error
         tide_io.writedicttojson(self.infodict, os.path.join(self.modelname, "model_meta.json"))
 
-    def savemodel(self, usehdf=True):
-        if usehdf:
-            # save the trained model as a single hdf file
-            self.model.save(os.path.join(self.modelname, "model.h5"))
+    def savemodel(self, altname=None):
+        if altname is None:
+            modelsavename = self.modelname
         else:
-            # save the model structure to JSON
-            model_json = self.model.to_json()
-            with open(os.path.join(self.modelname, "model.json"), "w") as json_file:
-                json_file.write(model_json)
-            # save the weights to hdf
-            self.model.save_weights(os.path.join(self.modelname, "model_weights.h5"))
-
-    def loadmodel(self, modelname, usehdf=True, verbose=False):
+            modelsavename = altname
+        LGR.info(f"saving {modelsavename}")
+        self.model.save(os.path.join(modelsavename, "model.keras"))
+
+    def loadmodel(self, modelname, verbose=False):
         # read in the data
         LGR.info(f"loading {modelname}")
-
-        if usehdf:
+        try:
+            # load the keras format model if it exists
+            self.model = load_model(os.path.join(self.modelpath, modelname, "model.keras"))
+            self.config = self.model.get_config()
+        except OSError:
             # load in the model with weights from hdf
-            self.model = load_model(os.path.join(self.modelpath, modelname, "model.h5"))
-        else:
-            with open(os.path.join(self.modelname, "model.json"), "r") as json_file:
-                loaded_model_json = json_file.read()
-            self.model = model_from_json(loaded_model_json)
-            self.model.load_weights(os.path.join(self.modelname, "model_weights.h5"))
+            try:
+                self.model = load_model(os.path.join(self.modelpath, modelname, "model.h5"))
+            except OSError:
+                print(f"Could not load {modelname}")
+                sys.exit()
+
 
         if verbose:
             self.model.summary()
@@ -338,6 +351,8 @@
         self.infodict = tide_io.readdictfromjson(
             os.path.join(self.modelpath, modelname, "model_meta.json")
         )
+        if verbose:
+            print(self.infodict)
         self.window_size = self.infodict["window_size"]
         self.usebadpts = self.infodict["usebadpts"]
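The rewritten `savemodel`/`loadmodel` pair above standardizes on the single-file `.keras` format while still reading legacy HDF5 `model.h5` bundles, which is why the old `model.h5` files under `rapidtide/data/models` could be dropped in favor of `model.keras` in this release. A hedged sketch of the same try-then-fall-back pattern in isolation; `load_with_fallback` is an illustrative helper, not a rapidtide function:

```python
import os
import sys

from tf_keras.models import load_model


def load_with_fallback(modelpath, modelname):
    """Prefer the single-file .keras format; fall back to legacy HDF5."""
    try:
        return load_model(os.path.join(modelpath, modelname, "model.keras"))
    except OSError:
        try:
            return load_model(os.path.join(modelpath, modelname, "model.h5"))
        except OSError:
            print(f"Could not load {modelname}")
            sys.exit()


# e.g. one of the bundled directories from the file list above (path is illustrative):
# model = load_with_fallback("rapidtide/data/models", "model_revised_tf2")
```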
 
@@ -350,52 +365,60 @@
         self.getname()
         self.makenet()
         self.model.summary()
-        self.savemodel(usehdf=True)
-        self.savemodel(usehdf=False)
+        self.savemodel()
         self.initmetadata()
         self.initialized = True
         self.trained = False
 
     def train(self):
         self.intermediatemodelpath = os.path.join(
-            self.modelname, "model_e{epoch:02d}_v{val_loss:.4f}.h5"
+            self.modelname, "model_e{epoch:02d}_v{val_loss:.4f}.keras"
         )
+        train_dataset = (
+            tf.data.Dataset.from_tensor_slices((self.train_x, self.train_y))
+            .shuffle(2048)
+            .batch(1024)
+        )
+        val_dataset = tf.data.Dataset.from_tensor_slices((self.val_x, self.val_y)).batch(1024)
         if self.usetensorboard:
             tensorboard = TensorBoard(
-                log_dir=self.intermediatemodelpath + "logs/{}".format(time())
+                log_dir=os.path.join(self.intermediatemodelpath, "logs", str(int(time.time())))
             )
             self.model.fit(self.train_x, self.train_y, verbose=1, callbacks=[tensorboard])
         else:
             if self.num_pretrain_epochs > 0:
                 LGR.info("pretraining model to reproduce input data")
                 self.history = self.model.fit(
-                    self.train_y,
-                    self.train_y,
-                    batch_size=1024,
+                    train_dataset,
+                    validation_data=val_dataset,
                     epochs=self.num_pretrain_epochs,
-                    shuffle=True,
                     verbose=1,
                     callbacks=[
                         TerminateOnNaN(),
-                        ModelCheckpoint(self.intermediatemodelpath),
+                        ModelCheckpoint(self.intermediatemodelpath, save_format="keras"),
+                        EarlyStopping(
+                            monitor="val_loss",  # or 'val_mae', etc.
+                            patience=10,  # number of epochs to wait
+                            restore_best_weights=True,
+                        ),
                     ],
-                    validation_data=(self.val_y, self.val_y),
                 )
             self.history = self.model.fit(
-                self.train_x,
-                self.train_y,
-                batch_size=1024,
+                train_dataset,
+                validation_data=val_dataset,
                 epochs=self.num_epochs,
-                shuffle=True,
                 verbose=1,
                 callbacks=[
                     TerminateOnNaN(),
-                    ModelCheckpoint(self.intermediatemodelpath),
+                    ModelCheckpoint(self.intermediatemodelpath, save_format="keras"),
+                    EarlyStopping(
+                        monitor="val_loss",  # or 'val_mae', etc.
+                        patience=10,  # number of epochs to wait
+                        restore_best_weights=True,
+                    ),
                 ],
-                validation_data=(self.val_x, self.val_y),
             )
-        self.savemodel(usehdf=True)
-        self.savemodel(usehdf=False)
+        self.savemodel()
         self.trained = True
 
     def apply(self, inputdata, badpts=None):
@@ -416,7 +439,7 @@
         for i in range(X.shape[0]):
             X[i, :, 0] = scaleddata[i : i + self.window_size]
 
-        Y = self.model.predict(X, verbose=0)
+        Y = self.predict_model(X).numpy()
         for i in range(X.shape[0]):
             predicteddata[i : i + self.window_size] += Y[i, :, 0]
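The training rewrite above moves batching and shuffling out of the `Model.fit` arguments and into explicit `tf.data` pipelines, which also lets validation data stream in batches. A minimal sketch of the equivalent pipeline construction, with random arrays standing in for the real training windows:

```python
import numpy as np
import tensorflow as tf

# toy stand-ins for self.train_x / self.train_y windowed training data
train_x = np.random.randn(5000, 64, 1).astype(np.float32)
train_y = np.random.randn(5000, 64, 1).astype(np.float32)

# shuffle with a 2048-element buffer, then batch -- mirrors the diff above
train_dataset = (
    tf.data.Dataset.from_tensor_slices((train_x, train_y))
    .shuffle(2048)
    .batch(1024)
)

for xb, yb in train_dataset.take(1):
    print(xb.shape, yb.shape)  # (1024, 64, 1) (1024, 64, 1)
```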
 
@@ -462,12 +485,13 @@ class MultiscaleCNNDLFilter(DeepLearningFilter):
             [
                 "model",
                 "multiscalecnn",
-                "w" + str(self.window_size),
-                "l" + str(self.num_layers),
-                "fn" + str(self.num_filters),
-                "fl" + str(self.kernel_size),
-                "e" + str(self.num_epochs),
+                "w" + str(self.window_size).zfill(3),
+                "l" + str(self.num_layers).zfill(2),
+                "fn" + str(self.num_filters).zfill(2),
+                "fl" + str(self.kernel_size).zfill(2),
+                "e" + str(self.num_epochs).zfill(3),
                 "t" + str(self.excludethresh),
+                "ct" + str(self.corrthresh),
                 "s" + str(self.step),
                 "d" + str(self.dilation_rate),
                 self.activation,
@@ -491,9 +515,9 @@
         input_seq = Input(shape=(inputlen, self.input_width))
 
         # 1-D convolution and global max-pooling
-        convolved = Conv1D(self.num_filters, kernelsize, padding="same", activation="tanh")(
-            input_seq
-        )
+        convolved = Convolution1D(
+            filters=self.num_filters, kernel_size=kernelsize, padding="same", activation="tanh"
+        )(input_seq)
         processed = GlobalMaxPool1D()(convolved)
 
         # dense layer with dropout regularization
@@ -538,12 +562,13 @@ class CNNDLFilter(DeepLearningFilter):
             [
                 "model",
                 "cnn",
-                "w" + str(self.window_size),
-                "l" + str(self.num_layers),
-                "fn" + str(self.num_filters),
-                "fl" + str(self.kernel_size),
-                "e" + str(self.num_epochs),
+                "w" + str(self.window_size).zfill(3),
+                "l" + str(self.num_layers).zfill(2),
+                "fn" + str(self.num_filters).zfill(2),
+                "fl" + str(self.kernel_size).zfill(2),
+                "e" + str(self.num_epochs).zfill(3),
                 "t" + str(self.excludethresh),
+                "ct" + str(self.corrthresh),
                 "s" + str(self.step),
                 "d" + str(self.dilation_rate),
                 self.activation,
@@ -611,10 +636,11 @@ class DenseAutoencoderDLFilter(DeepLearningFilter):
             [
                 "model",
                 "denseautoencoder",
-                "w" + str(self.window_size),
-                "en" + str(self.encoding_dim),
-                "e" + str(self.num_epochs),
+                "w" + str(self.window_size).zfill(3),
+                "en" + str(self.encoding_dim).zfill(3),
+                "e" + str(self.num_epochs).zfill(3),
                 "t" + str(self.excludethresh),
+                "ct" + str(self.corrthresh),
                 "s" + str(self.step),
                 self.activation,
             ]
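The `.zfill()` changes in the `getname` methods above zero-pad the numeric fields so model directory names sort cleanly; the bundled `model_cnn_w064_l13_fn20_fl08` directory in the file list shows the resulting scheme. A quick illustration with assumed parameter values:

```python
window_size, num_layers, num_filters, kernel_size = 64, 13, 20, 8
name = "_".join(
    [
        "model",
        "cnn",
        "w" + str(window_size).zfill(3),
        "l" + str(num_layers).zfill(2),
        "fn" + str(num_filters).zfill(2),
        "fl" + str(kernel_size).zfill(2),
    ]
)
print(name)  # model_cnn_w064_l13_fn20_fl08
```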
@@ -696,12 +722,13 @@ class ConvAutoencoderDLFilter(DeepLearningFilter):
             [
                 "model",
                 "convautoencoder",
-                "w" + str(self.window_size),
-                "en" + str(self.encoding_dim),
-                "fn" + str(self.num_filters),
-                "fl" + str(self.kernel_size),
-                "e" + str(self.num_epochs),
+                "w" + str(self.window_size).zfill(3),
+                "en" + str(self.encoding_dim).zfill(3),
+                "fn" + str(self.num_filters).zfill(2),
+                "fl" + str(self.kernel_size).zfill(2),
+                "e" + str(self.num_epochs).zfill(3),
                 "t" + str(self.excludethresh),
+                "ct" + str(self.corrthresh),
                 "s" + str(self.step),
                 self.activation,
             ]
@@ -720,103 +747,130 @@ class ConvAutoencoderDLFilter(DeepLearningFilter):
             pass
 
     def makenet(self):
-        self.model = Sequential()
+        input_layer = Input(shape=(self.window_size, self.inputsize))
+        x = input_layer
 
-        # make the input layer
-        self.model.add(
-            Convolution1D(
-                filters=self.num_filters,
-                kernel_size=self.kernel_size,
-                padding="same",
-                input_shape=(None, self.inputsize),
-            )
+        # Initial conv block
+        x = Convolution1D(filters=self.num_filters, kernel_size=self.kernel_size, padding="same")(
+            x
         )
-        self.model.add(BatchNormalization())
-        self.model.add(Dropout(rate=self.dropout_rate))
-        self.model.add(Activation(self.activation))
-        self.model.add(MaxPooling1D(2, padding="same"))
+        x = BatchNormalization()(x)
+        x = Dropout(rate=self.dropout_rate)(x)
+        x = Activation(self.activation)(x)
+        x = MaxPooling1D(pool_size=2, padding="same")(x)
 
-        layersize = self.windowsize
+        layersize = self.window_size
         nfilters = self.num_filters
-        num_encodinglayers = 3
-        num_decodinglayers = 3
-        layerprops = [(layersize, nfilters)]
-        # make the encoding layers
-        for i in range(num_encodinglayers):
-            layersize = int(layersize // 2)
-            nfilters *= 2
-            LGR.info(f"input layer size: {layersize}, nfilters: {nfilters}")
-            self.model.add(
-                Convolution1D(filters=nfilters, kernel_size=self.kernel_size, padding="same")
-            )
-            self.model.add(BatchNormalization())
-            self.model.add(Dropout(rate=self.dropout_rate))
-            self.model.add(Activation(self.activation))
-            self.model.add(MaxPooling1D(2, padding="same"))
+        filter_list = []
 
-        # make the decoding layers
-        for i in range(num_decodinglayers):
-            self.model.add(UpSampling1D(2))
+        # Encoding path (3 layers)
+        for _ in range(3):
+            layersize = int(np.ceil(layersize / 2))
+            nfilters *= 2
+            filter_list.append(nfilters)
+            x = Convolution1D(filters=nfilters, kernel_size=self.kernel_size, padding="same")(x)
+            x = BatchNormalization()(x)
+            x = Dropout(rate=self.dropout_rate)(x)
+            x = Activation(self.activation)(x)
+            x = MaxPooling1D(pool_size=2, padding="same")(x)
+
+        # Save shape for reshaping later
+        shape_before_flatten = K.int_shape(x)[1:]  # (timesteps, channels)
+
+        # Bottleneck
+        x = Flatten()(x)
+        x = Dense(self.encoding_dim, activation=self.activation, name="encoded")(x)
+        x = Dense(np.prod(shape_before_flatten), activation=self.activation)(x)
+        x = Reshape(shape_before_flatten)(x)
+
+        # Decoding path (mirror)
+        for filters in reversed(filter_list):
             layersize *= 2
-            nfilters = int(nfilters // 2)
-            LGR.info(f"input layer size: {layersize}")
-            self.model.add(
-                Convolution1D(
-                    filters=self.num_filters,
-                    kernel_size=self.kernel_size,
-                    padding="same",
-                )
-            )
-            self.model.add(BatchNormalization())
-            self.model.add(Dropout(rate=self.dropout_rate))
-            self.model.add(Activation(self.activation))
+            x = UpSampling1D(size=2)(x)
+            x = Convolution1D(filters=filters, kernel_size=self.kernel_size, padding="same")(x)
+            x = BatchNormalization()(x)
+            x = Dropout(rate=self.dropout_rate)(x)
+            x = Activation(self.activation)(x)
+
+        # Final upsampling (to match initial maxpool)
+        x = UpSampling1D(size=2)(x)
+        x = Convolution1D(filters=self.inputsize, kernel_size=self.kernel_size, padding="same")(x)
+
+        output_layer = x
+        self.model = Model(inputs=input_layer, outputs=output_layer)
+        self.model.compile(optimizer="adam", loss="mse")
 
-        # make the intermediate encoding layers
-        for i in range(1, self.num_layers - 1):
-            LGR.info(f"input layer size: {layersize}")
-            self.model.add(
-                Convolution1D(
-                    filters=self.num_filters,
-                    kernel_size=self.kernel_size,
-                    padding="same",
-                )
-            )
-            self.model.add(BatchNormalization())
-            self.model.add(Dropout(rate=self.dropout_rate))
-            self.model.add(Activation(self.activation))
-            self.model.add(MaxPooling1D(2, padding="same"))
-            layersize = int(layersize // 2)
 
-        # make the encoding layer
-        LGR.info(f"input layer size: {layersize}")
-        self.model.add(
-            Convolution1D(filters=self.num_filters, kernel_size=self.kernel_size, padding="same")
+class CRNNDLFilter(DeepLearningFilter):
+    def __init__(
+        self, encoding_dim=10, num_filters=10, kernel_size=5, dilation_rate=1, *args, **kwargs
+    ):
+        self.num_filters = num_filters
+        self.kernel_size = kernel_size
+        self.dilation_rate = dilation_rate
+        self.encoding_dim = encoding_dim
+        self.infodict["nettype"] = "cnn"
+        self.infodict["num_filters"] = self.num_filters
+        self.infodict["kernel_size"] = self.kernel_size
+        self.infodict["encoding_dim"] = self.encoding_dim
+        super(CRNNDLFilter, self).__init__(*args, **kwargs)
+
+    def getname(self):
+        self.modelname = "_".join(
+            [
+                "model",
+                "crnn",
+                "w" + str(self.window_size).zfill(3),
+                "en" + str(self.encoding_dim).zfill(3),
+                "fn" + str(self.num_filters).zfill(2),
+                "fl" + str(self.kernel_size).zfill(2),
+                "e" + str(self.num_epochs).zfill(3),
+                "t" + str(self.excludethresh),
+                "ct" + str(self.corrthresh),
+                "s" + str(self.step),
+                self.activation,
+            ]
         )
-        self.model.add(BatchNormalization())
-        self.model.add(Dropout(rate=self.dropout_rate))
-        self.model.add(Activation(self.activation))
+        if self.usebadpts:
+            self.modelname += "_usebadpts"
+        if self.excludebysubject:
+            self.modelname += "_excludebysubject"
+        if self.namesuffix is not None:
+            self.modelname += "_" + self.namesuffix
+        self.modelpath = os.path.join(self.modelroot, self.modelname)
 
-        # make the intermediate decoding layers
-        for i in range(1, self.num_layers):
-            self.model.add(UpSampling1D(2))
-            layersize = layersize * 2
-            LGR.info(f"input layer size: {layersize}")
-            self.model.add(
-                Convolution1D(
-                    filters=self.num_filters,
-                    kernel_size=self.kernel_size,
-                    padding="same",
-                )
-            )
-            self.model.add(BatchNormalization())
-            self.model.add(Dropout(rate=self.dropout_rate))
-            self.model.add(Activation(self.activation))
+        try:
+            os.makedirs(self.modelpath)
+        except OSError:
+            pass
 
-        # make the output layer
-        LGR.info(f"input layer size: {layersize}")
-        self.model.add(
-            Convolution1D(filters=self.inputsize, kernel_size=self.kernel_size, padding="same")
+    def makenet(self):
+        input_layer = Input(shape=(self.window_size, self.inputsize))
+        x = input_layer
+
+        # Convolutional front-end: feature extraction
+        x = Convolution1D(filters=self.num_filters, kernel_size=self.kernel_size, padding="same")(
+            x
         )
+        x = BatchNormalization()(x)
+        x = Dropout(rate=self.dropout_rate)(x)
+        x = Activation(self.activation)(x)
+
+        x = Convolution1D(
+            filters=self.num_filters * 2, kernel_size=self.kernel_size, padding="same"
+        )(x)
+        x = BatchNormalization()(x)
+        x = Dropout(rate=self.dropout_rate)(x)
+        x = Activation(self.activation)(x)
+
+        # Recurrent layer: temporal modeling
+        x = Bidirectional(LSTM(units=self.encoding_dim, return_sequences=True))(x)
+
+        # Output mapping to inputsize channels
+        output_layer = Dense(self.inputsize)(x)
+
+        # Model definition
+        self.model = Model(inputs=input_layer, outputs=output_layer)
         self.model.compile(optimizer="adam", loss="mse")
 
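To make the Flatten/Dense/Reshape bottleneck in the rewritten `ConvAutoencoderDLFilter.makenet` concrete, here is a shape trace under assumed hyperparameters (window_size=64, inputsize=1, num_filters=10, encoding_dim=10); the arithmetic mirrors the loops above:

```python
import numpy as np

# assumed hyperparameters for this trace
window_size, inputsize, num_filters, encoding_dim = 64, 1, 10, 10

length, channels = window_size, num_filters  # after the initial conv block
length //= 2                                 # initial MaxPooling1D: (64, 10) -> (32, 10)

filter_list = []
for _ in range(3):                           # encoding path
    length = int(np.ceil(length / 2))
    channels *= 2
    filter_list.append(channels)
print((length, channels))                    # (4, 80) == shape_before_flatten

flat = length * channels                     # Flatten: 320
# Dense(encoding_dim) -> (10,), Dense(flat) -> (320,), Reshape -> (4, 80)

for channels in reversed(filter_list):       # decoding path, one UpSampling1D(2) each
    length *= 2
print((length, channels))                    # (32, 20)
# final UpSampling1D + Convolution1D(inputsize): (64, 1) -- back to the input shape
```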
 
@@ -832,13 +886,14 @@ class LSTMDLFilter(DeepLearningFilter):
             [
                 "model",
                 "lstm",
-                "w" + str(self.window_size),
-                "l" + str(self.num_layers),
+                "w" + str(self.window_size).zfill(3),
+                "l" + str(self.num_layers).zfill(2),
                 "nu" + str(self.num_units),
                 "d" + str(self.dropout_rate),
                 "rd" + str(self.dropout_rate),
-                "e" + str(self.num_epochs),
+                "e" + str(self.num_epochs).zfill(3),
                 "t" + str(self.excludethresh),
+                "ct" + str(self.corrthresh),
                 "s" + str(self.step),
             ]
         )
@@ -890,15 +945,16 @@ class HybridDLFilter(DeepLearningFilter):
             [
                 "model",
                 "hybrid",
-                "w" + str(self.window_size),
-                "l" + str(self.num_layers),
-                "fn" + str(self.num_filters),
-                "fl" + str(self.kernel_size),
+                "w" + str(self.window_size).zfill(3),
+                "l" + str(self.num_layers).zfill(2),
+                "fn" + str(self.num_filters).zfill(2),
+                "fl" + str(self.kernel_size).zfill(2),
                 "nu" + str(self.num_units),
                 "d" + str(self.dropout_rate),
                 "rd" + str(self.dropout_rate),
-                "e" + str(self.num_epochs),
+                "e" + str(self.num_epochs).zfill(3),
                 "t" + str(self.excludethresh),
+                "ct" + str(self.corrthresh),
                 "s" + str(self.step),
                 self.activation,
             ]
@@ -1042,43 +1098,39 @@ def targettoinput(name, targetfrag="xyz", inputfrag="abc"):
     return name.replace(targetfrag, inputfrag)
 
 
-def getmatchedfiles(searchstring, usebadpts=False, targetfrag="xyz", inputfrag="abc"):
+def getmatchedtcs(searchstring, usebadpts=False, targetfrag="xyz", inputfrag="abc", debug=False):
     # list all of the target files
     fromfile = sorted(glob.glob(searchstring))
-    LGR.debug(f"searchstring: {searchstring} -> {fromfile}")
+    if debug:
+        print(f"searchstring: {searchstring} -> {fromfile}")
 
-    # make sure all files exist
+    # make sure all timecourses exist
+    # we need cardiacfromfmri_25.0Hz as x, normpleth as y, and perhaps badpts
     matchedfilelist = []
     for targetname in fromfile:
-        if os.path.isfile(targettoinput(targetname, targetfrag=targetfrag, inputfrag=inputfrag)):
-            if usebadpts:
-                if os.path.isfile(
-                    tobadpts(targetname.replace("alignedpleth", "pleth"))
-                ) and os.path.isfile(
-                    tobadpts(
-                        targettoinput(
-                            targetname,
-                            targetfrag=targetfrag,
-                            inputfrag=inputfrag,
-                        )
-                    )
-                ):
-                    matchedfilelist.append(targetname)
-                    LGR.debug(matchedfilelist[-1])
-            else:
-                matchedfilelist.append(targetname)
-                LGR.debug(matchedfilelist[-1])
-    if usebadpts:
-        LGR.info(f"{len(matchedfilelist)} runs pass all 4 files present check")
-    else:
-        LGR.info(f"{len(matchedfilelist)} runs pass both files present check")
+        infofile = targetname.replace("_desc-stdrescardfromfmri_timeseries", "_info")
+        if os.path.isfile(infofile):
+            matchedfilelist.append(targetname)
+            print(f"{targetname} is complete")
+            LGR.debug(matchedfilelist[-1])
+        else:
+            print(f"{targetname} is incomplete")
+    print(f"found {len(matchedfilelist)} matched files")
 
     # find out how long the files are
-    tempy = np.loadtxt(matchedfilelist[0])
-    tempx = np.loadtxt(
-        targettoinput(matchedfilelist[0], targetfrag=targetfrag, inputfrag=inputfrag)
+    (
+        samplerate,
+        starttime,
+        columns,
+        inputarray,
+        compression,
+        columnsource,
+    ) = tide_io.readbidstsv(
+        matchedfilelist[0],
+        colspec="cardiacfromfmri_25.0Hz,normpleth",
     )
-    tclen = np.min([tempx.shape[0], tempy.shape[0]])
+    print(f"{inputarray.shape=}")
+    tclen = inputarray.shape[1]
     LGR.info(f"tclen set to {tclen}")
     return matchedfilelist, tclen
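Training data now comes from BIDS-style physiology outputs (a `*_desc-stdrescardfromfmri_timeseries` JSON sidecar with its tsv data, plus a companion `_info` JSON carrying quality metrics) rather than paired plain-text files. A usage sketch of the reader call the new `getmatchedtcs` relies on; the file name is a placeholder:

```python
import rapidtide.io as tide_io

# placeholder path to one run's BIDS timeseries sidecar
thefile = "sub-01_desc-stdrescardfromfmri_timeseries.json"

(
    samplerate,    # sampling rate of the selected columns, in Hz
    starttime,     # start time offset, in seconds
    columns,       # names of the selected columns
    inputarray,    # data array, shape (ncolumns, npoints)
    compression,   # compression of the backing tsv file
    columnsource,  # where the column names came from
) = tide_io.readbidstsv(
    thefile,
    colspec="cardiacfromfmri_25.0Hz,normpleth",
)

tempx = inputarray[0, :]  # cardiac waveform estimated from fMRI (network input)
tempy = inputarray[1, :]  # normalized plethysmogram (network target)
```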
 
@@ -1091,8 +1143,10 @@ def readindata(
     usebadpts=False,
     startskip=0,
     endskip=0,
+    corrthresh=0.5,
     readlim=None,
     readskip=None,
+    debug=False,
 ):
     LGR.info(
         "readindata called with usebadpts, startskip, endskip, readlim, readskip, targetfrag, inputfrag = "
@@ -1114,20 +1168,36 @@ def readindata(
     # now read the data in
     count = 0
     LGR.info("checking data")
+    lowcorrfiles = []
     nanfiles = []
     shortfiles = []
     strangemagfiles = []
     for i in range(readskip, readskip + s):
+        lowcorrfound = False
         nanfound = False
         LGR.info(f"processing {matchedfilelist[i]}")
-        tempy = np.loadtxt(matchedfilelist[i])
-        tempx = np.loadtxt(
-            targettoinput(
-                matchedfilelist[i],
-                targetfrag=targetfrag,
-                inputfrag=inputfrag,
-            )
+
+        # read the info dict first
+        infodict = tide_io.readdictfromjson(
+            matchedfilelist[i].replace("_desc-stdrescardfromfmri_timeseries", "_info")
+        )
+        if infodict["corrcoeff_raw2pleth"] < corrthresh:
+            lowcorrfound = True
+            lowcorrfiles.append(matchedfilelist[i])
+        (
+            samplerate,
+            starttime,
+            columns,
+            inputarray,
+            compression,
+            columnsource,
+        ) = tide_io.readbidstsv(
+            matchedfilelist[i],
+            colspec="cardiacfromfmri_25.0Hz,normpleth",
         )
+        tempy = inputarray[1, :]
+        tempx = inputarray[0, :]
+
         if np.any(np.isnan(tempy)):
             LGR.info(f"NaN found in file {matchedfilelist[i]} - discarding")
             nanfound = True
@@ -1141,23 +1211,23 @@ def readindata(
             nanfiles.append(nan_fname)
         strangefound = False
         if not (0.5 < np.std(tempx) < 20.0):
-            strange_fname = targettoinput(
-                matchedfilelist[i], targetfrag=targetfrag, inputfrag=inputfrag
+            strange_fname = matchedfilelist[i]
+            LGR.info(
+                f"file {strange_fname} has an extreme cardiacfromfmri standard deviation - discarding"
             )
-            LGR.info(f"file {strange_fname} has an extreme standard deviation - discarding")
             strangefound = True
             strangemagfiles.append(strange_fname)
         if not (0.5 < np.std(tempy) < 20.0):
-            LGR.info(f"file {matchedfilelist[i]} has an extreme standard deviation - discarding")
+            LGR.info(
+                f"file {matchedfilelist[i]} has an extreme normpleth standard deviation - discarding"
+            )
             strangefound = True
             strangemagfiles.append(matchedfilelist[i])
         shortfound = False
         ntempx = tempx.shape[0]
         ntempy = tempy.shape[0]
         if ntempx < tclen:
-            short_fname = targettoinput(
-                matchedfilelist[i], targetfrag=targetfrag, inputfrag=inputfrag
-            )
+            short_fname = matchedfilelist[i]
             LGR.info(f"file {short_fname} is short - discarding")
             shortfound = True
             shortfiles.append(short_fname)
@@ -1171,26 +1241,31 @@ def readindata(
             and (not nanfound)
             and (not shortfound)
             and (not strangefound)
+            and (not lowcorrfound)
         ):
             x1[:tclen, count] = tempx[:tclen]
             y1[:tclen, count] = tempy[:tclen]
             names.append(matchedfilelist[i])
+            if debug:
+                print(f"{matchedfilelist[i]} included:")
             if usebadpts:
-                tempbad1 = np.loadtxt(
-                    tobadpts(matchedfilelist[i].replace("alignedpleth", "pleth"))
-                )
-                tempbad2 = np.loadtxt(
-                    tobadpts(
-                        targettoinput(
-                            matchedfilelist[i],
-                            targetfrag=targetfrag,
-                            inputfrag=inputfrag,
-                        )
-                    )
-                )
-                bad1[:tclen, count] = 1.0 - (1.0 - tempbad1[:tclen]) * (1.0 - tempbad2[:tclen])
+                bad1[:tclen, count] = inputarray[2, :]
             count += 1
+        else:
+            print(f"{matchedfilelist[i]} excluded:")
+            if ntempx < tclen:
+                print("\tx data too short")
+            if ntempy < tclen:
+                print("\ty data too short")
+            print(f"\t{nanfound=}")
+            print(f"\t{shortfound=}")
+            print(f"\t{strangefound=}")
+            print(f"\t{lowcorrfound=}")
     LGR.info(f"{count} runs pass file length check")
+    if len(lowcorrfiles) > 0:
+        LGR.info("files with low raw/pleth correlations:")
+        for thefile in lowcorrfiles:
+            LGR.info(f"\t{thefile}")
     if len(nanfiles) > 0:
         LGR.info("files with NaNs:")
         for thefile in nanfiles:
@@ -1204,6 +1279,7 @@ def readindata(
         for thefile in strangemagfiles:
             LGR.info(f"\t{thefile}")
 
+    print(f"training set contains {count} runs of length {tclen}")
     if usebadpts:
         return (
             x1[startskip:-endskip, :count],
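The new exclusion criterion in `readindata` screens runs by the correlation between the raw fMRI-derived cardiac waveform and the plethysmogram, stored as `corrcoeff_raw2pleth` in each run's `_info` JSON. The same screen in isolation (file name is a placeholder):

```python
import rapidtide.io as tide_io

corrthresh = 0.5  # default from the new readindata()/prep() signatures

timeseriesfile = "sub-01_desc-stdrescardfromfmri_timeseries.json"  # placeholder
infofile = timeseriesfile.replace("_desc-stdrescardfromfmri_timeseries", "_info")

infodict = tide_io.readdictfromjson(infofile)
lowcorrfound = infodict["corrcoeff_raw2pleth"] < corrthresh
if lowcorrfound:
    print(f"{timeseriesfile} excluded: raw/pleth correlation below {corrthresh}")
```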
@@ -1228,13 +1304,15 @@ def prep(
     endskip=200,
     excludebysubject=True,
     thesuffix="sliceres",
-    thedatadir="/data1/frederic/test/output",
+    thedatadir="/data/frederic/physioconn/output_2025",
     inputfrag="abc",
     targetfrag="xyz",
+    corrthresh=0.5,
     dofft=False,
     readlim=None,
     readskip=None,
     countlim=None,
+    debug=False,
 ):
     """
     prep - reads in training and validation data for 1D filter
@@ -1252,6 +1330,7 @@ def prep(
     thedatadir
     inputfrag
     targetfrag
+    corrthresh
     dofft
     readlim
     readskip
@@ -1263,21 +1342,25 @@ def prep(
 
     """
 
-    searchstring = os.path.join(thedatadir, "*_" + targetfrag + "_" + thesuffix + ".txt")
+    searchstring = os.path.join(thedatadir, "*", "*_desc-stdrescardfromfmri_timeseries.json")
 
     # find matched files
-    matchedfilelist, tclen = getmatchedfiles(
+    matchedfilelist, tclen = getmatchedtcs(
         searchstring,
         usebadpts=usebadpts,
         targetfrag=targetfrag,
        inputfrag=inputfrag,
+        debug=debug,
     )
+    # print("matchedfilelist", matchedfilelist)
+    print("tclen", tclen)
 
     # read in the data from the matched files
     if usebadpts:
         x, y, names, bad = readindata(
             matchedfilelist,
             tclen,
+            corrthresh=corrthresh,
             targetfrag=targetfrag,
             inputfrag=inputfrag,
             usebadpts=True,
@@ -1290,6 +1373,7 @@ def prep(
         x, y, names = readindata(
             matchedfilelist,
             tclen,
+            corrthresh=corrthresh,
             targetfrag=targetfrag,
             inputfrag=inputfrag,
             startskip=startskip,
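Taken together, the changes in this file mean a typical consumer loads a bundled `.keras` model and applies it to a cardiac timecourse roughly as sketched below. This is a hypothetical invocation: `CNNDLFilter` takes additional constructor arguments not visible in this diff, and the model path is a placeholder.

```python
import numpy as np

import rapidtide.dlfilter as tide_dlfilt

# hypothetical setup -- constructor arguments beyond those shown in this
# diff are left at their defaults
thefilter = tide_dlfilt.CNNDLFilter(
    modelpath="rapidtide/data/models",  # placeholder location of bundled models
    corrthresh=0.5,                     # new training-data screen in this release
)
thefilter.loadmodel("model_revised_tf2")  # tries model.keras, falls back to model.h5

raw_cardiac_waveform = np.zeros(5000, dtype=np.float64)  # stand-in 25 Hz timecourse
filtered = thefilter.apply(raw_cardiac_waveform)  # windowed inference via predict_model
```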