rapidtide 3.0.5__py3-none-any.whl → 3.0.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (45)
  1. rapidtide/RapidtideDataset.py +17 -0
  2. rapidtide/_version.py +3 -3
  3. rapidtide/calccoherence.py +51 -73
  4. rapidtide/calcnullsimfunc.py +65 -111
  5. rapidtide/calcsimfunc.py +73 -91
  6. rapidtide/correlate.py +25 -6
  7. rapidtide/data/examples/src/testatlasaverage +22 -0
  8. rapidtide/data/examples/src/testfmri +16 -1
  9. rapidtide/data/examples/src/testhappy +57 -60
  10. rapidtide/data/examples/src/testsimdata +45 -28
  11. rapidtide/genericmultiproc.py +122 -0
  12. rapidtide/happy_supportfuncs.py +608 -107
  13. rapidtide/linfitfiltpass.py +8 -1
  14. rapidtide/makelaggedtcs.py +49 -78
  15. rapidtide/multiproc.py +5 -17
  16. rapidtide/refineregressor.py +59 -81
  17. rapidtide/resample.py +24 -14
  18. rapidtide/tests/.coveragerc +9 -0
  19. rapidtide/tests/test_congrid.py +68 -79
  20. rapidtide/tests/test_externaltools.py +69 -0
  21. rapidtide/tests/test_fastresampler.py +1 -0
  22. rapidtide/tests/test_fullrunrapidtide_v2.py +1 -0
  23. rapidtide/tests/test_nullcorr.py +2 -5
  24. rapidtide/tests/test_parserfuncs.py +46 -15
  25. rapidtide/tests/test_zRapidtideDataset.py +2 -2
  26. rapidtide/voxelData.py +17 -3
  27. rapidtide/workflows/ccorrica.py +1 -2
  28. rapidtide/workflows/cleanregressor.py +3 -2
  29. rapidtide/workflows/happy.py +62 -3
  30. rapidtide/workflows/happy_parser.py +36 -0
  31. rapidtide/workflows/rapidtide.py +18 -13
  32. rapidtide/workflows/rapidtide_parser.py +8 -1
  33. rapidtide/workflows/regressfrommaps.py +0 -2
  34. rapidtide/workflows/showarbcorr.py +19 -6
  35. rapidtide/workflows/showxcorrx.py +4 -8
  36. rapidtide/workflows/simdata.py +149 -65
  37. {rapidtide-3.0.5.dist-info → rapidtide-3.0.7.dist-info}/METADATA +1 -1
  38. {rapidtide-3.0.5.dist-info → rapidtide-3.0.7.dist-info}/RECORD +42 -43
  39. {rapidtide-3.0.5.dist-info → rapidtide-3.0.7.dist-info}/WHEEL +1 -1
  40. rapidtide/DerivativeDelay.py +0 -209
  41. rapidtide/calcandfitcorrpairs.py +0 -262
  42. rapidtide/transformerdlfilter.py +0 -126
  43. {rapidtide-3.0.5.dist-info → rapidtide-3.0.7.dist-info}/entry_points.txt +0 -0
  44. {rapidtide-3.0.5.dist-info → rapidtide-3.0.7.dist-info}/licenses/LICENSE +0 -0
  45. {rapidtide-3.0.5.dist-info → rapidtide-3.0.7.dist-info}/top_level.txt +0 -0
@@ -1,262 +0,0 @@
1
- #!/usr/bin/env python
2
- # -*- coding: utf-8 -*-
3
- #
4
- # Copyright 2016-2025 Blaise Frederick
5
- #
6
- # Licensed under the Apache License, Version 2.0 (the "License");
7
- # you may not use this file except in compliance with the License.
8
- # You may obtain a copy of the License at
9
- #
10
- # http://www.apache.org/licenses/LICENSE-2.0
11
- #
12
- # Unless required by applicable law or agreed to in writing, software
13
- # distributed under the License is distributed on an "AS IS" BASIS,
14
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15
- # See the License for the specific language governing permissions and
16
- # limitations under the License.
17
- #
18
- #
19
- import gc
20
- import logging
21
- import warnings
22
-
23
- import numpy as np
24
- from tqdm import tqdm
25
-
26
- import rapidtide.correlate as tide_corr
27
- import rapidtide.fit as tide_fit
28
- import rapidtide.multiproc as tide_multiproc
29
-
30
- warnings.simplefilter(action="ignore", category=FutureWarning)
31
- LGR = logging.getLogger("GENERAL")
32
-
33
-
34
- def _procOneVoxelCorrelation(
35
- thedata,
36
- index,
37
- neighborindex,
38
- Fs,
39
- dofit=False,
40
- lagmin=-12.5,
41
- lagmax=12.5,
42
- widthmax=100.0,
43
- negsearch=15.0,
44
- possearch=15.0,
45
- padding=0,
46
- debug=False,
47
- ):
48
- tc1 = thedata[index, :]
49
- tc2 = thedata[neighborindex, :]
50
- if np.any(tc1) != 0.0 and np.any(tc2) != 0.0:
51
- thesimfunc = tide_corr.fastcorrelate(
52
- tc1,
53
- tc2,
54
- zeropadding=padding,
55
- usefft=True,
56
- debug=debug,
57
- )
58
- similarityfunclen = len(thesimfunc)
59
- similarityfuncorigin = similarityfunclen // 2 + 1
60
-
61
- negpoints = int(negsearch * Fs)
62
- pospoints = int(possearch * Fs)
63
- trimsimfunc = thesimfunc[
64
- similarityfuncorigin - negpoints : similarityfuncorigin + pospoints
65
- ]
66
- offset = 0.0
67
- trimtimeaxis = (
68
- (
69
- np.arange(0.0, similarityfunclen) * (1.0 / Fs)
70
- - ((similarityfunclen - 1) * (1.0 / Fs)) / 2.0
71
- )
72
- - offset
73
- )[similarityfuncorigin - negpoints : similarityfuncorigin + pospoints]
74
- if dofit:
75
- (
76
- maxindex,
77
- maxtime,
78
- maxcorr,
79
- maxsigma,
80
- maskval,
81
- failreason,
82
- peakstart,
83
- peakend,
84
- ) = tide_fit.simfuncpeakfit(
85
- trimsimfunc,
86
- trimtimeaxis,
87
- useguess=False,
88
- maxguess=0.0,
89
- displayplots=False,
90
- functype="correlation",
91
- peakfittype="gauss",
92
- searchfrac=0.5,
93
- lagmod=1000.0,
94
- enforcethresh=True,
95
- allowhighfitamps=False,
96
- lagmin=lagmin,
97
- lagmax=lagmax,
98
- absmaxsigma=1000.0,
99
- absminsigma=0.25,
100
- hardlimit=True,
101
- bipolar=False,
102
- lthreshval=0.0,
103
- uthreshval=1.0,
104
- zerooutbadfit=True,
105
- debug=False,
106
- )
107
- else:
108
- maxtime = trimtimeaxis[np.argmax(trimsimfunc)]
109
- maxcorr = np.max(trimsimfunc)
110
- maskval = 1
111
- failreason = 0
112
- if debug:
113
- print(f"{maxtime=}")
114
- print(f"{maxcorr=}")
115
- print(f"{maskval=}")
116
- print(f"{negsearch=}")
117
- print(f"{possearch=}")
118
- print(f"{Fs=}")
119
- print(f"{len(trimtimeaxis)=}")
120
- print(trimsimfunc, trimtimeaxis)
121
- return index, neighborindex, maxcorr, maxtime, maskval, failreason
122
- else:
123
- return index, neighborindex, 0.0, 0.0, 0, 0
124
-
125
-
126
- def correlationpass(
127
- fmridata,
128
- referencetc,
129
- theCorrelator,
130
- fmri_x,
131
- os_fmri_x,
132
- lagmininpts,
133
- lagmaxinpts,
134
- corrout,
135
- meanval,
136
- nprocs=1,
137
- alwaysmultiproc=False,
138
- oversampfactor=1,
139
- interptype="univariate",
140
- showprogressbar=True,
141
- chunksize=1000,
142
- rt_floatset=np.float64,
143
- rt_floattype="float64",
144
- ):
145
- """
146
-
147
- Parameters
148
- ----------
149
- fmridata
150
- referencetc - the reference regressor, already oversampled
151
- theCorrelator
152
- fmri_x
153
- os_fmri_x
154
- tr
155
- lagmininpts
156
- lagmaxinpts
157
- corrout
158
- meanval
159
- nprocs
160
- oversampfactor
161
- interptype
162
- showprogressbar
163
- chunksize
164
- rt_floatset
165
- rt_floattype
166
-
167
- Returns
168
- -------
169
-
170
- """
171
- inputshape = np.shape(fmridata)
172
- volumetotal = 0
173
- thetc = np.zeros(np.shape(os_fmri_x), dtype=rt_floattype)
174
- theglobalmaxlist = []
175
- if nprocs > 1 or alwaysmultiproc:
176
- # define the consumer function here so it inherits most of the arguments
177
- def correlation_consumer(inQ, outQ):
178
- while True:
179
- try:
180
- # get a new message
181
- val = inQ.get()
182
-
183
- # this is the 'TERM' signal
184
- if val is None:
185
- break
186
-
187
- # process and send the data
188
- outQ.put(
189
- _procOneVoxelCorrelation(
190
- val,
191
- idx1,
192
- idx2,
193
- Fs,
194
- dofit=False,
195
- lagmin=-12.5,
196
- lagmax=12.5,
197
- widthmax=100.0,
198
- negsearch=15.0,
199
- possearch=15.0,
200
- padding=0,
201
- debug=False,
202
- )
203
- )
204
-
205
- except Exception as e:
206
- print("error!", e)
207
- break
208
-
209
- data_out = tide_multiproc.run_multiproc(
210
- correlation_consumer,
211
- inputshape,
212
- None,
213
- nprocs=nprocs,
214
- showprogressbar=showprogressbar,
215
- chunksize=chunksize,
216
- )
217
-
218
- # unpack the data
219
- volumetotal = 0
220
- for voxel in data_out:
221
- maxcorr[voxel[0], voxel[1]] = voxel[2]
222
- maxtime[voxel[0], voxel[1]] = voxel[3]
223
- maskval[voxel[0], voxel[1]] = voxel[4]
224
- failreason[voxel[0], voxel[1]] = voxel[5]
225
- volumetotal += 1
226
- del data_out
227
- else:
228
- for vox in tqdm(
229
- range(0, inputshape[0]),
230
- desc="Voxel",
231
- disable=(not showprogressbar),
232
- ):
233
- (
234
- dummy,
235
- meanval[vox],
236
- corrout[vox, :],
237
- thecorrscale,
238
- theglobalmax,
239
- ) = _procOneVoxelCorrelation(
240
- vox,
241
- thetc,
242
- theCorrelator,
243
- fmri_x,
244
- fmridata[vox, :],
245
- os_fmri_x,
246
- oversampfactor=oversampfactor,
247
- interptype=interptype,
248
- rt_floatset=rt_floatset,
249
- rt_floattype=rt_floattype,
250
- )
251
- theglobalmaxlist.append(theglobalmax + 0)
252
- volumetotal += 1
253
- LGR.info(f"\nSimilarity function calculated on {volumetotal} voxels")
254
-
255
- # garbage collect
256
- uncollected = gc.collect()
257
- if uncollected != 0:
258
- LGR.info(f"garbage collected - unable to collect {uncollected} objects")
259
- else:
260
- LGR.info("garbage collected")
261
-
262
- return volumetotal, theglobalmaxlist, thecorrscale
@@ -1,126 +0,0 @@
1
- #!/usr/bin/env python
2
- # -*- coding: utf-8 -*-
3
- #
4
- # Copyright 2016-2025 Blaise Frederick
5
- #
6
- # Licensed under the Apache License, Version 2.0 (the "License");
7
- # you may not use this file except in compliance with the License.
8
- # You may obtain a copy of the License at
9
- #
10
- # http://www.apache.org/licenses/LICENSE-2.0
11
- #
12
- # Unless required by applicable law or agreed to in writing, software
13
- # distributed under the License is distributed on an "AS IS" BASIS,
14
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15
- # See the License for the specific language governing permissions and
16
- # limitations under the License.
17
- #
18
- #
19
- """This module contains all an alternate filter routine"""
20
- import tensorflow as tf
21
- from tensorflow.keras.layers import (
22
- Dense,
23
- Dropout,
24
- Input,
25
- LayerNormalization,
26
- MultiHeadAttention,
27
- )
28
- from tensorflow.keras.models import Model
29
-
30
-
31
- # Positional Encoding to add temporal information to the sequence
32
- class PositionalEncoding(tf.keras.layers.Layer):
33
- def __init__(self, sequence_len, d_model):
34
- super(PositionalEncoding, self).__init__()
35
- self.pos_encoding = self.positional_encoding(sequence_len, d_model)
36
-
37
- def get_angles(self, pos, i, d_model):
38
- angle_rates = 1 / tf.pow(10000, (2 * (i // 2)) / tf.cast(d_model, tf.float32))
39
- return pos * angle_rates
40
-
41
- def positional_encoding(self, sequence_len, d_model):
42
- angle_rads = self.get_angles(
43
- tf.range(sequence_len)[:, tf.newaxis], tf.range(d_model)[tf.newaxis, :], d_model
44
- )
45
-
46
- # Apply sin to even indices in the array; cos to odd indices
47
- sines = tf.sin(angle_rads[:, 0::2])
48
- cosines = tf.cos(angle_rads[:, 1::2])
49
-
50
- pos_encoding = tf.concat([sines, cosines], axis=-1)
51
- pos_encoding = pos_encoding[tf.newaxis, ...]
52
- return tf.cast(pos_encoding, tf.float32)
53
-
54
- def call(self, x):
55
- return x + self.pos_encoding[:, : tf.shape(x)[1], :]
56
-
57
-
58
- # Transformer block implementation
59
- class TransformerBlock(tf.keras.layers.Layer):
60
- def __init__(self, d_model, num_heads, ff_dim, rate=0.1):
61
- super(TransformerBlock, self).__init__()
62
- self.att = MultiHeadAttention(num_heads=num_heads, key_dim=d_model)
63
- self.ffn = tf.keras.Sequential(
64
- [
65
- Dense(ff_dim, activation="relu"), # Feed Forward Network
66
- Dense(d_model),
67
- ]
68
- )
69
- self.layernorm1 = LayerNormalization(epsilon=1e-6)
70
- self.layernorm2 = LayerNormalization(epsilon=1e-6)
71
- self.dropout1 = Dropout(rate)
72
- self.dropout2 = Dropout(rate)
73
-
74
- def call(self, x, training):
75
- attn_output = self.att(x, x) # Self-attention
76
- attn_output = self.dropout1(attn_output, training=training)
77
- out1 = self.layernorm1(x + attn_output) # Add & Norm
78
-
79
- ffn_output = self.ffn(out1) # Feed forward
80
- ffn_output = self.dropout2(ffn_output, training=training)
81
- return self.layernorm2(out1 + ffn_output) # Add & Norm
82
-
83
-
84
- # Building the Transformer-based Cardiac Waveform Filter model
85
- def build_transformer_model(
86
- input_shape, d_model=128, num_heads=4, ff_dim=512, num_layers=3, dropout_rate=0.1
87
- ):
88
- inputs = Input(shape=input_shape)
89
-
90
- # Positional Encoding
91
- x = PositionalEncoding(input_shape[0], d_model)(inputs)
92
-
93
- # Stack Transformer layers
94
- for _ in range(num_layers):
95
- x = TransformerBlock(d_model, num_heads, ff_dim, rate=dropout_rate)(x)
96
-
97
- # Output layer for waveform filtering (regression to the original signal)
98
- outputs = Dense(1, activation="linear")(x)
99
-
100
- model = Model(inputs=inputs, outputs=outputs)
101
- return model
102
-
103
-
104
- # Compile and train the model
105
- def compile_and_train_model(
106
- model, train_data, train_labels, val_data, val_labels, epochs=50, batch_size=32
107
- ):
108
- model.compile(optimizer="adam", loss="mse", metrics=["mae"])
109
- history = model.fit(
110
- train_data,
111
- train_labels,
112
- validation_data=(val_data, val_labels),
113
- epochs=epochs,
114
- batch_size=batch_size,
115
- )
116
- return history
117
-
118
-
119
- # Example usage:
120
- # Assuming train_data and train_labels are the time-series data of cardiac signals
121
- input_shape = (1000, 1) # For example, 1000 time points with 1 feature (cardiac waveform)
122
- transformer_model = build_transformer_model(input_shape)
123
- transformer_model.summary()
124
-
125
- # After this, you would use your train_data and val_data (cardiac waveforms) to train the model.
126
- # Example: compile_and_train_model(transformer_model, train_data, train_labels, val_data, val_labels)