modulo-vki 2.0.6__py3-none-any.whl → 2.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,342 +1,359 @@
1
- import numpy as np
2
- from scipy import signal
3
- from scipy.sparse.linalg import svds, eigsh
4
- from sklearn.decomposition import TruncatedSVD
5
- from scipy.linalg import eigh
6
- from sklearn.utils.extmath import randomized_svd
7
-
8
-
9
- def Bound_EXT(S, Ex, boundaries):
10
- """
11
- This function computes the extension of a signal for
12
- filtering purposes
13
-
14
- :param S: The Input signal
15
- :param Nf: The Size of the Kernel (must be an odd number!)
16
- :param boundaries: The type of extension:
17
- ‘reflect’ (d c b a | a b c d | d c b a) The input is extended by reflecting about the edge of the last pixel.
18
- ‘nearest’ (a a a a | a b c d | d d d d) The input is extended by replicating the last pixel.
19
- ‘wrap’ (a b c d | a b c d | a b c d) The input is extended by wrapping around to the opposite edge.
20
- ‘extrap’ Extrapolation The input is extended via linear extrapolation.
21
-
22
-
23
- """
24
- # We first perform a zero padding
25
- # Ex=int((Nf-1)/2) # Extension on each size
26
- size_Ext = 2 * Ex + len(S) # Compute the size of the extended signal
27
- S_extend = np.zeros((int(size_Ext))) # Initialize extended signal
28
- S_extend[Ex:int((size_Ext - Ex))] = S; # Assign the Signal on the zeroes
29
-
30
- if boundaries == "reflect":
31
- LEFT = np.flip(S[0:Ex]) # Prepare the reflection on the left
32
- RIGHT = np.flip(S[len(S) - Ex:len(S)]) # Prepare the reflectino on the right
33
- S_extend[0:Ex] = LEFT
34
- S_extend[len(S_extend) - Ex:len(S_extend)] = RIGHT
35
- elif boundaries == "nearest":
36
- LEFT = np.ones(Ex) * S[0] # Prepare the constant on the left
37
- RIGHT = np.ones(Ex) * S[len(S) - 1] # Prepare the constant on the Right
38
- S_extend[0:Ex] = LEFT
39
- S_extend[len(S_extend) - Ex:len(S_extend)] = RIGHT
40
- elif boundaries == "wrap":
41
- LEFT = S[len(S) - Ex:len(S)] # Wrap on the Left
42
- RIGHT = S[0:Ex] # Wrap on the Right
43
- S_extend[0:Ex] = LEFT
44
- S_extend[len(S_extend) - Ex:len(S_extend)] = RIGHT
45
- elif boundaries == "extrap":
46
- LEFT = np.ones(Ex) * S[0] # Prepare the constant on the left
47
- RIGHT = np.ones(Ex) * S[len(S) - 1] # Prepare the constant on the Right
48
- S_extend[0:Ex] = LEFT
49
- S_extend[len(S_extend) - Ex:len(S_extend)] = RIGHT
50
- print('Not active yet, replaced by nearest')
51
- return S_extend
52
-
53
-
54
- def conv_m(K, h, Ex, boundaries):
55
- """
56
- This function computes the 2D convolution by perfoming 2 sets of 1D convolutions.
57
- Moreover, we here use the fft with an appropriate extension
58
- that avoids the periodicity condition.
59
-
60
- :param K: Matrix to be filtered
61
- :param h: The 1D Kernel of the filter
62
- :param boundaries: The type of extension:
63
- ‘reflect’ (d c b a | a b c d | d c b a) The input is extended by reflecting about the edge of the last pixel.
64
- ‘nearest’ (a a a a | a b c d | d d d d) The input is extended by replicating the last pixel.
65
- ‘wrap’ (a b c d | a b c d | a b c d) The input is extended by wrapping around to the opposite edge.
66
- ‘extrap’ Extrapolation The input is extended via linear extrapolation.
67
- """
68
- # Filter along the raws
69
- n_t = np.shape(K)[0]
70
- # Ex=int(n_t/2)
71
- K_F1 = np.zeros(np.shape(K))
72
- K_F2 = np.zeros(np.shape(K))
73
- # K_F=np.zeros(np.shape(K))
74
- for k in range(0, n_t):
75
- S = K[:, k]
76
- S_Ext = Bound_EXT(S, Ex, boundaries)
77
- S_Filt = signal.fftconvolve(S_Ext, h, mode='valid')
78
- # Compute where to take the signal
79
- Ex1 = int((len(S_Filt) - len(S)) / 2)
80
- # K_F1[k,:]=S_Filt[Ex:(len(S_Filt)-Ex)]
81
- K_F1[:, k] = S_Filt[Ex1:(len(S_Filt) - Ex1)]
82
- for k in range(0, n_t):
83
- S = K_F1[k, :]
84
- S_Ext = Bound_EXT(S, Ex, boundaries)
85
- S_Filt = signal.fftconvolve(S_Ext, h, mode='valid')
86
- # Compute where to take the signal
87
- Ex1 = int((len(S_Filt) - len(S)) / 2)
88
- # K_F2[:,k]=S_Filt[Ex:(len(S_Filt)-Ex)]
89
- K_F2[k, :] = S_Filt[Ex1:(len(S_Filt) - Ex1)]
90
- # K_F=K_F1+K_F2
91
- return K_F2
92
-
93
-
94
- def _loop_gemm(a, b, c=None, chunksize=100):
95
- size_i = a.shape[0]
96
- size_zip = a.shape[1]
97
-
98
- size_j = b.shape[1]
99
- size_alt_zip = b.shape[0]
100
-
101
- if size_zip != size_alt_zip:
102
- ValueError("Loop GEMM zip index is not of the same size for both tensors")
103
-
104
- if c is None:
105
- c = np.zeros((size_i, size_j))
106
-
107
- istart = 0
108
- for i in range(int(np.ceil(size_i / float(chunksize)))):
109
-
110
- left_slice = slice(istart, istart + chunksize)
111
- left_view = a[left_slice]
112
-
113
- jstart = 0
114
- for j in range(int(np.ceil(size_j / float(chunksize)))):
115
- right_slice = slice(jstart, jstart + chunksize)
116
- right_view = b[:, right_slice]
117
-
118
- c[left_slice, right_slice] = np.dot(left_view, right_view)
119
- jstart += chunksize
120
-
121
- istart += chunksize
122
-
123
- return c
124
-
125
-
126
- def svds_RND(K, R_K):
127
- """
128
- Quick and dirty implementation of randomized SVD
129
- for computing eigenvalues of K. We follow same input/output structure
130
- as for the svds in scipy
131
- """
132
- svd = TruncatedSVD(R_K)
133
- svd.fit_transform(K)
134
- Psi_P = svd.components_.T
135
- Lambda_P = svd.singular_values_
136
- return Psi_P, Lambda_P
137
-
138
-
139
- def overlap(array, len_chunk, len_sep=1):
140
- """
141
- Returns a matrix of all full overlapping chunks of the input `array`, with a chunk
142
- length of `len_chunk` and a separation length of `len_sep`. Begins with the first full
143
- chunk in the array.
144
- This function is taken from https://stackoverflow.com/questions/38163366/split-list-into-separate-but-overlapping-chunks
145
- it is designed to split an array with certain overlaps
146
-
147
- :param array:
148
- :param len_chunk:
149
- :param len_sep:
150
- :return array_matrix:
151
- """
152
-
153
- n_arrays = int(np.ceil((array.size - len_chunk + 1) / len_sep))
154
-
155
- array_matrix = np.tile(array, n_arrays).reshape(n_arrays, -1)
156
-
157
- columns = np.array(((len_sep * np.arange(0, n_arrays)).reshape(n_arrays, -1) + np.tile(
158
- np.arange(0, len_chunk), n_arrays).reshape(n_arrays, -1)), dtype=np.intp)
159
-
160
- rows = np.array((np.arange(n_arrays).reshape(n_arrays, -1) + np.tile(
161
- np.zeros(len_chunk), n_arrays).reshape(n_arrays, -1)), dtype=np.intp)
162
-
163
- return array_matrix[rows, columns]
164
-
165
-
166
- # def switch_svds_K(A, n_modes, svd_solver):
167
- # """
168
- # Utility function to switch between different svd solvers
169
- # for the diagonalization of K. Being K symmetric and positive definite,
170
- # Its eigenvalue decomposition is equivalent to an SVD.
171
- # Thus we are using SVD solvers as eig solvers here.
172
- # The options are the same used for switch_svds (which goes for the SVD of D)
173
-
174
- # --------------------------------------------------------------------------------------------------------------------
175
- # Parameters:
176
- # -----------
177
- # :param A: np.array,
178
- # Array of which compute the SVD
179
- # :param n_modes: int,
180
- # Number of modes to be computed. Note that if the `svd_numpy` method is chosen, the full matrix are
181
- # computed, but then only the first n_modes are returned. Thus, it is not more computationally efficient.
182
- # :param svd_solver: str,
183
- # can be:
184
- # 'svd_numpy'.
185
- # This uses np.linalg.svd.
186
- # It is the most accurate but the slowest and most expensive.
187
- # It computes all the modes.
188
-
189
- # 'svd_sklearn_truncated'
190
- # This uses the TruncatedSVD from scikitlearn. This uses either
191
- # svds from scipy or randomized from sklearn depending on the size
192
- # of the matrix. These are the two remaining options.
193
- # To the merit of chosing this is to let sklearn take the decision as to
194
- # what to use.
195
-
196
- # 'svd_scipy_sparse'
197
- # This uses the svds from scipy.
198
-
199
- # 'svd_sklearn_randomized',
200
- # This uses the randomized from sklearn.
201
- # Returns
202
- # --------
203
- # :return Psi_P, np.array (N_S x n_modes)
204
- # :return Sigma_P, np.array (n_modes)
205
- # """
206
- # if svd_solver.lower() == 'svd_sklearn_truncated':
207
- # svd = TruncatedSVD(n_modes)
208
- # svd.fit_transform(A)
209
- # Psi_P = svd.components_.T
210
- # Lambda_P = svd.singular_values_
211
- # Sigma_P = np.sqrt(Lambda_P)
212
- # elif svd_solver.lower() == 'svd_numpy':
213
- # Psi_P, Lambda_P, _ = np.linalg.svd(A)
214
- # Sigma_P = np.sqrt(Lambda_P)
215
- # Psi_P = Psi_P[:, :n_modes]
216
- # Sigma_P = Sigma_P[:n_modes]
217
- # elif svd_solver.lower() == 'svd_sklearn_randomized':
218
- # Psi_P, Lambda_P = randomized_svd(A, n_modes)
219
- # Sigma_P = np.sqrt(Lambda_P)
220
- # elif svd_solver.lower() == 'svd_scipy_sparse':
221
- # Psi_P, Lambda_P, _ = svds(A, k=n_modes)
222
- # Sigma_P = np.sqrt(Lambda_P)
223
-
224
- # return Psi_P, Sigma_P
225
-
226
-
227
- def switch_svds(A, n_modes, svd_solver='svd_sklearn_truncated'):
228
- """
229
- Utility function to switch between different svd solvers
230
- for the SVD of the snapshot matrix. This is a true SVD solver.
231
-
232
- --------------------------------------------------------------------------------------------------------------------
233
- Parameters:
234
- -----------
235
- :param A: np.array,
236
- Array of which compute the SVD
237
- :param n_modes: int,
238
- Number of modes to be computed. Note that if the `svd_numpy` method is chosen, the full matrix are
239
- computed, but then only the first n_modes are returned. Thus, it is not more computationally efficient.
240
- :param svd_solver: str,
241
- can be:
242
- 'svd_numpy'.
243
- This uses np.linalg.svd.
244
- It is the most accurate but the slowest and most expensive.
245
- It computes all the modes.
246
-
247
- 'svd_sklearn_truncated'
248
- This uses the TruncatedSVD from scikitlearn. This uses either
249
- svds from scipy or randomized from sklearn depending on the size
250
- of the matrix. These are the two remaining options.
251
- The merit of chosing this is to let sklearn take the decision as to
252
- what to use. One prefers to force the usage of any of those with the other two
253
- options
254
-
255
- 'svd_scipy_sparse'
256
- This uses the svds from scipy.
257
-
258
- 'svd_sklearn_randomized',
259
- This uses the randomized from sklearn.
260
-
261
- Returns
262
- --------
263
- :return Psi_P, np.array (N_S x n_modes)
264
- :return Sigma_P, np.array (n_modes)
265
- """
266
- if svd_solver.lower() == 'svd_sklearn_truncated':
267
- svd = TruncatedSVD(n_modes)
268
- X_transformed = svd.fit_transform(A)
269
- Phi_P = X_transformed / svd.singular_values_
270
- Psi_P = svd.components_.T
271
- Sigma_P = svd.singular_values_
272
- elif svd_solver.lower() == 'svd_numpy':
273
- Phi_P, Sigma_P, Psi_P = np.linalg.svd(A)
274
- Phi_P = Phi_P[:, 0:n_modes]
275
- Psi_P = Psi_P.T[:, 0:n_modes]
276
- Sigma_P = Sigma_P[0:n_modes]
277
- elif svd_solver.lower() == 'svd_sklearn_randomized':
278
- Phi_P, Sigma_P, Psi_P = randomized_svd(A, n_modes)
279
- Psi_P = Psi_P.T
280
- elif svd_solver.lower() == 'svd_scipy_sparse':
281
- Phi_P, Sigma_P, Psi_P = svds(A, k=n_modes)
282
- Psi_P = Psi_P.T
283
- # It turns out that this does not rank them in decreasing order.
284
- # Hence we do it manually:
285
- idx = np.flip(np.argsort(Sigma_P))
286
- Sigma_P = Sigma_P[idx]
287
- Phi_P = Phi_P[:, idx]
288
- Psi_P = Psi_P[:, idx]
289
-
290
- return Phi_P, Psi_P, Sigma_P
291
-
292
-
293
- def switch_eigs(A, n_modes, eig_solver):
294
- """
295
- Utility function to switch between different eig solvers in a consistent way across different
296
- methods of the package.
297
- --------------------------------------------------------------------------------------------------------------------
298
- Parameters:
299
- -----------
300
- :param A: np.array,
301
- Array of which compute the eigenvalues
302
- :param n_modes: int,
303
- Number of modes to be computed. Note that if the `svd_numpy` method is chosen, the full matrix are
304
- computed, but then only the first n_modes are returned. Thus, it is not more computationally efficient.
305
- :param eig_solver: str,
306
- can be:
307
- 'svd_sklearn_randomized',
308
- This uses svd truncated approach, which picks either randomized svd or scipy svds.
309
- By default, it should pick mostly the first.
310
-
311
- 'eigsh' from scipy sparse. This is a compromise between the previous and the following.
312
-
313
- 'eigh' from scipy lin alg. This is the most precise, although a bit more expensive
314
-
315
- Returns
316
- --------
317
- :return Psi_P, np.array (N_S x n_modes)
318
- :return Sigma_P, np.array (n_modes)
319
- """
320
- if eig_solver.lower() == 'svd_sklearn_randomized':
321
- Psi_P, Lambda_P = svds_RND(A, n_modes)
322
- elif eig_solver.lower() == 'eigh':
323
- n = np.shape(A)[0]
324
- Lambda_P, Psi_P = eigh(A, subset_by_index=[n - n_modes, n - 1])
325
- # It turns out that this does not rank them in decreasing order.
326
- # Hence we do it manually:
327
- idx = np.flip(np.argsort(Lambda_P))
328
- Lambda_P = Lambda_P[idx]
329
- Psi_P = Psi_P[:, idx]
330
- elif eig_solver.lower() == 'eigsh':
331
- Lambda_P, Psi_P = eigsh(A, k=n_modes)
332
- # It turns out that this does not rank them in decreasing order.
333
- # Hence we do it manually:
334
- idx = np.flip(np.argsort(Lambda_P))
335
- Lambda_P = Lambda_P[idx]
336
- Psi_P = Psi_P[:, idx]
337
- else:
338
- raise ValueError('eig_solver must be svd_sklearn_randomized, eigh or eigsh')
339
-
340
- Sigma_P = np.sqrt(Lambda_P)
341
-
1
+ import numpy as np
2
+ from scipy import signal
3
+ from scipy.sparse.linalg import svds, eigsh
4
+ from sklearn.decomposition import TruncatedSVD
5
+ from scipy.linalg import eigh
6
+ from sklearn.utils.extmath import randomized_svd
7
+
8
+
9
+ def Bound_EXT(S, Ex, boundaries):
10
+ """
11
+ This function computes the extension of a signal for
12
+ filtering purposes
13
+
14
+ :param S: The Input signal
15
+ :param Nf: The Size of the Kernel (must be an odd number!)
16
+ :param boundaries: The type of extension:
17
+ ‘reflect’ (d c b a | a b c d | d c b a) The input is extended by reflecting about the edge of the last pixel.
18
+ ‘nearest’ (a a a a | a b c d | d d d d) The input is extended by replicating the last pixel.
19
+ ‘wrap’ (a b c d | a b c d | a b c d) The input is extended by wrapping around to the opposite edge.
20
+ ‘extrap’ Extrapolation The input is extended via linear extrapolation.
21
+
22
+
23
+ """
24
+ # We first perform a zero padding
25
+ # Ex=int((Nf-1)/2) # Extension on each size
26
+ size_Ext = 2 * Ex + len(S) # Compute the size of the extended signal
27
+ S_extend = np.zeros((int(size_Ext))) # Initialize extended signal
28
+ S_extend[Ex:int((size_Ext - Ex))] = S # Assign the Signal on the zeroes
29
+
30
+ if boundaries == "reflect":
31
+ LEFT = np.flip(S[0:Ex]) # Prepare the reflection on the left
32
+ RIGHT = np.flip(S[len(S) - Ex:len(S)]) # Prepare the reflection on the right
33
+ S_extend[0:Ex] = LEFT
34
+ S_extend[len(S_extend) - Ex:len(S_extend)] = RIGHT
35
+ elif boundaries == "nearest":
36
+ LEFT = np.ones(Ex) * S[0] # Prepare the constant on the left
37
+ RIGHT = np.ones(Ex) * S[len(S) - 1] # Prepare the constant on the Right
38
+ S_extend[0:Ex] = LEFT
39
+ S_extend[len(S_extend) - Ex:len(S_extend)] = RIGHT
40
+ elif boundaries == "wrap":
41
+ LEFT = S[len(S) - Ex:len(S)] # Wrap on the Left
42
+ RIGHT = S[0:Ex] # Wrap on the Right
43
+ S_extend[0:Ex] = LEFT
44
+ S_extend[len(S_extend) - Ex:len(S_extend)] = RIGHT
45
+ elif boundaries == "extrap":
46
+ LEFT = np.ones(Ex) * S[0] # Prepare the constant on the left
47
+ RIGHT = np.ones(Ex) * S[len(S) - 1] # Prepare the constant on the Right
48
+ S_extend[0:Ex] = LEFT
49
+ S_extend[len(S_extend) - Ex:len(S_extend)] = RIGHT
50
+ print('Not active yet, replaced by nearest')
51
+ return S_extend
52
+
53
+
54
+ def conv_m(K, h, Ex, boundaries):
55
+ """
56
+ This function computes the 2D convolution by performing 2 sets of 1D convolutions.
57
+ Moreover, we here use the fft with an appropriate extension
58
+ that avoids the periodicity condition.
59
+
60
+ :param K: Matrix to be filtered
61
+ :param h: The 1D Kernel of the filter
62
+ :param boundaries: The type of extension:
63
+ ‘reflect’ (d c b a | a b c d | d c b a) The input is extended by reflecting about the edge of the last pixel.
64
+ ‘nearest’ (a a a a | a b c d | d d d d) The input is extended by replicating the last pixel.
65
+ ‘wrap’ (a b c d | a b c d | a b c d) The input is extended by wrapping around to the opposite edge.
66
+ ‘extrap’ Extrapolation The input is extended via linear extrapolation.
67
+ """
68
+ # Filter along the rows
69
+ n_t = np.shape(K)[0]
70
+ # Ex=int(n_t/2)
71
+ K_F1 = np.zeros(np.shape(K))
72
+ K_F2 = np.zeros(np.shape(K))
73
+ # K_F=np.zeros(np.shape(K))
74
+ for k in range(0, n_t):
75
+ S = K[:, k]
76
+ S_Ext = Bound_EXT(S, Ex, boundaries)
77
+ S_Filt = signal.fftconvolve(S_Ext, h, mode='valid')
78
+ # Compute where to take the signal
79
+ Ex1 = int((len(S_Filt) - len(S)) / 2)
80
+ # K_F1[k,:]=S_Filt[Ex:(len(S_Filt)-Ex)]
81
+ K_F1[:, k] = S_Filt[Ex1:(len(S_Filt) - Ex1)]
82
+ for k in range(0, n_t):
83
+ S = K_F1[k, :]
84
+ S_Ext = Bound_EXT(S, Ex, boundaries)
85
+ S_Filt = signal.fftconvolve(S_Ext, h, mode='valid')
86
+ # Compute where to take the signal
87
+ Ex1 = int((len(S_Filt) - len(S)) / 2)
88
+ # K_F2[:,k]=S_Filt[Ex:(len(S_Filt)-Ex)]
89
+ K_F2[k, :] = S_Filt[Ex1:(len(S_Filt) - Ex1)]
90
+ # K_F=K_F1+K_F2
91
+ return K_F2
92
+
93
+ def conv_m_2D(K, h, Ex, boundaries):
94
+
95
+ # Extended K
96
+ K_ext = np.pad(K, Ex, mode=boundaries)
97
+
98
+ # Filtering matrix
99
+ h_mat = np.outer(np.atleast_2d(h).T, np.atleast_2d(h))
100
+
101
+ # Filtering
102
+ K_filt = signal.fftconvolve(K_ext, h_mat, mode='valid')
103
+
104
+ # Interior K
105
+ Ex1 = int((len(K_filt) - len(K)) / 2)
106
+ K_F2 = K_filt[Ex1:(len(K_filt) - Ex1), Ex1:(len(K_filt) - Ex1)]
107
+
108
+ return K_F2
109
+
110
+
111
+ def _loop_gemm(a, b, c=None, chunksize=100):
112
+ size_i = a.shape[0]
113
+ size_zip = a.shape[1]
114
+
115
+ size_j = b.shape[1]
116
+ size_alt_zip = b.shape[0]
117
+
118
+ if size_zip != size_alt_zip:
119
+ ValueError("Loop GEMM zip index is not of the same size for both tensors")
120
+
121
+ if c is None:
122
+ c = np.zeros((size_i, size_j))
123
+
124
+ istart = 0
125
+ for i in range(int(np.ceil(size_i / float(chunksize)))):
126
+
127
+ left_slice = slice(istart, istart + chunksize)
128
+ left_view = a[left_slice]
129
+
130
+ jstart = 0
131
+ for j in range(int(np.ceil(size_j / float(chunksize)))):
132
+ right_slice = slice(jstart, jstart + chunksize)
133
+ right_view = b[:, right_slice]
134
+
135
+ c[left_slice, right_slice] = np.dot(left_view, right_view)
136
+ jstart += chunksize
137
+
138
+ istart += chunksize
139
+
140
+ return c
141
+
142
+
143
+ def svds_RND(K, R_K):
144
+ """
145
+ Quick and dirty implementation of randomized SVD
146
+ for computing eigenvalues of K. We follow the same input/output structure
147
+ as for the svds in scipy
148
+ """
149
+ svd = TruncatedSVD(R_K)
150
+ svd.fit_transform(K)
151
+ Psi_P = svd.components_.T
152
+ Lambda_P = svd.singular_values_
153
+ return Psi_P, Lambda_P
154
+
155
+
156
+ def overlap(array, len_chunk, len_sep=1):
157
+ """
158
+ Returns a matrix of all full overlapping chunks of the input `array`, with a chunk
159
+ length of `len_chunk` and a separation length of `len_sep`. Begins with the first full
160
+ chunk in the array.
161
+ This function is taken from https://stackoverflow.com/questions/38163366/split-list-into-separate-but-overlapping-chunks
162
+ it is designed to split an array with certain overlaps
163
+
164
+ :param array:
165
+ :param len_chunk:
166
+ :param len_sep:
167
+ :return array_matrix:
168
+ """
169
+
170
+ n_arrays = int(np.ceil((array.size - len_chunk + 1) / len_sep))
171
+
172
+ array_matrix = np.tile(array, n_arrays).reshape(n_arrays, -1)
173
+
174
+ columns = np.array(((len_sep * np.arange(0, n_arrays)).reshape(n_arrays, -1) + np.tile(
175
+ np.arange(0, len_chunk), n_arrays).reshape(n_arrays, -1)), dtype=np.intp)
176
+
177
+ rows = np.array((np.arange(n_arrays).reshape(n_arrays, -1) + np.tile(
178
+ np.zeros(len_chunk), n_arrays).reshape(n_arrays, -1)), dtype=np.intp)
179
+
180
+ return array_matrix[rows, columns]
181
+
182
+
183
+ # def switch_svds_K(A, n_modes, svd_solver):
184
+ # """
185
+ # Utility function to switch between different svd solvers
186
+ # for the diagonalization of K. Being K symmetric and positive definite,
187
+ # Its eigenvalue decomposition is equivalent to an SVD.
188
+ # Thus we are using SVD solvers as eig solvers here.
189
+ # The options are the same used for switch_svds (which goes for the SVD of D)
190
+
191
+ # --------------------------------------------------------------------------------------------------------------------
192
+ # Parameters:
193
+ # -----------
194
+ # :param A: np.array,
195
+ # Array of which compute the SVD
196
+ # :param n_modes: int,
197
+ # Number of modes to be computed. Note that if the `svd_numpy` method is chosen, the full matrix are
198
+ # computed, but then only the first n_modes are returned. Thus, it is not more computationally efficient.
199
+ # :param svd_solver: str,
200
+ # can be:
201
+ # 'svd_numpy'.
202
+ # This uses np.linalg.svd.
203
+ # It is the most accurate but the slowest and most expensive.
204
+ # It computes all the modes.
205
+
206
+ # 'svd_sklearn_truncated'
207
+ # This uses the TruncatedSVD from scikitlearn. This uses either
208
+ # svds from scipy or randomized from sklearn depending on the size
209
+ # of the matrix. These are the two remaining options.
210
+ # To the merit of chosing this is to let sklearn take the decision as to
211
+ # what to use.
212
+
213
+ # 'svd_scipy_sparse'
214
+ # This uses the svds from scipy.
215
+
216
+ # 'svd_sklearn_randomized',
217
+ # This uses the randomized from sklearn.
218
+ # Returns
219
+ # --------
220
+ # :return Psi_P, np.array (N_S x n_modes)
221
+ # :return Sigma_P, np.array (n_modes)
222
+ # """
223
+ # if svd_solver.lower() == 'svd_sklearn_truncated':
224
+ # svd = TruncatedSVD(n_modes)
225
+ # svd.fit_transform(A)
226
+ # Psi_P = svd.components_.T
227
+ # Lambda_P = svd.singular_values_
228
+ # Sigma_P = np.sqrt(Lambda_P)
229
+ # elif svd_solver.lower() == 'svd_numpy':
230
+ # Psi_P, Lambda_P, _ = np.linalg.svd(A)
231
+ # Sigma_P = np.sqrt(Lambda_P)
232
+ # Psi_P = Psi_P[:, :n_modes]
233
+ # Sigma_P = Sigma_P[:n_modes]
234
+ # elif svd_solver.lower() == 'svd_sklearn_randomized':
235
+ # Psi_P, Lambda_P = randomized_svd(A, n_modes)
236
+ # Sigma_P = np.sqrt(Lambda_P)
237
+ # elif svd_solver.lower() == 'svd_scipy_sparse':
238
+ # Psi_P, Lambda_P, _ = svds(A, k=n_modes)
239
+ # Sigma_P = np.sqrt(Lambda_P)
240
+
241
+ # return Psi_P, Sigma_P
242
+
243
+
244
+ def switch_svds(A, n_modes, svd_solver='svd_sklearn_truncated'):
245
+ """
246
+ Utility function to switch between different svd solvers
247
+ for the SVD of the snapshot matrix. This is a true SVD solver.
248
+
249
+ --------------------------------------------------------------------------------------------------------------------
250
+ Parameters:
251
+ -----------
252
+ :param A: np.array,
253
+ Array of which compute the SVD
254
+ :param n_modes: int,
255
+ Number of modes to be computed. Note that if the `svd_numpy` method is chosen, the full matrix are
256
+ computed, but then only the first n_modes are returned. Thus, it is not more computationally efficient.
257
+ :param svd_solver: str,
258
+ can be:
259
+ 'svd_numpy'.
260
+ This uses np.linalg.svd.
261
+ It is the most accurate but the slowest and most expensive.
262
+ It computes all the modes.
263
+
264
+ 'svd_sklearn_truncated'
265
+ This uses the TruncatedSVD from scikit-learn. This uses either
266
+ svds from scipy or randomized from sklearn depending on the size
267
+ of the matrix. These are the two remaining options.
268
+ The merit of choosing this is to let sklearn take the decision as to
269
+ what to use. One prefers to force the usage of any of those with the other two
270
+ options
271
+
272
+ 'svd_scipy_sparse'
273
+ This uses the svds from scipy.
274
+
275
+ 'svd_sklearn_randomized',
276
+ This uses the randomized from sklearn.
277
+
278
+ Returns
279
+ --------
280
+ :return Psi_P, np.array (N_S x n_modes)
281
+ :return Sigma_P, np.array (n_modes)
282
+ """
283
+ if svd_solver.lower() == 'svd_sklearn_truncated':
284
+ svd = TruncatedSVD(n_modes)
285
+ X_transformed = svd.fit_transform(A)
286
+ Phi_P = X_transformed / svd.singular_values_
287
+ Psi_P = svd.components_.T
288
+ Sigma_P = svd.singular_values_
289
+ elif svd_solver.lower() == 'svd_numpy':
290
+ Phi_P, Sigma_P, Psi_P = np.linalg.svd(A)
291
+ Phi_P = Phi_P[:, 0:n_modes]
292
+ Psi_P = Psi_P.T[:, 0:n_modes]
293
+ Sigma_P = Sigma_P[0:n_modes]
294
+ elif svd_solver.lower() == 'svd_sklearn_randomized':
295
+ Phi_P, Sigma_P, Psi_P = randomized_svd(A, n_modes)
296
+ Psi_P = Psi_P.T
297
+ elif svd_solver.lower() == 'svd_scipy_sparse':
298
+ Phi_P, Sigma_P, Psi_P = svds(A, k=n_modes)
299
+ Psi_P = Psi_P.T
300
+ # It turns out that this does not rank them in decreasing order.
301
+ # Hence we do it manually:
302
+ idx = np.flip(np.argsort(Sigma_P))
303
+ Sigma_P = Sigma_P[idx]
304
+ Phi_P = Phi_P[:, idx]
305
+ Psi_P = Psi_P[:, idx]
306
+
307
+ return Phi_P, Psi_P, Sigma_P
308
+
309
+
310
+ def switch_eigs(A, n_modes, eig_solver):
311
+ """
312
+ Utility function to switch between different eig solvers in a consistent way across different
313
+ methods of the package.
314
+ --------------------------------------------------------------------------------------------------------------------
315
+ Parameters:
316
+ -----------
317
+ :param A: np.array,
318
+ Array of which compute the eigenvalues
319
+ :param n_modes: int,
320
+ Number of modes to be computed. Note that if the `svd_numpy` method is chosen, the full matrix are
321
+ computed, but then only the first n_modes are returned. Thus, it is not more computationally efficient.
322
+ :param eig_solver: str,
323
+ can be:
324
+ 'svd_sklearn_randomized',
325
+ This uses svd truncated approach, which picks either randomized svd or scipy svds.
326
+ By default, it should pick mostly the first.
327
+
328
+ 'eigsh' from scipy sparse. This is a compromise between the previous and the following.
329
+
330
+ 'eigh' from scipy lin alg. This is the most precise, although a bit more expensive
331
+
332
+ Returns
333
+ --------
334
+ :return Psi_P, np.array (N_S x n_modes)
335
+ :return Sigma_P, np.array (n_modes)
336
+ """
337
+ if eig_solver.lower() == 'svd_sklearn_randomized':
338
+ Psi_P, Lambda_P = svds_RND(A, n_modes)
339
+ elif eig_solver.lower() == 'eigh':
340
+ n = np.shape(A)[0]
341
+ Lambda_P, Psi_P = eigh(A, subset_by_index=[n - n_modes, n - 1])
342
+ # It turns out that this does not rank them in decreasing order.
343
+ # Hence we do it manually:
344
+ idx = np.flip(np.argsort(Lambda_P))
345
+ Lambda_P = Lambda_P[idx]
346
+ Psi_P = Psi_P[:, idx]
347
+ elif eig_solver.lower() == 'eigsh':
348
+ Lambda_P, Psi_P = eigsh(A, k=n_modes)
349
+ # It turns out that this does not rank them in decreasing order.
350
+ # Hence we do it manually:
351
+ idx = np.flip(np.argsort(Lambda_P))
352
+ Lambda_P = Lambda_P[idx]
353
+ Psi_P = Psi_P[:, idx]
354
+ else:
355
+ raise ValueError('eig_solver must be svd_sklearn_randomized, eigh or eigsh')
356
+
357
+ Sigma_P = np.sqrt(Lambda_P)
358
+
342
359
  return Psi_P, Sigma_P