modulo-vki 2.0.6__py3-none-any.whl → 2.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,81 +1,209 @@
1
- import os
2
- from tqdm import tqdm
3
- import numpy as np
4
- import math
5
-
6
-
7
- def CorrelationMatrix(N_T, N_PARTITIONS=1, MEMORY_SAVING=False, FOLDER_OUT='./', SAVE_K=False, D=None,weights = np.array([])):
8
- """
9
- This method computes the temporal correlation matrix, given a data matrix as input. It's possible to use memory saving
10
- then splitting the computing in different tranches if computationally heavy. If D has been computed using MODULO
11
- then the dimension dim_col and N_PARTITIONS is automatically loaded
12
-
13
- :param N_T: int. Number of temporal snapshots
14
- :param D: np.array. Data matrix
15
- :param SAVE_K: bool. If SAVE_K=True, the matrix K is saved on disk. If the MEMORY_SAVING feature is active, this is done by default.
16
- :param MEMORY_SAVING: bool. If MEMORY_SAVING = True, the computation of the correlation matrix is done by steps. It requires the data matrix to be partitioned, following algorithm in MODULO._data_processing.
17
- :param FOLDER_OUT: str. Folder in which the temporal correlation matrix will be stored
18
- :param N_PARTITIONS: int. Number of partitions to be read in computing the correlation matrix. If _data_processing is used to partition the data matrix, this is inherited from the main class
19
- :param weights: weight vector [w_i,....,w_{N_s}] where w_i = area_cell_i/area_grid. Only needed if grid is non-uniform & MEMORY_SAVING== True
20
- :return: K (: np.array) if the memory saving is not active. None type otherwise.
21
- """
22
-
23
- if not MEMORY_SAVING:
24
- print("\n Computing Temporal correlation matrix K ...")
25
- K = np.dot(D.T, D)
26
- print("\n Done.")
27
-
28
- else:
29
- SAVE_K = True
30
- print("\n Using Memory Saving feature...")
31
- K = np.zeros((N_T, N_T))
32
- dim_col = math.floor(N_T / N_PARTITIONS)
33
-
34
- if N_T % N_PARTITIONS != 0:
35
- tot_blocks_col = N_PARTITIONS + 1
36
- else:
37
- tot_blocks_col = N_PARTITIONS
38
-
39
- for k in tqdm(range(tot_blocks_col)):
40
-
41
- di = np.load(FOLDER_OUT + f"/data_partitions/di_{k + 1}.npz")['di']
42
- if weights.size != 0:
43
- di = np.transpose(np.transpose(di) * np.sqrt(weights))
44
-
45
- ind_start = k * dim_col
46
- ind_end = ind_start + dim_col
47
-
48
- if (k == tot_blocks_col - 1) and (N_T - dim_col * N_PARTITIONS > 0):
49
- dim_col = N_T - dim_col * N_PARTITIONS
50
- ind_end = ind_start + dim_col
51
-
52
- K[ind_start:ind_end, ind_start:ind_end] = np.dot(di.transpose(), di)
53
-
54
- block = k + 2
55
-
56
- while block <= tot_blocks_col:
57
- dj = np.load(FOLDER_OUT + f"/data_partitions/di_{block}.npz")['di']
58
- if weights.size != 0:
59
- dj = np.transpose(np.transpose(dj) * np.sqrt(weights))
60
-
61
- ind_start_out = (block - 1) * dim_col
62
- ind_end_out = ind_start_out + dim_col
63
-
64
- if (block == tot_blocks_col) and (N_T - dim_col * N_PARTITIONS > 0):
65
- dim_col = N_T - dim_col * N_PARTITIONS
66
- ind_end_out = ind_start_out + dim_col
67
- dj = dj[:, :dim_col]
68
-
69
- K[ind_start:ind_end, ind_start_out:ind_end_out] = np.dot(di.T, dj)
70
-
71
- K[ind_start_out:ind_end_out, ind_start:ind_end] = K[ind_start:ind_end, ind_start_out:ind_end_out].T
72
-
73
- block += 1
74
-
75
- dim_col = math.floor(N_T / N_PARTITIONS)
76
-
77
- if SAVE_K:
78
- os.makedirs(FOLDER_OUT + '/correlation_matrix', exist_ok=True)
79
- np.savez(FOLDER_OUT + "/correlation_matrix/k_matrix", K=K)
80
-
81
- return K if not MEMORY_SAVING else None
1
+ import os
2
+ from tqdm import tqdm
3
+ import numpy as np
4
+ import math
5
+ from scipy.signal import firwin
6
+ from scipy import signal
7
+ from sklearn.metrics.pairwise import pairwise_kernels
8
+
9
+ def CorrelationMatrix(N_T,
10
+ N_PARTITIONS=1,
11
+ MEMORY_SAVING=False,
12
+ FOLDER_OUT='./',
13
+ SAVE_K=False,
14
+ D=None,
15
+ weights=np.array([]),
16
+ verbose=True):
17
+ """
18
+ Computes the temporal correlation matrix from the provided data matrix.
19
+
20
+ If MEMORY_SAVING is active, computation is split into partitions to reduce memory load.
21
+ If the data matrix D has been computed using MODULO, parameters such as the dimensions and number of partitions
22
+ will automatically be inferred.
23
+
24
+ Parameters
25
+ ----------
26
+ N_T : int
27
+ Number of temporal snapshots.
28
+
29
+ N_PARTITIONS : int, default=1
30
+ Number of partitions for memory-saving computation.
31
+ Inherited automatically if using MODULO's partitioning.
32
+
33
+ MEMORY_SAVING : bool, default=False
34
+ Activates partitioned computation of the correlation matrix to reduce memory usage.
35
+ Requires pre-partitioned data according to MODULO's `_data_processing`.
36
+
37
+ FOLDER_OUT : str, default='./'
38
+ Output directory where the temporal correlation matrix will be saved if required.
39
+
40
+ SAVE_K : bool, default=False
41
+ Flag to save the computed correlation matrix K to disk. Automatically enforced
42
+ if MEMORY_SAVING is active.
43
+
44
+ D : np.ndarray, optional
45
+ Data matrix used to compute the correlation matrix. Required if MEMORY_SAVING is False.
46
+
47
+ weights : np.ndarray, default=np.array([])
48
+ Weight vector `[w_1, w_2, ..., w_Ns]` defined as `w_i = area_cell_i / area_grid`.
49
+ Needed only for non-uniform grids when MEMORY_SAVING is True.
50
+
51
+ Returns
52
+ -------
53
+ K : np.ndarray or None
54
+ Temporal correlation matrix if MEMORY_SAVING is False; otherwise returns None,
55
+ as the matrix is managed via disk storage in the partitioned computation.
56
+ """
57
+
58
+ if not MEMORY_SAVING:
59
+ if verbose:
60
+ print("Computing Temporal correlation matrix K ...")
61
+ K = np.dot(D.T, D)
62
+ if verbose:
63
+ print("Done.")
64
+
65
+ else:
66
+ SAVE_K = True
67
+ if verbose:
68
+ print("\n Using Memory Saving feature...")
69
+ K = np.zeros((N_T, N_T))
70
+ dim_col = math.floor(N_T / N_PARTITIONS)
71
+
72
+ if N_T % N_PARTITIONS != 0:
73
+ tot_blocks_col = N_PARTITIONS + 1
74
+ else:
75
+ tot_blocks_col = N_PARTITIONS
76
+
77
+ for k in tqdm(range(tot_blocks_col)):
78
+
79
+ di = np.load(FOLDER_OUT + f"/data_partitions/di_{k + 1}.npz")['di']
80
+ if weights.size != 0:
81
+ di = np.transpose(np.transpose(di) * np.sqrt(weights))
82
+
83
+ ind_start = k * dim_col
84
+ ind_end = ind_start + dim_col
85
+
86
+ if (k == tot_blocks_col - 1) and (N_T - dim_col * N_PARTITIONS > 0):
87
+ dim_col = N_T - dim_col * N_PARTITIONS
88
+ ind_end = ind_start + dim_col
89
+
90
+ K[ind_start:ind_end, ind_start:ind_end] = np.dot(di.transpose(), di)
91
+
92
+ block = k + 2
93
+
94
+ while block <= tot_blocks_col:
95
+ dj = np.load(FOLDER_OUT + f"/data_partitions/di_{block}.npz")['di']
96
+ if weights.size != 0:
97
+ dj = np.transpose(np.transpose(dj) * np.sqrt(weights))
98
+
99
+ ind_start_out = (block - 1) * dim_col
100
+ ind_end_out = ind_start_out + dim_col
101
+
102
+ if (block == tot_blocks_col) and (N_T - dim_col * N_PARTITIONS > 0):
103
+ dim_col = N_T - dim_col * N_PARTITIONS
104
+ ind_end_out = ind_start_out + dim_col
105
+ dj = dj[:, :dim_col]
106
+
107
+ K[ind_start:ind_end, ind_start_out:ind_end_out] = np.dot(di.T, dj)
108
+
109
+ K[ind_start_out:ind_end_out, ind_start:ind_end] = K[ind_start:ind_end, ind_start_out:ind_end_out].T
110
+
111
+ block += 1
112
+
113
+ dim_col = math.floor(N_T / N_PARTITIONS)
114
+
115
+ if SAVE_K:
116
+ os.makedirs(FOLDER_OUT + '/correlation_matrix', exist_ok=True)
117
+ np.savez(FOLDER_OUT + "/correlation_matrix/k_matrix", K=K)
118
+
119
+ return K if not MEMORY_SAVING else None
120
+
121
+
122
+ def spectral_filter(K: np.ndarray, N_o:int, f_c: float) -> np.ndarray:
123
+ """
124
+ Zero‐phase band‐pass filter of the correlation matrix K along its diagonals.
125
+ Used for the SPOD proposed by Sieber et al.
126
+
127
+ Parameters
128
+ ----------
129
+ K : (n_t, n_t) array
130
+ Original temporal correlation matrix.
131
+ N_o : int
132
+ Semi‐order of the FIR filter (true filter length = 2*N_o+1).
133
+ f_c : float
134
+ Normalized cutoff frequency (0 < f_c < 0.5).
135
+
136
+ Returns
137
+ -------
138
+ K_F : (n_t, n_t) array
139
+ The filtered correlation matrix.
140
+ """
141
+ n_t = K.shape[0]
142
+
143
+ # extend K for edge-padding
144
+ K_ext = np.pad(
145
+ K,
146
+ pad_width=((N_o, N_o), (N_o, N_o)),
147
+ mode='constant', # or 'edge', 'reflect', etc.
148
+ constant_values=0
149
+ )
150
+
151
+ # Fill the edges (some repetition, but acceptable)
152
+ # Row-wise, Upper part
153
+ for i in range(0, N_o):
154
+ K_ext[i, i:i + n_t] = K[0, :]
155
+
156
+ # Row-wise, bottom part
157
+ for i in range(N_o + n_t, n_t + 2 * N_o):
158
+ K_ext[i, i - n_t + 1:i + 1] = K[-1, :]
159
+
160
+ # Column-wise, left part
161
+ for j in range(0, N_o):
162
+ K_ext[j:j + n_t, j] = K[:, 0]
163
+
164
+ # Column-wise, right part
165
+ for j in range(N_o + n_t, 2 * N_o + n_t):
166
+ K_ext[j - n_t + 1:j + 1, j] = K[:, -1]
167
+
168
+ # K_e = np.zeros((n_t + 2 * N_o, n_t + 2 * N_o))
169
+ # From which we clearly know that:
170
+ # K_e[N_o:n_t + N_o, N_o:n_t + N_o] = K
171
+
172
+ # create 2D kernel for FIR
173
+ h1d = firwin(N_o, f_c)
174
+ L = K_ext.shape[0]
175
+
176
+ pad_l = (L - N_o) // 2
177
+ pad_r = L - N_o - pad_l
178
+
179
+ # symmetrically padded kernel in 1D
180
+ h1d_pad = np.pad(h1d, (pad_l, pad_r))
181
+
182
+ # we make it 2D diagonal
183
+ h2d = np.diag(h1d_pad)
184
+
185
+ # finally, filter K_ext and return the trimmed result without the padded boundaries
186
+ K_ext_filt = signal.fftconvolve(K_ext, h2d, mode='same')
187
+ K_F = K_ext_filt[N_o : N_o + n_t, N_o : N_o + n_t]
188
+
189
+ return K_F
190
+
191
+ def kernelized_K(D, M_ij, k_m, metric, cent, alpha):
192
+
193
+ n_s, n_t = D.shape
194
+
195
+ gamma = - np.log(k_m) / M_ij
196
+ K_zeta = pairwise_kernels(D.T, metric=metric, gamma=gamma) # kernel substitute for the inner product
197
+
198
+ # Center the Kernel Matrix (if cent is True):
199
+ if cent:
200
+ H = np.eye(n_t) - 1 / n_t * np.ones_like(K_zeta)
201
+ K_zeta = H @ K_zeta @ H.T
202
+
203
+ # add a ridge term to enforce strictly positive-definite eigenvalues and good conditioning
204
+ K_r = K_zeta + alpha * np.eye(n_t)
205
+
206
+ return K_r
207
+
208
+
209
+
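
The MEMORY_SAVING branch of CorrelationMatrix above assembles K = D^T D block by block from column partitions of D, while spectral_filter applies a zero-phase FIR smoothing along the diagonals of K and kernelized_K builds a kernel-based, optionally centered and ridge-regularized substitute for D^T D via scikit-learn's pairwise_kernels. The following standalone NumPy sketch (not part of the package; arbitrary synthetic sizes) illustrates that the block-wise assembly reproduces the direct product:

import numpy as np

rng = np.random.default_rng(0)
N_S, N_T, N_PARTITIONS = 300, 48, 4
D = rng.standard_normal((N_S, N_T))          # synthetic snapshot matrix

# Direct path (MEMORY_SAVING=False): K is simply D^T D
K_direct = D.T @ D

# Partitioned path: split D column-wise and assemble K block by block,
# computing only the upper-triangular blocks and mirroring them by symmetry
edges = np.linspace(0, N_T, N_PARTITIONS + 1, dtype=int)
K_blocks = np.zeros((N_T, N_T))
for a in range(N_PARTITIONS):
    Da = D[:, edges[a]:edges[a + 1]]
    for b in range(a, N_PARTITIONS):
        Db = D[:, edges[b]:edges[b + 1]]
        block = Da.T @ Db
        K_blocks[edges[a]:edges[a + 1], edges[b]:edges[b + 1]] = block
        K_blocks[edges[b]:edges[b + 1], edges[a]:edges[a + 1]] = block.T

assert np.allclose(K_direct, K_blocks)       # both assemblies agree

With K in hand, and assuming the functions of this hunk are in scope, the SPOD-style smoothing would be invoked roughly as spectral_filter(K_direct, N_o=10, f_c=0.1), with N_o and f_c chosen here purely for illustration.
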
@@ -1,180 +1,180 @@
1
- import numpy as np
2
- import os
3
- from tqdm import tqdm
4
- import math
5
-
6
-
7
- def spatial_basis_mPOD(D, PSI_M, N_T, N_PARTITIONS, N_S, MEMORY_SAVING, FOLDER_OUT, SAVE: bool = False,weights: np.array = np.array([])):
8
- """
9
- Given the temporal basis of the mPOD now the spatial ones are computed
10
-
11
- :param D:
12
- Snapshot matrix D: if memory savig is active, this is ignored.
13
- :param PSI_M: np.array.:
14
- The mPOD temporal basis Psi tentatively assembled from all scales
15
- :param N_T: int.
16
- Number of snapshots
17
- :param N_PARTITIONS: int.
18
- Number of partitions in the memory saving
19
- :param N_S: int.
20
- Number of grid points in space
21
- :param MEMORY_SAVING: bool.
22
- Inherited from main class, if True turns on the MEMORY_SAVING feature, loading the partitions and starting the proper algorithm
23
- :param FOLDER_OUT: str.
24
- Folder in which the results are saved if SAVE_SPATIAL_POD = True
25
- :param SAVE_SPATIAL_POD: bool.
26
- If True, results are saved on disk and released from memory
27
- :param weights: np.array
28
- weight vector [w_i,....,w_{N_s}] where w_i = area_cell_i/area_grid. Only needed if grid is non-uniform & MEMORY_SAVING== True
29
- :return: Phi_M, Psi_M, Sigma_M: np.arrays. The final (sorted) mPOD decomposition
30
- """
31
-
32
- R1 = 0; R2 = 0
33
- if MEMORY_SAVING:
34
- SAVE = True
35
- os.makedirs(FOLDER_OUT + '/mPOD/', exist_ok=True)
36
- dim_col = math.floor(N_T / N_PARTITIONS)
37
- dim_row = math.floor(N_S / N_PARTITIONS)
38
- dr = np.zeros((dim_row, N_T))
39
-
40
- # 1 --- Converting partitions dC to dR
41
- if N_S % N_PARTITIONS != 0:
42
- tot_blocks_row = N_PARTITIONS + 1
43
- else:
44
- tot_blocks_row = N_PARTITIONS
45
-
46
- if N_T % N_PARTITIONS != 0:
47
- tot_blocks_col = N_PARTITIONS + 1
48
- else:
49
- tot_blocks_col = N_PARTITIONS
50
-
51
- fixed = 0
52
-
53
- for i in range(1, tot_blocks_row + 1):
54
- # --- Check if dim_row has to be fixed:
55
- if i == tot_blocks_row and (N_S - dim_row * N_PARTITIONS > 0):
56
- dim_row_fix = N_S - dim_row * N_PARTITIONS
57
- dr = np.zeros((dim_row_fix, N_T))
58
-
59
- for cont in range(1, tot_blocks_col + 1):
60
- di = np.load(FOLDER_OUT + f"/data_partitions/di_{cont}.npz")['di']
61
-
62
- if i == tot_blocks_row and (N_S - dim_row * N_PARTITIONS > 0) and fixed == 0:
63
- R1 = R2
64
- R2 = R1 + (N_S - dim_row * N_PARTITIONS)
65
- fixed = 1
66
- elif fixed == 0:
67
- R1 = (i - 1) * dim_row
68
- R2 = i * dim_row
69
-
70
- # Same as before, but we don't need the variable fixed because if
71
- # % the code runs this loop, it will be the last time
72
-
73
- if cont == tot_blocks_col and (N_T - dim_col * N_PARTITIONS > 0):
74
- C1 = C2
75
- C2 = C1 + (N_T - dim_col * N_PARTITIONS)
76
- else:
77
- C1 = (cont - 1) * dim_col
78
- C2 = cont * dim_col
79
-
80
- dr[:, C1:C2] = di[R1:R2, :]
81
-
82
- # 2 --- Computing partitions R of PHI_SIGMA
83
- PHI_SIGMA_BLOCK = dr @ PSI_M
84
- np.savez(FOLDER_OUT + f'/mPOD/phi_sigma_{i}', PHI_SIGMA_BLOCK)
85
-
86
- # 3 --- Convert partitions R to partitions C and get SIGMA
87
- R = PSI_M.shape[1]
88
- dim_col = math.floor(R / N_PARTITIONS)
89
- dim_row = math.floor(N_S / N_PARTITIONS)
90
- dps = np.zeros((N_S, dim_col))
91
- SIGMA_M = []
92
- PHI_M = []
93
-
94
- if R % N_PARTITIONS != 0:
95
- tot_blocks_col = N_PARTITIONS + 1
96
- else:
97
- tot_blocks_col = N_PARTITIONS
98
-
99
- fixed = 0
100
-
101
- # Here we apply the same logic of the loop before
102
-
103
- for j in range(1, tot_blocks_col + 1):
104
-
105
- if j == tot_blocks_col and (R - dim_col * N_PARTITIONS > 0):
106
- dim_col_fix = R - dim_col * N_PARTITIONS
107
- dps = np.zeros((N_S, dim_col_fix))
108
-
109
- for k in range(1, tot_blocks_row + 1):
110
- PHI_SIGMA_BLOCK = np.load(FOLDER_OUT + f"/mPOD/phi_sigma_{k}.npz")['arr_0']
111
-
112
- if j == tot_blocks_col and (R - dim_col * N_PARTITIONS > 0) and fixed == 0:
113
- R1 = R2
114
- R2 = R1 + (R - dim_col * N_PARTITIONS)
115
- fixed = 1
116
- elif fixed == 0:
117
- R1 = (j - 1) * dim_col
118
- R2 = j * dim_col
119
-
120
- if k == tot_blocks_row and (N_S - dim_row * N_PARTITIONS > 0):
121
- C1 = C2
122
- C2 = C1 + (N_S - dim_row * N_PARTITIONS)
123
- else:
124
- C1 = (k - 1) * dim_row
125
- C2 = k * dim_row
126
-
127
- dps[C1:C2, :] = PHI_SIGMA_BLOCK[:, R1:R2]
128
-
129
- # Getting sigmas and phis
130
- for z in range(R1, R2):
131
- zz = z - R1
132
- if weights.size == 0:
133
- SIGMA_M.append(np.linalg.norm(dps[:, zz]))
134
- else:
135
- SIGMA_M.append(np.linalg.norm(dps[:, zz]*np.sqrt(weights)))
136
- tmp = dps[:, zz] / SIGMA_M[z]
137
- #print(f'Shape tmp = {np.shape(tmp)}')
138
- PHI_M.append(tmp)
139
- np.savez(FOLDER_OUT + f'/mPOD/phi_{z + 1}', tmp)
140
-
141
- Indices = np.argsort(SIGMA_M)[::-1] # find indices for sorting in decreasing order
142
- SIGMA_M = np.asarray(SIGMA_M)
143
- PHI_M = np.asarray(PHI_M).T
144
- PSI_M = np.asarray(PSI_M)
145
- Sorted_Sigmas = SIGMA_M[Indices] # Sort all the sigmas
146
- Phi_M = PHI_M[:, Indices] # Sorted Spatial Structures Matrix
147
- Psi_M = PSI_M[:, Indices] # Sorted Temporal Structures Matrix
148
- Sigma_M = Sorted_Sigmas # Sorted Amplitude Matrix
149
-
150
- else:
151
- R = PSI_M.shape[1]
152
- PHI_M_SIGMA_M = np.dot(D, (PSI_M))
153
- # Initialize the output
154
- PHI_M = np.zeros((N_S, R))
155
- SIGMA_M = np.zeros((R))
156
-
157
- for i in tqdm(range(0, R)):
158
- # print('Completing mPOD Mode ' + str(i))
159
- # Assign the norm as amplitude
160
- if weights.size == 0:
161
- SIGMA_M[i] = np.linalg.norm(PHI_M_SIGMA_M[:, i])
162
- else:
163
- SIGMA_M[i] = np.linalg.norm(PHI_M_SIGMA_M[:, i]*np.sqrt(weights))
164
- # Normalize the columns of C to get spatial modes
165
- PHI_M[:, i] = PHI_M_SIGMA_M[:, i] / SIGMA_M[i]
166
-
167
- Indices = np.flipud(np.argsort(SIGMA_M)) # find indices for sorting in decreasing order
168
- Sorted_Sigmas = SIGMA_M[Indices] # Sort all the sigmas
169
- Phi_M = PHI_M[:, Indices] # Sorted Spatial Structures Matrix
170
- Psi_M = PSI_M[:, Indices] # Sorted Temporal Structures Matrix
171
- Sigma_M = Sorted_Sigmas # Sorted Amplitude Matrix
172
-
173
- if SAVE:
174
- '''Saving results in MODULO tmp proper folder'''
175
- os.makedirs(FOLDER_OUT + '/mPOD/', exist_ok=True)
176
- np.savez(FOLDER_OUT + "/mPOD/sorted_phis", Phi_M)
177
- np.savez(FOLDER_OUT + "/mPOD/sorted_psis", Psi_M)
178
- np.savez(FOLDER_OUT + "/mPOD/sorted_sigma", Sorted_Sigmas)
179
-
180
- return Phi_M, Psi_M, Sigma_M
1
+ import numpy as np
2
+ import os
3
+ from tqdm import tqdm
4
+ import math
5
+
6
+
7
+ def spatial_basis_mPOD(D, PSI_M, N_T, N_PARTITIONS, N_S, MEMORY_SAVING, FOLDER_OUT, SAVE: bool = False, weights: np.array = np.array([])):
8
+ """
9
+ Given the temporal basis of the mPOD, the corresponding spatial basis is computed here
10
+
11
+ :param D:
12
+ Snapshot matrix D: if memory saving is active, this is ignored.
13
+ :param PSI_M: np.array.:
14
+ The mPOD temporal basis Psi tentatively assembled from all scales
15
+ :param N_T: int.
16
+ Number of snapshots
17
+ :param N_PARTITIONS: int.
18
+ Number of partitions in the memory saving
19
+ :param N_S: int.
20
+ Number of grid points in space
21
+ :param MEMORY_SAVING: bool.
22
+ Inherited from the main class; if True, turns on the MEMORY_SAVING feature, loading the partitions and running the partitioned algorithm
23
+ :param FOLDER_OUT: str.
24
+ Folder in which the results are saved if SAVE = True
25
+ :param SAVE: bool.
26
+ If True, results are saved on disk and released from memory
27
+ :param weights: np.array
28
+ weight vector [w_1, ..., w_{N_s}] where w_i = area_cell_i / area_grid. Only needed if the grid is non-uniform and MEMORY_SAVING == True
29
+ :return: Phi_M, Psi_M, Sigma_M: np.arrays. The final (sorted) mPOD decomposition
30
+ """
31
+
32
+ R1 = 0; R2 = 0
33
+ if MEMORY_SAVING:
34
+ SAVE = True
35
+ os.makedirs(FOLDER_OUT + '/mPOD/', exist_ok=True)
36
+ dim_col = math.floor(N_T / N_PARTITIONS)
37
+ dim_row = math.floor(N_S / N_PARTITIONS)
38
+ dr = np.zeros((dim_row, N_T))
39
+
40
+ # 1 --- Converting partitions dC to dR
41
+ if N_S % N_PARTITIONS != 0:
42
+ tot_blocks_row = N_PARTITIONS + 1
43
+ else:
44
+ tot_blocks_row = N_PARTITIONS
45
+
46
+ if N_T % N_PARTITIONS != 0:
47
+ tot_blocks_col = N_PARTITIONS + 1
48
+ else:
49
+ tot_blocks_col = N_PARTITIONS
50
+
51
+ fixed = 0
52
+
53
+ for i in range(1, tot_blocks_row + 1):
54
+ # --- Check if dim_row has to be fixed:
55
+ if i == tot_blocks_row and (N_S - dim_row * N_PARTITIONS > 0):
56
+ dim_row_fix = N_S - dim_row * N_PARTITIONS
57
+ dr = np.zeros((dim_row_fix, N_T))
58
+
59
+ for cont in range(1, tot_blocks_col + 1):
60
+ di = np.load(FOLDER_OUT + f"/data_partitions/di_{cont}.npz")['di']
61
+
62
+ if i == tot_blocks_row and (N_S - dim_row * N_PARTITIONS > 0) and fixed == 0:
63
+ R1 = R2
64
+ R2 = R1 + (N_S - dim_row * N_PARTITIONS)
65
+ fixed = 1
66
+ elif fixed == 0:
67
+ R1 = (i - 1) * dim_row
68
+ R2 = i * dim_row
69
+
70
+ # Same as before, but we don't need the variable 'fixed' because if
71
+ # the code runs this loop, it will be the last time
72
+
73
+ if cont == tot_blocks_col and (N_T - dim_col * N_PARTITIONS > 0):
74
+ C1 = C2
75
+ C2 = C1 + (N_T - dim_col * N_PARTITIONS)
76
+ else:
77
+ C1 = (cont - 1) * dim_col
78
+ C2 = cont * dim_col
79
+
80
+ dr[:, C1:C2] = di[R1:R2, :]
81
+
82
+ # 2 --- Computing partitions R of PHI_SIGMA
83
+ PHI_SIGMA_BLOCK = dr @ PSI_M
84
+ np.savez(FOLDER_OUT + f'/mPOD/phi_sigma_{i}', PHI_SIGMA_BLOCK)
85
+
86
+ # 3 --- Convert partitions R to partitions C and get SIGMA
87
+ R = PSI_M.shape[1]
88
+ dim_col = math.floor(R / N_PARTITIONS)
89
+ dim_row = math.floor(N_S / N_PARTITIONS)
90
+ dps = np.zeros((N_S, dim_col))
91
+ SIGMA_M = []
92
+ PHI_M = []
93
+
94
+ if R % N_PARTITIONS != 0:
95
+ tot_blocks_col = N_PARTITIONS + 1
96
+ else:
97
+ tot_blocks_col = N_PARTITIONS
98
+
99
+ fixed = 0
100
+
101
+ # Here we apply the same logic as in the previous loop
102
+
103
+ for j in range(1, tot_blocks_col + 1):
104
+
105
+ if j == tot_blocks_col and (R - dim_col * N_PARTITIONS > 0):
106
+ dim_col_fix = R - dim_col * N_PARTITIONS
107
+ dps = np.zeros((N_S, dim_col_fix))
108
+
109
+ for k in range(1, tot_blocks_row + 1):
110
+ PHI_SIGMA_BLOCK = np.load(FOLDER_OUT + f"/mPOD/phi_sigma_{k}.npz")['arr_0']
111
+
112
+ if j == tot_blocks_col and (R - dim_col * N_PARTITIONS > 0) and fixed == 0:
113
+ R1 = R2
114
+ R2 = R1 + (R - dim_col * N_PARTITIONS)
115
+ fixed = 1
116
+ elif fixed == 0:
117
+ R1 = (j - 1) * dim_col
118
+ R2 = j * dim_col
119
+
120
+ if k == tot_blocks_row and (N_S - dim_row * N_PARTITIONS > 0):
121
+ C1 = C2
122
+ C2 = C1 + (N_S - dim_row * N_PARTITIONS)
123
+ else:
124
+ C1 = (k - 1) * dim_row
125
+ C2 = k * dim_row
126
+
127
+ dps[C1:C2, :] = PHI_SIGMA_BLOCK[:, R1:R2]
128
+
129
+ # Getting sigmas and phis
130
+ for z in range(R1, R2):
131
+ zz = z - R1
132
+ if weights.size == 0:
133
+ SIGMA_M.append(np.linalg.norm(dps[:, zz]))
134
+ else:
135
+ SIGMA_M.append(np.linalg.norm(dps[:, zz]*np.sqrt(weights)))
136
+ tmp = dps[:, zz] / SIGMA_M[z]
137
+ #print(f'Shape tmp = {np.shape(tmp)}')
138
+ PHI_M.append(tmp)
139
+ np.savez(FOLDER_OUT + f'/mPOD/phi_{z + 1}', tmp)
140
+
141
+ Indices = np.argsort(SIGMA_M)[::-1] # find indices for sorting in decreasing order
142
+ SIGMA_M = np.asarray(SIGMA_M)
143
+ PHI_M = np.asarray(PHI_M).T
144
+ PSI_M = np.asarray(PSI_M)
145
+ Sorted_Sigmas = SIGMA_M[Indices] # Sort all the sigmas
146
+ Phi_M = PHI_M[:, Indices] # Sorted Spatial Structures Matrix
147
+ Psi_M = PSI_M[:, Indices] # Sorted Temporal Structures Matrix
148
+ Sigma_M = Sorted_Sigmas # Sorted Amplitude Matrix
149
+
150
+ else:
151
+ R = PSI_M.shape[1]
152
+ PHI_M_SIGMA_M = np.dot(D, (PSI_M))
153
+ # Initialize the output
154
+ PHI_M = np.zeros((N_S, R))
155
+ SIGMA_M = np.zeros((R))
156
+
157
+ for i in tqdm(range(0, R)):
158
+ # print('Completing mPOD Mode ' + str(i))
159
+ # Assign the norm as amplitude
160
+ if weights.size == 0:
161
+ SIGMA_M[i] = np.linalg.norm(PHI_M_SIGMA_M[:, i])
162
+ else:
163
+ SIGMA_M[i] = np.linalg.norm(PHI_M_SIGMA_M[:, i]*np.sqrt(weights))
164
+ # Normalize the columns of C to get spatial modes
165
+ PHI_M[:, i] = PHI_M_SIGMA_M[:, i] / SIGMA_M[i]
166
+
167
+ Indices = np.flipud(np.argsort(SIGMA_M)) # find indices for sorting in decreasing order
168
+ Sorted_Sigmas = SIGMA_M[Indices] # Sort all the sigmas
169
+ Phi_M = PHI_M[:, Indices] # Sorted Spatial Structures Matrix
170
+ Psi_M = PSI_M[:, Indices] # Sorted Temporal Structures Matrix
171
+ Sigma_M = Sorted_Sigmas # Sorted Amplitude Matrix
172
+
173
+ if SAVE:
174
+ '''Saving results in the MODULO tmp folder'''
175
+ os.makedirs(FOLDER_OUT + '/mPOD/', exist_ok=True)
176
+ np.savez(FOLDER_OUT + "/mPOD/sorted_phis", Phi_M)
177
+ np.savez(FOLDER_OUT + "/mPOD/sorted_psis", Psi_M)
178
+ np.savez(FOLDER_OUT + "/mPOD/sorted_sigma", Sorted_Sigmas)
179
+
180
+ return Phi_M, Psi_M, Sigma_M
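
For reference, the non-memory-saving branch of spatial_basis_mPOD above reduces to a projection of D onto the temporal basis followed by column normalization and amplitude sorting. A minimal standalone NumPy sketch of that branch (not part of the package; uniform grid, i.e. empty weights, and synthetic data):

import numpy as np

rng = np.random.default_rng(1)
N_S, N_T, R = 200, 40, 6
D = rng.standard_normal((N_S, N_T))                       # synthetic snapshot matrix
PSI_M, _ = np.linalg.qr(rng.standard_normal((N_T, R)))    # stand-in orthonormal temporal basis

PHI_SIGMA = D @ PSI_M                          # un-normalized spatial structures
SIGMA = np.linalg.norm(PHI_SIGMA, axis=0)      # modal amplitudes (column norms)
PHI = PHI_SIGMA / SIGMA                        # normalized spatial modes

order = np.argsort(SIGMA)[::-1]                # sort by decreasing amplitude
Phi_M, Psi_M, Sigma_M = PHI[:, order], PSI_M[:, order], SIGMA[order]
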