ticoi 0.0.1__py3-none-any.whl

@@ -0,0 +1,1015 @@
+ """
+ Auxiliary functions to process the temporal inversion.
+
+ Authors: Laurane Charrier, Lei Guo, Nathan Lioret
+ References:
+     Charrier, L., Yan, Y., Koeniguer, E. C., Leinss, S., & Trouvé, E. (2021). Extraction of velocity time series with an optimal temporal sampling from displacement
+     observation networks. IEEE Transactions on Geoscience and Remote Sensing.
+     Charrier, L., Yan, Y., Colin Koeniguer, E., Mouginot, J., Millan, R., & Trouvé, E. (2022). Fusion of multi-temporal and multi-sensor ice velocity observations.
+     ISPRS Annals of the Photogrammetry, Remote Sensing and Spatial Information Sciences, 3, 311-318.
+ """
+
+ import math as m
+
+ import numpy as np
+ import scipy.linalg as la
+ import scipy.optimize as opt
+ import scipy.sparse as sp
+ from numba import jit
+ from scipy.linalg import inv
+
+ # %% ======================================================================== #
+ #                         CONSTRUCTION OF THE SYSTEM                          #
+ # =========================================================================%% #
+
+
+ def mu_regularisation(regu: str | int, A: np.ndarray, dates_range: np.ndarray, ini: np.ndarray | None = None):
+     """
+     Compute the Tikhonov regularisation matrix.
+
+     :param regu: str or int, type of regularisation
+     :param A: np array, design matrix
+     :param dates_range: list, list of estimated dates
+     :param ini: initial parameters (mean velocity and/or acceleration)
+
+     :return mu: Tikhonov regularisation matrix
+     """
+
+     # First order Tikhonov regularisation
+     if regu == 1:
+         mu = np.diag(np.full(A.shape[1], -1, dtype="float32"))
+         mu[np.arange(A.shape[1] - 1), np.arange(A.shape[1] - 1) + 1] = 1
+         mu /= np.diff(dates_range) / np.timedelta64(1, "D")
+         mu = np.delete(mu, -1, axis=0)
+
+     # First order Tikhonov regularisation, with an a priori on the acceleration
+     elif regu == "1accelnotnull":
+         mu = np.diag(np.full(A.shape[1], -1, dtype="float32"))
+         mu[np.arange(A.shape[1] - 1), np.arange(A.shape[1] - 1) + 1] = 1
+         mu /= np.diff(dates_range) / np.timedelta64(1, "D")
+         mu = np.delete(mu, -1, axis=0)
+
+     # Second order Tikhonov regularisation
+     elif regu == 2:
+         delta = np.diff(dates_range) / np.timedelta64(1, "D")
+         mu = np.zeros((A.shape[1], A.shape[1]), dtype="float64")
+         mu[range(1, A.shape[1] - 1), range(0, A.shape[1] - 2)] = 1 / delta[:-2]
+         mu[range(1, A.shape[1] - 1), range(1, A.shape[1] - 1)] = -2 / delta[1:-1]
+         mu[range(1, A.shape[1] - 1), range(2, A.shape[1])] = 1 / delta[2:]
+         mu[0, 0] = 0
+         mu[-1, -1] = 0
+
+     # Regularisation on the direction when vx and vy are inverted together
+     elif regu == "directionxy":
+         mu = np.zeros((A.shape[1], 2 * A.shape[1]), dtype="float64")
+         delta = [(dates_range[k + 1] - dates_range[k]) / np.timedelta64(1, "D") for k in range(len(dates_range) - 1)]
+
+         if len(ini) == 2:
+             vv = np.array(ini[0]) ** 2 + np.array(ini[1]) ** 2
+             # Force each estimated vector to be collinear with the averaged vector: scalar product equal to 1
+             for k in range(len(dates_range) - 1):
+                 mu[k, k] = ini[0][k] / int(delta[k]) / vv[k]  # vx * meanvx
+                 mu[k, k + len(dates_range) - 1] = ini[1][k] / int(delta[k]) / vv[k]  # vy * meanvy
+
+         elif len(ini) == 4:
+             vv = np.sqrt(ini[0] ** 2 + ini[1] ** 2) / 365 * np.sqrt(ini[2] ** 2 + ini[3] ** 2) / delta
+             # Force each estimated vector to be collinear with the averaged vector: scalar product equal to 1
+             for k in range(len(dates_range) - 1):
+                 mu[k, k] = ini[0][k] / 365 / int(delta[k]) / vv[k]  # vx * meanvx
+                 mu[k, k + len(dates_range) - 1] = ini[1][k] / 365 / int(delta[k]) / vv[k]  # vy * meanvy
+
+     else:
+         raise ValueError("Enter 1, 2, '1accelnotnull' or 'directionxy'")
+
+     return mu
+
+
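+ # Example (illustrative sketch, not part of the original module): first-order
+ # regularisation matrix for three estimated displacements spaced 10 days apart;
+ # each row penalises the change between two consecutive values.
+ # >>> dates_range = np.array(["2020-01-01", "2020-01-11", "2020-01-21", "2020-01-31"], dtype="datetime64[D]")
+ # >>> A = np.ones((2, 3))  # hypothetical design matrix with 3 unknowns
+ # >>> mu_regularisation(1, A, dates_range)
+ # array([[-0.1,  0.1,  0. ],
+ #        [ 0. , -0.1,  0.1]], dtype=float32)
+
+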
+ def construction_dates_range_np(data: np.ndarray) -> np.ndarray:
+     """
+     Construct the dates of the estimated displacements in X with an irregular temporal sampling (ILF).
+
+     :param data: an array where each line is (date1, date2, other elements) for which a velocity has been measured
+     :return: the dates of the estimated displacements in X
+     """
+
+     dates = np.concatenate([data[:, 0], data[:, 1]])  # concatenate date1 and date2
+     dates = np.unique(dates)  # remove duplicates (np.unique also sorts the dates)
+     return dates
+
+
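+ # Example (sketch with synthetic data): two overlapping observation pairs
+ # yield four unique, sorted estimation dates.
+ # >>> data = np.array([[np.datetime64("2020-01-01"), np.datetime64("2020-01-13")],
+ # ...                  [np.datetime64("2020-01-07"), np.datetime64("2020-01-19")]])
+ # >>> construction_dates_range_np(data)
+ # array(['2020-01-01', '2020-01-07', '2020-01-13', '2020-01-19'], dtype='datetime64[D]')
+
+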
+ @jit(nopython=True)  # use numba
+ def construction_a_lf(dates: np.ndarray, dates_range: np.ndarray) -> np.ndarray:
+     """
+     Construct the design matrix A in the formulation AX = Y.
+     It corresponds to the Leap Frog formulation, where each value in X is the estimated displacement between two consecutive dates.
+
+     :param dates: np array, where each line is (date1, date2) for which a velocity is computed (it corresponds to the original displacements)
+     :param dates_range: dates of the estimated displacements in X
+
+     :return: the design matrix A, which represents the temporal closure of the displacement measurement network
+     """
+     # Search at which index in dates_range each date in dates is stored
+     date1_indices = np.searchsorted(dates_range, dates[:, 0])
+     date2_indices = np.searchsorted(dates_range, dates[:, 1]) - 1
+
+     A = np.zeros((dates.shape[0], dates_range[1:].shape[0]), dtype="int32")
+     for y in range(dates.shape[0]):
+         A[y, date1_indices[y] : date2_indices[y] + 1] = 1
+
+     return A
+
+
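+ # Example (sketch, continuing the synthetic data above): each row of A flags
+ # the sub-intervals of dates_range covered by one observed displacement,
+ # which encodes the temporal closure of the network.
+ # >>> dates_range = construction_dates_range_np(data)
+ # >>> construction_a_lf(data[:, :2], dates_range)
+ # array([[1, 1, 0],
+ #        [0, 1, 1]], dtype=int32)
+
+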
+ # %% ======================================================================== #
+ #                                   WEIGHT                                    #
+ # =========================================================================%% #
+ def weight_for_inversion(
+     weight_origine: bool,
+     conf: bool,
+     data: np.ndarray,
+     pos: int,
+     inside_Tukey: bool = False,
+     temporal_decorrelation: np.ndarray | None = None,
+ ) -> np.ndarray:
+     """
+     Initialisation of the weights.
+
+     :param weight_origine: if True the weights are computed from the data quality indicators
+     :param conf: if True the quality indicators are confidence values between 0 and 1 (1 is highest quality)
+     :param data: the data array
+     :param pos: the position of the variable dx or dy within data
+     :param inside_Tukey: if True the weights will be injected inside the Tukey biweight function
+     :param temporal_decorrelation: a priori weights, for example a list of 0 and 1 to flag temporal decorrelation
+
+     :return Weight: np array of the initial weights
+     """
+
+     # Weights based on data quality
+     if weight_origine and not inside_Tukey:
+         if conf:  # Based on data quality given as a confidence indicator, i.e. between 0 and 1 (1 is highest quality)
+             Weight = data[:, pos]
+         else:  # The data quality corresponds to errors in m/y or m/d
+             # Normalization of the errors
+             Weight = 1 - (data[:, pos] - np.min(data[:, pos])) / (np.max(data[:, pos]) - np.min(data[:, pos]))
+             # try:
+             #     Weight = data[:, pos] / (stats.median_abs_deviation(data[:, pos]) / 0.6745)
+             # except ZeroDivisionError:
+             #     Weight = data[:, pos] / (average_absolute_deviation(data[:, pos]) / 0.6745)
+             # Weight = TukeyBiweight(Weight, 4.685)
+
+         if temporal_decorrelation is not None:
+             Weight = np.multiply(temporal_decorrelation, Weight)
+
+     # A priori weights (e.g. detection of temporal decorrelation)
+     elif temporal_decorrelation is not None:
+         Weight = temporal_decorrelation
+     elif weight_origine:
+         Weight = data[:, pos]
+     else:  # If no a priori knowledge, uniform weights
+         Weight = np.ones(data.shape[0])
+
+     return Weight
+
+
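+ # Example (sketch; the column layout (date1, date2, dx, dy, errx, erry) is an
+ # assumption for illustration): with weight_origine=True and conf=True, the
+ # quality column is read directly as the weight.
+ # >>> obs = np.array([[0, 1, 5.0, 3.0, 0.9, 0.8],
+ # ...                 [1, 2, 6.0, 2.0, 0.4, 0.7]])
+ # >>> weight_for_inversion(True, True, obs, 4)
+ # array([0.9, 0.4])
+
+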
+ def TukeyBiweight(z: np.ndarray, c: float) -> np.ndarray:
+     """
+     Tukey's biweight function, used at each iteration of the inversion to update the weights.
+
+     :param z: internally studentized residuals
+     :param c: constant cut-off value
+
+     :return weight: the updated weights (0 outside the cut-off)
+     """
+     subset = np.less_equal(abs(z), c)
+     weight = np.ma.array(((1 - (z / c) ** 2) ** 2), mask=~subset)
+
+     return weight.filled(0)
+
+
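+ # Example (sketch): residuals beyond the cut-off c get a zero weight, while
+ # residuals near zero keep a weight close to one.
+ # >>> TukeyBiweight(np.array([0.0, 2.0, 6.0]), 4.685)
+ # array([1.    , 0.6687, 0.    ])  # values rounded
+
+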
+ def hat_matrix(A: np.ndarray, coef: int, mu: np.ndarray, W: np.ndarray | None = None) -> np.ndarray:
+     """
+     :param A: matrix of the temporal inversion system AX = Y
+     :param coef: coefficient of the regularisation
+     :param mu: regularisation matrix
+     :param W: optional weights; when provided, the observations with a weight of 0 are discarded
+     :return: hat matrix of the system AX = Y or AX = BY, with a Tikhonov regularisation
+     """
+
+     if W is None:
+         A = sp.csc_matrix(A)
+         return A @ inv(A.T @ A + coef * mu.T @ mu) @ A.T
+     else:
+         A = A[W != 0]
+         ATW = np.multiply(A.T, W[W != 0][np.newaxis, :]).astype("float32")
+         A = sp.csc_matrix(A)
+         return A @ inv(ATW @ A + coef * mu.T @ mu) @ ATW
+
+
+ def GCV_function(H: np.ndarray, Residu: np.ndarray, W: np.ndarray) -> float:
+     """
+     Compute the Generalized Cross Validation (GCV) score.
+
+     :param H: hat matrix of the system
+     :param Residu: residuals of the system
+     :param W: weights of the observations
+     :return: the GCV score
+     """
+     n = W[W != 0].shape[0]
+     d = np.sum(np.diag(H)) / n  # matrix trace divided by n
+     gcv = np.sum((Residu / (1 - d)) ** 2) / n
+     return gcv
+
+
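+ # Sketch (assumes A, mu, Weight, dates_range and data are already built, and
+ # that column 2 of data holds dx; the residual definition here is an
+ # illustrative assumption): the GCV score can be scanned over candidate
+ # regularisation coefficients, keeping the coefficient with the smallest score.
+ # >>> scores = {}
+ # >>> for coef in (1, 10, 100):
+ # ...     H = hat_matrix(A, coef, mu, Weight)
+ # ...     X, _ = inversion_one_component(A, dates_range, 2, data, "LSMR", Weight, mu, coef=coef)
+ # ...     residu = A[Weight != 0] @ X - data[Weight != 0, 2]
+ # ...     scores[coef] = GCV_function(H, residu, Weight)
+ # >>> best_coef = min(scores, key=scores.get)
+
+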
+ def studentized_residual(
+     A: np.ndarray, residu: np.ndarray, dates_range: np.ndarray, W: np.ndarray, coef: int, regu: np.ndarray
+ ) -> np.ndarray:
+     """
+     :param A: matrix of the temporal inversion system AX = Y
+     :param residu: residuals (difference between AX and Y (or BY))
+     :param dates_range: an array with all the dates included in data, list
+     :param W: weights of the observations
+     :param coef: coefficient of the regularisation
+     :param regu: regularisation matrix
+     :return: internally studentized residuals
+     """
+     if A.shape[0] == A.shape[1]:
+         sigma = m.sqrt(sum(residu**2))
+     else:
+         sigma = m.sqrt(sum(residu**2) / (A.shape[0] - A.shape[1]))
+     H = np.diag(hat_matrix(A, coef, regu, W))
+     Hii = np.where(H == 1.0, 0.99, H)  # to avoid a division by 0
+     z = np.array(residu / (sigma * np.sqrt(1 - Hii))).astype("float")
+     return np.nan_to_num(z)
+
+
+ def externally_studentized_residual(
+     A: np.ndarray, residu: np.ndarray, dates_range: np.ndarray, W: np.ndarray | None, coef: int, regu: np.ndarray
+ ) -> np.ndarray:
+     """
+     :param A: matrix of the temporal inversion system AX = Y
+     :param residu: residuals (difference between AX and Y (or BY))
+     :param dates_range: an array with all the dates included in data, list
+     :param W: weights of the observations
+     :param coef: coefficient of the regularisation
+     :param regu: regularisation matrix
+     :return: externally studentized residuals
+     """
+     n = A.shape[0]
+     p = A.shape[1]
+     r = studentized_residual(A, residu, dates_range, W, coef, regu)
+     return r * ((n - p - 1) / (n - p - r**2)) ** (1 / 2)
+
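+ # Sketch (assumes residuals from a previous solve): the externally studentized
+ # residuals can be fed back into Tukey's biweight to update the weights for
+ # the next iteration of the inversion.
+ # >>> z = externally_studentized_residual(A, residu, dates_range, W, coef, mu)
+ # >>> W_next = TukeyBiweight(z, 4.685)
+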
+
+ # %% ======================================================================== #
+ #                             LINEAR INTERPOLATOR                             #
+ # =========================================================================%% #
+
+
+ @jit(nopython=True)
+ def average_absolute_deviation(data: np.ndarray) -> float:
+     """Compute the Average Absolute Deviation (AAD). Used when the Median Absolute Deviation (MAD) is equal to 0."""
+     return np.mean(np.absolute(data - np.mean(data)))
+
+
+ @jit(nopython=True)
+ def find_date_obs(data: np.ndarray, dates_range: np.ndarray) -> np.ndarray:
+     """
+     Find the index in dates_range corresponding to each first and last date in data.
+
+     :param data: an array where each line is (date1, date2, other elements) for which a velocity is computed (corresponds to the original displacements)
+     :param dates_range: dates of the estimated displacements in X with an irregular temporal sampling (ILF)
+     :return: index in dates_range corresponding to each first and last date in data
+     """
+     date1_indices = np.searchsorted(dates_range, data[:, 0])
+     date2_indices = np.searchsorted(dates_range, data[:, 1]) - 1
+     return np.column_stack((date1_indices, date2_indices))
+
+
+ @jit(nopython=True)
+ def matvecregu1_numba(
+     X: np.ndarray, Y: np.ndarray, identification_obs: np.ndarray, delta: np.ndarray, coef: int, weight: np.ndarray
+ ):
+     # Weighted observation equations: each Y[j] sums the estimated displacements covered by observation j
+     for j in range(len(identification_obs)):
+         Y[j] = np.sum(X[identification_obs[j][0] : identification_obs[j][1] + 1]) * weight[j]
+     # First order regularisation rows: difference of consecutive velocities, scaled by coef
+     Y[len(identification_obs) : len(identification_obs) + len(X) - 1] = np.diff(X / delta) * coef
+     return Y
+
+
+ @jit(nopython=True)
+ def matvec_numba(X: np.ndarray, Y: np.ndarray, identification_obs: np.ndarray):
+     # Observation equations only (no regularisation, no weight)
+     for j in range(len(identification_obs)):
+         Y[j] = np.sum(X[identification_obs[j][0] : identification_obs[j][1] + 1])
+     return Y
+
+
+ @jit(nopython=True)
+ def rmatvecregu1_numba(X, Y, identification_obs, coef, delta, weight):
+     # Adjoint of matvecregu1_numba: accumulate each observation back onto the covered displacements
+     for j in range(len(identification_obs)):
+         X[identification_obs[j][0] : identification_obs[j][1] + 1] += Y[j] * weight[j]
+     # Adjoint of the regularisation rows
+     X[0] -= Y[len(identification_obs)] / delta[0] * coef
+     for j in range(len(identification_obs) + 1, len(identification_obs) + len(X) - 1):
+         X[j - len(identification_obs)] += (Y[j - 1] - Y[j]) / delta[j - len(identification_obs)] * coef
+     X[len(X) - 1] += Y[len(identification_obs) + len(X) - 2] / delta[len(X) - 1] * coef
+     return X
+
+
+ @jit(nopython=True)
+ def rmatvecA_numba(X, Y, identification_obs):
+     # Adjoint of matvec_numba
+     for j in range(len(identification_obs)):
+         X[identification_obs[j][0] : identification_obs[j][1] + 1] += Y[j]
+     return X
+
+
+ class class_linear_operator:
+     def __init__(self):
+         self.X_length = 0  # length of the estimated velocity time series
+         self.delta = np.array([])  # temporal baselines of the estimated velocity time series
+         self.identification_obs = np.array([])
+         self.coef = 50  # coefficient of the regularisation
+
+     def load(self, identification_obs, dates_range, coef):
+         self.__init__()
+         self.X_length = len(dates_range) - 1
+         self.delta = np.diff(dates_range) / np.timedelta64(1, "D")
+         self.identification_obs = identification_obs
+         self.identification_obs_original = identification_obs
+         self.coef = coef
+
+     def update_from_weight(self, Y, Weight):
+         """
+         Update the vector Y and the stored weights according to Weight.
+         If some weights are 0, the corresponding observations are deleted.
+
+         :param Y: np.array, observed displacements
+         :param Weight: np.array, weights of the observations
+         :return: Y restricted to the observations with a non-zero weight
+         """
+         Y = Y[Weight != 0]
+         self.identification_obs = self.identification_obs_original[Weight != 0]
+         self.Weight = Weight[Weight != 0]
+         return Y
+
+     def matvecregu1(self, X):
+         """
+         Function to go from X to Y; corresponds to A.
+         First order Tikhonov regularisation (minimisation of the acceleration), with or without an a priori; the equations are weighted by self.Weight.
+
+         :param X: np.array, estimated displacements
+         :return: np.array, observed displacements
+         """
+         Y = np.zeros(len(self.identification_obs) + len(X) - 1)
+         Y = matvecregu1_numba(
+             X, Y, self.identification_obs, self.delta, self.coef, self.Weight
+         )  # call numba, to make the computation faster
+         return Y
+
+     def rmatvecregu1(self, Y):
+         """
+         Function to go from Y to X; corresponds to A.T.
+         First order Tikhonov regularisation (minimisation of the acceleration), with or without an a priori; the equations are weighted by self.Weight.
+
+         :param Y: np.array, observed displacements
+         :return: np.array, estimated displacements
+         """
+         X = np.zeros(self.X_length)
+         X = rmatvecregu1_numba(
+             X, Y, self.identification_obs, self.coef, self.delta, self.Weight
+         )  # call numba, to make the computation faster
+         return X
+
+     def matvec(self, X):
+         """
+         Function to go from X to Y; corresponds to A.
+         No regularisation, no weight.
+
+         :param X: np.array, estimated displacements
+         :return: np.array, observed displacements
+         """
+         Y = np.zeros(len(self.identification_obs_original))
+         Y = matvec_numba(X, Y, self.identification_obs_original)
+         return Y
+
+     def rmatvec(self, Y):
+         """
+         Function to go from Y to X; corresponds to A.T.
+         No regularisation, no weight.
+
+         :param Y: np.array, observed displacements
+         :return: np.array, estimated displacements
+         """
+         X = np.zeros(self.X_length)
+         X = rmatvecA_numba(X, Y, self.identification_obs_original)
+         return X
+
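+ # Sketch (assumes data and dates_range are already built): wrap the operator
+ # in a scipy LinearOperator so the regularised system can be solved
+ # matrix-free, as done in inversion_one_component when linear_operator is set.
+ # >>> op = class_linear_operator()
+ # >>> op.load(find_date_obs(data, dates_range), dates_range, coef=50)
+ # >>> y = op.update_from_weight(data[:, 2], np.ones(data.shape[0]))
+ # >>> A_l = sp.linalg.LinearOperator(
+ # ...     (y.shape[0] + op.X_length - 1, op.X_length),
+ # ...     matvec=op.matvecregu1, rmatvec=op.rmatvecregu1)
+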
+ # %% ======================================================================== #
+ #                           PROPERTY OF THE SYSTEM                            #
+ # =========================================================================%% #
+
+
+ def is_convex(A: np.ndarray) -> bool:
+     """
+     Check if the least-squares objective associated with A is convex, i.e. if A.T @ A is positive semi-definite.
+
+     Parameters:
+         A (numpy.ndarray): The matrix A of the system.
+
+     Returns:
+         bool: True if the objective is convex, False otherwise.
+     """
+     hessian_matrix = A.T @ A  # Compute the Hessian matrix
+     return np.all(np.linalg.eigvals(hessian_matrix) >= 0)
+
+
+ def matrix_property(A: np.ndarray) -> str:
+     """
+     Evaluate if the matrix is under-determined or over-determined, and whether it is ill-posed.
+
+     :param A: design matrix to evaluate
+     :return: matrix property
+     """
+     prop = "under-determined" if A.shape[0] < A.shape[1] else "over-determined"
+     if np.linalg.matrix_rank(A) < A.shape[1]:  # Rank-deficient: the system is ill-posed
+         prop += f", ill-posed (rank A = {np.linalg.matrix_rank(A)})"
+     return prop
+
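+ # Example (sketch): a 2x3 design matrix has fewer equations than unknowns and
+ # is rank-deficient, so both properties are reported.
+ # >>> matrix_property(np.ones((2, 3)))
+ # 'under-determined, ill-posed (rank A = 1)'
+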
+ # %% ======================================================================== #
+ #                                  INVERSION                                  #
+ # =========================================================================%% #
+
+
+ def inversion_one_component(
+     A: np.ndarray,
+     dates_range: np.ndarray,
+     v_pos: int,
+     data: np.ndarray,
+     solver: str,
+     Weight: int | np.ndarray,
+     mu: np.ndarray,
+     coef: int = 1,
+     ini: None | np.ndarray = None,
+     result_quality: None | list = None,
+     regu: int | str = 1,
+     accel: None | np.ndarray = None,
+     linear_operator: "class_linear_operator | None" = None,
+     verbose: bool = False,
+ ) -> tuple[np.ndarray, np.ndarray | None]:
+     """
+     Invert the system AX = Y for one component of the velocity, using a given solver.
+
+     :param A: matrix of the temporal inversion system AX = Y
+     :param dates_range: an array with all the dates included in data, list (dates of X)
+     :param v_pos: position of the v variable within data
+     :param data: an array where each line is (date1, date2, other elements) for which a velocity is computed (Y)
+     :param solver: solver used for the inversion: 'LSMR', 'LSMR_ini', 'LS', 'L1' or 'LSQR'
+     :param Weight: weights for the inversion; if Weight = 1, an Ordinary Least Squares is performed
+     :param mu: regularisation matrix
+     :param coef: coefficient of the regularisation
+     :param ini: initialization of the inversion
+     :param result_quality: None or list of str, which can contain 'Norm_residual' to determine the L2 norm of the residuals from the last inversion, and 'X_contribution' to determine the number of Y observations which have contributed to estimate each value in X (it corresponds to A.dot(weight))
+     :param regu: type of regularisation
+     :param accel: a priori on the acceleration
+     :param linear_operator: linear operator or None
+     :param verbose: if True, print information about the system
+
+     :return X: the ILF temporal inversion of AX = Y using the given solver
+     :return residu_norm: norm of the residuals (when showing the L curve)
+     """
+
+     # Total process: about 50 ms
+     if verbose:
+         print(matrix_property(A))  # Matrix A properties
+
+     if len(data.shape) > 1:
+         v = data[:, v_pos]
+     else:
+         v = data
+
+     if isinstance(Weight, int) and Weight == 1:
+         Weight = np.ones(v.shape[0])  # Equivalent to an Ordinary Least Squares
+
+     if regu == "1accelnotnull":  # A priori on the acceleration
+         D_regu = np.multiply(accel[v_pos - 2], coef)
+     else:
+         D_regu = np.zeros(mu.shape[0])
+
+     if linear_operator is None:
+         F_regu = np.multiply(coef, mu)
+     else:
+         v = linear_operator.update_from_weight(v, Weight)  # Update v according to the weights
+         A_l = sp.linalg.LinearOperator(
+             (v.shape[0] + len(dates_range) - 2, len(dates_range) - 1),
+             matvec=linear_operator.matvecregu1,
+             rmatvec=linear_operator.rmatvecregu1,
+         )
+
+     if solver == "LSMR":
+         F = np.vstack([np.multiply(Weight[Weight != 0][:, np.newaxis], A[Weight != 0]), F_regu]).astype("float64")
+         D = np.hstack([np.multiply(Weight[Weight != 0], v[Weight != 0]), D_regu]).astype("float64")
+         F = sp.csc_matrix(F)  # convert to a sparse matrix for the solver
+         # If atol or btol is None, a default value of 1.0e-6 will be used. Ideally, they should be estimates
+         # of the relative error in the entries of A and b respectively.
+         X = sp.linalg.lsmr(F, D)[0]
+
+     elif solver == "LSMR_ini":  # 50 ms
+         if ini is None:
+             raise ValueError("Please provide an initialization for the solver LSMR_ini")
+         # 16.7 ms ± 141 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
+         if not linear_operator:
+             condi = Weight != 0
+             W = Weight[condi]
+             F = sp.csc_matrix(
+                 np.vstack([np.multiply(W[:, np.newaxis], A[condi]), F_regu])
+             )  # stack AX and the regularisation, and remove the rows with only 0
+             if verbose:
+                 print("Is F convex?", is_convex(F.toarray()))
+             D = np.hstack([np.multiply(W, v[condi]), D_regu])  # stack Y and the regularisation
+         if isinstance(ini, list):  # if rolling mean
+             x0 = ini[v_pos - 2]
+         elif ini.shape[0] == 2:  # if only the average of the entire time series
+             x0 = np.full(len(dates_range) - 1, ini[v_pos - 2], dtype="float64")
+         else:
+             x0 = ini
+
+         # 24 ms ± 419 µs per loop (mean ± std. dev. of 7 runs, 10 loops each)
+         if not linear_operator:
+             X = sp.linalg.lsmr(F, D, x0=x0)[0]
+         else:
+             X = sp.linalg.lsmr(A_l, np.concatenate([linear_operator.Weight * v, D_regu]), x0=x0)[0]
+
+     elif solver == "LS":  # 136 ms ± 6.48 ms per loop (mean ± std. dev. of 7 runs, 10 loops each), time consuming
+         F = np.vstack([np.multiply(Weight[Weight != 0][:, np.newaxis], A[Weight != 0]), F_regu]).astype("float32")
+         D = np.hstack([np.multiply(Weight[Weight != 0], v[Weight != 0]), D_regu]).astype("float32")
+         X = np.linalg.lstsq(F, D, rcond=None)[0]
+
+     elif solver == "L1":  # solving using the L1 norm, time consuming!
+         F = np.vstack([np.multiply(Weight[Weight != 0][:, np.newaxis], A[Weight != 0]), F_regu]).astype("float32")
+         D = np.hstack([np.multiply(Weight[Weight != 0], v[Weight != 0]), D_regu]).astype("float32")
+         X = opt.minimize(lambda x: la.norm(D - F @ x, ord=1), np.zeros(F.shape[1])).x
+
+     elif solver == "LSQR":
+         F = np.vstack([np.multiply(Weight[Weight != 0][:, np.newaxis], A[Weight != 0]), F_regu]).astype("float32")
+         D = np.hstack([np.multiply(Weight[Weight != 0], v[Weight != 0]), D_regu]).astype("float32")
+         F = sp.csc_matrix(F)
+         X, istop, itn, r1norm = sp.linalg.lsqr(F, D)[:4]
+
+     else:
+         raise ValueError("Enter 'LSMR', 'LSMR_ini', 'LS', 'L1' or 'LSQR'")
+
+     if result_quality is not None and "Norm_residual" in result_quality:  # to show the L curve
+         R_lcurve = F.dot(X) - D  # 50.7 µs ± 327 ns per loop (mean ± std. dev. of 7 runs, 10,000 loops each)
+         residu_norm = [
+             np.linalg.norm(R_lcurve[: np.multiply(Weight[Weight != 0], v[Weight != 0]).shape[0]], ord=2),
+             np.linalg.norm(R_lcurve[np.multiply(Weight[Weight != 0], v[Weight != 0]).shape[0] :] / coef, ord=2),
+         ]
+     else:
+         residu_norm = None
+
+     return X, residu_norm
+
+
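+ # Example (sketch, synthetic pipeline; assumes column 2 of data holds dx):
+ # invert the x component with ordinary least-squares weighting (Weight=1)
+ # and a first-order regularisation.
+ # >>> dates_range = construction_dates_range_np(data)
+ # >>> A = construction_a_lf(data[:, :2], dates_range)
+ # >>> mu = mu_regularisation(1, A, dates_range)
+ # >>> X, _ = inversion_one_component(A, dates_range, 2, data, "LSMR", 1, mu, coef=100)
+
+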
+ def inversion_two_components(
+     A: np.ndarray,
+     dates_range: np.ndarray,
+     v_pos: int,
+     data: np.ndarray,
+     solver: str,
+     Weight: int | np.ndarray,
+     mu: np.ndarray,
+     coef: int = 1,
+     ini: None | np.ndarray = None,
+     show_L_curve: bool = False,
+     verbose: bool = False,
+ ):
+     """
+     Invert the system AX = Y for the two components dx and dy at the same time.
+     It allows constraining the direction of the displacement.
+
+     :param A: matrix of the temporal inversion system AX = Y
+     :param dates_range: an array with all the dates included in data, list
+     :param v_pos: position of the v variable within data
+     :param data: an array where each line is (date1, date2, other elements) for which a velocity is computed
+     :param solver: 'LSMR', 'LSMR_ini', 'LS', 'LSQR' or 'LSQR_ini'
+     :param coef: coefficient of the Tikhonov regularisation
+     :param Weight: weights for the inversion; if Weight = 1, an Ordinary Least Squares is performed
+
+     :return result_dx, result_dy: computed displacements along the x and y axes
+     :return norm_data, norm_regu: norms of the data and regularisation terms of the residuals (when showing the L curve)
+     """
+
+     if verbose:  # A properties
+         if A.shape[0] < A.shape[1]:  # System is under-determined
+             print("under-determined")
+         elif A.shape[0] >= A.shape[1]:  # System is over-determined
+             print("over-determined")
+         if np.linalg.matrix_rank(A) < A.shape[1]:  # System is ill-conditioned
+             print("ill conditioned")
+             print("rank A", np.linalg.matrix_rank(A))
+
+     c = np.concatenate([A, np.zeros(A.shape)], axis=0)
+     A = np.concatenate([c, np.concatenate([np.zeros(A.shape), A], axis=0)], axis=1)
+     dates_range = np.concatenate([dates_range, dates_range])
+     del c
+     F_regu = np.multiply(coef, mu)
+     # D_regu = np.zeros(mu.shape[0])
+     D_regu = np.ones(mu.shape[0]) * coef
+
+     v = np.concatenate([data[:, 2].T, data[:, 3].T])  # Concatenate the vx and vy observations
+
+     if solver == "LSMR":
+         F = np.vstack([np.multiply(Weight[Weight != 0][:, np.newaxis], A[Weight != 0]), F_regu]).astype("float64")
+         D = np.hstack([np.multiply(Weight[Weight != 0], v[Weight != 0]), D_regu]).astype("float64")
+         F = sp.csc_matrix(F)  # convert to a sparse matrix for the solver
+         # If atol or btol is None, a default value of 1.0e-6 will be used. Ideally, they should be estimates
+         # of the relative error in the entries of A and b respectively.
+         X = sp.linalg.lsmr(F, D)[0]
+
+     elif solver == "LSMR_ini":
+         F = np.vstack([np.multiply(Weight[Weight != 0][:, np.newaxis], A[Weight != 0]), F_regu]).astype(
+             "float64"
+         )  # stack AX and the regularisation, and remove the rows with only 0
+         D = np.hstack([np.multiply(Weight[Weight != 0], v[Weight != 0]), D_regu]).astype(
+             "float64"
+         )  # stack Y and the regularisation, and remove the rows with only 0
+
+         if isinstance(ini, list):  # if rolling mean, one array per component
+             x0 = np.concatenate(ini)
+         elif ini.shape[0] == 2:  # if only the average of the entire time series
+             x0 = np.full(F.shape[1], ini[v_pos - 2], dtype="float64")
+         else:
+             x0 = ini
+         # del ini
+
+         F = sp.csc_matrix(F)
+         X = sp.linalg.lsmr(F, D, x0=x0)[0]
+
+     elif solver == "LS":
+         F = np.vstack([np.multiply(Weight[Weight != 0][:, np.newaxis], A[Weight != 0]), coef * mu]).astype("float64")
+         D = np.hstack([np.multiply(Weight[Weight != 0], v[Weight != 0]), np.zeros(mu.shape[0])]).astype("float64")
+         X = np.linalg.lstsq(F, D, rcond=None)[0]
+
+     elif solver == "LSQR" or solver == "LSQR_ini":
+         F = np.vstack([np.multiply(Weight[Weight != 0][:, np.newaxis], A[Weight != 0]), coef * mu]).astype("float64")
+         D = np.hstack([np.multiply(Weight[Weight != 0], v[Weight != 0]), np.zeros(mu.shape[0])]).astype("float64")
+         F = sp.csc_matrix(F)  # convert to a sparse matrix for the solver
+         X, istop, itn, r1norm = sp.linalg.lsqr(F, D)[:4]
+
+     else:
+         raise ValueError("Enter 'LSMR', 'LSMR_ini', 'LS', 'LSQR' or 'LSQR_ini'")
+
+     if show_L_curve:
+         R_lcurve = F.dot(X) - D
+         residu_norm = [
+             np.linalg.norm(R_lcurve[: np.multiply(Weight[Weight != 0], v[Weight != 0]).shape[0]], ord=2),
+             np.linalg.norm(R_lcurve[np.multiply(Weight[Weight != 0], v[Weight != 0]).shape[0] :] / coef, ord=2),
+         ]
+     else:
+         residu_norm = None
+
+     if residu_norm is None:
+         return X[: X.shape[0] // 2], X[X.shape[0] // 2 :], None, None
+     else:
+         return (
+             X[: X.shape[0] // 2],
+             X[X.shape[0] // 2 :],
+             residu_norm[0],  # norm of the data term
+             residu_norm[1],  # norm of the regularisation term
+         )
+
+
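+ # Example (sketch; ini_vx and ini_vy are hypothetical mean-velocity arrays):
+ # joint x/y inversion with the direction regularisation, where mu is built
+ # with regu="directionxy" and the weights cover the stacked vx and vy rows.
+ # >>> mu = mu_regularisation("directionxy", A, dates_range, ini=[ini_vx, ini_vy])
+ # >>> Weight = np.ones(2 * data.shape[0])
+ # >>> dx, dy, norm_data, norm_regu = inversion_two_components(
+ # ...     A, dates_range, 2, data, "LSMR", Weight, mu, coef=100)
+
+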
+ # %% ======================================================================== #
+ #                                OLD FUNCTION                                 #
+ # =========================================================================%% #
+
+
+ # def BYAX_construction_B(A: np.ndarray, dates_range: np.ndarray, data: np.ndarray, all_possibilities: bool = False, verbose: bool = False):
+ #     '''
+ #     Construction of the matrix B, which combines the displacement observations as explained in the TGRS paper.
+ #     Note: This function may not be up to date
+ #     :param A: matrix of the temporal inversion system AX=Y
+ #     :param dates_range: an array with all the dates included in data, list
+ #     :param data: an array where each line is (date1, date2, other elements) for which a velocity is computed
+ #     :param all_possibilities:
+ #     :param verbose:
+ #     :return:
+ #     '''
+ #
+ #     import copy
+ #     B = np.zeros((A.shape[0], A.shape[0]))
+ #     dates = copy.deepcopy(data[:, :2])
+ #     sensors = copy.deepcopy(data[:, 6])
+ #     authors = copy.deepcopy(data[:, 7])
+ #     for ligne in range(A.shape[0]):
+ #         if A[ligne, :].any() == False:  # the row contains only zeros
+ #             if verbose:
+ #                 print(f'row {ligne} contains only zeros')
+ #                 print(f'original dates {data[ligne, 0]} - {data[ligne, 1]}')
+ #
+ #             if data[ligne, 1] not in dates_range and data[ligne, 0] not in dates_range:  # date1 and date2 are not in dates_range
+ #                 if verbose: print('Date1 and Date2 not in dates_range')
+ #
+ #                 if all_possibilities:
+ #                     Save_d1_add_to_date1 = {}  # dict mapping the element of Y that was added (column of B) to the row of B on which this combination is written
+ #                     Save_d2_sub_date1 = {}
+ #
+ #                 Add_to_date2 = np.where(data[:, 0] == data[ligne, 1])[0]
+ #                 if Add_to_date2.shape[0] != 0:
+ #                     for i_add_to_date2 in Add_to_date2:
+ #                         if data[i_add_to_date2, 1] in dates_range:
+ #                             # print(f'Add {data[i_add_to_date2, 0]} - {data[i_add_to_date2, 1]}')
+ #                             if B[ligne, :].any() == False:  # if no combination has been filled in yet
+ #                                 B[ligne, i_add_to_date2] = B[ligne, ligne] = 1
+ #                                 if verbose: print(f'Add {i_add_to_date2} {data[i_add_to_date2, 0]} - {data[i_add_to_date2, 1]}')
+ #                                 dates[ligne, 0] = data[ligne, 0]
+ #                                 dates[ligne, 1] = data[i_add_to_date2, 1]
+ #                                 sensors[ligne] = f'{sensors[ligne]};{sensors[i_add_to_date2]}'
+ #                                 authors[ligne] = f'{authors[ligne]};{authors[i_add_to_date2]}'
+ #                                 if all_possibilities: Save_d1_add_to_date1[i_add_to_date2] = ligne
+ #                                 # print(f'New date {data[ligne, 0]} - {data[i_add_to_date2, 1]}')
+ #                                 break
+ #                             elif all_possibilities:  # if a combination exists already but all possibilities are wanted
+ #                                 B = np.append(B, [[0] * B.shape[1]], axis=0)
+ #                                 B[-1, i_add_to_date2] = B[-1, ligne] = 1
+ #                                 if verbose: print(f'Add {i_add_to_date2} as new line {B.shape[0] - 1} {data[i_add_to_date2, 0]} - {data[i_add_to_date2, 1]}')
+ #                                 dates = np.append(dates, [[data[ligne, 0], data[i_add_to_date2, 1]]], axis=0)
+ #                                 Save_d1_add_to_date1[i_add_to_date2] = B.shape[0] - 1
+ #
+ #                 Sub_to_date2 = np.where(data[:, 1] == data[ligne, 1])[0]
+ #                 if Sub_to_date2.shape[0] != 0:
+ #                     for i_sub_to_date2 in Sub_to_date2:
+ #                         if data[i_sub_to_date2, 0] > data[ligne, 0]:
+ #                             if data[i_sub_to_date2, 0] in dates_range:
+ #                                 if B[ligne, :].any() == False:  # if no combination has been filled in yet
+ #                                     B[ligne, ligne] = 1
+ #                                     B[ligne, i_sub_to_date2] = -1
+ #                                     if verbose: print(f'Sub {i_sub_to_date2} {data[i_sub_to_date2, 0]} - {data[i_sub_to_date2, 1]}')
+ #                                     dates[ligne, 0] = data[ligne, 0]
+ #                                     dates[ligne, 1] = data[i_sub_to_date2, 0]
+ #                                     sensors[ligne] = f'{sensors[ligne]};{sensors[i_sub_to_date2]}'
+ #                                     authors[ligne] = f'{authors[ligne]};{authors[i_sub_to_date2]}'
+ #                                     if all_possibilities: Save_d2_sub_date1[i_sub_to_date2] = ligne
+ #                                     break  # if a combination is found, exit the for loop
+ #                                     # print(f'New date {data[ligne, 0]} - {data[i_sub_to_date2, 0]}')
+ #                                 elif all_possibilities:
+ #                                     B = np.append(B, [[0] * B.shape[1]], axis=0)
+ #                                     B[-1, ligne] = 1
+ #                                     B[-1, i_sub_to_date2] = -1
+ #                                     if verbose: print(f'Sub {i_sub_to_date2} as new line {B.shape[0] - 1} {data[i_sub_to_date2, 0]} - {data[i_sub_to_date2, 1]}')
+ #                                     dates = np.append(dates, [[data[ligne, 0], data[i_sub_to_date2, 0]]], axis=0)
+ #                                     Save_d2_sub_date1[i_sub_to_date2] = B.shape[0] - 1
+ #
+ #                 Add_to_date1 = np.where(data[:, 1] == data[ligne, 0])[0]
+ #                 if Add_to_date1.shape[0] != 0:
+ #                     for i_add_to_date1 in Add_to_date1:
+ #                         if data[i_add_to_date1, 0] in dates_range:
+ #                             if np.count_nonzero(B[ligne, :]) < 3:  # only combinations of three displacements
+ #                                 B[ligne, i_add_to_date1] = B[ligne, ligne] = 1
+ #                                 dates[ligne, 0] = data[i_add_to_date1, 0]
+ #                                 sensors[ligne] = f'{sensors[ligne]};{sensors[i_add_to_date1]}'
+ #                                 authors[ligne] = f'{authors[ligne]};{authors[i_add_to_date1]}'
+ #                                 if verbose:
+ #                                     print(f'Add {i_add_to_date1} {data[i_add_to_date1, 0]} - {data[i_add_to_date1, 1]}')
+ #                                     print(f'New date {data[i_add_to_date1, 0]} - {dates[ligne, 1]}')
+ #                                 break
+ #                             elif all_possibilities:
+ #                                 if len(Save_d1_add_to_date1) != 0:
+ #                                     for colonneB, ligneB in Save_d1_add_to_date1.items():
+ #                                         if np.count_nonzero(B[ligneB, :]) < 3:
+ #                                             B[ligneB, i_add_to_date1] = 1
+ #                                             dates[ligneB, 0] = data[i_add_to_date1, 0]
+ #                                             if verbose:
+ #                                                 print(f'Add to the line {ligneB} the index {i_add_to_date1} {data[i_add_to_date1, 0]} - {data[i_add_to_date1, 1]}')
+ #                                                 print(f'New date {data[i_add_to_date1, 0]} - {data[colonneB, 1]}')
+ #                                         else:
+ #                                             B = np.append(B, [[0] * B.shape[1]], axis=0)
+ #                                             B[-1, i_add_to_date1] = B[-1, ligne] = B[-1, colonneB] = 1
+ #                                             dates = np.append(dates, [[data[i_add_to_date1, 0], data[colonneB, 1]]], axis=0)
+ #                                             if verbose:
+ #                                                 print(f'Add to the line {B.shape[0]} the index {i_add_to_date1} {data[i_add_to_date1, 0]} - {data[i_add_to_date1, 1]}')
+ #                                                 print(f'New date {data[i_add_to_date1, 0]} - {data[colonneB, 1]}')
+ #                                 if len(Save_d2_sub_date1) != 0:
+ #                                     for colonneB, ligneB in Save_d2_sub_date1.items():
+ #                                         if np.count_nonzero(B[ligneB, :]) < 3:
+ #                                             B[ligneB, i_add_to_date1] = 1
+ #                                             dates[ligneB, 0] = data[i_add_to_date1, 0]
+ #                                             if verbose:
+ #                                                 print(f'Add to the line {ligneB} the index {i_add_to_date1} {data[i_add_to_date1, 0]} - {data[i_add_to_date1, 1]}')
+ #                                                 print(f'New date {data[i_add_to_date1, 0]} - {data[colonneB, 0]}')
+ #                                         else:
+ #                                             B = np.append(B, [[0] * B.shape[1]], axis=0)
+ #                                             B[-1, i_add_to_date1] = B[-1, ligne] = 1
+ #                                             B[-1, colonneB] = -1
+ #                                             dates = np.append(dates, [[data[i_add_to_date1, 0], data[colonneB, 0]]], axis=0)
+ #                                             if verbose:
+ #                                                 print(f'Add to the line {B.shape[0] - 1} the index {i_add_to_date1} {data[i_add_to_date1, 0]} - {data[i_add_to_date1, 1]}')
+ #                                                 print(f'New date {data[i_add_to_date1, 0]} - {data[colonneB, 0]}')
+ #
+ #                 Sub_to_date1 = np.where(data[:, 0] == data[ligne, 0])[0]
+ #                 if Sub_to_date1.shape[0] != 0:
+ #                     for i_sub_to_date1 in Sub_to_date1:
+ #                         if data[i_sub_to_date1, 1] < data[ligne, 1]:
+ #                             if data[i_sub_to_date1, 1] in dates_range:
+ #                                 # if a combination of 3 has not already been found and date1 has not already been modified
+ #                                 if np.count_nonzero(B[ligne, :]) < 3 and np.count_nonzero(B[ligne, :ligne + 1]) < 2:
+ #                                     B[ligne, ligne] = 1
+ #                                     B[ligne, i_sub_to_date1] = -1
+ #                                     dates[ligne, 0] = data[i_sub_to_date1, 1]
+ #                                     sensors[ligne] = f'{sensors[ligne]};{sensors[i_sub_to_date1]}'
+ #                                     authors[ligne] = f'{authors[ligne]};{authors[i_sub_to_date1]}'
+ #                                     if verbose:
+ #                                         print(f'Sub {i_sub_to_date1} {data[i_sub_to_date1, 0]} - {data[i_sub_to_date1, 1]}')
+ #                                         print(f'New date {data[i_sub_to_date1, 0]} - {data[ligne, 1]}')
+ #                                     break
+ #                                 elif all_possibilities:
+ #                                     if len(Save_d1_add_to_date1) != 0:
+ #                                         for colonneB, ligneB in Save_d1_add_to_date1.items():
+ #                                             if np.count_nonzero(B[ligneB, :]) < 3:
+ #                                                 B[ligneB, i_sub_to_date1] = -1
+ #                                                 dates[ligneB, 0] = data[i_sub_to_date1, 1]
+ #                                                 if verbose:
+ #                                                     print(f'Add to the line {ligneB} the index {i_sub_to_date1} {data[i_sub_to_date1, 0]} - {data[i_sub_to_date1, 1]}')
+ #                                                     print(f'New date {data[i_sub_to_date1, 1]} - {data[colonneB, 1]}')
+ #                                             else:
+ #                                                 B = np.append(B, [[0] * B.shape[1]], axis=0)
+ #                                                 B[-1, i_sub_to_date1] = -1
+ #                                                 B[-1, ligne] = B[-1, colonneB] = 1
+ #                                                 dates = np.append(dates, [[data[i_sub_to_date1, 1], data[colonneB, 1]]], axis=0)
+ #                                                 if verbose:
+ #                                                     print(f'Add to the line {B.shape[0] - 1} the index {i_sub_to_date1} {data[i_sub_to_date1, 0]} - {data[i_sub_to_date1, 1]}')
+ #                                                     print(f'New date {data[i_sub_to_date1, 1]} - {data[colonneB, 1]}')
+ #                                     if len(Save_d2_sub_date1) != 0:
+ #                                         for colonneB, ligneB in Save_d2_sub_date1.items():
+ #                                             if np.count_nonzero(B[ligneB, :]) < 3:
+ #                                                 B[ligneB, i_sub_to_date1] = -1
+ #                                                 dates[ligneB, 0] = data[i_sub_to_date1, 1]
+ #                                                 if verbose:
+ #                                                     print(f'Add to the line {ligneB} the index {i_sub_to_date1} {data[i_sub_to_date1, 0]} - {data[i_sub_to_date1, 1]}')
+ #                                                     print(f'New date {data[i_sub_to_date1, 0]} - {data[colonneB, 0]}')
+ #                                             else:
+ #                                                 B = np.append(B, [[0] * B.shape[1]], axis=0)
+ #                                                 B[-1, i_sub_to_date1] = B[-1, colonneB] = -1
+ #                                                 B[-1, ligne] = 1
+ #                                                 dates = np.append(dates, [[data[i_sub_to_date1, 1], data[colonneB, 1]]], axis=0)
+ #                                                 if verbose:
+ #                                                     print(f'Add to the line {B.shape[0] - 1} the index {i_sub_to_date1} {data[i_sub_to_date1, 0]} - {data[i_sub_to_date1, 1]}')
+ #                                                     print(f'New date {data[i_sub_to_date1, 0]} - {data[colonneB, 0]}')
+ #
+ #                 # if a combination of 3 displacements has not been found, or the combination found does not work since date1 is higher than date2
+ #                 if np.count_nonzero(B[ligne, :]) == 2:
+ #                     if verbose:
+ #                         print('Attempt at a combination of three failed')
+ #                         print(dates[ligne])
+ #                     B[ligne, :].fill(0)
+ #                     B[ligne, ligne] = 1
+ #                     dates[ligne, 0] = data[ligne, 0]
+ #                     dates[ligne, 1] = data[ligne, 1]
+ #                     sensors[ligne] = sensors[ligne].split(';')[0]
+ #                     authors[ligne] = authors[ligne].split(';')[0]
+ #
+ #                 if dates[ligne, 0] >= dates[ligne, 1]:
+ #                     if verbose:
+ #                         print('row that does not satisfy date1 < date2')
+ #                         print(dates[ligne])
+ #                     B[ligne, :].fill(0)
+ #                     B[ligne, ligne] = 1
+ #                     dates[ligne, 0] = data[ligne, 0]
+ #                     dates[ligne, 1] = data[ligne, 1]
+ #                     sensors[ligne] = sensors[ligne].split(';')[0]
+ #                     authors[ligne] = authors[ligne].split(';')[0]
+ #
+ #             elif data[ligne, 0] not in dates_range:  # if only date1 does not exist in X, try to add a displacement at date1, or to subtract one at date1
+ #                 if verbose: print(f'Date1 {data[ligne, 0]} not in dates_range')
+ #                 Add_to_date1 = np.where(data[:, 1] == data[ligne, 0])[0]  # The elements of Y whose date2 equals the date1 of Y[ligne]
+ #                 if Add_to_date1.shape[0] != 0:
+ #                     for i_add_to_date1 in Add_to_date1:
+ #                         if data[i_add_to_date1, 0] in dates_range:
+ #                             if B[ligne, :].any() == False:
+ #                                 if B[i_add_to_date1, i_add_to_date1] != 1 or B[i_add_to_date1, ligne] != 1:  # if the combination has not already been used
+ #                                     B[ligne, i_add_to_date1] = B[ligne, ligne] = 1
+ #                                     dates[ligne, 0] = data[i_add_to_date1, 0]
+ #                                     dates[ligne, 1] = data[ligne, 1]
+ #                                     sensors[ligne] = f'{sensors[ligne]};{sensors[i_add_to_date1]}'
+ #                                     authors[ligne] = f'{authors[ligne]};{authors[i_add_to_date1]}'
+ #                                     if verbose:
+ #                                         print(f'Add {i_add_to_date1} {data[i_add_to_date1, 0]} - {data[i_add_to_date1, 1]}')
+ #                                         print(f'New date {data[i_add_to_date1, 0]} - {data[ligne, 1]}')
+ #                                 elif verbose:
+ #                                     print(f'Combination already existing Add {i_add_to_date1} {data[i_add_to_date1, 0]} - {data[i_add_to_date1, 1]}')
+ #                             elif all_possibilities:
+ #                                 B = np.append(B, [[0] * B.shape[1]], axis=0)
+ #                                 B[-1, i_add_to_date1] = B[-1, ligne] = 1
+ #                                 dates = np.append(dates, [[data[i_add_to_date1, 0], data[ligne, 1]]], axis=0)
+ #                                 if verbose:
+ #                                     print(f'Add {i_add_to_date1} as a new line {B.shape[0]} {data[i_add_to_date1, 0]} - {data[i_add_to_date1, 1]}')
+ #                                     print(f'New date {data[i_add_to_date1, 0]} - {data[ligne, 1]}')
+ #
+ #                 Sub_to_date1 = np.where(data[:, 0] == data[ligne, 0])[0]  # The elements of Y whose date1 equals the date1 of Y[ligne]
+ #                 if Sub_to_date1.shape[0] != 0:
+ #                     for i_sub_to_date1 in Sub_to_date1:
+ #                         if data[i_sub_to_date1, 1] < data[ligne, 1]:
+ #                             if data[i_sub_to_date1, 1] in dates_range:
+ #                                 if B[ligne, :].any() == False:
+ #                                     if B[i_sub_to_date1, i_sub_to_date1] != 1 or B[i_sub_to_date1, ligne] != 1:
+ #                                         B[ligne, ligne] = 1
+ #                                         B[ligne, i_sub_to_date1] = -1
+ #                                         dates[ligne, 0] = data[i_sub_to_date1, 1]
+ #                                         dates[ligne, 1] = data[ligne, 1]
+ #                                         sensors[ligne] = f'{sensors[ligne]};{sensors[i_sub_to_date1]}'
+ #                                         authors[ligne] = f'{authors[ligne]};{authors[i_sub_to_date1]}'
+ #                                         if verbose:
+ #                                             print(f'New date {data[i_sub_to_date1, 0]} - {data[ligne, 1]}')
+ #                                             print(f'Sub {i_sub_to_date1} {data[i_sub_to_date1, 0]} - {data[i_sub_to_date1, 1]}')
+ #                                     elif verbose:
+ #                                         print(f'Combination already existing Sub {i_sub_to_date1} {data[i_sub_to_date1, 0]} - {data[i_sub_to_date1, 1]}')
+ #                                 elif all_possibilities:
+ #                                     B = np.append(B, [[0] * B.shape[1]], axis=0)
+ #                                     B[-1, ligne] = 1
+ #                                     B[-1, i_sub_to_date1] = -1
+ #                                     dates = np.append(dates, [[data[i_sub_to_date1, 1], data[ligne, 1]]], axis=0)
+ #                                     if verbose:
+ #                                         print(f'New date {data[i_sub_to_date1, 0]} - {data[ligne, 1]}')
+ #                                         print(f'Sub {i_sub_to_date1} as new line {B.shape[0]} {data[i_sub_to_date1, 0]} - {data[i_sub_to_date1, 1]}')
+ #
+ #             else:
+ #                 if verbose: print(f'Date2 {data[ligne, 1]} not in dates_range')
+ #                 Add_to_date2 = np.where(data[:, 0] == data[ligne, 1])[0]  # The elements of Y whose date1 equals the date2 of Y[ligne]
+ #                 if Add_to_date2.shape[0] != 0:
+ #                     for i_add_to_date2 in Add_to_date2:
+ #                         if data[i_add_to_date2, 1] in dates_range:
+ #                             # print(f'Add {data[i_add_to_date2, 0]} - {data[i_add_to_date2, 1]}')
+ #                             if B[ligne, :].any() == False:
+ #                                 if B[i_add_to_date2, i_add_to_date2] != 1 or B[i_add_to_date2, ligne - 1] != 1:
+ #                                     B[ligne, i_add_to_date2] = B[ligne, ligne] = 1
+ #                                     dates[ligne, 0] = data[ligne, 0]
+ #                                     dates[ligne, 1] = data[i_add_to_date2, 1]
+ #                                     sensors[ligne] = f'{sensors[ligne]};{sensors[i_add_to_date2]}'
+ #                                     authors[ligne] = f'{authors[ligne]};{authors[i_add_to_date2]}'
+ #                                     if verbose:
+ #                                         print(f'New date {data[ligne, 0]} - {data[i_add_to_date2, 1]}')
+ #                                         print(f'Add {i_add_to_date2} {data[i_add_to_date2, 0]} - {data[i_add_to_date2, 1]}')
+ #                                 elif verbose:
+ #                                     print(f'Combination already existing Add {i_add_to_date2} {data[i_add_to_date2, 0]} - {data[i_add_to_date2, 1]}')
+ #                             elif all_possibilities:
+ #                                 B = np.append(B, [[0] * B.shape[1]], axis=0)
+ #                                 B[-1, i_add_to_date2] = B[-1, ligne] = 1
+ #                                 dates = np.append(dates, [[data[ligne, 0], data[i_add_to_date2, 1]]], axis=0)
+ #                                 if verbose:
+ #                                     print(f'Add {i_add_to_date2} as new line {B.shape[0]} {data[i_add_to_date2, 0]} - {data[i_add_to_date2, 1]}')
+ #                                     print(f'New date {data[ligne, 0]} - {data[i_add_to_date2, 1]}')
+ #
+ #                 Sub_to_date2 = np.where(data[:, 1] == data[ligne, 1])[0]
+ #                 if Sub_to_date2.shape[0] != 0:
+ #                     for i_sub_to_date2 in Sub_to_date2:
+ #                         if data[i_sub_to_date2, 0] > data[ligne, 0]:
+ #                             if data[i_sub_to_date2, 0] in dates_range:
+ #                                 if B[ligne, :].any() == False:
+ #                                     if B[i_sub_to_date2, ligne] != 1 or B[i_sub_to_date2, i_sub_to_date2] != -1:
+ #                                         B[ligne, ligne] = 1
+ #                                         B[ligne, i_sub_to_date2] = -1
+ #                                         dates[ligne, 0] = data[ligne, 0]
+ #                                         dates[ligne, 1] = data[i_sub_to_date2, 0]
+ #                                         sensors[ligne] = f'{sensors[ligne]};{sensors[i_sub_to_date2]}'
+ #                                         authors[ligne] = f'{authors[ligne]};{authors[i_sub_to_date2]}'
+ #                                         if verbose:
+ #                                             print(f'New date {data[ligne, 0]} - {data[i_sub_to_date2, 0]}')
+ #                                             print(f'Sub {i_sub_to_date2} {data[i_sub_to_date2, 0]} - {data[i_sub_to_date2, 1]}')
+ #                                     elif verbose:
+ #                                         print(f'Combination already existing Sub {i_sub_to_date2} {data[i_sub_to_date2, 0]} - {data[i_sub_to_date2, 1]}')
+ #                                 elif all_possibilities:
+ #                                     B = np.append(B, [[0] * B.shape[1]], axis=0)
+ #                                     B[-1, ligne] = 1
+ #                                     B[-1, i_sub_to_date2] = -1
+ #                                     dates = np.append(dates, [[data[ligne, 0], data[i_sub_to_date2, 0]]], axis=0)
+ #                                     if verbose: print(f'Sub {i_sub_to_date2} as new line {B.shape[0]} {data[i_sub_to_date2, 0]} - {data[i_sub_to_date2, 1]}')
+ #         else:
+ #             B[ligne, ligne] = 1
+ #
+ #     return B, dates, sensors, authors