GreeDS 2.1-py3-none-any.whl → 2.2-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {greeds-2.1.dist-info → greeds-2.2.dist-info}/METADATA +1 -1
- greeds-2.2.dist-info/RECORD +6 -0
- greeds-2.2.dist-info/top_level.txt +2 -0
- GreeDS/GreeDS.py +0 -381
- GreeDS/__init__.py +0 -2
- greeds-2.1.dist-info/RECORD +0 -8
- greeds-2.1.dist-info/top_level.txt +0 -1
- {greeds-2.1.dist-info → greeds-2.2.dist-info}/WHEEL +0 -0
- /GreeDS/rotation.py → /rotation.py +0 -0
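Reading the summary: the 2.1 wheel's GreeDS/ package (GreeDS/GreeDS.py, GreeDS/__init__.py) is dropped, and the 2.2 RECORD below lists GreeDS.py and rotation.py as top-level modules instead. A minimal sketch of what that layout change would mean for imports, assuming no compatibility shim in 2.2 (the new __init__/top_level contents are not shown in this diff):

```python
# Hypothetical illustration of the layout change, not part of the diff.
# In the 2.1 wheel the code lived inside a GreeDS/ package:
#     from GreeDS.GreeDS import GreeDS, find_optimal_iter
#     from GreeDS.rotation import tensor_rotate_fft
# (what GreeDS/__init__.py re-exported is not shown in this diff)
#
# In the 2.2 wheel, per the RECORD below, GreeDS.py and rotation.py are
# installed as top-level modules, so flat imports would be used instead:
from GreeDS import GreeDS, find_optimal_iter   # top-level GreeDS.py
from rotation import tensor_rotate_fft         # top-level rotation.py
```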
{greeds-2.1.dist-info → greeds-2.2.dist-info}/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: GreeDS
-Version: 2.1
+Version: 2.2
 Summary: This package is a ADI or ARDI sequence processing tool that aim to distangle extended signal (like disks) from quasi-static speakels) using iterative PCA
 Home-page: https://github.com/Sand-jrd/GreeDS
 Author: Sandrine Juillard
greeds-2.2.dist-info/RECORD
ADDED
@@ -0,0 +1,6 @@
+GreeDS.py,sha256=pas7Kc1Gf8l3anbtVx_eWm_z-uSVkpiQEMZ1jDCHr8Q,13145
+rotation.py,sha256=e7lhJ6_dkmSjnG0wY22v01hIWDNc6umHPyH8OrVEXAQ,4120
+greeds-2.2.dist-info/METADATA,sha256=rT6aiLTeXRd5tanwNuGexx7Qj9z_34lHt5YYafy9FNA,825
+greeds-2.2.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
+greeds-2.2.dist-info/top_level.txt,sha256=VRq2oiWuomPeZnbkMvK6a21tDqr_NhTNZP6d1mnWcCE,16
+greeds-2.2.dist-info/RECORD,,
GreeDS/GreeDS.py
DELETED
@@ -1,381 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-"""
-Created on Thu Nov 10 08:43:22 2022
-
-______________________________
-GreeDS
-______________________________
-
-GreeDS algorithm from Pairet etal 2020.
-
-Basic implemented that works independently from MAYONNAISE.
-
-Require the dependancy torch and kornia
-
-@author: sand-jrd
-"""
-
-import torch
-import numpy as np
-from rotation import tensor_rotate_fft
-import photutils
-import matplotlib.pyplot as plt
-from torchvision.transforms.functional import (rotate, InterpolationMode)
-
-def cube_rotate(cube, angles, fft=False):
-    new_cube = torch.zeros(cube.shape)
-    if not fft:
-        for ii in range(len(angles)):
-            new_cube[ii] = rotate(torch.unsqueeze(cube[ii], 0), -float(angles[ii]),
-                                  InterpolationMode.BILINEAR)[0]
-        return new_cube
-    else:
-        for ii in range(len(angles)):
-            new_cube[ii] = tensor_rotate_fft(torch.unsqueeze(cube[ii], 0), -float(angles[ii]))
-        return new_cube
-
-def circle(shape: tuple, r: float, offset=(0.5, 0.5)):
-    """ Create circle of 0 in a 2D matrix of ones"
-
-    Parameters
-    ----------
-
-    shape : tuple
-        shape x,y of the matrix
-
-    r : float
-        radius of the circle
-    offset : (optional) float
-        offset from the center
-
-    Returns
-    -------
-    M : ndarray
-        Zeros matrix with a circle filled with ones
-
-    """
-    assert (len(shape) == 2 or len(shape) == 3)
-    if isinstance(offset, (int, float)): offset = (offset, offset)
-
-    nb_f = shape[0] if len(shape) == 3 else 0
-    shape = shape[1:] if len(shape) == 3 else shape
-
-    M = np.zeros(shape)
-    w, l = shape
-    for x in range(0, w):
-        for y in range(0, l):
-            if pow(x - (w // 2) + offset[0], 2) + pow(y - (l // 2) + offset[1], 2) < pow(r, 2):
-                M[x, y] = 1
-
-    if nb_f: M = np.tile(M, (nb_f, 1, 1))
-
-    return 1 - M
-
-class GtolNotReached(Exception):
-    """Considere increasing gtol or chosing another set of parameters"""
-    pass
-
-
-def GreeDS(cube, angles, r=1, l=10, r_start=1, pup=6, refs=None, x_start=None, full_output=0, returnL=False,
-           returntype="numpy"):
-    """
-
-    Parameters
-    ----------
-    x_start
-    cube : numpy array
-        3D cube of data. shape : (nb_frame, length, width)
-
-    angles : numpy array
-        1D array of PA angle. Must be the same length as cube nb_frame
-
-    r : int
-        Number of rank to iterate over. The default is 1.
-
-    l : int or str {'incr'}
-        Number of iteration per rank. The default is 10.
-        If set to 'incr', the number of iteration per rank will increase with rank.
-
-    r_start : int
-        First rank estimate, r_start < r
-        GreeDS will iterate from rank r-start to r
-
-    pup : int
-        Raduis of the pupil mask
-
-    refs : numpy array
-        3D cube of reference frames. shape = (nb_frame, length, width)
-
-    returntype : {"numpy", "tensor"}
-        Type of the function output
-
-    full_output : int (0 to 3)
-        Choose to return :
-        * 0/False -> only last estimation
-        * 1/True -> every iter over r*l
-        * 2 -> every iter over r
-        * 3 -> every iter over l
-
-    returnL : bool
-        Return PSF estimation
-
-    Returns
-    -------
-    x_k [full_ouputs=False]
-        Estimated circumstellar signal.
-
-    iter_frames [full_ouputs=True]
-        Estimated circumstellar signal x_k for different iterations.
-
-    """
-
-    # Shapes
-    shape = cube.shape[-2:]
-    len_img = shape[0]
-    nb_frame = len(angles)
-    nb_frame_es = len(angles)
-
-    # References
-    if refs is not None:
-        assert (refs.shape[-2:] == shape)
-        refs = torch.from_numpy(refs)
-        print("Cube filled with " + str(int(100 * refs.shape[0] / nb_frame)) + " percent of reference frames")
-        nb_frame_es = len(angles) + refs.shape[0]
-
-    # Convert to use torch
-    cube = torch.from_numpy(cube)
-
-    angles = torch.from_numpy(angles)
-    pup = 1 if pup == 0 else circle(shape, pup)
-
-    iter_frames = []
-    iter_L = []
-
-    x_k = torch.zeros(shape)
-    if x_start is not None: x_k = torch.from_numpy(x_start)
-
-    incr = True if l == "incr" else False
-
-    # One iteration of greeDS
-    def GreeDS_iter(x, q):
-
-        R = cube - cube_rotate(x.expand(nb_frame, len_img, len_img), -angles)
-
-        if refs is not None:
-            R = torch.cat((R, refs))
-
-        U, Sigma, V = torch.pca_lowrank(R.view(nb_frame_es, len_img * len_img), q=q, niter=1, center=False)
-        L = (U @ torch.diag(Sigma) @ V.T).reshape(nb_frame_es, len_img, len_img)
-
-        if refs is not None: L = L[:nb_frame]
-        L *= L > 0
-
-        S_der = cube_rotate(cube - L, angles)
-
-        frame = torch.mean(S_der, axis=0) * pup
-        frame *= frame > 0
-
-        return frame, L
-
-    ## Main loop over N_comp and nb_rank.
-    for ncomp in range(r_start, r + 1):
-
-        if incr: l = ncomp - r_start + 1
-
-        for _ in range(1, l + 1):
-
-            x_k1, xl = GreeDS_iter(x_k, ncomp)
-            x_k = x_k1.clone()
-
-            if full_output == 1:
-                iter_frames.append(x_k1.numpy())
-                if returnL: iter_L.append(xl.numpy())
-            if full_output == 3 and ncomp == r + 1: iter_frames.append(x_k1.numpy())
-
-        if full_output == 2: iter_frames.append(x_k1.numpy())
-
-    iter_frames = np.array(iter_frames)
-    iter_L = np.array(iter_L)
-    if returntype == "numpy":
-        x_k = x_k.numpy()
-        xl = xl.numpy()
-
-    if returnL:
-        if full_output:
-            return iter_frames, iter_L
-        else:
-            return x_k, xl
-
-    if full_output:
-        return iter_frames
-    else:
-        return x_k
-
-# %%
-
-def find_optimal_iter(res, noise_lim=30, singal_lim=(10, 30), apps=None, gtol=1e-2, win=2, plot=False, saveplot=False,
-                      returnSNR=False, app_size=8, l=10, r_start=1, r=10):
-    """Find the optimal iteration in two steps : 1-Ensuring signal have converged 2-Minimizing SNR
-
-    Parameters
-    ----------
-
-    res : numpy array
-        3D cube of GreeDS estimate. shape : (nb_frame, length, width)
-
-    noise_lim : int [default=30]
-        Limit raduis of noise region
-
-    singal_lim : tuple [default=(10,30)] or "app"
-        Inner and outter raduis of signal region
-
-    gtol : int [gtol=0.1]
-        Gradient tolerance
-
-    win : int [default=3]
-        Moving average window
-
-
-    Returns
-    -------
-
-    res[indx] : numpy array
-        Optimal frame estimate
-
-    indx : int
-        Optimal index frame
-
-    """
-    size = res.shape[1]
-    pup = circle((size, size), 8) - circle((size, size), size // 2)
-
-    ## Defining Noise and signal region & Computing flx variation
-
-    if str(noise_lim) == "app":
-        img = res[3]
-        plt.title("Click to define apperture of noise region")
-        plt.imshow(pup * img, vmax=np.percentile(img, 99))
-        apps = plt.ginput(n=1)[0]
-        siftx = apps[0]
-        sifty = apps[1]
-
-        fwhm_aper = photutils.CircularAperture([siftx, sifty], app_size)
-        noise = fwhm_aper.to_mask().to_image(img.shape)
-        flx_noise = np.array(
-            [photutils.aperture_photometry(frame, fwhm_aper, method='exact')["aperture_sum"] for frame in
-             res]).flatten()
-        plt.close("all")
-    else:
-        noise = circle((size, size), size // 2) - circle((size, size), size // 2 - noise_lim)
-        flx_noise = np.sum(res * noise, axis=(1, 2)) / np.sum(noise)
-
-    if str(singal_lim) == "app":
-        img = res[10]
-        plt.figure("Click to define apperture of signal region")
-        plt.title("Click to define apperture of signal region")
-        plt.imshow(pup * img, vmax=np.percentile(img, 99.99))
-        if apps is None: apps = plt.ginput(n=1)[0]
-        print(apps)
-        siftx = apps[0]
-        sifty = apps[1]
-        fwhm_aper = photutils.CircularAperture([siftx, sifty], app_size)
-        signal = fwhm_aper.to_mask().to_image(img.shape)
-        flx_sig = np.array([photutils.aperture_photometry(frame, fwhm_aper, method='exact')["aperture_sum"] for frame in
-                            res]).flatten()
-        plt.close("Click to define apperture of signal region")
-
-    else:
-        signal = circle((size, size), singal_lim[1]) - circle((size, size), singal_lim[0])
-        flx_sig = np.sum(res * signal, axis=(1, 2)) / np.sum(signal)
-
-    # Computing gradient to find the convergence of signal
-    grad = (flx_sig[0:-1] - flx_sig[1:]) / np.mean(flx_sig)
-    if win: grad = np.convolve(grad, np.ones(win), 'valid') / win  # moving avg grads
-
-    valid_conv = np.flatnonzero(np.convolve(abs(grad) < gtol, np.ones(win, dtype=int)) == win)
-    if len(valid_conv) < 1:
-        while len(valid_conv) < 1:
-            gtol *= 2
-            print("gtol too small, increasing tolerernce : {:2e}".format(gtol))
-            valid_conv = np.flatnonzero(np.convolve(abs(grad) < gtol, np.ones(win, dtype=int)) == win)
-            if gtol > 1: valid_conv = [len(grad) + 2 - win - 1]
-
-    conv_indx = valid_conv[0] - 2 + win
-
-    SNR = flx_sig / flx_noise
-    indx = np.argmax(SNR[conv_indx:]) + conv_indx
-
-    minortick = np.array(range(len(res)))
-    if l == "incr":
-        majortick = []
-        tmp = 0
-        for k in range(0, r - r_start):
-            majortick.append(tmp + k)
-            tmp = tmp + k
-        majortick = np.array(majortick)
-    else:
-        majortick = np.array(range(0, r - r_start)) * l
-
-    majroticklab = ["rank " + str(k) for k in range(r_start, r)]
-
-    if plot or saveplot:
-        if not plot: plt.ioff()
-        plt.close("Find Optimal Iteration")
-        plt.figure("Find Optimal Iteration", (16, 9))
-        point_param = {'color': "black", 'markersize': 7, 'marker': "o"}
-        text_param = {'color': "black", 'weight': "bold", 'size': 10, 'xytext': (-10, 20),
-                      'textcoords': 'offset points'}
-        plt.subplot(3, 2, 1), plt.imshow(noise), plt.title("Noise")
-        plt.subplot(3, 2, 2), plt.imshow(signal), plt.title("Signal")
-
-        ax = plt.subplot(3, 2, 3)
-        plt.plot(flx_sig / np.mean(flx_sig), label="Variation gradient")
-        plt.plot([conv_indx], [flx_sig[conv_indx] / np.mean(flx_sig)], **point_param)
-        plt.annotate('Convergence', xy=(indx, grad[conv_indx]), **text_param)
-        plt.legend(loc="lower right")
-        ax.set_xticks(minortick, minor=True)  # labels=np.array(list(range(1,10,3))*10)
-        ax.tick_params(axis='x', which='minor', length=3, width=1, colors="gray", pad=1, labelsize=8)
-        ax.tick_params(axis='x', which='major', length=5, width=1, colors='r', labelrotation=30, labelsize=10)
-        ax.set_xticks(majortick, labels=majroticklab, minor=False)
-
-        ax = plt.subplot(3, 2, 5)
-        plt.plot(grad, label="Variation gradient")
-        plt.plot([gtol] * len(grad), color="red", label="tolerance")
-        plt.plot([-gtol] * len(grad), color="red")
-        plt.plot([conv_indx], [grad[conv_indx]], **point_param)
-        plt.annotate(f'Convergence {conv_indx}', xy=(conv_indx, grad[conv_indx]), **text_param)
-        plt.legend(loc="lower right")
-        ax.set_xticks(minortick, minor=True)  # labels=np.array(list(range(1,10,3))*10)
-        ax.tick_params(axis='x', which='minor', length=3, width=1, colors="gray", pad=1, labelsize=8)
-        ax.tick_params(axis='x', which='major', length=5, width=1, colors='r', labelrotation=30, labelsize=10)
-        ax.set_xticks(majortick, labels=majroticklab, minor=False)
-
-        ax = plt.subplot(3, 2, 4)
-        plt.plot(flx_sig / np.mean(flx_sig), color="tab:orange", label="RELATIVE Signal variation")
-        plt.plot(flx_noise / np.mean(flx_noise), color="tab:blue", label="RELATIVE Noise variation")
-        plt.plot([indx], [flx_sig[indx] / np.mean(flx_sig)], **point_param)
-        plt.annotate('Max SNR', xy=(indx, flx_sig[indx] / np.mean(flx_sig)), **text_param)
-        plt.legend(loc="lower right")
-        ax.set_xticks(minortick, minor=True)  # labels=np.array(list(range(1,10,3))*10)
-        ax.tick_params(axis='x', which='minor', length=3, width=1, colors="gray", pad=1, labelsize=8)
-        ax.tick_params(axis='x', which='major', length=5, width=1, colors='r', labelrotation=30, labelsize=10)
-        ax.set_xticks(majortick, labels=majroticklab, minor=False)
-
-        ax = plt.subplot(3, 2, 6)
-        plt.plot(SNR, label="SNR"),
-        plt.plot([indx], [SNR[indx]], **point_param)
-        plt.annotate(f'Max SNR {indx}', xy=(indx, SNR[indx]), **text_param)
-        plt.legend(loc="lower right")
-        ax.set_xticks(minortick, minor=True)  # labels=np.array(list(range(1,10,3))*10)
-        ax.tick_params(axis='x', which='minor', length=3, width=1, colors="gray", pad=1, labelsize=8)
-        ax.tick_params(axis='x', which='major', length=5, width=1, colors='r', labelrotation=30, labelsize=10)
-        ax.set_xticks(majortick, labels=majroticklab, minor=False)
-
-        if saveplot: plt.savefig(saveplot + ".png")
-        if not plot: plt.close("all")
-
-    if returnSNR:
-        return res[indx], indx, SNR[indx], np.mean(grad[conv_indx:])
-    else:
-        return res[indx], indx
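For orientation: the 2.2 RECORD above lists a top-level GreeDS.py with the same sha256 and size as the deleted GreeDS/GreeDS.py, so the module appears relocated rather than changed. A hedged usage sketch against that flat layout, with placeholder input files (not part of the package):

```python
# Hedged usage sketch of the relocated GreeDS module (2.2 flat layout assumed).
# "cube.npy" / "angles.npy" are placeholder inputs, not files from the package.
import numpy as np
from GreeDS import GreeDS, find_optimal_iter

cube = np.load("cube.npy")      # ADI sequence, shape (nb_frame, length, width)
angles = np.load("angles.npy")  # parallactic angles, one per frame

# Iterate ranks 1..10 with 10 inner iterations each; full_output=1 keeps
# every intermediate estimate so the best iteration can be selected afterwards.
frames = GreeDS(cube, angles, r=10, l=10, r_start=1, full_output=1)

# Pick the frame where the signal flux has converged and the SNR proxy peaks.
best_frame, best_idx = find_optimal_iter(frames, l=10, r_start=1, r=10)
```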
GreeDS/__init__.py
DELETED
greeds-2.1.dist-info/RECORD
DELETED
@@ -1,8 +0,0 @@
-GreeDS.py,sha256=pas7Kc1Gf8l3anbtVx_eWm_z-uSVkpiQEMZ1jDCHr8Q,13145
-GreeDS/GreeDS.py,sha256=pas7Kc1Gf8l3anbtVx_eWm_z-uSVkpiQEMZ1jDCHr8Q,13145
-GreeDS/__init__.py,sha256=dMI2hE2fDwpERIGxf2kj-ZyszO1iVfb-wZXk0JpXbl0,47
-GreeDS/rotation.py,sha256=e7lhJ6_dkmSjnG0wY22v01hIWDNc6umHPyH8OrVEXAQ,4120
-greeds-2.1.dist-info/METADATA,sha256=QQgbYVjtO6P-2upcZXMKQDqCaUGXyw6ukgkfAz-YabY,825
-greeds-2.1.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
-greeds-2.1.dist-info/top_level.txt,sha256=LhNsQ_Ur0zEBXYu8pTisvNMuhgosOa1KG0KHAB4wlEk,7
-greeds-2.1.dist-info/RECORD,,
greeds-2.1.dist-info/top_level.txt
DELETED
@@ -1 +0,0 @@
-GreeDS
{greeds-2.1.dist-info → greeds-2.2.dist-info}/WHEEL
File without changes

/GreeDS/rotation.py → /rotation.py
File without changes