GreeDS 2.0-py3-none-any.whl → 2.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- GreeDS/GreeDS.py +381 -0
- GreeDS/__init__.py +2 -0
- GreeDS/rotation.py +133 -0
- GreeDS.py +381 -0
- {GreeDS-2.0.dist-info → greeds-2.1.dist-info}/METADATA +8 -3
- greeds-2.1.dist-info/RECORD +8 -0
- {GreeDS-2.0.dist-info → greeds-2.1.dist-info}/WHEEL +1 -2
- greeds-2.1.dist-info/top_level.txt +1 -0
- GreeDS-2.0.dist-info/RECORD +0 -4
- GreeDS-2.0.dist-info/top_level.txt +0 -1
GreeDS/GreeDS.py
ADDED
@@ -0,0 +1,381 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Created on Thu Nov 10 08:43:22 2022
+
+______________________________
+GreeDS
+______________________________
+
+GreeDS algorithm from Pairet etal 2020.
+
+Basic implemented that works independently from MAYONNAISE.
+
+Require the dependancy torch and kornia
+
+@author: sand-jrd
+"""
+
+import torch
+import numpy as np
+from rotation import tensor_rotate_fft
+import photutils
+import matplotlib.pyplot as plt
+from torchvision.transforms.functional import (rotate, InterpolationMode)
+
+def cube_rotate(cube, angles, fft=False):
+    new_cube = torch.zeros(cube.shape)
+    if not fft:
+        for ii in range(len(angles)):
+            new_cube[ii] = rotate(torch.unsqueeze(cube[ii], 0), -float(angles[ii]),
+                                  InterpolationMode.BILINEAR)[0]
+        return new_cube
+    else:
+        for ii in range(len(angles)):
+            new_cube[ii] = tensor_rotate_fft(torch.unsqueeze(cube[ii], 0), -float(angles[ii]))
+        return new_cube
+
+def circle(shape: tuple, r: float, offset=(0.5, 0.5)):
+    """ Create circle of 0 in a 2D matrix of ones"
+
+    Parameters
+    ----------
+
+    shape : tuple
+        shape x,y of the matrix
+
+    r : float
+        radius of the circle
+    offset : (optional) float
+        offset from the center
+
+    Returns
+    -------
+    M : ndarray
+        Zeros matrix with a circle filled with ones
+
+    """
+    assert (len(shape) == 2 or len(shape) == 3)
+    if isinstance(offset, (int, float)): offset = (offset, offset)
+
+    nb_f = shape[0] if len(shape) == 3 else 0
+    shape = shape[1:] if len(shape) == 3 else shape
+
+    M = np.zeros(shape)
+    w, l = shape
+    for x in range(0, w):
+        for y in range(0, l):
+            if pow(x - (w // 2) + offset[0], 2) + pow(y - (l // 2) + offset[1], 2) < pow(r, 2):
+                M[x, y] = 1
+
+    if nb_f: M = np.tile(M, (nb_f, 1, 1))
+
+    return 1 - M
+
+class GtolNotReached(Exception):
+    """Considere increasing gtol or chosing another set of parameters"""
+    pass
+
+
+def GreeDS(cube, angles, r=1, l=10, r_start=1, pup=6, refs=None, x_start=None, full_output=0, returnL=False,
+           returntype="numpy"):
+    """
+
+    Parameters
+    ----------
+    x_start
+    cube : numpy array
+        3D cube of data. shape : (nb_frame, length, width)
+
+    angles : numpy array
+        1D array of PA angle. Must be the same length as cube nb_frame
+
+    r : int
+        Number of rank to iterate over. The default is 1.
+
+    l : int or str {'incr'}
+        Number of iteration per rank. The default is 10.
+        If set to 'incr', the number of iteration per rank will increase with rank.
+
+    r_start : int
+        First rank estimate, r_start < r
+        GreeDS will iterate from rank r-start to r
+
+    pup : int
+        Raduis of the pupil mask
+
+    refs : numpy array
+        3D cube of reference frames. shape = (nb_frame, length, width)
+
+    returntype : {"numpy", "tensor"}
+        Type of the function output
+
+    full_output : int (0 to 3)
+        Choose to return :
+        * 0/False -> only last estimation
+        * 1/True -> every iter over r*l
+        * 2 -> every iter over r
+        * 3 -> every iter over l
+
+    returnL : bool
+        Return PSF estimation
+
+    Returns
+    -------
+    x_k [full_ouputs=False]
+        Estimated circumstellar signal.
+
+    iter_frames [full_ouputs=True]
+        Estimated circumstellar signal x_k for different iterations.
+
+    """
+
+    # Shapes
+    shape = cube.shape[-2:]
+    len_img = shape[0]
+    nb_frame = len(angles)
+    nb_frame_es = len(angles)
+
+    # References
+    if refs is not None:
+        assert (refs.shape[-2:] == shape)
+        refs = torch.from_numpy(refs)
+        print("Cube filled with " + str(int(100 * refs.shape[0] / nb_frame)) + " percent of reference frames")
+        nb_frame_es = len(angles) + refs.shape[0]
+
+    # Convert to use torch
+    cube = torch.from_numpy(cube)
+
+    angles = torch.from_numpy(angles)
+    pup = 1 if pup == 0 else circle(shape, pup)
+
+    iter_frames = []
+    iter_L = []
+
+    x_k = torch.zeros(shape)
+    if x_start is not None: x_k = torch.from_numpy(x_start)
+
+    incr = True if l == "incr" else False
+
+    # One iteration of greeDS
+    def GreeDS_iter(x, q):
+
+        R = cube - cube_rotate(x.expand(nb_frame, len_img, len_img), -angles)
+
+        if refs is not None:
+            R = torch.cat((R, refs))
+
+        U, Sigma, V = torch.pca_lowrank(R.view(nb_frame_es, len_img * len_img), q=q, niter=1, center=False)
+        L = (U @ torch.diag(Sigma) @ V.T).reshape(nb_frame_es, len_img, len_img)
+
+        if refs is not None: L = L[:nb_frame]
+        L *= L > 0
+
+        S_der = cube_rotate(cube - L, angles)
+
+        frame = torch.mean(S_der, axis=0) * pup
+        frame *= frame > 0
+
+        return frame, L
+
+    ## Main loop over N_comp and nb_rank.
+    for ncomp in range(r_start, r + 1):
+
+        if incr: l = ncomp - r_start + 1
+
+        for _ in range(1, l + 1):
+
+            x_k1, xl = GreeDS_iter(x_k, ncomp)
+            x_k = x_k1.clone()
+
+            if full_output == 1:
+                iter_frames.append(x_k1.numpy())
+                if returnL: iter_L.append(xl.numpy())
+            if full_output == 3 and ncomp == r + 1: iter_frames.append(x_k1.numpy())
+
+        if full_output == 2: iter_frames.append(x_k1.numpy())
+
+    iter_frames = np.array(iter_frames)
+    iter_L = np.array(iter_L)
+    if returntype == "numpy":
+        x_k = x_k.numpy()
+        xl = xl.numpy()
+
+    if returnL:
+        if full_output:
+            return iter_frames, iter_L
+        else:
+            return x_k, xl
+
+    if full_output:
+        return iter_frames
+    else:
+        return x_k
+
+# %%
+
+def find_optimal_iter(res, noise_lim=30, singal_lim=(10, 30), apps=None, gtol=1e-2, win=2, plot=False, saveplot=False,
+                      returnSNR=False, app_size=8, l=10, r_start=1, r=10):
+    """Find the optimal iteration in two steps : 1-Ensuring signal have converged 2-Minimizing SNR
+
+    Parameters
+    ----------
+
+    res : numpy array
+        3D cube of GreeDS estimate. shape : (nb_frame, length, width)
+
+    noise_lim : int [default=30]
+        Limit raduis of noise region
+
+    singal_lim : tuple [default=(10,30)] or "app"
+        Inner and outter raduis of signal region
+
+    gtol : int [gtol=0.1]
+        Gradient tolerance
+
+    win : int [default=3]
+        Moving average window
+
+
+    Returns
+    -------
+
+    res[indx] : numpy array
+        Optimal frame estimate
+
+    indx : int
+        Optimal index frame
+
+    """
+    size = res.shape[1]
+    pup = circle((size, size), 8) - circle((size, size), size // 2)
+
+    ## Defining Noise and signal region & Computing flx variation
+
+    if str(noise_lim) == "app":
+        img = res[3]
+        plt.title("Click to define apperture of noise region")
+        plt.imshow(pup * img, vmax=np.percentile(img, 99))
+        apps = plt.ginput(n=1)[0]
+        siftx = apps[0]
+        sifty = apps[1]
+
+        fwhm_aper = photutils.CircularAperture([siftx, sifty], app_size)
+        noise = fwhm_aper.to_mask().to_image(img.shape)
+        flx_noise = np.array(
+            [photutils.aperture_photometry(frame, fwhm_aper, method='exact')["aperture_sum"] for frame in
+             res]).flatten()
+        plt.close("all")
+    else:
+        noise = circle((size, size), size // 2) - circle((size, size), size // 2 - noise_lim)
+        flx_noise = np.sum(res * noise, axis=(1, 2)) / np.sum(noise)
+
+    if str(singal_lim) == "app":
+        img = res[10]
+        plt.figure("Click to define apperture of signal region")
+        plt.title("Click to define apperture of signal region")
+        plt.imshow(pup * img, vmax=np.percentile(img, 99.99))
+        if apps is None: apps = plt.ginput(n=1)[0]
+        print(apps)
+        siftx = apps[0]
+        sifty = apps[1]
+        fwhm_aper = photutils.CircularAperture([siftx, sifty], app_size)
+        signal = fwhm_aper.to_mask().to_image(img.shape)
+        flx_sig = np.array([photutils.aperture_photometry(frame, fwhm_aper, method='exact')["aperture_sum"] for frame in
+                            res]).flatten()
+        plt.close("Click to define apperture of signal region")
+
+    else:
+        signal = circle((size, size), singal_lim[1]) - circle((size, size), singal_lim[0])
+        flx_sig = np.sum(res * signal, axis=(1, 2)) / np.sum(signal)
+
+    # Computing gradient to find the convergence of signal
+    grad = (flx_sig[0:-1] - flx_sig[1:]) / np.mean(flx_sig)
+    if win: grad = np.convolve(grad, np.ones(win), 'valid') / win # moving avg grads
+
+    valid_conv = np.flatnonzero(np.convolve(abs(grad) < gtol, np.ones(win, dtype=int)) == win)
+    if len(valid_conv) < 1:
+        while len(valid_conv) < 1:
+            gtol *= 2
+            print("gtol too small, increasing tolerernce : {:2e}".format(gtol))
+            valid_conv = np.flatnonzero(np.convolve(abs(grad) < gtol, np.ones(win, dtype=int)) == win)
+            if gtol > 1: valid_conv = [len(grad) + 2 - win - 1]
+
+    conv_indx = valid_conv[0] - 2 + win
+
+    SNR = flx_sig / flx_noise
+    indx = np.argmax(SNR[conv_indx:]) + conv_indx
+
+    minortick = np.array(range(len(res)))
+    if l == "incr":
+        majortick = []
+        tmp = 0
+        for k in range(0, r - r_start):
+            majortick.append(tmp + k)
+            tmp = tmp + k
+        majortick = np.array(majortick)
+    else:
+        majortick = np.array(range(0, r - r_start)) * l
+
+    majroticklab = ["rank " + str(k) for k in range(r_start, r)]
+
+    if plot or saveplot:
+        if not plot: plt.ioff()
+        plt.close("Find Optimal Iteration")
+        plt.figure("Find Optimal Iteration", (16, 9))
+        point_param = {'color': "black", 'markersize': 7, 'marker': "o"}
+        text_param = {'color': "black", 'weight': "bold", 'size': 10, 'xytext': (-10, 20),
+                      'textcoords': 'offset points'}
+        plt.subplot(3, 2, 1), plt.imshow(noise), plt.title("Noise")
+        plt.subplot(3, 2, 2), plt.imshow(signal), plt.title("Signal")
+
+        ax = plt.subplot(3, 2, 3)
+        plt.plot(flx_sig / np.mean(flx_sig), label="Variation gradient")
+        plt.plot([conv_indx], [flx_sig[conv_indx] / np.mean(flx_sig)], **point_param)
+        plt.annotate('Convergence', xy=(indx, grad[conv_indx]), **text_param)
+        plt.legend(loc="lower right")
+        ax.set_xticks(minortick, minor=True) # labels=np.array(list(range(1,10,3))*10)
+        ax.tick_params(axis='x', which='minor', length=3, width=1, colors="gray", pad=1, labelsize=8)
+        ax.tick_params(axis='x', which='major', length=5, width=1, colors='r', labelrotation=30, labelsize=10)
+        ax.set_xticks(majortick, labels=majroticklab, minor=False)
+
+        ax = plt.subplot(3, 2, 5)
+        plt.plot(grad, label="Variation gradient")
+        plt.plot([gtol] * len(grad), color="red", label="tolerance")
+        plt.plot([-gtol] * len(grad), color="red")
+        plt.plot([conv_indx], [grad[conv_indx]], **point_param)
+        plt.annotate(f'Convergence {conv_indx}', xy=(conv_indx, grad[conv_indx]), **text_param)
+        plt.legend(loc="lower right")
+        ax.set_xticks(minortick, minor=True) # labels=np.array(list(range(1,10,3))*10)
+        ax.tick_params(axis='x', which='minor', length=3, width=1, colors="gray", pad=1, labelsize=8)
+        ax.tick_params(axis='x', which='major', length=5, width=1, colors='r', labelrotation=30, labelsize=10)
+        ax.set_xticks(majortick, labels=majroticklab, minor=False)
+
+        ax = plt.subplot(3, 2, 4)
+        plt.plot(flx_sig / np.mean(flx_sig), color="tab:orange", label="RELATIVE Signal variation")
+        plt.plot(flx_noise / np.mean(flx_noise), color="tab:blue", label="RELATIVE Noise variation")
+        plt.plot([indx], [flx_sig[indx] / np.mean(flx_sig)], **point_param)
+        plt.annotate('Max SNR', xy=(indx, flx_sig[indx] / np.mean(flx_sig)), **text_param)
+        plt.legend(loc="lower right")
+        ax.set_xticks(minortick, minor=True) # labels=np.array(list(range(1,10,3))*10)
+        ax.tick_params(axis='x', which='minor', length=3, width=1, colors="gray", pad=1, labelsize=8)
+        ax.tick_params(axis='x', which='major', length=5, width=1, colors='r', labelrotation=30, labelsize=10)
+        ax.set_xticks(majortick, labels=majroticklab, minor=False)
+
+        ax = plt.subplot(3, 2, 6)
+        plt.plot(SNR, label="SNR"),
+        plt.plot([indx], [SNR[indx]], **point_param)
+        plt.annotate(f'Max SNR {indx}', xy=(indx, SNR[indx]), **text_param)
+        plt.legend(loc="lower right")
+        ax.set_xticks(minortick, minor=True) # labels=np.array(list(range(1,10,3))*10)
+        ax.tick_params(axis='x', which='minor', length=3, width=1, colors="gray", pad=1, labelsize=8)
+        ax.tick_params(axis='x', which='major', length=5, width=1, colors='r', labelrotation=30, labelsize=10)
+        ax.set_xticks(majortick, labels=majroticklab, minor=False)
+
+    if saveplot: plt.savefig(saveplot + ".png")
+    if not plot: plt.close("all")
+
+    if returnSNR:
+        return res[indx], indx, SNR[indx], np.mean(grad[conv_indx:])
+    else:
+        return res[indx], indx
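For orientation, a minimal usage sketch of the GreeDS and find_optimal_iter functions listed above follows; it is not part of the package diff. The array shapes and values are hypothetical stand-ins for a real ADI cube and its parallactic angles, and the import line is an assumption: GreeDS.py itself does `from rotation import tensor_rotate_fft`, so depending on how the wheel lands on sys.path the import may need adjusting.

# Hedged usage sketch, not part of the published package.
import numpy as np
from GreeDS.GreeDS import GreeDS, find_optimal_iter  # assumed import path

# Hypothetical ADI sequence: 40 frames of 64x64 pixels plus their parallactic angles.
cube = np.random.rand(40, 64, 64)        # stand-in for a real coronagraphic cube
angles = np.linspace(0.0, 35.0, 40)      # stand-in for the real PA values

# Iterate the PCA rank from r_start=1 up to r=10, with l=10 iterations per rank,
# keeping every intermediate estimate (full_output=1).
iter_frames = GreeDS(cube, angles, r=10, l=10, r_start=1, pup=6, full_output=1)

# Pick the iteration where the signal flux has converged and the SNR proxy peaks.
best_frame, best_indx = find_optimal_iter(iter_frames, noise_lim=30, singal_lim=(10, 30),
                                          l=10, r_start=1, r=10)
print(best_indx, best_frame.shape)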
GreeDS/__init__.py
ADDED
GreeDS/rotation.py
ADDED
@@ -0,0 +1,133 @@
+import numpy as np
+import torch
+import torch.fft as tf
+
+def frame_center(array, verbose=False):
+    """
+    Return the coordinates y,x of the frame(s) center.
+    If odd: dim/2-0.5
+    If even: dim/2
+
+    Parameters
+    ----------
+    array : 2d/3d/4d numpy ndarray
+        Frame or cube.
+    verbose : bool optional
+        If True the center coordinates are printed out.
+
+    Returns
+    -------
+    cy, cx : int
+        Coordinates of the center.
+
+    """
+    if array.ndim == 2:
+        shape = array.shape
+    elif array.ndim == 3:
+        shape = array[0].shape
+    elif array.ndim == 4:
+        shape = array[0, 0].shape
+    else:
+        raise ValueError('`array` is not a 2d, 3d or 4d array')
+
+    cy = shape[0] / 2
+    cx = shape[1] / 2
+
+    if shape[0] % 2:
+        cy -= 0.5
+    if shape[1] % 2:
+        cx -= 0.5
+
+    if verbose:
+        print('Center px coordinates at x,y = ({}, {})'.format(cx, cy))
+
+    return int(cy), int(cx)
+
+
+def tensor_rotate_fft(tensor: torch.Tensor, angle: float) -> torch.Tensor:
+    """ Rotates Tensor using Fourier transform phases:
+        Rotation = 3 consecutive lin. shears = 3 consecutive FFT phase shifts
+        See details in Larkin et al. (1997) and Hagelberg et al. (2016).
+        Note: this is significantly slower than interpolation methods
+        (e.g. opencv/lanczos4 or ndimage), but preserves the flux better
+        (by construction it preserves the total power). It is more prone to
+        large-scale Gibbs artefacts, so make sure no sharp edge is present in
+        the image to be rotated.
+        /!\ This is a blindly coded adaptation for Tensor of the vip function rotate_fft
+        (https://github.com/vortex-exoplanet/VIP/blob/51e1d734dcdbee1fbd0175aa3d0ab62eec83d5fa/vip_hci/preproc/derotation.py#L507)
+        /!\ This suppose the frame is perfectly centred
+        ! Warning: if input frame has even dimensions, the center of rotation
+        will NOT be between the 4 central pixels, instead it will be on the top
+        right of those 4 pixels. Make sure your images are centered with
+        respect to that pixel before rotation.
+    Parameters
+    ----------
+    tensor : torch.Tensor
+        Input image, 2d array.
+    angle : float
+        Rotation angle.
+    Returns
+    -------
+    array_out : torch.Tensor
+        Resulting frame.
+    """
+    y_ori, x_ori = tensor.shape[1:]
+
+    while angle < 0:
+        angle += 360
+    while angle > 360:
+        angle -= 360
+
+    if angle > 45:
+        dangle = angle % 90
+        if dangle > 45:
+            dangle = -(90 - dangle)
+        nangle = int(np.rint(angle / 90))
+        tensor_in = torch.rot90(tensor, nangle, [1, 2])
+    else:
+        dangle = angle
+        tensor_in = tensor.clone()
+
+    if y_ori % 2 or x_ori % 2:
+        # NO NEED TO SHIFT BY 0.5px: FFT assumes rot. center on cx+0.5, cy+0.5!
+        tensor_in = tensor_in[:, :-1, :-1]
+
+    a = np.tan(np.deg2rad(dangle) / 2).item()
+    b = -np.sin(np.deg2rad(dangle)).item()
+
+    y_new, x_new = tensor_in.shape[1:]
+    arr_xy = torch.from_numpy(np.mgrid[0:y_new, 0:x_new])
+    cy, cx = frame_center(tensor[0])
+    arr_y = arr_xy[0] - cy
+    arr_x = arr_xy[1] - cx
+
+    s_x = tensor_fft_shear(tensor_in, arr_x, a, ax=2)
+    s_xy = tensor_fft_shear(s_x, arr_y, b, ax=1)
+    s_xyx = tensor_fft_shear(s_xy, arr_x, a, ax=2)
+
+    if y_ori % 2 or x_ori % 2:
+        # set it back to original dimensions
+        array_out = torch.zeros([1, s_xyx.shape[1]+1, s_xyx.shape[2]+1])
+        array_out[0, :-1, :-1] = torch.real(s_xyx)
+    else:
+        array_out = torch.real(s_xyx)
+
+    return array_out
+
+
+def tensor_fft_shear(arr, arr_ori, c, ax):
+    ax2 = 1 - (ax-1) % 2
+    freqs = tf.fftfreq(arr_ori.shape[ax2], dtype=torch.float64)
+    sh_freqs = tf.fftshift(freqs)
+    arr_u = torch.tile(sh_freqs, (arr_ori.shape[ax-1], 1))
+    if ax == 2:
+        arr_u = torch.transpose(arr_u, 0, 1)
+    s_x = tf.fftshift(arr)
+    s_x = tf.fft(s_x, dim=ax)
+    s_x = tf.fftshift(s_x)
+    s_x = torch.exp(-2j * torch.pi * c * arr_u * arr_ori) * s_x
+    s_x = tf.fftshift(s_x)
+    s_x = tf.ifft(s_x, dim=ax)
+    s_x = tf.fftshift(s_x)
+
+    return s_x
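Similarly, a small hedged example of calling tensor_rotate_fft directly; it is not part of the package. The odd-sized Gaussian test frame and the 30 degree angle are arbitrary illustration choices, and the import path assumes the installed GreeDS package.

# Hedged usage sketch, not part of the published package.
import numpy as np
import torch
from GreeDS.rotation import tensor_rotate_fft  # assumed import path

# Arbitrary odd-sized test frame: a centred 2D Gaussian, shaped (1, ny, nx) as the function expects.
ny = nx = 101
yy, xx = np.mgrid[0:ny, 0:nx]
frame = np.exp(-((yy - ny // 2) ** 2 + (xx - nx // 2) ** 2) / (2 * 8.0 ** 2))
tensor = torch.from_numpy(frame[None, :, :])

rotated = tensor_rotate_fft(tensor, 30.0)  # rotate by 30 degrees about the frame centre
# The FFT-based rotation is built to preserve total power, so the sums should stay close.
print(rotated.shape, float(tensor.sum()), float(rotated.sum()))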
GreeDS.py
ADDED
@@ -0,0 +1,381 @@
(381 added lines omitted: byte-for-byte identical to GreeDS/GreeDS.py above; both entries carry the same sha256 and size in RECORD)
{GreeDS-2.0.dist-info → greeds-2.1.dist-info}/METADATA
@@ -1,6 +1,6 @@
-Metadata-Version: 2.
+Metadata-Version: 2.4
 Name: GreeDS
-Version: 2.0
+Version: 2.1
 Summary: This package is a ADI or ARDI sequence processing tool that aim to distangle extended signal (like disks) from quasi-static speakels) using iterative PCA
 Home-page: https://github.com/Sand-jrd/GreeDS
 Author: Sandrine Juillard
@@ -16,4 +16,9 @@ Requires-Dist: torchvision
 Requires-Dist: photutils
 Requires-Dist: setuptools
 Requires-Dist: pip
-
+Dynamic: author
+Dynamic: author-email
+Dynamic: classifier
+Dynamic: home-page
+Dynamic: requires-dist
+Dynamic: summary
greeds-2.1.dist-info/RECORD
ADDED
@@ -0,0 +1,8 @@
+GreeDS.py,sha256=pas7Kc1Gf8l3anbtVx_eWm_z-uSVkpiQEMZ1jDCHr8Q,13145
+GreeDS/GreeDS.py,sha256=pas7Kc1Gf8l3anbtVx_eWm_z-uSVkpiQEMZ1jDCHr8Q,13145
+GreeDS/__init__.py,sha256=dMI2hE2fDwpERIGxf2kj-ZyszO1iVfb-wZXk0JpXbl0,47
+GreeDS/rotation.py,sha256=e7lhJ6_dkmSjnG0wY22v01hIWDNc6umHPyH8OrVEXAQ,4120
+greeds-2.1.dist-info/METADATA,sha256=QQgbYVjtO6P-2upcZXMKQDqCaUGXyw6ukgkfAz-YabY,825
+greeds-2.1.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
+greeds-2.1.dist-info/top_level.txt,sha256=LhNsQ_Ur0zEBXYu8pTisvNMuhgosOa1KG0KHAB4wlEk,7
+greeds-2.1.dist-info/RECORD,,
greeds-2.1.dist-info/top_level.txt
ADDED
@@ -0,0 +1 @@
+GreeDS
GreeDS-2.0.dist-info/RECORD
DELETED
@@ -1,4 +0,0 @@
-GreeDS-2.0.dist-info/METADATA,sha256=dFH9FSS9aeeb7MdoUBFPF9yVUtBfpQwfDk6YHFO8doc,709
-GreeDS-2.0.dist-info/WHEEL,sha256=AHX6tWk3qWuce7vKLrj7lnulVHEdWoltgauo8bgCXgU,109
-GreeDS-2.0.dist-info/top_level.txt,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
-GreeDS-2.0.dist-info/RECORD,,
GreeDS-2.0.dist-info/top_level.txt
DELETED
@@ -1 +0,0 @@
-