pyTEMlib 0.2025.4.1__py3-none-any.whl → 0.2025.9.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of pyTEMlib has been flagged as potentially problematic by the registry diff service.
- build/lib/pyTEMlib/__init__.py +33 -0
- build/lib/pyTEMlib/animation.py +640 -0
- build/lib/pyTEMlib/atom_tools.py +238 -0
- build/lib/pyTEMlib/config_dir.py +31 -0
- build/lib/pyTEMlib/crystal_tools.py +1219 -0
- build/lib/pyTEMlib/diffraction_plot.py +756 -0
- build/lib/pyTEMlib/dynamic_scattering.py +293 -0
- build/lib/pyTEMlib/eds_tools.py +826 -0
- build/lib/pyTEMlib/eds_xsections.py +432 -0
- build/lib/pyTEMlib/eels_tools/__init__.py +44 -0
- build/lib/pyTEMlib/eels_tools/core_loss_tools.py +751 -0
- build/lib/pyTEMlib/eels_tools/eels_database.py +134 -0
- build/lib/pyTEMlib/eels_tools/low_loss_tools.py +655 -0
- build/lib/pyTEMlib/eels_tools/peak_fit_tools.py +175 -0
- build/lib/pyTEMlib/eels_tools/zero_loss_tools.py +264 -0
- build/lib/pyTEMlib/file_reader.py +274 -0
- build/lib/pyTEMlib/file_tools.py +811 -0
- build/lib/pyTEMlib/get_bote_salvat.py +69 -0
- build/lib/pyTEMlib/graph_tools.py +1153 -0
- build/lib/pyTEMlib/graph_viz.py +599 -0
- build/lib/pyTEMlib/image/__init__.py +37 -0
- build/lib/pyTEMlib/image/image_atoms.py +270 -0
- build/lib/pyTEMlib/image/image_clean.py +197 -0
- build/lib/pyTEMlib/image/image_distortion.py +299 -0
- build/lib/pyTEMlib/image/image_fft.py +277 -0
- build/lib/pyTEMlib/image/image_graph.py +926 -0
- build/lib/pyTEMlib/image/image_registration.py +316 -0
- build/lib/pyTEMlib/image/image_utilities.py +309 -0
- build/lib/pyTEMlib/image/image_window.py +421 -0
- build/lib/pyTEMlib/image_tools.py +699 -0
- build/lib/pyTEMlib/interactive_image.py +1 -0
- build/lib/pyTEMlib/kinematic_scattering.py +1196 -0
- build/lib/pyTEMlib/microscope.py +61 -0
- build/lib/pyTEMlib/probe_tools.py +906 -0
- build/lib/pyTEMlib/sidpy_tools.py +153 -0
- build/lib/pyTEMlib/simulation_tools.py +104 -0
- build/lib/pyTEMlib/test.py +437 -0
- build/lib/pyTEMlib/utilities.py +314 -0
- build/lib/pyTEMlib/version.py +5 -0
- build/lib/pyTEMlib/xrpa_x_sections.py +20976 -0
- pyTEMlib/__init__.py +25 -3
- pyTEMlib/animation.py +31 -22
- pyTEMlib/atom_tools.py +29 -34
- pyTEMlib/config_dir.py +2 -28
- pyTEMlib/crystal_tools.py +129 -165
- pyTEMlib/eds_tools.py +559 -342
- pyTEMlib/eds_xsections.py +432 -0
- pyTEMlib/eels_tools/__init__.py +44 -0
- pyTEMlib/eels_tools/core_loss_tools.py +751 -0
- pyTEMlib/eels_tools/eels_database.py +134 -0
- pyTEMlib/eels_tools/low_loss_tools.py +655 -0
- pyTEMlib/eels_tools/peak_fit_tools.py +175 -0
- pyTEMlib/eels_tools/zero_loss_tools.py +264 -0
- pyTEMlib/file_reader.py +274 -0
- pyTEMlib/file_tools.py +260 -1130
- pyTEMlib/get_bote_salvat.py +69 -0
- pyTEMlib/graph_tools.py +101 -174
- pyTEMlib/graph_viz.py +150 -0
- pyTEMlib/image/__init__.py +37 -0
- pyTEMlib/image/image_atoms.py +270 -0
- pyTEMlib/image/image_clean.py +197 -0
- pyTEMlib/image/image_distortion.py +299 -0
- pyTEMlib/image/image_fft.py +277 -0
- pyTEMlib/image/image_graph.py +926 -0
- pyTEMlib/image/image_registration.py +316 -0
- pyTEMlib/image/image_utilities.py +309 -0
- pyTEMlib/image/image_window.py +421 -0
- pyTEMlib/image_tools.py +154 -915
- pyTEMlib/kinematic_scattering.py +1 -1
- pyTEMlib/probe_tools.py +1 -1
- pyTEMlib/test.py +437 -0
- pyTEMlib/utilities.py +314 -0
- pyTEMlib/version.py +2 -3
- pyTEMlib/xrpa_x_sections.py +14 -10
- {pytemlib-0.2025.4.1.dist-info → pytemlib-0.2025.9.1.dist-info}/METADATA +13 -16
- pytemlib-0.2025.9.1.dist-info/RECORD +86 -0
- {pytemlib-0.2025.4.1.dist-info → pytemlib-0.2025.9.1.dist-info}/WHEEL +1 -1
- pytemlib-0.2025.9.1.dist-info/top_level.txt +6 -0
- pyTEMlib/core_loss_widget.py +0 -721
- pyTEMlib/eels_dialog.py +0 -754
- pyTEMlib/eels_dialog_utilities.py +0 -1199
- pyTEMlib/eels_tools.py +0 -2359
- pyTEMlib/file_tools_qt.py +0 -193
- pyTEMlib/image_dialog.py +0 -158
- pyTEMlib/image_dlg.py +0 -146
- pyTEMlib/info_widget.py +0 -1086
- pyTEMlib/info_widget3.py +0 -1120
- pyTEMlib/low_loss_widget.py +0 -479
- pyTEMlib/peak_dialog.py +0 -1129
- pyTEMlib/peak_dlg.py +0 -286
- pytemlib-0.2025.4.1.dist-info/RECORD +0 -38
- pytemlib-0.2025.4.1.dist-info/top_level.txt +0 -1
- {pytemlib-0.2025.4.1.dist-info → pytemlib-0.2025.9.1.dist-info}/entry_points.txt +0 -0
- {pytemlib-0.2025.4.1.dist-info → pytemlib-0.2025.9.1.dist-info}/licenses/LICENSE +0 -0
pyTEMlib/image_tools.py
CHANGED
@@ -3,80 +3,31 @@ image_tools.py
 by Gerd Duscher, UTK
 part of pyTEMlib
 MIT license except where stated differently
+
+This version is build on top of pycroscopy.image package of the pycrocsopy ecosysgtem.
 """
 
 import numpy as np
-import matplotlib
-import matplotlib as mpl
-import matplotlib.pylab as plt
-import matplotlib.widgets as mwidgets
-# from matplotlib.widgets import RectangleSelector
-
-import sidpy
-import pyTEMlib.file_tools as ft
-import pyTEMlib.sidpy_tools
-
-from tqdm.auto import trange, tqdm
 
-# import itertools
-from itertools import product
-
-from scipy import fftpack
 import scipy
-# from scipy import signal
-from scipy.interpolate import interp1d  # , interp2d
-import scipy.optimize as optimization
-
-# Multidimensional Image library
-import scipy.ndimage as ndimage
-import scipy.constants as const
-
-# from scipy.spatial import Voronoi, KDTree, cKDTree
-
 import skimage
+import sklearn
+import matplotlib
+import matplotlib.pylab as plt
 
-import
-
-from skimage.feature import peak_local_max
-# from skimage.measure import points_in_poly
-
-# our blob detectors from the scipy image package
-from skimage.feature import blob_log  # blob_dog, blob_doh
-
-from sklearn.feature_extraction import image
-from sklearn.utils.extmath import randomized_svd
-from sklearn.cluster import DBSCAN
-
-from collections import Counter
-
-# center diff function
-from skimage.filters import threshold_otsu, sobel
-from scipy.optimize import leastsq
-from sklearn.cluster import DBSCAN
-
-from ase.build import fcc110
-
-from scipy.ndimage import rotate
-from scipy.interpolate import RegularGridInterpolator
-from scipy.signal import fftconvolve
-
-
-_SimpleITK_present = True
-try:
-    import SimpleITK as sitk
-except ImportError:
-    sitk = False
-    _SimpleITK_present = False
+import sidpy
+import pyTEMlib
 
-
-
-
+## import all function of the image package of pycroscopy
+from .image import *
+from .image.image_utilities import pol2cart, cart2pol, xy2polar
 
 
 def get_atomic_pseudo_potential(fov, atoms, size=512, rotation=0):
-
+    """Big assumption: the atoms are not near the edge of the unit cell
     # If any atoms are close to the edge (ex. [0,0]) then the potential will be clipped
     # before calling the function, shift the atoms to the center of the unit cell
+    """
 
     pixel_size = fov / size
     max_size = int(size * np.sqrt(2) + 1)  # Maximum size to accommodate rotation
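For orientation: the import hunk above replaces the long list of module-level imports with the new pyTEMlib.image subpackage, and the polar-coordinate helpers are re-exported from pyTEMlib.image.image_utilities so that existing calls through image_tools should keep resolving. A minimal sketch, assuming pyTEMlib 0.2025.9.1 and that the re-export behaves the way the new import lines suggest:

    import numpy as np
    import pyTEMlib.image_tools as it

    points = np.array([[1.0, 0.0], [0.0, 2.0], [-1.0, -1.0]])
    rho, phi = it.cart2pol(points)      # re-exported from pyTEMlib.image.image_utilities
    r, p, order = it.xy2polar(points)   # radii and angles sorted by r, then phi
    x, y = it.pol2cart(rho, phi)        # back to Cartesian coordinates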
@@ -91,42 +42,51 @@ def get_atomic_pseudo_potential(fov, atoms, size=512, rotation=0):
         x = pos[0] / pixel_size
         y = pos[1] / pixel_size
         atom_width = 0.5  # Angstrom
-
-
+        # important for images at various fov. Room for improvement with theory
+        gauss_width = atom_width/pixel_size
+        gauss = pyTEMlib.probe_tools.make_gauss(max_size, max_size,
+                                                width=gauss_width,
+                                                x0=x, y0=y)
         unit_cell_potential += gauss * atomic_number  # gauss is already normalized to 1
 
     # Create interpolation function for unit cell potential
     x_grid = np.linspace(0, fov * max_size / size, max_size)
     y_grid = np.linspace(0, fov * max_size / size, max_size)
-    interpolator = RegularGridInterpolator((x_grid, y_grid),
-
+    interpolator = scipy.interpolate.RegularGridInterpolator((x_grid, y_grid),
+                                                              unit_cell_potential,
+                                                              bounds_error=False,
+                                                              fill_value=0)
     # Vectorized computation of the full potential map with max_size
-    x_coords, y_coords = np.meshgrid(np.linspace(0, fov, max_size),
+    x_coords, y_coords = np.meshgrid(np.linspace(0, fov, max_size),
+                                     np.linspace(0, fov, max_size),
+                                     indexing="ij")
     xtal_x = x_coords % unit_cell_size[0]
     xtal_y = y_coords % unit_cell_size[1]
     potential_map = interpolator((xtal_x.ravel(), xtal_y.ravel())).reshape(max_size, max_size)
 
     # Rotate and crop the potential map
-    potential_map = rotate(potential_map, rotation, reshape=False)
+    potential_map = scipy.ndimage.rotate(potential_map, rotation, reshape=False)
     center = potential_map.shape[0] // 2
-    potential_map = potential_map[center - size // 2:center + size // 2,
-
+    potential_map = potential_map[center - size // 2:center + size // 2,
+                                  center - size // 2:center + size // 2]
     potential_map = scipy.ndimage.gaussian_filter(potential_map,3)
 
     return potential_map
 
 def convolve_probe(ab, potential):
+    """ Convolve probe with potential using FFT based convolution"""
     # the pixel sizes should be the exact same as the potential
     final_sizes = potential.shape
 
     # Perform FFT-based convolution
     pad_height = pad_width = potential.shape[0] // 2
-    potential = np.pad(potential, ((pad_height, pad_height),
+    potential = np.pad(potential, ((pad_height, pad_height),
+                                   (pad_width, pad_width)), mode='constant')
 
-    probe,
-
-
-    convolved = fftconvolve(potential, probe, mode='same')
+    probe, _, _ = pyTEMlib.probe_tools.get_probe(ab, potential.shape[0],
+                                                 potential.shape[1],
+                                                 scale='mrad', verbose=False)
+    convolved = scipy.signal.fftconvolve(potential, probe, mode='same')
 
     # Crop to original potential size
     start_row = pad_height
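A rough usage sketch of the function above. The exact type of `atoms` is not shown in this hunk; an ase.Atoms unit cell with positions and field of view `fov` in Ångstrom is assumed here, following the 0.5 Å atom width used in the code:

    import ase.build
    import pyTEMlib.image_tools as it

    # hypothetical test cell; per the docstring, atoms should not sit on the cell edge
    atoms = ase.build.bulk('Au', 'fcc', a=4.08, cubic=True)
    potential = it.get_atomic_pseudo_potential(fov=20, atoms=atoms, size=512, rotation=0)
    print(potential.shape)   # (512, 512) Gaussian-smoothed pseudo-potential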
@@ -134,16 +94,13 @@ def convolve_probe(ab, potential):
     end_row = start_row + final_sizes[0]
     end_col = start_col + final_sizes[1]
 
-    image = convolved[start_row:end_row, start_col:end_col]
-
+    image = convolved[start_row:end_row, start_col:end_col]
     return probe, image
 
-
-# Wavelength in 1/nm
 def get_wavelength(e0):
     """
     Calculates the relativistic corrected de Broglie wave length of an electron
-
+    # Wavelength in 1/nm
     Parameters
     ----------
     e0: float
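The core of convolve_probe is plain FFT-based convolution with padding and cropping. A standalone sketch of that pattern with scipy, using a synthetic Gaussian probe instead of pyTEMlib.probe_tools.get_probe (whose aberration dictionary is not documented in this diff):

    import numpy as np
    import scipy.signal
    import scipy.ndimage

    potential = np.zeros((256, 256))
    potential[128, 128] = 1.0                       # single 'atom'
    probe = np.zeros((256, 256))
    probe[128, 128] = 1.0
    probe = scipy.ndimage.gaussian_filter(probe, sigma=5)   # stand-in electron probe
    probe /= probe.sum()

    pad = potential.shape[0] // 2
    padded = np.pad(potential, ((pad, pad), (pad, pad)), mode='constant')
    convolved = scipy.signal.fftconvolve(padded, probe, mode='same')
    image = convolved[pad:pad + potential.shape[0], pad:pad + potential.shape[1]]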
@@ -153,14 +110,16 @@ def get_wavelength(e0):
     -------
     wave length in 1/nm
     """
-
-
-
-
+    ev = scipy.constants.e * e0
+    h = scipy.constants.h
+    m = scipy.constants.m_e
+    c = scipy.constants.c
+    return h / np.sqrt(2 * m * ev * (1 + ev / (2 * m * c**2))) * 10**9
 
 def fourier_transform(dset: sidpy.Dataset) -> sidpy.Dataset:
     """
-    Reads information into dictionary 'tags', performs 'FFT',
+    Reads information into dictionary 'tags', performs 'FFT',
+    and provides a smoothed FT and reciprocal
     and intensity limits for visualization.
 
     Parameters
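The new body of get_wavelength is the standard relativistically corrected de Broglie expression; despite the "1/nm" wording kept in the docstring, the constants used return the wavelength in nm. A quick numerical check of the same formula:

    import numpy as np
    import scipy.constants

    def wavelength_nm(e0_volts):
        ev = scipy.constants.e * e0_volts
        m, c, h = scipy.constants.m_e, scipy.constants.c, scipy.constants.h
        return h / np.sqrt(2 * m * ev * (1 + ev / (2 * m * c**2))) * 1e9

    print(wavelength_nm(200_000))   # ~0.00251 nm, i.e. 2.51 pm for a 200 kV microscope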
@@ -182,8 +141,8 @@ def fourier_transform(dset: sidpy.Dataset) -> sidpy.Dataset:
     assert isinstance(dset, sidpy.Dataset), 'Expected a sidpy Dataset'
 
     selection = []
-    image_dims =
-    if dset.data_type ==
+    image_dims = dset.get_image_dims(return_axis=True)
+    if dset.data_type.name == 'IMAGE_STACK':
         stack_dim = dset.get_dimensions_by_type('TEMPORAL')
 
         if len(image_dims) != 2:
@@ -193,184 +152,73 @@ def fourier_transform(dset: sidpy.Dataset) -> sidpy.Dataset:
             if i in image_dims:
                 selection.append(slice(None))
             if len(stack_dim) == 0:
-
+                stack_dim = i
                 selection.append(slice(None))
             elif i in stack_dim:
-
+                stack_dim = i
                 selection.append(slice(None))
             else:
                 selection.append(slice(0, 1))
 
         image_stack = np.squeeze(np.array(dset)[selection])
         new_image = np.sum(np.array(image_stack), axis=stack_dim)
-    elif dset.data_type ==
+    elif dset.data_type.name == 'IMAGE':
         new_image = np.array(dset)
     else:
         return
 
     new_image = new_image - new_image.min()
-
-    fft_transform = (np.fft.fftshift(np.fft.fft2(new_image)))
-
-    image_dims = pyTEMlib.sidpy_tools.get_image_dims(dset)
+    fft_transform = (np.fft.fftshift(np.fft.fft2(np.array(new_image))))
 
-
-    units_y = '1/' + dset._axes[image_dims[1]].units
+    image_dims = dset.get_image_dims(return_axis=True)
 
+    units_x = '1/' + image_dims[0].units
+    units_y = '1/' + image_dims[1].units
     fft_dset = sidpy.Dataset.from_array(fft_transform)
     fft_dset.quantity = dset.quantity
     fft_dset.units = 'a.u.'
     fft_dset.data_type = 'IMAGE'
     fft_dset.source = dset.title
     fft_dset.modality = 'fft'
-
-    fft_dset.set_dimension(0, sidpy.Dimension(
-
-
+    axis = np.fft.fftshift(np.fft.fftfreq(new_image.shape[0], d=dset.x[1]-dset.x[0]))
+    fft_dset.set_dimension(0, sidpy.Dimension(axis,
+                                              name='u', units=units_x,
+                                              dimension_type='RECIPROCAL',
                                               quantity='reciprocal_length'))
-
-
-                                              name='v', units=units_y,
+    axis = np.fft.fftshift(np.fft.fftfreq(new_image.shape[1], d=dset.y[1]-dset.y[0]))
+    fft_dset.set_dimension(1, sidpy.Dimension(axis,
+                                              name='v', units=units_y,
+                                              dimension_type='RECIPROCAL',
                                               quantity='reciprocal_length'))
-
     return fft_dset
 
 
[removed here, roughly 115 lines: a power-spectrum helper (its def line is truncated in the source rendering) that Gaussian-smoothed the FFT magnitude, returned log(1 + |FFT|) as a sidpy.Dataset and stored display-intensity limits in metadata, and diffractogram_spots(dset, spot_threshold, return_center=True, eps=0.1), which located reflections with blob_log / peak_local_max and estimated the pattern centre from a DBSCAN clustering of spot midpoints]
-def center_diffractogram(dset, return_plot = True, smoothing = 1, min_samples = 10, beamstop_size = 0.1):
+def center_diffractogram(dset, return_plot = True, smoothing = 1,
+                         min_samples = 10, beamstop_size = 0.1):
+    """Find center of diffractogram by fitting a circle to the diffraction ring"""
+    mean_radius = 0
     try:
         diff = np.array(dset).T.astype(np.float16)
         diff[diff < 0] = 0
-        threshold = threshold_otsu(diff)
+        threshold = skimage.filters.threshold_otsu(diff)
         binary = (diff > threshold).astype(float)
-
-
+        # Smooth before edge detection
+        smoothed_image = scipy.ndimage.gaussian_filter(binary, sigma=smoothing)
+        smooth_threshold = skimage.filters.threshold_otsu(smoothed_image)
         smooth_binary = (smoothed_image > smooth_threshold).astype(float)
 
         # add a circle to mask the beamstop
         x, y = np.meshgrid(np.arange(dset.shape[0]), np.arange(dset.shape[1]))
-
+        radius = (x - dset.shape[0] / 2) ** 2 + (y - dset.shape[1] / 2) ** 2
+        circle = radius < (beamstop_size * dset.shape[0] / 2) ** 2
         smooth_binary[circle] = 1
-
+
         # Find the edges using the Sobel operator
-        edges = sobel(smooth_binary)
+        edges = skimage.filters.sobel(smooth_binary)
         edge_points = np.argwhere(edges)
 
         # Use DBSCAN to cluster the edge points
-        db = DBSCAN(eps=10, min_samples=min_samples).fit(edge_points)
+        db = sklearn.cluster.DBSCAN(eps=10, min_samples=min_samples).fit(edge_points)
         labels = db.labels_
         if len(set(labels)) == 1:
             raise ValueError("DBSCAN clustering resulted in only one group, check the parameters.")
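A minimal sketch of calling the reworked fourier_transform on a synthetic sidpy image; dimension names, pixel size and units below are arbitrary, and the sidpy API usage mirrors the calls visible in the hunk itself:

    import numpy as np
    import sidpy
    import pyTEMlib.image_tools as it

    data = np.random.random((128, 128))
    dset = sidpy.Dataset.from_array(data)
    dset.data_type = 'IMAGE'
    dset.quantity = 'intensity'
    dset.units = 'counts'
    dset.set_dimension(0, sidpy.Dimension(np.arange(128) * 0.05, name='x', units='nm',
                                          quantity='Length', dimension_type='spatial'))
    dset.set_dimension(1, sidpy.Dimension(np.arange(128) * 0.05, name='y', units='nm',
                                          quantity='Length', dimension_type='spatial'))

    fft_dset = it.fourier_transform(dset)   # reciprocal dimensions 'u', 'v' in 1/nm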
@@ -383,14 +231,16 @@ def center_diffractogram(dset, return_plot = True, smoothing = 1, min_samples =
 
         # Fit a circle to the diffraction ring
         def calc_distance(c, x, y):
-
-            return
+            ri = np.sqrt((x - c[0])**2 + (y - c[1])**2)
+            return ri - ri.mean()
         x_m = np.mean(edge_points[:, 1])
         y_m = np.mean(edge_points[:, 0])
         center_guess = x_m, y_m
-        center,
-
-
+        center, _ = scipy.optimize.leastsq(calc_distance, center_guess,
+                                           args=(edge_points[:, 1], edge_points[:, 0]))
+        mean_radius = (np.mean(calc_distance(center, edge_points[:, 1], edge_points[:, 0]))
+                       + np.sqrt((edge_points[:, 1] - center[0])**2
+                                 + (edge_points[:, 0] - center[1])**2).mean())
     finally:
         if return_plot:
             fig, ax = plt.subplots(1, 5, figsize=(14, 4), sharex=True, sharey=True)
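The circle fit added above is the classic geometric least-squares fit: the residual of each edge point is its distance to the trial centre minus the mean of those distances. A self-contained sketch of the same idea on synthetic data:

    import numpy as np
    import scipy.optimize

    rng = np.random.default_rng(0)
    angles = rng.uniform(0, 2 * np.pi, 500)
    true_center, true_radius = np.array([130.0, 122.0]), 80.0
    x = true_center[0] + true_radius * np.cos(angles) + rng.normal(0, 1.0, 500)
    y = true_center[1] + true_radius * np.sin(angles) + rng.normal(0, 1.0, 500)

    def residuals(c, x, y):
        r = np.sqrt((x - c[0]) ** 2 + (y - c[1]) ** 2)
        return r - r.mean()

    center, _ = scipy.optimize.leastsq(residuals, [x.mean(), y.mean()], args=(x, y))
    radius = np.sqrt((x - center[0]) ** 2 + (y - center[1]) ** 2).mean()
    print(center, radius)   # close to (130, 122) and 80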
@@ -411,358 +261,9 @@ def center_diffractogram(dset, return_plot = True, smoothing = 1, min_samples =
             for axis in ax:
                 axis.axis('off')
             fig.tight_layout()
-
     return center
 
 
[removed here, roughly 340 lines: adaptive_fourier_filter(dset, spots, low_pass=3, reflection_radius=0.3), rotational_symmetry_diffractogram(spots), and the registration block - complete_registration(main_dataset, storage_channel=None), demon_registration(dataset, verbose=False) (SimpleITK diffeomorphic demons), rigid_registration(dataset, normalization=None) (skimage phase_cross_correlation), rig_reg_drift(dset, rel_drift) and crop_image_stack(rig_reg, drift)]
 class ImageWithLineProfile:
     """Image with line profile"""
 
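The rigid and non-rigid registration helpers removed above relied on skimage.registration.phase_cross_correlation and SimpleITK; the pyTEMlib/image/image_registration.py module added in this release presumably takes over that role. For reference, a standalone sketch of the pixel-level drift estimation the removed rigid_registration performed (skimage and scipy only):

    import numpy as np
    import scipy.ndimage
    from skimage.registration import phase_cross_correlation

    rng = np.random.default_rng(1)
    frame0 = scipy.ndimage.gaussian_filter(rng.random((256, 256)), 3)
    frame1 = scipy.ndimage.shift(frame0, (4.0, -2.0), order=3)    # frame with known drift

    # shift is, per the skimage docs, the translation that registers frame1 onto frame0
    shift, error, phasediff = phase_cross_correlation(frame0, frame1, normalization='phase')
    registered = scipy.ndimage.shift(frame1, shift, order=3)
    print(shift)   # detected drift between the two frames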
@@ -779,6 +280,13 @@ class ImageWithLineProfile:
         self.line, = self.ax.plot([0], [0], color='orange')  # empty line
         self.end_x = self.line.get_xdata()
         self.end_y = self.line.get_ydata()
+        self.x = 0
+        self.y = 0
+        self.z = 0
+        self.start_x = self.end_x
+        self.start_y = self.end_y
+        self.moved_point = [0, 0]
+        self.new_point = [0, 0]
         self.cid = self.line.figure.canvas.mpl_connect('button_press_event', self)
 
     def __call__(self, event):
@@ -796,6 +304,7 @@ class ImageWithLineProfile:
         self.update()
 
     def update(self):
+        """ Update line profile"""
         if not self.line_plot:
             self.line_plot = True
             self.figure.clear()
@@ -818,7 +327,7 @@ class ImageWithLineProfile:
         y = np.linspace(y0, y1, num)*(self.data.shape[0]/self.extent[1])
 
         # Extract the values along the line, using cubic interpolation
-        zi2 = ndimage.map_coordinates(self.data.T, np.vstack((x, y)))
+        zi2 = scipy.ndimage.map_coordinates(self.data.T, np.vstack((x, y)))
 
         x_axis = np.linspace(0, length_plot, len(zi2))
         self.x = x_axis
@@ -832,34 +341,38 @@ class ImageWithLineProfile:
 
 
 class LineSelector(matplotlib.widgets.PolygonSelector):
+    """ Line selector with adjustable line width"""
     def __init__(self, ax, onselect, line_width=1, **kwargs):
         super().__init__(ax, onselect, **kwargs)
         bounds = ax.viewLim.get_points()
         np.max(bounds[0])
-        self.line_verts = np.array([[np.max(bounds[1])/2, np.max(bounds[0])/5],
-
-                                    [np.max(bounds[1])/5, np.max(bounds[0])/2],
-
+        self.line_verts = np.array([[np.max(bounds[1])/2, np.max(bounds[0])/5],
+                                    [np.max(bounds[1])/2, np.max(bounds[0])/5+1],
+                                    [np.max(bounds[1])/5, np.max(bounds[0])/2],
+                                    [np.max(bounds[1])/5, np.max(bounds[0])/2]])
         self.verts = self.line_verts
         self.line_width = line_width
 
     def set_linewidth(self, line_width=None):
+        """ Set the line width of the line selector"""
         if line_width is not None:
             self.line_width = line_width
 
-        m = -(self.line_verts[0, 1]-self.line_verts[3, 1])/(self.line_verts[0, 0]
+        m = -(self.line_verts[0, 1]-self.line_verts[3, 1])/(self.line_verts[0, 0]
+                                                            -self.line_verts[3, 0])
         c = 1/np.sqrt(1+m**2)
         s = c*m
-        self.line_verts[1] = [self.line_verts[0, 0]+self.line_width*s,
-
-
+        self.line_verts[1] = [self.line_verts[0, 0]+self.line_width*s,
+                              self.line_verts[0, 1]+self.line_width*c]
+        self.line_verts[2] = [self.line_verts[3, 0]+self.line_width*s,
+                              self.line_verts[3, 1]+self.line_width*c]
         self.verts = self.line_verts.copy()
 
     def onmove(self, event):
         super().onmove(event)
         if np.max(np.linalg.norm(self.line_verts-self.verts, axis=1)) > 1:
             self.moved_point = np.argmax(np.linalg.norm(self.line_verts-self.verts, axis=1))
-
+
             self.new_point = self.verts[self.moved_point]
             moved_point = int(np.floor(self.moved_point/2)*3)
             self.moved_point = moved_point
@@ -868,13 +381,16 @@ class LineSelector(matplotlib.widgets.PolygonSelector):
 
 def get_profile(dataset, line, spline_order=-1):
     """
-    This function extracts a line profile from a given dataset.
-
+    This function extracts a line profile from a given dataset.
+    The line profile is a representation of the data values
+    along a specified line in the dataset.
+    This function works for both image and spectral image data types.
 
     Args:
         dataset (sidpy.Dataset): The input dataset from which to extract the line profile.
         line (list): A list specifying the line along which the profile should be extracted.
-        spline_order (int, optional): The order of the spline interpolation to use.
+        spline_order (int, optional): The order of the spline interpolation to use.
+                                      Default is -1, which means no interpolation.
 
     Returns:
         profile_dataset (sidpy.Dataset): A new sidpy.Dataset containing the line profile.
@@ -887,22 +403,23 @@ def get_profile(dataset, line, spline_order=-1):
     xv /= image_dims[0].slope
     yv /= image_dims[1].slope
     profile = scipy.ndimage.map_coordinates(np.array(dataset), [xv, yv])
-
+
     profile_dataset = sidpy.Dataset.from_array(profile.sum(axis=0))
     profile_dataset.data_type='spectrum'
     profile_dataset.units = dataset.units
     profile_dataset.quantity = dataset.quantity
-    profile_dataset.set_dimension(0, sidpy.Dimension(np.linspace(xv[0,0], xv[-1,-1],
-
+    profile_dataset.set_dimension(0, sidpy.Dimension(np.linspace(xv[0,0], xv[-1,-1],
+                                                                 profile_dataset.shape[0]),
+                                                     name='x', units=dataset.x.units,
+                                                     quantity=dataset.x.quantity,
                                                      dimension_type='spatial'))
 
-    profile_dataset
-
     if dataset.data_type.name == 'SPECTRAL_IMAGE':
         spectral_axis = dataset.get_spectral_dims(return_axis=True)[0]
         if spline_order > -1:
             xv, yv, zv = get_line_selection_points_interpolated(line, z_length=dataset.shape[2])
-            profile = scipy.ndimage.map_coordinates(np.array(dataset), [xv, yv, zv],
+            profile = scipy.ndimage.map_coordinates(np.array(dataset), [xv, yv, zv],
+                                                    order=spline_order)
             profile = profile.sum(axis=0)
             profile = np.stack([profile, profile], axis=1)
             start = xv[0, 0, 0]
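get_profile builds a grid of sample points along the selected line, widened to line_width, and sums the scipy.ndimage.map_coordinates samples across that width. A self-contained sketch of the same sampling pattern, independent of the interactive LineSelector object:

    import numpy as np
    import scipy.ndimage

    data = np.random.random((256, 256))
    start, end = np.array([40.0, 60.0]), np.array([180.0, 150.0])   # in pixels
    line_width = 5

    direction = (end - start) / np.linalg.norm(end - start)
    normal = np.array([-direction[1], direction[0]])
    length = int(np.linalg.norm(end - start))

    t = np.linspace(0, np.linalg.norm(end - start), length)
    w = np.linspace(-line_width / 2, line_width / 2, line_width)
    tv, wv = np.meshgrid(t, w)
    xv = start[0] + tv * direction[0] + wv * normal[0]
    yv = start[1] + tv * direction[1] + wv * normal[1]

    profile = scipy.ndimage.map_coordinates(data, [xv, yv]).sum(axis=0)   # summed across the width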
@@ -914,20 +431,21 @@ def get_profile(dataset, line, spline_order=-1):
         profile_dataset.data_type='spectral_image'
         profile_dataset.units = dataset.units
         profile_dataset.quantity = dataset.quantity
-        profile_dataset.set_dimension(0, sidpy.Dimension(np.arange(profile_dataset.shape[0])+start,
-                                                          name='x', units=dataset.x.units,
+        profile_dataset.set_dimension(0, sidpy.Dimension(np.arange(profile_dataset.shape[0])+start,
+                                                          name='x', units=dataset.x.units,
+                                                          quantity=dataset.x.quantity,
                                                          dimension_type='spatial'))
-        profile_dataset.set_dimension(1, sidpy.Dimension([0, 1],
-                                                          name='y', units=dataset.x.units,
+        profile_dataset.set_dimension(1, sidpy.Dimension([0, 1],
+                                                          name='y', units=dataset.x.units,
+                                                          quantity=dataset.x.quantity,
                                                          dimension_type='spatial'))
-
+
         profile_dataset.set_dimension(2, spectral_axis)
     return profile_dataset
 
 
-
 def get_line_selection_points_interpolated(line, z_length=1):
-
+    """ Get line selection points from line selector with interpolation in z direction"""
     start_point = line.line_verts[3]
     right_point = line.line_verts[0]
     low_point = line.line_verts[2]
@@ -939,7 +457,7 @@ def get_line_selection_points_interpolated(line, z_length=1):
     m = (right_point[1] - start_point[1]) / (right_point[0] - start_point[0])
     length_x = int(abs(start_point[0]-right_point[0]))
     length_v = int(np.linalg.norm(start_point-right_point))
-
+
     linewidth = int(abs(start_point[1]-low_point[1]))
     x = np.linspace(0,length_x, length_v)
     y = np.linspace(0,linewidth, line.line_width)
@@ -950,19 +468,17 @@ def get_line_selection_points_interpolated(line, z_length=1):
         y = np.atleast_2d(y).repeat(z_length, axis=0).T
     else:
         xv, yv = np.meshgrid(x, y)
-
-
-    yv = yv + x*m + start_point[1]
+    yv = yv + x*m + start_point[1]
     xv = (xv.swapaxes(0,1) -y*m ).swapaxes(0,1) + start_point[0]
 
     if z_length > 1:
         return xv, yv, zv
-    else:
+    else:
         return xv, yv
 
 
 def get_line_selection_points(line):
-
+    """ Get line selection points from line selector"""
     start_point = line.line_verts[3]
     right_point = line.line_verts[0]
     low_point = line.line_verts[2]
@@ -974,19 +490,19 @@ def get_line_selection_points(line):
     m = (right_point[1] - start_point[1]) / (right_point[0] - start_point[0])
     length_x = int(abs(start_point[0]-right_point[0]))
     length_v = int(np.linalg.norm(start_point-right_point))
-
+
     linewidth = int(abs(start_point[1]-low_point[1]))
     x = np.linspace(0,length_x, length_v)
     y = np.linspace(0,linewidth, line.line_width)
     xv, yv = np.meshgrid(x, y)
-
-    yy = yv +x*m+start_point[1]
+
+    yy = yv +x*m+start_point[1]
     xx = (xv.T -y*m ).T + start_point[0]
-
     return xx, yy
 
 
 def get_line_profile(data, xv, yv, z_length):
+    """ Get line profile from data array"""
     profile = np.zeros([len(xv[0]), 2, z_length])
     for index_x in range(xv.shape[1]):
         for index_y in range(xv.shape[0]):
@@ -995,7 +511,7 @@ def get_line_profile(data, xv, yv, z_length):
             if x< data.shape[0] and x>0 and y < data.shape[1] and y>0:
                 profile[index_x, 0] +=data[x, y]
     return profile
-
+
 
 def histogram_plot(image_tags):
     """interactive histogram"""
@@ -1003,7 +519,6 @@ def histogram_plot(image_tags):
     color_map_list = ['gray', 'viridis', 'jet', 'hot']
     if 'minimum_intensity' not in image_tags:
         image_tags['minimum_intensity'] = image_tags['plotimage'].min()
-    minimum_intensity = image_tags['minimum_intensity']
     if 'maximum_intensity' not in image_tags:
         image_tags['maximum_intensity'] = image_tags['plotimage'].max()
     data = image_tags['plotimage']
@@ -1014,20 +529,19 @@ def histogram_plot(image_tags):
 
     cmap = plt.cm.get_cmap(image_tags['color_map'])
     colors = cmap(np.linspace(0., 1., nbins))
-    norm2 =
+    norm2 = matplotlib.colors.Normalize(vmin=vmin, vmax=vmax)
     hist, bin_edges = np.histogram(data, np.linspace(vmin, vmax, nbins), density=True)
 
     width = bin_edges[1]-bin_edges[0]
-
+    event2 = None
     def onselect(vmin, vmax):
         ax1.clear()
         cmap = plt.cm.get_cmap(image_tags['color_map'])
         colors = cmap(np.linspace(0., 1., nbins))
-        norm2 =
+        norm2 = matplotlib.colors.Normalize(vmin=vmin, vmax=vmax)
         hist2, bin_edges2 = np.histogram(data, np.linspace(vmin, vmax, nbins), density=True)
 
-        width2 =
-
+        width2 = bin_edges2[1]-bin_edges2[0]
         for i in range(nbins-1):
             histogram[i].xy = (bin_edges2[i], 0)
             histogram[i].set_height(hist2[i])
@@ -1036,7 +550,8 @@ def histogram_plot(image_tags):
         ax.set_xlim(vmin, vmax)
         ax.set_ylim(0, hist2.max()*1.01)
 
-        cb1 =
+        # cb1 = matplotlib.colorbar.ColorbarBase(ax1, cmap=cmap,
+        #                                        norm=norm2, orientation='horizontal')
 
         image_tags['minimum_intensity'] = vmin
         image_tags['maximum_intensity'] = vmax
@@ -1044,9 +559,9 @@ def histogram_plot(image_tags):
     def onclick(event):
         global event2
         event2 = event
-
-
-
+        button_click = 'double' if event.dblclick else 'single'
+        print(f"{button_click} click: button={event.button},"
+              + f" x={event.x}, y={event.y}, xdata={event.xdata}, ydata={event.ydata}")
         if event.inaxes == ax1:
             if event.button == 3:
                 ind = color_map_list.index(image_tags['color_map'])+1
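histogram_plot wires a matplotlib SpanSelector to the histogram axis so that dragging a horizontal span updates the display limits. A stripped-down sketch of that pattern with plain matplotlib (no pyTEMlib state dictionary; matplotlib ≥ 3.5 assumed for the props keyword):

    import numpy as np
    import matplotlib.pyplot as plt
    import matplotlib.widgets
    import matplotlib.colors

    data = np.random.normal(0.5, 0.15, (256, 256))
    fig, (ax_im, ax_hist) = plt.subplots(1, 2, figsize=(8, 4))
    im = ax_im.imshow(data, cmap='gray')
    hist, edges = np.histogram(data, bins=100)
    ax_hist.bar(edges[:-1], hist, width=edges[1] - edges[0])

    def onselect(vmin, vmax):
        # same role as image_tags['minimum_intensity'] / ['maximum_intensity'] above
        im.set_norm(matplotlib.colors.Normalize(vmin=vmin, vmax=vmax))
        fig.canvas.draw_idle()

    span = matplotlib.widgets.SpanSelector(ax_hist, onselect, 'horizontal',
                                           props=dict(facecolor='blue', alpha=0.5))
    plt.show()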
@@ -1065,172 +580,19 @@ def histogram_plot(image_tags):
|
|
|
1065
580
|
ax = fig2.add_axes([0., 0.2, 0.9, 0.7])
|
|
1066
581
|
ax1 = fig2.add_axes([0., 0.15, 0.9, 0.05])
|
|
1067
582
|
|
|
1068
|
-
histogram = ax.bar(bin_edges[0:-1], hist, width=width, color=colors,
|
|
583
|
+
histogram = ax.bar(bin_edges[0:-1], hist, width=width, color=colors,
|
|
584
|
+
edgecolor='black', alpha=0.8)
|
|
1069
585
|
onselect(vmin, vmax)
|
|
1070
|
-
cb1 =
|
|
586
|
+
cb1 = matplotlib.colorbar.ColorbarBase(ax1, cmap=cmap, norm=norm2, orientation='horizontal')
|
|
1071
587
|
|
|
1072
588
|
rectprops = dict(facecolor='blue', alpha=0.5)
|
|
1073
589
|
|
|
1074
|
-
span =
|
|
590
|
+
span = matplotlib.widgets.SpanSelector(ax, onselect, 'horizontal', props=rectprops)
|
|
1075
591
|
|
|
1076
592
|
cid = fig2.canvas.mpl_connect('button_press_event', onclick)
|
|
1077
593
|
return span
|
|
1078
594
|
|
|
1079
595
|
|
|
1080
|
-
def clean_svd(im, pixel_size=1, source_size=5):
|
|
1081
|
-
"""De-noising of image by using first component of single value decomposition"""
|
|
1082
|
-
patch_size = int(source_size/pixel_size)
|
|
1083
|
-
if patch_size < 3:
|
|
1084
|
-
patch_size = 3
|
|
1085
|
-
patches = image.extract_patches_2d(np.array(im), (patch_size, patch_size))
|
|
1086
|
-
patches = patches.reshape(patches.shape[0], patches.shape[1]*patches.shape[2])
|
|
1087
|
-
|
|
1088
|
-
num_components = 32
|
|
1089
|
-
|
|
1090
|
-
u, s, v = randomized_svd(patches, num_components)
|
|
1091
|
-
u_im_size = int(np.sqrt(u.shape[0]))
|
|
1092
|
-
reduced_image = u[:, 0].reshape(u_im_size, u_im_size)
|
|
1093
|
-
reduced_image = reduced_image/reduced_image.sum()*im.sum()
|
|
1094
|
-
if isinstance(im, sidpy.Dataset):
|
|
1095
|
-
reduced_image = im.like_data(reduced_image)
|
|
1096
|
-
return reduced_image
|
|
1097
|
-
|
|
1098
|
-
|
|
1099
|
-
def rebin(im, binning=2):
|
|
1100
|
-
"""
|
|
1101
|
-
rebin an image by the number of pixels in x and y direction given by binning
|
|
1102
|
-
|
|
1103
|
-
Parameter
|
|
1104
|
-
---------
|
|
1105
|
-
image: numpy array in 2 dimensions
|
|
1106
|
-
|
|
1107
|
-
Returns
|
|
1108
|
-
-------
|
|
1109
|
-
binned image as numpy array
|
|
1110
|
-
"""
|
|
1111
|
-
if len(im.shape) == 2:
|
|
1112
|
-
-        return im.reshape((im.shape[0]//binning, binning, im.shape[1]//binning, binning)).mean(axis=3).mean(1)
-    else:
-        raise TypeError('not a 2D image')
-
-
-def cart2pol(points):
-    """Cartesian to polar coordinate conversion
-
-    Parameters
-    ---------
-    points: float or numpy array
-        points to be converted (Nx2)
-
-    Returns
-    -------
-    rho: float or numpy array
-        distance
-    phi: float or numpy array
-        angle
-    """
-
-    rho = np.linalg.norm(points[:, 0:2], axis=1)
-    phi = np.arctan2(points[:, 1], points[:, 0])
-
-    return rho, phi
-
-
-def pol2cart(rho, phi):
-    """Polar to Cartesian coordinate conversion
-
-    Parameters
-    ----------
-    rho: float or numpy array
-        distance
-    phi: float or numpy array
-        angle
-
-    Returns
-    -------
-    x: float or numpy array
-        x coordinates of converted points(Nx2)
-    """
-
-    x = rho * np.cos(phi)
-    y = rho * np.sin(phi)
-    return x, y
-
-
-def xy2polar(points, rounding=1e-3):
-    """ Conversion from carthesian to polar coordinates
-
-    the angles and distances are sorted by r and then phi
-    The indices of this sort is also returned
-
-    Parameters
-    ----------
-    points: numpy array
-        number of points in axis 0 first two elements in axis 1 are x and y
-    rounding: int
-        optional rounding in significant digits
-
-    Returns
-    -------
-    r, phi, sorted_indices
-    """
-
-    r, phi = cart2pol(points)
-
-    phi = phi  # %np.pi # only positive angles
-    r = (np.floor(r/rounding))*rounding  # Remove rounding error differences
-
-    sorted_indices = np.lexsort((phi, r))  # sort first by r and then by phi
-    r = r[sorted_indices]
-    phi = phi[sorted_indices]
-
-    return r, phi, sorted_indices
-
-
-def cartesian2polar(x, y, grid, r, t, order=3):
-    """Transform cartesian grid to polar grid
-
-    Used by warp
-    """
-
-    rr, tt = np.meshgrid(r, t)
-
-    new_x = rr*np.cos(tt)
-    new_y = rr*np.sin(tt)
-
-    ix = interp1d(x, np.arange(len(x)))
-    iy = interp1d(y, np.arange(len(y)))
-
-    new_ix = ix(new_x.ravel())
-    new_iy = iy(new_y.ravel())
-
-    return ndimage.map_coordinates(grid, np.array([new_ix, new_iy]), order=order).reshape(new_x.shape)
-
-
-def warp(diff, center):
-    """Takes a diffraction pattern (as a sidpy dataset)and warps it to a polar grid"""
-
-    # Define original polar grid
-    nx = np.shape(diff)[0]
-    ny = np.shape(diff)[1]
-
-    # Define center pixel
-    pix2nm = np.gradient(diff.u.values)[0]
-
-    x = np.linspace(1, nx, nx, endpoint=True)-center[0]
-    y = np.linspace(1, ny, ny, endpoint=True)-center[1]
-    z = diff
-
-    # Define new polar grid
-    nr = int(min([center[0], center[1], diff.shape[0]-center[0], diff.shape[1]-center[1]])-1)
-    nt = 360 * 3
-
-    r = np.linspace(1, nr, nr)
-    t = np.linspace(0., np.pi, nt, endpoint=False)
-
-    return cartesian2polar(x, y, z, r, t, order=3).T
-
-
 def calculate_ctf(wavelength, cs, defocus, k):
     """ Calculate Contrast Transfer Function
 
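The hunk above removes a self-contained Cartesian-to-polar resampling pipeline (cart2pol, pol2cart, xy2polar, cartesian2polar, warp) from image_tools.py. For readers who relied on warp for azimuthal analysis of diffraction patterns, the following is a minimal sketch of the same resampling idea using only NumPy and SciPy; polar_warp, its arguments, and the random test image are illustrative assumptions, not pyTEMlib API.

    import numpy as np
    from scipy import ndimage

    def polar_warp(image, center, n_theta=360):
        """Resample a 2D array onto a (radius, angle) grid around center (row, col).
        Illustrative helper only; not part of pyTEMlib."""
        n_r = int(min(center[0], center[1],
                      image.shape[0] - center[0], image.shape[1] - center[1])) - 1
        radii = np.arange(1, n_r + 1)
        angles = np.linspace(0., 2 * np.pi, n_theta, endpoint=False)
        rr, tt = np.meshgrid(radii, angles, indexing='ij')
        rows = center[0] + rr * np.sin(tt)          # pixel coordinates of each
        cols = center[1] + rr * np.cos(tt)          # (radius, angle) sample point
        polar = ndimage.map_coordinates(image, [rows.ravel(), cols.ravel()], order=3)
        return polar.reshape(rr.shape)

    # usage: an azimuthal average of a ring pattern is then polar_image.mean(axis=1)
    polar_image = polar_warp(np.random.rand(256, 256), center=(128.0, 128.0))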
@@ -1277,13 +639,11 @@ def get_rotation(experiment_spots, crystal_spots):
         positions (in 1/nm) of spots in diffractogram
     crystal_spots: numpy array (nx2)
         positions (in 1/nm) of Bragg spots according to kinematic scattering theory
-
     """
-
     r_experiment, phi_experiment = cart2pol(experiment_spots)
-
+
     # get crystal spots of same length and sort them by angle as well
-    r_crystal, phi_crystal,
+    r_crystal, phi_crystal, _ = xy2polar(crystal_spots)
     angle_index = np.argmin(np.abs(r_experiment-r_crystal[1]))
     rotation_angle = phi_experiment[angle_index] % (2*np.pi) - phi_crystal[1]
     print(phi_experiment[angle_index])
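This hunk restores the xy2polar call inside get_rotation: the rotation between experiment and simulation is read off as the difference in azimuthal angle between an experimental spot and the crystal reflection of matching radius. A simplified, library-independent sketch of that idea follows; estimate_rotation is a hypothetical helper that assumes at least two crystal reflections, not the pyTEMlib implementation.

    import numpy as np

    def estimate_rotation(experiment_spots, crystal_spots):
        """Crude rotation estimate between two (N, 2) arrays of spot positions."""
        r_exp = np.linalg.norm(experiment_spots, axis=1)
        phi_exp = np.arctan2(experiment_spots[:, 1], experiment_spots[:, 0])
        r_cry = np.linalg.norm(crystal_spots, axis=1)
        phi_cry = np.arctan2(crystal_spots[:, 1], crystal_spots[:, 0])
        order = np.lexsort((phi_cry, r_cry))        # sort reflections by r, then phi
        r_cry, phi_cry = r_cry[order], phi_cry[order]
        # experimental spot whose radius best matches the first non-central reflection
        idx = np.argmin(np.abs(r_exp - r_cry[1]))
        return (phi_exp[idx] % (2 * np.pi)) - phi_cry[1]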
@@ -1296,8 +656,6 @@ def get_rotation(experiment_spots, crystal_spots):
 
 def calibrate_image_scale(fft_tags, spots_reference, spots_experiment):
     """depreciated get change of scale from comparison of spots to Bragg angles """
-    gx = fft_tags['spatial_scale_x']
-    gy = fft_tags['spatial_scale_y']
 
     dist_reference = np.linalg.norm(spots_reference, axis=1)
     distance_experiment = np.linalg.norm(spots_experiment, axis=1)
@@ -1311,7 +669,8 @@ def calibrate_image_scale(fft_tags, spots_reference, spots_experiment):
         return np.sqrt((xdata * dgx) ** 2 + (ydata * dgy) ** 2) - dist_reference.min()
 
     x0 = [1.001, 0.999]
-    [dg,
+    [dg, _] = scipy.optimize.leastsq(func, x0, args=(closest_exp_reflections[:, 0],
+                                                     closest_exp_reflections[:, 1]))
     return dg
 
 
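The rebuilt call above fits two scale factors with scipy.optimize.leastsq. As a reminder of that call pattern, here is a toy example with made-up numbers; the residuals function and the data arrays are purely illustrative, not taken from the package.

    import numpy as np
    import scipy.optimize

    xdata = np.array([1.0, 0.0, 1.0])               # invented spot coordinates (1/nm)
    ydata = np.array([0.0, 1.0, 1.0])
    dist_reference = np.array([1.02, 0.98, 1.41])   # invented reference distances

    def residuals(dg, xdata, ydata):
        dgx, dgy = dg
        return np.sqrt((xdata * dgx) ** 2 + (ydata * dgy) ** 2) - dist_reference

    dg, _ = scipy.optimize.leastsq(residuals, x0=[1.001, 0.999], args=(xdata, ydata))
    print(dg)                                       # fitted (dgx, dgy) scale corrections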
@@ -1321,11 +680,10 @@ def align_crystal_reflections(spots, crystals):
     crystal_reflections_polar = []
     angles = []
     exp_r, exp_phi = cart2pol(spots)  # just in polar coordinates
-    spots_polar = np.array([exp_r, exp_phi])
 
-    for
-
-        r, phi, indices = xy2polar(tags['allowed']['g'])
+    for tags in crystals:
+        # sorted by r and phi , only positive angles
+        r, phi, indices = xy2polar(tags['allowed']['g'])
         # we mask the experimental values that are found already
         angle = 0.
 
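The restored loop relies on xy2polar handing back reflections sorted by radius first and angle second. That ordering comes from np.lexsort, which treats its last key as the primary one, as the short check below illustrates (the four example vectors are arbitrary).

    import numpy as np

    g = np.array([[1.0, 0.0], [0.0, 1.0], [2.0, 0.0], [0.0, -1.0]])
    r = np.linalg.norm(g, axis=1)
    phi = np.arctan2(g[:, 1], g[:, 0])
    order = np.lexsort((phi, r))   # primary key: r (last), secondary key: phi
    print(r[order])                # radii in ascending order
    print(phi[order])              # angles break ties between equal radii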
@@ -1335,126 +693,7 @@ def align_crystal_reflections(spots, crystals):
 
         crystal_reflections_polar.append([r, angle + phi, indices])
         tags['allowed']['g_rotated'] = pol2cart(r, angle + phi)
-        for spot in tags['allowed']['g']:
+        """for spot in tags['allowed']['g']:
             dif = np.linalg.norm(spots[:, 0:2]-spot[0:2], axis=1)
-
-            if dif.min() < 1.5:
-                ind = np.argmin(dif)
-
+        """
     return crystal_reflections_polar, angles
-
-
-# Deconvolution
-def decon_lr(o_image, probe, verbose=False):
-    """
-    # This task generates a restored image from an input image and point spread function (PSF) using
-    # the algorithm developed independently by Lucy (1974, Astron. J. 79, 745) and Richardson
-    # (1972, J. Opt. Soc. Am. 62, 55) and adapted for HST imagery by Snyder
-    # (1990, in Restoration of HST Images and Spectra, ST ScI Workshop Proceedings; see also
-    # Snyder, Hammoud, & White, JOSA, v. 10, no. 5, May 1993, in press).
-    # Additional options developed by Rick White (STScI) are also included.
-    #
-    # The Lucy-Richardson method can be derived from the maximum likelihood expression for data
-    # with a Poisson noise distribution. Thus, it naturally applies to optical imaging data such as HST.
-    # The method forces the restored image to be positive, in accord with photon-counting statistics.
-    #
-    # The Lucy-Richardson algorithm generates a restored image through an iterative method. The essence
-    # of the iteration is as follows: the (n+1)th estimate of the restored image is given by the nth estimate
-    # of the restored image multiplied by a correction image. That is,
-    #
-    #                           original data
-    #   image    = image     --------------- * reflect(PSF)
-    #        n+1        n      image   * PSF
-    #                               n
-
-    # where the *'s represent convolution operators and reflect(PSF) is the reflection of the PSF, i.e.
-    # reflect((PSF)(x,y)) = PSF(-x,-y). When the convolutions are carried out using fast Fourier transforms
-    # (FFTs), one can use the fact that FFT(reflect(PSF)) = conj(FFT(PSF)), where conj is the complex conjugate
-    # operator.
-    """
-
-    if len(o_image) < 1:
-        return o_image
-
-    if o_image.shape != probe.shape:
-        print('Weirdness ', o_image.shape, ' != ', probe.shape)
-
-    probe_c = np.ones(probe.shape, dtype=np.complex64)
-    probe_c.real = probe
-
-    error = np.ones(o_image.shape, dtype=np.complex64)
-    est = np.ones(o_image.shape, dtype=np.complex64)
-    source = np.ones(o_image.shape, dtype=np.complex64)
-    o_image = o_image - o_image.min()
-    image_mult = o_image.max()
-    o_image = o_image / o_image.max()
-    source.real = o_image
-
-    response_ft = fftpack.fft2(probe_c)
-
-    ap_angle = o_image.metadata['experiment']['convergence_angle']
-    if ap_angle > .1:
-        ap_angle /= 1000  # now in rad
-
-    e0 = float(o_image.metadata['experiment']['acceleration_voltage'])
-
-    wl = get_wavelength(e0)
-    o_image.metadata['experiment']['wavelength'] = wl
-
-    over_d = 2 * ap_angle / wl
-
-    dx = o_image.x[1]-o_image.x[0]
-    dk = 1.0 / float(o_image.x[-1])  # last value of x-axis is field of view
-    screen_width = 1 / dx
-
-    aperture = np.ones(o_image.shape, dtype=np.complex64)
-    # Mask for the aperture before the Fourier transform
-    n = o_image.shape[0]
-    size_x = o_image.shape[0]
-    size_y = o_image.shape[1]
-    app_ratio = over_d / screen_width * n
-
-    theta_x = np.array(-size_x / 2. + np.arange(size_x))
-    theta_y = np.array(-size_y / 2. + np.arange(size_y))
-    t_xv, t_yv = np.meshgrid(theta_x, theta_y)
-
-    tp1 = t_xv ** 2 + t_yv ** 2 >= app_ratio ** 2
-    aperture[tp1.T] = 0.
-    # print(app_ratio, screen_width, dk)
-
-    progress = tqdm(total=500)
-    # de = 100
-    dest = 100
-    i = 0
-    while abs(dest) > 0.0001:  # or abs(de) > .025:
-        i += 1
-        error_old = np.sum(error.real)
-        est_old = est.copy()
-        error = source / np.real(fftpack.fftshift(fftpack.ifft2(fftpack.fft2(est) * response_ft)))
-        est = est * np.real(fftpack.fftshift(fftpack.ifft2(fftpack.fft2(error) * np.conjugate(response_ft))))
-
-        error_new = np.real(np.sum(np.power(error, 2))) - error_old
-        dest = np.sum(np.power((est - est_old).real, 2)) / np.sum(est) * 100
-
-        if error_old != 0:
-            de = error_new / error_old * 1.0
-        else:
-            de = error_new
-
-        if verbose:
-            print(
-                ' LR Deconvolution - Iteration: {0:d} Error: {1:.2f} = change: {2:.5f}%, {3:.5f}%'.format(i, error_new,
-                                                                                                           de,
-                                                                                                           abs(dest)))
-        if i > 500:
-            dest = 0.0
-            print('terminate')
-        progress.update(1)
-    progress.write(f"converged in {i} iterations")
-    print('\n Lucy-Richardson deconvolution converged in ' + str(i) + ' iterations')
-    est2 = np.real(fftpack.ifft2(fftpack.fft2(est) * fftpack.fftshift(aperture)))*image_mult
-    out_dataset = o_image.like_data(est2)
-    out_dataset.title = 'Lucy Richardson deconvolution'
-    out_dataset.data_type = 'image'
-    return out_dataset
-
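The largest removal in this hunk is decon_lr, the Lucy-Richardson deconvolution. Its docstring describes the classic multiplicative update: the next estimate equals the current estimate times the ratio of the original data to the current estimate convolved with the PSF, with that ratio convolved with the mirrored PSF. For readers who used it, the bare iteration can be written independently of pyTEMlib in a few lines; richardson_lucy below is a generic sketch without the aperture masking and convergence bookkeeping of the removed function, and skimage.restoration.richardson_lucy offers a maintained alternative.

    import numpy as np
    from scipy import signal

    def richardson_lucy(image, psf, iterations=50):
        """Textbook Richardson-Lucy update, not the removed pyTEMlib routine:
        est <- est * ((image / (est conv psf)) conv psf_mirrored)."""
        image = np.asarray(image, dtype=float)
        est = np.full(image.shape, image.mean())    # flat, positive starting estimate
        psf_mirror = psf[::-1, ::-1]                # reflect(PSF)(x, y) = PSF(-x, -y)
        for _ in range(iterations):
            reblurred = signal.fftconvolve(est, psf, mode='same')
            ratio = image / (reblurred + 1e-12)     # guard against division by zero
            est *= signal.fftconvolve(ratio, psf_mirror, mode='same')
        return est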