pyTEMlib 0.2020.11.1__py3-none-any.whl → 0.2024.9.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of pyTEMlib might be problematic. Click here for more details.
- pyTEMlib/__init__.py +11 -11
- pyTEMlib/animation.py +631 -0
- pyTEMlib/atom_tools.py +240 -245
- pyTEMlib/config_dir.py +57 -33
- pyTEMlib/core_loss_widget.py +658 -0
- pyTEMlib/crystal_tools.py +1255 -0
- pyTEMlib/diffraction_plot.py +756 -0
- pyTEMlib/dynamic_scattering.py +293 -0
- pyTEMlib/eds_tools.py +609 -0
- pyTEMlib/eels_dialog.py +749 -491
- pyTEMlib/{interactive_eels.py → eels_dialog_utilities.py} +1199 -1177
- pyTEMlib/eels_tools.py +2031 -1698
- pyTEMlib/file_tools.py +1276 -560
- pyTEMlib/file_tools_qt.py +193 -0
- pyTEMlib/graph_tools.py +1166 -450
- pyTEMlib/graph_viz.py +449 -0
- pyTEMlib/image_dialog.py +158 -0
- pyTEMlib/image_dlg.py +146 -232
- pyTEMlib/image_tools.py +1399 -1028
- pyTEMlib/info_widget.py +933 -0
- pyTEMlib/interactive_image.py +1 -226
- pyTEMlib/kinematic_scattering.py +1196 -0
- pyTEMlib/low_loss_widget.py +176 -0
- pyTEMlib/microscope.py +61 -81
- pyTEMlib/peak_dialog.py +1047 -410
- pyTEMlib/peak_dlg.py +286 -242
- pyTEMlib/probe_tools.py +653 -207
- pyTEMlib/sidpy_tools.py +153 -136
- pyTEMlib/simulation_tools.py +104 -87
- pyTEMlib/version.py +6 -3
- pyTEMlib/xrpa_x_sections.py +20972 -0
- {pyTEMlib-0.2020.11.1.dist-info → pyTEMlib-0.2024.9.0.dist-info}/LICENSE +21 -21
- pyTEMlib-0.2024.9.0.dist-info/METADATA +92 -0
- pyTEMlib-0.2024.9.0.dist-info/RECORD +37 -0
- {pyTEMlib-0.2020.11.1.dist-info → pyTEMlib-0.2024.9.0.dist-info}/WHEEL +5 -5
- {pyTEMlib-0.2020.11.1.dist-info → pyTEMlib-0.2024.9.0.dist-info}/entry_points.txt +0 -1
- pyTEMlib/KinsCat.py +0 -2758
- pyTEMlib/__version__.py +0 -2
- pyTEMlib/data/TEMlibrc +0 -68
- pyTEMlib/data/edges_db.csv +0 -189
- pyTEMlib/data/edges_db.pkl +0 -0
- pyTEMlib/data/fparam.txt +0 -103
- pyTEMlib/data/microscopes.csv +0 -7
- pyTEMlib/data/microscopes.xml +0 -167
- pyTEMlib/data/path.txt +0 -1
- pyTEMlib/defaults_parser.py +0 -90
- pyTEMlib/dm3_reader.py +0 -613
- pyTEMlib/edges_db.py +0 -76
- pyTEMlib/eels_dlg.py +0 -224
- pyTEMlib/hdf_utils.py +0 -483
- pyTEMlib/image_tools1.py +0 -2194
- pyTEMlib/info_dialog.py +0 -237
- pyTEMlib/info_dlg.py +0 -202
- pyTEMlib/nion_reader.py +0 -297
- pyTEMlib/nsi_reader.py +0 -170
- pyTEMlib/structure_tools.py +0 -316
- pyTEMlib/test.py +0 -2072
- pyTEMlib-0.2020.11.1.dist-info/METADATA +0 -20
- pyTEMlib-0.2020.11.1.dist-info/RECORD +0 -45
- {pyTEMlib-0.2020.11.1.dist-info → pyTEMlib-0.2024.9.0.dist-info}/top_level.txt +0 -0
pyTEMlib/image_tools1.py
DELETED
|
@@ -1,2194 +0,0 @@
|
|
|
1
|
-
##################################
|
|
2
|
-
#
|
|
3
|
-
# image_tools.py
|
|
4
|
-
# by Gerd Duscher, UTK
|
|
5
|
-
# part of pyTEMlib
|
|
6
|
-
# MIT license except where stated differently
|
|
7
|
-
#
|
|
8
|
-
###############################
|
|
9
|
-
import numpy as np
|
|
10
|
-
|
|
11
|
-
import matplotlib as mpl
|
|
12
|
-
import matplotlib.pylab as plt
|
|
13
|
-
from matplotlib.patches import Polygon # plotting of polygons -- graph rings
|
|
14
|
-
|
|
15
|
-
import matplotlib.widgets as mwidgets
|
|
16
|
-
from matplotlib.widgets import RectangleSelector
|
|
17
|
-
|
|
18
|
-
|
|
19
|
-
|
|
20
|
-
from PyQt5 import QtGui, QtWidgets
|
|
21
|
-
import pickle
|
|
22
|
-
|
|
23
|
-
import json
|
|
24
|
-
import struct
|
|
25
|
-
|
|
26
|
-
import sys, os
|
|
27
|
-
|
|
28
|
-
import math
|
|
29
|
-
import itertools
|
|
30
|
-
from itertools import product
|
|
31
|
-
|
|
32
|
-
from scipy import fftpack
|
|
33
|
-
from scipy import signal
|
|
34
|
-
from scipy.interpolate import interp1d, interp2d
|
|
35
|
-
from scipy.optimize import leastsq
|
|
36
|
-
|
|
37
|
-
# Multidimensional Image library
|
|
38
|
-
import scipy.ndimage as ndimage
|
|
39
|
-
|
|
40
|
-
import scipy.spatial as sp
|
|
41
|
-
from scipy.spatial import Voronoi, KDTree, cKDTree
|
|
42
|
-
|
|
43
|
-
from skimage.feature import peak_local_max
|
|
44
|
-
# our blob detectors from the scipy image package
|
|
45
|
-
from skimage.feature import blob_log #blob_dog, blob_doh
|
|
46
|
-
|
|
47
|
-
from sklearn.feature_extraction import image
|
|
48
|
-
from sklearn.utils.extmath import randomized_svd
|
|
49
|
-
from sklearn.cluster import KMeans
|
|
50
|
-
|
|
51
|
-
|
|
52
|
-
|
|
53
|
-
from pyTEMlib.dftregistration import * # sup-pixel rigid registration
|
|
54
|
-
|
|
55
|
-
|
|
56
|
-
_SimpleITK_present = True
|
|
57
|
-
try:
|
|
58
|
-
import SimpleITK as sitk
|
|
59
|
-
except:
|
|
60
|
-
_SimpleITK_present = False
|
|
61
|
-
|
|
62
|
-
|
|
63
|
-
if _SimpleITK_present == False:
|
|
64
|
-
print('SimpleITK not installed; Registration Functions for Image Stacks not available')
|
|
65
|
-
|
|
66
|
-
|
|
67
|
-
|
|
68
|
-
def plot_image2(image_tags, fig, axes):
    """Plot an image with a colorbar plus an interactive line-profile panel.

    Missing display parameters ('color_map', 'origin', 'extent',
    'minimum_intensity', 'maximum_intensity') are filled into `image_tags`
    with defaults.  A RectangleSelector on the image axis updates the line
    profile in the second axis.

    Parameters
    ----------
    image_tags : dict
        Must contain 'plotimage' (2D numpy array); optionally 'FOV' (nm)
        and the display keys above.
    fig : matplotlib figure
    axes : sequence of two matplotlib axes (image, profile)

    Returns
    -------
    (RectangleSelector, fig) -- the selector must be kept alive by the caller.
    """
    if 'color_map' not in image_tags:
        image_tags['color_map'] = 'gray'
    color_map = image_tags['color_map']
    if 'origin' not in image_tags:
        image_tags['origin'] = 'upper'
    origin = image_tags['origin']
    if 'extent' not in image_tags:
        if 'FOV' in image_tags:
            fov = image_tags['FOV']
            image_tags['extent'] = (0, fov, fov, 0)
        else:
            image_tags['extent'] = (0, 1, 1, 0)
    extent = image_tags['extent']
    if 'minimum_intensity' not in image_tags:
        image_tags['minimum_intensity'] = image_tags['plotimage'].min()
    minimum_intensity = image_tags['minimum_intensity']
    if 'maximum_intensity' not in image_tags:
        image_tags['maximum_intensity'] = image_tags['plotimage'].max()
    maximum_intensity = image_tags['maximum_intensity']

    data = image_tags['plotimage']
    # pixel size derived from the extent; bug fix: the callback previously
    # referenced undefined globals `out_tags` and `data`.
    pixel_size = (extent[1] - extent[0]) / data.shape[0]

    ax1 = axes[0]
    # bug fix: honor the 'origin' setting instead of hard-coding 'upper'
    ims = ax1.imshow(data, cmap=color_map, origin=origin, extent=extent,
                     vmin=minimum_intensity, vmax=maximum_intensity)
    plt.xlabel('distance [nm]')
    plt.colorbar(ims)

    ax2 = axes[1]
    line_plot, = ax2.plot([], [])

    def line_select_callback(eclick, erelease):
        # Extract an intensity profile along the diagonal of the selection.
        x0, y0 = eclick.xdata / pixel_size, eclick.ydata / pixel_size
        x1, y1 = erelease.xdata / pixel_size, erelease.ydata / pixel_size
        length_plot = np.sqrt((x1 - x0) ** 2 + (y1 - y0) ** 2)

        # bug fix: np.linspace needs an integer sample count
        num = max(int(round(length_plot)), 2)
        x, y = np.linspace(x0, x1, num), np.linspace(y0, y1, num)

        # Values along the line via spline interpolation.
        zi2 = ndimage.map_coordinates(data, np.vstack((x, y)))
        x_axis = np.linspace(0, length_plot, len(zi2)) * pixel_size
        line_plot.set_xdata(x_axis)
        line_plot.set_ydata(zi2)
        ax2.set_xlim(0, x_axis.max())
        ax2.set_ylim(zi2.min(), zi2.max())
        # bug fix: Axes.draw() needs a renderer; redraw through the canvas
        fig.canvas.draw_idle()
        return line_plot

    RS = RectangleSelector(ax1, line_select_callback,
                           drawtype='line', useblit=False,
                           button=[1, 3],  # don't use middle button
                           minspanx=5, minspany=5,
                           spancoords='pixels',
                           interactive=True)
    plt.show()
    return RS, fig
|
|
130
|
-
|
|
131
|
-
|
|
132
|
-
|
|
133
|
-
|
|
134
|
-
def histogram_plot(image_tags):
    """Show an interactive histogram of the image intensities.

    A SpanSelector on the histogram sets new display limits
    ('minimum_intensity' / 'maximum_intensity' written back into
    `image_tags`); a right-click on the colorbar cycles the color map,
    any other click resets the limits to the full data range.

    Parameters
    ----------
    image_tags : dict with 'plotimage' (2D numpy array); optionally
        'minimum_intensity', 'maximum_intensity', 'color_map'.

    Returns
    -------
    span : the SpanSelector (must be kept alive by the caller).
    """
    nbins = 75
    minbin = 0.
    maxbin = 1.
    color_map_list = ['gray','viridis','jet','hot']

    # Fill in missing display limits from the data range.
    if 'minimum_intensity' not in image_tags:
        image_tags['minimum_intensity'] = image_tags['plotimage'].min()
    minimum_intensity = image_tags['minimum_intensity']
    if 'maximum_intensity' not in image_tags:
        image_tags['maximum_intensity'] = image_tags['plotimage'].max()
    data = image_tags['plotimage']
    vmin = image_tags['minimum_intensity']
    vmax = image_tags['maximum_intensity']
    if 'color_map' not in image_tags:
        image_tags['color_map'] = color_map_list[0]
    cmap = plt.cm.get_cmap(image_tags['color_map'])

    # One color per histogram bar so the bars mirror the color map.
    colors = cmap(np.linspace(0.,1.,nbins))

    norm2 = mpl.colors.Normalize(vmin=vmin, vmax=vmax)
    hist, bin_edges = np.histogram(data, np.linspace(vmin,vmax,nbins),density=True)

    width = bin_edges[1]-bin_edges[0]

    def onselect(vmin, vmax):
        # Re-bin the histogram for the selected intensity range and restyle
        # the existing bar artists in place.
        ax1.clear()
        cmap = plt.cm.get_cmap(image_tags['color_map'])

        colors = cmap(np.linspace(0.,1.,nbins))

        norm2 = mpl.colors.Normalize(vmin=vmin, vmax=vmax)
        hist2, bin_edges2 = np.histogram(data, np.linspace(vmin,vmax,nbins),density=True)

        width2 = (bin_edges2[1]-bin_edges2[0])

        for i in range(nbins-1):
            histogram[i].xy=(bin_edges2[i],0)
            histogram[i].set_height(hist2[i])
            histogram[i].set_width(width2)
            histogram[i].set_facecolor(colors[i])
        ax.set_xlim(vmin,vmax)
        ax.set_ylim(0,hist2.max()*1.01)

        # Redraw the colorbar with the new normalization.
        cb1 = mpl.colorbar.ColorbarBase(ax1, cmap=cmap,norm = norm2,orientation='horizontal')

        # Persist the new display limits for other plot functions.
        image_tags['minimum_intensity']= vmin
        image_tags['maximum_intensity']= vmax

    def onclick(event):
        # NOTE(review): the global `event2` looks like leftover debugging state.
        global event2
        event2 = event
        print('%s click: button=%d, x=%d, y=%d, xdata=%f, ydata=%f' %
              ('double' if event.dblclick else 'single', event.button,
               event.x, event.y, event.xdata, event.ydata))
        if event.inaxes == ax1:
            if event.button == 3:
                # Right-click on the colorbar: cycle to the next color map.
                ind = color_map_list.index(image_tags['color_map'])+1
                if ind == len(color_map_list):
                    ind = 0
                image_tags['color_map']= color_map_list[ind]#'viridis'
                vmin = image_tags['minimum_intensity']
                vmax = image_tags['maximum_intensity']
            else:
                # Any other click: reset limits to the full data range.
                vmax = data.max()
                vmin = data.min()
            onselect(vmin,vmax)

    fig2 = plt.figure()

    # Top axis: histogram; bottom strip: colorbar.
    ax = fig2.add_axes([0., 0.2, 0.9, 0.7])
    ax1 = fig2.add_axes([0., 0.15, 0.9, 0.05])

    histogram = ax.bar(bin_edges[0:-1], hist, width=width, color=colors, edgecolor = 'black',alpha=0.8)
    onselect(vmin,vmax)
    cb1 = mpl.colorbar.ColorbarBase(ax1, cmap=cmap,norm = norm2,orientation='horizontal')

    rectprops = dict(facecolor='blue', alpha=0.5)

    span = mwidgets.SpanSelector(ax, onselect, 'horizontal',
                                 rectprops=rectprops)

    cid = fig2.canvas.mpl_connect('button_press_event', onclick)
    return span
|
|
220
|
-
|
|
221
|
-
def Fourier_Transform(current_channel):
    """
    Reads information into dictionary 'tags', performs 'FFT', and provides a smoothed FT and reciprocal and intensity
    limits for visualization. All information is stored in the 'fft' sub-dictionary of tags.

    Input
    -----
    current_channel: data group of pyUSID file

    Usage
    -----

    tags = Fourier_Transform(current_channel)
    fft = tags['fft']
    fig = plt.figure()
    plt.imshow(np.log2(1+ 0.5*fft['magnitude_smooth']).T, extent=fft['extend'], origin = 'upper',
               vmin=fft['minimum_intensity'], vmax=fft['maximum_intensity'])
    plt.xlabel('spatial frequency [1/nm]');

    """
    tags = ft.get_dictionary_from_pyUSID(current_channel)

    # bug fix: the original had a bare `sizeX` expression (NameError) and
    # never defined sizeX/sizeY/rec_scale_x used further down.
    # assumes tags['data'] is a 2D image -- consistent with the fft2 below.
    sizeX, sizeY = tags['data'].shape

    image = tags['data'] - tags['data'].min()
    fft_mag = np.abs(np.fft.fftshift(np.fft.fft2(image)))

    tags['fft'] = {}
    fft = tags['fft']
    fft['magnitude'] = fft_mag

    # pixel size in reciprocal space
    rec_scale_x = 1 / tags['FOV_x']
    rec_scale_y = 1 / tags['FOV_y']
    fft['spatial_scale_x'] = rec_scale_x
    fft['spatial_scale_y'] = rec_scale_y
    fft['spatial_offset_x'] = -rec_scale_x * sizeX / 2.
    fft['spatial_offset_y'] = -rec_scale_y * sizeY / 2.

    # Field of View (FOV) in reciprocal space; please note: rec_FOV_x = 1/(scaleX*2)
    fft['rec_FOV_x'] = rec_scale_x * sizeX / 2.
    fft['rec_FOV_y'] = rec_scale_y * sizeY / 2.

    fft['extend'] = (fft['spatial_offset_x'], -fft['spatial_offset_x'],
                     -fft['rec_FOV_y'], fft['rec_FOV_y'])

    # We need some smoothing (here with a Gaussian)
    smoothing = 3
    fft_mag2 = ndimage.gaussian_filter(fft_mag, sigma=(smoothing, smoothing), order=0)

    # Prepare an annular mask (2 to 10 1/nm) to derive robust intensity limits.
    pixels = (np.linspace(0, image.shape[0] - 1, image.shape[0]) - image.shape[0] / 2) * rec_scale_x
    x, y = np.meshgrid(pixels, pixels)
    mask = np.zeros(image.shape)

    mask_spot = x ** 2 + y ** 2 > 2 ** 2
    mask = mask + mask_spot
    mask_spot = x ** 2 + y ** 2 < 10 ** 2
    mask = mask + mask_spot

    mask[np.where(mask == 1)] = 0  # just in case of overlapping disks

    fft['magnitude_smooth'] = fft_mag2
    # Display limits from the annulus (mask == 2) with a 5% margin.
    fft['minimum_intensity'] = np.log2(1 + fft_mag2)[np.where(mask == 2)].min() * 0.95
    fft['maximum_intensity'] = np.log2(1 + fft_mag2)[np.where(mask == 2)].max() * 1.05

    return tags
|
|
290
|
-
|
|
291
|
-
|
|
292
|
-
|
|
293
|
-
def find_atoms(im, tags, verbose = False):
    """Find atom positions in an image with a Laplacian-of-Gaussian blob detector.

    The image is first denoised with clean_svd; blobs larger than
    rel_blob_size * patch_size are accepted as atoms, and atoms closer than
    rim_size * source_size to the image edge are split off into 'rim_atoms'.

    Parameters
    ----------
    im : 2D numpy array image
    tags : dict; must contain 'pixel_size'.  Missing analysis parameters are
        filled with defaults, and all results are written back into this dict
        ('atoms', 'mid_atoms', 'rim_atoms', 'reduced_image', ...).
    verbose : print a summary when True

    Returns
    -------
    tags : the same dict, updated in place
    """
    from skimage.feature import blob_log #blob_dog, blob_doh
    # Fill in default analysis parameters if none were supplied.
    if 'rel_blob_size' not in tags:
        tags['rel_blob_size'] = .4 # between 0 and 1, normally around 0.5
        tags['source_size'] = 0.06 #in nm gives the size of the atoms or resolution
        tags['nearest_neighbours'] = 7 # up to this number nearest neighbours are evaluated (normally 7)
        tags['threshold'] = .15 # between 0.01 and 0.1
        tags['rim_size'] = 2# size of rim in multiples of source size

    rel_blob_size = tags['rel_blob_size'] # between 0 and 1, normally around 0.5
    source_size = tags['source_size'] #in nm gives the size of the atoms
    nearest_neighbours = tags['nearest_neighbours'] # up to this number nearest neighbours are evaluated (normally 7)
    threshold = tags['threshold'] # between 0.01 and 0.1
    rim_size = tags['rim_size'] # size of rim in multiples of resolution
    pixel_size = tags['pixel_size']

    # Rim width in pixels.
    rim_width = rim_size*source_size/pixel_size

    ## Get a noise free image: reduced
    #pixel_size = FOV/im.shape[0]
    reduced_image = clean_svd(im,pixel_size=pixel_size,source_size=source_size)

    # Normalize the denoised image to [0, 1].
    reduced_image = reduced_image-reduced_image.min()
    reduced_image = reduced_image/reduced_image.max()

    tags['reduced_image'] = reduced_image
    # clean_svd shrinks the image by the patch size it used for denoising.
    patch_size = im.shape[0]-reduced_image.shape[0]
    tags['patch_size'] = patch_size
    print(f' Use {patch_size} x {patch_size} pixels for image-patch of atoms')

    # Find atoms
    thresh = reduced_image.std()*threshold
    blobs = blob_log(np.array(reduced_image), max_sigma=source_size/pixel_size, threshold=thresh)
    plot_image = im[int(patch_size/2):,int(patch_size/2):]

    atoms = []
    from skimage.feature import blob_log
    for blob in blobs:
        y, x, r = blob
        if r > patch_size*rel_blob_size:
            # Shift positions back to the coordinate frame of the full image.
            atoms.append([x+patch_size/2,y+patch_size/2,r])

    rim_atoms = []

    # Atoms too close to the image edge cannot be evaluated reliably.
    for i in range(len(atoms)):
        # NOTE(review): the second test compares the full [x, y, r] triple
        # (radius included) against the rim limit -- possibly meant
        # atoms[i][0:2] like the first test; confirm intent.
        if (np.array(atoms[i][0:2])<rim_width).any() or (np.array(atoms[i]) > im.shape[0]-rim_width-5).any():
            rim_atoms.append(i)
    rim_atoms=np.unique(rim_atoms)
    mid_atoms_list = np.setdiff1d(np.arange(len(atoms)),rim_atoms)

    mid_atoms = np.array(atoms)[mid_atoms_list]
    if verbose:
        print(f'Evaluated {len(mid_atoms)} atom positions, out of {len(atoms)} atoms')
    tags['atoms'] = atoms
    tags['mid_atoms'] = mid_atoms
    tags['rim_atoms'] = rim_atoms
    tags['number_of_atoms'] = len(atoms)
    tags['number_of_evaluated_atoms' ]= len(mid_atoms)
    return tags
|
|
352
|
-
|
|
353
|
-
def atoms_clustering(atoms, mid_atoms, number_of_clusters = 3, nearest_neighbours = 7):
    """Cluster atom sites by their nearest-neighbour distance signature.

    Parameters
    ----------
    atoms : sequence of [x, y, ...] positions of all detected atoms
    mid_atoms : sequence of [x, y, ...] positions to be classified
    number_of_clusters : number of k-means clusters
    nearest_neighbours : how many neighbours form each distance vector

    Returns
    -------
    (clusters, distances, indices) : cluster label per mid_atom, the
    neighbour-distance matrix, and the neighbour indices into `atoms`.
    """
    # Distance of every evaluated atom to its nearest neighbours.
    tree = cKDTree(np.array(atoms)[:, 0:2])
    distances, indices = tree.query(np.array(mid_atoms)[:, 0:2], nearest_neighbours)

    # K-means on the distance vectors; random_state fixed for reproducibility.
    model = KMeans(n_clusters=number_of_clusters, random_state=0)
    clusters = model.fit(distances).predict(distances)
    return clusters, distances, indices
|
|
365
|
-
|
|
366
|
-
|
|
367
|
-
def voronoi(atoms,tags):
    """Compute the Voronoi tessellation of the atom positions.

    Vertices and regions touching the image rim are separated out; all
    results are stored under tags['atoms'][...].  Nothing is returned --
    the function works purely by mutating `tags`.

    NOTE(review): tags['atoms'] must be dict-like for the assignments at the
    bottom, but find_atoms stores a plain list under that key -- confirm the
    expected input structure.
    """
    im = tags['image']
    vor = Voronoi(np.array(atoms)[:,0:2])# Plot it:
    # Vertices outside the image or within 5 px of its far edge count as rim.
    rim_vertices = []
    for i in range(len(vor.vertices)):

        if (vor.vertices[i,0:2]<0).any() or (vor.vertices[i,0:2] > im.shape[0]-5).any():
            rim_vertices.append(i)
    rim_vertices=set(rim_vertices)
    mid_vertices = list(set(np.arange(len(vor.vertices))).difference(rim_vertices))

    # Keep only polygons made entirely of interior vertices.
    mid_regions = []
    for region in vor.regions: #Check all Voronoi polygons
        if all(x in mid_vertices for x in region) and len(region)>1: # we get a lot of rim (-1) and empty regions
            mid_regions.append(region)
    tags['atoms']['voronoi']=vor
    tags['atoms']['voronoi_vertices']=vor.vertices
    tags['atoms']['voronoi_regions'] = vor.regions
    tags['atoms']['voronoi_midVerticesIndices']=mid_vertices
    tags['atoms']['voronoi_midVertices']=vor.vertices[mid_vertices]
    tags['atoms']['voronoi_midRegions'] = mid_regions
|
|
388
|
-
|
|
389
|
-
|
|
390
|
-
|
|
391
|
-
|
|
392
|
-
def clean_svd(im, pixel_size=1, source_size=5):
    """Denoise an image with a patch-based singular value decomposition.

    Patches of roughly the source size are extracted; the first left singular
    vector of the patch matrix, reshaped to image form and rescaled to the
    input's total intensity, is returned.  The result is smaller than the
    input by the patch size.

    Parameters
    ----------
    im : 2D numpy array image
    pixel_size : pixel size (same units as source_size)
    source_size : size of the atoms / resolution

    Returns
    -------
    reduced_image : denoised 2D numpy array
    """
    patch_size = int(source_size / pixel_size)
    if patch_size < 3:
        patch_size = 3  # extract_patches_2d needs a sensible minimum patch
    # bug fix: removed leftover debug `print(patch_size)` -- find_atoms
    # already reports the patch size to the user.

    patches = image.extract_patches_2d(im, (patch_size, patch_size))
    patches = patches.reshape(patches.shape[0], patches.shape[1] * patches.shape[2])

    num_components = 32
    u, s, v = randomized_svd(patches, num_components)
    u_im_size = int(np.sqrt(u.shape[0]))
    # First left singular vector carries the dominant (noise-free) structure.
    reduced_image = u[:, 0].reshape(u_im_size, u_im_size)
    # Rescale so the total intensity matches the input image.
    reduced_image = reduced_image / reduced_image.sum() * im.sum()
    return reduced_image
|
|
408
|
-
|
|
409
|
-
def rebin(im, binning=2):
    """Bin a 2D image by averaging blocks of `binning` x `binning` pixels.

    Parameters
    ----------
    im : 2D numpy array
    binning : block size in pixels along each axis

    Returns
    -------
    The binned image; a non-2D input is returned unchanged (with a warning).
    """
    if len(im.shape) != 2:
        print('not a 2D image')
        return im
    rows = im.shape[0] // binning
    cols = im.shape[1] // binning
    blocks = im.reshape((rows, binning, cols, binning))
    return blocks.mean(axis=3).mean(1)
|
|
426
|
-
|
|
427
|
-
|
|
428
|
-
|
|
429
|
-
def power_spectrum(channel):
|
|
430
|
-
"""
|
|
431
|
-
Calculate power spectrum
|
|
432
|
-
|
|
433
|
-
Input:
|
|
434
|
-
======
|
|
435
|
-
channel: channnel in h5f file with image content
|
|
436
|
-
|
|
437
|
-
Output:
|
|
438
|
-
=======
|
|
439
|
-
tags: dictionary with
|
|
440
|
-
['data']: fourier transformed image
|
|
441
|
-
['axis']: scale of reciprocal image
|
|
442
|
-
['power_spectrum']: power_spectrum
|
|
443
|
-
['FOV']: field of view for extent parameter in plotting
|
|
444
|
-
['minimum_intensity']: suggested minimum intensity for plotting
|
|
445
|
-
['maximum_intensity']: suggested maximum intensity for plotting
|
|
446
|
-
|
|
447
|
-
"""
|
|
448
|
-
## fft
|
|
449
|
-
data = channel['data'][()]
|
|
450
|
-
image = data- data.min()
|
|
451
|
-
fft_mag = (np.abs((np.fft.fftshift(np.fft.fft2(image)))))
|
|
452
|
-
|
|
453
|
-
out_tags = {}
|
|
454
|
-
out_tags['power_spectrum'] = fft_mag
|
|
455
|
-
|
|
456
|
-
sizeX = channel['spatial_size_x'][()]
|
|
457
|
-
sizeY = channel['spatial_size_y'][()]
|
|
458
|
-
scaleX = channel['spatial_scale_x'][()]
|
|
459
|
-
scaleY = channel['spatial_scale_y'][()]
|
|
460
|
-
basename = channel['title'][()]
|
|
461
|
-
|
|
462
|
-
FOV_x = sizeX*scaleX
|
|
463
|
-
FOV_y = sizeY*scaleY
|
|
464
|
-
|
|
465
|
-
## pixel_size in recipical space
|
|
466
|
-
rec_scale_x = 1/FOV_x
|
|
467
|
-
rec_scale_y = 1/FOV_y
|
|
468
|
-
|
|
469
|
-
## Field of View (FOV) in recipical space please note: rec_FOV_x = 1/(scaleX*2)
|
|
470
|
-
rec_FOV_x = rec_scale_x * sizeX /2.
|
|
471
|
-
rec_FOV_y = rec_scale_y * sizeY /2.
|
|
472
|
-
|
|
473
|
-
## Field ofView (FOV) in recipical space
|
|
474
|
-
rec_extent = (-rec_FOV_x,rec_FOV_x,rec_FOV_y,-rec_FOV_y)
|
|
475
|
-
|
|
476
|
-
|
|
477
|
-
out_tags['spatial_size_x']=sizeX
|
|
478
|
-
out_tags['spatial_size_y']=sizeY
|
|
479
|
-
out_tags['spatial_scale_x']=rec_scale_x
|
|
480
|
-
out_tags['spatial_scale_y']=rec_scale_y
|
|
481
|
-
out_tags['spatial_origin_x']=sizeX/2.
|
|
482
|
-
out_tags['spatial_origin_y']=sizeY/2.
|
|
483
|
-
out_tags['title']=out_tags['basename']=basename
|
|
484
|
-
out_tags['FOV_x']=rec_FOV_x
|
|
485
|
-
out_tags['FOV_y']=rec_FOV_y
|
|
486
|
-
out_tags['extent']=rec_extent
|
|
487
|
-
out_tags['spatial_unit'] = '1/nm'
|
|
488
|
-
|
|
489
|
-
|
|
490
|
-
# We need some smoothing (here with a Gaussian)
|
|
491
|
-
smoothing = 3
|
|
492
|
-
fft_mag2 = ndimage.gaussian_filter(fft_mag, sigma=(smoothing, smoothing), order=0)
|
|
493
|
-
#fft_mag2 = np.log2(1+fft_mag2)
|
|
494
|
-
|
|
495
|
-
out_tags['data'] = out_tags['Magnitude_smoothed']=fft_mag2
|
|
496
|
-
|
|
497
|
-
#prepare mask
|
|
498
|
-
pixelsy = (np.linspace(0,image.shape[0]-1,image.shape[0])-image.shape[0]/2)* rec_scale_x
|
|
499
|
-
pixelsx = (np.linspace(0,image.shape[1]-1,image.shape[1])-image.shape[1]/2)* rec_scale_y
|
|
500
|
-
x,y = np.meshgrid(pixelsx,pixelsy);
|
|
501
|
-
mask = np.zeros(image.shape)
|
|
502
|
-
|
|
503
|
-
mask_spot = x**2+y**2 > 1**2
|
|
504
|
-
mask = mask + mask_spot
|
|
505
|
-
mask_spot = x**2+y**2 < 11**2
|
|
506
|
-
mask = mask + mask_spot
|
|
507
|
-
|
|
508
|
-
mask[np.where(mask==1)]=0 # just in case of overlapping disks
|
|
509
|
-
|
|
510
|
-
minimum_intensity = np.log2(1+fft_mag2)[np.where(mask==2)].min()*0.95
|
|
511
|
-
maximum_intensity = np.log2(1+fft_mag2)[np.where(mask==2)].max()*1.05
|
|
512
|
-
out_tags['minimum_intensity']=minimum_intensity
|
|
513
|
-
out_tags['maximum_intensity']=maximum_intensity
|
|
514
|
-
|
|
515
|
-
return out_tags
|
|
516
|
-
|
|
517
|
-
def diffractogram_spots(fft_tags, spot_threshold):
    """
    Find spots in a diffractogram and sort them by distance from the center.

    Input:
    ======
    fft_tags: dictionary with
        ['spatial_***']: scale information of the Fourier pattern
        ['data']: power_spectrum
    spot_threshold: threshold for the blob finder

    Output:
    =======
    spots: numpy array, sorted by radius; columns are (x, y, angle)
    """
    # Conversion from pixel to reciprocal space.  x and y are swapped because
    # numpy indexes (row, col) while the scales are given as (x, y).
    center = np.array([int(fft_tags['spatial_origin_y']), int(fft_tags['spatial_origin_x']), 1])
    rec_scale = np.array([fft_tags['spatial_scale_y'], fft_tags['spatial_scale_x'], 1])

    # Blob detection on the normalized pattern (no symmetry is assumed here).
    pattern = fft_tags['data'].T
    pattern = (pattern - pattern.min()) / pattern.max()
    spots_random = (blob_log(pattern, max_sigma=5, threshold=spot_threshold) - center) * rec_scale
    print(f'Found {spots_random.shape[0]} reflections')

    # Sort reflections by their distance from the center.
    spots_random[:, 2] = np.linalg.norm(spots_random[:, 0:2], axis=1)
    spots = spots_random[np.argsort(spots_random[:, 2])]
    # Third column now carries the azimuthal angle of each spot.
    spots[:, 2] = np.arctan2(spots[:, 0], spots[:, 1])
    return spots
|
|
550
|
-
|
|
551
|
-
def adaptive_Fourier_filter(image, tags, low_pass = 3, reflection_radius = 0.3):
    """
    Use spots in diffractogram for a Fourier Filter.

    Input:
    ======
    image: image to be filtered
    tags: dictionary with
        ['spatial_***']: information of scale of fourier pattern
        ['spots']: sorted spots in diffractogram in 1/nm
    low_pass: radius of the low-pass region in the center of the
        diffractogram (1/nm)
    reflection_radius: radius of the pass-disk around each spot (1/nm)

    Output:
    =======
    Fourier filtered image (real part, same shape as input)
    """
    # Prepare the mask in reciprocal-space coordinates.
    pixelsy = (np.linspace(0, image.shape[0] - 1, image.shape[0]) - image.shape[0] / 2) * tags['spatial_scale_x']
    pixelsx = (np.linspace(0, image.shape[1] - 1, image.shape[1]) - image.shape[1] / 2) * tags['spatial_scale_y']
    x, y = np.meshgrid(pixelsx, pixelsy)
    mask = np.zeros(image.shape)

    # Pass-disks around each reflection.
    # bug fix: the original read `fft_tags['spots']` but `fft_tags` is not
    # defined in this scope -- the spots arrive through the `tags` argument.
    spots = tags['spots']
    for spot in spots:
        mask_spot = (x - spot[0]) ** 2 + (y - spot[1]) ** 2 < reflection_radius ** 2  # make a spot
        mask = mask + mask_spot  # add spot to mask

    # Pass the low frequencies too (overall intensity variations).
    mask_spot = x ** 2 + y ** 2 < low_pass ** 2
    mask = mask + mask_spot
    mask[np.where(mask > 1)] = 1  # clip overlapping disks to a binary mask

    fft_filtered = np.fft.fftshift(np.fft.fft2(image)) * mask
    return np.fft.ifft2(np.fft.fftshift(fft_filtered)).real
|
|
590
|
-
|
|
591
|
-
|
|
592
|
-
|
|
593
|
-
def rotational_symmetry_diffractogram(spots):
    """Test the spot pattern for 2-, 3-, 4- and 6-fold rotational symmetry.

    The spots are rotated by 2*pi/n; if most rotated spots land on original
    spots (distance threshold 0.2), the pattern is called n-fold symmetric.

    Parameters
    ----------
    spots: numpy array, rows of (x, y, angle) as returned by diffractogram_spots

    Returns
    -------
    list of the symmetry orders found (subset of [2, 3, 4, 6])
    """
    found = []
    for fold in [2, 3, 4, 6]:
        theta = 2 * np.pi / fold
        c, s = np.cos(theta), np.sin(theta)
        rotation = np.array([[c, s, 0], [-s, c, 0], [0, 0, 1]])
        rotated = np.dot(spots, rotation)
        # All pairwise distances between rotated and original spots, ascending.
        separations = np.array(sorted(np.linalg.norm(p0 - p1)
                                      for p0, p1 in product(rotated[:, 0:2], spots[:, 0:2])))
        # Symmetric if the 70th-percentile-by-count separation is small.
        if separations[int(spots.shape[0] * .7)] < 0.2:
            found.append(fold)
    return found
|
|
607
|
-
|
|
608
|
-
def cart2pol(points):
    """Convert Cartesian points (rows of x, y, ...) to polar (rho, phi)."""
    xy = points[:, 0:2]
    rho = np.linalg.norm(xy, axis=1)
    phi = np.arctan2(points[:, 1], points[:, 0])
    return rho, phi
|
|
612
|
-
|
|
613
|
-
def pol2cart(rho, phi):
    """Convert polar coordinates (rho, phi) to Cartesian (x, y)."""
    return rho * np.cos(phi), rho * np.sin(phi)
|
|
617
|
-
|
|
618
|
-
|
|
619
|
-
def xy2polar(points, rounding = 1e-3):
    """
    Conversion from Cartesian to polar coordinates.

    The angles and distances are sorted by r and then phi; the index array of
    that sort is also returned.

    input points: numpy array, first two elements of axis 1 are x and y
    optional rounding: quantization of r (suppresses rounding-error ties)

    returns r, phi, sorted_indices
    """
    r, phi = cart2pol(points)

    phi = phi - phi.min()                  # only positive angles
    r = np.floor(r / rounding) * rounding  # remove rounding-error differences

    order = np.lexsort((phi, r))           # primary key r, secondary key phi
    return r[order], phi[order], order
|
|
643
|
-
|
|
644
|
-
|
|
645
|
-
|
|
646
|
-
|
|
647
|
-
def cartesian2polar(x, y, grid, r, t, order=3):
    """Resample `grid` (defined on Cartesian axes x, y) onto a polar grid.

    Parameters
    ----------
    x, y: 1D coordinate axes of `grid`
    r, t: 1D radius and angle samples of the target polar grid
    order: spline order for the interpolation

    Returns
    -------
    2D numpy array of shape (len(t), len(r)) with the resampled values.
    """
    radii, angles = np.meshgrid(r, t)
    cart_x = radii * np.cos(angles)
    cart_y = radii * np.sin(angles)

    # Map physical coordinates to fractional pixel indices.
    to_ix = interp1d(x, np.arange(len(x)))
    to_iy = interp1d(y, np.arange(len(y)))
    sample_ix = to_ix(cart_x.ravel())
    sample_iy = to_iy(cart_y.ravel())

    # Spline interpolation of the grid at the polar sample points.
    resampled = ndimage.map_coordinates(grid, np.array([sample_ix, sample_iy]), order=order)
    return resampled.reshape(cart_x.shape)
|
|
663
|
-
|
|
664
|
-
def warp(diff, center):
    """Unwrap a diffraction pattern into polar coordinates around `center`.

    Parameters
    ----------
    diff: 2D (possibly complex) diffraction pattern
    center: (row, col) position of the pattern center

    Returns
    -------
    2D numpy array: |diff| resampled on an (r, theta) grid, theta in [0, pi).
    """
    # Cartesian axes of the input, centered on the given spot.
    n_rows = diff.shape[0]
    n_cols = diff.shape[1]
    x = np.linspace(1, n_rows, n_rows, endpoint=True) - center[1]
    y = np.linspace(1, n_cols, n_cols, endpoint=True) - center[0]
    z = np.abs(diff)

    # Target polar grid: largest radius fully inside the image, 3 samples/degree.
    nr = min([center[0], center[1], diff.shape[0] - center[0], diff.shape[1] - center[1]]) - 1
    nt = 360 * 3
    r = np.linspace(1, nr, nr)
    t = np.linspace(0., np.pi, nt, endpoint=False)
    return cartesian2polar(x, y, z, r, t, order=3).T
|
|
683
|
-
|
|
684
|
-
def calculateCTF(waveLength, Cs, defocus, k):
    """Calculate the Contrast Transfer Function.

    Everything in nm; k is the spatial frequency (1/nm).
    """
    # Aberration phase: defocus term plus spherical-aberration term.
    chi = np.pi * defocus * waveLength * k ** 2 + 0.5 * np.pi * Cs * waveLength ** 3 * k ** 4
    return np.sin(chi)
|
|
690
|
-
|
|
691
|
-
def calculateScherzer(waveLength, Cs):
    """Calculate the Scherzer defocus.

    Input and output in consistent length units (the module uses nm;
    NOTE(review): the original comments contradicted each other on whether
    Cs is in mm -- confirm the caller's convention).  Negative sign means
    underfocus.
    """
    return -1.155 * (Cs * waveLength) ** 0.5
|
|
698
|
-
|
|
699
|
-
def calibrate_imageScale(fft_tags, spots_reference, spots_experiment):
    """Determine correction factors (dgx, dgy) for the reciprocal image scale.

    Experimental reflections close (within 0.1 1/nm) to the innermost
    reference reflection are selected, and a least-squares fit finds the
    scale factors that map their radii onto the reference radius.

    Parameters
    ----------
    fft_tags : dict -- kept for interface compatibility (not used;
        the original read gx/gy from it into unused locals)
    spots_reference : (n, 2+) array of reference reflections in 1/nm
    spots_experiment : (m, 2+) array of measured reflections in 1/nm

    Returns
    -------
    dg : numpy array [dgx, dgy] of scale correction factors
    """
    dist_reference = np.linalg.norm(spots_reference, axis=1)
    distance_experiment = np.linalg.norm(spots_experiment, axis=1)

    # Experimental reflections matching the shortest reference distance.
    first_reflections = abs(distance_experiment - dist_reference.min()) < .1
    print('Evaluate ', first_reflections.sum(), 'reflections')
    closest_exp_reflections = spots_experiment[first_reflections]

    import scipy.optimize as optimization

    def func(params, xdata, ydata):
        # Residual: scaled radius minus the reference radius.
        dgx, dgy = params
        return np.sqrt((xdata * dgx) ** 2 + (ydata * dgy) ** 2) - dist_reference.min()

    x0 = [1.001, 0.999]
    dg, sig = optimization.leastsq(func, x0, args=(closest_exp_reflections[:, 0],
                                                   closest_exp_reflections[:, 1]))
    return dg
|
|
718
|
-
|
|
719
|
-
def align_crystal_reflections(spots,crystals):
    """Match experimental diffraction spots to allowed reflections of crystals.

    For each candidate crystal a rotation angle is estimated from the
    unclaimed experimental spot closest in length to the shortest allowed
    reflection; spots within distance 1.5 of an allowed reflection are then
    claimed (masked out) for subsequent crystals.

    Parameters
    ----------
    spots: numpy array (n, >=2)
        experimental spot positions (Cartesian)
    crystals: list of dict
        each must provide tags['allowed']['g']; gains a
        tags['allowed']['g_rotated'] entry as a side effect

    Returns
    -------
    crystal_reflections_polar: list of [r, angle - phi, indices] per crystal
    angles: list of per-crystal rotation angles
    mask: boolean array, True for spots NOT assigned to any crystal

    Depends on the sibling helpers cart2pol, xy2polar and pol2cart.
    """
    crystal_reflections_polar=[]
    angles = []
    # True = experimental spot not yet claimed by any crystal
    mask = np.ones(spots.shape[0], dtype=bool)
    exp_r, exp_phi = cart2pol(spots) # just in polar coordinates
    # NOTE(review): spots_polar is assigned but never used below
    spots_polar= np.array([exp_r, exp_phi])
    number_spots_remain = len(mask)

    for i in range(len(crystals)):
        tags = crystals[i]
        r,phi,indices = xy2polar(tags['allowed']['g']) #sorted by r and phi , only positive angles
        ## we mask the experimental values that are found already
        angle = 0.
        if mask.sum()>1:
            # spot (among unclaimed ones) closest in radius to the shortest
            # allowed reflection determines the rotation angle
            angleI = np.argmin(np.abs((exp_r[mask])-r[0]) )
            angle = (exp_phi[mask])[angleI] - phi[0]
        angles.append(angle) ## Determine rotation angle
        crystal_reflections_polar.append([r, angle - phi, indices])
        tags['allowed']['g_rotated'] = pol2cart(r, angle + phi)
        # claim each experimental spot lying within 1.5 of an allowed
        # reflection (units depend on caller's coordinates — TODO confirm)
        for spot in tags['allowed']['g']:
            dif = np.linalg.norm(spots[:,0:2]-spot[0:2],axis=1)
            #print(dif.min())
            if dif.min() < 1.5:
                ind = np.argmin(dif)
                if mask[ind]:
                    mask[ind] = 0

        print(f'found {(number_spots_remain-mask.sum()):.0f} refletions in crystal {i}')
        number_spots_remain -= (number_spots_remain-mask.sum())
    print(mask.sum())

    return crystal_reflections_polar, angles, mask
|
|
751
|
-
|
|
752
|
-
|
|
753
|
-
|
|
754
|
-
def plot_image(tags):
    """Display the image in tags['data'] with a calibrated field of view.

    Calibration source, in order of precedence:
      1. tags['axis']['0'] ('scale' and 'units'),
      2. tags['pixel_size'] (units then assumed nm),
      3. fallback to 1 px.
    Shows the figure directly; returns nothing.
    """
    if 'axis' in tags:
        pixel_size = tags['axis']['0']['scale']
        units = tags['axis']['0']['units']
    elif 'pixel_size' not in tags:
        # no calibration available: plain pixel units
        pixel_size = 1
        units = 'px'
    else:
        pixel_size = tags['pixel_size']
        units = 'nm'

    # NOTE(review): 'units' is determined but never used below — presumably
    # intended for an axis label; confirm.
    image = tags['data'].T
    # field of view; assumes a square image (uses shape[0] for both axes)
    FOV = pixel_size*image.shape[0]
    plt.imshow(image, cmap='gray', extent=(0,FOV,0,FOV))
    if 'basename' in tags:
        plt.title(tags['basename'])

    plt.show()
|
|
773
|
-
|
|
774
|
-
def DemonReg(cube, verbose = False):
    """
    Diffeomorphic Demon Non-Rigid Registration

    Usage:
    ------
    DemReg = DemonReg(cube, verbose = False)

    Input:
        cube: stack of images after rigid registration and cropping;
              3D numpy array, image index on the LAST axis
    Output:
        DemReg: stack of images with non-rigid registration applied

    Depends on:
        SimpleITK and numpy

    Please Cite: http://www.simpleitk.org/SimpleITK/project/parti.html
    and T. Vercauteren, X. Pennec, A. Perchant and N. Ayache
    Diffeomorphic Demons Using ITK's Finite Difference Solver Hierarchy
    The Insight Journal, http://hdl.handle.net/1926/510 2007
    """

    DemReg = np.zeros_like(cube)
    nimages = cube.shape[2]
    # create fixed (reference) image by averaging the rigidly registered stack

    fixed_np = np.sum(cube, axis=2)/float(nimages)

    fixed = sitk.GetImageFromArray(fixed_np)
    # smooth the reference to stabilize the demons forces
    fixed = sitk.DiscreteGaussian(fixed, 2.0)

    demons = sitk.SymmetricForcesDemonsRegistrationFilter()
    #demons = sitk.DiffeomorphicDemonsRegistrationFilter()

    demons.SetNumberOfIterations(200)
    demons.SetStandardDeviations(1.0)

    # resampler applies the computed displacement field to each image
    resampler = sitk.ResampleImageFilter()
    resampler.SetReferenceImage(fixed);
    resampler.SetInterpolator(sitk.sitkGaussian)
    resampler.SetDefaultPixelValue(0)

    for i in range(nimages):
        moving = sitk.GetImageFromArray(cube[:,:,i])
        # register against the SMOOTHED moving image but resample the original
        movingf = sitk.DiscreteGaussian(moving, 2.0)
        displacementField = demons.Execute(fixed,movingf)
        outTx = sitk.DisplacementFieldTransform( displacementField )
        resampler.SetTransform(outTx)
        out = resampler.Execute(moving)
        DemReg[:,:,i] = sitk.GetArrayFromImage(out)
        print('image ', i)

    print(':-)')
    print('You have succesfully completed Diffeomorphic Demons Registration')

    return DemReg
|
|
830
|
-
|
|
831
|
-
def dftRigReg(cube, verbose = False):
    """
    Implementation of sub-pixel rigid registration.

    usage:
        import image_tools as it
        it.dftRigReg(cube, verbose = False)

    input:
        stack of images as 3-dimensional numpy array with x, y as image axes
        (image index is the last axis).

    output:
        aligned stack
        drift: list of [x, y] sub-pixel shifts, one per image

    For copyright information use:
        from dftregistration import *
        help(dftregistration1)
    """

    #help(dftregistration1)
    # NOTE(review): 'verbose' is accepted but never used in this function.
    # we need a 3D array ...
    if len(cube.shape) !=3:
        print('Registration requires at least 2 images')
        return

    # ... with at least two images
    if cube.shape[2] <2:
        print('Registration requires at least 2 images')
        return
    nimages= cube.shape[2]
    RigReg = np.empty_like(cube)

    # select central image as fixed image
    icent = int(nimages/2)
    fixed = cube[:,:,icent]

    # determine maximum shifts
    xshift = []
    yshift = []
    drift = []

    # upsampling factor: shifts are resolved to 1/1000 of a pixel
    usfac = 1000
    for i in range(nimages) :
        moving = cube[:,:,i]
        # cross-correlate in Fourier space; Greg is the shifted moving image
        output, Greg = dftregistration1(np.fft.fft2(fixed),np.fft.fft2(moving),usfac)
        Greg= np.fft.ifft2(Greg)
        RigReg[:,:,i] = abs(Greg)
        # output[2]/output[3] are interpreted as y/x shifts — TODO confirm
        # against dftregistration1's documentation
        xshift.append(output[3])
        yshift.append(output[2])
        drift.append([output[3],output[2]])
        print('Image number', i,' xshift = ',xshift[-1],' y-shift =',yshift[-1])

    return RigReg, drift
|
|
883
|
-
|
|
884
|
-
def CropImage(drift, image_shape, verbose = False):
    """Determine the common rectangular area of a drift-corrected image stack.

    Images wrap around as they are shifted:
      - shifted right (x +ve): crop pixels from the left
      - shifted left  (x -ve): crop pixels from the right
      - shifted down  (y +ve): crop pixels from the top
      - shifted up    (y -ve): crop pixels from the bottom

    Usage:
    ------
    image_limits = CropImage(drift, image_shape, verbose=False)

    Input:
    -----
    drift: (nimages, 2) array-like of per-image [x, y] sample drift
    image_shape: shape of the image stack (x-size, y-size)

    Output:
    -------
    [xpmin, xpmax, ypmin, ypmax]: list of cropped-image boundaries

    Bug fix: the y-direction crop previously tested ``ixmin`` instead of
    ``iymin`` (copy-paste error), which could yield an out-of-range ypmax.
    """
    xmax = max(np.array(drift)[:, 0])
    xmin = min(np.array(drift)[:, 0])
    ymax = max(np.array(drift)[:, 1])
    ymin = min(np.array(drift)[:, 1])

    # Round away from zero so the crop always covers the full drift
    round_i = lambda x: (int(x+1), int(x-1))[x < 0]
    ixmin = round_i(xmin)
    ixmax = round_i(xmax)
    iymin = round_i(ymin)
    iymax = round_i(ymax)

    # Cropped area in the x-direction
    if ixmax < 0:
        xpmax = (image_shape[0]-1) + ixmin
        xpmin = 0
    else:
        xpmin = ixmax
        if ixmin < 0:
            xpmax = (image_shape[0]-1) + ixmin
        else:
            xpmax = (image_shape[0]-1)

    # Cropped area in the y-direction (fixed: test iymin, not ixmin)
    if iymax < 0:
        ypmax = (image_shape[1]-1) + iymin
        ypmin = 0
    else:
        ypmin = iymax
        if iymin < 0:
            ypmax = (image_shape[1]-1) + iymin
        else:
            ypmax = (image_shape[1]-1)

    if verbose:
        print()
        print('Cropped area ranges', xpmin, ':', xpmax, ' in the x-direction')
        print('Cropped area ranges', ypmin, ':', ypmax, ' in the y-direction')
        ixrange = xpmax-xpmin + 1
        iyrange = ypmax-ypmin + 1
        print('Which results in a cropped image', ixrange, ' pixels in the x direction and', iyrange, 'pixel in the y-direction')

    return [xpmin, xpmax, ypmin, ypmax]
|
|
947
|
-
|
|
948
|
-
def RigReg(cube, verbose = False):
    """**********************************************
    * RigReg rigid registration
    * Aligns the images in a stack (rigid registration) using SimpleITK
    * with a translation-only transform, mean-squares metric and a
    * regular-step gradient-descent optimizer.
    *
    * Input:  cube - 3D numpy array, image index on the last axis
    * Output: (aligned stack as float array, list of per-image offsets)
    **********************************************"""
    # We need an image stack
    if len(cube.shape) !=3:
        print('Registration requires at least 2 images')
        return

    if cube.shape[2] <2:
        print('Registration requires at least 2 images')
        return

    # Define center image as fixed
    fixedID = int(cube.shape[2]/2)
    fixed = sitk.GetImageFromArray(cube[:,:,fixedID], sitk.sitkFloat64)
    moving = sitk.GetImageFromArray(cube[:,:,0], sitk.sitkFloat64)

    # Setup registration: mean-squares metric, regular-step gradient descent,
    # translation-only transform, linear interpolation
    R = sitk.ImageRegistrationMethod()
    R.SetMetricAsMeanSquares()
    R.SetOptimizerAsRegularStepGradientDescent(4.0, .01, 200 )
    R.SetInitialTransform(sitk.TranslationTransform(fixed.GetDimension()))
    R.SetInterpolator(sitk.sitkLinear)

    resampler = sitk.ResampleImageFilter()
    resampler.SetReferenceImage(fixed)
    resampler.SetInterpolator(sitk.sitkLinear)
    resampler.SetDefaultPixelValue(100)
    outTx = R.Execute(fixed, moving)
    resampler.SetTransform(outTx)
    #regimg = fixed

    # Do image registration for every image against the fixed one.
    # NOTE(review): the resampler's transform is set once above from the
    # first registration; inside the loop outTx is recomputed but
    # resampler.SetTransform is NOT called again — verify this is intended.
    aligned = []
    drift =[]
    for i in range(cube.shape[2]):
        moving = sitk.GetImageFromArray(cube[:,:,i], sitk.sitkFloat64)
        outTx = R.Execute(fixed, moving)
        out = resampler.Execute(moving)

        #regimg = regimg + out
        aligned.append(sitk.GetArrayFromImage(out))
        if verbose:
            print(i, 'Offset: ',outTx.GetParameters() )
        drift.append(outTx.GetParameters() )

    #tags['drift'] = drift
    #tags['aligned stack'] = np.array(aligned, dtype = float)

    if verbose:
        print('-------')
        #print(outTx)
        print("Optimizer stop condition: {0}".format(R.GetOptimizerStopConditionDescription()))
        print(" Iteration: {0}".format(R.GetOptimizerIteration()))
        print(" Metric value: {0}".format(R.GetMetricValue()))

    return np.array(aligned, dtype = float), drift
|
|
1014
|
-
|
|
1015
|
-
|
|
1016
|
-
def makechi1(phi, theta, wl, ab, C1include):
    """Aberration function chi, without the defocus contribution by default.

    Parameters
    ----------
    phi, theta: scalars or numpy arrays
        azimuthal and polar angle of the reciprocal plane (rad)
    wl: float
        electron wavelength in nm
    ab: dict
        aberration coefficients C01a ... C56b (nm); optionally C70
    C1include: int
        1 -> include C10 (defocus) and C12a/b; 2 -> C12a/b only;
        anything else -> omit the second-order term entirely

    Returns
    -------
    chi scaled by 2*pi/wl (same shape as the inputs)
    """
    cos, sin = np.cos, np.sin

    # first order: image shift (coefficients may arrive as strings)
    t0 = theta * (float(ab['C01a'])*cos(phi) + float(ab['C01b'])*sin(phi))

    # second order: defocus and twofold astigmatism, switchable
    if C1include == 1:    # defocus plus astigmatism
        t1 = theta**2/2 * (ab['C10']
                           + ab['C12a']*cos(2*phi)
                           + ab['C12b']*sin(2*phi))
    elif C1include == 2:  # astigmatism only
        t1 = theta**2/2 * (ab['C12a']*cos(2*phi)
                           + ab['C12b']*sin(2*phi))
    else:                 # neither
        t1 = t0 * 0.

    # third order: axial coma and threefold astigmatism
    t2 = theta**3/3 * (ab['C21a']*cos(phi)
                       + ab['C21b']*sin(phi)
                       + ab['C23a']*cos(3*phi)
                       + ab['C23b']*sin(3*phi))

    # fourth order: spherical aberration C30 and star/fourfold terms
    t3 = theta**4/4 * (ab['C30']
                       + ab['C32a']*cos(2*phi)
                       + ab['C32b']*sin(2*phi)
                       + ab['C34a']*cos(4*phi)
                       + ab['C34b']*sin(4*phi))

    # fifth order
    t4 = theta**5/5 * (ab['C41a']*cos(phi)
                       + ab['C41b']*sin(phi)
                       + ab['C43a']*cos(3*phi)
                       + ab['C43b']*sin(3*phi)
                       + ab['C45a']*cos(5*phi)
                       + ab['C45b']*sin(5*phi))

    # sixth order: C50 and higher multipoles
    t5 = theta**6/6 * (ab['C50']
                       + ab['C52a']*cos(2*phi)
                       + ab['C52b']*sin(2*phi)
                       + ab['C54a']*cos(4*phi)
                       + ab['C54b']*sin(4*phi)
                       + ab['C56a']*cos(6*phi)
                       + ab['C56b']*sin(6*phi))

    chi = t0 + t1 + t2 + t3 + t4 + t5
    # optional round C70 term
    if 'C70' in ab:
        chi += theta**8/8 * ab['C70']
    return chi * 2 * np.pi / wl
|
|
1066
|
-
|
|
1067
|
-
|
|
1068
|
-
|
|
1069
|
-
def Probe2( ab, sizeX, sizeY, tags, verbose= False):
    """Create an incident STEM probe intensity at position (0, 0).

    Aberration coefficients come in the ``ab`` dictionary (all in nm);
    internally, reciprocal lattice vectors are in 1/nm or rad and all
    calculations of chi are in angles. Chromatic aberration is included
    by summing defocus-shifted probes weighted by ab['zeroLoss'] at the
    energy offsets ab['zeroEnergy'] (ddf = Cc*dE/E, first order only).

    Aberration function used:
      chi(qx,qy) = (2*pi/lambda)*{0.5*C1*(qx^2+qy^2) +
                   0.5*C12a*(qx^2-qy^2) + C12b*qx*qy +
                   C21a/3*qx*(qx^2+qy^2) + ...
                   + 0.5*C3*(qx^2+qy^2)^2 + 0.125*C5*(qx^2+qy^2)^3 ...}
      with qx = acos(kx/K), qy = acos(ky/K)

    References:
      [1] J. Zach, M. Haider, "Correction of spherical and Chromatic
          Abberation in a low Voltage SEM", Optik 98 (3), 112-118 (1995)
      [2] O.L. Krivanek, N. Delby, A.R. Lupini, "Towards sub-Angstroem
          Electron Beams", Ultramicroscopy 78, 1-11 (1999)

    Parameters
    ----------
    ab: dict
        'EHT' (eV), 'C10', 'Cc' (nm), 'convAngle' (mrad), 'FOV' (nm),
        'zeroLoss' weights, 'zeroEnergy' offsets, plus coefficients for
        the sibling makechi1
    sizeX, sizeY: int
        probe array size in pixels
    tags: dict
        fallback source for 'FOV'
    verbose: bool

    Returns
    -------
    probe: 2D numpy array, weight-normalized probe intensity
    probeR: 2D numpy array, real part of the ideal/probe Fourier division
    """

    if 'FOV' not in ab:
        if 'FOV' not in tags:
            print(' need field of view in tags ' )
        else:
            ab['FOV'] = tags['FOV']

    if 'convAngle' not in ab:
        ab['convAngle'] = 30 # in mrad

    ApAngle=ab['convAngle']/1000.0 # in rad

    E0= ab['EHT'] = float( ab['EHT']) # acceleration voltage in eV

    defocus = ab['C10']

    # default missing aberration coefficients to zero
    if 'C01a' not in ab:
        ab['C01a'] = 0.
    if 'C01b' not in ab:
        ab['C01b'] = 0.

    if 'C50' not in ab:
        ab['C50'] = 0.
    if 'C70' not in ab:
        ab['C70'] = 0.

    if 'Cc' not in ab:
        ab['Cc'] = 1.3e6 #// Cc in nm

    def get_wl():
        # relativistic electron wavelength in nm for voltage E0 (eV)
        h=6.626*10**-34
        m0=9.109*10**-31
        eV=1.602*10**-19*E0
        C=2.998*10**8
        return h/np.sqrt(2*m0*eV*(1+eV/(2*m0*C**2)))*10**9

    wl=get_wl()
    if verbose:
        print('Acceleration voltage {0:}kV => wavelength {1:.2f}pm'.format(int(E0/1000),wl*1000) )
    ab['wavelength'] = wl

    ## Reciprocal plane in 1/nm
    dk = 1/ab['FOV']
    kx = np.array(dk*(-sizeX/2.+ np.arange(sizeX)))
    ky = np.array(dk*(-sizeY/2.+ np.arange(sizeY)))
    Txv, Tyv = np.meshgrid(kx, ky)

    # define reciprocal plane in angles
    phi = np.arctan2(Txv, Tyv)
    theta = np.arctan2(np.sqrt(Txv**2 + Tyv**2),1/wl)

    ## calculate chi but omit defocus (C1include=2: astigmatism only)
    # NOTE(review): chi is ifftshift-ed here but theta (used below to add
    # defocus) is not — confirm the shift bookkeeping is intended.
    chi = np.fft.ifftshift (makechi1(phi,theta,wl,ab, 2))
    probe = np.zeros((sizeX, sizeY))

    ## Aperture function: block everything outside the convergence angle
    mask = theta >= ApAngle

    ## Calculate probe with Cc: sum intensities over the zero-loss weights
    for i in range(len(ab['zeroLoss'])):
        df = ab['C10'] + ab['Cc']* ab['zeroEnergy'][i]/E0
        if verbose:
            print('defocus due to Cc: {0:.2f} nm with weight {1:.2f}'.format(df,ab['zeroLoss'][i]))
        # Add defocus
        chi2 = chi + np.power(theta,2)/2*(df)
        #Calculate exponent of - i * chi
        chiT = np.fft.ifftshift (np.vectorize(complex)(np.cos(chi2), -np.sin(chi2)) )
        ## Aply aperture function
        chiT[mask] = 0.
        ## inverse fft of aberration function
        i2 = np.fft.fftshift(np.fft.ifft2(np.fft.ifftshift (chiT)))
        ## add intensities
        probe = probe + np.real(i2 * np.conjugate(i2)).T*ab['zeroLoss'][i]

    # ideal (aberration-free) probe: plain aperture, all coefficients zero
    ab0={}
    for key in ab:
        ab0[key] = 0.
    # NOTE(review): chiIA is computed but not used below
    chiIA = np.fft.fftshift (makechi1(phi,theta,wl,ab0, 0))#np.ones(chi2.shape)*2*np.pi/wl
    chiI = np.ones((sizeY, sizeX))
    chiI[mask]=0.
    i2 = np.fft.fftshift(np.fft.ifft2(np.fft.ifftshift (chiI)))
    ideal = np.real(i2 * np.conjugate(i2))

    # Fourier-space division of ideal by real probe (regularized by 1e-12)
    probeF = np.fft.fft2(probe,probe.shape)+1e-12
    idealF = np.fft.fft2(ideal,probe.shape)
    fourier_space_division = idealF/probeF
    probeR = (np.fft.ifft2(fourier_space_division,probe.shape))

    return probe/sum(ab['zeroLoss']), np.real(probeR)
|
|
1200
|
-
|
|
1201
|
-
|
|
1202
|
-
|
|
1203
|
-
|
|
1204
|
-
def DeconLR2( Oimage, probe, tags, verbose = False):
    """Lucy-Richardson deconvolution of an image with a probe (PSF) using
    SimpleITK FFT filters.

    NOTE(review): this function appears unfinished — it does not return the
    restored estimate. The original loop also crashed with a NameError
    (it printed the undefined variable ``error_new``) and ignored the
    ``verbose`` parameter by overriding it to True; both defects are fixed
    here without altering the iteration itself.

    Parameters
    ----------
    Oimage: 2D numpy array, image to be deconvolved
    probe: 2D numpy array, point spread function
    tags: dict, unused here (kept for interface compatibility)
    verbose: bool, print per-iteration progress if True
    """
    if len(Oimage) < 1:
        return Oimage
    print(Oimage.shape)
    if Oimage.shape != probe.shape:
        print('Wierdness ',Oimage.shape,' != ',probe.shape)
    ## Input Image ###
    # read the input image and mirror-pad it against FFT wrap-around
    img = sitk.GetImageFromArray(Oimage, sitk.sitkFloat64)
    img = sitk.MirrorPad( img, [128] *2, [128]*2)

    size = img.GetSize()
    # perform the FFT
    source = sitk.ForwardFFT( sitk.Cast( img, sitk.sitkFloat64 ) )

    ### Kernel Image ###
    kernel= sitk.GetImageFromArray(probe, sitk.sitkFloat64)
    # flip kernel about all axis
    #kernel = sitk.Flip( kernel, [1]*2 )

    # normalize the kernel to sum to ~1
    stats = sitk.StatisticsImageFilter()
    stats.Execute( kernel )
    kernel = sitk.Cast( kernel / stats.GetSum(), sitk.sitkFloat64 )

    # pad the kernel symmetrically up to the padded image size
    upadding = [0]*2
    upadding[0] = int( math.floor( (size[0] - kernel.GetSize()[0])/2.0 ) )
    upadding[1] = int( math.floor( (size[1] - kernel.GetSize()[1])/2.0 ) )

    lpadding = [0]*2
    lpadding[0] = int( math.ceil( (size[0] - kernel.GetSize()[0])/2.0 ) )
    lpadding[1] = int( math.ceil( (size[1] - kernel.GetSize()[1])/2.0 ) )

    # pad the kernel to prevent edge artifacts
    kernel = sitk.ConstantPad( kernel, upadding, lpadding, 0.0 )

    # perform FFT on the centered kernel
    responseFT = sitk.ForwardFFT( sitk.FFTShift( kernel ) )

    est = sitk.GetImageFromArray(np.ones(size), sitk.sitkFloat64 )

    # Lucy-Richardson iteration. The original convergence bookkeeping was
    # broken (dest never recomputed from data), so the loop effectively runs
    # a fixed number of steps before the i > 10 guard terminates it.
    dest = 100
    i = 0
    while abs(dest) > 0.0001:
        i += 1

        error = source / sitk.InverseFFT( est*responseFT )
        est = est * sitk.InverseFFT( error*responseFT )

        if verbose:
            print(' LR Deconvolution - Iteration: {0:d}'.format(i))

        if i > 10:
            dest = 0.0
            print('terminate')
    # NOTE(review): no value is returned — callers receive None, as before.
|
|
1271
|
-
|
|
1272
|
-
# This task generates a restored image from an input image and point spread function (PSF) using the algorithm developed independently by Lucy (1974, Astron. J. 79, 745) and Richardson (1972, J. Opt. Soc. Am. 62, 55) and adapted for HST imagery by Snyder (1990, in Restoration of HST Images and Spectra, ST ScI Workshop Proceedings; see also Snyder, Hammoud, & White, JOSA, v. 10, no. 5, May 1993, in press). Additional options developed by Rick White (STScI) are also included.
|
|
1273
|
-
#
|
|
1274
|
-
# The Lucy-Richardson method can be derived from the maximum likelihood expression for data with a Poisson noise distribution. Thus, it naturally applies to optical imaging data such as HST. The method forces the restored image to be positive, in accord with photon-counting statistics.
|
|
1275
|
-
#
|
|
1276
|
-
# The Lucy-Richardson algorithm generates a restored image through an iterative method. The essence of the iteration is as follows: the (n+1)th estimate of the restored image is given by the nth estimate of the restored image multiplied by a correction image. That is,
|
|
1277
|
-
#
|
|
1278
|
-
# original data
|
|
1279
|
-
# image = image --------------- * reflect(PSF)
|
|
1280
|
-
# n+1 n image * PSF
|
|
1281
|
-
# n
|
|
1282
|
-
|
|
1283
|
-
# where the *'s represent convolution operators and reflect(PSF) is the reflection of the PSF, i.e. reflect((PSF)(x,y)) = PSF(-x,-y). When the convolutions are carried out using fast Fourier transforms (FFTs), one can use the fact that FFT(reflect(PSF)) = conj(FFT(PSF)), where conj is the complex conjugate operator.
|
|
1284
|
-
|
|
1285
|
-
def DeconLR( Oimage, probe, tags, verbose = False):
    """Lucy-Richardson deconvolution of an image with a probe (PSF),
    implemented with scipy.fftpack FFTs.

    Iterates  est_{n+1} = est_n * ((source / (est_n (*) PSF)) (*) PSF^T)
    until the relative change of the estimate falls below 1e-4 percent
    (or 1000 iterations). The final estimate is low-pass filtered with a
    circular aperture derived from the convergence angle.

    Parameters
    ----------
    Oimage: 2D numpy array, image to be deconvolved
    probe: 2D numpy array, point spread function (same shape as Oimage)
    tags: dict
        must contain 'ImageScanned' or 'aberrations' (with 'EHT' in eV and
        optionally 'convAngle' in mrad), plus 'pixel_size' and 'FOV'
    verbose: bool, print per-iteration progress if True

    Returns
    -------
    est2: 2D numpy array, aperture-filtered deconvolved image

    NOTE(review): if tags contains neither 'ImageScanned' nor 'aberrations',
    ``ab`` is never assigned and a NameError follows.
    """
    if len(Oimage) < 1:
        return Oimage
    print(Oimage.shape)
    if Oimage.shape != probe.shape:
        print('Wierdness ',Oimage.shape,' != ',probe.shape)
    # promote PSF and image to complex arrays for the FFT round trips
    probeC = np.ones((probe.shape), dtype = np.complex64)
    probeC.real = probe

    error = np.ones((Oimage.shape), dtype = np.complex64)
    est = np.ones((Oimage.shape), dtype = np.complex64)
    source= np.ones((Oimage.shape), dtype = np.complex64)
    source.real = Oimage

    responseFT =fftpack.fft2(probeC)

    if 'ImageScanned' in tags:
        ab = tags['ImageScanned']
    elif 'aberrations' in tags:
        ab = tags['aberrations']
    if 'convAngle' not in ab:
        ab['convAngle'] = 30
    ApAngle=ab['convAngle']/1000.0  # mrad -> rad

    E0= float( ab['EHT'])

    def get_wl(E0):
        # relativistic electron wavelength in nm for voltage E0 (eV)
        h=6.626*10**-34
        m0=9.109*10**-31
        eV=1.602*10**-19*E0
        C=2.998*10**8
        return h/np.sqrt(2*m0*eV*(1+eV/(2*m0*C**2)))*10**9

    wl=get_wl(E0)
    ab['wavelength'] = wl

    # aperture diameter in reciprocal space (1/nm)
    over_d = 2* ApAngle / wl

    dx = tags['pixel_size']
    dk = 1.0/ float(tags['FOV'])
    ScreenWidth = 1/dx

    aperture = np.ones((Oimage.shape), dtype = np.complex64)
    # Mask for the aperture before the Fourier transform
    N = Oimage.shape[0]
    sizeX = Oimage.shape[0]
    sizeY = Oimage.shape[1]
    # aperture radius in pixels of the FFT grid
    App_ratio = over_d/ScreenWidth*N

    Thetax = np.array((-sizeX/2.+ np.arange(sizeX)))
    Thetay = np.array((-sizeY/2.+ np.arange(sizeY)))
    Txv, Tyv = np.meshgrid(Thetax, Thetay)

    tp1 = Txv**2 + Tyv**2 >= (App_ratio)**2
    aperture[tp1.T] = 0.
    print( App_ratio, ScreenWidth, dk)

    dE = 100
    dest = 100
    i=0
    while abs(dest) > 0.0001 :#or abs(dE) > .025:
        i += 1

        error_old = np.sum(error.real)
        est_old = est.copy()
        # ratio of data to current blurred estimate ...
        error = source / np.real(fftpack.fftshift(fftpack.ifft2(fftpack.fft2(est)*responseFT)))
        # ... correlated with the PSF updates the estimate
        est = est * np.real(fftpack.fftshift(fftpack.ifft2(fftpack.fft2(error)*np.conjugate(responseFT))))
        #est = est_old * est
        #est = np.real(fftpack.fftshift(fftpack.ifft2(fftpack.fft2(est)*fftpack.fftshift(aperture) )))

        # convergence measures: change of error and of the estimate (percent)
        error_new = np.real(np.sum(np.power(error,2)))-error_old
        dest = np.sum(np.power((est - est_old).real,2))/np.sum(est)*100
        #print(np.sum((est.real - est_old.real)* (est.real - est_old.real) )/np.sum(est.real)*100 )

        if error_old!=0:
            dE = error_new / error_old *1.0
        else:
            dE = error_new

        if verbose:
            print(' LR Deconvolution - Iteration: {0:d} Error: {1:.2f} = change: {2:.5f}%, {3:.5f}%'.format(i,error_new,dE,abs(dest)))

        if i > 1000:
            dE = dest = 0.0
            print('terminate')

    print('\n Lucy-Richardson deconvolution converged in '+str(i)+ ' Iterations')
    # low-pass the final estimate with the circular aperture mask
    est2 = np.real(fftpack.ifft2(fftpack.fft2(est)*fftpack.fftshift(aperture) ))
    #plt.imshow(np.real(np.log10(np.abs(fftpack.fftshift(fftpack.fft2(est)))+1)+aperture), origin='lower',)
    #plt.show()
    print(est2.shape)
    return est2
|
|
1389
|
-
|
|
1390
|
-
|
|
1391
|
-
##########################################
|
|
1392
|
-
# Functions Used
|
|
1393
|
-
##########################################
|
|
1394
|
-
|
|
1395
|
-
def MakeProbeG(sizeX, sizeY, widthi, xi, yi):
    """Normalized 2D Gaussian probe centered at (xi, yi).

    Parameters
    ----------
    sizeX, sizeY: int, output array dimensions in pixels
    widthi: float, Gaussian width parameter (sigma)
    xi, yi: float, center offset in pixels relative to the array center

    Returns
    -------
    2D numpy array of shape (sizeX, sizeY) summing to 1
    """
    half_x = sizeX/2
    half_y = sizeY/2
    denom = 2*widthi**2
    x, y = np.mgrid[-half_x:half_x, -half_y:half_y]
    gaussian = np.exp(-((x-xi)**2/float(denom)+(y-yi)**2/float(denom)))
    # normalize so the total intensity is 1
    return gaussian/gaussian.sum()
|
|
1404
|
-
|
|
1405
|
-
def MakeLorentz(sizeX, sizeY, width, xi, yi):
    """Normalized 2D Lorentzian probe centered at (xi, yi).

    Parameters
    ----------
    sizeX, sizeY: int, output array dimensions in pixels
    width: float, Lorentzian gamma parameter
    xi, yi: float, center offset in pixels relative to the array center

    Returns
    -------
    2D numpy array summing to 1
    """
    half_x = np.floor(sizeX/2)
    half_y = np.floor(sizeY/2)
    gamma = width
    x, y = np.mgrid[-half_x:half_x, -half_y:half_y]
    lorentzian = gamma/(2*np.pi)/np.power(((x-xi)**2+(y-yi)**2+gamma**2), 1.5)
    # normalize so the total intensity is 1
    return lorentzian/lorentzian.sum()
|
|
1414
|
-
|
|
1415
|
-
|
|
1416
|
-
|
|
1417
|
-
def ZLPWeight():
    """Empirical zero-loss-peak weighting curve.

    Returns
    -------
    (x, y): x is a numpy array of 29 energy offsets from -0.5 to 0.9
            (presumably eV — confirm against callers), y is the matching
            list of relative intensities with maximum 1.0
    """
    energies = np.linspace(-0.5, .9, 29)
    intensities = [0.0143, 0.0193, 0.0281, 0.0440, 0.0768, 0.1447, 0.2785,
                   0.4955, 0.7442, 0.9380, 1.0000, 0.9483, 0.8596, 0.7620,
                   0.6539, 0.5515, 0.4478, 0.3500, 0.2683, 0.1979, 0.1410,
                   0.1021, 0.0752, 0.0545, 0.0401, 0.0300, 0.0229, 0.0176,
                   0.0139]
    return (energies, intensities)
|
|
1422
|
-
|
|
1423
|
-
|
|
1424
|
-
##
|
|
1425
|
-
# All atom detection is done here
|
|
1426
|
-
# Everything is in unit of pixel!!
|
|
1427
|
-
##
|
|
1428
|
-
|
|
1429
|
-
|
|
1430
|
-
def findatoms(image, tags):
    """Detect atom positions in an image and integrate their intensities.

    All geometry is in units of pixels. Candidate positions come from
    skimage's peak_local_max; each detected peak is then integrated over a
    circular disc whose radius derives from tags['resolution'],
    tags['pixel_size'] and tags['ROIsize'].

    Parameters
    ----------
    image: 2D numpy array (normalized internally to [0, 1])
    tags: dict; requires 'pixel_size'; 'sigma_min', 'resolution' and
        'ROIsize' get defaults and 'radius' is added as a side effect

    Returns
    -------
    atoms: sorted list of tuples (x, y, rr, integrated_intensity, max_intensity)
    """

    # normalize the image to [0, 1]
    image = image-image.min()
    image = image/image.max()

    if 'sigma_min' not in tags:
        tags['sigma_min'] = 0.1
    if 'resolution' not in tags:
        tags['resolution'] = 0.1

    if 'ROIsize' not in tags:
        tags['ROIsize'] = 100.

    # resolution in pixels: minimum separation of detected peaks
    res = tags['resolution'] / tags['pixel_size']#* tags['ROIsize']/100.
    print('res',res)
    coordinates = peak_local_max(image, min_distance=int(res/2), threshold_rel=tags['sigma_min'], exclude_border =True)
    print('coor',len( coordinates))
    """
    peak_local_max(image, min_distance=10, threshold_abs=0, threshold_rel=0.1,
                   exclude_border=True, indices=True, num_peaks=np.inf,
                   footprint=None, labels=None):

    Find peaks in an image, and return them as coordinates or a boolean array.
    Peaks are the local maxima in a region of `2 * min_distance + 1
    (i.e. peaks are separated by at least `min_distance`).
    NOTE: If peaks are flat (i.e. multiple adjacent pixels have identical
    intensities), the coordinates of all such pixels are returned.
    """

    # We calculate the radius in pixel of a round area in which atoms are evaluated
    sc = tags['pixel_size']
    r= tags['resolution']/sc*tags['ROIsize']/100./2.
    tags['radius'] = r

    #######################################
    # Now we determine intensity          #
    #######################################

    ###
    # Make a circular mask for integration of atom intensity
    ###
    rr = int(r+0.5)
    mask = np.zeros((2*rr+1,2*rr+1))

    for i in range (2*rr+1):
        for j in range (2*rr+1):
            # +0.1 keeps pixels exactly on the radius inside the disc
            if (i-rr)**2+(j-rr)**2<rr**2+0.1:
                mask[i,j]=1

    ###
    # Determine pixel position and intensity of all atoms
    ###
    atoms = []
    for i in range(len( coordinates)):
        x,y = coordinates[i]

        # skip peaks whose integration disc would cross the image border.
        # NOTE(review): the bounds pair x with shape[1] and y with shape[0],
        # while the slicing below uses x on axis 0 — for non-square images
        # verify the intended axis convention.
        if x>rr and y>rr and x<image.shape[1]-rr and y<image.shape[0]-rr:

            area = image[x-rr:x+rr+1,y-rr:y+rr+1]
            arr = area*mask
            atoms.append((x,y,rr, arr.sum(), arr.max()))

    print(' Detected ', len(atoms), ' atoms')
    atoms.sort()
    return atoms
|
|
1510
|
-
|
|
1511
|
-
# sort corners in counter-clockwise direction
|
|
1512
|
-
def TurningFunction(corners, points):
    """Return the angle (in degrees) at each corner of a polygon.

    For every corner the angle between the vectors to its two neighbors is
    computed via the dot product.

    Bug fix: the neighbor arrays were built with ``np.roll`` WITHOUT
    ``axis=0``; on an (n, 2) coordinate array that rolls the flattened
    array and scrambles x/y pairs. Rolling row-wise restores the intended
    previous/next-corner semantics. Leftover debug prints and an unused
    direction-angle computation were removed.

    Parameters
    ----------
    corners: sequence of indices into points
    points: numpy array of (x, y) coordinates (must support fancy indexing)

    Returns
    -------
    list of corner angles in degrees
    """
    corner_coords = np.array(points[corners])
    next_corners = np.roll(corner_coords, -1, axis=0)  # successor of each corner
    prev_corners = np.roll(corner_coords, 1, axis=0)   # predecessor of each corner

    angles = []
    for i in range(len(corner_coords)):
        A = corner_coords[i] - next_corners[i]
        B = corner_coords[i] - prev_corners[i]
        num = np.dot(A, B)
        denom = np.linalg.norm(A) * np.linalg.norm(B)
        angles.append(np.arccos(num/denom) * 180 / np.pi)

    return angles
|
|
1534
|
-
|
|
1535
|
-
|
|
1536
|
-
|
|
1537
|
-
|
|
1538
|
-
def PolygonSort2(corners, points):
    """Order polygon corners counter-clockwise around their centroid.

    Parameters
    ----------
    corners : sequence of int
        Indices into ``points`` that form the polygon.
    points : numpy array
        Point coordinates, indexable as ``points[corners]``.

    Returns
    -------
    list of [index, angle_degrees]
        Corner indices paired with their polar angle about the centroid,
        sorted by increasing angle (i.e. counter-clockwise).
    """
    # Arithmetic-mean centroid of the selected corners.
    n = len(corners)
    cx = float(sum(x for x, y in points[corners])) / n
    cy = float(sum(y for x, y in points[corners])) / n

    # Pair every corner index with its angle from the positive x axis,
    # measured about the centroid and mapped into [0, 360) degrees.
    cornersWithAngles = []
    for idx in corners:
        px, py = points[idx]
        angle = (math.atan2(py - cy, px - cx) + 2.0 * math.pi) % (2.0 * math.pi)
        cornersWithAngles.append([idx, math.degrees(angle)])

    # Counter-clockwise order = increasing polar angle.
    cornersWithAngles.sort(key=lambda entry: entry[1])
    return cornersWithAngles
1565
|
-
|
|
1566
|
-
|
|
1567
|
-
def PolygonsInner(indices, points):
    """Return the inner angles (in degrees) of the polygon ``points[indices]``.

    Parameters
    ----------
    indices : sequence of int
        Indices into ``points`` giving the polygon corners in traversal order.
    points : array-like of shape (n, 2)
        Point coordinates.

    Returns
    -------
    inner_angles : numpy array
        Inner angle at each corner, in [0, 360) degrees.
    """
    pp = np.array(points)[indices, :]
    # Shift by one corner and subtract: a list of edge vectors.
    pp2 = np.roll(pp, 1, axis=0)
    p_vectors = pp - pp2

    # Edge directions relative to the positive x-axis, in [0, 360) degrees.
    # The original wrote `... + 360 % 360`, which adds 0 because `%` binds
    # tighter than `+`; the intended normalisation is applied here.  The
    # return value is unchanged because the final `% 360` below absorbs any
    # 360-degree offset in the angle differences.
    ang = (np.degrees(np.arctan2(p_vectors[:, 1], p_vectors[:, 0])) + 360) % 360
    # Direction of the following edge.
    ang2 = np.roll(ang, -1, axis=0)

    # The change of direction between successive edges is the outer angle;
    # inner + outer = 180.
    inner_angles = (180 - (ang2 - ang) + 360) % 360

    return inner_angles
1584
|
-
|
|
1585
|
-
|
|
1586
|
-
# sort corners in counter-clockwise direction
|
|
1587
|
-
def PolygonSort(corners):
    """Sort polygon corner coordinates counter-clockwise around the centroid.

    Parameters
    ----------
    corners : sequence of (x, y) pairs
        Polygon corner coordinates.

    Returns
    -------
    list of (x, y, angle_degrees) tuples
        Corners ordered by increasing polar angle about the centroid.
    """
    # Arithmetic-mean centroid of the polygon.
    n = len(corners)
    cx = float(sum(pt[0] for pt in corners)) / n
    cy = float(sum(pt[1] for pt in corners)) / n

    # Decorate each corner with its angle about the centroid, mapped to
    # [0, 2*pi) and converted to degrees.
    decorated = []
    for px, py in corners:
        angle = (math.atan2(py - cy, px - cx) + 2.0 * math.pi) % (2.0 * math.pi)
        decorated.append((px, py, math.degrees(angle)))

    # Sorting by angle yields counter-clockwise order.
    decorated.sort(key=lambda tup: tup[2])
    return decorated
1603
|
-
|
|
1604
|
-
|
|
1605
|
-
|
|
1606
|
-
|
|
1607
|
-
def PolygonArea(corners):
    """Area and centroid of a polygon via the shoelace formula.

    http://en.wikipedia.org/wiki/Shoelace_formula
    Corners must be ordered clockwise or counter-clockwise.

    Parameters
    ----------
    corners : sequence of (x, y) pairs

    Returns
    -------
    (area, C_x, C_y)
        Absolute area and centroid coordinates.  NOTE: the centroid sums
        are divided by the *unsigned* area, so for clockwise ordering the
        centroid components come out negated — callers in this module
        compensate with abs().  Degenerate (zero-area) polygons raise
        ZeroDivisionError, exactly like the original.
    """
    n = len(corners)
    signed_sum = 0.0
    C_x = 0
    C_y = 0
    for i in range(n):
        j = (i + 1) % n  # wrap around to close the polygon
        cross = corners[i][0] * corners[j][1] - corners[j][0] * corners[i][1]
        signed_sum += cross
        C_x += (corners[i][0] + corners[j][0]) * cross
        C_y += (corners[i][1] + corners[j][1]) * cross

    area = abs(signed_sum) / 2.0

    # Standard polygon-centroid formula.
    C_x = C_x / (6 * area)
    C_y = C_y / (6 * area)

    return (area), C_x, C_y
1632
|
-
|
|
1633
|
-
|
|
1634
|
-
def PolygonAngles(corners):
    """Polar angle (degrees) of every corner about the polygon centroid.

    Parameters
    ----------
    corners : sequence of (x, y) pairs

    Returns
    -------
    angles : list of float
        Angle of each corner from the positive x axis, measured about the
        centroid, in [0, 360) degrees and in input order.
    """
    # Arithmetic-mean centroid.
    n = len(corners)
    cx = float(sum(pt[0] for pt in corners)) / n
    cy = float(sum(pt[1] for pt in corners)) / n

    angles = []
    for px, py in corners:
        an = (math.atan2(py - cy, px - cx) + 2.0 * math.pi) % (2.0 * math.pi)
        angles.append(math.degrees(an))

    return angles
1647
|
-
|
|
1648
|
-
|
|
1649
|
-
|
|
1650
|
-
|
|
1651
|
-
|
|
1652
|
-
|
|
1653
|
-
def voronoi_tags(vor):
    """Analyse a scipy Voronoi diagram of atom positions.

    Builds a dictionary ``sym`` with one sub-dictionary per input point
    (keyed by the stringified point index) holding the Voronoi-cell
    geometry (area, corner angles, centroid offset) and nearest-neighbour
    information, plus summary statistics over all interior cells.

    Parameters
    ----------
    vor : scipy.spatial.Voronoi
        Voronoi diagram of the (x, y) atom positions.

    Returns
    -------
    sym : dict
        Per-point records plus histogram lists and medians; points whose
        cell is open or has non-positive vertex coordinates are tagged
        'rim' and excluded from the statistics.
    """
    sym = {}
    sym['voronoi'] = vor
    # Raw qhull output, kept for later reference:
    sym['vertices'] = vor.vertices            # (nvertices, ndim) Voronoi vertex coordinates
    sym['ridge_points'] = vor.ridge_points    # (nridges, 2) input points separated by each ridge
    sym['ridge_vertices'] = vor.ridge_vertices  # vertices forming each ridge (-1 = at infinity)
    sym['regions'] = vor.regions              # vertices forming each region (-1 = open region)
    sym['point_region'] = vor.point_region    # region index for each input point

    points = vor.points
    # k-d tree over the input points for nearest-neighbour queries below
    nnTree = KDTree(points)

    rim = []       # indices of points whose cell is open / touches the border
    regions = []   # vertex lists of all accepted (interior) cells

    ###
    # We get all the vertice (ridge edge) lengths
    ###
    lengths = []
    for vertice in vor.ridge_vertices:
        if not(-1 in vertice):  # skip ridges that extend to infinity
            p1 = vor.vertices[vertice[0]]
            p2 = vor.vertices[vertice[1]]
            lengths.append(np.sqrt((p1[0]-p2[0])**2+(p1[1]-p2[1])**2))

    sym['lengths'] = lengths
    sym['median lengths'] = np.median(lengths)
    # Edges shorter than this are treated as numerical artefacts and do not
    # count towards the coordination number.
    sym['Min Voronoi Edge'] = np.median(lengths)/1.5

    # Histogram accumulators over all interior cells.
    cornersHist = []
    nnHist = []
    nnDistHist = []
    angleHist = []
    areaHist = []
    deviationHist = []

    for i, region in enumerate(vor.point_region):
        x, y = points[i]
        sym[str(i)] = {}
        vertices = vor.regions[region]

        ###
        # We get all the rim atoms
        ###
        # Finite regions only: open cells (vertex index -1) and cells with a
        # vertex at non-positive coordinates are treated as rim cells.
        if all(v >= 0 and all(vor.vertices[v] > 0) for v in vertices):
            regions.append(vertices)
            poly = []
            for v in vertices:
                poly.append(vor.vertices[v])

            # Cell area and geometric centre (shoelace formula); PolygonArea
            # divides by the unsigned area, hence the abs() below.
            area, cx, cy = PolygonArea(poly)
            cx = abs(cx)
            cy = abs(cy)

            angles = PolygonAngles(poly)
            angleHist.append(angles)
            areaHist.append(area)
            # Distance between atom position and cell centroid.
            deviationHist.append(np.sqrt((cx-x)**2 + (cy-y)**2))

            sym[str(i)]['xy'] = [x, y]
            sym[str(i)]['geometric'] = [cx, cy]
            sym[str(i)]['area'] = area
            sym[str(i)]['angles'] = angles
            sym[str(i)]['off center'] = [cx-x, cy-y]
            sym[str(i)]['position'] = 'inside'
            sym[str(i)]['corner'] = vertices
            sym[str(i)]['vertices'] = poly
            sym[str(i)]['corners'] = len(vertices)
            cornersHist.append(len(vertices))

            # Coordination number nn = number of sufficiently long cell edges.
            nn = 0
            nnVor = []
            length = []
            for j in range(len(vertices)):
                k = (j+1) % len(vertices)  # wrap around to close the cell
                p1 = vor.vertices[vertices[j]]
                p2 = vor.vertices[vertices[k]]
                leng = np.sqrt((p1[0]-p2[0])**2+(p1[1]-p2[1])**2)
                length.append(leng)
                sym[str(i)]['length'] = length
                if leng > sym['Min Voronoi Edge']:
                    nn += 1
                    nnVor.append(vertices[j])
            sym[str(i)]['length'] = length

            # nn nearest neighbours; the first query hit is the point itself,
            # hence k = nn+1 and the loop below starting at 1.
            nnP = nnTree.query(points[i], k=nn+1)
            sym[str(i)]['neighbors'] = []
            sym[str(i)]['nn Distance'] = []
            sym[str(i)]['nn'] = nn
            if nn > 0:
                nnHist.append(nn)
                for j in range(1, len(nnP[0])):
                    sym[str(i)]['nn Distance'].append(nnP[0][j])
                    sym[str(i)]['neighbors'].append(nnP[1][j])
                    nnDistHist.append(nnP[0][j])
            else:
                # Interior cell whose every edge is too short — treat as rim.
                rim.append(i)
                sym[str(i)]['position'] = 'rim'
                sym[str(i)]['corners'] = 0
                print('weird nn determination', i)
        else:
            rim.append(i)
            sym[str(i)]['position'] = 'rim'
            sym[str(i)]['corners'] = 0
            sym[str(i)]['xy'] = [x, y]

    # Summary statistics over interior cells.
    sym['average corners'] = np.median(cornersHist)
    sym['average area'] = np.median(areaHist)
    sym['num atoms at rim'] = len(rim)
    sym['num voronoi'] = len(points)-len(rim)
    sym['Median Coordination'] = np.median(nnHist)
    sym['Median NN Distance'] = np.median(nnDistHist)

    sym['Hist corners'] = (cornersHist)
    sym['Hist area'] = areaHist
    sym['atoms at rim'] = (rim)
    sym['Hist Coordination'] = (nnHist)
    sym['Hist NN Distance'] = (nnDistHist)
    sym['Hist deviation'] = (deviationHist)

    return sym
|
1786
|
-
|
|
1787
|
-
def defineSymmetry(tags):
    """(Re)create the 'symmetry' bookkeeping dictionary inside ``tags``.

    For every detected atom in ``tags['atoms']`` an entry ``sym[str(i)]``
    is created holding position, intensity and placeholder metadata, and
    the rounded integer pixel positions are collected in ``tags['points']``.

    Parameters
    ----------
    tags : dict
        Must contain 'atoms', a sequence whose rows are at least
        (x, y, ?, intensity, maximum); may contain 'latticeType', an index
        into the list of supported lattice names.

    Notes
    -----
    The original implementation referenced ``self.tags`` and
    ``self.points`` inside this module-level function (it was evidently
    lifted from a method and would raise NameError at runtime); those
    references now use the ``tags`` argument, and the point list is stored
    as ``tags['points']``.
    """
    # Start from a clean symmetry record.
    if 'symmetry' in tags:
        tags['symmetry'].clear()

    tags['symmetry'] = {}
    sym = tags['symmetry']
    if 'latticeType' in tags:
        latticeTypes = ['None', 'Find Lattice', 'hexagonal', 'honeycomb', 'square', 'square centered',
                        'diamond', 'fcc']
        sym['lattice'] = latticeTypes[tags['latticeType']]

    sym['number of atoms'] = len(tags['atoms'])

    points = []
    for i in range(sym['number of atoms']):
        sym[str(i)] = {}
        sym[str(i)]['index'] = i
        sym[str(i)]['x'] = tags['atoms'][i][0]
        sym[str(i)]['y'] = tags['atoms'][i][1]
        sym[str(i)]['intensity'] = tags['atoms'][i][3]
        sym[str(i)]['maximum'] = tags['atoms'][i][4]
        sym[str(i)]['position'] = 'inside'
        sym[str(i)]['Z'] = 0               # atomic number: not yet assigned
        sym[str(i)]['Name'] = 'undefined'  # element name: not yet assigned
        sym[str(i)]['Column'] = -1         # atomic-column id: not yet assigned

        # Integer pixel position, rounded to the nearest pixel.
        points.append([int(sym[str(i)]['x'] + 0.5), int(sym[str(i)]['y'] + 0.5)])

    tags['points'] = points.copy()
|
|
1819
|
-
|
|
1820
|
-
|
|
1821
|
-
def voronoi2(tags, atoms):
    """Voronoi analysis of the atoms recorded in ``tags['symmetry']``.

    Near-duplicate of :func:`voronoi_tags`, but it works in place on the
    ``tags['symmetry']`` dictionary prepared by ``defineSymmetry`` (every
    per-atom sub-dictionary must already exist), uses the image size
    ``tags['data'].shape[0]`` as an upper bound for valid cell vertices,
    and returns nothing.

    Parameters
    ----------
    tags : dict
        Must contain 'symmetry' (with per-atom entries and
        'number of atoms') and 'data' (the image, used only for its shape).
    atoms : sequence
        NOTE(review): accepted but never used — positions are taken from
        ``tags['symmetry']`` instead.
    """
    sym = tags['symmetry']
    points = []

    # Integer pixel positions of all atoms, as prepared by defineSymmetry.
    for i in range(sym['number of atoms']):
        points.append([int(sym[str(i)]['x']+0.5), int(sym[str(i)]['y']+0.5)])

    #points = np.array(atoms[:][0:2])
    vor = sp.Voronoi(points)

    sym['voronoi'] = vor

    # k-d tree over the input points for nearest-neighbour queries below
    nnTree = sp.KDTree(points)

    rim = []       # indices of points whose cell is open / touches the border
    regions = []   # vertex lists of all accepted (interior) cells

    ###
    # We get all the vertice (ridge edge) lengths
    ###
    lengths = []
    for vertice in vor.ridge_vertices:
        if all(v >= 0 for v in vertice):  # skip ridges that extend to infinity
            p1 = vor.vertices[vertice[0]]
            p2 = vor.vertices[vertice[1]]
            lengths.append(np.sqrt((p1[0]-p2[0])**2+(p1[1]-p2[1])**2))

    sym['lengths'] = lengths
    sym['median lengths'] = np.median(lengths)
    # Edges shorter than this are treated as numerical artefacts and do not
    # count towards the coordination number.
    sym['Min Voronoi Edge'] = np.median(lengths)/1.5

    # Histogram accumulators over all interior cells.
    cornersHist = []
    nnHist = []
    nnDistHist = []
    angleHist = []
    areaHist = []
    deviationHist = []

    for i, region in enumerate(vor.point_region):
        x, y = points[i]

        vertices = vor.regions[region]

        ###
        # We get all the rim atoms
        ###
        # Finite regions only: open cells (vertex index -1), vertices at
        # non-positive coordinates, and vertices beyond the image size are
        # treated as rim cells.
        if all(v >= 0 and all(vor.vertices[v] > 0) and all(vor.vertices[v] < tags['data'].shape[0]) for v in vertices):
            regions.append(vertices)
            poly = []
            for v in vertices:
                poly.append(vor.vertices[v])

            # Cell area and geometric centre (shoelace formula); PolygonArea
            # divides by the unsigned area, hence the abs() below.
            area, cx, cy = PolygonArea(poly)
            cx = abs(cx)
            cy = abs(cy)

            angles = PolygonAngles(poly)
            angleHist.append(angles)
            areaHist.append(area)
            # Distance between atom position and cell centroid.
            deviationHist.append(np.sqrt((cx-x)**2 + (cy-y)**2))

            sym[str(i)]['xy'] = [x, y]
            sym[str(i)]['geometric'] = [cx, cy]
            sym[str(i)]['area'] = area
            sym[str(i)]['angles'] = angles
            sym[str(i)]['off center'] = [cx-x, cy-y]
            sym[str(i)]['position'] = 'inside'
            sym[str(i)]['corner'] = vertices
            sym[str(i)]['vertices'] = poly
            sym[str(i)]['corners'] = len(vertices)
            cornersHist.append(len(vertices))

            # Coordination number nn = number of sufficiently long cell edges.
            nn = 0
            nnVor = []
            length = []
            for j in range(len(vertices)):
                k = (j+1) % len(vertices)  # wrap around to close the cell
                p1 = vor.vertices[vertices[j]]
                p2 = vor.vertices[vertices[k]]
                leng = np.sqrt((p1[0]-p2[0])**2+(p1[1]-p2[1])**2)
                length.append(leng)
                sym[str(i)]['length'] = length
                if leng > sym['Min Voronoi Edge']:
                    nn += 1
                    nnVor.append(vertices[j])
            sym[str(i)]['length'] = length

            # nn nearest neighbours; the first query hit is the point itself,
            # hence k = nn+1 and the loop below starting at 1.
            nnP = nnTree.query(points[i], k=nn+1)
            sym[str(i)]['neighbors'] = []
            sym[str(i)]['nn Distance'] = []
            sym[str(i)]['nn'] = nn
            if nn > 0:
                nnHist.append(nn)
                for j in range(1, len(nnP[0])):
                    sym[str(i)]['nn Distance'].append(nnP[0][j])
                    sym[str(i)]['neighbors'].append(nnP[1][j])
                    nnDistHist.append(nnP[0][j])
            else:
                # Interior cell whose every edge is too short — treat as rim.
                rim.append(i)
                sym[str(i)]['position'] = 'rim'
                sym[str(i)]['corners'] = 0
                print('weird nn determination', i)
        else:
            rim.append(i)
            sym[str(i)]['position'] = 'rim'
            sym[str(i)]['corners'] = 0
            sym[str(i)]['xy'] = [x, y]

    # Summary statistics over interior cells (stored in place; no return).
    sym['average corners'] = np.median(cornersHist)
    sym['average area'] = np.median(areaHist)
    sym['num atoms at rim'] = len(rim)
    sym['num voronoi'] = len(points)-len(rim)
    sym['Median Coordination'] = np.median(nnHist)
    sym['Median NN Distance'] = np.median(nnDistHist)

    sym['Hist corners'] = (cornersHist)
    sym['Hist area'] = areaHist
    sym['atoms at rim'] = (rim)
    sym['Hist Coordination'] = (nnHist)
    sym['Hist NN Distance'] = (nnDistHist)
    sym['Hist deviation'] = (deviationHist)
|
1959
|
-
def atomRefine(image, atoms, tags, maxDist=2):
    """Refine atom positions to sub-pixel accuracy with a 2D Gaussian fit.

    Atoms whose integrated intensity lies outside [MinInt, MaxInt] are kept
    in the per-atom record but excluded from the refined position list.

    Parameters
    ----------
    image : 2D numpy array
        The (atomic-resolution) image.
    atoms : sequence
        Each entry starts with the (y, x) pixel coordinates of a candidate
        atom (note the swapped order, preserved from the original code).
    tags : dict
        Needs 'radius' (atom radius in pixels), 'MinInt' and 'MaxInt'
        (MaxInt <= 0 disables the upper intensity bound).
    maxDist : float, optional
        Maximum allowed shift (in pixels) of the fitted centre; larger
        shifts are treated as failed fits and zeroed out.

    Returns
    -------
    tags2 : dict
        Per-atom arrays ('Z', 'position', 'intensity_area', 'Gauss_width',
        ...), the refined positions in 'atoms' and the full per-atom record
        in 'sym'.

    Notes
    -----
    The original code set the 'Gauss width'/'Gauss amplitude'/'Gauss volume'
    keys only for accepted atoms but read them unconditionally afterwards,
    raising KeyError for every rejected atom; they are now initialised to 0
    for all atoms.
    """
    rr = int(tags['radius'] + 0.5)  # atom radius in integer pixels
    print('using radius ', rr, 'pixels')

    # Circular integration mask of diameter 2*rr+1.
    pixels = np.linspace(0, 2 * rr, 2 * rr + 1) - rr
    x, y = np.meshgrid(pixels, pixels)
    mask = (x ** 2 + y ** 2) < rr ** 2

    def func(params, xdata, ydata):
        # Residual of a round Gaussian (FWHM-style width in params[0],
        # centre offset in params[1:3], amplitude in params[3]) vs. ydata.
        width = ydata.shape[0] / 2
        Gauss_width = params[0]
        x0 = params[1]
        y0 = params[2]
        inten = params[3]
        x, y = np.mgrid[-width:width, -width:width]
        gauss = np.exp(-4 * np.log(2) * ((x - x0) ** 2 + (y - y0) ** 2) / Gauss_width ** 2) * inten
        return (ydata - gauss).flatten()

    ###
    # Determine sub pixel position and intensity of all atoms within
    # the requested intensity range
    ###
    guess = [rr, 0.0, 0.0, 1]
    pout = [0.0, 0.0, 0.0, 0.0]
    newatoms = []

    sym = {}
    sym['number_of_atoms'] = len(atoms)
    Z = []
    Name = []
    Column = []
    position = []
    intensity_area = []
    maximum_area = []
    Gauss_width = []
    Gauss_amplitude = []
    Gauss_volume = []

    for i in range(len(atoms)):
        y, x = atoms[i][0:2]  # atom rows are stored (y, x)
        x = int(x)
        y = int(y)

        area = image[x - rr:x + rr + 1, y - rr:y + rr + 1]

        sym[str(i)] = {}
        sym[str(i)]['index'] = i
        sym[str(i)]['x'] = x
        sym[str(i)]['y'] = y
        sym[str(i)]['Z'] = 0
        sym[str(i)]['Name'] = 'undefined'
        sym[str(i)]['Column'] = -1
        # Defaults for the fit results; overwritten below when a fit is done.
        sym[str(i)]['Gauss width'] = 0
        sym[str(i)]['Gauss amplitude'] = 0
        sym[str(i)]['Gauss volume'] = 0

        append = False

        if (x - rr) < 0 or y - rr < 0 or x + rr + 1 > image.shape[0] or y + rr + 1 > image.shape[1]:
            # Integration disk sticks out of the image.
            sym[str(i)]['position'] = 'outside'
            sym[str(i)]['intensity area'] = 0
            sym[str(i)]['maximum area'] = 0
        else:
            sym[str(i)]['position'] = 'inside'
            sym[str(i)]['intensity area'] = (area * mask).sum()
            sym[str(i)]['maximum area'] = (area * mask).max()

        # Accept the atom when its integrated intensity lies in
        # (MinInt, MaxInt); MaxInt <= 0 disables the upper bound.
        if tags['MaxInt'] > 0:
            if area.sum() < tags['MaxInt']:
                if area.sum() > tags['MinInt']:
                    append = True
        elif area.sum() > tags['MinInt']:
            append = True

        if append:  # if possible do a Gaussian fit and update x and y
            if (x - rr) < 0 or y - rr < 0 or x + rr + 1 > image.shape[0] or y + rr + 1 > image.shape[1]:
                pout[0] = 0  # width
                pout[1] = 0  # dx
                pout[2] = 0  # dy
                pout[3] = 0  # amplitude
            else:
                pout, res = leastsq(func, guess, args=(area, area))
            # The fitted shift cannot be larger than maxDist pixels.
            if (abs(pout[1]) > maxDist) or (abs(pout[2]) > maxDist):
                pout[0] = 0  # width
                pout[1] = 0  # dx
                pout[2] = 0  # dy
                pout[3] = 0  # amplitude

            sym[str(i)]['x'] = x + pout[1]
            sym[str(i)]['y'] = y + pout[2]

            # Volume under the fitted 2D Gaussian.
            volume = 2 * np.pi * pout[3] * pout[0] * pout[0]

            newatoms.append([y + pout[2] + 1, x + pout[1] + 1])

            sym[str(i)]['Gauss width'] = pout[0]
            sym[str(i)]['Gauss amplitude'] = pout[3]
            sym[str(i)]['Gauss volume'] = volume

        # Collect flat per-atom arrays for the output dictionary.
        Z.append(sym[str(i)]['Z'])
        Name.append(str(sym[str(i)]['Name']))
        Column.append(sym[str(i)]['Column'])
        if sym[str(i)]['position'] == 'inside':
            position.append(1)
        else:
            position.append(0)

        intensity_area.append(sym[str(i)]['intensity area'])
        maximum_area.append(sym[str(i)]['maximum area'])
        Gauss_width.append(sym[str(i)]['Gauss width'])
        Gauss_amplitude.append(sym[str(i)]['Gauss amplitude'])
        Gauss_volume.append(sym[str(i)]['Gauss volume'])

    tags2 = {}
    tags2['number_of_atoms'] = len(atoms)

    tags2['Z'] = np.array(Z)
    tags2['Column'] = np.array(Column)
    tags2['position'] = np.array(position)
    tags2['intensity_area'] = np.array(intensity_area)
    tags2['maximum_area'] = np.array(maximum_area)

    tags2['Gauss_width'] = np.array(Gauss_width)
    tags2['Gauss_amplitude'] = np.array(Gauss_amplitude)
    tags2['Gauss_volume'] = np.array(Gauss_volume)
    tags2['atoms'] = newatoms
    tags2['sym'] = sym
    return tags2
-
|
|
2097
|
-
def Fourier_transform(current_channel, data):  # = image_channel
    """Compute the (shifted) Fourier-transform magnitude of an image.

    Parameters
    ----------
    current_channel : h5py-like group
        Must provide 'title', 'spatial_size_x/y' and 'spatial_scale_x/y'
        datasets (read with the h5py scalar syntax ``[()]``) and an
        ``attrs`` mapping.
    data : 2D numpy array
        The image to transform.

    Returns
    -------
    out_tags : dict
        The raw magnitude, the Gaussian-smoothed magnitude (as 'data' and
        'Magnitude_smoothed'), reciprocal-space scales/origin/extent and
        suggested display intensity limits.
    """
    # spatial data
    tags = dict(current_channel.attrs)  # NOTE(review): read but never used
    out_tags = {}
    basename = current_channel['title'][()]

    sizeX = current_channel['spatial_size_x'][()]
    sizeY = current_channel['spatial_size_y'][()]
    scaleX = current_channel['spatial_scale_x'][()]
    scaleY = current_channel['spatial_scale_y'][()]
    basename = current_channel['title'][()]

    # Field of view in real space.
    FOV_x = sizeX*scaleX
    FOV_y = sizeY*scaleY

    # Shift the image to non-negative values before transforming.
    image = data - data.min()
    fft_mag = (np.abs((np.fft.fftshift(np.fft.fft2(image)))))

    out_tags['Magnitude'] = fft_mag

    ## pixel_size in reciprocal space
    rec_scale_x = 1/FOV_x
    rec_scale_y = 1/FOV_y

    ## Field of View (FOV) in reciprocal space; please note: rec_FOV_x = 1/(scaleX*2)
    rec_FOV_x = rec_scale_x * sizeX /2.
    rec_FOV_y = rec_scale_y * sizeY /2.
    print(rec_FOV_x , 1/(scaleX*2))

    ## matplotlib-style extent in reciprocal space
    rec_extend = (-rec_FOV_x, rec_FOV_x, rec_FOV_y, -rec_FOV_y)

    out_tags['spatial_size_x'] = sizeX
    out_tags['spatial_size_y'] = sizeY
    out_tags['spatial_scale_x'] = rec_scale_x
    out_tags['spatial_scale_y'] = rec_scale_y
    out_tags['spatial_origin_x'] = sizeX/2.
    out_tags['spatial_origin_y'] = sizeY/2.
    out_tags['title'] = out_tags['basename'] = basename
    out_tags['FOV_x'] = rec_FOV_x
    out_tags['FOV_y'] = rec_FOV_y
    out_tags['extent'] = rec_extend

    # We need some smoothing (here with a Gaussian)
    smoothing = 3
    fft_mag2 = ndimage.gaussian_filter(fft_mag, sigma=(smoothing, smoothing), order=0)
    #fft_mag2 = np.log2(1+fft_mag2)

    out_tags['data'] = out_tags['Magnitude_smoothed'] = fft_mag2

    # Prepare an annular mask (1 < |g| < 11 reciprocal units) used to pick
    # reasonable display intensity limits.
    pixelsy = (np.linspace(0, image.shape[0]-1, image.shape[0])-image.shape[0]/2) * rec_scale_x
    pixelsx = (np.linspace(0, image.shape[1]-1, image.shape[1])-image.shape[1]/2) * rec_scale_y
    x, y = np.meshgrid(pixelsx, pixelsy)
    mask = np.zeros(image.shape)

    # Points outside the inner disk AND inside the outer disk sum to 2.
    mask_spot = x**2+y**2 > 1**2
    mask = mask + mask_spot
    mask_spot = x**2+y**2 < 11**2
    mask = mask + mask_spot

    mask[np.where(mask == 1)] = 0  # just in case of overlapping disks

    # Intensity limits from the log-scaled magnitude inside the annulus.
    minimum_intensity = np.log2(1+fft_mag2)[np.where(mask == 2)].min()*0.95
    maximum_intensity = np.log2(1+fft_mag2)[np.where(mask == 2)].max()*1.05
    out_tags['minimum_intensity'] = minimum_intensity
    out_tags['maximum_intensity'] = maximum_intensity

    return out_tags
|
|
2173
|
-
|
|
2174
|
-
|
|
2175
|
-
def find_Bragg(fft_tags, spot_threshold=0):
    """Find Bragg spots in a Fourier-transform magnitude image.

    Parameters
    ----------
    fft_tags : dict
        Output of Fourier_transform(); needs 'data' and the
        'spatial_scale_x/y' and 'spatial_origin_x/y' entries.
    spot_threshold : float, optional
        Threshold for the blob detector; 0 selects the default of 0.05.

    Returns
    -------
    spots : numpy array of shape (n, 3)
        Spot positions in reciprocal units, sorted by distance from the
        centre; the third column holds the azimuthal angle (radians).

    Notes
    -----
    A duplicated, unreachable ``return spots`` at the end of the original
    function was removed.
    """
    if spot_threshold == 0:
        spot_threshold = 0.05  # (fft_tags['maximum_intensity']*10)

    # we'll have to switch x and y coordinates for image indexing
    center = np.array([int(fft_tags['spatial_origin_y']), int(fft_tags['spatial_origin_x']), 1])
    rec_scale = np.array([fft_tags['spatial_scale_y'], fft_tags['spatial_scale_x'], 1])
    data = fft_tags['data'].T
    # NOTE(review): normalises by the raw maximum, not (max - min) — the
    # result only spans [0, 1] when data.min() is 0; confirm intent.
    data = (data - data.min()) / data.max()
    # blob_log returns (row, col, sigma); shift by the centre and convert
    # the positions to reciprocal units.
    spots_random = (blob_log(data, max_sigma=5, threshold=spot_threshold) - center) * rec_scale

    print(f'found {len(spots_random)} Bragg spots with threshold of {spot_threshold}')

    # Sort the spots by distance from the centre ...
    spots_random[:, 2] = np.linalg.norm(spots_random[:, 0:2], axis=1)
    spots_index = np.argsort(spots_random[:, 2])
    spots = spots_random[spots_index]
    # ... then replace the distance column with the azimuthal angle.
    spots[:, 2] = np.arctan2(spots[:, 0], spots[:, 1])
    return spots