pyTEMlib 0.2025.4.2__py3-none-any.whl → 0.2025.9.1__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.

Potentially problematic release.



Files changed (94)
  1. build/lib/pyTEMlib/__init__.py +33 -0
  2. build/lib/pyTEMlib/animation.py +640 -0
  3. build/lib/pyTEMlib/atom_tools.py +238 -0
  4. build/lib/pyTEMlib/config_dir.py +31 -0
  5. build/lib/pyTEMlib/crystal_tools.py +1219 -0
  6. build/lib/pyTEMlib/diffraction_plot.py +756 -0
  7. build/lib/pyTEMlib/dynamic_scattering.py +293 -0
  8. build/lib/pyTEMlib/eds_tools.py +826 -0
  9. build/lib/pyTEMlib/eds_xsections.py +432 -0
  10. build/lib/pyTEMlib/eels_tools/__init__.py +44 -0
  11. build/lib/pyTEMlib/eels_tools/core_loss_tools.py +751 -0
  12. build/lib/pyTEMlib/eels_tools/eels_database.py +134 -0
  13. build/lib/pyTEMlib/eels_tools/low_loss_tools.py +655 -0
  14. build/lib/pyTEMlib/eels_tools/peak_fit_tools.py +175 -0
  15. build/lib/pyTEMlib/eels_tools/zero_loss_tools.py +264 -0
  16. build/lib/pyTEMlib/file_reader.py +274 -0
  17. build/lib/pyTEMlib/file_tools.py +811 -0
  18. build/lib/pyTEMlib/get_bote_salvat.py +69 -0
  19. build/lib/pyTEMlib/graph_tools.py +1153 -0
  20. build/lib/pyTEMlib/graph_viz.py +599 -0
  21. build/lib/pyTEMlib/image/__init__.py +37 -0
  22. build/lib/pyTEMlib/image/image_atoms.py +270 -0
  23. build/lib/pyTEMlib/image/image_clean.py +197 -0
  24. build/lib/pyTEMlib/image/image_distortion.py +299 -0
  25. build/lib/pyTEMlib/image/image_fft.py +277 -0
  26. build/lib/pyTEMlib/image/image_graph.py +926 -0
  27. build/lib/pyTEMlib/image/image_registration.py +316 -0
  28. build/lib/pyTEMlib/image/image_utilities.py +309 -0
  29. build/lib/pyTEMlib/image/image_window.py +421 -0
  30. build/lib/pyTEMlib/image_tools.py +699 -0
  31. build/lib/pyTEMlib/interactive_image.py +1 -0
  32. build/lib/pyTEMlib/kinematic_scattering.py +1196 -0
  33. build/lib/pyTEMlib/microscope.py +61 -0
  34. build/lib/pyTEMlib/probe_tools.py +906 -0
  35. build/lib/pyTEMlib/sidpy_tools.py +153 -0
  36. build/lib/pyTEMlib/simulation_tools.py +104 -0
  37. build/lib/pyTEMlib/test.py +437 -0
  38. build/lib/pyTEMlib/utilities.py +314 -0
  39. build/lib/pyTEMlib/version.py +5 -0
  40. build/lib/pyTEMlib/xrpa_x_sections.py +20976 -0
  41. pyTEMlib/__init__.py +25 -3
  42. pyTEMlib/animation.py +31 -22
  43. pyTEMlib/atom_tools.py +29 -34
  44. pyTEMlib/config_dir.py +2 -28
  45. pyTEMlib/crystal_tools.py +129 -165
  46. pyTEMlib/eds_tools.py +559 -342
  47. pyTEMlib/eds_xsections.py +432 -0
  48. pyTEMlib/eels_tools/__init__.py +44 -0
  49. pyTEMlib/eels_tools/core_loss_tools.py +751 -0
  50. pyTEMlib/eels_tools/eels_database.py +134 -0
  51. pyTEMlib/eels_tools/low_loss_tools.py +655 -0
  52. pyTEMlib/eels_tools/peak_fit_tools.py +175 -0
  53. pyTEMlib/eels_tools/zero_loss_tools.py +264 -0
  54. pyTEMlib/file_reader.py +274 -0
  55. pyTEMlib/file_tools.py +260 -1130
  56. pyTEMlib/get_bote_salvat.py +69 -0
  57. pyTEMlib/graph_tools.py +101 -174
  58. pyTEMlib/graph_viz.py +150 -0
  59. pyTEMlib/image/__init__.py +37 -0
  60. pyTEMlib/image/image_atoms.py +270 -0
  61. pyTEMlib/image/image_clean.py +197 -0
  62. pyTEMlib/image/image_distortion.py +299 -0
  63. pyTEMlib/image/image_fft.py +277 -0
  64. pyTEMlib/image/image_graph.py +926 -0
  65. pyTEMlib/image/image_registration.py +316 -0
  66. pyTEMlib/image/image_utilities.py +309 -0
  67. pyTEMlib/image/image_window.py +421 -0
  68. pyTEMlib/image_tools.py +154 -928
  69. pyTEMlib/kinematic_scattering.py +1 -1
  70. pyTEMlib/probe_tools.py +1 -1
  71. pyTEMlib/test.py +437 -0
  72. pyTEMlib/utilities.py +314 -0
  73. pyTEMlib/version.py +2 -3
  74. pyTEMlib/xrpa_x_sections.py +14 -10
  75. {pytemlib-0.2025.4.2.dist-info → pytemlib-0.2025.9.1.dist-info}/METADATA +13 -16
  76. pytemlib-0.2025.9.1.dist-info/RECORD +86 -0
  77. {pytemlib-0.2025.4.2.dist-info → pytemlib-0.2025.9.1.dist-info}/WHEEL +1 -1
  78. pytemlib-0.2025.9.1.dist-info/top_level.txt +6 -0
  79. pyTEMlib/core_loss_widget.py +0 -721
  80. pyTEMlib/eels_dialog.py +0 -754
  81. pyTEMlib/eels_dialog_utilities.py +0 -1199
  82. pyTEMlib/eels_tools.py +0 -2359
  83. pyTEMlib/file_tools_qt.py +0 -193
  84. pyTEMlib/image_dialog.py +0 -158
  85. pyTEMlib/image_dlg.py +0 -146
  86. pyTEMlib/info_widget.py +0 -1086
  87. pyTEMlib/info_widget3.py +0 -1120
  88. pyTEMlib/low_loss_widget.py +0 -479
  89. pyTEMlib/peak_dialog.py +0 -1129
  90. pyTEMlib/peak_dlg.py +0 -286
  91. pytemlib-0.2025.4.2.dist-info/RECORD +0 -38
  92. pytemlib-0.2025.4.2.dist-info/top_level.txt +0 -1
  93. {pytemlib-0.2025.4.2.dist-info → pytemlib-0.2025.9.1.dist-info}/entry_points.txt +0 -0
  94. {pytemlib-0.2025.4.2.dist-info → pytemlib-0.2025.9.1.dist-info}/licenses/LICENSE +0 -0
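
The file list summarizes the reorganization in this release: the flat eels_tools.py module and large parts of image_tools.py move into the new eels_tools/ and image/ subpackages, the interactive dialog/widget modules (core_loss_widget.py, eels_dialog.py, info_widget.py, peak_dialog.py, ...) are dropped, and a build/lib copy of the package is included in the wheel. As a rough orientation, the sketch below shows how code might import one of the relocated helpers; it is a minimal sketch based on the file names above and the image_tools.py diff further down, not taken from the package documentation.

    # Sketch only: the module path comes from the file list above, and the
    # cart2pol signature is assumed from the image_tools.py diff further down;
    # both may differ slightly in the released package.
    import numpy as np
    from pyTEMlib.image import image_utilities   # coordinate helpers now live in the image subpackage

    points = np.array([[1.0, 0.0], [0.0, 2.0]])  # placeholder (N, 2) spot positions
    rho, phi = image_utilities.cart2pol(points)  # same helper that image_tools.py re-imports below
    print(rho, phi)
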
pyTEMlib/image_tools.py CHANGED
@@ -3,80 +3,31 @@ image_tools.py
3
3
  by Gerd Duscher, UTK
4
4
  part of pyTEMlib
5
5
  MIT license except where stated differently
6
+
7
+ This version is built on top of the pycroscopy.image package of the pycroscopy ecosystem.
6
8
  """
7
9
 
8
10
  import numpy as np
9
- import matplotlib
10
- import matplotlib as mpl
11
- import matplotlib.pylab as plt
12
- import matplotlib.widgets as mwidgets
13
- # from matplotlib.widgets import RectangleSelector
14
-
15
- import sidpy
16
- import pyTEMlib.file_tools as ft
17
- import pyTEMlib.sidpy_tools
18
-
19
- from tqdm.auto import trange, tqdm
20
11
 
21
- # import itertools
22
- from itertools import product
23
-
24
- from scipy import fftpack
25
12
  import scipy
26
- # from scipy import signal
27
- from scipy.interpolate import interp1d # , interp2d
28
- import scipy.optimize as optimization
29
-
30
- # Multidimensional Image library
31
- import scipy.ndimage as ndimage
32
- import scipy.constants as const
33
-
34
- # from scipy.spatial import Voronoi, KDTree, cKDTree
35
-
36
13
  import skimage
14
+ import sklearn
15
+ import matplotlib
16
+ import matplotlib.pylab as plt
37
17
 
38
- import skimage.registration as registration
39
- # from skimage.feature import register_translation # blob_dog, blob_doh
40
- from skimage.feature import peak_local_max
41
- # from skimage.measure import points_in_poly
42
-
43
- # our blob detectors from the scipy image package
44
- from skimage.feature import blob_log # blob_dog, blob_doh
45
-
46
- from sklearn.feature_extraction import image
47
- from sklearn.utils.extmath import randomized_svd
48
- from sklearn.cluster import DBSCAN
49
-
50
- from collections import Counter
51
-
52
- # center diff function
53
- from skimage.filters import threshold_otsu, sobel
54
- from scipy.optimize import leastsq
55
- from sklearn.cluster import DBSCAN
56
-
57
- from ase.build import fcc110
58
-
59
- from scipy.ndimage import rotate
60
- from scipy.interpolate import RegularGridInterpolator
61
- from scipy.signal import fftconvolve
62
-
63
-
64
- _SimpleITK_present = True
65
- try:
66
- import SimpleITK as sitk
67
- except ImportError:
68
- sitk = False
69
- _SimpleITK_present = False
18
+ import sidpy
19
+ import pyTEMlib
70
20
 
71
- if not _SimpleITK_present:
72
- print('SimpleITK not installed; Registration Functions for Image Stacks not available\n' +
73
- 'install with: conda install -c simpleitk simpleitk ')
21
+ ## import all functions of the image package of pycroscopy
22
+ from .image import *
23
+ from .image.image_utilities import pol2cart, cart2pol, xy2polar
74
24
 
75
25
 
76
26
  def get_atomic_pseudo_potential(fov, atoms, size=512, rotation=0):
77
- # Big assumption: the atoms are not near the edge of the unit cell
27
+ """Big assumption: the atoms are not near the edge of the unit cell
78
28
  # If any atoms are close to the edge (ex. [0,0]) then the potential will be clipped
79
29
  # before calling the function, shift the atoms to the center of the unit cell
30
+ """
80
31
 
81
32
  pixel_size = fov / size
82
33
  max_size = int(size * np.sqrt(2) + 1) # Maximum size to accommodate rotation
@@ -91,42 +42,51 @@ def get_atomic_pseudo_potential(fov, atoms, size=512, rotation=0):
91
42
  x = pos[0] / pixel_size
92
43
  y = pos[1] / pixel_size
93
44
  atom_width = 0.5 # Angstrom
94
- gauss_width = atom_width/pixel_size # important for images at various fov. Room for improvement with theory
95
- gauss = pyTEMlib.probe_tools.make_gauss(max_size, max_size, width = gauss_width, x0=x, y0=y)
45
+ # important for images at various fov. Room for improvement with theory
46
+ gauss_width = atom_width/pixel_size
47
+ gauss = pyTEMlib.probe_tools.make_gauss(max_size, max_size,
48
+ width=gauss_width,
49
+ x0=x, y0=y)
96
50
  unit_cell_potential += gauss * atomic_number # gauss is already normalized to 1
97
51
 
98
52
  # Create interpolation function for unit cell potential
99
53
  x_grid = np.linspace(0, fov * max_size / size, max_size)
100
54
  y_grid = np.linspace(0, fov * max_size / size, max_size)
101
- interpolator = RegularGridInterpolator((x_grid, y_grid), unit_cell_potential, bounds_error=False, fill_value=0)
102
-
55
+ interpolator = scipy.interpolate.RegularGridInterpolator((x_grid, y_grid),
56
+ unit_cell_potential,
57
+ bounds_error=False,
58
+ fill_value=0)
103
59
  # Vectorized computation of the full potential map with max_size
104
- x_coords, y_coords = np.meshgrid(np.linspace(0, fov, max_size), np.linspace(0, fov, max_size), indexing="ij")
60
+ x_coords, y_coords = np.meshgrid(np.linspace(0, fov, max_size),
61
+ np.linspace(0, fov, max_size),
62
+ indexing="ij")
105
63
  xtal_x = x_coords % unit_cell_size[0]
106
64
  xtal_y = y_coords % unit_cell_size[1]
107
65
  potential_map = interpolator((xtal_x.ravel(), xtal_y.ravel())).reshape(max_size, max_size)
108
66
 
109
67
  # Rotate and crop the potential map
110
- potential_map = rotate(potential_map, rotation, reshape=False)
68
+ potential_map = scipy.ndimage.rotate(potential_map, rotation, reshape=False)
111
69
  center = potential_map.shape[0] // 2
112
- potential_map = potential_map[center - size // 2:center + size // 2, center - size // 2:center + size // 2]
113
-
70
+ potential_map = potential_map[center - size // 2:center + size // 2,
71
+ center - size // 2:center + size // 2]
114
72
  potential_map = scipy.ndimage.gaussian_filter(potential_map,3)
115
73
 
116
74
  return potential_map
117
75
 
118
76
  def convolve_probe(ab, potential):
77
+ """ Convolve probe with potential using FFT based convolution"""
119
78
  # the pixel sizes should be the exact same as the potential
120
79
  final_sizes = potential.shape
121
80
 
122
81
  # Perform FFT-based convolution
123
82
  pad_height = pad_width = potential.shape[0] // 2
124
- potential = np.pad(potential, ((pad_height, pad_height), (pad_width, pad_width)), mode='constant')
83
+ potential = np.pad(potential, ((pad_height, pad_height),
84
+ (pad_width, pad_width)), mode='constant')
125
85
 
126
- probe, A_k, chi = pyTEMlib.probe_tools.get_probe(ab, potential.shape[0], potential.shape[1], scale = 'mrad', verbose= False)
127
-
128
-
129
- convolved = fftconvolve(potential, probe, mode='same')
86
+ probe, _, _ = pyTEMlib.probe_tools.get_probe(ab, potential.shape[0],
87
+ potential.shape[1],
88
+ scale='mrad', verbose=False)
89
+ convolved = scipy.signal.fftconvolve(potential, probe, mode='same')
130
90
 
131
91
  # Crop to original potential size
132
92
  start_row = pad_height
@@ -134,16 +94,13 @@ def convolve_probe(ab, potential):
134
94
  end_row = start_row + final_sizes[0]
135
95
  end_col = start_col + final_sizes[1]
136
96
 
137
- image = convolved[start_row:end_row, start_col:end_col]
138
-
97
+ image = convolved[start_row:end_row, start_col:end_col]
139
98
  return probe, image
140
99
 
141
-
142
- # Wavelength in 1/nm
143
100
  def get_wavelength(e0):
144
101
  """
145
102
  Calculates the relativistic corrected de Broglie wave length of an electron
146
-
103
+ # Wavelength in 1/nm
147
104
  Parameters
148
105
  ----------
149
106
  e0: float
@@ -153,14 +110,16 @@ def get_wavelength(e0):
153
110
  -------
154
111
  wave length in 1/nm
155
112
  """
156
-
157
- eV = const.e * e0
158
- return const.h/np.sqrt(2*const.m_e*eV*(1+eV/(2*const.m_e*const.c**2)))*10**9
159
-
113
+ ev = scipy.constants.e * e0
114
+ h = scipy.constants.h
115
+ m = scipy.constants.m_e
116
+ c = scipy.constants.c
117
+ return h / np.sqrt(2 * m * ev * (1 + ev / (2 * m * c**2))) * 10**9
160
118
 
161
119
  def fourier_transform(dset: sidpy.Dataset) -> sidpy.Dataset:
162
120
  """
163
- Reads information into dictionary 'tags', performs 'FFT', and provides a smoothed FT and reciprocal
121
+ Reads information into dictionary 'tags', performs 'FFT',
122
+ and provides a smoothed FT and reciprocal
164
123
  and intensity limits for visualization.
165
124
 
166
125
  Parameters
@@ -182,8 +141,8 @@ def fourier_transform(dset: sidpy.Dataset) -> sidpy.Dataset:
182
141
  assert isinstance(dset, sidpy.Dataset), 'Expected a sidpy Dataset'
183
142
 
184
143
  selection = []
185
- image_dims = pyTEMlib.sidpy_tools.get_image_dims(dset)
186
- if dset.data_type == sidpy.DataType.IMAGE_STACK:
144
+ image_dims = dset.get_image_dims(return_axis=True)
145
+ if dset.data_type.name == 'IMAGE_STACK':
187
146
  stack_dim = dset.get_dimensions_by_type('TEMPORAL')
188
147
 
189
148
  if len(image_dims) != 2:
@@ -193,184 +152,73 @@ def fourier_transform(dset: sidpy.Dataset) -> sidpy.Dataset:
193
152
  if i in image_dims:
194
153
  selection.append(slice(None))
195
154
  if len(stack_dim) == 0:
196
- stack_dims = i
155
+ stack_dim = i
197
156
  selection.append(slice(None))
198
157
  elif i in stack_dim:
199
- stack_dims = i
158
+ stack_dim = i
200
159
  selection.append(slice(None))
201
160
  else:
202
161
  selection.append(slice(0, 1))
203
162
 
204
163
  image_stack = np.squeeze(np.array(dset)[selection])
205
164
  new_image = np.sum(np.array(image_stack), axis=stack_dim)
206
- elif dset.data_type == sidpy.DataType.IMAGE:
165
+ elif dset.data_type.name == 'IMAGE':
207
166
  new_image = np.array(dset)
208
167
  else:
209
168
  return
210
169
 
211
170
  new_image = new_image - new_image.min()
212
-
213
- fft_transform = (np.fft.fftshift(np.fft.fft2(new_image)))
214
-
215
- image_dims = pyTEMlib.sidpy_tools.get_image_dims(dset)
171
+ fft_transform = (np.fft.fftshift(np.fft.fft2(np.array(new_image))))
216
172
 
217
- units_x = '1/' + dset._axes[image_dims[0]].units
218
- units_y = '1/' + dset._axes[image_dims[1]].units
173
+ image_dims = dset.get_image_dims(return_axis=True)
219
174
 
175
+ units_x = '1/' + image_dims[0].units
176
+ units_y = '1/' + image_dims[1].units
220
177
  fft_dset = sidpy.Dataset.from_array(fft_transform)
221
178
  fft_dset.quantity = dset.quantity
222
179
  fft_dset.units = 'a.u.'
223
180
  fft_dset.data_type = 'IMAGE'
224
181
  fft_dset.source = dset.title
225
182
  fft_dset.modality = 'fft'
226
-
227
- fft_dset.set_dimension(0, sidpy.Dimension(np.fft.fftshift(np.fft.fftfreq(new_image.shape[0],
228
- d=dset.x[1]-dset.x[0])),
229
- name='u', units=units_x, dimension_type='RECIPROCAL',
183
+ axis = np.fft.fftshift(np.fft.fftfreq(new_image.shape[0], d=dset.x[1]-dset.x[0]))
184
+ fft_dset.set_dimension(0, sidpy.Dimension(axis,
185
+ name='u', units=units_x,
186
+ dimension_type='RECIPROCAL',
230
187
  quantity='reciprocal_length'))
231
- fft_dset.set_dimension(1, sidpy.Dimension(np.fft.fftshift(np.fft.fftfreq(new_image.shape[1],
232
- d=dset.y[1]- dset.y[0])),
233
- name='v', units=units_y, dimension_type='RECIPROCAL',
188
+ axis = np.fft.fftshift(np.fft.fftfreq(new_image.shape[1], d=dset.y[1]-dset.y[0]))
189
+ fft_dset.set_dimension(1, sidpy.Dimension(axis,
190
+ name='v', units=units_y,
191
+ dimension_type='RECIPROCAL',
234
192
  quantity='reciprocal_length'))
235
-
236
193
  return fft_dset
237
194
 
238
195
 
239
- def power_spectrum(dset, smoothing=3):
240
- """
241
- Calculate power spectrum
242
-
243
- Parameters
244
- ----------
245
- dset: sidpy.Dataset
246
- image
247
- smoothing: int
248
- Gaussian smoothing
249
-
250
- Returns
251
- -------
252
- power_spec: sidpy.Dataset
253
- power spectrum with correct dimensions
254
-
255
- """
256
-
257
- fft_transform = fourier_transform(dset) # dset.fft()
258
- fft_mag = np.abs(fft_transform)
259
- fft_mag2 = ndimage.gaussian_filter(fft_mag, sigma=(smoothing, smoothing), order=0)
260
-
261
- power_spec = fft_transform.like_data(np.log(1.+fft_mag2))
262
-
263
- # prepare mask
264
- x, y = np.meshgrid(power_spec.v.values, power_spec.u.values)
265
- mask = np.zeros(power_spec.shape)
266
-
267
- mask_spot = x ** 2 + y ** 2 > 1 ** 2
268
- mask = mask + mask_spot
269
- mask_spot = x ** 2 + y ** 2 < 11 ** 2
270
- mask = mask + mask_spot
271
-
272
- mask[np.where(mask == 1)] = 0 # just in case of overlapping disks
273
-
274
- minimum_intensity = np.array(power_spec)[np.where(mask == 2)].min() * 0.95
275
- maximum_intensity = np.array(power_spec)[np.where(mask == 2)].max() * 1.05
276
- power_spec.metadata = {'fft': {'smoothing': smoothing,
277
- 'minimum_intensity': minimum_intensity, 'maximum_intensity': maximum_intensity}}
278
- power_spec.title = 'power spectrum ' + power_spec.source
279
-
280
- return power_spec
281
-
282
-
283
- def diffractogram_spots(dset, spot_threshold, return_center=True, eps=0.1):
284
- """Find spots in diffractogram and sort them by distance from center
285
-
286
- Uses blob_log from scipy.spatial
287
-
288
- Parameters
289
- ----------
290
- dset: sidpy.Dataset
291
- diffractogram
292
- spot_threshold: float
293
- threshold for blob finder
294
- return_center: bool, optional
295
- return center of image if true
296
- eps: float, optional
297
- threshold for blob finder
298
-
299
- Returns
300
- -------
301
- spots: numpy array
302
- sorted position (x,y) and radius (r) of all spots
303
- """
304
-
305
- # spot detection (for future reference there is no symmetry assumed here)
306
- data = np.array(np.log(1+np.abs(dset)))
307
- data = data - data.min()
308
- data = data/data.max()
309
- # some images are strange and blob_log does not work on the power spectrum
310
- try:
311
- spots_random = blob_log(data, max_sigma=5, threshold=spot_threshold)
312
- except ValueError:
313
- spots_random = peak_local_max(np.array(data.T), min_distance=3, threshold_rel=spot_threshold)
314
- spots_random = np.hstack(spots_random, np.zeros((spots_random.shape[0], 1)))
315
-
316
- print(f'Found {spots_random.shape[0]} reflections')
317
-
318
- # Needed for conversion from pixel to Reciprocal space
319
- image_dims = dset.get_image_dims(return_axis=True)
320
- rec_scale = np.array([image_dims[0].slope, image_dims[1].slope])
321
-
322
- spots_random[:, :2] = spots_random[:, :2]*rec_scale+[dset.u.values[0], dset.v.values[0]]
323
- # sort reflections
324
- spots_random[:, 2] = np.linalg.norm(spots_random[:, 0:2], axis=1)
325
- spots_index = np.argsort(spots_random[:, 2])
326
- spots = spots_random[spots_index]
327
- # third row is angles
328
- spots[:, 2] = np.arctan2(spots[:, 0], spots[:, 1])
329
-
330
- center = [0, 0]
331
-
332
- if return_center:
333
- points = spots[:, 0:2]
334
-
335
- # Calculate the midpoints between all points
336
- reshaped_points = points[:, np.newaxis, :]
337
- midpoints = (reshaped_points + reshaped_points.transpose(1, 0, 2)) / 2.0
338
- midpoints = midpoints.reshape(-1, 2)
339
-
340
- # Find the most dense cluster of midpoints
341
- dbscan = DBSCAN(eps=eps, min_samples=2)
342
- labels = dbscan.fit_predict(midpoints)
343
- cluster_counter = Counter(labels)
344
- largest_cluster_label = max(cluster_counter, key=cluster_counter.get)
345
- largest_cluster_points = midpoints[labels == largest_cluster_label]
346
-
347
- # Average of these midpoints must be the center
348
- center = np.mean(largest_cluster_points, axis=0)
349
-
350
- return spots, center
351
-
352
-
353
- def center_diffractogram(dset, return_plot = True, smoothing = 1, min_samples = 10, beamstop_size = 0.1):
196
+ def center_diffractogram(dset, return_plot = True, smoothing = 1,
197
+ min_samples = 10, beamstop_size = 0.1):
198
+ """Find center of diffractogram by fitting a circle to the diffraction ring"""
199
+ mean_radius = 0
354
200
  try:
355
201
  diff = np.array(dset).T.astype(np.float16)
356
202
  diff[diff < 0] = 0
357
- threshold = threshold_otsu(diff)
203
+ threshold = skimage.filters.threshold_otsu(diff)
358
204
  binary = (diff > threshold).astype(float)
359
- smoothed_image = ndimage.gaussian_filter(binary, sigma=smoothing) # Smooth before edge detection
360
- smooth_threshold = threshold_otsu(smoothed_image)
205
+ # Smooth before edge detection
206
+ smoothed_image = scipy.ndimage.gaussian_filter(binary, sigma=smoothing)
207
+ smooth_threshold = skimage.filters.threshold_otsu(smoothed_image)
361
208
  smooth_binary = (smoothed_image > smooth_threshold).astype(float)
362
209
 
363
210
  # add a circle to mask the beamstop
364
211
  x, y = np.meshgrid(np.arange(dset.shape[0]), np.arange(dset.shape[1]))
365
- circle = (x - dset.shape[0] / 2) ** 2 + (y - dset.shape[1] / 2) ** 2 < (beamstop_size * dset.shape[0] / 2) ** 2
212
+ radius = (x - dset.shape[0] / 2) ** 2 + (y - dset.shape[1] / 2) ** 2
213
+ circle = radius < (beamstop_size * dset.shape[0] / 2) ** 2
366
214
  smooth_binary[circle] = 1
367
-
215
+
368
216
  # Find the edges using the Sobel operator
369
- edges = sobel(smooth_binary)
217
+ edges = skimage.filters.sobel(smooth_binary)
370
218
  edge_points = np.argwhere(edges)
371
219
 
372
220
  # Use DBSCAN to cluster the edge points
373
- db = DBSCAN(eps=10, min_samples=min_samples).fit(edge_points)
221
+ db = sklearn.cluster.DBSCAN(eps=10, min_samples=min_samples).fit(edge_points)
374
222
  labels = db.labels_
375
223
  if len(set(labels)) == 1:
376
224
  raise ValueError("DBSCAN clustering resulted in only one group, check the parameters.")
@@ -383,14 +231,16 @@ def center_diffractogram(dset, return_plot = True, smoothing = 1, min_samples =
383
231
 
384
232
  # Fit a circle to the diffraction ring
385
233
  def calc_distance(c, x, y):
386
- Ri = np.sqrt((x - c[0])**2 + (y - c[1])**2)
387
- return Ri - Ri.mean()
234
+ ri = np.sqrt((x - c[0])**2 + (y - c[1])**2)
235
+ return ri - ri.mean()
388
236
  x_m = np.mean(edge_points[:, 1])
389
237
  y_m = np.mean(edge_points[:, 0])
390
238
  center_guess = x_m, y_m
391
- center, ier = leastsq(calc_distance, center_guess, args=(edge_points[:, 1], edge_points[:, 0]))
392
- mean_radius = np.mean(calc_distance(center, edge_points[:, 1], edge_points[:, 0])) + np.sqrt((edge_points[:, 1] - center[0])**2 + (edge_points[:, 0] - center[1])**2).mean()
393
-
239
+ center, _ = scipy.optimize.leastsq(calc_distance, center_guess,
240
+ args=(edge_points[:, 1], edge_points[:, 0]))
241
+ mean_radius = (np.mean(calc_distance(center, edge_points[:, 1], edge_points[:, 0]))
242
+ + np.sqrt((edge_points[:, 1] - center[0])**2
243
+ + (edge_points[:, 0] - center[1])**2).mean())
394
244
  finally:
395
245
  if return_plot:
396
246
  fig, ax = plt.subplots(1, 5, figsize=(14, 4), sharex=True, sharey=True)
@@ -411,371 +261,9 @@ def center_diffractogram(dset, return_plot = True, smoothing = 1, min_samples =
411
261
  for axis in ax:
412
262
  axis.axis('off')
413
263
  fig.tight_layout()
414
-
415
264
  return center
416
265
 
417
266
 
418
- def adaptive_fourier_filter(dset, spots, low_pass=3, reflection_radius=0.3):
419
- """
420
- Use spots in diffractogram for a Fourier Filter
421
-
422
- Parameters:
423
- -----------
424
- dset: sidpy.Dataset
425
- image to be filtered
426
- spots: np.ndarray(N,2)
427
- sorted spots in diffractogram in 1/nm
428
- low_pass: float
429
- low pass filter in center of diffractogram in 1/nm
430
- reflection_radius: float
431
- radius of masked reflections in 1/nm
432
-
433
- Output:
434
- -------
435
- Fourier filtered image
436
- """
437
-
438
- if not isinstance(dset, sidpy.Dataset):
439
- raise TypeError('We need a sidpy.Dataset')
440
- fft_transform = fourier_transform(dset)
441
-
442
- # prepare mask
443
- x, y = np.meshgrid(fft_transform.v.values, fft_transform.u.values)
444
- mask = np.zeros(dset.shape)
445
-
446
- # mask reflections
447
- for spot in spots:
448
- mask_spot = (x - spot[1]) ** 2 + (y - spot[0]) ** 2 < reflection_radius ** 2 # make a spot
449
- mask = mask + mask_spot # add spot to mask
450
-
451
- # mask zero region larger (low-pass filter = intensity variations)
452
- mask_spot = x ** 2 + y ** 2 < low_pass ** 2
453
- mask = mask + mask_spot
454
- mask[np.where(mask > 1)] = 1
455
- fft_filtered = np.array(fft_transform * mask)
456
-
457
- filtered_image = dset.like_data(np.fft.ifft2(np.fft.fftshift(fft_filtered)).real)
458
- filtered_image.title = 'Fourier filtered ' + dset.title
459
- filtered_image.source = dset.title
460
- filtered_image.metadata = {'analysis': 'adaptive fourier filtered', 'spots': spots,
461
- 'low_pass': low_pass, 'reflection_radius': reflection_radius}
462
- return filtered_image
463
-
464
-
465
- def rotational_symmetry_diffractogram(spots):
466
- """ Test rotational symmetry of diffraction spots"""
467
-
468
- rotation_symmetry = []
469
- for n in [2, 3, 4, 6]:
470
- cc = np.array(
471
- [[np.cos(2 * np.pi / n), np.sin(2 * np.pi / n), 0], [-np.sin(2 * np.pi / n), np.cos(2 * np.pi / n), 0],
472
- [0, 0, 1]])
473
- sym_spots = np.dot(spots, cc)
474
- dif = []
475
- for p0, p1 in product(sym_spots[:, 0:2], spots[:, 0:2]):
476
- dif.append(np.linalg.norm(p0 - p1))
477
- dif = np.array(sorted(dif))
478
-
479
- if dif[int(spots.shape[0] * .7)] < 0.2:
480
- rotation_symmetry.append(n)
481
- return rotation_symmetry
482
-
483
- #####################################################
484
- # Registration Functions
485
- #####################################################
486
-
487
-
488
- def complete_registration(main_dataset, storage_channel=None):
489
- """Rigid and then non-rigid (demon) registration
490
-
491
- Performs rigid and then non-rigid registration, please see individual functions:
492
- - rigid_registration
493
- - demon_registration
494
-
495
- Parameters
496
- ----------
497
- main_dataset: sidpy.Dataset
498
- dataset of data_type 'IMAGE_STACK' to be registered
499
- storage_channel: h5py.Group
500
- optional - location in hdf5 file to store datasets
501
-
502
- Returns
503
- -------
504
- non_rigid_registered: sidpy.Dataset
505
- rigid_registered_dataset: sidpy.Dataset
506
-
507
- """
508
-
509
- if not isinstance(main_dataset, sidpy.Dataset):
510
- raise TypeError('We need a sidpy.Dataset')
511
- if main_dataset.data_type.name != 'IMAGE_STACK':
512
- raise TypeError('Registration makes only sense for an image stack')
513
-
514
- print('Rigid_Registration')
515
-
516
- rigid_registered_dataset = rigid_registration(main_dataset)
517
-
518
- print(rigid_registered_dataset)
519
- rigid_registered_dataset.data_type = 'IMAGE_STACK'
520
- print('Non-Rigid_Registration')
521
-
522
- non_rigid_registered = demon_registration(rigid_registered_dataset)
523
- return non_rigid_registered, rigid_registered_dataset
524
-
525
-
526
- def demon_registration(dataset, verbose=False):
527
- """
528
- Diffeomorphic Demon Non-Rigid Registration
529
-
530
- Depends on:
531
- simpleITK and numpy
532
- Please Cite: http://www.simpleitk.org/SimpleITK/project/parti.html
533
- and T. Vercauteren, X. Pennec, A. Perchant and N. Ayache
534
- Diffeomorphic Demons Using ITK\'s Finite Difference Solver Hierarchy
535
- The Insight Journal, http://hdl.handle.net/1926/510 2007
536
-
537
- Parameters
538
- ----------
539
- dataset: sidpy.Dataset
540
- stack of image after rigid registration and cropping
541
- verbose: boolean
542
- optional for increased output
543
- Returns
544
- -------
545
- dem_reg: stack of images with non-rigid registration
546
-
547
- Example
548
- -------
549
- dem_reg = demon_reg(stack_dataset, verbose=False)
550
- """
551
-
552
- if not isinstance(dataset, sidpy.Dataset):
553
- raise TypeError('We need a sidpy.Dataset')
554
- if dataset.data_type.name != 'IMAGE_STACK':
555
- raise TypeError('Registration makes only sense for an image stack')
556
-
557
- dem_reg = np.zeros(dataset.shape)
558
- nimages = dataset.shape[0]
559
- if verbose:
560
- print(nimages)
561
- # create fixed image by summing over rigid registration
562
-
563
- fixed_np = np.average(np.array(dataset), axis=0)
564
-
565
- if not _SimpleITK_present:
566
- print('This feature is not available: \n Please install simpleITK with: conda install simpleitk -c simpleitk')
567
-
568
- fixed = sitk.GetImageFromArray(fixed_np)
569
- fixed = sitk.DiscreteGaussian(fixed, 2.0)
570
-
571
- # demons = sitk.SymmetricForcesDemonsRegistrationFilter()
572
- demons = sitk.DiffeomorphicDemonsRegistrationFilter()
573
-
574
- demons.SetNumberOfIterations(200)
575
- demons.SetStandardDeviations(1.0)
576
-
577
- resampler = sitk.ResampleImageFilter()
578
- resampler.SetReferenceImage(fixed)
579
- resampler.SetInterpolator(sitk.sitkBSpline)
580
- resampler.SetDefaultPixelValue(0)
581
-
582
- for i in trange(nimages):
583
- moving = sitk.GetImageFromArray(dataset[i])
584
- moving_f = sitk.DiscreteGaussian(moving, 2.0)
585
- displacement_field = demons.Execute(fixed, moving_f)
586
- out_tx = sitk.DisplacementFieldTransform(displacement_field)
587
- resampler.SetTransform(out_tx)
588
- out = resampler.Execute(moving)
589
- dem_reg[i, :, :] = sitk.GetArrayFromImage(out)
590
-
591
- print(':-)')
592
- print('You have successfully completed Diffeomorphic Demons Registration')
593
-
594
- demon_registered = dataset.like_data(dem_reg)
595
- demon_registered.title = 'Non-Rigid Registration'
596
- demon_registered.source = dataset.title
597
-
598
- demon_registered.metadata =dataset.metadata.copy()
599
- if 'analysis' not in demon_registered.metadata:
600
- demon_registered.metadata['analysis'] = {}
601
- demon_registered.metadata['analysis']['non_rigid_demon_registration'] = {'package': 'simpleITK',
602
- 'method': 'DiscreteGaussian',
603
- 'variance': 2,
604
- 'input_dataset': dataset.source}
605
- demon_registered.data_type = 'IMAGE_STACK'
606
- return demon_registered
607
-
608
-
609
- ###############################
610
- # Rigid Registration New 05/09/2020
611
-
612
- def rigid_registration(dataset, normalization=None):
613
- """
614
- Rigid registration of image stack with pixel accuracy
615
-
616
- Uses simple cross_correlation
617
- (we determine drift from one image to next)
618
-
619
- Parameters
620
- ----------
621
- dataset: sidpy.Dataset
622
- sidpy dataset with image_stack dataset
623
-
624
- Returns
625
- -------
626
- rigid_registered: sidpy.Dataset
627
- Registered Stack and drift (with respect to center image)
628
- """
629
-
630
- if not isinstance(dataset, sidpy.Dataset):
631
- raise TypeError('We need a sidpy.Dataset')
632
- if dataset.data_type.name != 'IMAGE_STACK':
633
- raise TypeError('Registration makes only sense for an image stack')
634
-
635
- if isinstance (normalization, str):
636
- if normalization.lower() != 'phase':
637
- nomralization = None
638
- else:
639
- normalization = None
640
-
641
- if dataset.get_dimensions_by_type('TEMPORAL')[0] != 0:
642
- x = dataset.x
643
- y = dataset.y
644
- z = dataset.z
645
- metadata = dataset.metadata.copy()
646
- original_metadata = dataset.original_metadata.copy()
647
- arr = np.rollaxis(np.array(dataset), 2, 0)
648
- dataset = sidpy.Dataset.from_array(arr, title=dataset.title, data_type='IMAGE_STACK',
649
- quantity=dataset.quantity, units=dataset.units)
650
- dataset.set_dimension(0, sidpy.Dimension(z.values, name='frame', units='frame', quantity='time',
651
- dimension_type='temporal'))
652
- dataset.set_dimension(1, x)
653
- dataset.set_dimension(2, y)
654
- dataset.metadata = metadata
655
- dataset.original_metadata = original_metadata
656
-
657
- stack_dim = dataset.get_dimensions_by_type('TEMPORAL', return_axis=True)[0]
658
- image_dim = dataset.get_image_dims(return_axis=True)
659
- if len(image_dim) != 2:
660
- raise ValueError('need at least two SPATIAL dimension for an image stack')
661
-
662
- relative_drift = [[0., 0.]]
663
- im1 = np.fft.fft2(np.array(dataset[0]))
664
- for i in range(1, len(stack_dim)):
665
- im2 = np.fft.fft2(np.array(dataset[i]))
666
- shift, error, _ = skimage.registration.phase_cross_correlation(im1, im2, normalization=normalization, space='fourier')
667
- print(shift)
668
- im1 = im2.copy()
669
- relative_drift.append(shift)
670
-
671
- rig_reg, drift = pyTEMlib.image_tools.rig_reg_drift(dataset, relative_drift)
672
- crop_reg, input_crop = pyTEMlib.image_tools.crop_image_stack(rig_reg, drift)
673
-
674
- rigid_registered = sidpy.Dataset.from_array(crop_reg,
675
- title='Rigid Registration',
676
- data_type='IMAGE_STACK',
677
- quantity=dataset.quantity,
678
- units=dataset.units)
679
- rigid_registered.title = 'Rigid_Registration'
680
- rigid_registered.source = dataset.title
681
- rigid_registered.metadata['analysis'] = {'rigid_registration': {'drift': drift,
682
- 'input_crop': input_crop, 'input_shape': dataset.shape[1:]}}
683
- rigid_registered.metadata['experiment'] = dataset.metadata['experiment'].copy()
684
- rigid_registered.set_dimension(0, sidpy.Dimension(np.arange(rigid_registered.shape[0]),
685
- name='frame', units='frame', quantity='time',
686
- dimension_type='temporal'))
687
-
688
- array_x = image_dim[0].values[input_crop[0]:input_crop[1]]
689
- rigid_registered.set_dimension(1, sidpy.Dimension(array_x, name='x',
690
- units='nm', quantity='Length',
691
- dimension_type='spatial'))
692
- array_y =image_dim[1].values[input_crop[2]:input_crop[3]]
693
- rigid_registered.set_dimension(2, sidpy.Dimension(array_y, name='y',
694
- units='nm', quantity='Length',
695
- dimension_type='spatial'))
696
- rigid_registered.data_type = 'IMAGE_STACK'
697
- return rigid_registered.rechunk({0: 'auto', 1: -1, 2: -1})
698
-
699
-
700
- def rig_reg_drift(dset, rel_drift):
701
- """ Shifting images on top of each other
702
-
703
- Uses relative drift to shift images on top of each other,
704
- with center image as reference.
705
- Shifting is done with shift routine of ndimage from scipy.
706
- This function is used by rigid_registration routine
707
-
708
- Parameters
709
- ----------
710
- dset: sidpy.Dataset
711
- dataset with image_stack
712
- rel_drift:
713
- relative_drift from image to image as list of [shiftx, shifty]
714
-
715
- Returns
716
- -------
717
- stack: numpy array
718
- drift: list of drift in pixel
719
- """
720
-
721
- frame_dim = []
722
- spatial_dim = []
723
- selection = []
724
-
725
- for i, axis in dset._axes.items():
726
- if axis.dimension_type.name == 'SPATIAL':
727
- spatial_dim.append(i)
728
- selection.append(slice(None))
729
- else:
730
- frame_dim.append(i)
731
- selection.append(slice(0, 1))
732
-
733
- if len(spatial_dim) != 2:
734
- print('need two spatial dimensions')
735
- if len(frame_dim) != 1:
736
- print('need one frame dimensions')
737
-
738
- rig_reg = np.zeros([dset.shape[frame_dim[0]], dset.shape[spatial_dim[0]], dset.shape[spatial_dim[1]]])
739
-
740
- # absolute drift
741
- drift = np.array(rel_drift).copy()
742
-
743
- drift[0] = [0, 0]
744
- for i in range(1, drift.shape[0]):
745
- drift[i] = drift[i - 1] + rel_drift[i]
746
- center_drift = drift[int(drift.shape[0] / 2)]
747
- drift = drift - center_drift
748
- # Shift images
749
- for i in range(rig_reg.shape[0]):
750
- selection[frame_dim[0]] = slice(i, i+1)
751
- # Now we shift
752
- rig_reg[i, :, :] = ndimage.shift(dset[tuple(selection)].squeeze().compute(),
753
- [drift[i, 0], drift[i, 1]], order=3)
754
- return rig_reg, drift
755
-
756
-
757
- def crop_image_stack(rig_reg, drift):
758
- """Crop images in stack according to drift
759
-
760
- This function is used by rigid_registration routine
761
-
762
- Parameters
763
- ----------
764
- rig_reg: numpy array (N,x,y)
765
- drift: list (2,B)
766
-
767
- Returns
768
- -------
769
- numpy array
770
- """
771
- xpmax = int(rig_reg.shape[1] - -np.floor(np.min(np.array(drift)[:, 0])))
772
- xpmin = int(np.ceil(np.max(np.array(drift)[:, 0])))
773
- ypmax = int(rig_reg.shape[1] - -np.floor(np.min(np.array(drift)[:, 1])))
774
- ypmin = int(np.ceil(np.max(np.array(drift)[:, 1])))
775
-
776
- return rig_reg[:, xpmin:xpmax, ypmin:ypmax:], [xpmin, xpmax, ypmin, ypmax]
777
-
778
-
779
267
  class ImageWithLineProfile:
780
268
  """Image with line profile"""
781
269
 
@@ -792,6 +280,13 @@ class ImageWithLineProfile:
792
280
  self.line, = self.ax.plot([0], [0], color='orange') # empty line
793
281
  self.end_x = self.line.get_xdata()
794
282
  self.end_y = self.line.get_ydata()
283
+ self.x = 0
284
+ self.y = 0
285
+ self.z = 0
286
+ self.start_x = self.end_x
287
+ self.start_y = self.end_y
288
+ self.moved_point = [0, 0]
289
+ self.new_point = [0, 0]
795
290
  self.cid = self.line.figure.canvas.mpl_connect('button_press_event', self)
796
291
 
797
292
  def __call__(self, event):
@@ -809,6 +304,7 @@ class ImageWithLineProfile:
809
304
  self.update()
810
305
 
811
306
  def update(self):
307
+ """ Update line profile"""
812
308
  if not self.line_plot:
813
309
  self.line_plot = True
814
310
  self.figure.clear()
@@ -831,7 +327,7 @@ class ImageWithLineProfile:
831
327
  y = np.linspace(y0, y1, num)*(self.data.shape[0]/self.extent[1])
832
328
 
833
329
  # Extract the values along the line, using cubic interpolation
834
- zi2 = ndimage.map_coordinates(self.data.T, np.vstack((x, y)))
330
+ zi2 = scipy.ndimage.map_coordinates(self.data.T, np.vstack((x, y)))
835
331
 
836
332
  x_axis = np.linspace(0, length_plot, len(zi2))
837
333
  self.x = x_axis
@@ -845,34 +341,38 @@ class ImageWithLineProfile:
845
341
 
846
342
 
847
343
  class LineSelector(matplotlib.widgets.PolygonSelector):
344
+ """ Line selector with adjustable line width"""
848
345
  def __init__(self, ax, onselect, line_width=1, **kwargs):
849
346
  super().__init__(ax, onselect, **kwargs)
850
347
  bounds = ax.viewLim.get_points()
851
348
  np.max(bounds[0])
852
- self.line_verts = np.array([[np.max(bounds[1])/2, np.max(bounds[0])/5], [np.max(bounds[1])/2,
853
- np.max(bounds[0])/5+1],
854
- [np.max(bounds[1])/5, np.max(bounds[0])/2], [np.max(bounds[1])/5,
855
- np.max(bounds[0])/2]])
349
+ self.line_verts = np.array([[np.max(bounds[1])/2, np.max(bounds[0])/5],
350
+ [np.max(bounds[1])/2, np.max(bounds[0])/5+1],
351
+ [np.max(bounds[1])/5, np.max(bounds[0])/2],
352
+ [np.max(bounds[1])/5, np.max(bounds[0])/2]])
856
353
  self.verts = self.line_verts
857
354
  self.line_width = line_width
858
355
 
859
356
  def set_linewidth(self, line_width=None):
357
+ """ Set the line width of the line selector"""
860
358
  if line_width is not None:
861
359
  self.line_width = line_width
862
360
 
863
- m = -(self.line_verts[0, 1]-self.line_verts[3, 1])/(self.line_verts[0, 0]-self.line_verts[3, 0])
361
+ m = -(self.line_verts[0, 1]-self.line_verts[3, 1])/(self.line_verts[0, 0]
362
+ -self.line_verts[3, 0])
864
363
  c = 1/np.sqrt(1+m**2)
865
364
  s = c*m
866
- self.line_verts[1] = [self.line_verts[0, 0]+self.line_width*s, self.line_verts[0, 1]+self.line_width*c]
867
- self.line_verts[2] = [self.line_verts[3, 0]+self.line_width*s, self.line_verts[3, 1]+self.line_width*c]
868
-
365
+ self.line_verts[1] = [self.line_verts[0, 0]+self.line_width*s,
366
+ self.line_verts[0, 1]+self.line_width*c]
367
+ self.line_verts[2] = [self.line_verts[3, 0]+self.line_width*s,
368
+ self.line_verts[3, 1]+self.line_width*c]
869
369
  self.verts = self.line_verts.copy()
870
370
 
871
371
  def onmove(self, event):
872
372
  super().onmove(event)
873
373
  if np.max(np.linalg.norm(self.line_verts-self.verts, axis=1)) > 1:
874
374
  self.moved_point = np.argmax(np.linalg.norm(self.line_verts-self.verts, axis=1))
875
-
375
+
876
376
  self.new_point = self.verts[self.moved_point]
877
377
  moved_point = int(np.floor(self.moved_point/2)*3)
878
378
  self.moved_point = moved_point
@@ -881,13 +381,16 @@ class LineSelector(matplotlib.widgets.PolygonSelector):
881
381
 
882
382
  def get_profile(dataset, line, spline_order=-1):
883
383
  """
884
- This function extracts a line profile from a given dataset. The line profile is a representation of the data values
885
- along a specified line in the dataset. This function works for both image and spectral image data types.
384
+ This function extracts a line profile from a given dataset.
385
+ The line profile is a representation of the data values
386
+ along a specified line in the dataset.
387
+ This function works for both image and spectral image data types.
886
388
 
887
389
  Args:
888
390
  dataset (sidpy.Dataset): The input dataset from which to extract the line profile.
889
391
  line (list): A list specifying the line along which the profile should be extracted.
890
- spline_order (int, optional): The order of the spline interpolation to use. Default is -1, which means no interpolation.
392
+ spline_order (int, optional): The order of the spline interpolation to use.
393
+ Default is -1, which means no interpolation.
891
394
 
892
395
  Returns:
893
396
  profile_dataset (sidpy.Dataset): A new sidpy.Dataset containing the line profile.
@@ -900,22 +403,23 @@ def get_profile(dataset, line, spline_order=-1):
900
403
  xv /= image_dims[0].slope
901
404
  yv /= image_dims[1].slope
902
405
  profile = scipy.ndimage.map_coordinates(np.array(dataset), [xv, yv])
903
-
406
+
904
407
  profile_dataset = sidpy.Dataset.from_array(profile.sum(axis=0))
905
408
  profile_dataset.data_type='spectrum'
906
409
  profile_dataset.units = dataset.units
907
410
  profile_dataset.quantity = dataset.quantity
908
- profile_dataset.set_dimension(0, sidpy.Dimension(np.linspace(xv[0,0], xv[-1,-1], profile_dataset.shape[0]),
909
- name='x', units=dataset.x.units, quantity=dataset.x.quantity,
411
+ profile_dataset.set_dimension(0, sidpy.Dimension(np.linspace(xv[0,0], xv[-1,-1],
412
+ profile_dataset.shape[0]),
413
+ name='x', units=dataset.x.units,
414
+ quantity=dataset.x.quantity,
910
415
  dimension_type='spatial'))
911
416
 
912
- profile_dataset
913
-
914
417
  if dataset.data_type.name == 'SPECTRAL_IMAGE':
915
418
  spectral_axis = dataset.get_spectral_dims(return_axis=True)[0]
916
419
  if spline_order > -1:
917
420
  xv, yv, zv = get_line_selection_points_interpolated(line, z_length=dataset.shape[2])
918
- profile = scipy.ndimage.map_coordinates(np.array(dataset), [xv, yv, zv], order=spline_order)
421
+ profile = scipy.ndimage.map_coordinates(np.array(dataset), [xv, yv, zv],
422
+ order=spline_order)
919
423
  profile = profile.sum(axis=0)
920
424
  profile = np.stack([profile, profile], axis=1)
921
425
  start = xv[0, 0, 0]
@@ -927,20 +431,21 @@ def get_profile(dataset, line, spline_order=-1):
927
431
  profile_dataset.data_type='spectral_image'
928
432
  profile_dataset.units = dataset.units
929
433
  profile_dataset.quantity = dataset.quantity
930
- profile_dataset.set_dimension(0, sidpy.Dimension(np.arange(profile_dataset.shape[0])+start,
931
- name='x', units=dataset.x.units, quantity=dataset.x.quantity,
434
+ profile_dataset.set_dimension(0, sidpy.Dimension(np.arange(profile_dataset.shape[0])+start,
435
+ name='x', units=dataset.x.units,
436
+ quantity=dataset.x.quantity,
932
437
  dimension_type='spatial'))
933
- profile_dataset.set_dimension(1, sidpy.Dimension([0, 1],
934
- name='y', units=dataset.x.units, quantity=dataset.x.quantity,
438
+ profile_dataset.set_dimension(1, sidpy.Dimension([0, 1],
439
+ name='y', units=dataset.x.units,
440
+ quantity=dataset.x.quantity,
935
441
  dimension_type='spatial'))
936
-
442
+
937
443
  profile_dataset.set_dimension(2, spectral_axis)
938
444
  return profile_dataset
939
445
 
940
446
 
941
-
942
447
  def get_line_selection_points_interpolated(line, z_length=1):
943
-
448
+ """ Get line selection points from line selector with interpolation in z direction"""
944
449
  start_point = line.line_verts[3]
945
450
  right_point = line.line_verts[0]
946
451
  low_point = line.line_verts[2]
@@ -952,7 +457,7 @@ def get_line_selection_points_interpolated(line, z_length=1):
952
457
  m = (right_point[1] - start_point[1]) / (right_point[0] - start_point[0])
953
458
  length_x = int(abs(start_point[0]-right_point[0]))
954
459
  length_v = int(np.linalg.norm(start_point-right_point))
955
-
460
+
956
461
  linewidth = int(abs(start_point[1]-low_point[1]))
957
462
  x = np.linspace(0,length_x, length_v)
958
463
  y = np.linspace(0,linewidth, line.line_width)
@@ -963,19 +468,17 @@ def get_line_selection_points_interpolated(line, z_length=1):
963
468
  y = np.atleast_2d(y).repeat(z_length, axis=0).T
964
469
  else:
965
470
  xv, yv = np.meshgrid(x, y)
966
-
967
-
968
- yv = yv + x*m + start_point[1]
471
+ yv = yv + x*m + start_point[1]
969
472
  xv = (xv.swapaxes(0,1) -y*m ).swapaxes(0,1) + start_point[0]
970
473
 
971
474
  if z_length > 1:
972
475
  return xv, yv, zv
973
- else:
476
+ else:
974
477
  return xv, yv
975
478
 
976
479
 
977
480
  def get_line_selection_points(line):
978
-
481
+ """ Get line selection points from line selector"""
979
482
  start_point = line.line_verts[3]
980
483
  right_point = line.line_verts[0]
981
484
  low_point = line.line_verts[2]
@@ -987,19 +490,19 @@ def get_line_selection_points(line):
987
490
  m = (right_point[1] - start_point[1]) / (right_point[0] - start_point[0])
988
491
  length_x = int(abs(start_point[0]-right_point[0]))
989
492
  length_v = int(np.linalg.norm(start_point-right_point))
990
-
493
+
991
494
  linewidth = int(abs(start_point[1]-low_point[1]))
992
495
  x = np.linspace(0,length_x, length_v)
993
496
  y = np.linspace(0,linewidth, line.line_width)
994
497
  xv, yv = np.meshgrid(x, y)
995
-
996
- yy = yv +x*m+start_point[1]
498
+
499
+ yy = yv +x*m+start_point[1]
997
500
  xx = (xv.T -y*m ).T + start_point[0]
998
-
999
501
  return xx, yy
1000
502
 
1001
503
 
1002
504
  def get_line_profile(data, xv, yv, z_length):
505
+ """ Get line profile from data array"""
1003
506
  profile = np.zeros([len(xv[0]), 2, z_length])
1004
507
  for index_x in range(xv.shape[1]):
1005
508
  for index_y in range(xv.shape[0]):
@@ -1008,7 +511,7 @@ def get_line_profile(data, xv, yv, z_length):
1008
511
  if x< data.shape[0] and x>0 and y < data.shape[1] and y>0:
1009
512
  profile[index_x, 0] +=data[x, y]
1010
513
  return profile
1011
-
514
+
1012
515
 
1013
516
  def histogram_plot(image_tags):
1014
517
  """interactive histogram"""
@@ -1016,7 +519,6 @@ def histogram_plot(image_tags):
1016
519
  color_map_list = ['gray', 'viridis', 'jet', 'hot']
1017
520
  if 'minimum_intensity' not in image_tags:
1018
521
  image_tags['minimum_intensity'] = image_tags['plotimage'].min()
1019
- minimum_intensity = image_tags['minimum_intensity']
1020
522
  if 'maximum_intensity' not in image_tags:
1021
523
  image_tags['maximum_intensity'] = image_tags['plotimage'].max()
1022
524
  data = image_tags['plotimage']
@@ -1027,20 +529,19 @@ def histogram_plot(image_tags):
1027
529
 
1028
530
  cmap = plt.cm.get_cmap(image_tags['color_map'])
1029
531
  colors = cmap(np.linspace(0., 1., nbins))
1030
- norm2 = mpl.colors.Normalize(vmin=vmin, vmax=vmax)
532
+ norm2 = matplotlib.colors.Normalize(vmin=vmin, vmax=vmax)
1031
533
  hist, bin_edges = np.histogram(data, np.linspace(vmin, vmax, nbins), density=True)
1032
534
 
1033
535
  width = bin_edges[1]-bin_edges[0]
1034
-
536
+ event2 = None
1035
537
  def onselect(vmin, vmax):
1036
538
  ax1.clear()
1037
539
  cmap = plt.cm.get_cmap(image_tags['color_map'])
1038
540
  colors = cmap(np.linspace(0., 1., nbins))
1039
- norm2 = mpl.colors.Normalize(vmin=vmin, vmax=vmax)
541
+ norm2 = matplotlib.colors.Normalize(vmin=vmin, vmax=vmax)
1040
542
  hist2, bin_edges2 = np.histogram(data, np.linspace(vmin, vmax, nbins), density=True)
1041
543
 
1042
- width2 = (bin_edges2[1]-bin_edges2[0])
1043
-
544
+ width2 = bin_edges2[1]-bin_edges2[0]
1044
545
  for i in range(nbins-1):
1045
546
  histogram[i].xy = (bin_edges2[i], 0)
1046
547
  histogram[i].set_height(hist2[i])
@@ -1049,7 +550,8 @@ def histogram_plot(image_tags):
1049
550
  ax.set_xlim(vmin, vmax)
1050
551
  ax.set_ylim(0, hist2.max()*1.01)
1051
552
 
1052
- cb1 = mpl.colorbar.ColorbarBase(ax1, cmap=cmap, norm=norm2, orientation='horizontal')
553
+ # cb1 = matplotlib.colorbar.ColorbarBase(ax1, cmap=cmap,
554
+ # norm=norm2, orientation='horizontal')
1053
555
 
1054
556
  image_tags['minimum_intensity'] = vmin
1055
557
  image_tags['maximum_intensity'] = vmax
@@ -1057,9 +559,9 @@ def histogram_plot(image_tags):
1057
559
  def onclick(event):
1058
560
  global event2
1059
561
  event2 = event
1060
- print('%s click: button=%d, x=%d, y=%d, xdata=%f, ydata=%f' %
1061
- ('double' if event.dblclick else 'single', event.button,
1062
- event.x, event.y, event.xdata, event.ydata))
562
+ button_click = 'double' if event.dblclick else 'single'
563
+ print(f"{button_click} click: button={event.button},"
564
+ + f" x={event.x}, y={event.y}, xdata={event.xdata}, ydata={event.ydata}")
1063
565
  if event.inaxes == ax1:
1064
566
  if event.button == 3:
1065
567
  ind = color_map_list.index(image_tags['color_map'])+1
@@ -1078,172 +580,19 @@ def histogram_plot(image_tags):
1078
580
  ax = fig2.add_axes([0., 0.2, 0.9, 0.7])
1079
581
  ax1 = fig2.add_axes([0., 0.15, 0.9, 0.05])
1080
582
 
1081
- histogram = ax.bar(bin_edges[0:-1], hist, width=width, color=colors, edgecolor='black', alpha=0.8)
583
+ histogram = ax.bar(bin_edges[0:-1], hist, width=width, color=colors,
584
+ edgecolor='black', alpha=0.8)
1082
585
  onselect(vmin, vmax)
1083
- cb1 = mpl.colorbar.ColorbarBase(ax1, cmap=cmap, norm=norm2, orientation='horizontal')
586
+ cb1 = matplotlib.colorbar.ColorbarBase(ax1, cmap=cmap, norm=norm2, orientation='horizontal')
1084
587
 
1085
588
  rectprops = dict(facecolor='blue', alpha=0.5)
1086
589
 
1087
- span = mwidgets.SpanSelector(ax, onselect, 'horizontal', rectprops=rectprops)
590
+ span = matplotlib.widgets.SpanSelector(ax, onselect, 'horizontal', props=rectprops)
1088
591
 
1089
592
  cid = fig2.canvas.mpl_connect('button_press_event', onclick)
1090
593
  return span
1091
594
 
1092
595
 
1093
- def clean_svd(im, pixel_size=1, source_size=5):
1094
- """De-noising of image by using first component of single value decomposition"""
1095
- patch_size = int(source_size/pixel_size)
1096
- if patch_size < 3:
1097
- patch_size = 3
1098
- patches = image.extract_patches_2d(np.array(im), (patch_size, patch_size))
1099
- patches = patches.reshape(patches.shape[0], patches.shape[1]*patches.shape[2])
1100
-
1101
- num_components = 32
1102
-
1103
- u, s, v = randomized_svd(patches, num_components)
1104
- u_im_size = int(np.sqrt(u.shape[0]))
1105
- reduced_image = u[:, 0].reshape(u_im_size, u_im_size)
1106
- reduced_image = reduced_image/reduced_image.sum()*im.sum()
1107
- if isinstance(im, sidpy.Dataset):
1108
- reduced_image = im.like_data(reduced_image)
1109
- return reduced_image
1110
-
1111
-
1112
- def rebin(im, binning=2):
1113
- """
1114
- rebin an image by the number of pixels in x and y direction given by binning
1115
-
1116
- Parameter
1117
- ---------
1118
- image: numpy array in 2 dimensions
1119
-
1120
- Returns
1121
- -------
1122
- binned image as numpy array
1123
- """
1124
- if len(im.shape) == 2:
1125
- return im.reshape((im.shape[0]//binning, binning, im.shape[1]//binning, binning)).mean(axis=3).mean(1)
1126
- else:
1127
- raise TypeError('not a 2D image')
1128
-
1129
-
1130
- def cart2pol(points):
1131
- """Cartesian to polar coordinate conversion
1132
-
1133
- Parameters
1134
- ---------
1135
- points: float or numpy array
1136
- points to be converted (Nx2)
1137
-
1138
- Returns
1139
- -------
1140
- rho: float or numpy array
1141
- distance
1142
- phi: float or numpy array
1143
- angle
1144
- """
1145
-
1146
- rho = np.linalg.norm(points[:, 0:2], axis=1)
1147
- phi = np.arctan2(points[:, 1], points[:, 0])
1148
-
1149
- return rho, phi
1150
-
1151
-
1152
- def pol2cart(rho, phi):
1153
- """Polar to Cartesian coordinate conversion
1154
-
1155
- Parameters
1156
- ----------
1157
- rho: float or numpy array
1158
- distance
1159
- phi: float or numpy array
1160
- angle
1161
-
1162
- Returns
1163
- -------
1164
- x: float or numpy array
1165
- x coordinates of converted points(Nx2)
1166
- """
1167
-
1168
- x = rho * np.cos(phi)
1169
- y = rho * np.sin(phi)
1170
- return x, y
1171
-
1172
-
1173
- def xy2polar(points, rounding=1e-3):
1174
- """ Conversion from carthesian to polar coordinates
1175
-
1176
- the angles and distances are sorted by r and then phi
1177
- The indices of this sort is also returned
1178
-
1179
- Parameters
1180
- ----------
1181
- points: numpy array
1182
- number of points in axis 0 first two elements in axis 1 are x and y
1183
- rounding: int
1184
- optional rounding in significant digits
1185
-
1186
- Returns
1187
- -------
1188
- r, phi, sorted_indices
1189
- """
1190
-
1191
- r, phi = cart2pol(points)
1192
-
1193
- phi = phi # %np.pi # only positive angles
1194
- r = (np.floor(r/rounding))*rounding # Remove rounding error differences
1195
-
1196
- sorted_indices = np.lexsort((phi, r)) # sort first by r and then by phi
1197
- r = r[sorted_indices]
1198
- phi = phi[sorted_indices]
1199
-
1200
- return r, phi, sorted_indices
1201
-
1202
-
1203
- def cartesian2polar(x, y, grid, r, t, order=3):
1204
- """Transform cartesian grid to polar grid
1205
-
1206
- Used by warp
1207
- """
1208
-
1209
- rr, tt = np.meshgrid(r, t)
1210
-
1211
- new_x = rr*np.cos(tt)
1212
- new_y = rr*np.sin(tt)
1213
-
1214
- ix = interp1d(x, np.arange(len(x)))
1215
- iy = interp1d(y, np.arange(len(y)))
1216
-
1217
- new_ix = ix(new_x.ravel())
1218
- new_iy = iy(new_y.ravel())
1219
-
1220
- return ndimage.map_coordinates(grid, np.array([new_ix, new_iy]), order=order).reshape(new_x.shape)
1221
-
1222
-
1223
- def warp(diff, center):
1224
- """Takes a diffraction pattern (as a sidpy dataset)and warps it to a polar grid"""
1225
-
1226
- # Define original polar grid
1227
- nx = np.shape(diff)[0]
1228
- ny = np.shape(diff)[1]
1229
-
1230
- # Define center pixel
1231
- pix2nm = np.gradient(diff.u.values)[0]
1232
-
1233
- x = np.linspace(1, nx, nx, endpoint=True)-center[0]
1234
- y = np.linspace(1, ny, ny, endpoint=True)-center[1]
1235
- z = diff
1236
-
1237
- # Define new polar grid
1238
- nr = int(min([center[0], center[1], diff.shape[0]-center[0], diff.shape[1]-center[1]])-1)
1239
- nt = 360 * 3
1240
-
1241
- r = np.linspace(1, nr, nr)
1242
- t = np.linspace(0., np.pi, nt, endpoint=False)
1243
-
1244
- return cartesian2polar(x, y, z, r, t, order=3).T
1245
-
1246
-
1247
596
  def calculate_ctf(wavelength, cs, defocus, k):
1248
597
  """ Calculate Contrast Transfer Function
1249
598
 
@@ -1290,13 +639,11 @@ def get_rotation(experiment_spots, crystal_spots):
1290
639
  positions (in 1/nm) of spots in diffractogram
1291
640
  crystal_spots: numpy array (nx2)
1292
641
  positions (in 1/nm) of Bragg spots according to kinematic scattering theory
1293
-
1294
642
  """
1295
-
1296
643
  r_experiment, phi_experiment = cart2pol(experiment_spots)
1297
-
644
+
1298
645
  # get crystal spots of same length and sort them by angle as well
1299
- r_crystal, phi_crystal, crystal_indices = xy2polar(crystal_spots)
646
+ r_crystal, phi_crystal, _ = xy2polar(crystal_spots)
1300
647
  angle_index = np.argmin(np.abs(r_experiment-r_crystal[1]))
1301
648
  rotation_angle = phi_experiment[angle_index] % (2*np.pi) - phi_crystal[1]
1302
649
  print(phi_experiment[angle_index])
@@ -1309,8 +656,6 @@ def get_rotation(experiment_spots, crystal_spots):
1309
656
 
1310
657
  def calibrate_image_scale(fft_tags, spots_reference, spots_experiment):
1311
658
  """depreciated get change of scale from comparison of spots to Bragg angles """
1312
- gx = fft_tags['spatial_scale_x']
1313
- gy = fft_tags['spatial_scale_y']
1314
659
 
1315
660
  dist_reference = np.linalg.norm(spots_reference, axis=1)
1316
661
  distance_experiment = np.linalg.norm(spots_experiment, axis=1)
@@ -1324,7 +669,8 @@ def calibrate_image_scale(fft_tags, spots_reference, spots_experiment):
1324
669
  return np.sqrt((xdata * dgx) ** 2 + (ydata * dgy) ** 2) - dist_reference.min()
1325
670
 
1326
671
  x0 = [1.001, 0.999]
1327
- [dg, sig] = optimization.leastsq(func, x0, args=(closest_exp_reflections[:, 0], closest_exp_reflections[:, 1]))
672
+ [dg, _] = scipy.optimize.leastsq(func, x0, args=(closest_exp_reflections[:, 0],
673
+ closest_exp_reflections[:, 1]))
1328
674
  return dg
1329
675
 
1330
676
 
@@ -1334,11 +680,10 @@ def align_crystal_reflections(spots, crystals):
1334
680
  crystal_reflections_polar = []
1335
681
  angles = []
1336
682
  exp_r, exp_phi = cart2pol(spots) # just in polar coordinates
1337
- spots_polar = np.array([exp_r, exp_phi])
1338
683
 
1339
- for i in range(len(crystals)):
1340
- tags = crystals[i]
1341
- r, phi, indices = xy2polar(tags['allowed']['g']) # sorted by r and phi , only positive angles
684
+ for tags in crystals:
685
+ # sorted by r and phi , only positive angles
686
+ r, phi, indices = xy2polar(tags['allowed']['g'])
1342
687
  # we mask the experimental values that are found already
1343
688
  angle = 0.
1344
689
 
@@ -1348,126 +693,7 @@ def align_crystal_reflections(spots, crystals):
1348
693
 
1349
694
  crystal_reflections_polar.append([r, angle + phi, indices])
1350
695
  tags['allowed']['g_rotated'] = pol2cart(r, angle + phi)
1351
- for spot in tags['allowed']['g']:
696
+ """for spot in tags['allowed']['g']:
1352
697
  dif = np.linalg.norm(spots[:, 0:2]-spot[0:2], axis=1)
1353
- # print(dif.min())
1354
- if dif.min() < 1.5:
1355
- ind = np.argmin(dif)
1356
-
698
+ """
1357
699
  return crystal_reflections_polar, angles
1358
-
1359
-
1360
- # Deconvolution
1361
- def decon_lr(o_image, probe, verbose=False):
1362
- """
1363
- # This task generates a restored image from an input image and point spread function (PSF) using
1364
- # the algorithm developed independently by Lucy (1974, Astron. J. 79, 745) and Richardson
1365
- # (1972, J. Opt. Soc. Am. 62, 55) and adapted for HST imagery by Snyder
1366
- # (1990, in Restoration of HST Images and Spectra, ST ScI Workshop Proceedings; see also
1367
- # Snyder, Hammoud, & White, JOSA, v. 10, no. 5, May 1993, in press).
1368
- # Additional options developed by Rick White (STScI) are also included.
1369
- #
1370
- # The Lucy-Richardson method can be derived from the maximum likelihood expression for data
1371
- # with a Poisson noise distribution. Thus, it naturally applies to optical imaging data such as HST.
1372
- # The method forces the restored image to be positive, in accord with photon-counting statistics.
1373
- #
1374
- # The Lucy-Richardson algorithm generates a restored image through an iterative method. The essence
1375
- # of the iteration is as follows: the (n+1)th estimate of the restored image is given by the nth estimate
1376
- # of the restored image multiplied by a correction image. That is,
1377
- #
1378
- # original data
1379
- # image = image --------------- * reflect(PSF)
1380
- # n+1 n image * PSF
1381
- # n
1382
-
1383
- # where the *'s represent convolution operators and reflect(PSF) is the reflection of the PSF, i.e.
1384
- # reflect((PSF)(x,y)) = PSF(-x,-y). When the convolutions are carried out using fast Fourier transforms
1385
- # (FFTs), one can use the fact that FFT(reflect(PSF)) = conj(FFT(PSF)), where conj is the complex conjugate
1386
- # operator.
1387
- """
1388
-
1389
- if len(o_image) < 1:
1390
- return o_image
1391
-
1392
- if o_image.shape != probe.shape:
1393
- print('Weirdness ', o_image.shape, ' != ', probe.shape)
1394
-
1395
- probe_c = np.ones(probe.shape, dtype=np.complex64)
1396
- probe_c.real = probe
1397
-
1398
- error = np.ones(o_image.shape, dtype=np.complex64)
1399
- est = np.ones(o_image.shape, dtype=np.complex64)
1400
- source = np.ones(o_image.shape, dtype=np.complex64)
1401
- o_image = o_image - o_image.min()
1402
- image_mult = o_image.max()
1403
- o_image = o_image / o_image.max()
1404
- source.real = o_image
1405
-
1406
- response_ft = fftpack.fft2(probe_c)
1407
-
1408
- ap_angle = o_image.metadata['experiment']['convergence_angle']
1409
- if ap_angle > .1:
1410
- ap_angle /= 1000 # now in rad
1411
-
1412
- e0 = float(o_image.metadata['experiment']['acceleration_voltage'])
1413
-
1414
- wl = get_wavelength(e0)
1415
- o_image.metadata['experiment']['wavelength'] = wl
1416
-
1417
- over_d = 2 * ap_angle / wl
1418
-
1419
- dx = o_image.x[1]-o_image.x[0]
1420
- dk = 1.0 / float(o_image.x[-1]) # last value of x-axis is field of view
1421
- screen_width = 1 / dx
1422
-
1423
- aperture = np.ones(o_image.shape, dtype=np.complex64)
1424
- # Mask for the aperture before the Fourier transform
1425
- n = o_image.shape[0]
1426
- size_x = o_image.shape[0]
1427
- size_y = o_image.shape[1]
1428
- app_ratio = over_d / screen_width * n
1429
-
1430
- theta_x = np.array(-size_x / 2. + np.arange(size_x))
1431
- theta_y = np.array(-size_y / 2. + np.arange(size_y))
1432
- t_xv, t_yv = np.meshgrid(theta_x, theta_y)
1433
-
1434
- tp1 = t_xv ** 2 + t_yv ** 2 >= app_ratio ** 2
1435
- aperture[tp1.T] = 0.
1436
- # print(app_ratio, screen_width, dk)
1437
-
1438
- progress = tqdm(total=500)
1439
- # de = 100
1440
- dest = 100
1441
- i = 0
1442
- while abs(dest) > 0.0001: # or abs(de) > .025:
1443
- i += 1
1444
- error_old = np.sum(error.real)
1445
- est_old = est.copy()
1446
- error = source / np.real(fftpack.fftshift(fftpack.ifft2(fftpack.fft2(est) * response_ft)))
1447
- est = est * np.real(fftpack.fftshift(fftpack.ifft2(fftpack.fft2(error) * np.conjugate(response_ft))))
1448
-
1449
- error_new = np.real(np.sum(np.power(error, 2))) - error_old
1450
- dest = np.sum(np.power((est - est_old).real, 2)) / np.sum(est) * 100
1451
-
1452
- if error_old != 0:
1453
- de = error_new / error_old * 1.0
1454
- else:
1455
- de = error_new
1456
-
1457
- if verbose:
1458
- print(
1459
- ' LR Deconvolution - Iteration: {0:d} Error: {1:.2f} = change: {2:.5f}%, {3:.5f}%'.format(i, error_new,
1460
- de,
1461
- abs(dest)))
1462
- if i > 500:
1463
- dest = 0.0
1464
- print('terminate')
1465
- progress.update(1)
1466
- progress.write(f"converged in {i} iterations")
1467
- print('\n Lucy-Richardson deconvolution converged in ' + str(i) + ' iterations')
1468
- est2 = np.real(fftpack.ifft2(fftpack.fft2(est) * fftpack.fftshift(aperture)))*image_mult
1469
- out_dataset = o_image.like_data(est2)
1470
- out_dataset.title = 'Lucy Richardson deconvolution'
1471
- out_dataset.data_type = 'image'
1472
- return out_dataset
1473
-
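
To close, a minimal usage sketch for the reworked fourier_transform() shown in the image_tools.py diff above, assuming a sidpy Dataset with two spatial dimensions named x and y; the pixel size and units below are illustrative only, not taken from the package.

    # Sketch, not from the package: build a sidpy IMAGE dataset and run the
    # fourier_transform() from the hunk above. Pixel size and units are made up.
    import numpy as np
    import sidpy
    import pyTEMlib.image_tools as it

    data = np.random.random((128, 128))
    image = sidpy.Dataset.from_array(data)
    image.data_type = 'IMAGE'
    image.set_dimension(0, sidpy.Dimension(np.arange(128) * 0.02, name='x', units='nm',
                                           quantity='Length', dimension_type='spatial'))
    image.set_dimension(1, sidpy.Dimension(np.arange(128) * 0.02, name='y', units='nm',
                                           quantity='Length', dimension_type='spatial'))

    fft_dset = it.fourier_transform(image)   # sidpy.Dataset with reciprocal u, v dimensions
    print(fft_dset.u.units, fft_dset.shape)
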