pyTEMlib 0.2025.4.2__py3-none-any.whl → 0.2025.9.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Files changed (94)
  1. build/lib/pyTEMlib/__init__.py +33 -0
  2. build/lib/pyTEMlib/animation.py +640 -0
  3. build/lib/pyTEMlib/atom_tools.py +238 -0
  4. build/lib/pyTEMlib/config_dir.py +31 -0
  5. build/lib/pyTEMlib/crystal_tools.py +1219 -0
  6. build/lib/pyTEMlib/diffraction_plot.py +756 -0
  7. build/lib/pyTEMlib/dynamic_scattering.py +293 -0
  8. build/lib/pyTEMlib/eds_tools.py +826 -0
  9. build/lib/pyTEMlib/eds_xsections.py +432 -0
  10. build/lib/pyTEMlib/eels_tools/__init__.py +44 -0
  11. build/lib/pyTEMlib/eels_tools/core_loss_tools.py +751 -0
  12. build/lib/pyTEMlib/eels_tools/eels_database.py +134 -0
  13. build/lib/pyTEMlib/eels_tools/low_loss_tools.py +655 -0
  14. build/lib/pyTEMlib/eels_tools/peak_fit_tools.py +175 -0
  15. build/lib/pyTEMlib/eels_tools/zero_loss_tools.py +264 -0
  16. build/lib/pyTEMlib/file_reader.py +274 -0
  17. build/lib/pyTEMlib/file_tools.py +811 -0
  18. build/lib/pyTEMlib/get_bote_salvat.py +69 -0
  19. build/lib/pyTEMlib/graph_tools.py +1153 -0
  20. build/lib/pyTEMlib/graph_viz.py +599 -0
  21. build/lib/pyTEMlib/image/__init__.py +37 -0
  22. build/lib/pyTEMlib/image/image_atoms.py +270 -0
  23. build/lib/pyTEMlib/image/image_clean.py +197 -0
  24. build/lib/pyTEMlib/image/image_distortion.py +299 -0
  25. build/lib/pyTEMlib/image/image_fft.py +277 -0
  26. build/lib/pyTEMlib/image/image_graph.py +926 -0
  27. build/lib/pyTEMlib/image/image_registration.py +316 -0
  28. build/lib/pyTEMlib/image/image_utilities.py +309 -0
  29. build/lib/pyTEMlib/image/image_window.py +421 -0
  30. build/lib/pyTEMlib/image_tools.py +699 -0
  31. build/lib/pyTEMlib/interactive_image.py +1 -0
  32. build/lib/pyTEMlib/kinematic_scattering.py +1196 -0
  33. build/lib/pyTEMlib/microscope.py +61 -0
  34. build/lib/pyTEMlib/probe_tools.py +906 -0
  35. build/lib/pyTEMlib/sidpy_tools.py +153 -0
  36. build/lib/pyTEMlib/simulation_tools.py +104 -0
  37. build/lib/pyTEMlib/test.py +437 -0
  38. build/lib/pyTEMlib/utilities.py +314 -0
  39. build/lib/pyTEMlib/version.py +5 -0
  40. build/lib/pyTEMlib/xrpa_x_sections.py +20976 -0
  41. pyTEMlib/__init__.py +25 -3
  42. pyTEMlib/animation.py +31 -22
  43. pyTEMlib/atom_tools.py +29 -34
  44. pyTEMlib/config_dir.py +2 -28
  45. pyTEMlib/crystal_tools.py +129 -165
  46. pyTEMlib/eds_tools.py +559 -342
  47. pyTEMlib/eds_xsections.py +432 -0
  48. pyTEMlib/eels_tools/__init__.py +44 -0
  49. pyTEMlib/eels_tools/core_loss_tools.py +751 -0
  50. pyTEMlib/eels_tools/eels_database.py +134 -0
  51. pyTEMlib/eels_tools/low_loss_tools.py +655 -0
  52. pyTEMlib/eels_tools/peak_fit_tools.py +175 -0
  53. pyTEMlib/eels_tools/zero_loss_tools.py +264 -0
  54. pyTEMlib/file_reader.py +274 -0
  55. pyTEMlib/file_tools.py +260 -1130
  56. pyTEMlib/get_bote_salvat.py +69 -0
  57. pyTEMlib/graph_tools.py +101 -174
  58. pyTEMlib/graph_viz.py +150 -0
  59. pyTEMlib/image/__init__.py +37 -0
  60. pyTEMlib/image/image_atoms.py +270 -0
  61. pyTEMlib/image/image_clean.py +197 -0
  62. pyTEMlib/image/image_distortion.py +299 -0
  63. pyTEMlib/image/image_fft.py +277 -0
  64. pyTEMlib/image/image_graph.py +926 -0
  65. pyTEMlib/image/image_registration.py +316 -0
  66. pyTEMlib/image/image_utilities.py +309 -0
  67. pyTEMlib/image/image_window.py +421 -0
  68. pyTEMlib/image_tools.py +154 -928
  69. pyTEMlib/kinematic_scattering.py +1 -1
  70. pyTEMlib/probe_tools.py +1 -1
  71. pyTEMlib/test.py +437 -0
  72. pyTEMlib/utilities.py +314 -0
  73. pyTEMlib/version.py +2 -3
  74. pyTEMlib/xrpa_x_sections.py +14 -10
  75. {pytemlib-0.2025.4.2.dist-info → pytemlib-0.2025.9.1.dist-info}/METADATA +13 -16
  76. pytemlib-0.2025.9.1.dist-info/RECORD +86 -0
  77. {pytemlib-0.2025.4.2.dist-info → pytemlib-0.2025.9.1.dist-info}/WHEEL +1 -1
  78. pytemlib-0.2025.9.1.dist-info/top_level.txt +6 -0
  79. pyTEMlib/core_loss_widget.py +0 -721
  80. pyTEMlib/eels_dialog.py +0 -754
  81. pyTEMlib/eels_dialog_utilities.py +0 -1199
  82. pyTEMlib/eels_tools.py +0 -2359
  83. pyTEMlib/file_tools_qt.py +0 -193
  84. pyTEMlib/image_dialog.py +0 -158
  85. pyTEMlib/image_dlg.py +0 -146
  86. pyTEMlib/info_widget.py +0 -1086
  87. pyTEMlib/info_widget3.py +0 -1120
  88. pyTEMlib/low_loss_widget.py +0 -479
  89. pyTEMlib/peak_dialog.py +0 -1129
  90. pyTEMlib/peak_dlg.py +0 -286
  91. pytemlib-0.2025.4.2.dist-info/RECORD +0 -38
  92. pytemlib-0.2025.4.2.dist-info/top_level.txt +0 -1
  93. {pytemlib-0.2025.4.2.dist-info → pytemlib-0.2025.9.1.dist-info}/entry_points.txt +0 -0
  94. {pytemlib-0.2025.4.2.dist-info → pytemlib-0.2025.9.1.dist-info}/licenses/LICENSE +0 -0
image_atoms.py (new file)
@@ -0,0 +1,270 @@
+ """ Atom detection
+
+ All atom detection is done here
+ Everything is in units of pixels!
+
+ Author: Gerd Duscher
+
+ part of pycroscopy.image
+
+ the core pycroscopy package
+ """
+ import numpy as np
+
+ # explicit submodule imports, so that skimage.feature, sklearn.cluster,
+ # scipy.spatial and scipy.optimize used below are guaranteed to resolve
+ import skimage.feature
+ import sklearn.cluster
+ import scipy.spatial
+ import scipy.optimize
+
+ import sidpy
+ from tqdm.auto import trange
+
+
+ def make_gauss(size_x: [int, float], size_y: [int, float], width: float = 1.0, x0: float = 0.0,
+                y0: float = 0.0, intensity: float = 1.0) -> np.ndarray:
+     """
+     Generates a 2D Gaussian-shaped probe array.
+
+     Parameters
+     ----------
+     size_x : int or float
+         The size of the probe along the x-axis.
+     size_y : int or float
+         The size of the probe along the y-axis.
+     width : float, optional
+         The standard deviation (spread) of the Gaussian (default is 1.0).
+     x0 : float, optional
+         The x-coordinate of the Gaussian center (default is 0.0).
+     y0 : float, optional
+         The y-coordinate of the Gaussian center (default is 0.0).
+     intensity : float, optional
+         The total intensity (sum) of the probe (default is 1.0).
+
+     Returns
+     -------
+     probe : numpy.ndarray
+         A 2D array representing the normalized Gaussian probe.
+     """
+     size_x = size_x / 2
+     size_y = size_y / 2
+     x, y = np.mgrid[-size_x:size_x, -size_y:size_y]
+     g = np.exp(-((x - x0)**2 + (y - y0)**2) / 2.0 / width**2)
+     probe = g / g.sum() * intensity
+
+     return probe
+
+
+ def find_atoms(image: sidpy.Dataset, atom_size: float = 0.1, threshold: float = 0.) -> np.ndarray:
+     """ Find atoms is a simple wrapper for blob_log in skimage.feature
+
+     The threshold for the blob finder is usually between 0.001 and 1.0;
+     note: for threshold <= 0 the RMS contrast of the image is used instead.
+
+     Parameters
+     ----------
+     image: sidpy.Dataset
+         the image in which to find atoms
+     atom_size: float
+         visible diameter of an atom blob in nm; sets the minimal distance between found blobs
+     threshold: float
+         threshold for the blob finder
+
+     Returns
+     -------
+     atoms: numpy array (Nx3)
+         atom positions and blob radius
+     """
+
+     if not isinstance(image, sidpy.Dataset):
+         raise TypeError('We need a sidpy.Dataset')
+     if image.data_type.name != 'IMAGE':
+         raise TypeError('We need sidpy.Dataset of sidpy.Datatype: IMAGE')
+     if not isinstance(atom_size, (float, int)):
+         raise TypeError('atom_size parameter has to be a number')
+     if not isinstance(threshold, float):
+         raise TypeError('threshold parameter has to be a float number')
+
+     scale_x = np.unique(np.gradient(image.dim_0.values))[0]
+     im = np.array(image - image.min())
+     im = im / im.max()
+     if threshold <= 0.:
+         threshold = np.std(im)
+     atoms = skimage.feature.blob_log(im, max_sigma=atom_size / scale_x, threshold=threshold)
+
+     return atoms
+
+
+ def atoms_clustering(atoms: np.ndarray, mid_atoms: np.ndarray,
+                      number_of_clusters: int = 3, nearest_neighbours: int = 7) -> tuple:
+     """ A wrapper for k-means clustering of atoms with sklearn.cluster.
+
+     Parameters
+     ----------
+     atoms: list or np.array (Nx2)
+         list of all atoms
+     mid_atoms: list or np.array (Nx2)
+         atoms to be evaluated
+     number_of_clusters: int
+         number of clusters to sort into (default: 3)
+     nearest_neighbours: int
+         number of nearest neighbours evaluated
+
+     Returns
+     -------
+     clusters, distances, indices: numpy arrays
+     """
+
+     # get distances to the nearest neighbours
+     nn_tree = scipy.spatial.KDTree(np.array(atoms)[:, 0:2])
+
+     distances, indices = nn_tree.query(np.array(mid_atoms)[:, 0:2], nearest_neighbours)
+
+     # clustering of the distance vectors
+     k_means = sklearn.cluster.KMeans(n_clusters=number_of_clusters, random_state=0)
+     k_means.fit(distances)
+     clusters = k_means.predict(distances)
+
+     return clusters, distances, indices
+
+
+ def gauss_difference(params: list[float], area: np.ndarray) -> np.ndarray:
+     """
+     Difference between part of an image and a Gaussian
+
+     This function is used in the atom_refine function of pyTEMlib.
+
+     Parameters
+     ----------
+     params: list
+         list of Gaussian parameters [width, position_x, position_y, intensity]
+     area: numpy array
+         2D matrix = part of an image
+
+     Returns
+     -------
+     numpy array: flattened array of the difference
+     """
+     gauss = make_gauss(area.shape[0], area.shape[1], width=params[0], x0=params[1],
+                        y0=params[2], intensity=params[3])
+     return (area - gauss).flatten()
+
+
+ def atom_refine(image: [np.ndarray, sidpy.Dataset], atoms: [np.ndarray, list], radius: float,
+                 max_int: float = 0, min_int: float = 0, max_dist: float = 4) -> dict:
+     """Fits a Gaussian in a blob of an image
+
+     Parameters
+     ----------
+     image: np.array or sidpy Dataset
+     atoms: list or np.array
+         positions of atoms
+     radius: float
+         radius of the circular mask that defines the Gaussian fitting region
+     max_int: float
+         optional - maximum intensity to be considered for fitting (to exclude contaminated areas)
+     min_int: float
+         optional - minimum intensity to be considered for fitting (to exclude contaminated holes)
+     max_dist: float
+         optional - maximum distance the Gaussian may move during fitting
+
+     Returns
+     -------
+     sym: dict
+         dictionary containing new atom positions and other output such as the intensity of the Gaussian
+     """
+     rr = int(radius + 0.5)  # atom radius
+     print('using radius ', rr, 'pixels')
+
+     pixels = np.linspace(0, 2 * rr, 2 * rr + 1) - rr
+     x, y = np.meshgrid(pixels, pixels)
+     mask = (x ** 2 + y ** 2) < rr ** 2
+
+     guess = [rr * 2, 0.0, 0.0, 1]
+
+     sym = {'number_of_atoms': len(atoms)}
+
+     volume = []
+     position = []
+     intensities = []
+     maximum_area = []
+     new_atoms = []
+     gauss_width = []
+     gauss_amplitude = []
+     gauss_intensity = []
+
+     for i in trange(len(atoms)):
+         x, y = atoms[i][0:2]
+         x = int(x)
+         y = int(y)
+
+         area = image[x - rr:x + rr + 1, y - rr:y + rr + 1]
+
+         append = False
+
+         if x - rr < 0 or y - rr < 0 or x + rr + 1 > image.shape[0] or y + rr + 1 > image.shape[1]:
+             position.append(-1)
+             intensities.append(-1.)
+             maximum_area.append(-1.)
+         else:  # atom found
+             position.append(1)
+             intensities.append((area * mask).sum())
+             maximum_area.append((area * mask).max())
+
+             if max_int > 0:
+                 if area.sum() < max_int:
+                     if area.sum() > min_int:
+                         append = True
+             elif area.sum() > min_int:
+                 append = True
+
+         pout = [0, 0, 0, 0]
+         if append:
+             if x - rr < 0 or y - rr < 0 or x + rr + 1 > image.shape[0] or y + rr + 1 > image.shape[1]:
+                 pass
+             else:
+                 # leastsq expects the extra arguments as a tuple
+                 [pout, _] = scipy.optimize.leastsq(gauss_difference, guess, args=(area,))
+
+         if (abs(pout[1]) > max_dist) or (abs(pout[2]) > max_dist):
+             pout = [0, 0, 0, 0]
+
+         volume.append(2 * np.pi * pout[3] * pout[0] * pout[0])
+
+         new_atoms.append([x + pout[1], y + pout[2]])  # ,pout[0], volume)) #,pout[3]))
+         if all(v == 0 for v in pout):
+             gauss_intensity.append(0.)
+         else:
+             gauss = make_gauss(area.shape[0], area.shape[1], width=pout[0], x0=pout[1], y0=pout[2],
+                                intensity=pout[3])
+             gauss_intensity.append((gauss * mask).sum())
+         gauss_width.append(pout[0])
+         gauss_amplitude.append(pout[3])
+
+     sym['inside'] = position
+     sym['intensity_area'] = intensities
+     sym['maximum_area'] = maximum_area
+     sym['atoms'] = new_atoms
+     sym['gauss_width'] = gauss_width
+     sym['gauss_amplitude'] = gauss_amplitude
+     sym['gauss_intensity'] = gauss_intensity
+     sym['gauss_volume'] = volume
+
+     return sym
+
+
+ def intensity_area(image: np.ndarray, atoms: np.ndarray, radius: float) -> list[float]:
+     """
+     Integrated intensity of each atom in an image, summed inside a circular mask of the given radius.
+     """
+     rr = int(radius + 0.5)  # atom radius
+     print('using radius ', rr, 'pixels')
+
+     pixels = np.linspace(0, 2 * rr, 2 * rr + 1) - rr
+     x, y = np.meshgrid(pixels, pixels)
+     mask = np.array((x ** 2 + y ** 2) < rr ** 2)
+     intensities = []
+     for atom in atoms:
+         x = int(atom[1])
+         y = int(atom[0])
+         area = image[x - rr:x + rr + 1, y - rr:y + rr + 1]
+         if area.shape == mask.shape:
+             intensities.append((area * mask).sum())
+         else:
+             intensities.append(-1)
+     return intensities
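
For orientation, the new atom-detection module is meant to be driven through `find_atoms` (blob detection via `skimage.feature.blob_log`) followed by `atom_refine` (Gaussian fit per blob). The sketch below is a hypothetical usage example, not taken from the package documentation: the import path `pyTEMlib.image.image_atoms`, the synthetic dataset, and the 0.02 nm pixel size are assumptions; the function names and keyword arguments come from the diff above.

```python
# Hypothetical usage sketch -- import path and synthetic data are assumptions,
# only find_atoms / atom_refine and their parameters are taken from the diff above.
import numpy as np
import sidpy
from pyTEMlib.image import image_atoms   # assumed import path for the new submodule

# build a small sidpy.Dataset of data_type IMAGE (required by find_atoms)
data = np.random.random((256, 256))
image = sidpy.Dataset.from_array(data)
image.title = 'synthetic image'
image.data_type = 'image'
image.set_dimension(0, sidpy.Dimension(np.arange(256) * 0.02, name='x', units='nm',
                                       quantity='distance', dimension_type='spatial'))
image.set_dimension(1, sidpy.Dimension(np.arange(256) * 0.02, name='y', units='nm',
                                       quantity='distance', dimension_type='spatial'))

atoms = image_atoms.find_atoms(image, atom_size=0.1, threshold=0.03)  # blob positions in pixels
refined = image_atoms.atom_refine(image, atoms, radius=5)             # Gaussian fit in a 5-pixel mask
positions = refined['atoms']                                          # refined (x, y) positions in pixels
intensities = refined['gauss_intensity']
```

The dictionary returned by `atom_refine` also carries `'gauss_width'`, `'gauss_amplitude'`, `'intensity_area'`, `'maximum_area'` and `'gauss_volume'` lists, one entry per input atom.
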
image_clean.py (new file)
@@ -0,0 +1,197 @@
+ """
+ image cleaning functions, part of the image module of pycroscopy
+ """
+ import numpy as np
+ import sidpy
+ from tqdm import tqdm
+ import sklearn.feature_extraction
+ from sklearn.utils.extmath import randomized_svd
+ import skimage.filters
+
+
+ # Image cleaning functions
+
+ def clean_svd(im, pixel_size=1, source_size=5):
+     """De-noising of an image using the first component of a singular value decomposition"""
+
+     if not isinstance(im, sidpy.Dataset):
+         raise TypeError('We need a sidpy.Dataset')
+     if im.data_type.name != 'IMAGE':
+         raise TypeError('We need sidpy.Dataset of sidpy.Datatype: IMAGE')
+
+     patch_size = int(source_size / pixel_size)
+     patch_size = max(patch_size, 3)
+
+     patches = sklearn.feature_extraction.image.extract_patches_2d(np.array(im),
+                                                                   (patch_size, patch_size))
+     patches = patches.reshape(patches.shape[0], patches.shape[1] * patches.shape[2])
+
+     num_components = 32
+
+     u, _, _ = randomized_svd(patches, num_components)
+     u_im_size = int(np.sqrt(u.shape[0]))
+     reduced_image = u[:, 0].reshape(u_im_size, u_im_size)
+     reduced_image = reduced_image / reduced_image.sum() * im.sum()
+     out_dataset = im.like_data(reduced_image)
+     out_dataset.title = 'Major SVD component'
+     out_dataset.data_type = 'image'
+     return out_dataset
+
+ def background_correction(image, value=12):
+     """Background correction of an image with difference of Gaussians
+
+     Parameters
+     ----------
+     image: numpy array or sidpy.Dataset
+         2D image to be corrected
+     value: int
+         value for the difference of Gaussians, larger values result in more smoothing
+
+     Returns
+     -------
+     bgd_corr: numpy array or sidpy.Dataset
+         background corrected image
+     """
+
+     if image.ndim != 2:
+         raise ValueError('Input image must be 2D')
+
+     bgd_corr = skimage.filters.difference_of_gaussians(np.array(image), 1, int(value))
+     if isinstance(image, sidpy.Dataset):
+         bgd_corr = image.like_data(bgd_corr)
+         bgd_corr.title = 'Background Corrected ' + image.title
+         bgd_corr.source = image.title
+         bgd_corr.metadata = image.metadata.copy()
+         if 'analysis' not in bgd_corr.metadata:
+             bgd_corr.metadata['analysis'] = {}
+         bgd_corr.metadata['analysis']['background_correction'] = {'value': value,
+                                                                   'input_dataset': image.source}
+     return bgd_corr
+
+ # Deconvolution
+
+ def make_gauss(size_x, size_y, width=1.0, x0=0.0, y0=0.0, intensity=1.0):
+     """Make a Gaussian shaped probe """
+     size_x = size_x / 2
+     size_y = size_y / 2
+     x, y = np.mgrid[-size_x:size_x, -size_y:size_y]
+     g = np.exp(-((x - x0)**2 + (y - y0)**2) / 2.0 / width**2)
+     probe = g / g.sum() * intensity
+
+     return probe
+
+
+ def decon_lr(o_image, resolution=0.1, verbose=False):
+     """
+     Lucy-Richardson deconvolution of an image with a Gaussian point spread function (PSF).
+
+     This task generates a restored image from an input image and point spread function (PSF) using
+     the algorithm developed independently by Lucy (1974, Astron. J. 79, 745) and Richardson
+     (1972, J. Opt. Soc. Am. 62, 55) and adapted for HST imagery by Snyder
+     (1990, in Restoration of HST Images and Spectra, ST ScI Workshop Proceedings; see also
+     Snyder, Hammoud, & White, JOSA, v. 10, no. 5, May 1993, in press).
+     Additional options developed by Rick White (STScI) are also included.
+
+     The Lucy-Richardson method can be derived from the maximum likelihood expression
+     for data with a Poisson noise distribution. Thus, it naturally applies to optical
+     imaging data such as HST. The method forces the restored image to be positive,
+     in accord with photon-counting statistics.
+
+     The Lucy-Richardson algorithm generates a restored image through an iterative method.
+     The essence of the iteration is as follows: the (n+1)-th estimate of the restored image
+     is given by the n-th estimate of the restored image multiplied by a correction image,
+     that is,
+
+                                      original data
+         image    =  image    ------------------------  *  reflect(PSF)
+              n+1         n         image  * PSF
+                                         n
+
+     where the *'s represent convolution operators and reflect(PSF) is the reflection of the PSF,
+     i.e. reflect(PSF)(x, y) = PSF(-x, -y). When the convolutions are carried out using fast
+     Fourier transforms (FFTs), one can use the fact that FFT(reflect(PSF)) = conj(FFT(PSF)),
+     where conj is the complex conjugate operator.
+
+     Parameters
+     ----------
+     o_image: sidpy.Dataset with data_type 'IMAGE'
+         the image to be deconvolved
+     resolution: float
+         width of the resolution function
+     verbose: bool
+         if True, print convergence information for every iteration
+
+     Returns
+     -------
+     out_dataset: sidpy.Dataset
+         the deconvolved dataset
+     """
+
+     if len(o_image) < 1:
+         return o_image
+
+     image_dimensions = o_image.get_image_dims(return_axis=True)
+     scale_x = image_dimensions[0].slope
+     gauss_diameter = resolution / scale_x
+     probe = make_gauss(o_image.shape[0], o_image.shape[1], gauss_diameter)
+
+     probe_c = np.ones(probe.shape, dtype=np.complex64)
+     probe_c.real = probe
+
+     error = np.ones(o_image.shape, dtype=np.complex64)
+     est = np.ones(o_image.shape, dtype=np.complex64)
+     source = np.ones(o_image.shape, dtype=np.complex64)
+     source.real = o_image
+
+     response_ft = np.fft.fft2(probe_c)
+
+     # dx = scale_x
+     # dk = 1.0 / float(image_dimensions[0][-1])  # last value of x axis is the field of view
+     # screen_width = 1 / dx
+
+     aperture = np.ones(o_image.shape, dtype=np.complex64)
+     # mask for the aperture before the Fourier transform
+     # n = o_image.shape[0]
+     size_x = o_image.shape[0]
+     size_y = o_image.shape[1]
+
+     theta_x = np.array(-size_x / 2. + np.arange(size_x))
+     theta_y = np.array(-size_y / 2. + np.arange(size_y))
+     t_xv, t_yv = np.meshgrid(theta_x, theta_y)
+
+     tp1 = t_xv ** 2 + t_yv ** 2 >= o_image.shape[0]*4/5 ** 2
+     aperture[tp1.T] = 0.
+     # print(app_ratio, screen_width, dk)
+
+     progress = tqdm(total=500)
+     # de = 100
+     dest = 100
+     i = 0
+     while abs(dest) > 0.0001:  # or abs(de) > .025:
+         i += 1
+         error_old = np.sum(error.real)
+         est_old = est.copy()
+         error = source / np.real(np.fft.fftshift(np.fft.ifft2(np.fft.fft2(est) * response_ft)))
+         est = est * np.real(np.fft.fftshift(np.fft.ifft2(np.fft.fft2(error) * np.conjugate(response_ft))))
+
+         error_new = np.real(np.sum(np.power(error, 2))) - error_old
+         dest = np.sum(np.power((est - est_old).real, 2)) / np.sum(est) * 100
+
+         if error_old != 0:
+             de = error_new / error_old * 1.0
+         else:
+             de = error_new
+
+         if verbose:
+             print(f' LR Deconvolution - Iteration: {i:d} Error: {error_new:.2f} = ' +
+                   f'change: {de:.5f}%, {abs(dest):.5f}%')
+         if i > 500:
+             dest = 0.0
+             print('terminate')
+         progress.update(1)
+     progress.write(f"converged in {i} iterations")
+     print('\n Lucy-Richardson deconvolution converged in ' + str(i) + ' iterations')
+     # est2 = np.real(np.fft.ifft2(np.fft.fft2(est) * np.fft.fftshift(aperture)))
+     out_dataset = o_image.like_data(np.real(est))
+     out_dataset.title = 'Lucy Richardson deconvolution'
+     out_dataset.data_type = 'image'
+     return out_dataset
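
The long docstring of `decon_lr` describes the Lucy-Richardson update in words; as a compact reference, one pass of the loop above (FFT-based convolution, with the conjugate trick standing in for convolution with the reflected PSF) reduces to the following standalone sketch. The helper name `lucy_richardson_step` is ours, not part of pyTEMlib.

```python
import numpy as np

def lucy_richardson_step(estimate, data, psf_ft):
    """One Lucy-Richardson iteration, mirroring the update inside decon_lr above:
        est_{n+1} = est_n * [ data / (est_n (x) PSF) ] (x) reflect(PSF)
    where (x) is convolution, computed via FFTs using FFT(reflect(PSF)) = conj(FFT(PSF))."""
    # re-blur the current estimate with the PSF
    blurred = np.real(np.fft.fftshift(np.fft.ifft2(np.fft.fft2(estimate) * psf_ft)))
    # ratio of the measured data to the re-blurred estimate
    ratio = data / blurred
    # convolve the ratio with the reflected PSF (conjugate of the PSF transform)
    correction = np.real(np.fft.fftshift(np.fft.ifft2(np.fft.fft2(ratio) * np.conjugate(psf_ft))))
    return estimate * correction
```

In `decon_lr` this step is repeated until the relative change of the estimate (`dest`) drops below 0.0001, capped at 500 iterations (the total of the tqdm progress bar).
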