pyTEMlib 0.2024.2.2-py2.py3-none-any.whl → 0.2024.6.0-py2.py3-none-any.whl

This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.

Potentially problematic release: this version of pyTEMlib might be problematic.

pyTEMlib/eels_dialog.py CHANGED
@@ -1,6 +1,4 @@
 """
-QT dialog window for EELS compositional analysis
-
 Author: Gerd Duscher
 """
 
@@ -93,7 +91,7 @@ class CurveVisualizer(object):
         legline.set_alpha(0.2)
         self.fig.canvas.draw()
 
-def get_sidebar():
+def get_core_loss_sidebar():
     side_bar = ipywidgets.GridspecLayout(14, 3, width='auto', grid_gap="0px")
 
 
@@ -194,6 +192,7 @@ def get_sidebar():
     return side_bar
 
 
+
 class CompositionWidget(object):
     def __init__(self, datasets=None, key=None):
 
@@ -203,7 +202,7 @@ class CompositionWidget(object):
 
 
         self.model = []
-        self.sidebar = get_sidebar()
+        self.sidebar = get_core_loss_sidebar()
 
         self.set_dataset(key)
 
@@ -312,11 +311,12 @@ class CompositionWidget(object):
         reference_list = [('None', -1)]
 
         for index, key in enumerate(self.datasets.keys()):
-            if 'Reference' not in key:
-                if 'SPECTR' in self.datasets[key].data_type.name:
-                    spectrum_list.append((f'{key}: {self.datasets[key].title}', index))
-                    self.spectrum_keys_list.append(key)
-                    reference_list.append((f'{key}: {self.datasets[key].title}', index))
+            if '_rel' not in key:
+                if 'Reference' not in key:
+                    if 'SPECTR' in self.datasets[key].data_type.name:
+                        spectrum_list.append((f'{key}: {self.datasets[key].title}', index))
+                        self.spectrum_keys_list.append(key)
+                        reference_list.append((f'{key}: {self.datasets[key].title}', index))
 
         if set_key in self.spectrum_keys_list:
             self.key = set_key
@@ -324,10 +324,11 @@ class CompositionWidget(object):
             self.key = self.spectrum_keys_list[-1]
         self.dataset = self.datasets[self.key]
 
-        spec_dim = self.dataset.get_dimensions_by_type(sidpy.DimensionType.SPECTRAL)
-        self.spec_dim = self.dataset._axes[spec_dim[0]]
+        self.spec_dim = self.dataset.get_spectral_dims(return_axis=True)[0]
 
         self.energy_scale = self.spec_dim.values
+        self.dd = (self.energy_scale[0], self.energy_scale[1])
+
         self.dataset.metadata['experiment']['offset'] = self.energy_scale[0]
         self.dataset.metadata['experiment']['dispersion'] = self.energy_scale[1] - self.energy_scale[0]
         if 'edges' not in self.dataset.metadata or self.dataset.metadata['edges'] == {}:
@@ -634,7 +635,7 @@ class CompositionWidget(object):
             raise ValueError('need a experiment parameter in metadata dictionary')
 
         eff_beta = eels.effective_collection_angle(self.energy_scale, alpha, beta, beam_kv)
-
+        eff_beta = beta
         self.low_loss = None
         if self.sidebar[12, 1].value:
             for key in self.datasets.keys():
@@ -1033,13 +1033,12 @@ class EdgesAtCursor(object):
             self.label.remove()
             self.line.remove()
         if event.button == 1:
-            self.label = self.ax.text(x, y_max, eels.find_major_edges(event.xdata, self.maximal_chemical_shift),
+            self.label = self.ax.text(x, y_max, eels.find_all_edges(event.xdata, self.maximal_chemical_shift, major_edges_only=True),
                                       verticalalignment='top')
             self.line, = self.ax.plot([x, x], [y_min, y_max], color='black')
         if event.button == 3:
             self.line, = self.ax.plot([x, x], [y_min, y_max], color='black')
-            self.label = self.ax.text(x, y_max, eels.find_all_edges(event.xdata, self.maximal_chemical_shift),
-                                      verticalalignment='top')
+            self.label = self.ax.text(x, y_max, eels.find_all_edges(event.xdata, self.maximal_chemical_shift), verticalalignment='top')
         self.ax.set_ylim(y_min, y_max)
 
     def mouse_move(self, event):
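
Note on the hunk above: find_major_edges() is folded into find_all_edges(), which now takes a major_edges_only flag. A minimal sketch of the new call (illustrative energy and shift values; the function returns a printable summary, which is why it can be passed straight to ax.text):

    import pyTEMlib.eels_tools as eels

    # list only the major ionization edges within +/- 10 eV of 532 eV (near the O-K edge)
    print(eels.find_all_edges(532, 10, major_edges_only=True))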
pyTEMlib/eels_tools.py CHANGED
@@ -373,15 +373,30 @@ def shift_energy(dataset: sidpy.Dataset, shifts: np.ndarray) -> sidpy.Dataset:
 
 
 def align_zero_loss(dataset: sidpy.Dataset) -> sidpy.Dataset:
+    """
+    Shifts the energy axis of the input dataset to be aligned with the zero-loss peak.
+
+    Parameters:
+    -----------
+    dataset : sidpy.Dataset
+        The input dataset containing the energy axis to be aligned.
 
+    Returns:
+    --------
+    sidpy.Dataset
+        The dataset with the energy axis shifted to align the zero-loss peak.
+
+    """
     shifts = get_zero_loss_energy(dataset)
-    print(shifts, dataset)
+    # print(shifts, dataset)
     new_si = shift_energy(dataset, shifts)
     new_si.metadata.update({'zero_loss': {'shifted': shifts}})
     return new_si
 
 
-def get_resolution_functions(dset: sidpy.Dataset, startFitEnergy: float=-1, endFitEnergy: float=+1,
+
+
+def get_resolution_functions(dataset: sidpy.Dataset, startFitEnergy: float=-1, endFitEnergy: float=+1,
                              n_workers: int=1, n_threads: int=8):
     """
     Analyze and fit low-loss EELS data within a specified energy range to determine zero-loss peaks.
@@ -392,33 +407,35 @@ def get_resolution_functions(dset: sidpy.Dataset, startFitEnergy: float=-1, endF
     from the dataset. The function handles both 2D and 3D datasets.
 
     Parameters:
-    dset: sidpy.Dataset
-        The dataset containing TEM spectral data.
-    startFitEnergy: float
-        The start energy of the fitting window.
-    endFitEnergy: float
-        The end energy of the fitting window.
-    n_workers: int, optional
-        The number of workers for parallel processing (default is 1).
-    n_threads: int, optional
-        The number of threads for parallel processing (default is 8).
+    -----------
+    dataset (sidpy.Dataset): The dataset containing TEM spectral data.
+    startFitEnergy (float): The start energy of the fitting window.
+    endFitEnergy (float): The end energy of the fitting window.
+    n_workers (int, optional): The number of workers for parallel processing (default is 1).
+    n_threads (int, optional): The number of threads for parallel processing (default is 8).
 
     Returns:
+    --------
     tuple: A tuple containing:
         - z_loss_dset (sidpy.Dataset): The dataset with added zero-loss peak information.
         - z_loss_params (numpy.ndarray): Array of parameters used for the zero-loss peak fitting.
 
     Raises:
+    -------
     ValueError: If the input dataset does not have the expected dimensions or format.
 
     Notes:
+    ------
     - The function expects `dset` to have specific dimensionalities and will raise an error if they are not met.
     - Parallel processing is employed to enhance performance, particularly for large datasets.
     """
-    energy = dset.get_spectral_dims(return_axis=True)[0].values
+    energy = dataset.get_spectral_dims(return_axis=True)[0].values
     start_fit_pixel = np.searchsorted(energy, startFitEnergy)
     end_fit_pixel = np.searchsorted(energy, endFitEnergy)
     guess_width = (endFitEnergy - startFitEnergy)/2
+    if end_fit_pixel - start_fit_pixel < 5:
+        start_fit_pixel -= 2
+        end_fit_pixel += 2
 
     def get_good_guess(zl_func, energy, spectrum):
         popt, pcov = curve_fit(zl_func, energy, spectrum,
@@ -428,21 +445,21 @@ def get_resolution_functions(dset: sidpy.Dataset, startFitEnergy: float=-1, endF
 
     fit_energy = energy[start_fit_pixel:end_fit_pixel]
     # get a good guess for the fit parameters
-    if len(dset.shape) == 3:
-        fit_dset = dset[:, :, start_fit_pixel:end_fit_pixel]
+    if len(dataset.shape) == 3:
+        fit_dset = dataset[:, :, start_fit_pixel:end_fit_pixel]
         guess_amplitude = np.sqrt(fit_dset.max())
         guess_params = get_good_guess(zl_func, fit_energy, fit_dset.sum(axis=(0, 1))/fit_dset.shape[0]/fit_dset.shape[1])
-    elif len(dset.shape) == 2:
-        fit_dset = dset[:, start_fit_pixel:end_fit_pixel]
+    elif len(dataset.shape) == 2:
+        fit_dset = dataset[:, start_fit_pixel:end_fit_pixel]
         fit_energy = energy[start_fit_pixel:end_fit_pixel]
         guess_amplitude = np.sqrt(fit_dset.max())
         guess_params = get_good_guess(zl_func, fit_energy, fit_dset.sum(axis=0)/fit_dset.shape[0])
-    elif len(dset.shape) == 1:
-        fit_dset = dset[start_fit_pixel:end_fit_pixel]
+    elif len(dataset.shape) == 1:
+        fit_dset = dataset[start_fit_pixel:end_fit_pixel]
         fit_energy = energy[start_fit_pixel:end_fit_pixel]
         guess_amplitude = np.sqrt(fit_dset.max())
         guess_params = get_good_guess(zl_func, fit_energy, fit_dset)
-        z_loss_dset = dset.copy()
+        z_loss_dset = dataset.copy()
         z_loss_dset *= 0.0
         z_loss_dset += zl_func(energy, *guess_params)
         if 'zero_loss' not in z_loss_dset.metadata:
@@ -450,11 +467,11 @@ def get_resolution_functions(dset: sidpy.Dataset, startFitEnergy: float=-1, endF
         z_loss_dset.metadata['zero_loss'].update({'startFitEnergy': startFitEnergy,
                                                   'endFitEnergy': endFitEnergy,
                                                   'fit_parameter': guess_params,
-                                                  'original_low_loss': dset.title})
+                                                  'original_low_loss': dataset.title})
         return z_loss_dset
     else:
         print('Error: need a spectrum or spectral image sidpy dataset')
-        print('Not dset.shape = ', dset.shape)
+        print('Not dset.shape = ', dataset.shape)
         return None
 
     # define guess function for SidFitter
@@ -466,7 +483,7 @@ def get_resolution_functions(dset: sidpy.Dataset, startFitEnergy: float=-1, endF
                                          return_cov=False, return_fit=False, return_std=False, km_guess=False, num_fit_parms=6)
 
     [z_loss_params] = zero_loss_fitter.do_fit()
-    z_loss_dset = dset.copy()
+    z_loss_dset = dataset.copy()
     z_loss_dset *= 0.0
 
     energy_grid = np.broadcast_to(energy.reshape((1, 1, -1)), (z_loss_dset.shape[0],
@@ -480,7 +497,7 @@ def get_resolution_functions(dset: sidpy.Dataset, startFitEnergy: float=-1, endF
     z_loss_dset.metadata['zero_loss'].update({'startFitEnergy': startFitEnergy,
                                               'endFitEnergy': endFitEnergy,
                                               'fit_parameter': z_loss_params,
-                                              'original_low_loss': dset.title})
+                                              'original_low_loss': dataset.title})
 
 
     return z_loss_dset
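
A hedged usage sketch of get_resolution_functions() after the dset to dataset rename; low_loss is a placeholder for any sidpy.Dataset with a spectral dimension:

    import pyTEMlib.eels_tools as eels

    # fit the zero-loss peak within the default -1 eV to +1 eV window;
    # fit results land in z_loss.metadata['zero_loss']
    z_loss = eels.get_resolution_functions(low_loss, startFitEnergy=-1, endFitEnergy=1)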
@@ -503,7 +520,7 @@ def drude_lorentz(eps_inf, leng, ep, eb, gamma, e, amplitude):
     return eps
 
 
-def fit_plasmon(dataset, startFitEnergy, endFitEnergy, plot_result=False, number_workers=4, number_threads=8):
+def fit_plasmon(dataset: Union[sidpy.Dataset, np.ndarray], startFitEnergy: float, endFitEnergy: float, plot_result: bool = False, number_workers: int = 4, number_threads: int = 8) -> Union[sidpy.Dataset, np.ndarray]:
     """
     Fit plasmon peak positions and widths in a TEM dataset using a Drude model.
 
@@ -540,7 +557,7 @@ def fit_plasmon(dataset, startFitEnergy, endFitEnergy, plot_result=False, number
     - If `plot_result` is True, the function plots Ep, Ew, and A as separate subplots.
     """
     # define Drude function for plasmon fitting
-    def energy_loss_function(E, Ep, Ew, A):
+    def energy_loss_function(E: np.ndarray, Ep: float, Ew: float, A: float) -> np.ndarray:
         E = E/E.max()
         eps = 1 - Ep**2/(E**2+Ew**2) + 1j * Ew * Ep**2/E/(E**2+Ew**2)
         elf = (-1/eps).imag
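
The Drude model fitted here is eps(E) = 1 - Ep^2/(E^2 + Ew^2) + i*Ew*Ep^2/(E*(E^2 + Ew^2)), and the energy-loss function is ELF(E) = Im(-1/eps). A standalone numpy sketch with illustrative Ep and Ew (omitting the E/E.max() normalization applied inside energy_loss_function):

    import numpy as np

    E = np.linspace(1., 50., 500)   # energy-loss axis in eV
    Ep, Ew = 16.0, 4.0              # plasmon energy and width (illustrative)
    eps = 1 - Ep**2/(E**2 + Ew**2) + 1j * Ew * Ep**2/E/(E**2 + Ew**2)
    elf = (-1/eps).imag
    print(E[np.argmax(elf)])        # maximum falls close to E = Ep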
@@ -592,7 +609,6 @@ def drude_simulation(dset, e, ep, ew, tnm, eb):
     Gives probabilities of dielectric function eps relative to zero-loss integral (i0 = 1) per eV
     Details in R.F.Egerton: EELS in the Electron Microscope, 3rd edition, Springer 2011
 
-    # function drude(ep,ew,eb,epc,e0,beta,nn,tnm)
     # Given the plasmon energy (ep), plasmon fwhm (ew) and binding energy(eb),
     # this program generates:
     # EPS1, EPS2 from modified Eq. (3.40), ELF=Im(-1/EPS) from Eq. (3.42),
@@ -603,41 +619,10 @@ def drude_simulation(dset, e, ep, ew, tnm, eb):
     # Details in R.F.Egerton: EELS in the Electron Microscope, 3rd edition, Springer 2011
     # Version 10.11.26
 
-
-    b.7 drude Simulation of a Low-Loss Spectrum
-    The program DRUDE calculates a single-scattering plasmon-loss spectrum for
-    a specimen of a given thickness tnm (in nm), recorded with electrons of a
-    specified incident energy e0 by a spectrometer that accepts scattering up to a
-    specified collection semi-angle beta. It is based on the extended drude model
-    (Section 3.3.2), with a volume energy-loss function elf in accord with Eq. (3.64) and
-    a surface-scattering energy-loss function srelf as in Eq. (4.31). Retardation effects
-    and coupling between the two surface modes are not included. The surface term can
-    be made negligible by entering a large specimen thickness (tnm > 1000).
-    Surface intensity srfint and volume intensity volint are calculated from
-    Eqs. (4.31) and (4.26), respectively. The total spectral intensity ssd is written to
-    the file DRUDE.SSD, which can be used as input for KRAKRO. These intensities are
-    all divided by i0, to give relative probabilities (per eV). The real and imaginary parts
-    of the dielectric function are written to DRUDE.EPS and can be used for comparison
-    with the results of Kramers–Kronig analysis (KRAKRO.DAT).
-    Written output includes the surface-loss probability Ps, obtained by integrating
-    srfint (a value that relates to two surfaces but includes the negative begrenzungs
-    term), for comparison with the analytical integration represented by Eq. (3.77). The
-    volume-loss probability p_v is obtained by integrating volint and is used to calculate
-    the volume plasmon mean free path (lam = tnm/p_v). The latter is listed and
-    compared with the MFP obtained from Eq. (3.44), which represents analytical integration
-    assuming a zero-width plasmon peak. The total probability (Pt = p_v+Ps) is
-    calculated and used to evaluate the thickness (lam.Pt) that would be given by the formula
-    t/λ = ln(It/i0), ignoring the surface-loss probability. Note that p_v will exceed
-    1 for thicker specimens (t/λ > 1), since it represents the probability of plasmon
-    scattering relative to that of no inelastic scattering.
-    The command-line usage is drude(ep,ew,eb,epc,beta,e0,tnm,nn), where ep is the
-    plasmon energy, ew the plasmon width, eb the binding energy of the electrons (0 for
-    a metal), and nn is the number of channels in the output spectrum. An example of
-    the output is shown in Fig. b.1a,b.
-
     """
-
-    epc = dset.energy_scale[1] - dset.energy_scale[0]  # input('ev per channel : ');
+    energy_scale = dset.get_spectral_dims(return_axis=True)[0].values
+
+    epc = energy_scale[1] - energy_scale[0]  # input('ev per channel : ');
 
     b = dset.metadata['collection_angle'] / 1000.  # rad
     epc = dset.energy_scale[1] - dset.energy_scale[0]  # input('ev per channel : ');
@@ -1465,31 +1450,31 @@ def fit_edges2(spectrum, energy_scale, edges):
 
 
     def model(xx, pp):
-        yy = pp[0] + x**pp[1] + pp[2] + pp[3] * xx + pp[4] * xx * xx
+        yy = pp[0] * xx**pp[1] + pp[2] + pp[3] * xx + pp[4] * xx * xx
         for i in range(number_of_edges):
             pp[i+5] = np.abs(pp[i+5])
             yy = yy + pp[i+5] * xsec[i, :]
         return yy
 
     def residuals(pp, xx, yy):
-        err = np.abs((yy - model(xx, pp)) * mask)  # / np.sqrt(np.abs(y))
+        err = np.abs((yy - model(xx, pp)) * mask) / np.sqrt(np.abs(y))
        return err
 
     scale = y[100]
-    pin = np.array([A, r, 10., 1., 0.00] + [scale/5] * number_of_edges)
+    pin = np.array([A, -r, 10., 1., 0.00] + [scale/5] * number_of_edges)
     [p, _] = leastsq(residuals, pin, args=(x, y))
 
     for key in edges:
         if key.isdigit():
             edges[key]['areal_density'] = p[int(key)+5]
-
+    print(p)
     edges['model'] = {}
-    edges['model']['background'] = (background + p[6] + p[7] * x + p[8] * x * x)
-    edges['model']['background-poly_0'] = p[6]
-    edges['model']['background-poly_1'] = p[7]
-    edges['model']['background-poly_2'] = p[8]
-    edges['model']['background-A'] = A
-    edges['model']['background-r'] = r
+    edges['model']['background'] = (p[0] * np.power(x, -p[1]) + p[2] + x**p[3] + p[4] * x * x)
+    edges['model']['background-poly_0'] = p[2]
+    edges['model']['background-poly_1'] = p[3]
+    edges['model']['background-poly_2'] = p[4]
+    edges['model']['background-A'] = p[0]
+    edges['model']['background-r'] = p[1]
     edges['model']['spectrum'] = model(x, p)
     edges['model']['blurred'] = blurred
     edges['model']['mask'] = mask
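
The corrected model() above fits a power-law background plus a second-order polynomial. A minimal standalone sketch of that parameterization (not package code; the ordering [A, r, c0, c1, c2] mirrors pin, where the exponent is seeded with -r):

    import numpy as np

    def background(x, p):
        # p[0] * x**p[1]: power-law term (p[1] is negative for a decaying background)
        # p[2] + p[3]*x + p[4]*x*x: polynomial correction
        return p[0] * x**p[1] + p[2] + p[3] * x + p[4] * x * x

    x = np.linspace(300., 500., 200)   # energy-loss axis in eV (illustrative)
    print(background(x, [1e9, -3., 10., 1., 0.0])[:3])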
pyTEMlib/file_tools.py CHANGED
@@ -731,7 +731,13 @@ def open_file(filename=None, h5_group=None, write_hdf_file=False, sum_frames=Fa
             read_essential_metadata(dset)
             dset.metadata['filename'] = filename
             dataset_dict = {'Channel_000': dset}
-
+
+    # Temporary fix for dual-EELS spectra in dm files
+    # TODO: fix in SciFiReaders
+    for dset in dataset_dict.values():
+        if 'single_exposure_time' in dset.metadata['experiment']:
+            dset.metadata['experiment']['exposure_time'] = dset.metadata['experiment']['number_of_frames'] * \
+                                                           dset.metadata['experiment']['single_exposure_time']
     if write_hdf_file:
         h5_master_group = save_dataset(dataset_dict, filename=filename)
 
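The dual-EELS workaround recomputes the total exposure from the per-frame metadata. A worked illustration with hypothetical values:

    # a dual-EELS acquisition that summed 10 frames of 0.05 s each
    experiment = {'number_of_frames': 10, 'single_exposure_time': 0.05}
    experiment['exposure_time'] = (experiment['number_of_frames']
                                   * experiment['single_exposure_time'])
    assert experiment['exposure_time'] == 0.5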
pyTEMlib/image_tools.py CHANGED
@@ -50,6 +50,11 @@ from sklearn.cluster import DBSCAN
 
 from collections import Counter
 
+# imports for the center_diffractogram function
+from skimage.filters import threshold_otsu, sobel
+from scipy.optimize import leastsq
+from sklearn.cluster import DBSCAN
+
 
 _SimpleITK_present = True
 try:
@@ -275,6 +280,67 @@ def diffractogram_spots(dset, spot_threshold, return_center=True, eps=0.1):
     return spots, center
 
 
+def center_diffractogram(dset, return_plot=True, histogram_factor=None, smoothing=1, min_samples=100):
+    try:
+        diff = np.array(dset).T.astype(np.float16)
+        diff[diff < 0] = 0
+
+        if histogram_factor is not None:
+            hist, bins = np.histogram(np.ravel(diff), bins=256, range=(0, 1), density=True)
+            threshold = threshold_otsu(diff, hist=hist * histogram_factor)
+        else:
+            threshold = threshold_otsu(diff)
+        binary = (diff > threshold).astype(float)
+        smoothed_image = ndimage.gaussian_filter(binary, sigma=smoothing)  # Smooth before edge detection
+        smooth_threshold = threshold_otsu(smoothed_image)
+        smooth_binary = (smoothed_image > smooth_threshold).astype(float)
+        # Find the edges using the Sobel operator
+        edges = sobel(smooth_binary)
+        edge_points = np.argwhere(edges)
+
+        # Use DBSCAN to cluster the edge points
+        db = DBSCAN(eps=10, min_samples=min_samples).fit(edge_points)
+        labels = db.labels_
+        if len(set(labels)) == 1:
+            raise ValueError("DBSCAN clustering resulted in only one group, check the parameters.")
+
+        # Get the largest group of edge points
+        unique, counts = np.unique(labels, return_counts=True)
+        counts = dict(zip(unique, counts))
+        largest_group = max(counts, key=counts.get)
+        edge_points = edge_points[labels == largest_group]
+
+        # Fit a circle to the diffraction ring
+        def calc_distance(c, x, y):
+            Ri = np.sqrt((x - c[0])**2 + (y - c[1])**2)
+            return Ri - Ri.mean()
+        x_m = np.mean(edge_points[:, 1])
+        y_m = np.mean(edge_points[:, 0])
+        center_guess = x_m, y_m
+        center, ier = leastsq(calc_distance, center_guess, args=(edge_points[:, 1], edge_points[:, 0]))
+        mean_radius = np.mean(calc_distance(center, edge_points[:, 1], edge_points[:, 0])) + np.sqrt((edge_points[:, 1] - center[0])**2 + (edge_points[:, 0] - center[1])**2).mean()
+
+    finally:
+        if return_plot:
+            fig, ax = plt.subplots(1, 4, figsize=(10, 4))
+            ax[0].set_title('Diffractogram')
+            ax[0].imshow(dset.T, cmap='viridis')
+            ax[1].set_title('Otsu Binary Image')
+            ax[1].imshow(binary, cmap='gray')
+            ax[2].set_title('Smoothed Binary Image')
+            ax[2].imshow(smooth_binary, cmap='gray')
+            ax[3].set_title('Edge Detection and Fitting')
+            ax[3].imshow(edges, cmap='gray')
+            ax[3].scatter(center[0], center[1], c='r', s=10)
+            circle = plt.Circle(center, mean_radius, color='red', fill=False)
+            ax[3].add_artist(circle)
+            for axis in ax:
+                axis.axis('off')
+            fig.tight_layout()
+
+    return center
+
+
 def adaptive_fourier_filter(dset, spots, low_pass=3, reflection_radius=0.3):
     """
     Use spots in diffractogram for a Fourier Filter
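
A hedged usage sketch of the new center_diffractogram() helper; diffractogram is a placeholder for a 2D sidpy.Dataset diffraction pattern:

    import pyTEMlib.image_tools as it

    # with return_plot=False only the fitted ring center is returned
    center = it.center_diffractogram(diffractogram, return_plot=False)
    print(center)   # [x, y] in pixels, from the least-squares circle fit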
@@ -375,16 +441,10 @@ def complete_registration(main_dataset, storage_channel=None):
 
     rigid_registered_dataset = rigid_registration(main_dataset)
 
-    if storage_channel is not None:
-        registration_channel = ft.log_results(storage_channel, rigid_registered_dataset)
-
+
     print('Non-Rigid_Registration')
 
     non_rigid_registered = demon_registration(rigid_registered_dataset)
-    if storage_channel is not None:
-        registration_channel = ft.log_results(storage_channel, non_rigid_registered)
-
-    non_rigid_registered.h5_dataset = registration_channel
     return non_rigid_registered, rigid_registered_dataset
 
 
@@ -473,7 +533,7 @@ def demon_registration(dataset, verbose=False):
     ###############################
     # Rigid Registration New 05/09/2020
 
-def rigid_registration(dataset):
+def rigid_registration(dataset, sub_pixel=True):
     """
     Rigid registration of image stack with pixel accuracy
 
@@ -529,9 +589,13 @@ def rigid_registration(dataset):
         selection[frame_dim[0]] = slice(i, i+1)
         moving = dataset[tuple(selection)].squeeze().compute()
         fft_moving = np.fft.fft2(moving)
-        image_product = fft_fixed * fft_moving.conj()
-        cc_image = np.fft.fftshift(np.fft.ifft2(image_product))
-        shift = np.array(ndimage.maximum_position(cc_image.real))-cc_image.shape[0]/2
+        if sub_pixel:
+            shift = skimage.registration.phase_cross_correlation(fft_fixed, fft_moving, upsample_factor=1000,
+                                                                 space='fourier')[0]
+        else:
+            image_product = fft_fixed * fft_moving.conj()
+            cc_image = np.fft.fftshift(np.fft.ifft2(image_product))
+            shift = np.array(ndimage.maximum_position(cc_image.real))-cc_image.shape[0]/2
         fft_fixed = fft_moving
         relative_drift.append(shift)
     rig_reg, drift = rig_reg_drift(dataset, relative_drift)
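
The new sub_pixel branch delegates to scikit-image's phase_cross_correlation, operating directly on the already-computed FFTs (space='fourier'). A self-contained illustration that recovers a known shift:

    import numpy as np
    import skimage.registration

    rng = np.random.default_rng(0)
    fixed = rng.random((64, 64))
    moving = np.roll(fixed, (3, 5), axis=(0, 1))

    shift = skimage.registration.phase_cross_correlation(
        np.fft.fft2(fixed), np.fft.fft2(moving),
        upsample_factor=1000, space='fourier')[0]
    print(shift)   # approximately [-3., -5.]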
@@ -542,9 +606,18 @@ def rigid_registration(dataset):
     rigid_registered.source = dataset.title
     rigid_registered.metadata = {'analysis': 'rigid sub-pixel registration', 'drift': drift,
                                  'input_crop': input_crop, 'input_shape': dataset.shape[1:]}
-    rigid_registered.set_dimension(0, dataset._axes[frame_dim[0]])
-    rigid_registered.set_dimension(1, dataset._axes[spatial_dim[0]][input_crop[0]:input_crop[1]])
-    rigid_registered.set_dimension(2, dataset._axes[spatial_dim[1]][input_crop[2]:input_crop[3]])
+    rigid_registered.set_dimension(0, sidpy.Dimension(np.arange(rigid_registered.shape[0]),
+                                                      name='frame', units='frame', quantity='time',
+                                                      dimension_type='temporal'))
+
+    array_x = dataset._axes[spatial_dim[0]][input_crop[0]:input_crop[1]].values
+    rigid_registered.set_dimension(1, sidpy.Dimension(array_x,
+                                                      'x', units='nm', quantity='Length',
+                                                      dimension_type='spatial'))
+    array_y = dataset._axes[spatial_dim[1]][input_crop[2]:input_crop[3]].values
+    rigid_registered.set_dimension(2, sidpy.Dimension(array_y,
+                                                      'y', units='nm', quantity='Length',
+                                                      dimension_type='spatial'))
     return rigid_registered.rechunk({0: 'auto', 1: -1, 2: -1})
 
 
@@ -589,6 +662,7 @@ def rig_reg_drift(dset, rel_drift):
     rig_reg = np.zeros([dset.shape[frame_dim[0]], dset.shape[spatial_dim[0]], dset.shape[spatial_dim[1]]])
 
     # absolute drift
+    print(rel_drift)
     drift = np.array(rel_drift).copy()
 
     drift[0] = [0, 0]
@@ -1070,9 +1144,8 @@ def cartesian2polar(x, y, grid, r, t, order=3):
     return ndimage.map_coordinates(grid, np.array([new_ix, new_iy]), order=order).reshape(new_x.shape)
 
 
-def warp(diff):
-    """Takes a centered diffraction pattern (as a sidpy dataset) and warps it to a polar grid"""
-    """Centered diff can be produced with it.diffractogram_spots(return_center = True)"""
+def warp(diff, center):
+    """Takes a diffraction pattern (as a sidpy dataset) and warps it to a polar grid"""
 
     # Define original polar grid
     nx = np.shape(diff)[0]
@@ -1080,20 +1153,19 @@ def warp(diff):
 
     # Define center pixel
     pix2nm = np.gradient(diff.u.values)[0]
-    center_pixel = [abs(min(diff.u.values)), abs(min(diff.v.values))]//pix2nm
 
-    x = np.linspace(1, nx, nx, endpoint=True)-center_pixel[0]
-    y = np.linspace(1, ny, ny, endpoint=True)-center_pixel[1]
+    x = np.linspace(1, nx, nx, endpoint=True)-center[0]
+    y = np.linspace(1, ny, ny, endpoint=True)-center[1]
     z = diff
 
     # Define new polar grid
-    nr = int(min([center_pixel[0], center_pixel[1], diff.shape[0]-center_pixel[0], diff.shape[1]-center_pixel[1]])-1)
-    nt = 360*3
+    nr = int(min([center[0], center[1], diff.shape[0]-center[0], diff.shape[1]-center[1]])-1)
+    nt = 360 * 3
 
     r = np.linspace(1, nr, nr)
     t = np.linspace(0., np.pi, nt, endpoint=False)
 
-    return cartesian2polar(x, y, z, r, t, order=3)
+    return cartesian2polar(x, y, z, r, t, order=3).T
 
 
 def calculate_ctf(wavelength, cs, defocus, k):
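
Since warp() now takes the center explicitly instead of deriving it from the dimension axes, a hedged end-to-end sketch (diffractogram again a placeholder dataset):

    import pyTEMlib.image_tools as it

    center = it.center_diffractogram(diffractogram, return_plot=False)
    polar = it.warp(diffractogram, center)   # note the transpose added in this release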