waveorder 2.2.0rc0__py3-none-any.whl → 2.2.1__py3-none-any.whl

This diff compares the contents of publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
waveorder/optics.py CHANGED
@@ -1,9 +1,8 @@
+ import itertools
+
  import numpy as np
  import torch
- import matplotlib.pyplot as plt
- import gc
- import itertools
- from numpy.fft import fft, fft2, ifft2, fftn, ifftn, fftshift, ifftshift
+ from numpy.fft import fft2, fftn, fftshift, ifft2, ifftn, ifftshift


  def Jones_sample(Ein, t, sa):
@@ -133,7 +132,7 @@ def generate_pupil(frr, NA, lamb_in):
  numerical aperture of the pupil function (normalized by the refractive index of the immersion media)

  lamb_in : float
- wavelength of the light (inside the immersion media)
+ wavelength of the light in free space
  in units of length (inverse of frr's units)

  Returns
@@ -225,6 +224,103 @@ def gen_sector_Pupil(fxx, fyy, NA, lamb_in, sector_angle, rotation_angle):
  return Pupil_sector


+ def rotation_matrix(nu_z, nu_y, nu_x, wavelength):
+ nu_perp_squared = nu_x**2 + nu_y**2
+ nu_zz = wavelength * nu_z - 1
+
+ R_xx = (wavelength * nu_x**2 * nu_z + nu_y**2) / nu_perp_squared
+ R_yy = (wavelength * nu_y**2 * nu_z + nu_x**2) / nu_perp_squared
+ R_xy = nu_x * nu_y * nu_zz / nu_perp_squared
+
+ row0 = torch.stack((-wavelength * nu_y, -wavelength * nu_x), dim=0)
+ row1 = torch.stack((R_yy, R_xy), dim=0)
+ row2 = torch.stack((R_xy, R_xx), dim=0)
+
+ out = torch.stack((row0, row1, row2), dim=0)
+
+ # KLUDGE: fix the DC term manually, avoiding nan
+ out[..., 0, 0] = torch.tensor([[0, 0], [1, 0], [0, 1]])[..., None]
+
+ return torch.nan_to_num(out, nan=0.0)
+
+
+ def generate_vector_source_defocus_pupil(
+ x_frequencies,
+ y_frequencies,
+ z_position_list,
+ defocus_pupil,
+ input_jones,
+ ill_pupil,
+ wavelength,
+ ):
+ ill_pupil_3d = torch.einsum(
+ "zyx,yx->zyx", torch.fft.fft(defocus_pupil, dim=0), ill_pupil
+ ).abs() # make this real
+
+ freq_shape = z_position_list.shape + x_frequencies.shape
+
+ y_broadcast = torch.broadcast_to(y_frequencies[None, :, :], freq_shape)
+ x_broadcast = torch.broadcast_to(x_frequencies[None, :, :], freq_shape)
+ z_broadcast = np.sqrt(wavelength ** (-2) - x_broadcast**2 - y_broadcast**2)
+
+ # Calculate rotation matrix
+ rotations = rotation_matrix(
+ z_broadcast, y_broadcast, x_broadcast, wavelength
+ ).type(torch.complex64)
+
+ # TEMPORARY SIMPLIFY ROTATIONS "TURN OFF ROTATIONS"
+ # 3x2 IDENTITY MATRIX
+ rotations = torch.zeros_like(rotations)
+ rotations[1, 0, ...] = 1
+ rotations[2, 1, ...] = 1
+
+ # Main calculation in the frequency domain
+ source_pupil = torch.einsum(
+ "ijzyx,j,zyx->izyx", rotations, input_jones, ill_pupil_3d
+ )
+
+ # Convert back to defocus pupil
+ source_defocus_pupil = torch.fft.ifft(source_pupil, dim=-3)
+
+ return source_defocus_pupil
+
+
+ def generate_vector_detection_defocus_pupil(
+ x_frequencies,
+ y_frequencies,
+ z_position_list,
+ det_defocus_pupil,
+ det_pupil,
+ wavelength,
+ ):
+ # TODO: refactor redundancy with illumination pupil
+ det_pupil_3d = torch.einsum(
+ "zyx,yx->zyx", torch.fft.ifft(det_defocus_pupil, dim=0), det_pupil
+ )
+
+ # Calculate zyx_frequency grid (inelegant)
+ z_frequencies = torch.fft.ifft(z_position_list)
+ freq_shape = z_frequencies.shape + x_frequencies.shape
+ z_broadcast = torch.broadcast_to(z_frequencies[:, None, None], freq_shape)
+ y_broadcast = torch.broadcast_to(y_frequencies[None, :, :], freq_shape)
+ x_broadcast = torch.broadcast_to(x_frequencies[None, :, :], freq_shape)
+
+ # Calculate rotation matrix
+ rotations = rotation_matrix(
+ z_broadcast, y_broadcast, x_broadcast, wavelength
+ ).type(torch.complex64)
+
+ # Main calculation in the frequency domain
+ vector_detection_pupil = torch.einsum(
+ "jizyx,zyx->ijzyx", rotations, det_pupil_3d
+ )
+
+ # Convert back to defocus pupil
+ detection_defocus_pupil = torch.fft.fft(vector_detection_pupil, dim=-3)
+
+ return detection_defocus_pupil
+
+
  def Source_subsample(Source_cont, NAx_coord, NAy_coord, subsampled_NA=0.1):
  """

@@ -300,7 +396,7 @@ def generate_propagation_kernel(
  wavelength : float
  wavelength of the light in the immersion media

- z_position_list : torch.tensor or list
+ z_position_list : torch.tensor
  1D array of defocused z positions with the size of (Z)

  Returns
@@ -310,15 +406,16 @@ def generate_propagation_kernel(

  """

- oblique_factor = (
- (1 - wavelength**2 * radial_frequencies**2) * pupil_support
- ) ** (1 / 2) / wavelength
+ oblique_factor = ((1 - wavelength**2 * radial_frequencies**2)) ** (
+ 1 / 2
+ ) / wavelength
+ oblique_factor = torch.nan_to_num(oblique_factor, nan=0.0)

  propagation_kernel = pupil_support[None, :, :] * torch.exp(
  1j
  * 2
  * np.pi
- * torch.tensor(z_position_list)[:, None, None]
+ * z_position_list[:, None, None]
  * oblique_factor[None, :, :]
  )

@@ -326,7 +423,11 @@ def generate_propagation_kernel(


  def generate_greens_function_z(
- radial_frequencies, pupil_support, wavelength_illumination, z_position_list
+ radial_frequencies,
+ pupil_support,
+ wavelength_illumination,
+ z_position_list,
+ axially_even=True,
  ):
  """

@@ -343,9 +444,14 @@ def generate_greens_function_z(
  wavelength_illumination : float
  wavelength of the light in the immersion media

- z_position_list : torch.tensor or list
+ z_position_list : torch.tensor
  1D array of defocused z position with the size of (Z,)

+ axially_even : bool
+ For backwards compatibility with legacy phase reconstruction.
+ Ideally the legacy phase reconstruction should be unified with
+ the new reconstructions, and this parameter should be removed.
+
  Returns
  -------
  greens_function_z : torch.tensor
@@ -358,47 +464,97 @@ def generate_greens_function_z(
  * pupil_support
  ) ** (1 / 2) / wavelength_illumination

+ if axially_even:
+ z_positions = torch.abs(z_position_list[:, None, None])
+ else:
+ z_positions = z_position_list[:, None, None]
+
  greens_function_z = (
  -1j
  / 4
  / np.pi
  * pupil_support[None, :, :]
- * torch.exp(
- 1j
- * 2
- * np.pi
- * torch.tensor(z_position_list)[:, None, None]
- * oblique_factor[None, :, :]
- )
+ * torch.exp(1j * 2 * np.pi * z_positions * oblique_factor[None, :, :])
  / (oblique_factor[None, :, :] + 1e-15)
  )

  return greens_function_z


- def gen_dyadic_Greens_tensor_z(fxx, fyy, G_fun_z, Pupil_support, lambda_in):
+ def generate_defocus_greens_tensor(
+ fxx, fyy, G_fun_z, Pupil_support, lambda_in
+ ):
  """

  generate forward dyadic Green's function in u_x, u_y, z space

  Parameters
  ----------
- fxx : numpy.ndarray
+ fxx : tensor.Tensor
  x component of 2D spatial frequency array with the size of (Ny, Nx)

- fyy : numpy.ndarray
+ fyy : tensor.Tensor
  y component of 2D spatial frequency array with the size of (Ny, Nx)

- G_fun_z : numpy.ndarray
- forward Green's function in u_x, u_y, z space with size of (Ny, Nx, Nz)
+ G_fun_z : tensor.Tensor
+ forward Green's function in u_x, u_y, z space with size of (Nz, Ny, Nx)

- Pupil_support : numpy.ndarray
+ Pupil_support : tensor.Tensor
  the array that defines the support of the pupil function with the size of (Ny, Nx)

  lambda_in : float
  wavelength of the light in the immersion media

  Returns
+ -------
+ G_tensor_z : tensor.Tensor
+ forward dyadic Green's function in u_x, u_y, z space with the size of (3, 3, Nz, Ny, Nx)
+ """
+
+ fr = (fxx**2 + fyy**2) ** (1 / 2)
+ oblique_factor = ((1 - lambda_in**2 * fr**2) * Pupil_support) ** (
+ 1 / 2
+ ) / lambda_in
+
+ diff_filter = torch.zeros((3,) + G_fun_z.shape, dtype=torch.complex64)
+ diff_filter[0] = (1j * 2 * np.pi * oblique_factor)[None, ...]
+ diff_filter[1] = (1j * 2 * np.pi * fyy * Pupil_support)[None, ...]
+ diff_filter[2] = (1j * 2 * np.pi * fxx * Pupil_support)[None, ...]
+
+ G_tensor_z = torch.zeros((3, 3) + G_fun_z.shape, dtype=torch.complex64)
+
+ for i in range(3):
+ for j in range(3):
+ G_tensor_z[i, j] = (
+ G_fun_z
+ * diff_filter[i]
+ * diff_filter[j]
+ / (2 * np.pi / lambda_in) ** 2
+ )
+ if i == j:
+ G_tensor_z[i, i] += G_fun_z
+
+ return G_tensor_z
+
+
+ def gen_dyadic_Greens_tensor_z(fxx, fyy, G_fun_z, Pupil_support, lambda_in):
+ """
+ keeping for backwards compatibility
+
+ generate forward dyadic Green's function in u_x, u_y, z space
+ Parameters
+ ----------
+ fxx : numpy.ndarray
+ x component of 2D spatial frequency array with the size of (Ny, Nx)
+ fyy : numpy.ndarray
+ y component of 2D spatial frequency array with the size of (Ny, Nx)
+ G_fun_z : numpy.ndarray
+ forward Green's function in u_x, u_y, z space with size of (Ny, Nx, Nz)
+ Pupil_support : numpy.ndarray
+ the array that defines the support of the pupil function with the size of (Ny, Nx)
+ lambda_in : float
+ wavelength of the light in the immersion media
+ Returns
  -------
  G_tensor_z : numpy.ndarray
  forward dyadic Green's function in u_x, u_y, z space with the size of (3, 3, Ny, Nx, Nz)
@@ -427,7 +583,6 @@ def gen_dyadic_Greens_tensor_z(fxx, fyy, G_fun_z, Pupil_support, lambda_in):
  )
  if i == j:
  G_tensor_z[i, i] += G_fun_z
-
  return G_tensor_z


@@ -561,6 +716,58 @@ def gen_dyadic_Greens_tensor(G_real, ps, psz, lambda_in, space="real"):
  )


+ def generate_greens_tensor_spectrum(
+ zyx_shape,
+ zyx_pixel_size,
+ wavelength,
+ ):
+ """
+ Parameters
+ ----------
+ zyx_shape : tuple
+ zyx_pixel_size : tuple
+ wavelength : float
+ wavelength in medium
+
+ Returns
+ -------
+ torch.tensor
+ Green's tensor spectrum
+ """
+ Z, Y, X = zyx_shape
+ dZ, dY, dX = zyx_pixel_size
+
+ z_step = torch.fft.ifftshift((torch.arange(Z) - Z // 2) * dZ)
+ y_step = torch.fft.ifftshift((torch.arange(Y) - Y // 2) * dY)
+ x_step = torch.fft.ifftshift((torch.arange(X) - X // 2) * dX)
+
+ zz = torch.broadcast_to(z_step[:, None, None], (Z, Y, X))
+ yy = torch.broadcast_to(y_step[None, :, None], (Z, Y, X))
+ xx = torch.broadcast_to(x_step[None, None, :], (Z, Y, X))
+
+ rr = torch.sqrt(xx**2 + yy**2 + zz**2)
+ rhat = torch.stack([zz, yy, xx], dim=0) / rr
+
+ scalar_g = torch.exp(1j * 2 * torch.pi * rr / wavelength) / (
+ 4 * torch.pi * rr
+ )
+
+ eye = torch.zeros((3, 3, Z, Y, X))
+ eye[0, 0] = 1
+ eye[1, 1] = 1
+ eye[2, 2] = 1
+
+ Q = eye - torch.einsum("izyx,jzyx->ijzyx", rhat, rhat)
+ g_3d = Q * scalar_g
+ g_3d = torch.nan_to_num(g_3d)
+
+ G_3D = torch.fft.fftn(g_3d, dim=(-3, -2, -1))
+ G_3D = torch.imag(G_3D) * 1j
+ G_3D /= torch.amax(torch.abs(G_3D))
+
+ return G_3D
+
+
  def compute_weak_object_transfer_function_2d(
  illumination_pupil, detection_pupil
  ):
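As a rough usage sketch of the new `generate_greens_tensor_spectrum` helper above — the grid size, pixel sizes, and wavelength below are illustrative values, not taken from the package's documentation:

```python
import torch

from waveorder.optics import generate_greens_tensor_spectrum

zyx_shape = (32, 64, 64)           # (Z, Y, X) samples
zyx_pixel_size = (0.25, 0.1, 0.1)  # sample spacing, same units as the wavelength
wavelength = 0.532 / 1.33          # wavelength in the medium

# Returns a (3, 3, Z, Y, X) complex tensor: the dyadic Green's tensor spectrum,
# made purely imaginary and normalized to unit peak magnitude by the code above.
G = generate_greens_tensor_spectrum(zyx_shape, zyx_pixel_size, wavelength)
print(G.shape)  # torch.Size([3, 3, 32, 64, 64])
```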
@@ -0,0 +1,28 @@
+ import torch
+
+
+ def tikhonov_regularized_inverse_filter(
+ forward_filter: torch.Tensor, regularization_strength: float
+ ):
+ """Compute the Tikhonov regularized inverse filter from a forward filter.
+
+ Parameters
+ ----------
+ forward_filter : torch.Tensor
+ The forward filter tensor.
+ regularization_strength : float
+ The strength of the regularization term.
+ Returns
+ -------
+ torch.Tensor
+ The Tikhonov regularized inverse filter.
+ """
+
+ if forward_filter.ndim == 3:
+ forward_filter_conj = torch.conj(forward_filter)
+ return forward_filter_conj / (
+ (forward_filter_conj * forward_filter) + regularization_strength
+ )
+ else:
+ # TC TODO INTEGRATE THE 5D FILTER BANK CASE
+ raise NotImplementedError("Only 3D tensors are supported.")
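A minimal sketch of how this Tikhonov-regularized inverse filter might be applied to a 3D measurement. The new module's import path is not shown in this diff, so the same closed form is written inline here, and the transfer function and data are synthetic:

```python
import torch

zyx_shape = (8, 32, 32)
forward_filter = torch.rand(zyx_shape) + 1j * torch.rand(zyx_shape)  # synthetic transfer function
measurement = torch.rand(zyx_shape)                                  # synthetic data

# Same expression as the 3D branch above: conj(H) / (|H|^2 + regularization)
regularization_strength = 1e-3
inverse_filter = torch.conj(forward_filter) / (
    torch.conj(forward_filter) * forward_filter + regularization_strength
)

# Apply the inverse filter in the frequency domain
estimate = torch.fft.ifftn(
    inverse_filter * torch.fft.fftn(measurement, dim=(-3, -2, -1)),
    dim=(-3, -2, -1),
).real
print(estimate.shape)  # torch.Size([8, 32, 32])
```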
waveorder/sampling.py ADDED
@@ -0,0 +1,94 @@
+ import numpy as np
+ import torch
+
+
+ def transverse_nyquist(
+ wavelength_emission,
+ numerical_aperture_illumination,
+ numerical_aperture_detection,
+ ):
+ """Transverse Nyquist sample spacing in `wavelength_emission` units.
+
+ For widefield label-free imaging, the transverse Nyquist sample spacing is
+ lambda / (2 * (NA_ill + NA_det)).
+
+ Perhaps surprisingly, the transverse Nyquist sample spacing for widefield
+ fluorescence is lambda / (4 * NA), which is equivalent to the above formula
+ when NA_ill = NA_det.
+
+ Parameters
+ ----------
+ wavelength_emission : float
+ Output units match these units
+ numerical_aperture_illumination : float
+ For widefield fluorescence, set to numerical_aperture_detection
+ numerical_aperture_detection : float
+
+ Returns
+ -------
+ float
+ Transverse Nyquist sample spacing
+
+ """
+ return wavelength_emission / (
+ 2 * (numerical_aperture_detection + numerical_aperture_illumination)
+ )
+
+
+ def axial_nyquist(
+ wavelength_emission,
+ numerical_aperture_detection,
+ index_of_refraction_media,
+ ):
+ """Axial Nyquist sample spacing in `wavelength_emission` units.
+
+ For widefield microscopes, the axial Nyquist cutoff frequency is:
+
+ (n/lambda) - sqrt( (n/lambda)^2 - (NA_det/lambda)^2 ),
+
+ and the axial Nyquist sample spacing is 1 / (2 * cutoff_frequency).
+
+ Perhaps surprisingly, the axial Nyquist sample spacing is independent of
+ the illumination numerical aperture.
+
+ Parameters
+ ----------
+ wavelength_emission : float
+ Output units match these units
+ numerical_aperture_detection : float
+ index_of_refraction_media: float
+
+ Returns
+ -------
+ float
+ Axial Nyquist sample spacing
+
+ """
+ n_on_lambda = index_of_refraction_media / wavelength_emission
+ cutoff_frequency = n_on_lambda - np.sqrt(
+ n_on_lambda**2
+ - (numerical_aperture_detection / wavelength_emission) ** 2
+ )
+ return 1 / (2 * cutoff_frequency)
+
+
+ def nd_fourier_central_cuboid(source, target_shape):
+ """Central cuboid of an N-D Fourier transform.
+
+ Parameters
+ ----------
+ source : torch.Tensor
+ Source tensor
+ target_shape : tuple of int
+
+ Returns
+ -------
+ torch.Tensor
+ Center cuboid in Fourier space
+
+ """
+ center_slices = tuple(
+ slice((s - o) // 2, (s - o) // 2 + o)
+ for s, o in zip(source.shape, target_shape)
+ )
+ return torch.fft.ifftshift(torch.fft.fftshift(source)[center_slices])
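A quick numerical check of the two Nyquist helpers added above, with illustrative values (532 nm emission, 0.9/1.2 NA, water immersion):

```python
from waveorder.sampling import axial_nyquist, transverse_nyquist

wavelength_emission = 0.532  # micrometers
na_illumination = 0.9
na_detection = 1.2
index_of_refraction_media = 1.33

dxy = transverse_nyquist(wavelength_emission, na_illumination, na_detection)
dz = axial_nyquist(wavelength_emission, na_detection, index_of_refraction_media)

print(f"transverse Nyquist spacing: {dxy:.3f} um")  # 0.532 / (2 * (0.9 + 1.2)) ~ 0.127 um
print(f"axial Nyquist spacing:      {dz:.3f} um")   # ~ 0.35 um for these values
```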
waveorder/stokes.py CHANGED
@@ -2,7 +2,7 @@
  Overview
  --------

- This module collects Stokes- and Mueller-related calculations.
+ This module collects Stokes- and Mueller-related calculations.

  The functions are roughly organized into groups:

@@ -29,8 +29,8 @@ y = mmul(A, x)
  Usage
  -----

- All functions are intended to be used with torch.Tensors with Stokes- or
- Mueller-indices as the first axes.
+ All functions are intended to be used with torch.Tensors with Stokes- or
+ Mueller-indices as the first axes.

  For example, the following usage modes of stokes_after_adr are valid:

@@ -46,6 +46,7 @@ For example, the following usage modes of stokes_after_adr are valid:
  >>> stokes_after_adr(*adr_params) # * expands along the first axis

  """
+
  import numpy as np
  import torch

waveorder/util.py CHANGED
@@ -1,15 +1,14 @@
+ import re
+ import time
+ from collections import namedtuple
+
  import numpy as np
- import matplotlib.pyplot as plt
  import pywt
- import time
  import torch
-
- from numpy.fft import fft, ifft, fft2, ifft2, fftn, ifftn, fftshift, ifftshift
+ from numpy.fft import fft, fft2, fftn, fftshift, ifft, ifftn, ifftshift
  from scipy.ndimage import uniform_filter
- from collections import namedtuple
- from .optics import scattering_potential_tensor_to_3D_orientation_PN

- import re
+ from .optics import scattering_potential_tensor_to_3D_orientation_PN

  numbers = re.compile(r"(\d+)")

@@ -331,12 +330,15 @@ def gen_coordinate(img_dim, ps):
  return (xx, yy, fxx, fyy)


- def generate_radial_frequencies(img_dim, ps):
+ def generate_frequencies(img_dim, ps):
  fy = torch.fft.fftfreq(img_dim[0], ps)
  fx = torch.fft.fftfreq(img_dim[1], ps)
-
  fyy, fxx = torch.meshgrid(fy, fx, indexing="ij")
+ return fyy, fxx
+

+ def generate_radial_frequencies(img_dim, ps):
+ fyy, fxx = generate_frequencies(img_dim, ps)
  return torch.sqrt(fyy**2 + fxx**2)


@@ -2239,3 +2241,53 @@ def orientation_3D_continuity_map(
  retardance_pr_avg /= np.max(retardance_pr_avg)

  return retardance_pr_avg
+
+
+ def pauli():
+ # yx order
+ # trace-orthogonal normalization
+ # torch.einsum("kij,lji->kl", pauli(), pauli()) == torch.eye(4)
+
+ # intensity, x-y, +45-(-45), LCP-RCP
+ # yx
+ # yx
+ a = 2**-0.5
+ sigma = torch.tensor(
+ [
+ [[a, 0], [0, a]],
+ [[-a, 0], [0, a]],
+ [[0, a], [a, 0]],
+ [[0, 1j * a], [-1j * a, 0]],
+ ]
+ )
+ return sigma
+
+
+ def gellmann():
+ # zyx order
+ # trace-orthogonal normalization
+ # torch.einsum("kij,lji->kl", gellmann(), gellmann()) == torch.eye(9)
+ #
+ # lexicographical order of the Gell-Mann matrices
+ # 00, 1-1, 10, 11, 2-2, 2-1, 20, 21, 22
+ #
+ # zyx
+ # zyx
+ a = 3**-0.5
+ c = 2**-0.5
+ d = -(6**-0.5)
+ e = 2 * (6**-0.5)
+ return torch.tensor(
+ [
+ [[a, 0, 0], [0, a, 0], [0, 0, a]],
+ [[0, 0, -c], [0, 0, 0], [c, 0, 0]],
+ [[0, 0, 0], [0, 0, -c], [0, c, 0]],
+ [[0, -c, 0], [c, 0, 0], [0, 0, 0]],
+ [[0, 0, 0], [0, 0, c], [0, c, 0]], #
+ [[0, c, 0], [c, 0, 0], [0, 0, 0]],
+ [[e, 0, 0], [0, d, 0], [0, 0, d]],
+ [[0, 0, c], [0, 0, 0], [c, 0, 0]],
+ [[0, 0, 0], [0, -c, 0], [0, 0, c]], #
+ ],
+ dtype=torch.complex64,
+ )
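A small check of the trace-orthogonal normalization noted in the `pauli()` comments above (assuming `waveorder.util` imports cleanly in your environment):

```python
import torch

from waveorder.util import pauli

# Tr(sigma_k sigma_l), contracted over both matrix indices, should give the 4x4 identity
gram = torch.einsum("kij,lji->kl", pauli(), pauli())
assert torch.allclose(gram, torch.eye(4, dtype=gram.dtype), atol=1e-6)
print(gram.real)
```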
@@ -1,26 +1,16 @@
- import numpy as np
- import matplotlib.pyplot as plt
- import ipywidgets as widgets
- import os
  import io
+ import os
+
+ import ipywidgets as widgets
+ import matplotlib.pyplot as plt
+ import numpy as np
+ from ipywidgets import HBox, Image, Layout, interact
+ from matplotlib.colors import Normalize, hsv_to_rgb
+ from numpy.typing import NDArray
  from PIL import Image as PImage
- from ipywidgets import (
- Image,
- Layout,
- interact,
- interactive,
- fixed,
- interact_manual,
- HBox,
- VBox,
- )
- from matplotlib.colors import hsv_to_rgb
- from matplotlib.colors import Normalize
  from scipy.ndimage import uniform_filter
  from scipy.stats import binned_statistic_2d

- from numpy.typing import NDArray
-

  def im_bit_convert(im, bit=16, norm=False, limit=[]):
  im = im.astype(
@@ -1050,15 +1040,17 @@ def plotVectorField(
  # plot vector field representaiton of the orientation map

  # Compute U, V such that they are as long as line-length when anisotropy = 1.
- U, V = anisotropy * linelength * np.cos(
- 2 * orientation
- ), anisotropy * linelength * np.sin(2 * orientation)
+ U, V = (
+ anisotropy * linelength * np.cos(2 * orientation),
+ anisotropy * linelength * np.sin(2 * orientation),
+ )
  USmooth = uniform_filter(U, (window, window)) # plot smoothed vector field
  VSmooth = uniform_filter(V, (window, window)) # plot smoothed vector field
  azimuthSmooth = 0.5 * np.arctan2(VSmooth, USmooth)
  RSmooth = np.sqrt(USmooth**2 + VSmooth**2)
- USmooth, VSmooth = RSmooth * np.cos(azimuthSmooth), RSmooth * np.sin(
- azimuthSmooth
+ USmooth, VSmooth = (
+ RSmooth * np.cos(azimuthSmooth),
+ RSmooth * np.sin(azimuthSmooth),
  )

  nY, nX = img.shape
@@ -1643,8 +1635,9 @@ def plot3DVectorField(
  VSmooth = uniform_filter(V, (window, window)) # plot smoothed vector field
  azimuthSmooth = 0.5 * np.arctan2(VSmooth, USmooth)
  RSmooth = np.sqrt(USmooth**2 + VSmooth**2)
- USmooth, VSmooth = RSmooth * np.cos(azimuthSmooth), RSmooth * np.sin(
- azimuthSmooth
+ USmooth, VSmooth = (
+ RSmooth * np.cos(azimuthSmooth),
+ RSmooth * np.sin(azimuthSmooth),
  )

  nY, nX = img.shape
@@ -1928,4 +1921,4 @@ def orientation_3D_hist(
  if colorbar:
  fig.colorbar(img, ax=ax[row_idx, col_idx])

- return fig, ax
+ return fig, ax