trajectree-0.0.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- trajectree/__init__.py +0 -0
- trajectree/experimental/sparse.py +115 -0
- trajectree/fock_optics/devices.py +58 -0
- trajectree/fock_optics/light_sources.py +123 -0
- trajectree/fock_optics/measurement.py +236 -0
- trajectree/fock_optics/noise_models.py +41 -0
- trajectree/fock_optics/outputs.py +65 -0
- trajectree/fock_optics/utils.py +180 -0
- trajectree/optical_quant_info.py +137 -0
- trajectree/sequence/swap.py +76 -0
- trajectree/trajectory.py +210 -0
- trajectree-0.0.0.dist-info/METADATA +18 -0
- trajectree-0.0.0.dist-info/RECORD +16 -0
- trajectree-0.0.0.dist-info/WHEEL +5 -0
- trajectree-0.0.0.dist-info/licenses/LICENSE +19 -0
- trajectree-0.0.0.dist-info/top_level.txt +1 -0
trajectree/__init__.py
ADDED
File without changes
trajectree/experimental/sparse.py
ADDED
@@ -0,0 +1,115 @@
from ..fock_optics.measurement import *
from ..fock_optics.outputs import *

import numpy as np
import scipy.sparse as sp

# Support functions:
def create_op(left_indices, op, right_indices, N):
    if left_indices == 0:
        return sp.kron(op, sp.eye(N**right_indices))
    elif right_indices == 0:
        return sp.kron(sp.eye(N**left_indices), op)
    else:
        out_op = sp.kron(sp.eye(N**left_indices), op)
        return sp.kron(out_op, sp.eye(N**right_indices))
def _find_mat_exp(mat):
    ans = sp.eye(mat.shape[0])
    intermediate = 1
    for i in range(1, 50+1):
        intermediate *= mat/i
        intermediate.eliminate_zeros()
        ans += intermediate
    return ans
def read_quantum_state_sparse(sparse_state, N):
    temp_sparse_state = sp.csr_matrix(sparse_state)
    temp_sparse_state.data = np.round(temp_sparse_state.data, 10)
    temp_sparse_state.eliminate_zeros()
    labels = generate_labels(4,N)
    state = temp_sparse_state.nonzero()[0]
    print(f"{len(state)} non-zero elements Corresponding Basis terms:")
    for k in state: print(labels[k],"-",k,"-",temp_sparse_state[k].data)

def extend_state_sparse(state):
    return sp.kron(state, state)
# TMSV_state_dense = extend_state_sparse(TMSV_state)

def bell_state_measurement_sparse(TMSV_state_dense, N, efficiency, a_dag, is_dm = False):
    # BSM BS implementation
    BSM_H_0_Mode_op = create_op(2, a_dag, 5, N)
    print(BSM_H_0_Mode_op.shape, len(BSM_H_0_Mode_op.nonzero()[0]), len(BSM_H_0_Mode_op.nonzero()[1]))
    BSM_V_0_Mode_op = create_op(3, a_dag, 4, N)
    BSM_H_1_Mode_op = create_op(6, a_dag, 1, N)
    BSM_V_1_Mode_op = create_op(7, a_dag, 0, N)

    hamiltonian_BS_H = -np.pi/4 * ( BSM_H_0_Mode_op.T@BSM_H_1_Mode_op - BSM_H_0_Mode_op@BSM_H_1_Mode_op.T )
    unitary_BS_H = _find_mat_exp(hamiltonian_BS_H)

    hamiltonian_BS_V = -np.pi/4 * ( BSM_V_0_Mode_op.T@BSM_V_1_Mode_op - BSM_V_0_Mode_op@BSM_V_1_Mode_op.T )
    unitary_BS_V = _find_mat_exp(hamiltonian_BS_V)


    # BSM povm implementation
    povm_op_1 = sp.csr_matrix(create_threshold_POVM_OP_Dense(efficiency, 1, N))
    povm_op_0 = sp.csr_matrix(create_threshold_POVM_OP_Dense(efficiency, 0, N))

    BSM_povm = create_op(2, povm_op_1, 0, N)
    BSM_povm = create_op(0, sp.kron(BSM_povm, povm_op_0), 2, N)
    BSM_povm = sp.kron(BSM_povm, sp.kron(povm_op_0, povm_op_1))

    # print(unitary_BS_V.shape, unitary_BS_H.shape, TMSV_state_dense.shape)

    if is_dm:
        post_BS_State = unitary_BS_V @ unitary_BS_H @ TMSV_state_dense @ (unitary_BS_V @ unitary_BS_H).conj().T
        post_BSM_State = BSM_povm @ post_BS_State @ BSM_povm.conj().T
    else:
        post_BS_State = unitary_BS_V @ unitary_BS_H @ TMSV_state_dense
        post_BSM_State = BSM_povm @ post_BS_State

    # post_BSM_State.data = np.round(post_BSM_State.data, 10)
    # post_BSM_State.eliminate_zeros()

    return post_BSM_State
# post_BSM_State = bell_state_measurement_sparse(TMSV_state_dense, N, efficiency)

def rotate_and_measure_sparse(post_BSM_State, N, efficiency, a_dag):
    # Polarization rotators mode operators
    rotator_H_0_Mode_op = create_op(0, a_dag, 7, N)
    rotator_V_0_Mode_op = create_op(1, a_dag, 6, N)
    rotator_H_1_Mode_op = create_op(4, a_dag, 3, N)
    rotator_V_1_Mode_op = create_op(5, a_dag, 2, N)

    povm_op_1 = sp.csr_matrix(create_threshold_POVM_OP_Dense(efficiency, 1, N))

    # polarization analysis detector POVMs
    pol_analyzer_povm = create_op(0, povm_op_1, 3, N)
    pol_analyzer_povm = create_op(0, sp.kron(pol_analyzer_povm, povm_op_1), 3, N)

    # Applying rotations and measuring

    signal_angles = np.linspace(0, np.pi, 10)
    # idler_angles = np.linspace(0, np.pi, 20)
    idler_angles = [0]
    coincidence = []

    for i, idler_angle in enumerate(idler_angles):
        coincidence_probs = []

        hamiltonian_rotator_1 = -idler_angle * ( rotator_H_1_Mode_op.T@rotator_V_1_Mode_op - rotator_H_1_Mode_op@rotator_V_1_Mode_op.T )
        unitary_rotator_1 = _find_mat_exp(hamiltonian_rotator_1)
        post_idler_detection_state = unitary_rotator_1 @ post_BSM_State
        # post_idler_detection_state = post_BSM_State

        for j, angle in enumerate(signal_angles):
            # print("idler:", i, "signal:", j)

            hamiltonian_rotator_0 = -angle * ( rotator_H_0_Mode_op.T@rotator_V_0_Mode_op - rotator_H_0_Mode_op@rotator_V_0_Mode_op.T )
            unitary_rotator_0 = _find_mat_exp(hamiltonian_rotator_0)
            post_rotations_state = unitary_rotator_0 @ post_idler_detection_state

            measured_state = pol_analyzer_povm @ post_rotations_state

            coincidence_probs.append(sp.linalg.norm(measured_state)**2)
        coincidence.append(coincidence_probs)
    return coincidence, idler_angles
# coincidence, idler_angles = rotate_and_measure_sparse(post_BSM_State, N, efficiency)
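As an illustration of the support functions above (not part of the package diff), here is a minimal sketch of how create_op and _find_mat_exp might be combined, mirroring how the module builds its beamsplitter unitaries; the truncation N = 2 and the 3-mode register are illustrative assumptions:

import numpy as np
import scipy.sparse as sp
import qutip as qt
from trajectree.experimental.sparse import create_op, _find_mat_exp

N = 2                                              # assumed local Fock-space dimension
a_dag = sp.csr_matrix(qt.create(N).full())

op0 = create_op(0, a_dag, 2, N)                    # a_dag embedded on mode 0 of a 3-mode register
op1 = create_op(1, a_dag, 1, N)                    # a_dag embedded on mode 1
h = sp.csr_matrix(-np.pi/4 * (op0.T @ op1 - op0 @ op1.T))   # real antisymmetric generator, as in the BSM
u = _find_mat_exp(h)                               # truncated Taylor series of expm(h)
print(np.allclose((u @ u.T).toarray(), np.eye(N**3), atol=1e-8))   # expm of an antisymmetric matrix is orthogonal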
trajectree/fock_optics/devices.py
ADDED
@@ -0,0 +1,58 @@
from scipy.linalg import expm

import numpy as np
from numpy import kron

from quimb.tensor import MatrixProductOperator as mpo #type: ignore

import qutip as qt

# Beamsplitter transformation
def create_BS_MPO(site1, site2, theta, total_sites, N, tag = 'BS'):

    a = qt.destroy(N).full()
    a_dag = a.T
    I = np.eye(N)

    # This corresponds to the BS hamiltonian:

    hamiltonian_BS = -theta * ( kron(I, a_dag)@kron(a, I) - kron(I, a)@kron(a_dag, I) )
    unitary_BS = expm(hamiltonian_BS)

    # print("unitary_BS", unitary_BS)

    BS_MPO = mpo.from_dense(unitary_BS, dims = N, sites = (site1,site2), L=total_sites, tags=tag)
    # BS_MPO = BS_MPO.fill_empty_sites(mode = "full")
    return BS_MPO


def generalized_mode_mixer(site1, site2, theta, phi, psi, lamda, total_sites, N, tag = 'MM'):

    a = qt.destroy(N).full()
    a_dag = a.T
    I = np.eye(N)

    # This corresponds to the BS hamiltonian. This is a different definition from the one in
    # create_BS_MPO, because of how the generalized beamsplitter is defined in DOI: 10.1088/0034-4885/66/7/203 .
    hamiltonian_BS = theta * (kron(a_dag, I)@kron(I, a) + kron(a, I)@kron(I, a_dag))
    unitary_BS = expm(-1j * hamiltonian_BS)

    # print("unitary_BS\n", np.round(unitary_BS, 4))

    pre_phase_shifter = np.kron(phase_shifter(N, phi[0]/2), phase_shifter(N, phi[1]/2))
    post_phase_shifter = np.kron(phase_shifter(N, psi[0]/2), phase_shifter(N, psi[1]/2))
    global_phase_shifter = np.kron(phase_shifter(N, lamda[0]/2), phase_shifter(N, lamda[1]/2))

    # This construction for the generalized beamsplitter is based on the description in the paper DOI: 10.1088/0034-4885/66/7/203
    generalized_BS = global_phase_shifter @ (pre_phase_shifter @ unitary_BS @ post_phase_shifter)

    # print("generalized_BS\n", np.round(generalized_BS, 4))

    BS_MPO = mpo.from_dense(generalized_BS, dims = N, sites = (site1,site2), L=total_sites, tags=tag)
    # BS_MPO = BS_MPO.fill_empty_sites(mode = "full")
    return BS_MPO


def phase_shifter(N, theta):
    diag = [np.exp(1j * theta * i) for i in range(N)]
    return np.diag(diag, k=0)
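A small usage sketch (not part of the diff) of create_BS_MPO: build a 50:50 beamsplitter MPO on a two-mode register and check that its dense form is unitary. The truncation N = 3 and the two-site register are illustrative assumptions:

import numpy as np
from trajectree.fock_optics.devices import create_BS_MPO

N = 3                                            # assumed local Fock-space dimension
bs = create_BS_MPO(site1=0, site2=1, theta=np.pi/4, total_sites=2, N=N)   # 50:50 beamsplitter
u = bs.to_dense()                                # small systems can be densified for inspection
print(np.allclose(u @ u.conj().T, np.eye(N**2), atol=1e-10))   # unitarity check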
trajectree/fock_optics/light_sources.py
ADDED
@@ -0,0 +1,123 @@
from .utils import create_MPO
from .devices import create_BS_MPO

from scipy import sparse as sp
from scipy.linalg import expm

import numpy as np
from numpy.linalg import matrix_power
from numpy import kron, sqrt

from quimb.tensor.tensor_arbgeom import tensor_network_apply_op_vec #type: ignore
from quimb.tensor.tensor_1d_compress import enforce_1d_like #type: ignore

import qutip as qt
from math import factorial


def create_TMSV_OP_Dense(N, mean_photon_num):
    a = qt.destroy(N).full()
    a_dag = a.T
    truncation = (N-1)

    op = expm(1j * mean_photon_num * (kron(a_dag, a_dag) + kron(a, a)))

    return op



########## Light Source ###########

def light_source(vacuum, N, mean_photon_num, num_modes, error_tolerance, TMSV_indices = ((0,2),(5,7)), compress = True, contract = True):

    psi = vacuum.copy()
    psi.add_tag("L0")
    site_tags = psi.site_tags

    # Creating TMSV ops:
    TMSV_op_dense = create_TMSV_OP_Dense(N, mean_photon_num)

    TMSV_MPO_H = create_MPO(site1 = TMSV_indices[0][0], site2 = TMSV_indices[0][1], total_sites = num_modes, op = TMSV_op_dense, N = N, tag = r"$TMSV_H$")
    # TMSV_MPO_H.draw()
    # print("sites present in light_source:", TMSV_MPO_H.sites)
    enforce_1d_like(TMSV_MPO_H, site_tags=site_tags, inplace=True)
    # print("sites present in light_source:", TMSV_MPO_H.sites)
    TMSV_MPO_H.add_tag("L1")

    TMSV_MPO_V = create_MPO(site1 = TMSV_indices[1][0], site2 = TMSV_indices[1][1], total_sites = num_modes, op = TMSV_op_dense, N = N, tag = r"$TMSV_V$")
    enforce_1d_like(TMSV_MPO_V, site_tags=site_tags, inplace=True)
    TMSV_MPO_V.add_tag("L1")

    # Creating PBS ops:
    U_PBS_H_Signal = create_BS_MPO(site1 = 2, site2 = 6, theta=np.pi/2, total_sites = num_modes, N = N, tag = r"$PBS_S$")
    enforce_1d_like(U_PBS_H_Signal, site_tags=site_tags, inplace=True)
    U_PBS_H_Signal.add_tag("L1")

    U_PBS_H_Idler = create_BS_MPO(site1 = 0, site2 = 4, theta=np.pi/2, total_sites = num_modes, N = N, tag = r"$PBS_I$")
    enforce_1d_like(U_PBS_H_Idler, site_tags=site_tags, inplace=True)
    U_PBS_H_Signal.add_tag("L1")

    # Create entangled state:
    psi = tensor_network_apply_op_vec(TMSV_MPO_H, psi, compress=compress, contract = contract, cutoff = error_tolerance)
    psi = tensor_network_apply_op_vec(TMSV_MPO_V, psi, compress=compress, contract = contract, cutoff = error_tolerance)
    psi = tensor_network_apply_op_vec(U_PBS_H_Idler, psi, compress=compress, contract = contract, cutoff = error_tolerance)
    psi = tensor_network_apply_op_vec(U_PBS_H_Signal, psi, compress=compress, contract = contract, cutoff = error_tolerance)

    psi.normalize()

    # print("trace is:", np.linalg.norm(psi.to_dense()))

    for _ in range(4):
        psi.measure(0, remove = True, renorm = True, inplace = True)

    # Not used for the TN implementation. Used for validating the implementation against the dense version.
    TMSV_state = psi.to_dense()
    TMSV_state = np.reshape(TMSV_state.data, (-1, 1), order = 'C')
    TMSV_state = sp.csr_matrix(TMSV_state)
    TMSV_state.data = np.round(TMSV_state.data, 10)
    TMSV_state.eliminate_zeros()

    return psi, TMSV_state


# Generate truncation filter MPO
# TODO: Make a function to renormalize a quantum state. How: find the projection of the quantum state onto itself and calculate the
# probability. Next, take the square root of this number, divide it by the number of nodes in the quantum state and multiply it with
# all the states in the MPS. For density matrices, simply find the trace directly and do the same thing as the previous example except
# for not taking the square root. The truncation filter would not work without the renormalization.
def create_truncation_filter_Dense(truncation):
    # This is only the projection operator. The states need to be normalized first.
    N = truncation+1
    vacuum = np.zeros(N**2)
    vacuum[0] = 1

    a = qt.destroy(N).full()
    a_dag = a.T
    I = np.eye(N)

    # # debug
    # labels = generate_labels(1,N)

    op = 0
    for trunc in range(truncation, -1, -1):
        state = kron(matrix_power(a_dag, trunc), I) @ vacuum / sqrt(factorial(trunc) * factorial(0))
        op+=np.outer(state, state)
        coeffs = [trunc+1, 0]

        # # Debug
        # state_inds = state.nonzero()[0]
        # print("TMSV state:", [labels[i] for i in state_inds], "Val:", state[state_inds[0]])
        # print("coeffs", coeffs)

        for i in range(trunc):
            coeffs = [coeffs[0]-1, coeffs[1]+1]
            state = kron(a, a_dag) @ state / sqrt((coeffs[0]) * (coeffs[1]))
            op += np.outer(state, state)

            # # debug
            # state_inds = state.nonzero()[0]
            # print("TMSV state:", [labels[i] for i in state_inds], "Val:", state[state_inds[0]])
            # print("coeffs", coeffs)

    return op
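A brief usage sketch (illustrative, not part of the package) of the light source: build an 8-mode vacuum MPS with create_vacuum_state from trajectree.fock_optics.utils (shown later in this diff) and pass it to light_source. The truncation, mean photon number, and tolerance are assumed values:

from trajectree.fock_optics.utils import create_vacuum_state
from trajectree.fock_optics.light_sources import light_source

N = 2                       # assumed local Fock-space dimension
num_modes = 8               # the default TMSV/PBS site indices expect an 8-mode register
vacuum = create_vacuum_state(num_modes=num_modes, N=N)
psi, TMSV_state_sparse = light_source(vacuum, N, mean_photon_num=0.1,
                                      num_modes=num_modes, error_tolerance=1e-10)
print(psi.L, TMSV_state_sparse.shape)   # remaining MPS sites and the sparse validation vector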
trajectree/fock_optics/measurement.py
ADDED
@@ -0,0 +1,236 @@
from .devices import generalized_mode_mixer, create_BS_MPO
from ..trajectory import quantum_channel
from .noise_models import single_mode_bosonic_noise_channels

from scipy.linalg import sqrtm
from scipy import sparse as sp

import numpy as np
from numpy.linalg import matrix_power
from numpy import sqrt

from quimb.tensor import MatrixProductOperator as mpo #type: ignore
from quimb.tensor.tensor_arbgeom import tensor_network_apply_op_vec #type: ignore
from quimb.tensor.tensor_1d_compress import enforce_1d_like #type: ignore

import qutip as qt
from math import factorial

from functools import lru_cache


# This is the actual function that generates the POVM operator.
def create_threshold_POVM_OP_Dense(efficiency, outcome, N):
    a = qt.destroy(N).full()
    a_dag = a.T
    create0 = a_dag * sqrt(efficiency)
    destroy0 = a * sqrt(efficiency)
    series_elem_list = [((-1)**i) * matrix_power(create0, (i+1)) @ matrix_power(destroy0, (i+1)) / factorial(i+1) for i in range(N-1)] # (-1)^i * a_dag^(i+1) @ a^(i+1) / (i+1)! = (-1)^(i+2) * a_dag^(i+1) @ a^(i+1) / (i+1)! since goes from 0->n
    # print(series_elem_list[0])
    dense_op = sum(series_elem_list)

    if outcome == 0:
        dense_op = np.eye(dense_op.shape[0]) - dense_op
    # print(sqrtm(dense_op))
    return dense_op

@lru_cache(maxsize=20)
def factorial(x):
    # Note: this cached redefinition shadows math.factorial imported above.
    n = 1
    for i in range(2, x+1):
        n *= i
    return n

@lru_cache(maxsize=20)
def comb(n, k):
    return factorial(n) / (factorial(k) * factorial(n - k))

@lru_cache(maxsize=20)
def projector(n, N):
    state = np.zeros(N)
    state[n] = 1
    return np.outer(state, state)

# Testing stuff out here.
def create_PNR_POVM_OP_Dense(eff, outcome, N, debug = False):
    a_dag = qt.create(N).full()
    vacuum = np.zeros(N)
    vacuum[0] = 1

    @lru_cache(maxsize=20)
    def create_povm_list(eff, N):
        povms = []
        # m is the outcome here
        for m in range(N-1):
            op = 0
            for n in range(m, N):
                op += comb(n,m) * eff**m * (1-eff)**(n-m) * projector(n, N)
            povms.append(op)

        povms.append(np.eye(N) - sum(povms))
        return povms

    povms = create_povm_list(eff, N)
    if debug:
        return povms[outcome], povms
    return povms[outcome]



def generate_sqrt_POVM_MPO(sites, outcome, total_sites, efficiency, N, pnr = False, tag = "POVM"):
    if pnr:
        dense_op = sqrtm(create_PNR_POVM_OP_Dense(efficiency, outcome, N)).astype(np.complex128)
    else:
        dense_op = sqrtm(create_threshold_POVM_OP_Dense(efficiency, outcome, N)).astype(np.complex128)

    sqrt_POVM_MPOs = []
    for i in sites:
        sqrt_POVM_MPOs.append(mpo.from_dense(dense_op, dims = N, sites = (i,), L=total_sites, tags=tag))

    return sqrt_POVM_MPOs


def bell_state_measurement(psi, N, site_tags, num_modes, efficiencies, dark_counts_gain, error_tolerance, beamsplitters = [[2,6],[3,7]], measurements = {0:(2,7), 1:(3,6)}, pnr = False, det_outcome = 1, use_trajectory = False, return_MPOs = False, compress = True, contract = True):

    """Perform a Bell state measurement or return the MPOs used in the measurement.
    Args:
        psi (mps): The input state to be measured.
        N (int): local Hilbert space dimension
        site_tags (list): The tags for the sites in the MPS.
        num_modes (int): The number of modes in the MPS.
        efficiencies (list[float]): The efficiencies of the (pairs of) detectors in the BSM.
        dark_counts_gain (list[float]): Gain parameters (> 1) used to model dark counts on the two detector pairs via amplification channels.
        error_tolerance (float): The error tolerance for the tensor network.
        measurements (dict): The sites for the measurements. Default is {0:(2,7), 1:(3,6)}.
        pnr (bool): Whether to use photon number resolving measurement. Default is False.
        det_outcome (int): The outcome for the photon number resolving measurement. Default is 1. When not using PNR, any non-zero value selects the click outcome, since threshold detectors do not distinguish photon numbers.
        return_MPOs (bool): Whether to return the MPOs used in the measurement. Default is False.
        compress (bool): Whether to compress the MPS after applying the MPOs. Default is True.
        contract (bool): Whether to contract the MPS after applying the MPOs. Default is True.

    Returns:
        mps: The measured state after the Bell state measurement.

    """

    U_BS_H = create_BS_MPO(site1 = beamsplitters[0][0], site2 = beamsplitters[0][1], theta=np.pi/4, total_sites = num_modes, N = N, tag = r"$U_{BS_H}$")
    enforce_1d_like(U_BS_H, site_tags=site_tags, inplace=True)
    U_BS_H.add_tag("L2")

    U_BS_V = create_BS_MPO(site1 = beamsplitters[1][0], site2 = beamsplitters[1][1], theta=np.pi/4, total_sites = num_modes, N = N, tag = r"$U_{BS_V}$")
    enforce_1d_like(U_BS_V, site_tags=site_tags, inplace=True)
    U_BS_V.add_tag("L3")

    # Note that these are not used if using trajectree to implement detector inefficiency.
    BSM_POVM_1_OPs = generate_sqrt_POVM_MPO(sites=measurements[1], outcome = det_outcome, total_sites=num_modes, efficiency=efficiencies[0], N=N, pnr = pnr)
    BSM_POVM_1_OPs.extend(generate_sqrt_POVM_MPO(sites=measurements[0], outcome = 0, total_sites=num_modes, efficiency=efficiencies[1], N=N, pnr = pnr))

    if return_MPOs:
        returned_MPOs = [U_BS_H, U_BS_V]
        if use_trajectory:
            quantum_channel_list = [quantum_channel(N = N, num_modes = num_modes, formalism = "closed", unitary_MPOs = BSM_MPO, name = "BSM") for BSM_MPO in returned_MPOs]

            damping_kraus_ops_0 = single_mode_bosonic_noise_channels(noise_parameter = 1-efficiencies[0], N = N)
            damping_kraus_ops_1 = single_mode_bosonic_noise_channels(noise_parameter = 1-efficiencies[1], N = N)
            two_mode_kraus_ops_0 = [sp.kron(op1, op2) for op1 in damping_kraus_ops_0 for op2 in damping_kraus_ops_0]
            two_mode_kraus_ops_1 = [sp.kron(op1, op2) for op1 in damping_kraus_ops_1 for op2 in damping_kraus_ops_1]
            quantum_channel_list.append(quantum_channel(N = N, num_modes = num_modes, formalism = "kraus", kraus_ops_tuple = ((2,3), two_mode_kraus_ops_0))) # The tuples in this list are defined as (sites, kraus_ops). The sites are the sites where the Kraus ops are applied.
            quantum_channel_list.append(quantum_channel(N = N, num_modes = num_modes, formalism = "kraus", kraus_ops_tuple = ((6,7), two_mode_kraus_ops_1))) # The tuples in this list are defined as (sites, kraus_ops). The sites are the sites where the Kraus ops are applied.

            amplification_kraus_ops_0 = single_mode_bosonic_noise_channels(noise_parameter = dark_counts_gain[0], N = N)
            amplification_kraus_ops_1 = single_mode_bosonic_noise_channels(noise_parameter = dark_counts_gain[1], N = N)
            two_mode_kraus_ops_0 = [sp.kron(op1, op2) for op1 in amplification_kraus_ops_0 for op2 in amplification_kraus_ops_0]
            two_mode_kraus_ops_1 = [sp.kron(op1, op2) for op1 in amplification_kraus_ops_1 for op2 in amplification_kraus_ops_1]
            quantum_channel_list.append(quantum_channel(N = N, num_modes = num_modes, formalism = "kraus", kraus_ops_tuple = ((2,3), two_mode_kraus_ops_0))) # The tuples in this list are defined as (sites, kraus_ops). The sites are the sites where the Kraus ops are applied.
            quantum_channel_list.append(quantum_channel(N = N, num_modes = num_modes, formalism = "kraus", kraus_ops_tuple = ((6,7), two_mode_kraus_ops_1))) # The tuples in this list are defined as (sites, kraus_ops). The sites are the sites where the Kraus ops are applied.

            BSM_POVM_1_OPs = generate_sqrt_POVM_MPO(sites=measurements[1], outcome = det_outcome, total_sites=num_modes, efficiency=1, N=N, pnr = pnr)
            BSM_POVM_1_OPs.extend(generate_sqrt_POVM_MPO(sites=measurements[0], outcome = 0, total_sites=num_modes, efficiency=1, N=N, pnr = pnr))

            det_quantum_channels = [quantum_channel(N = N, num_modes = num_modes, formalism = "closed", unitary_MPOs = DET_MPO, name = "DET") for DET_MPO in BSM_POVM_1_OPs]
            quantum_channel_list.extend(det_quantum_channels)

            return quantum_channel_list

        returned_MPOs.extend(BSM_POVM_1_OPs) # Collect all the MPOs in a list and return them. The operators are ordered as such:

        quantum_channel_list = [quantum_channel(N = N, num_modes = num_modes, formalism = "closed", unitary_MPOs = BSM_MPO, name = "BSM") for BSM_MPO in returned_MPOs]

        return quantum_channel_list

    psi = tensor_network_apply_op_vec(U_BS_H, psi, compress=compress, contract = contract, cutoff = error_tolerance)
    psi = tensor_network_apply_op_vec(U_BS_V, psi, compress=compress, contract = contract, cutoff = error_tolerance)

    for POVM_OP in BSM_POVM_1_OPs:
        POVM_OP.add_tag("L4")
        psi = tensor_network_apply_op_vec(POVM_OP, psi, compress=compress, contract = contract, cutoff = error_tolerance)

    return psi



def rotate_and_measure(psi, N, site_tags, num_modes, efficiency, error_tolerance, idler_angles, signal_angles, rotations = {"signal":(4,5), "idler":(0,1)}, measurements = {1:(0,4), 0:(1,5)}, pnr = False, det_outcome = 1, return_MPOs = False, compress = True, contract = True, draw = False):
    # idler_angles = [0]
    # angles = [np.pi/4]

    coincidence = []

    POVM_1_OPs = generate_sqrt_POVM_MPO(sites = measurements[1], outcome = det_outcome, total_sites=num_modes, efficiency=efficiency, N=N, pnr = pnr)
    POVM_0_OPs = generate_sqrt_POVM_MPO(sites = measurements[0], outcome = 0, total_sites=num_modes, efficiency=efficiency, N=N, pnr = pnr)
    # POVM_0_OPs = generate_sqrt_POVM_MPO(sites=(0,4), outcome = 0, total_sites=num_modes, efficiency=efficiency, N=N, pnr = pnr)
    # enforce_1d_like(POVM_OP, site_tags=site_tags, inplace=True)

    meas_ops = POVM_1_OPs
    meas_ops.extend(POVM_0_OPs)

    for i, idler_angle in enumerate(idler_angles):
        coincidence_probs = []

        # rotator_node_1 = create_BS_MPO(site1 = rotations["idler"][0], site2 = rotations["idler"][1], theta=idler_angle, total_sites = num_modes, N = N, tag = r"$Rotator_I$")
        ######################
        # We make this correction here since the rotator hamiltonian is 1/2(a_v b_h + a_h b_v), which does not show up in the BS unitary, whose function we are reusing to
        # rotate the state.
        rotator_node_1 = generalized_mode_mixer(site1 = rotations["idler"][0], site2 = rotations["idler"][1], theta = -idler_angle/2, phi = [0,0], psi = [0,0], lamda = [0,0], total_sites = num_modes, N = N, tag = 'MM')

        enforce_1d_like(rotator_node_1, site_tags=site_tags, inplace=True)
        rotator_node_1.add_tag("L5")
        if not return_MPOs: # If the user wants the MPOs, we don't need to apply the rotator to the state.
            idler_rotated_psi = tensor_network_apply_op_vec(rotator_node_1, psi, compress=compress, contract = contract, cutoff = error_tolerance)


        for j, angle in enumerate(signal_angles):
            # print("idler:", i, "signal:", j)

            # rotator_node_2 = create_BS_MPO(site1 = rotations["signal"][0], site2 = rotations["signal"][1], theta=angle, total_sites = num_modes, N = N, tag = r"$Rotator_S$")
            ##########################
            # We make this correction here since the rotator hamiltonian is 1/2(a_v b_h + a_h b_v), which does not show up in the BS unitary, whose function we are reusing to
            # rotate the state.
            rotator_node_2 = generalized_mode_mixer(site1 = rotations["signal"][0], site2 = rotations["signal"][1], theta = -angle/2, phi = [0,0], psi = [0,0], lamda = [0,0], total_sites = num_modes, N = N, tag = 'MM')

            enforce_1d_like(rotator_node_2, site_tags=site_tags, inplace=True)

            if return_MPOs:
                meas_ops = [rotator_node_1, rotator_node_2] + meas_ops # Collect all the MPOs in a list and return them
                return meas_ops

            # Rotate and measure:
            rotator_node_2.add_tag("L5")
            rho_rotated = tensor_network_apply_op_vec(rotator_node_2, idler_rotated_psi, compress=compress, contract = contract, cutoff = error_tolerance)

            # read_quantum_state(psi)
            # read_quantum_state(rho_rotated)

            for POVM_OP in meas_ops:
                POVM_OP.add_tag("L6")
                rho_rotated = tensor_network_apply_op_vec(POVM_OP, rho_rotated, compress=compress, contract = contract, cutoff = error_tolerance)

            if draw:
                # only for drawing the TN. Not used otherwise
                fix = {(f"L{j}",f"I{num_modes - i-1}"):(3*j,i+5) for j in range(10) for i in range(10)}
                rho_rotated.draw(color = [r'$HH+VV$', r'$U_{BS_H}$', r"$U_{BS_V}$", 'POVM', r'$Rotator_I$', r'$Rotator_S$'], title = "Polarization entanglement swapping MPS", fix = fix, show_inds = True, show_tags = False)
                # rho_rotated.draw_tn()
            coincidence_probs.append((rho_rotated.norm())**2)
        coincidence.append(coincidence_probs)

    return np.array(coincidence)
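A small consistency sketch (not part of the package) for the threshold-detector POVM above: by construction the outcome-0 element is the identity minus the outcome-1 element, so the two should resolve the identity. The truncation and efficiency values are illustrative:

import numpy as np
from trajectree.fock_optics.measurement import create_threshold_POVM_OP_Dense

N, eff = 4, 0.8                                        # assumed truncation and detector efficiency
click = create_threshold_POVM_OP_Dense(eff, 1, N)      # "click" element
no_click = create_threshold_POVM_OP_Dense(eff, 0, N)   # "no click" element
print(np.allclose(click + no_click, np.eye(N)))        # completeness of the two-outcome POVM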
trajectree/fock_optics/noise_models.py
ADDED
@@ -0,0 +1,41 @@
from scipy import sparse as sp
from scipy.linalg import expm

import numpy as np

import qutip as qt
from math import factorial

def single_mode_bosonic_noise_channels(noise_parameter, N):
    """This function produces the Kraus operators for the single mode bosonic noise channels. This includes pure loss and
    pure gain channels. The pure gain channel is simply the transpose of the pure loss channel.

    Args:
        noise_parameter (float): The noise parameter (loss for pure loss and gain for pure gain channels). For the pure loss channel, this
            parameter is the dimensionless noise term: 1-transmissivity (of the beamsplitter in the beamsplitter model of attenuation).
            For a fiber, transmissivity = e**(-chi), where chi = l/l_att, where l is the length of the fiber and
            l_att is the attenuation length. If the noise_parameter is greater than 1, it is assumed to be a gain channel.
        N (int): local Hilbert space dimension being considered.
    """
    a = qt.destroy(N).full()
    a_dag = qt.create(N).full()
    n = a_dag @ a

    # TODO: Theoretically, verify these
    normalization = 1
    gain_channel = False

    if noise_parameter > 1:
        gain_channel = True
        normalization = np.sqrt(1/noise_parameter)
        noise_parameter = (noise_parameter-1)/(noise_parameter) # Convert gain to loss parameter

    kraus_ops = []
    for l in range(N): # you can lose anywhere from 0 to N-1 (=trunc) photons in the truncated Hilbert space.
        kraus_ops.append(sp.csr_array(normalization * np.sqrt(1/factorial(l) * (noise_parameter/(1-noise_parameter))**l) * (np.linalg.matrix_power(a, l) @ expm(n/2 * np.log(1-noise_parameter)))))

    if gain_channel:
        for l in range(N):
            kraus_ops[l] = kraus_ops[l].T.conjugate()

    return kraus_ops
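A quick sanity sketch (illustrative, not part of the diff) checking that the pure-loss Kraus operators returned above resolve the identity on the truncated space, i.e. that the sum of K†K over all operators is the identity; the truncation and loss value are assumptions:

import numpy as np
from trajectree.fock_optics.noise_models import single_mode_bosonic_noise_channels

N = 3                                                                  # assumed truncation
kraus_ops = single_mode_bosonic_noise_channels(noise_parameter=0.2, N=N)   # 20% loss
completeness = sum((K.conj().T @ K).toarray() for K in kraus_ops)
print(np.allclose(completeness, np.eye(N)))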
trajectree/fock_optics/outputs.py
ADDED
@@ -0,0 +1,65 @@
from scipy import sparse as sp

import numpy as np

from matplotlib import pyplot as plt

# Generating labels for reading state.
def generate_labels(num_systems, N):
    dim = N**2
    labels = []
    state_labels = []
    for i in range(dim):
        state_labels.append(f"{i//N}H{i%N}V")
    # print("states:", self.state_labels)
    for i in range(dim**num_systems):
        new_label = ""
        for j in range(num_systems-1, -1, -1):
            # print("appending to labels:", f"{self.state_labels[(i//self.dim**j)%self.dim]}_{chr(65+j)} ")
            new_label += f"{state_labels[(i//dim**j)%dim]}_{chr(65+j)} "
        labels.append(new_label[:-1])
    return labels

def read_quantum_state(TN_state, N, num_states = 4, return_dense = False, precision = 10):
    dense_state = TN_state.to_dense()
    if return_dense: return dense_state
    dense_state = np.reshape(dense_state.data, (-1, 1), order = 'C')
    dense_state = sp.csr_matrix(dense_state)
    dense_state.data = np.round(dense_state.data, precision)
    dense_state.eliminate_zeros()

    print_quantum_state(N, dense_state, num_states)

def print_quantum_state(N, dense_state, num_states = 4):
    labels = generate_labels(num_states,N)
    state = dense_state.nonzero()[0]
    print("Corresponding Basis terms:")
    for k in state: print(labels[k],"-",k,"-",dense_state[k].data)



def plot_coincidences(coincidence, idler_angles, signal_angles, title = ''):
    visibilities = []
    for i in range(len(coincidence)):
        visibility = (max(coincidence[i]) - min(coincidence[i])) / (max(coincidence[i]) + min(coincidence[i]))
        visibilities.append(visibility)
        # print(visibility, coincidence[i])

    idler_angles = np.array(list(map(float, idler_angles)))/np.pi

    plt.figure()
    plt.grid(True)
    for i in range(len(idler_angles)):
        # print(fringe_real[i])
        plt.plot(signal_angles, coincidence[i], label=r'{:.2f}$\pi$'.format(idler_angles[i]))
    plt.title(title)
    plt.ylabel("Coincidence probability")
    plt.xlabel(r"$\alpha$ (rad)")
    plt.legend(title = "$\delta$")

    plt.figure()
    plt.grid(True)
    plt.plot(idler_angles*np.pi, visibilities)
    plt.title("Visibilities")
    plt.ylabel("Visibility")
    plt.xlabel(r"$\delta$")
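For reference (an illustrative sketch, not part of the diff), generate_labels enumerates the |n_H n_V> occupation of each polarization pair and appends a system letter; for a single system at N = 2:

from trajectree.fock_optics.outputs import generate_labels

print(generate_labels(1, 2))
# expected: ['0H0V_A', '0H1V_A', '1H0V_A', '1H1V_A']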
trajectree/fock_optics/utils.py
ADDED
@@ -0,0 +1,180 @@
import numpy as np
from numpy import sqrt

from quimb.tensor import MatrixProductState as mps #type: ignore
from quimb.tensor import MatrixProductOperator as mpo #type: ignore
from quimb.tensor.tensor_arbgeom import tensor_network_apply_op_vec #type: ignore
from quimb.tensor.tensor_core import new_bond #type: ignore
from quimb.tensor.tensor_1d_compress import enforce_1d_like #type: ignore
from quimb.tensor.tensor_1d import TensorNetwork1DOperator #type: ignore

import qutip as qt
import re


###### SUPPORT FUNCTIONS ######

# Vacuum state creation
def fill_fn(shape):
    arr = np.zeros(shape)
    idx = tuple([0]*(len(shape)))
    arr[idx] = 1
    return arr
def create_vacuum_state(num_modes, N, bond_dim = 2):
    return mps.from_fill_fn(
        fill_fn,
        L=num_modes,
        bond_dim=bond_dim,
        phys_dim=N,
        cyclic=False,
        tags="In"
    )

def create_ladder_MPO(site, total_sites, N, tag="$Ladder$"):
    a = qt.destroy(N).full()
    a_dag = a.T
    TMSV_MPO = mpo.from_dense(a_dag, dims = N, sites = (site,), L=total_sites, tags=tag)
    # return TMSV_MPO.fill_empty_sites(mode = "minimal")
    return TMSV_MPO

def create_MPO(site1, site2, total_sites, op, N, tag):
    MPO = mpo.from_dense(op, dims = N, sites = (site1,site2), L=total_sites, tags=tag)
    return MPO

###### POVM OPERATORS #######


########## TMSV Operator ############


########## EXTEND MPS ###########

def extend_MPS(psi, psi_second = None):
    # print("inside extend_MPS")
    # psi_second.draw()
    # print(psi_second)

    psi.permute_arrays('lrp')

    # psi_second.draw()
    # print(psi_second)

    # This is supposed to be passed as the second MPS to extend the first MPS with.
    if psi_second == None:
        psi_second = psi.copy()
    else:
        psi_second.permute_arrays('lrp')

    psi_num_modes = len(psi.site_tags)
    psi2_num_modes = len(psi_second.site_tags)

    psi_second.reindex({f"k{i}":f"k{i+psi_num_modes}" for i in range(psi2_num_modes)}, inplace = True)
    psi_second.retag({f"I{i}":f"I{i+psi_num_modes}" for i in range(psi2_num_modes)}, inplace = True)

    psi = psi.combine(psi_second)

    psi_last_tensor = psi.select_tensors(f"I{psi_num_modes-1}", which='any')[0]
    psi2_first_tensor = psi.select_tensors(f"I{psi_num_modes}", which='any')[0]

    new_bond(psi2_first_tensor, psi_last_tensor, axis1=0, axis2=1)

    # Simply find the tags for the input modes.
    pattern = re.compile(r"I[0-9][0-9]*")
    tags = []
    for tag_list in [t.tags for t in psi]:
        for tag in tag_list:
            match = re.search(pattern, tag)
            if match:
                tags.append(match.string)
                break

    sorted_arrays = [array for array, _ in sorted( zip(psi.arrays, tags), key = lambda pair: int(pair[1][1:]) )]

    psi = mps(sorted_arrays)
    return psi


def calc_fidelity_swapping(state, reference_state, N, error_tolerance):
    reference_mps = create_bimode_bell_state(reference_state, N)
    projector_mpo = outer_product_mps(reference_mps)

    projector_mpo.reindex({"k0":"k0","k1":"k1","k2":"k4","k3":"k5"}, inplace = True)
    projector_mpo.reindex({"b0":"b0","b1":"b1","b2":"b4","b3":"b5"}, inplace = True)
    projector_mpo.retag({"I0":"I0","I1":"I1","I2":"I4","I3":"I5"}, inplace = True)

    # print("sites present in projector_mpo:", projector_mpo.sites)
    enforce_1d_like(projector_mpo, site_tags=state.site_tags, inplace=True)

    state = tensor_network_apply_op_vec(projector_mpo, state, compress=True, contract = True, cutoff = error_tolerance)
    # state.draw()
    return state.norm()**2


# Calculate and return fidelity of the projected state.


def create_bimode_bell_state(bell_state, N, error_tolerance = 1e-12):
    I = np.eye(N)

    a_dag = qt.create(N).full()
    a = qt.destroy(N).full()

    vacuum_state = np.zeros((N,1))
    vacuum_state[0] = 1
    vac_projector = np.outer(vacuum_state, vacuum_state)

    one_state = a_dag @ vacuum_state # For now, we're defining the 1 state as having only one photon. This could be changed to have any number of non-zero photons.
    # print("one_state:", one_state) # This is because the ideal case is having exactly one photon for the 1 state.
    one_projector = np.outer(one_state, one_state)

    NOT_gate = vacuum_state @ one_state.conj().T + one_state @ vacuum_state.conj().T
    H_gate = (1/sqrt(2)) * ((vacuum_state - one_state) @ one_state.conj().T + (vacuum_state + one_state) @ vacuum_state.conj().T)
    C_NOT_close = np.kron(vac_projector, I) + np.kron(one_projector, NOT_gate)
    C_NOT_open = np.kron(one_projector, I) + np.kron(vac_projector, NOT_gate)

    NOT_MPO_0 = mpo.from_dense(NOT_gate, dims = N, sites = (0,), L=4, tags="a_dag")
    NOT_MPO_1 = mpo.from_dense(NOT_gate, dims = N, sites = (1,), L=4, tags="a_dag")
    H_MPO = mpo.from_dense(H_gate, dims = N, sites = (0,), L=4, tags="H")
    C_NOT_close_MPO_1 = mpo.from_dense(C_NOT_close, dims = N, sites = (0,1), L=4, tags="C_NOT_close_1")
    C_NOT_close_MPO_2 = mpo.from_dense(C_NOT_close, dims = N, sites = (1,2), L=4, tags="C_NOT_close_2")
    C_NOT_open_MPO = mpo.from_dense(C_NOT_open, dims = N, sites = (2,3), L=4, tags="C_create_open")

    vacuum = create_vacuum_state(4, N, bond_dim = 2)

    if bell_state == "psi_minus":
        psi = tensor_network_apply_op_vec(NOT_MPO_0, vacuum, compress=True, contract = True, cutoff = error_tolerance)
        psi = tensor_network_apply_op_vec(NOT_MPO_1, psi, compress=True, contract = True, cutoff = error_tolerance)
    elif bell_state == "psi_plus":
        psi = tensor_network_apply_op_vec(NOT_MPO_1, vacuum, compress=True, contract = True, cutoff = error_tolerance)
    elif bell_state == "phi_plus":
        psi = vacuum
    elif bell_state == "phi_minus":
        psi = tensor_network_apply_op_vec(NOT_MPO_0, vacuum, compress=True, contract = True, cutoff = error_tolerance)


    psi = tensor_network_apply_op_vec(H_MPO, psi, compress=True, contract = True, cutoff = error_tolerance)
    # read_quantum_state(psi, N, num_states = 2)
    psi = tensor_network_apply_op_vec(C_NOT_close_MPO_1, psi, compress=True, contract = True, cutoff = error_tolerance)
    # read_quantum_state(psi, N, num_states = 2)
    psi = tensor_network_apply_op_vec(C_NOT_close_MPO_2, psi, compress=True, contract = True, cutoff = error_tolerance)
    psi = tensor_network_apply_op_vec(C_NOT_open_MPO, psi, compress=True, contract = True, cutoff = error_tolerance)

    return psi


def outer_product_mps(psi):
    psi_H = psi.H
    psi_H.retag_({'In': 'Out'})
    psi_H.site_ind_id = 'b{}'
    rho = (psi_H | psi)
    for i in range(rho.L):
        rho ^= f"I{i}"
    rho = TensorNetwork1DOperator(rho)
    rho._upper_ind_id = psi.site_ind_id
    rho._lower_ind_id = psi_H.site_ind_id
    rho = rho.fuse_multibonds()
    rho_MPO = rho.view_as_(mpo, cyclic = False, L = 8) # L is important. It's hard-coded now, but must be configurable based on the input state.
    return rho_MPO
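A minimal sketch (not part of the package) of the MPS helpers above: build a small vacuum MPS, excite one mode with the ladder MPO, and concatenate two copies of the register with extend_MPS; the dimensions and tolerance are illustrative assumptions:

from quimb.tensor.tensor_arbgeom import tensor_network_apply_op_vec
from trajectree.fock_optics.utils import create_vacuum_state, create_ladder_MPO, extend_MPS

N = 2
psi = create_vacuum_state(num_modes=2, N=N)
raise_0 = create_ladder_MPO(site=0, total_sites=2, N=N)        # a_dag acting on mode 0
psi = tensor_network_apply_op_vec(raise_0, psi, compress=True, contract=True, cutoff=1e-12)
combined = extend_MPS(psi)        # duplicates the 2-mode register into a 4-mode MPS
print(combined.L)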
trajectree/optical_quant_info.py
ADDED
@@ -0,0 +1,137 @@
from trajectree.fock_optics.measurement import *
from trajectree.fock_optics.utils import *
from trajectree.fock_optics.light_sources import *
from trajectree.fock_optics.devices import *
from trajectree.trajectory import *

from trajectree.protocols.swap import perform_swapping_simulation

import numpy as np

def quantum_encoder(mean_photon_num, N, psi_control, control_indices, error_tolerance):
    """This function implements the quantum encoder, which "copies" one state into two modes. Of course, nothing is literally copied: you are simply entangling another
    state (modes (0,1) or mode b in the paper) with the control state, creating a "copy". The bell state required to make the copy is added before the control MPS
    ((0,1):(H,V){b,d} and (2,3):(H,V){a,c}). The {a,c} modes are measured out at the end of the function and hence, only 2 modes are pre-added to the returned MPS.

    """
    # Entangled state from EPS
    vacuum = create_vacuum_state(num_modes=8, N=N)
    bell_state, _ = light_source(vacuum, N, mean_photon_num, 8, error_tolerance, compress=True, contract=True)

    # psi_control.draw()
    # print(psi_control)

    psi = extend_MPS(bell_state, psi_control)

    # psi.draw()
    # print(psi)

    # PBS op: (The V mode is transmitted and not reflected)
    U_PBS_V = create_BS_MPO(site1 = 3, site2 = bell_state.L+control_indices[1], theta=np.pi/2, total_sites = psi.L, N = N, tag = r"$PBS$")
    enforce_1d_like(U_PBS_V, site_tags=psi.site_tags, inplace=True)
    psi = tensor_network_apply_op_vec(U_PBS_V, psi, compress=True, contract = True, cutoff = error_tolerance)

    # Measuring D_d
    # This is meant to change the basis from HV -> FS: (See https://doi.org/10.1103/PhysRevA.64.062311)
    U_PBS_FS = create_BS_MPO(site1 = 2, site2 = 3, theta=np.pi/4, total_sites = psi.L, N = N, tag = r"$rotator$")
    enforce_1d_like(U_PBS_FS, site_tags=psi.site_tags, inplace=True)
    psi = tensor_network_apply_op_vec(U_PBS_FS, psi, compress=True, contract = True, cutoff = error_tolerance)

    # Performing measurements:
    BSM_POVM_1_OPs = generate_sqrt_POVM_MPO(sites=[2], outcome = 1, total_sites=psi.L, efficiency=1, N=N, pnr = True)
    BSM_POVM_1_OPs.extend(generate_sqrt_POVM_MPO(sites=[3], outcome = 0, total_sites=psi.L, efficiency=1, N=N, pnr = True))

    for POVM_OP in BSM_POVM_1_OPs:
        psi = tensor_network_apply_op_vec(POVM_OP, psi, compress=True, contract = True, cutoff = error_tolerance)

    return psi


def destructive_CNOT(control_b_sites, target_sites, psi, N, error_tolerance):

    # print("control_b_sites:", control_b_sites)
    # print("target_sites", target_sites)

    # Rotating bases of the encoded control's b mode and the target mode:
    U_rotator_FS = create_BS_MPO(site1 = control_b_sites[0], site2 = control_b_sites[1], theta=np.pi/4, total_sites = psi.L, N = N, tag = r"$rotator$")
    enforce_1d_like(U_rotator_FS, site_tags=psi.site_tags, inplace=True)
    psi = tensor_network_apply_op_vec(U_rotator_FS, psi, compress=True, contract = True, cutoff = error_tolerance)

    U_rotator_FS = create_BS_MPO(site1 = target_sites[0], site2 = target_sites[1], theta=np.pi/4, total_sites = psi.L, N = N, tag = r"$rotator$")
    enforce_1d_like(U_rotator_FS, site_tags=psi.site_tags, inplace=True)
    psi = tensor_network_apply_op_vec(U_rotator_FS, psi, compress=True, contract = True, cutoff = error_tolerance)

    # Applying PBS in rotated basis (only the V modes are reflected and the H modes are transmitted. Hence, the V modes undergo pi/2 rotation and the H modes undergo no rotation):

    # Implementation using SWAP operator. This does not generalize to higher truncations.
    # SWAP = qt.qip.operations.swap().full()
    # U_PBS_F = create_MPO(1, 3, psi.L, SWAP, N, r"$PBS$")
    U_PBS_F = create_BS_MPO(site1 = target_sites[1], site2 = control_b_sites[1], theta=np.pi/2, total_sites = psi.L, N = N, tag = r"$PBS$")
    enforce_1d_like(U_PBS_F, site_tags=psi.site_tags, inplace=True)
    psi = tensor_network_apply_op_vec(U_PBS_F, psi, compress=True, contract = True, cutoff = error_tolerance)

    # Undoing rotations:
    U_inverse_rotator_FS = create_BS_MPO(site1 = target_sites[0], site2 = target_sites[1], theta=-np.pi/4, total_sites = psi.L, N = N, tag = r"$rotator$")
    enforce_1d_like(U_inverse_rotator_FS, site_tags=psi.site_tags, inplace=True)
    psi = tensor_network_apply_op_vec(U_inverse_rotator_FS, psi, compress=True, contract = True, cutoff = error_tolerance)

    U_inverse_rotator_FS = create_BS_MPO(site1 = control_b_sites[0], site2 = control_b_sites[1], theta=-np.pi/4, total_sites = psi.L, N = N, tag = r"$rotator$")
    enforce_1d_like(U_inverse_rotator_FS, site_tags=psi.site_tags, inplace=True)
    psi = tensor_network_apply_op_vec(U_inverse_rotator_FS, psi, compress=True, contract = True, cutoff = error_tolerance)

    # Measuring the b mode (named d after the PBS)
    BSM_POVM_1_OPs = generate_sqrt_POVM_MPO(sites=[control_b_sites[0]], outcome = 1, total_sites=psi.L, efficiency=1, N=N, pnr = True)
    BSM_POVM_1_OPs.extend(generate_sqrt_POVM_MPO(sites=[control_b_sites[1]], outcome = 0, total_sites=psi.L, efficiency=1, N=N, pnr = True))

    for POVM_OP in BSM_POVM_1_OPs:
        psi = tensor_network_apply_op_vec(POVM_OP, psi, compress=True, contract = True, cutoff = error_tolerance)

    return psi

def CNOT(psi_control_modes, psi_target_modes, psi_control, psi_target, N, mean_photon_num, error_tolerance):
    """Pass psi_target as None if the same MPS has both the control and target modes.
    Args:
        psi_control_modes (list): List of control modes (H,V).
        psi_target_modes (list): List of target modes (H,V).
        psi_control (MPS): MPS for the control modes.
        psi_target (MPS): MPS for the target modes; can be None if the same MPS holds both control and target.
        N (int): Local Hilbert space dimension.
        mean_photon_num (float): Mean photon number for the EPS used in implementing the CNOT gate.
        error_tolerance (float): Tolerance for numerical errors in tensor network operations.
    Returns:
        MPS: The resulting MPS after applying the CNOT operation.
    """

    psi_encoded_control = quantum_encoder(mean_photon_num, N, psi_control, psi_control_modes, error_tolerance)

    # read_quantum_state(psi_encoded_control, N, num_states = 6)

    if not psi_target == None:
        psi = extend_MPS(psi_target, psi_encoded_control)
        psi_control_b_modes = [psi_target.L, psi_target.L+1] # [site + psi_target.L for site in psi_control_modes]

    else:
        psi_control_b_modes = [0,1]
        psi_target_modes = [4+site for site in psi_target_modes] # We add 4 since the additional modes from the EPS are pre-added to the MPS.
        psi = psi_encoded_control

    psi = destructive_CNOT(psi_control_b_modes, psi_target_modes, psi, N, error_tolerance)

    # read_quantum_state(psi, N, num_states = 6)

    norm = psi.normalize()
    for _ in range(4):
        psi.measure(0, remove = True, renorm = True, inplace = True)
    psi[-1].modify(data=psi[-1].data * norm**0.5)

    return psi


def H(psi, sites, N, error_tolerance):
    # TODO: This function does not work for N > 2.
    # This definition is based on the paper: https://arxiv.org/pdf/quant-ph/9706022
    H = generalized_mode_mixer(sites[0], sites[1], -np.pi/4, [0,-np.pi], [0,-np.pi], [0,0], psi.L, N)
    # H = generalized_mode_mixer(0, 1, np.pi/4, 0, 0, 0, 2, N)
    enforce_1d_like(H, site_tags=psi.site_tags, inplace=True)
    psi = tensor_network_apply_op_vec(H, psi, compress=True, contract = True, cutoff = error_tolerance)
    return psi
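A call-pattern sketch (illustrative only, not part of the package) for the CNOT above, following its docstring: when a single MPS carries both the dual-rail control and target, psi_target is passed as None. The mode indices, truncation, and parameter values are assumptions:

from trajectree.fock_optics.utils import create_vacuum_state
from trajectree.optical_quant_info import CNOT

# Dual-rail control on sites (0,1) and target on sites (2,3) of a 4-mode register.
psi = create_vacuum_state(num_modes=4, N=2)
out = CNOT(psi_control_modes=[0, 1], psi_target_modes=[2, 3],
           psi_control=psi, psi_target=None,
           N=2, mean_photon_num=0.1, error_tolerance=1e-10)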
trajectree/sequence/swap.py
ADDED
@@ -0,0 +1,76 @@
from ..fock_optics.noise_models import *
from ..fock_optics.measurement import *
from ..fock_optics.utils import *
from ..fock_optics.light_sources import *

from ..trajectory import *

import time
import numpy as np
import copy

def generate_swapping_circuit(N, num_modes, site_tags, bsm_det_effs, bsm_dark_counts_gain, bsm_measurements, channel_loss, error_tolerance):
    quantum_channel_list = []

    # Amplitude damping due to fibers
    damping_kraus_ops = single_mode_bosonic_noise_channels(noise_parameter = channel_loss, N = N)
    two_mode_kraus_ops = [sp.kron(op, op) for op in damping_kraus_ops]
    quantum_channel_list.append(quantum_channel(N = N, num_modes = num_modes, formalism = "kraus", kraus_ops_tuple = ((2,3), two_mode_kraus_ops))) # The tuples in this list are defined as (sites, kraus_ops). The sites are the sites where the Kraus ops are applied.
    quantum_channel_list.append(quantum_channel(N = N, num_modes = num_modes, formalism = "kraus", kraus_ops_tuple = ((6,7), two_mode_kraus_ops))) # The tuples in this list are defined as (sites, kraus_ops). The sites are the sites where the Kraus ops are applied.

    # Quantum channel for the Bell state measurement
    # BSM_MPOs = bell_state_measurement(None, N, site_tags, num_modes, bsm_det_effs, error_tolerance, measurements = bsm_measurements, pnr = False, use_trajectory = True, return_MPOs = True, compress=True, contract=True)
    # BSM_quantum_channels = [quantum_channel(N = N, num_modes = num_modes, formalism = "closed", unitary_MPOs = BSM_MPO, name = "BSM") for BSM_MPO in BSM_MPOs]
    BSM_quantum_channels = bell_state_measurement(None, N, site_tags, num_modes, bsm_det_effs, bsm_dark_counts_gain, error_tolerance, measurements = bsm_measurements, pnr = False, use_trajectory = True, return_MPOs = True, compress=True, contract=True)
    quantum_channel_list.extend(BSM_quantum_channels)

    return quantum_channel_list

def analyze_entanglement(quantum_channel_list, N, site_tags, num_modes, efficiency, error_tolerance, idler_angles, signal_angles):
    PA_MPOs = rotate_and_measure(None, N, site_tags, num_modes, efficiency, error_tolerance, idler_angles, signal_angles, return_MPOs = True)
    PA_quantum_channels = [quantum_channel(N = N, num_modes = num_modes, formalism = "closed", unitary_MPOs = PA_MPO) for PA_MPO in PA_MPOs]
    print("num pa quantum channels:", len(PA_quantum_channels))
    quantum_channel_list.extend(PA_quantum_channels)


def create_swapping_initial_state(num_modes, N, mean_photon_num, error_tolerance):
    # Create Vacuum state:
    vacuum = create_vacuum_state(num_modes=num_modes, N=N)

    # Entangled state from EPS
    psi, TMSV_state = light_source(vacuum, N, mean_photon_num, num_modes, error_tolerance, compress=True, contract=True)

    psi = extend_MPS(psi)
    return psi

def perform_swapping_simulation(N, num_modes, num_simulations, params, error_tolerance = 1e-10):

    psi = create_swapping_initial_state(num_modes, N, params["chi"], error_tolerance)

    quantum_channels = generate_swapping_circuit(N, num_modes, psi.site_tags, [params["BSM_det_loss_1"], params["BSM_det_loss_2"]], [params["BSM_dark_counts_1"], params["BSM_dark_counts_2"]], params["BSM_meas"], params["channel_loss"], error_tolerance)

    if params["if_analyze_entanglement"]:
        analyze_entanglement(quantum_channels, N, psi.site_tags, num_modes, params["PA_det_loss"], error_tolerance, params["alpha_list"], params["delta_list"])

    t_eval = trajectory_evaluator(quantum_channels)

    fidelities = []
    probabilities = []

    for i in range(num_simulations):
        start = time.time()
        psi_iter = copy.deepcopy(t_eval.perform_simulation(psi, error_tolerance, normalize = False))

        probabilities.append(psi_iter.normalize())

        if params["calc_fidelity"]:
            fidelity = np.abs(calc_fidelity_swapping(psi_iter, "psi_minus", N, error_tolerance))
            fidelities.append(fidelity)

        time_taken = time.time() - start
        # print("time taken for simulation", i, ":", time_taken)

        print("completed set", "cache_hits:", t_eval.cache_hit, "cache_partial_hits:", t_eval.cache_partial_hit, "cache_misses:", t_eval.cache_miss, "time taken:", time_taken)

    return fidelities, probabilities, t_eval
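A sketch (values are illustrative assumptions, not package defaults) of the params dictionary that perform_swapping_simulation expects, with the keys read off the function body above:

from trajectree.sequence.swap import perform_swapping_simulation

params = {
    "chi": 0.1,                           # mean photon number of the TMSV sources
    "channel_loss": 0.2,                  # fiber loss parameter, 1 - transmissivity
    "BSM_det_loss_1": 0.9,                # detector efficiency/loss parameters for the two BSM detector pairs
    "BSM_det_loss_2": 0.9,
    "BSM_dark_counts_1": 1.001,           # gain parameters (> 1) modelling dark counts
    "BSM_dark_counts_2": 1.001,
    "BSM_meas": {0: (2, 7), 1: (3, 6)},   # BSM detection sites, as in bell_state_measurement
    "if_analyze_entanglement": False,
    "PA_det_loss": 0.9,                   # polarization-analyzer detector efficiency
    "alpha_list": [0.0],                  # rotation angle lists passed to rotate_and_measure
    "delta_list": [0.0],
    "calc_fidelity": True,
}

fidelities, probabilities, t_eval = perform_swapping_simulation(
    N=2, num_modes=8, num_simulations=10, params=params)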
trajectree/trajectory.py
ADDED
@@ -0,0 +1,210 @@
+import numpy as np
+from quimb.tensor import MatrixProductOperator as mpo #type: ignore
+from quimb.tensor.tensor_arbgeom import tensor_network_apply_op_vec #type: ignore
+
+
+
+class quantum_channel:
+    def __init__(self, N, num_modes, formalism, kraus_ops_tuple = None, unitary_MPOs = None, name = "quantum_channel"):
+        self.N = N
+        self.name = name
+        self.num_modes = num_modes
+        self.formalism = formalism
+        if self.formalism == 'kraus':
+            # Calculate the MPOs of the Kraus operators
+            self.kraus_MPOs = quantum_channel.find_quantum_channels_MPOs(kraus_ops_tuple, N, num_modes)
+        elif self.formalism == 'closed':
+            self.unitary_MPOs = unitary_MPOs
+
+    def get_MPOs(self):
+        if self.formalism == 'closed':
+            return self.unitary_MPOs
+        elif self.formalism == 'kraus':
+            return self.kraus_MPOs
+
+    @staticmethod
+    def find_quantum_channels_MPOs(ops_tuple, N, num_modes):
+        (sites, ops) = ops_tuple
+        quantum_channels = quantum_channel.calc_mpos(ops, N, sites, num_modes)
+        return quantum_channels
+
+    # Just a function which calculates the MPOs of the Kraus ops
+    @staticmethod
+    def calc_mpos(ops, N, sites, num_modes):
+        MPOs = []
+        for op in ops:
+            MPO = mpo.from_dense(op.todense(), dims = N, sites = sites, L=num_modes, tags="op")
+            MPOs.append(MPO)
+        return MPOs
+
+
+class trajectree_node:
+    def __init__(self, probs, trajectories, trajectory_indices):
+        self.probs = probs
+        self.trajectories = trajectories
+        self.trajectory_indices = trajectory_indices
+
+class trajectory_evaluator():
+    def __init__(self, quantum_channels, cache_size = 2):
+        self.quantum_channels = quantum_channels
+        self.kraus_channels = []
+        for quantum_channel in self.quantum_channels:
+            if quantum_channel.formalism == 'kraus':
+                self.kraus_channels.append(quantum_channel)
+
+        self.trajectree = [{} for i in range(len(self.kraus_channels)+1)] # +1 because the end of the simulation is also cached, so the final unitary operations are not repeated.
+        self.traversed_nodes = ()
+        self.cache_size = cache_size
+
+        # for debugging only:
+        self.cache_hit = 0
+        self.cache_miss = 0
+        self.cache_partial_hit = 0
+
+
+    def apply_kraus(self, psi, kraus_MPOs, error_tolerance, normalize = True):
+        trajectory_probs = np.array([])
+        trajectories = np.array([])
+        for kraus_MPO in kraus_MPOs:
+
+            trajectory = tensor_network_apply_op_vec(kraus_MPO, psi, compress=True, contract = True, cutoff = error_tolerance)
+            trajectory_prob = np.real(trajectory.H @ trajectory)
+
+            if trajectory_prob < 1e-25: # Using 1e-25 arbitrarily. Trajectories with probability less than this are pruned.
+                continue
+
+            if normalize:
+                trajectory.normalize()
+            trajectory_probs = np.append(trajectory_probs, trajectory_prob)
+            trajectories = np.append(trajectories, trajectory)
+
+        return trajectories, trajectory_probs
+
+
+    def cache_trajectree_node(self, trajectory_probs, trajectories):
+        sorted_indices = np.argsort(trajectory_probs)
+
+        print("trajectory_probs", trajectory_probs)
+
+        cached_trajectory_indices = sorted_indices[-self.cache_size:]
+        cached_trajectories = np.array(trajectories)[cached_trajectory_indices]
+
+        new_node = trajectree_node(trajectory_probs, cached_trajectories, cached_trajectory_indices)
+        self.trajectree[len(self.traversed_nodes)][self.traversed_nodes] = new_node
+
+        self.last_cached_node = new_node
+
+        return cached_trajectory_indices
+
+
+    def discover_trajectree_node(self, psi, kraus_MPOs, error_tolerance, normalize = True, selected_trajectory_index = None):
+
+        trajectories, trajectory_probs = self.apply_kraus(psi, kraus_MPOs, error_tolerance, normalize)
+
+        cached_trajectory_indices = self.cache_trajectree_node(trajectory_probs, trajectories) # cached_trajectory_indices is returned only for debugging.
+
+        if selected_trajectory_index == None:
+            selected_trajectory_index = np.random.choice(a = len(trajectory_probs), p = trajectory_probs/sum(trajectory_probs))
+
+        self.traversed_nodes = self.traversed_nodes + (selected_trajectory_index,)
+
+        return trajectories[selected_trajectory_index]
+
+
+    def query_trajectree(self, psi, kraus_MPOs, error_tolerance, cache = True, selected_trajectory_index = None, normalize = True):
+        self.skip_unitary = False
+        self.cache_unitary = False
+
+        if cache == False:
+            psi = tensor_network_apply_op_vec(self.kraus_channels[len(self.traversed_nodes)].get_MPOs()[selected_trajectory_index], psi, compress=True, contract = True, cutoff = error_tolerance)
+            self.traversed_nodes = self.traversed_nodes + (selected_trajectory_index,)
+            return psi
+
+        if self.traversed_nodes in self.trajectree[len(self.traversed_nodes)]: # Check if the dictionary at the level where the traversal is now, i.e., len(self.traversed_nodes),
+            # has the path that the present traversal has taken.
+            node = self.trajectree[len(self.traversed_nodes)][self.traversed_nodes] # If found, retrieve that node to access the probabilities and trajectories cached inside it.
+            if selected_trajectory_index == None:
+                selected_trajectory_index = np.random.choice(a = len(node.probs), p = node.probs/sum(node.probs)) # The cached nodes have all the probabilities, but not all the trajectories are cached. So, we can select
+                # which trajectory our traversal takes and later see if the actual trajectory has been cached or needs to be recomputed.
+            self.cache_unitary = False # If the node has been found, we do not cache the unitary. The unitary is either already cached or we don't need to cache it at all.
+
+            if selected_trajectory_index in node.trajectory_indices: # See if the selected trajectory's MPS has been cached or not.
+                self.skip_unitary = True # If we're skipping the unitary entirely, it just does not matter whether we cache the unitary or not.
+                self.cache_hit += 1
+                psi = node.trajectories[np.where(node.trajectory_indices == selected_trajectory_index)[0][0]]
+            else:
+                self.skip_unitary = False # If the trajectory has not been cached, we will have to apply the unitary to it.
+                self.cache_partial_hit += 1
+                psi = tensor_network_apply_op_vec(self.kraus_channels[len(self.traversed_nodes)].get_MPOs()[selected_trajectory_index], psi, compress=True, contract = True, cutoff = error_tolerance) # If not, simply calculate that trajectory.
+                # You don't need to cache it since we have already cached what we had to.
+            if normalize:
+                psi.normalize()
+            self.traversed_nodes = self.traversed_nodes + (selected_trajectory_index,)
+
+
+        else: # If the node has not been discovered, we'll have to find all probabilities and cache the results.
+            self.skip_unitary = False
+            self.cache_unitary = True
+            self.cache_miss += 1
+            psi = self.discover_trajectree_node(psi, kraus_MPOs, error_tolerance, normalize, selected_trajectory_index = selected_trajectory_index)
+
+        return psi
+
+    def apply_unitary_MPOs(self, psi, unitary_MPOs, error_tolerance):
+        return tensor_network_apply_op_vec(unitary_MPOs, psi, compress=True, contract = True, cutoff = error_tolerance)
+
+
+    def calculate_density_matrix(self, psi, error_tolerance):
+        dm = 0
+        trajectree_indices_list = [[]]
+        for quantum_channel in self.quantum_channels:
+            if quantum_channel.formalism == 'kraus':
+                trajectree_indices_list = [[*i, j] for i in trajectree_indices_list for j in range(len(quantum_channel.get_MPOs()))]
+        for trajectree_indices in trajectree_indices_list:
+            psi_new_dense = self.perform_simulation(psi, error_tolerance, cache = True, trajectree_indices = trajectree_indices, normalize = False).to_dense()
+            dm += psi_new_dense @ psi_new_dense.conj().T
+        return dm
+
+    def update_cached_node(self, unitary_MPOs, last_cached_node, error_tolerance):
+        for kraus_idx in range(len(last_cached_node.trajectories)):
+            last_cached_node.trajectories[kraus_idx] = self.apply_unitary_MPOs(last_cached_node.trajectories[kraus_idx], unitary_MPOs, error_tolerance)
+
+
+
+    def perform_simulation(self, psi, error_tolerance, cache = True, trajectree_indices = None, normalize = True):
+        self.traversed_nodes = ()
+        self.skip_unitary = False
+        self.cache_unitary = False
+        for quantum_channel in self.quantum_channels:
+            if quantum_channel.formalism == 'kraus':
+                kraus_MPOs = quantum_channel.get_MPOs()
+                if not trajectree_indices == None: # If a list of trajectory indices is provided, we use it to traverse the trajectree. The random number generators are not used.
+                    psi = self.query_trajectree(psi, kraus_MPOs, error_tolerance, cache, trajectree_indices.pop(0), normalize)
+                else: # In this branch, the trajectory is actually selected randomly to perform realistic simulations.
+                    psi = self.query_trajectree(psi, kraus_MPOs, error_tolerance, cache = cache, normalize = normalize)
+
+            elif quantum_channel.formalism == 'closed' and not self.skip_unitary:
+                unitary_MPOs = quantum_channel.get_MPOs()
+
+                if not cache: # If we aren't caching the trajectories at all, simply apply the unitary MPOs to the state.
+                    psi = self.apply_unitary_MPOs(psi, unitary_MPOs, error_tolerance)
+                    continue
+
+                last_cached_node = self.trajectree[len(self.traversed_nodes)-1][self.traversed_nodes[:-1]]
+
+                if self.cache_unitary:
+                    self.update_cached_node(unitary_MPOs, last_cached_node, error_tolerance)
+
+                # This is where we check whether psi has been cached. If it has, simply use the last cached
+                # node to update psi. If not, apply the unitary MPOs to psi.
+                traj_idx = np.where(last_cached_node.trajectory_indices == self.traversed_nodes[-1])
+                if traj_idx[0].size > 0:
+                    psi = last_cached_node.trajectories[traj_idx[0][0]]
+                else:
+                    psi = self.apply_unitary_MPOs(psi, unitary_MPOs, error_tolerance)
+
+            else:
+                # print("unitary skipped:", self.traversed_nodes)
+                pass
+
+        return psi
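The trajectree cache defined above can also be exercised directly, without the swapping circuit. The sketch below is not part of the packaged sources: it builds a toy two-mode photon-loss channel from hand-written Kraus operators (the operators, the cutoff, and the use of quimb's MPS_computational_state for the initial state are all assumptions made for brevity), runs one stochastic trajectory, and then checks the exhaustive trajectory sum.

# Illustrative sketch only -- not part of the wheel.
import numpy as np
import scipy.sparse as sp
from quimb.tensor import MPS_computational_state
from trajectree.trajectory import quantum_channel, trajectory_evaluator

N = 2              # Fock cutoff (qubit-sized, for brevity)
num_modes = 2
eta = 0.9          # transmission of the lossy mode (hypothetical)

# Kraus operators for photon loss on mode 0, truncated to N = 2 and padded with the identity on mode 1:
K0 = sp.kron(sp.csr_matrix(np.diag([1.0, np.sqrt(eta)])), sp.eye(2))                         # no photon lost
K1 = sp.kron(sp.csr_matrix(np.array([[0.0, np.sqrt(1.0 - eta)], [0.0, 0.0]])), sp.eye(2))    # one photon lost
loss = quantum_channel(N=N, num_modes=num_modes, formalism="kraus",
                       kraus_ops_tuple=([0, 1], [K0, K1]), name="loss_on_mode_0")

# |1, 0> Fock state as an MPS, then a single stochastic trajectory through the channel:
psi = MPS_computational_state("10")
t_eval = trajectory_evaluator([loss])
psi_out = t_eval.perform_simulation(psi, error_tolerance=1e-10, normalize=False)
print("trajectory probability:", np.real(psi_out.H @ psi_out))

# Summing over every trajectory index reconstructs the channel's output density matrix:
dm = t_eval.calculate_density_matrix(psi, error_tolerance=1e-10)
print("trace of rho:", np.trace(dm).real)   # ~1, since K0'K0 + K1'K1 = identity

The eta = 0.9 loss channel is only a stand-in; any set of sparse Kraus matrices in the (sites, ops) format expected by quantum_channel should slot in the same way.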
trajectree-0.0.0.dist-info/METADATA
ADDED
@@ -0,0 +1,18 @@
+Metadata-Version: 2.4
+Name: Trajectree
+Version: 0.0.0
+Summary: Trajectree is a quantum trajectory theory and tensor network based quantum optics simulator.
+Author-email: Ansh Singal <asingal@u.northwestern.edu>
+License-Expression: MIT
+Classifier: Programming Language :: Python :: 3
+Classifier: Operating System :: OS Independent
+Requires-Python: >=3.9
+Description-Content-Type: text/markdown
+License-File: LICENSE
+Requires-Dist: quimb
+Requires-Dist: numpy
+Requires-Dist: scipy
+Requires-Dist: matplotlib
+Dynamic: license-file
+
+Trajectree is a quantum trajectory theory and tensor network based quantum optics simulator.
trajectree-0.0.0.dist-info/RECORD
ADDED
@@ -0,0 +1,16 @@
+trajectree/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+trajectree/optical_quant_info.py,sha256=27LWg0jIWT2ZXWVK8HEd-2gqAJl0GzmsyYi7sxyYzvA,7589
+trajectree/trajectory.py,sha256=yqY68605Qxf49LOvK4IRv9w1lvX_5kLfnFHAK0GMUmE,11262
+trajectree/experimental/sparse.py,sha256=05aFsMcBSCUoBop1LJG_lB5kTULRSYhZhGT9RD-UH4Q,4851
+trajectree/fock_optics/devices.py,sha256=1gwl4AIktqu3obXmNhR1EoKR4iWulP4LfWD4xenKfx8,2130
+trajectree/fock_optics/light_sources.py,sha256=qzBevi1Uqcxb6KuYMuVQ9AkJbHCCigBf21N6s01x-V0,4823
+trajectree/fock_optics/measurement.py,sha256=avTx8LD5OZFg1JCFkEJRZmDFAzlaAMN7XPxU0WqVn-s,13099
+trajectree/fock_optics/noise_models.py,sha256=nyU_jNqjJkkcafq7vIXI8IRDiwthiOr4ZQqf55knZMM,1895
+trajectree/fock_optics/outputs.py,sha256=Dg19FvodfKWDTB7B6IZ_EYUtC3vgsjlBugyrF4_tBvc,2270
+trajectree/fock_optics/utils.py,sha256=SqMTDHe7QcGD7PeSb5Pj5MlxlSiXbjgSsGwQUArQfxk,7098
+trajectree/sequence/swap.py,sha256=g7yWJg6Ow9D07GIHOxiINL6ao3jaAniGAhnW1AR_2ps,4288
+trajectree-0.0.0.dist-info/licenses/LICENSE,sha256=7EI8xVBu6h_7_JlVw-yPhhOZlpY9hP8wal7kHtqKT_E,1074
+trajectree-0.0.0.dist-info/METADATA,sha256=NA9LiSQFm_64Ixwemj7k1gtOjt23DivWEed92Rq5vZQ,621
+trajectree-0.0.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+trajectree-0.0.0.dist-info/top_level.txt,sha256=6x9i8aAZcVn5tZ9J-2IVQMTGdn4bw6DiSd8pR3v4VR8,11
+trajectree-0.0.0.dist-info/RECORD,,
trajectree-0.0.0.dist-info/licenses/LICENSE
ADDED
@@ -0,0 +1,19 @@
+Copyright (c) 2018 The Python Packaging Authority
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
trajectree-0.0.0.dist-info/top_level.txt
ADDED
@@ -0,0 +1 @@
+trajectree