pyqrackising 9.3.11__tar.gz → 9.4.0__tar.gz
- {pyqrackising-9.3.11/pyqrackising.egg-info → pyqrackising-9.4.0}/PKG-INFO +1 -1
- {pyqrackising-9.3.11 → pyqrackising-9.4.0}/pyproject.toml +1 -1
- {pyqrackising-9.3.11 → pyqrackising-9.4.0}/pyqrackising/otoc.py +90 -81
- {pyqrackising-9.3.11 → pyqrackising-9.4.0/pyqrackising.egg-info}/PKG-INFO +1 -1
- {pyqrackising-9.3.11 → pyqrackising-9.4.0}/setup.py +1 -1
- {pyqrackising-9.3.11 → pyqrackising-9.4.0}/LICENSE.md +0 -0
- {pyqrackising-9.3.11 → pyqrackising-9.4.0}/MANIFEST.in +0 -0
- {pyqrackising-9.3.11 → pyqrackising-9.4.0}/README.md +0 -0
- {pyqrackising-9.3.11 → pyqrackising-9.4.0}/pyqrackising/__init__.py +0 -0
- {pyqrackising-9.3.11 → pyqrackising-9.4.0}/pyqrackising/convert_tensor_network_to_tsp.py +0 -0
- {pyqrackising-9.3.11 → pyqrackising-9.4.0}/pyqrackising/generate_tfim_samples.py +0 -0
- {pyqrackising-9.3.11 → pyqrackising-9.4.0}/pyqrackising/kernels.cl +0 -0
- {pyqrackising-9.3.11 → pyqrackising-9.4.0}/pyqrackising/maxcut_tfim.py +0 -0
- {pyqrackising-9.3.11 → pyqrackising-9.4.0}/pyqrackising/maxcut_tfim_sparse.py +0 -0
- {pyqrackising-9.3.11 → pyqrackising-9.4.0}/pyqrackising/maxcut_tfim_streaming.py +0 -0
- {pyqrackising-9.3.11 → pyqrackising-9.4.0}/pyqrackising/maxcut_tfim_util.py +0 -0
- {pyqrackising-9.3.11 → pyqrackising-9.4.0}/pyqrackising/spin_glass_solver.py +0 -0
- {pyqrackising-9.3.11 → pyqrackising-9.4.0}/pyqrackising/spin_glass_solver_sparse.py +0 -0
- {pyqrackising-9.3.11 → pyqrackising-9.4.0}/pyqrackising/spin_glass_solver_streaming.py +0 -0
- {pyqrackising-9.3.11 → pyqrackising-9.4.0}/pyqrackising/tfim_magnetization.py +0 -0
- {pyqrackising-9.3.11 → pyqrackising-9.4.0}/pyqrackising/tfim_square_magnetization.py +0 -0
- {pyqrackising-9.3.11 → pyqrackising-9.4.0}/pyqrackising/tsp.py +0 -0
- {pyqrackising-9.3.11 → pyqrackising-9.4.0}/pyqrackising/tsp_maxcut.py +0 -0
- {pyqrackising-9.3.11 → pyqrackising-9.4.0}/pyqrackising.egg-info/SOURCES.txt +0 -0
- {pyqrackising-9.3.11 → pyqrackising-9.4.0}/pyqrackising.egg-info/dependency_links.txt +0 -0
- {pyqrackising-9.3.11 → pyqrackising-9.4.0}/pyqrackising.egg-info/not-zip-safe +0 -0
- {pyqrackising-9.3.11 → pyqrackising-9.4.0}/pyqrackising.egg-info/top_level.txt +0 -0
- {pyqrackising-9.3.11 → pyqrackising-9.4.0}/setup.cfg +0 -0
{pyqrackising-9.3.11 → pyqrackising-9.4.0}/pyproject.toml

@@ -10,7 +10,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "pyqrackising"
-version = "9.3.11"
+version = "9.4.0"
 requires-python = ">=3.8"
 description = "Fast MAXCUT, TSP, and sampling heuristics from near-ideal transverse field Ising model (TFIM)"
 readme = {file = "README.txt", content-type = "text/markdown"}
{pyqrackising-9.3.11 → pyqrackising-9.4.0}/pyqrackising/otoc.py

@@ -32,32 +32,23 @@ def get_otoc_hamming_distribution(J=-1.0, h=2.0, z=4, theta=0.174532925199432957
 
     diff_theta *= cycles
     diff_phi *= cycles
-    # diff_lam = diff_phi
 
     diff_z = np.zeros(n_bias, dtype=np.float64)
-    diff_x = np.zeros(n_bias, dtype=np.float64)
-    diff_y = np.zeros(n_bias, dtype=np.float64)
     for b in pauli_string:
         match b:
             case 'X':
                 diff_z += diff_theta
-                diff_y += diff_phi
             case 'Z':
-
-                diff_y += diff_theta
+                diff_z += diff_phi
             case 'Y':
-                diff_z += diff_theta
-
+                diff_z += diff_theta + diff_phi
+            case _:
+                pass
 
     diff_z[0] += n_qubits
-    diff_x[0] += n_qubits
-    diff_y[0] += n_qubits
-
     diff_z /= diff_z.sum()
-    diff_x /= diff_x.sum()
-    diff_y /= diff_y.sum()
 
-    return
+    return diff_z
 
 
 @njit
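Note on the hunk above: 9.4.0 collapses the per-axis bias arrays (diff_x, diff_y, diff_z) into a single Z-basis vector, normalized into a probability distribution over Hamming-weight bins. Below is a minimal standalone sketch of the new accumulation logic; n_bias, diff_theta, and diff_phi are hypothetical stand-ins for values computed earlier in get_otoc_hamming_distribution, which this hunk does not show.

import numpy as np

# Hypothetical stand-ins (not visible in this hunk):
n_qubits = 56
n_bias = n_qubits + 1                       # assumed: one bin per Hamming weight
diff_theta = np.linspace(1.0, 0.1, n_bias)  # assumed per-bin theta bias
diff_phi = np.linspace(0.5, 0.05, n_bias)   # assumed per-bin phi bias

def z_bias(pauli_string):
    # Mirrors the 9.4.0 logic: every butterfly Pauli now contributes
    # only to the single Z-basis bias vector (match/case needs Python 3.10+).
    diff_z = np.zeros(n_bias, dtype=np.float64)
    for b in pauli_string:
        match b:
            case 'X':
                diff_z += diff_theta
            case 'Z':
                diff_z += diff_phi
            case 'Y':
                diff_z += diff_theta + diff_phi
            case _:
                pass
    diff_z[0] += n_qubits         # extra weight on the zero-weight bin
    return diff_z / diff_z.sum()  # normalize to a distribution

print(z_bias('X' + 'I' * (n_qubits - 1)).sum())  # 1.0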
@@ -73,6 +64,30 @@ def fix_cdf(hamming_prob):
     return cum_prob
 
 
+@njit
+def factor_width(width):
+    col_len = int(np.floor(np.sqrt(width)))
+    while ((width // col_len) * col_len) != width:
+        col_len -= 1
+    row_len = width // col_len
+
+    return row_len, col_len
+
+
+# Provided by Google search AI
+def find_all_str_occurrences(main_string, sub_string):
+    indices = []
+    start_index = 0
+    while True:
+        index = main_string.find(sub_string, start_index)
+        if index == -1:
+            break
+        indices.append(index)
+        start_index = index + 1  # Start searching after the found occurrence
+
+    return indices
+
+
 def take_sample(n_qubits, sample, m, inv_dist):
     indices = [i for i in range(n_qubits)]
     tot_inv_dist = 0.0
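The factor_width helper added above picks the most nearly square factorization of the qubit count into a grid, and find_all_str_occurrences locates every butterfly-operator position in the Pauli string. A quick worked check of both, in plain Python without the @njit wrapper:

import numpy as np

def factor_width(width):
    # Same logic as the new helper in otoc.py, minus the @njit decorator
    col_len = int(np.floor(np.sqrt(width)))
    while ((width // col_len) * col_len) != width:
        col_len -= 1
    row_len = width // col_len
    return row_len, col_len

def find_all_str_occurrences(main_string, sub_string):
    indices = []
    start_index = 0
    while True:
        index = main_string.find(sub_string, start_index)
        if index == -1:
            break
        indices.append(index)
        start_index = index + 1  # resume after the found occurrence
    return indices

print(factor_width(56))                       # (8, 7): near-square grid
print(factor_width(13))                       # (13, 1): primes give a single row
print(find_all_str_occurrences('XIZX', 'X'))  # [0, 3]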
@@ -81,11 +96,14 @@ def take_sample(n_qubits, sample, m, inv_dist):
     selected = []
     for i in range(m):
         r = tot_inv_dist * np.random.random()
-        p = 0
+        p = inv_dist[indices[0]]
         idx = 0
         while p < r:
-            p += inv_dist[indices[idx]]
             idx += 1
+            if idx >= len(indices):
+                idx = len(indices) - 1
+                break
+            p += inv_dist[indices[idx]]
         i = indices[idx]
         del indices[idx]
         selected.append(i)
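The take_sample fix above repairs the roulette-wheel walk: p now starts at the first remaining candidate's weight, so the comparison p < r never lags one bucket behind, and idx is clamped when floating-point round-off lets r exceed the accumulated total. A self-contained sketch of the corrected weighted selection without replacement follows; the weights are hypothetical, and how the real function keeps tot_inv_dist in sync lies outside this hunk.

import numpy as np

def weighted_pick_without_replacement(weights, m):
    # Roulette-wheel selection, mirroring the corrected loop in take_sample:
    # the running sum starts at the first candidate's weight, so `p < r`
    # never reads past the bucket it has already accumulated.
    indices = list(range(len(weights)))
    tot = float(sum(weights))
    selected = []
    for _ in range(m):
        r = tot * np.random.random()
        p = weights[indices[0]]
        idx = 0
        while p < r:
            idx += 1
            if idx >= len(indices):
                idx = len(indices) - 1  # clamp against float round-off
                break
            p += weights[indices[idx]]
        choice = indices.pop(idx)
        selected.append(choice)
        tot -= weights[choice]  # keep the total in sync after removal
    return selected

print(weighted_pick_without_replacement([0.5, 1.0, 2.0, 4.0], 2))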
@@ -96,92 +114,83 @@ def take_sample(n_qubits, sample, m, inv_dist):
     return sample
 
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-    start_index = 0
-    while True:
-        index = main_string.find(sub_string, start_index)
-        if index == -1:
-            break
-        indices.append(index)
-        start_index = index + 1  # Start searching after the found occurrence
+def get_willow_inv_dist(butterfly_idx_x, butterfly_idx_z, n_qubits, row_len, col_len):
+    inv_dist = np.zeros(n_qubits, dtype=np.float64)
+    for idx in butterfly_idx_x:
+        b_row, b_col = divmod(idx, row_len)
+        for q in range(n_qubits):
+            q_row, q_col = divmod(q, row_len)
+            inv_dist[q] -= abs(q_row - b_row) + abs(q_col - b_col)
+    for idx in butterfly_idx_z:
+        b_row, b_col = divmod(idx, row_len)
+        for q in range(n_qubits):
+            q_row, q_col = divmod(q, row_len)
+            inv_dist[q] += abs(q_row - b_row) + abs(q_col - b_col)
+    inv_dist += 1.0 - inv_dist.min()
 
-    return
+    return inv_dist
 
 
-def get_inv_dist(
+def get_inv_dist(butterfly_idx_x, butterfly_idx_z, n_qubits, row_len, col_len):
     inv_dist = np.zeros(n_qubits, dtype=np.float64)
-
+    half_row = row_len >> 1
+    half_col = col_len >> 1
+    for idx in butterfly_idx_x:
+        b_row, b_col = divmod(idx, row_len)
         for q in range(n_qubits):
-
-
-
-
-
-
+            q_row, q_col = divmod(q, row_len)
+            row_d = abs(q_row - b_row)
+            if row_d > half_row:
+                row_d = row_len - row_d
+            col_d = abs(q_col - b_col)
+            if col_d > half_col:
+                col_d = col_len - col_d
+            inv_dist[q] -= row_d + col_d
+    for idx in butterfly_idx_z:
+        b_row, b_col = divmod(idx, row_len)
+        for q in range(n_qubits):
+            q_row, q_col = divmod(q, row_len)
+            row_d = abs(q_row - b_row)
+            if row_d > half_row:
+                row_d = row_len - row_d
+            col_d = abs(q_col - b_col)
+            if col_d > half_col:
+                col_d = col_len - col_d
+            inv_dist[q] += row_d + col_d
+    inv_dist += 1.0 - inv_dist.min()
 
     return inv_dist
 
 
-def generate_otoc_samples(J=-1.0, h=2.0, z=4, theta=0.174532925199432957, t=5, n_qubits=56, cycles=1, pauli_string = 'X' + 'I' * 55, shots=100,
+def generate_otoc_samples(J=-1.0, h=2.0, z=4, theta=0.174532925199432957, t=5, n_qubits=56, cycles=1, pauli_string = 'X' + 'I' * 55, shots=100, is_orbifold=True):
     pauli_string = list(pauli_string)
     if len(pauli_string) != n_qubits:
        raise ValueError("OTOC pauli_string must be same length as n_qubits! (Use 'I' for qubits that aren't changed.)")
 
-
-    if len(measurement_basis) != n_qubits:
-        raise ValueError("OTOC measurement_basis must be same length as n_qubits! (Use 'I' for excluded qubits.)")
-
-    thresholds = { key: fix_cdf(value) for key, value in get_otoc_hamming_distribution(J, h, z, theta, t, n_qubits, cycles, pauli_string).items() }
+    thresholds = fix_cdf(get_otoc_hamming_distribution(J, h, z, theta, t, n_qubits, cycles, pauli_string))
 
     row_len, col_len = factor_width(n_qubits)
     p_string = "".join(pauli_string)
     butterfly_idx_x = find_all_str_occurrences(p_string, 'X')
-    butterfly_idx_y = find_all_str_occurrences(p_string, 'Y')
     butterfly_idx_z = find_all_str_occurrences(p_string, 'Z')
 
-
-
-
-
-    inv_dist = { 'X': inv_dist_x, 'Y': inv_dist_y, 'Z': inv_dist_z }
+    if is_orbifold:
+        inv_dist = get_inv_dist(butterfly_idx_x, butterfly_idx_z, n_qubits, row_len, col_len)
+    else:
+        inv_dist = get_willow_inv_dist(butterfly_idx_x, butterfly_idx_z, n_qubits, row_len, col_len)
 
     samples = []
     for _ in range(shots):
-
-
-
-
-
-
-
-
-
-
-
-            sample_3_axis[key] = take_sample(n_qubits, sample_3_axis[key], m, inv_dist[key])
-
-        sample = 0
-        j = 0
-        for i in range(n_qubits):
-            base = measurement_basis[i]
-            if base not in ['X', 'Y', 'Z']:
-                continue
-            if (sample_3_axis[base] >> i) & 1:
-                sample |= 1 << j
-            j += 1
-
-        samples.append(sample)
+        # First dimension: Hamming weight
+        m = sample_mag(thresholds)
+        if m == 0:
+            samples.append(0)
+            continue
+        if m >= n_qubits:
+            samples.append((1 << n_qubits) - 1)
+            continue
+
+        # Second dimension: permutation within Hamming weight
+        samples.append(take_sample(n_qubits, 0, m, inv_dist))
 
     return samples
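End-to-end, the revised sampler draws a Hamming weight m from the fixed CDF (via sample_mag, defined elsewhere in otoc.py) and then scatters m set bits using the distance-weighted take_sample. A minimal usage sketch of the new signature follows; it assumes generate_otoc_samples is re-exported from the package root (otherwise import it from pyqrackising.otoc), and the qubit count, butterfly placement, and shot count are purely illustrative.

from pyqrackising import generate_otoc_samples

n_qubits = 56
pauli_string = 'X' + 'I' * (n_qubits - 1)  # butterfly X on qubit 0

# New in 9.4.0: is_orbifold selects the grid-distance metric for bit placement.
# True  -> get_inv_dist (toroidal, wrap-around distances);
# False -> get_willow_inv_dist (open-boundary Manhattan distances).
samples = generate_otoc_samples(
    n_qubits=n_qubits,
    pauli_string=pauli_string,
    shots=10,
    is_orbifold=True,
)
for s in samples[:3]:
    print(format(s, f'0{n_qubits}b'))  # each sample is an integer bitmask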
{pyqrackising-9.3.11 → pyqrackising-9.4.0}/setup.py

@@ -7,7 +7,7 @@ with open(README_PATH) as readme_file:
 
 setup(
     name='pyqrackising',
-    version='9.3.11',
+    version='9.4.0',
     author='Dan Strano',
     author_email='stranoj@gmail.com',
     description='Fast MAXCUT, TSP, and sampling heuristics from near-ideal transverse field Ising model (TFIM)',