pyqrackising 9.3.10.tar.gz → 9.4.0.tar.gz

This diff compares the contents of two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
Files changed (29)
  1. {pyqrackising-9.3.10/pyqrackising.egg-info → pyqrackising-9.4.0}/PKG-INFO +1 -1
  2. {pyqrackising-9.3.10 → pyqrackising-9.4.0}/pyproject.toml +1 -1
  3. pyqrackising-9.4.0/pyqrackising/otoc.py +196 -0
  4. {pyqrackising-9.3.10 → pyqrackising-9.4.0/pyqrackising.egg-info}/PKG-INFO +1 -1
  5. {pyqrackising-9.3.10 → pyqrackising-9.4.0}/setup.py +1 -1
  6. pyqrackising-9.3.10/pyqrackising/otoc.py +0 -238
  7. {pyqrackising-9.3.10 → pyqrackising-9.4.0}/LICENSE.md +0 -0
  8. {pyqrackising-9.3.10 → pyqrackising-9.4.0}/MANIFEST.in +0 -0
  9. {pyqrackising-9.3.10 → pyqrackising-9.4.0}/README.md +0 -0
  10. {pyqrackising-9.3.10 → pyqrackising-9.4.0}/pyqrackising/__init__.py +0 -0
  11. {pyqrackising-9.3.10 → pyqrackising-9.4.0}/pyqrackising/convert_tensor_network_to_tsp.py +0 -0
  12. {pyqrackising-9.3.10 → pyqrackising-9.4.0}/pyqrackising/generate_tfim_samples.py +0 -0
  13. {pyqrackising-9.3.10 → pyqrackising-9.4.0}/pyqrackising/kernels.cl +0 -0
  14. {pyqrackising-9.3.10 → pyqrackising-9.4.0}/pyqrackising/maxcut_tfim.py +0 -0
  15. {pyqrackising-9.3.10 → pyqrackising-9.4.0}/pyqrackising/maxcut_tfim_sparse.py +0 -0
  16. {pyqrackising-9.3.10 → pyqrackising-9.4.0}/pyqrackising/maxcut_tfim_streaming.py +0 -0
  17. {pyqrackising-9.3.10 → pyqrackising-9.4.0}/pyqrackising/maxcut_tfim_util.py +0 -0
  18. {pyqrackising-9.3.10 → pyqrackising-9.4.0}/pyqrackising/spin_glass_solver.py +0 -0
  19. {pyqrackising-9.3.10 → pyqrackising-9.4.0}/pyqrackising/spin_glass_solver_sparse.py +0 -0
  20. {pyqrackising-9.3.10 → pyqrackising-9.4.0}/pyqrackising/spin_glass_solver_streaming.py +0 -0
  21. {pyqrackising-9.3.10 → pyqrackising-9.4.0}/pyqrackising/tfim_magnetization.py +0 -0
  22. {pyqrackising-9.3.10 → pyqrackising-9.4.0}/pyqrackising/tfim_square_magnetization.py +0 -0
  23. {pyqrackising-9.3.10 → pyqrackising-9.4.0}/pyqrackising/tsp.py +0 -0
  24. {pyqrackising-9.3.10 → pyqrackising-9.4.0}/pyqrackising/tsp_maxcut.py +0 -0
  25. {pyqrackising-9.3.10 → pyqrackising-9.4.0}/pyqrackising.egg-info/SOURCES.txt +0 -0
  26. {pyqrackising-9.3.10 → pyqrackising-9.4.0}/pyqrackising.egg-info/dependency_links.txt +0 -0
  27. {pyqrackising-9.3.10 → pyqrackising-9.4.0}/pyqrackising.egg-info/not-zip-safe +0 -0
  28. {pyqrackising-9.3.10 → pyqrackising-9.4.0}/pyqrackising.egg-info/top_level.txt +0 -0
  29. {pyqrackising-9.3.10 → pyqrackising-9.4.0}/setup.cfg +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: pyqrackising
- Version: 9.3.10
+ Version: 9.4.0
  Summary: Fast MAXCUT, TSP, and sampling heuristics from near-ideal transverse field Ising model (TFIM)
  Home-page: https://github.com/vm6502q/PyQrackIsing
  Author: Dan Strano
@@ -10,7 +10,7 @@ build-backend = "setuptools.build_meta"

  [project]
  name = "pyqrackising"
- version = "9.3.10"
+ version = "9.4.0"
  requires-python = ">=3.8"
  description = "Fast MAXCUT, TSP, and sampling heuristics from near-ideal transverse field Ising model (TFIM)"
  readme = {file = "README.txt", content-type = "text/markdown"}
@@ -0,0 +1,196 @@
+ from .maxcut_tfim_util import probability_by_hamming_weight, sample_mag, opencl_context
+ from numba import njit
+ import numpy as np
+ import sys
+
+
+ epsilon = opencl_context.epsilon
+
+
+ def get_otoc_hamming_distribution(J=-1.0, h=2.0, z=4, theta=0.174532925199432957, t=5, n_qubits=56, cycles=1, pauli_string = 'X' + 'I' * 55):
+     pauli_string = list(pauli_string)
+     if len(pauli_string) != n_qubits:
+         raise ValueError("OTOCS pauli_string must be same length as n_qubits! (Use 'I' for qubits that aren't changed.)")
+
+     n_bias = n_qubits + 1
+     if h <= epsilon:
+         bias = np.empty(n_bias, dtype=np.float64)
+         bias[0] = 1.0
+         return { 'X': bias, 'Y': bias, 'Z': bias }
+
+     fwd = probability_by_hamming_weight(J, h, z, theta, t, n_qubits + 1)
+     rev = probability_by_hamming_weight(-J, -h, z, theta + np.pi, t, n_qubits + 1)
+     diff_theta = rev - fwd
+
+     phi = theta + np.pi / 2
+     fwd = probability_by_hamming_weight(-h, -J, z, phi, t, n_qubits + 1)
+     rev = probability_by_hamming_weight(h, J, z, phi + np.pi, t, n_qubits + 1)
+     diff_phi = rev - fwd
+
+     # Lambda (Y-axis) is at a right angle to both J and h,
+     # so there is no difference in this dimension.
+
+     diff_theta *= cycles
+     diff_phi *= cycles
+
+     diff_z = np.zeros(n_bias, dtype=np.float64)
+     for b in pauli_string:
+         match b:
+             case 'X':
+                 diff_z += diff_theta
+             case 'Z':
+                 diff_z += diff_phi
+             case 'Y':
+                 diff_z += diff_theta + diff_phi
+             case _:
+                 pass
+
+     diff_z[0] += n_qubits
+     diff_z /= diff_z.sum()
+
+     return diff_z
+
+
+ @njit
+ def fix_cdf(hamming_prob):
+     tot_prob = 0.0
+     n_bias = len(hamming_prob)
+     cum_prob = np.empty(n_bias, dtype=np.float64)
+     for i in range(n_bias):
+         tot_prob += hamming_prob[i]
+         cum_prob[i] = tot_prob
+     cum_prob[-1] = 2.0
+
+     return cum_prob
+
+
+ @njit
+ def factor_width(width):
+     col_len = int(np.floor(np.sqrt(width)))
+     while ((width // col_len) * col_len) != width:
+         col_len -= 1
+     row_len = width // col_len
+
+     return row_len, col_len
+
+
+ # Provided by Google search AI
+ def find_all_str_occurrences(main_string, sub_string):
+     indices = []
+     start_index = 0
+     while True:
+         index = main_string.find(sub_string, start_index)
+         if index == -1:
+             break
+         indices.append(index)
+         start_index = index + 1  # Start searching after the found occurrence
+
+     return indices
+
+
+ def take_sample(n_qubits, sample, m, inv_dist):
+     indices = [i for i in range(n_qubits)]
+     tot_inv_dist = 0.0
+     for i in range(n_qubits):
+         tot_inv_dist += inv_dist[i]
+     selected = []
+     for i in range(m):
+         r = tot_inv_dist * np.random.random()
+         p = inv_dist[indices[0]]
+         idx = 0
+         while p < r:
+             idx += 1
+             if idx >= len(indices):
+                 idx = len(indices) - 1
+                 break
+             p += inv_dist[indices[idx]]
+         i = indices[idx]
+         del indices[idx]
+         selected.append(i)
+         tot_inv_dist -= inv_dist[i]
+     for i in selected:
+         sample |= 1 << i
+
+     return sample
+
+
+ def get_willow_inv_dist(butterfly_idx_x, butterfly_idx_z, n_qubits, row_len, col_len):
+     inv_dist = np.zeros(n_qubits, dtype=np.float64)
+     for idx in butterfly_idx_x:
+         b_row, b_col = divmod(idx, row_len)
+         for q in range(n_qubits):
+             q_row, q_col = divmod(q, row_len)
+             inv_dist[q] -= abs(q_row - b_row) + abs(q_col - b_col)
+     for idx in butterfly_idx_z:
+         b_row, b_col = divmod(idx, row_len)
+         for q in range(n_qubits):
+             q_row, q_col = divmod(q, row_len)
+             inv_dist[q] += abs(q_row - b_row) + abs(q_col - b_col)
+     inv_dist += 1.0 - inv_dist.min()
+
+     return inv_dist
+
+
+ def get_inv_dist(butterfly_idx_x, butterfly_idx_z, n_qubits, row_len, col_len):
+     inv_dist = np.zeros(n_qubits, dtype=np.float64)
+     half_row = row_len >> 1
+     half_col = col_len >> 1
+     for idx in butterfly_idx_x:
+         b_row, b_col = divmod(idx, row_len)
+         for q in range(n_qubits):
+             q_row, q_col = divmod(q, row_len)
+             row_d = abs(q_row - b_row)
+             if row_d > half_row:
+                 row_d = row_len - row_d
+             col_d = abs(q_col - b_col)
+             if col_d > half_col:
+                 col_d = col_len - col_d
+             inv_dist[q] -= row_d + col_d
+     for idx in butterfly_idx_z:
+         b_row, b_col = divmod(idx, row_len)
+         for q in range(n_qubits):
+             q_row, q_col = divmod(q, row_len)
+             row_d = abs(q_row - b_row)
+             if row_d > half_row:
+                 row_d = row_len - row_d
+             col_d = abs(q_col - b_col)
+             if col_d > half_col:
+                 col_d = col_len - col_d
+             inv_dist[q] += row_d + col_d
+     inv_dist += 1.0 - inv_dist.min()
+
+     return inv_dist
+
+
+ def generate_otoc_samples(J=-1.0, h=2.0, z=4, theta=0.174532925199432957, t=5, n_qubits=56, cycles=1, pauli_string = 'X' + 'I' * 55, shots=100, is_orbifold=True):
+     pauli_string = list(pauli_string)
+     if len(pauli_string) != n_qubits:
+         raise ValueError("OTOC pauli_string must be same length as n_qubits! (Use 'I' for qubits that aren't changed.)")
+
+     thresholds = fix_cdf(get_otoc_hamming_distribution(J, h, z, theta, t, n_qubits, cycles, pauli_string))
+
+     row_len, col_len = factor_width(n_qubits)
+     p_string = "".join(pauli_string)
+     butterfly_idx_x = find_all_str_occurrences(p_string, 'X')
+     butterfly_idx_z = find_all_str_occurrences(p_string, 'Z')
+
+     if is_orbifold:
+         inv_dist = get_inv_dist(butterfly_idx_x, butterfly_idx_z, n_qubits, row_len, col_len)
+     else:
+         inv_dist = get_willow_inv_dist(butterfly_idx_x, butterfly_idx_z, n_qubits, row_len, col_len)
+
+     samples = []
+     for _ in range(shots):
+         # First dimension: Hamming weight
+         m = sample_mag(thresholds)
+         if m == 0:
+             samples.append(0)
+             continue
+         if m >= n_qubits:
+             samples.append((1 << n_qubits) - 1)
+             continue
+
+         # Second dimension: permutation within Hamming weight
+         samples.append(take_sample(n_qubits, 0, m, inv_dist))
+
+     return samples
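The new otoc.py replaces the per-axis (X/Y/Z) sampling of 9.3.10 with a single Hamming-weight distribution, and its new is_orbifold flag selects between wrap-around (toroidal) Manhattan distances in get_inv_dist and open-boundary distances in get_willow_inv_dist. A minimal usage sketch follows; it assumes PyQrackIsing 9.4.0 is installed and imports directly from the pyqrackising.otoc module (the diff does not show whether __init__.py re-exports these names):

# Minimal usage sketch (not part of the diff); assumes PyQrackIsing 9.4.0
# is installed and that pyqrackising.otoc is importable as shown.
from pyqrackising.otoc import generate_otoc_samples

n_qubits = 56
samples = generate_otoc_samples(
    J=-1.0,                       # Ising coupling
    h=2.0,                        # transverse field
    z=4,                          # lattice coordination number
    theta=0.174532925199432957,   # ~10 degrees, in radians
    t=5,                          # evolution time
    n_qubits=n_qubits,
    cycles=1,
    pauli_string='X' + 'I' * (n_qubits - 1),  # butterfly operator on qubit 0
    shots=10,
    is_orbifold=True,             # new in 9.4.0: toroidal distance weighting
)

# Each sample is an integer bit mask over the qubits.
for s in samples:
    print(f"{s:0{n_qubits}b}")

Each returned sample is an integer bit mask, with bit i set when qubit i is flipped relative to the all-zeros reference, matching the sample |= 1 << i packing in take_sample.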
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: pyqrackising
- Version: 9.3.10
+ Version: 9.4.0
  Summary: Fast MAXCUT, TSP, and sampling heuristics from near-ideal transverse field Ising model (TFIM)
  Home-page: https://github.com/vm6502q/PyQrackIsing
  Author: Dan Strano
@@ -7,7 +7,7 @@ with open(README_PATH) as readme_file:

  setup(
      name='pyqrackising',
-     version='9.3.10',
+     version='9.4.0',
      author='Dan Strano',
      author_email='stranoj@gmail.com',
      description='Fast MAXCUT, TSP, and sampling heuristics from near-ideal transverse field Ising model (TFIM)',
@@ -1,238 +0,0 @@
- from .maxcut_tfim_util import probability_by_hamming_weight, sample_mag, opencl_context
- from numba import njit
- import numpy as np
- import sys
-
-
- epsilon = opencl_context.epsilon
-
-
- def get_otoc_hamming_distribution(J=-1.0, h=2.0, z=4, theta=0.174532925199432957, t=5, n_qubits=56, cycles=1, pauli_string = 'X' + 'I' * 55):
-     pauli_string = list(pauli_string)
-     if len(pauli_string) != n_qubits:
-         raise ValueError("OTOCS pauli_string must be same length as n_qubits! (Use 'I' for qubits that aren't changed.)")
-
-     n_bias = n_qubits + 1
-     if h <= epsilon:
-         bias = np.empty(n_bias, dtype=np.float64)
-         bias[0] = 1.0
-         return { 'X': bias, 'Y': bias, 'Z': bias }
-
-     fwd = probability_by_hamming_weight(J, h, z, theta, t, n_qubits + 1)
-     rev = probability_by_hamming_weight(-J, -h, z, theta + np.pi, t, n_qubits + 1)
-     diff_theta = rev - fwd
-
-     phi = theta + np.pi / 2
-     fwd = probability_by_hamming_weight(-h, -J, z, phi, t, n_qubits + 1)
-     rev = probability_by_hamming_weight(h, J, z, phi + np.pi, t, n_qubits + 1)
-     diff_phi = rev - fwd
-
-     # Lambda (Y-axis) is at a right angle to both J and h,
-     # so there is no difference in this dimension.
-
-     diff_theta *= cycles
-     diff_phi *= cycles
-     # diff_lam = diff_phi
-
-     diff_z = np.zeros(n_bias, dtype=np.float64)
-     diff_x = np.zeros(n_bias, dtype=np.float64)
-     diff_y = np.zeros(n_bias, dtype=np.float64)
-     for b in pauli_string:
-         match b:
-             case 'X':
-                 diff_z += diff_theta
-                 diff_y += diff_phi
-             case 'Z':
-                 diff_x += diff_phi
-                 diff_y += diff_theta
-             case 'Y':
-                 diff_z += diff_theta
-                 diff_x += diff_phi
-
-     diff_z[0] += n_qubits
-     diff_x[0] += n_qubits
-     diff_y[0] += n_qubits
-
-     diff_z /= diff_z.sum()
-     diff_x /= diff_x.sum()
-     diff_y /= diff_y.sum()
-
-     return { 'X': diff_x, 'Y': diff_y, 'Z': diff_z }
-
-
- @njit
- def fix_cdf(hamming_prob):
-     tot_prob = 0.0
-     n_bias = len(hamming_prob)
-     cum_prob = np.empty(n_bias, dtype=np.float64)
-     for i in range(n_bias):
-         tot_prob += hamming_prob[i]
-         cum_prob[i] = tot_prob
-     cum_prob[-1] = 2.0
-
-     return cum_prob
-
-
- def take_all(b, basis, sample):
-     for i in range(len(basis)):
-         if basis[i] == b:
-             sample |= (1 << i)
-
-     return sample
-
-
- def take_sample(b, basis, sample, m, inv_dist):
-     indices = []
-     tot_inv_dist = 0.0
-     for i in range(len(basis)):
-         if basis[i] == b:
-             indices.append(i)
-             tot_inv_dist += inv_dist[i]
-     selected = []
-     for i in range(m):
-         r = tot_inv_dist * np.random.random()
-         p = 0.0
-         idx = 0
-         while p < r:
-             p += inv_dist[indices[idx]]
-             idx += 1
-         i = indices[idx]
-         del indices[idx]
-         selected.append(i)
-         tot_inv_dist -= inv_dist[i]
-     for i in selected:
-         sample |= 1 << i
-
-     return sample
-
-
- @njit
- def factor_width(width):
-     col_len = np.floor(np.sqrt(width))
-     while ((width // col_len) * col_len) != width:
-         col_len -= 1
-     row_len = width // col_len
-
-     return row_len, col_len
-
-
- # Provided by Google search AI
- def find_all_str_occurrences(main_string, sub_string):
-     indices = []
-     start_index = 0
-     while True:
-         index = main_string.find(sub_string, start_index)
-         if index == -1:
-             break
-         indices.append(index)
-         start_index = index + 1  # Start searching after the found occurrence
-
-     return indices
-
-
- def get_inv_dist(butterfly_idx, n_qubits, row_len):
-     inv_dist = np.zeros(n_qubits, dtype=np.float64)
-     for idx in butterfly_idx:
-         for q in range(n_qubits):
-             b_row = idx // row_len
-             b_col = idx % row_len
-             q_row = q // row_len
-             q_col = q % row_len
-             dist = (q_row - b_row) ** 2 + (q_col - b_col) ** 2
-             if dist > 0:
-                 inv_dist[q] += 1.0 / dist
-
-     return inv_dist
-
-
- def generate_otoc_samples(J=-1.0, h=2.0, z=4, theta=0.174532925199432957, t=5, n_qubits=56, cycles=1, pauli_string = 'X' + 'I' * 55, shots=100, measurement_basis='Z' * 56):
-     pauli_string = list(pauli_string)
-     if len(pauli_string) != n_qubits:
-         raise ValueError("OTOC pauli_string must be same length as n_qubits! (Use 'I' for qubits that aren't changed.)")
-
-     measurement_basis = list(measurement_basis)
-     if len(measurement_basis) != n_qubits:
-         raise ValueError("OTOC measurement_basis must be same length as n_qubits! (Use 'I' for excluded qubits.)")
-
-     basis_x, basis_y, basis_z = [], [], []
-     for b in pauli_string:
-         if b == 'Z':
-             basis_z.append('X')
-             basis_y.append('I')
-             basis_x.append('Z')
-         elif b == 'X':
-             basis_z.append('Z')
-             basis_y.append('I')
-             basis_x.append('X')
-         elif b == 'Y':
-             basis_z.append('I')
-             basis_y.append('Z')
-             basis_x.append('I')
-         else:
-             basis_z.append('I')
-             basis_y.append('I')
-             basis_x.append('I')
-
-     bases = { 'X': basis_x, 'Y': basis_y, 'Z': basis_z }
-     thresholds = { key: fix_cdf(value) for key, value in get_otoc_hamming_distribution(J, h, z, theta, t, n_qubits, cycles, pauli_string).items() }
-
-     row_len, col_len = factor_width(n_qubits)
-     p_string = "".join(pauli_string)
-     butterfly_idx_x = find_all_str_occurrences(p_string, 'X')
-     butterfly_idx_y = find_all_str_occurrences(p_string, 'Y')
-     butterfly_idx_z = find_all_str_occurrences(p_string, 'Z')
-
-     inv_dist_x = get_inv_dist(butterfly_idx_x, n_qubits, row_len)
-     inv_dist_y = get_inv_dist(butterfly_idx_y, n_qubits, row_len)
-     inv_dist_z = get_inv_dist(butterfly_idx_z, n_qubits, row_len)
-
-     inv_dist = { 'X': inv_dist_x, 'Y': inv_dist_y, 'Z': inv_dist_z }
-
-     samples = []
-     for _ in range(shots):
-         sample_3_axis = { 'X': 0, 'Y': 0, 'Z': 0 }
-         for key, value in thresholds.items():
-             basis = bases[key]
-
-             # First dimension: Hamming weight
-             m = sample_mag(value)
-             if m == 0:
-                 continue
-             if m >= n_qubits:
-                 sample_3_axis[key] = (1 << n_qubits) - 1
-                 continue
-
-             # Second dimension: permutation within Hamming weight
-             z_count = basis.count('Z')
-             if z_count > m:
-                 sample_3_axis[key] = take_sample('Z', basis, sample_3_axis[key], m, inv_dist[key])
-                 continue
-             m -= z_count
-             sample_3_axis[key] = take_all('Z', basis, sample_3_axis[key])
-             if m == 0:
-                 continue
-
-             i_count = basis.count('I')
-             if i_count > m:
-                 sample_3_axis[key] = take_sample('I', basis, sample_3_axis[key], m, inv_dist[key])
-                 continue
-             m -= i_count
-             sample_3_axis[key] = take_all('I', basis, sample_3_axis[key])
-             if m == 0:
-                 continue
-
-             sample_3_axis[key] = take_sample('X', basis, sample_3_axis[key], m, inv_dist[key])
-
-         sample = 0
-         j = 0
-         for i in range(n_qubits):
-             base = measurement_basis[i]
-             if base not in ['X', 'Y', 'Z']:
-                 continue
-             if (sample_3_axis[base] >> i) & 1:
-                 sample |= 1 << j
-             j += 1
-
-         samples.append(sample)
-
-     return samples
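For contrast: the removed 9.3.10 implementation above drew three per-axis samples and projected them through a measurement_basis string, while 9.4.0 folds everything into one distribution and one weighted draw. Both versions share the same two-stage scheme: a Hamming-weight draw against a cumulative distribution (fix_cdf plus sample_mag), then a weighted selection of which qubits flip (take_sample). The sketch below is a self-contained NumPy approximation of that scheme, not library code; the hamming_prob and weights values are hypothetical stand-ins for the distribution and inv_dist arrays:

# Self-contained sketch of the two-stage sampling idea (not library code).
import numpy as np

rng = np.random.default_rng()

def sample_two_stage(hamming_prob, weights, n_qubits):
    # Stage 1: draw a Hamming weight m by inverting the CDF,
    # as fix_cdf + sample_mag do in otoc.py.
    cdf = np.cumsum(hamming_prob)
    m = int(np.searchsorted(cdf, rng.random()))
    if m == 0:
        return 0
    if m >= n_qubits:
        return (1 << n_qubits) - 1
    # Stage 2: pick m distinct qubits with probability proportional
    # to their weights, as take_sample does with its manual CDF walk.
    chosen = rng.choice(n_qubits, size=m, replace=False, p=weights / weights.sum())
    sample = 0
    for q in chosen:
        sample |= 1 << int(q)
    return sample

# Hypothetical inputs: a 4-qubit system, so weights 0..4 are possible.
hamming_prob = np.array([0.4, 0.3, 0.2, 0.07, 0.03])
weights = np.array([3.0, 1.0, 1.0, 5.0])
print(bin(sample_two_stage(hamming_prob, weights, 4)))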