pyqrackising 9.3.11.tar.gz → 9.5.0.tar.gz

This diff represents the content of publicly available package versions that have been released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.

This version of pyqrackising has been flagged as a potentially problematic release.

Files changed (29):
  1. {pyqrackising-9.3.11/pyqrackising.egg-info → pyqrackising-9.5.0}/PKG-INFO +1 -1
  2. {pyqrackising-9.3.11 → pyqrackising-9.5.0}/pyproject.toml +1 -1
  3. pyqrackising-9.5.0/pyqrackising/otoc.py +192 -0
  4. {pyqrackising-9.3.11 → pyqrackising-9.5.0/pyqrackising.egg-info}/PKG-INFO +1 -1
  5. {pyqrackising-9.3.11 → pyqrackising-9.5.0}/setup.py +1 -1
  6. pyqrackising-9.3.11/pyqrackising/otoc.py +0 -187
  7. {pyqrackising-9.3.11 → pyqrackising-9.5.0}/LICENSE.md +0 -0
  8. {pyqrackising-9.3.11 → pyqrackising-9.5.0}/MANIFEST.in +0 -0
  9. {pyqrackising-9.3.11 → pyqrackising-9.5.0}/README.md +0 -0
  10. {pyqrackising-9.3.11 → pyqrackising-9.5.0}/pyqrackising/__init__.py +0 -0
  11. {pyqrackising-9.3.11 → pyqrackising-9.5.0}/pyqrackising/convert_tensor_network_to_tsp.py +0 -0
  12. {pyqrackising-9.3.11 → pyqrackising-9.5.0}/pyqrackising/generate_tfim_samples.py +0 -0
  13. {pyqrackising-9.3.11 → pyqrackising-9.5.0}/pyqrackising/kernels.cl +0 -0
  14. {pyqrackising-9.3.11 → pyqrackising-9.5.0}/pyqrackising/maxcut_tfim.py +0 -0
  15. {pyqrackising-9.3.11 → pyqrackising-9.5.0}/pyqrackising/maxcut_tfim_sparse.py +0 -0
  16. {pyqrackising-9.3.11 → pyqrackising-9.5.0}/pyqrackising/maxcut_tfim_streaming.py +0 -0
  17. {pyqrackising-9.3.11 → pyqrackising-9.5.0}/pyqrackising/maxcut_tfim_util.py +0 -0
  18. {pyqrackising-9.3.11 → pyqrackising-9.5.0}/pyqrackising/spin_glass_solver.py +0 -0
  19. {pyqrackising-9.3.11 → pyqrackising-9.5.0}/pyqrackising/spin_glass_solver_sparse.py +0 -0
  20. {pyqrackising-9.3.11 → pyqrackising-9.5.0}/pyqrackising/spin_glass_solver_streaming.py +0 -0
  21. {pyqrackising-9.3.11 → pyqrackising-9.5.0}/pyqrackising/tfim_magnetization.py +0 -0
  22. {pyqrackising-9.3.11 → pyqrackising-9.5.0}/pyqrackising/tfim_square_magnetization.py +0 -0
  23. {pyqrackising-9.3.11 → pyqrackising-9.5.0}/pyqrackising/tsp.py +0 -0
  24. {pyqrackising-9.3.11 → pyqrackising-9.5.0}/pyqrackising/tsp_maxcut.py +0 -0
  25. {pyqrackising-9.3.11 → pyqrackising-9.5.0}/pyqrackising.egg-info/SOURCES.txt +0 -0
  26. {pyqrackising-9.3.11 → pyqrackising-9.5.0}/pyqrackising.egg-info/dependency_links.txt +0 -0
  27. {pyqrackising-9.3.11 → pyqrackising-9.5.0}/pyqrackising.egg-info/not-zip-safe +0 -0
  28. {pyqrackising-9.3.11 → pyqrackising-9.5.0}/pyqrackising.egg-info/top_level.txt +0 -0
  29. {pyqrackising-9.3.11 → pyqrackising-9.5.0}/setup.cfg +0 -0
--- pyqrackising-9.3.11/pyqrackising.egg-info/PKG-INFO
+++ pyqrackising-9.5.0/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pyqrackising
-Version: 9.3.11
+Version: 9.5.0
 Summary: Fast MAXCUT, TSP, and sampling heuristics from near-ideal transverse field Ising model (TFIM)
 Home-page: https://github.com/vm6502q/PyQrackIsing
 Author: Dan Strano
--- pyqrackising-9.3.11/pyproject.toml
+++ pyqrackising-9.5.0/pyproject.toml
@@ -10,7 +10,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "pyqrackising"
-version = "9.3.11"
+version = "9.5.0"
 requires-python = ">=3.8"
 description = "Fast MAXCUT, TSP, and sampling heuristics from near-ideal transverse field Ising model (TFIM)"
 readme = {file = "README.txt", content-type = "text/markdown"}
--- /dev/null
+++ pyqrackising-9.5.0/pyqrackising/otoc.py
@@ -0,0 +1,192 @@
+from .maxcut_tfim_util import probability_by_hamming_weight, sample_mag, opencl_context
+from numba import njit
+import numpy as np
+import sys
+
+
+epsilon = opencl_context.epsilon
+
+
+def get_otoc_hamming_distribution(J=-1.0, h=2.0, z=4, theta=0.0, t=5, n_qubits=65, pauli_strings=['X' + 'I' * 64]):
+    n_bias = n_qubits + 1
+    if h <= epsilon:
+        bias = np.empty(n_bias, dtype=np.float64)
+        bias[0] = 1.0
+        return bias
+
+    diff_z = np.zeros(n_bias, dtype=np.float64)
+    for pauli_string in pauli_strings:
+        pauli_string = list(pauli_string)
+        if len(pauli_string) != n_qubits:
+            raise ValueError("OTOCS pauli_string must be same length as n_qubits! (Use 'I' for qubits that aren't changed.)")
+
+        fwd = probability_by_hamming_weight(J, h, z, theta, t, n_qubits + 1)
+        rev = probability_by_hamming_weight(-J, -h, z, theta + np.pi, t, n_qubits + 1)
+        diff_theta = rev - fwd
+
+        phi = theta + np.pi / 2
+        fwd = probability_by_hamming_weight(-h, -J, z, phi, t, n_qubits + 1)
+        rev = probability_by_hamming_weight(h, J, z, phi + np.pi, t, n_qubits + 1)
+        diff_phi = rev - fwd
+
+        # Lambda (Y-axis) is at a right angle to both J and h,
+        # so there is no difference in this dimension.
+
+        diff_z[0] += n_qubits
+        for b in pauli_string:
+            match b:
+                case 'X':
+                    diff_z += diff_theta
+                case 'Z':
+                    diff_z += diff_phi
+                case 'Y':
+                    diff_z += diff_theta + diff_phi
+                case _:
+                    pass
+
+    # Normalize:
+    diff_z /= diff_z.sum()
+
+    return diff_z
+
+
+@njit
+def fix_cdf(hamming_prob):
+    tot_prob = 0.0
+    n_bias = len(hamming_prob)
+    cum_prob = np.empty(n_bias, dtype=np.float64)
+    for i in range(n_bias):
+        tot_prob += hamming_prob[i]
+        cum_prob[i] = tot_prob
+    cum_prob[-1] = 2.0
+
+    return cum_prob
+
+
+@njit
+def factor_width(width):
+    col_len = int(np.floor(np.sqrt(width)))
+    while ((width // col_len) * col_len) != width:
+        col_len -= 1
+    row_len = width // col_len
+
+    return row_len, col_len
+
+
+# Provided by Google search AI
+def find_all_str_occurrences(main_string, sub_string):
+    indices = []
+    start_index = 0
+    while True:
+        index = main_string.find(sub_string, start_index)
+        if index == -1:
+            break
+        indices.append(index)
+        start_index = index + 1  # Start searching after the found occurrence
+
+    return indices
+
+
+def take_sample(n_qubits, sample, m, inv_dist):
+    indices = [i for i in range(n_qubits)]
+    tot_inv_dist = 0.0
+    for i in range(n_qubits):
+        tot_inv_dist += inv_dist[i]
+    selected = []
+    for i in range(m):
+        r = tot_inv_dist * np.random.random()
+        p = inv_dist[indices[0]]
+        idx = 0
+        while p < r:
+            idx += 1
+            if idx >= len(indices):
+                idx = len(indices) - 1
+                break
+            p += inv_dist[indices[idx]]
+        i = indices[idx]
+        del indices[idx]
+        selected.append(i)
+        tot_inv_dist -= inv_dist[i]
+    for i in selected:
+        sample |= 1 << i
+
+    return sample
+
+
+def get_willow_inv_dist(butterfly_idx_x, butterfly_idx_z, n_qubits, row_len, col_len):
+    inv_dist = np.zeros(n_qubits, dtype=np.float64)
+    for idx in butterfly_idx_x:
+        b_row, b_col = divmod(idx, row_len)
+        for q in range(n_qubits):
+            q_row, q_col = divmod(q, row_len)
+            inv_dist[q] -= abs(q_row - b_row) + abs(q_col - b_col)
+    for idx in butterfly_idx_z:
+        b_row, b_col = divmod(idx, row_len)
+        for q in range(n_qubits):
+            q_row, q_col = divmod(q, row_len)
+            inv_dist[q] += abs(q_row - b_row) + abs(q_col - b_col)
+    inv_dist += 1.0 - inv_dist.min()
+
+    return inv_dist
+
+
+def get_inv_dist(butterfly_idx_x, butterfly_idx_z, n_qubits, row_len, col_len):
+    inv_dist = np.zeros(n_qubits, dtype=np.float64)
+    half_row = row_len >> 1
+    half_col = col_len >> 1
+    for idx in butterfly_idx_x:
+        b_row, b_col = divmod(idx, row_len)
+        for q in range(n_qubits):
+            q_row, q_col = divmod(q, row_len)
+            row_d = abs(q_row - b_row)
+            if row_d > half_row:
+                row_d = row_len - row_d
+            col_d = abs(q_col - b_col)
+            if col_d > half_col:
+                col_d = col_len - col_d
+            inv_dist[q] -= row_d + col_d
+    for idx in butterfly_idx_z:
+        b_row, b_col = divmod(idx, row_len)
+        for q in range(n_qubits):
+            q_row, q_col = divmod(q, row_len)
+            row_d = abs(q_row - b_row)
+            if row_d > half_row:
+                row_d = row_len - row_d
+            col_d = abs(q_col - b_col)
+            if col_d > half_col:
+                col_d = col_len - col_d
+            inv_dist[q] += row_d + col_d
+    inv_dist += 1.0 - inv_dist.min()
+
+    return inv_dist
+
+
+def generate_otoc_samples(J=-1.0, h=2.0, z=4, theta=0.0, t=5, n_qubits=65, pauli_strings=['X' + 'I' * 64], shots=100, is_orbifold=True):
+    thresholds = fix_cdf(get_otoc_hamming_distribution(J, h, z, theta, t, n_qubits, pauli_strings))
+
+    row_len, col_len = factor_width(n_qubits)
+    inv_dist = np.zeros(n_qubits, dtype=np.float64)
+    for pauli_string in pauli_strings:
+        butterfly_idx_x = find_all_str_occurrences(pauli_string, 'X')
+        butterfly_idx_z = find_all_str_occurrences(pauli_string, 'Z')
+        if is_orbifold:
+            inv_dist += get_inv_dist(butterfly_idx_x, butterfly_idx_z, n_qubits, row_len, col_len)
+        else:
+            inv_dist += get_willow_inv_dist(butterfly_idx_x, butterfly_idx_z, n_qubits, row_len, col_len)
+    inv_dist /= 2.0
+
+    samples = []
+    for _ in range(shots):
+        # First dimension: Hamming weight
+        m = sample_mag(thresholds)
+        if m == 0:
+            samples.append(0)
+            continue
+        if m >= n_qubits:
+            samples.append((1 << n_qubits) - 1)
+            continue
+
+        # Second dimension: permutation within Hamming weight
+        samples.append(take_sample(n_qubits, 0, m, inv_dist))
+
+    return samples
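For context, here is a minimal usage sketch of the otoc API added in 9.5.0, using only the signatures visible in the hunk above. The import path pyqrackising.otoc is an assumption (the package may also re-export these names at top level), and the parameter values are simply the hunk's own defaults:

from pyqrackising.otoc import generate_otoc_samples, get_otoc_hamming_distribution  # assumed import path

n_qubits = 65
pauli_strings = ['X' + 'I' * (n_qubits - 1)]  # one butterfly 'X' on qubit 0, 'I' elsewhere

# Probability distribution over Hamming weights 0..n_qubits
# (a single array of n_qubits + 1 floats in 9.5.0):
dist = get_otoc_hamming_distribution(J=-1.0, h=2.0, z=4, theta=0.0, t=5,
                                     n_qubits=n_qubits, pauli_strings=pauli_strings)

# Bit-string samples; each sample is an integer bit mask over n_qubits qubits:
samples = generate_otoc_samples(J=-1.0, h=2.0, z=4, theta=0.0, t=5,
                                n_qubits=n_qubits, pauli_strings=pauli_strings,
                                shots=100, is_orbifold=True)
print(sum(bin(s).count('1') for s in samples) / len(samples))  # mean Hamming weight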
--- pyqrackising-9.3.11/PKG-INFO
+++ pyqrackising-9.5.0/pyqrackising.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pyqrackising
-Version: 9.3.11
+Version: 9.5.0
 Summary: Fast MAXCUT, TSP, and sampling heuristics from near-ideal transverse field Ising model (TFIM)
 Home-page: https://github.com/vm6502q/PyQrackIsing
 Author: Dan Strano
--- pyqrackising-9.3.11/setup.py
+++ pyqrackising-9.5.0/setup.py
@@ -7,7 +7,7 @@ with open(README_PATH) as readme_file:
 
 setup(
     name='pyqrackising',
-    version='9.3.11',
+    version='9.5.0',
     author='Dan Strano',
     author_email='stranoj@gmail.com',
     description='Fast MAXCUT, TSP, and sampling heuristics from near-ideal transverse field Ising model (TFIM)',
--- pyqrackising-9.3.11/pyqrackising/otoc.py
+++ /dev/null
@@ -1,187 +0,0 @@
-from .maxcut_tfim_util import probability_by_hamming_weight, sample_mag, opencl_context
-from numba import njit
-import numpy as np
-import sys
-
-
-epsilon = opencl_context.epsilon
-
-
-def get_otoc_hamming_distribution(J=-1.0, h=2.0, z=4, theta=0.174532925199432957, t=5, n_qubits=56, cycles=1, pauli_string='X' + 'I' * 55):
-    pauli_string = list(pauli_string)
-    if len(pauli_string) != n_qubits:
-        raise ValueError("OTOCS pauli_string must be same length as n_qubits! (Use 'I' for qubits that aren't changed.)")
-
-    n_bias = n_qubits + 1
-    if h <= epsilon:
-        bias = np.empty(n_bias, dtype=np.float64)
-        bias[0] = 1.0
-        return { 'X': bias, 'Y': bias, 'Z': bias }
-
-    fwd = probability_by_hamming_weight(J, h, z, theta, t, n_qubits + 1)
-    rev = probability_by_hamming_weight(-J, -h, z, theta + np.pi, t, n_qubits + 1)
-    diff_theta = rev - fwd
-
-    phi = theta + np.pi / 2
-    fwd = probability_by_hamming_weight(-h, -J, z, phi, t, n_qubits + 1)
-    rev = probability_by_hamming_weight(h, J, z, phi + np.pi, t, n_qubits + 1)
-    diff_phi = rev - fwd
-
-    # Lambda (Y-axis) is at a right angle to both J and h,
-    # so there is no difference in this dimension.
-
-    diff_theta *= cycles
-    diff_phi *= cycles
-    # diff_lam = diff_phi
-
-    diff_z = np.zeros(n_bias, dtype=np.float64)
-    diff_x = np.zeros(n_bias, dtype=np.float64)
-    diff_y = np.zeros(n_bias, dtype=np.float64)
-    for b in pauli_string:
-        match b:
-            case 'X':
-                diff_z += diff_theta
-                diff_y += diff_phi
-            case 'Z':
-                diff_x += diff_phi
-                diff_y += diff_theta
-            case 'Y':
-                diff_z += diff_theta
-                diff_x += diff_phi
-
-    diff_z[0] += n_qubits
-    diff_x[0] += n_qubits
-    diff_y[0] += n_qubits
-
-    diff_z /= diff_z.sum()
-    diff_x /= diff_x.sum()
-    diff_y /= diff_y.sum()
-
-    return { 'X': diff_x, 'Y': diff_y, 'Z': diff_z }
-
-
-@njit
-def fix_cdf(hamming_prob):
-    tot_prob = 0.0
-    n_bias = len(hamming_prob)
-    cum_prob = np.empty(n_bias, dtype=np.float64)
-    for i in range(n_bias):
-        tot_prob += hamming_prob[i]
-        cum_prob[i] = tot_prob
-    cum_prob[-1] = 2.0
-
-    return cum_prob
-
-
-def take_sample(n_qubits, sample, m, inv_dist):
-    indices = [i for i in range(n_qubits)]
-    tot_inv_dist = 0.0
-    for i in range(n_qubits):
-        tot_inv_dist += inv_dist[i]
-    selected = []
-    for i in range(m):
-        r = tot_inv_dist * np.random.random()
-        p = 0.0
-        idx = 0
-        while p < r:
-            p += inv_dist[indices[idx]]
-            idx += 1
-        i = indices[idx]
-        del indices[idx]
-        selected.append(i)
-        tot_inv_dist -= inv_dist[i]
-    for i in selected:
-        sample |= 1 << i
-
-    return sample
-
-
-@njit
-def factor_width(width):
-    col_len = np.floor(np.sqrt(width))
-    while ((width // col_len) * col_len) != width:
-        col_len -= 1
-    row_len = width // col_len
-
-    return row_len, col_len
-
-
-# Provided by Google search AI
-def find_all_str_occurrences(main_string, sub_string):
-    indices = []
-    start_index = 0
-    while True:
-        index = main_string.find(sub_string, start_index)
-        if index == -1:
-            break
-        indices.append(index)
-        start_index = index + 1  # Start searching after the found occurrence
-
-    return indices
-
-
-def get_inv_dist(butterfly_idx, n_qubits, row_len):
-    inv_dist = np.zeros(n_qubits, dtype=np.float64)
-    for idx in butterfly_idx:
-        for q in range(n_qubits):
-            b_row = idx // row_len
-            b_col = idx % row_len
-            q_row = q // row_len
-            q_col = q % row_len
-            dist = (q_row - b_row) ** 2 + (q_col - b_col) ** 2
-            inv_dist[q] += 1.0 / (1.0 + dist)
-
-    return inv_dist
-
-
-def generate_otoc_samples(J=-1.0, h=2.0, z=4, theta=0.174532925199432957, t=5, n_qubits=56, cycles=1, pauli_string='X' + 'I' * 55, shots=100, measurement_basis='Z' * 56):
-    pauli_string = list(pauli_string)
-    if len(pauli_string) != n_qubits:
-        raise ValueError("OTOC pauli_string must be same length as n_qubits! (Use 'I' for qubits that aren't changed.)")
-
-    measurement_basis = list(measurement_basis)
-    if len(measurement_basis) != n_qubits:
-        raise ValueError("OTOC measurement_basis must be same length as n_qubits! (Use 'I' for excluded qubits.)")
-
-    thresholds = { key: fix_cdf(value) for key, value in get_otoc_hamming_distribution(J, h, z, theta, t, n_qubits, cycles, pauli_string).items() }
-
-    row_len, col_len = factor_width(n_qubits)
-    p_string = "".join(pauli_string)
-    butterfly_idx_x = find_all_str_occurrences(p_string, 'X')
-    butterfly_idx_y = find_all_str_occurrences(p_string, 'Y')
-    butterfly_idx_z = find_all_str_occurrences(p_string, 'Z')
-
-    inv_dist_x = get_inv_dist(butterfly_idx_x, n_qubits, row_len)
-    inv_dist_y = get_inv_dist(butterfly_idx_y, n_qubits, row_len)
-    inv_dist_z = get_inv_dist(butterfly_idx_z, n_qubits, row_len)
-
-    inv_dist = { 'X': inv_dist_x, 'Y': inv_dist_y, 'Z': inv_dist_z }
-
-    samples = []
-    for _ in range(shots):
-        sample_3_axis = { 'X': 0, 'Y': 0, 'Z': 0 }
-        for key, value in thresholds.items():
-            # First dimension: Hamming weight
-            m = sample_mag(value)
-            if m == 0:
-                continue
-            if m >= n_qubits:
-                sample_3_axis[key] = (1 << n_qubits) - 1
-                continue
-
-            # Second dimension: permutation within Hamming weight
-            sample_3_axis[key] = take_sample(n_qubits, sample_3_axis[key], m, inv_dist[key])
-
-        sample = 0
-        j = 0
-        for i in range(n_qubits):
-            base = measurement_basis[i]
-            if base not in ['X', 'Y', 'Z']:
-                continue
-            if (sample_3_axis[base] >> i) & 1:
-                sample |= 1 << j
-            j += 1
-
-        samples.append(sample)
-
-    return samples
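Read together with the new module above, this removed file shows the API break in the release: 9.5.0 drops the cycles and measurement_basis parameters, replaces the single pauli_string with a pauli_strings list, adds is_orbifold, changes the theta default from 0.174532925199432957 to 0.0, and returns one distribution array instead of a dict keyed by axis. A hedged migration sketch, based only on the signatures visible in this diff (the import path is again an assumption):

from pyqrackising.otoc import generate_otoc_samples  # assumed import path

# 9.3.11 call (removed in this release):
# samples = generate_otoc_samples(J=-1.0, h=2.0, z=4, theta=0.174532925199432957, t=5,
#                                 n_qubits=56, cycles=1, pauli_string='X' + 'I' * 55,
#                                 shots=100, measurement_basis='Z' * 56)

# Closest 9.5.0 call (all-qubit Z-basis readout is now implicit):
samples = generate_otoc_samples(J=-1.0, h=2.0, z=4, theta=0.0, t=5,
                                n_qubits=56, pauli_strings=['X' + 'I' * 55],
                                shots=100)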