pyqrackising 9.3.11__tar.gz → 9.4.0__tar.gz

This diff shows the content of publicly available package versions as released to their respective public registries, and is provided for informational purposes only.
Files changed (28)
  1. {pyqrackising-9.3.11/pyqrackising.egg-info → pyqrackising-9.4.0}/PKG-INFO +1 -1
  2. {pyqrackising-9.3.11 → pyqrackising-9.4.0}/pyproject.toml +1 -1
  3. {pyqrackising-9.3.11 → pyqrackising-9.4.0}/pyqrackising/otoc.py +90 -81
  4. {pyqrackising-9.3.11 → pyqrackising-9.4.0/pyqrackising.egg-info}/PKG-INFO +1 -1
  5. {pyqrackising-9.3.11 → pyqrackising-9.4.0}/setup.py +1 -1
  6. {pyqrackising-9.3.11 → pyqrackising-9.4.0}/LICENSE.md +0 -0
  7. {pyqrackising-9.3.11 → pyqrackising-9.4.0}/MANIFEST.in +0 -0
  8. {pyqrackising-9.3.11 → pyqrackising-9.4.0}/README.md +0 -0
  9. {pyqrackising-9.3.11 → pyqrackising-9.4.0}/pyqrackising/__init__.py +0 -0
  10. {pyqrackising-9.3.11 → pyqrackising-9.4.0}/pyqrackising/convert_tensor_network_to_tsp.py +0 -0
  11. {pyqrackising-9.3.11 → pyqrackising-9.4.0}/pyqrackising/generate_tfim_samples.py +0 -0
  12. {pyqrackising-9.3.11 → pyqrackising-9.4.0}/pyqrackising/kernels.cl +0 -0
  13. {pyqrackising-9.3.11 → pyqrackising-9.4.0}/pyqrackising/maxcut_tfim.py +0 -0
  14. {pyqrackising-9.3.11 → pyqrackising-9.4.0}/pyqrackising/maxcut_tfim_sparse.py +0 -0
  15. {pyqrackising-9.3.11 → pyqrackising-9.4.0}/pyqrackising/maxcut_tfim_streaming.py +0 -0
  16. {pyqrackising-9.3.11 → pyqrackising-9.4.0}/pyqrackising/maxcut_tfim_util.py +0 -0
  17. {pyqrackising-9.3.11 → pyqrackising-9.4.0}/pyqrackising/spin_glass_solver.py +0 -0
  18. {pyqrackising-9.3.11 → pyqrackising-9.4.0}/pyqrackising/spin_glass_solver_sparse.py +0 -0
  19. {pyqrackising-9.3.11 → pyqrackising-9.4.0}/pyqrackising/spin_glass_solver_streaming.py +0 -0
  20. {pyqrackising-9.3.11 → pyqrackising-9.4.0}/pyqrackising/tfim_magnetization.py +0 -0
  21. {pyqrackising-9.3.11 → pyqrackising-9.4.0}/pyqrackising/tfim_square_magnetization.py +0 -0
  22. {pyqrackising-9.3.11 → pyqrackising-9.4.0}/pyqrackising/tsp.py +0 -0
  23. {pyqrackising-9.3.11 → pyqrackising-9.4.0}/pyqrackising/tsp_maxcut.py +0 -0
  24. {pyqrackising-9.3.11 → pyqrackising-9.4.0}/pyqrackising.egg-info/SOURCES.txt +0 -0
  25. {pyqrackising-9.3.11 → pyqrackising-9.4.0}/pyqrackising.egg-info/dependency_links.txt +0 -0
  26. {pyqrackising-9.3.11 → pyqrackising-9.4.0}/pyqrackising.egg-info/not-zip-safe +0 -0
  27. {pyqrackising-9.3.11 → pyqrackising-9.4.0}/pyqrackising.egg-info/top_level.txt +0 -0
  28. {pyqrackising-9.3.11 → pyqrackising-9.4.0}/setup.cfg +0 -0
{pyqrackising-9.3.11/pyqrackising.egg-info → pyqrackising-9.4.0}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pyqrackising
-Version: 9.3.11
+Version: 9.4.0
 Summary: Fast MAXCUT, TSP, and sampling heuristics from near-ideal transverse field Ising model (TFIM)
 Home-page: https://github.com/vm6502q/PyQrackIsing
 Author: Dan Strano
{pyqrackising-9.3.11 → pyqrackising-9.4.0}/pyproject.toml
@@ -10,7 +10,7 @@ build-backend = "setuptools.build_meta"

 [project]
 name = "pyqrackising"
-version = "9.3.11"
+version = "9.4.0"
 requires-python = ">=3.8"
 description = "Fast MAXCUT, TSP, and sampling heuristics from near-ideal transverse field Ising model (TFIM)"
 readme = {file = "README.txt", content-type = "text/markdown"}
{pyqrackising-9.3.11 → pyqrackising-9.4.0}/pyqrackising/otoc.py
@@ -32,32 +32,23 @@ def get_otoc_hamming_distribution(J=-1.0, h=2.0, z=4, theta=0.174532925199432957

     diff_theta *= cycles
     diff_phi *= cycles
-    # diff_lam = diff_phi

     diff_z = np.zeros(n_bias, dtype=np.float64)
-    diff_x = np.zeros(n_bias, dtype=np.float64)
-    diff_y = np.zeros(n_bias, dtype=np.float64)
     for b in pauli_string:
         match b:
             case 'X':
                 diff_z += diff_theta
-                diff_y += diff_phi
             case 'Z':
-                diff_x += diff_phi
-                diff_y += diff_theta
+                diff_z += diff_phi
             case 'Y':
-                diff_z += diff_theta
-                diff_x += diff_phi
+                diff_z += diff_theta + diff_phi
+            case _:
+                pass

     diff_z[0] += n_qubits
-    diff_x[0] += n_qubits
-    diff_y[0] += n_qubits
-
     diff_z /= diff_z.sum()
-    diff_x /= diff_x.sum()
-    diff_y /= diff_y.sum()

-    return { 'X': diff_x, 'Y': diff_y, 'Z': diff_z }
+    return diff_z


 @njit
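For callers of this module, the net effect of the hunk above is a simpler return type: 9.3.11 returned a dict of per-axis distributions keyed 'X', 'Y', and 'Z', while 9.4.0 folds the X/Y/Z biases into a single normalized Z-basis array. A minimal sketch of the difference, assuming get_otoc_hamming_distribution is importable from pyqrackising.otoc and that the parameters truncated from the hunk header keep their defaults:

    import numpy as np
    from pyqrackising.otoc import get_otoc_hamming_distribution

    # 9.4.0: one 1-D probability array over Hamming weights (normalized in
    # the hunk above by diff_z /= diff_z.sum()).
    dist = get_otoc_hamming_distribution()
    assert isinstance(dist, np.ndarray)
    assert np.isclose(dist.sum(), 1.0)

    # 9.3.11 returned {'X': ..., 'Y': ..., 'Z': ...}; callers that
    # subscripted the result (e.g. [...]['Z']) must drop the subscript.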
@@ -73,6 +64,30 @@ def fix_cdf(hamming_prob):
     return cum_prob


+@njit
+def factor_width(width):
+    col_len = int(np.floor(np.sqrt(width)))
+    while ((width // col_len) * col_len) != width:
+        col_len -= 1
+    row_len = width // col_len
+
+    return row_len, col_len
+
+
+# Provided by Google search AI
+def find_all_str_occurrences(main_string, sub_string):
+    indices = []
+    start_index = 0
+    while True:
+        index = main_string.find(sub_string, start_index)
+        if index == -1:
+            break
+        indices.append(index)
+        start_index = index + 1  # Start searching after the found occurrence
+
+    return indices
+
+
 def take_sample(n_qubits, sample, m, inv_dist):
     indices = [i for i in range(n_qubits)]
     tot_inv_dist = 0.0
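The two helpers added above are not new logic: a later hunk deletes them from their old position further down the file. The move also fixes factor_width to cast col_len to int (in 9.3.11, np.floor left it a float, which would break the new row_len >> 1 shift in get_inv_dist below). A quick standalone check of the intended behavior, with the @njit decorator omitted so it runs as plain Python:

    import numpy as np

    def factor_width(width):
        # Most-square integer factorization of `width`:
        # returns (row_len, col_len) with row_len * col_len == width.
        col_len = int(np.floor(np.sqrt(width)))
        while ((width // col_len) * col_len) != width:
            col_len -= 1
        row_len = width // col_len
        return row_len, col_len

    assert factor_width(56) == (8, 7)   # the default 56-qubit layout
    assert factor_width(49) == (7, 7)
    assert factor_width(13) == (13, 1)  # primes degenerate to a 13x1 grid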
@@ -81,11 +96,14 @@ def take_sample(n_qubits, sample, m, inv_dist):
     selected = []
     for i in range(m):
         r = tot_inv_dist * np.random.random()
-        p = 0.0
+        p = inv_dist[indices[0]]
         idx = 0
         while p < r:
-            p += inv_dist[indices[idx]]
             idx += 1
+            if idx >= len(indices):
+                idx = len(indices) - 1
+                break
+            p += inv_dist[indices[idx]]
         i = indices[idx]
         del indices[idx]
         selected.append(i)
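This hunk fixes two bugs in the weighted draw: 9.3.11 started the cumulative sum at 0.0 and incremented idx after accumulating, an off-by-one that skewed selection away from the first candidate, and nothing stopped idx from walking past the end of indices when floating-point round-off pushed r to the top of the range. A standalone sketch of the corrected step (draw_weighted is a hypothetical name, not the library's API):

    import numpy as np

    def draw_weighted(indices, inv_dist):
        # Inverse-transform draw: pick one entry of `indices` with
        # probability proportional to inv_dist[index].
        tot = sum(inv_dist[i] for i in indices)
        r = tot * np.random.random()
        p = inv_dist[indices[0]]
        idx = 0
        while p < r:
            idx += 1
            if idx >= len(indices):
                idx = len(indices) - 1  # clamp: guards against round-off in r
                break
            p += inv_dist[indices[idx]]
        return indices[idx]

    weights = np.array([0.1, 0.7, 0.2])
    picks = [draw_weighted([0, 1, 2], weights) for _ in range(10_000)]
    # index 1 should dominate, at roughly 70% of draws
    print(picks.count(1) / len(picks))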
@@ -96,92 +114,83 @@ def take_sample(n_qubits, sample, m, inv_dist):
     return sample


-@njit
-def factor_width(width):
-    col_len = np.floor(np.sqrt(width))
-    while ((width // col_len) * col_len) != width:
-        col_len -= 1
-    row_len = width // col_len
-
-    return row_len, col_len
-
-
-# Provided by Google search AI
-def find_all_str_occurrences(main_string, sub_string):
-    indices = []
-    start_index = 0
-    while True:
-        index = main_string.find(sub_string, start_index)
-        if index == -1:
-            break
-        indices.append(index)
-        start_index = index + 1  # Start searching after the found occurrence
+def get_willow_inv_dist(butterfly_idx_x, butterfly_idx_z, n_qubits, row_len, col_len):
+    inv_dist = np.zeros(n_qubits, dtype=np.float64)
+    for idx in butterfly_idx_x:
+        b_row, b_col = divmod(idx, row_len)
+        for q in range(n_qubits):
+            q_row, q_col = divmod(q, row_len)
+            inv_dist[q] -= abs(q_row - b_row) + abs(q_col - b_col)
+    for idx in butterfly_idx_z:
+        b_row, b_col = divmod(idx, row_len)
+        for q in range(n_qubits):
+            q_row, q_col = divmod(q, row_len)
+            inv_dist[q] += abs(q_row - b_row) + abs(q_col - b_col)
+    inv_dist += 1.0 - inv_dist.min()

-    return indices
+    return inv_dist


-def get_inv_dist(butterfly_idx, n_qubits, row_len):
+def get_inv_dist(butterfly_idx_x, butterfly_idx_z, n_qubits, row_len, col_len):
     inv_dist = np.zeros(n_qubits, dtype=np.float64)
-    for idx in butterfly_idx:
+    half_row = row_len >> 1
+    half_col = col_len >> 1
+    for idx in butterfly_idx_x:
+        b_row, b_col = divmod(idx, row_len)
         for q in range(n_qubits):
-            b_row = idx // row_len
-            b_col = idx % row_len
-            q_row = q // row_len
-            q_col = q % row_len
-            dist = (q_row - b_row) ** 2 + (q_col - b_col) ** 2
-            inv_dist[q] += 1.0 / (1.0 + dist)
+            q_row, q_col = divmod(q, row_len)
+            row_d = abs(q_row - b_row)
+            if row_d > half_row:
+                row_d = row_len - row_d
+            col_d = abs(q_col - b_col)
+            if col_d > half_col:
+                col_d = col_len - col_d
+            inv_dist[q] -= row_d + col_d
+    for idx in butterfly_idx_z:
+        b_row, b_col = divmod(idx, row_len)
+        for q in range(n_qubits):
+            q_row, q_col = divmod(q, row_len)
+            row_d = abs(q_row - b_row)
+            if row_d > half_row:
+                row_d = row_len - row_d
+            col_d = abs(q_col - b_col)
+            if col_d > half_col:
+                col_d = col_len - col_d
+            inv_dist[q] += row_d + col_d
+    inv_dist += 1.0 - inv_dist.min()

     return inv_dist


-def generate_otoc_samples(J=-1.0, h=2.0, z=4, theta=0.174532925199432957, t=5, n_qubits=56, cycles=1, pauli_string = 'X' + 'I' * 55, shots=100, measurement_basis='Z' * 56):
+def generate_otoc_samples(J=-1.0, h=2.0, z=4, theta=0.174532925199432957, t=5, n_qubits=56, cycles=1, pauli_string = 'X' + 'I' * 55, shots=100, is_orbifold=True):
     pauli_string = list(pauli_string)
     if len(pauli_string) != n_qubits:
         raise ValueError("OTOC pauli_string must be same length as n_qubits! (Use 'I' for qubits that aren't changed.)")

-    measurement_basis = list(measurement_basis)
-    if len(measurement_basis) != n_qubits:
-        raise ValueError("OTOC measurement_basis must be same length as n_qubits! (Use 'I' for excluded qubits.)")
-
-    thresholds = { key: fix_cdf(value) for key, value in get_otoc_hamming_distribution(J, h, z, theta, t, n_qubits, cycles, pauli_string).items() }
+    thresholds = fix_cdf(get_otoc_hamming_distribution(J, h, z, theta, t, n_qubits, cycles, pauli_string))

     row_len, col_len = factor_width(n_qubits)
     p_string = "".join(pauli_string)
     butterfly_idx_x = find_all_str_occurrences(p_string, 'X')
-    butterfly_idx_y = find_all_str_occurrences(p_string, 'Y')
     butterfly_idx_z = find_all_str_occurrences(p_string, 'Z')

-    inv_dist_x = get_inv_dist(butterfly_idx_x, n_qubits, row_len)
-    inv_dist_y = get_inv_dist(butterfly_idx_y, n_qubits, row_len)
-    inv_dist_z = get_inv_dist(butterfly_idx_z, n_qubits, row_len)
-
-    inv_dist = { 'X': inv_dist_x, 'Y': inv_dist_y, 'Z': inv_dist_z }
+    if is_orbifold:
+        inv_dist = get_inv_dist(butterfly_idx_x, butterfly_idx_z, n_qubits, row_len, col_len)
+    else:
+        inv_dist = get_willow_inv_dist(butterfly_idx_x, butterfly_idx_z, n_qubits, row_len, col_len)

     samples = []
     for _ in range(shots):
-        sample_3_axis = { 'X': 0, 'Y': 0, 'Z': 0 }
-        for key, value in thresholds.items():
-            # First dimension: Hamming weight
-            m = sample_mag(value)
-            if m == 0:
-                continue
-            if m >= n_qubits:
-                sample_3_axis[key] = (1 << n_qubits) - 1
-                continue
-
-            # Second dimension: permutation within Hamming weight
-            sample_3_axis[key] = take_sample(n_qubits, sample_3_axis[key], m, inv_dist[key])
-
-        sample = 0
-        j = 0
-        for i in range(n_qubits):
-            base = measurement_basis[i]
-            if base not in ['X', 'Y', 'Z']:
-                continue
-            if (sample_3_axis[base] >> i) & 1:
-                sample |= 1 << j
-            j += 1
-
-        samples.append(sample)
+        # First dimension: Hamming weight
+        m = sample_mag(thresholds)
+        if m == 0:
+            samples.append(0)
+            continue
+        if m >= n_qubits:
+            samples.append((1 << n_qubits) - 1)
+            continue
+
+        # Second dimension: permutation within Hamming weight
+        samples.append(take_sample(n_qubits, 0, m, inv_dist))

     return samples
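Taken together, the otoc.py changes replace the 9.3.11 three-axis sampler, along with its measurement_basis argument, with a single Z-basis sampler whose butterfly locality model is chosen by the new is_orbifold flag: True selects get_inv_dist, whose row/column distances wrap around the lattice edges, and False selects get_willow_inv_dist, which uses plain non-wrapping Manhattan distances. A hedged usage sketch against the new signature (the import path and the bit-mask reading of each shot are assumptions; only the parameter names and defaults are confirmed by this diff):

    from pyqrackising.otoc import generate_otoc_samples

    n_qubits = 56
    samples = generate_otoc_samples(
        n_qubits=n_qubits,
        pauli_string='X' + 'I' * (n_qubits - 1),  # butterfly operator on qubit 0
        shots=100,
        is_orbifold=True,  # new in 9.4.0; replaces the removed measurement_basis
    )

    # Each shot appears to be an n_qubits-bit integer; print one as a bit string.
    print(f"{samples[0]:0{n_qubits}b}")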
{pyqrackising-9.3.11 → pyqrackising-9.4.0/pyqrackising.egg-info}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pyqrackising
-Version: 9.3.11
+Version: 9.4.0
 Summary: Fast MAXCUT, TSP, and sampling heuristics from near-ideal transverse field Ising model (TFIM)
 Home-page: https://github.com/vm6502q/PyQrackIsing
 Author: Dan Strano
{pyqrackising-9.3.11 → pyqrackising-9.4.0}/setup.py
@@ -7,7 +7,7 @@ with open(README_PATH) as readme_file:

 setup(
     name='pyqrackising',
-    version='9.3.11',
+    version='9.4.0',
     author='Dan Strano',
     author_email='stranoj@gmail.com',
     description='Fast MAXCUT, TSP, and sampling heuristics from near-ideal transverse field Ising model (TFIM)',