pyqrackising 9.5.9-py3-none-manylinux_2_35_x86_64.whl → 9.7.0-py3-none-manylinux_2_35_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -22,6 +22,9 @@ def update_repulsion_choice(G_m, weights, n, used, node, repulsion_base):
     # Select node
     used[node] = True
 
+    if abs(1.0 - repulsion_base) <= epsilon:
+        return
+
     # Repulsion: penalize neighbors
     for nbr in range(n):
         if used[nbr]:
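This guard, added identically to the dense, sparse, and streaming variants of update_repulsion_choice below, short-circuits the neighbor-penalty pass: when repulsion_base is numerically 1.0, any penalty factor derived from it is also 1, so the loop cannot change the weights. A minimal standalone sketch of the idea (the epsilon tolerance and the multiplicative penalty form are illustrative assumptions, not the package's exact code):

import numpy as np

epsilon = 1e-12  # assumed tolerance; the module defines its own

def update_repulsion_choice(G_m, weights, n, used, node, repulsion_base):
    # Select node
    used[node] = True

    # A base of exactly 1.0 makes every penalty factor below equal to 1,
    # so skipping the O(n) scan returns identical weights.
    if abs(1.0 - repulsion_base) <= epsilon:
        return

    # Repulsion: penalize neighbors (hypothetical penalty form)
    for nbr in range(n):
        if used[nbr]:
            continue
        weights[nbr] *= repulsion_base ** abs(G_m[node, nbr])

# With repulsion_base == 1.0 the weights come back untouched:
G = np.array([[0.0, 1.0], [1.0, 0.0]])
w = np.ones(2)
update_repulsion_choice(G, w, 2, np.zeros(2, dtype=np.bool_), 0, 1.0)
print(w)  # [1. 1.]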
@@ -22,6 +22,9 @@ def update_repulsion_choice(G_data, G_rows, G_cols, weights, n, used, node, repulsion_base):
     # Select node
     used[node] = True
 
+    if abs(1.0 - repulsion_base) <= epsilon:
+        return
+
     # Repulsion: penalize neighbors
     for j in range(G_rows[node], G_rows[node + 1]):
         nbr = G_cols[j]
@@ -324,7 +327,7 @@ def maxcut_tfim_sparse(
         best_solution, best_value = sample_for_opencl(G_m.data, G_m.indptr, G_m.indices, G_data_buf, G_rows_buf, G_cols_buf, shots, cum_prob, repulsion_base, is_spin_glass, is_segmented, segment_size, theta_segment_size)
     else:
         thread_count = os.cpu_count() ** 2
-        best_solution, best_value = sample_measurement(G_data, G_rows, G_cols, shots, thread_count, cum_prob, repulsion_base, is_spin_glass)
+        best_solution, best_value = sample_measurement(G_m.data, G_m.indptr, G_m.indices, shots, thread_count, cum_prob, repulsion_base, is_spin_glass)
 
     bit_string, l, r = get_cut(best_solution, nodes, n_qubits)
 
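In the CPU fallback branch above, the fix passes the CSR buffers taken directly from the SciPy matrix (G_m.data, G_m.indptr, G_m.indices), the same buffers the OpenCL branch already uses. For reference, a standalone sketch of how scipy.sparse.csr_matrix exposes those three arrays (not package code):

import numpy as np
from scipy.sparse import csr_matrix

# A tiny weighted adjacency matrix in CSR form.
G_m = csr_matrix(np.array([[0.0, 2.0, 0.0],
                           [2.0, 0.0, 3.0],
                           [0.0, 3.0, 0.0]]))

print(G_m.data)     # non-zero values   (the "G_data" argument)
print(G_m.indptr)   # row pointers      (the "G_rows" argument)
print(G_m.indices)  # column indices    (the "G_cols" argument)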
 
@@ -15,6 +15,9 @@ def update_repulsion_choice(G_func, nodes, weights, n, used, node, repulsion_base):
     # Select node
     used[node] = True
 
+    if abs(1.0 - repulsion_base) <= epsilon:
+        return
+
     # Repulsion: penalize neighbors
     for nbr in range(n):
         if used[nbr]:
@@ -390,19 +390,21 @@ def init_theta(h_mult, n_qubits, J_eff, degrees):
 
 
 def init_thresholds(n_qubits):
-    n_bias = n_qubits - 1
+    n_bias = n_qubits + 1
     thresholds = np.empty(n_bias, dtype=np.float64)
-    tot_prob = 0
-    p = n_qubits
-    for q in range(1, n_qubits >> 1):
-        thresholds[q - 1] = p
-        thresholds[n_bias - q] = p
-        tot_prob += 2 * p
+    normalizer = 0
+    for q in range(n_qubits >> 1):
+        normalizer += math.comb(n_qubits, q) << 1
+    if n_qubits & 1:
+        normalizer += math.comb(n_qubits, n_qubits >> 1)
+    p = 1
+    for q in range(n_qubits >> 1):
+        val = p / normalizer
+        thresholds[q] = val
+        thresholds[n_bias - (q + 1)] = val
        p = math.comb(n_qubits, q + 1)
     if n_qubits & 1:
-        thresholds[n_qubits >> 1] = p
-        tot_prob += p
-    thresholds /= tot_prob
+        thresholds[n_qubits >> 1] = p / normalizer
 
     return thresholds
 
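The rewritten init_thresholds allocates n_qubits + 1 entries (the old version allocated n_qubits - 1) and computes the binomial normalizer up front, so each entry it fills is already a probability rather than an unnormalized count divided at the end. A standalone sketch that mirrors the new construction (np and math as imported by the module; the print is only a quick sanity check):

import math
import numpy as np

def init_thresholds(n_qubits):
    n_bias = n_qubits + 1
    thresholds = np.empty(n_bias, dtype=np.float64)
    # Normalizer: sum of the binomial coefficients that will be written below.
    normalizer = 0
    for q in range(n_qubits >> 1):
        normalizer += math.comb(n_qubits, q) << 1
    if n_qubits & 1:
        normalizer += math.comb(n_qubits, n_qubits >> 1)
    # Fill symmetric pairs C(n_qubits, q) / normalizer from both ends inward.
    p = 1
    for q in range(n_qubits >> 1):
        val = p / normalizer
        thresholds[q] = val
        thresholds[n_bias - (q + 1)] = val
        p = math.comb(n_qubits, q + 1)
    if n_qubits & 1:
        thresholds[n_qubits >> 1] = p / normalizer
    return thresholds

thr = init_thresholds(5)
print(thr[0], thr[1], thr[2])  # 1/22, 5/22, 10/22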
 
@@ -438,7 +440,7 @@ def probability_by_hamming_weight(J, h, z, theta, t, n_bias, normalized=True):
     return bias
 
 
-@njit(parallel=True)
+@njit
 def maxcut_hamming_cdf(hamming_prob, n_qubits, J_func, degrees, quality, tot_t, h_mult):
     n_steps = 1 << quality
     delta_t = tot_t / n_steps
@@ -446,7 +448,7 @@
 
     theta = init_theta(h_mult, n_qubits, J_func, degrees)
 
-    for qc in prange(n_qubits, n_steps * n_qubits):
+    for qc in range(n_qubits, n_steps * n_qubits):
         step = qc // n_qubits
         q = qc % n_qubits
         J_eff = J_func[q]
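Together, the two hunks above turn maxcut_hamming_cdf into a plain sequential Numba kernel: the decorator drops parallel=True and the prange over (step, qubit) indices becomes an ordinary range. A minimal illustration of the difference between the two forms (assumes numba is installed; the body is a placeholder reduction, not the real kernel):

import numpy as np
from numba import njit, prange

@njit(parallel=True)
def parallel_sum(x):
    total = 0.0
    for i in prange(x.shape[0]):  # iterations may be distributed across threads
        total += x[i]
    return total

@njit
def sequential_sum(x):
    total = 0.0
    for i in range(x.shape[0]):   # iterations run in order on a single thread
        total += x[i]
    return total

x = np.arange(10.0)
assert parallel_sum(x) == sequential_sum(x) == 45.0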
pyqrackising/otoc.py CHANGED
@@ -15,21 +15,22 @@ def get_otoc_hamming_distribution(J=-1.0, h=2.0, z=4, theta=0.0, t=5, n_qubits=6
         bias[0] = 1.0
         return bias
 
-    max_entropy = np.empty(n_bias, dtype=np.float64)
+    diff_x = np.empty(n_bias, dtype=np.float64)
     tot_prob = 0
     p = 1.0
     for q in range(n_qubits >> 1):
-        max_entropy[q] = p
-        max_entropy[n_bias - (q + 1)] = p
+        diff_x[q] = p
+        diff_x[n_bias - (q + 1)] = p
         tot_prob += 2 * p
         p = math.comb(n_qubits, q + 1)
     if n_qubits & 1:
-        max_entropy[n_qubits >> 1] = p
+        diff_x[n_qubits >> 1] = p
         tot_prob += p
-    max_entropy /= tot_prob
+    diff_x *= n_qubits / tot_prob
 
     signal_frac = 0.0
     diff_z = np.zeros(n_bias, dtype=np.float64)
+    diff_z[0] = n_qubits
     for pauli_string in pauli_strings:
         pauli_string = list(pauli_string)
         if len(pauli_string) != n_qubits:
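Two changes stand out in this hunk: the binomial Hamming-weight profile is renamed from max_entropy to diff_x and rescaled by n_qubits / tot_prob instead of 1 / tot_prob, so its filled entries sum to n_qubits rather than 1, and diff_z now starts with diff_z[0] = n_qubits instead of all zeros. A quick check of the rescaling (standalone sketch; it assumes n_bias = n_qubits + 1, consistent with the probability_by_hamming_weight(..., n_qubits + 1) calls in the next hunk, and uses np.zeros so the one entry the loop never writes stays harmless):

import math
import numpy as np

n_qubits = 5
n_bias = n_qubits + 1          # assumed, as in the surrounding module
diff_x = np.zeros(n_bias)
tot_prob = 0
p = 1.0
for q in range(n_qubits >> 1):
    diff_x[q] = p
    diff_x[n_bias - (q + 1)] = p
    tot_prob += 2 * p
    p = math.comb(n_qubits, q + 1)
if n_qubits & 1:
    diff_x[n_qubits >> 1] = p
    tot_prob += p
diff_x *= n_qubits / tot_prob

print(diff_x.sum())            # 5.0: scaled to n_qubits, not to 1 as before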
@@ -44,31 +45,30 @@ def get_otoc_hamming_distribution(J=-1.0, h=2.0, z=4, theta=0.0, t=5, n_qubits=6
         fwd = probability_by_hamming_weight(J, h, z, theta, t, n_qubits + 1)
         rev = probability_by_hamming_weight(-J, -h, z, theta + np.pi, t, n_qubits + 1)
         diff_theta = rev - fwd
-        diff_theta[0] += 1.0
 
         phi = theta + np.pi / 2
         fwd = probability_by_hamming_weight(-h, -J, z, phi, t, n_qubits + 1)
         rev = probability_by_hamming_weight(h, J, z, phi - np.pi, t, n_qubits + 1)
-        diff_phi = (rev - fwd) + max_entropy
-
-        diff_lam = (diff_theta + diff_phi) / 2
+        diff_phi = rev - fwd
 
         for b in pauli_string:
             match b:
                 case 'X':
                     diff_z += diff_theta
                 case 'Z':
-                    diff_z += diff_phi
+                    diff_x += diff_phi
                 case 'Y':
-                    diff_z += diff_lam
+                    diff_z += diff_theta
+                    diff_x += diff_phi
                 case _:
-                    diff_z[0] += 1.0
+                    pass
 
     # Normalize:
     diff_z /= diff_z.sum()
+    diff_x /= diff_x.sum()
 
     signal_frac = 2 ** signal_frac
-    diff_z = signal_frac * diff_z + (1 - signal_frac) * max_entropy
+    diff_z = signal_frac * diff_z + (1 - signal_frac) * diff_x
 
     # Normalize:
     diff_z /= diff_z.sum()
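The per-letter accumulation also changes: 'X' still adds the theta-axis difference to diff_z, 'Z' now adds the phi-axis difference to diff_x instead of diff_z, 'Y' contributes to both, and identity letters no longer bump diff_z[0]; both accumulators are then normalized and blended by signal_frac. A minimal sketch of just the dispatch (Python 3.10+ match statement; dummy arrays, not the real distributions):

import numpy as np

def accumulate(pauli_string, diff_theta, diff_phi, diff_z, diff_x):
    # Mirrors the per-letter dispatch in the updated get_otoc_hamming_distribution.
    for b in pauli_string:
        match b:
            case 'X':
                diff_z += diff_theta
            case 'Z':
                diff_x += diff_phi
            case 'Y':
                diff_z += diff_theta
                diff_x += diff_phi
            case _:
                pass  # identity: contributes nothing

diff_z, diff_x = np.zeros(4), np.zeros(4)
accumulate("XYZI", np.ones(4), 2 * np.ones(4), diff_z, diff_x)
print(diff_z, diff_x)  # [2. 2. 2. 2.] [4. 4. 4. 4.]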
@@ -139,7 +139,7 @@ def take_sample(n_qubits, sample, m, inv_dist):
     return sample
 
 
-def get_willow_inv_dist(butterfly_idx_x, butterfly_idx_z, n_qubits, row_len, col_len):
+def get_willow_inv_dist(butterfly_idx_x, butterfly_idx_z, n_qubits, row_len, col_len, t):
     inv_dist = np.zeros(n_qubits, dtype=np.float64)
     for idx in butterfly_idx_x:
         b_row, b_col = divmod(idx, row_len)
@@ -151,12 +151,12 @@ def get_willow_inv_dist(butterfly_idx_x, butterfly_idx_z, n_qubits, row_len, col_len):
         for q in range(n_qubits):
             q_row, q_col = divmod(q, row_len)
             inv_dist[q] -= abs(q_row - b_row) + abs(q_col - b_col)
-    inv_dist = 2 ** inv_dist
+    inv_dist = 2 ** (inv_dist / t)
 
     return inv_dist
 
 
-def get_inv_dist(butterfly_idx_x, butterfly_idx_z, n_qubits, row_len, col_len):
+def get_inv_dist(butterfly_idx_x, butterfly_idx_z, n_qubits, row_len, col_len, t):
     inv_dist = np.zeros(n_qubits, dtype=np.float64)
     half_row = row_len >> 1
     half_col = col_len >> 1
@@ -182,7 +182,7 @@ def get_inv_dist(butterfly_idx_x, butterfly_idx_z, n_qubits, row_len, col_len):
             if col_d > half_col:
                 col_d = col_len - col_d
             inv_dist[q] -= row_d + col_d
-    inv_dist = 2 ** inv_dist
+    inv_dist = 2 ** (inv_dist / t)
 
     return inv_dist
 
@@ -198,9 +198,9 @@ def generate_otoc_samples(J=-1.0, h=2.0, z=4, theta=0.0, t=5, n_qubits=65, pauli
         butterfly_idx_x = find_all_str_occurrences(pauli_string, 'X')
         butterfly_idx_z = find_all_str_occurrences(pauli_string, 'Z')
         if is_orbifold:
-            inv_dist += get_inv_dist(butterfly_idx_x, butterfly_idx_z, n_qubits, row_len, col_len)
+            inv_dist += get_inv_dist(butterfly_idx_x, butterfly_idx_z, n_qubits, row_len, col_len, t)
         else:
-            inv_dist += get_willow_inv_dist(butterfly_idx_x, butterfly_idx_z, n_qubits, row_len, col_len)
+            inv_dist += get_willow_inv_dist(butterfly_idx_x, butterfly_idx_z, n_qubits, row_len, col_len, t)
         inv_dist /= 2.0
 
     qubit_pows = [1 << q for q in range(n_qubits)]
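Both inverse-distance helpers now take the evolution time t and divide the accumulated negative Manhattan distances by it before exponentiating, and the two call sites in generate_otoc_samples forward t accordingly; for t > 1 the fall-off away from the butterfly qubits is flatter than before. A toy illustration of the scaling (standalone, with a made-up distance array):

import numpy as np

neg_distance = np.array([0.0, -1.0, -2.0, -3.0])  # -(Manhattan distance) per qubit

old_weight = 2.0 ** neg_distance                  # before: halves per unit distance
new_weight = lambda t: 2.0 ** (neg_distance / t)  # after: distance softened by t

print(old_weight)     # [1.    0.5   0.25  0.125]
print(new_weight(1))  # identical to the old behavior at t = 1
print(new_weight(5))  # much flatter: distant qubits keep more weight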
@@ -308,7 +308,7 @@ def spin_glass_solver(
     best_theta = np.array([b == "1" for b in list(bitstring)], dtype=np.bool_)
 
     if gray_iterations is None:
-        gray_iterations = n_qubits * os.cpu_count()
+        gray_iterations = n_qubits * n_qubits
 
     if gray_seed_multiple is None:
         gray_seed_multiple = os.cpu_count()
@@ -312,7 +312,7 @@ def spin_glass_solver_sparse(
     best_theta = np.array([b == "1" for b in list(bitstring)], dtype=np.bool_)
 
     if gray_iterations is None:
-        gray_iterations = n_qubits * os.cpu_count()
+        gray_iterations = n_qubits * n_qubits
 
     if gray_seed_multiple is None:
         gray_seed_multiple = os.cpu_count()
@@ -219,7 +219,7 @@ def spin_glass_solver_streaming(
     best_theta = np.array([b == "1" for b in list(bitstring)], dtype=np.bool_)
 
     if gray_iterations is None:
-        gray_iterations = n_qubits * os.cpu_count()
+        gray_iterations = n_qubits * n_qubits
 
     if gray_seed_multiple is None:
         gray_seed_multiple = os.cpu_count()
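The same default change appears in spin_glass_solver, spin_glass_solver_sparse, and spin_glass_solver_streaming: when gray_iterations is not supplied, it now defaults to n_qubits * n_qubits instead of n_qubits * os.cpu_count(), so the iteration budget depends only on problem size, while gray_seed_multiple still defaults to the core count. A sketch of how the defaults resolve (illustrative only):

import os

def resolve_defaults(n_qubits, gray_iterations=None, gray_seed_multiple=None):
    if gray_iterations is None:
        gray_iterations = n_qubits * n_qubits   # new: problem-size only
    if gray_seed_multiple is None:
        gray_seed_multiple = os.cpu_count()     # unchanged: machine-dependent
    return gray_iterations, gray_seed_multiple

print(resolve_defaults(64))  # (4096, <number of CPU cores on this machine>)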
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: pyqrackising
-Version: 9.5.9
+Version: 9.7.0
 Summary: Fast MAXCUT, TSP, and sampling heuristics from near-ideal transverse field Ising model (TFIM)
 Home-page: https://github.com/vm6502q/PyQrackIsing
 Author: Dan Strano
@@ -0,0 +1,20 @@
+pyqrackising/__init__.py,sha256=Q-fHQRApRseeFnnO6VfD1LpjPHCVkWoz0Gnaqmv6lp0,884
+pyqrackising/convert_tensor_network_to_tsp.py,sha256=IbZdPfHQ2bzYqDXUdHfwdZJh-pC8lri_cAgESaskQWI,3450
+pyqrackising/generate_tfim_samples.py,sha256=IlAz1l8oLExO6wJBO8LCQKlU_4ZPlyGsNE8xUt_iTrg,4762
+pyqrackising/maxcut_tfim.py,sha256=uqC7Ryw1ikehGfG4OFV7rHojWlx5u8wL2l8yxsmNkWY,10170
+pyqrackising/maxcut_tfim_sparse.py,sha256=YyenDVmXR46Pf9LClvpNEWjziToE6U7WOy68mKseTkk,11269
+pyqrackising/maxcut_tfim_streaming.py,sha256=7vPKulDRcQ-YvPacCNT11Ba-CXhBYggjpu_aDMov6NE,6410
+pyqrackising/maxcut_tfim_util.py,sha256=Ye8kNEunHQLdbTdyT8hTFlsRy0f3QCnKUDIwZXAm0rM,17003
+pyqrackising/otoc.py,sha256=PKmpjwkGpMmwXGWmeqtvbVj1NfE35i9kkIdjQnDUZKE,7208
+pyqrackising/spin_glass_solver.py,sha256=-hQAyTCRpadYhH4E8cT9kBFK9vdSDNK22XESXGSnfjc,13786
+pyqrackising/spin_glass_solver_sparse.py,sha256=F07Y92FCaUJHX1h1ydTFp6HjWLgo_A2buPErIvtYM7g,14734
+pyqrackising/spin_glass_solver_streaming.py,sha256=dDl7VYeu7BkJlLHTCCovaT3z6-Zx4RGwTVcNGzwxTXY,10012
+pyqrackising/tfim_magnetization.py,sha256=On1MhCNGGHRxJFRmCOpMcdqQJiy25gWkjz0Ka8i5f-Q,499
+pyqrackising/tfim_square_magnetization.py,sha256=9uJCT8ytyufcGFrZiignjCkWJr9UcP44sAAy0BIBw34,531
+pyqrackising/tsp.py,sha256=kqDxU2RCjad-T4tW_C9WO1I-COSwX7fHB6VhIuQsjfQ,62464
+pyqrackising/tsp_maxcut.py,sha256=ngxfSJgePXVwJXfNXYdk4jv1ISznx8zHOqR-Vbf33B0,9772
+pyqrackising-9.7.0.dist-info/LICENSE.md,sha256=46mU2C5kSwOnkqkw9XQAJlhBL2JAf1_uCD8lVcXyMRg,7652
+pyqrackising-9.7.0.dist-info/METADATA,sha256=Du6Br81vibtHb0s-WHKaPU3CC57uNaL1B9ken-RCj7o,12796
+pyqrackising-9.7.0.dist-info/WHEEL,sha256=AMMNaGlKLEICDqgnxZojk7k8N6wUjQQ3X9tPjxJ2sOc,110
+pyqrackising-9.7.0.dist-info/top_level.txt,sha256=bxlfGuLwzeVEI8Jm5D9HvC_WedgvvkSrpFwbGDjg-Ag,13
+pyqrackising-9.7.0.dist-info/RECORD,,
@@ -1,20 +0,0 @@
-pyqrackising/__init__.py,sha256=Q-fHQRApRseeFnnO6VfD1LpjPHCVkWoz0Gnaqmv6lp0,884
-pyqrackising/convert_tensor_network_to_tsp.py,sha256=IbZdPfHQ2bzYqDXUdHfwdZJh-pC8lri_cAgESaskQWI,3450
-pyqrackising/generate_tfim_samples.py,sha256=IlAz1l8oLExO6wJBO8LCQKlU_4ZPlyGsNE8xUt_iTrg,4762
-pyqrackising/maxcut_tfim.py,sha256=05nRjk5hhEjNFjtXdKVObL0cYYuJ8URfcjrOGK5tGi4,10106
-pyqrackising/maxcut_tfim_sparse.py,sha256=zMgaVmEgfZWL2tp728PuEg5Dc_jbQBUPsgbP3lXiMiA,11194
-pyqrackising/maxcut_tfim_streaming.py,sha256=EcRXKurqLiQs6pMNz-rhMp2YQzRXD726RnkXsPt4IJ0,6346
-pyqrackising/maxcut_tfim_util.py,sha256=2FSBcba8Ys8HEe_h-ayB68pAqeeJXeveEHGmZKg_tkI,16889
-pyqrackising/otoc.py,sha256=PEyq58cg9Bmj5_3z-i4w3TnYMuG9chWQv7tmib6eVYc,7215
-pyqrackising/spin_glass_solver.py,sha256=YtsIfYfpwhEMQPVd_sbjqpz6nQcrv8p2mUBYZ-wRpnM,13792
-pyqrackising/spin_glass_solver_sparse.py,sha256=E4Ft7H-uKXZyq1cS7Z77AgdkL5fMlKMK6DwnXgrgOhk,14740
-pyqrackising/spin_glass_solver_streaming.py,sha256=xpWq63yODMzBPQQtpDuB1Tro6ta8pPbVwc0ZkDWVASk,10018
-pyqrackising/tfim_magnetization.py,sha256=On1MhCNGGHRxJFRmCOpMcdqQJiy25gWkjz0Ka8i5f-Q,499
-pyqrackising/tfim_square_magnetization.py,sha256=9uJCT8ytyufcGFrZiignjCkWJr9UcP44sAAy0BIBw34,531
-pyqrackising/tsp.py,sha256=kqDxU2RCjad-T4tW_C9WO1I-COSwX7fHB6VhIuQsjfQ,62464
-pyqrackising/tsp_maxcut.py,sha256=ngxfSJgePXVwJXfNXYdk4jv1ISznx8zHOqR-Vbf33B0,9772
-pyqrackising-9.5.9.dist-info/LICENSE.md,sha256=46mU2C5kSwOnkqkw9XQAJlhBL2JAf1_uCD8lVcXyMRg,7652
-pyqrackising-9.5.9.dist-info/METADATA,sha256=WE07pR3mDv51yMB8HPa0V9945wD6q7EbYVesGSvK5kQ,12796
-pyqrackising-9.5.9.dist-info/WHEEL,sha256=AMMNaGlKLEICDqgnxZojk7k8N6wUjQQ3X9tPjxJ2sOc,110
-pyqrackising-9.5.9.dist-info/top_level.txt,sha256=bxlfGuLwzeVEI8Jm5D9HvC_WedgvvkSrpFwbGDjg-Ag,13
-pyqrackising-9.5.9.dist-info/RECORD,,