pyqrackising 9.4.0.tar.gz → 9.5.1.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of pyqrackising has been flagged as a potentially problematic release.

Files changed (28)
  1. {pyqrackising-9.4.0/pyqrackising.egg-info → pyqrackising-9.5.1}/PKG-INFO +1 -1
  2. {pyqrackising-9.4.0 → pyqrackising-9.5.1}/pyproject.toml +1 -1
  3. {pyqrackising-9.4.0 → pyqrackising-9.5.1}/pyqrackising/maxcut_tfim_util.py +42 -7
  4. {pyqrackising-9.4.0 → pyqrackising-9.5.1}/pyqrackising/otoc.py +64 -47
  5. {pyqrackising-9.4.0 → pyqrackising-9.5.1/pyqrackising.egg-info}/PKG-INFO +1 -1
  6. {pyqrackising-9.4.0 → pyqrackising-9.5.1}/setup.py +1 -1
  7. {pyqrackising-9.4.0 → pyqrackising-9.5.1}/LICENSE.md +0 -0
  8. {pyqrackising-9.4.0 → pyqrackising-9.5.1}/MANIFEST.in +0 -0
  9. {pyqrackising-9.4.0 → pyqrackising-9.5.1}/README.md +0 -0
  10. {pyqrackising-9.4.0 → pyqrackising-9.5.1}/pyqrackising/__init__.py +0 -0
  11. {pyqrackising-9.4.0 → pyqrackising-9.5.1}/pyqrackising/convert_tensor_network_to_tsp.py +0 -0
  12. {pyqrackising-9.4.0 → pyqrackising-9.5.1}/pyqrackising/generate_tfim_samples.py +0 -0
  13. {pyqrackising-9.4.0 → pyqrackising-9.5.1}/pyqrackising/kernels.cl +0 -0
  14. {pyqrackising-9.4.0 → pyqrackising-9.5.1}/pyqrackising/maxcut_tfim.py +0 -0
  15. {pyqrackising-9.4.0 → pyqrackising-9.5.1}/pyqrackising/maxcut_tfim_sparse.py +0 -0
  16. {pyqrackising-9.4.0 → pyqrackising-9.5.1}/pyqrackising/maxcut_tfim_streaming.py +0 -0
  17. {pyqrackising-9.4.0 → pyqrackising-9.5.1}/pyqrackising/spin_glass_solver.py +0 -0
  18. {pyqrackising-9.4.0 → pyqrackising-9.5.1}/pyqrackising/spin_glass_solver_sparse.py +0 -0
  19. {pyqrackising-9.4.0 → pyqrackising-9.5.1}/pyqrackising/spin_glass_solver_streaming.py +0 -0
  20. {pyqrackising-9.4.0 → pyqrackising-9.5.1}/pyqrackising/tfim_magnetization.py +0 -0
  21. {pyqrackising-9.4.0 → pyqrackising-9.5.1}/pyqrackising/tfim_square_magnetization.py +0 -0
  22. {pyqrackising-9.4.0 → pyqrackising-9.5.1}/pyqrackising/tsp.py +0 -0
  23. {pyqrackising-9.4.0 → pyqrackising-9.5.1}/pyqrackising/tsp_maxcut.py +0 -0
  24. {pyqrackising-9.4.0 → pyqrackising-9.5.1}/pyqrackising.egg-info/SOURCES.txt +0 -0
  25. {pyqrackising-9.4.0 → pyqrackising-9.5.1}/pyqrackising.egg-info/dependency_links.txt +0 -0
  26. {pyqrackising-9.4.0 → pyqrackising-9.5.1}/pyqrackising.egg-info/not-zip-safe +0 -0
  27. {pyqrackising-9.4.0 → pyqrackising-9.5.1}/pyqrackising.egg-info/top_level.txt +0 -0
  28. {pyqrackising-9.4.0 → pyqrackising-9.5.1}/setup.cfg +0 -0
{pyqrackising-9.4.0/pyqrackising.egg-info → pyqrackising-9.5.1}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pyqrackising
-Version: 9.4.0
+Version: 9.5.1
 Summary: Fast MAXCUT, TSP, and sampling heuristics from near-ideal transverse field Ising model (TFIM)
 Home-page: https://github.com/vm6502q/PyQrackIsing
 Author: Dan Strano
{pyqrackising-9.4.0 → pyqrackising-9.5.1}/pyproject.toml
@@ -10,7 +10,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "pyqrackising"
-version = "9.4.0"
+version = "9.5.1"
 requires-python = ">=3.8"
 description = "Fast MAXCUT, TSP, and sampling heuristics from near-ideal transverse field Ising model (TFIM)"
 readme = {file = "README.txt", content-type = "text/markdown"}
{pyqrackising-9.4.0 → pyqrackising-9.5.1}/pyqrackising/maxcut_tfim_util.py
@@ -389,22 +389,57 @@ def init_theta(h_mult, n_qubits, J_eff, degrees):
     return theta
 
 
+# From Google Search AI
+@njit
+def factorial(num):
+    """Calculates the factorial of a non-negative integer."""
+    if num == 0:
+        return 1
+
+    result = 1
+    for i in range(1, num + 1):
+        result *= i
+
+    return result
+
+
+# From Google Search AI
+@njit
+def comb(n, k):
+    """
+    Calculates the number of combinations (n choose k) from scratch.
+    n: The total number of items.
+    k: The number of items to choose.
+    """
+    # Optimize by choosing the smaller of k and (n-k)
+    # This reduces the number of multiplications in the factorial calculation
+    k = min(k, n - k)
+
+    # Calculate the numerator: n * (n-1) * ... * (n-k+1)
+    numerator = 1
+    for i in range(k):
+        numerator *= (n - i)
+
+    # Calculate the denominator: k!
+    denominator = factorial(k)
+
+    return numerator // denominator
+
+
 @njit
 def init_thresholds(n_qubits):
     n_bias = n_qubits - 1
     thresholds = np.empty(n_bias, dtype=np.float64)
     tot_prob = 0
-    p = 1.0
-    if n_qubits & 1:
-        q = n_qubits // 2
-        thresholds[q - 1] = p
-        tot_prob = p
-        p /= 2
+    p = n_qubits
     for q in range(1, n_qubits // 2):
         thresholds[q - 1] = p
         thresholds[n_bias - q] = p
         tot_prob += 2 * p
-        p /= 2
+        p = comb(n_qubits, q + 1)
+    if n_qubits & 1:
+        thresholds[q - 1] = p
+        tot_prob += p
     thresholds /= tot_prob
 
     return thresholds
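In short, init_thresholds now weights Hamming-weight buckets by binomial coefficients (via the new Numba-compatible comb helper) instead of halving p at each step as in 9.4.0. A minimal illustrative sketch of that weighting shape, in plain Python with math.comb rather than the package's own function:

from math import comb

def binomial_weights(n_qubits):
    # Normalized binomial coefficients C(n_qubits, q) for q = 1 .. n_qubits - 1,
    # roughly the shape the updated init_thresholds draws its weights from
    # (the 9.4.0 code stepped p by factors of 1/2 instead).
    weights = [comb(n_qubits, q) for q in range(1, n_qubits)]
    total = sum(weights)
    return [w / total for w in weights]

print(binomial_weights(6))  # symmetric, peaked at q = n_qubits // 2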
{pyqrackising-9.4.0 → pyqrackising-9.5.1}/pyqrackising/otoc.py
@@ -1,4 +1,5 @@
 from .maxcut_tfim_util import probability_by_hamming_weight, sample_mag, opencl_context
+import math
 from numba import njit
 import numpy as np
 import sys
@@ -7,45 +8,64 @@ import sys
 epsilon = opencl_context.epsilon
 
 
-def get_otoc_hamming_distribution(J=-1.0, h=2.0, z=4, theta=0.174532925199432957, t=5, n_qubits=56, cycles=1, pauli_string = 'X' + 'I' * 55):
-    pauli_string = list(pauli_string)
-    if len(pauli_string) != n_qubits:
-        raise ValueError("OTOCS pauli_string must be same length as n_qubits! (Use 'I' for qubits that aren't changed.)")
-
+def get_otoc_hamming_distribution(J=-1.0, h=2.0, z=4, theta=0.0, t=5, n_qubits=65, pauli_strings = ['X' + 'I' * 64]):
     n_bias = n_qubits + 1
     if h <= epsilon:
         bias = np.empty(n_bias, dtype=np.float64)
         bias[0] = 1.0
-        return { 'X': bias, 'Y': bias, 'Z': bias }
-
-    fwd = probability_by_hamming_weight(J, h, z, theta, t, n_qubits + 1)
-    rev = probability_by_hamming_weight(-J, -h, z, theta + np.pi, t, n_qubits + 1)
-    diff_theta = rev - fwd
-
-    phi = theta + np.pi / 2
-    fwd = probability_by_hamming_weight(-h, -J, z, phi, t, n_qubits + 1)
-    rev = probability_by_hamming_weight(h, J, z, phi + np.pi, t, n_qubits + 1)
-    diff_phi = rev - fwd
-
-    # Lambda (Y-axis) is at a right angle to both J and h,
-    # so there is no difference in this dimension.
-
-    diff_theta *= cycles
-    diff_phi *= cycles
+        return bias
+
+    max_entropy = np.empty(n_bias, dtype=np.float64)
+    tot_prob = 0
+    p = 1.0
+    for q in range(n_qubits // 2):
+        max_entropy[q] = p
+        max_entropy[n_bias - (q + 1)] = p
+        tot_prob += 2 * p
+        p = math.comb(n_qubits, q + 1)
+    if n_qubits & 1:
+        max_entropy[q - 1] = p
+        tot_prob += p
+    max_entropy /= tot_prob
 
     diff_z = np.zeros(n_bias, dtype=np.float64)
-    for b in pauli_string:
-        match b:
-            case 'X':
-                diff_z += diff_theta
-            case 'Z':
-                diff_z += diff_phi
-            case 'Y':
-                diff_z += diff_theta + diff_phi
-            case _:
-                pass
-
-    diff_z[0] += n_qubits
+    for pauli_string in pauli_strings:
+        pauli_string = list(pauli_string)
+        if len(pauli_string) != n_qubits:
+            raise ValueError("OTOCS pauli_string must be same length as n_qubits! (Use 'I' for qubits that aren't changed.)")
+
+        fwd = probability_by_hamming_weight(J, h, z, theta, t, n_qubits + 1)
+        rev = probability_by_hamming_weight(-J, -h, z, theta + np.pi, t, n_qubits + 1)
+        diff_theta = rev - fwd
+
+        phi = theta + np.pi / 2
+        fwd = probability_by_hamming_weight(-h, -J, z, phi, t, n_qubits + 1)
+        rev = probability_by_hamming_weight(h, J, z, phi + np.pi, t, n_qubits + 1)
+        diff_phi = rev - fwd
+
+        # Lambda (Y-axis) is at a right angle to both J and h,
+        # so there is no difference in this dimension.
+
+        diff_z[0] += n_qubits
+        entropy_frac = 0
+        for b in pauli_string:
+            match b:
+                case 'X':
+                    diff_z += diff_theta
+                    entropy_frac += 1
+                case 'Z':
+                    diff_z += diff_phi
+                    entropy_frac += 1
+                case 'Y':
+                    diff_z += diff_theta + diff_phi
+                    entropy_frac += 1
+                case _:
+                    pass
+
+        entropy_frac /= n_qubits
+        diff_z = ((1 - entropy_frac) / n_qubits) * diff_z + entropy_frac * max_entropy
+
+    # Normalize:
     diff_z /= diff_z.sum()
 
     return diff_z
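The updated get_otoc_hamming_distribution accepts a list of Pauli strings and, for each string, blends its Hamming-weight distribution with a "maximal entropy" binomial reference, weighted by entropy_frac, the fraction of non-identity Paulis in the string. A minimal sketch of that reference distribution and the mixing step, using plain numpy and math.comb rather than the package's own loop (an illustration, not the released code):

import numpy as np
from math import comb

def max_entropy_reference(n_qubits):
    # Binomial distribution over Hamming weights 0 .. n_qubits, i.e. the weight
    # profile of a uniformly random bit string; this is the role the max_entropy
    # array plays in the hunk above.
    weights = np.array([comb(n_qubits, q) for q in range(n_qubits + 1)], dtype=np.float64)
    return weights / weights.sum()

# Mixing step, as in the hunk above, for one Pauli string:
#   entropy_frac = (number of non-'I' characters) / n_qubits
#   diff_z = ((1 - entropy_frac) / n_qubits) * diff_z + entropy_frac * max_entropy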
@@ -162,22 +182,19 @@ def get_inv_dist(butterfly_idx_x, butterfly_idx_z, n_qubits, row_len, col_len):
     return inv_dist
 
 
-def generate_otoc_samples(J=-1.0, h=2.0, z=4, theta=0.174532925199432957, t=5, n_qubits=56, cycles=1, pauli_string = 'X' + 'I' * 55, shots=100, is_orbifold=True):
-    pauli_string = list(pauli_string)
-    if len(pauli_string) != n_qubits:
-        raise ValueError("OTOC pauli_string must be same length as n_qubits! (Use 'I' for qubits that aren't changed.)")
-
-    thresholds = fix_cdf(get_otoc_hamming_distribution(J, h, z, theta, t, n_qubits, cycles, pauli_string))
+def generate_otoc_samples(J=-1.0, h=2.0, z=4, theta=0.0, t=5, n_qubits=65, pauli_strings = ['X' + 'I' * 64], shots=100, is_orbifold=True):
+    thresholds = fix_cdf(get_otoc_hamming_distribution(J, h, z, theta, t, n_qubits, pauli_strings))
 
     row_len, col_len = factor_width(n_qubits)
-    p_string = "".join(pauli_string)
-    butterfly_idx_x = find_all_str_occurrences(p_string, 'X')
-    butterfly_idx_z = find_all_str_occurrences(p_string, 'Z')
-
-    if is_orbifold:
-        inv_dist = get_inv_dist(butterfly_idx_x, butterfly_idx_z, n_qubits, row_len, col_len)
-    else:
-        inv_dist = get_willow_inv_dist(butterfly_idx_x, butterfly_idx_z, n_qubits, row_len, col_len)
+    inv_dist = np.zeros(n_qubits, dtype=np.float64)
+    for pauli_string in pauli_strings:
+        butterfly_idx_x = find_all_str_occurrences(pauli_string, 'X')
+        butterfly_idx_z = find_all_str_occurrences(pauli_string, 'Z')
+        if is_orbifold:
+            inv_dist += get_inv_dist(butterfly_idx_x, butterfly_idx_z, n_qubits, row_len, col_len)
+        else:
+            inv_dist += get_willow_inv_dist(butterfly_idx_x, butterfly_idx_z, n_qubits, row_len, col_len)
+    inv_dist /= 2.0
 
     samples = []
     for _ in range(shots):
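With this signature change, callers of generate_otoc_samples pass a list of Pauli strings (the cycles argument is gone and theta now defaults to 0.0). A hypothetical usage sketch, with the keyword arguments taken verbatim from the hunk above and the import path assumed from the file list rather than confirmed against the package's __init__.py:

from pyqrackising.otoc import generate_otoc_samples  # import path assumed

n_qubits = 65
samples = generate_otoc_samples(
    J=-1.0, h=2.0, z=4, theta=0.0, t=5,
    n_qubits=n_qubits,
    pauli_strings=['X' + 'I' * (n_qubits - 1), 'Z' + 'I' * (n_qubits - 1)],
    shots=100,
    is_orbifold=True,
)
# samples collects one entry per shot, drawn from the blended OTOC distribution.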
{pyqrackising-9.4.0 → pyqrackising-9.5.1/pyqrackising.egg-info}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pyqrackising
-Version: 9.4.0
+Version: 9.5.1
 Summary: Fast MAXCUT, TSP, and sampling heuristics from near-ideal transverse field Ising model (TFIM)
 Home-page: https://github.com/vm6502q/PyQrackIsing
 Author: Dan Strano
{pyqrackising-9.4.0 → pyqrackising-9.5.1}/setup.py
@@ -7,7 +7,7 @@ with open(README_PATH) as readme_file:
 
 setup(
     name='pyqrackising',
-    version='9.4.0',
+    version='9.5.1',
     author='Dan Strano',
     author_email='stranoj@gmail.com',
     description='Fast MAXCUT, TSP, and sampling heuristics from near-ideal transverse field Ising model (TFIM)',