pyqrackising-9.5.3-py3-none-win_amd64.whl → pyqrackising-9.7.1-py3-none-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
pyqrackising/maxcut_tfim.py CHANGED
@@ -3,7 +3,7 @@ import numpy as np
 from numba import njit, prange
 import os
 
-from .maxcut_tfim_util import compute_cut, compute_energy, convert_bool_to_uint, get_cut, get_cut_base, make_G_m_buf, make_theta_buf, maxcut_hamming_cdf, opencl_context, sample_mag, setup_opencl, bit_pick
+from .maxcut_tfim_util import compute_cut, compute_energy, convert_bool_to_uint, get_cut, get_cut_base, heuristic_threshold, init_thresholds, make_G_m_buf, make_theta_buf, maxcut_hamming_cdf, opencl_context, sample_mag, setup_opencl, bit_pick
 
 IS_OPENCL_AVAILABLE = True
 try:
@@ -22,6 +22,9 @@ def update_repulsion_choice(G_m, weights, n, used, node, repulsion_base):
     # Select node
     used[node] = True
 
+    if abs(1.0 - repulsion_base) <= epsilon:
+        return
+
     # Repulsion: penalize neighbors
     for nbr in range(n):
         if used[nbr]:
@@ -177,21 +180,6 @@ def init_J_and_z(G_m, repulsion_base):
     return J_eff, degrees
 
 
-@njit
-def cpu_footer(shots, thread_count, quality, n_qubits, G_m, nodes, is_spin_glass, anneal_t, anneal_h, repulsion_base):
-    J_eff, degrees = init_J_and_z(G_m, repulsion_base)
-    hamming_prob = maxcut_hamming_cdf(n_qubits, J_eff, degrees, quality, anneal_t, anneal_h)
-
-    degrees = None
-    J_eff = None
-
-    best_solution, best_value = sample_measurement(G_m, shots, thread_count, hamming_prob, repulsion_base, is_spin_glass)
-
-    bit_string, l, r = get_cut(best_solution, nodes, n_qubits)
-
-    return bit_string, best_value, (l, r)
-
-
 def run_cut_opencl(best_energy, samples, G_m_buf, is_segmented, local_size, global_size, args_buf, local_energy_buf, local_index_buf, max_energy_host, max_index_host, max_energy_buf, max_index_buf):
     queue = opencl_context.queue
     calculate_cut_kernel = opencl_context.calculate_cut_segmented_kernel if is_segmented else opencl_context.calculate_cut_kernel
@@ -253,6 +241,61 @@ run_cut_opencl(best_energy, samples, G_m_buf, is_segmented, local_size, glob
 
     return samples[max_index_host[best_x]], energy
 
+
+@njit
+def exact_maxcut(G):
+    """Brute-force exact MAXCUT solver using Numba JIT."""
+    n = G.shape[0]
+    max_cut = -1.0
+    best_mask = 0
+
+    # Enumerate all 2^n possible bitstrings
+    for mask in range(1 << n):
+        cut = 0.0
+        for i in range(n):
+            bi = (mask >> i) & 1
+            for j in range(i + 1, n):
+                if bi != ((mask >> j) & 1):
+                    cut += G[i, j]
+        if cut > max_cut:
+            max_cut = cut
+            best_mask = mask
+
+    # Reconstruct best bitstring
+    best_bits = np.zeros(n, dtype=np.bool_)
+    for i in range(n):
+        best_bits[i] = (best_mask >> i) & 1
+
+    return best_bits, max_cut
+
+
+@njit
+def exact_spin_glass(G):
+    """Brute-force exact spin-glass solver using Numba JIT."""
+    n = G.shape[0]
+    max_cut = -1.0
+    best_mask = 0
+
+    # Enumerate all 2^n possible bitstrings
+    for mask in range(1 << n):
+        cut = 0.0
+        for i in range(n):
+            bi = (mask >> i) & 1
+            for j in range(i + 1, n):
+                val = G[i, j]
+                cut += val if bi == ((mask >> j) & 1) else -val
+        if cut > max_cut:
+            max_cut = cut
+            best_mask = mask
+
+    # Reconstruct best bitstring
+    best_bits = np.zeros(n, dtype=np.bool_)
+    for i in range(n):
+        best_bits[i] = (best_mask >> i) & 1
+
+    return best_bits, max_cut
+
+
 def maxcut_tfim(
     G,
     quality=None,
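Note: below heuristic_threshold (24 nodes in this release), maxcut_tfim now routes to these brute-force solvers instead of the old special cases for n < 3. The following is a minimal standalone sketch of the same enumeration in plain NumPy, without Numba; the function name brute_force_maxcut and the toy graph are illustrative, not part of the package:

    import numpy as np

    def brute_force_maxcut(G):
        # Enumerate every bipartition as a bitmask; O(2^n * n^2) overall.
        n = G.shape[0]
        best_mask, best_cut = 0, -1.0
        for mask in range(1 << n):
            cut = 0.0
            for i in range(n):
                for j in range(i + 1, n):
                    if ((mask >> i) & 1) != ((mask >> j) & 1):
                        cut += G[i, j]
            if cut > best_cut:
                best_mask, best_cut = mask, cut
        bits = np.array([(best_mask >> q) & 1 for q in range(n)], dtype=bool)
        return bits, best_cut

    # A 4-cycle with unit weights has an optimal cut value of 4 (alternate the partition).
    G = np.zeros((4, 4))
    for u, v in [(0, 1), (1, 2), (2, 3), (0, 3)]:
        G[u, v] = 1.0  # upper-triangular weights, as in the dense G_m convention
    print(brute_force_maxcut(G))  # one optimal bipartition and 4.0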
@@ -275,22 +318,15 @@
 
     n_qubits = len(G_m)
 
-    if n_qubits < 3:
-        empty = [nodes[0]]
-        empty.clear()
-
-        if n_qubits == 0:
-            return "", 0, (empty, empty.copy())
-
-        if n_qubits == 1:
-            return "0", 0, (nodes, empty)
+    if n_qubits < heuristic_threshold:
+        best_solution, best_value = exact_spin_glass(G_m) if is_spin_glass else exact_maxcut(G_m)
+        bit_string, l, r = get_cut(best_solution, nodes, n_qubits)
 
-        if n_qubits == 2:
-            weight = G_m[0, 1]
-            if weight < 0.0:
-                return "00", 0, (nodes, empty)
+        if best_value < 0.0:
+            # Best cut is trivial partition, all/empty
+            return '0' * n_qubits, 0.0, (nodes, [])
 
-            return "01", weight, ([nodes[0]], [nodes[1]])
+        return bit_string, best_value, (l, r)
 
     if quality is None:
         quality = 6
@@ -308,31 +344,21 @@
     if repulsion_base is None:
         repulsion_base = 5.0
 
-    is_opencl = is_maxcut_gpu and IS_OPENCL_AVAILABLE
-
-    if not is_opencl:
-        thread_count = os.cpu_count() ** 2
-
-        bit_string, best_value, partition = cpu_footer(shots, thread_count, quality, n_qubits, G_m, nodes, is_spin_glass, anneal_t, anneal_h, repulsion_base)
-
-        if best_value < 0.0:
-            # Best cut is trivial partition, all/empty
-            return '0' * n_qubits, 0.0, (nodes, [])
-
-        return bit_string, best_value, partition
-
-    segment_size = (G_m.shape[0] * G_m.shape[1] + 3) >> 2
-    theta_segment_size = (((n_qubits + 31) >> 5) * (((shots + wgs - 1) // wgs) * wgs) + 3) >> 2
-    is_segmented = ((G_m.nbytes << 1) > opencl_context.max_alloc) or ((theta_segment_size << 3) > opencl_context.max_alloc)
-    G_m_buf = make_G_m_buf(G_m, is_segmented, segment_size)
-
     J_eff, degrees = init_J_and_z(G_m, repulsion_base)
-    hamming_prob = maxcut_hamming_cdf(n_qubits, J_eff, degrees, quality, anneal_t, anneal_h)
+    cum_prob = maxcut_hamming_cdf(init_thresholds(n_qubits), n_qubits, J_eff, degrees, quality, anneal_t, anneal_h)
 
     degrees = None
     J_eff = None
 
-    best_solution, best_value = sample_for_opencl(G_m, G_m_buf, shots, hamming_prob, repulsion_base, is_spin_glass, is_segmented, segment_size, theta_segment_size)
+    if is_maxcut_gpu and IS_OPENCL_AVAILABLE:
+        segment_size = (G_m.shape[0] * G_m.shape[1] + 3) >> 2
+        theta_segment_size = (((n_qubits + 31) >> 5) * (((shots + wgs - 1) // wgs) * wgs) + 3) >> 2
+        is_segmented = ((G_m.nbytes << 1) > opencl_context.max_alloc) or ((theta_segment_size << 3) > opencl_context.max_alloc)
+        G_m_buf = make_G_m_buf(G_m, is_segmented, segment_size)
+        best_solution, best_value = sample_for_opencl(G_m, G_m_buf, shots, cum_prob, repulsion_base, is_spin_glass, is_segmented, segment_size, theta_segment_size)
+    else:
+        thread_count = os.cpu_count() ** 2
+        best_solution, best_value = sample_measurement(G_m, shots, thread_count, cum_prob, repulsion_base, is_spin_glass)
 
     bit_string, l, r = get_cut(best_solution, nodes, n_qubits)
 
pyqrackising/maxcut_tfim_sparse.py CHANGED
@@ -3,7 +3,7 @@ import numpy as np
 from numba import njit, prange
 import os
 
-from .maxcut_tfim_util import binary_search, compute_cut_sparse, compute_energy_sparse, convert_bool_to_uint, get_cut, get_cut_base, make_G_m_csr_buf, make_theta_buf, maxcut_hamming_cdf, opencl_context, sample_mag, setup_opencl, bit_pick, to_scipy_sparse_upper_triangular
+from .maxcut_tfim_util import binary_search, compute_cut_sparse, compute_energy_sparse, convert_bool_to_uint, get_cut, get_cut_base, heuristic_threshold_sparse, init_thresholds, make_G_m_csr_buf, make_theta_buf, maxcut_hamming_cdf, opencl_context, sample_mag, setup_opencl, bit_pick, to_scipy_sparse_upper_triangular
 
 IS_OPENCL_AVAILABLE = True
 try:
@@ -22,6 +22,9 @@ def update_repulsion_choice(G_data, G_rows, G_cols, weights, n, used, node, repu
     # Select node
     used[node] = True
 
+    if abs(1.0 - repulsion_base) <= epsilon:
+        return
+
     # Repulsion: penalize neighbors
     for j in range(G_rows[node], G_rows[node + 1]):
         nbr = G_cols[j]
@@ -187,17 +190,6 @@ def init_J_and_z(G_m, repulsion_base):
     return J_eff, degrees
 
 
-@njit
-def cpu_footer(J_eff, degrees, shots, thread_count, quality, n_qubits, G_data, G_rows, G_cols, nodes, is_spin_glass, anneal_t, anneal_h, repulsion_base):
-    hamming_prob = maxcut_hamming_cdf(n_qubits, J_eff, degrees, quality, anneal_t, anneal_h)
-
-    best_solution, best_value = sample_measurement(G_data, G_rows, G_cols, shots, thread_count, hamming_prob, repulsion_base, is_spin_glass)
-
-    bit_string, l, r = get_cut(best_solution, nodes, n_qubits)
-
-    return bit_string, best_value, (l, r)
-
-
 def run_cut_opencl(best_energy, samples, G_data_buf, G_rows_buf, G_cols_buf, is_segmented, local_size, global_size, args_buf, local_energy_buf, local_index_buf, max_energy_host, max_index_host, max_energy_buf, max_index_buf):
     queue = opencl_context.queue
     calculate_cut_kernel = opencl_context.calculate_cut_sparse_segmented_kernel if is_segmented else opencl_context.calculate_cut_sparse_kernel
@@ -264,6 +256,69 @@ run_cut_opencl(best_energy, samples, G_data_buf, G_rows_buf, G_cols_buf, is_
     return samples[max_index_host[best_x]], energy
 
 
+@njit
+def exact_maxcut(G_data, G_rows, G_cols):
+    """Brute-force exact MAXCUT solver using Numba JIT."""
+    n = G_rows.shape[0] - 1
+    max_cut = -1.0
+    best_mask = 0
+
+    # Enumerate all 2^n possible bitstrings
+    for mask in range(1 << n):
+        cut = 0.0
+        for i in range(n):
+            bi = (mask >> i) & 1
+            for j in range(i + 1, n):
+                if bi != ((mask >> j) & 1):
+                    u, v = (i, j) if i < j else (j, i)
+                    start = G_rows[u]
+                    end = G_rows[u + 1]
+                    k = binary_search(G_cols[start:end], v) + start
+                    if k < end:
+                        cut += G_data[k]
+        if cut > max_cut:
+            max_cut = cut
+            best_mask = mask
+
+    # Reconstruct best bitstring
+    best_bits = np.zeros(n, dtype=np.bool_)
+    for i in range(n):
+        best_bits[i] = (best_mask >> i) & 1
+
+    return best_bits, max_cut
+
+
+@njit
+def exact_spin_glass(G_data, G_rows, G_cols):
+    """Brute-force exact spin-glass solver using Numba JIT."""
+    n = G_rows.shape[0] - 1
+    max_cut = -1.0
+    best_mask = 0
+
+    # Enumerate all 2^n possible bitstrings
+    for mask in range(1 << n):
+        cut = 0.0
+        for i in range(n):
+            bi = (mask >> i) & 1
+            for j in range(i + 1, n):
+                u, v = (i, j) if i < j else (j, i)
+                start = G_rows[u]
+                end = G_rows[u + 1]
+                k = binary_search(G_cols[start:end], v) + start
+                if k < end:
+                    val = G_data[k]
+                    cut += val if bi == ((mask >> j) & 1) else -val
+        if cut > max_cut:
+            max_cut = cut
+            best_mask = mask
+
+    # Reconstruct best bitstring
+    best_bits = np.zeros(n, dtype=np.bool_)
+    for i in range(n):
+        best_bits[i] = (best_mask >> i) & 1
+
+    return best_bits, max_cut
+
 
 def maxcut_tfim_sparse(
     G,
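Note: the sparse variants recover the weight of an edge (i, j) by binary-searching row i's column indices in the upper-triangular CSR arrays. A rough standalone equivalent, using numpy.searchsorted in place of the package's internal binary_search helper (the function name csr_weight is illustrative, not part of the package):

    import numpy as np
    from scipy.sparse import csr_matrix

    def csr_weight(G_data, G_rows, G_cols, i, j):
        # Weight of edge (i, j) in an upper-triangular CSR matrix; 0.0 if absent.
        u, v = (i, j) if i < j else (j, i)
        start, end = G_rows[u], G_rows[u + 1]
        k = start + np.searchsorted(G_cols[start:end], v)
        if k < end and G_cols[k] == v:
            return G_data[k]
        return 0.0

    A = csr_matrix(np.triu(np.array([[0.0, 2.0, 0.0], [0.0, 0.0, 3.0], [0.0, 0.0, 0.0]])))
    print(csr_weight(A.data, A.indptr, A.indices, 1, 2))  # 3.0
    print(csr_weight(A.data, A.indptr, A.indices, 0, 2))  # 0.0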
@@ -289,19 +344,15 @@ def maxcut_tfim_sparse(
     nodes = list(range(n_qubits))
     G_m = G
 
-    if n_qubits < 3:
-        if n_qubits == 0:
-            return "", 0, ([], [])
+    if n_qubits < heuristic_threshold_sparse:
+        best_solution, best_value = exact_spin_glass(G_m.data, G_m.indptr, G_m.indices) if is_spin_glass else exact_maxcut(G_m.data, G_m.indptr, G_m.indices)
+        bit_string, l, r = get_cut(best_solution, nodes, n_qubits)
 
-        if n_qubits == 1:
-            return "0", 0, (nodes, [])
-
-        if n_qubits == 2:
-            weight = G_m[0, 1]
-            if weight < 0.0:
-                return "00", 0, (nodes, [])
+        if best_value < 0.0:
+            # Best cut is trivial partition, all/empty
+            return '0' * n_qubits, 0.0, (nodes, [])
 
-            return "01", weight, ([nodes[0]], [nodes[1]])
+        return bit_string, best_value, (l, r)
 
     if quality is None:
         quality = 6
@@ -319,35 +370,23 @@ def maxcut_tfim_sparse(
     if repulsion_base is None:
         repulsion_base = 5.0
 
-    J_eff, degrees = init_J_and_z(G_m, repulsion_base)
-
     n_qubits = G_m.shape[0]
 
-    is_opencl = is_maxcut_gpu and IS_OPENCL_AVAILABLE
-
-    if not is_opencl:
-        thread_count = os.cpu_count() ** 2
-
-        bit_string, best_value, partition = cpu_footer(J_eff, degrees, shots, thread_count, quality, n_qubits, G_m.data, G_m.indptr, G_m.indices, nodes, is_spin_glass, anneal_t, anneal_h, repulsion_base)
-
-        if best_value < 0.0:
-            # Best cut is trivial partition, all/empty
-            return '0' * n_qubits, 0.0, (nodes, [])
-
-        return bit_string, best_value, partition
-
-    segment_size = (G_m.data.shape[0] + 3) >> 2
-    theta_segment_size = (((n_qubits + 31) >> 5) * (((shots + wgs - 1) // wgs) * wgs) + 3) >> 2
-    is_segmented = (G_m.data.nbytes << 1) > opencl_context.max_alloc or ((theta_segment_size << 3) > opencl_context.max_alloc)
-
-    G_data_buf, G_rows_buf, G_cols_buf = make_G_m_csr_buf(G_m, is_segmented, segment_size)
-
-    hamming_prob = maxcut_hamming_cdf(n_qubits, J_eff, degrees, quality, anneal_t, anneal_h)
+    J_eff, degrees = init_J_and_z(G_m, repulsion_base)
+    cum_prob = maxcut_hamming_cdf(init_thresholds(n_qubits), n_qubits, J_eff, degrees, quality, anneal_t, anneal_h)
 
     degrees = None
     J_eff = None
 
-    best_solution, best_value = sample_for_opencl(G_m.data, G_m.indptr, G_m.indices, G_data_buf, G_rows_buf, G_cols_buf, shots, hamming_prob, repulsion_base, is_spin_glass, is_segmented, segment_size, theta_segment_size)
+    if is_maxcut_gpu and IS_OPENCL_AVAILABLE:
+        segment_size = (G_m.data.shape[0] + 3) >> 2
+        theta_segment_size = (((n_qubits + 31) >> 5) * (((shots + wgs - 1) // wgs) * wgs) + 3) >> 2
+        is_segmented = (G_m.data.nbytes << 1) > opencl_context.max_alloc or ((theta_segment_size << 3) > opencl_context.max_alloc)
+        G_data_buf, G_rows_buf, G_cols_buf = make_G_m_csr_buf(G_m, is_segmented, segment_size)
+        best_solution, best_value = sample_for_opencl(G_m.data, G_m.indptr, G_m.indices, G_data_buf, G_rows_buf, G_cols_buf, shots, cum_prob, repulsion_base, is_spin_glass, is_segmented, segment_size, theta_segment_size)
+    else:
+        thread_count = os.cpu_count() ** 2
+        best_solution, best_value = sample_measurement(G_m.data, G_m.indptr, G_m.indices, shots, thread_count, cum_prob, repulsion_base, is_spin_glass)
 
     bit_string, l, r = get_cut(best_solution, nodes, n_qubits)
 
pyqrackising/maxcut_tfim_streaming.py CHANGED
@@ -3,7 +3,7 @@ import numpy as np
 from numba import njit, prange
 import os
 
-from .maxcut_tfim_util import compute_cut_streaming, compute_energy_streaming, get_cut, get_cut_base, maxcut_hamming_cdf, opencl_context, sample_mag, bit_pick
+from .maxcut_tfim_util import compute_cut_streaming, compute_energy_streaming, get_cut, get_cut_base, heuristic_threshold, init_thresholds, maxcut_hamming_cdf, opencl_context, sample_mag, bit_pick
 
 
 epsilon = opencl_context.epsilon
@@ -15,6 +15,9 @@ def update_repulsion_choice(G_func, nodes, weights, n, used, node, repulsion_bas
     # Select node
     used[node] = True
 
+    if abs(1.0 - repulsion_base) <= epsilon:
+        return
+
     # Repulsion: penalize neighbors
     for nbr in range(n):
         if used[nbr]:
@@ -146,18 +149,55 @@ def find_G_min(G_func, nodes, n_nodes):
 
 
 @njit
-def cpu_footer(shots, thread_count, quality, n_qubits, G_min, G_func, nodes, is_spin_glass, anneal_t, anneal_h, repulsion_base):
-    J_eff, degrees = init_J_and_z(G_func, nodes, G_min, repulsion_base)
-    hamming_prob = maxcut_hamming_cdf(n_qubits, J_eff, degrees, quality, anneal_t, anneal_h)
-
-    degrees = None
-    J_eff = None
+def exact_maxcut(G_func, n):
+    """Brute-force exact MAXCUT solver using Numba JIT."""
+    max_cut = -1.0
+    best_mask = 0
+
+    # Enumerate all 2^n possible bitstrings
+    for mask in range(1 << n):
+        cut = 0.0
+        for i in range(n):
+            bi = (mask >> i) & 1
+            for j in range(i + 1, n):
+                if bi != ((mask >> j) & 1):
+                    cut += G_func(i, j)
+        if cut > max_cut:
+            max_cut = cut
+            best_mask = mask
+
+    # Reconstruct best bitstring
+    best_bits = np.zeros(n, dtype=np.bool_)
+    for i in range(n):
+        best_bits[i] = (best_mask >> i) & 1
+
+    return best_bits, max_cut
 
-    best_solution, best_value = sample_measurement(G_func, nodes, shots, thread_count, hamming_prob, n_qubits, repulsion_base, is_spin_glass)
 
-    bit_string, l, r = get_cut(best_solution, nodes, n_qubits)
-
-    return bit_string, best_value, (l, r)
+@njit
+def exact_spin_glass(G_func, n):
+    """Brute-force exact spin-glass solver using Numba JIT."""
+    max_cut = -1.0
+    best_mask = 0
+
+    # Enumerate all 2^n possible bitstrings
+    for mask in range(1 << n):
+        cut = 0.0
+        for i in range(n):
+            bi = (mask >> i) & 1
+            for j in range(i + 1, n):
+                val = G_func(i, j)
+                cut += val if bi == ((mask >> j) & 1) else -val
+        if cut > max_cut:
+            max_cut = cut
+            best_mask = mask
+
+    # Reconstruct best bitstring
+    best_bits = np.zeros(n, dtype=np.bool_)
+    for i in range(n):
+        best_bits[i] = (best_mask >> i) & 1
+
+    return best_bits, max_cut
 
 
 def maxcut_tfim_streaming(
@@ -173,19 +213,15 @@ def maxcut_tfim_streaming(
     wgs = opencl_context.work_group_size
     n_qubits = len(nodes)
 
-    if n_qubits < 3:
-        if n_qubits == 0:
-            return "", 0, ([], [])
+    if n_qubits < heuristic_threshold:
+        best_solution, best_value = exact_spin_glass(G_func, n_qubits) if is_spin_glass else exact_maxcut(G_func, n_qubits)
+        bit_string, l, r = get_cut(best_solution, nodes, n_qubits)
 
-        if n_qubits == 1:
-            return "0", 0, (nodes, [])
+        if best_value < 0.0:
+            # Best cut is trivial partition, all/empty
+            return '0' * n_qubits, 0.0, (nodes, [])
 
-        if n_qubits == 2:
-            weight = G_func(nodes[0], nodes[1])
-            if weight < 0.0:
-                return "00", 0, (nodes, [])
-
-            return "01", weight, ([nodes[0]], [nodes[1]])
+        return bit_string, best_value, (l, r)
 
     if quality is None:
         quality = 6
@@ -207,10 +243,18 @@
 
     thread_count = os.cpu_count() ** 2
 
-    bit_string, best_value, partition = cpu_footer(shots, thread_count, quality, n_qubits, G_min, G_func, nodes, is_spin_glass, anneal_t, anneal_h, repulsion_base)
+    J_eff, degrees = init_J_and_z(G_func, nodes, G_min, repulsion_base)
+    cum_prob = maxcut_hamming_cdf(init_thresholds(n_qubits), n_qubits, J_eff, degrees, quality, anneal_t, anneal_h)
+
+    degrees = None
+    J_eff = None
+
+    best_solution, best_value = sample_measurement(G_func, nodes, shots, thread_count, cum_prob, n_qubits, repulsion_base, is_spin_glass)
+
+    bit_string, l, r = get_cut(best_solution, nodes, n_qubits)
 
     if best_value < 0.0:
         # Best cut is trivial partition, all/empty
         return '0' * n_qubits, 0.0, (nodes, [])
 
-    return bit_string, best_value, partition
+    return bit_string, best_value, (l, r)
pyqrackising/maxcut_tfim_util.py CHANGED
@@ -121,6 +121,8 @@ except ImportError:
     print("PyOpenCL not installed. (If you have any OpenCL accelerator devices with available ICDs, you might want to optionally install pyopencl.)")
 
 opencl_context = OpenCLContext(compute_units, IS_OPENCL_AVAILABLE, work_group_size, dtype, epsilon, max_alloc, ctx, queue, calculate_cut_kernel, calculate_cut_sparse_kernel, calculate_cut_segmented_kernel, calculate_cut_sparse_segmented_kernel, single_bit_flips_kernel, single_bit_flips_sparse_kernel, single_bit_flips_segmented_kernel, single_bit_flips_sparse_segmented_kernel, double_bit_flips_kernel, double_bit_flips_sparse_kernel, double_bit_flips_segmented_kernel, double_bit_flips_sparse_segmented_kernel)
+heuristic_threshold = 24
+heuristic_threshold_sparse = 23
 
 
 def setup_opencl(l, g, args_np):
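Note: these new module-level constants cap how large a graph the exact solvers will attempt. At the largest dense case (n = 23, since the check is strictly less than 24), the enumeration visits about 2^23 bitmasks and scans roughly 253 vertex pairs per mask, on the order of two billion cheap inner-loop iterations under Numba. A quick, purely illustrative check of that arithmetic:

    n = 23                      # largest n handled exactly when heuristic_threshold = 24
    masks = 1 << n              # 8,388,608 candidate bipartitions
    pairs = n * (n - 1) // 2    # 253 vertex pairs scanned per bipartition
    print(masks * pairs)        # 2,122,317,824 inner-loop iterations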
@@ -389,58 +391,22 @@ def init_theta(h_mult, n_qubits, J_eff, degrees):
     return theta
 
 
-# From Google Search AI
-@njit
-def factorial(num):
-    """Calculates the factorial of a non-negative integer."""
-    if num == 0:
-        return 1
-
-    result = 1
-    for i in range(1, num + 1):
-        result *= i
-
-    return result
-
-
-# From Google Search AI
-@njit
-def comb(n, k):
-    """
-    Calculates the number of combinations (n choose k) from scratch.
-    n: The total number of items.
-    k: The number of items to choose.
-    """
-    # Optimize by choosing the smaller of k and (n-k)
-    # This reduces the number of multiplications in the factorial calculation
-    k = min(k, n - k)
-
-    # Calculate the numerator: n * (n-1) * ... * (n-k+1)
-    numerator = 1
-    for i in range(k):
-        numerator *= (n - i)
-
-    # Calculate the denominator: k!
-    denominator = factorial(k)
-
-    return numerator // denominator
-
-
-@njit
 def init_thresholds(n_qubits):
-    n_bias = n_qubits - 1
+    n_bias = n_qubits + 1
     thresholds = np.empty(n_bias, dtype=np.float64)
-    tot_prob = 0
-    p = n_qubits
-    for q in range(1, n_qubits // 2):
-        thresholds[q - 1] = p
-        thresholds[n_bias - q] = p
-        tot_prob += 2 * p
-        p = comb(n_qubits, q + 1)
+    normalizer = 0
+    for q in range(n_qubits >> 1):
+        normalizer += math.comb(n_qubits, q) << 1
     if n_qubits & 1:
-        thresholds[q - 1] = p
-        tot_prob += p
-    thresholds /= tot_prob
+        normalizer += math.comb(n_qubits, n_qubits >> 1)
+    p = 1
+    for q in range(n_qubits >> 1):
+        val = p / normalizer
+        thresholds[q] = val
+        thresholds[n_bias - (q + 1)] = val
+        p = math.comb(n_qubits, q + 1)
+    if n_qubits & 1:
+        thresholds[n_qubits >> 1] = p / normalizer
 
     return thresholds
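Note: init_thresholds now seeds the Hamming-weight distribution from binomial coefficients via math.comb, replacing the hand-rolled factorial/comb helpers removed above. For reference, the maximum-entropy prior it is built around is the weight-q probability of a uniformly random n-bit string; the sketch below shows that prior directly and is not the package function, which fills the array symmetrically from the ends inward and normalizes with its own accumulator:

    import math
    import numpy as np

    def binomial_hamming_prior(n_qubits):
        # P(weight = q) for a uniformly random n-bit string: C(n, q) / 2^n.
        w = np.array([math.comb(n_qubits, q) for q in range(n_qubits + 1)], dtype=np.float64)
        return w / w.sum()

    print(binomial_hamming_prior(4))  # [0.0625 0.25 0.375 0.25 0.0625]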
 
@@ -476,17 +442,15 @@ def probability_by_hamming_weight(J, h, z, theta, t, n_bias, normalized=True):
     return bias
 
 
-@njit(parallel=True)
-def maxcut_hamming_cdf(n_qubits, J_func, degrees, quality, tot_t, h_mult):
-    hamming_prob = init_thresholds(n_qubits)
-
+@njit
+def maxcut_hamming_cdf(hamming_prob, n_qubits, J_func, degrees, quality, tot_t, h_mult):
     n_steps = 1 << quality
     delta_t = tot_t / n_steps
     n_bias = n_qubits + 1
 
     theta = init_theta(h_mult, n_qubits, J_func, degrees)
 
-    for qc in prange(n_qubits, n_steps * n_qubits):
+    for qc in range(n_qubits, n_steps * n_qubits):
         step = qc // n_qubits
         q = qc % n_qubits
         J_eff = J_func[q]
pyqrackising/otoc.py CHANGED
@@ -15,54 +15,60 @@ def get_otoc_hamming_distribution(J=-1.0, h=2.0, z=4, theta=0.0, t=5, n_qubits=6
         bias[0] = 1.0
         return bias
 
-    max_entropy = np.empty(n_bias, dtype=np.float64)
+    diff_x = np.empty(n_bias, dtype=np.float64)
     tot_prob = 0
     p = 1.0
-    for q in range(n_qubits // 2):
-        max_entropy[q] = p
-        max_entropy[n_bias - (q + 1)] = p
+    for q in range(n_qubits >> 1):
+        diff_x[q] = p
+        diff_x[n_bias - (q + 1)] = p
         tot_prob += 2 * p
         p = math.comb(n_qubits, q + 1)
     if n_qubits & 1:
-        max_entropy[q - 1] = p
+        diff_x[n_qubits >> 1] = p
        tot_prob += p
-    max_entropy /= tot_prob
+    diff_x *= n_qubits / tot_prob
 
+    signal_frac = 0.0
     diff_z = np.zeros(n_bias, dtype=np.float64)
+    diff_z[0] = n_qubits
     for pauli_string in pauli_strings:
         pauli_string = list(pauli_string)
         if len(pauli_string) != n_qubits:
             raise ValueError("OTOCS pauli_string must be same length as n_qubits! (Use 'I' for qubits that aren't changed.)")
 
+        term_signal = 0.5 * pauli_string.count('X') + pauli_string.count('Z') + 1.5 * pauli_string.count('Y')
+        if term_signal == 0:
+            continue
+
+        signal_frac -= term_signal
+
         fwd = probability_by_hamming_weight(J, h, z, theta, t, n_qubits + 1)
         rev = probability_by_hamming_weight(-J, -h, z, theta + np.pi, t, n_qubits + 1)
         diff_theta = rev - fwd
 
         phi = theta + np.pi / 2
         fwd = probability_by_hamming_weight(-h, -J, z, phi, t, n_qubits + 1)
-        rev = probability_by_hamming_weight(h, J, z, phi + np.pi, t, n_qubits + 1)
+        rev = probability_by_hamming_weight(h, J, z, phi - np.pi, t, n_qubits + 1)
         diff_phi = rev - fwd
 
-        diff_lam = diff_theta + diff_phi
-
-        diff_theta[0] += 1.0
-        diff_phi[0] += 1.0
-        diff_lam[0] += 1.0
-
-        diff_theta += max_entropy
-        diff_phi += max_entropy
-        diff_lam += max_entropy
-
         for b in pauli_string:
             match b:
                 case 'X':
                     diff_z += diff_theta
                 case 'Z':
-                    diff_z += diff_phi
+                    diff_x += diff_phi
                 case 'Y':
-                    diff_z += diff_lam
+                    diff_z += diff_theta
+                    diff_x += diff_phi
                 case _:
-                    diff_z[0] += 1.0
+                    pass
+
+    # Normalize:
+    diff_z /= diff_z.sum()
+    diff_x /= diff_x.sum()
+
+    signal_frac = 2 ** signal_frac
+    diff_z = signal_frac * diff_z + (1 - signal_frac) * diff_x
 
     # Normalize:
     diff_z /= diff_z.sum()
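Note: the reworked OTOC distribution mixes two normalized Hamming-weight profiles, weighted by how much butterfly signal survives: every 'X' contributes 0.5, every 'Z' 1.0, and every 'Y' 1.5 to a total, and the signal term is suppressed by 2 raised to minus that total. A toy illustration of just this mixing step (the function name and input arrays are made up for the example, not part of the package):

    import numpy as np

    def mix_otoc(diff_z, diff_x, pauli_strings):
        # Accumulated butterfly weight across all Pauli strings.
        weight = sum(0.5 * s.count('X') + 1.0 * s.count('Z') + 1.5 * s.count('Y') for s in pauli_strings)
        signal_frac = 2.0 ** (-weight)
        diff_z = diff_z / diff_z.sum()
        diff_x = diff_x / diff_x.sum()
        return signal_frac * diff_z + (1.0 - signal_frac) * diff_x

    print(mix_otoc(np.array([1.0, 3.0]), np.array([1.0, 1.0]), ['XI', 'IZ']))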
@@ -133,24 +139,24 @@ def take_sample(n_qubits, sample, m, inv_dist):
     return sample
 
 
-def get_willow_inv_dist(butterfly_idx_x, butterfly_idx_z, n_qubits, row_len, col_len):
+def get_willow_inv_dist(butterfly_idx_x, butterfly_idx_z, n_qubits, row_len, col_len, t):
     inv_dist = np.zeros(n_qubits, dtype=np.float64)
     for idx in butterfly_idx_x:
         b_row, b_col = divmod(idx, row_len)
         for q in range(n_qubits):
             q_row, q_col = divmod(q, row_len)
-            inv_dist[q] -= abs(q_row - b_row) + abs(q_col - b_col)
+            inv_dist[q] += abs(q_row - b_row) + abs(q_col - b_col)
     for idx in butterfly_idx_z:
         b_row, b_col = divmod(idx, row_len)
         for q in range(n_qubits):
             q_row, q_col = divmod(q, row_len)
-            inv_dist[q] += abs(q_row - b_row) + abs(q_col - b_col)
-    inv_dist += 1.0 - inv_dist.min()
+            inv_dist[q] -= abs(q_row - b_row) + abs(q_col - b_col)
+    inv_dist = 2 ** (inv_dist / t)
 
     return inv_dist
 
 
-def get_inv_dist(butterfly_idx_x, butterfly_idx_z, n_qubits, row_len, col_len):
+def get_inv_dist(butterfly_idx_x, butterfly_idx_z, n_qubits, row_len, col_len, t):
     inv_dist = np.zeros(n_qubits, dtype=np.float64)
     half_row = row_len >> 1
     half_col = col_len >> 1
@@ -164,7 +170,7 @@ def get_inv_dist(butterfly_idx_x, butterfly_idx_z, n_qubits, row_len, col_len):
             col_d = abs(q_col - b_col)
             if col_d > half_col:
                 col_d = col_len - col_d
-            inv_dist[q] -= row_d + col_d
+            inv_dist[q] += row_d + col_d
     for idx in butterfly_idx_z:
         b_row, b_col = divmod(idx, row_len)
         for q in range(n_qubits):
@@ -175,8 +181,8 @@ def get_inv_dist(butterfly_idx_x, butterfly_idx_z, n_qubits, row_len, col_len):
             col_d = abs(q_col - b_col)
             if col_d > half_col:
                 col_d = col_len - col_d
-            inv_dist[q] += row_d + col_d
-    inv_dist += 1.0 - inv_dist.min()
+            inv_dist[q] -= row_d + col_d
+    inv_dist = 2 ** (inv_dist / t)
 
     return inv_dist
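Note: both distance kernels now turn the signed Manhattan-distance totals into multiplicative weights via 2 ** (inv_dist / t), so qubits far from an 'X' butterfly (and close to a 'Z' butterfly) receive exponentially larger weight as t grows. A compact sketch of the non-toroidal (Willow-style) variant, with an assumed helper name and signature rather than the package's own:

    import numpy as np

    def distance_weights(butterfly_x, butterfly_z, n_qubits, row_len, t):
        # Signed Manhattan-distance score per qubit, then exponential rescaling by t.
        score = np.zeros(n_qubits)
        for sign, idxs in ((+1.0, butterfly_x), (-1.0, butterfly_z)):
            for idx in idxs:
                b_row, b_col = divmod(idx, row_len)
                for q in range(n_qubits):
                    q_row, q_col = divmod(q, row_len)
                    score[q] += sign * (abs(q_row - b_row) + abs(q_col - b_col))
        return 2.0 ** (score / t)

    print(distance_weights([0], [], n_qubits=4, row_len=2, t=5))  # [1.0, ~1.149, ~1.149, ~1.32]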
 
@@ -184,21 +190,17 @@ def get_inv_dist(butterfly_idx_x, butterfly_idx_z, n_qubits, row_len, col_len):
 def generate_otoc_samples(J=-1.0, h=2.0, z=4, theta=0.0, t=5, n_qubits=65, pauli_strings = ['X' + 'I' * 64], shots=100, is_orbifold=True):
     thresholds = fix_cdf(get_otoc_hamming_distribution(J, h, z, theta, t, n_qubits, pauli_strings))
 
-    entropy_frac = 0.0
-    for pauli_string in pauli_strings:
-        pauli_string = list(pauli_string)
-        entropy_frac += pauli_string.count('X') + pauli_string.count('Y') + pauli_string.count('Z')
-    entropy_frac /= n_qubits * len(pauli_strings)
-
     row_len, col_len = factor_width(n_qubits)
     inv_dist = np.zeros(n_qubits, dtype=np.float64)
     for pauli_string in pauli_strings:
+        if (pauli_string.count('X') + pauli_string.count('Y') + pauli_string.count('Z')) == 0:
+            continue
         butterfly_idx_x = find_all_str_occurrences(pauli_string, 'X')
         butterfly_idx_z = find_all_str_occurrences(pauli_string, 'Z')
         if is_orbifold:
-            inv_dist += get_inv_dist(butterfly_idx_x, butterfly_idx_z, n_qubits, row_len, col_len)
+            inv_dist += get_inv_dist(butterfly_idx_x, butterfly_idx_z, n_qubits, row_len, col_len, t)
         else:
-            inv_dist += get_willow_inv_dist(butterfly_idx_x, butterfly_idx_z, n_qubits, row_len, col_len)
+            inv_dist += get_willow_inv_dist(butterfly_idx_x, butterfly_idx_z, n_qubits, row_len, col_len, t)
     inv_dist /= 2.0
 
     qubit_pows = [1 << q for q in range(n_qubits)]
@@ -214,13 +216,6 @@ def generate_otoc_samples(J=-1.0, h=2.0, z=4, theta=0.0, t=5, n_qubits=65, pauli
             continue
 
         # Second dimension: permutation within Hamming weight
-        if np.random.random() < entropy_frac:
-            bit_pows = np.random.choice(qubit_pows, size=m, replace=False)
-            sample = 0
-            for bit_pow in bit_pows:
-                sample |= bit_pow
-            samples.append(sample)
-        else:
-            samples.append(take_sample(n_qubits, 0, m, inv_dist))
+        samples.append(take_sample(n_qubits, 0, m, inv_dist))
 
     return samples
pyqrackising/spin_glass_solver.py CHANGED
@@ -1,5 +1,5 @@
 from .maxcut_tfim import maxcut_tfim
-from .maxcut_tfim_util import compute_cut, compute_energy, get_cut, gray_code_next, gray_mutation, int_to_bitstring, make_G_m_buf, make_best_theta_buf, opencl_context, setup_opencl
+from .maxcut_tfim_util import compute_cut, compute_energy, get_cut, gray_code_next, gray_mutation, heuristic_threshold, int_to_bitstring, make_G_m_buf, make_best_theta_buf, opencl_context, setup_opencl
 import networkx as nx
 import numpy as np
 from numba import njit, prange
@@ -296,6 +296,9 @@ def spin_glass_solver(
 
         return "01", weight, ([nodes[0]], [nodes[1]]), -weight
 
+    if n_qubits < heuristic_threshold:
+        best_guess = None
+
     bitstring = ""
     if isinstance(best_guess, str):
         bitstring = best_guess
@@ -305,16 +308,27 @@ def spin_glass_solver(
         bitstring = "".join(["1" if b else "0" for b in best_guess])
     else:
         bitstring, cut_value, _ = maxcut_tfim(G_m, quality=quality, shots=shots, is_spin_glass=is_spin_glass, anneal_t=anneal_t, anneal_h=anneal_h, repulsion_base=repulsion_base, is_maxcut_gpu=is_maxcut_gpu, is_nested=True)
+
     best_theta = np.array([b == "1" for b in list(bitstring)], dtype=np.bool_)
+    max_energy = compute_energy(best_theta, G_m, n_qubits) if is_spin_glass else cut_value
+
+    if n_qubits < heuristic_threshold:
+        bitstring, l, r = get_cut(best_theta, nodes, n_qubits)
+        if is_spin_glass:
+            cut_value = compute_cut(best_theta, G_m, n_qubits)
+            min_energy = -max_energy
+        else:
+            cut_value = max_energy
+            min_energy = compute_energy(best_theta, G_m, n_qubits)
+
+        return bitstring, float(cut_value), (l, r), float(min_energy)
 
     if gray_iterations is None:
-        gray_iterations = n_qubits * os.cpu_count()
+        gray_iterations = n_qubits * n_qubits
 
     if gray_seed_multiple is None:
         gray_seed_multiple = os.cpu_count()
 
-    max_energy = compute_energy(best_theta, G_m, n_qubits) if is_spin_glass else cut_value
-
     is_opencl = is_maxcut_gpu and IS_OPENCL_AVAILABLE
 
     if is_opencl:
pyqrackising/spin_glass_solver_sparse.py CHANGED
@@ -1,5 +1,5 @@
 from .maxcut_tfim_sparse import maxcut_tfim_sparse
-from .maxcut_tfim_util import compute_cut_sparse, compute_energy_sparse, get_cut, gray_code_next, gray_mutation, int_to_bitstring, make_G_m_csr_buf, make_best_theta_buf, opencl_context, setup_opencl, to_scipy_sparse_upper_triangular
+from .maxcut_tfim_util import compute_cut_sparse, compute_energy_sparse, get_cut, gray_code_next, gray_mutation, heuristic_threshold_sparse, int_to_bitstring, make_G_m_csr_buf, make_best_theta_buf, opencl_context, setup_opencl, to_scipy_sparse_upper_triangular
 import networkx as nx
 import numpy as np
 from numba import njit, prange
@@ -300,6 +300,9 @@ def spin_glass_solver_sparse(
 
         return "01", weight, ([nodes[0]], [nodes[1]]), -weight
 
+    if n_qubits < heuristic_threshold_sparse:
+        best_guess = None
+
     bitstring = ""
     if isinstance(best_guess, str):
         bitstring = best_guess
@@ -309,16 +312,27 @@ def spin_glass_solver_sparse(
         bitstring = "".join(["1" if b else "0" for b in best_guess])
     else:
         bitstring, cut_value, _ = maxcut_tfim_sparse(G_m, quality=quality, shots=shots, is_spin_glass=is_spin_glass, anneal_t=anneal_t, anneal_h=anneal_h, repulsion_base=repulsion_base, is_maxcut_gpu=is_maxcut_gpu, is_nested=True)
+
     best_theta = np.array([b == "1" for b in list(bitstring)], dtype=np.bool_)
+    max_energy = compute_energy(best_theta, G_m, n_qubits) if is_spin_glass else cut_value
+
+    if n_qubits < heuristic_threshold_sparse:
+        bitstring, l, r = get_cut(best_theta, nodes, n_qubits)
+        if is_spin_glass:
+            cut_value = compute_cut_sparse(best_theta, G_m.data, G_m.indptr, G_m.indices, n_qubits)
+            min_energy = -max_energy
+        else:
+            cut_value = max_energy
+            min_energy = compute_energy_sparse(best_theta, G_m.data, G_m.indptr, G_m.indices, n_qubits)
+
+        return bitstring, float(cut_value), (l, r), float(min_energy)
 
     if gray_iterations is None:
-        gray_iterations = n_qubits * os.cpu_count()
+        gray_iterations = n_qubits * n_qubits
 
     if gray_seed_multiple is None:
         gray_seed_multiple = os.cpu_count()
 
-    max_energy = compute_energy_sparse(best_theta, G_m.data, G_m.indptr, G_m.indices, n_qubits) if is_spin_glass else cut_value
-
     is_opencl = is_maxcut_gpu and IS_OPENCL_AVAILABLE
 
     if is_opencl:
pyqrackising/spin_glass_solver_streaming.py CHANGED
@@ -1,5 +1,5 @@
 from .maxcut_tfim_streaming import maxcut_tfim_streaming
-from .maxcut_tfim_util import compute_cut_streaming, compute_energy_streaming, get_cut, get_cut_base, gray_code_next, gray_mutation, int_to_bitstring, opencl_context
+from .maxcut_tfim_util import compute_cut_streaming, compute_energy_streaming, get_cut, get_cut_base, gray_code_next, gray_mutation, heuristic_threshold, int_to_bitstring, opencl_context
 import networkx as nx
 import numpy as np
 from numba import njit, prange
@@ -207,6 +207,9 @@ def spin_glass_solver_streaming(
 
         return "01", weight, ([nodes[0]], [nodes[1]]), -weight
 
+    if n_qubits < heuristic_threshold:
+        best_guess = None
+
     bitstring = ""
     if isinstance(best_guess, str):
         bitstring = best_guess
@@ -216,16 +219,27 @@ def spin_glass_solver_streaming(
         bitstring = "".join(["1" if b else "0" for b in best_guess])
     else:
         bitstring, cut_value, _ = maxcut_tfim_streaming(G_func, nodes, quality=quality, shots=shots, is_spin_glass=is_spin_glass, anneal_t=anneal_t, anneal_h=anneal_h, repulsion_base=repulsion_base)
+
     best_theta = np.array([b == "1" for b in list(bitstring)], dtype=np.bool_)
+    max_energy = compute_energy(best_theta, G_m, n_qubits) if is_spin_glass else cut_value
+
+    if n_qubits < heuristic_threshold:
+        bitstring, l, r = get_cut(best_theta, nodes, n_qubits)
+        if is_spin_glass:
+            cut_value = compute_cut_streaming(best_theta, G_func, nodes, n_qubits)
+            min_energy = -max_energy
+        else:
+            cut_value = max_energy
+            min_energy = compute_energy_streaming(best_theta, G_func, nodes, n_qubits)
+
+        return bitstring, float(cut_value), (l, r), float(min_energy)
 
     if gray_iterations is None:
-        gray_iterations = n_qubits * os.cpu_count()
+        gray_iterations = n_qubits * n_qubits
 
    if gray_seed_multiple is None:
         gray_seed_multiple = os.cpu_count()
 
-    max_energy = compute_energy_streaming(best_theta, G_func, nodes, n_qubits) if is_spin_glass else cut_value
-
     thread_count = os.cpu_count() ** 2
     improved = True
     while improved:
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pyqrackising
-Version: 9.5.3
+Version: 9.7.1
 Summary: Fast MAXCUT, TSP, and sampling heuristics from near-ideal transverse field Ising model (TFIM)
 Home-page: https://github.com/vm6502q/PyQrackIsing
 Author: Dan Strano
@@ -0,0 +1,21 @@
+pyqrackising/__init__.py,sha256=MWxOmzFfQ2csz2A6o30Kf8zQvEtd996d3dz4kYaLtHM,898
+pyqrackising/convert_tensor_network_to_tsp.py,sha256=55ajnvdBXFgf-Tx8Ck4i_tXOWULft3LZd_xz5Z6s4qw,3547
+pyqrackising/generate_tfim_samples.py,sha256=IlAz1l8oLExO6wJBO8LCQKlU_4ZPlyGsNE8xUt_iTrg,4762
+pyqrackising/kernels.cl,sha256=YLWWyWxd38VzB_SbhSPaVtz0EHzLiat7oINtzR7moXQ,27861
+pyqrackising/maxcut_tfim.py,sha256=a2gbK_WypOQz8JTbMSXAtG5h-poumSTUNjK1jfNAF_c,11586
+pyqrackising/maxcut_tfim_sparse.py,sha256=26Rv5yt_LC9OJRobCtOvEFgNphRfof9rduG7XoPehmQ,13361
+pyqrackising/maxcut_tfim_streaming.py,sha256=ne0NH8_I6S9iYE6315UUCMlOtQyYuSJOjckRTIfkHH0,7893
+pyqrackising/maxcut_tfim_util.py,sha256=I2iRXrrEr_YXKcmVlez8ZMwhmIQH3TegdepyM19J0CU,17062
+pyqrackising/otoc.py,sha256=PKmpjwkGpMmwXGWmeqtvbVj1NfE35i9kkIdjQnDUZKE,7208
+pyqrackising/spin_glass_solver.py,sha256=j0vB3YaQE-CMInhVKE6z9_K8TXTb3T3lJATe3IY58OQ,14718
+pyqrackising/spin_glass_solver_sparse.py,sha256=a_j06Jvl6dKuWoYCvp0pOyoe4clNwaT7OEuhm8kF2nU,15728
+pyqrackising/spin_glass_solver_streaming.py,sha256=aUf8XIt3bIeBP3C5_k0ATyboib7C6hwZ30bG9wvLeZo,10841
+pyqrackising/tfim_magnetization.py,sha256=On1MhCNGGHRxJFRmCOpMcdqQJiy25gWkjz0Ka8i5f-Q,499
+pyqrackising/tfim_square_magnetization.py,sha256=9uJCT8ytyufcGFrZiignjCkWJr9UcP44sAAy0BIBw34,531
+pyqrackising/tsp.py,sha256=k8VK6fKw_niR-dVz8MyOT7LedABIwTzcSkhTOircYBg,64290
+pyqrackising/tsp_maxcut.py,sha256=lEDruz5lhjVu0ufvH5VaMJW3_nohO-rEijJJabEtuSU,10084
+pyqrackising-9.7.1.dist-info/licenses/LICENSE.md,sha256=fTqV5eBpeAZO0_jit8j4Ref9ikBSlHJ8xwj5TLg7gFk,7817
+pyqrackising-9.7.1.dist-info/METADATA,sha256=Xl5EKxdkXuGdVQLgQumsHhJXl-kHPxNZqORLpgMRDFY,1170
+pyqrackising-9.7.1.dist-info/WHEEL,sha256=ZjXRCNaQ9YSypEK2TE0LRB0sy2OVXSszb4Sx1XjM99k,97
+pyqrackising-9.7.1.dist-info/top_level.txt,sha256=bxlfGuLwzeVEI8Jm5D9HvC_WedgvvkSrpFwbGDjg-Ag,13
+pyqrackising-9.7.1.dist-info/RECORD,,
@@ -1,21 +0,0 @@
-pyqrackising/__init__.py,sha256=MWxOmzFfQ2csz2A6o30Kf8zQvEtd996d3dz4kYaLtHM,898
-pyqrackising/convert_tensor_network_to_tsp.py,sha256=55ajnvdBXFgf-Tx8Ck4i_tXOWULft3LZd_xz5Z6s4qw,3547
-pyqrackising/generate_tfim_samples.py,sha256=IlAz1l8oLExO6wJBO8LCQKlU_4ZPlyGsNE8xUt_iTrg,4762
-pyqrackising/kernels.cl,sha256=YLWWyWxd38VzB_SbhSPaVtz0EHzLiat7oINtzR7moXQ,27861
-pyqrackising/maxcut_tfim.py,sha256=U1nNjyfMS48TtTQk7TRf5_VF3pVPfAcZEa2awc2nR8k,10862
-pyqrackising/maxcut_tfim_sparse.py,sha256=eenJNSEwRvgwACfKoH0tj6rpn7uqH6nNTBuxUPh_lDg,11941
-pyqrackising/maxcut_tfim_streaming.py,sha256=FkBsRoXSRhv4gUeN9O7Ivx54oxq_SqiCDKnYsyxU4bs,6664
-pyqrackising/maxcut_tfim_util.py,sha256=S8W_Hxi4mwdSxK76uBG66fZwd9yODSVCDtmV9Jrirjs,17781
-pyqrackising/otoc.py,sha256=cfnE8TOgrgVHU7MWcYYoHLv8ncyRJ9Rl60XrX4u6uVA,7399
-pyqrackising/spin_glass_solver.py,sha256=tgmdJ6b1TetnFM--QEzOZFsNkquQYRVYiHVSzIGYvMI,14207
-pyqrackising/spin_glass_solver_sparse.py,sha256=cmXfAR43n1beRk4LaQbwfrQWkkhMPzPXpdK2T1HhQrk,15159
-pyqrackising/spin_glass_solver_streaming.py,sha256=joQLMKotgjQMwbG0msv_AookiWva_AiVBIR0Xr1Unho,10310
-pyqrackising/tfim_magnetization.py,sha256=On1MhCNGGHRxJFRmCOpMcdqQJiy25gWkjz0Ka8i5f-Q,499
-pyqrackising/tfim_square_magnetization.py,sha256=9uJCT8ytyufcGFrZiignjCkWJr9UcP44sAAy0BIBw34,531
-pyqrackising/tsp.py,sha256=k8VK6fKw_niR-dVz8MyOT7LedABIwTzcSkhTOircYBg,64290
-pyqrackising/tsp_maxcut.py,sha256=lEDruz5lhjVu0ufvH5VaMJW3_nohO-rEijJJabEtuSU,10084
-pyqrackising-9.5.3.dist-info/licenses/LICENSE.md,sha256=fTqV5eBpeAZO0_jit8j4Ref9ikBSlHJ8xwj5TLg7gFk,7817
-pyqrackising-9.5.3.dist-info/METADATA,sha256=IRBVTz7T_zf7jfPxQxOB3aLtlgtjvCIZmSbbGkUP26I,1170
-pyqrackising-9.5.3.dist-info/WHEEL,sha256=ZjXRCNaQ9YSypEK2TE0LRB0sy2OVXSszb4Sx1XjM99k,97
-pyqrackising-9.5.3.dist-info/top_level.txt,sha256=bxlfGuLwzeVEI8Jm5D9HvC_WedgvvkSrpFwbGDjg-Ag,13
-pyqrackising-9.5.3.dist-info/RECORD,,