pyqrack-cpu-complex128 1.72.5__py3-none-macosx_14_0_arm64.whl → 1.80.2__py3-none-macosx_14_0_arm64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pyqrack/__init__.py +2 -2
- pyqrack/qrack_ace_backend.py +44 -70
- pyqrack/qrack_circuit.py +51 -40
- pyqrack/qrack_neuron.py +112 -5
- pyqrack/qrack_neuron_torch_layer.py +182 -106
- pyqrack/qrack_simulator.py +304 -271
- pyqrack/qrack_system/qrack_lib/libqrack_pinvoke.9.35.1.dylib +0 -0
- pyqrack/qrack_system/qrack_lib/libqrack_pinvoke.dylib +0 -0
- pyqrack/qrack_system/qrack_system.py +25 -12
- pyqrack/stats/load_quantized_data.py +1 -3
- pyqrack/stats/quantize_by_range.py +2 -6
- {pyqrack_cpu_complex128-1.72.5.dist-info → pyqrack_cpu_complex128-1.80.2.dist-info}/METADATA +3 -3
- pyqrack_cpu_complex128-1.80.2.dist-info/RECORD +23 -0
- pyqrack/qrack_system/qrack_lib/libqrack_pinvoke.9.32.6.dylib +0 -0
- pyqrack_cpu_complex128-1.72.5.dist-info/RECORD +0 -23
- {pyqrack_cpu_complex128-1.72.5.dist-info → pyqrack_cpu_complex128-1.80.2.dist-info}/LICENSE +0 -0
- {pyqrack_cpu_complex128-1.72.5.dist-info → pyqrack_cpu_complex128-1.80.2.dist-info}/WHEEL +0 -0
- {pyqrack_cpu_complex128-1.72.5.dist-info → pyqrack_cpu_complex128-1.80.2.dist-info}/top_level.txt +0 -0
pyqrack/qrack_neuron.py
CHANGED
@@ -64,7 +64,7 @@ class QrackNeuron:
         self.nid = Qrack.qrack_lib.init_qneuron(
             simulator.sid,
             len(controls),
-
+            QrackNeuron._ulonglong_byref(controls),
             target,
             activation_fn,
             alpha,
@@ -99,15 +99,32 @@ class QrackNeuron:
         self._throw_if_error()
         return result

-
+    @staticmethod
+    def _ulonglong_byref(a):
         return (ctypes.c_ulonglong * len(a))(*a)

-
+    @staticmethod
+    def _real1_byref(a):
         # This needs to be c_double, if PyQrack is built with fp64.
         if Qrack.fppow < 6:
             return (ctypes.c_float * len(a))(*a)
         return (ctypes.c_double * len(a))(*a)

+    def set_simulator(self, s):
+        """Set the neuron simulator
+
+        Set the simulator used by this neuron
+
+        Args:
+            s(QrackSimulator): The simulator to use
+
+        Raises:
+            RuntimeError: QrackSimulator raised an exception.
+        """
+        Qrack.qrack_lib.set_qneuron_sim(self.nid, s.sid)
+        self._throw_if_error()
+        self.simulator = s
+
     def set_angles(self, a):
         """Directly sets the neuron parameters.

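The new set_simulator() method lets one QrackNeuron be retargeted at another simulator instance, which is how the reworked torch layer below reuses a single set of neurons across per-batch simulator clones. A minimal usage sketch, assuming the wheel is installed (the qubit indices and angle values are illustrative, not from the package):

from pyqrack import QrackSimulator, QrackNeuron
from pyqrack.neuron_activation_fn import NeuronActivationFn

sim_a = QrackSimulator(3)
# Neuron with control qubits [0, 1] and target qubit 2
neuron = QrackNeuron(sim_a, [0, 1], 2, int(NeuronActivationFn.Generalized_Logistic))
neuron.set_angles([0.0, 0.0, 0.0, 0.5])  # 2 ** len(controls) parameters

sim_b = sim_a.clone()        # independent copy of the simulator state
neuron.set_simulator(sim_b)  # the neuron now predicts into sim_b
neuron.predict(True, False)
print(sim_b.prob(2))         # probability of the target qubit after prediction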
@@ -125,7 +142,7 @@ class QrackNeuron:
             raise ValueError(
                 "Angles 'a' in QrackNeuron.set_angles() must contain at least (2 ** len(self.controls)) elements."
             )
-        Qrack.qrack_lib.set_qneuron_angles(self.nid,
+        Qrack.qrack_lib.set_qneuron_angles(self.nid, QrackNeuron._real1_byref(a))
         self._throw_if_error()

     def get_angles(self):
@@ -137,7 +154,7 @@ class QrackNeuron:
         Raises:
             RuntimeError: QrackNeuron C++ library raised an exception.
         """
-        ket =
+        ket = QrackNeuron._real1_byref([0.0] * (1 << len(self.controls)))
         Qrack.qrack_lib.get_qneuron_angles(self.nid, ket)
         self._throw_if_error()
         return list(ket)
@@ -260,3 +277,93 @@ class QrackNeuron:
         """
         Qrack.qrack_lib.qneuron_learn_permutation(self.nid, eta, e, r)
         self._throw_if_error()
+
+    @staticmethod
+    def quantile_bounds(vec, bits):
+        """Calculate vector quantile bounds
+
+        This is a static helper method to calculate the quantile
+        bounds of 2 ** bits worth of quantiles.
+
+        Args:
+            vec: numerical vector
+            bits: log2() of quantile count
+
+        Returns:
+            Quantile (n + 1) bounds for n-quantile division, including
+            minimum and maximum values
+        """
+
+        bins = 1 << bits
+        n = len(vec)
+        vec_sorted = sorted(vec)
+
+        return (
+            [vec_sorted[0]]
+            + [vec_sorted[(k * n) // bins] for k in range(1, bins)]
+            + [vec_sorted[-1]]
+        )
+
+    @staticmethod
+    def discretize(vec, bounds):
+        """Discretize vector by quantile bounds
+
+        This is a static helper method to discretize a numerical
+        vector according to quantile bounds calculated by the
+        quantile_bounds(vec, bits) static method.
+
+        Args:
+            vec: numerical vector
+            bounds: (n + 1) n-quantile bounds including extrema
+
+        Returns:
+            Discretized bit-row vector, least-significant first
+        """
+
+        bounds = bounds[1:]
+        bounds_len = len(bounds)
+        bits = bounds_len.bit_length() - 1
+        n = len(vec)
+        vec_discrete = [[False] * n for _ in range(bits)]
+        for i, v in enumerate(vec):
+            p = 0
+            while (p < bounds_len) and (v > bounds[p]):
+                p += 1
+            for b in range(bits):
+                vec_discrete[b][i] = bool((p >> b) & 1)
+
+        return vec_discrete
+
+    @staticmethod
+    def flatten_and_transpose(arr):
+        """Flatten and transpose feature matrix
+
+        This is a static helper method to convert a multi-feature
+        bit-row matrix to an observation-row matrix with flat
+        feature columns.
+
+        Args:
+            arr: bit-row matrix
+
+        Returns:
+            Observation-row matrix with flat feature columns
+        """
+        return list(zip(*[item for sublist in arr for item in sublist]))
+
+    @staticmethod
+    def bin_endpoints_average(bounds):
+        """Bin endpoints average
+
+        This is a static helper method that accepts the output
+        bins from quantile_bounds() and returns the average points
+        between the bin endpoints. (This is NOT always necessarily
+        the best heuristic for how to convert binned results back
+        to numerical results, but it is often a reasonable way.)
+
+        Args:
+            bounds: (n + 1) n-quantile bounds including extrema
+
+        Returns:
+            List of average points between the bin endpoints
+        """
+        return [((bounds[i] + bounds[i + 1]) / 2) for i in range(len(bounds) - 1)]
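The four new static helpers form a small preprocessing chain for loading classical data into neurons: quantile_bounds() picks bin edges, discretize() turns a numerical vector into bit-rows, flatten_and_transpose() reshapes multi-feature bit-rows into observation rows, and bin_endpoints_average() maps bin indices back to representative values. A minimal sketch with made-up data, assuming the wheel is installed:

from pyqrack import QrackNeuron

values = [0.3, 1.7, 0.9, 2.4, 0.1, 1.1, 2.0, 0.6]
bits = 2  # 2 ** 2 = 4 quantile bins

bounds = QrackNeuron.quantile_bounds(values, bits)    # 5 bounds, including min and max
bit_rows = QrackNeuron.discretize(values, bounds)     # 2 bit-rows, least-significant first
rows = QrackNeuron.flatten_and_transpose([bit_rows])  # one boolean row per observation
centers = QrackNeuron.bin_endpoints_average(bounds)   # 4 representative values, one per bin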
pyqrack/qrack_neuron_torch_layer.py
CHANGED
@@ -1,4 +1,4 @@
-# (C) Daniel Strano and the Qrack contributors 2017-
+# (C) Daniel Strano and the Qrack contributors 2017-2026. All rights reserved.
 #
 # Initial draft by Elara (OpenAI custom GPT)
 # Refined and architecturally clarified by Dan Strano
@@ -6,6 +6,11 @@
 # Use of this source code is governed by an MIT-style license that can be
 # found in the LICENSE file or at https://opensource.org/licenses/MIT.

+import itertools
+import math
+import random
+import sys
+
 _IS_TORCH_AVAILABLE = True
 try:
     import torch
@@ -14,82 +19,133 @@ try:
 except ImportError:
     _IS_TORCH_AVAILABLE = False

+from .pauli import Pauli
 from .qrack_neuron import QrackNeuron
+from .qrack_simulator import QrackSimulator
 from .neuron_activation_fn import NeuronActivationFn

-from itertools import chain, combinations

+# Parameter-shift rule
+param_shift_eps = math.pi / 2
+# Neuron angle initialization
+init_phi = math.asin(0.5)

-# From https://stackoverflow.com/questions/1482308/how-to-get-all-subsets-of-a-set-powerset#answer-1482316
-def powerset(iterable):
-    "powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3,) (1,2,3)"
-    s = list(iterable)
-    return chain.from_iterable(combinations(s, r) for r in range(len(s) + 1))

+class QrackNeuronTorchFunction(Function if _IS_TORCH_AVAILABLE else object):
+    """Static forward/backward/apply functions for QrackNeuronTorch"""

-
-
+    @staticmethod
+    def forward(ctx, x, neuron):
+        ctx.neuron = neuron
+        ctx.simulator = neuron.simulator
+        ctx.save_for_backward(x)

-
-        neuron(
-    """
+        # Baseline probability BEFORE applying this neuron's unitary
+        pre_prob = neuron.simulator.prob(neuron.target)

-
-
-        self.neuron = neuron
-
-    def forward(self, x):
-        neuron = self.neuron
+        angles = x.detach().cpu().numpy() if x.requires_grad else x.numpy()
+        neuron.set_angles(angles)
         neuron.predict(True, False)

-
-
-
-class QrackNeuronFunction(Function if _IS_TORCH_AVAILABLE else object):
-    """Static forward/backward/apply functions for QrackTorchNeuron"""
-
-    @staticmethod
-    def forward(ctx, neuron):
-        # Save for backward
-        ctx.neuron = neuron
+        # Probability AFTER applying this neuron's unitary
+        post_prob = neuron.simulator.prob(neuron.target)
+        ctx.post_prob = post_prob

-
-
-        final_prob = neuron.simulator.prob(neuron.target)
-        ctx.delta = final_prob - init_prob
+        delta = math.asin(post_prob) - math.asin(pre_prob)
+        ctx.delta = delta

-
-
-            if _IS_TORCH_AVAILABLE
-            else ctx.delta
-        )
+        # Return shape: (1,)
+        return x.new_tensor([delta])

     @staticmethod
     def backward(ctx, grad_output):
+        (x,) = ctx.saved_tensors
         neuron = ctx.neuron
+        neuron.set_simulator(ctx.simulator)
+        post_prob = ctx.post_prob
+
+        angles = x.detach().cpu().numpy() if x.requires_grad else x.numpy()

-
+        # Restore simulator to state BEFORE this neuron's unitary
+        neuron.set_angles(angles)
         neuron.unpredict()
-
-        reverse_delta = pre_unpredict - post_unpredict
+        pre_sim = neuron.simulator

-
+        grad_x = torch.zeros_like(x)
+
+        for i in range(x.shape[0]):
+            angle = angles[i]
+
+            # θ + π/2
+            angles[i] = angle + param_shift_eps
+            neuron.set_angles(angles)
+            neuron.simulator = pre_sim.clone()
+            neuron.predict(True, False)
+            p_plus = neuron.simulator.prob(neuron.target)
+
+            # θ − π/2
+            angles[i] = angle - param_shift_eps
+            neuron.set_angles(angles)
+            neuron.simulator = pre_sim.clone()
+            neuron.predict(True, False)
+            p_minus = neuron.simulator.prob(neuron.target)
+
+            # Parameter-shift gradient
+            grad_x[i] = 0.5 * (p_plus - p_minus)
+
+            angles[i] = angle
+
+        # Restore simulator
+        neuron.set_simulator(pre_sim)
+
+        # Apply chain rule and upstream gradient
+        grad_x *= grad_output[0] / math.sqrt(max(1.0 - post_prob * post_prob, 1e-6))
+
+        return grad_x, None

-
-
-
+
+class QrackNeuronTorch(nn.Module if _IS_TORCH_AVAILABLE else object):
+    """Torch wrapper for QrackNeuron
+
+    Attributes:
+        neuron(QrackNeuron): QrackNeuron backing this torch wrapper
+    """
+
+    def __init__(self, neuron, x):
+        super().__init__()
+        self.neuron = neuron
+        self.weights = nn.Parameter(x)
+
+    def forward(self):
+        return QrackNeuronTorchFunction.apply(self.weights, self.neuron)


 class QrackNeuronTorchLayer(nn.Module if _IS_TORCH_AVAILABLE else object):
-    """Torch layer wrapper for QrackNeuron (with
+    """Torch layer wrapper for QrackNeuron (with maximally expressive set of neurons between inputs and outputs)
+
+    Attributes:
+        simulator (QrackSimulator): Prototype simulator that batching copies to use with QrackNeuron instances
+        simulators (list[QrackSimulator]): In-flight copies of prototype simulator corresponding to batch count
+        input_indices (list[int], read-only): simulator qubit indices used as QrackNeuron inputs
+        output_indices (list[int], read-only): simulator qubit indices used as QrackNeuron outputs
+        hidden_indices (list[int], read-only): simulator qubit indices used as QrackNeuron hidden inputs (in maximal superposition)
+        neurons (ModuleList[QrackNeuronTorch]): QrackNeuronTorch wrappers (for PyQrack QrackNeurons) in this layer, corresponding to weights
+        weights (ParameterList): List of tensors corresponding one-to-one with weights of list of neurons
+        apply_fn (Callable[Tensor, QrackNeuronTorch]): Corresponds to QrackNeuronTorchFunction.apply(x, neuron_wrapper) (or override with a custom implementation)
+        backward_fn (Callable[Tensor, Tensor]): Corresponds to QrackNeuronTorchFunction._backward(x, neuron_wrapper) (or override with a custom implementation)
+    """

     def __init__(
         self,
-
-
-
+        input_qubits,
+        output_qubits,
+        hidden_qubits=None,
+        lowest_combo_count=0,
+        highest_combo_count=2,
         activation=int(NeuronActivationFn.Generalized_Logistic),
+        dtype=torch.float if _IS_TORCH_AVAILABLE else float,
         parameters=None,
+        **kwargs
     ):
         """
         Initialize a QrackNeuron layer for PyTorch with a power set of neurons connecting inputs to outputs.
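The rewritten backward() uses the two-term parameter-shift rule: each angle is shifted by ±π/2 (param_shift_eps), the target-qubit probability is re-evaluated on a clone of the pre-neuron simulator, and half the difference is taken as the gradient; because forward() returns asin(post_prob) − asin(pre_prob), the result is then scaled by d asin(p)/dp = 1/sqrt(1 − p²). A standalone numeric check of the shift identity on a toy single-rotation model, p(θ) = sin²(θ/2) (illustrative only, not package code):

import math

def p(theta):
    # Probability of measuring |1> after RY(theta) applied to |0>
    return math.sin(theta / 2) ** 2

theta = 0.7
analytic = 0.5 * math.sin(theta)  # d/dtheta of sin^2(theta / 2)
shifted = 0.5 * (p(theta + math.pi / 2) - p(theta - math.pi / 2))
assert abs(analytic - shifted) < 1e-12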
@@ -97,74 +153,94 @@ class QrackNeuronTorchLayer(nn.Module if _IS_TORCH_AVAILABLE else object):

         Args:
             sim (QrackSimulator): Simulator into which predictor features are loaded
-
-
+            input_qubits (int): Count of inputs (1 per qubit)
+            output_qubits (int): Count of outputs (1 per qubit)
+            hidden_qubits (int): Count of "hidden" inputs (1 per qubit, always initialized to |+>, suggested to be same a highest_combo_count)
+            lowest_combo_count (int): Lowest combination count of input qubits iterated (0 is bias)
+            highest_combo_count (int): Highest combination count of input qubits iterated
             activation (int): Integer corresponding to choice of activation function from NeuronActivationFn
-            parameters (list[float]): (Optional) Flat list of initial neuron parameters, corresponding to little-endian basis states of
+            parameters (list[float]): (Optional) Flat list of initial neuron parameters, corresponding to little-endian basis states of input + hidden qubits, repeated for ascending combo count, repeated for each output index
         """
         super(QrackNeuronTorchLayer, self).__init__()
-
-
-        self.
-        self.
-        self.
-
-
-
-        )
-
-        # Create neurons from all powerset input combinations, projecting to coherent output qubits
-        self.neurons = nn.ModuleList(
-            [
-                QrackTorchNeuron(
-                    QrackNeuron(simulator, list(input_subset), output_id, activation)
-                )
-                for input_subset in powerset(input_indices)
-                for output_id in output_indices
-            ]
+        if hidden_qubits is None:
+            hidden_qubits = highest_combo_count
+        self.simulator = QrackSimulator(input_qubits + hidden_qubits + output_qubits, **kwargs)
+        self.simulators = []
+        self.input_indices = list(range(input_qubits))
+        self.hidden_indices = list(range(input_qubits, input_qubits + hidden_qubits))
+        self.output_indices = list(
+            range(input_qubits + hidden_qubits, input_qubits + hidden_qubits + output_qubits)
         )
+        self.activation = NeuronActivationFn(activation)
+        self.dtype = dtype
+        self.apply_fn = QrackNeuronTorchFunction.apply

-        #
+        # Create neurons from all input combinations, projecting to coherent output qubits
+        neurons = []
         param_count = 0
-        for neuron_wrapper in self.neurons:
-            neuron = neuron_wrapper.neuron
-            p_count = 1 << len(neuron.controls)
-            neuron.set_angles(
-                parameters[param_count : (param_count + p_count + 1)]
-                if parameters
-                else ([0.0] * p_count)
-            )
-            param_count += p_count
-
-        self.weights = nn.ParameterList()
-        for pid in range(param_count):
-            self.weights.append(
-                nn.Parameter(torch.tensor(parameters[pid] if parameters else 0.0))
-            )
-
-    def forward(self, _):
-        # Assume quantum outputs should overwrite the simulator state
         for output_id in self.output_indices:
-
-            self.
+            for k in range(lowest_combo_count, highest_combo_count + 1):
+                for input_subset in itertools.combinations(self.input_indices, k):
+                    p_count = 1 << len(input_subset)
+                    angles = (
+                        (
+                            torch.tensor(
+                                parameters[param_count : (param_count + p_count)], dtype=dtype
+                            )
+                            if parameters
+                            else torch.zeros(p_count, dtype=dtype)
+                        )
+                    )
+                    neurons.append(
+                        QrackNeuronTorch(
+                            QrackNeuron(self.simulator, input_subset, output_id, activation), angles
+                        )
+                    )
+                    param_count += p_count
+        self.neurons = nn.ModuleList(neurons)
+
+    def forward(self, x):
+        B = x.shape[0]
+        x = x.view(B, -1)
+
+        self.simulators.clear()
+
+        self.simulator.reset_all()
+        # Prepare hidden predictors
+        for hidden_id in self.hidden_indices:
+            self.simulator.h(hidden_id)
+        # Prepare a maximally uncertain output state.
+        for output_id in self.output_indices:
             self.simulator.h(output_id)

-        #
-
+        # Group neurons by output target once
+        by_out = {out: [] for out in self.output_indices}
         for neuron_wrapper in self.neurons:
-
-            p_count = 1 << len(neuron.controls)
-            angles = [
-                w.item() for w in self.weights[param_count : (param_count + p_count)]
-            ]
-            neuron.set_angles(angles)
-            param_count += p_count
+            by_out[neuron_wrapper.neuron.target].append(neuron_wrapper)

-
-        for
-            self.
+        batch_rows = []
+        for b in range(B):
+            simulator = self.simulator.clone()
+            self.simulators.append(simulator)
+
+            for q, input_id in enumerate(self.input_indices):
+                simulator.r(Pauli.PauliY, math.pi * x[b, q].item(), input_id)
+
+            row = []
+            for out in self.output_indices:
+                phi = torch.tensor(init_phi, device=x.device, dtype=x.dtype)
+
+                for neuron_wrapper in by_out[out]:
+                    neuron_wrapper.neuron.set_simulator(simulator)
+                    phi += self.apply_fn(
+                        neuron_wrapper.weights,
+                        neuron_wrapper.neuron
+                    ).squeeze()
+
+                # Convert angle back to probability
+                p = torch.clamp(torch.sin(phi), min=0.0)
+                row.append(p)

-
-        outputs = [self.simulator.prob(output_id) for output_id in self.output_indices]
+            batch_rows.append(torch.stack(row))

-        return torch.
+        return torch.stack(batch_rows)
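With the new constructor, the layer owns its prototype simulator and builds one neuron per input-qubit combination (from lowest_combo_count up to highest_combo_count) per output qubit; forward() now takes a batch of feature rows and returns one probability per output qubit. A hedged usage sketch, assuming torch and the wheel are installed (sizes and data are illustrative):

import torch
from pyqrack.qrack_neuron_torch_layer import QrackNeuronTorchLayer

layer = QrackNeuronTorchLayer(
    input_qubits=4,
    output_qubits=2,
    highest_combo_count=2,  # neurons for 0-, 1-, and 2-qubit input combinations
)

x = torch.rand(8, 4)  # batch of 8 observations, 4 features scaled into [0, 1]
y = layer(x)          # shape (8, 2), one probability per output qubit
loss = y.sum()
loss.backward()       # parameter-shift gradients flow to each neuron's weights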