pyqrack-cpu 1.82.0__py3-none-macosx_15_0_arm64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,381 @@
1
+ # (C) Daniel Strano and the Qrack contributors 2017-2025. All rights reserved.
2
+ #
3
+ # Use of this source code is governed by an MIT-style license that can be
4
+ # found in the LICENSE file or at https://opensource.org/licenses/MIT.
5
+
6
+ import ctypes
7
+ import sys
8
+
9
+ from .qrack_system import Qrack
10
+ from .neuron_activation_fn import NeuronActivationFn
11
+
12
+
13
+ class QrackNeuron:
14
+ """Class that exposes the QNeuron class of Qrack
15
+
16
+ This model of a "quantum neuron" is based on the concept of a "uniformly controlled"
17
+ rotation of a single output qubit around the Pauli Y axis, and has been developed by
18
+ others. In our case, the primary relevant gate could also be called a
19
+ single-qubit-target multiplexer.
20
+
21
+ (See https://arxiv.org/abs/quant-ph/0407010 for an introduction to "uniformly controlled
22
+ gates.")
23
+
24
+ QrackNeuron is meant to be interchangeable with a single classical neuron, as in
25
+ conventional neural net software. It differs from classical neurons in conventional
26
+ neural nets, in that the "synaptic cleft" is modelled as a single qubit. Hence, this
27
+ neuron can train and predict in superposition.
28
+
29
+ Attributes:
30
+ nid(int): Qrack ID of this neuron
31
+ simulator(QrackSimulator): Simulator instance for all synaptic clefts of the neuron
32
+ controls(list(int)): Indices of all "control" qubits, for neuron input
33
+ target(int): Index of "target" qubit, for neuron output
34
+ activation_fn(NeuronActivationFn): Activation function choice
35
+ alpha(float): Activation function parameter, if required
36
+ angles(list[ctypes.c_float]): (or c_double) Memory for neuron prediction angles
37
+ """
38
+
39
+ def _get_error(self):
40
+ return Qrack.qrack_lib.get_error(self.simulator.sid)
41
+
42
+ def _throw_if_error(self):
43
+ if self._get_error() != 0:
44
+ raise RuntimeError("QrackNeuron C++ library raised exception.")
45
+
46
+ def __init__(
47
+ self,
48
+ simulator,
49
+ controls,
50
+ target,
51
+ activation_fn=NeuronActivationFn.Sigmoid,
52
+ alpha=1.0,
53
+ _init=True,
54
+ ):
55
+ self.simulator = simulator
56
+ self.controls = controls
57
+ self.target = target
58
+ self.activation_fn = activation_fn
59
+ self.alpha = alpha
60
+ self.angles = QrackNeuron._real1_byref([0.0] * (1 << len(controls)))
+ # Guard __del__: nid is only assigned below when _init is True
+ self.nid = None
61
+
62
+ if not _init:
63
+ return
64
+
65
+ self.nid = Qrack.qrack_lib.init_qneuron(
66
+ simulator.sid,
67
+ len(controls),
68
+ QrackNeuron._ulonglong_byref(controls),
69
+ target,
70
+ )
71
+
72
+ self._throw_if_error()
73
+
74
+ def __del__(self):
75
+ if self.nid is not None:
76
+ Qrack.qrack_lib.destroy_qneuron(self.nid)
77
+ self.nid = None
78
+
79
+ def clone(self):
80
+ """Clones this neuron.
81
+
82
+ Create a new, independent neuron instance with identical angles,
83
+ inputs, and output, for the same QrackSimulator.
84
+
85
+ Raises:
86
+ RuntimeError: QrackNeuron C++ library raised an exception.
87
+ """
88
+ result = QrackNeuron(
89
+ self.simulator,
90
+ self.controls,
91
+ self.target,
92
+ )
93
+ result.nid = Qrack.qrack_lib.clone_qneuron(self.simulator.sid)
94
+ # Copy into a fresh ctypes buffer (slicing a ctypes array yields a plain list)
+ result.angles = QrackNeuron._real1_byref(list(self.angles))
95
+ self._throw_if_error()
96
+ return result
97
+
98
+ @staticmethod
99
+ def _ulonglong_byref(a):
100
+ return (ctypes.c_ulonglong * len(a))(*a)
101
+
102
+ @staticmethod
103
+ def _real1_byref(a):
104
+ # This needs to be c_double, if PyQrack is built with fp64.
105
+ if Qrack.fppow < 6:
106
+ return (ctypes.c_float * len(a))(*a)
107
+ return (ctypes.c_double * len(a))(*a)
108
+
109
+ def set_simulator(self, s, controls=None, target=None):
110
+ """Set the neuron simulator
111
+
112
+ Set the simulator used by this neuron
113
+
114
+ Args:
115
+ s(QrackSimulator): The simulator to use
116
+ controls(list[int]): The control qubit IDs to use
117
+ target(int): The output qubit ID to use
118
+
119
+ Raises:
120
+ RuntimeError: QrackSimulator raised an exception.
121
+ """
122
+ if controls is None:
123
+ controls = self.controls
124
+ if target is None:
125
+ target = self.target
126
+ Qrack.qrack_lib.set_qneuron_sim(
127
+ self.nid,
128
+ s.sid,
129
+ len(controls),
130
+ QrackNeuron._ulonglong_byref(controls),
131
+ target,
132
+ )
133
+ self._throw_if_error()
134
+ self.simulator = s
135
+ self.controls = controls
136
+ self.target = target
137
+
138
+ def set_qubit_ids(self, controls, target=None):
139
+ """Set the neuron qubit identifiers
140
+
141
+ Set the control and target qubits within the simulator
142
+
143
+ Args:
144
+ controls(list[int]): The control qubit IDs to use
145
+ target(int): The output qubit ID to use
146
+
147
+ Raises:
148
+ RuntimeError: QrackSimulator raised an exception.
149
+ """
150
+ if target is None:
151
+ target = self.target
152
+ self.set_simulator(self.simulator, controls, target)
153
+
154
+ def set_angles(self, a):
155
+ """Directly sets the neuron parameters.
156
+
157
+ Set all synaptic parameters of the neuron directly, by a list
158
+ enumerated over the integer permutations of input qubits.
159
+
160
+ Args:
161
+ a(list(double)): List of input permutation angles
162
+
163
+ Raises:
164
+ ValueError: Angles 'a' in QrackNeuron.set_angles() must contain at least (2 ** len(self.controls)) elements.
165
+ RuntimeError: QrackSimulator raised an exception.
166
+ """
167
+ if len(a) < (1 << len(self.controls)):
168
+ raise ValueError(
169
+ "Angles 'a' in QrackNeuron.set_angles() must contain at least (2 ** len(self.controls)) elements."
170
+ )
171
+ self.angles = QrackNeuron._real1_byref(a)
172
+
173
+ def get_angles(self):
174
+ """Directly gets the neuron parameters.
175
+
176
+ Get all synaptic parameters of the neuron directly, as a list
177
+ enumerated over the integer permutations of input qubits.
178
+
179
+ Raises:
180
+ RuntimeError: QrackNeuron C++ library raised an exception.
181
+ """
182
+ return list(self.angles)
183
+
184
+ def set_alpha(self, a):
185
+ """Set the neuron 'alpha' parameter.
186
+
187
+ To enable nonlinear activation, `QrackNeuron` has an 'alpha'
188
+ parameter that is applied as a power to its angles, before
189
+ learning and prediction. This makes the activation function
190
+ sharper (or less sharp).
191
+ """
192
+ self.alpha = a
193
+
194
+ def set_activation_fn(self, f):
195
+ """Sets the activation function of this QrackNeuron
196
+
197
+ Nonlinear activation functions can be important to neural net
198
+ applications, like DNN. The available activation functions are
199
+ enumerated in `NeuronActivationFn`.
200
+ """
201
+ self.activation_fn = f
202
+
203
+ def predict(self, e=True, r=True):
204
+ """Predict based on training
205
+
206
+ "Predict" the anticipated output, based on input and training.
207
+ By default, "predict()" will initialize the output qubit as by
208
+ resetting to |0> and then acting a Hadamard gate. From that
209
+ state, the method amends the output qubit upon the basis of
210
+ the state of its input qubits, applying a rotation around
211
+ Pauli Y axis according to the angle learned for the input.
212
+
213
+ Args:
214
+ e(bool): If False, predict the opposite
215
+ r(bool): If True, start by resetting the output to 50/50
216
+
217
+ Raises:
218
+ RuntimeError: QrackNeuron C++ library raised an exception.
219
+ """
220
+ result = Qrack.qrack_lib.qneuron_predict(self.nid, self.angles, e, r, self.activation_fn, self.alpha)
221
+ self._throw_if_error()
222
+ return result
223
+
224
+ def unpredict(self, e=True):
225
+ """Uncompute a prediction
226
+
227
+ Uncompute a 'prediction' of the anticipated output, based on
228
+ input and training.
229
+
230
+ Args:
231
+ e(bool): If False, unpredict the opposite
232
+
233
+ Raises:
234
+ RuntimeError: QrackNeuron C++ library raised an exception.
235
+ """
236
+ result = Qrack.qrack_lib.qneuron_unpredict(self.nid, self.angles, e, self.activation_fn, self.alpha)
237
+ self._throw_if_error()
238
+ return result
239
+
240
+ def learn_cycle(self, e=True):
241
+ """Run a learning cycle
242
+
243
+ A learning cycle consists of predicting a result, saving the
244
+ classical outcome, and uncomputing the prediction.
245
+
246
+ Args:
247
+ e(bool): If False, predict the opposite
248
+
249
+ Raises:
250
+ RuntimeError: QrackNeuron C++ library raised an exception.
251
+ """
252
+ Qrack.qrack_lib.qneuron_learn_cycle(self.nid, self.angles, e, self.activation_fn, self.alpha)
253
+ self._throw_if_error()
254
+
255
+ def learn(self, eta, e=True, r=True):
256
+ """Learn from current qubit state
257
+
258
+ "Learn" to associate current inputs with output. Based on
259
+ input qubit states and volatility 'eta,' the input state
260
+ synaptic parameter is updated to prefer the "e" ("expected")
261
+ output.
262
+
263
+ Args:
264
+ eta(double): Training volatility, 0 to 1
265
+ e(bool): If False, predict the opposite
266
+ r(bool): If True, start by resetting the output to 50/50
267
+
268
+ Raises:
269
+ RuntimeError: QrackNeuron C++ library raised an exception.
270
+ """
271
+ Qrack.qrack_lib.qneuron_learn(self.nid, self.angles, eta, e, r, self.activation_fn, self.alpha)
272
+ self._throw_if_error()
273
+
274
+ def learn_permutation(self, eta, e=True, r=True):
275
+ """Learn from current classical state
276
+
277
+ Learn to associate current inputs with output, under the
278
+ assumption that the inputs and outputs are "classical."
279
+ Based on input qubit states and volatility 'eta,' the input
280
+ state angle is updated to prefer the "e" ("expected") output.
281
+
282
+ Args:
283
+ eta(double): Training volatility, 0 to 1
284
+ e(bool): If False, predict the opposite
285
+ r(bool): If True, start by resetting the output to 50/50
286
+
287
+ Raises:
288
+ RuntimeError: QrackNeuron C++ library raised an exception.
289
+ """
290
+ Qrack.qrack_lib.qneuron_learn_permutation(self.nid, self.angles, eta, e, r, self.activation_fn, self.alpha)
291
+ self._throw_if_error()
292
+
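Illustrative sketch (not part of the packaged source): training a single QrackNeuron on the two-input XOR truth table with learn_permutation(), then predicting. It assumes QrackSimulator and QrackNeuron are importable from the package top level and that QrackSimulator exposes the usual x(q) and prob(q) methods.

    from pyqrack import QrackSimulator, QrackNeuron

    sim = QrackSimulator(3)              # qubits 0, 1 = inputs; qubit 2 = output
    neuron = QrackNeuron(sim, [0, 1], 2)

    truth_table = {(0, 0): 0, (0, 1): 1, (1, 0): 1, (1, 1): 0}  # XOR

    for _ in range(4):                   # a few passes of permutation learning
        for (a, b), expected in truth_table.items():
            if a:
                sim.x(0)                 # load the classical input permutation
            if b:
                sim.x(1)
            neuron.learn_permutation(0.5, e=bool(expected), r=True)
            if a:
                sim.x(0)                 # unload the inputs (X is self-inverse)
            if b:
                sim.x(1)

    sim.x(0)                             # query input (1, 0)
    neuron.predict(True, True)           # r=True resets the output to 50/50 first
    print(sim.prob(2))                   # P(output = |1>), ideally near 1 for XOR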
293
+ @staticmethod
294
+ def quantile_bounds(vec, bits):
295
+ """Calculate vector quantile bounds
296
+
297
+ This is a static helper method to calculate the quantile
298
+ bounds of 2 ** bits worth of quantiles.
299
+
300
+ Args:
301
+ vec: numerical vector
302
+ bits: log2() of quantile count
303
+
304
+ Returns:
305
+ The (n + 1) quantile bounds for n-quantile division, including
306
+ minimum and maximum values
307
+ """
308
+
309
+ bins = 1 << bits
310
+ n = len(vec)
311
+ vec_sorted = sorted(vec)
312
+
313
+ return (
314
+ [vec_sorted[0]]
315
+ + [vec_sorted[(k * n) // bins] for k in range(1, bins)]
316
+ + [vec_sorted[-1]]
317
+ )
318
+
319
+ @staticmethod
320
+ def discretize(vec, bounds):
321
+ """Discretize vector by quantile bounds
322
+
323
+ This is a static helper method to discretize a numerical
324
+ vector according to quantile bounds calculated by the
325
+ quantile_bounds(vec, bits) static method.
326
+
327
+ Args:
328
+ vec: numerical vector
329
+ bounds: (n + 1) n-quantile bounds including extrema
330
+
331
+ Returns:
332
+ Discretized bit-row matrix, least-significant bit row first
333
+ """
334
+
335
+ bounds = bounds[1:]
336
+ bounds_len = len(bounds)
337
+ bits = bounds_len.bit_length() - 1
338
+ n = len(vec)
339
+ vec_discrete = [[False] * n for _ in range(bits)]
340
+ for i, v in enumerate(vec):
341
+ p = 0
342
+ while (p < bounds_len) and (v > bounds[p]):
343
+ p += 1
344
+ for b in range(bits):
345
+ vec_discrete[b][i] = bool((p >> b) & 1)
346
+
347
+ return vec_discrete
348
+
349
+ @staticmethod
350
+ def flatten_and_transpose(arr):
351
+ """Flatten and transpose feature matrix
352
+
353
+ This is a static helper method to convert a multi-feature
354
+ bit-row matrix to an observation-row matrix with flat
355
+ feature columns.
356
+
357
+ Args:
358
+ arr: bit-row matrix
359
+
360
+ Returns:
361
+ Observation-row matrix with flat feature columns
362
+ """
363
+ return list(zip(*[item for sublist in arr for item in sublist]))
364
+
365
+ @staticmethod
366
+ def bin_endpoints_average(bounds):
367
+ """Bin endpoints average
368
+
369
+ This is a static helper method that accepts the output
370
+ bounds from quantile_bounds() and returns the average points
371
+ between the bin endpoints. (This is NOT always necessarily
372
+ the best heuristic for how to convert binned results back
373
+ to numerical results, but it is often a reasonable way.)
374
+
375
+ Args:
376
+ bounds: (n + 1) n-quantile bounds including extrema
377
+
378
+ Returns:
379
+ List of average points between the bin endpoints
380
+ """
381
+ return [((bounds[i] + bounds[i + 1]) / 2) for i in range(len(bounds) - 1)]
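Illustrative sketch (not part of the packaged source): the four static helpers above compose into a simple preprocessing pipeline, quantizing a numerical feature into 2-bit bins, producing observation rows of bits, and recovering representative values. It assumes QrackNeuron is importable from the package top level.

    from pyqrack import QrackNeuron

    values = [0.1, 0.4, 0.35, 0.8, 0.95, 0.2, 0.6, 0.75]

    bounds = QrackNeuron.quantile_bounds(values, 2)         # 5 bounds for 4 quantile bins
    bits = QrackNeuron.discretize(values, bounds)           # bits[b][i]: bit b of sample i's bin
    rows = QrackNeuron.flatten_and_transpose([bits])        # one row of bits per observation
    midpoints = QrackNeuron.bin_endpoints_average(bounds)   # bin midpoints for decoding

    print(bounds)     # [0.1, 0.35, 0.6, 0.8, 0.95]
    print(rows[3])    # (False, True): values[3] == 0.8 falls in bin 2, LSB first
    print(midpoints)  # [0.225, 0.475, 0.7, 0.875]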
@@ -0,0 +1,257 @@
1
+ # (C) Daniel Strano and the Qrack contributors 2017-2026. All rights reserved.
2
+ #
3
+ # Initial draft by Elara (OpenAI custom GPT)
4
+ # Refined and architecturally clarified by Dan Strano
5
+ #
6
+ # Use of this source code is governed by an MIT-style license that can be
7
+ # found in the LICENSE file or at https://opensource.org/licenses/MIT.
8
+
9
+ import ctypes
10
+ import itertools
11
+ import math
12
+ import random
13
+ import sys
14
+
15
+ _IS_TORCH_AVAILABLE = True
16
+ try:
17
+ import torch
18
+ import torch.nn as nn
19
+ from torch.autograd import Function
20
+ except ImportError:
21
+ _IS_TORCH_AVAILABLE = False
22
+
23
+ from .pauli import Pauli
24
+ from .qrack_neuron import QrackNeuron
25
+ from .qrack_simulator import QrackSimulator
26
+ from .qrack_system import Qrack
27
+ from .neuron_activation_fn import NeuronActivationFn
28
+
29
+
30
+ # Parameter-shift rule
31
+ param_shift_eps = math.pi / 2
32
+ # Neuron angle initialization
33
+ init_phi = math.asin(0.5)
34
+ # Systemic floating-point type
35
+ fp_type = ctypes.c_float if Qrack.fppow <= 5 else ctypes.c_double
36
+
37
+
38
+ class QrackNeuronTorchFunction(Function if _IS_TORCH_AVAILABLE else object):
39
+ """Static forward/backward/apply functions for QrackNeuronTorch"""
40
+
41
+ @staticmethod
42
+ def forward(ctx, x, neuron):
43
+ ctx.neuron = neuron
44
+ ctx.simulator = neuron.simulator
45
+ ctx.save_for_backward(x)
46
+
47
+ # Baseline probability BEFORE applying this neuron's unitary
48
+ pre_prob = neuron.simulator.prob(neuron.target)
49
+
50
+ angles = x.detach().cpu().numpy() if x.requires_grad else x.numpy()
51
+ neuron.angles = angles.ctypes.data_as(ctypes.POINTER(fp_type))
52
+ neuron.predict(True, False)
53
+
54
+ # Probability AFTER applying this neuron's unitary
55
+ post_prob = neuron.simulator.prob(neuron.target)
56
+ ctx.post_prob = post_prob
57
+
58
+ delta = math.asin(post_prob) - math.asin(pre_prob)
59
+ ctx.delta = delta
60
+
61
+ # Return shape: (1,)
62
+ return x.new_tensor([delta])
63
+
64
+ @staticmethod
65
+ def backward(ctx, grad_output):
66
+ (x,) = ctx.saved_tensors
67
+ neuron = ctx.neuron
68
+ neuron.set_simulator(ctx.simulator)
69
+ post_prob = ctx.post_prob
70
+
71
+ angles = x.detach().cpu().numpy() if x.requires_grad else x.numpy()
72
+
73
+ # Restore simulator to state BEFORE this neuron's unitary
74
+ neuron.angles = angles.ctypes.data_as(ctypes.POINTER(fp_type))
75
+ neuron.unpredict()
76
+ pre_sim = neuron.simulator
77
+
78
+ grad_x = torch.zeros_like(x)
79
+
80
+ for i in range(x.shape[0]):
81
+ angle = angles[i]
82
+
83
+ # θ + π/2
84
+ angles[i] = angle + param_shift_eps
85
+ neuron.set_angles(angles)
86
+ neuron.simulator = pre_sim.clone()
87
+ neuron.predict(True, False)
88
+ p_plus = neuron.simulator.prob(neuron.target)
89
+
90
+ # θ − π/2
91
+ angles[i] = angle - param_shift_eps
92
+ neuron.set_angles(angles)
93
+ neuron.simulator = pre_sim.clone()
94
+ neuron.predict(True, False)
95
+ p_minus = neuron.simulator.prob(neuron.target)
96
+
97
+ # Parameter-shift gradient
98
+ grad_x[i] = 0.5 * (p_plus - p_minus)
99
+
100
+ angles[i] = angle
101
+
102
+ # Restore simulator
103
+ neuron.set_simulator(pre_sim)
104
+
105
+ # Apply chain rule and upstream gradient
106
+ grad_x *= grad_output[0] / math.sqrt(max(1.0 - post_prob * post_prob, 1e-6))
107
+
108
+ return grad_x, None
109
+
110
+
111
+ class QrackNeuronTorch(nn.Module if _IS_TORCH_AVAILABLE else object):
112
+ """Torch wrapper for QrackNeuron
113
+
114
+ Attributes:
115
+ neuron(QrackNeuron): QrackNeuron backing this torch wrapper
116
+ """
117
+
118
+ def __init__(self, neuron, x):
119
+ super().__init__()
120
+ self.neuron = neuron
121
+ self.weights = nn.Parameter(x)
122
+
123
+ def forward(self):
124
+ return QrackNeuronTorchFunction.apply(self.weights, self.neuron)
125
+
126
+
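Illustrative sketch of using the wrapper class above on its own (not part of the packaged source): wrap one QrackNeuron and its angle tensor, run forward(), and backpropagate. The simulator/neuron imports are assumed top-level exports; QrackNeuronTorch itself is the class defined just above (its import path depends on how the module is packaged), and the tensor dtype should match the PyQrack build's floating-point width.

    import torch
    from pyqrack import QrackSimulator, QrackNeuron

    sim = QrackSimulator(2)                       # qubit 0 = input, qubit 1 = output
    sim.h(1)                                      # predict(e, r=False) expects a 50/50 output
    neuron = QrackNeuron(sim, [0], 1)

    weights = torch.zeros(2, dtype=torch.float)   # one angle per input basis state
    wrapper = QrackNeuronTorch(neuron, weights)   # class defined above

    delta = wrapper()                             # shape (1,): change in asin(P(|1>))
    delta.sum().backward()                        # parameter-shift gradients
    print(delta, wrapper.weights.grad)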
127
+ class QrackNeuronTorchLayer(nn.Module if _IS_TORCH_AVAILABLE else object):
128
+ """Torch layer wrapper for QrackNeuron (with maximally expressive set of neurons between inputs and outputs)
129
+
130
+ Attributes:
131
+ simulator (QrackSimulator): Prototype simulator that is cloned per batch element for use by the QrackNeuron instances. (You may customize or overwrite its initialization, or replace the reference, before calling forward(x).)
132
+ simulators (list[QrackSimulator]): In-flight clones of the prototype simulator, one per batch element
133
+ input_indices (list[int], read-only): simulator qubit indices used as QrackNeuron inputs
134
+ output_indices (list[int], read-only): simulator qubit indices used as QrackNeuron outputs
135
+ hidden_indices (list[int], read-only): simulator qubit indices used as QrackNeuron hidden inputs (in maximal superposition)
136
+ neurons (ModuleList[QrackNeuronTorch]): QrackNeuronTorch wrappers (for PyQrack QrackNeurons) in this layer; each wrapper holds its trainable angles as neurons[i].weights
138
+ apply_fn (Callable[[Tensor, QrackNeuron], Tensor]): Defaults to QrackNeuronTorchFunction.apply(x, neuron) (or override with a custom implementation)
139
+ post_init_fn (Callable[QrackSimulator]): Function that is applied after forward(x) state initialization, before inference. (As the function depends on nothing but the simulator, it's differentiable.)
140
+ """
141
+
142
+ def __init__(
143
+ self,
144
+ input_qubits,
145
+ output_qubits,
146
+ hidden_qubits=None,
147
+ lowest_combo_count=0,
148
+ highest_combo_count=2,
149
+ activation=int(NeuronActivationFn.Generalized_Logistic),
150
+ parameters=None,
151
+ post_init_fn=lambda simulator: None,
152
+ **kwargs
153
+ ):
154
+ """
155
+ Initialize a QrackNeuron layer for PyTorch with a power set of neurons connecting inputs to outputs.
156
+ The inputs and outputs must take the form of discrete, binary features (loaded manually into the backing QrackSimulator).
157
+
158
+ Args:
159
+ sim (QrackSimulator): Simulator into which predictor features are loaded
160
+ input_qubits (int): Count of inputs (1 per qubit)
161
+ output_qubits (int): Count of outputs (1 per qubit)
162
+ hidden_qubits (int): (Optional) Count of "hidden" inputs (1 per qubit, always initialized to |+>, suggested to be same a highest_combo_count)
163
+ lowest_combo_count (int): (Optional) Lowest combination count of input qubits iterated (0 is bias)
164
+ highest_combo_count (int): (Optional) Highest combination count of input qubits iterated
165
+ activation (int): (Optional) Integer corresponding to choice of activation function from NeuronActivationFn
166
+ parameters (list[float]): (Optional) Flat list of initial neuron parameters, corresponding to little-endian basis states of input + hidden qubits, repeated for ascending combo count, repeated for each output index
167
+ post_init_fn (Callable[QrackSimulator]): (Optional) Function that is applied after forward(x) state initialization, before inference. (As the function depends on nothing but the simulator, it's differentiable.)
168
+ """
169
+ super(QrackNeuronTorchLayer, self).__init__()
170
+ if hidden_qubits is None:
171
+ hidden_qubits = highest_combo_count
172
+ self.simulator = QrackSimulator(input_qubits + hidden_qubits + output_qubits, **kwargs)
173
+ self.simulators = []
174
+ self.input_indices = list(range(input_qubits))
175
+ self.hidden_indices = list(range(input_qubits, input_qubits + hidden_qubits))
176
+ self.output_indices = list(
177
+ range(input_qubits + hidden_qubits, input_qubits + hidden_qubits + output_qubits)
178
+ )
179
+ self.activation = NeuronActivationFn(activation)
180
+ self.dtype = torch.float if Qrack.fppow <= 5 else torch.double
181
+ self.apply_fn = QrackNeuronTorchFunction.apply
182
+ self.post_init_fn = post_init_fn
183
+
184
+ # Create neurons from all input combinations, projecting to coherent output qubits
185
+ neurons = []
186
+ param_count = 0
187
+ for output_id in self.output_indices:
188
+ for k in range(lowest_combo_count, highest_combo_count + 1):
189
+ for input_subset in itertools.combinations(self.input_indices, k):
190
+ p_count = 1 << len(input_subset)
191
+ angles = (
192
+ (
193
+ torch.tensor(
194
+ parameters[param_count : (param_count + p_count)], dtype=self.dtype
195
+ )
196
+ if parameters
197
+ else torch.zeros(p_count, dtype=self.dtype)
198
+ )
199
+ )
200
+ neurons.append(
201
+ QrackNeuronTorch(
202
+ QrackNeuron(self.simulator, input_subset, output_id, activation), angles
203
+ )
204
+ )
205
+ param_count += p_count
206
+ self.neurons = nn.ModuleList(neurons)
207
+
208
+ # Prepare the state before feed-forward:
209
+
210
+ # Prepare hidden predictors
211
+ for hidden_id in self.hidden_indices:
212
+ self.simulator.h(hidden_id)
213
+ # Prepare a maximally uncertain output state.
214
+ for output_id in self.output_indices:
215
+ self.simulator.h(output_id)
216
+
217
+ def forward(self, x):
218
+ B = x.shape[0]
219
+ x = x.view(B, -1)
220
+
221
+ self.simulators.clear()
222
+
223
+ # Group neurons by output target once
224
+ by_out = {out: [] for out in self.output_indices}
225
+ for neuron_wrapper in self.neurons:
226
+ by_out[neuron_wrapper.neuron.target].append(neuron_wrapper)
227
+
228
+ batch_rows = []
229
+ for b in range(B):
230
+ simulator = self.simulator.clone()
231
+ self.simulators.append(simulator)
232
+
233
+ # Apply feed-forward
234
+ for q, input_id in enumerate(self.input_indices):
235
+ simulator.r(Pauli.PauliY, math.pi * x[b, q].item(), input_id)
236
+
237
+ # Differentiable post-initialization:
238
+ self.post_init_fn(simulator)
239
+
240
+ row = []
241
+ for out in self.output_indices:
242
+ phi = torch.tensor(init_phi, device=x.device, dtype=x.dtype)
243
+
244
+ for neuron_wrapper in by_out[out]:
245
+ neuron_wrapper.neuron.set_simulator(simulator)
246
+ phi += self.apply_fn(
247
+ neuron_wrapper.weights,
248
+ neuron_wrapper.neuron
249
+ ).squeeze()
250
+
251
+ # Convert angle back to probability
252
+ p = torch.clamp(torch.sin(phi), min=0.0)
253
+ row.append(p)
254
+
255
+ batch_rows.append(torch.stack(row))
256
+
257
+ return torch.stack(batch_rows)
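Illustrative end-to-end sketch (not from the package): fit a QrackNeuronTorchLayer to a tiny XOR-like dataset. The top-level import, learning rate, and epoch count are assumptions; adjust them to however this module is exposed in your installation.

    import torch
    from pyqrack import QrackNeuronTorchLayer   # assumed export; adjust if needed

    layer = QrackNeuronTorchLayer(input_qubits=2, output_qubits=1, highest_combo_count=2)

    # Inputs in [0, 1] become RY(pi * x) rotations on the input qubits; targets
    # are the desired P(|1>) of each output qubit. Use layer.dtype so the tensor
    # width matches the PyQrack build (fp32 vs. fp64).
    x = torch.tensor([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=layer.dtype)
    y = torch.tensor([[0], [1], [1], [0]], dtype=layer.dtype)

    optimizer = torch.optim.Adam(layer.parameters(), lr=0.05)
    loss_fn = torch.nn.MSELoss()

    for epoch in range(50):
        optimizer.zero_grad()
        pred = layer(x)                      # shape: (batch, output_qubits)
        loss = loss_fn(pred, y)
        loss.backward()
        optimizer.step()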