pyqrack-complex128 1.65.2__py3-none-macosx_14_0_arm64.whl → 1.81.0__py3-none-macosx_14_0_arm64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of pyqrack-complex128 might be problematic.

pyqrack/qrack_neuron.py CHANGED
@@ -31,7 +31,9 @@ class QrackNeuron:
          simulator(QrackSimulator): Simulator instance for all synaptic clefts of the neuron
          controls(list(int)): Indices of all "control" qubits, for neuron input
          target(int): Index of "target" qubit, for neuron output
-         tolerance(double): Rounding tolerance
+         activation_fn(NeuronActivationFn): Activation function choice
+         alpha(float): Activation function parameter, if required
+         angles(list[ctypes.c_float]): (or c_double) Memory for neuron prediction angles
      """

      def _get_error(self):
@@ -48,7 +50,6 @@ class QrackNeuron:
          target,
          activation_fn=NeuronActivationFn.Sigmoid,
          alpha=1.0,
-         tolerance=sys.float_info.epsilon,
          _init=True,
      ):
          self.simulator = simulator
@@ -56,7 +57,7 @@ class QrackNeuron:
          self.target = target
          self.activation_fn = activation_fn
          self.alpha = alpha
-         self.tolerance = tolerance
+         self.angles = QrackNeuron._real1_byref([0.0] * (1 << len(controls)))

          if not _init:
              return
@@ -64,11 +65,8 @@ class QrackNeuron:
          self.nid = Qrack.qrack_lib.init_qneuron(
              simulator.sid,
              len(controls),
-             self._ulonglong_byref(controls),
+             QrackNeuron._ulonglong_byref(controls),
              target,
-             activation_fn,
-             alpha,
-             tolerance,
          )

          self._throw_if_error()
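For reference, a minimal sketch of constructing a neuron against the updated signature: the `tolerance` argument is gone, and the activation function and alpha now stay on the Python object rather than being pushed into the C++ neuron at init time. Top-level `pyqrack` exports are assumed here.

```python
# Minimal sketch, assuming QrackSimulator, QrackNeuron, and NeuronActivationFn
# are exported from the top-level pyqrack package.
from pyqrack import NeuronActivationFn, QrackNeuron, QrackSimulator

sim = QrackSimulator(3)  # qubits 0 and 1 as inputs, qubit 2 as output
neuron = QrackNeuron(
    sim,
    [0, 1],  # control (input) qubit indices
    2,       # target (output) qubit index
    activation_fn=NeuronActivationFn.Sigmoid,
    alpha=1.0,
)
print(len(neuron.get_angles()))  # 2 ** len(controls) == 4 angles, initialized to 0.0
```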
@@ -91,23 +89,44 @@ class QrackNeuron:
              self.simulator,
              self.controls,
              self.target,
-             self.activation_fn,
-             self.alpha,
-             self.tolerance,
          )
-         self.nid = Qrack.qrack_lib.clone_qneuron(self.simulator.sid)
+         result.nid = Qrack.qrack_lib.clone_qneuron(self.simulator.sid)
+         result.angles = self.angles[:]
          self._throw_if_error()
          return result

-     def _ulonglong_byref(self, a):
+     @staticmethod
+     def _ulonglong_byref(a):
          return (ctypes.c_ulonglong * len(a))(*a)

-     def _real1_byref(self, a):
+     @staticmethod
+     def _real1_byref(a):
          # This needs to be c_double, if PyQrack is built with fp64.
          if Qrack.fppow < 6:
              return (ctypes.c_float * len(a))(*a)
          return (ctypes.c_double * len(a))(*a)

+     def set_simulator(self, s):
+         """Set the neuron simulator
+
+         Set the simulator used by this neuron
+
+         Args:
+             s(QrackSimulator): The simulator to use
+
+         Raises:
+             RuntimeError: QrackSimulator raised an exception.
+         """
+         Qrack.qrack_lib.set_qneuron_sim(
+             self.nid,
+             s.sid,
+             len(self.controls),
+             QrackNeuron._ulonglong_byref(self.controls),
+             self.target,
+         )
+         self._throw_if_error()
+         self.simulator = s
+
      def set_angles(self, a):
          """Directly sets the neuron parameters.

@@ -125,8 +144,7 @@ class QrackNeuron:
              raise ValueError(
                  "Angles 'a' in QrackNeuron.set_angles() must contain at least (2 ** len(self.controls)) elements."
              )
-         Qrack.qrack_lib.set_qneuron_angles(self.nid, self._real1_byref(a))
-         self._throw_if_error()
+         self.angles = QrackNeuron._real1_byref(a)

      def get_angles(self):
          """Directly gets the neuron parameters.
@@ -137,10 +155,7 @@ class QrackNeuron:
          Raises:
              RuntimeError: QrackNeuron C++ library raised an exception.
          """
-         ket = self._real1_byref([0.0] * (1 << len(self.controls)))
-         Qrack.qrack_lib.get_qneuron_angles(self.nid, ket)
-         self._throw_if_error()
-         return list(ket)
+         return list(self.angles)

      def set_alpha(self, a):
          """Set the neuron 'alpha' parameter.
@@ -149,13 +164,8 @@ class QrackNeuron:
          parameter that is applied as a power to its angles, before
          learning and prediction. This makes the activation function
          sharper (or less sharp).
-
-         Raises:
-             RuntimeError: QrackNeuron C++ library raised an exception.
          """
          self.alpha = a
-         Qrack.qrack_lib.set_qneuron_alpha(self.nid, a)
-         self._throw_if_error()

      def set_activation_fn(self, f):
          """Sets the activation function of this QrackNeuron
@@ -163,13 +173,8 @@ class QrackNeuron:
          Nonlinear activation functions can be important to neural net
          applications, like DNN. The available activation functions are
          enumerated in `NeuronActivationFn`.
-
-         Raises:
-             RuntimeError: QrackNeuron C++ library raised an exception.
          """
          self.activation_fn = f
-         Qrack.qrack_lib.set_qneuron_activation_fn(self.nid, f)
-         self._throw_if_error()

      def predict(self, e=True, r=True):
          """Predict based on training
@@ -188,7 +193,7 @@ class QrackNeuron:
          Raises:
              RuntimeError: QrackNeuron C++ library raised an exception.
          """
-         result = Qrack.qrack_lib.qneuron_predict(self.nid, e, r)
+         result = Qrack.qrack_lib.qneuron_predict(self.nid, self.angles, e, r, self.activation_fn, self.alpha)
          self._throw_if_error()
          return result

@@ -204,7 +209,7 @@ class QrackNeuron:
          Raises:
              RuntimeError: QrackNeuron C++ library raised an exception.
          """
-         result = Qrack.qrack_lib.qneuron_unpredict(self.nid, e)
+         result = Qrack.qrack_lib.qneuron_unpredict(self.nid, self.angles, e, self.activation_fn, self.alpha)
          self._throw_if_error()
          return result

@@ -220,7 +225,7 @@ class QrackNeuron:
          Raises:
              RuntimeError: QrackNeuron C++ library raised an exception.
          """
-         Qrack.qrack_lib.qneuron_learn_cycle(self.nid, e)
+         Qrack.qrack_lib.qneuron_learn_cycle(self.nid, self.angles, e, self.activation_fn, self.alpha)
          self._throw_if_error()

      def learn(self, eta, e=True, r=True):
@@ -239,7 +244,7 @@ class QrackNeuron:
          Raises:
              RuntimeError: QrackNeuron C++ library raised an exception.
          """
-         Qrack.qrack_lib.qneuron_learn(self.nid, eta, e, r)
+         Qrack.qrack_lib.qneuron_learn(self.nid, self.angles, eta, e, r, self.activation_fn, self.alpha)
          self._throw_if_error()

      def learn_permutation(self, eta, e=True, r=True):
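The prediction and learning calls now hand the host-side angles, activation function, and alpha to the C API on every invocation, but the Python-level usage is unchanged. A hedged training sketch, continuing the example above; treating `e` as the expected output value is an assumption about the (unchanged) `learn()` semantics:

```python
# Continuing the sketch above. Treating `e` as the expected output is an assumption.
eta = 0.5
data = [(0, 0, 0), (0, 1, 1), (1, 0, 1), (1, 1, 0)]  # toy XOR-style truth table

for x0, x1, y in data:
    sample_sim = QrackSimulator(3)        # fresh state for each observation
    if x0:
        sample_sim.x(0)                   # load classical inputs onto the control qubits
    if x1:
        sample_sim.x(1)
    neuron.set_simulator(sample_sim)
    neuron.learn(eta, e=bool(y))          # nudge the angles for this input pattern

# Predict on a fresh input (1, 0)
test_sim = QrackSimulator(3)
test_sim.x(0)
neuron.set_simulator(test_sim)
prob_one = neuron.predict()               # value returned by the updated qneuron_predict call
```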
@@ -258,5 +263,95 @@ class QrackNeuron:
          Raises:
              RuntimeError: QrackNeuron C++ library raised an exception.
          """
-         Qrack.qrack_lib.qneuron_learn_permutation(self.nid, eta, e, r)
+         Qrack.qrack_lib.qneuron_learn_permutation(self.nid, self.angles, eta, e, r, self.activation_fn, self.alpha)
          self._throw_if_error()
+
+     @staticmethod
+     def quantile_bounds(vec, bits):
+         """Calculate vector quantile bounds
+
+         This is a static helper method to calculate the quantile
+         bounds of 2 ** bits worth of quantiles.
+
+         Args:
+             vec: numerical vector
+             bits: log2() of quantile count
+
+         Returns:
+             Quantile (n + 1) bounds for n-quantile division, including
+             minimum and maximum values
+         """
+
+         bins = 1 << bits
+         n = len(vec)
+         vec_sorted = sorted(vec)
+
+         return (
+             [vec_sorted[0]]
+             + [vec_sorted[(k * n) // bins] for k in range(1, bins)]
+             + [vec_sorted[-1]]
+         )
+
+     @staticmethod
+     def discretize(vec, bounds):
+         """Discretize vector by quantile bounds
+
+         This is a static helper method to discretize a numerical
+         vector according to quantile bounds calculated by the
+         quantile_bounds(vec, bits) static method.
+
+         Args:
+             vec: numerical vector
+             bounds: (n + 1) n-quantile bounds including extrema
+
+         Returns:
+             Discretized bit-row vector, least-significant first
+         """
+
+         bounds = bounds[1:]
+         bounds_len = len(bounds)
+         bits = bounds_len.bit_length() - 1
+         n = len(vec)
+         vec_discrete = [[False] * n for _ in range(bits)]
+         for i, v in enumerate(vec):
+             p = 0
+             while (p < bounds_len) and (v > bounds[p]):
+                 p += 1
+             for b in range(bits):
+                 vec_discrete[b][i] = bool((p >> b) & 1)
+
+         return vec_discrete
+
+     @staticmethod
+     def flatten_and_transpose(arr):
+         """Flatten and transpose feature matrix
+
+         This is a static helper method to convert a multi-feature
+         bit-row matrix to an observation-row matrix with flat
+         feature columns.
+
+         Args:
+             arr: bit-row matrix
+
+         Returns:
+             Observation-row matrix with flat feature columns
+         """
+         return list(zip(*[item for sublist in arr for item in sublist]))
+
+     @staticmethod
+     def bin_endpoints_average(bounds):
+         """Bin endpoints average
+
+         This is a static helper method that accepts the output
+         bins from quantile_bounds() and returns the average points
+         between the bin endpoints. (This is NOT always necessarily
+         the best heuristic for how to convert binned results back
+         to numerical results, but it is often a reasonable way.)
+
+         Args:
+             bounds: (n + 1) n-quantile bounds including extrema
+
+         Returns:
+             List of average points between the bin endpoints
+         """
+         return [((bounds[i] + bounds[i + 1]) / 2) for i in range(len(bounds) - 1)]
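The four new static helpers form a small feature-preprocessing pipeline: bin a numeric column into 2 ** bits quantiles, expand it into bit-rows, flatten multiple features into observation rows, and map bin indices back to representative values. A hedged sketch of that pipeline, using the names exactly as added above:

```python
# Hedged sketch of the preprocessing helpers added above, called as QrackNeuron static methods.
from pyqrack import QrackNeuron

feature = [0.3, 1.7, 0.2, 2.9, 1.1, 0.8, 2.2, 0.5]
bits = 2  # 2 ** 2 == 4 quantile bins

bounds = QrackNeuron.quantile_bounds(feature, bits)    # 5 bounds, extrema included
bit_rows = QrackNeuron.discretize(feature, bounds)     # 2 bit-rows, least-significant first
flat = QrackNeuron.flatten_and_transpose([bit_rows])   # one row of bits per observation
centers = QrackNeuron.bin_endpoints_average(bounds)    # representative value per bin

# Recover an approximate numeric value for observation 0 from its bin index:
idx = sum((1 << b) for b in range(bits) if flat[0][b])
approx = centers[idx]
```

The hunks that follow modify the package's PyTorch integration, replacing the old power-set QrackTorchNeuron/QrackNeuronFunction machinery with QrackNeuronTorch, QrackNeuronTorchFunction, and a reworked QrackNeuronTorchLayer.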
@@ -1,4 +1,4 @@
- # (C) Daniel Strano and the Qrack contributors 2017-2025. All rights reserved.
+ # (C) Daniel Strano and the Qrack contributors 2017-2026. All rights reserved.
  #
  # Initial draft by Elara (OpenAI custom GPT)
  # Refined and architecturally clarified by Dan Strano
@@ -6,6 +6,12 @@
  # Use of this source code is governed by an MIT-style license that can be
  # found in the LICENSE file or at https://opensource.org/licenses/MIT.

+ import ctypes
+ import itertools
+ import math
+ import random
+ import sys
+
  _IS_TORCH_AVAILABLE = True
  try:
      import torch
@@ -14,82 +20,136 @@ try:
  except ImportError:
      _IS_TORCH_AVAILABLE = False

+ from .pauli import Pauli
  from .qrack_neuron import QrackNeuron
+ from .qrack_simulator import QrackSimulator
+ from .qrack_system import Qrack
  from .neuron_activation_fn import NeuronActivationFn

- from itertools import chain, combinations

+ # Parameter-shift rule
+ param_shift_eps = math.pi / 2
+ # Neuron angle initialization
+ init_phi = math.asin(0.5)
+ # Systemic floating-point type
+ fp_type = ctypes.c_float if Qrack.fppow <= 5 else ctypes.c_double

- # From https://stackoverflow.com/questions/1482308/how-to-get-all-subsets-of-a-set-powerset#answer-1482316
- def powerset(iterable):
-     "powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3,) (1,2,3)"
-     s = list(iterable)
-     return chain.from_iterable(combinations(s, r) for r in range(len(s) + 1))

+ class QrackNeuronTorchFunction(Function if _IS_TORCH_AVAILABLE else object):
+     """Static forward/backward/apply functions for QrackNeuronTorch"""

- class QrackTorchNeuron(nn.Module if _IS_TORCH_AVAILABLE else object):
-     """Torch wrapper for QrackNeuron
+     @staticmethod
+     def forward(ctx, x, neuron):
+         ctx.neuron = neuron
+         ctx.simulator = neuron.simulator
+         ctx.save_for_backward(x)

-     Attributes:
-         neuron(QrackNeuron): QrackNeuron backing this torch wrapper
-     """
+         # Baseline probability BEFORE applying this neuron's unitary
+         pre_prob = neuron.simulator.prob(neuron.target)

-     def __init__(self, neuron: QrackNeuron):
-         super().__init__()
-         self.neuron = neuron
-
-     def forward(self, x):
-         neuron = self.neuron
+         angles = x.detach().cpu().numpy() if x.requires_grad else x.numpy()
+         neuron.angles = angles.ctypes.data_as(ctypes.POINTER(fp_type))
          neuron.predict(True, False)

-         return neuron.simulator.prob(neuron.target)
-
-
- class QrackNeuronFunction(Function if _IS_TORCH_AVAILABLE else object):
-     """Static forward/backward/apply functions for QrackTorchNeuron"""
-
-     @staticmethod
-     def forward(ctx, neuron):
-         # Save for backward
-         ctx.neuron = neuron
+         # Probability AFTER applying this neuron's unitary
+         post_prob = neuron.simulator.prob(neuron.target)
+         ctx.post_prob = post_prob

-         init_prob = neuron.simulator.prob(neuron.target)
-         neuron.predict(True, False)
-         final_prob = neuron.simulator.prob(neuron.target)
-         ctx.delta = final_prob - init_prob
+         delta = math.asin(post_prob) - math.asin(pre_prob)
+         ctx.delta = delta

-         return (
-             torch.tensor([ctx.delta], dtype=torch.float32)
-             if _IS_TORCH_AVAILABLE
-             else ctx.delta
-         )
+         # Return shape: (1,)
+         return x.new_tensor([delta])

      @staticmethod
      def backward(ctx, grad_output):
+         (x,) = ctx.saved_tensors
          neuron = ctx.neuron
+         neuron.set_simulator(ctx.simulator)
+         post_prob = ctx.post_prob
+
+         angles = x.detach().cpu().numpy() if x.requires_grad else x.numpy()

-         pre_unpredict = neuron.simulator.prob(neuron.output_id)
+         # Restore simulator to state BEFORE this neuron's unitary
+         neuron.angles = angles.ctypes.data_as(ctypes.POINTER(fp_type))
          neuron.unpredict()
-         post_unpredict = neuron.simulator.prob(neuron.output_id)
-         reverse_delta = pre_unpredict - post_unpredict
+         pre_sim = neuron.simulator

-         grad = reverse_delta - ctx.delta
+         grad_x = torch.zeros_like(x)
+
+         for i in range(x.shape[0]):
+             angle = angles[i]
+
+             # θ + π/2
+             angles[i] = angle + param_shift_eps
+             neuron.set_angles(angles)
+             neuron.simulator = pre_sim.clone()
+             neuron.predict(True, False)
+             p_plus = neuron.simulator.prob(neuron.target)
+
+             # θ − π/2
+             angles[i] = angle - param_shift_eps
+             neuron.set_angles(angles)
+             neuron.simulator = pre_sim.clone()
+             neuron.predict(True, False)
+             p_minus = neuron.simulator.prob(neuron.target)
+
+             # Parameter-shift gradient
+             grad_x[i] = 0.5 * (p_plus - p_minus)
+
+             angles[i] = angle
+
+         # Restore simulator
+         neuron.set_simulator(pre_sim)
+
+         # Apply chain rule and upstream gradient
+         grad_x *= grad_output[0] / math.sqrt(max(1.0 - post_prob * post_prob, 1e-6))
+
+         return grad_x, None

-         return (
-             torch.tensor([grad], dtype=torch.float32) if _IS_TORCH_AVAILABLE else grad
-         )
+
+ class QrackNeuronTorch(nn.Module if _IS_TORCH_AVAILABLE else object):
+     """Torch wrapper for QrackNeuron
+
+     Attributes:
+         neuron(QrackNeuron): QrackNeuron backing this torch wrapper
+     """
+
+     def __init__(self, neuron, x):
+         super().__init__()
+         self.neuron = neuron
+         self.weights = nn.Parameter(x)
+
+     def forward(self):
+         return QrackNeuronTorchFunction.apply(self.weights, self.neuron)


  class QrackNeuronTorchLayer(nn.Module if _IS_TORCH_AVAILABLE else object):
-     """Torch layer wrapper for QrackNeuron (with power set of neurons between inputs and outputs)"""
+     """Torch layer wrapper for QrackNeuron (with maximally expressive set of neurons between inputs and outputs)
+
+     Attributes:
+         simulator (QrackSimulator): Prototype simulator that batching copies to use with QrackNeuron instances. (You may customize or overwrite the initialization or reference, before calling forward(x).)
+         simulators (list[QrackSimulator]): In-flight copies of prototype simulator corresponding to batch count
+         input_indices (list[int], read-only): simulator qubit indices used as QrackNeuron inputs
+         output_indices (list[int], read-only): simulator qubit indices used as QrackNeuron outputs
+         hidden_indices (list[int], read-only): simulator qubit indices used as QrackNeuron hidden inputs (in maximal superposition)
+         neurons (ModuleList[QrackNeuronTorch]): QrackNeuronTorch wrappers (for PyQrack QrackNeurons) in this layer, corresponding to weights
+         weights (ParameterList): List of tensors corresponding one-to-one with weights of list of neurons
+         apply_fn (Callable[Tensor, QrackNeuronTorch]): Corresponds to QrackNeuronTorchFunction.apply(x, neuron_wrapper) (or override with a custom implementation)
+         post_init_fn (Callable[QrackSimulator]): Function that is applied after forward(x) state initialization, before inference. (As the function depends on nothing but the simulator, it's differentiable.)
+     """

      def __init__(
          self,
-         simulator,
-         input_indices,
-         output_indices,
+         input_qubits,
+         output_qubits,
+         hidden_qubits=None,
+         lowest_combo_count=0,
+         highest_combo_count=2,
          activation=int(NeuronActivationFn.Generalized_Logistic),
          parameters=None,
+         post_init_fn=lambda simulator: None,
+         **kwargs
      ):
          """
          Initialize a QrackNeuron layer for PyTorch with a power set of neurons connecting inputs to outputs.
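The rewritten autograd plumbing above works in the arcsine domain: `forward()` returns the change in asin(probability) of the target qubit caused by this neuron's unitary, and `backward()` unpredicts back to the pre-unitary state, then, for each angle, re-runs the prediction at θ ± π/2 on clones of that state and takes half the difference of the target-qubit probabilities (the parameter-shift rule), finally scaling by the upstream gradient and the derivative of asin at the post-unitary probability. A hedged sketch of driving one wrapped neuron with a Torch optimizer; the import path for `QrackNeuronTorch` is an assumption, and double precision matches this fp64 ("complex128") build:

```python
# Hedged sketch: optimize one neuron's angles through QrackNeuronTorch, whose custom
# autograd Function uses the parameter-shift backward shown above.
import torch

from pyqrack import QrackNeuron, QrackSimulator
from pyqrack.qrack_neuron_torch_layer import QrackNeuronTorch  # module path is an assumption

sim = QrackSimulator(2)  # qubit 0 as input, qubit 1 as output
sim.h(0)                 # put the input into superposition
neuron = QrackNeuron(sim, [0], 1)
module = QrackNeuronTorch(neuron, torch.zeros(2, dtype=torch.double))  # 2 ** len(controls) weights

opt = torch.optim.SGD(module.parameters(), lr=0.1)
target = torch.tensor([0.2], dtype=torch.double)

for _ in range(20):
    neuron.set_simulator(sim.clone())  # fresh copy of the prepared state each step
    opt.zero_grad()
    out = module()                     # asin-domain shift on the target qubit
    loss = (out - target).pow(2).sum()
    loss.backward()                    # parameter-shift gradient for each angle
    opt.step()
```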
@@ -97,74 +157,101 @@ class QrackNeuronTorchLayer(nn.Module if _IS_TORCH_AVAILABLE else object):

          Args:
              sim (QrackSimulator): Simulator into which predictor features are loaded
-             input_indices (list[int]): List of input bits
-             output_indices (list[int]): List of output bits
-             activation (int): Integer corresponding to choice of activation function from NeuronActivationFn
-             parameters (list[float]): (Optional) Flat list of initial neuron parameters, corresponding to little-endian basis states of power set of input indices, repeated for each output index (with empty set being constant bias)
+             input_qubits (int): Count of inputs (1 per qubit)
+             output_qubits (int): Count of outputs (1 per qubit)
+             hidden_qubits (int): (Optional) Count of "hidden" inputs (1 per qubit, always initialized to |+>, suggested to be same a highest_combo_count)
+             lowest_combo_count (int): (Optional) Lowest combination count of input qubits iterated (0 is bias)
+             highest_combo_count (int): (Optional) Highest combination count of input qubits iterated
+             activation (int): (Optional) Integer corresponding to choice of activation function from NeuronActivationFn
+             parameters (list[float]): (Optional) Flat list of initial neuron parameters, corresponding to little-endian basis states of input + hidden qubits, repeated for ascending combo count, repeated for each output index
+             post_init_fn (Callable[QrackSimulator]): (Optional) Function that is applied after forward(x) state initialization, before inference. (As the function depends on nothing but the simulator, it's differentiable.)
          """
          super(QrackNeuronTorchLayer, self).__init__()
-         self.simulator = simulator
-         self.input_indices = input_indices
-         self.output_indices = output_indices
-         self.activation = NeuronActivationFn(activation)
-         self.fn = (
-             QrackNeuronFunction.apply
-             if _IS_TORCH_AVAILABLE
-             else lambda x: QrackNeuronFunction.forward(object(), x)
-         )
-
-         # Create neurons from all powerset input combinations, projecting to coherent output qubits
-         self.neurons = nn.ModuleList(
-             [
-                 QrackTorchNeuron(
-                     QrackNeuron(simulator, list(input_subset), output_id, activation)
-                 )
-                 for input_subset in powerset(input_indices)
-                 for output_id in output_indices
-             ]
+         if hidden_qubits is None:
+             hidden_qubits = highest_combo_count
+         self.simulator = QrackSimulator(input_qubits + hidden_qubits + output_qubits, **kwargs)
+         self.simulators = []
+         self.input_indices = list(range(input_qubits))
+         self.hidden_indices = list(range(input_qubits, input_qubits + hidden_qubits))
+         self.output_indices = list(
+             range(input_qubits + hidden_qubits, input_qubits + hidden_qubits + output_qubits)
          )
+         self.activation = NeuronActivationFn(activation)
+         self.dtype = torch.float if Qrack.fppow <= 5 else torch.double
+         self.apply_fn = QrackNeuronTorchFunction.apply
+         self.post_init_fn = post_init_fn

-         # Set Qrack's internal parameters:
+         # Create neurons from all input combinations, projecting to coherent output qubits
+         neurons = []
          param_count = 0
-         for neuron_wrapper in self.neurons:
-             neuron = neuron_wrapper.neuron
-             p_count = 1 << len(neuron.controls)
-             neuron.set_angles(
-                 parameters[param_count : (param_count + p_count + 1)]
-                 if parameters
-                 else ([0.0] * p_count)
-             )
-             param_count += p_count
-
-         self.weights = nn.ParameterList()
-         for pid in range(param_count):
-             self.weights.append(
-                 nn.Parameter(torch.tensor(parameters[pid] if parameters else 0.0))
-             )
-
-     def forward(self, _):
-         # Assume quantum outputs should overwrite the simulator state
          for output_id in self.output_indices:
-             if self.simulator.m(output_id):
-                 self.simulator.x(output_id)
+             for k in range(lowest_combo_count, highest_combo_count + 1):
+                 for input_subset in itertools.combinations(self.input_indices, k):
+                     p_count = 1 << len(input_subset)
+                     angles = (
+                         (
+                             torch.tensor(
+                                 parameters[param_count : (param_count + p_count)], dtype=self.dtype
+                             )
+                             if parameters
+                             else torch.zeros(p_count, dtype=self.dtype)
+                         )
+                     )
+                     neurons.append(
+                         QrackNeuronTorch(
+                             QrackNeuron(self.simulator, input_subset, output_id, activation), angles
+                         )
+                     )
+                     param_count += p_count
+         self.neurons = nn.ModuleList(neurons)
+
+         # Prepare the state before feed-forward:
+
+         # Prepare hidden predictors
+         for hidden_id in self.hidden_indices:
+             self.simulator.h(hidden_id)
+         # Prepare a maximally uncertain output state.
+         for output_id in self.output_indices:
              self.simulator.h(output_id)

-         # Set Qrack's internal parameters:
-         param_count = 0
-         for neuron_wrapper in self.neurons:
-             neuron = neuron_wrapper.neuron
-             p_count = 1 << len(neuron.controls)
-             angles = [
-                 w.item() for w in self.weights[param_count : (param_count + p_count)]
-             ]
-             neuron.set_angles(angles)
-             param_count += p_count
+     def forward(self, x):
+         B = x.shape[0]
+         x = x.view(B, -1)

-         # Assume quantum inputs already loaded into simulator state
+         self.simulators.clear()
+
+         # Group neurons by output target once
+         by_out = {out: [] for out in self.output_indices}
          for neuron_wrapper in self.neurons:
-             self.fn(neuron_wrapper.neuron)
+             by_out[neuron_wrapper.neuron.target].append(neuron_wrapper)
+
+         batch_rows = []
+         for b in range(B):
+             simulator = self.simulator.clone()
+             self.simulators.append(simulator)
+
+             # Apply feed-forward
+             for q, input_id in enumerate(self.input_indices):
+                 simulator.r(Pauli.PauliY, math.pi * x[b, q].item(), input_id)
+
+             # Differentiable post-initialization:
+             self.post_init_fn(simulator)
+
+             row = []
+             for out in self.output_indices:
+                 phi = torch.tensor(init_phi, device=x.device, dtype=x.dtype)
+
+                 for neuron_wrapper in by_out[out]:
+                     neuron_wrapper.neuron.set_simulator(simulator)
+                     phi += self.apply_fn(
+                         neuron_wrapper.weights,
+                         neuron_wrapper.neuron
+                     ).squeeze()
+
+                 # Convert angle back to probability
+                 p = torch.clamp(torch.sin(phi), min=0.0)
+                 row.append(p)

-         # These are classical views over quantum state; simulator still maintains full coherence
-         outputs = [self.simulator.prob(output_id) for output_id in self.output_indices]
+             batch_rows.append(torch.stack(row))

-         return torch.tensor(outputs, dtype=torch.float32)
+         return torch.stack(batch_rows)
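Putting the new layer together: `forward(x)` clones the prototype simulator once per batch row, encodes feature q as a Pauli-Y rotation by π·x[b, q], applies `post_init_fn`, accumulates each output qubit's asin-domain shifts from its neurons on top of `init_phi`, and maps the sum back through sin() to a probability. A hedged end-to-end sketch; the import path and the expectation that inputs are scaled to [0, 1] are assumptions, and double precision matches this fp64 build:

```python
# Hedged sketch: a few optimization steps through QrackNeuronTorchLayer.
import torch

from pyqrack.qrack_neuron_torch_layer import QrackNeuronTorchLayer  # path is an assumption

layer = QrackNeuronTorchLayer(
    input_qubits=3,
    output_qubits=1,
    highest_combo_count=2,  # neurons over all 1- and 2-qubit input combinations (plus bias)
)
opt = torch.optim.Adam(layer.parameters(), lr=0.05)

x = torch.rand(4, 3, dtype=torch.double)  # batch of 4 observations, features assumed in [0, 1]
y = torch.rand(4, 1, dtype=torch.double)  # matching targets in [0, 1]

for _ in range(5):
    opt.zero_grad()
    pred = layer(x)                        # shape (4, 1): one probability per output qubit
    loss = torch.nn.functional.mse_loss(pred, y)
    loss.backward()                        # gradients via the parameter-shift backward above
    opt.step()
```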