pyqrack-complex128 1.78.3__py3-none-macosx_14_0_arm64.whl → 1.82.0__py3-none-macosx_14_0_arm64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of pyqrack-complex128 has been flagged as potentially problematic.

@@ -1,4 +1,4 @@
- # (C) Daniel Strano and the Qrack contributors 2017-2025. All rights reserved.
+ # (C) Daniel Strano and the Qrack contributors 2017-2026. All rights reserved.
  #
  # Initial draft by Elara (OpenAI custom GPT)
  # Refined and architecturally clarified by Dan Strano
@@ -6,8 +6,10 @@
  # Use of this source code is governed by an MIT-style license that can be
  # found in the LICENSE file or at https://opensource.org/licenses/MIT.
 
+ import ctypes
  import itertools
  import math
+ import random
  import sys
 
  _IS_TORCH_AVAILABLE = True
@@ -21,102 +23,90 @@ except ImportError:
  from .pauli import Pauli
  from .qrack_neuron import QrackNeuron
  from .qrack_simulator import QrackSimulator
+ from .qrack_system import Qrack
  from .neuron_activation_fn import NeuronActivationFn
 
 
- # Should be safe for 16-bit
- angle_eps = math.pi * (2 ** -8)
+ # Parameter-shift rule
+ param_shift_eps = math.pi / 2
+ # Neuron angle initialization
+ init_phi = math.asin(0.5)
+ # Systemic floating-point type
+ fp_type = ctypes.c_float if Qrack.fppow <= 5 else ctypes.c_double
 
 
- if not _IS_TORCH_AVAILABLE:
-     class TorchContextMock(object):
-         def __init__(self):
-             pass
-
-         def save_for_backward(self, *args):
-             self.saved_tensors = args
-
  class QrackNeuronTorchFunction(Function if _IS_TORCH_AVAILABLE else object):
      """Static forward/backward/apply functions for QrackNeuronTorch"""
 
-     if not _IS_TORCH_AVAILABLE:
-         @staticmethod
-         def apply(x, neuron_wrapper):
-             return forward(TorchContextMock(), x, neuron_wrapper)
-
      @staticmethod
-     def forward(ctx, x, neuron_wrapper):
-         ctx.neuron_wrapper = neuron_wrapper
+     def forward(ctx, x, neuron):
+         ctx.neuron = neuron
+         ctx.simulator = neuron.simulator
          ctx.save_for_backward(x)
-         neuron = neuron_wrapper.neuron
 
-         angles = (x.detach().cpu().numpy() if x.requires_grad else x.numpy()) if _IS_TORCH_AVAILABLE else x
-         neuron.set_angles(angles)
+         # Baseline probability BEFORE applying this neuron's unitary
+         pre_prob = neuron.simulator.prob(neuron.target)
+
+         angles = x.detach().cpu().numpy() if x.requires_grad else x.numpy()
+         neuron.angles = angles.ctypes.data_as(ctypes.POINTER(fp_type))
          neuron.predict(True, False)
+
+         # Probability AFTER applying this neuron's unitary
          post_prob = neuron.simulator.prob(neuron.target)
-         if _IS_TORCH_AVAILABLE:
-             post_prob = torch.tensor([post_prob], dtype=torch.float32, device=x.device)
+         ctx.post_prob = post_prob
+
+         delta = math.asin(post_prob) - math.asin(pre_prob)
+         ctx.delta = delta
 
-         return post_prob
+         # Return shape: (1,)
+         return x.new_tensor([delta])
 
      @staticmethod
-     def _backward(x, neuron_wrapper):
-         neuron = neuron_wrapper.neuron
-         angles = (x.detach().cpu().numpy() if x.requires_grad else x.numpy()) if _IS_TORCH_AVAILABLE else x
+     def backward(ctx, grad_output):
+         (x,) = ctx.saved_tensors
+         neuron = ctx.neuron
+         neuron.set_simulator(ctx.simulator)
+         post_prob = ctx.post_prob
+
+         angles = x.detach().cpu().numpy() if x.requires_grad else x.numpy()
 
-         # Uncompute
-         neuron.set_angles(angles)
+         # Restore simulator to state BEFORE this neuron's unitary
+         neuron.angles = angles.ctypes.data_as(ctypes.POINTER(fp_type))
          neuron.unpredict()
          pre_sim = neuron.simulator
-         pre_prob = pre_sim.prob(neuron.target)
 
-         param_count = 1 << len(neuron.controls)
-         delta = [0.0] * param_count
-         for param in range(param_count):
-             angle = angles[param]
+         grad_x = torch.zeros_like(x)
 
-             # x + angle_eps
-             angles[param] = angle + angle_eps
+         for i in range(x.shape[0]):
+             angle = angles[i]
+
+             # θ + π/2
+             angles[i] = angle + param_shift_eps
              neuron.set_angles(angles)
              neuron.simulator = pre_sim.clone()
              neuron.predict(True, False)
             p_plus = neuron.simulator.prob(neuron.target)
 
-             # x - angle_eps
-             angles[param] = angle - angle_eps
+             # θ - π/2
+             angles[i] = angle - param_shift_eps
              neuron.set_angles(angles)
              neuron.simulator = pre_sim.clone()
              neuron.predict(True, False)
              p_minus = neuron.simulator.prob(neuron.target)
 
-             # Central difference
-             delta[param] = (p_plus - p_minus) / (2 * angle_eps)
+             # Parameter-shift gradient
+             grad_x[i] = 0.5 * (p_plus - p_minus)
 
-             angles[param] = angle
+             angles[i] = angle
 
-         neuron.simulator = pre_sim
+         # Restore simulator
+         neuron.set_simulator(pre_sim)
 
-         if _IS_TORCH_AVAILABLE:
-             delta = torch.tensor(delta, dtype=torch.float32, device=x.device)
+         # Apply chain rule and upstream gradient
+         grad_x *= grad_output[0] / math.sqrt(max(1.0 - post_prob * post_prob, 1e-6))
 
-         return delta
+         return grad_x, None
 
-     @staticmethod
-     def backward(ctx, grad_output):
-         (x,) = ctx.saved_tensors
-         neuron_wrapper = ctx.neuron_wrapper
-         delta = _backward(x, neuron_wrapper, grad_output)
-         if _IS_TORCH_AVAILABLE:
-             # grad_output: (O,)
-             # delta: (O, I)
-             grad_input = torch.matmul(grad_output, delta)  # result: (I,)
-         else:
-             grad_input = [
-                 sum(o * d for o, d in zip(grad_output, col))
-                 for col in zip(*delta)
-             ]
-
-         return grad_input, None
 
  class QrackNeuronTorch(nn.Module if _IS_TORCH_AVAILABLE else object):
      """Torch wrapper for QrackNeuron
@@ -125,19 +115,20 @@ class QrackNeuronTorch(nn.Module if _IS_TORCH_AVAILABLE else object):
          neuron(QrackNeuron): QrackNeuron backing this torch wrapper
      """
 
-     def __init__(self, neuron):
+     def __init__(self, neuron, x):
          super().__init__()
          self.neuron = neuron
+         self.weights = nn.Parameter(x)
 
-     def forward(self, x):
-         return QrackNeuronTorchFunction.apply(x, self.neuron)
+     def forward(self):
+         return QrackNeuronTorchFunction.apply(self.weights, self.neuron)
 
 
  class QrackNeuronTorchLayer(nn.Module if _IS_TORCH_AVAILABLE else object):
      """Torch layer wrapper for QrackNeuron (with maximally expressive set of neurons between inputs and outputs)
 
      Attributes:
-         simulator (QrackSimulator): Prototype simulator that batching copies to use with QrackNeuron instances
+         simulator (QrackSimulator): Prototype simulator that batching copies to use with QrackNeuron instances. (You may customize or overwrite the initialization or reference, before calling forward(x).)
          simulators (list[QrackSimulator]): In-flight copies of prototype simulator corresponding to batch count
          input_indices (list[int], read-only): simulator qubit indices used as QrackNeuron inputs
          output_indices (list[int], read-only): simulator qubit indices used as QrackNeuron outputs
@@ -145,7 +136,7 @@ class QrackNeuronTorchLayer(nn.Module if _IS_TORCH_AVAILABLE else object):
          neurons (ModuleList[QrackNeuronTorch]): QrackNeuronTorch wrappers (for PyQrack QrackNeurons) in this layer, corresponding to weights
          weights (ParameterList): List of tensors corresponding one-to-one with weights of list of neurons
          apply_fn (Callable[Tensor, QrackNeuronTorch]): Corresponds to QrackNeuronTorchFunction.apply(x, neuron_wrapper) (or override with a custom implementation)
-         backward_fn (Callable[Tensor, Tensor]): Corresponds to QrackNeuronTorchFunction._backward(x, neuron_wrapper) (or override with a custom implementation)
+         post_init_fn (Callable[QrackSimulator]): Function that is applied after forward(x) state initialization, before inference. (As the function depends on nothing but the simulator, it's differentiable.)
      """
 
      def __init__(
@@ -157,6 +148,8 @@
          highest_combo_count=2,
          activation=int(NeuronActivationFn.Generalized_Logistic),
          parameters=None,
+         post_init_fn=lambda simulator: None,
+         **kwargs
      ):
          """
          Initialize a QrackNeuron layer for PyTorch with a power set of neurons connecting inputs to outputs.
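
The two new constructor arguments above let callers hook state preparation (`post_init_fn`) and forward simulator construction options (`**kwargs`) to the internal `QrackSimulator`. A hypothetical construction sketch; it assumes the leading positional arguments are the input and output qubit counts, that `QrackNeuronTorchLayer` is importable from the package's top level, and that `isTensorNetwork` is among the `QrackSimulator` keyword options:

    from pyqrack import QrackNeuronTorchLayer  # assumed top-level export

    layer = QrackNeuronTorchLayer(
        2,  # input_qubits
        1,  # output_qubits
        # Hypothetical differentiable post-initialization: entangle input
        # qubit 0 with hidden qubit 2 after features are loaded, before
        # inference (it depends on nothing but the simulator).
        post_init_fn=lambda sim: sim.mcx([0], 2),
        # Example keyword argument forwarded to the internal QrackSimulator:
        isTensorNetwork=False,
    )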
@@ -166,195 +159,99 @@
              sim (QrackSimulator): Simulator into which predictor features are loaded
              input_qubits (int): Count of inputs (1 per qubit)
              output_qubits (int): Count of outputs (1 per qubit)
-             hidden_qubits (int): Count of "hidden" inputs (1 per qubit, always initialized to |+>, suggested to be same a highest_combo_count)
-             lowest_combo_count (int): Lowest combination count of input qubits iterated (0 is bias)
-             highest_combo_count (int): Highest combination count of input qubits iterated
-             activation (int): Integer corresponding to choice of activation function from NeuronActivationFn
+             hidden_qubits (int): (Optional) Count of "hidden" inputs (1 per qubit, always initialized to |+>, suggested to be same a highest_combo_count)
+             lowest_combo_count (int): (Optional) Lowest combination count of input qubits iterated (0 is bias)
+             highest_combo_count (int): (Optional) Highest combination count of input qubits iterated
+             activation (int): (Optional) Integer corresponding to choice of activation function from NeuronActivationFn
              parameters (list[float]): (Optional) Flat list of initial neuron parameters, corresponding to little-endian basis states of input + hidden qubits, repeated for ascending combo count, repeated for each output index
+             post_init_fn (Callable[QrackSimulator]): (Optional) Function that is applied after forward(x) state initialization, before inference. (As the function depends on nothing but the simulator, it's differentiable.)
          """
          super(QrackNeuronTorchLayer, self).__init__()
          if hidden_qubits is None:
              hidden_qubits = highest_combo_count
-         self.simulator = QrackSimulator(input_qubits + hidden_qubits + output_qubits)
+         self.simulator = QrackSimulator(input_qubits + hidden_qubits + output_qubits, **kwargs)
          self.simulators = []
          self.input_indices = list(range(input_qubits))
          self.hidden_indices = list(range(input_qubits, input_qubits + hidden_qubits))
-         self.output_indices = list(range(input_qubits + hidden_qubits, input_qubits + hidden_qubits + output_qubits))
+         self.output_indices = list(
+             range(input_qubits + hidden_qubits, input_qubits + hidden_qubits + output_qubits)
+         )
          self.activation = NeuronActivationFn(activation)
+         self.dtype = torch.float if Qrack.fppow <= 5 else torch.double
          self.apply_fn = QrackNeuronTorchFunction.apply
-         self.backward_fn = QrackNeuronTorchFunction._backward
+         self.post_init_fn = post_init_fn
 
          # Create neurons from all input combinations, projecting to coherent output qubits
-         neurons = [
-             QrackNeuronTorch(
-                 QrackNeuron(self.simulator, input_subset, output_id, activation)
-             )
-             for output_id in self.output_indices
-             for k in range(lowest_combo_count, highest_combo_count + 1)
-             for input_subset in itertools.combinations(self.input_indices + self.hidden_indices, k)
-         ]
-         self.neurons = nn.ModuleList(neurons) if _IS_TORCH_AVAILABLE else neurons
-
-         # Set Qrack's internal parameters:
-         if parameters:
-             param_count = 0
-             self.weights = nn.ParameterList() if _IS_TORCH_AVAILABLE else []
-             for neuron_wrapper in self.neurons:
-                 neuron = neuron_wrapper.neuron
-                 p_count = 1 << len(neuron.controls)
-                 neuron.set_angles(parameters[param_count : (param_count + p_count)])
-                 self.weights.append(
-                     nn.Parameter(torch.tensor(parameters[param_count : (param_count + p_count)]))
-                     if _IS_TORCH_AVAILABLE else parameters[param_count : (param_count + p_count)]
-                 )
-                 param_count += p_count
-         else:
-             self.weights = nn.ParameterList() if _IS_TORCH_AVAILABLE else []
-             for neuron_wrapper in self.neurons:
-                 neuron = neuron_wrapper.neuron
-                 p_count = 1 << len(neuron.controls)
-                 self.weights.append(nn.Parameter(torch.zeros(p_count)) if _IS_TORCH_AVAILABLE else ([0.0] * p_count))
+         neurons = []
+         param_count = 0
+         for output_id in self.output_indices:
+             for k in range(lowest_combo_count, highest_combo_count + 1):
+                 for input_subset in itertools.combinations(self.input_indices, k):
+                     p_count = 1 << len(input_subset)
+                     angles = (
+                         (
+                             torch.tensor(
+                                 parameters[param_count : (param_count + p_count)], dtype=self.dtype
+                             )
+                             if parameters
+                             else torch.zeros(p_count, dtype=self.dtype)
+                         )
+                     )
+                     neurons.append(
+                         QrackNeuronTorch(
+                             QrackNeuron(self.simulator, input_subset, output_id, activation), angles
+                         )
+                     )
+                     param_count += p_count
+         self.neurons = nn.ModuleList(neurons)
+
+         # Prepare the state before feed-forward:
+
+         # Prepare hidden predictors
+         for hidden_id in self.hidden_indices:
+             self.simulator.h(hidden_id)
+         # Prepare a maximally uncertain output state.
+         for output_id in self.output_indices:
+             self.simulator.h(output_id)
 
      def forward(self, x):
-         return QrackNeuronTorchLayerFunction.apply(x, self)
+         B = x.shape[0]
+         x = x.view(B, -1)
 
+         self.simulators.clear()
 
-
- class QrackNeuronTorchLayerFunction(Function if _IS_TORCH_AVAILABLE else object):
-     """Static forward/backward/apply functions for QrackNeuronTorch"""
+         # Group neurons by output target once
+         by_out = {out: [] for out in self.output_indices}
+         for neuron_wrapper in self.neurons:
+             by_out[neuron_wrapper.neuron.target].append(neuron_wrapper)
 
-     @staticmethod
-     def forward(ctx, x, neuron_layer):
-         # Save for backward
-         ctx.save_for_backward(x)
-         ctx.neuron_layer = neuron_layer
-
-         input_indices = neuron_layer.input_indices
-         hidden_indices = neuron_layer.hidden_indices
-         output_indices = neuron_layer.output_indices
-         simulators = neuron_layer.simulators
-         weights = neuron_layer.weights
-
-         if _IS_TORCH_AVAILABLE:
-             B = x.shape[0]
-             x = x.view(B, -1)
-         else:
-             B = len(x)
-
-         simulators.clear()
-         if _IS_TORCH_AVAILABLE:
-             for b in range(B):
-                 simulator = neuron_layer.simulator.clone()
-                 simulators.append(simulator)
-                 for q, input_id in enumerate(input_indices):
-                     simulator.r(Pauli.PauliY, math.pi * x[b, q].item(), q)
-         else:
-             for b in range(B):
-                 simulator = neuron_layer.simulator.clone()
-                 simulators.append(simulator)
-                 for q, input_id in enumerate(input_indices):
-                     simulator.r(Pauli.PauliY, math.pi * x[b][q], q)
-
-         y = [([0.0] * len(output_indices)) for _ in range(B)]
+         batch_rows = []
          for b in range(B):
-             simulator = simulators[b]
-             # Prepare a maximally uncertain output state.
-             for output_id in output_indices:
-                 simulator.h(output_id)
-             # Prepare hidden predictors
-             for h in hidden_indices:
-                 simulator.h(h)
+             simulator = self.simulator.clone()
+             self.simulators.append(simulator)
 
-             # Set Qrack's internal parameters:
-             for idx, neuron_wrapper in enumerate(neuron_layer.neurons):
-                 neuron_wrapper.neuron.simulator = simulator
-                 neuron_layer.apply_fn(weights[idx], neuron_wrapper)
+             # Apply feed-forward
+             for q, input_id in enumerate(self.input_indices):
+                 simulator.r(Pauli.PauliY, math.pi * x[b, q].item(), input_id)
 
-             for q, output_id in enumerate(output_indices):
-                 y[b][q] = simulator.prob(output_id)
+             # Differentiable post-initialization:
+             self.post_init_fn(simulator)
 
-         if _IS_TORCH_AVAILABLE:
-             y = torch.tensor(y, dtype=torch.float32, device=x.device)
+             row = []
+             for out in self.output_indices:
+                 phi = torch.tensor(init_phi, device=x.device, dtype=x.dtype)
 
-         return y
+                 for neuron_wrapper in by_out[out]:
+                     neuron_wrapper.neuron.set_simulator(simulator)
+                     phi += self.apply_fn(
+                         neuron_wrapper.weights,
+                         neuron_wrapper.neuron
+                     ).squeeze()
 
-     @staticmethod
-     def backward(ctx, grad_output):
-         (x,) = ctx.saved_tensors
-         neuron_layer = ctx.neuron_layer
-
-         input_indices = neuron_layer.input_indices
-         hidden_indices = neuron_layer.hidden_indices
-         output_indices = neuron_layer.output_indices
-         simulators = neuron_layer.simulators
-         neurons = neuron_layer.neurons
-         backward_fn = neuron_layer.backward_fn
-
-         input_count = len(input_indices)
-         output_count = len(output_indices)
-
-         if _IS_TORCH_AVAILABLE:
-             B = x.shape[0]
-             x = x.view(B, -1)
-         else:
-             B = len(x)
-
-         # Uncompute prediction
-         if _IS_TORCH_AVAILABLE:
-             delta = torch.zeros((B, output_count, input_count), dtype=torch.float32, device=x.device)
-             for b in range(B):
-                 simulator = simulators[b]
-                 for neuron_wrapper in neurons:
-                     neuron = neuron_wrapper.neuron
-                     neuron.simulator = simulator
-                     angles = torch.tensor(neuron.get_angles(), dtype=torch.float32, device=x.device, requires_grad=True)
-                     o = output_indices.index(neuron.target)
-                     neuron_grad = backward_fn(angles, neuron_wrapper)
-                     for idx, c in enumerate(neuron.controls):
-                         if c not in input_indices:
-                             continue
-                         i = input_indices.index(c)
-                         delta[b, o, i] += neuron_grad[idx]
-         else:
-             delta = [[[0.0] * input_count for _ in range(output_count)] for _ in range(B)]
-             for b in range(B):
-                 simulator = simulators[b]
-                 for neuron_wrapper in neurons:
-                     neuron = neuron_wrapper.neuron
-                     neuron.simulator = simulator
-                     angles = neuron.get_angles()
-                     o = output_indices.index(neuron.target)
-                     neuron_grad = backward_fn(angles, neuron_wrapper)
-                     for idx, c in enumerate(neuron.controls):
-                         if c not in input_indices:
-                             continue
-                         i = input_indices.index(c)
-                         delta[b][o][i] += neuron_grad[idx]
-
-         # Uncompute output state prep
-         for simulator in simulators:
-             for output_id in output_indices:
-                 simulator.h(output_id)
-             for h in hidden_indices:
-                 simulator.h(output_id)
-
-         if _IS_TORCH_AVAILABLE:
-             for b in range(B):
-                 simulator = simulators[b]
-                 for q, input_id in enumerate(input_indices):
-                     simulator.r(Pauli.PauliY, -math.pi * x[b, q].item(), q)
-         else:
-             for b in range(B):
-                 simulator = simulators[b]
-                 for q, input_id in enumerate(input_indices):
-                     simulator.r(Pauli.PauliY, -math.pi * x[b][q].item(), q)
-
-         if _IS_TORCH_AVAILABLE:
-             grad_input = torch.matmul(grad_output.view(B, 1, -1), delta).view_as(x)
-         else:
-             grad_input = [[0.0] * output_count for _ in range(B)]
-             for b in range(B):
-                 for o in range(output_indices):
-                     for i in range(input_indices):
-                         grad_input[b][o] += grad_output[b][o] * delta[b][o][i]
-
-         return grad_input, None
+                 # Convert angle back to probability
+                 p = torch.clamp(torch.sin(phi), min=0.0)
+                 row.append(p)
+
+             batch_rows.append(torch.stack(row))
+
+         return torch.stack(batch_rows)
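
Net effect of the release: weights now live on each `QrackNeuronTorch` as an `nn.Parameter`, and the layer's forward pass returns per-output probabilities directly, so a standard PyTorch training loop applies. A rough end-to-end sketch, under the same constructor assumptions as above:

    import torch
    from pyqrack import QrackNeuronTorchLayer  # assumed top-level export

    layer = QrackNeuronTorchLayer(3, 2)  # 3 input qubits, 2 output qubits
    opt = torch.optim.Adam(layer.parameters(), lr=0.01)

    x = torch.rand(4, 3)       # batch of 4 feature rows, scaled to [0, 1]
    target = torch.rand(4, 2)  # toy regression target

    for _ in range(10):
        opt.zero_grad()
        y = layer(x)  # (4, 2) output-qubit probabilities
        loss = torch.nn.functional.mse_loss(y, target.to(y.dtype))
        loss.backward()  # parameter-shift gradients flow to each neuron's weights
        opt.step()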