qadence 1.11.2__py3-none-any.whl → 1.11.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
qadence/backend.py CHANGED
@@ -25,7 +25,14 @@ from qadence.measurements import Measurements
 from qadence.mitigations import Mitigations
 from qadence.noise import NoiseHandler
 from qadence.parameters import stringify
-from qadence.types import ArrayLike, BackendName, DiffMode, Endianness, Engine, ParamDictType
+from qadence.types import (
+    ArrayLike,
+    BackendName,
+    DiffMode,
+    Endianness,
+    Engine,
+    ParamDictType,
+)

 logger = getLogger(__name__)

@@ -215,7 +222,7 @@ class Backend(ABC):
         if observable is not None:
             observable = observable if isinstance(observable, list) else [observable]
             conv_obs = []
-            obs_embedding_fn_list = []
+            obs_embedding_fns = []

             for obs in observable:
                 obs = check_observable(obs)
@@ -224,13 +231,18 @@ class Backend(ABC):
                     c_obs.abstract, self.config._use_gate_params, self.engine
                 )
                 params.update(obs_params)
-                obs_embedding_fn_list.append(obs_embedding_fn)
+                obs_embedding_fns.append(obs_embedding_fn)
                 conv_obs.append(c_obs)

         def embedding_fn_dict(a: dict, b: dict) -> dict:
-            embedding_dict = circ_embedding_fn(a, b)
-            for o in obs_embedding_fn_list:
-                embedding_dict.update(o(a, b))
+            if "circuit" in b or "observables" in b:
+                embedding_dict = {"circuit": circ_embedding_fn(a, b), "observables": dict()}
+                for obs_embedding_fn in obs_embedding_fns:
+                    embedding_dict["observables"].update(obs_embedding_fn(a, b))
+            else:
+                embedding_dict = circ_embedding_fn(a, b)
+                for obs_embedding_fn in obs_embedding_fns:
+                    embedding_dict.update(obs_embedding_fn(a, b))
             return embedding_dict

         return Converted(conv_circ, conv_obs, embedding_fn_dict, params)
@@ -316,7 +328,11 @@ class Backend(ABC):
         raise NotImplementedError

     @abstractmethod
-    def assign_parameters(self, circuit: ConvertedCircuit, param_values: dict[str, Tensor]) -> Any:
+    def assign_parameters(
+        self,
+        circuit: ConvertedCircuit,
+        param_values: dict[str, Tensor] | dict[str, dict[str, Tensor]],
+    ) -> Any:
         raise NotImplementedError

     @staticmethod
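With this change, the embedding function produced by `Backend.convert` accepts parameter values either as one flat dictionary or, when the inputs are already split, as a dictionary with separate "circuit" and "observables" sub-dictionaries, and it returns embedded parameters in the matching shape. A minimal sketch of the two input shapes (the parameter names phi and w are hypothetical):

import torch

# Flat form: circuit and observable parameters share one dictionary.
flat_values = {"phi": torch.rand(10), "w": torch.rand(10)}

# Separated form: embedding_fn_dict detects the "circuit"/"observables" keys
# and keeps the same two-level structure in its output.
separated_values = {
    "circuit": {"phi": torch.rand(10)},
    "observables": {"w": torch.rand(10)},
}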
qadence/backends/horqrux/backend.py CHANGED
@@ -134,19 +134,32 @@ class Backend(BackendInterface):
         Returns:
             A jax.Array of shape (batch_size, n_observables)
         """
-        observable = observable if isinstance(observable, list) else [observable]
-        batch_size = max([arr.size for arr in param_values.values()])
+        observables = observable if isinstance(observable, list) else [observable]
+        if "observables" in param_values or "circuit" in param_values:
+            raise NotImplementedError("The Horqrux backend does not support separated parameters.")
+        else:
+            merged_params = param_values
+        batch_size = max([arr.size for arr in param_values.values()])  # type: ignore[union-attr]
         n_obs = len(observable)

         def _expectation(params: ParamDictType) -> ArrayLike:
+            param_circuits = params["circuit"] if "circuit" in params else params
+            param_observables = params["observables"] if "observables" in params else params
             out_state = self.run(
-                circuit, params, state, endianness, horqify_state=True, unhorqify_state=False
+                circuit,
+                param_circuits,
+                state,
+                endianness,
+                horqify_state=True,
+                unhorqify_state=False,
+            )
+            return jnp.array(
+                [observable.native(out_state, param_observables) for observable in observables]
             )
-            return jnp.array([o.native(out_state, params) for o in observable])

         if batch_size > 1:  # We vmap for batch_size > 1
-            expvals = jax.vmap(_expectation, in_axes=({k: 0 for k in param_values.keys()},))(
-                uniform_batchsize(param_values)
+            expvals = jax.vmap(_expectation, in_axes=({k: 0 for k in merged_params.keys()},))(
+                uniform_batchsize(merged_params)
             )
         else:
             expvals = _expectation(param_values)
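Note that the Horqrux backend explicitly rejects the new separated form; only flat dictionaries are accepted. A sketch of the expected behaviour (circuit and observable construction elided, phi hypothetical):

import jax.numpy as jnp

flat = {"phi": jnp.array([0.1, 0.2])}                     # accepted, as before
separated = {"circuit": {"phi": jnp.array([0.1, 0.2])}}   # rejected
# backend.expectation(circuit, observable, separated) would raise NotImplementedError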
qadence/backends/jax_utils.py CHANGED
@@ -19,6 +19,7 @@ from qadence.blocks import (
 )
 from qadence.blocks.block_to_tensor import _gate_parameters
 from qadence.types import Endianness, ParamDictType
+from qadence.utils import merge_separate_params


 def jarr_to_tensor(arr: Array, dtype: Any = cdouble) -> Tensor:
@@ -52,9 +53,11 @@ def horqify(state: Array) -> Array:


 def uniform_batchsize(param_values: ParamDictType) -> ParamDictType:
-    max_batch_size = max(p.size for p in param_values.values())
+    if "observables" in param_values or "circuit" in param_values:
+        param_values = merge_separate_params(param_values)
+    max_batch_size = max(p.size for p in param_values.values())  # type: ignore[union-attr]
     batched_values = {
-        k: (v if v.size == max_batch_size else v.repeat(max_batch_size))
+        k: (v if v.size == max_batch_size else v.repeat(max_batch_size))  # type: ignore[union-attr]
         for k, v in param_values.items()
     }
     return batched_values
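uniform_batchsize now flattens a separated dictionary before padding every entry to a common batch size by repetition. A standalone sketch of the intended behaviour (not the qadence implementation itself):

import jax.numpy as jnp

values = {"circuit": {"phi": jnp.arange(3)}, "observables": {"w": jnp.array([1.0])}}
merged = {**values["circuit"], **values["observables"]}   # what merge_separate_params produces
max_size = max(v.size for v in merged.values())           # 3
batched = {k: (v if v.size == max_size else jnp.repeat(v, max_size)) for k, v in merged.items()}
# batched["w"].shape == (3,): the single observable value is repeated to the batch size.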
qadence/backends/pulser/backend.py CHANGED
@@ -28,7 +28,7 @@ from qadence.noise import NoiseHandler
 from qadence.overlap import overlap_exact
 from qadence.register import Register
 from qadence.transpile import transpile
-from qadence.types import BackendName, DeviceType, Endianness, Engine, NoiseProtocol
+from qadence.types import BackendName, DeviceType, Endianness, Engine, NoiseProtocol, ParamDictType

 from .channels import GLOBAL_CHANNEL, LOCAL_CHANNEL
 from .cloud import get_client
@@ -183,7 +183,7 @@ class Backend(BackendInterface):
     def run(
         self,
         circuit: ConvertedCircuit,
-        param_values: dict[str, Tensor] = {},
+        param_values: ParamDictType = {},
         state: Tensor | None = None,
         endianness: Endianness = Endianness.BIG,
         noise: NoiseHandler | None = None,
@@ -235,7 +235,7 @@ class Backend(BackendInterface):
         self,
         circuit: ConvertedCircuit,
         noise: NoiseHandler,
-        param_values: dict[str, Tensor] = dict(),
+        param_values: ParamDictType = dict(),
         state: Tensor | None = None,
         endianness: Endianness = Endianness.BIG,
     ) -> Tensor:
@@ -284,7 +284,7 @@ class Backend(BackendInterface):
     def sample(
         self,
         circuit: ConvertedCircuit,
-        param_values: dict[str, Tensor] = {},
+        param_values: ParamDictType = {},
         n_shots: int = 1,
         state: Tensor | None = None,
         noise: NoiseHandler | None = None,
@@ -322,7 +322,7 @@ class Backend(BackendInterface):
         self,
         circuit: ConvertedCircuit,
         observable: list[ConvertedObservable] | ConvertedObservable,
-        param_values: dict[str, Tensor] = {},
+        param_values: ParamDictType = {},
         state: Tensor | None = None,
         measurement: Measurements | None = None,
         noise: NoiseHandler | None = None,
@@ -330,14 +330,19 @@ class Backend(BackendInterface):
         endianness: Endianness = Endianness.BIG,
     ) -> Tensor:
         observable = observable if isinstance(observable, list) else [observable]
+        param_circuit = param_values["circuit"] if "circuit" in param_values else param_values
+        param_observables = (
+            param_values["observables"] if "observables" in param_values else param_values
+        )
         if mitigation is None:
             if noise is None:
                 state = self.run(
-                    circuit, param_values=param_values, state=state, endianness=endianness
+                    circuit, param_values=param_circuit, state=state, endianness=endianness
                 )
                 support = sorted(list(circuit.abstract.register.support))
                 res_list = [
-                    obs.native(state, param_values, qubit_support=support) for obs in observable
+                    obs.native(state, param_observables, qubit_support=support)
+                    for obs in observable
                 ]
                 res = torch.transpose(torch.stack(res_list), 0, 1)
                 res = res if len(res.shape) > 0 else res.reshape(1)
@@ -345,7 +350,7 @@ class Backend(BackendInterface):
             elif noise is not None:
                 dms = self.run(
                     circuit=circuit,
-                    param_values=param_values,
+                    param_values=param_circuit,
                     state=state,
                     endianness=endianness,
                     noise=noise,
@@ -353,7 +358,9 @@ class Backend(BackendInterface):
                 support = sorted(list(circuit.abstract.register.support))
                 res_list = [
                     [
-                        obs.native(dm.squeeze(), param_values, qubit_support=support, noise=noise)
+                        obs.native(
+                            dm.squeeze(), param_observables, qubit_support=support, noise=noise
+                        )
                         for dm in dms
                     ]
                     for obs in observable
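The same dispatch pattern recurs across the backends: take circuit parameters from the "circuit" key and observable parameters from the "observables" key, falling back to the flat dictionary when neither is present. A standalone sketch of that pattern:

from typing import Any

def split_params(param_values: dict[str, Any]) -> tuple[dict, dict]:
    # Fall back to the flat dictionary when no separated keys are present.
    param_circuit = param_values["circuit"] if "circuit" in param_values else param_values
    param_observables = (
        param_values["observables"] if "observables" in param_values else param_values
    )
    return param_circuit, param_observables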
qadence/backends/pyqtorch/backend.py CHANGED
@@ -31,7 +31,7 @@ from qadence.transpile import (
     set_noise,
     transpile,
 )
-from qadence.types import BackendName, Endianness, Engine
+from qadence.types import BackendName, Endianness, Engine, ParamDictType

 from .config import Configuration, default_passes
 from .convert_ops import convert_block, convert_readout_noise
@@ -182,16 +182,21 @@ class Backend(BackendInterface):
         self,
         circuit: ConvertedCircuit,
         observable: list[ConvertedObservable] | ConvertedObservable,
-        param_values: dict[str, Tensor] = {},
+        param_values: ParamDictType = {},
         state: Tensor | None = None,
         measurement: Measurements | None = None,
         noise: NoiseHandler | None = None,
         endianness: Endianness = Endianness.BIG,
     ) -> Tensor:
         set_block_and_readout_noises(circuit, noise, self.config)
+        param_circuit = param_values["circuit"] if "circuit" in param_values else param_values
+        param_observables = (
+            param_values["observables"] if "observables" in param_values else param_values
+        )
+
         state = self.run(
             circuit,
-            param_values=param_values,
+            param_values=param_circuit,
             state=state,
             endianness=endianness,
             pyqify_state=True,
@@ -200,7 +205,7 @@ class Backend(BackendInterface):
         )
         observable = observable if isinstance(observable, list) else [observable]
         _expectation = torch.hstack(
-            [obs.native.expectation(state, param_values).reshape(-1, 1) for obs in observable]
+            [obs.native.expectation(state, param_observables).reshape(-1, 1) for obs in observable]
         )
         return _expectation

@@ -208,7 +213,7 @@ class Backend(BackendInterface):
         self,
         circuit: ConvertedCircuit,
         observable: list[ConvertedObservable] | ConvertedObservable,
-        param_values: dict[str, Tensor] = {},
+        param_values: ParamDictType = {},
         state: Tensor | None = None,
         measurement: Measurements | None = None,
         noise: NoiseHandler | None = None,
@@ -230,9 +235,18 @@ class Backend(BackendInterface):

         list_expvals = []
         observables = observable if isinstance(observable, list) else [observable]
-        for vals in to_list_of_dicts(param_values):
-            wf = self.run(circuit, vals, state, endianness, pyqify_state=True, unpyqify_state=False)
-            exs = torch.cat([obs.native.expectation(wf, vals) for obs in observables], 0)
+        param_circuits = param_values["circuit"] if "circuit" in param_values else param_values
+        param_observables = (
+            param_values["observables"] if "observables" in param_values else param_values
+        )
+
+        for vals_circ, vals_obs in zip(
+            to_list_of_dicts(param_circuits), to_list_of_dicts(param_observables)
+        ):
+            wf = self.run(
+                circuit, vals_circ, state, endianness, pyqify_state=True, unpyqify_state=False
+            )
+            exs = torch.cat([obs.native.expectation(wf, vals_obs) for obs in observables], 0)
             list_expvals.append(exs)

         batch_expvals = torch.vstack(list_expvals)
@@ -242,7 +256,7 @@ class Backend(BackendInterface):
         self,
         circuit: ConvertedCircuit,
         observable: list[ConvertedObservable] | ConvertedObservable,
-        param_values: dict[str, Tensor] = {},
+        param_values: ParamDictType = {},
         state: Tensor | None = None,
         measurement: Measurements | None = None,
         noise: NoiseHandler | None = None,
@@ -269,7 +283,7 @@ class Backend(BackendInterface):
     def sample(
         self,
         circuit: ConvertedCircuit,
-        param_values: dict[str, Tensor] = {},
+        param_values: ParamDictType = {},
         n_shots: int = 1,
         state: Tensor | None = None,
         noise: NoiseHandler | None = None,
@@ -295,7 +309,7 @@ class Backend(BackendInterface):
         samples = apply_mitigation(noise=noise, mitigation=mitigation, samples=samples)
         return samples

-    def assign_parameters(self, circuit: ConvertedCircuit, param_values: dict[str, Tensor]) -> Any:
+    def assign_parameters(self, circuit: ConvertedCircuit, param_values: ParamDictType) -> Any:
         raise NotImplementedError

     @staticmethod
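For the shot-based path, the batch is unrolled one sample at a time, and circuit and observable value dictionaries are now iterated in lockstep. A standalone sketch of the zipped unrolling; the to_list_of_dicts stub below is a simplified stand-in for the qadence helper of the same name, not its actual implementation:

import torch

def to_list_of_dicts(values: dict[str, torch.Tensor]) -> list[dict[str, torch.Tensor]]:
    # Split a batched dictionary such as {"phi": tensor([a, b])} into per-sample dicts.
    batch = max(v.numel() for v in values.values()) if values else 1
    return [{k: v[i % v.numel()].reshape(1) for k, v in values.items()} for i in range(batch)]

circ_vals = {"phi": torch.tensor([0.1, 0.2])}
obs_vals = {"w": torch.tensor([1.0, 2.0])}
for vals_circ, vals_obs in zip(to_list_of_dicts(circ_vals), to_list_of_dicts(obs_vals)):
    pass  # run the circuit with vals_circ, evaluate observables with vals_obs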
qadence/blocks/embedding.py CHANGED
@@ -16,7 +16,14 @@ from qadence.blocks.utils import (
     uuid_to_expression,
 )
 from qadence.parameters import evaluate, make_differentiable, stringify
-from qadence.types import ArrayLike, DifferentiableExpression, Engine, ParamDictType, TNumber
+from qadence.types import (
+    ArrayLike,
+    DifferentiableExpression,
+    Engine,
+    ParamDictType,
+    TNumber,
+)
+from qadence.utils import merge_separate_params


 def _concretize_parameter(engine: Engine) -> Callable:
@@ -110,6 +117,8 @@ def embedding(

     def embedding_fn(params: ParamDictType, inputs: ParamDictType) -> ParamDictType:
         embedded_params: dict[sympy.Expr, ArrayLike] = {}
+        if "circuit" in inputs or "observables" in inputs:
+            inputs = merge_separate_params(inputs)
         for expr, fn in embeddings.items():
             angle: ArrayLike
             values = {}
qadence/blocks/utils.py CHANGED
@@ -7,6 +7,7 @@ from logging import getLogger
 from typing import Generator, List, Type, TypeVar, Union, get_args

 from sympy import Array, Basic, Expr
+import torch
 from torch import Tensor

 from qadence.blocks import (
@@ -292,31 +293,39 @@ def uuid_to_eigen(

     result = {}
     for uuid, b in uuid_to_block(block).items():
-        if b.eigenvalues_generator is not None:
-            if b.eigenvalues_generator.numel() > 0:
-                # GPSR assumes a factor 0.5 for differentiation
-                # so need rescaling
-                if isinstance(b, TimeEvolutionBlock) and rescale_eigenvals_timeevo:
-                    if b.eigenvalues_generator.numel() > 1:
-                        result[uuid] = (
-                            b.eigenvalues_generator * 2.0,
-                            0.5,
-                        )
+        eigs_generator = None
+
+        # this is to handle the case for the N operator
+        try:
+            eigs_generator = b.eigenvalues_generator
+        except ValueError:
+            result[uuid] = (torch.zeros(2), 1.0)
+        else:
+            if eigs_generator is not None:
+                if eigs_generator.numel() > 0:
+                    # GPSR assumes a factor 0.5 for differentiation
+                    # so need rescaling
+                    if isinstance(b, TimeEvolutionBlock) and rescale_eigenvals_timeevo:
+                        if eigs_generator.numel() > 1:
+                            result[uuid] = (
+                                eigs_generator * 2.0,
+                                0.5,
+                            )
+                        else:
+                            result[uuid] = (
+                                eigs_generator * 2.0,
+                                (
+                                    1.0 / (eigs_generator.item() * 2.0)
+                                    if len(eigs_generator) == 1
+                                    else 1.0
+                                ),
+                            )
                 else:
-                    result[uuid] = (
-                        b.eigenvalues_generator * 2.0,
-                        (
-                            1.0 / (b.eigenvalues_generator.item() * 2.0)
-                            if len(b.eigenvalues_generator) == 1
-                            else 1.0
-                        ),
-                    )
-            else:
-                result[uuid] = (b.eigenvalues_generator, 1.0)
-
-        # leave only angle parameter uuid with eigenvals for ConstantAnalogRotation block
-        if isinstance(block, ConstantAnalogRotation):
-            break
+                    result[uuid] = (eigs_generator, 1.0)
+
+            # leave only angle parameter uuid with eigenvals for ConstantAnalogRotation block
+            if isinstance(block, ConstantAnalogRotation):
+                break

     return result

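The try/except exists because some blocks (the N operator is the motivating case, per the inline comment) raise ValueError when asked for eigenvalues_generator; GPSR then falls back to trivial zero eigenvalues. A standalone sketch of the pattern, with a hypothetical block class standing in:

import torch

class NLikeBlock:
    @property
    def eigenvalues_generator(self) -> torch.Tensor:
        # Mimics a generator whose eigenvalues cannot be produced.
        raise ValueError("eigenvalues not defined for this generator")

result: dict[str, tuple[torch.Tensor, float]] = {}
try:
    eigs = NLikeBlock().eigenvalues_generator
except ValueError:
    # Fall back to trivial eigenvalues so GPSR can still proceed.
    result["uuid-0"] = (torch.zeros(2), 1.0)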
qadence/engines/differentiable_backend.py CHANGED
@@ -12,7 +12,13 @@ from qadence.circuit import QuantumCircuit
 from qadence.measurements import Measurements
 from qadence.mitigations import Mitigations
 from qadence.noise import NoiseHandler
-from qadence.types import ArrayLike, DiffMode, Endianness, Engine, ParamDictType
+from qadence.types import (
+    ArrayLike,
+    DiffMode,
+    Endianness,
+    Engine,
+    ParamDictType,
+)


 @dataclass(frozen=True, eq=True)
qadence/engines/jax/differentiable_backend.py CHANGED
@@ -8,7 +8,13 @@ from qadence.engines.jax.differentiable_expectation import DifferentiableExpecta
 from qadence.measurements import Measurements
 from qadence.mitigations import Mitigations
 from qadence.noise import NoiseHandler
-from qadence.types import ArrayLike, DiffMode, Endianness, Engine, ParamDictType
+from qadence.types import (
+    ArrayLike,
+    DiffMode,
+    Endianness,
+    Engine,
+    ParamDictType,
+)


 class DifferentiableBackend(DifferentiableBackendInterface):
qadence/engines/torch/differentiable_backend.py CHANGED
@@ -12,7 +12,13 @@ from qadence.backends.parameter_shift_rules import general_psr
 from qadence.measurements import Measurements
 from qadence.mitigations import Mitigations
 from qadence.noise import NoiseHandler
-from qadence.types import ArrayLike, DiffMode, Endianness, Engine, ParamDictType
+from qadence.types import (
+    ArrayLike,
+    DiffMode,
+    Endianness,
+    Engine,
+    ParamDictType,
+)


 class DifferentiableBackend(DifferentiableBackendInterface):
qadence/engines/torch/differentiable_expectation.py CHANGED
@@ -20,7 +20,7 @@ from qadence.measurements import Measurements
 from qadence.mitigations import Mitigations
 from qadence.ml_tools import promote_to_tensor
 from qadence.noise import NoiseHandler
-from qadence.types import Endianness
+from qadence.types import Endianness, ParamDictType


 class PSRExpectation(Function):
@@ -94,7 +94,7 @@ class DifferentiableExpectation:
     backend: QuantumBackend
     circuit: ConvertedCircuit
     observable: list[ConvertedObservable] | ConvertedObservable
-    param_values: dict[str, Tensor]
+    param_values: ParamDictType
     state: Tensor | None = None
     measurement: Measurements | None = None
     noise: NoiseHandler | None = None
@@ -135,8 +135,6 @@ class DifferentiableExpectation:
         self.observable = (
             self.observable if isinstance(self.observable, list) else [self.observable]
         )
-        if len(self.observable) > 1:
-            raise NotImplementedError("AdjointExpectation currently only supports one observable.")

         n_qubits = self.circuit.abstract.n_qubits
         values_batch_size = infer_batchsize(self.param_values)
@@ -150,18 +148,21 @@ class DifferentiableExpectation:
             else self.state
         )
         batch_size = max(values_batch_size, self.state.size(-1))
-        return (
-            AdjointExpectation.apply(
+
+        def expectation_fn(i: int) -> Tensor:
+            return AdjointExpectation.apply(
                 self.circuit.native,
                 self.state,
-                self.observable[0].native,  # Currently, adjoint only supports a single observable.
+                self.observable[i].native,  # Currently, adjoint only supports a single observable.
                 None,
                 self.param_values.keys(),
                 *self.param_values.values(),
-            )
-            .unsqueeze(1)
-            .reshape(batch_size, 1)
-        )  # we expect (batch_size, n_observables) shape
+            ).reshape(
+                batch_size, 1
+            )  # we expect (batch_size, n_observables) shape
+
+        expectation_list = [expectation_fn(i) for i in range(len(self.observable))]
+        return torch.vstack(expectation_list)

     def psr(self, psr_fn: Callable, **psr_args: int | float | None) -> Tensor:
         # wrapper which unpacks the parameters
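The adjoint path no longer raises for multiple observables: AdjointExpectation is applied once per observable and the per-observable columns are stacked. A standalone sketch of the stacking, with the backend call replaced by a stub:

import torch

batch_size, n_obs = 4, 3

def expectation_fn(i: int) -> torch.Tensor:
    # Stub standing in for AdjointExpectation.apply(...) on observable i.
    return torch.full((batch_size, 1), float(i))

stacked = torch.vstack([expectation_fn(i) for i in range(n_obs)])
print(stacked.shape)  # torch.Size([12, 1]): one (batch_size, 1) block per observable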
qadence/ml_tools/__init__.py CHANGED
@@ -10,6 +10,7 @@ from .optimize_step import optimize_step as default_optimize_step
 from .parameters import get_parameters, num_parameters, set_parameters
 from .tensors import numpy_to_tensor, promote_to, promote_to_tensor
 from .trainer import Trainer
+from .qcnn_model import QCNN

 # Modules to be automatically added to the qadence namespace
 __all__ = [
@@ -25,4 +26,5 @@ __all__ = [
     "OptimizeResult",
     "Trainer",
     "write_checkpoint",
+    "QCNN",
 ]
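QCNN is now re-exported at the ml_tools package level (its constructor is not shown in this diff):

from qadence.ml_tools import QCNN  # shorter than the qadence.ml_tools.qcnn_model path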
qadence/model.py CHANGED
@@ -27,7 +27,12 @@ from qadence.mitigations import Mitigations
 from qadence.noise import NoiseHandler
 from qadence.parameters import Parameter
 from qadence.types import DiffMode, Endianness
-from qadence.utils import block_to_mathematical_expression
+from qadence.utils import (
+    block_to_mathematical_expression,
+    check_param_dict_values,
+    merge_separate_params,
+)
+from qadence.transpile import set_as_variational, set_as_fixed

 logger = getLogger(__name__)

@@ -142,12 +147,20 @@ class QuantumModel(nn.Module):
         self._measurement = measurement
         self._noise = noise
         self._mitigation = mitigation
-        self._params = nn.ParameterDict(
-            {
-                str(key): nn.Parameter(val, requires_grad=val.requires_grad)
-                for key, val in conv.params.items()
-            }
-        )
+        if check_param_dict_values(conv.params):
+            self._params = nn.ParameterDict(
+                {
+                    str(key): nn.Parameter(val, requires_grad=val.requires_grad)  # type: ignore[union-attr]
+                    for key, val in conv.params.items()
+                }
+            )
+        else:
+            self._params = nn.ParameterDict(
+                {
+                    str(key): nn.Parameter(val, requires_grad=val.requires_grad)  # type: ignore[union-attr]
+                    for key, val in merge_separate_params(conv.params).items()
+                }
+            )

     @property
     def vparams(self) -> OrderedDict:
@@ -194,6 +207,80 @@ class QuantumModel(nn.Module):
         current_config = self.backend.backend.config
         BackendConfiguration.change_config(current_config, new_config)

+    def set_as_variational(self, params: list[str] = list()) -> None:
+        """Set as variational the list of names in `params`.
+
+        Args:
+            params (list[str], optional): List of parameters to fix. Defaults to list().
+        """
+        circuit: QuantumCircuit = self._circuit.original
+        if self._observable is not None:
+            if isinstance(self._observable, list):
+                for obs in self._observable:
+                    set_as_variational(obs.original, params)
+                observable = [obs.original for obs in self._observable]
+            else:
+                set_as_variational(self._observable.original, params)
+                observable = [self._observable.original]
+        else:
+            observable = self._observable  # type: ignore[assignment]
+        set_as_variational(circuit.block, params)
+        conv = self.backend.convert(circuit, observable)
+        self.embedding_fn = conv.embedding_fn
+        self._circuit = conv.circuit
+        self._observable = conv.observable
+        if check_param_dict_values(conv.params):
+            self._params = nn.ParameterDict(
+                {
+                    str(key): nn.Parameter(val, requires_grad=val.requires_grad)  # type: ignore[union-attr]
+                    for key, val in conv.params.items()
+                }
+            )
+        else:
+            self._params = nn.ParameterDict(
+                {
+                    str(key): nn.Parameter(val, requires_grad=val.requires_grad)  # type: ignore[union-attr]
+                    for key, val in merge_separate_params(conv.params).items()
+                }
+            )
+
+    def set_as_fixed(self, params: list[str] = list()) -> None:
+        """Set as fixed the list of names in `params`.
+
+        Args:
+            params (list[str], optional): List of parameters to fix. Defaults to list().
+        """
+        circuit: QuantumCircuit = self._circuit.original
+        if self._observable is not None:
+            if isinstance(self._observable, list):
+                for obs in self._observable:
+                    set_as_fixed(obs.original, params)
+                observable = [obs.original for obs in self._observable]
+            else:
+                set_as_fixed(self._observable.original, params)
+                observable = [self._observable.original]
+        else:
+            observable = self._observable  # type: ignore[assignment]
+        set_as_fixed(circuit.block, params)
+        conv = self.backend.convert(circuit, observable)
+        self.embedding_fn = conv.embedding_fn
+        self._circuit = conv.circuit
+        self._observable = conv.observable
+        if check_param_dict_values(conv.params):
+            self._params = nn.ParameterDict(
+                {
+                    str(key): nn.Parameter(val, requires_grad=val.requires_grad)  # type: ignore[union-attr]
+                    for key, val in conv.params.items()
+                }
+            )
+        else:
+            self._params = nn.ParameterDict(
+                {
+                    str(key): nn.Parameter(val, requires_grad=val.requires_grad)  # type: ignore[union-attr]
+                    for key, val in merge_separate_params(conv.params).items()
+                }
+            )
+
     def circuit(self, circuit: QuantumCircuit) -> ConvertedCircuit:
         """Get backend-converted circuit.

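QuantumModel thus gains a runtime toggle for parameter trainability. A minimal usage sketch, assuming the 1.11.3 API shown above (the parameter name theta is illustrative):

from qadence import Parameter, QuantumCircuit, QuantumModel, RX, Z

theta = Parameter("theta", trainable=True)
model = QuantumModel(QuantumCircuit(1, RX(0, theta)), observable=Z(0))

model.set_as_fixed(["theta"])        # theta becomes a fixed (non-trainable) parameter
model.set_as_variational(["theta"])  # and back to variational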
qadence/transpile/__init__.py CHANGED
@@ -5,7 +5,8 @@ from .block import (
     chain_single_qubit_ops,
     repeat,
     scale_primitive_blocks_only,
-    set_trainable,
+    set_as_variational,
+    set_as_fixed,
     validate,
 )
 from .circuit import fill_identities
@@ -15,4 +16,4 @@ from .invert import invert_endianness, reassign
 from .noise import set_noise
 from .transpile import blockfn_to_circfn, transpile

-__all__ = ["set_trainable", "invert_endianness", "set_noise"]
+__all__ = ["set_as_variational", "set_as_fixed", "invert_endianness", "set_noise"]
qadence/transpile/block.py CHANGED
@@ -1,8 +1,8 @@
 from __future__ import annotations

 from copy import deepcopy
-from functools import singledispatch
 from logging import getLogger
+from functools import singledispatch
 from typing import Callable, Iterable, Type

 import sympy
@@ -42,13 +42,17 @@ def repeat(


 def set_trainable(
-    blocks: AbstractBlock | list[AbstractBlock], value: bool = True, inplace: bool = True
+    blocks: AbstractBlock | list[AbstractBlock],
+    restricted_names: list[str] = list(),
+    value: bool = True,
+    inplace: bool = True,
 ) -> AbstractBlock | list[AbstractBlock]:
     """Set the trainability of all parameters in a block to a given value.

     Args:
         blocks (AbstractBlock | list[AbstractBlock]): Block or list of blocks for which
             to set the trainable attribute
+        restricted_names (list[str]): Restricted list of parameters names to set the value.
         value (bool, optional): The value of the trainable attribute to assign to the input blocks
         inplace (bool, optional): Whether to modify the block(s) in place or not. Currently, only

@@ -66,16 +70,65 @@ def set_trainable(

     if inplace:
         for block in blocks:
-            params: list[sympy.Basic] = parameters(block)
+            params: list[sympy.Basic] = list(filter(lambda p: not p.is_number, parameters(block)))
+            if bool(restricted_names):
+                params = list(filter(lambda p: p.name in restricted_names, params))
             for p in params:
-                if not p.is_number:
-                    p.trainable = value
+                p.trainable = value
     else:
         raise NotImplementedError("Not inplace set_trainable is not yet available")

     return blocks if len(blocks) > 1 else blocks[0]


+def set_as_variational(
+    blocks: AbstractBlock | list[AbstractBlock],
+    restricted_names: list[str] = list(),
+    inplace: bool = True,
+) -> AbstractBlock | list[AbstractBlock]:
+    """Set parameters in blocks as variational (trainable parameters).
+
+    Args:
+        blocks (AbstractBlock | list[AbstractBlock]): Block or list of blocks for which
+            to set the trainable attribute
+        restricted_names (list[str]): Restricted list of parameters names to set the value.
+        inplace (bool, optional): Whether to modify the block(s) in place or not. Currently, only
+
+    Raises:
+        NotImplementedError: if the `inplace` argument is set to False, the function will
+            raise this exception
+
+    Returns:
+        AbstractBlock | list[AbstractBlock]: the input block or list of blocks with the trainable
+            attribute set to True
+    """
+    return set_trainable(blocks, restricted_names=restricted_names, inplace=inplace)
+
+
+def set_as_fixed(
+    blocks: AbstractBlock | list[AbstractBlock],
+    restricted_names: list[str] = list(),
+    inplace: bool = True,
+) -> AbstractBlock | list[AbstractBlock]:
+    """Set parameters in blocks as fixed (non-trainable parameters).
+
+    Args:
+        blocks (AbstractBlock | list[AbstractBlock]): Block or list of blocks for which
+            to set the trainable attribute
+        restricted_names (list[str]): Restricted list of parameters names to set the value.
+        inplace (bool, optional): Whether to modify the block(s) in place or not. Currently, only
+
+    Raises:
+        NotImplementedError: if the `inplace` argument is set to False, the function will
+            raise this exception
+
+    Returns:
+        AbstractBlock | list[AbstractBlock]: the input block or list of blocks with the trainable
+            attribute set to False
+    """
+    return set_trainable(blocks, restricted_names=restricted_names, value=False, inplace=inplace)
+
+
 def validate(block: AbstractBlock) -> AbstractBlock:
     """Moves a block from global to local qubit numbers by adding PutBlocks.

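At the block level, the renamed helpers wrap set_trainable with an optional name filter. A short sketch, assuming the signatures above (parameter names illustrative):

from qadence import Parameter, RX, chain
from qadence.transpile import set_as_fixed, set_as_variational

block = chain(RX(0, Parameter("theta", trainable=True)), RX(0, Parameter("phi", trainable=True)))
set_as_fixed(block, restricted_names=["phi"])  # only phi becomes non-trainable
set_as_variational(block)                      # all non-numeric parameters back to trainable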
qadence/types.py CHANGED
@@ -430,7 +430,7 @@ class ReadOutOptimization(StrEnum):
     CONSTRAINED = "constrained"


-ParamDictType = dict[str, ArrayLike]
+ParamDictType = dict[str, Union[ArrayLike, dict[str, ArrayLike]]]
DifferentiableExpression = Callable[..., ArrayLike]

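The widened alias admits both the flat form and the one-level nested form; anything deeper is not covered. Illustrative values of each shape:

import torch
from qadence.types import ParamDictType

flat: ParamDictType = {"theta": torch.rand(4)}
nested: ParamDictType = {"circuit": {"theta": torch.rand(4)}, "observables": {"w": torch.rand(4)}}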
qadence/utils.py CHANGED
@@ -17,7 +17,7 @@ from torch.linalg import eigvals
 from rich.tree import Tree

 from qadence.blocks import AbstractBlock
-from qadence.types import Endianness, ResultType, TNumber
+from qadence.types import Endianness, ResultType, TNumber, ParamDictType

 if TYPE_CHECKING:
     from qadence.operations import Projector
@@ -30,6 +30,28 @@ __all__ = []  # type: ignore
 logger = getLogger(__name__)


+def merge_separate_params(param_dict: ParamDictType) -> ParamDictType:
+    """Merge circuit and observables parameters."""
+    merged_dict: ParamDictType = dict()
+    for p in param_dict.values():
+        merged_dict |= p
+    return merged_dict
+
+
+def check_param_dict_values(param_dict: ParamDictType) -> bool:
+    """Check if `param_dict` contains array values.
+
+    Args:
+        param_dict (ParamDictType): Dictionary of parameters.
+
+    Returns:
+        bool: True if values are arrays, False if values are dict.
+    """
+    if all(isinstance(p, dict) for p in param_dict.values()):
+        return False
+    return True
+
+
 def basis_to_int(basis: str, endianness: Endianness = Endianness.BIG) -> int:
     """
     Converts a computational basis state to an int.
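Taken together, the two helpers let callers normalize either dictionary shape before use. A quick illustration:

import torch
from qadence.utils import check_param_dict_values, merge_separate_params

nested = {"circuit": {"theta": torch.rand(2)}, "observables": {"w": torch.rand(2)}}
assert check_param_dict_values(nested) is False  # values are sub-dictionaries
flat = merge_separate_params(nested)
assert check_param_dict_values(flat) is True     # values are now tensors
assert set(flat) == {"theta", "w"}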
qadence-1.11.3.dist-info/METADATA CHANGED
@@ -1,8 +1,8 @@
 Metadata-Version: 2.4
 Name: qadence
-Version: 1.11.2
+Version: 1.11.3
 Summary: Pasqal interface for circuit-based quantum computing SDKs
-Author-email: Aleksander Wennersteen <aleksander.wennersteen@pasqal.com>, Gert-Jan Both <gert-jan.both@pasqal.com>, Niklas Heim <niklas.heim@pasqal.com>, Mario Dagrada <mario.dagrada@pasqal.com>, Vincent Elfving <vincent.elfving@pasqal.com>, Dominik Seitz <dominik.seitz@pasqal.com>, Roland Guichard <roland.guichard@pasqal.com>, "Joao P. Moutinho" <joao.moutinho@pasqal.com>, Vytautas Abramavicius <vytautas.abramavicius@pasqal.com>, Gergana Velikova <gergana.velikova@pasqal.com>, Eduardo Maschio <eduardo.maschio@pasqal.com>, Smit Chaudhary <smit.chaudhary@pasqal.com>, Ignacio Fernández Graña <ignacio.fernandez-grana@pasqal.com>, Charles Moussa <charles.moussa@pasqal.com>, Giorgio Tosti Balducci <giorgio.tosti-balducci@pasqal.com>, Daniele Cucurachi <daniele.cucurachi@pasqal.com>, Pim Venderbosch <pim.venderbosch@pasqal.com>, Manu Lahariya <manu.lahariya@pasqal.com>
+Author-email: Aleksander Wennersteen <aleksander.wennersteen@pasqal.com>, Gert-Jan Both <gert-jan.both@pasqal.com>, Niklas Heim <niklas.heim@pasqal.com>, Mario Dagrada <mario.dagrada@pasqal.com>, Vincent Elfving <vincent.elfving@pasqal.com>, Dominik Seitz <dominik.seitz@pasqal.com>, Roland Guichard <roland.guichard@pasqal.com>, "Joao P. Moutinho" <joao.moutinho@pasqal.com>, Vytautas Abramavicius <vytautas.abramavicius@pasqal.com>, Gergana Velikova <gergana.velikova@pasqal.com>, Eduardo Maschio <eduardo.maschio@pasqal.com>, Smit Chaudhary <smit.chaudhary@pasqal.com>, Ignacio Fernández Graña <ignacio.fernandez-grana@pasqal.com>, Charles Moussa <charles.moussa@pasqal.com>, Giorgio Tosti Balducci <giorgio.tosti-balducci@pasqal.com>, Daniele Cucurachi <daniele.cucurachi@pasqal.com>, Pim Venderbosch <pim.venderbosch@pasqal.com>, Manu Lahariya <manu.lahariya@pasqal.com>, Sungwoo Ahn <sungwoo.ahn@pasqal.com>
 License: PASQAL OPEN-SOURCE SOFTWARE LICENSE (MIT-derived)
 License-File: LICENSE
 Classifier: License :: Other/Proprietary License
@@ -24,7 +24,7 @@ Requires-Dist: nevergrad
 Requires-Dist: numpy
 Requires-Dist: openfermion
 Requires-Dist: pasqal-cloud
-Requires-Dist: pyqtorch==1.7.5
+Requires-Dist: pyqtorch==1.7.6
 Requires-Dist: pyyaml
 Requires-Dist: rich
 Requires-Dist: scipy
@@ -44,15 +44,25 @@ Requires-Dist: nvidia-pyindex; extra == 'dlprof'
 Provides-Extra: horqrux
 Requires-Dist: einops; extra == 'horqrux'
 Requires-Dist: flax; extra == 'horqrux'
-Requires-Dist: horqrux==0.8.0; extra == 'horqrux'
+Requires-Dist: horqrux==0.8.1; extra == 'horqrux'
 Requires-Dist: jax; extra == 'horqrux'
 Requires-Dist: jaxopt; extra == 'horqrux'
 Requires-Dist: optax; extra == 'horqrux'
 Requires-Dist: sympy2jax; extra == 'horqrux'
+Provides-Extra: hub
+Requires-Dist: qadence-measurement; extra == 'hub'
+Requires-Dist: qadence-mitigation; extra == 'hub'
+Requires-Dist: qadence-model; extra == 'hub'
 Provides-Extra: libs
 Requires-Dist: qadence-libs; extra == 'libs'
+Provides-Extra: measurement
+Requires-Dist: qadence-measurement; extra == 'measurement'
+Provides-Extra: mitigation
+Requires-Dist: qadence-mitigation; extra == 'mitigation'
 Provides-Extra: mlflow
 Requires-Dist: mlflow; extra == 'mlflow'
+Provides-Extra: model
+Requires-Dist: qadence-model; extra == 'model'
 Provides-Extra: protocols
 Requires-Dist: qadence-protocols; extra == 'protocols'
 Provides-Extra: pulser
qadence-1.11.3.dist-info/RECORD CHANGED
@@ -1,5 +1,5 @@
 qadence/__init__.py,sha256=c3y-Tbq_P4cbWKi7b45Z6zUFlnqgd8Q68OOpL2O2GOI,2671
-qadence/backend.py,sha256=r9Ymrou3IoxJ4yWIJ-Rc2MyNq56NNwZg5rpmLM_pJaE,13721
+qadence/backend.py,sha256=dcgGHncwK5Ul7COVccUyxGOKqbTuoK-KVLZ1qL7Abx8,14166
 qadence/circuit.py,sha256=r8QvzLWHvavqLOlNstq8aHg6UpKmPClEDoZVBkk-tzY,6970
 qadence/decompose.py,sha256=C4LYia_GcC9Rx3QO0ZLWTI9dN63a8WTEAXO0ZVQWuiE,5221
 qadence/divergences.py,sha256=JhpELhWSnuDvQxa9hJp_DE3EQg2Ban-Ta0mHZ_fVrHg,1832
@@ -8,7 +8,7 @@ qadence/extensions.py,sha256=TqkUGhc7vVUQeGsfuPruQ_Q3Vp7QQVHsHC_dayZ5EXU,5754
 qadence/libs.py,sha256=HetkKO8TCTlVCViQdVQJvxwBekrhd-y_iMox4UJMY1M,410
 qadence/log_config.yaml,sha256=QiwoB1bnRdk9NpxnvfgWX2PjN7EDfYlrc3GVD38rtdI,739
 qadence/logger.py,sha256=Hb76pK3VyQjVjJb4_NqFlOJgjYJVa8t7DHJFlzOM86M,407
-qadence/model.py,sha256=LbWXVoysyrFGz-xjMwzm5RGKBGVysrwL-PUEy0UAhHo,23346
+qadence/model.py,sha256=dVabrxs4lJjgx5FXXcq9AmEr81zPrctCmLuWHxXptVc,27128
 qadence/overlap.py,sha256=ekaUnIcQWdF4hSFuUWpRjaxo43JyDGFOayP7vMXCZpw,16821
 qadence/parameters.py,sha256=g7OET8_HLjo1zKlX6WNCeSPtgzC3WMl6hVPAVZ9uiHU,12744
 qadence/pasqal_cloud_connection.py,sha256=1lz_AJRo54-YMnsYGJD_SFpUK-0_ZzJRWr9Qqa_pBZU,9675
@@ -19,8 +19,8 @@ qadence/register.py,sha256=MlI1-L1P_e7ugjelhH-1YdxrfPsgmLmX5m-dueawuWQ,13172
 qadence/serial_expr_grammar.peg,sha256=z5ytL7do9kO8o4h-V5GrsDuLdso0KsRcMuIYURFfmAY,328
 qadence/serialization.py,sha256=IB0OgYhtV3F9AmMMMbGcfgNil9vBzs92j5G3yj4KPhg,15616
 qadence/states.py,sha256=Aj28aNHGWkZrFw_mKpHrxCA1bDXlkFhw18D70tg0RF0,15953
-qadence/types.py,sha256=RfsUA7jksqsK2gRNOj7QSsAJc0N1rAeI2k-6MWbuPC4,11898
-qadence/utils.py,sha256=J3NsYYty7Oys6SrrpB4UkxunjE_h6-vWI0e2I1TpDAw,11735
+qadence/types.py,sha256=HtOKf6xi-kTtncqctRWK0Wpxut7KEXHdqoQVqfx0vxo,11927
+qadence/utils.py,sha256=fChJDz7OelWNGLPjoBBcmleWGluWhR36Mf0LnqCx8FA,12376
 qadence/analog/__init__.py,sha256=BCyS9R4KUjzUXN0Ax3b0eMo8ZAuSkGoJQVtZ4_pvAFs,279
 qadence/analog/addressing.py,sha256=GSt4heEmRkBmoQIgdgkTclEFxZY-jjuAd77_SsZtGdI,6513
 qadence/analog/constants.py,sha256=B2phQoN1ASL8CwM-Dsa1rbraYwGwwPSeiB3HbVe-MPA,1243
@@ -30,15 +30,15 @@ qadence/analog/parse_analog.py,sha256=9Y_LMdw4wCHH6YSkvHhs6PUNwzT14HS7cUGheNSmDQ
 qadence/backends/__init__.py,sha256=ibm7wmZxuIoMYAQxgAx0MsfLYWOVHNWgLwyS1HjMuuI,215
 qadence/backends/agpsr_utils.py,sha256=CfjMG3S4ws5M3PImbjhg8KEAzRPKuh7h5YR8WpHgzas,2965
 qadence/backends/api.py,sha256=54l6a0sUSkSN1AEjZPaMXGwg0JNjl2i0GWAJ828QFII,3095
-qadence/backends/jax_utils.py,sha256=VfKhqCKknHDWZO21UFipWH_Lkiq175Z5GkP49gWjbyw,5038
+qadence/backends/jax_utils.py,sha256=n-m4_q795Uw01ra-sNsqarF03d47TxHSyMMnu3K3WBg,5268
 qadence/backends/parameter_shift_rules.py,sha256=CV_pU09HbipKrWGpFBRa5DJ-t2RRm0tstAJdEee-TzM,6654
 qadence/backends/utils.py,sha256=SSiMxZjaFS8e8sB6ZBLXPKuJNQGl93pRMy9hnI4oDrw,9104
 qadence/backends/horqrux/__init__.py,sha256=0OdVy6cq0oQggV48LO1WXdaZuSkDkz7OYNEPIkNAmfk,140
-qadence/backends/horqrux/backend.py,sha256=Dmtj9RUA7CnzoYCQS8C049W_XVH8hUD1z3b2BBgf7Ik,8904
+qadence/backends/horqrux/backend.py,sha256=4M_23e0oGbcC_PUHpig8V1rNboI8xKATtp92guOTqPQ,9489
 qadence/backends/horqrux/config.py,sha256=m8rroGPBwvzuHoCDPB4VlNQaKBPp7Rbgrb8KHINpTEo,1415
 qadence/backends/horqrux/convert_ops.py,sha256=3vjzbMGEFH4CUzIDFdvmPbyT5_gecHLMoLxQ_ATFhSw,6327
 qadence/backends/pulser/__init__.py,sha256=capQ-eHqwtOeLf4mWsI0BIseAHhiLGie5cFD4-iVhUo,116
-qadence/backends/pulser/backend.py,sha256=cI4IgijPpItNdDmLpKkJFas0X02wMiZd_XmVas41gEI,14846
+qadence/backends/pulser/backend.py,sha256=xHIv4CVm2VxLko6yYioRyXLs_PiQtZZXZsTDhf7qMgI,15157
 qadence/backends/pulser/channels.py,sha256=ZF0yEXUFHAmi3IdeXjzdTNGR5NzaRRFTiUpUGVg2sO4,329
 qadence/backends/pulser/cloud.py,sha256=0uUluvbFV9sOuCPraE-9uiVtC3Q8QaDY1IJMDi8grDM,2057
 qadence/backends/pulser/config.py,sha256=crIv1IJDk7uMZiQa994sDPqKKLHDH0RC17JYv_6N1cY,2708
@@ -47,7 +47,7 @@ qadence/backends/pulser/devices.py,sha256=DermLZNfmCB3SqteKVW4uhg4jp6ya1G6ptnXbB
 qadence/backends/pulser/pulses.py,sha256=F4fExIRAhLPMtVg1bhNtDihUYHxu5RExGjovk8-CQIo,11884
 qadence/backends/pulser/waveforms.py,sha256=0uz95b7rUaUUtN0tuHBZmJ0H6UBmfHST_59ozwsRCzg,2227
 qadence/backends/pyqtorch/__init__.py,sha256=0OdVy6cq0oQggV48LO1WXdaZuSkDkz7OYNEPIkNAmfk,140
-qadence/backends/pyqtorch/backend.py,sha256=Sjuof9b332w4gk9o8Rso2rgSHxskexfkIazRfxRD0Ng,11458
+qadence/backends/pyqtorch/backend.py,sha256=JXfZiFu-9PYZbO_C7rZ7Bg8vbzNwpUujZQ3KHKaeCuk,12034
 qadence/backends/pyqtorch/config.py,sha256=v3IUzyGSsXPBE5WeAZo7PegrIoEeuztCyYm8u3IwNho,2920
 qadence/backends/pyqtorch/convert_ops.py,sha256=qG26-HmtUDaZO0KDnw2sbT3CRx_poS7eqJ3dn9wpWgc,13457
 qadence/blocks/__init__.py,sha256=H6jEA_CptkE-eoB4UfSbUiDszbxxhZwECV_TgoZWXoU,960
@@ -55,11 +55,11 @@ qadence/blocks/abstract.py,sha256=DSQUE71rMyRBwAP--4Tx1WQC_LCXaNlftjd7goGyrpQ,12
 qadence/blocks/analog.py,sha256=ymnnlSVoW1XL05ZvnnHCqRTHuOXIEY_7E9M0PNKJZy4,10812
 qadence/blocks/block_to_tensor.py,sha256=DceLKuyjhG87QGsl34lk1CXHtris42Sc8A7bavGrRA0,17207
 qadence/blocks/composite.py,sha256=f9D8L3u5Ktu_-xDBWsWiPlY8I-YW5YFgU18BtqwFHK0,8937
-qadence/blocks/embedding.py,sha256=MI-gTPEe1e56AiHJr6MJwMAHdA7ZYmTo0b0VmFfyISQ,7029
+qadence/blocks/embedding.py,sha256=2odczunlLSl_8jx94SsHnt_WHpo09Zxt96BGwzoeoL8,7212
 qadence/blocks/manipulate.py,sha256=kPmzej7mnWFoqTJA2CkGulT7hcPha0GGPARC8rjZltg,2387
 qadence/blocks/matrix.py,sha256=JgzFLWoWDytaE0MEYe-Di7tbwb4jSmMF8tsOF04RIRo,4214
 qadence/blocks/primitive.py,sha256=21D7YIWw8N_uWcZwM9-DtyqQJ-8Ng1tZIJL4zaSK6uQ,17644
-qadence/blocks/utils.py,sha256=_V43qD7kQNK8JS3gxfpkRn56ZIF_GGrhAnARn1hq2hk,17772
+qadence/blocks/utils.py,sha256=zdpaIh-OYFnLIWdnL-ogY8RONt5Yk-nUqSArXnhKy5E,18052
 qadence/constructors/__init__.py,sha256=LiFkGzgMa27LrY1LhINfmj3UWfrjoUk9wwRM9a54rK0,1211
 qadence/constructors/ala.py,sha256=76rdD_vMki5F_r_Iq-68zU3NHrJiebVmr-e7L4lIDAo,8359
 qadence/constructors/feature_maps.py,sha256=BaAxFi6fSKwjsfFjqZ8T7lyZfjotcgH2OW3b0j67YVk,8644
@@ -83,13 +83,13 @@ qadence/draw/assets/dark/measurement.svg,sha256=6ALGjaCX3xZ1NqB6RW6yzOchzZV-j8uk
 qadence/draw/assets/light/measurement.png,sha256=q6AMI7Lmb1O42sEzvSiK9ZtyTpXOGj-zIAh4FiPbhsk,278
 qadence/draw/assets/light/measurement.svg,sha256=kL424wtIEU7ihxecZnthyYnvHhNW_F75daoO9pBHNiw,7282
 qadence/engines/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-qadence/engines/differentiable_backend.py,sha256=o9fuXapvoWB4Rr0QsQP3YRFe1Wdq-Fhww0NUvWR7oqA,5896
+qadence/engines/differentiable_backend.py,sha256=VLi2QiBdGU0MXDr39tOfL26p8gcVT6JYB4hNGT_QEI0,5921
 qadence/engines/jax/__init__.py,sha256=qcW09lcnblwRVQM6mDmW9su5kDVPe--buWCCuvPbOVM,223
-qadence/engines/jax/differentiable_backend.py,sha256=FcSrzzjzb0zfXC0-4mUJ6UB-wGO5RnNjQOGPAsM25zs,3076
+qadence/engines/jax/differentiable_backend.py,sha256=EnuhC1kW0Lkf1voZcIQjgO9PuBa3T3Td0kwgAKhnQwU,3101
 qadence/engines/jax/differentiable_expectation.py,sha256=rn_l7IH-S4IvuAcyAIgyEuMZOIqswu5Nsfz0JffXjaE,3694
 qadence/engines/torch/__init__.py,sha256=iZFdD32ot0B0CVyC-f5dVViOBnqoalxa6M9Lj4WQuPE,160
-qadence/engines/torch/differentiable_backend.py,sha256=bab2Lk-P2nCtj66a0foaajSwxKmfzwIrqa3nwF2RRyQ,3433
-qadence/engines/torch/differentiable_expectation.py,sha256=kc4WTos7d65DDmao6YSrpTM0rCBnpqhGK4xLHm_K4yk,10351
+qadence/engines/torch/differentiable_backend.py,sha256=ySWpMXLtt4HhuqTu58098p7qGKbUrEniBDf1jZppZBQ,3458
+qadence/engines/torch/differentiable_expectation.py,sha256=zHJbgl1elBU6QN_NBQ348lYsxvKtGdnVs6sEbkCsX98,10374
 qadence/exceptions/__init__.py,sha256=BU6vWrI9mshzr1aTPm1Ticr_o_42GjTrWI4OZXhThsI,203
 qadence/exceptions/exceptions.py,sha256=4j_VJpx2sZ2Mir5BJUWu4nwb131FY1ygO4q8-XlyfRc,190
 qadence/measurements/__init__.py,sha256=RIjG9tVJMqhNzyj7maZI250Um0KgHl2PizDcKJag-JU,161
@@ -102,7 +102,7 @@ qadence/mitigations/__init__.py,sha256=RzaxYJftePFMloGhBVSixZ8fSe-ps_Jc-EyPm6xz-
 qadence/mitigations/analog_zne.py,sha256=KGtdq3TmqipIVP-0U3mIkF_xf91dWbB3SYkM_26plaY,7895
 qadence/mitigations/protocols.py,sha256=0TeHvlGTN8_88XNEwrjA97C5BUlrh34wYmx0w6-5Tyw,1622
 qadence/mitigations/readout.py,sha256=nI-voV5N0R7630Cn8t8x9EdV9iB76P0LDkRosy1s0Ec,6631
-qadence/ml_tools/__init__.py,sha256=Z744FRFAzU5iw-4JC5YC43mDQD6rTa5ApdkWbUFuTbQ,970
+qadence/ml_tools/__init__.py,sha256=AG0WaQTvP_jXVx4xmiFWroYZ53ydFzrmTXcXC6nV7Wg,1011
 qadence/ml_tools/config.py,sha256=0qN0FVXRwYNMLGKgvk70cHTnEbl5_p83dU85sIyXls4,22728
 qadence/ml_tools/constructors.py,sha256=rfJ4VqUbhwnCMtOm3oQ94N_DmGCLtljPlhsCaDGQ2FY,36227
 qadence/ml_tools/data.py,sha256=-HUQk3-0UHOBA4KMFQ63tlt3si4W66psbgR1DAjtoVE,5481
@@ -137,16 +137,16 @@ qadence/operations/control_ops.py,sha256=fPSwOxJaVtJNbwri1UdD20W1JXQlB-inPTCJG3F
 qadence/operations/ham_evo.py,sha256=brJ11tlwj6UPYkUcnId-BKlzNStsZd0vp9FKHCFTjlM,10642
 qadence/operations/parametric.py,sha256=kV5d-diaQAoRlqKqoo0CGCbPej6eAxHQXniqfFKff3g,5394
 qadence/operations/primitive.py,sha256=hPJMDgWaEEdSYDZsr__hAcwy-QJEtzbM4qtFDcLmNBg,9881
-qadence/transpile/__init__.py,sha256=JrrQ4Osc4nNRWWjRGmVn57fWc8WwF92MokhKLRZ1vVA,499
+qadence/transpile/__init__.py,sha256=AuWxqao_KFOJOfmlBRASqwbhFYbu1MM1f-pMTRBvFNc,543
 qadence/transpile/apply_fn.py,sha256=glZo2_wMOjw7_KgWKYbqg8j-9SDs-RefWIfxWgdQK8I,1336
-qadence/transpile/block.py,sha256=jV-EyatrwwdL2ahjF3wyEhC3PKMBPLaL5sQN1VNFc_w,11582
+qadence/transpile/block.py,sha256=qsGixA8MVGwEh4-j7qR8H4UapKdOyufFGA1TyHWQVAg,13783
 qadence/transpile/circuit.py,sha256=KTh6Gv1czZddFuA1JhNNszheZbwViVixiGh4rGvIgTM,451
 qadence/transpile/digitalize.py,sha256=7oNHEBs50ff3rvP-Tb72tCQ5sk6vMpoQvz4CXyvH6Tc,1521
 qadence/transpile/flatten.py,sha256=k4HAfVzvDV40HyfaukiEHyJtAtvFRIcyDbAWiCL8tf0,3425
 qadence/transpile/invert.py,sha256=IeyidgBwECCKB0i7Ym0KkLyfcx42LyT2mbqkfbK1H8M,4843
 qadence/transpile/noise.py,sha256=LDcDJtQGkgUPkL2t69gg6AScTb-p3J3SxCDZbYOu1L8,1668
 qadence/transpile/transpile.py,sha256=xnzkHA6Qdb-Y5Fv9Latrolrpw44N6_OKc7_QGt70f0I,2713
-qadence-1.11.2.dist-info/METADATA,sha256=59o3ZoZh4OCOH_1qjHjcolovDDDc0YAdgz8nUqukis0,10308
-qadence-1.11.2.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-qadence-1.11.2.dist-info/licenses/LICENSE,sha256=IfA3wQpmMOjCnDZ0P8Od2Bxb39rND9s5zfGHp1vMTbQ,2359
-qadence-1.11.2.dist-info/RECORD,,
+qadence-1.11.3.dist-info/METADATA,sha256=KnEzhBVUnxeEgo778rmcj9l5R_otThJo6lUH6sKyuMI,10752
+qadence-1.11.3.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+qadence-1.11.3.dist-info/licenses/LICENSE,sha256=IfA3wQpmMOjCnDZ0P8Od2Bxb39rND9s5zfGHp1vMTbQ,2359
+qadence-1.11.3.dist-info/RECORD,,