qadence 1.7.6__py3-none-any.whl → 1.7.8__py3-none-any.whl

qadence/backend.py CHANGED
@@ -282,33 +282,6 @@ class Backend(ABC):
         """
         raise NotImplementedError
 
-    @abstractmethod
-    def run_dm(
-        self,
-        circuit: ConvertedCircuit,
-        noise: Noise,
-        param_values: dict[str, ArrayLike] = {},
-        state: Tensor | None = None,
-        endianness: Endianness = Endianness.BIG,
-    ) -> Tensor:
-        """Run a circuit and return the resulting the density matrix.
-
-        TODO: Temporary method for the purposes of noise model implementation.
-        To be removed in a later refactoring.
-
-        Arguments:
-            circuit: A converted circuit as returned by `backend.circuit`.
-            param_values: _**Already embedded**_ parameters of the circuit. See
-                [`embedding`][qadence.blocks.embedding.embedding] for more info.
-            state: Initial state.
-            endianness: Endianness of the resulting density matrix.
-
-        Returns:
-            A list of Counter objects where each key represents a bitstring
-            and its value the number of times it has been sampled from the given wave function.
-        """
-        raise NotImplementedError
-
     @abstractmethod
     def expectation(
         self,
@@ -131,16 +131,6 @@ class Backend(BackendInterface):
         states = invert_endianness(states)
         return states
 
-    def run_dm(
-        self,
-        circuit: ConvertedCircuit,
-        noise: Noise,
-        param_values: dict[str, Tensor] = {},
-        state: Tensor | None = None,
-        endianness: Endianness = Endianness.BIG,
-    ) -> Tensor:
-        raise NotImplementedError
-
     def sample(
         self,
         circuit: ConvertedCircuit,
@@ -107,16 +107,6 @@ class Backend(BackendInterface):
         state = unhorqify(state)
         return state
 
-    def run_dm(
-        self,
-        circuit: ConvertedCircuit,
-        noise: Noise,
-        param_values: ParamDictType = {},
-        state: ArrayLike | None = None,
-        endianness: Endianness = Endianness.BIG,
-    ) -> ArrayLike:
-        raise NotImplementedError
-
     def expectation(
         self,
         circuit: ConvertedCircuit,
@@ -187,6 +187,7 @@ class Backend(BackendInterface):
         param_values: dict[str, Tensor] = {},
         state: Tensor | None = None,
         endianness: Endianness = Endianness.BIG,
+        noise: Noise | None = None,
     ) -> Tensor:
         vals = to_list_of_dicts(param_values)
 
@@ -197,37 +198,41 @@ class Backend(BackendInterface):
                 "specify any cloud credentials to use the .run() method"
             )
 
-        state = state if state is None else _convert_init_state(state)
-        batched_wf = np.zeros((len(vals), 2**circuit.abstract.n_qubits), dtype=np.complex128)
+        if noise is None:
+            state = state if state is None else _convert_init_state(state)
+            batched_wf = np.zeros((len(vals), 2**circuit.abstract.n_qubits), dtype=np.complex128)
 
-        for i, param_values_el in enumerate(vals):
-            sequence = self.assign_parameters(circuit, param_values_el)
-            pattern = circuit.original.register.device_specs.pattern
-            if pattern is not None:
-                add_addressing_pattern(sequence, pattern)
-            sequence.measure()
-            sim_result = simulate_sequence(sequence, self.config, state, n_shots=None)
-            wf = (
-                sim_result.get_final_state(  # type:ignore [union-attr]
-                    ignore_global_phase=False, normalize=True
+            for i, param_values_el in enumerate(vals):
+                sequence = self.assign_parameters(circuit, param_values_el)
+                pattern = circuit.original.register.device_specs.pattern
+                if pattern is not None:
+                    add_addressing_pattern(sequence, pattern)
+                sequence.measure()
+                sim_result = simulate_sequence(sequence, self.config, state, n_shots=None)
+                wf = (
+                    sim_result.get_final_state(  # type:ignore [union-attr]
+                        ignore_global_phase=False, normalize=True
+                    )
+                    .full()
+                    .flatten()
                 )
-                .full()
-                .flatten()
-            )
-            # We flip the wavefunction coming out of pulser,
-            # essentially changing logic 0 with logic 1 in the basis states.
-            batched_wf[i] = np.flip(wf)
+                # We flip the wavefunction coming out of pulser,
+                # essentially changing logic 0 with logic 1 in the basis states.
+                batched_wf[i] = np.flip(wf)
 
-        batched_wf_torch = torch.from_numpy(batched_wf)
+            batched_wf_torch = torch.from_numpy(batched_wf)
 
-        if endianness != self.native_endianness:
-            from qadence.transpile import invert_endianness
+            if endianness != self.native_endianness:
+                from qadence.transpile import invert_endianness
 
-            batched_wf_torch = invert_endianness(batched_wf_torch)
+                batched_wf_torch = invert_endianness(batched_wf_torch)
 
-        return batched_wf_torch
+            return batched_wf_torch
 
-    def run_dm(
+        else:
+            return self._run_noisy(circuit, noise, param_values, state, endianness)
+
+    def _run_noisy(
         self,
         circuit: ConvertedCircuit,
         noise: Noise,
@@ -342,12 +347,12 @@ class Backend(BackendInterface):
             res = res if len(res.shape) > 0 else res.reshape(1)
             return res.real
         elif noise is not None:
-            dms = self.run_dm(
+            dms = self.run(
                 circuit=circuit,
-                noise=noise,
                 param_values=param_values,
                 state=state,
                 endianness=endianness,
+                noise=noise,
             )
             support = sorted(list(circuit.abstract.register.support))
             # TODO: There should be a better check for batched density matrices.
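
Callers migrating from 1.7.6 only need to move the `noise` argument, since `run` now dispatches on it. A minimal before/after sketch, assuming `backend`, `conv_circuit`, `param_values`, `state`, and `noise` are already set up as in the surrounding code:

```python
# qadence <= 1.7.6: dedicated density-matrix entry point (now removed).
dms = backend.run_dm(conv_circuit, noise=noise, param_values=param_values, state=state)

# qadence 1.7.8: run() dispatches on the noise keyword. noise=None keeps the
# pure-state behaviour; a Noise instance routes to the internal _run_noisy()
# path and returns density matrices instead of wavefunctions.
dms = backend.run(conv_circuit, param_values=param_values, state=state, noise=noise)
```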
@@ -106,16 +106,6 @@ class Backend(BackendInterface):
         state = invert_endianness(state) if endianness != self.native_endianness else state
         return state
 
-    def run_dm(
-        self,
-        circuit: ConvertedCircuit,
-        noise: Noise,
-        param_values: dict[str, Tensor] = {},
-        state: Tensor | None = None,
-        endianness: Endianness = Endianness.BIG,
-    ) -> Tensor:
-        raise NotImplementedError
-
     def _batched_expectation(
         self,
         circuit: ConvertedCircuit,
@@ -14,6 +14,7 @@ from pyqtorch.utils import is_diag
 from torch import (
     Tensor,
     cdouble,
+    complex64,
     diag_embed,
     diagonal,
     exp,
@@ -77,11 +78,24 @@ def is_single_qubit_chain(block: AbstractBlock) -> bool:
 
 
 def extract_parameter(block: ScaleBlock | ParametricBlock, config: Configuration) -> str | Tensor:
-    return (
-        tensor([block.parameters.parameter], dtype=float64)
-        if not block.is_parametric
-        else config.get_param_name(block)[0]
-    )
+    """Extract the parameter as string or its tensor value.
+
+    Args:
+        block (ScaleBlock | ParametricBlock): Block to extract parameter from.
+        config (Configuration): Configuration instance.
+
+    Returns:
+        str | Tensor: Parameter value or symbol.
+    """
+    if not block.is_parametric:
+        tensor_val = tensor([block.parameters.parameter], dtype=complex64)
+        return (
+            tensor([block.parameters.parameter], dtype=float64)
+            if torch.all(tensor_val.imag == 0)
+            else tensor_val
+        )
+
+    return config.get_param_name(block)[0]
 
 
 def convert_block(
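
The new branch probes the fixed parameter as `complex64` and only keeps a complex tensor when the imaginary part is nonzero; purely real values still come back as `float64` exactly as before. A standalone sketch of that dtype dispatch (the `as_param_tensor` helper is hypothetical, written only to mirror the logic above):

```python
import torch

def as_param_tensor(value: complex) -> torch.Tensor:
    # Probe as complex64 first, as extract_parameter now does.
    probe = torch.tensor([value], dtype=torch.complex64)
    # Purely real values keep the historical float64 dtype.
    if torch.all(probe.imag == 0):
        return torch.tensor([complex(value).real], dtype=torch.float64)
    return probe

print(as_param_tensor(0.5))       # tensor([0.5000], dtype=torch.float64)
print(as_param_tensor(0.5 + 1j))  # complex64 tensor, imaginary part preserved
```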
@@ -122,17 +136,11 @@ def convert_block(
     else:
         generator = convert_block(block.generator, n_qubits, config)[0]  # type: ignore[arg-type]
         time_param = config.get_param_name(block)[0]
-        is_parametric = (
-            block.generator.is_parametric
-            if isinstance(block.generator, AbstractBlock)
-            else False
-        )
         return [
             pyq.HamiltonianEvolution(
                 qubit_support=qubit_support,
                 generator=generator,
                 time=time_param,
-                generator_parametric=is_parametric,  # type: ignore[union-attr]
                 cache_length=0,
             )
         ]
@@ -82,7 +82,7 @@ def pulse_experiment(
         conv_circuit = backend.circuit(stretched_circuit)
         noisy_density_matrices.append(
             # Contains a single experiment result for the stretch.
-            backend.run_dm(
+            backend.run(
                 conv_circuit,
                 param_values=param_values,
                 state=state,
@@ -124,7 +124,7 @@ def noise_level_experiment(
     zne_datasets: list = []
     # Get noisy density matrices.
     conv_circuit = backend.circuit(circuit)
-    noisy_density_matrices = backend.run_dm(
+    noisy_density_matrices = backend.run(
         conv_circuit, param_values=param_values, state=state, noise=noise, endianness=endianness
     )
     # Convert observable to Numpy types compatible with QuTip simulations.
@@ -2,7 +2,7 @@ from __future__ import annotations
 
 from .config import AnsatzConfig, Callback, FeatureMapConfig, TrainConfig
 from .constructors import create_ansatz, create_fm_blocks, observable_from_config
-from .data import DictDataLoader, InfiniteTensorDataset, to_dataloader
+from .data import DictDataLoader, InfiniteTensorDataset, OptimizeResult, to_dataloader
 from .models import QNN
 from .optimize_step import optimize_step as default_optimize_step
 from .parameters import get_parameters, num_parameters, set_parameters
@@ -23,6 +23,7 @@ __all__ = [
     "observable_from_config",
     "QNN",
     "TrainConfig",
+    "OptimizeResult",
     "Callback",
     "train_with_grad",
     "train_gradient_free",
@@ -38,15 +38,22 @@ class Callback:
     Each callback function should take at least as first input
     an OptimizeResult instance.
 
+    Note: when setting call_after_opt to True, we skip
+    verifying iteration % called_every == 0.
+
     Attributes:
         callback (CallbackFunction): Callback function accepting an
             OptimizeResult as first argument.
         callback_condition (CallbackConditionFunction | None, optional): Function that
             conditions the call to callback. Defaults to None.
+        modify_optimize_result (CallbackFunction | dict[str, Any] | None, optional):
+            Function that modifies the OptimizeResult before callback.
+            For instance, one can change the `extra` (dict) argument to be used in callback.
+            If a dict is provided, the `extra` field of OptimizeResult is updated with the dict.
         called_every (int, optional): Callback to be called each `called_every` epoch.
             Defaults to 1.
             If callback_condition is None, we set
-            callback_condition to returns True when iteration % every == 0.
+            callback_condition to return True when iteration % called_every == 0.
         call_before_opt (bool, optional): If true, callback is applied before training.
             Defaults to False.
         call_end_epoch (bool, optional): If true, callback is applied during training,
@@ -61,6 +68,7 @@ class Callback:
         self,
         callback: CallbackFunction,
         callback_condition: CallbackConditionFunction | None = None,
+        modify_optimize_result: CallbackFunction | dict[str, Any] | None = None,
         called_every: int = 1,
         call_before_opt: bool = False,
         call_end_epoch: bool = True,
@@ -74,10 +82,13 @@ class Callback:
                 OptimizeResult as first argument.
             callback_condition (CallbackConditionFunction | None, optional): Function that
                 conditions the call to callback. Defaults to None.
+            modify_optimize_result (CallbackFunction | dict[str, Any] | None, optional):
+                Function that modifies the OptimizeResult before callback. If a dict
+                is provided, this updates the `extra` field of OptimizeResult.
             called_every (int, optional): Callback to be called each `called_every` epoch.
                 Defaults to 1.
                 If callback_condition is None, we set
-                callback_condition to returns True when iteration % every == 0.
+                callback_condition to return True when iteration % called_every == 0.
             call_before_opt (bool, optional): If true, callback is applied before training.
                 Defaults to False.
             call_end_epoch (bool, optional): If true, callback is applied during training,
@@ -102,9 +113,56 @@ class Callback:
         else:
             self.callback_condition = callback_condition
 
-    def __call__(self, opt_result: OptimizeResult) -> Any:
+        if modify_optimize_result is None:
+            self.modify_optimize_result = lambda opt_result: opt_result
+        elif isinstance(modify_optimize_result, dict):
+
+            def update_extra(opt_result: OptimizeResult) -> OptimizeResult:
+                opt_result.extra.update(modify_optimize_result)
+                return opt_result
+
+            self.modify_optimize_result = update_extra
+        else:
+            self.modify_optimize_result = modify_optimize_result
+
+    def __call__(self, opt_result: OptimizeResult, is_last_iteration: bool = False) -> Any:
+        """Apply callback if conditions are met.
+
+        Note that the current result may be modified by specifying a function
+        `modify_optimize_result`, for instance to add inputs to the `extra` argument
+        of the current OptimizeResult.
+
+        Args:
+            opt_result (OptimizeResult): Current result.
+            is_last_iteration (bool, optional): When True,
+                avoid verifying modulo. Defaults to False.
+                Useful when call_after_opt is True.
+
+        Returns:
+            Any: The result of the callback.
+        """
+        opt_result = self.modify_optimize_result(opt_result)
         if opt_result.iteration % self.called_every == 0 and self.callback_condition(opt_result):
             return self.callback(opt_result)
+        if is_last_iteration and self.callback_condition(opt_result):
+            return self.callback(opt_result)
+
+
+def run_callbacks(
+    callback_iterable: list[Callback], opt_res: OptimizeResult, is_last_iteration: bool = False
+) -> None:
+    """Run a list of Callback given the current OptimizeResult.
+
+    Used in train functions.
+
+    Args:
+        callback_iterable (list[Callback]): Iterable of Callbacks.
+        opt_res (OptimizeResult): Current optimization result.
+        is_last_iteration (bool, optional): Whether we reached the last iteration or not.
+            Defaults to False.
+    """
+    for callback in callback_iterable:
+        callback(opt_res, is_last_iteration)
 
 
 @dataclass
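
Taken together, `modify_optimize_result` and the module-level `run_callbacks` compose as below. A sketch grounded in the signatures above; the printing callback and the `stage` key are illustrative:

```python
from qadence.ml_tools.config import Callback, run_callbacks

# A dict passed as modify_optimize_result is merged into OptimizeResult.extra
# before the callback fires; a function OptimizeResult -> OptimizeResult
# can be supplied instead for arbitrary rewrites.
log_cb = Callback(
    callback=lambda res: print(res.iteration, res.extra.get("stage")),
    modify_optimize_result={"stage": "train"},
    called_every=10,
)

# In the train loops, the final invocation passes is_last_iteration=True so
# call_after_opt callbacks still fire when iteration % called_every != 0:
# run_callbacks([log_cb], opt_result, is_last_iteration=True)
```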
@@ -389,6 +447,13 @@ class FeatureMapConfig:
         assign `t, x = xs[:,0], xs[:,1]`.
     """
 
+    tag: str | None = None
+    """
+    String to indicate the name tag of the feature map.
+
+    Defaults to None, in which case no tag will be applied.
+    """
+
     def __post_init__(self) -> None:
         if self.multivariate_strategy == MultivariateStrategy.PARALLEL and self.num_features > 1:
             assert (
@@ -548,6 +613,13 @@ class AnsatzConfig:
     param_prefix: str = "theta"
     """The base name of the variational parameter."""
 
+    tag: str | None = None
+    """
+    String to indicate the name tag of the ansatz.
+
+    Defaults to None, in which case no tag will be applied.
+    """
+
     def __post_init__(self) -> None:
         if self.ansatz_type == AnsatzType.IIA:
             assert (
@@ -7,7 +7,7 @@ from qadence.backend import BackendConfiguration
 from qadence.blocks import chain, kron
 from qadence.blocks.abstract import AbstractBlock
 from qadence.blocks.composite import ChainBlock, KronBlock
-from qadence.blocks.utils import add
+from qadence.blocks.utils import add, tag
 from qadence.circuit import QuantumCircuit
 from qadence.constructors import (
     analog_feature_map,
@@ -774,10 +774,15 @@ def build_qnn_from_configs(
         fm_blocks=fm_blocks,
         ansatz_config=ansatz_config,
     )
+    if isinstance(fm_config.tag, str):
+        tag(full_fm, fm_config.tag)
     inputs = fm_config.inputs
     blocks.append(full_fm)
 
-    blocks.append(create_ansatz(register=register, config=ansatz_config))
+    ansatz = create_ansatz(register=register, config=ansatz_config)
+    if isinstance(ansatz_config.tag, str):
+        tag(ansatz, ansatz_config.tag)
+    blocks.append(ansatz)
 
     circ = QuantumCircuit(register, *blocks)
 
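With the config fields and the `tag` import in place, labelling the generated sub-blocks is a one-liner per config. A sketch, assuming the remaining config fields can be left at their defaults here:

```python
from qadence.ml_tools import AnsatzConfig, FeatureMapConfig

# build_qnn_from_configs now applies qadence.blocks.utils.tag to the
# feature-map and ansatz blocks whenever these fields are strings;
# tag=None (the default) leaves the blocks unnamed as before.
fm_config = FeatureMapConfig(tag="feature_map")
ansatz_config = AnsatzConfig(tag="ansatz")
```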
@@ -72,7 +72,11 @@ def write_checkpoint(
     device = None
     try:
         # We extract the device from the pyqtorch native circuit
-        device = model.device if isinstance(QuantumModel, QNN) else next(model.parameters()).device
+        device = (
+            model.device
+            if isinstance(model, (QNN, QuantumModel))
+            else next(model.parameters()).device
+        )
         device = str(device).split(":")[0]  # in case of using several CUDA devices
     except Exception as e:
         msg = (
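
The original guard passed the class objects to `isinstance` in the wrong order, so it always evaluated to `False` and the `model.device` branch was unreachable. A toy illustration of the bug pattern, with `Base`/`Derived` standing in for `QuantumModel`/`QNN`:

```python
class Base: ...
class Derived(Base): ...

obj = Derived()
# Old, buggy pattern: passes a class object as the instance argument.
print(isinstance(Base, Derived))         # False -- checks the class itself
# Fixed pattern: check the instance against a tuple of accepted classes.
print(isinstance(obj, (Derived, Base)))  # True
```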
@@ -14,7 +14,7 @@ from torch.optim import Optimizer
 from torch.utils.data import DataLoader
 from torch.utils.tensorboard import SummaryWriter
 
-from qadence.ml_tools.config import Callback, TrainConfig
+from qadence.ml_tools.config import Callback, TrainConfig, run_callbacks
 from qadence.ml_tools.data import DictDataLoader, OptimizeResult, data_to_device
 from qadence.ml_tools.optimize_step import optimize_step
 from qadence.ml_tools.printing import (
@@ -194,7 +194,6 @@ def train(
         Callback(
             lambda opt_res: print_metrics(opt_res.loss, opt_res.metrics, opt_res.iteration - 1),
             called_every=config.print_every,
-            call_after_opt=True,
         )
     ]
 
@@ -214,21 +213,48 @@ def train(
     ]
 
     # writing metrics
+    # we specify two writers,
+    # to write at evaluation time and before evaluation
     callbacks += [
         Callback(
             lambda opt_res: write_tracker(
                 writer,
                 opt_res.loss,
                 opt_res.metrics,
-                opt_res.iteration,
+                opt_res.iteration - 1,  # loss returned by optimize_step is at -1
                 tracking_tool=config.tracking_tool,
             ),
             called_every=config.write_every,
-            call_before_opt=False,
+            call_end_epoch=True,
+        ),
+        Callback(
+            lambda opt_res: write_tracker(
+                writer,
+                opt_res.loss,
+                opt_res.metrics,
+                opt_res.iteration,  # after_opt we match the right loss function
+                tracking_tool=config.tracking_tool,
+            ),
+            called_every=config.write_every,
+            call_end_epoch=False,
             call_after_opt=True,
-            call_during_eval=True,
-        )
+        ),
     ]
+    if perform_val:
+        callbacks += [
+            Callback(
+                lambda opt_res: write_tracker(
+                    writer,
+                    None,
+                    opt_res.metrics,
+                    opt_res.iteration,
+                    tracking_tool=config.tracking_tool,
+                ),
+                called_every=config.write_every,
+                call_before_opt=True,
+                call_during_eval=True,
+            )
+        ]
 
     # checkpointing
     if config.folder and config.checkpoint_every > 0 and not config.checkpoint_best_only:
@@ -262,10 +288,6 @@ def train(
         )
     ]
 
-    def run_callbacks(callback_iterable: list[Callback], opt_res: OptimizeResult) -> None:
-        for callback in callback_iterable:
-            callback(opt_res)
-
     callbacks_before_opt = [
         callback
         for callback in callbacks
@@ -337,19 +359,24 @@ def train(
             logger.info("Terminating training gracefully after the current iteration.")
             break
 
-    # Handling printing the last training loss
+    # For handling printing/writing the last training loss
     # as optimize_step does not give the loss value at the last iteration
     try:
         loss, metrics, *_ = next_loss_iter(dl_iter)
-        if iteration % config.print_every == 0 and config.verbose:
-            print_metrics(loss, metrics, iteration)
+        if isinstance(loss, Tensor):
+            loss = loss.item()
+        if perform_val:
+            # re-insert val_loss, as it was already evaluated before
+            metrics["val_loss"] = val_loss
+        print_metrics(loss, metrics, iteration)
 
     except KeyboardInterrupt:
         logger.info("Terminating training gracefully after the current iteration.")
 
     # Final callbacks, by default checkpointing and writing
+    opt_result = OptimizeResult(iteration, model, optimizer, loss, metrics)
     callbacks_after_opt = [callback for callback in callbacks if callback.call_after_opt]
-    run_callbacks(callbacks_after_opt, opt_result)
+    run_callbacks(callbacks_after_opt, opt_result, is_last_iteration=True)
 
     # writing hyperparameters
     if config.hyperparams:
@@ -12,7 +12,7 @@ from torch.nn import Module
 from torch.utils.data import DataLoader
 from torch.utils.tensorboard import SummaryWriter
 
-from qadence.ml_tools.config import Callback, TrainConfig
+from qadence.ml_tools.config import Callback, TrainConfig, run_callbacks
 from qadence.ml_tools.data import DictDataLoader, OptimizeResult
 from qadence.ml_tools.parameters import get_parameters, set_parameters
 from qadence.ml_tools.printing import (
@@ -160,10 +160,6 @@ def train(
         )
     ]
 
-    def run_callbacks(callback_iterable: list[Callback], opt_res: OptimizeResult) -> None:
-        for callback in callback_iterable:
-            callback(opt_res)
-
     callbacks_end_opt = [
         callback
         for callback in callbacks
@@ -192,7 +188,7 @@ def train(
 
     # Final callbacks
     callbacks_after_opt = [callback for callback in callbacks if callback.call_after_opt]
-    run_callbacks(callbacks_after_opt, opt_result)
+    run_callbacks(callbacks_after_opt, opt_result, is_last_iteration=True)
 
     # close tracker
     if config.tracking_tool == ExperimentTrackingTool.TENSORBOARD:
@@ -1,8 +1,8 @@
 Metadata-Version: 2.3
 Name: qadence
-Version: 1.7.6
+Version: 1.7.8
 Summary: Pasqal interface for circuit-based quantum computing SDKs
-Author-email: Aleksander Wennersteen <aleksander.wennersteen@pasqal.com>, Gert-Jan Both <gert-jan.both@pasqal.com>, Niklas Heim <niklas.heim@pasqal.com>, Mario Dagrada <mario.dagrada@pasqal.com>, Vincent Elfving <vincent.elfving@pasqal.com>, Dominik Seitz <dominik.seitz@pasqal.com>, Roland Guichard <roland.guichard@pasqal.com>, "Joao P. Moutinho" <joao.moutinho@pasqal.com>, Vytautas Abramavicius <vytautas.abramavicius@pasqal.com>, Gergana Velikova <gergana.velikova@pasqal.com>, Eduardo Maschio <eduardo.maschio@pasqal.com>, Smit Chaudhary <smit.chaudhary@pasqal.com>, Ignacio Fernández Graña <ignacio.fernandez-grana@pasqal.com>, Charles Moussa <charles.moussa@pasqal.com>, Giorgio Tosti Balducci <giorgio.tosti-balducci@pasqal.com>
+Author-email: Aleksander Wennersteen <aleksander.wennersteen@pasqal.com>, Gert-Jan Both <gert-jan.both@pasqal.com>, Niklas Heim <niklas.heim@pasqal.com>, Mario Dagrada <mario.dagrada@pasqal.com>, Vincent Elfving <vincent.elfving@pasqal.com>, Dominik Seitz <dominik.seitz@pasqal.com>, Roland Guichard <roland.guichard@pasqal.com>, "Joao P. Moutinho" <joao.moutinho@pasqal.com>, Vytautas Abramavicius <vytautas.abramavicius@pasqal.com>, Gergana Velikova <gergana.velikova@pasqal.com>, Eduardo Maschio <eduardo.maschio@pasqal.com>, Smit Chaudhary <smit.chaudhary@pasqal.com>, Ignacio Fernández Graña <ignacio.fernandez-grana@pasqal.com>, Charles Moussa <charles.moussa@pasqal.com>, Giorgio Tosti Balducci <giorgio.tosti-balducci@pasqal.com>, Daniele Cucurachi <daniele.cucurachi@pasqal.com>
 License: Apache 2.0
 License-File: LICENSE
 Classifier: License :: OSI Approved :: Apache Software License
@@ -22,7 +22,7 @@ Requires-Dist: matplotlib
 Requires-Dist: nevergrad
 Requires-Dist: numpy
 Requires-Dist: openfermion
-Requires-Dist: pyqtorch==1.4.4
+Requires-Dist: pyqtorch==1.4.9
 Requires-Dist: pyyaml
 Requires-Dist: rich
 Requires-Dist: scipy
@@ -57,7 +57,7 @@ Requires-Dist: mlflow; extra == 'mlflow'
 Provides-Extra: protocols
 Requires-Dist: qadence-protocols; extra == 'protocols'
 Provides-Extra: pulser
-Requires-Dist: pasqal-cloud==0.11.3; extra == 'pulser'
+Requires-Dist: pasqal-cloud==0.12.2; extra == 'pulser'
 Requires-Dist: pulser-core==0.19.0; extra == 'pulser'
 Requires-Dist: pulser-simulation==0.19.0; extra == 'pulser'
 Provides-Extra: visualization
@@ -1,5 +1,5 @@
 qadence/__init__.py,sha256=0SFU1_XZ-3WlSU1rA4W1Y0edxpZLO_sNg-YnpjlD77w,2638
-qadence/backend.py,sha256=TTzWEHEyOg7EH02IBiDkhE-Uwtj0fSLNMvQ48qtAcOk,14401
+qadence/backend.py,sha256=N27CRrmjkgFGhwdTJvdRKn2hKjuTwGM5t0QFzGEgvJA,13351
 qadence/circuit.py,sha256=3lQdjj_srxgk6f5M3eh3kE-Qdov4FA9TZxZZb0E1_mI,6966
 qadence/decompose.py,sha256=C4LYia_GcC9Rx3QO0ZLWTI9dN63a8WTEAXO0ZVQWuiE,5221
 qadence/divergences.py,sha256=JhpELhWSnuDvQxa9hJp_DE3EQg2Ban-Ta0mHZ_fVrHg,1832
@@ -32,15 +32,15 @@ qadence/backends/gpsr.py,sha256=3lcOHgt0soCiDXAyZ8DVyS8dMgUypIPwkDADds2boSE,5371
 qadence/backends/jax_utils.py,sha256=VfKhqCKknHDWZO21UFipWH_Lkiq175Z5GkP49gWjbyw,5038
 qadence/backends/utils.py,sha256=7gWiV_yJH3yyGFxwt-AQLEMLYkBX8aThvmFUlF0M2R0,8302
 qadence/backends/braket/__init__.py,sha256=eruyDZKMqkh1LE7eJ980vcrLJbia35uUX6krAP78clI,121
-qadence/backends/braket/backend.py,sha256=WX5FG4WsrtdnG0at2DvIY0n_AFm44t4g5OIJ1e8r6fQ,8752
+qadence/backends/braket/backend.py,sha256=HNqs4ASB1wgIaInBNifC83GDsXOfz8s6FIIvP4aY9IM,8481
 qadence/backends/braket/config.py,sha256=7cu22dmYdp48Fu760HPfxBHinaUnGmzx9MkE_EPhVN8,594
 qadence/backends/braket/convert_ops.py,sha256=DVXV7sT9sX_yGOgPKclD9KIGgmbBRuDy_e39i1Z8I1s,3417
 qadence/backends/horqrux/__init__.py,sha256=0OdVy6cq0oQggV48LO1WXdaZuSkDkz7OYNEPIkNAmfk,140
-qadence/backends/horqrux/backend.py,sha256=W5sYvX9QP-xD3MMjwX-ZMcpHuncPVqBTyn80jgWViUM,9094
+qadence/backends/horqrux/backend.py,sha256=9BqJP_av_cyLtIw07ri8gwoOXLz3b2yCzm9CmP36ZWw,8821
 qadence/backends/horqrux/config.py,sha256=xz7JlUcwW_4JAbvProbSI9hA1SXZRRAN0Hr2bvmLzfg,892
 qadence/backends/horqrux/convert_ops.py,sha256=3uG3yLq5wjfrWzFHDs0HEnd8kER91ZHVX3HCpYjOdjk,8565
 qadence/backends/pulser/__init__.py,sha256=capQ-eHqwtOeLf4mWsI0BIseAHhiLGie5cFD4-iVhUo,116
-qadence/backends/pulser/backend.py,sha256=51lbX-KfK6wFxFW7t0QwsXXwAw06D6z2msvSZzM_vD8,15363
+qadence/backends/pulser/backend.py,sha256=bALJrLH4ZyJ24ehxilRat1LdFL7cwIpF7MSjHTXhPZQ,15621
 qadence/backends/pulser/channels.py,sha256=ZF0yEXUFHAmi3IdeXjzdTNGR5NzaRRFTiUpUGVg2sO4,329
 qadence/backends/pulser/cloud.py,sha256=0uUluvbFV9sOuCPraE-9uiVtC3Q8QaDY1IJMDi8grDM,2057
 qadence/backends/pulser/config.py,sha256=aoHDmtgq5i0Zryxenw_p3uARY0B1w-UaYvfqDmrWHM0,2175
@@ -49,9 +49,9 @@ qadence/backends/pulser/devices.py,sha256=DermLZNfmCB3SqteKVW4uhg4jp6ya1G6ptnXbB
 qadence/backends/pulser/pulses.py,sha256=F4fExIRAhLPMtVg1bhNtDihUYHxu5RExGjovk8-CQIo,11884
 qadence/backends/pulser/waveforms.py,sha256=0uz95b7rUaUUtN0tuHBZmJ0H6UBmfHST_59ozwsRCzg,2227
 qadence/backends/pyqtorch/__init__.py,sha256=0OdVy6cq0oQggV48LO1WXdaZuSkDkz7OYNEPIkNAmfk,140
-qadence/backends/pyqtorch/backend.py,sha256=ITJ52hFAK0jfXo2-2QyIZ1Mt0NcxrwjJqVuT7dyR8hg,9178
+qadence/backends/pyqtorch/backend.py,sha256=NG83pZBuL2eevIFxqioiWWBrmCMVNvcvnEAAo-gju3A,8907
 qadence/backends/pyqtorch/config.py,sha256=jK-if0OF6L_inP-oZhWI4-b8wcrOiK8-EVv3NYDOfBM,2056
-qadence/backends/pyqtorch/convert_ops.py,sha256=gOETCypdCzecpvYy-5ROoCIML4HBy1Fq1NiqriD3tGc,14127
+qadence/backends/pyqtorch/convert_ops.py,sha256=PNn9TVXHWMGVyEpa8DqF1eJ4uSNAhZfuP9R_oT_fxOs,14314
 qadence/blocks/__init__.py,sha256=H6jEA_CptkE-eoB4UfSbUiDszbxxhZwECV_TgoZWXoU,960
 qadence/blocks/abstract.py,sha256=QFwKPagbTrn3V4c2DHpBd-QL_mVIUXfbvyBLUdD6zw4,12023
 qadence/blocks/analog.py,sha256=ymnnlSVoW1XL05ZvnnHCqRTHuOXIEY_7E9M0PNKJZy4,10812
57
57
  qadence/blocks/analog.py,sha256=ymnnlSVoW1XL05ZvnnHCqRTHuOXIEY_7E9M0PNKJZy4,10812
@@ -100,21 +100,21 @@ qadence/measurements/shadow.py,sha256=lYZWbBCJJh7pFXPV5jSvsyN_0g22ao3jARpKnx1jeJ
100
100
  qadence/measurements/tomography.py,sha256=8fzXhYOu_DaMiUoZzLvpP03WhuwlZ3ldkWepLUHjWqM,2665
101
101
  qadence/measurements/utils.py,sha256=CJmnSobzdeR4T4FuEpad7d-BSJ9W-wTaU9hRbveB6kY,6534
102
102
  qadence/mitigations/__init__.py,sha256=RzaxYJftePFMloGhBVSixZ8fSe-ps_Jc-EyPm6xz-bs,159
103
- qadence/mitigations/analog_zne.py,sha256=g0QkjSdF-N9Dv2N8Oza4sylnjUMid5ea-4NCT9Tcm3Y,7768
103
+ qadence/mitigations/analog_zne.py,sha256=M43TRSlPy8HjM1PQJHZUgVPXTodkVMe5KkOYFWVDa-4,7762
104
104
  qadence/mitigations/protocols.py,sha256=Jq9MyLujfTyWmc7XVUGYVRUkJT1MmZw-GgmWpVjmX2Y,1608
105
105
  qadence/mitigations/readout.py,sha256=HPfYmdjRlieUdOBMZTghFK4DRWfveM4KkDkEI0bMI0E,6262
106
- qadence/ml_tools/__init__.py,sha256=VE5ma3QDdtemU7sfkoB4XL2MxAfGmFDJMlMWgWtOyJg,1080
107
- qadence/ml_tools/config.py,sha256=-9gnxolagg4g6_OwSRop162iG_7oBMYbH1Z5zeAtstY,22374
108
- qadence/ml_tools/constructors.py,sha256=VM7VdtvQ4-4b6SBzUdDpy6fbdDAeQPrj4t2HnUlvUas,27877
106
+ qadence/ml_tools/__init__.py,sha256=nTXcVpfSnMBWwHjU18ASIbvqfht5TIY9Zt9Wu1DATUg,1118
107
+ qadence/ml_tools/config.py,sha256=b6pFoluE0YxrjDLG-a2ZL8Z0fZKSHoUqpySTB2lkzvg,25316
108
+ qadence/ml_tools/constructors.py,sha256=OOUHF4FkX1VsBB1FqATQQuPQTemMQ5tVYPMTXafSwDI,28067
109
109
  qadence/ml_tools/data.py,sha256=ubwtkNvoBf0ZTGQm2M2Lgaim2tBAiAsa9VoTRR_MWks,5175
110
110
  qadence/ml_tools/models.py,sha256=SjwAPbSl9zn9YqfmwqHc2lIXCkIpwG_ysz4jieRh7W0,16996
111
111
  qadence/ml_tools/optimize_step.py,sha256=L92-kNILrmwz20d_Xd_FIQw6SDGJYIEbFN3tSRz9eno,1835
112
112
  qadence/ml_tools/parameters.py,sha256=gew2Kq_5-RgRpaTvs8eauVhgo0sTqqDQEV6WHFEiLGM,1301
113
113
  qadence/ml_tools/printing.py,sha256=2xMhsn2j0nQdO2klLcLWY33GT_7r-Gi83Fv2M2rGQQE,4789
114
- qadence/ml_tools/saveload.py,sha256=r_AZstRiCwXfq44HxqammrH6yZGf7iAzp6Y0k6k_88M,5888
114
+ qadence/ml_tools/saveload.py,sha256=B6709ZdqHkg6kCZJmlZhCoWaNJ4ZynJe_W2IoaexLTs,5945
115
115
  qadence/ml_tools/tensors.py,sha256=xZ9ZRzOqEaMgLUGWQf1najDmL6iLuN1ojCGVFs1Tm94,1337
116
- qadence/ml_tools/train_grad.py,sha256=AaI6OHmElDdWHny1JAN2YzpICP3W2AR_GVCfMAAjtEU,13665
117
- qadence/ml_tools/train_no_grad.py,sha256=4SiIMVunTXCWLzoDudhs-eozDHOZ3t-Of2JXRJyKT20,7375
116
+ qadence/ml_tools/train_grad.py,sha256=dJuNK8Gq1dLbK23TIeE6ynlYUoj0KQH9iPPDx7ZMNPU,14682
117
+ qadence/ml_tools/train_no_grad.py,sha256=jUjnsxvWMrAa-7NV1bewpAo6mV9grF02gwBHy0SVTws,7249
118
118
  qadence/ml_tools/utils.py,sha256=PW8FyoV0mG_DtN1U8njTDV5qxZ0EK4mnFwMAsLBArfk,1410
119
119
  qadence/noise/__init__.py,sha256=r0nR8uEZeB1M9pI2UisjWq0bjw50fPFfVGzIMev923g,147
120
120
  qadence/noise/protocols.py,sha256=-aZ06JvMnpxCeT5v5lI_RNPOLbb9Ju1Pi1AB6uAXxVE,1653
@@ -133,7 +133,7 @@ qadence/transpile/digitalize.py,sha256=iWRwYAYQsD2INHj0HNbGJriv_3fRCuBW1nDBrwtKS
 qadence/transpile/flatten.py,sha256=EdhSG5WyF56nbnxINNLqrHgY84MRM1YFjT3fR4aph5Q,3427
 qadence/transpile/invert.py,sha256=KAefHTG2AWr39aengVhXrzCtJPhrZC-ZnL6vYvmbnY0,4867
 qadence/transpile/transpile.py,sha256=6MRRkk1OS279L1fwUQjazA6qlfpbd-T_EJMKT8hAhOU,2721
-qadence-1.7.6.dist-info/METADATA,sha256=Q0bt-7eH8L7b3QffCuD68-vjWa3F6kXBdZHarDsnC5c,9936
-qadence-1.7.6.dist-info/WHEEL,sha256=1yFddiXMmvYK7QYTqtRNtX66WJ0Mz8PYEiEUoOUUxRY,87
-qadence-1.7.6.dist-info/licenses/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
-qadence-1.7.6.dist-info/RECORD,,
+qadence-1.7.8.dist-info/METADATA,sha256=gEPDak8aSQfH-cItfLStkGpyqJnAfRHxxkeSns5_168,9986
+qadence-1.7.8.dist-info/WHEEL,sha256=1yFddiXMmvYK7QYTqtRNtX66WJ0Mz8PYEiEUoOUUxRY,87
+qadence-1.7.8.dist-info/licenses/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
+qadence-1.7.8.dist-info/RECORD,,