qadence 1.7.5__py3-none-any.whl → 1.7.7__py3-none-any.whl

This diff shows the contents of two publicly released package versions as they appear in their public registry. It is provided for informational purposes only.
qadence/backend.py CHANGED
@@ -282,33 +282,6 @@ class Backend(ABC):
         """
         raise NotImplementedError
 
-    @abstractmethod
-    def run_dm(
-        self,
-        circuit: ConvertedCircuit,
-        noise: Noise,
-        param_values: dict[str, ArrayLike] = {},
-        state: Tensor | None = None,
-        endianness: Endianness = Endianness.BIG,
-    ) -> Tensor:
-        """Run a circuit and return the resulting the density matrix.
-
-        TODO: Temporary method for the purposes of noise model implementation.
-        To be removed in a later refactoring.
-
-        Arguments:
-            circuit: A converted circuit as returned by `backend.circuit`.
-            param_values: _**Already embedded**_ parameters of the circuit. See
-                [`embedding`][qadence.blocks.embedding.embedding] for more info.
-            state: Initial state.
-            endianness: Endianness of the resulting density matrix.
-
-        Returns:
-            A list of Counter objects where each key represents a bitstring
-            and its value the number of times it has been sampled from the given wave function.
-        """
-        raise NotImplementedError
-
     @abstractmethod
     def expectation(
         self,
qadence/backends/braket/backend.py CHANGED
@@ -131,16 +131,6 @@ class Backend(BackendInterface):
             states = invert_endianness(states)
         return states
 
-    def run_dm(
-        self,
-        circuit: ConvertedCircuit,
-        noise: Noise,
-        param_values: dict[str, Tensor] = {},
-        state: Tensor | None = None,
-        endianness: Endianness = Endianness.BIG,
-    ) -> Tensor:
-        raise NotImplementedError
-
     def sample(
         self,
         circuit: ConvertedCircuit,
qadence/backends/gpsr.py CHANGED
@@ -10,14 +10,30 @@ from qadence.types import PI
 from qadence.utils import _round_complex
 
 
-def general_psr(spectrum: Tensor, shift_prefac: float = 0.5) -> Callable:
+def general_psr(spectrum: Tensor, n_eqs: int | None = None, shift_prefac: float = 0.5) -> Callable:
+    """Define whether single_gap_psr or multi_gap_psr is used.
+
+    Args:
+        spectrum (Tensor): Spectrum of the operation we apply PSR onto.
+        n_eqs (int | None, optional): Number of equations. Defaults to None.
+            If provided, we keep the n_eqs higher spectral gaps.
+        shift_prefac (float, optional): Shift prefactor. Defaults to 0.5.
+
+    Returns:
+        Callable: single_gap_psr or multi_gap_psr function for
+            concerned operation.
+    """
     diffs = _round_complex(spectrum - spectrum.reshape(-1, 1))
     sorted_unique_spectral_gaps = torch.unique(torch.abs(torch.tril(diffs)))
 
     # We have to filter out zeros
     sorted_unique_spectral_gaps = sorted_unique_spectral_gaps[sorted_unique_spectral_gaps > 0]
-    n_eqs = len(sorted_unique_spectral_gaps)
-    sorted_unique_spectral_gaps = torch.tensor(list(sorted_unique_spectral_gaps))
+    n_eqs = (
+        len(sorted_unique_spectral_gaps)
+        if n_eqs is None
+        else min(n_eqs, len(sorted_unique_spectral_gaps))
+    )
+    sorted_unique_spectral_gaps = torch.tensor(list(sorted_unique_spectral_gaps)[:n_eqs])
 
     if n_eqs == 1:
        return single_gap_psr
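
Note: the new `n_eqs` argument lets callers cap how many spectral gaps (and hence how many shifted evaluations) the GPSR rule uses. A minimal sketch of the truncation logic above on a toy spectrum (the spectrum itself is illustrative):

    import torch

    spectrum = torch.tensor([-1.0, 0.0, 1.0])  # toy eigenvalue spectrum

    # Pairwise eigenvalue differences are the spectral gaps.
    diffs = spectrum - spectrum.reshape(-1, 1)
    gaps = torch.unique(torch.abs(torch.tril(diffs)))
    gaps = gaps[gaps > 0]  # filter out the zero gap -> tensor([1., 2.])

    # New in 1.7.7: optionally keep only n_eqs of the unique gaps.
    n_eqs = 1
    n_eqs = len(gaps) if n_eqs is None else min(n_eqs, len(gaps))
    gaps = torch.tensor(list(gaps)[:n_eqs])
    print(gaps)  # tensor([1.]) -> with one gap, single_gap_psr is selected
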
qadence/backends/horqrux/backend.py CHANGED
@@ -107,16 +107,6 @@ class Backend(BackendInterface):
         state = unhorqify(state)
         return state
 
-    def run_dm(
-        self,
-        circuit: ConvertedCircuit,
-        noise: Noise,
-        param_values: ParamDictType = {},
-        state: ArrayLike | None = None,
-        endianness: Endianness = Endianness.BIG,
-    ) -> ArrayLike:
-        raise NotImplementedError
-
     def expectation(
         self,
         circuit: ConvertedCircuit,
qadence/backends/pulser/backend.py CHANGED
@@ -187,6 +187,7 @@ class Backend(BackendInterface):
         param_values: dict[str, Tensor] = {},
         state: Tensor | None = None,
         endianness: Endianness = Endianness.BIG,
+        noise: Noise | None = None,
     ) -> Tensor:
         vals = to_list_of_dicts(param_values)
 
@@ -197,37 +198,41 @@ class Backend(BackendInterface):
                 "specify any cloud credentials to use the .run() method"
             )
 
-        state = state if state is None else _convert_init_state(state)
-        batched_wf = np.zeros((len(vals), 2**circuit.abstract.n_qubits), dtype=np.complex128)
-
-        for i, param_values_el in enumerate(vals):
-            sequence = self.assign_parameters(circuit, param_values_el)
-            pattern = circuit.original.register.device_specs.pattern
-            if pattern is not None:
-                add_addressing_pattern(sequence, pattern)
-            sequence.measure()
-            sim_result = simulate_sequence(sequence, self.config, state, n_shots=None)
-            wf = (
-                sim_result.get_final_state(  # type:ignore [union-attr]
-                    ignore_global_phase=False, normalize=True
-                )
-                .full()
-                .flatten()
-            )
-            # We flip the wavefunction coming out of pulser,
-            # essentially changing logic 0 with logic 1 in the basis states.
-            batched_wf[i] = np.flip(wf)
-
-        batched_wf_torch = torch.from_numpy(batched_wf)
-
-        if endianness != self.native_endianness:
-            from qadence.transpile import invert_endianness
-
-            batched_wf_torch = invert_endianness(batched_wf_torch)
-
-        return batched_wf_torch
-
-    def run_dm(
+        if noise is None:
+            state = state if state is None else _convert_init_state(state)
+            batched_wf = np.zeros((len(vals), 2**circuit.abstract.n_qubits), dtype=np.complex128)
+
+            for i, param_values_el in enumerate(vals):
+                sequence = self.assign_parameters(circuit, param_values_el)
+                pattern = circuit.original.register.device_specs.pattern
+                if pattern is not None:
+                    add_addressing_pattern(sequence, pattern)
+                sequence.measure()
+                sim_result = simulate_sequence(sequence, self.config, state, n_shots=None)
+                wf = (
+                    sim_result.get_final_state(  # type:ignore [union-attr]
+                        ignore_global_phase=False, normalize=True
+                    )
+                    .full()
+                    .flatten()
+                )
+                # We flip the wavefunction coming out of pulser,
+                # essentially changing logic 0 with logic 1 in the basis states.
+                batched_wf[i] = np.flip(wf)
+
+            batched_wf_torch = torch.from_numpy(batched_wf)
+
+            if endianness != self.native_endianness:
+                from qadence.transpile import invert_endianness
+
+                batched_wf_torch = invert_endianness(batched_wf_torch)
+
+            return batched_wf_torch
+
+        else:
+            return self._run_noisy(circuit, noise, param_values, state, endianness)
+
+    def _run_noisy(
         self,
         circuit: ConvertedCircuit,
         noise: Noise,
@@ -342,12 +347,12 @@ class Backend(BackendInterface):
             res = res if len(res.shape) > 0 else res.reshape(1)
             return res.real
         elif noise is not None:
-            dms = self.run_dm(
+            dms = self.run(
                 circuit=circuit,
-                noise=noise,
                 param_values=param_values,
                 state=state,
                 endianness=endianness,
+                noise=noise,
            )
            support = sorted(list(circuit.abstract.register.support))
            # TODO: There should be a better check for batched density matrices.
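
Note: with `run_dm` removed, noisy Pulser simulation is reached by passing `noise` to `run`, which dispatches to the new `_run_noisy`. A hedged sketch of the migrated call (the register size, `AnalogRX` block, and noise options are illustrative, not taken from the diff):

    import torch
    from qadence import AnalogRX, QuantumCircuit, backend_factory
    from qadence.noise import Noise

    backend = backend_factory(backend="pulser", diff_mode=None)
    conv = backend.circuit(QuantumCircuit(2, AnalogRX(torch.pi / 2)))
    noise = Noise(protocol=Noise.DEPOLARIZING, options={"noise_probs": 0.1})

    # 1.7.5: backend.run_dm(conv, noise=noise, ...)
    # 1.7.7: noise is a keyword of run(); a density matrix comes back.
    rho = backend.run(conv, param_values={}, noise=noise)
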
qadence/backends/pyqtorch/backend.py CHANGED
@@ -106,16 +106,6 @@ class Backend(BackendInterface):
         state = invert_endianness(state) if endianness != self.native_endianness else state
         return state
 
-    def run_dm(
-        self,
-        circuit: ConvertedCircuit,
-        noise: Noise,
-        param_values: dict[str, Tensor] = {},
-        state: Tensor | None = None,
-        endianness: Endianness = Endianness.BIG,
-    ) -> Tensor:
-        raise NotImplementedError
-
     def _batched_expectation(
         self,
         circuit: ConvertedCircuit,
qadence/backends/pyqtorch/convert_ops.py CHANGED
@@ -7,7 +7,6 @@ from typing import Any, Sequence, Tuple
 import pyqtorch as pyq
 import sympy
 import torch
-from pyqtorch.apply import apply_operator
 from pyqtorch.embed import Embedding
 from pyqtorch.matrices import _dagger
 from pyqtorch.time_dependent.sesolve import sesolve
@@ -15,6 +14,7 @@ from pyqtorch.utils import is_diag
 from torch import (
     Tensor,
     cdouble,
+    complex64,
     diag_embed,
     diagonal,
     exp,
@@ -45,7 +45,6 @@ from qadence.blocks import (
 )
 from qadence.blocks.block_to_tensor import (
     _block_to_tensor_embedded,
-    block_to_tensor,
 )
 from qadence.blocks.primitive import ProjectorBlock
 from qadence.blocks.utils import parameters
@@ -78,6 +77,27 @@ def is_single_qubit_chain(block: AbstractBlock) -> bool:
     )
 
 
+def extract_parameter(block: ScaleBlock | ParametricBlock, config: Configuration) -> str | Tensor:
+    """Extract the parameter as string or its tensor value.
+
+    Args:
+        block (ScaleBlock | ParametricBlock): Block to extract parameter from.
+        config (Configuration): Configuration instance.
+
+    Returns:
+        str | Tensor: Parameter value or symbol.
+    """
+    if not block.is_parametric:
+        tensor_val = tensor([block.parameters.parameter], dtype=complex64)
+        return (
+            tensor([block.parameters.parameter], dtype=float64)
+            if torch.all(tensor_val.imag == 0)
+            else tensor_val
+        )
+
+    return config.get_param_name(block)[0]
+
+
 def convert_block(
     block: AbstractBlock, n_qubits: int = None, config: Configuration = None
 ) -> Sequence[Module | Tensor | str | sympy.Expr]:
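
Note: for fixed (non-parametric) blocks the new helper tolerates complex values instead of forcing `float64`. A standalone sketch of just the dtype rule (the helper name `_fixed_param_to_tensor` is ours, for illustration):

    import torch

    def _fixed_param_to_tensor(value: complex | float) -> torch.Tensor:
        tensor_val = torch.tensor([value], dtype=torch.complex64)
        if torch.all(tensor_val.imag == 0):
            # purely real values stay float64, as before
            return torch.tensor([float(tensor_val.real)], dtype=torch.float64)
        return tensor_val  # complex values now survive as complex64

    print(_fixed_param_to_tensor(0.5))       # tensor([0.5000], dtype=torch.float64)
    print(_fixed_param_to_tensor(0.5 + 1j))  # tensor([0.5000+1.0000j])
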
@@ -94,29 +114,37 @@ def convert_block(
 
     if isinstance(block, ScaleBlock):
         scaled_ops = convert_block(block.block, n_qubits, config)
-        scale = (
-            tensor([block.parameters.parameter], dtype=float64)
-            if not block.is_parametric
-            else config.get_param_name(block)[0]
-        )
+        scale = extract_parameter(block, config)
         return [pyq.Scale(pyq.Sequence(scaled_ops), scale)]
 
     elif isinstance(block, TimeEvolutionBlock):
-        # TODO add native pyq hamevo
-        # generator = convert_block(block.generator, n_qubits, config)[0]  # type: ignore[arg-type]
-        # time_param = config.get_param_name(block)[0]
-        # is_parametric = (
-        #     block.generator.is_parametric if isinstance(block.generator, AbstractBlock) else False
-        # )
-        # return [
-        #     pyq.HamiltonianEvolution(
-        #         qubit_support=qubit_support,
-        #         generator=generator,
-        #         time=time_param,
-        #         generator_parametric=is_parametric,  # type: ignore[union-attr]
-        #     )
-        # ]
-        return [PyQHamiltonianEvolution(qubit_support, n_qubits, block, config)]
+        if getattr(block.generator, "is_time_dependent", False):
+            return [PyQTimeDependentEvolution(qubit_support, n_qubits, block, config)]
+        else:
+            if isinstance(block.generator, sympy.Basic):
+                generator = config.get_param_name(block)[1]
+            elif isinstance(block.generator, Tensor):
+                m = block.generator.to(dtype=cdouble)
+                generator = convert_block(
+                    MatrixBlock(
+                        m,
+                        qubit_support=qubit_support,
+                        check_unitary=False,
+                        check_hermitian=True,
+                    )
+                )[0]
+            else:
+                generator = convert_block(block.generator, n_qubits, config)[0]  # type: ignore[arg-type]
+            time_param = config.get_param_name(block)[0]
+            return [
+                pyq.HamiltonianEvolution(
+                    qubit_support=qubit_support,
+                    generator=generator,
+                    time=time_param,
+                    cache_length=0,
+                )
+            ]
+
     elif isinstance(block, MatrixBlock):
         return [pyq.primitives.Primitive(block.matrix, block.qubit_support)]
     elif isinstance(block, CompositeBlock):
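
Note: time-independent evolutions now convert to pyqtorch's native `pyq.HamiltonianEvolution` (resolving the commented-out TODO above), and `PyQTimeDependentEvolution` is reserved for generators that depend on time. A hedged sketch of a block that should take the native path (the generator is illustrative):

    import torch
    from qadence import HamEvo, X, Z, run

    generator = 0.5 * (Z(0) @ Z(1)) + X(0)  # constant Hamiltonian
    evo = HamEvo(generator, torch.pi / 4)   # time-independent evolution
    state = run(evo)  # convert_block now emits pyq.HamiltonianEvolution
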
@@ -142,14 +170,14 @@ def convert_block(
             if isinstance(block, U):
                 op = pyq_cls(qubit_support[0], *config.get_param_name(block))
             else:
-                op = pyq_cls(qubit_support[0], config.get_param_name(block)[0])
+                op = pyq_cls(qubit_support[0], extract_parameter(block, config))
         else:
             op = pyq_cls(qubit_support[0])
         return [op]
     elif isinstance(block, tuple(two_qubit_gateset)):
         pyq_cls = getattr(pyq, block.name)
         if isinstance(block, ParametricBlock):
-            op = pyq_cls(qubit_support[0], qubit_support[1], config.get_param_name(block)[0])
+            op = pyq_cls(qubit_support[0], qubit_support[1], extract_parameter(block, config))
         else:
             op = pyq_cls(qubit_support[0], qubit_support[1])
         return [op]
@@ -157,7 +185,7 @@ def convert_block(
         block_name = block.name[1:] if block.name.startswith("M") else block.name
         pyq_cls = getattr(pyq, block_name)
         if isinstance(block, ParametricBlock):
-            op = pyq_cls(qubit_support[:-1], qubit_support[-1], config.get_param_name(block)[0])
+            op = pyq_cls(qubit_support[:-1], qubit_support[-1], extract_parameter(block, config))
         else:
             if "CSWAP" in block_name:
                 op = pyq_cls(qubit_support[:-2], qubit_support[-2:])
@@ -172,7 +200,7 @@ def convert_block(
     )
 
 
-class PyQHamiltonianEvolution(Module):
+class PyQTimeDependentEvolution(Module):
     def __init__(
         self,
         qubit_support: Tuple[int, ...],
@@ -188,50 +216,17 @@ class PyQHamiltonianEvolution(Module):
         self.hmat: Tensor
         self.config = config
 
-        if isinstance(block.generator, AbstractBlock) and not block.generator.is_parametric:
-            hmat = block_to_tensor(
-                block.generator,
-                qubit_support=self.qubit_support,
-                use_full_support=False,
-            )
-            hmat = hmat.permute(1, 2, 0)
-            self.register_buffer("hmat", hmat)
-            self._hamiltonian = lambda self, values: self.hmat
-
-        elif isinstance(block.generator, Tensor):
-            m = block.generator.to(dtype=cdouble)
-            hmat = block_to_tensor(
-                MatrixBlock(
-                    m,
-                    qubit_support=block.qubit_support,
-                    check_unitary=False,
-                    check_hermitian=True,
-                ),
-                qubit_support=self.qubit_support,
-                use_full_support=False,
-            )
-            hmat = hmat.permute(1, 2, 0)
-            self.register_buffer("hmat", hmat)
-            self._hamiltonian = lambda self, values: self.hmat
-
-        elif isinstance(block.generator, sympy.Basic):
-            self._hamiltonian = (
-                lambda self, values: values[self.param_names[1]].squeeze(3).permute(1, 2, 0)
-            )
-            # FIXME Why are we squeezing
-        else:
-
-            def _hamiltonian(self: PyQHamiltonianEvolution, values: dict[str, Tensor]) -> Tensor:
-                hmat = _block_to_tensor_embedded(
-                    block.generator,  # type: ignore[arg-type]
-                    values=values,
-                    qubit_support=self.qubit_support,
-                    use_full_support=False,
-                    device=self.device,
-                )
-                return hmat.permute(1, 2, 0)
-
-            self._hamiltonian = _hamiltonian
+        def _hamiltonian(self: PyQTimeDependentEvolution, values: dict[str, Tensor]) -> Tensor:
+            hmat = _block_to_tensor_embedded(
+                block.generator,  # type: ignore[arg-type]
+                values=values,
+                qubit_support=self.qubit_support,
+                use_full_support=False,
+                device=self.device,
+            )
+            return hmat.permute(1, 2, 0)
+
+        self._hamiltonian = _hamiltonian
 
         self._time_evolution = lambda values: values[self.param_names[0]]
         self._device: torch_device = (
@@ -322,59 +317,51 @@ class PyQHamiltonianEvolution(Module):
         values: dict[str, Tensor] | ParameterDict = dict(),
         embedding: Embedding | None = None,
     ) -> Tensor:
-        if getattr(self.block.generator, "is_time_dependent", False):  # type: ignore [union-attr]
-
-            def Ht(t: Tensor | float) -> Tensor:
-                # values dict has to change with new value of t
-                # initial value of a feature parameter inside generator block
-                # has to be inferred here
-                new_vals = dict()
-                for str_expr, val in values.items():
-                    expr = sympy.sympify(str_expr)
-                    t_symb = sympy.Symbol(self._get_time_parameter())
-                    free_symbols = expr.free_symbols
-                    if t_symb in free_symbols:
-                        # create substitution list for time and feature params
-                        subs_list = [(t_symb, t)]
-
-                        if len(free_symbols) > 1:
-                            # get feature param symbols
-                            feat_symbols = free_symbols.difference(set([t_symb]))
-
-                            # get feature param values
-                            feat_vals = values["orig_param_values"]
-
-                            # update substitution list with feature param values
-                            for fs in feat_symbols:
-                                subs_list.append((fs, feat_vals[str(fs)]))
-
-                        # evaluate expression with new time param value
-                        new_vals[str_expr] = torch.tensor(float(expr.subs(subs_list)))
-                    else:
-                        # expression doesn't contain time parameter - copy it as is
-                        new_vals[str_expr] = val
-
-                # get matrix form of generator
-                hmat = _block_to_tensor_embedded(
-                    self.block.generator,  # type: ignore[arg-type]
-                    values=new_vals,
-                    qubit_support=self.qubit_support,
-                    use_full_support=False,
-                    device=self.device,
-                ).squeeze(0)
-
-                return hmat
-
-            tsave = torch.linspace(0, self.block.duration, self.config.n_steps_hevo)  # type: ignore [attr-defined]
-            result = pyqify(
-                sesolve(Ht, unpyqify(state).T[:, 0:1], tsave, self.config.ode_solver).states[-1].T
-            )
-        else:
-            result = apply_operator(
-                state,
-                self.unitary(values),
-                self.qubit_support,
-            )
+        def Ht(t: Tensor | float) -> Tensor:
+            # values dict has to change with new value of t
+            # initial value of a feature parameter inside generator block
+            # has to be inferred here
+            new_vals = dict()
+            for str_expr, val in values.items():
+                expr = sympy.sympify(str_expr)
+                t_symb = sympy.Symbol(self._get_time_parameter())
+                free_symbols = expr.free_symbols
+                if t_symb in free_symbols:
+                    # create substitution list for time and feature params
+                    subs_list = [(t_symb, t)]
+
+                    if len(free_symbols) > 1:
+                        # get feature param symbols
+                        feat_symbols = free_symbols.difference(set([t_symb]))
+
+                        # get feature param values
+                        feat_vals = values["orig_param_values"]
+
+                        # update substitution list with feature param values
+                        for fs in feat_symbols:
+                            subs_list.append((fs, feat_vals[str(fs)]))
+
+                    # evaluate expression with new time param value
+                    new_vals[str_expr] = torch.tensor(float(expr.subs(subs_list)))
+                else:
+                    # expression doesn't contain time parameter - copy it as is
+                    new_vals[str_expr] = val
+
+            # get matrix form of generator
+            hmat = _block_to_tensor_embedded(
+                self.block.generator,  # type: ignore[arg-type]
+                values=new_vals,
+                qubit_support=self.qubit_support,
+                use_full_support=False,
+                device=self.device,
+            ).squeeze(0)
+
+            return hmat
+
+        tsave = torch.linspace(0, self.block.duration, self.config.n_steps_hevo)  # type: ignore [attr-defined]
+        result = pyqify(
+            sesolve(Ht, unpyqify(state).T[:, 0:1], tsave, self.config.ode_solver).states[-1].T
+        )
 
         return result
 
@@ -386,7 +373,7 @@ class PyQHamiltonianEvolution(Module):
     def dtype(self) -> torch_dtype:
         return self._dtype
 
-    def to(self, *args: Any, **kwargs: Any) -> PyQHamiltonianEvolution:
+    def to(self, *args: Any, **kwargs: Any) -> PyQTimeDependentEvolution:
         if hasattr(self, "hmat"):
             self.hmat = self.hmat.to(*args, **kwargs)
             self._device = self.hmat.device
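
Note: the renamed `PyQTimeDependentEvolution` keeps only the `sesolve` branch; the generator is re-evaluated at each time step via `Ht` and integrated over `block.duration`. A hedged sketch of a block that takes this path, assuming the time-dependent `HamEvo` interface with a `duration` argument:

    from qadence import HamEvo, Parameter, X, run

    t = Parameter("t")           # evolution-time variable
    generator = 2.0 * t * X(0)   # H(t): explicitly time-dependent
    evo = HamEvo(generator, t, duration=1.0)
    state = run(evo)  # integrated with sesolve over [0, duration]
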
qadence/engines/torch/differentiable_expectation.py CHANGED
@@ -231,8 +231,15 @@ class DifferentiableExpectation:
             if shift_factor == 1:
                 param_to_psr[param_id] = psr_fn(eigenvalues, **psr_args)
             else:
-                psr_args_factor = {k: v * shift_factor for k, v in psr_args.items()}
-                param_to_psr[param_id] = psr_fn(eigenvalues, **psr_args)
+                psr_args_factor = psr_args.copy()
+                if "shift_prefac" in psr_args_factor:
+                    if psr_args_factor["shift_prefac"] is not None:
+                        psr_args_factor["shift_prefac"] = (
+                            shift_factor * psr_args_factor["shift_prefac"]
+                        )
+                    else:
+                        psr_args_factor["shift_prefac"] = shift_factor
+                param_to_psr[param_id] = psr_fn(eigenvalues, **psr_args_factor)
         for obs in observable:
             for param_id, _ in uuid_to_eigen(obs).items():
                 # We need the embedded fixed params of the observable in the param_values dict
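
Note: the 1.7.5 branch built `psr_args_factor` and then discarded it, passing the unscaled `psr_args` to `psr_fn`; 1.7.7 scales only `shift_prefac` and actually forwards the result. The scaling rule in isolation:

    psr_args = {"shift_prefac": 0.5}
    shift_factor = 2.0

    psr_args_factor = psr_args.copy()
    if "shift_prefac" in psr_args_factor:
        if psr_args_factor["shift_prefac"] is not None:
            psr_args_factor["shift_prefac"] = shift_factor * psr_args_factor["shift_prefac"]
        else:
            psr_args_factor["shift_prefac"] = shift_factor
    print(psr_args_factor)  # {'shift_prefac': 1.0}
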
qadence/mitigations/analog_zne.py CHANGED
@@ -82,7 +82,7 @@ def pulse_experiment(
         conv_circuit = backend.circuit(stretched_circuit)
         noisy_density_matrices.append(
             # Contain a single experiment result for the stretch.
-            backend.run_dm(
+            backend.run(
                 conv_circuit,
                 param_values=param_values,
                 state=state,
@@ -124,7 +124,7 @@ def noise_level_experiment(
     zne_datasets: list = []
     # Get noisy density matrices.
     conv_circuit = backend.circuit(circuit)
-    noisy_density_matrices = backend.run_dm(
+    noisy_density_matrices = backend.run(
         conv_circuit, param_values=param_values, state=state, noise=noise, endianness=endianness
     )
     # Convert observable to Numpy types compatible with QuTip simulations.
qadence/ml_tools/__init__.py CHANGED
@@ -2,7 +2,7 @@ from __future__ import annotations
 
 from .config import AnsatzConfig, Callback, FeatureMapConfig, TrainConfig
 from .constructors import create_ansatz, create_fm_blocks, observable_from_config
-from .data import DictDataLoader, InfiniteTensorDataset, to_dataloader
+from .data import DictDataLoader, InfiniteTensorDataset, OptimizeResult, to_dataloader
 from .models import QNN
 from .optimize_step import optimize_step as default_optimize_step
 from .parameters import get_parameters, num_parameters, set_parameters
@@ -23,6 +23,7 @@ __all__ = [
     "observable_from_config",
     "QNN",
     "TrainConfig",
+    "OptimizeResult",
     "Callback",
     "train_with_grad",
     "train_gradient_free",
qadence/ml_tools/config.py CHANGED
@@ -38,15 +38,22 @@ class Callback:
     Each callback function should take at least as first input
     an OptimizeResult instance.
 
+    Note: when setting call_after_opt to True, we skip
+    verifying iteration % called_every == 0.
+
     Attributes:
         callback (CallbackFunction): Callback function accepting an
             OptimizeResult as first argument.
         callback_condition (CallbackConditionFunction | None, optional): Function that
             conditions the call to callback. Defaults to None.
+        modify_optimize_result (CallbackFunction | dict[str, Any] | None, optional):
+            Function that modify the OptimizeResult before callback.
+            For instance, one can change the `extra` (dict) argument to be used in callback.
+            If a dict is provided, the `extra` field of OptimizeResult is updated with the dict.
         called_every (int, optional): Callback to be called each `called_every` epoch.
             Defaults to 1.
             If callback_condition is None, we set
-            callback_condition to returns True when iteration % every == 0.
+            callback_condition to returns True when iteration % called_every == 0.
         call_before_opt (bool, optional): If true, callback is applied before training.
             Defaults to False.
         call_end_epoch (bool, optional): If true, callback is applied during training,
@@ -61,6 +68,7 @@ class Callback:
         self,
         callback: CallbackFunction,
         callback_condition: CallbackConditionFunction | None = None,
+        modify_optimize_result: CallbackFunction | dict[str, Any] | None = None,
         called_every: int = 1,
         call_before_opt: bool = False,
         call_end_epoch: bool = True,
@@ -74,10 +82,13 @@ class Callback:
                 OptimizeResult as ifrst argument.
             callback_condition (CallbackConditionFunction | None, optional): Function that
                 conditions the call to callback. Defaults to None.
+            modify_optimize_result (CallbackFunction | dict[str, Any] | None , optional):
+                Function that modify the OptimizeResult before callback. If a dict
+                is provided, this updates the `extra` field of OptimizeResult.
             called_every (int, optional): Callback to be called each `called_every` epoch.
                 Defaults to 1.
                 If callback_condition is None, we set
-                callback_condition to returns True when iteration % every == 0.
+                callback_condition to returns True when iteration % called_every == 0.
             call_before_opt (bool, optional): If true, callback is applied before training.
                 Defaults to False.
             call_end_epoch (bool, optional): If true, callback is applied during training,
@@ -102,9 +113,56 @@ class Callback:
         else:
             self.callback_condition = callback_condition
 
-    def __call__(self, opt_result: OptimizeResult) -> Any:
+        if modify_optimize_result is None:
+            self.modify_optimize_result = lambda opt_result: opt_result
+        elif isinstance(modify_optimize_result, dict):
+
+            def update_extra(opt_result: OptimizeResult) -> OptimizeResult:
+                opt_result.extra.update(modify_optimize_result)
+                return opt_result
+
+            self.modify_optimize_result = update_extra
+        else:
+            self.modify_optimize_result = modify_optimize_result
+
+    def __call__(self, opt_result: OptimizeResult, is_last_iteration: bool = False) -> Any:
+        """Apply callback if conditions are met.
+
+        Note that the current result may be modified by specifying a function
+        `modify_optimize_result` for instance to add inputs to the `extra` argument
+        of the current OptimizeResult.
+
+        Args:
+            opt_result (OptimizeResult): Current result.
+            is_last_iteration (bool, optional): When True,
+                avoid verifying modulo. Defaults to False.
+                Useful when call_after_opt is True.
+
+        Returns:
+            Any: The result of the callback.
+        """
+        opt_result = self.modify_optimize_result(opt_result)
         if opt_result.iteration % self.called_every == 0 and self.callback_condition(opt_result):
             return self.callback(opt_result)
+        if is_last_iteration and self.callback_condition(opt_result):
+            return self.callback(opt_result)
+
+
+def run_callbacks(
+    callback_iterable: list[Callback], opt_res: OptimizeResult, is_last_iteration: bool = False
+) -> None:
+    """Run a list of Callback given the current OptimizeResult.
+
+    Used in train functions.
+
+    Args:
+        callback_iterable (list[Callback]): Iterable of Callbacks
+        opt_res (OptimizeResult): Current optimization result,
+        is_last_iteration (bool, optional): Whether we reached the last iteration or not.
+            Defaults to False.
+    """
+    for callback in callback_iterable:
+        callback(opt_res, is_last_iteration)
 
 
 @dataclass
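
Note: a hedged sketch of the new `modify_optimize_result` hook, where a dict is merged into the result's `extra` field before the callback fires (the callback body and dict contents are illustrative):

    from qadence.ml_tools import Callback

    tag_run = Callback(
        callback=lambda opt_res: print(opt_res.iteration, opt_res.extra["run_name"]),
        modify_optimize_result={"run_name": "experiment-42"},
        called_every=10,
    )
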
qadence/ml_tools/saveload.py CHANGED
@@ -72,7 +72,11 @@ def write_checkpoint(
     device = None
     try:
         # We extract the device from the pyqtorch native circuit
-        device = model.device if isinstance(QuantumModel, QNN) else next(model.parameters()).device
+        device = (
+            model.device
+            if isinstance(model, (QNN, QuantumModel))
+            else next(model.parameters()).device
+        )
         device = str(device).split(":")[0]  # in case of using several CUDA devices
     except Exception as e:
         msg = (
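
Note: the old check `isinstance(QuantumModel, QNN)` compared the classes themselves and was always False, so every model fell back to `next(model.parameters()).device`. A sketch of the corrected lookup (the helper name is ours, for illustration):

    from qadence import QNN, QuantumModel

    def checkpoint_device(model) -> str:
        device = (
            model.device
            if isinstance(model, (QNN, QuantumModel))
            else next(model.parameters()).device
        )
        return str(device).split(":")[0]  # "cuda:1" -> "cuda"
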
qadence/ml_tools/train_grad.py CHANGED
@@ -14,7 +14,7 @@ from torch.optim import Optimizer
 from torch.utils.data import DataLoader
 from torch.utils.tensorboard import SummaryWriter
 
-from qadence.ml_tools.config import Callback, TrainConfig
+from qadence.ml_tools.config import Callback, TrainConfig, run_callbacks
 from qadence.ml_tools.data import DictDataLoader, OptimizeResult, data_to_device
 from qadence.ml_tools.optimize_step import optimize_step
 from qadence.ml_tools.printing import (
@@ -194,7 +194,6 @@ def train(
         Callback(
             lambda opt_res: print_metrics(opt_res.loss, opt_res.metrics, opt_res.iteration - 1),
             called_every=config.print_every,
-            call_after_opt=True,
         )
     ]
@@ -262,10 +261,6 @@ def train(
         )
     ]
 
-    def run_callbacks(callback_iterable: list[Callback], opt_res: OptimizeResult) -> None:
-        for callback in callback_iterable:
-            callback(opt_res)
-
     callbacks_before_opt = [
         callback
         for callback in callbacks
@@ -349,7 +344,7 @@ def train(
 
     # Final callbacks, by default checkpointing and writing
     callbacks_after_opt = [callback for callback in callbacks if callback.call_after_opt]
-    run_callbacks(callbacks_after_opt, opt_result)
+    run_callbacks(callbacks_after_opt, opt_result, is_last_iteration=True)
 
     # writing hyperparameters
     if config.hyperparams:
qadence/ml_tools/train_no_grad.py CHANGED
@@ -12,7 +12,7 @@ from torch.nn import Module
 from torch.utils.data import DataLoader
 from torch.utils.tensorboard import SummaryWriter
 
-from qadence.ml_tools.config import Callback, TrainConfig
+from qadence.ml_tools.config import Callback, TrainConfig, run_callbacks
 from qadence.ml_tools.data import DictDataLoader, OptimizeResult
 from qadence.ml_tools.parameters import get_parameters, set_parameters
 from qadence.ml_tools.printing import (
@@ -160,10 +160,6 @@ def train(
         )
     ]
 
-    def run_callbacks(callback_iterable: list[Callback], opt_res: OptimizeResult) -> None:
-        for callback in callback_iterable:
-            callback(opt_res)
-
     callbacks_end_opt = [
         callback
         for callback in callbacks
@@ -192,7 +188,7 @@ def train(
 
     # Final callbacks
     callbacks_after_opt = [callback for callback in callbacks if callback.call_after_opt]
-    run_callbacks(callbacks_after_opt, opt_result)
+    run_callbacks(callbacks_after_opt, opt_result, is_last_iteration=True)
 
     # close tracker
     if config.tracking_tool == ExperimentTrackingTool.TENSORBOARD:
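
Note: both training loops now share the module-level `run_callbacks` and mark the final invocation, so post-training callbacks fire even when the last iteration is not a multiple of `called_every`. A hedged sketch (the `OptimizeResult` fields are assumed from `qadence.ml_tools.data`; model and optimizer are stand-ins):

    import torch
    from qadence.ml_tools import Callback, OptimizeResult
    from qadence.ml_tools.config import run_callbacks

    model = torch.nn.Linear(2, 1)
    result = OptimizeResult(
        iteration=7,  # 7 % called_every != 0 ...
        model=model,
        optimizer=torch.optim.Adam(model.parameters()),
    )
    final = [Callback(lambda r: print("done at", r.iteration), called_every=5, call_after_opt=True)]
    run_callbacks(final, result, is_last_iteration=True)  # ... but it still fires
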
qadence-1.7.5.dist-info/METADATA → qadence-1.7.7.dist-info/METADATA RENAMED
@@ -1,8 +1,8 @@
 Metadata-Version: 2.3
 Name: qadence
-Version: 1.7.5
+Version: 1.7.7
 Summary: Pasqal interface for circuit-based quantum computing SDKs
-Author-email: Aleksander Wennersteen <aleksander.wennersteen@pasqal.com>, Gert-Jan Both <gert-jan.both@pasqal.com>, Niklas Heim <niklas.heim@pasqal.com>, Mario Dagrada <mario.dagrada@pasqal.com>, Vincent Elfving <vincent.elfving@pasqal.com>, Dominik Seitz <dominik.seitz@pasqal.com>, Roland Guichard <roland.guichard@pasqal.com>, "Joao P. Moutinho" <joao.moutinho@pasqal.com>, Vytautas Abramavicius <vytautas.abramavicius@pasqal.com>, Gergana Velikova <gergana.velikova@pasqal.com>, Eduardo Maschio <eduardo.maschio@pasqal.com>, Smit Chaudhary <smit.chaudhary@pasqal.com>, Ignacio Fernández Graña <ignacio.fernandez-grana@pasqal.com>, Charles Moussa <charles.moussa@pasqal.com>, Giorgio Tosti Balducci <giorgio.tosti-balducci@pasqal.com>
+Author-email: Aleksander Wennersteen <aleksander.wennersteen@pasqal.com>, Gert-Jan Both <gert-jan.both@pasqal.com>, Niklas Heim <niklas.heim@pasqal.com>, Mario Dagrada <mario.dagrada@pasqal.com>, Vincent Elfving <vincent.elfving@pasqal.com>, Dominik Seitz <dominik.seitz@pasqal.com>, Roland Guichard <roland.guichard@pasqal.com>, "Joao P. Moutinho" <joao.moutinho@pasqal.com>, Vytautas Abramavicius <vytautas.abramavicius@pasqal.com>, Gergana Velikova <gergana.velikova@pasqal.com>, Eduardo Maschio <eduardo.maschio@pasqal.com>, Smit Chaudhary <smit.chaudhary@pasqal.com>, Ignacio Fernández Graña <ignacio.fernandez-grana@pasqal.com>, Charles Moussa <charles.moussa@pasqal.com>, Giorgio Tosti Balducci <giorgio.tosti-balducci@pasqal.com>, Daniele Cucurachi <daniele.cucurachi@pasqal.com>
 License: Apache 2.0
 License-File: LICENSE
 Classifier: License :: OSI Approved :: Apache Software License
@@ -22,7 +22,7 @@ Requires-Dist: matplotlib
 Requires-Dist: nevergrad
 Requires-Dist: numpy
 Requires-Dist: openfermion
-Requires-Dist: pyqtorch==1.4.3
+Requires-Dist: pyqtorch==1.4.7
 Requires-Dist: pyyaml
 Requires-Dist: rich
 Requires-Dist: scipy
@@ -57,7 +57,7 @@ Requires-Dist: mlflow; extra == 'mlflow'
 Provides-Extra: protocols
 Requires-Dist: qadence-protocols; extra == 'protocols'
 Provides-Extra: pulser
-Requires-Dist: pasqal-cloud==0.11.3; extra == 'pulser'
+Requires-Dist: pasqal-cloud==0.11.4; extra == 'pulser'
 Requires-Dist: pulser-core==0.19.0; extra == 'pulser'
 Requires-Dist: pulser-simulation==0.19.0; extra == 'pulser'
 Provides-Extra: visualization
qadence-1.7.5.dist-info/RECORD → qadence-1.7.7.dist-info/RECORD RENAMED
@@ -1,5 +1,5 @@
 qadence/__init__.py,sha256=0SFU1_XZ-3WlSU1rA4W1Y0edxpZLO_sNg-YnpjlD77w,2638
-qadence/backend.py,sha256=TTzWEHEyOg7EH02IBiDkhE-Uwtj0fSLNMvQ48qtAcOk,14401
+qadence/backend.py,sha256=N27CRrmjkgFGhwdTJvdRKn2hKjuTwGM5t0QFzGEgvJA,13351
 qadence/circuit.py,sha256=3lQdjj_srxgk6f5M3eh3kE-Qdov4FA9TZxZZb0E1_mI,6966
 qadence/decompose.py,sha256=C4LYia_GcC9Rx3QO0ZLWTI9dN63a8WTEAXO0ZVQWuiE,5221
 qadence/divergences.py,sha256=JhpELhWSnuDvQxa9hJp_DE3EQg2Ban-Ta0mHZ_fVrHg,1832
@@ -28,19 +28,19 @@ qadence/analog/hamiltonian_terms.py,sha256=9LKidqqEMJTTdXeaxkxP_otTmcv9i4yeJ-JKC
 qadence/analog/parse_analog.py,sha256=ppvMZtsKXOIkIehCgjbdmG9n232DIycSanyuyVth5Wg,4223
 qadence/backends/__init__.py,sha256=ibm7wmZxuIoMYAQxgAx0MsfLYWOVHNWgLwyS1HjMuuI,215
 qadence/backends/api.py,sha256=NPrvtZQ4klUBabUWJ5hbTUCVoaoW9-sHVbiXxAnTt3A,2643
-qadence/backends/gpsr.py,sha256=dTmXcOKm_kJywl2EPJbUIjkylRh3J7hfWQyhLDLXYZQ,4758
+qadence/backends/gpsr.py,sha256=3lcOHgt0soCiDXAyZ8DVyS8dMgUypIPwkDADds2boSE,5371
 qadence/backends/jax_utils.py,sha256=VfKhqCKknHDWZO21UFipWH_Lkiq175Z5GkP49gWjbyw,5038
 qadence/backends/utils.py,sha256=7gWiV_yJH3yyGFxwt-AQLEMLYkBX8aThvmFUlF0M2R0,8302
 qadence/backends/braket/__init__.py,sha256=eruyDZKMqkh1LE7eJ980vcrLJbia35uUX6krAP78clI,121
-qadence/backends/braket/backend.py,sha256=WX5FG4WsrtdnG0at2DvIY0n_AFm44t4g5OIJ1e8r6fQ,8752
+qadence/backends/braket/backend.py,sha256=HNqs4ASB1wgIaInBNifC83GDsXOfz8s6FIIvP4aY9IM,8481
 qadence/backends/braket/config.py,sha256=7cu22dmYdp48Fu760HPfxBHinaUnGmzx9MkE_EPhVN8,594
 qadence/backends/braket/convert_ops.py,sha256=DVXV7sT9sX_yGOgPKclD9KIGgmbBRuDy_e39i1Z8I1s,3417
 qadence/backends/horqrux/__init__.py,sha256=0OdVy6cq0oQggV48LO1WXdaZuSkDkz7OYNEPIkNAmfk,140
-qadence/backends/horqrux/backend.py,sha256=W5sYvX9QP-xD3MMjwX-ZMcpHuncPVqBTyn80jgWViUM,9094
+qadence/backends/horqrux/backend.py,sha256=9BqJP_av_cyLtIw07ri8gwoOXLz3b2yCzm9CmP36ZWw,8821
 qadence/backends/horqrux/config.py,sha256=xz7JlUcwW_4JAbvProbSI9hA1SXZRRAN0Hr2bvmLzfg,892
 qadence/backends/horqrux/convert_ops.py,sha256=3uG3yLq5wjfrWzFHDs0HEnd8kER91ZHVX3HCpYjOdjk,8565
 qadence/backends/pulser/__init__.py,sha256=capQ-eHqwtOeLf4mWsI0BIseAHhiLGie5cFD4-iVhUo,116
-qadence/backends/pulser/backend.py,sha256=51lbX-KfK6wFxFW7t0QwsXXwAw06D6z2msvSZzM_vD8,15363
+qadence/backends/pulser/backend.py,sha256=bALJrLH4ZyJ24ehxilRat1LdFL7cwIpF7MSjHTXhPZQ,15621
 qadence/backends/pulser/channels.py,sha256=ZF0yEXUFHAmi3IdeXjzdTNGR5NzaRRFTiUpUGVg2sO4,329
 qadence/backends/pulser/cloud.py,sha256=0uUluvbFV9sOuCPraE-9uiVtC3Q8QaDY1IJMDi8grDM,2057
 qadence/backends/pulser/config.py,sha256=aoHDmtgq5i0Zryxenw_p3uARY0B1w-UaYvfqDmrWHM0,2175
@@ -49,9 +49,9 @@ qadence/backends/pulser/devices.py,sha256=DermLZNfmCB3SqteKVW4uhg4jp6ya1G6ptnXbB
 qadence/backends/pulser/pulses.py,sha256=F4fExIRAhLPMtVg1bhNtDihUYHxu5RExGjovk8-CQIo,11884
 qadence/backends/pulser/waveforms.py,sha256=0uz95b7rUaUUtN0tuHBZmJ0H6UBmfHST_59ozwsRCzg,2227
 qadence/backends/pyqtorch/__init__.py,sha256=0OdVy6cq0oQggV48LO1WXdaZuSkDkz7OYNEPIkNAmfk,140
-qadence/backends/pyqtorch/backend.py,sha256=ITJ52hFAK0jfXo2-2QyIZ1Mt0NcxrwjJqVuT7dyR8hg,9178
+qadence/backends/pyqtorch/backend.py,sha256=NG83pZBuL2eevIFxqioiWWBrmCMVNvcvnEAAo-gju3A,8907
 qadence/backends/pyqtorch/config.py,sha256=jK-if0OF6L_inP-oZhWI4-b8wcrOiK8-EVv3NYDOfBM,2056
-qadence/backends/pyqtorch/convert_ops.py,sha256=zDV9lj72K59YsJyky0Q7Je3NweZHrk5IO0uyMggLyKI,15097
+qadence/backends/pyqtorch/convert_ops.py,sha256=PNn9TVXHWMGVyEpa8DqF1eJ4uSNAhZfuP9R_oT_fxOs,14314
 qadence/blocks/__init__.py,sha256=H6jEA_CptkE-eoB4UfSbUiDszbxxhZwECV_TgoZWXoU,960
 qadence/blocks/abstract.py,sha256=QFwKPagbTrn3V4c2DHpBd-QL_mVIUXfbvyBLUdD6zw4,12023
 qadence/blocks/analog.py,sha256=ymnnlSVoW1XL05ZvnnHCqRTHuOXIEY_7E9M0PNKJZy4,10812
@@ -90,7 +90,7 @@ qadence/engines/jax/differentiable_backend.py,sha256=W5rDA8wb-ECnFWoLj4dVugF9v1l
 qadence/engines/jax/differentiable_expectation.py,sha256=poI--yV3srG3wndTcg6hk1lV63RYPJEQjypiWGzwqsk,3680
 qadence/engines/torch/__init__.py,sha256=iZFdD32ot0B0CVyC-f5dVViOBnqoalxa6M9Lj4WQuPE,160
 qadence/engines/torch/differentiable_backend.py,sha256=AWthwvKE8pCOih4dZ3tXxQX4W1ps9mBcvo7n4V9V24Y,3553
-qadence/engines/torch/differentiable_expectation.py,sha256=sN21wa5AbdlXTDRNK5FonYbmfXWbLDQ-ESHrn17Ruso,9946
+qadence/engines/torch/differentiable_expectation.py,sha256=iaWpd4Y3e_rGKt-S0TNXqqSFg5z6I_5_ZIJxjQxd7Ow,10290
 qadence/exceptions/__init__.py,sha256=BU6vWrI9mshzr1aTPm1Ticr_o_42GjTrWI4OZXhThsI,203
 qadence/exceptions/exceptions.py,sha256=4j_VJpx2sZ2Mir5BJUWu4nwb131FY1ygO4q8-XlyfRc,190
 qadence/measurements/__init__.py,sha256=RIjG9tVJMqhNzyj7maZI250Um0KgHl2PizDcKJag-JU,161
@@ -100,21 +100,21 @@ qadence/measurements/shadow.py,sha256=lYZWbBCJJh7pFXPV5jSvsyN_0g22ao3jARpKnx1jeJ
 qadence/measurements/tomography.py,sha256=8fzXhYOu_DaMiUoZzLvpP03WhuwlZ3ldkWepLUHjWqM,2665
 qadence/measurements/utils.py,sha256=CJmnSobzdeR4T4FuEpad7d-BSJ9W-wTaU9hRbveB6kY,6534
 qadence/mitigations/__init__.py,sha256=RzaxYJftePFMloGhBVSixZ8fSe-ps_Jc-EyPm6xz-bs,159
-qadence/mitigations/analog_zne.py,sha256=g0QkjSdF-N9Dv2N8Oza4sylnjUMid5ea-4NCT9Tcm3Y,7768
+qadence/mitigations/analog_zne.py,sha256=M43TRSlPy8HjM1PQJHZUgVPXTodkVMe5KkOYFWVDa-4,7762
 qadence/mitigations/protocols.py,sha256=Jq9MyLujfTyWmc7XVUGYVRUkJT1MmZw-GgmWpVjmX2Y,1608
 qadence/mitigations/readout.py,sha256=HPfYmdjRlieUdOBMZTghFK4DRWfveM4KkDkEI0bMI0E,6262
-qadence/ml_tools/__init__.py,sha256=VE5ma3QDdtemU7sfkoB4XL2MxAfGmFDJMlMWgWtOyJg,1080
-qadence/ml_tools/config.py,sha256=-9gnxolagg4g6_OwSRop162iG_7oBMYbH1Z5zeAtstY,22374
+qadence/ml_tools/__init__.py,sha256=nTXcVpfSnMBWwHjU18ASIbvqfht5TIY9Zt9Wu1DATUg,1118
+qadence/ml_tools/config.py,sha256=3vXodiN_1t17vO9uZnss3qvMS9saGqTa_tAClDKQMbs,24999
 qadence/ml_tools/constructors.py,sha256=VM7VdtvQ4-4b6SBzUdDpy6fbdDAeQPrj4t2HnUlvUas,27877
 qadence/ml_tools/data.py,sha256=ubwtkNvoBf0ZTGQm2M2Lgaim2tBAiAsa9VoTRR_MWks,5175
 qadence/ml_tools/models.py,sha256=SjwAPbSl9zn9YqfmwqHc2lIXCkIpwG_ysz4jieRh7W0,16996
 qadence/ml_tools/optimize_step.py,sha256=L92-kNILrmwz20d_Xd_FIQw6SDGJYIEbFN3tSRz9eno,1835
 qadence/ml_tools/parameters.py,sha256=gew2Kq_5-RgRpaTvs8eauVhgo0sTqqDQEV6WHFEiLGM,1301
 qadence/ml_tools/printing.py,sha256=2xMhsn2j0nQdO2klLcLWY33GT_7r-Gi83Fv2M2rGQQE,4789
-qadence/ml_tools/saveload.py,sha256=r_AZstRiCwXfq44HxqammrH6yZGf7iAzp6Y0k6k_88M,5888
+qadence/ml_tools/saveload.py,sha256=B6709ZdqHkg6kCZJmlZhCoWaNJ4ZynJe_W2IoaexLTs,5945
 qadence/ml_tools/tensors.py,sha256=xZ9ZRzOqEaMgLUGWQf1najDmL6iLuN1ojCGVFs1Tm94,1337
-qadence/ml_tools/train_grad.py,sha256=AaI6OHmElDdWHny1JAN2YzpICP3W2AR_GVCfMAAjtEU,13665
-qadence/ml_tools/train_no_grad.py,sha256=4SiIMVunTXCWLzoDudhs-eozDHOZ3t-Of2JXRJyKT20,7375
+qadence/ml_tools/train_grad.py,sha256=sOQkx6aDxXrQ4HmFB1X7VKqKcOlaFthGD-5lx6gS0Jw,13502
+qadence/ml_tools/train_no_grad.py,sha256=jUjnsxvWMrAa-7NV1bewpAo6mV9grF02gwBHy0SVTws,7249
 qadence/ml_tools/utils.py,sha256=PW8FyoV0mG_DtN1U8njTDV5qxZ0EK4mnFwMAsLBArfk,1410
 qadence/noise/__init__.py,sha256=r0nR8uEZeB1M9pI2UisjWq0bjw50fPFfVGzIMev923g,147
 qadence/noise/protocols.py,sha256=-aZ06JvMnpxCeT5v5lI_RNPOLbb9Ju1Pi1AB6uAXxVE,1653
@@ -133,7 +133,7 @@ qadence/transpile/digitalize.py,sha256=iWRwYAYQsD2INHj0HNbGJriv_3fRCuBW1nDBrwtKS
 qadence/transpile/flatten.py,sha256=EdhSG5WyF56nbnxINNLqrHgY84MRM1YFjT3fR4aph5Q,3427
 qadence/transpile/invert.py,sha256=KAefHTG2AWr39aengVhXrzCtJPhrZC-ZnL6vYvmbnY0,4867
 qadence/transpile/transpile.py,sha256=6MRRkk1OS279L1fwUQjazA6qlfpbd-T_EJMKT8hAhOU,2721
-qadence-1.7.5.dist-info/METADATA,sha256=fh8IjATKiYFEPkpPXOzirL9PCYnXk-YaLARmUmxoQP0,9936
-qadence-1.7.5.dist-info/WHEEL,sha256=1yFddiXMmvYK7QYTqtRNtX66WJ0Mz8PYEiEUoOUUxRY,87
-qadence-1.7.5.dist-info/licenses/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
-qadence-1.7.5.dist-info/RECORD,,
+qadence-1.7.7.dist-info/METADATA,sha256=hkypZEbHl7IP8RyJRdlViMHC2hBVSXre2XAnpp1Q2JM,9986
+qadence-1.7.7.dist-info/WHEEL,sha256=1yFddiXMmvYK7QYTqtRNtX66WJ0Mz8PYEiEUoOUUxRY,87
+qadence-1.7.7.dist-info/licenses/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
+qadence-1.7.7.dist-info/RECORD,,