qadence 1.10.3__py3-none-any.whl → 1.11.0__py3-none-any.whl

This diff compares publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between these versions as they appear in their respective public registries.
@@ -1,5 +1,6 @@
 from __future__ import annotations
 
+from typing import Callable
 import numpy as np
 from sympy import Basic
 
@@ -24,7 +25,6 @@ from qadence.constructors.iia import iia
 from qadence.measurements import Measurements
 from qadence.noise import NoiseHandler
 from qadence.operations import CNOT, RX, RY, I, N, Z
-from qadence.parameters import Parameter
 from qadence.register import Register
 from qadence.types import (
     AnsatzType,
@@ -33,10 +33,10 @@ from qadence.types import (
     InputDiffMode,
     Interaction,
     MultivariateStrategy,
-    ObservableTransform,
     ReuploadScaling,
     Strategy,
     TParameter,
+    TArray,
 )
 
 from .config import AnsatzConfig, FeatureMapConfig
@@ -706,35 +706,6 @@ def _interleave_ansatz_in_fm(
     return chain(*full_fm)
 
 
-def load_observable_transformations(config: ObservableConfig) -> tuple[Parameter, Parameter]:
-    """
-    Get the observable shifting and scaling factors.
-
-    Args:
-        config (ObservableConfig): Observable configuration.
-
-    Returns:
-        tuple[Parameter, Parameter]: The observable shifting and scaling factors.
-    """
-    shift = config.shift
-    scale = config.scale
-    if config.trainable_transform is not None:
-        shift = Parameter(name=shift, trainable=config.trainable_transform)
-        scale = Parameter(name=scale, trainable=config.trainable_transform)
-    else:
-        shift = Parameter(shift)
-        scale = Parameter(scale)
-    return scale, shift
-
-
-ObservableTransformMap = {
-    ObservableTransform.RANGE: lambda detuning, scale, shift: (
-        (shift, shift - scale) if detuning is N else (0.5 * (shift - scale), 0.5 * (scale + shift))
-    ),
-    ObservableTransform.SCALE: lambda _, scale, shift: (scale, shift),
-}
-
-
 def _global_identity(register: int | Register) -> KronBlock:
     """Create a global identity block."""
     return kron(
@@ -742,7 +713,7 @@ def _global_identity(register: int | Register) -> KronBlock:
     )
 
 
-def observable_from_config(
+def create_observable(
     register: int | Register,
     config: ObservableConfig,
 ) -> AbstractBlock:
@@ -756,35 +727,11 @@ def observable_from_config(
     Returns:
         AbstractBlock: The observable block.
     """
-    scale, shift = load_observable_transformations(config)
-    return create_observable(register, config.detuning, scale, shift, config.transformation_type)
-
-
-def create_observable(
-    register: int | Register,
-    detuning: TDetuning = Z,
-    scale: TParameter | None = None,
-    shift: TParameter | None = None,
-    transformation_type: ObservableTransform = ObservableTransform.NONE,  # type: ignore[assignment]
-) -> AbstractBlock:
-    """
-    Create an observable block.
-
-    Args:
-        register (int | Register): Number of qubits or a register object.
-        detuning: The type of detuning.
-        scale: A parameter for the scale.
-        shift: A parameter for the shift.
-
-    Returns:
-        AbstractBlock: The observable block.
-    """
-    if transformation_type == ObservableTransform.RANGE:
-        scale, shift = ObservableTransformMap[transformation_type](detuning, scale, shift)  # type: ignore[index]
-    shifting_term: AbstractBlock = shift * _global_identity(register)  # type: ignore[operator]
-    detuning_hamiltonian: AbstractBlock = scale * hamiltonian_factory(  # type: ignore[operator]
+    shifting_term: AbstractBlock = config.shift * _global_identity(register)  # type: ignore[operator]
+    detuning_hamiltonian: AbstractBlock = config.scale * hamiltonian_factory(  # type: ignore[operator]
         register=register,
-        detuning=detuning,
+        interaction=config.interaction,
+        detuning=config.detuning,
     )
     return add(shifting_term, detuning_hamiltonian)
 
@@ -844,9 +791,9 @@ def build_qnn_from_configs(
     circ = QuantumCircuit(register, *blocks)
 
     observable: AbstractBlock | list[AbstractBlock] = (
-        [observable_from_config(register=register, config=cfg) for cfg in observable_config]
+        [create_observable(register=register, config=cfg) for cfg in observable_config]
         if isinstance(observable_config, list)
-        else observable_from_config(register=register, config=observable_config)
+        else create_observable(register=register, config=observable_config)
     )
 
     ufa = QNN(
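The net effect of these hunks is that the old two-step path (`observable_from_config` plus the `ObservableTransform` machinery) collapses into a single `create_observable(register, config)` call that reads `scale`, `shift`, `detuning`, and the new `interaction` field directly from `ObservableConfig`. The hunks above do not show the file path, but the `QNN` docstring later in this diff imports `ObservableConfig` from `qadence.ml_tools.constructors`, so the following sketch assumes `create_observable` is importable from the same module; it is an illustration of the new call, not taken verbatim from the release.

```python
from qadence.constructors.hamiltonians import Interaction
from qadence.ml_tools.constructors import ObservableConfig, create_observable  # assumed import path
from qadence.operations import Z

# Scale and shift now live on the config itself; no ObservableTransform is involved.
obs_config = ObservableConfig(detuning=Z, interaction=Interaction.ZZ, scale=2.0, shift=0.0)

# create_observable replaces observable_from_config: per the hunk above it builds
# config.shift * identity  +  config.scale * hamiltonian_factory(register, interaction, detuning).
observable = create_observable(register=4, config=obs_config)
```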
qadence/ml_tools/data.py CHANGED
@@ -34,6 +34,10 @@ class OptimizeResult:
     """Metrics that can be saved during training."""
     extra: dict = field(default_factory=lambda: dict())
     """Extra dict for saving anything else to be used in callbacks."""
+    rank: int = 0
+    """Rank of the process for which this result was generated."""
+    device: str | None = "cpu"
+    """Device on which this result for calculated."""
 
 
 @dataclass
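For downstream callbacks this simply means two more attributes are available on each `OptimizeResult`: the process rank and the device that produced it. A hypothetical callback sketch reading them (the callback signature here is illustrative, not part of the diff; only the `qadence/ml_tools/data.py` location and the field names come from the hunk above):

```python
from qadence.ml_tools.data import OptimizeResult

def log_result(result: OptimizeResult) -> None:
    # rank defaults to 0 and device to "cpu"; in a distributed run they identify
    # which process and which device produced this training snapshot.
    print(f"[rank {result.rank} on {result.device}] extra keys: {list(result.extra)}")
```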
@@ -1,6 +1,6 @@
 from __future__ import annotations
 
-from collections import Counter
+from collections import Counter, OrderedDict
 from logging import getLogger
 from typing import Any, Callable
 
@@ -19,6 +19,7 @@ from qadence.model import QuantumModel
 from qadence.noise import NoiseHandler
 from qadence.register import Register
 from qadence.types import BackendName, DiffMode, Endianness, InputDiffMode, ParamDictType
+from qadence.utils import block_to_mathematical_expression
 
 logger = getLogger(__name__)
 
@@ -208,6 +209,8 @@ class QNN(QuantumModel):
         else:
             raise ValueError(f"Unkown forward diff mode: {self.input_diff_mode}")
 
+        self._model_configs: dict = dict()
+
     @classmethod
     def from_configs(
         cls,
@@ -255,7 +258,7 @@ class QNN(QuantumModel):
         from qadence.constructors import ObservableConfig
         from qadence.operations import Z
         from qadence.types import (
-            AnsatzType, BackendName, BasisSet, ObservableTransform, ReuploadScaling, Strategy
+            AnsatzType, BackendName, BasisSet, ReuploadScaling, Strategy
         )
 
         register = 4
@@ -263,7 +266,6 @@ class QNN(QuantumModel):
             detuning=Z,
             scale=5.0,
             shift=0.0,
-            transformation_type=ObservableTransform.SCALE,
             trainable_transform=None,
         )
         fm_config = FeatureMapConfig(
@@ -293,7 +295,7 @@ class QNN(QuantumModel):
         """
         from .constructors import build_qnn_from_configs
 
-        return build_qnn_from_configs(
+        qnn = build_qnn_from_configs(
             register=register,
             observable_config=obs_config,
             fm_config=fm_config,
@@ -305,6 +307,69 @@ class QNN(QuantumModel):
             configuration=configuration,
             input_diff_mode=input_diff_mode,
         )
+        qnn._model_configs = {
+            "register": register,
+            "observable_config": obs_config,
+            "fm_config": fm_config,
+            "ansatz_config": ansatz_config,
+        }
+        return qnn
+
+    def __str__(self) -> str | Any:
+        """Return a string representation of a QNN.
+
+        When creating a QNN from a set of configurations,
+        we print the configurations used. Otherwise, we use the default printing.
+
+        Returns:
+            str | Any: A string representation of a QNN.
+
+        Example:
+        ```python exec="on" source="material-block" result="json"
+        from qadence import QNN
+        from qadence.constructors.hamiltonians import Interaction
+        from qadence.ml_tools.config import AnsatzConfig, FeatureMapConfig
+        from qadence.ml_tools.constructors import (
+            ObservableConfig,
+        )
+        from qadence.operations import Z
+        from qadence.types import BackendName
+
+        backend = BackendName.PYQTORCH
+        fm_config = FeatureMapConfig(num_features=1)
+        ansatz_config = AnsatzConfig()
+        observable_config = ObservableConfig(detuning=Z, interaction=Interaction.ZZ, scale=2)
+
+        qnn = QNN.from_configs(
+            register=2,
+            obs_config=observable_config,
+            fm_config=fm_config,
+            ansatz_config=ansatz_config,
+            backend=backend,
+        )
+        print(qnn) # markdown-exec: hide
+        ```
+        """
+        if bool(self._model_configs):
+            configs_str = "\n".join(
+                (
+                    k + " = " + str(self._model_configs[k])
+                    for k in sorted(self._model_configs.keys())
+                    if k != "observable_config"
+                )
+            )
+            observable_str = ""
+            if self._observable:
+                observable_str = (
+                    "observable_config = [\n"
+                    + "\n".join(
+                        (block_to_mathematical_expression(obs.original) for obs in self._observable)
+                    )
+                    + "\n]"
+                )
+            return f"{type(self).__name__}(\n{configs_str}\n{observable_str}\n)"
+
+        return super().__str__()
 
     def forward(
         self,
@@ -40,7 +40,6 @@ def optimize_step(
     """
 
     loss, metrics = None, {}
-    xs_to_device = data_to_device(xs, device=device, dtype=dtype)
 
     def closure() -> Any:
         # NOTE: We need the nonlocal as we can't return a metric dict and
@@ -48,7 +47,7 @@ def optimize_step(
         # reason the returned loss is always the first one...
        nonlocal metrics, loss
         optimizer.zero_grad()
-        loss, metrics = loss_fn(model, xs_to_device)
+        loss, metrics = loss_fn(model, xs)
         loss.backward(retain_graph=True)
         return loss.item()
 
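These two hunks remove the implicit `data_to_device(xs, ...)` call from `optimize_step`, so `loss_fn` now receives the batch exactly as the caller passes it; the diff itself does not show where the device transfer now lives (presumably upstream, in the trainer path that also gains `Accelerator`/`Distributor` below). A minimal, hypothetical illustration of moving a batch yourself before calling into the training step, in plain PyTorch since the new transfer point is not shown here:

```python
import torch

def move_batch(xs: torch.Tensor | dict[str, torch.Tensor], device: str = "cpu"):
    """Hypothetical helper: put a batch on the target device before optimize_step."""
    if isinstance(xs, dict):
        return {k: v.to(device) for k, v in xs.items()}
    return xs.to(device)
```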
@@ -2,6 +2,8 @@ from __future__ import annotations
 
 from .base_trainer import BaseTrainer
 from .config_manager import ConfigManager
+from .accelerator import Accelerator
+from .distribution import Distributor
 
 # Modules to be automatically added to the qadence.ml_tools.loss namespace
-__all__ = ["BaseTrainer", "ConfigManager"]
+__all__ = ["BaseTrainer", "ConfigManager", "Accelerator", "Distributor"]