qadence 1.11.0__py3-none-any.whl → 1.11.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -8,7 +8,7 @@ from qadence.engines.differentiable_backend import (
     DifferentiableBackend as DifferentiableBackendInterface,
 )
 from qadence.engines.torch.differentiable_expectation import DifferentiableExpectation
-from qadence.extensions import get_gpsr_fns
+from qadence.backends.parameter_shift_rules import general_psr
 from qadence.measurements import Measurements
 from qadence.mitigations import Mitigations
 from qadence.noise import NoiseHandler
@@ -75,11 +75,8 @@ class DifferentiableBackend(DifferentiableBackendInterface):
             expectation = differentiable_expectation.ad
         elif self.diff_mode == DiffMode.ADJOINT:
             expectation = differentiable_expectation.adjoint
-        else:
-            try:
-                fns = get_gpsr_fns()
-                psr_fn = fns[self.diff_mode]
-            except KeyError:
-                raise ValueError(f"{self.diff_mode} differentiation mode is not supported")
-            expectation = partial(differentiable_expectation.psr, psr_fn=psr_fn, **self.psr_args)
+        elif self.diff_mode == DiffMode.GPSR:
+            expectation = partial(
+                differentiable_expectation.psr, psr_fn=general_psr, **self.psr_args
+            )
         return expectation()
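
With the extensions lookup gone, `DiffMode.GPSR` now dispatches straight to `general_psr`. A minimal usage sketch (not part of the diff; it assumes qadence's documented public API):

    import torch
    from qadence import QuantumCircuit, QuantumModel, Z
    from qadence.operations import RX
    from qadence.parameters import FeatureParameter
    from qadence.types import BackendName, DiffMode

    # Expectation of Z on a single-qubit RX circuit, differentiated with GPSR.
    x = FeatureParameter("x")
    model = QuantumModel(
        QuantumCircuit(1, RX(0, x)),
        observable=Z(0),
        backend=BackendName.PYQTORCH,
        diff_mode=DiffMode.GPSR,
    )
    values = {"x": torch.tensor([0.5], requires_grad=True)}
    expval = model.expectation(values)
    (grad,) = torch.autograd.grad(expval.sum(), values["x"])

Unsupported modes are now expected to be rejected by `_validate_diff_mode` (retained in `qadence/extensions.py` below) rather than by the removed `KeyError` branch.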
qadence/extensions.py CHANGED
@@ -108,14 +108,6 @@ def _supported_gates(backend_name: str) -> list[TAbstractBlock]:
     return [getattr(operations, gate) for gate in _supported_gates]


-def _gpsr_fns() -> dict:
-    """Fallback function for native Qadence GPSR functions if extensions is not present."""
-    # avoid circular import
-    from qadence.backends.gpsr import general_psr
-
-    return {DiffMode.GPSR: general_psr}
-
-
 def _validate_diff_mode(backend: Backend, diff_mode: DiffMode) -> None:
     """Fallback function for native Qadence diff_mode if extensions is not present."""
     if not backend.supports_ad and diff_mode == DiffMode.AD:
@@ -152,11 +144,9 @@ try:
     available_backends = getattr(module, "available_backends")
     available_engines = getattr(module, "available_engines")
     supported_gates = getattr(module, "supported_gates")
-    get_gpsr_fns = getattr(module, "gpsr_fns")
     set_backend_config = getattr(module, "set_backend_config")
 except ModuleNotFoundError:
     available_backends = _available_backends
     available_engines = _available_engines
     supported_gates = _supported_gates
-    get_gpsr_fns = _gpsr_fns
     set_backend_config = _set_backend_config
@@ -115,8 +115,10 @@ class CallbacksManager:
         self.add_callback("PlotMetrics", "train_end")
         # only save the last checkpoint if not checkpoint_best_only
         if not self.config.checkpoint_best_only:
-            self.add_callback("SaveCheckpoint", "train_end")
-            self.add_callback("WriteMetrics", "train_end")
+            if self.config.checkpoint_every != 0:
+                self.add_callback("SaveCheckpoint", "train_end")
+            if self.config.write_every != 0:
+                self.add_callback("WriteMetrics", "train_end")

     def add_callback(
         self, callback: str | Callback, on: str | TrainingStage, called_every: int = 1
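
The train-end callbacks now honor a zero interval. A hedged sketch, assuming the `TrainConfig` fields named in the diff keep their documented meaning:

    from qadence.ml_tools import TrainConfig

    # Previously, SaveCheckpoint/WriteMetrics still fired once at "train_end"
    # even with a zero interval; with this change they are skipped entirely.
    config = TrainConfig(
        max_iter=500,
        checkpoint_every=0,  # disables checkpointing, now including train_end
        write_every=0,       # disables metric writing, now including train_end
    )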
@@ -1,6 +1,6 @@
 from __future__ import annotations

-from typing import Callable
+from typing import Any, Callable
 import numpy as np
 from sympy import Basic

@@ -8,7 +8,8 @@ from qadence.backend import BackendConfiguration
 from qadence.blocks import chain, kron
 from qadence.blocks.abstract import AbstractBlock
 from qadence.blocks.composite import ChainBlock, KronBlock
-from qadence.blocks.utils import add, tag
+from qadence.blocks.utils import tag, add
+from qadence.parameters import Parameter
 from qadence.circuit import QuantumCircuit
 from qadence.constructors import (
     analog_feature_map,
@@ -24,7 +25,7 @@ from qadence.constructors.hea import hea_digital, hea_sDAQC
 from qadence.constructors.iia import iia
 from qadence.measurements import Measurements
 from qadence.noise import NoiseHandler
-from qadence.operations import CNOT, RX, RY, I, N, Z
+from qadence.operations import CNOT, RX, RY, RZ, I
 from qadence.register import Register
 from qadence.types import (
     AnsatzType,
@@ -733,7 +734,12 @@ def create_observable(
         interaction=config.interaction,
         detuning=config.detuning,
     )
-    return add(shifting_term, detuning_hamiltonian)
+    obs: AbstractBlock = add(shifting_term, detuning_hamiltonian)
+
+    if isinstance(config.tag, str):
+        tag(obs, config.tag)
+
+    return obs


 def build_qnn_from_configs(
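
`create_observable` now tags the block it builds when `config.tag` is a string. A small sketch of the underlying call, using the same `tag` helper imported above:

    from qadence.blocks.utils import add, tag
    from qadence.operations import Z

    obs = add(Z(0), Z(1))
    tag(obs, "total_magnetization")  # illustrative tag name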
@@ -809,3 +815,262 @@ def build_qnn_from_configs(
     )

     return ufa
+
+
+def _create_feature_map_qcnn(
+    n_qubits: int,
+    n_inputs: int,
+    fm_type: str = "Fourier",
+    op: Any = RX,
+) -> Any:
+    """
+    Creates a feature map (FM) by dividing qubits among inputs.
+
+    Applies the specified feature map type to each group of qubits.
+
+    Args:
+        n_qubits (int): Total number of qubits.
+        n_inputs (int): Number of inputs.
+        fm_type (str): Type of feature map to use (e.g., "Fourier").
+        op (Any): Quantum operation to use in the feature map (e.g., RX).
+
+    Returns:
+        Any: The combined feature map as a kronecker product
+        of individual feature maps.
+    """
+    fm_temp = []
+    qubits_per_input = n_qubits // n_inputs  # Base number of qubits per input
+    exceeding_qubits = n_qubits % n_inputs  # Number of exceeding qubits
+    start = 0  # Track current qubit index
+
+    for i in range(n_inputs):
+        # Assign base qubits + 1 extra if input has exceeding qubits
+        num_qubits = qubits_per_input + 1 if i < exceeding_qubits else qubits_per_input
+        end = start + num_qubits
+
+        # Create FM for this input
+        fm_temp.append(
+            feature_map(
+                n_qubits=num_qubits,
+                param=f"\u03C6_{i}",  # Use phi_i as the parameter
+                op=op,
+                fm_type=fm_type,
+                support=tuple(range(start, end)),
+            )
+        )
+        start = end  # Update starting index for next FM
+
+    # Combine all feature maps using kronecker product
+    return kron(*fm_temp)
+
+
+def _get_block_params(
+    params: dict,
+    layer: int,
+    rep: int,
+    pos: int,
+    is_corr: bool = False,
+) -> Any:
+    """
+    Retrieves the parameter for a given operation.
+
+    Args:
+        params (dict): Dictionary to store and retrieve parameters.
+        layer (int): The index of the current layer.
+        rep (int): The index of the current repetition in the layer.
+        pos (int): Position of the qubit in the layer.
+        is_corr (bool): If True, uses correlated parameters for corresponding gates in W^opt_ij.
+
+    Returns:
+        Parameter: The retrieved parameter.
+    """
+    if is_corr:
+        # Cycle pos from 0 to 8
+        key = f"θ_{layer}_{pos % 9}"
+    else:
+        key = f"θ_{layer}_{rep}_{pos}"
+
+    if key not in params:
+        params[key] = Parameter(key)
+    return params[key]
+
+
+def _create_single_W(
+    params: dict,
+    operations: list[Any],
+    entangler: Any,
+    layer: int,
+    rep: int,
+    max_reps: int,
+    control: int,
+    target: int,
+    spacing: int,
+    n_qubits: int,
+    is_corr: bool = False,
+) -> ChainBlock:
+    """Creates a single convolutional cell W_ij."""
+    pad = [
+        I(q)
+        for q in range(control - spacing, control + spacing + 1)
+        if q != control and q != target and 0 <= q < n_qubits
+    ]
+    gates = []
+
+    # Track per-layer repetition index for proper parameter continuity
+    key_param_counter = f"param_index_{layer}_{rep}"
+    if key_param_counter not in params:
+        params[key_param_counter] = 0  # Initialize if first time
+
+    param_index = params[key_param_counter]  # Load index
+    single_params = {}  # Store params for single RZ/RY gates
+
+    # Apply the first sequence of operations
+    for _, op in enumerate(operations):
+        param_control = _get_block_params(params, layer, rep, param_index, is_corr)
+        param_index += 1
+        param_target = _get_block_params(params, layer, rep, param_index, is_corr)
+        param_index += 1
+        gates.append(
+            kron(
+                *pad,
+                op(control, param_control),
+                op(target, param_target),
+            )
+        )
+    # entangling gate
+    gates.append(entangler(target, control))
+
+    # Apply RZ, RY and entangling gates for intermediate step
+    single_params["control_rz"] = _get_block_params(params, layer, rep, param_index, is_corr)
+    param_index += 1
+    single_params["target_ry"] = _get_block_params(params, layer, rep, param_index, is_corr)
+    param_index += 1
+    gates.append(
+        kron(
+            *pad,
+            RZ(control, single_params["control_rz"]),
+            RY(target, single_params["target_ry"]),
+        )
+    )
+    # entangling gate
+    gates.append(entangler(control, target))
+
+    intermediate_ry = _get_block_params(params, layer, rep, param_index, is_corr)
+    param_index += 1
+    gates.append(
+        kron(
+            *pad,
+            I(control),
+            RY(target, intermediate_ry),
+        )
+    )
+    # entangling gate
+    gates.append(entangler(target, control))
+
+    # Apply the second sequence of operations
+    for _, op in enumerate(operations):
+        param_control = _get_block_params(params, layer, rep, param_index, is_corr)
+        param_index += 1
+        param_target = _get_block_params(params, layer, rep, param_index, is_corr)
+        param_index += 1
+        gates.append(
+            kron(
+                *pad,
+                op(control, param_control),
+                op(target, param_target),
+            )
+        )
+    # Add final entangling gate (control -> target)
+    if rep == int(max_reps - 1):
+        gates.append(entangler(control, target))
+
+    # Update params dict with the last used index
+    params[key_param_counter] = param_index
+
+    return chain(*gates)
+
+
+def _create_conv_layer(
+    layer_index: int,
+    max_reps: int,
+    current_indices: list[int],
+    params: dict,
+    operations: list[Any],
+    entangler: Any,
+    n_qubits: int,
+    is_corr: bool,
+) -> tuple[AbstractBlock, list[int]]:
+    """
+    Function to create a single convolutional layer.
+
+    Args:
+        layer_index (int): The index of the current layer.
+        max_reps (int): Number of repetitions for this layer.
+        current_indices (List[int]): Indices of qubits for the current layer.
+        params (dict): Dictionary to store and retrieve parameters.
+        operations (List[Any]): List of quantum operations to apply in the gates.
+        entangler (Any): Entangling operation, such as CZ.
+        n_qubits (int): Total number of qubits.
+
+    Returns:
+        Tuple[AbstractBlock, List[int]]: A tuple containing the quantum block
+        for the layer and the target indices for the next layer.
+    """
+    current_layer = []
+    next_indices = []
+    spacing = layer_index
+
+    if layer_index in [0, 1]:  # Special behavior for first two layers
+        layer_reps = []
+        for rep in range(max_reps):
+            rep_kron = []
+            # Define qubit pairs based on odd/even repetition
+            if rep % 2 == 0:  # Even d: regular behavior
+                pairs = zip(current_indices[::2], current_indices[1::2])
+            else:  # Odd d: shift downward, leaving qubits 0 and 7 free
+                pairs = zip(current_indices[1:-1:2], current_indices[2:-1:2])
+
+            # Build the gate sequence for each pair
+            for control, target in pairs:
+                W_pairs = _create_single_W(
+                    params,
+                    operations,
+                    entangler,
+                    layer_index,
+                    rep,
+                    max_reps,
+                    control,
+                    target,
+                    spacing,
+                    n_qubits,
+                    is_corr,
+                )
+                tag(W_pairs, f"W{control,target}")
+                rep_kron.append(W_pairs)
+
+            layer_reps.append(kron(*rep_kron))
+
+        # Combine all repetitions using `chain`
+        current_layer.append(chain(*layer_reps))
+
+    else:  # Original behavior for other layers
+        for rep in range(max_reps):
+            for control, target in zip(current_indices[::2], current_indices[1::2]):
+                W_pairs = _create_single_W(
+                    params,
+                    operations,
+                    entangler,
+                    layer_index,
+                    rep,
+                    max_reps,
+                    control,
+                    target,
+                    spacing,
+                    n_qubits,
+                    is_corr,
+                )
+                current_layer.append(W_pairs)
+
+    # Update `next_indices` with the **targets** of the current layer
+    next_indices = current_indices[1::2]
+    return chain(*current_layer), next_indices
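
For orientation, a sketch of how the new `_create_feature_map_qcnn` distributes qubits among inputs (the helper lives in `qadence.ml_tools.constructors`, as confirmed by the import in the new QCNN file below); the split follows directly from the floor division and remainder in the code:

    from qadence.ml_tools.constructors import _create_feature_map_qcnn
    from qadence.operations import RX

    # 8 qubits over 3 inputs: 8 // 3 = 2 base qubits per input and 8 % 3 = 2
    # exceeding qubits, so φ_0 -> (0, 1, 2), φ_1 -> (3, 4, 5), φ_2 -> (6, 7).
    fm = _create_feature_map_qcnn(n_qubits=8, n_inputs=3, fm_type="Fourier", op=RX)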
@@ -360,13 +360,8 @@ class QNN(QuantumModel):
             )
             observable_str = ""
             if self._observable:
-                observable_str = (
-                    "observable_config = [\n"
-                    + "\n".join(
-                        (block_to_mathematical_expression(obs.original) for obs in self._observable)
-                    )
-                    + "\n]"
-                )
+                observable_str = f"observable_config = {self.observables_to_expression()}"
+
             return f"{type(self).__name__}(\n{configs_str}\n{observable_str}\n)"

         return super().__str__()
@@ -0,0 +1,158 @@
+from __future__ import annotations
+
+from typing import Any, Callable
+from qadence.blocks import chain
+from qadence.parameters import Parameter
+from qadence.blocks.utils import add, tag
+from qadence.operations import Z, RX, CZ
+from qadence.circuit import QuantumCircuit
+from qadence.types import BackendName, DiffMode
+from qadence.blocks.abstract import AbstractBlock
+
+from .models import QNN
+from qadence.ml_tools.constructors import _create_conv_layer, _create_feature_map_qcnn
+
+
+class QCNN(QNN):
+    def __init__(
+        self,
+        n_inputs: int,
+        n_qubits: int,
+        depth: list[int],
+        operations: list[Any],
+        entangler: Any = CZ,
+        random_meas: bool = True,
+        fm_basis: str = "Fourier",
+        fm_gate: Any = RX,
+        is_corr: bool = False,
+        **kwargs: Any,
+    ) -> None:
+        """
+        Creates a QCNN model.
+
+        Args:
+            n_inputs (int): Number of input features.
+            n_qubits (int): Total number of qubits.
+            depth (list[int]): List defining the depth (repetitions) of each layer.
+            operations (list[Any]): List of quantum operations to apply
+                in the gates (e.g., [RX, RZ]).
+            entangler (Any): Entangling operation, such as CZ.
+            random_meas (bool): If True, applies random weighted measurements.
+            fm_basis (str): feature map basis.
+            fm_gate (Any): gate employed in the feature map (e.g., RX).
+            **kwargs (Any): Additional keyword arguments for the parent QNN class.
+        """
+        self.n_inputs = n_inputs
+        self.n_qubits = n_qubits
+        self.depth = depth
+        self.operations = operations
+        self.entangler = entangler
+        self.random_meas = random_meas
+        self.fm_basis = fm_basis
+        self.fm_gate = fm_gate
+        self.is_corr = is_corr
+
+        circuit = self.qcnn_circuit(
+            self.n_inputs,
+            self.n_qubits,
+            self.depth,
+            self.operations,
+            self.entangler,
+            self.fm_basis,
+            self.fm_gate,
+            self.is_corr,
+        )
+
+        obs = self.qcnn_deferred_obs(self.n_qubits, self.random_meas)
+
+        super().__init__(
+            circuit=circuit,
+            observable=obs,
+            backend=BackendName.PYQTORCH,
+            diff_mode=DiffMode.AD,
+            inputs=[f"\u03C6_{i}" for i in range(self.n_inputs)],
+            **kwargs,
+        )
+
+    def qcnn_circuit(
+        self,
+        n_inputs: int,
+        n_qubits: int,
+        depth: list[int],
+        operations: list[Any],
+        entangler: AbstractBlock,
+        fm_basis: str,
+        fm_gate: AbstractBlock,
+        is_corr: bool,
+    ) -> QuantumCircuit:
+        """Defines the QCNN circuit."""
+        # Validate qubit count
+        if n_qubits < 4:
+            raise ValueError(
+                f"Invalid number of qubits: {n_qubits}. " "At least 4 qubits are required."
+            )
+        if n_qubits % 2 != 0:
+            raise ValueError(
+                f"Invalid number of qubits: {n_qubits}. " "The number of qubits must be even."
+            )
+
+        # Validate that all values in `depth` are odd
+        even_depths = [d for d in depth if d % 2 == 0]
+        if even_depths:
+            raise ValueError(
+                f"Invalid depth values: '{even_depths[0]}'. " "All the conv layer 'r's must be odd."
+            )
+
+        # Feature map (FM)
+        fm = _create_feature_map_qcnn(n_qubits, n_inputs, fm_basis, fm_gate)
+        tag(fm, "FM")
+
+        # Conv and Pool layer definition
+        conv_layers = []
+        params: dict[str, Parameter] = {}
+
+        # Define all the 2-qubit layer patterns based on depth
+        layer_patterns = [(2**layer_index, depth[layer_index]) for layer_index in range(len(depth))]
+
+        # Initialize all qubits for the current layer
+        current_indices = list(range(n_qubits))
+
+        # Build the circuit layer by layer using the helper
+        for layer_index, (_, reps) in enumerate(layer_patterns):
+            if reps == 0:
+                raise ValueError(f"Invalid layer {layer_index}: zero repetitions (reps = {reps}).")
+            if len(current_indices) < 2:
+                raise RuntimeError(
+                    f"Layer {layer_index} requires at least 2 qubits, "
+                    f"but found {len(current_indices)}."
+                )
+
+            layer_block, next_indices = _create_conv_layer(
+                layer_index, reps, current_indices, params, operations, entangler, n_qubits, is_corr
+            )
+            tag(layer_block, f"C+P layer {layer_index}")
+            conv_layers.append(layer_block)
+
+            # Update `current_indices` for the next layer
+            current_indices = next_indices
+
+        # Combine all layers for the final ansatz
+        ansatz = chain(*conv_layers)
+
+        return QuantumCircuit(n_qubits, fm, ansatz)
+
+    def qcnn_deferred_obs(
+        self, n_qubits: int, random_meas: bool
+    ) -> AbstractBlock | list[AbstractBlock]:
+        """
+        Defines the measurements to be performed.
+
+        Covers both the traced-out and remaining qubits.
+        """
+        if random_meas:
+            w1 = [Parameter(f"w{i}") for i in range(n_qubits)]
+            obs = add(Z(i) * w for i, w in zip(range(n_qubits), w1))
+        else:
+            obs = add(Z(i) for i in range(n_qubits))
+
+        return obs
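
A hedged instantiation sketch for the new class (the diff does not show the new module's path or export, so the import below is an assumption). The arguments respect the checks in `qcnn_circuit`: an even qubit count of at least 4, and odd repetition counts in `depth`:

    from qadence.operations import RY, RZ, CZ
    from qadence.ml_tools import QCNN  # export path assumed

    model = QCNN(
        n_inputs=2,
        n_qubits=8,           # must be even and >= 4
        depth=[3, 1],         # one repetition count per conv layer, all odd
        operations=[RZ, RY],  # rotations applied inside each W_ij cell
        entangler=CZ,
    )
    # Inputs are fed through the feature-map parameters φ_0 and φ_1.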
@@ -530,15 +530,7 @@ class Trainer(BaseTrainer):
             self.ng_params = ng_params
         loss_metrics = loss, metrics

-        # --------------------- FIX: Post-Optimization Loss --------------------- #
-        # Because the loss/metrics are returned before the optimization. To sync
-        # model state and current loss/metrics we calculate them again after optimization.
-        # This is not strictly necessary.
-        # TODO: Should be removed if loss can be logged at an unoptimized model state
-        with torch.no_grad():
-            post_update_loss_metrics = self.loss_fn(self.model, batch)
-
-        return self._modify_batch_end_loss_metrics(post_update_loss_metrics)
+        return self._modify_batch_end_loss_metrics(loss_metrics)

     @BaseTrainer.callback("val_epoch")
     def run_validation(self, dataloader: DataLoader) -> list[tuple[torch.Tensor, dict[str, Any]]]:
qadence/model.py CHANGED
@@ -27,6 +27,7 @@ from qadence.mitigations import Mitigations
 from qadence.noise import NoiseHandler
 from qadence.parameters import Parameter
 from qadence.types import DiffMode, Endianness
+from qadence.utils import block_to_mathematical_expression

 logger = getLogger(__name__)

@@ -153,6 +154,11 @@ class QuantumModel(nn.Module):
         """Variational parameters."""
         return OrderedDict({k: v.data for k, v in self._params.items() if v.requires_grad})

+    @property
+    def params(self) -> OrderedDict:
+        """All parameters."""
+        return OrderedDict({k: v.data for k, v in self._params.items()})
+
     @property
     def vals_vparams(self) -> Tensor:
         """Dictionary with parameters which are actually updated during optimization."""
@@ -175,6 +181,19 @@ class QuantumModel(nn.Module):
         """The number of variational parameters."""
         return len(self.vals_vparams)

+    @property
+    def show_config(self) -> str:
+        """Attain current quantum model configurations."""
+        if isinstance(self.backend, DifferentiableBackend):
+            current_config = self.backend.backend.config
+            return BackendConfiguration.available_options(current_config)
+
+    def change_config(self, new_config: dict) -> None:
+        """Change configuration with the input."""
+        if isinstance(self.backend, DifferentiableBackend):
+            current_config = self.backend.backend.config
+            BackendConfiguration.change_config(current_config, new_config)
+
     def circuit(self, circuit: QuantumCircuit) -> ConvertedCircuit:
         """Get backend-converted circuit.

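A hedged sketch of the two new hooks, reusing `model` from the previous sketch; the option key below is hypothetical, since the available options depend on the backend:

    print(model.show_config)                  # string listing the backend's current options
    model.change_config({"option_name": 42})  # hypothetical option key and value
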
@@ -519,7 +538,7 @@ class QuantumModel(nn.Module):
             file_path = file_path / get_latest_checkpoint_name(file_path, "model")

         try:
-            qm_pt = torch.load(file_path, map_location=map_location)
+            qm_pt = torch.load(file_path, map_location=map_location, weights_only=False)
         except Exception as e:
             logger.error(f"Unable to load QuantumModel due to {e}")
         return cls._from_dict(qm_pt, as_torch)
@@ -568,6 +587,28 @@ class QuantumModel(nn.Module):
             logger.warning(f"Unable to move {self} to {args}, {kwargs} due to {e}.")
         return self

+    def observables_to_expression(self) -> dict[str, str] | str:
+        """
+        Convert the observable to a dictionary representation of Pauli terms.
+
+        If no observable is set, returns a message string. Each observable is
+        represented by its tag (if available) as the key and its mathematical expression
+        as the value.
+
+        Returns:
+            dict[str, str]: A dictionary where the keys are observable tags (or "Obs." if not provided)
+                and the values are the corresponding mathematical expressions.
+        """
+        if self._observable is None:
+            return "No observable set."
+        else:
+            return {
+                obs.original.tag if obs.original.tag else "Obs.": block_to_mathematical_expression(
+                    obs.original
+                )
+                for obs in self._observable
+            }
+
     @property
     def device(self) -> torch.device:
         """Get device.
qadence/parameters.py CHANGED
@@ -225,6 +225,8 @@ def sympy_to_numeric(expr: Basic) -> TNumber:
     if expr.as_real_imag()[1] != 0:
         return complex(expr)
     else:
+        if expr.is_Integer:
+            return int(expr)
         return float(expr)


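The effect of the new `is_Integer` branch, sketched directly on sympy values:

    from sympy import Float, Integer
    from qadence.parameters import sympy_to_numeric

    sympy_to_numeric(Integer(3))  # now returns the int 3 instead of 3.0
    sympy_to_numeric(Float(3.5))  # still returns the float 3.5
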
qadence/register.py CHANGED
@@ -329,8 +329,12 @@ class Register:
         return Register(g, spacing=None, device_specs=self.device_specs)

     def _to_dict(self) -> dict:
+        try:
+            graph_data = nx.node_link_data(self.graph, edges="links")
+        except TypeError:  # For Python 3.9 support
+            graph_data = nx.node_link_data(self.graph)
         return {
-            "graph": nx.node_link_data(self.graph),
+            "graph": graph_data,
             "device_specs": self.device_specs._to_dict(),
         }

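The guard mirrors the `node_link_data` signature change in networkx, where the explicit `edges=` keyword only exists in newer releases. A standalone sketch of the same pattern:

    import networkx as nx

    g = nx.path_graph(4)
    try:
        data = nx.node_link_data(g, edges="links")  # newer networkx
    except TypeError:  # older networkx, e.g. the last releases supporting Python 3.9
        data = nx.node_link_data(g)
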
qadence/serialization.py CHANGED
@@ -226,7 +226,7 @@ def save_json(d: dict, file_path: str | Path) -> None:


 def load_pt(file_path: str | Path, map_location: str) -> Any:
-    return torch.load(file_path, map_location=map_location)
+    return torch.load(file_path, map_location=map_location, weights_only=False)


 def load_json(file_path: str | Path, map_location: str) -> Any:
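
Both `torch.load` call sites now opt out of `weights_only`. PyTorch 2.6 changed that default to `True`, which rejects checkpoints containing arbitrary pickled Python objects such as qadence models; the explicit argument restores the previous behavior (the path below is illustrative):

    import torch

    # Fails under PyTorch >= 2.6 for fully pickled checkpoints:
    #     torch.load("qm.pt", map_location="cpu")
    # Explicit opt-out, as used by load_pt and QuantumModel.load:
    obj = torch.load("qm.pt", map_location="cpu", weights_only=False)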