qadence 1.7.6__tar.gz → 1.7.8__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {qadence-1.7.6 → qadence-1.7.8}/PKG-INFO +4 -4
- {qadence-1.7.6 → qadence-1.7.8}/pyproject.toml +4 -3
- {qadence-1.7.6 → qadence-1.7.8}/qadence/backend.py +0 -27
- {qadence-1.7.6 → qadence-1.7.8}/qadence/backends/braket/backend.py +0 -10
- {qadence-1.7.6 → qadence-1.7.8}/qadence/backends/horqrux/backend.py +0 -10
- {qadence-1.7.6 → qadence-1.7.8}/qadence/backends/pulser/backend.py +31 -26
- {qadence-1.7.6 → qadence-1.7.8}/qadence/backends/pyqtorch/backend.py +0 -10
- {qadence-1.7.6 → qadence-1.7.8}/qadence/backends/pyqtorch/convert_ops.py +19 -11
- {qadence-1.7.6 → qadence-1.7.8}/qadence/mitigations/analog_zne.py +2 -2
- {qadence-1.7.6 → qadence-1.7.8}/qadence/ml_tools/__init__.py +2 -1
- {qadence-1.7.6 → qadence-1.7.8}/qadence/ml_tools/config.py +75 -3
- {qadence-1.7.6 → qadence-1.7.8}/qadence/ml_tools/constructors.py +7 -2
- {qadence-1.7.6 → qadence-1.7.8}/qadence/ml_tools/saveload.py +5 -1
- {qadence-1.7.6 → qadence-1.7.8}/qadence/ml_tools/train_grad.py +41 -14
- {qadence-1.7.6 → qadence-1.7.8}/qadence/ml_tools/train_no_grad.py +2 -6
- {qadence-1.7.6 → qadence-1.7.8}/.coveragerc +0 -0
- {qadence-1.7.6/.github/workflows → qadence-1.7.8/.github}/ISSUE_TEMPLATE/bug-report.yml +0 -0
- {qadence-1.7.6/.github/workflows → qadence-1.7.8/.github}/ISSUE_TEMPLATE/config.yml +0 -0
- {qadence-1.7.6/.github/workflows → qadence-1.7.8/.github}/ISSUE_TEMPLATE/new-feature.yml +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/.github/workflows/build_docs.yml +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/.github/workflows/lint.yml +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/.github/workflows/test_all.yml +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/.github/workflows/test_examples.yml +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/.github/workflows/test_fast.yml +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/.gitignore +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/.pre-commit-config.yaml +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/LICENSE +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/MANIFEST.in +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/README.md +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/mkdocs.yml +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/__init__.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/analog/__init__.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/analog/addressing.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/analog/constants.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/analog/device.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/analog/hamiltonian_terms.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/analog/parse_analog.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/backends/__init__.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/backends/api.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/backends/braket/__init__.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/backends/braket/config.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/backends/braket/convert_ops.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/backends/gpsr.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/backends/horqrux/__init__.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/backends/horqrux/config.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/backends/horqrux/convert_ops.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/backends/jax_utils.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/backends/pulser/__init__.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/backends/pulser/channels.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/backends/pulser/cloud.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/backends/pulser/config.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/backends/pulser/convert_ops.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/backends/pulser/devices.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/backends/pulser/pulses.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/backends/pulser/waveforms.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/backends/pyqtorch/__init__.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/backends/pyqtorch/config.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/backends/utils.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/blocks/__init__.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/blocks/abstract.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/blocks/analog.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/blocks/block_to_tensor.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/blocks/composite.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/blocks/embedding.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/blocks/manipulate.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/blocks/matrix.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/blocks/primitive.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/blocks/utils.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/circuit.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/constructors/__init__.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/constructors/ansatze.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/constructors/daqc/__init__.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/constructors/daqc/daqc.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/constructors/daqc/gen_parser.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/constructors/daqc/utils.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/constructors/feature_maps.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/constructors/hamiltonians.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/constructors/iia.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/constructors/qft.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/constructors/rydberg_feature_maps.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/constructors/rydberg_hea.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/constructors/utils.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/decompose.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/divergences.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/draw/__init__.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/draw/assets/dark/measurement.png +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/draw/assets/dark/measurement.svg +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/draw/assets/light/measurement.png +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/draw/assets/light/measurement.svg +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/draw/themes.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/draw/utils.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/draw/vizbackend.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/engines/__init__.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/engines/differentiable_backend.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/engines/jax/__init__.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/engines/jax/differentiable_backend.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/engines/jax/differentiable_expectation.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/engines/torch/__init__.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/engines/torch/differentiable_backend.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/engines/torch/differentiable_expectation.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/exceptions/__init__.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/exceptions/exceptions.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/execution.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/extensions.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/libs.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/log_config.yaml +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/logger.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/measurements/__init__.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/measurements/protocols.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/measurements/samples.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/measurements/shadow.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/measurements/tomography.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/measurements/utils.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/mitigations/__init__.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/mitigations/protocols.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/mitigations/readout.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/ml_tools/data.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/ml_tools/models.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/ml_tools/optimize_step.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/ml_tools/parameters.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/ml_tools/printing.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/ml_tools/tensors.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/ml_tools/utils.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/model.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/noise/__init__.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/noise/protocols.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/noise/readout.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/operations/__init__.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/operations/analog.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/operations/control_ops.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/operations/ham_evo.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/operations/parametric.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/operations/primitive.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/overlap.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/parameters.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/protocols.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/py.typed +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/qubit_support.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/register.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/serial_expr_grammar.peg +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/serialization.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/states.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/transpile/__init__.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/transpile/apply_fn.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/transpile/block.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/transpile/circuit.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/transpile/digitalize.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/transpile/flatten.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/transpile/invert.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/transpile/transpile.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/types.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/qadence/utils.py +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/renovate.json +0 -0
- {qadence-1.7.6 → qadence-1.7.8}/setup.py +0 -0
{qadence-1.7.6 → qadence-1.7.8}/PKG-INFO

```diff
@@ -1,8 +1,8 @@
 Metadata-Version: 2.3
 Name: qadence
-Version: 1.7.6
+Version: 1.7.8
 Summary: Pasqal interface for circuit-based quantum computing SDKs
-Author-email: Aleksander Wennersteen <aleksander.wennersteen@pasqal.com>, Gert-Jan Both <gert-jan.both@pasqal.com>, Niklas Heim <niklas.heim@pasqal.com>, Mario Dagrada <mario.dagrada@pasqal.com>, Vincent Elfving <vincent.elfving@pasqal.com>, Dominik Seitz <dominik.seitz@pasqal.com>, Roland Guichard <roland.guichard@pasqal.com>, "Joao P. Moutinho" <joao.moutinho@pasqal.com>, Vytautas Abramavicius <vytautas.abramavicius@pasqal.com>, Gergana Velikova <gergana.velikova@pasqal.com>, Eduardo Maschio <eduardo.maschio@pasqal.com>, Smit Chaudhary <smit.chaudhary@pasqal.com>, Ignacio Fernández Graña <ignacio.fernandez-grana@pasqal.com>, Charles Moussa <charles.moussa@pasqal.com>, Giorgio Tosti Balducci <giorgio.tosti-balducci@pasqal.com>
+Author-email: Aleksander Wennersteen <aleksander.wennersteen@pasqal.com>, Gert-Jan Both <gert-jan.both@pasqal.com>, Niklas Heim <niklas.heim@pasqal.com>, Mario Dagrada <mario.dagrada@pasqal.com>, Vincent Elfving <vincent.elfving@pasqal.com>, Dominik Seitz <dominik.seitz@pasqal.com>, Roland Guichard <roland.guichard@pasqal.com>, "Joao P. Moutinho" <joao.moutinho@pasqal.com>, Vytautas Abramavicius <vytautas.abramavicius@pasqal.com>, Gergana Velikova <gergana.velikova@pasqal.com>, Eduardo Maschio <eduardo.maschio@pasqal.com>, Smit Chaudhary <smit.chaudhary@pasqal.com>, Ignacio Fernández Graña <ignacio.fernandez-grana@pasqal.com>, Charles Moussa <charles.moussa@pasqal.com>, Giorgio Tosti Balducci <giorgio.tosti-balducci@pasqal.com>, Daniele Cucurachi <daniele.cucurachi@pasqal.com>
 License: Apache 2.0
 License-File: LICENSE
 Classifier: License :: OSI Approved :: Apache Software License
@@ -22,7 +22,7 @@ Requires-Dist: matplotlib
 Requires-Dist: nevergrad
 Requires-Dist: numpy
 Requires-Dist: openfermion
-Requires-Dist: pyqtorch==1.4.
+Requires-Dist: pyqtorch==1.4.9
 Requires-Dist: pyyaml
 Requires-Dist: rich
 Requires-Dist: scipy
@@ -57,7 +57,7 @@ Requires-Dist: mlflow; extra == 'mlflow'
 Provides-Extra: protocols
 Requires-Dist: qadence-protocols; extra == 'protocols'
 Provides-Extra: pulser
-Requires-Dist: pasqal-cloud==0.
+Requires-Dist: pasqal-cloud==0.12.2; extra == 'pulser'
 Requires-Dist: pulser-core==0.19.0; extra == 'pulser'
 Requires-Dist: pulser-simulation==0.19.0; extra == 'pulser'
 Provides-Extra: visualization
```
{qadence-1.7.6 → qadence-1.7.8}/pyproject.toml

```diff
@@ -22,10 +22,11 @@ authors = [
   { name = "Ignacio Fernández Graña", email = "ignacio.fernandez-grana@pasqal.com" },
   { name = "Charles Moussa", email = "charles.moussa@pasqal.com" },
   { name = "Giorgio Tosti Balducci", email = "giorgio.tosti-balducci@pasqal.com" },
+  { name = "Daniele Cucurachi", email = "daniele.cucurachi@pasqal.com" },
 ]
 requires-python = ">=3.9"
 license = { text = "Apache 2.0" }
-version = "1.7.6"
+version = "1.7.8"
 classifiers = [
   "License :: OSI Approved :: Apache Software License",
   "Programming Language :: Python",
@@ -49,7 +50,7 @@ dependencies = [
   "jsonschema",
   "nevergrad",
   "scipy",
-  "pyqtorch==1.4.
+  "pyqtorch==1.4.9",
   "pyyaml",
   "matplotlib",
   "Arpeggio==2.0.2",
@@ -63,7 +64,7 @@ allow-ambiguous-features = true
 pulser = [
   "pulser-core==0.19.0",
   "pulser-simulation==0.19.0",
-  "pasqal-cloud==0.
+  "pasqal-cloud==0.12.2",
 ]
 braket = ["amazon-braket-sdk<1.71.2"]
 visualization = [
```
{qadence-1.7.6 → qadence-1.7.8}/qadence/backend.py

```diff
@@ -282,33 +282,6 @@ class Backend(ABC):
         """
         raise NotImplementedError
 
-    @abstractmethod
-    def run_dm(
-        self,
-        circuit: ConvertedCircuit,
-        noise: Noise,
-        param_values: dict[str, ArrayLike] = {},
-        state: Tensor | None = None,
-        endianness: Endianness = Endianness.BIG,
-    ) -> Tensor:
-        """Run a circuit and return the resulting the density matrix.
-
-        TODO: Temporary method for the purposes of noise model implementation.
-        To be removed in a later refactoring.
-
-        Arguments:
-            circuit: A converted circuit as returned by `backend.circuit`.
-            param_values: _**Already embedded**_ parameters of the circuit. See
-                [`embedding`][qadence.blocks.embedding.embedding] for more info.
-            state: Initial state.
-            endianness: Endianness of the resulting density matrix.
-
-        Returns:
-            A list of Counter objects where each key represents a bitstring
-            and its value the number of times it has been sampled from the given wave function.
-        """
-        raise NotImplementedError
-
     @abstractmethod
     def expectation(
         self,
```
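The abstract `run_dm` method is removed from the backend interface here (and from each concrete backend below); density matrices are now requested through `run()` with an optional `noise` argument instead. A hypothetical call-site migration sketch, assuming the pulser extra is installed and a `Noise` protocol configured as in the qadence docs (the noise options below are an assumption, not taken from this diff):

```python
# Hypothetical migration sketch: before 1.7.8 density matrices came from
# backend.run_dm(...); from 1.7.8 they come from backend.run(..., noise=...).
from qadence import QuantumCircuit, RX, backend_factory
from qadence.noise import Noise

backend = backend_factory(backend="pulser", diff_mode=None)
conv = backend.circuit(QuantumCircuit(2, RX(0, 0.5)))
noise = Noise(protocol=Noise.DEPOLARIZING, options={"noise_probs": 0.1})

# before 1.7.8: dms = backend.run_dm(conv, noise=noise)
dms = backend.run(conv, noise=noise)  # density matrices when noise is passed
```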
{qadence-1.7.6 → qadence-1.7.8}/qadence/backends/braket/backend.py

```diff
@@ -131,16 +131,6 @@ class Backend(BackendInterface):
         states = invert_endianness(states)
         return states
 
-    def run_dm(
-        self,
-        circuit: ConvertedCircuit,
-        noise: Noise,
-        param_values: dict[str, Tensor] = {},
-        state: Tensor | None = None,
-        endianness: Endianness = Endianness.BIG,
-    ) -> Tensor:
-        raise NotImplementedError
-
     def sample(
         self,
         circuit: ConvertedCircuit,
```
{qadence-1.7.6 → qadence-1.7.8}/qadence/backends/horqrux/backend.py

```diff
@@ -107,16 +107,6 @@ class Backend(BackendInterface):
         state = unhorqify(state)
         return state
 
-    def run_dm(
-        self,
-        circuit: ConvertedCircuit,
-        noise: Noise,
-        param_values: ParamDictType = {},
-        state: ArrayLike | None = None,
-        endianness: Endianness = Endianness.BIG,
-    ) -> ArrayLike:
-        raise NotImplementedError
-
     def expectation(
         self,
         circuit: ConvertedCircuit,
```
{qadence-1.7.6 → qadence-1.7.8}/qadence/backends/pulser/backend.py

```diff
@@ -187,6 +187,7 @@ class Backend(BackendInterface):
         param_values: dict[str, Tensor] = {},
         state: Tensor | None = None,
         endianness: Endianness = Endianness.BIG,
+        noise: Noise | None = None,
     ) -> Tensor:
         vals = to_list_of_dicts(param_values)
 
@@ -197,37 +198,41 @@ class Backend(BackendInterface):
                 "specify any cloud credentials to use the .run() method"
             )
 
-        state = state if state is None else _convert_init_state(state)
-        batched_wf = np.zeros((len(vals), 2**circuit.abstract.n_qubits), dtype=np.complex128)
+        if noise is None:
+            state = state if state is None else _convert_init_state(state)
+            batched_wf = np.zeros((len(vals), 2**circuit.abstract.n_qubits), dtype=np.complex128)
 
-        for i, param_values_el in enumerate(vals):
-            sequence = self.assign_parameters(circuit, param_values_el)
-            pattern = circuit.original.register.device_specs.pattern
-            if pattern is not None:
-                add_addressing_pattern(sequence, pattern)
-            sequence.measure()
-            sim_result = simulate_sequence(sequence, self.config, state, n_shots=None)
-            wf = (
-                sim_result.get_final_state(  # type:ignore [union-attr]
-                    ignore_global_phase=False, normalize=True
+            for i, param_values_el in enumerate(vals):
+                sequence = self.assign_parameters(circuit, param_values_el)
+                pattern = circuit.original.register.device_specs.pattern
+                if pattern is not None:
+                    add_addressing_pattern(sequence, pattern)
+                sequence.measure()
+                sim_result = simulate_sequence(sequence, self.config, state, n_shots=None)
+                wf = (
+                    sim_result.get_final_state(  # type:ignore [union-attr]
+                        ignore_global_phase=False, normalize=True
+                    )
+                    .full()
+                    .flatten()
                 )
-                .full()
-                .flatten()
-            )
-            # We flip the wavefunction coming out of pulser,
-            # essentially changing logic 0 with logic 1 in the basis states.
-            batched_wf[i] = np.flip(wf)
+                # We flip the wavefunction coming out of pulser,
+                # essentially changing logic 0 with logic 1 in the basis states.
+                batched_wf[i] = np.flip(wf)
 
-        batched_wf_torch = torch.from_numpy(batched_wf)
+            batched_wf_torch = torch.from_numpy(batched_wf)
 
-        if endianness != self.native_endianness:
-            from qadence.transpile import invert_endianness
+            if endianness != self.native_endianness:
+                from qadence.transpile import invert_endianness
 
-            batched_wf_torch = invert_endianness(batched_wf_torch)
+                batched_wf_torch = invert_endianness(batched_wf_torch)
 
-        return batched_wf_torch
+            return batched_wf_torch
 
-    def run_dm(
+        else:
+            return self._run_noisy(circuit, noise, param_values, state, endianness)
+
+    def _run_noisy(
         self,
         circuit: ConvertedCircuit,
         noise: Noise,
@@ -342,12 +347,12 @@ class Backend(BackendInterface):
             res = res if len(res.shape) > 0 else res.reshape(1)
             return res.real
         elif noise is not None:
-            dms = self.run_dm(
+            dms = self.run(
                 circuit=circuit,
-                noise=noise,
                 param_values=param_values,
                 state=state,
                 endianness=endianness,
+                noise=noise,
            )
            support = sorted(list(circuit.abstract.register.support))
            # TODO: There should be a better check for batched density matrices.
```
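The comment about flipping the wavefunction survives the refactor unchanged; as a standalone illustration (plain NumPy, not qadence code) of why `np.flip` relabels the basis states:

```python
import numpy as np

# Amplitudes are indexed by basis state: index 0 -> |0>, index 1 -> |1>.
wf = np.array([1.0 + 0j, 0.0 + 0j])  # |0> in the simulator's convention
print(np.flip(wf))                   # [0.+0.j 1.+0.j], i.e. |1> after relabelling
```

For more than one qubit, reversing the 2^n amplitude vector maps each basis index to its bitwise complement, which is exactly the logic-0/logic-1 swap the comment describes.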
{qadence-1.7.6 → qadence-1.7.8}/qadence/backends/pyqtorch/backend.py

```diff
@@ -106,16 +106,6 @@ class Backend(BackendInterface):
         state = invert_endianness(state) if endianness != self.native_endianness else state
         return state
 
-    def run_dm(
-        self,
-        circuit: ConvertedCircuit,
-        noise: Noise,
-        param_values: dict[str, Tensor] = {},
-        state: Tensor | None = None,
-        endianness: Endianness = Endianness.BIG,
-    ) -> Tensor:
-        raise NotImplementedError
-
     def _batched_expectation(
         self,
         circuit: ConvertedCircuit,
```
{qadence-1.7.6 → qadence-1.7.8}/qadence/backends/pyqtorch/convert_ops.py

```diff
@@ -14,6 +14,7 @@ from pyqtorch.utils import is_diag
 from torch import (
     Tensor,
     cdouble,
+    complex64,
     diag_embed,
     diagonal,
     exp,
@@ -77,11 +78,24 @@ def is_single_qubit_chain(block: AbstractBlock) -> bool:
 
 
 def extract_parameter(block: ScaleBlock | ParametricBlock, config: Configuration) -> str | Tensor:
-    return (
-        tensor([block.parameters.parameter], dtype=cdouble)
-        if not block.is_parametric
-        else config.get_param_name(block)[0]
-    )
+    """Extract the parameter as string or its tensor value.
+
+    Args:
+        block (ScaleBlock | ParametricBlock): Block to extract parameter from.
+        config (Configuration): Configuration instance.
+
+    Returns:
+        str | Tensor: Parameter value or symbol.
+    """
+    if not block.is_parametric:
+        tensor_val = tensor([block.parameters.parameter], dtype=complex64)
+        return (
+            tensor([block.parameters.parameter], dtype=float64)
+            if torch.all(tensor_val.imag == 0)
+            else tensor_val
+        )
+
+    return config.get_param_name(block)[0]
 
 
 def convert_block(
@@ -122,17 +136,11 @@ def convert_block(
         else:
             generator = convert_block(block.generator, n_qubits, config)[0]  # type: ignore[arg-type]
             time_param = config.get_param_name(block)[0]
-            is_parametric = (
-                block.generator.is_parametric
-                if isinstance(block.generator, AbstractBlock)
-                else False
-            )
             return [
                 pyq.HamiltonianEvolution(
                     qubit_support=qubit_support,
                     generator=generator,
                     time=time_param,
-                    generator_parametric=is_parametric,  # type: ignore[union-attr]
                     cache_length=0,
                 )
             ]
```
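The dtype logic added to `extract_parameter` can be exercised outside qadence; a minimal sketch mirroring the diff (the helper name is hypothetical):

```python
import torch

def as_param_tensor(value: complex) -> torch.Tensor:
    # Mirror of the new extract_parameter branch: fixed (non-parametric)
    # values are stored as float64 when purely real, kept complex otherwise.
    tensor_val = torch.tensor([value], dtype=torch.complex64)
    if torch.all(tensor_val.imag == 0):
        return tensor_val.real.to(torch.float64)
    return tensor_val

print(as_param_tensor(0.5).dtype)       # torch.float64
print(as_param_tensor(0.5 + 1j).dtype)  # torch.complex64
```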
{qadence-1.7.6 → qadence-1.7.8}/qadence/mitigations/analog_zne.py

```diff
@@ -82,7 +82,7 @@ def pulse_experiment(
         conv_circuit = backend.circuit(stretched_circuit)
         noisy_density_matrices.append(
             # Contain a single experiment result for the stretch.
-            backend.run_dm(
+            backend.run(
                 conv_circuit,
                 param_values=param_values,
                 state=state,
@@ -124,7 +124,7 @@ def noise_level_experiment(
     zne_datasets: list = []
     # Get noisy density matrices.
     conv_circuit = backend.circuit(circuit)
-    noisy_density_matrices = backend.run_dm(
+    noisy_density_matrices = backend.run(
         conv_circuit, param_values=param_values, state=state, noise=noise, endianness=endianness
     )
     # Convert observable to Numpy types compatible with QuTip simulations.
```
{qadence-1.7.6 → qadence-1.7.8}/qadence/ml_tools/__init__.py

```diff
@@ -2,7 +2,7 @@ from __future__ import annotations
 
 from .config import AnsatzConfig, Callback, FeatureMapConfig, TrainConfig
 from .constructors import create_ansatz, create_fm_blocks, observable_from_config
-from .data import DictDataLoader, InfiniteTensorDataset, to_dataloader
+from .data import DictDataLoader, InfiniteTensorDataset, OptimizeResult, to_dataloader
 from .models import QNN
 from .optimize_step import optimize_step as default_optimize_step
 from .parameters import get_parameters, num_parameters, set_parameters
@@ -23,6 +23,7 @@ __all__ = [
     "observable_from_config",
     "QNN",
     "TrainConfig",
+    "OptimizeResult",
     "Callback",
     "train_with_grad",
     "train_gradient_free",
```
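`OptimizeResult` is now re-exported at the `qadence.ml_tools` top level; a small sketch of the import it enables (the callback body is illustrative):

```python
from qadence.ml_tools import Callback, OptimizeResult

# e.g. type-annotating a custom callback function:
def my_callback(res: OptimizeResult) -> None:
    print(res.iteration, res.loss)

cb = Callback(my_callback, called_every=10)
```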
{qadence-1.7.6 → qadence-1.7.8}/qadence/ml_tools/config.py

```diff
@@ -38,15 +38,22 @@ class Callback:
     Each callback function should take at least as first input
     an OptimizeResult instance.
 
+    Note: when setting call_after_opt to True, we skip
+    verifying iteration % called_every == 0.
+
     Attributes:
         callback (CallbackFunction): Callback function accepting an
             OptimizeResult as first argument.
         callback_condition (CallbackConditionFunction | None, optional): Function that
             conditions the call to callback. Defaults to None.
+        modify_optimize_result (CallbackFunction | dict[str, Any] | None, optional):
+            Function that modify the OptimizeResult before callback.
+            For instance, one can change the `extra` (dict) argument to be used in callback.
+            If a dict is provided, the `extra` field of OptimizeResult is updated with the dict.
         called_every (int, optional): Callback to be called each `called_every` epoch.
             Defaults to 1.
             If callback_condition is None, we set
-            callback_condition to returns True when iteration %
+            callback_condition to returns True when iteration % called_every == 0.
         call_before_opt (bool, optional): If true, callback is applied before training.
             Defaults to False.
         call_end_epoch (bool, optional): If true, callback is applied during training,
@@ -61,6 +68,7 @@ class Callback:
         self,
         callback: CallbackFunction,
         callback_condition: CallbackConditionFunction | None = None,
+        modify_optimize_result: CallbackFunction | dict[str, Any] | None = None,
         called_every: int = 1,
         call_before_opt: bool = False,
         call_end_epoch: bool = True,
@@ -74,10 +82,13 @@ class Callback:
                 OptimizeResult as ifrst argument.
             callback_condition (CallbackConditionFunction | None, optional): Function that
                 conditions the call to callback. Defaults to None.
+            modify_optimize_result (CallbackFunction | dict[str, Any] | None , optional):
+                Function that modify the OptimizeResult before callback. If a dict
+                is provided, this updates the `extra` field of OptimizeResult.
             called_every (int, optional): Callback to be called each `called_every` epoch.
                 Defaults to 1.
                 If callback_condition is None, we set
-                callback_condition to returns True when iteration %
+                callback_condition to returns True when iteration % called_every == 0.
             call_before_opt (bool, optional): If true, callback is applied before training.
                 Defaults to False.
             call_end_epoch (bool, optional): If true, callback is applied during training,
@@ -102,9 +113,56 @@ class Callback:
         else:
             self.callback_condition = callback_condition
 
-    def __call__(self, opt_result: OptimizeResult) -> Any:
+        if modify_optimize_result is None:
+            self.modify_optimize_result = lambda opt_result: opt_result
+        elif isinstance(modify_optimize_result, dict):
+
+            def update_extra(opt_result: OptimizeResult) -> OptimizeResult:
+                opt_result.extra.update(modify_optimize_result)
+                return opt_result
+
+            self.modify_optimize_result = update_extra
+        else:
+            self.modify_optimize_result = modify_optimize_result
+
+    def __call__(self, opt_result: OptimizeResult, is_last_iteration: bool = False) -> Any:
+        """Apply callback if conditions are met.
+
+        Note that the current result may be modified by specifying a function
+        `modify_optimize_result` for instance to add inputs to the `extra` argument
+        of the current OptimizeResult.
+
+        Args:
+            opt_result (OptimizeResult): Current result.
+            is_last_iteration (bool, optional): When True,
+                avoid verifying modulo. Defaults to False.
+                Useful when call_after_opt is True.
+
+        Returns:
+            Any: The result of the callback.
+        """
+        opt_result = self.modify_optimize_result(opt_result)
         if opt_result.iteration % self.called_every == 0 and self.callback_condition(opt_result):
             return self.callback(opt_result)
+        if is_last_iteration and self.callback_condition(opt_result):
+            return self.callback(opt_result)
+
+
+def run_callbacks(
+    callback_iterable: list[Callback], opt_res: OptimizeResult, is_last_iteration: bool = False
+) -> None:
+    """Run a list of Callback given the current OptimizeResult.
+
+    Used in train functions.
+
+    Args:
+        callback_iterable (list[Callback]): Iterable of Callbacks
+        opt_res (OptimizeResult): Current optimization result,
+        is_last_iteration (bool, optional): Whether we reached the last iteration or not.
+            Defaults to False.
+    """
+    for callback in callback_iterable:
+        callback(opt_res, is_last_iteration)
 
 
 @dataclass
@@ -389,6 +447,13 @@ class FeatureMapConfig:
         assign `t, x = xs[:,0], xs[:,1]`.
     """
 
+    tag: str | None = None
+    """
+    String to indicate the name tag of the feature map.
+
+    Defaults to None, in which case no tag will be applied.
+    """
+
     def __post_init__(self) -> None:
         if self.multivariate_strategy == MultivariateStrategy.PARALLEL and self.num_features > 1:
             assert (
@@ -548,6 +613,13 @@ class AnsatzConfig:
     param_prefix: str = "theta"
     """The base bame of the variational parameter."""
 
+    tag: str | None = None
+    """
+    String to indicate the name tag of the ansatz.
+
+    Defaults to None, in which case no tag will be applied.
+    """
+
     def __post_init__(self) -> None:
         if self.ansatz_type == AnsatzType.IIA:
             assert (
```
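A sketch of the two additions above: a dict passed as `modify_optimize_result` is merged into `OptimizeResult.extra` before the callback fires, and `run_callbacks` replaces the helper previously redefined inside each train function. Field names follow the diff; the usage lines are illustrative:

```python
from qadence.ml_tools import Callback
from qadence.ml_tools.config import run_callbacks

log_cb = Callback(
    lambda res: print(res.iteration, res.extra.get("stage")),
    modify_optimize_result={"stage": "validation"},  # merged into res.extra
    called_every=5,
)

# Inside a training loop (opt_res being the current OptimizeResult):
# run_callbacks([log_cb], opt_res)                          # regular epoch
# run_callbacks([log_cb], opt_res, is_last_iteration=True)  # bypasses the modulo check
```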
{qadence-1.7.6 → qadence-1.7.8}/qadence/ml_tools/constructors.py

```diff
@@ -7,7 +7,7 @@ from qadence.backend import BackendConfiguration
 from qadence.blocks import chain, kron
 from qadence.blocks.abstract import AbstractBlock
 from qadence.blocks.composite import ChainBlock, KronBlock
-from qadence.blocks.utils import add
+from qadence.blocks.utils import add, tag
 from qadence.circuit import QuantumCircuit
 from qadence.constructors import (
     analog_feature_map,
@@ -774,10 +774,15 @@ def build_qnn_from_configs(
             fm_blocks=fm_blocks,
             ansatz_config=ansatz_config,
         )
+        if isinstance(fm_config.tag, str):
+            tag(full_fm, fm_config.tag)
         inputs = fm_config.inputs
         blocks.append(full_fm)
 
-    blocks.append(create_ansatz(register=register, config=ansatz_config))
+    ansatz = create_ansatz(register=register, config=ansatz_config)
+    if isinstance(ansatz_config.tag, str):
+        tag(ansatz, ansatz_config.tag)
+    blocks.append(ansatz)
 
     circ = QuantumCircuit(register, *blocks)
```
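A sketch of the new `tag` fields, which `build_qnn_from_configs` applies to the feature-map and ansatz blocks (the `num_features` argument is an assumption about the existing config signature, not part of this diff):

```python
from qadence.ml_tools import AnsatzConfig, FeatureMapConfig

fm_config = FeatureMapConfig(num_features=1, tag="feature_map")
ansatz_config = AnsatzConfig(tag="ansatz")
# Both tags default to None, in which case no tag is applied to the block.
```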
{qadence-1.7.6 → qadence-1.7.8}/qadence/ml_tools/saveload.py

```diff
@@ -72,7 +72,11 @@ def write_checkpoint(
     device = None
     try:
         # We extract the device from the pyqtorch native circuit
-        device = model.device
+        device = (
+            model.device
+            if isinstance(model, (QNN, QuantumModel))
+            else next(model.parameters()).device
+        )
         device = str(device).split(":")[0]  # in case of using several CUDA devices
     except Exception as e:
         msg = (
```
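A standalone sketch of the fallback introduced above: prefer the model's own `device` attribute when present (as on `QNN`/`QuantumModel`), otherwise read the device off the first parameter (the helper itself is hypothetical):

```python
import torch
from torch import nn

def model_device(model: nn.Module) -> str:
    device = getattr(model, "device", None)  # QNN/QuantumModel expose .device
    if device is None:
        device = next(model.parameters()).device
    return str(device).split(":")[0]  # drop the CUDA index, as in the diff

print(model_device(nn.Linear(2, 2)))  # "cpu"
```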
{qadence-1.7.6 → qadence-1.7.8}/qadence/ml_tools/train_grad.py

```diff
@@ -14,7 +14,7 @@ from torch.optim import Optimizer
 from torch.utils.data import DataLoader
 from torch.utils.tensorboard import SummaryWriter
 
-from qadence.ml_tools.config import Callback, TrainConfig
+from qadence.ml_tools.config import Callback, TrainConfig, run_callbacks
 from qadence.ml_tools.data import DictDataLoader, OptimizeResult, data_to_device
 from qadence.ml_tools.optimize_step import optimize_step
 from qadence.ml_tools.printing import (
@@ -194,7 +194,6 @@ def train(
         Callback(
             lambda opt_res: print_metrics(opt_res.loss, opt_res.metrics, opt_res.iteration - 1),
             called_every=config.print_every,
-            call_after_opt=True,
         )
     ]
 
@@ -214,21 +213,48 @@ def train(
     ]
 
     # writing metrics
+    # we specify two writers,
+    # to write at evaluation time and before evaluation
     callbacks += [
         Callback(
             lambda opt_res: write_tracker(
                 writer,
                 opt_res.loss,
                 opt_res.metrics,
-                opt_res.iteration,
+                opt_res.iteration - 1,  # loss returned be optimized_step is at -1
                 tracking_tool=config.tracking_tool,
             ),
             called_every=config.write_every,
-            call_before_opt=False,
+            call_end_epoch=True,
+        ),
+        Callback(
+            lambda opt_res: write_tracker(
+                writer,
+                opt_res.loss,
+                opt_res.metrics,
+                opt_res.iteration,  # after_opt we match the right loss function
+                tracking_tool=config.tracking_tool,
+            ),
+            called_every=config.write_every,
+            call_end_epoch=False,
             call_after_opt=True,
-            call_during_eval=True,
-        )
+        ),
     ]
+    if perform_val:
+        callbacks += [
+            Callback(
+                lambda opt_res: write_tracker(
+                    writer,
+                    None,
+                    opt_res.metrics,
+                    opt_res.iteration,
+                    tracking_tool=config.tracking_tool,
+                ),
+                called_every=config.write_every,
+                call_before_opt=True,
+                call_during_eval=True,
+            )
+        ]
 
     # checkpointing
     if config.folder and config.checkpoint_every > 0 and not config.checkpoint_best_only:
@@ -262,10 +288,6 @@ def train(
         )
     ]
 
-    def run_callbacks(callback_iterable: list[Callback], opt_res: OptimizeResult) -> None:
-        for callback in callback_iterable:
-            callback(opt_res)
-
     callbacks_before_opt = [
         callback
         for callback in callbacks
@@ -337,19 +359,24 @@ def train(
             logger.info("Terminating training gracefully after the current iteration.")
             break
 
-    # Handling printing the last training loss
+    # For handling printing/writing the last training loss
     # as optimize_step does not give the loss value at the last iteration
     try:
         loss, metrics, *_ = next_loss_iter(dl_iter)
-        if iteration % config.print_every == 0 and config.verbose:
-            print_metrics(loss, metrics, iteration)
+        if isinstance(loss, Tensor):
+            loss = loss.item()
+        if perform_val:
+            # reputting val_loss as already evaluated before
+            metrics["val_loss"] = val_loss
+        print_metrics(loss, metrics, iteration)
 
     except KeyboardInterrupt:
         logger.info("Terminating training gracefully after the current iteration.")
 
     # Final callbacks, by default checkpointing and writing
+    opt_result = OptimizeResult(iteration, model, optimizer, loss, metrics)
     callbacks_after_opt = [callback for callback in callbacks if callback.call_after_opt]
-    run_callbacks(callbacks_after_opt, opt_result)
+    run_callbacks(callbacks_after_opt, opt_result, is_last_iteration=True)
 
     # writing hyperparameters
     if config.hyperparams:
```
{qadence-1.7.6 → qadence-1.7.8}/qadence/ml_tools/train_no_grad.py

```diff
@@ -12,7 +12,7 @@ from torch.nn import Module
 from torch.utils.data import DataLoader
 from torch.utils.tensorboard import SummaryWriter
 
-from qadence.ml_tools.config import Callback, TrainConfig
+from qadence.ml_tools.config import Callback, TrainConfig, run_callbacks
 from qadence.ml_tools.data import DictDataLoader, OptimizeResult
 from qadence.ml_tools.parameters import get_parameters, set_parameters
 from qadence.ml_tools.printing import (
@@ -160,10 +160,6 @@ def train(
         )
     ]
 
-    def run_callbacks(callback_iterable: list[Callback], opt_res: OptimizeResult) -> None:
-        for callback in callback_iterable:
-            callback(opt_res)
-
     callbacks_end_opt = [
         callback
         for callback in callbacks
@@ -192,7 +188,7 @@ def train(
 
     # Final callbacks
     callbacks_after_opt = [callback for callback in callbacks if callback.call_after_opt]
-    run_callbacks(callbacks_after_opt, opt_result)
+    run_callbacks(callbacks_after_opt, opt_result, is_last_iteration=True)
 
     # close tracker
     if config.tracking_tool == ExperimentTrackingTool.TENSORBOARD:
```