qadence 1.9.0__tar.gz → 1.9.2__tar.gz

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (158)
  1. {qadence-1.9.0 → qadence-1.9.2}/.github/workflows/test_fast.yml +3 -5
  2. {qadence-1.9.0 → qadence-1.9.2}/PKG-INFO +9 -6
  3. {qadence-1.9.0 → qadence-1.9.2}/README.md +2 -0
  4. {qadence-1.9.0 → qadence-1.9.2}/pyproject.toml +7 -7
  5. {qadence-1.9.0 → qadence-1.9.2}/qadence/analog/device.py +7 -0
  6. {qadence-1.9.0 → qadence-1.9.2}/qadence/backends/pulser/backend.py +1 -1
  7. {qadence-1.9.0 → qadence-1.9.2}/qadence/backends/pyqtorch/convert_ops.py +5 -5
  8. {qadence-1.9.0 → qadence-1.9.2}/qadence/backends/utils.py +15 -1
  9. {qadence-1.9.0 → qadence-1.9.2}/qadence/engines/torch/differentiable_expectation.py +3 -1
  10. {qadence-1.9.0 → qadence-1.9.2}/qadence/ml_tools/callbacks/__init__.py +10 -0
  11. {qadence-1.9.0 → qadence-1.9.2}/qadence/ml_tools/callbacks/callback.py +325 -1
  12. {qadence-1.9.0 → qadence-1.9.2}/qadence/ml_tools/callbacks/writer_registry.py +53 -42
  13. {qadence-1.9.0 → qadence-1.9.2}/qadence/ml_tools/train_utils/base_trainer.py +33 -26
  14. {qadence-1.9.0 → qadence-1.9.2}/qadence/ml_tools/trainer.py +72 -53
  15. {qadence-1.9.0 → qadence-1.9.2}/qadence/states.py +21 -0
  16. {qadence-1.9.0 → qadence-1.9.2}/qadence/types.py +1 -1
  17. {qadence-1.9.0 → qadence-1.9.2}/renovate.json +2 -1
  18. {qadence-1.9.0 → qadence-1.9.2}/.coveragerc +0 -0
  19. {qadence-1.9.0 → qadence-1.9.2}/.github/ISSUE_TEMPLATE/bug-report.yml +0 -0
  20. {qadence-1.9.0 → qadence-1.9.2}/.github/ISSUE_TEMPLATE/config.yml +0 -0
  21. {qadence-1.9.0 → qadence-1.9.2}/.github/ISSUE_TEMPLATE/new-feature.yml +0 -0
  22. {qadence-1.9.0 → qadence-1.9.2}/.github/workflows/build_docs.yml +0 -0
  23. {qadence-1.9.0 → qadence-1.9.2}/.github/workflows/lint.yml +0 -0
  24. {qadence-1.9.0 → qadence-1.9.2}/.github/workflows/test_all.yml +0 -0
  25. {qadence-1.9.0 → qadence-1.9.2}/.github/workflows/test_examples.yml +0 -0
  26. {qadence-1.9.0 → qadence-1.9.2}/.gitignore +0 -0
  27. {qadence-1.9.0 → qadence-1.9.2}/.pre-commit-config.yaml +0 -0
  28. {qadence-1.9.0 → qadence-1.9.2}/LICENSE +0 -0
  29. {qadence-1.9.0 → qadence-1.9.2}/MANIFEST.in +0 -0
  30. {qadence-1.9.0 → qadence-1.9.2}/mkdocs.yml +0 -0
  31. {qadence-1.9.0 → qadence-1.9.2}/qadence/__init__.py +0 -0
  32. {qadence-1.9.0 → qadence-1.9.2}/qadence/analog/__init__.py +0 -0
  33. {qadence-1.9.0 → qadence-1.9.2}/qadence/analog/addressing.py +0 -0
  34. {qadence-1.9.0 → qadence-1.9.2}/qadence/analog/constants.py +0 -0
  35. {qadence-1.9.0 → qadence-1.9.2}/qadence/analog/hamiltonian_terms.py +0 -0
  36. {qadence-1.9.0 → qadence-1.9.2}/qadence/analog/parse_analog.py +0 -0
  37. {qadence-1.9.0 → qadence-1.9.2}/qadence/backend.py +0 -0
  38. {qadence-1.9.0 → qadence-1.9.2}/qadence/backends/__init__.py +0 -0
  39. {qadence-1.9.0 → qadence-1.9.2}/qadence/backends/api.py +0 -0
  40. {qadence-1.9.0 → qadence-1.9.2}/qadence/backends/gpsr.py +0 -0
  41. {qadence-1.9.0 → qadence-1.9.2}/qadence/backends/horqrux/__init__.py +0 -0
  42. {qadence-1.9.0 → qadence-1.9.2}/qadence/backends/horqrux/backend.py +0 -0
  43. {qadence-1.9.0 → qadence-1.9.2}/qadence/backends/horqrux/config.py +0 -0
  44. {qadence-1.9.0 → qadence-1.9.2}/qadence/backends/horqrux/convert_ops.py +0 -0
  45. {qadence-1.9.0 → qadence-1.9.2}/qadence/backends/jax_utils.py +0 -0
  46. {qadence-1.9.0 → qadence-1.9.2}/qadence/backends/pulser/__init__.py +0 -0
  47. {qadence-1.9.0 → qadence-1.9.2}/qadence/backends/pulser/channels.py +0 -0
  48. {qadence-1.9.0 → qadence-1.9.2}/qadence/backends/pulser/cloud.py +0 -0
  49. {qadence-1.9.0 → qadence-1.9.2}/qadence/backends/pulser/config.py +0 -0
  50. {qadence-1.9.0 → qadence-1.9.2}/qadence/backends/pulser/convert_ops.py +0 -0
  51. {qadence-1.9.0 → qadence-1.9.2}/qadence/backends/pulser/devices.py +0 -0
  52. {qadence-1.9.0 → qadence-1.9.2}/qadence/backends/pulser/pulses.py +0 -0
  53. {qadence-1.9.0 → qadence-1.9.2}/qadence/backends/pulser/waveforms.py +0 -0
  54. {qadence-1.9.0 → qadence-1.9.2}/qadence/backends/pyqtorch/__init__.py +0 -0
  55. {qadence-1.9.0 → qadence-1.9.2}/qadence/backends/pyqtorch/backend.py +0 -0
  56. {qadence-1.9.0 → qadence-1.9.2}/qadence/backends/pyqtorch/config.py +0 -0
  57. {qadence-1.9.0 → qadence-1.9.2}/qadence/blocks/__init__.py +0 -0
  58. {qadence-1.9.0 → qadence-1.9.2}/qadence/blocks/abstract.py +0 -0
  59. {qadence-1.9.0 → qadence-1.9.2}/qadence/blocks/analog.py +0 -0
  60. {qadence-1.9.0 → qadence-1.9.2}/qadence/blocks/block_to_tensor.py +0 -0
  61. {qadence-1.9.0 → qadence-1.9.2}/qadence/blocks/composite.py +0 -0
  62. {qadence-1.9.0 → qadence-1.9.2}/qadence/blocks/embedding.py +0 -0
  63. {qadence-1.9.0 → qadence-1.9.2}/qadence/blocks/manipulate.py +0 -0
  64. {qadence-1.9.0 → qadence-1.9.2}/qadence/blocks/matrix.py +0 -0
  65. {qadence-1.9.0 → qadence-1.9.2}/qadence/blocks/primitive.py +0 -0
  66. {qadence-1.9.0 → qadence-1.9.2}/qadence/blocks/utils.py +0 -0
  67. {qadence-1.9.0 → qadence-1.9.2}/qadence/circuit.py +0 -0
  68. {qadence-1.9.0 → qadence-1.9.2}/qadence/constructors/__init__.py +0 -0
  69. {qadence-1.9.0 → qadence-1.9.2}/qadence/constructors/ansatze.py +0 -0
  70. {qadence-1.9.0 → qadence-1.9.2}/qadence/constructors/daqc/__init__.py +0 -0
  71. {qadence-1.9.0 → qadence-1.9.2}/qadence/constructors/daqc/daqc.py +0 -0
  72. {qadence-1.9.0 → qadence-1.9.2}/qadence/constructors/daqc/gen_parser.py +0 -0
  73. {qadence-1.9.0 → qadence-1.9.2}/qadence/constructors/daqc/utils.py +0 -0
  74. {qadence-1.9.0 → qadence-1.9.2}/qadence/constructors/feature_maps.py +0 -0
  75. {qadence-1.9.0 → qadence-1.9.2}/qadence/constructors/hamiltonians.py +0 -0
  76. {qadence-1.9.0 → qadence-1.9.2}/qadence/constructors/iia.py +0 -0
  77. {qadence-1.9.0 → qadence-1.9.2}/qadence/constructors/qft.py +0 -0
  78. {qadence-1.9.0 → qadence-1.9.2}/qadence/constructors/rydberg_feature_maps.py +0 -0
  79. {qadence-1.9.0 → qadence-1.9.2}/qadence/constructors/rydberg_hea.py +0 -0
  80. {qadence-1.9.0 → qadence-1.9.2}/qadence/constructors/utils.py +0 -0
  81. {qadence-1.9.0 → qadence-1.9.2}/qadence/decompose.py +0 -0
  82. {qadence-1.9.0 → qadence-1.9.2}/qadence/divergences.py +0 -0
  83. {qadence-1.9.0 → qadence-1.9.2}/qadence/draw/__init__.py +0 -0
  84. {qadence-1.9.0 → qadence-1.9.2}/qadence/draw/assets/dark/measurement.png +0 -0
  85. {qadence-1.9.0 → qadence-1.9.2}/qadence/draw/assets/dark/measurement.svg +0 -0
  86. {qadence-1.9.0 → qadence-1.9.2}/qadence/draw/assets/light/measurement.png +0 -0
  87. {qadence-1.9.0 → qadence-1.9.2}/qadence/draw/assets/light/measurement.svg +0 -0
  88. {qadence-1.9.0 → qadence-1.9.2}/qadence/draw/themes.py +0 -0
  89. {qadence-1.9.0 → qadence-1.9.2}/qadence/draw/utils.py +0 -0
  90. {qadence-1.9.0 → qadence-1.9.2}/qadence/draw/vizbackend.py +0 -0
  91. {qadence-1.9.0 → qadence-1.9.2}/qadence/engines/__init__.py +0 -0
  92. {qadence-1.9.0 → qadence-1.9.2}/qadence/engines/differentiable_backend.py +0 -0
  93. {qadence-1.9.0 → qadence-1.9.2}/qadence/engines/jax/__init__.py +0 -0
  94. {qadence-1.9.0 → qadence-1.9.2}/qadence/engines/jax/differentiable_backend.py +0 -0
  95. {qadence-1.9.0 → qadence-1.9.2}/qadence/engines/jax/differentiable_expectation.py +0 -0
  96. {qadence-1.9.0 → qadence-1.9.2}/qadence/engines/torch/__init__.py +0 -0
  97. {qadence-1.9.0 → qadence-1.9.2}/qadence/engines/torch/differentiable_backend.py +0 -0
  98. {qadence-1.9.0 → qadence-1.9.2}/qadence/exceptions/__init__.py +0 -0
  99. {qadence-1.9.0 → qadence-1.9.2}/qadence/exceptions/exceptions.py +0 -0
  100. {qadence-1.9.0 → qadence-1.9.2}/qadence/execution.py +0 -0
  101. {qadence-1.9.0 → qadence-1.9.2}/qadence/extensions.py +0 -0
  102. {qadence-1.9.0 → qadence-1.9.2}/qadence/libs.py +0 -0
  103. {qadence-1.9.0 → qadence-1.9.2}/qadence/log_config.yaml +0 -0
  104. {qadence-1.9.0 → qadence-1.9.2}/qadence/logger.py +0 -0
  105. {qadence-1.9.0 → qadence-1.9.2}/qadence/measurements/__init__.py +0 -0
  106. {qadence-1.9.0 → qadence-1.9.2}/qadence/measurements/protocols.py +0 -0
  107. {qadence-1.9.0 → qadence-1.9.2}/qadence/measurements/samples.py +0 -0
  108. {qadence-1.9.0 → qadence-1.9.2}/qadence/measurements/shadow.py +0 -0
  109. {qadence-1.9.0 → qadence-1.9.2}/qadence/measurements/tomography.py +0 -0
  110. {qadence-1.9.0 → qadence-1.9.2}/qadence/measurements/utils.py +0 -0
  111. {qadence-1.9.0 → qadence-1.9.2}/qadence/mitigations/__init__.py +0 -0
  112. {qadence-1.9.0 → qadence-1.9.2}/qadence/mitigations/analog_zne.py +0 -0
  113. {qadence-1.9.0 → qadence-1.9.2}/qadence/mitigations/protocols.py +0 -0
  114. {qadence-1.9.0 → qadence-1.9.2}/qadence/mitigations/readout.py +0 -0
  115. {qadence-1.9.0 → qadence-1.9.2}/qadence/ml_tools/__init__.py +0 -0
  116. {qadence-1.9.0 → qadence-1.9.2}/qadence/ml_tools/callbacks/callbackmanager.py +0 -0
  117. {qadence-1.9.0 → qadence-1.9.2}/qadence/ml_tools/callbacks/saveload.py +0 -0
  118. {qadence-1.9.0 → qadence-1.9.2}/qadence/ml_tools/config.py +0 -0
  119. {qadence-1.9.0 → qadence-1.9.2}/qadence/ml_tools/constructors.py +0 -0
  120. {qadence-1.9.0 → qadence-1.9.2}/qadence/ml_tools/data.py +0 -0
  121. {qadence-1.9.0 → qadence-1.9.2}/qadence/ml_tools/loss/__init__.py +0 -0
  122. {qadence-1.9.0 → qadence-1.9.2}/qadence/ml_tools/loss/loss.py +0 -0
  123. {qadence-1.9.0 → qadence-1.9.2}/qadence/ml_tools/models.py +0 -0
  124. {qadence-1.9.0 → qadence-1.9.2}/qadence/ml_tools/optimize_step.py +0 -0
  125. {qadence-1.9.0 → qadence-1.9.2}/qadence/ml_tools/parameters.py +0 -0
  126. {qadence-1.9.0 → qadence-1.9.2}/qadence/ml_tools/stages.py +0 -0
  127. {qadence-1.9.0 → qadence-1.9.2}/qadence/ml_tools/tensors.py +0 -0
  128. {qadence-1.9.0 → qadence-1.9.2}/qadence/ml_tools/train_utils/__init__.py +0 -0
  129. {qadence-1.9.0 → qadence-1.9.2}/qadence/ml_tools/train_utils/config_manager.py +0 -0
  130. {qadence-1.9.0 → qadence-1.9.2}/qadence/ml_tools/utils.py +0 -0
  131. {qadence-1.9.0 → qadence-1.9.2}/qadence/model.py +0 -0
  132. {qadence-1.9.0 → qadence-1.9.2}/qadence/noise/__init__.py +0 -0
  133. {qadence-1.9.0 → qadence-1.9.2}/qadence/noise/protocols.py +0 -0
  134. {qadence-1.9.0 → qadence-1.9.2}/qadence/operations/__init__.py +0 -0
  135. {qadence-1.9.0 → qadence-1.9.2}/qadence/operations/analog.py +0 -0
  136. {qadence-1.9.0 → qadence-1.9.2}/qadence/operations/control_ops.py +0 -0
  137. {qadence-1.9.0 → qadence-1.9.2}/qadence/operations/ham_evo.py +0 -0
  138. {qadence-1.9.0 → qadence-1.9.2}/qadence/operations/parametric.py +0 -0
  139. {qadence-1.9.0 → qadence-1.9.2}/qadence/operations/primitive.py +0 -0
  140. {qadence-1.9.0 → qadence-1.9.2}/qadence/overlap.py +0 -0
  141. {qadence-1.9.0 → qadence-1.9.2}/qadence/parameters.py +0 -0
  142. {qadence-1.9.0 → qadence-1.9.2}/qadence/protocols.py +0 -0
  143. {qadence-1.9.0 → qadence-1.9.2}/qadence/py.typed +0 -0
  144. {qadence-1.9.0 → qadence-1.9.2}/qadence/qubit_support.py +0 -0
  145. {qadence-1.9.0 → qadence-1.9.2}/qadence/register.py +0 -0
  146. {qadence-1.9.0 → qadence-1.9.2}/qadence/serial_expr_grammar.peg +0 -0
  147. {qadence-1.9.0 → qadence-1.9.2}/qadence/serialization.py +0 -0
  148. {qadence-1.9.0 → qadence-1.9.2}/qadence/transpile/__init__.py +0 -0
  149. {qadence-1.9.0 → qadence-1.9.2}/qadence/transpile/apply_fn.py +0 -0
  150. {qadence-1.9.0 → qadence-1.9.2}/qadence/transpile/block.py +0 -0
  151. {qadence-1.9.0 → qadence-1.9.2}/qadence/transpile/circuit.py +0 -0
  152. {qadence-1.9.0 → qadence-1.9.2}/qadence/transpile/digitalize.py +0 -0
  153. {qadence-1.9.0 → qadence-1.9.2}/qadence/transpile/flatten.py +0 -0
  154. {qadence-1.9.0 → qadence-1.9.2}/qadence/transpile/invert.py +0 -0
  155. {qadence-1.9.0 → qadence-1.9.2}/qadence/transpile/noise.py +0 -0
  156. {qadence-1.9.0 → qadence-1.9.2}/qadence/transpile/transpile.py +0 -0
  157. {qadence-1.9.0 → qadence-1.9.2}/qadence/utils.py +0 -0
  158. {qadence-1.9.0 → qadence-1.9.2}/setup.py +0 -0

.github/workflows/test_fast.yml

@@ -36,12 +36,10 @@ jobs:
       - name: Run fast tests
         run: |
           hatch -v run test -m "not slow"
-      - name: Upload coverage data
-        uses: actions/upload-artifact@v4
+      - name: Upload coverage reports to Codecov
+        uses: codecov/codecov-action@v5
         with:
-          name: "coverage-data"
-          path: .coverage.*
-          if-no-files-found: ignore
+          token: ${{ secrets.CODECOV_TOKEN }}
 
   publish:
     name: Publish to PyPI

PKG-INFO

@@ -1,9 +1,10 @@
-Metadata-Version: 2.3
+Metadata-Version: 2.4
 Name: qadence
-Version: 1.9.0
+Version: 1.9.2
 Summary: Pasqal interface for circuit-based quantum computing SDKs
 Author-email: Aleksander Wennersteen <aleksander.wennersteen@pasqal.com>, Gert-Jan Both <gert-jan.both@pasqal.com>, Niklas Heim <niklas.heim@pasqal.com>, Mario Dagrada <mario.dagrada@pasqal.com>, Vincent Elfving <vincent.elfving@pasqal.com>, Dominik Seitz <dominik.seitz@pasqal.com>, Roland Guichard <roland.guichard@pasqal.com>, "Joao P. Moutinho" <joao.moutinho@pasqal.com>, Vytautas Abramavicius <vytautas.abramavicius@pasqal.com>, Gergana Velikova <gergana.velikova@pasqal.com>, Eduardo Maschio <eduardo.maschio@pasqal.com>, Smit Chaudhary <smit.chaudhary@pasqal.com>, Ignacio Fernández Graña <ignacio.fernandez-grana@pasqal.com>, Charles Moussa <charles.moussa@pasqal.com>, Giorgio Tosti Balducci <giorgio.tosti-balducci@pasqal.com>, Daniele Cucurachi <daniele.cucurachi@pasqal.com>
 License: Apache 2.0
+License-File: LICENSE
 Classifier: License :: OSI Approved :: Apache Software License
 Classifier: Programming Language :: Python
 Classifier: Programming Language :: Python :: 3

@@ -21,7 +22,7 @@ Requires-Dist: matplotlib
 Requires-Dist: nevergrad
 Requires-Dist: numpy
 Requires-Dist: openfermion
-Requires-Dist: pyqtorch==1.6.0
+Requires-Dist: pyqtorch==1.7.0
 Requires-Dist: pyyaml
 Requires-Dist: rich
 Requires-Dist: scipy

@@ -53,9 +54,9 @@ Requires-Dist: mlflow; extra == 'mlflow'
 Provides-Extra: protocols
 Requires-Dist: qadence-protocols; extra == 'protocols'
 Provides-Extra: pulser
-Requires-Dist: pasqal-cloud==0.12.5; extra == 'pulser'
-Requires-Dist: pulser-core==1.1.1; extra == 'pulser'
-Requires-Dist: pulser-simulation==1.1.1; extra == 'pulser'
+Requires-Dist: pasqal-cloud==0.12.6; extra == 'pulser'
+Requires-Dist: pulser-core==1.2.0; extra == 'pulser'
+Requires-Dist: pulser-simulation==1.2.0; extra == 'pulser'
 Provides-Extra: visualization
 Requires-Dist: graphviz; extra == 'visualization'
 Description-Content-Type: text/markdown

@@ -80,6 +81,8 @@ programs** with tunable qubit interactions and arbitrary register topologies rea
 [![Documentation](https://github.com/pasqal-io/qadence/actions/workflows/build_docs.yml/badge.svg)](https://pasqal-io.github.io/qadence/latest)
 [![Pypi](https://badge.fury.io/py/qadence.svg)](https://pypi.org/project/qadence/)
 [![License](https://img.shields.io/badge/License-Apache_2.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)
+![Coverage](https://img.shields.io/codecov/c/github/pasqal-io/qadence?style=flat-square)
+
 
 ## Feature highlights
 

README.md

@@ -18,6 +18,8 @@ programs** with tunable qubit interactions and arbitrary register topologies rea
 [![Documentation](https://github.com/pasqal-io/qadence/actions/workflows/build_docs.yml/badge.svg)](https://pasqal-io.github.io/qadence/latest)
 [![Pypi](https://badge.fury.io/py/qadence.svg)](https://pypi.org/project/qadence/)
 [![License](https://img.shields.io/badge/License-Apache_2.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)
+![Coverage](https://img.shields.io/codecov/c/github/pasqal-io/qadence?style=flat-square)
+
 
 ## Feature highlights
 

pyproject.toml

@@ -26,7 +26,7 @@ authors = [
 ]
 requires-python = ">=3.9"
 license = { text = "Apache 2.0" }
-version = "1.9.0"
+version = "1.9.2"
 classifiers = [
     "License :: OSI Approved :: Apache Software License",
     "Programming Language :: Python",

@@ -50,7 +50,7 @@ dependencies = [
     "jsonschema",
     "nevergrad",
     "scipy",
-    "pyqtorch==1.6.0",
+    "pyqtorch==1.7.0",
     "pyyaml",
     "matplotlib",
     "Arpeggio==2.0.2",

@@ -62,9 +62,9 @@ allow-ambiguous-features = true
 
 [project.optional-dependencies]
 pulser = [
-    "pulser-core==1.1.1",
-    "pulser-simulation==1.1.1",
-    "pasqal-cloud==0.12.5",
+    "pulser-core==1.2.0",
+    "pulser-simulation==1.2.0",
+    "pasqal-cloud==0.12.6",
 ]
 visualization = [
     "graphviz",

@@ -107,7 +107,7 @@ dependencies = [
 features = ["pulser", "visualization", "horqrux", "mlflow"]
 
 [tool.hatch.envs.default.scripts]
-test = "pytest -n auto --cov-report lcov --cov-config=pyproject.toml --cov=qadence --cov=tests --ignore=./tests/test_examples.py {args}"
+test = "pytest -n auto --cov-report=xml --cov-config=pyproject.toml --cov=qadence --cov=tests --ignore=./tests/test_examples.py {args}"
 test-examples = "pytest ./tests/test_examples.py {args}"
 no-cov = "cov --no-cov {args}"
 test-docs = "mkdocs build --clean --strict"

@@ -175,7 +175,7 @@ exclude_lines = ["no cov", "if __name__ == .__main__.:", "if TYPE_CHECKING:"]
 
 [tool.ruff]
 select = ["E", "F", "I", "Q"]
-extend-ignore = ["F841", "F403"]
+extend-ignore = ["F841", "F403", "E731", "E741"]
 line-length = 100
 
 [tool.ruff.isort]

qadence/analog/device.py

@@ -5,6 +5,8 @@ from dataclasses import dataclass, fields
 from qadence.analog import AddressingPattern
 from qadence.types import PI, DeviceType, Interaction
 
+from .constants import C6_DICT
+
 
 @dataclass(frozen=True, eq=True)
 class RydbergDevice:

@@ -41,6 +43,11 @@ class RydbergDevice:
     type: DeviceType = DeviceType.IDEALIZED
     """DeviceType.IDEALIZED or REALISTIC to convert to the Pulser backend."""
 
+    @property
+    def coeff_ising(self) -> float:
+        """Value of C_6."""
+        return C6_DICT[self.rydberg_level]
+
     def __post_init__(self) -> None:
        # FIXME: Currently not supporting custom interaction functions.
        if self.interaction not in [Interaction.NN, Interaction.XY]:
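
The new property exposes the C_6 coefficient for the device's Rydberg level without touching the constants module directly. A minimal sketch of reading it, assuming `RydbergDevice` is re-exported from `qadence.analog` and constructible with its defaults:

```python
# Hedged sketch: reading the new coeff_ising property. Assumes RydbergDevice
# is importable from qadence.analog and that its default rydberg_level has an
# entry in C6_DICT.
from qadence.analog import RydbergDevice

device = RydbergDevice()
print(device.coeff_ising)  # C_6 coefficient looked up via device.rydberg_level
```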

qadence/backends/pulser/backend.py

@@ -259,7 +259,7 @@ class Backend(BackendInterface):
         for i, param_values_el in enumerate(vals):
             sequence = self.assign_parameters(circuit, param_values_el)
             sim_result: CoherentResults = simulate_sequence(sequence, self.config, state)
-            final_state = sim_result.get_final_state().data.toarray()
+            final_state = sim_result.get_final_state().data.to_array()
             batched_dm[i] = np.flip(final_state)
         return torch.from_numpy(batched_dm)
 
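
The `toarray()` to `to_array()` rename appears to track the data-layer API of the qutip version pulled in by the pulser 1.2.0 bump above. A rough standalone check of the new spelling, assuming qutip >= 5 is installed:

```python
# Rough check, assuming qutip >= 5 where Qobj.data exposes to_array()
# (the older scipy-sparse-backed data used toarray()).
import numpy as np
import qutip

state = qutip.basis(2, 0)
dense = state.data.to_array()
assert isinstance(dense, np.ndarray)
```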

qadence/backends/pyqtorch/convert_ops.py

@@ -264,7 +264,7 @@ def convert_block(
             duration=duration,
             solver=config.ode_solver,
             steps=config.n_steps_hevo,
-            noise_operators=noise_operators,
+            noise=noise_operators if len(noise_operators) > 0 else None,
         )
     ]
 

@@ -351,22 +351,22 @@ def convert_block(
     )
 
 
-def convert_digital_noise(noise: NoiseHandler) -> pyq.noise.NoiseProtocol | None:
+def convert_digital_noise(noise: NoiseHandler) -> pyq.noise.DigitalNoiseProtocol | None:
     """Convert the digital noise into pyqtorch NoiseProtocol.
 
     Args:
         noise (NoiseHandler): Noise to convert.
 
     Returns:
-        pyq.noise.NoiseProtocol | None: Pyqtorch native noise protocol
+        pyq.noise.DigitalNoiseProtocol | None: Pyqtorch native noise protocol
         if there are any digital noise protocols.
     """
     digital_part = noise.filter(NoiseProtocol.DIGITAL)
     if digital_part is None:
         return None
-    return pyq.noise.NoiseProtocol(
+    return pyq.noise.DigitalNoiseProtocol(
         [
-            pyq.noise.NoiseProtocol(proto, option.get("error_probability"))
+            pyq.noise.DigitalNoiseProtocol(proto, option.get("error_probability"))
             for proto, option in zip(digital_part.protocol, digital_part.options)
         ]
     )
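
For context, a hedged sketch of the kind of handler this converter consumes; the `NoiseHandler` call signature is assumed from qadence's noise module and is not shown in this diff:

```python
# Hedged sketch: a digital noise handler that convert_digital_noise would wrap
# into pyq.noise.DigitalNoiseProtocol. The NoiseHandler signature is assumed.
from qadence import NoiseHandler
from qadence.types import NoiseProtocol

noise = NoiseHandler(NoiseProtocol.DIGITAL.BITFLIP, {"error_probability": 0.1})
```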

qadence/backends/utils.py

@@ -110,9 +110,23 @@ def to_list_of_dicts(param_values: ParamDictType) -> list[ParamDictType]:
 
 
 def pyqify(state: Tensor, n_qubits: int = None) -> ArrayLike:
-    """Convert a state of shape (batch_size, 2**n_qubits) to [2] * n_qubits + [batch_size]."""
+    """Convert a state of shape (batch_size, 2**n_qubits) to [2] * n_qubits + [batch_size].
+
+    Or set the batch_size of a density matrix as the last dimension for PyQTorch.
+    """
     if n_qubits is None:
         n_qubits = int(log2(state.shape[1]))
+    if isinstance(state, DensityMatrix):
+        if (
+            len(state.shape) != 3
+            or (state.shape[1] != 2**n_qubits)
+            or (state.shape[1] != state.shape[2])
+        ):
+            raise ValueError(
+                "The initial state must be composed of tensors/arrays of size "
+                f"(batch_size, 2**n_qubits, 2**n_qubits). Found: {state.shape = }."
+            )
+        return torch.einsum("kij->ijk", state)
     if len(state.shape) != 2 or (state.shape[1] != 2**n_qubits):
         raise ValueError(
             "The initial state must be composed of tensors/arrays of size "

qadence/engines/torch/differentiable_expectation.py

@@ -49,7 +49,9 @@ class PSRExpectation(Function):
         if isinstance(expectation_values[0], list):
             exp_vals: list = []
             for expectation_value in expectation_values:
-                res = list(map(lambda x: x.get_final_state().data.toarray(), expectation_value))
+                res = list(
+                    map(lambda x: x.get_final_state().data.to_array(), expectation_value)
+                )
                 exp_vals.append(torch.tensor(res))
             expectation_values = exp_vals
         return torch.stack(expectation_values)

qadence/ml_tools/callbacks/__init__.py

@@ -2,9 +2,14 @@ from __future__ import annotations
 
 from .callback import (
     Callback,
+    EarlyStopping,
+    GradientMonitoring,
     LoadCheckpoint,
     LogHyperparameters,
     LogModelTracker,
+    LRSchedulerCosineAnnealing,
+    LRSchedulerCyclic,
+    LRSchedulerStepDecay,
     PlotMetrics,
     PrintMetrics,
     SaveBestCheckpoint,

@@ -26,5 +31,10 @@ __all__ = [
     "SaveBestCheckpoint",
     "SaveCheckpoint",
     "WriteMetrics",
+    "GradientMonitoring",
+    "LRSchedulerStepDecay",
+    "LRSchedulerCyclic",
+    "LRSchedulerCosineAnnealing",
+    "EarlyStopping",
     "get_writer",
 ]
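
All five additions are exported at package level alongside the existing callbacks:

```python
# The new callbacks are importable directly from the callbacks package,
# matching the __all__ entries above.
from qadence.ml_tools.callbacks import (
    EarlyStopping,
    GradientMonitoring,
    LRSchedulerCosineAnnealing,
    LRSchedulerCyclic,
    LRSchedulerStepDecay,
)
```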

qadence/ml_tools/callbacks/callback.py

@@ -1,5 +1,7 @@
 from __future__ import annotations
 
+import math
+from logging import getLogger
 from typing import Any, Callable
 
 from qadence.ml_tools.callbacks.saveload import load_checkpoint, write_checkpoint

@@ -12,6 +14,8 @@ from qadence.ml_tools.stages import TrainingStage
 CallbackFunction = Callable[..., Any]
 CallbackConditionFunction = Callable[..., bool]
 
+logger = getLogger("ml_tools")
+
 
 class Callback:
     """Base class for defining various training callbacks.

@@ -258,7 +262,7 @@ class WriteMetrics(Callback):
             writer (BaseWriter ): The writer object for logging.
         """
         opt_result = trainer.opt_result
-        writer.write(opt_result)
+        writer.write(opt_result.iteration, opt_result.metrics)
 
 
 class PlotMetrics(Callback):
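
Writers are now handed the iteration and the metrics dict separately instead of the whole optimization result. A duck-typed sketch of the new call shape; this is not qadence's `BaseWriter`, whose full abstract interface lives in `writer_registry.py` and is not shown here:

```python
# Hypothetical stand-in illustrating the new write(iteration, metrics) shape.
class PrintWriter:
    def write(self, iteration: int, metrics: dict[str, float]) -> None:
        print(f"iter={iteration}", metrics)

PrintWriter().write(100, {"train_loss": 0.42})
```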

@@ -449,3 +453,323 @@ class LogModelTracker(Callback):
         writer.log_model(
             model, trainer.train_dataloader, trainer.val_dataloader, trainer.test_dataloader
         )
+
+
+class LRSchedulerStepDecay(Callback):
+    """
+    Reduces the learning rate by a factor at regular intervals.
+
+    This callback adjusts the learning rate by multiplying it with a decay factor
+    after a specified number of iterations. The learning rate is updated as:
+    lr = lr * gamma
+
+    Example Usage in `TrainConfig`:
+    To use `LRSchedulerStepDecay`, include it in the `callbacks` list when setting
+    up your `TrainConfig`:
+    ```python exec="on" source="material-block" result="json"
+    from qadence.ml_tools import TrainConfig
+    from qadence.ml_tools.callbacks import LRSchedulerStepDecay
+
+    # Create an instance of the LRSchedulerStepDecay callback
+    lr_step_decay = LRSchedulerStepDecay(on="train_epoch_end",
+                                         called_every=100,
+                                         gamma=0.5)
+
+    config = TrainConfig(
+        max_iter=10000,
+        # Print metrics every 1000 training epochs
+        print_every=1000,
+        # Add the custom callback
+        callbacks=[lr_step_decay]
+    )
+    ```
+    """
+
+    def __init__(self, on: str, called_every: int, gamma: float = 0.5):
+        """Initializes the LRSchedulerStepDecay callback.
+
+        Args:
+            on (str): The event to trigger the callback.
+            called_every (int): Frequency of callback calls in terms of iterations.
+            gamma (float, optional): The decay factor applied to the learning rate.
+                A value < 1 reduces the learning rate over time. Default is 0.5.
+        """
+        super().__init__(on=on, called_every=called_every)
+        self.gamma = gamma
+
+    def run_callback(self, trainer: Any, config: TrainConfig, writer: BaseWriter) -> None:
+        """
+        Runs the callback to apply step decay to the learning rate.
+
+        Args:
+            trainer (Any): The training object.
+            config (TrainConfig): The configuration object.
+            writer (BaseWriter): The writer object for logging.
+        """
+        for param_group in trainer.optimizer.param_groups:
+            param_group["lr"] *= self.gamma
+
+
+class LRSchedulerCyclic(Callback):
+    """
+    Applies a cyclic learning rate schedule during training.
+
+    This callback oscillates the learning rate between a minimum (base_lr)
+    and a maximum (max_lr) over a defined cycle length (step_size). The learning
+    rate follows a triangular wave pattern.
+
+    Example Usage in `TrainConfig`:
+    To use `LRSchedulerCyclic`, include it in the `callbacks` list when setting
+    up your `TrainConfig`:
+    ```python exec="on" source="material-block" result="json"
+    from qadence.ml_tools import TrainConfig
+    from qadence.ml_tools.callbacks import LRSchedulerCyclic
+
+    # Create an instance of the LRSchedulerCyclic callback
+    lr_cyclic = LRSchedulerCyclic(on="train_batch_end",
+                                  called_every=1,
+                                  base_lr=0.001,
+                                  max_lr=0.01,
+                                  step_size=2000)
+
+    config = TrainConfig(
+        max_iter=10000,
+        # Print metrics every 1000 training epochs
+        print_every=1000,
+        # Add the custom callback
+        callbacks=[lr_cyclic]
+    )
+    ```
+    """
+
+    def __init__(self, on: str, called_every: int, base_lr: float, max_lr: float, step_size: int):
+        """Initializes the LRSchedulerCyclic callback.
+
+        Args:
+            on (str): The event to trigger the callback.
+            called_every (int): Frequency of callback calls in terms of iterations.
+            base_lr (float): The minimum learning rate.
+            max_lr (float): The maximum learning rate.
+            step_size (int): Number of iterations for half a cycle.
+        """
+        super().__init__(on=on, called_every=called_every)
+        self.base_lr = base_lr
+        self.max_lr = max_lr
+        self.step_size = step_size
+
+    def run_callback(self, trainer: Any, config: TrainConfig, writer: BaseWriter) -> None:
+        """
+        Adjusts the learning rate cyclically.
+
+        Args:
+            trainer (Any): The training object.
+            config (TrainConfig): The configuration object.
+            writer (BaseWriter): The writer object for logging.
+        """
+        cycle = trainer.opt_result.iteration // (2 * self.step_size)
+        x = abs(trainer.opt_result.iteration / self.step_size - 2 * cycle - 1)
+        scale = max(0, (1 - x))
+        new_lr = self.base_lr + (self.max_lr - self.base_lr) * scale
+        for param_group in trainer.optimizer.param_groups:
+            param_group["lr"] = new_lr
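
The arithmetic in `run_callback` is the standard triangular cyclic-LR wave; a quick standalone trace with toy values:

```python
# Standalone trace of the triangular schedule above
# (toy values: base_lr=0.001, max_lr=0.01, step_size=2).
base_lr, max_lr, step_size = 0.001, 0.01, 2
for iteration in range(9):
    cycle = iteration // (2 * step_size)
    x = abs(iteration / step_size - 2 * cycle - 1)
    lr = base_lr + (max_lr - base_lr) * max(0, 1 - x)
    print(iteration, round(lr, 4))
# Rises from base_lr to max_lr over step_size iterations, then falls back.
```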
+
+
+class LRSchedulerCosineAnnealing(Callback):
+    """
+    Applies cosine annealing to the learning rate during training.
+
+    This callback decreases the learning rate following a cosine curve,
+    starting from the initial learning rate and annealing to a minimum (min_lr).
+
+    Example Usage in `TrainConfig`:
+    To use `LRSchedulerCosineAnnealing`, include it in the `callbacks` list
+    when setting up your `TrainConfig`:
+    ```python exec="on" source="material-block" result="json"
+    from qadence.ml_tools import TrainConfig
+    from qadence.ml_tools.callbacks import LRSchedulerCosineAnnealing
+
+    # Create an instance of the LRSchedulerCosineAnnealing callback
+    lr_cosine = LRSchedulerCosineAnnealing(on="train_batch_end",
+                                           called_every=1,
+                                           t_max=5000,
+                                           min_lr=1e-6)
+
+    config = TrainConfig(
+        max_iter=10000,
+        # Print metrics every 1000 training epochs
+        print_every=1000,
+        # Add the custom callback
+        callbacks=[lr_cosine]
+    )
+    ```
+    """
+
+    def __init__(self, on: str, called_every: int, t_max: int, min_lr: float = 0.0):
+        """Initializes the LRSchedulerCosineAnnealing callback.
+
+        Args:
+            on (str): The event to trigger the callback.
+            called_every (int): Frequency of callback calls in terms of iterations.
+            t_max (int): The total number of iterations for one annealing cycle.
+            min_lr (float, optional): The minimum learning rate. Default is 0.0.
+        """
+        super().__init__(on=on, called_every=called_every)
+        self.t_max = t_max
+        self.min_lr = min_lr
+
+    def run_callback(self, trainer: Any, config: TrainConfig, writer: BaseWriter) -> None:
+        """
+        Adjusts the learning rate using cosine annealing.
+
+        Args:
+            trainer (Any): The training object.
+            config (TrainConfig): The configuration object.
+            writer (BaseWriter): The writer object for logging.
+        """
+        for param_group in trainer.optimizer.param_groups:
+            max_lr = param_group["lr"]
+            new_lr = (
+                self.min_lr
+                + (max_lr - self.min_lr)
+                * (1 + math.cos(math.pi * trainer.opt_result.iteration / self.t_max))
+                / 2
+            )
+            param_group["lr"] = new_lr
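
Note that `max_lr` is re-read from `param_group["lr"]` on every call, so the closed-form trace below holds `max_lr` fixed purely for illustration:

```python
# Closed-form values of the cosine-annealing expression above, with a fixed
# max_lr (the callback itself re-reads param_group["lr"] on each call).
import math

max_lr, min_lr, t_max = 0.01, 1e-6, 5000
for it in (0, 1250, 2500, 3750, 5000):
    lr = min_lr + (max_lr - min_lr) * (1 + math.cos(math.pi * it / t_max)) / 2
    print(it, f"{lr:.3e}")
# Decays from max_lr at iteration 0 to min_lr at iteration t_max.
```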
+
+
+class EarlyStopping(Callback):
+    """
+    Stops training when a monitored metric has not improved for a specified number of epochs.
+
+    This callback monitors a specified metric (e.g., validation loss or accuracy). If the metric
+    does not improve for a given patience period, training is stopped.
+
+    Example Usage in `TrainConfig`:
+    To use `EarlyStopping`, include it in the `callbacks` list when setting up your `TrainConfig`:
+    ```python exec="on" source="material-block" result="json"
+    from qadence.ml_tools import TrainConfig
+    from qadence.ml_tools.callbacks import EarlyStopping
+
+    # Create an instance of the EarlyStopping callback
+    early_stopping = EarlyStopping(on="val_epoch_end",
+                                   called_every=1,
+                                   monitor="val_loss",
+                                   patience=5,
+                                   mode="min")
+
+    config = TrainConfig(
+        max_iter=10000,
+        print_every=1000,
+        callbacks=[early_stopping]
+    )
+    ```
+    """
+
+    def __init__(
+        self, on: str, called_every: int, monitor: str, patience: int = 5, mode: str = "min"
+    ):
+        """Initializes the EarlyStopping callback.
+
+        Args:
+            on (str): The event to trigger the callback (e.g., "val_epoch_end").
+            called_every (int): Frequency of callback calls in terms of iterations.
+            monitor (str): The metric to monitor (e.g., "val_loss" or "train_loss").
+                All metrics returned by optimize step are available to monitor.
+                Please add "val_" and "train_" strings at the start of the metric name.
+            patience (int, optional): Number of iterations to wait for improvement. Default is 5.
+            mode (str, optional): Whether to minimize ("min") or maximize ("max") the metric.
+                Default is "min".
+        """
+        super().__init__(on=on, called_every=called_every)
+        self.monitor = monitor
+        self.patience = patience
+        self.mode = mode
+        self.best_value = float("inf") if mode == "min" else -float("inf")
+        self.counter = 0
+
+    def run_callback(self, trainer: Any, config: TrainConfig, writer: BaseWriter) -> None:
+        """
+        Monitors the metric and stops training if no improvement is observed.
+
+        Args:
+            trainer (Any): The training object.
+            config (TrainConfig): The configuration object.
+            writer (BaseWriter): The writer object for logging.
+        """
+        current_value = trainer.opt_result.metrics.get(self.monitor)
+        if current_value is None:
+            raise ValueError(f"Metric '{self.monitor}' is not available in the trainer's metrics.")
+
+        if (self.mode == "min" and current_value < self.best_value) or (
+            self.mode == "max" and current_value > self.best_value
+        ):
+            self.best_value = current_value
+            self.counter = 0
+        else:
+            self.counter += 1
+
+        if self.counter >= self.patience:
+            logger.info(
+                f"EarlyStopping: No improvement in '{self.monitor}' for {self.patience} epochs. "
+                "Stopping training."
+            )
+            trainer.stop_training = True
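
A toy trace of the patience logic above (mode="min", patience=3): three consecutive non-improving values trigger the stop.

```python
# Mirrors the best-value/counter bookkeeping in EarlyStopping.run_callback.
best, counter, patience = float("inf"), 0, 3
for val in [0.9, 0.7, 0.72, 0.71, 0.74]:
    if val < best:
        best, counter = val, 0
    else:
        counter += 1
    print(val, counter, counter >= patience)  # stop fires on the last value
```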
+
+
+class GradientMonitoring(Callback):
+    """
+    Logs gradient statistics (e.g., mean, standard deviation, max) during training.
+
+    This callback monitors and logs statistics about the gradients of the model parameters
+    to help debug or optimize the training process.
+
+    Example Usage in `TrainConfig`:
+    To use `GradientMonitoring`, include it in the `callbacks` list when
+    setting up your `TrainConfig`:
+    ```python exec="on" source="material-block" result="json"
+    from qadence.ml_tools import TrainConfig
+    from qadence.ml_tools.callbacks import GradientMonitoring
+
+    # Create an instance of the GradientMonitoring callback
+    gradient_monitoring = GradientMonitoring(on="train_batch_end", called_every=10)
+
+    config = TrainConfig(
+        max_iter=10000,
+        print_every=1000,
+        callbacks=[gradient_monitoring]
+    )
+    ```
+    """
+
+    def __init__(self, on: str, called_every: int = 1):
+        """Initializes the GradientMonitoring callback.
+
+        Args:
+            on (str): The event to trigger the callback (e.g., "train_batch_end").
+            called_every (int): Frequency of callback calls in terms of iterations.
+        """
+        super().__init__(on=on, called_every=called_every)
+
+    def run_callback(self, trainer: Any, config: TrainConfig, writer: BaseWriter) -> None:
+        """
+        Logs gradient statistics.
+
+        Args:
+            trainer (Any): The training object.
+            config (TrainConfig): The configuration object.
+            writer (BaseWriter): The writer object for logging.
+        """
+        gradient_stats = {}
+        for name, param in trainer.model.named_parameters():
+            if param.grad is not None:
+                grad = param.grad
+                gradient_stats.update(
+                    {
+                        name + "_mean": grad.mean().item(),
+                        name + "_std": grad.std().item(),
+                        name + "_max": grad.max().item(),
+                        name + "_min": grad.min().item(),
+                    }
+                )
+
+        writer.write(trainer.opt_result.iteration, gradient_stats)
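
A minimal sketch of the statistics this callback collects, run on a plain torch module instead of a qadence trainer:

```python
# Same per-parameter statistics as GradientMonitoring, on a toy model.
import torch

model = torch.nn.Linear(4, 3)
loss = model(torch.randn(8, 4)).pow(2).mean()
loss.backward()

stats = {}
for name, param in model.named_parameters():
    if param.grad is not None:
        grad = param.grad
        stats[f"{name}_mean"] = grad.mean().item()
        stats[f"{name}_std"] = grad.std().item()
        stats[f"{name}_max"] = grad.max().item()
        stats[f"{name}_min"] = grad.min().item()
print(stats)  # keys mirror what the callback passes to writer.write
```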