qadence 1.11.2__tar.gz → 1.11.3__tar.gz

This diff shows the changes between two package versions as they were published to a supported public registry. It is provided for informational purposes only.
Files changed (167)
  1. {qadence-1.11.2 → qadence-1.11.3}/.pre-commit-config.yaml +1 -1
  2. {qadence-1.11.2 → qadence-1.11.3}/PKG-INFO +14 -4
  3. {qadence-1.11.2 → qadence-1.11.3}/mkdocs.yml +1 -0
  4. {qadence-1.11.2 → qadence-1.11.3}/pyproject.toml +12 -4
  5. {qadence-1.11.2 → qadence-1.11.3}/qadence/backend.py +23 -7
  6. {qadence-1.11.2 → qadence-1.11.3}/qadence/backends/horqrux/backend.py +19 -6
  7. {qadence-1.11.2 → qadence-1.11.3}/qadence/backends/jax_utils.py +5 -2
  8. {qadence-1.11.2 → qadence-1.11.3}/qadence/backends/pulser/backend.py +16 -9
  9. {qadence-1.11.2 → qadence-1.11.3}/qadence/backends/pyqtorch/backend.py +25 -11
  10. {qadence-1.11.2 → qadence-1.11.3}/qadence/blocks/embedding.py +10 -1
  11. {qadence-1.11.2 → qadence-1.11.3}/qadence/blocks/utils.py +33 -24
  12. {qadence-1.11.2 → qadence-1.11.3}/qadence/engines/differentiable_backend.py +7 -1
  13. {qadence-1.11.2 → qadence-1.11.3}/qadence/engines/jax/differentiable_backend.py +7 -1
  14. {qadence-1.11.2 → qadence-1.11.3}/qadence/engines/torch/differentiable_backend.py +7 -1
  15. {qadence-1.11.2 → qadence-1.11.3}/qadence/engines/torch/differentiable_expectation.py +12 -11
  16. {qadence-1.11.2 → qadence-1.11.3}/qadence/ml_tools/__init__.py +2 -0
  17. {qadence-1.11.2 → qadence-1.11.3}/qadence/model.py +94 -7
  18. {qadence-1.11.2 → qadence-1.11.3}/qadence/transpile/__init__.py +3 -2
  19. {qadence-1.11.2 → qadence-1.11.3}/qadence/transpile/block.py +58 -5
  20. {qadence-1.11.2 → qadence-1.11.3}/qadence/types.py +1 -1
  21. {qadence-1.11.2 → qadence-1.11.3}/qadence/utils.py +23 -1
  22. {qadence-1.11.2 → qadence-1.11.3}/.coveragerc +0 -0
  23. {qadence-1.11.2 → qadence-1.11.3}/.github/ISSUE_TEMPLATE/bug-report.yml +0 -0
  24. {qadence-1.11.2 → qadence-1.11.3}/.github/ISSUE_TEMPLATE/config.yml +0 -0
  25. {qadence-1.11.2 → qadence-1.11.3}/.github/ISSUE_TEMPLATE/new-feature.yml +0 -0
  26. {qadence-1.11.2 → qadence-1.11.3}/.github/workflows/build_docs.yml +0 -0
  27. {qadence-1.11.2 → qadence-1.11.3}/.github/workflows/lint.yml +0 -0
  28. {qadence-1.11.2 → qadence-1.11.3}/.github/workflows/test_all.yml +0 -0
  29. {qadence-1.11.2 → qadence-1.11.3}/.github/workflows/test_examples.yml +0 -0
  30. {qadence-1.11.2 → qadence-1.11.3}/.github/workflows/test_fast.yml +0 -0
  31. {qadence-1.11.2 → qadence-1.11.3}/.gitignore +0 -0
  32. {qadence-1.11.2 → qadence-1.11.3}/LICENSE +0 -0
  33. {qadence-1.11.2 → qadence-1.11.3}/MANIFEST.in +0 -0
  34. {qadence-1.11.2 → qadence-1.11.3}/README.md +0 -0
  35. {qadence-1.11.2 → qadence-1.11.3}/qadence/__init__.py +0 -0
  36. {qadence-1.11.2 → qadence-1.11.3}/qadence/analog/__init__.py +0 -0
  37. {qadence-1.11.2 → qadence-1.11.3}/qadence/analog/addressing.py +0 -0
  38. {qadence-1.11.2 → qadence-1.11.3}/qadence/analog/constants.py +0 -0
  39. {qadence-1.11.2 → qadence-1.11.3}/qadence/analog/device.py +0 -0
  40. {qadence-1.11.2 → qadence-1.11.3}/qadence/analog/hamiltonian_terms.py +0 -0
  41. {qadence-1.11.2 → qadence-1.11.3}/qadence/analog/parse_analog.py +0 -0
  42. {qadence-1.11.2 → qadence-1.11.3}/qadence/backends/__init__.py +0 -0
  43. {qadence-1.11.2 → qadence-1.11.3}/qadence/backends/agpsr_utils.py +0 -0
  44. {qadence-1.11.2 → qadence-1.11.3}/qadence/backends/api.py +0 -0
  45. {qadence-1.11.2 → qadence-1.11.3}/qadence/backends/horqrux/__init__.py +0 -0
  46. {qadence-1.11.2 → qadence-1.11.3}/qadence/backends/horqrux/config.py +0 -0
  47. {qadence-1.11.2 → qadence-1.11.3}/qadence/backends/horqrux/convert_ops.py +0 -0
  48. {qadence-1.11.2 → qadence-1.11.3}/qadence/backends/parameter_shift_rules.py +0 -0
  49. {qadence-1.11.2 → qadence-1.11.3}/qadence/backends/pulser/__init__.py +0 -0
  50. {qadence-1.11.2 → qadence-1.11.3}/qadence/backends/pulser/channels.py +0 -0
  51. {qadence-1.11.2 → qadence-1.11.3}/qadence/backends/pulser/cloud.py +0 -0
  52. {qadence-1.11.2 → qadence-1.11.3}/qadence/backends/pulser/config.py +0 -0
  53. {qadence-1.11.2 → qadence-1.11.3}/qadence/backends/pulser/convert_ops.py +0 -0
  54. {qadence-1.11.2 → qadence-1.11.3}/qadence/backends/pulser/devices.py +0 -0
  55. {qadence-1.11.2 → qadence-1.11.3}/qadence/backends/pulser/pulses.py +0 -0
  56. {qadence-1.11.2 → qadence-1.11.3}/qadence/backends/pulser/waveforms.py +0 -0
  57. {qadence-1.11.2 → qadence-1.11.3}/qadence/backends/pyqtorch/__init__.py +0 -0
  58. {qadence-1.11.2 → qadence-1.11.3}/qadence/backends/pyqtorch/config.py +0 -0
  59. {qadence-1.11.2 → qadence-1.11.3}/qadence/backends/pyqtorch/convert_ops.py +0 -0
  60. {qadence-1.11.2 → qadence-1.11.3}/qadence/backends/utils.py +0 -0
  61. {qadence-1.11.2 → qadence-1.11.3}/qadence/blocks/__init__.py +0 -0
  62. {qadence-1.11.2 → qadence-1.11.3}/qadence/blocks/abstract.py +0 -0
  63. {qadence-1.11.2 → qadence-1.11.3}/qadence/blocks/analog.py +0 -0
  64. {qadence-1.11.2 → qadence-1.11.3}/qadence/blocks/block_to_tensor.py +0 -0
  65. {qadence-1.11.2 → qadence-1.11.3}/qadence/blocks/composite.py +0 -0
  66. {qadence-1.11.2 → qadence-1.11.3}/qadence/blocks/manipulate.py +0 -0
  67. {qadence-1.11.2 → qadence-1.11.3}/qadence/blocks/matrix.py +0 -0
  68. {qadence-1.11.2 → qadence-1.11.3}/qadence/blocks/primitive.py +0 -0
  69. {qadence-1.11.2 → qadence-1.11.3}/qadence/circuit.py +0 -0
  70. {qadence-1.11.2 → qadence-1.11.3}/qadence/constructors/__init__.py +0 -0
  71. {qadence-1.11.2 → qadence-1.11.3}/qadence/constructors/ala.py +0 -0
  72. {qadence-1.11.2 → qadence-1.11.3}/qadence/constructors/daqc/__init__.py +0 -0
  73. {qadence-1.11.2 → qadence-1.11.3}/qadence/constructors/daqc/daqc.py +0 -0
  74. {qadence-1.11.2 → qadence-1.11.3}/qadence/constructors/daqc/gen_parser.py +0 -0
  75. {qadence-1.11.2 → qadence-1.11.3}/qadence/constructors/daqc/utils.py +0 -0
  76. {qadence-1.11.2 → qadence-1.11.3}/qadence/constructors/feature_maps.py +0 -0
  77. {qadence-1.11.2 → qadence-1.11.3}/qadence/constructors/hamiltonians.py +0 -0
  78. {qadence-1.11.2 → qadence-1.11.3}/qadence/constructors/hea.py +0 -0
  79. {qadence-1.11.2 → qadence-1.11.3}/qadence/constructors/iia.py +0 -0
  80. {qadence-1.11.2 → qadence-1.11.3}/qadence/constructors/qft.py +0 -0
  81. {qadence-1.11.2 → qadence-1.11.3}/qadence/constructors/rydberg_feature_maps.py +0 -0
  82. {qadence-1.11.2 → qadence-1.11.3}/qadence/constructors/rydberg_hea.py +0 -0
  83. {qadence-1.11.2 → qadence-1.11.3}/qadence/constructors/utils.py +0 -0
  84. {qadence-1.11.2 → qadence-1.11.3}/qadence/decompose.py +0 -0
  85. {qadence-1.11.2 → qadence-1.11.3}/qadence/divergences.py +0 -0
  86. {qadence-1.11.2 → qadence-1.11.3}/qadence/draw/__init__.py +0 -0
  87. {qadence-1.11.2 → qadence-1.11.3}/qadence/draw/assets/dark/measurement.png +0 -0
  88. {qadence-1.11.2 → qadence-1.11.3}/qadence/draw/assets/dark/measurement.svg +0 -0
  89. {qadence-1.11.2 → qadence-1.11.3}/qadence/draw/assets/light/measurement.png +0 -0
  90. {qadence-1.11.2 → qadence-1.11.3}/qadence/draw/assets/light/measurement.svg +0 -0
  91. {qadence-1.11.2 → qadence-1.11.3}/qadence/draw/themes.py +0 -0
  92. {qadence-1.11.2 → qadence-1.11.3}/qadence/draw/utils.py +0 -0
  93. {qadence-1.11.2 → qadence-1.11.3}/qadence/draw/vizbackend.py +0 -0
  94. {qadence-1.11.2 → qadence-1.11.3}/qadence/engines/__init__.py +0 -0
  95. {qadence-1.11.2 → qadence-1.11.3}/qadence/engines/jax/__init__.py +0 -0
  96. {qadence-1.11.2 → qadence-1.11.3}/qadence/engines/jax/differentiable_expectation.py +0 -0
  97. {qadence-1.11.2 → qadence-1.11.3}/qadence/engines/torch/__init__.py +0 -0
  98. {qadence-1.11.2 → qadence-1.11.3}/qadence/exceptions/__init__.py +0 -0
  99. {qadence-1.11.2 → qadence-1.11.3}/qadence/exceptions/exceptions.py +0 -0
  100. {qadence-1.11.2 → qadence-1.11.3}/qadence/execution.py +0 -0
  101. {qadence-1.11.2 → qadence-1.11.3}/qadence/extensions.py +0 -0
  102. {qadence-1.11.2 → qadence-1.11.3}/qadence/libs.py +0 -0
  103. {qadence-1.11.2 → qadence-1.11.3}/qadence/log_config.yaml +0 -0
  104. {qadence-1.11.2 → qadence-1.11.3}/qadence/logger.py +0 -0
  105. {qadence-1.11.2 → qadence-1.11.3}/qadence/measurements/__init__.py +0 -0
  106. {qadence-1.11.2 → qadence-1.11.3}/qadence/measurements/protocols.py +0 -0
  107. {qadence-1.11.2 → qadence-1.11.3}/qadence/measurements/samples.py +0 -0
  108. {qadence-1.11.2 → qadence-1.11.3}/qadence/measurements/shadow.py +0 -0
  109. {qadence-1.11.2 → qadence-1.11.3}/qadence/measurements/tomography.py +0 -0
  110. {qadence-1.11.2 → qadence-1.11.3}/qadence/measurements/utils.py +0 -0
  111. {qadence-1.11.2 → qadence-1.11.3}/qadence/mitigations/__init__.py +0 -0
  112. {qadence-1.11.2 → qadence-1.11.3}/qadence/mitigations/analog_zne.py +0 -0
  113. {qadence-1.11.2 → qadence-1.11.3}/qadence/mitigations/protocols.py +0 -0
  114. {qadence-1.11.2 → qadence-1.11.3}/qadence/mitigations/readout.py +0 -0
  115. {qadence-1.11.2 → qadence-1.11.3}/qadence/ml_tools/callbacks/__init__.py +0 -0
  116. {qadence-1.11.2 → qadence-1.11.3}/qadence/ml_tools/callbacks/callback.py +0 -0
  117. {qadence-1.11.2 → qadence-1.11.3}/qadence/ml_tools/callbacks/callbackmanager.py +0 -0
  118. {qadence-1.11.2 → qadence-1.11.3}/qadence/ml_tools/callbacks/saveload.py +0 -0
  119. {qadence-1.11.2 → qadence-1.11.3}/qadence/ml_tools/callbacks/writer_registry.py +0 -0
  120. {qadence-1.11.2 → qadence-1.11.3}/qadence/ml_tools/config.py +0 -0
  121. {qadence-1.11.2 → qadence-1.11.3}/qadence/ml_tools/constructors.py +0 -0
  122. {qadence-1.11.2 → qadence-1.11.3}/qadence/ml_tools/data.py +0 -0
  123. {qadence-1.11.2 → qadence-1.11.3}/qadence/ml_tools/information/__init__.py +0 -0
  124. {qadence-1.11.2 → qadence-1.11.3}/qadence/ml_tools/information/information_content.py +0 -0
  125. {qadence-1.11.2 → qadence-1.11.3}/qadence/ml_tools/loss/__init__.py +0 -0
  126. {qadence-1.11.2 → qadence-1.11.3}/qadence/ml_tools/loss/loss.py +0 -0
  127. {qadence-1.11.2 → qadence-1.11.3}/qadence/ml_tools/models.py +0 -0
  128. {qadence-1.11.2 → qadence-1.11.3}/qadence/ml_tools/optimize_step.py +0 -0
  129. {qadence-1.11.2 → qadence-1.11.3}/qadence/ml_tools/parameters.py +0 -0
  130. {qadence-1.11.2 → qadence-1.11.3}/qadence/ml_tools/qcnn_model.py +0 -0
  131. {qadence-1.11.2 → qadence-1.11.3}/qadence/ml_tools/stages.py +0 -0
  132. {qadence-1.11.2 → qadence-1.11.3}/qadence/ml_tools/tensors.py +0 -0
  133. {qadence-1.11.2 → qadence-1.11.3}/qadence/ml_tools/train_utils/__init__.py +0 -0
  134. {qadence-1.11.2 → qadence-1.11.3}/qadence/ml_tools/train_utils/accelerator.py +0 -0
  135. {qadence-1.11.2 → qadence-1.11.3}/qadence/ml_tools/train_utils/base_trainer.py +0 -0
  136. {qadence-1.11.2 → qadence-1.11.3}/qadence/ml_tools/train_utils/config_manager.py +0 -0
  137. {qadence-1.11.2 → qadence-1.11.3}/qadence/ml_tools/train_utils/distribution.py +0 -0
  138. {qadence-1.11.2 → qadence-1.11.3}/qadence/ml_tools/train_utils/execution.py +0 -0
  139. {qadence-1.11.2 → qadence-1.11.3}/qadence/ml_tools/trainer.py +0 -0
  140. {qadence-1.11.2 → qadence-1.11.3}/qadence/ml_tools/utils.py +0 -0
  141. {qadence-1.11.2 → qadence-1.11.3}/qadence/noise/__init__.py +0 -0
  142. {qadence-1.11.2 → qadence-1.11.3}/qadence/noise/protocols.py +0 -0
  143. {qadence-1.11.2 → qadence-1.11.3}/qadence/operations/__init__.py +0 -0
  144. {qadence-1.11.2 → qadence-1.11.3}/qadence/operations/analog.py +0 -0
  145. {qadence-1.11.2 → qadence-1.11.3}/qadence/operations/control_ops.py +0 -0
  146. {qadence-1.11.2 → qadence-1.11.3}/qadence/operations/ham_evo.py +0 -0
  147. {qadence-1.11.2 → qadence-1.11.3}/qadence/operations/parametric.py +0 -0
  148. {qadence-1.11.2 → qadence-1.11.3}/qadence/operations/primitive.py +0 -0
  149. {qadence-1.11.2 → qadence-1.11.3}/qadence/overlap.py +0 -0
  150. {qadence-1.11.2 → qadence-1.11.3}/qadence/parameters.py +0 -0
  151. {qadence-1.11.2 → qadence-1.11.3}/qadence/pasqal_cloud_connection.py +0 -0
  152. {qadence-1.11.2 → qadence-1.11.3}/qadence/protocols.py +0 -0
  153. {qadence-1.11.2 → qadence-1.11.3}/qadence/py.typed +0 -0
  154. {qadence-1.11.2 → qadence-1.11.3}/qadence/qubit_support.py +0 -0
  155. {qadence-1.11.2 → qadence-1.11.3}/qadence/register.py +0 -0
  156. {qadence-1.11.2 → qadence-1.11.3}/qadence/serial_expr_grammar.peg +0 -0
  157. {qadence-1.11.2 → qadence-1.11.3}/qadence/serialization.py +0 -0
  158. {qadence-1.11.2 → qadence-1.11.3}/qadence/states.py +0 -0
  159. {qadence-1.11.2 → qadence-1.11.3}/qadence/transpile/apply_fn.py +0 -0
  160. {qadence-1.11.2 → qadence-1.11.3}/qadence/transpile/circuit.py +0 -0
  161. {qadence-1.11.2 → qadence-1.11.3}/qadence/transpile/digitalize.py +0 -0
  162. {qadence-1.11.2 → qadence-1.11.3}/qadence/transpile/flatten.py +0 -0
  163. {qadence-1.11.2 → qadence-1.11.3}/qadence/transpile/invert.py +0 -0
  164. {qadence-1.11.2 → qadence-1.11.3}/qadence/transpile/noise.py +0 -0
  165. {qadence-1.11.2 → qadence-1.11.3}/qadence/transpile/transpile.py +0 -0
  166. {qadence-1.11.2 → qadence-1.11.3}/renovate.json +0 -0
  167. {qadence-1.11.2 → qadence-1.11.3}/setup.py +0 -0
{qadence-1.11.2 → qadence-1.11.3}/.pre-commit-config.yaml
@@ -23,7 +23,7 @@ repos:
     rev: v1.14.1
     hooks:
       - id: mypy
-        args: [--install-types, --non-interactive]
+        args: [--install-types, --non-interactive, --explicit-package-bases]
         exclude: examples|docs
 
   - repo: https://github.com/DanielNoord/pydocstringformatter
{qadence-1.11.2 → qadence-1.11.3}/PKG-INFO
@@ -1,8 +1,8 @@
 Metadata-Version: 2.4
 Name: qadence
-Version: 1.11.2
+Version: 1.11.3
 Summary: Pasqal interface for circuit-based quantum computing SDKs
-Author-email: Aleksander Wennersteen <aleksander.wennersteen@pasqal.com>, Gert-Jan Both <gert-jan.both@pasqal.com>, Niklas Heim <niklas.heim@pasqal.com>, Mario Dagrada <mario.dagrada@pasqal.com>, Vincent Elfving <vincent.elfving@pasqal.com>, Dominik Seitz <dominik.seitz@pasqal.com>, Roland Guichard <roland.guichard@pasqal.com>, "Joao P. Moutinho" <joao.moutinho@pasqal.com>, Vytautas Abramavicius <vytautas.abramavicius@pasqal.com>, Gergana Velikova <gergana.velikova@pasqal.com>, Eduardo Maschio <eduardo.maschio@pasqal.com>, Smit Chaudhary <smit.chaudhary@pasqal.com>, Ignacio Fernández Graña <ignacio.fernandez-grana@pasqal.com>, Charles Moussa <charles.moussa@pasqal.com>, Giorgio Tosti Balducci <giorgio.tosti-balducci@pasqal.com>, Daniele Cucurachi <daniele.cucurachi@pasqal.com>, Pim Venderbosch <pim.venderbosch@pasqal.com>, Manu Lahariya <manu.lahariya@pasqal.com>
+Author-email: Aleksander Wennersteen <aleksander.wennersteen@pasqal.com>, Gert-Jan Both <gert-jan.both@pasqal.com>, Niklas Heim <niklas.heim@pasqal.com>, Mario Dagrada <mario.dagrada@pasqal.com>, Vincent Elfving <vincent.elfving@pasqal.com>, Dominik Seitz <dominik.seitz@pasqal.com>, Roland Guichard <roland.guichard@pasqal.com>, "Joao P. Moutinho" <joao.moutinho@pasqal.com>, Vytautas Abramavicius <vytautas.abramavicius@pasqal.com>, Gergana Velikova <gergana.velikova@pasqal.com>, Eduardo Maschio <eduardo.maschio@pasqal.com>, Smit Chaudhary <smit.chaudhary@pasqal.com>, Ignacio Fernández Graña <ignacio.fernandez-grana@pasqal.com>, Charles Moussa <charles.moussa@pasqal.com>, Giorgio Tosti Balducci <giorgio.tosti-balducci@pasqal.com>, Daniele Cucurachi <daniele.cucurachi@pasqal.com>, Pim Venderbosch <pim.venderbosch@pasqal.com>, Manu Lahariya <manu.lahariya@pasqal.com>, Sungwoo Ahn <sungwoo.ahn@pasqal.com>
 License: PASQAL OPEN-SOURCE SOFTWARE LICENSE (MIT-derived)
 License-File: LICENSE
 Classifier: License :: Other/Proprietary License
@@ -24,7 +24,7 @@ Requires-Dist: nevergrad
 Requires-Dist: numpy
 Requires-Dist: openfermion
 Requires-Dist: pasqal-cloud
-Requires-Dist: pyqtorch==1.7.5
+Requires-Dist: pyqtorch==1.7.6
 Requires-Dist: pyyaml
 Requires-Dist: rich
 Requires-Dist: scipy
@@ -44,15 +44,25 @@ Requires-Dist: nvidia-pyindex; extra == 'dlprof'
 Provides-Extra: horqrux
 Requires-Dist: einops; extra == 'horqrux'
 Requires-Dist: flax; extra == 'horqrux'
-Requires-Dist: horqrux==0.8.0; extra == 'horqrux'
+Requires-Dist: horqrux==0.8.1; extra == 'horqrux'
 Requires-Dist: jax; extra == 'horqrux'
 Requires-Dist: jaxopt; extra == 'horqrux'
 Requires-Dist: optax; extra == 'horqrux'
 Requires-Dist: sympy2jax; extra == 'horqrux'
+Provides-Extra: hub
+Requires-Dist: qadence-measurement; extra == 'hub'
+Requires-Dist: qadence-mitigation; extra == 'hub'
+Requires-Dist: qadence-model; extra == 'hub'
 Provides-Extra: libs
 Requires-Dist: qadence-libs; extra == 'libs'
+Provides-Extra: measurement
+Requires-Dist: qadence-measurement; extra == 'measurement'
+Provides-Extra: mitigation
+Requires-Dist: qadence-mitigation; extra == 'mitigation'
 Provides-Extra: mlflow
 Requires-Dist: mlflow; extra == 'mlflow'
+Provides-Extra: model
+Requires-Dist: qadence-model; extra == 'model'
 Provides-Extra: protocols
 Requires-Dist: qadence-protocols; extra == 'protocols'
 Provides-Extra: pulser
{qadence-1.11.2 → qadence-1.11.3}/mkdocs.yml
@@ -48,6 +48,7 @@ nav:
     - Classification with QNN: tutorials/qml/classification.md
     - Solving MaxCut with QAOA: tutorials/qml/qaoa.md
     - Solving a 1D ODE: tutorials/qml/dqc_1d.md
+    - QCNN model: tutorials/qml/qcnn.md
 
   - ML Tools:
     - tutorials/qml/ml_tools/intro.md
{qadence-1.11.2 → qadence-1.11.3}/pyproject.toml
@@ -25,10 +25,11 @@ authors = [
     { name = "Daniele Cucurachi", email = "daniele.cucurachi@pasqal.com" },
     { name = "Pim Venderbosch", email = "pim.venderbosch@pasqal.com" },
     { name = "Manu Lahariya", email = "manu.lahariya@pasqal.com" },
+    { name = "Sungwoo Ahn", email = "sungwoo.ahn@pasqal.com" },
 ]
 requires-python = ">=3.9"
 license = { text = "PASQAL OPEN-SOURCE SOFTWARE LICENSE (MIT-derived)" }
-version = "1.11.2"
+version = "1.11.3"
 classifiers = [
     "License :: Other/Proprietary License",
     "Programming Language :: Python",
@@ -53,7 +54,7 @@ dependencies = [
     "jsonschema",
     "nevergrad",
     "scipy",
-    "pyqtorch==1.7.5",
+    "pyqtorch==1.7.6",
     "pyyaml",
     "matplotlib",
     "Arpeggio==2.0.2",
@@ -77,7 +78,7 @@ visualization = [
     # "scour",
 ]
 horqrux = [
-    "horqrux==0.8.0",
+    "horqrux==0.8.1",
     "jax",
     "flax",
     "optax",
@@ -90,7 +91,14 @@ libs = ["qadence-libs"]
 dlprof = ["nvidia-pyindex", "nvidia-dlprof[pytorch]"]
 mlflow = ["mlflow"]
 all = ["pulser", "visualization", "protocols", "libs", "mlflow"]
-
+hub = [
+    "qadence-mitigation",
+    "qadence-measurement",
+    "qadence-model",
+]
+mitigation = ["qadence-mitigation"]
+measurement = ["qadence-measurement"]
+model = ["qadence-model"]
 
 [tool.hatch.envs.default]
 dependencies = [
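
Note: the four new extras above mirror the qadence-measurement, qadence-mitigation and qadence-model hub packages. A quick sanity check after something like `pip install "qadence[hub]"` (a minimal sketch; only the distribution names are taken from the diff, the rest is illustrative):

    import importlib.metadata

    # Each extra maps to one hub distribution; `hub` pulls in all three.
    for dist in ("qadence-measurement", "qadence-mitigation", "qadence-model"):
        try:
            print(dist, importlib.metadata.version(dist))
        except importlib.metadata.PackageNotFoundError:
            print(dist, "is not installed")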

{qadence-1.11.2 → qadence-1.11.3}/qadence/backend.py
@@ -25,7 +25,14 @@ from qadence.measurements import Measurements
 from qadence.mitigations import Mitigations
 from qadence.noise import NoiseHandler
 from qadence.parameters import stringify
-from qadence.types import ArrayLike, BackendName, DiffMode, Endianness, Engine, ParamDictType
+from qadence.types import (
+    ArrayLike,
+    BackendName,
+    DiffMode,
+    Endianness,
+    Engine,
+    ParamDictType,
+)
 
 logger = getLogger(__name__)
 
@@ -215,7 +222,7 @@ class Backend(ABC):
         if observable is not None:
             observable = observable if isinstance(observable, list) else [observable]
             conv_obs = []
-            obs_embedding_fn_list = []
+            obs_embedding_fns = []
 
             for obs in observable:
                 obs = check_observable(obs)
@@ -224,13 +231,18 @@
                     c_obs.abstract, self.config._use_gate_params, self.engine
                 )
                 params.update(obs_params)
-                obs_embedding_fn_list.append(obs_embedding_fn)
+                obs_embedding_fns.append(obs_embedding_fn)
                 conv_obs.append(c_obs)
 
             def embedding_fn_dict(a: dict, b: dict) -> dict:
-                embedding_dict = circ_embedding_fn(a, b)
-                for o in obs_embedding_fn_list:
-                    embedding_dict.update(o(a, b))
+                if "circuit" in b or "observables" in b:
+                    embedding_dict = {"circuit": circ_embedding_fn(a, b), "observables": dict()}
+                    for obs_embedding_fn in obs_embedding_fns:
+                        embedding_dict["observables"].update(obs_embedding_fn(a, b))
+                else:
+                    embedding_dict = circ_embedding_fn(a, b)
+                    for obs_embedding_fn in obs_embedding_fns:
+                        embedding_dict.update(obs_embedding_fn(a, b))
                 return embedding_dict
 
             return Converted(conv_circ, conv_obs, embedding_fn_dict, params)
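
The new branch above introduces "separated" parameter dictionaries: when the values passed to the embedding function carry a "circuit" or "observables" key, embedded circuit and observable parameters are kept under those two keys instead of being merged into one flat mapping. A minimal sketch of the two input shapes (parameter names and tensor values are illustrative, not taken from qadence):

    import torch

    # flat style: unchanged behaviour, one merged mapping
    flat_values = {"theta": torch.tensor([0.5]), "w": torch.tensor([1.0])}

    # separated style, new in 1.11.3: circuit and observable parameters
    # travel under their own keys and are embedded independently
    separated_values = {
        "circuit": {"theta": torch.tensor([0.5])},
        "observables": {"w": torch.tensor([1.0])},
    }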
@@ -316,7 +328,11 @@ class Backend(ABC):
         raise NotImplementedError
 
     @abstractmethod
-    def assign_parameters(self, circuit: ConvertedCircuit, param_values: dict[str, Tensor]) -> Any:
+    def assign_parameters(
+        self,
+        circuit: ConvertedCircuit,
+        param_values: dict[str, Tensor] | dict[str, dict[str, Tensor]],
+    ) -> Any:
         raise NotImplementedError
 
     @staticmethod
{qadence-1.11.2 → qadence-1.11.3}/qadence/backends/horqrux/backend.py
@@ -134,19 +134,32 @@ class Backend(BackendInterface):
         Returns:
             A jax.Array of shape (batch_size, n_observables)
         """
-        observable = observable if isinstance(observable, list) else [observable]
-        batch_size = max([arr.size for arr in param_values.values()])
+        observables = observable if isinstance(observable, list) else [observable]
+        if "observables" in param_values or "circuit" in param_values:
+            raise NotImplementedError("The Horqrux backend does not support separated parameters.")
+        else:
+            merged_params = param_values
+        batch_size = max([arr.size for arr in param_values.values()])  # type: ignore[union-attr]
         n_obs = len(observable)
 
         def _expectation(params: ParamDictType) -> ArrayLike:
+            param_circuits = params["circuit"] if "circuit" in params else params
+            param_observables = params["observables"] if "observables" in params else params
             out_state = self.run(
-                circuit, params, state, endianness, horqify_state=True, unhorqify_state=False
+                circuit,
+                param_circuits,
+                state,
+                endianness,
+                horqify_state=True,
+                unhorqify_state=False,
+            )
+            return jnp.array(
+                [observable.native(out_state, param_observables) for observable in observables]
             )
-            return jnp.array([o.native(out_state, params) for o in observable])
 
         if batch_size > 1:  # We vmap for batch_size > 1
-            expvals = jax.vmap(_expectation, in_axes=({k: 0 for k in param_values.keys()},))(
-                uniform_batchsize(param_values)
+            expvals = jax.vmap(_expectation, in_axes=({k: 0 for k in merged_params.keys()},))(
+                uniform_batchsize(merged_params)
             )
         else:
             expvals = _expectation(param_values)
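
The Horqrux backend opts out of the new feature: a separated dictionary is rejected up front, as the hunk above shows. Sketch only (the backend, circuit and observable objects are placeholders, not constructed here):

    import jax.numpy as jnp

    separated = {"circuit": {"theta": jnp.array([0.1])}, "observables": {}}
    # backend.expectation(circuit, observable, separated) would raise:
    #   NotImplementedError: The Horqrux backend does not support separated parameters.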
{qadence-1.11.2 → qadence-1.11.3}/qadence/backends/jax_utils.py
@@ -19,6 +19,7 @@ from qadence.blocks import (
 )
 from qadence.blocks.block_to_tensor import _gate_parameters
 from qadence.types import Endianness, ParamDictType
+from qadence.utils import merge_separate_params
 
 
 def jarr_to_tensor(arr: Array, dtype: Any = cdouble) -> Tensor:
@@ -52,9 +53,11 @@ def horqify(state: Array) -> Array:
 
 
 def uniform_batchsize(param_values: ParamDictType) -> ParamDictType:
-    max_batch_size = max(p.size for p in param_values.values())
+    if "observables" in param_values or "circuit" in param_values:
+        param_values = merge_separate_params(param_values)
+    max_batch_size = max(p.size for p in param_values.values())  # type: ignore[union-attr]
     batched_values = {
-        k: (v if v.size == max_batch_size else v.repeat(max_batch_size))
+        k: (v if v.size == max_batch_size else v.repeat(max_batch_size))  # type: ignore[union-attr]
         for k, v in param_values.items()
     }
     return batched_values
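
merge_separate_params comes from qadence/utils.py, whose body is not part of this diff. Judging from its call sites here and in qadence/blocks/embedding.py, it presumably flattens a separated dictionary back into one mapping, roughly like this hypothetical reconstruction (not the actual implementation):

    def merge_separate_params_sketch(param_values: dict) -> dict:
        # Collapse {"circuit": {...}, "observables": {...}} into one flat dict.
        merged: dict = {}
        for key, value in param_values.items():
            if key in ("circuit", "observables") and isinstance(value, dict):
                merged.update(value)
            else:
                merged[key] = value
        return merged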
{qadence-1.11.2 → qadence-1.11.3}/qadence/backends/pulser/backend.py
@@ -28,7 +28,7 @@ from qadence.noise import NoiseHandler
 from qadence.overlap import overlap_exact
 from qadence.register import Register
 from qadence.transpile import transpile
-from qadence.types import BackendName, DeviceType, Endianness, Engine, NoiseProtocol
+from qadence.types import BackendName, DeviceType, Endianness, Engine, NoiseProtocol, ParamDictType
 
 from .channels import GLOBAL_CHANNEL, LOCAL_CHANNEL
 from .cloud import get_client
@@ -183,7 +183,7 @@ class Backend(BackendInterface):
     def run(
         self,
         circuit: ConvertedCircuit,
-        param_values: dict[str, Tensor] = {},
+        param_values: ParamDictType = {},
         state: Tensor | None = None,
         endianness: Endianness = Endianness.BIG,
         noise: NoiseHandler | None = None,
@@ -235,7 +235,7 @@
         self,
         circuit: ConvertedCircuit,
         noise: NoiseHandler,
-        param_values: dict[str, Tensor] = dict(),
+        param_values: ParamDictType = dict(),
         state: Tensor | None = None,
         endianness: Endianness = Endianness.BIG,
     ) -> Tensor:
@@ -284,7 +284,7 @@
     def sample(
         self,
         circuit: ConvertedCircuit,
-        param_values: dict[str, Tensor] = {},
+        param_values: ParamDictType = {},
         n_shots: int = 1,
         state: Tensor | None = None,
         noise: NoiseHandler | None = None,
@@ -322,7 +322,7 @@
         self,
         circuit: ConvertedCircuit,
         observable: list[ConvertedObservable] | ConvertedObservable,
-        param_values: dict[str, Tensor] = {},
+        param_values: ParamDictType = {},
         state: Tensor | None = None,
         measurement: Measurements | None = None,
         noise: NoiseHandler | None = None,
@@ -330,14 +330,19 @@
         endianness: Endianness = Endianness.BIG,
     ) -> Tensor:
         observable = observable if isinstance(observable, list) else [observable]
+        param_circuit = param_values["circuit"] if "circuit" in param_values else param_values
+        param_observables = (
+            param_values["observables"] if "observables" in param_values else param_values
+        )
         if mitigation is None:
             if noise is None:
                 state = self.run(
-                    circuit, param_values=param_values, state=state, endianness=endianness
+                    circuit, param_values=param_circuit, state=state, endianness=endianness
                 )
                 support = sorted(list(circuit.abstract.register.support))
                 res_list = [
-                    obs.native(state, param_values, qubit_support=support) for obs in observable
+                    obs.native(state, param_observables, qubit_support=support)
+                    for obs in observable
                 ]
                 res = torch.transpose(torch.stack(res_list), 0, 1)
                 res = res if len(res.shape) > 0 else res.reshape(1)
@@ -345,7 +350,7 @@
             elif noise is not None:
                 dms = self.run(
                     circuit=circuit,
-                    param_values=param_values,
+                    param_values=param_circuit,
                     state=state,
                     endianness=endianness,
                     noise=noise,
@@ -353,7 +358,9 @@
                 support = sorted(list(circuit.abstract.register.support))
                 res_list = [
                     [
-                        obs.native(dm.squeeze(), param_values, qubit_support=support, noise=noise)
+                        obs.native(
+                            dm.squeeze(), param_observables, qubit_support=support, noise=noise
+                        )
                         for dm in dms
                     ]
                     for obs in observable
{qadence-1.11.2 → qadence-1.11.3}/qadence/backends/pyqtorch/backend.py
@@ -31,7 +31,7 @@ from qadence.transpile import (
     set_noise,
     transpile,
 )
-from qadence.types import BackendName, Endianness, Engine
+from qadence.types import BackendName, Endianness, Engine, ParamDictType
 
 from .config import Configuration, default_passes
 from .convert_ops import convert_block, convert_readout_noise
@@ -182,16 +182,21 @@ class Backend(BackendInterface):
         self,
         circuit: ConvertedCircuit,
         observable: list[ConvertedObservable] | ConvertedObservable,
-        param_values: dict[str, Tensor] = {},
+        param_values: ParamDictType = {},
         state: Tensor | None = None,
         measurement: Measurements | None = None,
         noise: NoiseHandler | None = None,
         endianness: Endianness = Endianness.BIG,
     ) -> Tensor:
         set_block_and_readout_noises(circuit, noise, self.config)
+        param_circuit = param_values["circuit"] if "circuit" in param_values else param_values
+        param_observables = (
+            param_values["observables"] if "observables" in param_values else param_values
+        )
+
         state = self.run(
             circuit,
-            param_values=param_values,
+            param_values=param_circuit,
             state=state,
             endianness=endianness,
             pyqify_state=True,
@@ -200,7 +205,7 @@
         )
         observable = observable if isinstance(observable, list) else [observable]
         _expectation = torch.hstack(
-            [obs.native.expectation(state, param_values).reshape(-1, 1) for obs in observable]
+            [obs.native.expectation(state, param_observables).reshape(-1, 1) for obs in observable]
         )
         return _expectation
 
@@ -208,7 +213,7 @@
         self,
         circuit: ConvertedCircuit,
         observable: list[ConvertedObservable] | ConvertedObservable,
-        param_values: dict[str, Tensor] = {},
+        param_values: ParamDictType = {},
         state: Tensor | None = None,
         measurement: Measurements | None = None,
         noise: NoiseHandler | None = None,
@@ -230,9 +235,18 @@
 
         list_expvals = []
         observables = observable if isinstance(observable, list) else [observable]
-        for vals in to_list_of_dicts(param_values):
-            wf = self.run(circuit, vals, state, endianness, pyqify_state=True, unpyqify_state=False)
-            exs = torch.cat([obs.native.expectation(wf, vals) for obs in observables], 0)
+        param_circuits = param_values["circuit"] if "circuit" in param_values else param_values
+        param_observables = (
+            param_values["observables"] if "observables" in param_values else param_values
+        )
+
+        for vals_circ, vals_obs in zip(
+            to_list_of_dicts(param_circuits), to_list_of_dicts(param_observables)
+        ):
+            wf = self.run(
+                circuit, vals_circ, state, endianness, pyqify_state=True, unpyqify_state=False
+            )
+            exs = torch.cat([obs.native.expectation(wf, vals_obs) for obs in observables], 0)
             list_expvals.append(exs)
 
         batch_expvals = torch.vstack(list_expvals)
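
Because the two to_list_of_dicts streams are consumed with zip, a separated dictionary must yield the same number of batch entries on the circuit and observable sides (with a flat dictionary both names alias the same object, so this holds trivially). Illustration of the pairing only, with plain dicts instead of qadence objects:

    circ_batches = [{"theta": 0.1}, {"theta": 0.2}]
    obs_batches = [{"w": 1.0}, {"w": 2.0}]
    for vals_circ, vals_obs in zip(circ_batches, obs_batches):
        print(vals_circ, vals_obs)  # i-th circuit values with i-th observable values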
@@ -242,7 +256,7 @@
         self,
         circuit: ConvertedCircuit,
         observable: list[ConvertedObservable] | ConvertedObservable,
-        param_values: dict[str, Tensor] = {},
+        param_values: ParamDictType = {},
         state: Tensor | None = None,
         measurement: Measurements | None = None,
         noise: NoiseHandler | None = None,
@@ -269,7 +283,7 @@
     def sample(
         self,
         circuit: ConvertedCircuit,
-        param_values: dict[str, Tensor] = {},
+        param_values: ParamDictType = {},
         n_shots: int = 1,
         state: Tensor | None = None,
         noise: NoiseHandler | None = None,
@@ -295,7 +309,7 @@
         samples = apply_mitigation(noise=noise, mitigation=mitigation, samples=samples)
         return samples
 
-    def assign_parameters(self, circuit: ConvertedCircuit, param_values: dict[str, Tensor]) -> Any:
+    def assign_parameters(self, circuit: ConvertedCircuit, param_values: ParamDictType) -> Any:
         raise NotImplementedError
 
     @staticmethod
{qadence-1.11.2 → qadence-1.11.3}/qadence/blocks/embedding.py
@@ -16,7 +16,14 @@ from qadence.blocks.utils import (
     uuid_to_expression,
 )
 from qadence.parameters import evaluate, make_differentiable, stringify
-from qadence.types import ArrayLike, DifferentiableExpression, Engine, ParamDictType, TNumber
+from qadence.types import (
+    ArrayLike,
+    DifferentiableExpression,
+    Engine,
+    ParamDictType,
+    TNumber,
+)
+from qadence.utils import merge_separate_params
 
 
 def _concretize_parameter(engine: Engine) -> Callable:
  def _concretize_parameter(engine: Engine) -> Callable:
@@ -110,6 +117,8 @@ def embedding(
110
117
 
111
118
  def embedding_fn(params: ParamDictType, inputs: ParamDictType) -> ParamDictType:
112
119
  embedded_params: dict[sympy.Expr, ArrayLike] = {}
120
+ if "circuit" in inputs or "observables" in inputs:
121
+ inputs = merge_separate_params(inputs)
113
122
  for expr, fn in embeddings.items():
114
123
  angle: ArrayLike
115
124
  values = {}
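
With this guard, embedding_fn accepts either calling convention; separated inputs are flattened via merge_separate_params before the expressions are evaluated. Sketch (parameter names illustrative, embedding_fn as returned by the surrounding embedding function):

    import torch

    flat = {"x": torch.tensor([0.3])}
    separated = {"circuit": {"x": torch.tensor([0.3])}, "observables": {}}
    # embedding_fn(params, flat) and embedding_fn(params, separated)
    # now embed the same values.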
{qadence-1.11.2 → qadence-1.11.3}/qadence/blocks/utils.py
@@ -7,6 +7,7 @@ from logging import getLogger
 from typing import Generator, List, Type, TypeVar, Union, get_args
 
 from sympy import Array, Basic, Expr
+import torch
 from torch import Tensor
 
 from qadence.blocks import (
@@ -292,31 +293,39 @@
 
     result = {}
     for uuid, b in uuid_to_block(block).items():
-        if b.eigenvalues_generator is not None:
-            if b.eigenvalues_generator.numel() > 0:
-                # GPSR assumes a factor 0.5 for differentiation
-                # so need rescaling
-                if isinstance(b, TimeEvolutionBlock) and rescale_eigenvals_timeevo:
-                    if b.eigenvalues_generator.numel() > 1:
-                        result[uuid] = (
-                            b.eigenvalues_generator * 2.0,
-                            0.5,
-                        )
+        eigs_generator = None
+
+        # this is to handle the case for the N operator
+        try:
+            eigs_generator = b.eigenvalues_generator
+        except ValueError:
+            result[uuid] = (torch.zeros(2), 1.0)
+        else:
+            if eigs_generator is not None:
+                if eigs_generator.numel() > 0:
+                    # GPSR assumes a factor 0.5 for differentiation
+                    # so need rescaling
+                    if isinstance(b, TimeEvolutionBlock) and rescale_eigenvals_timeevo:
+                        if eigs_generator.numel() > 1:
+                            result[uuid] = (
+                                eigs_generator * 2.0,
+                                0.5,
+                            )
+                        else:
+                            result[uuid] = (
+                                eigs_generator * 2.0,
+                                (
+                                    1.0 / (eigs_generator.item() * 2.0)
+                                    if len(eigs_generator) == 1
+                                    else 1.0
+                                ),
+                            )
                     else:
-                        result[uuid] = (
-                            b.eigenvalues_generator * 2.0,
-                            (
-                                1.0 / (b.eigenvalues_generator.item() * 2.0)
-                                if len(b.eigenvalues_generator) == 1
-                                else 1.0
-                            ),
-                        )
-                else:
-                    result[uuid] = (b.eigenvalues_generator, 1.0)
-
-        # leave only angle parameter uuid with eigenvals for ConstantAnalogRotation block
-        if isinstance(block, ConstantAnalogRotation):
-            break
+                        result[uuid] = (eigs_generator, 1.0)
+
+            # leave only angle parameter uuid with eigenvals for ConstantAnalogRotation block
+            if isinstance(block, ConstantAnalogRotation):
+                break
 
     return result
 
{qadence-1.11.2 → qadence-1.11.3}/qadence/engines/differentiable_backend.py
@@ -12,7 +12,13 @@ from qadence.circuit import QuantumCircuit
 from qadence.measurements import Measurements
 from qadence.mitigations import Mitigations
 from qadence.noise import NoiseHandler
-from qadence.types import ArrayLike, DiffMode, Endianness, Engine, ParamDictType
+from qadence.types import (
+    ArrayLike,
+    DiffMode,
+    Endianness,
+    Engine,
+    ParamDictType,
+)
 
 
 @dataclass(frozen=True, eq=True)
{qadence-1.11.2 → qadence-1.11.3}/qadence/engines/jax/differentiable_backend.py
@@ -8,7 +8,13 @@ from qadence.engines.jax.differentiable_expectation import DifferentiableExpectation
 from qadence.measurements import Measurements
 from qadence.mitigations import Mitigations
 from qadence.noise import NoiseHandler
-from qadence.types import ArrayLike, DiffMode, Endianness, Engine, ParamDictType
+from qadence.types import (
+    ArrayLike,
+    DiffMode,
+    Endianness,
+    Engine,
+    ParamDictType,
+)
 
 
 class DifferentiableBackend(DifferentiableBackendInterface):
{qadence-1.11.2 → qadence-1.11.3}/qadence/engines/torch/differentiable_backend.py
@@ -12,7 +12,13 @@ from qadence.backends.parameter_shift_rules import general_psr
 from qadence.measurements import Measurements
 from qadence.mitigations import Mitigations
 from qadence.noise import NoiseHandler
-from qadence.types import ArrayLike, DiffMode, Endianness, Engine, ParamDictType
+from qadence.types import (
+    ArrayLike,
+    DiffMode,
+    Endianness,
+    Engine,
+    ParamDictType,
+)
 
 
 class DifferentiableBackend(DifferentiableBackendInterface):
{qadence-1.11.2 → qadence-1.11.3}/qadence/engines/torch/differentiable_expectation.py
@@ -20,7 +20,7 @@ from qadence.measurements import Measurements
 from qadence.mitigations import Mitigations
 from qadence.ml_tools import promote_to_tensor
 from qadence.noise import NoiseHandler
-from qadence.types import Endianness
+from qadence.types import Endianness, ParamDictType
 
 
 class PSRExpectation(Function):
@@ -94,7 +94,7 @@ class DifferentiableExpectation:
     backend: QuantumBackend
     circuit: ConvertedCircuit
     observable: list[ConvertedObservable] | ConvertedObservable
-    param_values: dict[str, Tensor]
+    param_values: ParamDictType
     state: Tensor | None = None
     measurement: Measurements | None = None
    noise: NoiseHandler | None = None
@@ -135,8 +135,6 @@
         self.observable = (
             self.observable if isinstance(self.observable, list) else [self.observable]
         )
-        if len(self.observable) > 1:
-            raise NotImplementedError("AdjointExpectation currently only supports one observable.")
 
         n_qubits = self.circuit.abstract.n_qubits
         values_batch_size = infer_batchsize(self.param_values)
@@ -150,18 +148,21 @@
             else self.state
         )
         batch_size = max(values_batch_size, self.state.size(-1))
-        return (
-            AdjointExpectation.apply(
+
+        def expectation_fn(i: int) -> Tensor:
+            return AdjointExpectation.apply(
                 self.circuit.native,
                 self.state,
-                self.observable[0].native,  # Currently, adjoint only supports a single observable.
+                self.observable[i].native,  # Currently, adjoint only supports a single observable.
                 None,
                 self.param_values.keys(),
                 *self.param_values.values(),
-            )
-            .unsqueeze(1)
-            .reshape(batch_size, 1)
-        )  # we expect (batch_size, n_observables) shape
+            ).reshape(
+                batch_size, 1
+            )  # we expect (batch_size, n_observables) shape
+
+        expectation_list = [expectation_fn(i) for i in range(len(self.observable))]
+        return torch.vstack(expectation_list)
 
     def psr(self, psr_fn: Callable, **psr_args: int | float | None) -> Tensor:
         # wrapper which unpacks the parameters
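
The adjoint path thus trades its hard single-observable limit for one AdjointExpectation.apply call per observable, stacked with torch.vstack; note that the per-observable results are (batch_size, 1) columns, so vstack concatenates them along the first dimension. A shape check with dummy tensors:

    import torch

    batch_size, n_observables = 4, 3
    per_observable = [torch.zeros(batch_size, 1) for _ in range(n_observables)]
    print(torch.vstack(per_observable).shape)  # torch.Size([12, 1])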
{qadence-1.11.2 → qadence-1.11.3}/qadence/ml_tools/__init__.py
@@ -10,6 +10,7 @@ from .optimize_step import optimize_step as default_optimize_step
 from .parameters import get_parameters, num_parameters, set_parameters
 from .tensors import numpy_to_tensor, promote_to, promote_to_tensor
 from .trainer import Trainer
+from .qcnn_model import QCNN
 
 # Modules to be automatically added to the qadence namespace
 __all__ = [
@@ -25,4 +26,5 @@ __all__ = [
     "OptimizeResult",
     "Trainer",
     "write_checkpoint",
+    "QCNN",
 ]
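
With this re-export, the QCNN model (already present in qadence/ml_tools/qcnn_model.py, whose body this diff does not show) becomes importable at package level; its constructor signature is not assumed here:

    # New in 1.11.3: previously only reachable as qadence.ml_tools.qcnn_model.QCNN.
    from qadence.ml_tools import QCNN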