qadence 1.7.4__tar.gz → 1.7.6__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (154)
  1. qadence-1.7.6/.github/workflows/ISSUE_TEMPLATE/bug-report.yml +52 -0
  2. qadence-1.7.6/.github/workflows/ISSUE_TEMPLATE/config.yml +5 -0
  3. qadence-1.7.6/.github/workflows/ISSUE_TEMPLATE/new-feature.yml +36 -0
  4. {qadence-1.7.4 → qadence-1.7.6}/PKG-INFO +4 -4
  5. {qadence-1.7.4 → qadence-1.7.6}/pyproject.toml +6 -4
  6. {qadence-1.7.4 → qadence-1.7.6}/qadence/analog/addressing.py +7 -3
  7. {qadence-1.7.4 → qadence-1.7.6}/qadence/backends/api.py +9 -8
  8. {qadence-1.7.4 → qadence-1.7.6}/qadence/backends/gpsr.py +18 -2
  9. {qadence-1.7.4 → qadence-1.7.6}/qadence/backends/horqrux/convert_ops.py +1 -1
  10. {qadence-1.7.4 → qadence-1.7.6}/qadence/backends/pyqtorch/backend.py +8 -11
  11. {qadence-1.7.4 → qadence-1.7.6}/qadence/backends/pyqtorch/convert_ops.py +100 -123
  12. {qadence-1.7.4 → qadence-1.7.6}/qadence/backends/utils.py +1 -1
  13. {qadence-1.7.4 → qadence-1.7.6}/qadence/blocks/composite.py +5 -3
  14. {qadence-1.7.4 → qadence-1.7.6}/qadence/blocks/utils.py +36 -2
  15. {qadence-1.7.4 → qadence-1.7.6}/qadence/constructors/utils.py +26 -26
  16. {qadence-1.7.4 → qadence-1.7.6}/qadence/engines/jax/differentiable_expectation.py +1 -1
  17. {qadence-1.7.4 → qadence-1.7.6}/qadence/engines/torch/differentiable_expectation.py +17 -6
  18. {qadence-1.7.4 → qadence-1.7.6}/qadence/extensions.py +28 -8
  19. {qadence-1.7.4 → qadence-1.7.6}/qadence/ml_tools/__init__.py +2 -1
  20. {qadence-1.7.4 → qadence-1.7.6}/qadence/ml_tools/config.py +131 -25
  21. {qadence-1.7.4 → qadence-1.7.6}/qadence/ml_tools/constructors.py +39 -33
  22. {qadence-1.7.4 → qadence-1.7.6}/qadence/ml_tools/data.py +27 -1
  23. {qadence-1.7.4 → qadence-1.7.6}/qadence/ml_tools/optimize_step.py +3 -2
  24. {qadence-1.7.4 → qadence-1.7.6}/qadence/ml_tools/saveload.py +3 -2
  25. {qadence-1.7.4 → qadence-1.7.6}/qadence/ml_tools/train_grad.py +154 -94
  26. {qadence-1.7.4 → qadence-1.7.6}/qadence/ml_tools/train_no_grad.py +86 -40
  27. {qadence-1.7.4 → qadence-1.7.6}/qadence/model.py +47 -3
  28. {qadence-1.7.4 → qadence-1.7.6}/qadence/types.py +2 -2
  29. {qadence-1.7.4 → qadence-1.7.6}/.coveragerc +0 -0
  30. {qadence-1.7.4 → qadence-1.7.6}/.github/workflows/build_docs.yml +0 -0
  31. {qadence-1.7.4 → qadence-1.7.6}/.github/workflows/lint.yml +0 -0
  32. {qadence-1.7.4 → qadence-1.7.6}/.github/workflows/test_all.yml +0 -0
  33. {qadence-1.7.4 → qadence-1.7.6}/.github/workflows/test_examples.yml +0 -0
  34. {qadence-1.7.4 → qadence-1.7.6}/.github/workflows/test_fast.yml +0 -0
  35. {qadence-1.7.4 → qadence-1.7.6}/.gitignore +0 -0
  36. {qadence-1.7.4 → qadence-1.7.6}/.pre-commit-config.yaml +0 -0
  37. {qadence-1.7.4 → qadence-1.7.6}/LICENSE +0 -0
  38. {qadence-1.7.4 → qadence-1.7.6}/MANIFEST.in +0 -0
  39. {qadence-1.7.4 → qadence-1.7.6}/README.md +0 -0
  40. {qadence-1.7.4 → qadence-1.7.6}/mkdocs.yml +0 -0
  41. {qadence-1.7.4 → qadence-1.7.6}/qadence/__init__.py +0 -0
  42. {qadence-1.7.4 → qadence-1.7.6}/qadence/analog/__init__.py +0 -0
  43. {qadence-1.7.4 → qadence-1.7.6}/qadence/analog/constants.py +0 -0
  44. {qadence-1.7.4 → qadence-1.7.6}/qadence/analog/device.py +0 -0
  45. {qadence-1.7.4 → qadence-1.7.6}/qadence/analog/hamiltonian_terms.py +0 -0
  46. {qadence-1.7.4 → qadence-1.7.6}/qadence/analog/parse_analog.py +0 -0
  47. {qadence-1.7.4 → qadence-1.7.6}/qadence/backend.py +0 -0
  48. {qadence-1.7.4 → qadence-1.7.6}/qadence/backends/__init__.py +0 -0
  49. {qadence-1.7.4 → qadence-1.7.6}/qadence/backends/braket/__init__.py +0 -0
  50. {qadence-1.7.4 → qadence-1.7.6}/qadence/backends/braket/backend.py +0 -0
  51. {qadence-1.7.4 → qadence-1.7.6}/qadence/backends/braket/config.py +0 -0
  52. {qadence-1.7.4 → qadence-1.7.6}/qadence/backends/braket/convert_ops.py +0 -0
  53. {qadence-1.7.4 → qadence-1.7.6}/qadence/backends/horqrux/__init__.py +0 -0
  54. {qadence-1.7.4 → qadence-1.7.6}/qadence/backends/horqrux/backend.py +0 -0
  55. {qadence-1.7.4 → qadence-1.7.6}/qadence/backends/horqrux/config.py +0 -0
  56. {qadence-1.7.4 → qadence-1.7.6}/qadence/backends/jax_utils.py +0 -0
  57. {qadence-1.7.4 → qadence-1.7.6}/qadence/backends/pulser/__init__.py +0 -0
  58. {qadence-1.7.4 → qadence-1.7.6}/qadence/backends/pulser/backend.py +0 -0
  59. {qadence-1.7.4 → qadence-1.7.6}/qadence/backends/pulser/channels.py +0 -0
  60. {qadence-1.7.4 → qadence-1.7.6}/qadence/backends/pulser/cloud.py +0 -0
  61. {qadence-1.7.4 → qadence-1.7.6}/qadence/backends/pulser/config.py +0 -0
  62. {qadence-1.7.4 → qadence-1.7.6}/qadence/backends/pulser/convert_ops.py +0 -0
  63. {qadence-1.7.4 → qadence-1.7.6}/qadence/backends/pulser/devices.py +0 -0
  64. {qadence-1.7.4 → qadence-1.7.6}/qadence/backends/pulser/pulses.py +0 -0
  65. {qadence-1.7.4 → qadence-1.7.6}/qadence/backends/pulser/waveforms.py +0 -0
  66. {qadence-1.7.4 → qadence-1.7.6}/qadence/backends/pyqtorch/__init__.py +0 -0
  67. {qadence-1.7.4 → qadence-1.7.6}/qadence/backends/pyqtorch/config.py +0 -0
  68. {qadence-1.7.4 → qadence-1.7.6}/qadence/blocks/__init__.py +0 -0
  69. {qadence-1.7.4 → qadence-1.7.6}/qadence/blocks/abstract.py +0 -0
  70. {qadence-1.7.4 → qadence-1.7.6}/qadence/blocks/analog.py +0 -0
  71. {qadence-1.7.4 → qadence-1.7.6}/qadence/blocks/block_to_tensor.py +0 -0
  72. {qadence-1.7.4 → qadence-1.7.6}/qadence/blocks/embedding.py +0 -0
  73. {qadence-1.7.4 → qadence-1.7.6}/qadence/blocks/manipulate.py +0 -0
  74. {qadence-1.7.4 → qadence-1.7.6}/qadence/blocks/matrix.py +0 -0
  75. {qadence-1.7.4 → qadence-1.7.6}/qadence/blocks/primitive.py +0 -0
  76. {qadence-1.7.4 → qadence-1.7.6}/qadence/circuit.py +0 -0
  77. {qadence-1.7.4 → qadence-1.7.6}/qadence/constructors/__init__.py +0 -0
  78. {qadence-1.7.4 → qadence-1.7.6}/qadence/constructors/ansatze.py +0 -0
  79. {qadence-1.7.4 → qadence-1.7.6}/qadence/constructors/daqc/__init__.py +0 -0
  80. {qadence-1.7.4 → qadence-1.7.6}/qadence/constructors/daqc/daqc.py +0 -0
  81. {qadence-1.7.4 → qadence-1.7.6}/qadence/constructors/daqc/gen_parser.py +0 -0
  82. {qadence-1.7.4 → qadence-1.7.6}/qadence/constructors/daqc/utils.py +0 -0
  83. {qadence-1.7.4 → qadence-1.7.6}/qadence/constructors/feature_maps.py +0 -0
  84. {qadence-1.7.4 → qadence-1.7.6}/qadence/constructors/hamiltonians.py +0 -0
  85. {qadence-1.7.4 → qadence-1.7.6}/qadence/constructors/iia.py +0 -0
  86. {qadence-1.7.4 → qadence-1.7.6}/qadence/constructors/qft.py +0 -0
  87. {qadence-1.7.4 → qadence-1.7.6}/qadence/constructors/rydberg_feature_maps.py +0 -0
  88. {qadence-1.7.4 → qadence-1.7.6}/qadence/constructors/rydberg_hea.py +0 -0
  89. {qadence-1.7.4 → qadence-1.7.6}/qadence/decompose.py +0 -0
  90. {qadence-1.7.4 → qadence-1.7.6}/qadence/divergences.py +0 -0
  91. {qadence-1.7.4 → qadence-1.7.6}/qadence/draw/__init__.py +0 -0
  92. {qadence-1.7.4 → qadence-1.7.6}/qadence/draw/assets/dark/measurement.png +0 -0
  93. {qadence-1.7.4 → qadence-1.7.6}/qadence/draw/assets/dark/measurement.svg +0 -0
  94. {qadence-1.7.4 → qadence-1.7.6}/qadence/draw/assets/light/measurement.png +0 -0
  95. {qadence-1.7.4 → qadence-1.7.6}/qadence/draw/assets/light/measurement.svg +0 -0
  96. {qadence-1.7.4 → qadence-1.7.6}/qadence/draw/themes.py +0 -0
  97. {qadence-1.7.4 → qadence-1.7.6}/qadence/draw/utils.py +0 -0
  98. {qadence-1.7.4 → qadence-1.7.6}/qadence/draw/vizbackend.py +0 -0
  99. {qadence-1.7.4 → qadence-1.7.6}/qadence/engines/__init__.py +0 -0
  100. {qadence-1.7.4 → qadence-1.7.6}/qadence/engines/differentiable_backend.py +0 -0
  101. {qadence-1.7.4 → qadence-1.7.6}/qadence/engines/jax/__init__.py +0 -0
  102. {qadence-1.7.4 → qadence-1.7.6}/qadence/engines/jax/differentiable_backend.py +0 -0
  103. {qadence-1.7.4 → qadence-1.7.6}/qadence/engines/torch/__init__.py +0 -0
  104. {qadence-1.7.4 → qadence-1.7.6}/qadence/engines/torch/differentiable_backend.py +0 -0
  105. {qadence-1.7.4 → qadence-1.7.6}/qadence/exceptions/__init__.py +0 -0
  106. {qadence-1.7.4 → qadence-1.7.6}/qadence/exceptions/exceptions.py +0 -0
  107. {qadence-1.7.4 → qadence-1.7.6}/qadence/execution.py +0 -0
  108. {qadence-1.7.4 → qadence-1.7.6}/qadence/libs.py +0 -0
  109. {qadence-1.7.4 → qadence-1.7.6}/qadence/log_config.yaml +0 -0
  110. {qadence-1.7.4 → qadence-1.7.6}/qadence/logger.py +0 -0
  111. {qadence-1.7.4 → qadence-1.7.6}/qadence/measurements/__init__.py +0 -0
  112. {qadence-1.7.4 → qadence-1.7.6}/qadence/measurements/protocols.py +0 -0
  113. {qadence-1.7.4 → qadence-1.7.6}/qadence/measurements/samples.py +0 -0
  114. {qadence-1.7.4 → qadence-1.7.6}/qadence/measurements/shadow.py +0 -0
  115. {qadence-1.7.4 → qadence-1.7.6}/qadence/measurements/tomography.py +0 -0
  116. {qadence-1.7.4 → qadence-1.7.6}/qadence/measurements/utils.py +0 -0
  117. {qadence-1.7.4 → qadence-1.7.6}/qadence/mitigations/__init__.py +0 -0
  118. {qadence-1.7.4 → qadence-1.7.6}/qadence/mitigations/analog_zne.py +0 -0
  119. {qadence-1.7.4 → qadence-1.7.6}/qadence/mitigations/protocols.py +0 -0
  120. {qadence-1.7.4 → qadence-1.7.6}/qadence/mitigations/readout.py +0 -0
  121. {qadence-1.7.4 → qadence-1.7.6}/qadence/ml_tools/models.py +0 -0
  122. {qadence-1.7.4 → qadence-1.7.6}/qadence/ml_tools/parameters.py +0 -0
  123. {qadence-1.7.4 → qadence-1.7.6}/qadence/ml_tools/printing.py +0 -0
  124. {qadence-1.7.4 → qadence-1.7.6}/qadence/ml_tools/tensors.py +0 -0
  125. {qadence-1.7.4 → qadence-1.7.6}/qadence/ml_tools/utils.py +0 -0
  126. {qadence-1.7.4 → qadence-1.7.6}/qadence/noise/__init__.py +0 -0
  127. {qadence-1.7.4 → qadence-1.7.6}/qadence/noise/protocols.py +0 -0
  128. {qadence-1.7.4 → qadence-1.7.6}/qadence/noise/readout.py +0 -0
  129. {qadence-1.7.4 → qadence-1.7.6}/qadence/operations/__init__.py +0 -0
  130. {qadence-1.7.4 → qadence-1.7.6}/qadence/operations/analog.py +0 -0
  131. {qadence-1.7.4 → qadence-1.7.6}/qadence/operations/control_ops.py +0 -0
  132. {qadence-1.7.4 → qadence-1.7.6}/qadence/operations/ham_evo.py +0 -0
  133. {qadence-1.7.4 → qadence-1.7.6}/qadence/operations/parametric.py +0 -0
  134. {qadence-1.7.4 → qadence-1.7.6}/qadence/operations/primitive.py +0 -0
  135. {qadence-1.7.4 → qadence-1.7.6}/qadence/overlap.py +0 -0
  136. {qadence-1.7.4 → qadence-1.7.6}/qadence/parameters.py +0 -0
  137. {qadence-1.7.4 → qadence-1.7.6}/qadence/protocols.py +0 -0
  138. {qadence-1.7.4 → qadence-1.7.6}/qadence/py.typed +0 -0
  139. {qadence-1.7.4 → qadence-1.7.6}/qadence/qubit_support.py +0 -0
  140. {qadence-1.7.4 → qadence-1.7.6}/qadence/register.py +0 -0
  141. {qadence-1.7.4 → qadence-1.7.6}/qadence/serial_expr_grammar.peg +0 -0
  142. {qadence-1.7.4 → qadence-1.7.6}/qadence/serialization.py +0 -0
  143. {qadence-1.7.4 → qadence-1.7.6}/qadence/states.py +0 -0
  144. {qadence-1.7.4 → qadence-1.7.6}/qadence/transpile/__init__.py +0 -0
  145. {qadence-1.7.4 → qadence-1.7.6}/qadence/transpile/apply_fn.py +0 -0
  146. {qadence-1.7.4 → qadence-1.7.6}/qadence/transpile/block.py +0 -0
  147. {qadence-1.7.4 → qadence-1.7.6}/qadence/transpile/circuit.py +0 -0
  148. {qadence-1.7.4 → qadence-1.7.6}/qadence/transpile/digitalize.py +0 -0
  149. {qadence-1.7.4 → qadence-1.7.6}/qadence/transpile/flatten.py +0 -0
  150. {qadence-1.7.4 → qadence-1.7.6}/qadence/transpile/invert.py +0 -0
  151. {qadence-1.7.4 → qadence-1.7.6}/qadence/transpile/transpile.py +0 -0
  152. {qadence-1.7.4 → qadence-1.7.6}/qadence/utils.py +0 -0
  153. {qadence-1.7.4 → qadence-1.7.6}/renovate.json +0 -0
  154. {qadence-1.7.4 → qadence-1.7.6}/setup.py +0 -0

qadence-1.7.6/.github/workflows/ISSUE_TEMPLATE/bug-report.yml
@@ -0,0 +1,52 @@
+name: Bug Report
+description: Report a bug
+labels: ["bug"]
+body:
+  - type: markdown
+    attributes:
+      value: |
+        Please fill out the sections below to help everyone identify and fix the bug
+  - type: textarea
+    id: description
+    attributes:
+      label: Short description
+      placeholder: Describe here
+    validations:
+      required: true
+  - type: textarea
+    id: expected
+    attributes:
+      label: What is the expected result?
+      placeholder: Describe here
+  - type: textarea
+    id: actual
+    attributes:
+      label: What is the actual result?
+      placeholder: Describe here
+  - type: textarea
+    id: code_bug
+    attributes:
+      label: Steps/Code to reproduce
+      placeholder: |
+        from qadence import *
+    validations:
+      required: true
+  - type: textarea
+    id: texterror
+    attributes:
+      label: Tracebacks (optional)
+  - type: textarea
+    id: textenv
+    attributes:
+      label: Environment details (optional)
+      placeholder: Qadence version, Operating system, Python version, PyTorch version
+  - type: dropdown
+    id: assign
+    attributes:
+      label: "Would you like to work on this issue?"
+      options:
+        - "Yes"
+  - type: markdown
+    attributes:
+      value: |
+        Thanks for reporting this issue! We will get back to you as soon as possible.

qadence-1.7.6/.github/workflows/ISSUE_TEMPLATE/config.yml
@@ -0,0 +1,5 @@
+blank_issues_enabled: true
+contact_links:
+  - name: Contribution guidelines
+    url: https://pasqal-io.github.io/qadence/latest/getting_started/CONTRIBUTING/
+    about: How to contribute

qadence-1.7.6/.github/workflows/ISSUE_TEMPLATE/new-feature.yml
@@ -0,0 +1,36 @@
+name: New feature
+description: Suggest or request a new feature
+labels: ["feature"]
+body:
+  - type: markdown
+    attributes:
+      value: |
+        Please fill out the sections below to properly describe the new feature you are suggesting.
+  - type: textarea
+    id: description
+    attributes:
+      label: Describe the feature
+      placeholder: I want ...
+    validations:
+      required: true
+  - type: textarea
+    id: rationale
+    attributes:
+      label: It should be implemented because
+      placeholder: It will allow ...
+  - type: textarea
+    id: context
+    attributes:
+      label: Additional context
+      placeholder: |
+        Add any other context or screenshots about the feature request here.
+  - type: dropdown
+    id: assign
+    attributes:
+      label: "Would you like to work on this issue?"
+      options:
+        - "Yes"
+  - type: markdown
+    attributes:
+      value: |
+        Thanks for your suggestion! Let's see together if it can be implemented.

{qadence-1.7.4 → qadence-1.7.6}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: qadence
-Version: 1.7.4
+Version: 1.7.6
 Summary: Pasqal interface for circuit-based quantum computing SDKs
 Author-email: Aleksander Wennersteen <aleksander.wennersteen@pasqal.com>, Gert-Jan Both <gert-jan.both@pasqal.com>, Niklas Heim <niklas.heim@pasqal.com>, Mario Dagrada <mario.dagrada@pasqal.com>, Vincent Elfving <vincent.elfving@pasqal.com>, Dominik Seitz <dominik.seitz@pasqal.com>, Roland Guichard <roland.guichard@pasqal.com>, "Joao P. Moutinho" <joao.moutinho@pasqal.com>, Vytautas Abramavicius <vytautas.abramavicius@pasqal.com>, Gergana Velikova <gergana.velikova@pasqal.com>, Eduardo Maschio <eduardo.maschio@pasqal.com>, Smit Chaudhary <smit.chaudhary@pasqal.com>, Ignacio Fernández Graña <ignacio.fernandez-grana@pasqal.com>, Charles Moussa <charles.moussa@pasqal.com>, Giorgio Tosti Balducci <giorgio.tosti-balducci@pasqal.com>
 License: Apache 2.0
@@ -22,7 +22,7 @@ Requires-Dist: matplotlib
 Requires-Dist: nevergrad
 Requires-Dist: numpy
 Requires-Dist: openfermion
-Requires-Dist: pyqtorch==1.3.2
+Requires-Dist: pyqtorch==1.4.4
 Requires-Dist: pyyaml
 Requires-Dist: rich
 Requires-Dist: scipy
@@ -45,7 +45,7 @@ Requires-Dist: nvidia-pyindex; extra == 'dlprof'
 Provides-Extra: horqrux
 Requires-Dist: einops; extra == 'horqrux'
 Requires-Dist: flax; extra == 'horqrux'
-Requires-Dist: horqrux==0.6.0; extra == 'horqrux'
+Requires-Dist: horqrux==0.6.2; extra == 'horqrux'
 Requires-Dist: jax; extra == 'horqrux'
 Requires-Dist: jaxopt; extra == 'horqrux'
 Requires-Dist: optax; extra == 'horqrux'
@@ -57,7 +57,7 @@ Requires-Dist: mlflow; extra == 'mlflow'
 Provides-Extra: protocols
 Requires-Dist: qadence-protocols; extra == 'protocols'
 Provides-Extra: pulser
-Requires-Dist: pasqal-cloud==0.11.1; extra == 'pulser'
+Requires-Dist: pasqal-cloud==0.11.3; extra == 'pulser'
 Requires-Dist: pulser-core==0.19.0; extra == 'pulser'
 Requires-Dist: pulser-simulation==0.19.0; extra == 'pulser'
 Provides-Extra: visualization

{qadence-1.7.4 → qadence-1.7.6}/pyproject.toml
@@ -25,7 +25,7 @@ authors = [
 ]
 requires-python = ">=3.9"
 license = { text = "Apache 2.0" }
-version = "1.7.4"
+version = "1.7.6"
 classifiers = [
   "License :: OSI Approved :: Apache Software License",
   "Programming Language :: Python",
@@ -49,7 +49,7 @@ dependencies = [
   "jsonschema",
   "nevergrad",
   "scipy",
-  "pyqtorch==1.3.2",
+  "pyqtorch==1.4.4",
   "pyyaml",
   "matplotlib",
   "Arpeggio==2.0.2",
@@ -63,7 +63,7 @@ allow-ambiguous-features = true
 pulser = [
   "pulser-core==0.19.0",
   "pulser-simulation==0.19.0",
-  "pasqal-cloud==0.11.1",
+  "pasqal-cloud==0.11.3",
 ]
 braket = ["amazon-braket-sdk<1.71.2"]
 visualization = [
@@ -73,7 +73,7 @@ visualization = [
   # "scour",
 ]
 horqrux = [
-  "horqrux==0.6.0",
+  "horqrux==0.6.2",
   "jax",
   "flax",
   "optax",
@@ -87,6 +87,7 @@ dlprof = ["nvidia-pyindex", "nvidia-dlprof[pytorch]"]
 mlflow = ["mlflow"]
 all = ["pulser", "braket", "visualization", "protocols", "libs", "mlflow"]
 
+
 [tool.hatch.envs.default]
 dependencies = [
   "flaky",
@@ -132,6 +133,7 @@ filterwarnings = [
 [tool.hatch.envs.docs]
 dependencies = [
   "mkdocs",
+  "mkdocs_autorefs<1.1.0",
   "mkdocs-material",
   "mkdocstrings",
   "mkdocstrings-python",

{qadence-1.7.4 → qadence-1.7.6}/qadence/analog/addressing.py
@@ -59,9 +59,13 @@ class AddressingPattern:
     ) -> dict:
         # augment weight dict if needed
         weights = {
-            i: Parameter(0.0)
-            if i not in weights
-            else (Parameter(weights[i]) if not isinstance(weights[i], Parameter) else weights[i])
+            i: (
+                Parameter(0.0)
+                if i not in weights
+                else (
+                    Parameter(weights[i]) if not isinstance(weights[i], Parameter) else weights[i]
+                )
+            )
             for i in range(self.n_qubits)
         }
 

{qadence-1.7.4 → qadence-1.7.6}/qadence/backends/api.py
@@ -3,6 +3,9 @@ from __future__ import annotations
 from qadence.backend import Backend, BackendConfiguration
 from qadence.engines.differentiable_backend import DifferentiableBackend
 from qadence.extensions import (
+    BackendNotFoundError,
+    ConfigNotFoundError,
+    EngineNotFoundError,
     import_backend,
     import_config,
     import_engine,
@@ -49,12 +52,9 @@ def backend_factory(
         diff_backend_cls = import_engine(backend_inst.engine)
         backend_inst = diff_backend_cls(backend=backend_inst, diff_mode=DiffMode(diff_mode))  # type: ignore[operator]
         return backend_inst
-    except Exception as e:
-        msg = f"The requested backend '{backend}' is either not installed\
-            or could not be imported due to {e}."
-        logger.error(msg)
-        raise Exception(msg)
-    # Set backend configurations which depend on the differentiation mode
+    except (BackendNotFoundError, EngineNotFoundError, ConfigNotFoundError) as e:
+        logger.error(e.msg)
+        raise e
 
 
 def config_factory(backend_name: BackendName | str, config: dict) -> BackendConfiguration:
@@ -62,6 +62,7 @@ def config_factory(backend_name: BackendName | str, config: dict) -> BackendConfiguration:
     try:
         BackendConfigCls = import_config(backend_name)
         cfg = BackendConfigCls(**config)  # type: ignore[operator]
-    except Exception as e:
-        logger.debug(f"Unable to import config for backend {backend_name} due to {e}.")
+    except ConfigNotFoundError as e:
+        logger.error(e.msg)
+        raise e
     return cfg
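
With the dedicated error types, a failed backend lookup now surfaces as a specific exception instead of a generic one. A minimal caller-side sketch under that assumption (backend_factory and the error class are imported from the module paths shown in this diff; the e.msg attribute follows the hunk above):

    # Hypothetical handling of the new lookup errors.
    from qadence.backends.api import backend_factory
    from qadence.extensions import BackendNotFoundError

    try:
        # A misspelled or uninstalled backend name should now raise
        # BackendNotFoundError rather than a bare Exception.
        backend = backend_factory(backend="pyqtorch", diff_mode="ad")
    except BackendNotFoundError as err:
        print(f"Backend could not be loaded: {err.msg}")
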

{qadence-1.7.4 → qadence-1.7.6}/qadence/backends/gpsr.py
@@ -11,13 +11,29 @@ from qadence.utils import _round_complex
 
 
 def general_psr(spectrum: Tensor, n_eqs: int | None = None, shift_prefac: float = 0.5) -> Callable:
+    """Define whether single_gap_psr or multi_gap_psr is used.
+
+    Args:
+        spectrum (Tensor): Spectrum of the operation we apply PSR onto.
+        n_eqs (int | None, optional): Number of equations. Defaults to None.
+            If provided, we keep the n_eqs higher spectral gaps.
+        shift_prefac (float, optional): Shift prefactor. Defaults to 0.5.
+
+    Returns:
+        Callable: single_gap_psr or multi_gap_psr function for
+            concerned operation.
+    """
     diffs = _round_complex(spectrum - spectrum.reshape(-1, 1))
     sorted_unique_spectral_gaps = torch.unique(torch.abs(torch.tril(diffs)))
 
     # We have to filter out zeros
     sorted_unique_spectral_gaps = sorted_unique_spectral_gaps[sorted_unique_spectral_gaps > 0]
-    n_eqs = len(sorted_unique_spectral_gaps)
-    sorted_unique_spectral_gaps = torch.tensor(list(sorted_unique_spectral_gaps))
+    n_eqs = (
+        len(sorted_unique_spectral_gaps)
+        if n_eqs is None
+        else min(n_eqs, len(sorted_unique_spectral_gaps))
+    )
+    sorted_unique_spectral_gaps = torch.tensor(list(sorted_unique_spectral_gaps)[:n_eqs])
 
     if n_eqs == 1:
         return single_gap_psr
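
The new n_eqs argument caps how many spectral gaps are turned into PSR equations. A standalone sketch of just that gap-selection step (mirroring the lines added above; the _round_complex rounding is omitted here for brevity):

    from __future__ import annotations

    import torch

    def select_spectral_gaps(spectrum: torch.Tensor, n_eqs: int | None = None) -> torch.Tensor:
        # pairwise differences of the eigenvalue spectrum, as in general_psr
        diffs = spectrum - spectrum.reshape(-1, 1)
        gaps = torch.unique(torch.abs(torch.tril(diffs)))
        gaps = gaps[gaps > 0]  # drop the zero gaps
        # keep everything when n_eqs is None, otherwise at most n_eqs gaps
        n_eqs = len(gaps) if n_eqs is None else min(n_eqs, len(gaps))
        return torch.tensor(list(gaps)[:n_eqs])

    # spectrum with gaps {1, 2, 3}, truncated to two equations
    print(select_spectral_gaps(torch.tensor([0.0, 1.0, 3.0]), n_eqs=2))  # tensor([1., 2.])
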
@@ -7,11 +7,11 @@ from operator import add
7
7
  from typing import Any, Callable, Dict
8
8
 
9
9
  import jax.numpy as jnp
10
- from horqrux.abstract import Primitive as Gate
11
10
  from horqrux.analog import _HamiltonianEvolution as NativeHorqHEvo
12
11
  from horqrux.apply import apply_gate
13
12
  from horqrux.parametric import RX, RY, RZ
14
13
  from horqrux.primitive import NOT, SWAP, H, I, X, Y, Z
14
+ from horqrux.primitive import Primitive as Gate
15
15
  from horqrux.utils import inner
16
16
  from jax import Array
17
17
  from jax.scipy.linalg import expm

{qadence-1.7.4 → qadence-1.7.6}/qadence/backends/pyqtorch/backend.py
@@ -71,19 +71,16 @@ class Backend(BackendInterface):
     def observable(self, observable: AbstractBlock, n_qubits: int) -> ConvertedObservable:
         # make sure only leaves, i.e. primitive blocks are scaled
         transpilations = [
-            lambda block: chain_single_qubit_ops(block)
-            if self.config.use_single_qubit_composition
-            else flatten(block),
+            lambda block: (
+                chain_single_qubit_ops(block)
+                if self.config.use_single_qubit_composition
+                else flatten(block)
+            ),
             scale_primitive_blocks_only,
         ]
         block = transpile(*transpilations)(observable)  # type: ignore[call-overload]
         operations = convert_block(block, n_qubits, self.config)
-        obs_cls = (
-            pyq.DiagonalObservable
-            if block._is_diag_pauli and not block.is_parametric
-            else pyq.Observable
-        )
-        native = obs_cls(n_qubits=n_qubits, operations=operations)
+        native = pyq.Observable(operations=operations)
         return ConvertedObservable(native=native, abstract=block, original=observable)
 
     def run(
@@ -140,7 +137,7 @@
         )
         observable = observable if isinstance(observable, list) else [observable]
         _expectation = torch.hstack(
-            [obs.native(state, param_values).reshape(-1, 1) for obs in observable]
+            [obs.native.expectation(state, param_values).reshape(-1, 1) for obs in observable]
         )
         return _expectation
 
@@ -169,7 +166,7 @@
         observables = observable if isinstance(observable, list) else [observable]
         for vals in to_list_of_dicts(param_values):
             wf = self.run(circuit, vals, state, endianness, pyqify_state=True, unpyqify_state=False)
-            exs = torch.cat([obs.native(wf, vals) for obs in observables], 0)
+            exs = torch.cat([obs.native.expectation(wf, vals) for obs in observables], 0)
             list_expvals.append(exs)
 
         batch_expvals = torch.vstack(list_expvals)
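
Expectation values are now obtained through the observable's expectation method rather than by calling the wrapper directly. A rough sketch of the new call shape at the pyqtorch level (the Observable and expectation signatures follow the hunks above; the state layout of shape [2] * n_qubits + [batch] is an assumption about pyqtorch's convention):

    import pyqtorch as pyq
    import torch

    # Z observable on qubit 0, built the way the backend now does it:
    # pyq.Observable takes the operation list directly, with no n_qubits
    # argument and no DiagonalObservable special case.
    observable = pyq.Observable(operations=[pyq.Z(0)])

    # |0> state for one qubit with a trailing batch dimension of size 1.
    state = torch.zeros((2, 1), dtype=torch.cdouble)
    state[0, 0] = 1.0

    # New call style: obs.expectation(state, values) instead of obs(state, values).
    print(observable.expectation(state, {}))  # expected <0|Z|0> = 1
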

{qadence-1.7.4 → qadence-1.7.6}/qadence/backends/pyqtorch/convert_ops.py
@@ -7,7 +7,6 @@ from typing import Any, Sequence, Tuple
 import pyqtorch as pyq
 import sympy
 import torch
-from pyqtorch.apply import apply_operator
 from pyqtorch.embed import Embedding
 from pyqtorch.matrices import _dagger
 from pyqtorch.time_dependent.sesolve import sesolve
@@ -45,7 +44,6 @@ from qadence.blocks import (
 )
 from qadence.blocks.block_to_tensor import (
     _block_to_tensor_embedded,
-    block_to_tensor,
 )
 from qadence.blocks.primitive import ProjectorBlock
 from qadence.blocks.utils import parameters
@@ -78,6 +76,14 @@ def is_single_qubit_chain(block: AbstractBlock) -> bool:
     )
 
 
+def extract_parameter(block: ScaleBlock | ParametricBlock, config: Configuration) -> str | Tensor:
+    return (
+        tensor([block.parameters.parameter], dtype=float64)
+        if not block.is_parametric
+        else config.get_param_name(block)[0]
+    )
+
+
 def convert_block(
     block: AbstractBlock, n_qubits: int = None, config: Configuration = None
 ) -> Sequence[Module | Tensor | str | sympy.Expr]:
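
extract_parameter factors out logic that was previously duplicated across the gate branches below: a fixed numeric parameter becomes a one-element float64 tensor, while a symbolic parameter is passed on by its generated name. A small illustration of the condition it branches on (assumed qadence block behaviour; RX is used purely as an example):

    from qadence.operations import RX

    fixed = RX(0, 1.234)       # angle given as a plain number
    symbolic = RX(0, "theta")  # angle given as a named parameter

    # extract_parameter(fixed, config)    -> tensor([1.2340], dtype=float64)
    # extract_parameter(symbolic, config) -> the name from config.get_param_name(...)[0]
    print(fixed.is_parametric, symbolic.is_parametric)  # False True
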
@@ -94,31 +100,45 @@
 
     if isinstance(block, ScaleBlock):
         scaled_ops = convert_block(block.block, n_qubits, config)
-        scale = (
-            tensor([block.parameters.parameter], dtype=float64)
-            if not block.is_parametric
-            else config.get_param_name(block)[0]
-        )
+        scale = extract_parameter(block, config)
         return [pyq.Scale(pyq.Sequence(scaled_ops), scale)]
 
     elif isinstance(block, TimeEvolutionBlock):
-        # TODO add native pyq hamevo
-        # generator = convert_block(block.generator, n_qubits, config)[0]  # type: ignore[arg-type]
-        # time_param = config.get_param_name(block)[0]
-        # is_parametric = (
-        #     block.generator.is_parametric if isinstance(block.generator, AbstractBlock) else False
-        # )
-        # return [
-        #     pyq.HamiltonianEvolution(
-        #         qubit_support=qubit_support,
-        #         generator=generator,
-        #         time=time_param,
-        #         generator_parametric=is_parametric,  # type: ignore[union-attr]
-        #     )
-        # ]
-        return [PyQHamiltonianEvolution(qubit_support, n_qubits, block, config)]
+        if getattr(block.generator, "is_time_dependent", False):
+            return [PyQTimeDependentEvolution(qubit_support, n_qubits, block, config)]
+        else:
+            if isinstance(block.generator, sympy.Basic):
+                generator = config.get_param_name(block)[1]
+            elif isinstance(block.generator, Tensor):
+                m = block.generator.to(dtype=cdouble)
+                generator = convert_block(
+                    MatrixBlock(
+                        m,
+                        qubit_support=qubit_support,
+                        check_unitary=False,
+                        check_hermitian=True,
+                    )
+                )[0]
+            else:
+                generator = convert_block(block.generator, n_qubits, config)[0]  # type: ignore[arg-type]
+            time_param = config.get_param_name(block)[0]
+            is_parametric = (
+                block.generator.is_parametric
+                if isinstance(block.generator, AbstractBlock)
+                else False
+            )
+            return [
+                pyq.HamiltonianEvolution(
+                    qubit_support=qubit_support,
+                    generator=generator,
+                    time=time_param,
+                    generator_parametric=is_parametric,  # type: ignore[union-attr]
+                    cache_length=0,
+                )
+            ]
+
     elif isinstance(block, MatrixBlock):
-        return [pyq.primitive.Primitive(block.matrix, block.qubit_support)]
+        return [pyq.primitives.Primitive(block.matrix, block.qubit_support)]
     elif isinstance(block, CompositeBlock):
         ops = list(flatten(*(convert_block(b, n_qubits, config) for b in block.blocks)))
         if isinstance(block, AddBlock):
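
Time-independent evolutions now map onto pyqtorch's native HamiltonianEvolution rather than the removed PyQHamiltonianEvolution wrapper. A hedged sketch of the constructor call the new branch emits for a constant, matrix-valued generator (keyword names, the Primitive wrapping, and treating time as a parameter name string are all copied from the hunk above rather than verified against pyqtorch's full API):

    import pyqtorch as pyq
    import torch

    # Constant Pauli-X generator on qubit 0, wrapped the same way the
    # MatrixBlock branch above wraps a dense matrix.
    x_mat = torch.tensor([[0.0, 1.0], [1.0, 0.0]], dtype=torch.cdouble)
    generator = pyq.primitives.Primitive(x_mat, (0,))

    hevo = pyq.HamiltonianEvolution(
        qubit_support=(0,),
        generator=generator,
        time="t",                    # evolution time looked up by parameter name
        generator_parametric=False,
        cache_length=0,              # no propagator caching, as in the diff
    )
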
@@ -142,14 +162,14 @@
             if isinstance(block, U):
                 op = pyq_cls(qubit_support[0], *config.get_param_name(block))
             else:
-                op = pyq_cls(qubit_support[0], config.get_param_name(block)[0])
+                op = pyq_cls(qubit_support[0], extract_parameter(block, config))
         else:
             op = pyq_cls(qubit_support[0])
         return [op]
     elif isinstance(block, tuple(two_qubit_gateset)):
         pyq_cls = getattr(pyq, block.name)
         if isinstance(block, ParametricBlock):
-            op = pyq_cls(qubit_support[0], qubit_support[1], config.get_param_name(block)[0])
+            op = pyq_cls(qubit_support[0], qubit_support[1], extract_parameter(block, config))
         else:
             op = pyq_cls(qubit_support[0], qubit_support[1])
         return [op]
@@ -157,7 +177,7 @@
         block_name = block.name[1:] if block.name.startswith("M") else block.name
         pyq_cls = getattr(pyq, block_name)
         if isinstance(block, ParametricBlock):
-            op = pyq_cls(qubit_support[:-1], qubit_support[-1], config.get_param_name(block)[0])
+            op = pyq_cls(qubit_support[:-1], qubit_support[-1], extract_parameter(block, config))
         else:
             if "CSWAP" in block_name:
                 op = pyq_cls(qubit_support[:-2], qubit_support[-2:])
@@ -172,7 +192,7 @@
         )
 
 
-class PyQHamiltonianEvolution(Module):
+class PyQTimeDependentEvolution(Module):
     def __init__(
         self,
         qubit_support: Tuple[int, ...],
@@ -188,50 +208,17 @@
         self.hmat: Tensor
         self.config = config
 
-        if isinstance(block.generator, AbstractBlock) and not block.generator.is_parametric:
-            hmat = block_to_tensor(
-                block.generator,
-                qubit_support=self.qubit_support,
-                use_full_support=False,
-            )
-            hmat = hmat.permute(1, 2, 0)
-            self.register_buffer("hmat", hmat)
-            self._hamiltonian = lambda self, values: self.hmat
-
-        elif isinstance(block.generator, Tensor):
-            m = block.generator.to(dtype=cdouble)
-            hmat = block_to_tensor(
-                MatrixBlock(
-                    m,
-                    qubit_support=block.qubit_support,
-                    check_unitary=False,
-                    check_hermitian=True,
-                ),
+        def _hamiltonian(self: PyQTimeDependentEvolution, values: dict[str, Tensor]) -> Tensor:
+            hmat = _block_to_tensor_embedded(
+                block.generator,  # type: ignore[arg-type]
+                values=values,
                 qubit_support=self.qubit_support,
                 use_full_support=False,
+                device=self.device,
             )
-            hmat = hmat.permute(1, 2, 0)
-            self.register_buffer("hmat", hmat)
-            self._hamiltonian = lambda self, values: self.hmat
-
-        elif isinstance(block.generator, sympy.Basic):
-            self._hamiltonian = (
-                lambda self, values: values[self.param_names[1]].squeeze(3).permute(1, 2, 0)
-            )
-            # FIXME Why are we squeezing
-        else:
-
-            def _hamiltonian(self: PyQHamiltonianEvolution, values: dict[str, Tensor]) -> Tensor:
-                hmat = _block_to_tensor_embedded(
-                    block.generator,  # type: ignore[arg-type]
-                    values=values,
-                    qubit_support=self.qubit_support,
-                    use_full_support=False,
-                    device=self.device,
-                )
-                return hmat.permute(1, 2, 0)
+            return hmat.permute(1, 2, 0)
 
-            self._hamiltonian = _hamiltonian
+        self._hamiltonian = _hamiltonian
 
         self._time_evolution = lambda values: values[self.param_names[0]]
         self._device: torch_device = (
@@ -322,61 +309,51 @@
         values: dict[str, Tensor] | ParameterDict = dict(),
         embedding: Embedding | None = None,
     ) -> Tensor:
-        if getattr(self.block.generator, "is_time_dependent", False):  # type: ignore [union-attr]
-
-            def Ht(t: Tensor | float) -> Tensor:
-                # values dict has to change with new value of t
-                # initial value of a feature parameter inside generator block
-                # has to be inferred here
-                new_vals = dict()
-                for str_expr, val in values.items():
-                    expr = sympy.sympify(str_expr)
-                    t_symb = sympy.Symbol(self._get_time_parameter())
-                    free_symbols = expr.free_symbols
-                    if t_symb in free_symbols:
-                        # create substitution list for time and feature params
-                        subs_list = [(t_symb, t)]
-
-                        if len(free_symbols) > 1:
-                            # get feature param symbols
-                            feat_symbols = free_symbols.difference(set([t_symb]))
-
-                            # get feature param values
-                            feat_vals = values["orig_param_values"]
-
-                            # update substitution list with feature param values
-                            for fs in feat_symbols:
-                                subs_list.append((fs, feat_vals[str(fs)]))
-
-                        # evaluate expression with new time param value
-                        new_vals[str_expr] = torch.tensor(float(expr.subs(subs_list)))
-                    else:
-                        # expression doesn't contain time parameter - copy it as is
-                        new_vals[str_expr] = val
-
-                # get matrix form of generator
-                hmat = _block_to_tensor_embedded(
-                    self.block.generator,  # type: ignore[arg-type]
-                    values=new_vals,
-                    qubit_support=self.qubit_support,
-                    use_full_support=False,
-                    device=self.device,
-                ).squeeze(0)
-
-                return hmat
-
-            tsave = torch.linspace(0, self.block.duration, self.config.n_steps_hevo)  # type: ignore [attr-defined]
-            result = pyqify(
-                sesolve(Ht, unpyqify(state).T[:, 0:1], tsave, self.config.ode_solver).states[-1].T
-            )
-        else:
-            result = apply_operator(
-                state,
-                self.unitary(values),
-                self.qubit_support,
-                self.n_qubits,
-                self.batch_size,
-            )
+        def Ht(t: Tensor | float) -> Tensor:
+            # values dict has to change with new value of t
+            # initial value of a feature parameter inside generator block
+            # has to be inferred here
+            new_vals = dict()
+            for str_expr, val in values.items():
+                expr = sympy.sympify(str_expr)
+                t_symb = sympy.Symbol(self._get_time_parameter())
+                free_symbols = expr.free_symbols
+                if t_symb in free_symbols:
+                    # create substitution list for time and feature params
+                    subs_list = [(t_symb, t)]
+
+                    if len(free_symbols) > 1:
+                        # get feature param symbols
+                        feat_symbols = free_symbols.difference(set([t_symb]))
+
+                        # get feature param values
+                        feat_vals = values["orig_param_values"]
+
+                        # update substitution list with feature param values
+                        for fs in feat_symbols:
+                            subs_list.append((fs, feat_vals[str(fs)]))
+
+                    # evaluate expression with new time param value
+                    new_vals[str_expr] = torch.tensor(float(expr.subs(subs_list)))
+                else:
+                    # expression doesn't contain time parameter - copy it as is
+                    new_vals[str_expr] = val
+
+            # get matrix form of generator
+            hmat = _block_to_tensor_embedded(
+                self.block.generator,  # type: ignore[arg-type]
+                values=new_vals,
+                qubit_support=self.qubit_support,
+                use_full_support=False,
+                device=self.device,
+            ).squeeze(0)
+
+            return hmat
+
+        tsave = torch.linspace(0, self.block.duration, self.config.n_steps_hevo)  # type: ignore [attr-defined]
+        result = pyqify(
+            sesolve(Ht, unpyqify(state).T[:, 0:1], tsave, self.config.ode_solver).states[-1].T
+        )
 
         return result
 
@@ -388,7 +365,7 @@
     def dtype(self) -> torch_dtype:
         return self._dtype
 
-    def to(self, *args: Any, **kwargs: Any) -> PyQHamiltonianEvolution:
+    def to(self, *args: Any, **kwargs: Any) -> PyQTimeDependentEvolution:
         if hasattr(self, "hmat"):
            self.hmat = self.hmat.to(*args, **kwargs)
            self._device = self.hmat.device
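
PyQTimeDependentEvolution now exists solely for the time-dependent case: the generator is re-evaluated at each solver step by substituting the running time into the parameter expressions, and the state is propagated with sesolve. A self-contained sketch of just the substitution idea used inside Ht above (plain sympy/torch; the symbol names are illustrative, not the class's actual parameter names):

    import sympy
    import torch

    # A parameter expression "omega * t" as the embedding might produce it.
    t_symb = sympy.Symbol("t")
    omega = sympy.Symbol("omega")
    expr = omega * t_symb

    orig_param_values = {"omega": torch.tensor(2.0)}

    def evaluate_at(t: float) -> torch.Tensor:
        # substitute the current solver time, keep the other feature params fixed
        subs_list = [(t_symb, t)] + [
            (fs, float(orig_param_values[str(fs)])) for fs in expr.free_symbols - {t_symb}
        ]
        return torch.tensor(float(expr.subs(subs_list)))

    print(evaluate_at(0.5))  # tensor(1.)
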

{qadence-1.7.4 → qadence-1.7.6}/qadence/backends/utils.py
@@ -9,7 +9,7 @@ import pyqtorch as pyq
 import torch
 from numpy.typing import ArrayLike
 from pyqtorch.apply import apply_operator
-from pyqtorch.parametric import Parametric as PyQParametric
+from pyqtorch.primitives import Parametric as PyQParametric
 from torch import (
     Tensor,
     cat,

{qadence-1.7.4 → qadence-1.7.6}/qadence/blocks/composite.py
@@ -129,9 +129,11 @@ class CompositeBlock(AbstractBlock):
         from qadence.blocks.utils import _construct, tag
 
         blocks = [
-            getattr(operations, b["type"])._from_dict(b)
-            if hasattr(operations, b["type"])
-            else getattr(qadenceblocks, b["type"])._from_dict(b)
+            (
+                getattr(operations, b["type"])._from_dict(b)
+                if hasattr(operations, b["type"])
+                else getattr(qadenceblocks, b["type"])._from_dict(b)
+            )
             for b in d["blocks"]
         ]
         block = _construct(cls, blocks)  # type: ignore[arg-type]