qadence 1.6.1.tar.gz → 1.6.3.tar.gz

This diff shows the content of publicly available package versions as released to their respective public registries; it is provided for informational purposes only.
Files changed (156)
  1. {qadence-1.6.1 → qadence-1.6.3}/PKG-INFO +14 -3
  2. {qadence-1.6.1 → qadence-1.6.3}/README.md +11 -0
  3. {qadence-1.6.1 → qadence-1.6.3}/pyproject.toml +3 -3
  4. {qadence-1.6.1 → qadence-1.6.3}/qadence/backends/gpsr.py +15 -3
  5. {qadence-1.6.1 → qadence-1.6.3}/qadence/backends/pyqtorch/backend.py +28 -43
  6. {qadence-1.6.1 → qadence-1.6.3}/qadence/backends/pyqtorch/convert_ops.py +40 -225
  7. {qadence-1.6.1 → qadence-1.6.3}/qadence/blocks/matrix.py +1 -1
  8. {qadence-1.6.1 → qadence-1.6.3}/qadence/engines/torch/differentiable_expectation.py +1 -1
  9. {qadence-1.6.1 → qadence-1.6.3}/qadence/ml_tools/saveload.py +1 -4
  10. {qadence-1.6.1 → qadence-1.6.3}/qadence/parameters.py +1 -1
  11. {qadence-1.6.1 → qadence-1.6.3}/qadence/states.py +7 -8
  12. qadence-1.6.1/qadence/backends/adjoint.py +0 -163
  13. {qadence-1.6.1 → qadence-1.6.3}/.coveragerc +0 -0
  14. {qadence-1.6.1 → qadence-1.6.3}/.github/dependabot.yml +0 -0
  15. {qadence-1.6.1 → qadence-1.6.3}/.github/workflows/build_docs.yml +0 -0
  16. {qadence-1.6.1 → qadence-1.6.3}/.github/workflows/dependabot.yml +0 -0
  17. {qadence-1.6.1 → qadence-1.6.3}/.github/workflows/lint.yml +0 -0
  18. {qadence-1.6.1 → qadence-1.6.3}/.github/workflows/test_all.yml +0 -0
  19. {qadence-1.6.1 → qadence-1.6.3}/.github/workflows/test_examples.yml +0 -0
  20. {qadence-1.6.1 → qadence-1.6.3}/.github/workflows/test_fast.yml +0 -0
  21. {qadence-1.6.1 → qadence-1.6.3}/.gitignore +0 -0
  22. {qadence-1.6.1 → qadence-1.6.3}/.pre-commit-config.yaml +0 -0
  23. {qadence-1.6.1 → qadence-1.6.3}/LICENSE +0 -0
  24. {qadence-1.6.1 → qadence-1.6.3}/MANIFEST.in +0 -0
  25. {qadence-1.6.1 → qadence-1.6.3}/mkdocs.yml +0 -0
  26. {qadence-1.6.1 → qadence-1.6.3}/qadence/__init__.py +0 -0
  27. {qadence-1.6.1 → qadence-1.6.3}/qadence/analog/__init__.py +0 -0
  28. {qadence-1.6.1 → qadence-1.6.3}/qadence/analog/addressing.py +0 -0
  29. {qadence-1.6.1 → qadence-1.6.3}/qadence/analog/constants.py +0 -0
  30. {qadence-1.6.1 → qadence-1.6.3}/qadence/analog/device.py +0 -0
  31. {qadence-1.6.1 → qadence-1.6.3}/qadence/analog/hamiltonian_terms.py +0 -0
  32. {qadence-1.6.1 → qadence-1.6.3}/qadence/analog/parse_analog.py +0 -0
  33. {qadence-1.6.1 → qadence-1.6.3}/qadence/backend.py +0 -0
  34. {qadence-1.6.1 → qadence-1.6.3}/qadence/backends/__init__.py +0 -0
  35. {qadence-1.6.1 → qadence-1.6.3}/qadence/backends/api.py +0 -0
  36. {qadence-1.6.1 → qadence-1.6.3}/qadence/backends/braket/__init__.py +0 -0
  37. {qadence-1.6.1 → qadence-1.6.3}/qadence/backends/braket/backend.py +0 -0
  38. {qadence-1.6.1 → qadence-1.6.3}/qadence/backends/braket/config.py +0 -0
  39. {qadence-1.6.1 → qadence-1.6.3}/qadence/backends/braket/convert_ops.py +0 -0
  40. {qadence-1.6.1 → qadence-1.6.3}/qadence/backends/horqrux/__init__.py +0 -0
  41. {qadence-1.6.1 → qadence-1.6.3}/qadence/backends/horqrux/backend.py +0 -0
  42. {qadence-1.6.1 → qadence-1.6.3}/qadence/backends/horqrux/config.py +0 -0
  43. {qadence-1.6.1 → qadence-1.6.3}/qadence/backends/horqrux/convert_ops.py +0 -0
  44. {qadence-1.6.1 → qadence-1.6.3}/qadence/backends/jax_utils.py +0 -0
  45. {qadence-1.6.1 → qadence-1.6.3}/qadence/backends/pulser/__init__.py +0 -0
  46. {qadence-1.6.1 → qadence-1.6.3}/qadence/backends/pulser/backend.py +0 -0
  47. {qadence-1.6.1 → qadence-1.6.3}/qadence/backends/pulser/channels.py +0 -0
  48. {qadence-1.6.1 → qadence-1.6.3}/qadence/backends/pulser/cloud.py +0 -0
  49. {qadence-1.6.1 → qadence-1.6.3}/qadence/backends/pulser/config.py +0 -0
  50. {qadence-1.6.1 → qadence-1.6.3}/qadence/backends/pulser/convert_ops.py +0 -0
  51. {qadence-1.6.1 → qadence-1.6.3}/qadence/backends/pulser/devices.py +0 -0
  52. {qadence-1.6.1 → qadence-1.6.3}/qadence/backends/pulser/pulses.py +0 -0
  53. {qadence-1.6.1 → qadence-1.6.3}/qadence/backends/pulser/waveforms.py +0 -0
  54. {qadence-1.6.1 → qadence-1.6.3}/qadence/backends/pyqtorch/__init__.py +0 -0
  55. {qadence-1.6.1 → qadence-1.6.3}/qadence/backends/pyqtorch/config.py +0 -0
  56. {qadence-1.6.1 → qadence-1.6.3}/qadence/backends/utils.py +0 -0
  57. {qadence-1.6.1 → qadence-1.6.3}/qadence/blocks/__init__.py +0 -0
  58. {qadence-1.6.1 → qadence-1.6.3}/qadence/blocks/abstract.py +0 -0
  59. {qadence-1.6.1 → qadence-1.6.3}/qadence/blocks/analog.py +0 -0
  60. {qadence-1.6.1 → qadence-1.6.3}/qadence/blocks/block_to_tensor.py +0 -0
  61. {qadence-1.6.1 → qadence-1.6.3}/qadence/blocks/composite.py +0 -0
  62. {qadence-1.6.1 → qadence-1.6.3}/qadence/blocks/embedding.py +0 -0
  63. {qadence-1.6.1 → qadence-1.6.3}/qadence/blocks/manipulate.py +0 -0
  64. {qadence-1.6.1 → qadence-1.6.3}/qadence/blocks/primitive.py +0 -0
  65. {qadence-1.6.1 → qadence-1.6.3}/qadence/blocks/utils.py +0 -0
  66. {qadence-1.6.1 → qadence-1.6.3}/qadence/circuit.py +0 -0
  67. {qadence-1.6.1 → qadence-1.6.3}/qadence/constructors/__init__.py +0 -0
  68. {qadence-1.6.1 → qadence-1.6.3}/qadence/constructors/ansatze.py +0 -0
  69. {qadence-1.6.1 → qadence-1.6.3}/qadence/constructors/daqc/__init__.py +0 -0
  70. {qadence-1.6.1 → qadence-1.6.3}/qadence/constructors/daqc/daqc.py +0 -0
  71. {qadence-1.6.1 → qadence-1.6.3}/qadence/constructors/daqc/gen_parser.py +0 -0
  72. {qadence-1.6.1 → qadence-1.6.3}/qadence/constructors/daqc/utils.py +0 -0
  73. {qadence-1.6.1 → qadence-1.6.3}/qadence/constructors/feature_maps.py +0 -0
  74. {qadence-1.6.1 → qadence-1.6.3}/qadence/constructors/hamiltonians.py +0 -0
  75. {qadence-1.6.1 → qadence-1.6.3}/qadence/constructors/iia.py +0 -0
  76. {qadence-1.6.1 → qadence-1.6.3}/qadence/constructors/qft.py +0 -0
  77. {qadence-1.6.1 → qadence-1.6.3}/qadence/constructors/rydberg_feature_maps.py +0 -0
  78. {qadence-1.6.1 → qadence-1.6.3}/qadence/constructors/rydberg_hea.py +0 -0
  79. {qadence-1.6.1 → qadence-1.6.3}/qadence/constructors/utils.py +0 -0
  80. {qadence-1.6.1 → qadence-1.6.3}/qadence/decompose.py +0 -0
  81. {qadence-1.6.1 → qadence-1.6.3}/qadence/divergences.py +0 -0
  82. {qadence-1.6.1 → qadence-1.6.3}/qadence/draw/__init__.py +0 -0
  83. {qadence-1.6.1 → qadence-1.6.3}/qadence/draw/assets/dark/measurement.png +0 -0
  84. {qadence-1.6.1 → qadence-1.6.3}/qadence/draw/assets/dark/measurement.svg +0 -0
  85. {qadence-1.6.1 → qadence-1.6.3}/qadence/draw/assets/light/measurement.png +0 -0
  86. {qadence-1.6.1 → qadence-1.6.3}/qadence/draw/assets/light/measurement.svg +0 -0
  87. {qadence-1.6.1 → qadence-1.6.3}/qadence/draw/themes.py +0 -0
  88. {qadence-1.6.1 → qadence-1.6.3}/qadence/draw/utils.py +0 -0
  89. {qadence-1.6.1 → qadence-1.6.3}/qadence/draw/vizbackend.py +0 -0
  90. {qadence-1.6.1 → qadence-1.6.3}/qadence/engines/__init__.py +0 -0
  91. {qadence-1.6.1 → qadence-1.6.3}/qadence/engines/differentiable_backend.py +0 -0
  92. {qadence-1.6.1 → qadence-1.6.3}/qadence/engines/jax/__init__.py +0 -0
  93. {qadence-1.6.1 → qadence-1.6.3}/qadence/engines/jax/differentiable_backend.py +0 -0
  94. {qadence-1.6.1 → qadence-1.6.3}/qadence/engines/jax/differentiable_expectation.py +0 -0
  95. {qadence-1.6.1 → qadence-1.6.3}/qadence/engines/torch/__init__.py +0 -0
  96. {qadence-1.6.1 → qadence-1.6.3}/qadence/engines/torch/differentiable_backend.py +0 -0
  97. {qadence-1.6.1 → qadence-1.6.3}/qadence/exceptions/__init__.py +0 -0
  98. {qadence-1.6.1 → qadence-1.6.3}/qadence/exceptions/exceptions.py +0 -0
  99. {qadence-1.6.1 → qadence-1.6.3}/qadence/execution.py +0 -0
  100. {qadence-1.6.1 → qadence-1.6.3}/qadence/extensions.py +0 -0
  101. {qadence-1.6.1 → qadence-1.6.3}/qadence/finitediff.py +0 -0
  102. {qadence-1.6.1 → qadence-1.6.3}/qadence/libs.py +0 -0
  103. {qadence-1.6.1 → qadence-1.6.3}/qadence/log_config.yaml +0 -0
  104. {qadence-1.6.1 → qadence-1.6.3}/qadence/logger.py +0 -0
  105. {qadence-1.6.1 → qadence-1.6.3}/qadence/measurements/__init__.py +0 -0
  106. {qadence-1.6.1 → qadence-1.6.3}/qadence/measurements/protocols.py +0 -0
  107. {qadence-1.6.1 → qadence-1.6.3}/qadence/measurements/samples.py +0 -0
  108. {qadence-1.6.1 → qadence-1.6.3}/qadence/measurements/shadow.py +0 -0
  109. {qadence-1.6.1 → qadence-1.6.3}/qadence/measurements/tomography.py +0 -0
  110. {qadence-1.6.1 → qadence-1.6.3}/qadence/measurements/utils.py +0 -0
  111. {qadence-1.6.1 → qadence-1.6.3}/qadence/mitigations/__init__.py +0 -0
  112. {qadence-1.6.1 → qadence-1.6.3}/qadence/mitigations/analog_zne.py +0 -0
  113. {qadence-1.6.1 → qadence-1.6.3}/qadence/mitigations/protocols.py +0 -0
  114. {qadence-1.6.1 → qadence-1.6.3}/qadence/mitigations/readout.py +0 -0
  115. {qadence-1.6.1 → qadence-1.6.3}/qadence/ml_tools/__init__.py +0 -0
  116. {qadence-1.6.1 → qadence-1.6.3}/qadence/ml_tools/config.py +0 -0
  117. {qadence-1.6.1 → qadence-1.6.3}/qadence/ml_tools/data.py +0 -0
  118. {qadence-1.6.1 → qadence-1.6.3}/qadence/ml_tools/models.py +0 -0
  119. {qadence-1.6.1 → qadence-1.6.3}/qadence/ml_tools/optimize_step.py +0 -0
  120. {qadence-1.6.1 → qadence-1.6.3}/qadence/ml_tools/parameters.py +0 -0
  121. {qadence-1.6.1 → qadence-1.6.3}/qadence/ml_tools/printing.py +0 -0
  122. {qadence-1.6.1 → qadence-1.6.3}/qadence/ml_tools/tensors.py +0 -0
  123. {qadence-1.6.1 → qadence-1.6.3}/qadence/ml_tools/train_grad.py +0 -0
  124. {qadence-1.6.1 → qadence-1.6.3}/qadence/ml_tools/train_no_grad.py +0 -0
  125. {qadence-1.6.1 → qadence-1.6.3}/qadence/ml_tools/utils.py +0 -0
  126. {qadence-1.6.1 → qadence-1.6.3}/qadence/models/__init__.py +0 -0
  127. {qadence-1.6.1 → qadence-1.6.3}/qadence/models/qnn.py +0 -0
  128. {qadence-1.6.1 → qadence-1.6.3}/qadence/models/quantum_model.py +0 -0
  129. {qadence-1.6.1 → qadence-1.6.3}/qadence/noise/__init__.py +0 -0
  130. {qadence-1.6.1 → qadence-1.6.3}/qadence/noise/protocols.py +0 -0
  131. {qadence-1.6.1 → qadence-1.6.3}/qadence/noise/readout.py +0 -0
  132. {qadence-1.6.1 → qadence-1.6.3}/qadence/operations/__init__.py +0 -0
  133. {qadence-1.6.1 → qadence-1.6.3}/qadence/operations/analog.py +0 -0
  134. {qadence-1.6.1 → qadence-1.6.3}/qadence/operations/control_ops.py +0 -0
  135. {qadence-1.6.1 → qadence-1.6.3}/qadence/operations/ham_evo.py +0 -0
  136. {qadence-1.6.1 → qadence-1.6.3}/qadence/operations/parametric.py +0 -0
  137. {qadence-1.6.1 → qadence-1.6.3}/qadence/operations/primitive.py +0 -0
  138. {qadence-1.6.1 → qadence-1.6.3}/qadence/overlap.py +0 -0
  139. {qadence-1.6.1 → qadence-1.6.3}/qadence/protocols.py +0 -0
  140. {qadence-1.6.1 → qadence-1.6.3}/qadence/py.typed +0 -0
  141. {qadence-1.6.1 → qadence-1.6.3}/qadence/qubit_support.py +0 -0
  142. {qadence-1.6.1 → qadence-1.6.3}/qadence/register.py +0 -0
  143. {qadence-1.6.1 → qadence-1.6.3}/qadence/serial_expr_grammar.peg +0 -0
  144. {qadence-1.6.1 → qadence-1.6.3}/qadence/serialization.py +0 -0
  145. {qadence-1.6.1 → qadence-1.6.3}/qadence/transpile/__init__.py +0 -0
  146. {qadence-1.6.1 → qadence-1.6.3}/qadence/transpile/apply_fn.py +0 -0
  147. {qadence-1.6.1 → qadence-1.6.3}/qadence/transpile/block.py +0 -0
  148. {qadence-1.6.1 → qadence-1.6.3}/qadence/transpile/circuit.py +0 -0
  149. {qadence-1.6.1 → qadence-1.6.3}/qadence/transpile/digitalize.py +0 -0
  150. {qadence-1.6.1 → qadence-1.6.3}/qadence/transpile/flatten.py +0 -0
  151. {qadence-1.6.1 → qadence-1.6.3}/qadence/transpile/invert.py +0 -0
  152. {qadence-1.6.1 → qadence-1.6.3}/qadence/transpile/transpile.py +0 -0
  153. {qadence-1.6.1 → qadence-1.6.3}/qadence/types.py +0 -0
  154. {qadence-1.6.1 → qadence-1.6.3}/qadence/utils.py +0 -0
  155. {qadence-1.6.1 → qadence-1.6.3}/renovate.json +0 -0
  156. {qadence-1.6.1 → qadence-1.6.3}/setup.py +0 -0
{qadence-1.6.1 → qadence-1.6.3}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: qadence
-Version: 1.6.1
+Version: 1.6.3
 Summary: Pasqal interface for circuit-based quantum computing SDKs
 Author-email: Aleksander Wennersteen <aleksander.wennersteen@pasqal.com>, Gert-Jan Both <gert-jan.both@pasqal.com>, Niklas Heim <niklas.heim@pasqal.com>, Mario Dagrada <mario.dagrada@pasqal.com>, Vincent Elfving <vincent.elfving@pasqal.com>, Dominik Seitz <dominik.seitz@pasqal.com>, Roland Guichard <roland.guichard@pasqal.com>, "Joao P. Moutinho" <joao.moutinho@pasqal.com>, Vytautas Abramavicius <vytautas.abramavicius@pasqal.com>, Gergana Velikova <gergana.velikova@pasqal.com>, Eduardo Maschio <eduardo.maschio@pasqal.com>
 License: Apache 2.0
@@ -22,7 +22,7 @@ Requires-Dist: matplotlib
 Requires-Dist: nevergrad
 Requires-Dist: numpy
 Requires-Dist: openfermion
-Requires-Dist: pyqtorch==1.1.2
+Requires-Dist: pyqtorch==1.2.1
 Requires-Dist: pyyaml
 Requires-Dist: rich
 Requires-Dist: scipy
@@ -53,7 +53,7 @@ Requires-Dist: qadence-libs; extra == 'libs'
 Provides-Extra: protocols
 Requires-Dist: qadence-protocols; extra == 'protocols'
 Provides-Extra: pulser
-Requires-Dist: pasqal-cloud==0.8.1; extra == 'pulser'
+Requires-Dist: pasqal-cloud==0.10.1; extra == 'pulser'
 Requires-Dist: pulser-core==0.18.1; extra == 'pulser'
 Requires-Dist: pulser-simulation==0.18.1; extra == 'pulser'
 Provides-Extra: visualization
@@ -149,6 +149,15 @@ brew install graphviz
 conda install python-graphviz
 ```
 
+On the Windows Subsystem for Linux (WSL2) it has been reported that in some cases "wslutilities" must be installed.
+Please follow the instructions [here](https://wslutiliti.es/wslu/install.html) for your flavour.
+For example, on Ubuntu 22.04 LTS and later you must run:
+```
+sudo add-apt-repository ppa:wslutilities/wslu
+sudo apt update
+sudo apt install wslu
+```
+
 ## Contributing
 
 Before making a contribution, please review our [code of conduct](docs/getting_started/CODE_OF_CONDUCT.md).
@@ -179,6 +188,8 @@ install it from source using `pip`:
 python -m pip install -e .
 ```
 
+Users have also reported problems running Hatch on Windows; we suggest using WSL2.
+
 ## Citation
 
 If you use Qadence for a publication, we kindly ask you to cite our work using the following BibTex entry:
{qadence-1.6.1 → qadence-1.6.3}/README.md

@@ -87,6 +87,15 @@ brew install graphviz
 conda install python-graphviz
 ```
 
+On the Windows Subsystem for Linux (WSL2) it has been reported that in some cases "wslutilities" must be installed.
+Please follow the instructions [here](https://wslutiliti.es/wslu/install.html) for your flavour.
+For example, on Ubuntu 22.04 LTS and later you must run:
+```
+sudo add-apt-repository ppa:wslutilities/wslu
+sudo apt update
+sudo apt install wslu
+```
+
 ## Contributing
 
 Before making a contribution, please review our [code of conduct](docs/getting_started/CODE_OF_CONDUCT.md).
@@ -117,6 +126,8 @@ install it from source using `pip`:
 python -m pip install -e .
 ```
 
+Users have also reported problems running Hatch on Windows; we suggest using WSL2.
+
 ## Citation
 
 If you use Qadence for a publication, we kindly ask you to cite our work using the following BibTex entry:
{qadence-1.6.1 → qadence-1.6.3}/pyproject.toml

@@ -21,7 +21,7 @@ authors = [
 ]
 requires-python = ">=3.9"
 license = {text = "Apache 2.0"}
-version = "1.6.1"
+version = "1.6.3"
 classifiers=[
   "License :: OSI Approved :: Apache Software License",
   "Programming Language :: Python",
@@ -44,7 +44,7 @@ dependencies = [
   "jsonschema",
   "nevergrad",
   "scipy",
-  "pyqtorch==1.1.2",
+  "pyqtorch==1.2.1",
   "pyyaml",
   "matplotlib",
   "Arpeggio==2.0.2",
@@ -55,7 +55,7 @@ allow-direct-references = true
 allow-ambiguous-features = true
 
 [project.optional-dependencies]
-pulser = ["pulser-core==0.18.1", "pulser-simulation==0.18.1","pasqal-cloud==0.8.1"]
+pulser = ["pulser-core==0.18.1", "pulser-simulation==0.18.1","pasqal-cloud==0.10.1"]
 braket = ["amazon-braket-sdk<1.71.2"]
 visualization = [
   "graphviz",
{qadence-1.6.1 → qadence-1.6.3}/qadence/backends/gpsr.py

@@ -48,7 +48,13 @@ def single_gap_psr(
     Returns:
         Tensor: tensor containing derivative values
     """
-
+    device = torch.device("cpu")
+    try:
+        device = [v.device for v in param_dict.values()][0]
+    except Exception:
+        pass
+    spectral_gap = spectral_gap.to(device=device)
+    shift = shift.to(device=device)
     # + pi/2 shift
     shifted_params = param_dict.copy()
     shifted_params[param_name] = shifted_params[param_name] + shift
@@ -89,11 +95,17 @@ def multi_gap_psr(
 
     # get shift values
     shifts = shift_prefac * torch.linspace(PI / 2 - PI / 5, PI / 2 + PI / 5, n_eqs)
-
+    device = torch.device("cpu")
+    try:
+        device = [v.device for v in param_dict.values()][0]
+    except Exception:
+        pass
+    spectral_gaps = spectral_gaps.to(device=device)
+    shifts = shifts.to(device=device)
     # calculate F vector and M matrix
     # (see: https://arxiv.org/pdf/2108.01218.pdf on p. 4 for definitions)
     F = []
-    M = torch.empty((n_eqs, n_eqs))
+    M = torch.empty((n_eqs, n_eqs)).to(device=device)
     n_obs = 1
     for i in range(n_eqs):
         # + shift
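
Both GPSR hunks above implement the same fix: the shift and spectral-gap tensors are moved onto whatever device the incoming parameter values live on, so CUDA runs no longer mix CPU and GPU tensors. A minimal standalone sketch of the pattern (the `infer_device` helper below is illustrative, not part of qadence):

```python
import torch

def infer_device(param_dict: dict[str, torch.Tensor]) -> torch.device:
    # Mirrors the try/except in the diff: fall back to CPU when the
    # dict is empty or its values carry no device information.
    try:
        return next(iter(param_dict.values())).device
    except (StopIteration, AttributeError):
        return torch.device("cpu")

shift = torch.tensor(torch.pi / 2)
params = {"theta": torch.zeros(4)}  # on GPU this would be e.g. device="cuda:0"
shift = shift.to(device=infer_device(params))  # keep the PSR shift co-located
```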
{qadence-1.6.1 → qadence-1.6.3}/qadence/backends/pyqtorch/backend.py

@@ -31,10 +31,10 @@ from qadence.transpile import (
     transpile,
 )
 from qadence.types import BackendName, Endianness, Engine
-from qadence.utils import infer_batchsize, int_to_basis
+from qadence.utils import infer_batchsize
 
 from .config import Configuration, default_passes
-from .convert_ops import convert_block, convert_observable
+from .convert_ops import convert_block
 
 logger = getLogger(__name__)
 
@@ -77,8 +77,13 @@ class Backend(BackendInterface):
             scale_primitive_blocks_only,
         ]
         block = transpile(*transpilations)(observable)  # type: ignore[call-overload]
-
-        (native,) = convert_observable(block, n_qubits=n_qubits, config=self.config)
+        operations = convert_block(block, n_qubits, self.config)
+        obs_cls = (
+            pyq.DiagonalObservable
+            if block._is_diag_pauli and not block.is_parametric
+            else pyq.Observable
+        )
+        native = obs_cls(n_qubits=n_qubits, operations=operations)
         return ConvertedObservable(native=native, abstract=block, original=observable)
 
     def run(
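
The rewritten observable conversion above dispatches on the observable's structure: a diagonal, non-parametric Pauli observable gets pyqtorch's specialized diagonal class, everything else the generic one. A toy sketch of that dispatch idiom (the stand-in classes below are hypothetical; only the `DiagonalObservable`/`Observable` names come from the diff):

```python
from dataclasses import dataclass

@dataclass
class Observable:  # stand-in for pyq.Observable
    n_qubits: int
    operations: list

class DiagonalObservable(Observable):  # stand-in for pyq.DiagonalObservable
    """Can skip full matrix-vector products because the matrix is diagonal."""

def pick_observable(n_qubits: int, operations: list,
                    is_diag_pauli: bool, is_parametric: bool) -> Observable:
    # Same condition as the diff: the fast path needs a fixed diagonal matrix.
    cls = DiagonalObservable if is_diag_pauli and not is_parametric else Observable
    return cls(n_qubits=n_qubits, operations=operations)

print(type(pick_observable(2, [], True, False)).__name__)  # DiagonalObservable
print(type(pick_observable(2, [], True, True)).__name__)   # Observable
```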
@@ -99,7 +104,7 @@ class Backend(BackendInterface):
         validate_state(state, n_qubits)
         # pyqtorch expects input shape [2] * n_qubits + [batch_size]
         state = pyqify(state, n_qubits) if pyqify_state else state
-        state = circuit.native.run(state, param_values)
+        state = circuit.native.run(state=state, values=param_values)
         state = unpyqify(state) if unpyqify_state else state
         state = invert_endianness(state) if endianness != self.native_endianness else state
         return state
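
For context, the `run` path patched above (now using keyword arguments `state=`/`values=` against the pyqtorch 1.2 API) is what qadence's high-level execution entry point drives. A minimal usage sketch against the standard qadence API (the output shape in the comment assumes a one-qubit circuit with batch size one):

```python
import torch
from qadence import QuantumCircuit, RX, FeatureParameter, run

phi = FeatureParameter("phi")
circuit = QuantumCircuit(1, RX(0, phi))

# run() converts the circuit for the default pyqtorch backend, embeds the
# parameter values and returns the final statevector.
wf = run(circuit, values={"phi": torch.tensor([0.5])})
print(wf.shape)  # expected: torch.Size([1, 2])
```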
@@ -208,46 +213,26 @@ class Backend(BackendInterface):
         noise: Noise | None = None,
         mitigation: Mitigations | None = None,
         endianness: Endianness = Endianness.BIG,
+        pyqify_state: bool = True,
     ) -> list[Counter]:
-        if n_shots < 1:
-            raise ValueError("You can only call sample with n_shots>0.")
-
-        def _sample(_probs: Tensor, n_shots: int, endianness: Endianness, n_qubits: int) -> Counter:
-            return Counter(
-                {
-                    int_to_basis(k=k, n_qubits=n_qubits, endianness=endianness): count.item()
-                    for k, count in enumerate(
-                        torch.bincount(
-                            torch.multinomial(input=_probs, num_samples=n_shots, replacement=True)
-                        )
-                    )
-                    if count > 0
-                }
-            )
-
-        with torch.no_grad():
-            wf = self.run(circuit=circuit, param_values=param_values, state=state)
-            probs = torch.abs(torch.pow(wf, 2))
-            samples = list(
-                map(
-                    lambda _probs: _sample(
-                        _probs=_probs,
-                        n_shots=n_shots,
-                        endianness=endianness,
-                        n_qubits=circuit.abstract.n_qubits,
-                    ),
-                    probs,
-                )
+        if state is None:
+            state = circuit.native.init_state(batch_size=infer_batchsize(param_values))
+        elif state is not None and pyqify_state:
+            n_qubits = circuit.abstract.n_qubits
+            state = pyqify(state, n_qubits) if pyqify_state else state
+        samples: list[Counter] = circuit.native.sample(
+            state=state, values=param_values, n_shots=n_shots
+        )
+        samples = invert_endianness(samples) if endianness != Endianness.BIG else samples
+        if noise is not None:
+            samples = apply_noise(noise=noise, samples=samples)
+        if mitigation is not None:
+            logger.warning(
+                "Mitigation protocol is deprecated. Use qadence-protocols instead.",
             )
-            if noise is not None:
-                samples = apply_noise(noise=noise, samples=samples)
-            if mitigation is not None:
-                logger.warning(
-                    "Mitigation protocol is deprecated. Use qadence-protocols instead.",
-                )
-                assert noise
-                samples = apply_mitigation(noise=noise, mitigation=mitigation, samples=samples)
-            return samples
+            assert noise
+            samples = apply_mitigation(noise=noise, mitigation=mitigation, samples=samples)
+        return samples
 
     def assign_parameters(self, circuit: ConvertedCircuit, param_values: dict[str, Tensor]) -> Any:
         raise NotImplementedError
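
Both `run` and the new delegating `sample` convert incoming states with `pyqify` because pyqtorch stores a wavefunction as a rank-(n_qubits+1) tensor of shape `[2] * n_qubits + [batch_size]` rather than `[batch_size, 2**n_qubits]`. A sketch of that reshape (a hypothetical helper; qadence's own `pyqify` lives in `qadence.backends.utils`):

```python
import torch

def pyqify_sketch(state: torch.Tensor, n_qubits: int) -> torch.Tensor:
    # [batch_size, 2**n_qubits] -> [2] * n_qubits + [batch_size]
    batch_size = state.size(0)
    return state.T.reshape([2] * n_qubits + [batch_size])

wf = torch.zeros(3, 4, dtype=torch.cdouble)  # batch of 3 two-qubit states
wf[:, 0] = 1.0                               # all prepared in |00>
print(pyqify_sketch(wf, 2).shape)            # torch.Size([2, 2, 3])
```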
{qadence-1.6.1 → qadence-1.6.3}/qadence/backends/pyqtorch/convert_ops.py

@@ -1,10 +1,8 @@
 from __future__ import annotations
 
-from functools import reduce
 from itertools import chain as flatten
 from math import prod
-from operator import add
-from typing import Any, Iterable, Sequence, Tuple
+from typing import Any, Sequence, Tuple
 
 import pyqtorch as pyq
 import sympy
@@ -13,15 +11,12 @@ from pyqtorch.matrices import _dagger
 from pyqtorch.utils import is_diag
 from torch import (
     Tensor,
-    argsort,
-    bmm,
     cdouble,
     diag_embed,
     diagonal,
     exp,
+    float64,
     linalg,
-    ones_like,
-    permute,
     tensor,
     transpose,
 )
@@ -31,8 +26,6 @@ from torch.nn import Module
 
 from qadence.backends.utils import (
     finitediff,
-    pyqify,
-    unpyqify,
 )
 from qadence.blocks import (
     AbstractBlock,
@@ -45,11 +38,7 @@ from qadence.blocks import (
     ScaleBlock,
     TimeEvolutionBlock,
 )
-from qadence.blocks.block_to_tensor import (
-    _block_to_tensor_embedded,
-    block_to_diagonal,
-    block_to_tensor,
-)
+from qadence.blocks.block_to_tensor import _block_to_tensor_embedded, block_to_tensor
 from qadence.blocks.primitive import ProjectorBlock
 from qadence.operations import (
     U,
@@ -60,7 +49,6 @@ from qadence.operations import (
     two_qubit_gateset,
 )
 from qadence.types import OpName
-from qadence.utils import infer_batchsize
 
 from .config import Configuration
 
@@ -81,15 +69,13 @@ def is_single_qubit_chain(block: AbstractBlock) -> bool:
     )
 
 
-def convert_observable(
-    block: AbstractBlock, n_qubits: int, config: Configuration = None
-) -> Sequence[Module]:
-    return [PyQObservable(block, n_qubits, config)]
-
-
 def convert_block(
     block: AbstractBlock, n_qubits: int = None, config: Configuration = None
-) -> Sequence[Module]:
+) -> Sequence[Module | Tensor | str | sympy.Expr]:
+    if isinstance(block, (Tensor, str, sympy.Expr)):  # case for hamevo generators
+        if isinstance(block, Tensor):
+            block = block.permute(1, 2, 0)  # put batch size in the back
+        return [block]
     qubit_support = block.qubit_support
     if n_qubits is None:
         n_qubits = max(qubit_support) + 1
@@ -98,37 +84,40 @@
         config = Configuration()
 
     if isinstance(block, ScaleBlock):
-        return [ScalePyQOperation(n_qubits, block, config)]
-
-    elif isinstance(block, AddBlock):
-        ops = list(flatten(*(convert_block(b, n_qubits, config) for b in block.blocks)))
-        return [AddPyQOperation(n_qubits, ops)]
+        scaled_ops = convert_block(block.block, n_qubits, config)
+        scale = (
+            tensor([block.parameters.parameter], dtype=float64)
+            if not block.is_parametric
+            else config.get_param_name(block)[0]
+        )
+        return [pyq.Scale(pyq.Sequence(scaled_ops), scale)]
 
     elif isinstance(block, TimeEvolutionBlock):
-        return [
-            PyQHamiltonianEvolution(
-                qubit_support=qubit_support,
-                n_qubits=n_qubits,
-                block=block,
-                config=config,
-            )
-        ]
+        # TODO add native pyq hamevo
+        # generator = convert_block(block.generator, n_qubits, config)[0]  # type: ignore[arg-type]
+        # time_param = config.get_param_name(block)[0]
+        # is_parametric = (
+        #     block.generator.is_parametric if isinstance(block.generator, AbstractBlock) else False
+        # )
+        # return [
+        #     pyq.HamiltonianEvolution(
+        #         qubit_support=qubit_support,
+        #         generator=generator,
+        #         time=time_param,
+        #         generator_parametric=is_parametric,  # type: ignore[union-attr]
+        #     )
+        # ]
+        return [PyQHamiltonianEvolution(qubit_support, n_qubits, block, config)]
    elif isinstance(block, MatrixBlock):
-        return [PyQMatrixBlock(block, n_qubits, config)]
+        return [pyq.primitive.Primitive(block.matrix, block.qubit_support)]
    elif isinstance(block, CompositeBlock):
         ops = list(flatten(*(convert_block(b, n_qubits, config) for b in block.blocks)))
-        if is_single_qubit_chain(block) and config.use_single_qubit_composition:
-            return [PyQComposedBlock(ops, qubit_support, n_qubits, config)]
+        if isinstance(block, AddBlock):
+            return [pyq.Add(ops)]  # add
+        elif is_single_qubit_chain(block) and config.use_single_qubit_composition:
+            return [pyq.Merge(ops)]  # for chains of single qubit ops on the same qubit
         else:
-            # NOTE: without wrapping in a pyq.QuantumCircuit here the kron/chain
-            # blocks won't be properly nested which leads to incorrect results from
-            # the `AddBlock`s. For example:
-            # add(chain(Z(0), Z(1))) has to result in the following (pseudo-code)
-            # AddPyQOperation(pyq.QuantumCircuit(Z, Z))
-            # as opposed to
-            # AddPyQOperation(Z, Z)
-            # which would be wrong.
-            return [pyq.QuantumCircuit(n_qubits, ops)]
+            return [pyq.Sequence(ops)]  # for kron and chain
    elif isinstance(block, tuple(non_unitary_gateset)):
         if isinstance(block, ProjectorBlock):
             projector = getattr(pyq, block.name)
@@ -161,7 +150,10 @@
         if isinstance(block, ParametricBlock):
             op = pyq_cls(qubit_support[:-1], qubit_support[-1], config.get_param_name(block)[0])
         else:
-            op = pyq_cls(qubit_support[:-1], qubit_support[-1])
+            if "CSWAP" in block_name:
+                op = pyq_cls(qubit_support[:-2], qubit_support[-2:])
+            else:
+                op = pyq_cls(qubit_support[:-1], qubit_support[-1])
         return [op]
     else:
         raise NotImplementedError(
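
The composite-block conversion above maps `AddBlock` to `pyq.Add`, single-qubit chains to `pyq.Merge`, and the remaining chain/kron compositions to `pyq.Sequence`. The semantic difference between the first and the last is worth spelling out; a self-contained toy version using plain matrices rather than the pyqtorch classes:

```python
import torch

Z = torch.diag(torch.tensor([1.0, -1.0], dtype=torch.cdouble))
X = torch.tensor([[0.0, 1.0], [1.0, 0.0]], dtype=torch.cdouble)

def sequence(ops: list, psi: torch.Tensor) -> torch.Tensor:
    # chain semantics: operators applied one after the other
    for op in ops:
        psi = op @ psi
    return psi

def add(ops: list, psi: torch.Tensor) -> torch.Tensor:
    # AddBlock semantics: sum of the individual applications (non-unitary)
    return sum(op @ psi for op in ops)

psi = torch.tensor([1.0, 0.0], dtype=torch.cdouble)
print(sequence([Z, X], psi))  # X @ (Z @ psi) -> [0, 1]
print(add([Z, X], psi))       # Z @ psi + X @ psi -> [1, 1]
```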
@@ -171,147 +163,6 @@
     )
 
 
-class PyQMatrixBlock(Module):
-    def __init__(self, block: MatrixBlock, n_qubits: int, config: Configuration = None):
-        super().__init__()
-        self.n_qubits = n_qubits
-        self.qubits = block.qubit_support
-        self.register_buffer("mat", block.matrix.unsqueeze(2))
-        self.mat: Tensor
-        self._device: torch_device = self.mat.device
-        self._dtype: torch_dtype = self.mat.dtype
-
-    def forward(self, state: Tensor, _: dict[str, Tensor] = None) -> Tensor:
-        return apply_operator(state, self.mat, self.qubits, self.n_qubits)
-
-    @property
-    def device(self) -> torch_device:
-        return self._device
-
-    def to(self, *args: Any, **kwargs: Any) -> PyQMatrixBlock:
-        self.mat = self.mat.to(*args, **kwargs)
-        self._device = self.mat.device
-        self._dtype = self.mat.dtype
-        return self
-
-
-class PyQComposedBlock(pyq.QuantumCircuit):
-    def __init__(
-        self,
-        ops: list[Module],
-        qubits: Tuple[int, ...],
-        n_qubits: int,
-        config: Configuration = None,
-    ):
-        """
-        Merge operations that are adjacent and have identical qubit_support.
-
-        It results in fewer calls of apply_operator.
-        """
-        super().__init__(n_qubits, ops)
-        self.qubits = qubits
-        self.merged_qubits_support = [
-            grouped_op[-1].qubit_support for grouped_op in self.grouped_operations()
-        ]
-
-    def grouped_operations(self) -> list[list[Module]]:
-        # takes a list of operations and groups adjacent operations into sublists
-        # if those operations have the same control qubits
-        def _sublist_grouper(x: Iterable[list[Module]], y: Module) -> list[list[Module]]:
-            # Appends the element y to the last sublist in the list x
-            # if they have the same qubit_support.
-            # Appends the element y as a new sublist to x if it has a different qubit domain.
-            x = list(x)
-            if y.qubit_support == x[-1][-1].qubit_support:
-                x[-1].append(y)
-                return x
-            else:
-                x.append([y])
-                return x
-
-        return list(reduce(_sublist_grouper, iter(self.operations[1:]), [[self.operations[0]]]))
-
-    def merged_unitary(self, values: dict[str, Tensor] | None, batch_size: int) -> list[Tensor]:
-        # compute the tensor multiplication of each group of operations
-        batch_first_perm = (2, 0, 1)
-        undo_perm = tuple(argsort(tensor(batch_first_perm)))
-
-        def _expand(m: Tensor) -> Tensor:
-            if len(m.size()) == 2:
-                m = m.unsqueeze(2).repeat(
-                    1, 1, batch_size
-                )  # Primitive gates are 2D, so we expand them.
-            elif m.shape != (2, 2, batch_size) and m.shape != (4, 4, batch_size):
-                m = m.repeat(1, 1, batch_size)  # In case a 3D tensor doesn't have batch_size.
-            return m
-
-        def _batch_first(m: Tensor) -> Tensor:
-            return permute(m, batch_first_perm)  # This returns shape (batch_size, 2, 2)
-
-        def _batch_last(m: Tensor) -> Tensor:
-            return permute(
-                m, undo_perm
-            )  # We need to undo the permute since PyQ expects (2, 2, batch_size).
-
-        def _list_wise_bmm(ops: list[Module]) -> Tensor:
-            # Takes a list of operations and applies torch.bmm to all the unitaries of the list
-            return _batch_last(
-                reduce(bmm, [_batch_first(_expand(op.unitary(values))) for op in reversed(ops)])
-            )  # We reverse the list of tensors here since matmul is not commutative.
-
-        return list(map(_list_wise_bmm, reversed(self.grouped_operations())))[::-1]
-
-    def forward(self, state: Tensor, values: dict[str, Tensor] | None = None) -> Tensor:
-        # compute evolution of the state by the list of operations
-        batch_size = infer_batchsize(values)
-        return reduce(
-            lambda y, x: apply_operator(state=y, operator=x[0], qubits=x[1]),
-            zip(self.merged_unitary(values, batch_size), self.merged_qubits_support),
-            state,
-        )
-
-
-class PyQObservable(Module):
-    def __init__(self, block: AbstractBlock, n_qubits: int, config: Configuration = None):
-        super().__init__()
-        if config is None:
-            config = Configuration()
-        self.n_qubits = n_qubits
-        if block._is_diag_pauli and not block.is_parametric:
-            self.register_buffer("operation", block_to_diagonal(block, tuple(range(n_qubits))))
-            self._forward = lambda self, state, values: pyqify(
-                self.operation * unpyqify(state), n_qubits=self.n_qubits
-            )
-        else:
-            self.operation = pyq.QuantumCircuit(
-                n_qubits,
-                convert_block(block, n_qubits, config),
-            )
-            self._forward = lambda self, state, values: self.operation(state, values)
-        self._device = self.operation.device
-        self._dtype = self.operation.dtype
-
-    def run(self, state: Tensor, values: dict[str, Tensor]) -> Tensor:
-        return self._forward(self, state, values)
-
-    def forward(self, state: Tensor, values: dict[str, Tensor]) -> Tensor:
-        return pyq.inner_prod(state, self.run(state, values)).real
-
-    @property
-    def device(self) -> torch_device:
-        return self._device
-
-    @property
-    def dtype(self) -> torch_dtype:
-        return self._dtype
-
-    def to(self, *args: Any, **kwargs: Any) -> PyQObservable:
-        self.operation = self.operation.to(*args, **kwargs)
-        self._device = self.operation.device
-        self._dtype = self.operation.dtype
-        return self
-
-
 class PyQHamiltonianEvolution(Module):
     def __init__(
         self,
@@ -463,39 +314,3 @@ class PyQHamiltonianEvolution(Module):
         self._device = self.hmat.device
         self._dtype = self.hmat.dtype
         return self
-
-
-class AddPyQOperation(pyq.QuantumCircuit):
-    def __init__(self, n_qubits: int, operations: list[Module]):
-        super().__init__(n_qubits=n_qubits, operations=operations)
-
-    def forward(self, state: Tensor, values: dict[str, Tensor]) -> Tensor:
-        return reduce(add, (op(state, values) for op in self.operations))
-
-
-class ScalePyQOperation(pyq.QuantumCircuit):
-    def __init__(self, n_qubits: int, block: ScaleBlock, config: Configuration):
-        if not isinstance(block.block, PrimitiveBlock):
-            raise NotImplementedError(
-                "The pyqtorch backend can currently only scale `PrimitiveBlock` types.\
-                Please use the following transpile function on your circuit first:\
-                from qadence.transpile import scale_primitive_blocks_only"
-            )
-        ops = convert_block(block.block, n_qubits, config)
-        assert len(ops) == 1
-        super().__init__(n_qubits, ops)
-        (self.param_name,) = config.get_param_name(block)
-        self.qubit_support = self.operations[0].qubit_support
-
-    def forward(self, state: Tensor, values: dict[str, Tensor]) -> Tensor:
-        return apply_operator(state, self.unitary(values), self.qubit_support, self.n_qubits)
-
-    def unitary(self, values: dict[str, Tensor]) -> Tensor:
-        thetas = values[self.param_name]
-        return thetas * self.operations[0].unitary(values)
-
-    def dagger(self, values: dict[str, Tensor]) -> Tensor:
-        return _dagger(self.unitary(values))
-
-    def jacobian(self, values: dict[str, Tensor]) -> Tensor:
-        return values[self.param_name] * ones_like(self.unitary(values))
{qadence-1.6.1 → qadence-1.6.3}/qadence/blocks/matrix.py

@@ -22,7 +22,7 @@ class MatrixBlock(PrimitiveBlock):
 
     Examples:
     ```python exec="on" source="material-block" result="json"
-     import torch
+    import torch
 
     from qadence.circuit import QuantumCircuit
     from qadence.types import BackendName, DiffMode
{qadence-1.6.1 → qadence-1.6.3}/qadence/engines/torch/differentiable_expectation.py

@@ -6,12 +6,12 @@ from functools import partial
 from typing import Any, Callable, Sequence
 
 import torch
+from pyqtorch.adjoint import AdjointExpectation
 from torch import Tensor
 from torch.autograd import Function
 
 from qadence.backend import Backend as QuantumBackend
 from qadence.backend import ConvertedCircuit, ConvertedObservable
-from qadence.backends.adjoint import AdjointExpectation
 from qadence.backends.utils import infer_batchsize, is_pyq_shape, param_dict, pyqify, validate_state
 from qadence.blocks.abstract import AbstractBlock
 from qadence.blocks.utils import uuid_to_eigen
{qadence-1.6.1 → qadence-1.6.3}/qadence/ml_tools/saveload.py

@@ -120,10 +120,7 @@ def load_model(
     except Exception as e:
         msg = f"Unable to load state dict due to {e}.\
         No corresponding pre-trained model found. Returning the un-trained model."
-        import warnings
-
-        warnings.warn(msg, UserWarning)
-        logger.warn(msg)
+        logger.warning(msg)
     return model, iteration
 
 
{qadence-1.6.1 → qadence-1.6.3}/qadence/parameters.py

@@ -16,7 +16,7 @@ from torch import Tensor, heaviside, no_grad, rand, tensor
 from qadence.types import DifferentiableExpression, Engine, TNumber
 
 # Modules to be automatically added to the qadence namespace
-__all__ = ["FeatureParameter", "Parameter", "VariationalParameter"]
+__all__ = ["FeatureParameter", "Parameter", "VariationalParameter", "ParamMap"]
 
 logger = getLogger(__name__)
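
With `ParamMap` now re-exported, all four names in the new `__all__` are importable straight from the qadence namespace. A short usage sketch (the keyword-argument style of `ParamMap` follows how qadence's analog constructors use it; treat the exact call as illustrative):

```python
from qadence.parameters import FeatureParameter, ParamMap, Parameter, VariationalParameter

phi = FeatureParameter("phi")          # fed in through the values dict at run time
theta = VariationalParameter("theta")  # trainable, gets a random initial value
omega = Parameter("omega", trainable=False)

# Parameters compose into sympy expressions usable inside parametric gates:
expr = 2 * theta * phi + omega
print(expr.free_symbols)  # {theta, phi, omega}

# ParamMap bundles named parameter expressions under one object:
pmap = ParamMap(omega=omega, phase=phi)
```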