qadence 1.10.1__tar.gz → 1.10.3__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (162)
  1. {qadence-1.10.1 → qadence-1.10.3}/PKG-INFO +9 -6
  2. {qadence-1.10.1 → qadence-1.10.3}/README.md +4 -1
  3. {qadence-1.10.1 → qadence-1.10.3}/mkdocs.yml +3 -0
  4. {qadence-1.10.1 → qadence-1.10.3}/pyproject.toml +7 -6
  5. {qadence-1.10.1 → qadence-1.10.3}/qadence/backends/horqrux/convert_ops.py +1 -1
  6. {qadence-1.10.1 → qadence-1.10.3}/qadence/blocks/block_to_tensor.py +8 -8
  7. {qadence-1.10.1 → qadence-1.10.3}/qadence/blocks/matrix.py +4 -0
  8. {qadence-1.10.1 → qadence-1.10.3}/qadence/ml_tools/__init__.py +1 -0
  9. {qadence-1.10.1 → qadence-1.10.3}/qadence/ml_tools/config.py +23 -4
  10. {qadence-1.10.1 → qadence-1.10.3}/qadence/ml_tools/constructors.py +56 -1
  11. qadence-1.10.3/qadence/ml_tools/information/__init__.py +3 -0
  12. qadence-1.10.3/qadence/ml_tools/information/information_content.py +339 -0
  13. {qadence-1.10.1 → qadence-1.10.3}/qadence/ml_tools/trainer.py +106 -1
  14. {qadence-1.10.1 → qadence-1.10.3}/qadence/operations/primitive.py +0 -4
  15. {qadence-1.10.1 → qadence-1.10.3}/qadence/states.py +27 -5
  16. {qadence-1.10.1 → qadence-1.10.3}/qadence/types.py +2 -0
  17. {qadence-1.10.1 → qadence-1.10.3}/qadence/utils.py +0 -2
  18. {qadence-1.10.1 → qadence-1.10.3}/.coveragerc +0 -0
  19. {qadence-1.10.1 → qadence-1.10.3}/.github/ISSUE_TEMPLATE/bug-report.yml +0 -0
  20. {qadence-1.10.1 → qadence-1.10.3}/.github/ISSUE_TEMPLATE/config.yml +0 -0
  21. {qadence-1.10.1 → qadence-1.10.3}/.github/ISSUE_TEMPLATE/new-feature.yml +0 -0
  22. {qadence-1.10.1 → qadence-1.10.3}/.github/workflows/build_docs.yml +0 -0
  23. {qadence-1.10.1 → qadence-1.10.3}/.github/workflows/lint.yml +0 -0
  24. {qadence-1.10.1 → qadence-1.10.3}/.github/workflows/test_all.yml +0 -0
  25. {qadence-1.10.1 → qadence-1.10.3}/.github/workflows/test_examples.yml +0 -0
  26. {qadence-1.10.1 → qadence-1.10.3}/.github/workflows/test_fast.yml +0 -0
  27. {qadence-1.10.1 → qadence-1.10.3}/.gitignore +0 -0
  28. {qadence-1.10.1 → qadence-1.10.3}/.pre-commit-config.yaml +0 -0
  29. {qadence-1.10.1 → qadence-1.10.3}/LICENSE +0 -0
  30. {qadence-1.10.1 → qadence-1.10.3}/MANIFEST.in +0 -0
  31. {qadence-1.10.1 → qadence-1.10.3}/qadence/__init__.py +0 -0
  32. {qadence-1.10.1 → qadence-1.10.3}/qadence/analog/__init__.py +0 -0
  33. {qadence-1.10.1 → qadence-1.10.3}/qadence/analog/addressing.py +0 -0
  34. {qadence-1.10.1 → qadence-1.10.3}/qadence/analog/constants.py +0 -0
  35. {qadence-1.10.1 → qadence-1.10.3}/qadence/analog/device.py +0 -0
  36. {qadence-1.10.1 → qadence-1.10.3}/qadence/analog/hamiltonian_terms.py +0 -0
  37. {qadence-1.10.1 → qadence-1.10.3}/qadence/analog/parse_analog.py +0 -0
  38. {qadence-1.10.1 → qadence-1.10.3}/qadence/backend.py +0 -0
  39. {qadence-1.10.1 → qadence-1.10.3}/qadence/backends/__init__.py +0 -0
  40. {qadence-1.10.1 → qadence-1.10.3}/qadence/backends/api.py +0 -0
  41. {qadence-1.10.1 → qadence-1.10.3}/qadence/backends/gpsr.py +0 -0
  42. {qadence-1.10.1 → qadence-1.10.3}/qadence/backends/horqrux/__init__.py +0 -0
  43. {qadence-1.10.1 → qadence-1.10.3}/qadence/backends/horqrux/backend.py +0 -0
  44. {qadence-1.10.1 → qadence-1.10.3}/qadence/backends/horqrux/config.py +0 -0
  45. {qadence-1.10.1 → qadence-1.10.3}/qadence/backends/jax_utils.py +0 -0
  46. {qadence-1.10.1 → qadence-1.10.3}/qadence/backends/pulser/__init__.py +0 -0
  47. {qadence-1.10.1 → qadence-1.10.3}/qadence/backends/pulser/backend.py +0 -0
  48. {qadence-1.10.1 → qadence-1.10.3}/qadence/backends/pulser/channels.py +0 -0
  49. {qadence-1.10.1 → qadence-1.10.3}/qadence/backends/pulser/cloud.py +0 -0
  50. {qadence-1.10.1 → qadence-1.10.3}/qadence/backends/pulser/config.py +0 -0
  51. {qadence-1.10.1 → qadence-1.10.3}/qadence/backends/pulser/convert_ops.py +0 -0
  52. {qadence-1.10.1 → qadence-1.10.3}/qadence/backends/pulser/devices.py +0 -0
  53. {qadence-1.10.1 → qadence-1.10.3}/qadence/backends/pulser/pulses.py +0 -0
  54. {qadence-1.10.1 → qadence-1.10.3}/qadence/backends/pulser/waveforms.py +0 -0
  55. {qadence-1.10.1 → qadence-1.10.3}/qadence/backends/pyqtorch/__init__.py +0 -0
  56. {qadence-1.10.1 → qadence-1.10.3}/qadence/backends/pyqtorch/backend.py +0 -0
  57. {qadence-1.10.1 → qadence-1.10.3}/qadence/backends/pyqtorch/config.py +0 -0
  58. {qadence-1.10.1 → qadence-1.10.3}/qadence/backends/pyqtorch/convert_ops.py +0 -0
  59. {qadence-1.10.1 → qadence-1.10.3}/qadence/backends/utils.py +0 -0
  60. {qadence-1.10.1 → qadence-1.10.3}/qadence/blocks/__init__.py +0 -0
  61. {qadence-1.10.1 → qadence-1.10.3}/qadence/blocks/abstract.py +0 -0
  62. {qadence-1.10.1 → qadence-1.10.3}/qadence/blocks/analog.py +0 -0
  63. {qadence-1.10.1 → qadence-1.10.3}/qadence/blocks/composite.py +0 -0
  64. {qadence-1.10.1 → qadence-1.10.3}/qadence/blocks/embedding.py +0 -0
  65. {qadence-1.10.1 → qadence-1.10.3}/qadence/blocks/manipulate.py +0 -0
  66. {qadence-1.10.1 → qadence-1.10.3}/qadence/blocks/primitive.py +0 -0
  67. {qadence-1.10.1 → qadence-1.10.3}/qadence/blocks/utils.py +0 -0
  68. {qadence-1.10.1 → qadence-1.10.3}/qadence/circuit.py +0 -0
  69. {qadence-1.10.1 → qadence-1.10.3}/qadence/constructors/__init__.py +0 -0
  70. {qadence-1.10.1 → qadence-1.10.3}/qadence/constructors/ala.py +0 -0
  71. {qadence-1.10.1 → qadence-1.10.3}/qadence/constructors/daqc/__init__.py +0 -0
  72. {qadence-1.10.1 → qadence-1.10.3}/qadence/constructors/daqc/daqc.py +0 -0
  73. {qadence-1.10.1 → qadence-1.10.3}/qadence/constructors/daqc/gen_parser.py +0 -0
  74. {qadence-1.10.1 → qadence-1.10.3}/qadence/constructors/daqc/utils.py +0 -0
  75. {qadence-1.10.1 → qadence-1.10.3}/qadence/constructors/feature_maps.py +0 -0
  76. {qadence-1.10.1 → qadence-1.10.3}/qadence/constructors/hamiltonians.py +0 -0
  77. {qadence-1.10.1 → qadence-1.10.3}/qadence/constructors/hea.py +0 -0
  78. {qadence-1.10.1 → qadence-1.10.3}/qadence/constructors/iia.py +0 -0
  79. {qadence-1.10.1 → qadence-1.10.3}/qadence/constructors/qft.py +0 -0
  80. {qadence-1.10.1 → qadence-1.10.3}/qadence/constructors/rydberg_feature_maps.py +0 -0
  81. {qadence-1.10.1 → qadence-1.10.3}/qadence/constructors/rydberg_hea.py +0 -0
  82. {qadence-1.10.1 → qadence-1.10.3}/qadence/constructors/utils.py +0 -0
  83. {qadence-1.10.1 → qadence-1.10.3}/qadence/decompose.py +0 -0
  84. {qadence-1.10.1 → qadence-1.10.3}/qadence/divergences.py +0 -0
  85. {qadence-1.10.1 → qadence-1.10.3}/qadence/draw/__init__.py +0 -0
  86. {qadence-1.10.1 → qadence-1.10.3}/qadence/draw/assets/dark/measurement.png +0 -0
  87. {qadence-1.10.1 → qadence-1.10.3}/qadence/draw/assets/dark/measurement.svg +0 -0
  88. {qadence-1.10.1 → qadence-1.10.3}/qadence/draw/assets/light/measurement.png +0 -0
  89. {qadence-1.10.1 → qadence-1.10.3}/qadence/draw/assets/light/measurement.svg +0 -0
  90. {qadence-1.10.1 → qadence-1.10.3}/qadence/draw/themes.py +0 -0
  91. {qadence-1.10.1 → qadence-1.10.3}/qadence/draw/utils.py +0 -0
  92. {qadence-1.10.1 → qadence-1.10.3}/qadence/draw/vizbackend.py +0 -0
  93. {qadence-1.10.1 → qadence-1.10.3}/qadence/engines/__init__.py +0 -0
  94. {qadence-1.10.1 → qadence-1.10.3}/qadence/engines/differentiable_backend.py +0 -0
  95. {qadence-1.10.1 → qadence-1.10.3}/qadence/engines/jax/__init__.py +0 -0
  96. {qadence-1.10.1 → qadence-1.10.3}/qadence/engines/jax/differentiable_backend.py +0 -0
  97. {qadence-1.10.1 → qadence-1.10.3}/qadence/engines/jax/differentiable_expectation.py +0 -0
  98. {qadence-1.10.1 → qadence-1.10.3}/qadence/engines/torch/__init__.py +0 -0
  99. {qadence-1.10.1 → qadence-1.10.3}/qadence/engines/torch/differentiable_backend.py +0 -0
  100. {qadence-1.10.1 → qadence-1.10.3}/qadence/engines/torch/differentiable_expectation.py +0 -0
  101. {qadence-1.10.1 → qadence-1.10.3}/qadence/exceptions/__init__.py +0 -0
  102. {qadence-1.10.1 → qadence-1.10.3}/qadence/exceptions/exceptions.py +0 -0
  103. {qadence-1.10.1 → qadence-1.10.3}/qadence/execution.py +0 -0
  104. {qadence-1.10.1 → qadence-1.10.3}/qadence/extensions.py +0 -0
  105. {qadence-1.10.1 → qadence-1.10.3}/qadence/libs.py +0 -0
  106. {qadence-1.10.1 → qadence-1.10.3}/qadence/log_config.yaml +0 -0
  107. {qadence-1.10.1 → qadence-1.10.3}/qadence/logger.py +0 -0
  108. {qadence-1.10.1 → qadence-1.10.3}/qadence/measurements/__init__.py +0 -0
  109. {qadence-1.10.1 → qadence-1.10.3}/qadence/measurements/protocols.py +0 -0
  110. {qadence-1.10.1 → qadence-1.10.3}/qadence/measurements/samples.py +0 -0
  111. {qadence-1.10.1 → qadence-1.10.3}/qadence/measurements/shadow.py +0 -0
  112. {qadence-1.10.1 → qadence-1.10.3}/qadence/measurements/tomography.py +0 -0
  113. {qadence-1.10.1 → qadence-1.10.3}/qadence/measurements/utils.py +0 -0
  114. {qadence-1.10.1 → qadence-1.10.3}/qadence/mitigations/__init__.py +0 -0
  115. {qadence-1.10.1 → qadence-1.10.3}/qadence/mitigations/analog_zne.py +0 -0
  116. {qadence-1.10.1 → qadence-1.10.3}/qadence/mitigations/protocols.py +0 -0
  117. {qadence-1.10.1 → qadence-1.10.3}/qadence/mitigations/readout.py +0 -0
  118. {qadence-1.10.1 → qadence-1.10.3}/qadence/ml_tools/callbacks/__init__.py +0 -0
  119. {qadence-1.10.1 → qadence-1.10.3}/qadence/ml_tools/callbacks/callback.py +0 -0
  120. {qadence-1.10.1 → qadence-1.10.3}/qadence/ml_tools/callbacks/callbackmanager.py +0 -0
  121. {qadence-1.10.1 → qadence-1.10.3}/qadence/ml_tools/callbacks/saveload.py +0 -0
  122. {qadence-1.10.1 → qadence-1.10.3}/qadence/ml_tools/callbacks/writer_registry.py +0 -0
  123. {qadence-1.10.1 → qadence-1.10.3}/qadence/ml_tools/data.py +0 -0
  124. {qadence-1.10.1 → qadence-1.10.3}/qadence/ml_tools/loss/__init__.py +0 -0
  125. {qadence-1.10.1 → qadence-1.10.3}/qadence/ml_tools/loss/loss.py +0 -0
  126. {qadence-1.10.1 → qadence-1.10.3}/qadence/ml_tools/models.py +0 -0
  127. {qadence-1.10.1 → qadence-1.10.3}/qadence/ml_tools/optimize_step.py +0 -0
  128. {qadence-1.10.1 → qadence-1.10.3}/qadence/ml_tools/parameters.py +0 -0
  129. {qadence-1.10.1 → qadence-1.10.3}/qadence/ml_tools/stages.py +0 -0
  130. {qadence-1.10.1 → qadence-1.10.3}/qadence/ml_tools/tensors.py +0 -0
  131. {qadence-1.10.1 → qadence-1.10.3}/qadence/ml_tools/train_utils/__init__.py +0 -0
  132. {qadence-1.10.1 → qadence-1.10.3}/qadence/ml_tools/train_utils/base_trainer.py +0 -0
  133. {qadence-1.10.1 → qadence-1.10.3}/qadence/ml_tools/train_utils/config_manager.py +0 -0
  134. {qadence-1.10.1 → qadence-1.10.3}/qadence/ml_tools/utils.py +0 -0
  135. {qadence-1.10.1 → qadence-1.10.3}/qadence/model.py +0 -0
  136. {qadence-1.10.1 → qadence-1.10.3}/qadence/noise/__init__.py +0 -0
  137. {qadence-1.10.1 → qadence-1.10.3}/qadence/noise/protocols.py +0 -0
  138. {qadence-1.10.1 → qadence-1.10.3}/qadence/operations/__init__.py +0 -0
  139. {qadence-1.10.1 → qadence-1.10.3}/qadence/operations/analog.py +0 -0
  140. {qadence-1.10.1 → qadence-1.10.3}/qadence/operations/control_ops.py +0 -0
  141. {qadence-1.10.1 → qadence-1.10.3}/qadence/operations/ham_evo.py +0 -0
  142. {qadence-1.10.1 → qadence-1.10.3}/qadence/operations/parametric.py +0 -0
  143. {qadence-1.10.1 → qadence-1.10.3}/qadence/overlap.py +0 -0
  144. {qadence-1.10.1 → qadence-1.10.3}/qadence/parameters.py +0 -0
  145. {qadence-1.10.1 → qadence-1.10.3}/qadence/pasqal_cloud_connection.py +0 -0
  146. {qadence-1.10.1 → qadence-1.10.3}/qadence/protocols.py +0 -0
  147. {qadence-1.10.1 → qadence-1.10.3}/qadence/py.typed +0 -0
  148. {qadence-1.10.1 → qadence-1.10.3}/qadence/qubit_support.py +0 -0
  149. {qadence-1.10.1 → qadence-1.10.3}/qadence/register.py +0 -0
  150. {qadence-1.10.1 → qadence-1.10.3}/qadence/serial_expr_grammar.peg +0 -0
  151. {qadence-1.10.1 → qadence-1.10.3}/qadence/serialization.py +0 -0
  152. {qadence-1.10.1 → qadence-1.10.3}/qadence/transpile/__init__.py +0 -0
  153. {qadence-1.10.1 → qadence-1.10.3}/qadence/transpile/apply_fn.py +0 -0
  154. {qadence-1.10.1 → qadence-1.10.3}/qadence/transpile/block.py +0 -0
  155. {qadence-1.10.1 → qadence-1.10.3}/qadence/transpile/circuit.py +0 -0
  156. {qadence-1.10.1 → qadence-1.10.3}/qadence/transpile/digitalize.py +0 -0
  157. {qadence-1.10.1 → qadence-1.10.3}/qadence/transpile/flatten.py +0 -0
  158. {qadence-1.10.1 → qadence-1.10.3}/qadence/transpile/invert.py +0 -0
  159. {qadence-1.10.1 → qadence-1.10.3}/qadence/transpile/noise.py +0 -0
  160. {qadence-1.10.1 → qadence-1.10.3}/qadence/transpile/transpile.py +0 -0
  161. {qadence-1.10.1 → qadence-1.10.3}/renovate.json +0 -0
  162. {qadence-1.10.1 → qadence-1.10.3}/setup.py +0 -0

{qadence-1.10.1 → qadence-1.10.3}/PKG-INFO

@@ -1,8 +1,8 @@
 Metadata-Version: 2.4
 Name: qadence
-Version: 1.10.1
+Version: 1.10.3
 Summary: Pasqal interface for circuit-based quantum computing SDKs
-Author-email: Aleksander Wennersteen <aleksander.wennersteen@pasqal.com>, Gert-Jan Both <gert-jan.both@pasqal.com>, Niklas Heim <niklas.heim@pasqal.com>, Mario Dagrada <mario.dagrada@pasqal.com>, Vincent Elfving <vincent.elfving@pasqal.com>, Dominik Seitz <dominik.seitz@pasqal.com>, Roland Guichard <roland.guichard@pasqal.com>, "Joao P. Moutinho" <joao.moutinho@pasqal.com>, Vytautas Abramavicius <vytautas.abramavicius@pasqal.com>, Gergana Velikova <gergana.velikova@pasqal.com>, Eduardo Maschio <eduardo.maschio@pasqal.com>, Smit Chaudhary <smit.chaudhary@pasqal.com>, Ignacio Fernández Graña <ignacio.fernandez-grana@pasqal.com>, Charles Moussa <charles.moussa@pasqal.com>, Giorgio Tosti Balducci <giorgio.tosti-balducci@pasqal.com>, Daniele Cucurachi <daniele.cucurachi@pasqal.com>, Pim Venderbosch <pim.venderbosch@pasqal.com>
+Author-email: Aleksander Wennersteen <aleksander.wennersteen@pasqal.com>, Gert-Jan Both <gert-jan.both@pasqal.com>, Niklas Heim <niklas.heim@pasqal.com>, Mario Dagrada <mario.dagrada@pasqal.com>, Vincent Elfving <vincent.elfving@pasqal.com>, Dominik Seitz <dominik.seitz@pasqal.com>, Roland Guichard <roland.guichard@pasqal.com>, "Joao P. Moutinho" <joao.moutinho@pasqal.com>, Vytautas Abramavicius <vytautas.abramavicius@pasqal.com>, Gergana Velikova <gergana.velikova@pasqal.com>, Eduardo Maschio <eduardo.maschio@pasqal.com>, Smit Chaudhary <smit.chaudhary@pasqal.com>, Ignacio Fernández Graña <ignacio.fernandez-grana@pasqal.com>, Charles Moussa <charles.moussa@pasqal.com>, Giorgio Tosti Balducci <giorgio.tosti-balducci@pasqal.com>, Daniele Cucurachi <daniele.cucurachi@pasqal.com>, Pim Venderbosch <pim.venderbosch@pasqal.com>, Manu Lahariya <manu.lahariya@pasqal.com>
 License: Apache 2.0
 License-File: LICENSE
 Classifier: License :: OSI Approved :: Apache Software License
@@ -43,7 +43,7 @@ Requires-Dist: nvidia-pyindex; extra == 'dlprof'
 Provides-Extra: horqrux
 Requires-Dist: einops; extra == 'horqrux'
 Requires-Dist: flax; extra == 'horqrux'
-Requires-Dist: horqrux==0.6.2; extra == 'horqrux'
+Requires-Dist: horqrux==0.7.0; extra == 'horqrux'
 Requires-Dist: jax; extra == 'horqrux'
 Requires-Dist: jaxopt; extra == 'horqrux'
 Requires-Dist: optax; extra == 'horqrux'
@@ -56,8 +56,8 @@ Provides-Extra: protocols
 Requires-Dist: qadence-protocols; extra == 'protocols'
 Provides-Extra: pulser
 Requires-Dist: pasqal-cloud==0.12.7; extra == 'pulser'
-Requires-Dist: pulser-core==1.2.0; extra == 'pulser'
-Requires-Dist: pulser-simulation==1.2.0; extra == 'pulser'
+Requires-Dist: pulser-core==1.2.2; extra == 'pulser'
+Requires-Dist: pulser-simulation==1.2.2; extra == 'pulser'
 Provides-Extra: visualization
 Requires-Dist: graphviz; extra == 'visualization'
 Description-Content-Type: text/markdown
@@ -75,7 +75,10 @@ programs** with tunable qubit interactions and arbitrary register topologies rea
 
 **For a high-level overview of Qadence features, [check out our white paper](https://arxiv.org/abs/2401.09915).**
 
-**For more detailed information, [check out the documentation](https://pasqal-io.github.io/qadence/latest/).**
+**For more detailed information, [check out the documentation](https://pasqal-io.github.io/qadence/latest/).
+
+**For any questions or comments, [feel free to start a discussion](https://github.com/pasqal-io/qadence/discussions).
+**
 
 [![Linting](https://github.com/pasqal-io/qadence/actions/workflows/lint.yml/badge.svg)](https://github.com/pasqal-io/qadence/actions/workflows/lint.yml)
 [![Tests](https://github.com/pasqal-io/qadence/actions/workflows/test_fast.yml/badge.svg)](https://github.com/pasqal-io/qadence/actions/workflows/test_fast.yml)

{qadence-1.10.1 → qadence-1.10.3}/README.md

@@ -11,7 +11,10 @@ programs** with tunable qubit interactions and arbitrary register topologies rea
 
 **For a high-level overview of Qadence features, [check out our white paper](https://arxiv.org/abs/2401.09915).**
 
-**For more detailed information, [check out the documentation](https://pasqal-io.github.io/qadence/latest/).**
+**For more detailed information, [check out the documentation](https://pasqal-io.github.io/qadence/latest/).
+
+**For any questions or comments, [feel free to start a discussion](https://github.com/pasqal-io/qadence/discussions).
+**
 
 [![Linting](https://github.com/pasqal-io/qadence/actions/workflows/lint.yml/badge.svg)](https://github.com/pasqal-io/qadence/actions/workflows/lint.yml)
 [![Tests](https://github.com/pasqal-io/qadence/actions/workflows/test_fast.yml/badge.svg)](https://github.com/pasqal-io/qadence/actions/workflows/test_fast.yml)

{qadence-1.10.1 → qadence-1.10.3}/mkdocs.yml

@@ -19,6 +19,7 @@ nav:
 - Quantum models: content/quantummodels.md
 - Quantum registers: content/register.md
 - State initialization: content/state_init.md
+- Noisy Simulation: content/noisy_simulation.md
 - Arbitrary Hamiltonians: content/hamiltonians.md
 - Time-dependent generators: content/time_dependent.md
 - QML Constructors: content/qml_constructors.md
@@ -92,6 +93,8 @@ nav:
 - Pulser: api/backends/pulser.md
 - DifferentiableBackend: api/backends/differentiable.md
 
+- Contact: https://github.com/pasqal-io/qadence/discussions
+
 edit_uri: edit/main/docs/
 
 theme:

{qadence-1.10.1 → qadence-1.10.3}/pyproject.toml

@@ -23,11 +23,12 @@ authors = [
     { name = "Charles Moussa", email = "charles.moussa@pasqal.com" },
     { name = "Giorgio Tosti Balducci", email = "giorgio.tosti-balducci@pasqal.com" },
     { name = "Daniele Cucurachi", email = "daniele.cucurachi@pasqal.com" },
-    { name = "Pim Venderbosch", email = "pim.venderbosch@pasqal.com" }
+    { name = "Pim Venderbosch", email = "pim.venderbosch@pasqal.com" },
+    { name = "Manu Lahariya", email = "manu.lahariya@pasqal.com" },
 ]
 requires-python = ">=3.9"
 license = { text = "Apache 2.0" }
-version = "1.10.1"
+version = "1.10.3"
 classifiers = [
     "License :: OSI Approved :: Apache Software License",
     "Programming Language :: Python",
@@ -64,8 +65,8 @@ allow-ambiguous-features = true
 
 [project.optional-dependencies]
 pulser = [
-    "pulser-core==1.2.0",
-    "pulser-simulation==1.2.0",
+    "pulser-core==1.2.2",
+    "pulser-simulation==1.2.2",
     "pasqal-cloud==0.12.7",
 ]
 visualization = [
@@ -75,7 +76,7 @@ visualization = [
     # "scour",
 ]
 horqrux = [
-    "horqrux==0.6.2",
+    "horqrux==0.7.0",
     "jax",
     "flax",
     "optax",
@@ -136,7 +137,7 @@ filterwarnings = [
 [tool.hatch.envs.docs]
 dependencies = [
     "mkdocs",
-    "mkdocs_autorefs<1.3.1",
+    "mkdocs_autorefs",
     "mkdocs-material",
     "mkdocstrings",
     "mkdocstrings-python",

{qadence-1.10.1 → qadence-1.10.3}/qadence/backends/horqrux/convert_ops.py

@@ -245,7 +245,7 @@ class HorqHamiltonianEvolution(NativeHorqHEvo):
 
         self._time_evolution = lambda values: values[self.param_names[0]]
 
-    def unitary(self, values: dict[str, Array]) -> Array:
+    def _unitary(self, values: dict[str, Array]) -> Array:
         """The evolved operator given current parameter values for generator and time evolution."""
         return expm(self._hamiltonian(self, values) * (-1j * self._time_evolution(values)))
 

{qadence-1.10.1 → qadence-1.10.3}/qadence/blocks/block_to_tensor.py

@@ -79,6 +79,7 @@ def _fill_identities(
         torch.Tensor: augmented matrix with dimensions (2**nqubits, 2**nqubits)
         or a tensor (2**n_qubits) if diag_only
     """
+    full_qubit_support = tuple(sorted(full_qubit_support))
     qubit_support = tuple(sorted(qubit_support))
     block_mat = block_mat.to(device)
     mat = IMAT.to(device) if qubit_support[0] != full_qubit_support[0] else block_mat
@@ -469,14 +470,13 @@ def _block_to_tensor_embedded(
         )
 
     elif isinstance(block, MatrixBlock):
-        mat = block.matrix.unsqueeze(0)
-        # FIXME: properly handle identity filling in matrix blocks
-        # mat = _fill_identities(
-        #     block.matrix.unsqueeze(0),
-        #     block.qubit_support,
-        #     qubit_support,
-        #     endianness=endianness,
-        # )
+        mat = _fill_identities(
+            block.matrix.unsqueeze(0),
+            block.qubit_support,
+            qubit_support,
+            endianness=endianness,
+            device=device,
+        )
 
     elif isinstance(block, SWAP):
         swap_block = _swap_block(block)
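
For context, a sketch of what this fix changes at the user level, assuming the top-level `qadence` exports `kron`, `Z`, and `block_to_tensor` as in the public API docs (a hypothetical snippet, not part of the diff): a `MatrixBlock` acting on a subset of the qubit support is now identity-filled to the full register before embedding, instead of being used as-is.

```python
import torch

from qadence import Z, block_to_tensor, kron
from qadence.blocks import MatrixBlock

# Pauli-X given as an explicit 2x2 matrix on qubit 0, kron'd with Z on qubit 1.
xmat = torch.tensor([[0, 1], [1, 0]], dtype=torch.cdouble)
block = kron(MatrixBlock(xmat, qubit_support=(0,)), Z(1))

# With the fix, the 2x2 matrix is padded with identities to the full
# two-qubit space before being embedded in the dense tensor.
print(block_to_tensor(block).shape)
```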

{qadence-1.10.1 → qadence-1.10.3}/qadence/blocks/matrix.py

@@ -7,6 +7,8 @@ import numpy as np
 import torch
 from torch.linalg import eigvals
 
+from math import log
+
 from qadence.blocks import PrimitiveBlock
 from qadence.noise import NoiseHandler
 
@@ -84,6 +86,8 @@ class MatrixBlock(PrimitiveBlock):
         if not self.is_unitary(matrix):
             logger.warning("Provided matrix is not unitary.")
         self.matrix = matrix.clone()
+        if int(log(self.matrix.size(1), 2)) != len(qubit_support):
+            raise ValueError("Provided matrix does not match the qubit_support length.")
         super().__init__(qubit_support, noise)
 
     @cached_property
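
An illustration of the new dimension check (a minimal sketch, assuming `MatrixBlock` is importable from `qadence.blocks`): a matrix of size 2**n must come with a qubit support of length n.

```python
import torch

from qadence.blocks import MatrixBlock

xmat = torch.tensor([[0, 1], [1, 0]], dtype=torch.cdouble)

MatrixBlock(xmat, qubit_support=(0,))    # fine: log2(2) == 1 qubit
MatrixBlock(xmat, qubit_support=(0, 1))  # now raises ValueError: 2x2 matrix, 2 qubits
```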

{qadence-1.10.1 → qadence-1.10.3}/qadence/ml_tools/__init__.py

@@ -4,6 +4,7 @@ from .callbacks.saveload import load_checkpoint, load_model, write_checkpoint
 from .config import AnsatzConfig, FeatureMapConfig, TrainConfig
 from .constructors import create_ansatz, create_fm_blocks, observable_from_config
 from .data import DictDataLoader, InfiniteTensorDataset, OptimizeResult, to_dataloader
+from .information import InformationContent
 from .models import QNN
 from .optimize_step import optimize_step as default_optimize_step
 from .parameters import get_parameters, num_parameters, set_parameters

{qadence-1.10.1 → qadence-1.10.3}/qadence/ml_tools/config.py

@@ -434,15 +434,17 @@ class AnsatzConfig:
     """What type of ansatz.
 
     `AnsatzType.HEA` for Hardware Efficient Ansatz.
-    `AnsatzType.IIA` for Identity intialized Ansatz.
+    `AnsatzType.IIA` for Identity Intialized Ansatz.
+    `AnsatzType.ALA` for Alternating Layer Ansatz.
     """
 
     ansatz_strategy: Strategy = Strategy.DIGITAL
     """Ansatz strategy.
 
-    `Strategy.DIGITAL` for fully digital ansatz. Required if `ansatz_type` is `AnsatzType.IIA`.
-    `Strategy.SDAQC` for analog entangling block.
-    `Strategy.RYDBERG` for fully rydberg hea ansatz.
+    `Strategy.DIGITAL` for fully digital ansatz. Required if `ansatz_type` is `AnsatzType.ALA`.
+    `Strategy.SDAQC` for analog entangling block. Only available for `AnsatzType.HEA` or
+    `AnsatzType.ALA`.
+    `Strategy.RYDBERG` for fully rydberg hea ansatz. Only available for `AnsatzType.HEA`.
     """
 
     strategy_args: dict = field(default_factory=dict)
@@ -484,6 +486,13 @@ class AnsatzConfig:
     """
     # The default for a dataclass can not be a mutable object without using this default_factory.
 
+    m_block_qubits: int | None = None
+    """
+    The number of qubits in the local entangling block of an Alternating Layer Ansatz (ALA).
+
+    Only used when `ansatz_type` is `AnsatzType.ALA`.
+    """
+
     param_prefix: str = "theta"
     """The base bame of the variational parameter."""
 
@@ -499,3 +508,13 @@ class AnsatzConfig:
         assert (
             self.ansatz_strategy != Strategy.RYDBERG
         ), "Rydberg strategy not allowed for Identity-initialized ansatz."
+
+        if self.ansatz_type == AnsatzType.ALA:
+            assert (
+                self.ansatz_strategy == Strategy.DIGITAL
+            ), f"{self.ansatz_strategy} not allowed for Alternating Layer Ansatz.\
+                Only `Strategy.DIGITAL` allowed."
+
+            assert (
+                self.m_block_qubits is not None
+            ), "m_block_qubits must be specified for Alternating Layer Ansatz."

{qadence-1.10.1 → qadence-1.10.3}/qadence/ml_tools/constructors.py

@@ -13,13 +13,14 @@ from qadence.constructors import (
     analog_feature_map,
     feature_map,
     hamiltonian_factory,
-    iia,
     rydberg_feature_map,
     rydberg_hea,
     rydberg_tower_feature_map,
 )
+from qadence.constructors.ala import ala_digital
 from qadence.constructors.hamiltonians import ObservableConfig, TDetuning
 from qadence.constructors.hea import hea_digital, hea_sDAQC
+from qadence.constructors.iia import iia
 from qadence.measurements import Measurements
 from qadence.noise import NoiseHandler
 from qadence.operations import CNOT, RX, RY, I, N, Z
@@ -596,6 +597,58 @@ def _create_hea(
     )
 
 
+def _create_ala_digital(
+    num_qubits: int,
+    config: AnsatzConfig,
+) -> AbstractBlock:
+    """
+    Create the Digital Alternating Layer Ansatz based on the configuration.
+
+    Args:
+        num_qubits (int): The number of qubits.
+        config (AnsatzConfig): The configuration for the ansatz.
+
+    Returns:
+        AbstractBlock: The Digital Alternating Layer Ansatz.
+    """
+    operations = config.strategy_args.get("operation", [RX, RY, RX])
+    entangler = config.strategy_args.get("entangler", CNOT)
+
+    return ala_digital(
+        n_qubits=num_qubits,
+        m_block_qubits=config.m_block_qubits,  # type: ignore[arg-type]
+        param_prefix=config.param_prefix,
+        operations=operations,
+        entangler=entangler,
+    )
+
+
+def _create_ala(
+    num_qubits: int,
+    config: AnsatzConfig,
+) -> AbstractBlock:
+    """
+    Create the Alternating Layer Ansatz based on the configuration.
+
+    Args:
+        num_qubits (int): The number of qubits.
+        config (AnsatzConfig): The configuration for the ansatz.
+
+    Returns:
+        AbstractBlock: The Alternating Layer Ansatz.
+
+    Raises:
+        ValueError: If the ansatz strategy is not `Strategy.DIGITAL`.
+    """
+    if config.ansatz_strategy == Strategy.DIGITAL:
+        return _create_ala_digital(num_qubits=num_qubits, config=config)
+    else:
+        raise ValueError(
+            f"Invalid ansatz strategy {config.ansatz_strategy} provided. Only `Strategy.DIGITAL` \
+            allowed"
+        )
+
+
 def create_ansatz(
     register: int | Register,
     config: AnsatzConfig,
@@ -619,6 +672,8 @@ def create_ansatz(
         return _create_iia(num_qubits=num_qubits, config=config)
     elif config.ansatz_type == AnsatzType.HEA:
         return _create_hea(register=register, config=config)
+    elif config.ansatz_type == AnsatzType.ALA:
+        return _create_ala(num_qubits=num_qubits, config=config)
     else:
         raise NotImplementedError(
             f"Ansatz of type {config.ansatz_type} not implemented yet. Only `AnsatzType.HEA` and\

qadence-1.10.3/qadence/ml_tools/information/__init__.py (new file)

@@ -0,0 +1,3 @@
+from __future__ import annotations
+
+from .information_content import InformationContent

qadence-1.10.3/qadence/ml_tools/information/information_content.py (new file)

@@ -0,0 +1,339 @@
+from __future__ import annotations
+
+import functools
+from logging import getLogger
+from math import log, sqrt
+from statistics import NormalDist
+from typing import Any, Callable
+
+import torch
+from torch import nn
+from torch.func import functional_call  # type: ignore
+
+logger = getLogger("ml_tools")
+
+
+class InformationContent:
+    def __init__(
+        self,
+        model: nn.Module,
+        loss_fn: Callable,
+        xs: Any,
+        epsilons: torch.Tensor,
+        variation_multiple: int = 20,
+    ) -> None:
+        """Information Landscape class.
+
+        This class handles the study of loss landscape from information theoretic
+        perspective and provides methods to get bounds on the norm of the
+        gradient from the Information Content of the loss landscape.
+
+        Args:
+            model: The quantum or classical model to analyze.
+            loss_fn: Loss function that takes model output and calculates loss
+            xs: Input data to evaluate the model on
+            epsilons: The thresholds to use for discretization of the finite derivatives
+            variation_multiple: The number of sets of variational parameters to generate per each
+                variational parameter. The number of variational parameters required for the
+                statistical analysis scales linearly with the amount of them present in the
+                model. This is that linear factor.
+
+        Notes:
+            This class provides flexibility in terms of what the model, the loss function,
+            and the xs are. The only requirement is that the loss_fn takes the model and xs as
+            arguments and returns the loss, and another dictionary of other metrics.
+
+            Thus, assumed structure:
+            loss_fn(model, xs) -> (loss, metrics, ...)
+
+            Example: A Classifier
+            ```python
+            model = nn.Linear(10, 1)
+
+            def loss_fn(
+                model: nn.Module,
+                xs: tuple[torch.Tensor, torch.Tensor]
+            ) -> tuple[torch.Tensor, dict[str, float]:
+                criterion = nn.MSELoss()
+                inputs, labels = xs
+                outputs = model(inputs)
+                loss = criterion(outputs, labels)
+                metrics = {"loss": loss.item()}
+                return loss, metrics
+
+            xs = (torch.randn(10, 10), torch.randn(10, 1))
+
+            info_landscape = InfoLandscape(model, loss_fn, xs)
+            ```
+            In this example, the model is a linear classifier, and the `xs` include both the
+            inputs and the target labels. The logic for calculation of the loss from this lies
+            entirely within the `loss_fn` function. This can then further be used to obtain the
+            bounds on the average norm of the gradient of the loss function.
+
+            Example: A Physics Informed Neural Network
+            ```python
+            class PhysicsInformedNN(nn.Module):
+                // <Initialization Logic>
+
+                def forward(self, xs: dict[str, torch.Tensor]):
+                    return {
+                        "pde_residual": pde_residual(xs["pde"]),
+                        "boundary_condition": bc_term(xs["bc"]),
+                    }
+
+            def loss_fn(
+                model: PhysicsInformedNN,
+                xs: dict[str, torch.Tensor]
+            ) -> tuple[torch.Tensor, dict[str, float]:
+                pde_residual, bc_term = model(xs)
+                loss = torch.mean(torch.sum(pde_residual**2, dim=1), dim=0)
+                + torch.mean(torch.sum(bc_term**2, dim=1), dim=0)
+
+                return loss, {"pde_residual": pde_residual, "bc_term": bc_term}
+
+            xs = {
+                "pde": torch.linspace(0, 1, 10),
+                "bc": torch.tensor([0.0]),
+            }
+
+            info_landscape = InfoLandscape(model, loss_fn, xs)
+            ```
+
+            In this example, the model is a Physics Informed Neural Network, and the `xs`
+            are the inputs to the different residual components of the model. The logic
+            for calculation of the residuals lies within the PhysicsInformedNN class, and
+            the loss function is defined to calculate the loss that is to be optimized
+            from these residuals. This can then further be used to obtain the
+            bounds on the average norm of the gradient of the loss function.
+
+            The first value that the `loss_fn` returns is the loss value that is being optimized.
+            The function is also expected to return other value(s), often the metrics that are
+            used to calculate the loss. These values are ignored for the purpose of this class.
+        """
+        self.model = model
+        self.loss_fn = loss_fn
+        self.xs = xs
+        self.epsilons = epsilons
+        self.device = next(model.parameters()).device
+
+        self.param_shapes = {}
+        self.total_params = 0
+
+        for name, param in model.named_parameters():
+            self.param_shapes[name] = param.shape
+            self.total_params += param.numel()
+        self.n_variations = variation_multiple * self.total_params
+        self.all_variations = torch.empty(
+            (self.n_variations, self.total_params), device=self.device
+        ).uniform_(0, 2 * torch.pi)
+
+    def reshape_param_variations(self) -> dict[str, torch.Tensor]:
+        """Reshape variations of the model's variational parameters.
+
+        Returns:
+            Dictionary of parameter tensors, each with shape [n_variations, *param_shape]
+        """
+        param_variations = {}
+        start_idx = 0
+
+        for name, shape in self.param_shapes.items():
+            param_size = torch.prod(torch.tensor(shape)).item()
+            param_variations[name] = self.all_variations[
+                :, start_idx : start_idx + param_size
+            ].view(self.n_variations, *shape)
+            start_idx += param_size
+
+        return param_variations
+
+    def batched_loss(self) -> torch.Tensor:
+        """Calculate loss for all parameter variations in a batched manner.
+
+        Returns: Tensor of loss values for each parameter variation
+        """
+        param_variations = self.reshape_param_variations()
+        losses = torch.zeros(self.n_variations, device=self.device)
+
+        for i in range(self.n_variations):
+            params = {name: param[i] for name, param in param_variations.items()}
+            current_model = lambda x: functional_call(self.model, params, (x,))
+            losses[i] = self.loss_fn(current_model, self.xs)[0]
+
+        return losses
+
+    def randomized_finite_der(self) -> torch.Tensor:
+        """
+        Calculate normalized finite difference of loss on doing random walk in the parameter space.
+
+        This serves as a proxy for the derivative of the loss with respect to parameters.
+
+        Returns:
+            Tensor containing normalized finite differences (approximate directional derivatives)
+            between consecutive points in the random walk. Shape: [n_variations - 1]
+        """
+        losses = self.batched_loss()
+
+        return (losses[1:] - losses[:-1]) / (
+            torch.norm(self.all_variations[1:] - self.all_variations[:-1], dim=1) + 1e-8
+        )
+
+    def discretize_derivatives(self) -> torch.Tensor:
+        """
+        Convert finite derivatives into discrete values.
+
+        Returns:
+            Tensor containing discretized derivatives with shape [n_epsilons, n_variations-2]
+            Each row contains {-1, 0, 1} values for that epsilon
+        """
+        derivatives = self.randomized_finite_der()
+
+        derivatives = derivatives.unsqueeze(0)
+        epsilons = self.epsilons.unsqueeze(1)
+
+        discretized = torch.zeros((len(epsilons), len(derivatives[0])), device=self.device)
+        discretized[derivatives > epsilons] = 1
+        discretized[derivatives < -epsilons] = -1
+
+        return discretized
+
+    def calculate_transition_probabilities_batch(self) -> torch.Tensor:
+        """
+        Calculate transition probabilities for multiple epsilon values.
+
+        Returns:
+            Tensor of shape [n_epsilons, 6] containing probabilities for each transition type
+            Columns order: [+1to0, +1to-1, 0to+1, 0to-1, -1to0, -1to+1]
+        """
+        discretized = self.discretize_derivatives()
+
+        current = discretized[:, :-1]
+        next_val = discretized[:, 1:]
+
+        transitions = torch.stack(
+            [
+                ((current == 1) & (next_val == 0)).sum(dim=1),
+                ((current == 1) & (next_val == -1)).sum(dim=1),
+                ((current == 0) & (next_val == 1)).sum(dim=1),
+                ((current == 0) & (next_val == -1)).sum(dim=1),
+                ((current == -1) & (next_val == 0)).sum(dim=1),
+                ((current == -1) & (next_val == 1)).sum(dim=1),
+            ],
+            dim=1,
+        ).float()
+
+        total_transitions = current.size(1)
+        probabilities = transitions / total_transitions
+
+        return probabilities
+
+    @functools.cached_property
+    def calculate_IC(self) -> torch.Tensor:
+        """
+        Calculate Information Content for multiple epsilon values.
+
+        Returns: Tensor of IC values for each epsilon [n_epsilons]
+        """
+        probs = self.calculate_transition_probabilities_batch()
+
+        mask = probs > 1e-4
+
+        ic_terms = torch.where(mask, -probs * torch.log(probs), torch.zeros_like(probs))
+        ic_values = ic_terms.sum(dim=1) / torch.log(torch.tensor(6.0))
+
+        return ic_values
+
+    def max_IC(self) -> tuple[float, float]:
+        """
+        Get the maximum Information Content and its corresponding epsilon.
+
+        Returns: Tuple of (maximum IC value, optimal epsilon)
+        """
+        max_ic, max_idx = torch.max(self.calculate_IC, dim=0)
+        max_epsilon = self.epsilons[max_idx]
+        return max_ic.item(), max_epsilon.item()
+
+    def sensitivity_IC(self, eta: float) -> float:
+        """
+        Find the minimum value of epsilon such that the information content is less than eta.
+
+        Args:
+            eta: Threshold value, the sensitivity IC.
+
+        Returns: The epsilon value that gives IC that is less than the sensitivity IC.
+        """
+        ic_values = self.calculate_IC
+        mask = ic_values < eta
+        epsilons = self.epsilons[mask]
+        return float(epsilons.min().item())
+
+    @staticmethod
+    @functools.lru_cache
+    def q_value(H_value: float) -> float:
+        """
+        Compute the q value.
+
+        q is the solution to the equation:
+        H(x) = 4h(x) + 2h(1/2 - 2x)
+
+        It is the value of the probability of 4 of the 6 transitions such that
+        the IC is the same as the IC of our system.
+
+        This quantity is useful in calculating the bounds on the norms of the gradients.
+
+        Args:
+            H_value (float): The information content.
+
+        Returns:
+            float: The q value
+        """
+
+        x = torch.linspace(0.001, 0.16667, 10000)
+
+        H = -4 * x * torch.log(x) / torch.log(torch.tensor(6)) - 2 * (0.5 - 2 * x) * torch.log(
+            0.5 - 2 * x
+        ) / torch.log(torch.tensor(6))
+        err = torch.abs(H - H_value)
+        idx = torch.argmin(err)
+        return float(x[idx].item())
+
+    def get_grad_norm_bounds_max_IC(self) -> tuple[float, float]:
+        """
+        Compute the bounds on the average norm of the gradient.
+
+        Returns:
+            tuple[Tensor, Tensor]: The lower and upper bounds.
+        """
+        max_IC, epsilon_m = self.max_IC()
+        lower_bound = (
+            epsilon_m
+            * sqrt(self.total_params)
+            / (NormalDist().inv_cdf(1 - 2 * self.q_value(max_IC)))
+        )
+        upper_bound = (
+            epsilon_m
+            * sqrt(self.total_params)
+            / (NormalDist().inv_cdf(0.5 * (1 + 2 * self.q_value(max_IC))))
+        )
+
+        if max_IC < log(2, 6):
+            logger.warning(
+                "Warning: The maximum IC is less than the required value. The bounds may be"
+                + " inaccurate."
+            )
+
+        return lower_bound, upper_bound
+
+    def get_grad_norm_bounds_sensitivity_IC(self, eta: float) -> float:
+        """
+        Compute the bounds on the average norm of the gradient.
+
+        Args:
+            eta (float): The sensitivity IC.
+
+        Returns:
+            Tensor: The lower bound.
+        """
+        epsilon_sensitivity = self.sensitivity_IC(eta)
+        upper_bound = (
+            epsilon_sensitivity * sqrt(self.total_params) / (NormalDist().inv_cdf(1 - 3 * eta / 2))
+        )
+        return upper_bound
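
End-to-end usage of the new class (a sketch built from the constructor contract shown above; the toy linear model, MSE loss, and epsilon grid are illustrative placeholders):

```python
import torch
from torch import nn

from qadence.ml_tools import InformationContent

model = nn.Linear(4, 1)

# Per the documented contract: loss_fn(model, xs) -> (loss, metrics, ...).
def loss_fn(model: nn.Module, xs: tuple) -> tuple[torch.Tensor, dict]:
    inputs, labels = xs
    loss = nn.functional.mse_loss(model(inputs), labels)
    return loss, {"mse": loss.item()}

xs = (torch.randn(32, 4), torch.randn(32, 1))
epsilons = torch.logspace(-4, 1, 30)  # discretization thresholds to scan

ic = InformationContent(model, loss_fn, xs, epsilons)

max_ic, eps_m = ic.max_IC()                        # IC maximum and its epsilon
lower, upper = ic.get_grad_norm_bounds_max_IC()    # bounds on the average gradient norm
```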