tensorcircuit-nightly 1.2.1.dev20250724__tar.gz → 1.2.1.dev20250726__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


Files changed (146)
  1. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/CHANGELOG.md +6 -0
  2. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/PKG-INFO +1 -1
  3. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/docs/source/advance.rst +9 -0
  4. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/docs/source/conf.py +2 -2
  5. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/docs/source/faq.rst +2 -1
  6. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/docs/source/quickstart.rst +7 -8
  7. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/__init__.py +1 -1
  8. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/backends/jax_backend.py +8 -1
  9. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/circuit.py +48 -0
  10. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/cons.py +60 -2
  11. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/fgs.py +251 -70
  12. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/mpscircuit.py +45 -4
  13. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/stabilizercircuit.py +26 -15
  14. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/translation.py +10 -2
  15. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit_nightly.egg-info/PKG-INFO +1 -1
  16. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit_nightly.egg-info/SOURCES.txt +0 -1
  17. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tests/test_circuit.py +20 -6
  18. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tests/test_dmcircuit.py +1 -1
  19. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tests/test_miscs.py +20 -0
  20. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tests/test_stabilizer.py +9 -0
  21. tensorcircuit_nightly-1.2.1.dev20250724/docs/source/modules.rst.backup +0 -29
  22. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/HISTORY.md +0 -0
  23. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/LICENSE +0 -0
  24. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/MANIFEST.in +0 -0
  25. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/README.md +0 -0
  26. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/README_cn.md +0 -0
  27. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/docs/source/cnconf.py +0 -0
  28. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/docs/source/contribution.rst +0 -0
  29. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/docs/source/generate_rst.py +0 -0
  30. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/docs/source/index.rst +0 -0
  31. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/docs/source/infras.rst +0 -0
  32. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/docs/source/modules.rst +0 -0
  33. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/docs/source/sharpbits.rst +0 -0
  34. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/docs/source/textbooktoc.rst +0 -0
  35. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/docs/source/tutorial.rst +0 -0
  36. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/docs/source/tutorial_cn.rst +0 -0
  37. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/docs/source/whitepapertoc.rst +0 -0
  38. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/docs/source/whitepapertoc_cn.rst +0 -0
  39. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/setup.cfg +0 -0
  40. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/setup.py +0 -0
  41. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/about.py +0 -0
  42. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/abstractcircuit.py +0 -0
  43. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/applications/__init__.py +0 -0
  44. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/applications/ai/__init__.py +0 -0
  45. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/applications/ai/ensemble.py +0 -0
  46. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/applications/dqas.py +0 -0
  47. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/applications/finance/__init__.py +0 -0
  48. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/applications/finance/portfolio.py +0 -0
  49. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/applications/graphdata.py +0 -0
  50. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/applications/layers.py +0 -0
  51. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/applications/optimization.py +0 -0
  52. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/applications/physics/__init__.py +0 -0
  53. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/applications/physics/baseline.py +0 -0
  54. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/applications/physics/fss.py +0 -0
  55. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/applications/utils.py +0 -0
  56. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/applications/vags.py +0 -0
  57. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/applications/van.py +0 -0
  58. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/applications/vqes.py +0 -0
  59. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/asciiart.py +0 -0
  60. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/backends/__init__.py +0 -0
  61. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/backends/abstract_backend.py +0 -0
  62. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/backends/backend_factory.py +0 -0
  63. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/backends/cupy_backend.py +0 -0
  64. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/backends/jax_ops.py +0 -0
  65. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/backends/numpy_backend.py +0 -0
  66. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/backends/pytorch_backend.py +0 -0
  67. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/backends/pytorch_ops.py +0 -0
  68. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/backends/tensorflow_backend.py +0 -0
  69. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/backends/tf_ops.py +0 -0
  70. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/basecircuit.py +0 -0
  71. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/channels.py +0 -0
  72. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/cloud/__init__.py +0 -0
  73. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/cloud/abstraction.py +0 -0
  74. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/cloud/apis.py +0 -0
  75. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/cloud/config.py +0 -0
  76. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/cloud/local.py +0 -0
  77. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/cloud/quafu_provider.py +0 -0
  78. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/cloud/tencent.py +0 -0
  79. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/cloud/utils.py +0 -0
  80. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/cloud/wrapper.py +0 -0
  81. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/compiler/__init__.py +0 -0
  82. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/compiler/composed_compiler.py +0 -0
  83. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/compiler/qiskit_compiler.py +0 -0
  84. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/compiler/simple_compiler.py +0 -0
  85. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/densitymatrix.py +0 -0
  86. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/experimental.py +0 -0
  87. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/gates.py +0 -0
  88. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/interfaces/__init__.py +0 -0
  89. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/interfaces/jax.py +0 -0
  90. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/interfaces/numpy.py +0 -0
  91. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/interfaces/scipy.py +0 -0
  92. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/interfaces/tensorflow.py +0 -0
  93. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/interfaces/tensortrans.py +0 -0
  94. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/interfaces/torch.py +0 -0
  95. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/keras.py +0 -0
  96. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/mps_base.py +0 -0
  97. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/noisemodel.py +0 -0
  98. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/quantum.py +0 -0
  99. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/results/__init__.py +0 -0
  100. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/results/counts.py +0 -0
  101. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/results/qem/__init__.py +0 -0
  102. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/results/qem/benchmark_circuits.py +0 -0
  103. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/results/qem/qem_methods.py +0 -0
  104. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/results/readout_mitigation.py +0 -0
  105. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/shadows.py +0 -0
  106. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/simplify.py +0 -0
  107. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/templates/__init__.py +0 -0
  108. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/templates/ansatz.py +0 -0
  109. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/templates/blocks.py +0 -0
  110. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/templates/chems.py +0 -0
  111. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/templates/conversions.py +0 -0
  112. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/templates/dataset.py +0 -0
  113. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/templates/graphs.py +0 -0
  114. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/templates/lattice.py +0 -0
  115. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/templates/measurements.py +0 -0
  116. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/torchnn.py +0 -0
  117. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/utils.py +0 -0
  118. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit/vis.py +0 -0
  119. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit_nightly.egg-info/dependency_links.txt +0 -0
  120. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit_nightly.egg-info/requires.txt +0 -0
  121. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tensorcircuit_nightly.egg-info/top_level.txt +0 -0
  122. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tests/__init__.py +0 -0
  123. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tests/conftest.py +0 -0
  124. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tests/test_backends.py +0 -0
  125. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tests/test_calibrating.py +0 -0
  126. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tests/test_channels.py +0 -0
  127. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tests/test_cloud.py +0 -0
  128. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tests/test_compiler.py +0 -0
  129. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tests/test_ensemble.py +0 -0
  130. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tests/test_fgs.py +0 -0
  131. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tests/test_gates.py +0 -0
  132. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tests/test_interfaces.py +0 -0
  133. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tests/test_keras.py +0 -0
  134. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tests/test_lattice.py +0 -0
  135. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tests/test_mpscircuit.py +0 -0
  136. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tests/test_noisemodel.py +0 -0
  137. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tests/test_qaoa.py +0 -0
  138. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tests/test_qem.py +0 -0
  139. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tests/test_quantum.py +0 -0
  140. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tests/test_quantum_attr.py +0 -0
  141. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tests/test_results.py +0 -0
  142. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tests/test_shadows.py +0 -0
  143. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tests/test_simplify.py +0 -0
  144. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tests/test_templates.py +0 -0
  145. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tests/test_torchnn.py +0 -0
  146. {tensorcircuit_nightly-1.2.1.dev20250724 → tensorcircuit_nightly-1.2.1.dev20250726}/tests/test_van.py +0 -0
--- tensorcircuit_nightly-1.2.1.dev20250724/CHANGELOG.md
+++ tensorcircuit_nightly-1.2.1.dev20250726/CHANGELOG.md
@@ -10,10 +10,16 @@
 
 - Add `circuit.amplitude_before()` method to return the corresponding tensornetwork nodes.
 
+- Add `with_prob` for `stabilizercircuit.measure()`.
+
+- Add `tc.cons.function_nodes_capture` decorator and `tc.cons.runtime_nodes_capture` context manager for directly return nodes before real contraction.
+
 ### Fixed
 
 - Fix the nodes order in contraction by giving each node a global `_stable_id_`.
 
+- Fix `to_dlpack` for jax version >= 0.7.
+
 ## v1.2.1
 
 ### Fixed
--- tensorcircuit_nightly-1.2.1.dev20250724/PKG-INFO
+++ tensorcircuit_nightly-1.2.1.dev20250726/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: tensorcircuit-nightly
-Version: 1.2.1.dev20250724
+Version: 1.2.1.dev20250726
 Summary: nightly release for tensorcircuit
 Home-page: https://github.com/refraction-ray/tensorcircuit-dev
 Author: TensorCircuit Authors
--- tensorcircuit_nightly-1.2.1.dev20250724/docs/source/advance.rst
+++ tensorcircuit_nightly-1.2.1.dev20250726/docs/source/advance.rst
@@ -485,6 +485,15 @@ Here's an example studying entanglement asymmetry in tilted ferromagnet states:
 Randoms, Jit, Backend Agnostic, and Their Interplay
 --------------------------------------------------------
 
+This section explains how random number generation interacts with JIT compilation and backend agnosticism in TensorCircuit. Understanding this interplay is crucial for reproducible and correct simulation results, especially when using JAX.
+
+**Key Management for Reproducibility:**
+In JAX, random number generation is deterministic and relies on explicit "keys" that manage the random state. This is different from TensorFlow or NumPy, where random states are often managed implicitly. For reproducible results and correct JIT compilation, JAX requires these keys to be passed and split explicitly.
+
+**Why Explicit Key Management?**
+When a JIT-compiled function is called multiple times with the same inputs, JAX aims to produce the same output. If random numbers were generated implicitly within a JIT-compiled function, subsequent calls would produce the same "random" numbers, which is often not the desired behavior for simulations requiring true randomness across runs.
+Explicit key management ensures that each call to a random function, even within JIT, uses a new, distinct random state derived from a split key, thus maintaining the desired randomness and reproducibility.
+
 .. code-block:: python
 
     import tensorcircuit as tc
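
The new paragraphs above describe JAX's explicit key handling in prose; the following is a minimal sketch of that pattern using plain JAX calls only (the `sample_bits` function and the seed value are illustrative, not part of the package):

    import jax

    def sample_bits(key, n):
        # Split the key so each call consumes a fresh subkey; reusing the
        # same key inside a jitted function would repeat the "random" draw.
        key, subkey = jax.random.split(key)
        bits = jax.random.bernoulli(subkey, 0.5, shape=(n,))
        return key, bits

    key = jax.random.PRNGKey(42)        # explicit, reproducible seed
    key, b1 = sample_bits(key, 4)
    key, b2 = sample_bits(key, 4)       # a different draw from the same program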
--- tensorcircuit_nightly-1.2.1.dev20250724/docs/source/conf.py
+++ tensorcircuit_nightly-1.2.1.dev20250726/docs/source/conf.py
@@ -20,8 +20,8 @@ sys.path.insert(0, os.path.abspath("../ext/"))
 
 # -- Project information -----------------------------------------------------
 
-project = "tensorcircuit"
-copyright = "2020, TensorCircuit Development Team. Created by Shi-Xin Zhang"
+project = "tensorcircuit-ng"
+copyright = "2020, TensorCircuit Development Team. Created by Shi-Xin Zhang."
 author = "refraction-ray"
 
 # The short X.Y version
--- tensorcircuit_nightly-1.2.1.dev20250724/docs/source/faq.rst
+++ tensorcircuit_nightly-1.2.1.dev20250726/docs/source/faq.rst
@@ -4,7 +4,7 @@ Frequently Asked Questions
 What is the relation between TensorCircuit and TensorCircuit-NG?
 -------------------------------------------------------------------
 
-Both packages are created by `Shi-Xin Zhang <https://www.iop.cas.cn/rcjy/tpyjy/?id=6789>`_ (`@refraction-ray <https://github.com/refraction-ray>`_). For the history of the evolution of tensorcircuit, please refer to `history <https://github.com/tensorcircuit/tensorcircuit-ng/blob/master/HISTORY.md>`_.
+Both packages are created by `Shi-Xin Zhang <https://www.iop.cas.cn/rcjy/tpyjy/?id=6789>`_ (`@refraction-ray <https://github.com/refraction-ray>`_). For the history of the evolution of TensorCircuit-NG, please refer to `history <https://github.com/tensorcircuit/tensorcircuit-ng/blob/master/HISTORY.md>`_.
 
 From users' perspective, TensorCircuit-NG maintains full compatibility with the TensorCircuit API, enhancing it with additional features and critical bug fixes. Only TensorCircuit-NG is kept up-to-date with the fast evolving scientific computing and machine learning ecosystem in Python.
 
@@ -41,6 +41,7 @@ How can I use multiple GPUs?
 For different observables evaluation on different cards, see `example <https://github.com/tensorcircuit/tensorcircuit-ng/blob/master/examples/vqe_parallel_pmap.py>`_.
 
 For distributed simulation of one circuit on multiple cards, see `example for expectation <https://github.com/tensorcircuit/tensorcircuit-ng/blob/master/examples/slicing_auto_pmap_vqa.py>`_ and `example for MPO <https://github.com/tensorcircuit/tensorcircuit-ng/blob/master/examples/slicing_auto_pmap_mpo.py>`_.
+We also introduce a new interface for the multi-GPU tensornetwork contraction, see `example for VQE <https://github.com/tensorcircuit/tensorcircuit-ng/blob/master/examples/distributed_interface_vqe.py>`_ and `example for amplitude <https://github.com/tensorcircuit/tensorcircuit-ng/blob/master/examples/distributed_interface_amplitude.py>`_.
 
 
 When should I jit the function?
--- tensorcircuit_nightly-1.2.1.dev20250724/docs/source/quickstart.rst
+++ tensorcircuit_nightly-1.2.1.dev20250726/docs/source/quickstart.rst
@@ -10,26 +10,25 @@ Installation
 ``pip install tensorcircuit-ng``
 
 is in general enough.
-Either pip from conda or other python env managers is fine.
 
 Since there are many optional packages for various features,
-the users may need to install more pip packages when required.
+the users may need to install more packages by pip when required.
 
 For Nvidia GPU,
 please refer to the GPU aware installation guide of corresponding machine learning frameworks:
-`TensorFlow <https://www.tensorflow.org/install/gpu>`_,
-`Jax <https://github.com/google/jax#pip-installation-gpu-cuda>`_,
+`TensorFlow <https://www.tensorflow.org/install/pip>`_ (``pip install 'tensorflow[and-cuda]'``),
+`Jax <https://docs.jax.dev/en/latest/installation.html#pip-installation-nvidia-gpu-cuda-installed-via-pip-easier>`_ (``pip install 'jax[cuda-12]'``),
 or `PyTorch <https://pytorch.org/get-started/locally/>`_.
 
-Docker is also recommended (especially Linux + Nvidia GPU setup):
+Docker is also accessible:
 
 ``sudo docker run -it --network host --gpus all tensorcircuit/tensorcircuit``.
 
 For more details on docker setup, please refer to `docker readme <https://github.com/tensorcircuit/tensorcircuit-ng/tree/master/docker>`_.
 
 
-Overall, the installation of TensorCircuit-NG is simple, since it is purely in Python and hence very portable.
-As long as the users can take care of the installation of ML frameworks on the corresponding system, TensorCircuit-NG will work as expected.
+Overall, the installation of TensorCircuit-NG is simple, since it is purely in Python and hence portable.
+As long as the users can take care of the installation of ML frameworks on the corresponding operating system, TensorCircuit-NG will work as expected.
 
 
 To debug the installation issue or report bugs, please check the environment information by ``tc.about()``.
@@ -218,7 +217,7 @@ If the user has no intention to maintain the application code in a backend agnos
 Automatic Differentiation, JIT, and Vectorized Parallelism
 -------------------------------------------------------------
 
-For concepts of AD, JIT and VMAP, please refer to `Jax documentation <https://jax.readthedocs.io/en/latest/jax-101/index.html>`__ .
+For concepts of AD, JIT and VMAP, please refer to `Jax documentation <https://docs.jax.dev/en/latest/tutorials.html>`__ .
 
 The related API design in TensorCircuit-NG closely follows the functional programming design pattern in Jax with some slight differences. So we strongly recommend users learn some basics about Jax no matter which ML backend they intend to use.
 
--- tensorcircuit_nightly-1.2.1.dev20250724/tensorcircuit/__init__.py
+++ tensorcircuit_nightly-1.2.1.dev20250726/tensorcircuit/__init__.py
@@ -1,4 +1,4 @@
-__version__ = "1.2.1.dev20250724"
+__version__ = "1.2.1.dev20250726"
 __author__ = "TensorCircuit Authors"
 __creator__ = "refraction-ray"
 
--- tensorcircuit_nightly-1.2.1.dev20250724/tensorcircuit/backends/jax_backend.py
+++ tensorcircuit_nightly-1.2.1.dev20250726/tensorcircuit/backends/jax_backend.py
@@ -442,7 +442,14 @@ class JaxBackend(jax_backend.JaxBackend, ExtendedBackend):  # type: ignore
     def to_dlpack(self, a: Tensor) -> Any:
         import jax.dlpack
 
-        return jax.dlpack.to_dlpack(a)
+        try:
+            return jax.dlpack.to_dlpack(a)  # type: ignore
+        except AttributeError:  # jax >v0.7
+            # jax.dlpack.to_dlpack was deprecated in JAX v0.6.0 and removed in JAX v0.7.0.
+            # Please use the newer DLPack API based on __dlpack__ and __dlpack_device__ instead.
+            # Typically, you can pass a JAX array directly to the `from_dlpack` function of
+            # another framework without using `to_dlpack`.
+            return a.__dlpack__()
 
     def set_random_state(
         self, seed: Optional[Union[int, PRNGKeyArray]] = None, get_only: bool = False
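
As the added comment notes, on newer JAX versions an array can usually be handed straight to another framework's `from_dlpack`. A minimal sketch, assuming a CPU array and NumPy >= 1.22 for `np.from_dlpack`:

    import jax.numpy as jnp
    import numpy as np

    a = jnp.arange(4.0)
    # NumPy consumes the array through the standard __dlpack__ protocol,
    # so no explicit jax.dlpack.to_dlpack call is needed on JAX >= 0.7.
    b = np.from_dlpack(a)
    print(b)  # [0. 1. 2. 3.]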
--- tensorcircuit_nightly-1.2.1.dev20250724/tensorcircuit/circuit.py
+++ tensorcircuit_nightly-1.2.1.dev20250726/tensorcircuit/circuit.py
@@ -231,6 +231,25 @@ class Circuit(BaseCircuit):
         pz: float,
         status: Optional[float] = None,
     ) -> float:
+        """
+        Apply a depolarizing channel to the circuit in a Monte Carlo way.
+        For each call, one of the Pauli gates (X, Y, Z) or an Identity gate is applied to the qubit
+        at the given index based on the probabilities `px`, `py`, and `pz`.
+
+        :param index: The index of the qubit to apply the depolarizing channel on.
+        :type index: int
+        :param px: The probability of applying an X gate.
+        :type px: float
+        :param py: The probability of applying a Y gate.
+        :type py: float
+        :param pz: The probability of applying a Z gate.
+        :type pz: float
+        :param status: A random number between 0 and 1 to determine which gate to apply. If None,
+            a random number is generated automatically. Defaults to None.
+        :type status: Optional[float], optional
+        :return: Returns 0.0. The function modifies the circuit in place.
+        :rtype: float
+        """
         if status is None:
             status = backend.implicit_randu()[0]
         g = backend.cond(
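
A brief usage sketch for the Monte Carlo depolarizing channel documented above; the backend choice and the probability values are illustrative:

    import tensorcircuit as tc

    K = tc.set_backend("tensorflow")

    c = tc.Circuit(2)
    c.h(0)
    # One Monte Carlo trajectory: with probability px/py/pz an X/Y/Z gate
    # is applied on qubit 0, otherwise the identity is applied.
    c.depolarizing(0, px=0.05, py=0.05, pz=0.05)
    c.cnot(0, 1)
    print(c.expectation_ps(z=[0, 1]))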
@@ -323,6 +342,35 @@
         status: Optional[float] = None,
         name: Optional[str] = None,
     ) -> Tensor:
+        """
+        Apply a unitary Kraus channel to the circuit using a Monte Carlo approach. This method is functionally
+        similar to `unitary_kraus` but uses `backend.switch` for selecting the Kraus operator, which can have
+        different performance characteristics on some backends.
+
+        A random Kraus operator from the provided list is applied to the circuit based on the given probabilities.
+        This method is jittable and suitable for simulating noisy quantum circuits where the noise is represented
+        by unitary Kraus operators.
+
+        .. warning::
+            This method may have issues with `vmap` due to potential concurrent access locks, potentially related with
+            `backend.switch`. `unitary_kraus` is generally recommended.
+
+        :param kraus: A sequence of `Gate` objects representing the unitary Kraus operators.
+        :type kraus: Sequence[Gate]
+        :param index: The qubit indices on which to apply the Kraus channel.
+        :type index: int
+        :param prob: A sequence of probabilities corresponding to each Kraus operator. If None, probabilities
+            are derived from the operators themselves. Defaults to None.
+        :type prob: Optional[Sequence[float]], optional
+        :param status: A random number between 0 and 1 to determine which Kraus operator to apply. If None,
+            a random number is generated automatically. Defaults to None.
+        :type status: Optional[float], optional
+        :param name: An optional name for the operation. Defaults to None.
+        :type name: Optional[str], optional
+        :return: A tensor indicating which Kraus operator was applied.
+        :rtype: Tensor
+        """
+
         # dont use, has issue conflicting with vmap, concurrent access lock emerged
         # potential issue raised from switch
         # general impl from Monte Carlo trajectory depolarizing above
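
For comparison, a hedged sketch of the recommended `unitary_kraus` path with explicit probabilities and an externally supplied `status`; the bit-flip channel and the numbers are illustrative:

    import tensorcircuit as tc

    K = tc.set_backend("jax")

    c = tc.Circuit(1)
    c.h(0)
    # A bit-flip style channel expressed with unitary Kraus operators I and X;
    # `status` picks the branch, keeping the call jittable and reproducible.
    which = c.unitary_kraus(
        [tc.gates.i(), tc.gates.x()],
        0,
        prob=[0.9, 0.1],
        status=0.95,  # falls into the X branch for these probabilities
    )
    print(which)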
--- tensorcircuit_nightly-1.2.1.dev20250724/tensorcircuit/cons.py
+++ tensorcircuit_nightly-1.2.1.dev20250726/tensorcircuit/cons.py
@@ -516,8 +516,8 @@ def _get_path_cache_friendly(
     nodes = list(nodes)
 
     nodes_new = sorted(nodes, key=lambda node: getattr(node, "_stable_id_", -1))
-    if isinstance(algorithm, list):
-        return algorithm, nodes_new
+    # if isinstance(algorithm, list):
+    #     return algorithm, [nodes_new]
 
     all_edges = tn.get_all_edges(nodes_new)
     all_edges_sorted = sorted_edges(all_edges)
@@ -693,6 +693,51 @@ def _base(
     return final_node
 
 
+class NodesReturn(Exception):
+    """
+    Intentionally stop execution to return a value.
+    """
+
+    def __init__(self, value_to_return: Any):
+        self.value = value_to_return
+        super().__init__(
+            f"Intentionally stopping execution to return: {value_to_return}"
+        )
+
+
+def _get_sorted_nodes(nodes: List[Any], *args: Any, **kws: Any) -> Any:
+    nodes_new = sorted(nodes, key=lambda node: getattr(node, "_stable_id_", -1))
+    raise NodesReturn(nodes_new)
+
+
+def function_nodes_capture(func: Callable[[Any], Any]) -> Callable[[Any], Any]:
+    @wraps(func)
+    def wrapper(*args: Any, **kwargs: Any) -> Any:
+        with runtime_contractor(method="before"):
+            try:
+                result = func(*args, **kwargs)
+                return result
+            except NodesReturn as e:
+                return e.value
+
+    return wrapper
+
+
+@contextmanager
+def runtime_nodes_capture(key: str = "nodes") -> Iterator[Any]:
+    old_contractor = getattr(thismodule, "contractor")
+    set_contractor(method="before")
+    captured_value: Dict[str, List[tn.Node]] = {}
+    try:
+        yield captured_value
+    except NodesReturn as e:
+        captured_value[key] = e.value
+    finally:
+        for module in sys.modules:
+            if module.startswith(package_name):
+                setattr(sys.modules[module], "contractor", old_contractor)
+
+
 def custom(
     nodes: List[Any],
     optimizer: Any,
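
Based on the added code, the new context manager could be used roughly as follows to inspect the tensor network nodes before any contraction happens (a sketch only, not an official recipe; the circuit and observable are illustrative):

    import tensorcircuit as tc

    c = tc.Circuit(3)
    c.h(0)
    c.cnot(0, 1)
    c.cnot(1, 2)

    # Inside the block the contractor is swapped for the "before" hook, so the
    # expectation call raises NodesReturn internally and the sorted nodes land
    # in captured["nodes"] instead of being contracted.
    with tc.cons.runtime_nodes_capture() as captured:
        c.expectation_ps(z=[0])

    nodes = captured["nodes"]
    print(len(nodes))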
@@ -763,6 +808,16 @@ def custom_stateful(
 
 # only work for custom
 def contraction_info_decorator(algorithm: Callable[..., Any]) -> Callable[..., Any]:
+    """Decorator to add contraction information logging to an optimizer.
+
+    This decorator wraps an optimization algorithm and prints detailed information
+    about the contraction cost (FLOPs, size, write) and path finding time.
+
+    :param algorithm: The optimization algorithm to decorate.
+    :type algorithm: Callable[..., Any]
+    :return: The decorated optimization algorithm.
+    :rtype: Callable[..., Any]
+    """
     from cotengra import ContractionTree
 
     def new_algorithm(
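
The decorator documented above is normally reached through the custom contractor setup rather than called directly; a hedged sketch, assuming the `contraction_info` keyword of `set_contractor` enables it and with illustrative cotengra options:

    import cotengra as ctg
    import tensorcircuit as tc

    opt = ctg.ReusableHyperOptimizer(
        methods=["greedy"],
        parallel=False,
        minimize="flops",
        max_repeats=32,
    )
    # With contraction_info=True the optimizer is wrapped so that flops/size/write
    # statistics and the path finding time are printed for each contraction.
    tc.set_contractor("custom", optimizer=opt, preprocessing=True, contraction_info=True)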
@@ -869,6 +924,9 @@ def set_contractor(
             **kws,
         )
 
+    elif method == "before":  # a hack way to get the nodes
+        cf = _get_sorted_nodes
+
     else:
         # cf = getattr(tn.contractors, method, None)
         # if not cf:
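
And the decorator form added in the same changeset, sketched under the same assumptions (the wrapped function is illustrative):

    import tensorcircuit as tc

    @tc.cons.function_nodes_capture
    def state_nodes(circuit):
        # The decorator temporarily switches to the "before" contractor, catches
        # the internal NodesReturn, and hands back the node list instead of a state.
        return circuit.state()

    c = tc.Circuit(2)
    c.h(0)
    c.cnot(0, 1)
    nodes = state_nodes(c)
    print(len(nodes))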