eqc-models 0.14.5.tar.gz → 0.15.1.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (195)
  1. {eqc_models-0.14.5 → eqc_models-0.15.1}/.gitignore +1 -1
  2. {eqc_models-0.14.5 → eqc_models-0.15.1}/PKG-INFO +1 -1
  3. {eqc_models-0.14.5 → eqc_models-0.15.1}/docs/source/dependencies.rst +1 -1
  4. eqc_models-0.15.1/eqc_models/algorithms/__init__.py +12 -0
  5. eqc_models-0.15.1/eqc_models/algorithms/alm.py +630 -0
  6. {eqc_models-0.14.5 → eqc_models-0.15.1}/eqc_models/graph/shortestpath.py +20 -0
  7. eqc_models-0.15.1/eqc_models/ml/utils.py +132 -0
  8. {eqc_models-0.14.5 → eqc_models-0.15.1}/eqc_models/solvers/__init__.py +2 -2
  9. {eqc_models-0.14.5 → eqc_models-0.15.1}/eqc_models.egg-info/PKG-INFO +1 -1
  10. {eqc_models-0.14.5 → eqc_models-0.15.1}/eqc_models.egg-info/SOURCES.txt +5 -0
  11. eqc_models-0.15.1/scripts/ALM_pipeline.ipynb +418 -0
  12. {eqc_models-0.14.5 → eqc_models-0.15.1}/scripts/qboost_iris_dirac3.py +22 -32
  13. eqc_models-0.15.1/scripts/test_reservoir.py +48 -0
  14. eqc_models-0.15.1/test/testalm.py +412 -0
  15. eqc_models-0.14.5/eqc_models/algorithms/__init__.py +0 -4
  16. {eqc_models-0.14.5 → eqc_models-0.15.1}/.gitlab-ci.yml +0 -0
  17. {eqc_models-0.14.5 → eqc_models-0.15.1}/LICENSE.txt +0 -0
  18. {eqc_models-0.14.5 → eqc_models-0.15.1}/MANIFEST.in +0 -0
  19. {eqc_models-0.14.5 → eqc_models-0.15.1}/README.md +0 -0
  20. {eqc_models-0.14.5 → eqc_models-0.15.1}/compile_extensions.py +0 -0
  21. {eqc_models-0.14.5 → eqc_models-0.15.1}/docs/Makefile +0 -0
  22. {eqc_models-0.14.5 → eqc_models-0.15.1}/docs/build/html/_static/basic.css +0 -0
  23. {eqc_models-0.14.5 → eqc_models-0.15.1}/docs/build/html/_static/css/badge_only.css +0 -0
  24. {eqc_models-0.14.5 → eqc_models-0.15.1}/docs/build/html/_static/css/theme.css +0 -0
  25. {eqc_models-0.14.5 → eqc_models-0.15.1}/docs/build/html/_static/custom.css +0 -0
  26. {eqc_models-0.14.5 → eqc_models-0.15.1}/docs/build/html/_static/file.png +0 -0
  27. {eqc_models-0.14.5 → eqc_models-0.15.1}/docs/build/html/_static/minus.png +0 -0
  28. {eqc_models-0.14.5 → eqc_models-0.15.1}/docs/build/html/_static/plus.png +0 -0
  29. {eqc_models-0.14.5 → eqc_models-0.15.1}/docs/build/html/_static/pygments.css +0 -0
  30. {eqc_models-0.14.5 → eqc_models-0.15.1}/docs/build/html/_static/white_logo.png +0 -0
  31. {eqc_models-0.14.5 → eqc_models-0.15.1}/docs/make.bat +0 -0
  32. {eqc_models-0.14.5 → eqc_models-0.15.1}/docs/source/_static/custom.css +0 -0
  33. {eqc_models-0.14.5 → eqc_models-0.15.1}/docs/source/_static/white_logo.png +0 -0
  34. {eqc_models-0.14.5 → eqc_models-0.15.1}/docs/source/conf.py +0 -0
  35. {eqc_models-0.14.5 → eqc_models-0.15.1}/docs/source/eqc_models.rst +0 -0
  36. {eqc_models-0.14.5 → eqc_models-0.15.1}/docs/source/index.rst +0 -0
  37. {eqc_models-0.14.5 → eqc_models-0.15.1}/docs/source/modules.rst +0 -0
  38. {eqc_models-0.14.5 → eqc_models-0.15.1}/docs/source/usage.rst +0 -0
  39. {eqc_models-0.14.5 → eqc_models-0.15.1}/eqc_models/__init__.py +0 -0
  40. {eqc_models-0.14.5 → eqc_models-0.15.1}/eqc_models/algorithms/base.py +0 -0
  41. {eqc_models-0.14.5 → eqc_models-0.15.1}/eqc_models/algorithms/penaltymultiplier.py +0 -0
  42. {eqc_models-0.14.5 → eqc_models-0.15.1}/eqc_models/allocation/__init__.py +0 -0
  43. {eqc_models-0.14.5 → eqc_models-0.15.1}/eqc_models/allocation/allocation.py +0 -0
  44. {eqc_models-0.14.5 → eqc_models-0.15.1}/eqc_models/allocation/portbase.py +0 -0
  45. {eqc_models-0.14.5 → eqc_models-0.15.1}/eqc_models/allocation/portmomentum.py +0 -0
  46. {eqc_models-0.14.5 → eqc_models-0.15.1}/eqc_models/assignment/__init__.py +0 -0
  47. {eqc_models-0.14.5 → eqc_models-0.15.1}/eqc_models/assignment/qap.py +0 -0
  48. {eqc_models-0.14.5 → eqc_models-0.15.1}/eqc_models/assignment/resource.py +0 -0
  49. {eqc_models-0.14.5 → eqc_models-0.15.1}/eqc_models/assignment/setpartition.py +0 -0
  50. {eqc_models-0.14.5 → eqc_models-0.15.1}/eqc_models/base/__init__.py +0 -0
  51. {eqc_models-0.14.5 → eqc_models-0.15.1}/eqc_models/base/base.py +0 -0
  52. {eqc_models-0.14.5 → eqc_models-0.15.1}/eqc_models/base/binaries.py +0 -0
  53. {eqc_models-0.14.5 → eqc_models-0.15.1}/eqc_models/base/constraints.py +0 -0
  54. {eqc_models-0.14.5 → eqc_models-0.15.1}/eqc_models/base/operators.py +0 -0
  55. {eqc_models-0.14.5 → eqc_models-0.15.1}/eqc_models/base/polyeval.pyx +0 -0
  56. {eqc_models-0.14.5 → eqc_models-0.15.1}/eqc_models/base/polynomial.py +0 -0
  57. {eqc_models-0.14.5 → eqc_models-0.15.1}/eqc_models/base/quadratic.py +0 -0
  58. {eqc_models-0.14.5 → eqc_models-0.15.1}/eqc_models/base/results.py +0 -0
  59. {eqc_models-0.14.5 → eqc_models-0.15.1}/eqc_models/combinatorics/__init__.py +0 -0
  60. {eqc_models-0.14.5 → eqc_models-0.15.1}/eqc_models/combinatorics/setcover.py +0 -0
  61. {eqc_models-0.14.5 → eqc_models-0.15.1}/eqc_models/combinatorics/setpartition.py +0 -0
  62. {eqc_models-0.14.5 → eqc_models-0.15.1}/eqc_models/decoding.py +0 -0
  63. {eqc_models-0.14.5 → eqc_models-0.15.1}/eqc_models/graph/__init__.py +0 -0
  64. {eqc_models-0.14.5 → eqc_models-0.15.1}/eqc_models/graph/base.py +0 -0
  65. {eqc_models-0.14.5 → eqc_models-0.15.1}/eqc_models/graph/hypergraph.py +0 -0
  66. {eqc_models-0.14.5 → eqc_models-0.15.1}/eqc_models/graph/maxcut.py +0 -0
  67. {eqc_models-0.14.5 → eqc_models-0.15.1}/eqc_models/graph/maxkcut.py +0 -0
  68. {eqc_models-0.14.5 → eqc_models-0.15.1}/eqc_models/graph/partition.py +0 -0
  69. {eqc_models-0.14.5 → eqc_models-0.15.1}/eqc_models/graph/rcshortestpath.py +0 -0
  70. {eqc_models-0.14.5 → eqc_models-0.15.1}/eqc_models/ml/__init__.py +0 -0
  71. {eqc_models-0.14.5 → eqc_models-0.15.1}/eqc_models/ml/classifierbase.py +0 -0
  72. {eqc_models-0.14.5 → eqc_models-0.15.1}/eqc_models/ml/classifierqboost.py +0 -0
  73. {eqc_models-0.14.5 → eqc_models-0.15.1}/eqc_models/ml/classifierqsvm.py +0 -0
  74. {eqc_models-0.14.5 → eqc_models-0.15.1}/eqc_models/ml/clustering.py +0 -0
  75. {eqc_models-0.14.5 → eqc_models-0.15.1}/eqc_models/ml/clusteringbase.py +0 -0
  76. {eqc_models-0.14.5 → eqc_models-0.15.1}/eqc_models/ml/cvqboost_hamiltonian.pyx +0 -0
  77. {eqc_models-0.14.5 → eqc_models-0.15.1}/eqc_models/ml/cvqboost_hamiltonian_c_func.c +0 -0
  78. {eqc_models-0.14.5 → eqc_models-0.15.1}/eqc_models/ml/cvqboost_hamiltonian_c_func.h +0 -0
  79. {eqc_models-0.14.5 → eqc_models-0.15.1}/eqc_models/ml/decomposition.py +0 -0
  80. {eqc_models-0.14.5 → eqc_models-0.15.1}/eqc_models/ml/forecast.py +0 -0
  81. {eqc_models-0.14.5 → eqc_models-0.15.1}/eqc_models/ml/forecastbase.py +0 -0
  82. {eqc_models-0.14.5 → eqc_models-0.15.1}/eqc_models/ml/regressor.py +0 -0
  83. {eqc_models-0.14.5 → eqc_models-0.15.1}/eqc_models/ml/regressorbase.py +0 -0
  84. {eqc_models-0.14.5 → eqc_models-0.15.1}/eqc_models/ml/reservoir.py +0 -0
  85. {eqc_models-0.14.5 → eqc_models-0.15.1}/eqc_models/process/base.py +0 -0
  86. {eqc_models-0.14.5 → eqc_models-0.15.1}/eqc_models/process/mpc.py +0 -0
  87. {eqc_models-0.14.5 → eqc_models-0.15.1}/eqc_models/sequence/__init__.py +0 -0
  88. {eqc_models-0.14.5 → eqc_models-0.15.1}/eqc_models/sequence/tsp.py +0 -0
  89. {eqc_models-0.14.5 → eqc_models-0.15.1}/eqc_models/solvers/eqcdirect.py +0 -0
  90. {eqc_models-0.14.5 → eqc_models-0.15.1}/eqc_models/solvers/mip.py +0 -0
  91. {eqc_models-0.14.5 → eqc_models-0.15.1}/eqc_models/solvers/qciclient.py +0 -0
  92. {eqc_models-0.14.5 → eqc_models-0.15.1}/eqc_models/solvers/responselog.py +0 -0
  93. {eqc_models-0.14.5 → eqc_models-0.15.1}/eqc_models/utilities/__init__.py +0 -0
  94. {eqc_models-0.14.5 → eqc_models-0.15.1}/eqc_models/utilities/fileio.py +0 -0
  95. {eqc_models-0.14.5 → eqc_models-0.15.1}/eqc_models/utilities/general.py +0 -0
  96. {eqc_models-0.14.5 → eqc_models-0.15.1}/eqc_models/utilities/polynomial.py +0 -0
  97. {eqc_models-0.14.5 → eqc_models-0.15.1}/eqc_models/utilities/qplib.py +0 -0
  98. {eqc_models-0.14.5 → eqc_models-0.15.1}/eqc_models.egg-info/dependency_links.txt +0 -0
  99. {eqc_models-0.14.5 → eqc_models-0.15.1}/eqc_models.egg-info/requires.txt +0 -0
  100. {eqc_models-0.14.5 → eqc_models-0.15.1}/eqc_models.egg-info/top_level.txt +0 -0
  101. {eqc_models-0.14.5 → eqc_models-0.15.1}/pyproject.toml +0 -0
  102. {eqc_models-0.14.5 → eqc_models-0.15.1}/scripts/binary_job_example.py +0 -0
  103. {eqc_models-0.14.5 → eqc_models-0.15.1}/scripts/binary_w_continuous_solver_example.py +0 -0
  104. {eqc_models-0.14.5 → eqc_models-0.15.1}/scripts/c6h6_graph_clustering.py +0 -0
  105. {eqc_models-0.14.5 → eqc_models-0.15.1}/scripts/clustering.py +0 -0
  106. {eqc_models-0.14.5 → eqc_models-0.15.1}/scripts/continuous_job_example.py +0 -0
  107. {eqc_models-0.14.5 → eqc_models-0.15.1}/scripts/convert_to_json_problem.py +0 -0
  108. {eqc_models-0.14.5 → eqc_models-0.15.1}/scripts/crew_assignment_example.py +0 -0
  109. {eqc_models-0.14.5 → eqc_models-0.15.1}/scripts/graph_clustering.py +0 -0
  110. {eqc_models-0.14.5 → eqc_models-0.15.1}/scripts/graph_partitioning.py +0 -0
  111. {eqc_models-0.14.5 → eqc_models-0.15.1}/scripts/hamiltonian_to_polynomial.py +0 -0
  112. {eqc_models-0.14.5 → eqc_models-0.15.1}/scripts/hypergraph.py +0 -0
  113. {eqc_models-0.14.5 → eqc_models-0.15.1}/scripts/integer_job_example.py +0 -0
  114. {eqc_models-0.14.5 → eqc_models-0.15.1}/scripts/karate_graph_clustering.py +0 -0
  115. {eqc_models-0.14.5 → eqc_models-0.15.1}/scripts/lin_reg_dirac3.py +0 -0
  116. {eqc_models-0.14.5 → eqc_models-0.15.1}/scripts/mackey_glass_cell_production_series.csv +0 -0
  117. {eqc_models-0.14.5 → eqc_models-0.15.1}/scripts/mip_example.py +0 -0
  118. {eqc_models-0.14.5 → eqc_models-0.15.1}/scripts/pca_iris_dirac3.py +0 -0
  119. {eqc_models-0.14.5 → eqc_models-0.15.1}/scripts/port_opt_dirac3.py +0 -0
  120. {eqc_models-0.14.5 → eqc_models-0.15.1}/scripts/qboost_iris_dirac3_weak_cls.py +0 -0
  121. {eqc_models-0.14.5 → eqc_models-0.15.1}/scripts/qplib_benchmark_config.py +0 -0
  122. {eqc_models-0.14.5 → eqc_models-0.15.1}/scripts/qplib_reader.py +0 -0
  123. {eqc_models-0.14.5 → eqc_models-0.15.1}/scripts/qplib_runner.py +0 -0
  124. {eqc_models-0.14.5 → eqc_models-0.15.1}/scripts/qsvm_dual_iris_dirac3.py +0 -0
  125. {eqc_models-0.14.5 → eqc_models-0.15.1}/scripts/qsvm_iris_dirac3.py +0 -0
  126. {eqc_models-0.14.5 → eqc_models-0.15.1}/scripts/reservoir_forecast.py +0 -0
  127. {eqc_models-0.14.5 → eqc_models-0.15.1}/scripts/results_example.py +0 -0
  128. {eqc_models-0.14.5 → eqc_models-0.15.1}/scripts/rundoctests.py +0 -0
  129. {eqc_models-0.14.5 → eqc_models-0.15.1}/scripts/test_shortestpath.py +0 -0
  130. {eqc_models-0.14.5 → eqc_models-0.15.1}/scripts/utils.py +0 -0
  131. {eqc_models-0.14.5 → eqc_models-0.15.1}/setup.cfg +0 -0
  132. {eqc_models-0.14.5 → eqc_models-0.15.1}/test/doctest_base.py +0 -0
  133. {eqc_models-0.14.5 → eqc_models-0.15.1}/test/testallocationmodel.py +0 -0
  134. {eqc_models-0.14.5 → eqc_models-0.15.1}/test/testconstraint.py +0 -0
  135. {eqc_models-0.14.5 → eqc_models-0.15.1}/test/testcvqboost.py +0 -0
  136. {eqc_models-0.14.5 → eqc_models-0.15.1}/test/testeqcdirectsolver.py +0 -0
  137. {eqc_models-0.14.5 → eqc_models-0.15.1}/test/testgraphpartitionmodel.py +0 -0
  138. {eqc_models-0.14.5 → eqc_models-0.15.1}/test/testhypergraphmodel.py +0 -0
  139. {eqc_models-0.14.5 → eqc_models-0.15.1}/test/testmaxcutmodel.py +0 -0
  140. {eqc_models-0.14.5 → eqc_models-0.15.1}/test/testpolynomialmodel.py +0 -0
  141. {eqc_models-0.14.5 → eqc_models-0.15.1}/test/testqapmodel.py +0 -0
  142. {eqc_models-0.14.5 → eqc_models-0.15.1}/test/testqciclientsolver.py +0 -0
  143. {eqc_models-0.14.5 → eqc_models-0.15.1}/test/testquadraticmodel.py +0 -0
  144. {eqc_models-0.14.5 → eqc_models-0.15.1}/test/testsetcovermodel.py +0 -0
  145. {eqc_models-0.14.5 → eqc_models-0.15.1}/test/testsetpartitionmodel.py +0 -0
  146. {eqc_models-0.14.5 → eqc_models-0.15.1}/test/testshortestpath.py +0 -0
  147. {eqc_models-0.14.5 → eqc_models-0.15.1}/test/testtsp.py +0 -0
  148. {eqc_models-0.14.5 → eqc_models-0.15.1}/test_suite/README.txt +0 -0
  149. {eqc_models-0.14.5 → eqc_models-0.15.1}/test_suite/run_tests.py +0 -0
  150. {eqc_models-0.14.5 → eqc_models-0.15.1}/test_suite/test_cases/c6h6_graph_clustering/c6h6_graph_clustering.py +0 -0
  151. {eqc_models-0.14.5 → eqc_models-0.15.1}/test_suite/test_cases/clustering/clustering.py +0 -0
  152. {eqc_models-0.14.5 → eqc_models-0.15.1}/test_suite/test_cases/clustering/data/X.npy +0 -0
  153. {eqc_models-0.14.5 → eqc_models-0.15.1}/test_suite/test_cases/cvqboost_iris/cvqboost_iris.py +0 -0
  154. {eqc_models-0.14.5 → eqc_models-0.15.1}/test_suite/test_cases/cvqboost_iris/data/X_test.npy +0 -0
  155. {eqc_models-0.14.5 → eqc_models-0.15.1}/test_suite/test_cases/cvqboost_iris/data/X_train.npy +0 -0
  156. {eqc_models-0.14.5 → eqc_models-0.15.1}/test_suite/test_cases/cvqboost_iris/data/y_test.npy +0 -0
  157. {eqc_models-0.14.5 → eqc_models-0.15.1}/test_suite/test_cases/cvqboost_iris/data/y_train.npy +0 -0
  158. {eqc_models-0.14.5 → eqc_models-0.15.1}/test_suite/test_cases/karate_graph_clustering/karate_graph_clustering.py +0 -0
  159. {eqc_models-0.14.5 → eqc_models-0.15.1}/test_suite/test_cases/pca_iris/pca_iris.py +0 -0
  160. {eqc_models-0.14.5 → eqc_models-0.15.1}/test_suite/test_cases/protein_design_1MJC/data/C_1MJC.npy +0 -0
  161. {eqc_models-0.14.5 → eqc_models-0.15.1}/test_suite/test_cases/protein_design_1MJC/data/J_1MJC.npy +0 -0
  162. {eqc_models-0.14.5 → eqc_models-0.15.1}/test_suite/test_cases/protein_design_1MJC/protein_design_1MJC.py +0 -0
  163. {eqc_models-0.14.5 → eqc_models-0.15.1}/test_suite/test_cases/protein_design_1NXB/data/C_1NXB.npy +0 -0
  164. {eqc_models-0.14.5 → eqc_models-0.15.1}/test_suite/test_cases/protein_design_1NXB/data/J_1NXB.npy +0 -0
  165. {eqc_models-0.14.5 → eqc_models-0.15.1}/test_suite/test_cases/protein_design_1NXB/protein_design_1NXB.py +0 -0
  166. {eqc_models-0.14.5 → eqc_models-0.15.1}/test_suite/test_cases/protein_design_1POH/data/C_1POH.npy +0 -0
  167. {eqc_models-0.14.5 → eqc_models-0.15.1}/test_suite/test_cases/protein_design_1POH/data/J_1POH.npy +0 -0
  168. {eqc_models-0.14.5 → eqc_models-0.15.1}/test_suite/test_cases/protein_design_1POH/protein_design_1POH.py +0 -0
  169. {eqc_models-0.14.5 → eqc_models-0.15.1}/test_suite/test_cases/qsvm_dual_iris/data/X_test.npy +0 -0
  170. {eqc_models-0.14.5 → eqc_models-0.15.1}/test_suite/test_cases/qsvm_dual_iris/data/X_train.npy +0 -0
  171. {eqc_models-0.14.5 → eqc_models-0.15.1}/test_suite/test_cases/qsvm_dual_iris/data/y_test.npy +0 -0
  172. {eqc_models-0.14.5 → eqc_models-0.15.1}/test_suite/test_cases/qsvm_dual_iris/data/y_train.npy +0 -0
  173. {eqc_models-0.14.5 → eqc_models-0.15.1}/test_suite/test_cases/qsvm_dual_iris/qsvm_dual_iris.py +0 -0
  174. {eqc_models-0.14.5 → eqc_models-0.15.1}/test_suite/test_cases/qsvm_primal_iris/data/X_test.npy +0 -0
  175. {eqc_models-0.14.5 → eqc_models-0.15.1}/test_suite/test_cases/qsvm_primal_iris/data/X_train.npy +0 -0
  176. {eqc_models-0.14.5 → eqc_models-0.15.1}/test_suite/test_cases/qsvm_primal_iris/data/y_test.npy +0 -0
  177. {eqc_models-0.14.5 → eqc_models-0.15.1}/test_suite/test_cases/qsvm_primal_iris/data/y_train.npy +0 -0
  178. {eqc_models-0.14.5 → eqc_models-0.15.1}/test_suite/test_cases/qsvm_primal_iris/qsvm_primal_iris.py +0 -0
  179. {eqc_models-0.14.5 → eqc_models-0.15.1}/test_suite/test_cases/synthetic_cls_100/data/C_8000000_100.npy +0 -0
  180. {eqc_models-0.14.5 → eqc_models-0.15.1}/test_suite/test_cases/synthetic_cls_100/data/J_8000000_100.npy +0 -0
  181. {eqc_models-0.14.5 → eqc_models-0.15.1}/test_suite/test_cases/synthetic_cls_100/synthetic_cls_100.py +0 -0
  182. {eqc_models-0.14.5 → eqc_models-0.15.1}/test_suite/test_cases/synthetic_cls_300/data/C_8000000_300.npy +0 -0
  183. {eqc_models-0.14.5 → eqc_models-0.15.1}/test_suite/test_cases/synthetic_cls_300/data/J_8000000_300.npy +0 -0
  184. {eqc_models-0.14.5 → eqc_models-0.15.1}/test_suite/test_cases/synthetic_cls_300/synthetic_cls_300.py +0 -0
  185. {eqc_models-0.14.5 → eqc_models-0.15.1}/test_suite/test_cases/synthetic_cls_500/data/C_8000000_500.npy +0 -0
  186. {eqc_models-0.14.5 → eqc_models-0.15.1}/test_suite/test_cases/synthetic_cls_500/data/J_8000000_500.npy +0 -0
  187. {eqc_models-0.14.5 → eqc_models-0.15.1}/test_suite/test_cases/synthetic_cls_500/synthetic_cls_500.py +0 -0
  188. {eqc_models-0.14.5 → eqc_models-0.15.1}/test_suite/test_cases/synthetic_cls_700/data/C_8000000_700.npy +0 -0
  189. {eqc_models-0.14.5 → eqc_models-0.15.1}/test_suite/test_cases/synthetic_cls_700/data/J_8000000_700.npy +0 -0
  190. {eqc_models-0.14.5 → eqc_models-0.15.1}/test_suite/test_cases/synthetic_cls_700/synthetic_cls_700.py +0 -0
  191. {eqc_models-0.14.5 → eqc_models-0.15.1}/test_suite/test_cases/synthetic_cls_900/data/C_8000000_900.npy +0 -0
  192. {eqc_models-0.14.5 → eqc_models-0.15.1}/test_suite/test_cases/synthetic_cls_900/data/J_8000000_900.npy +0 -0
  193. {eqc_models-0.14.5 → eqc_models-0.15.1}/test_suite/test_cases/synthetic_cls_900/synthetic_cls_900.py +0 -0
  194. {eqc_models-0.14.5 → eqc_models-0.15.1}/test_suite/test_suite_config.json +0 -0
  195. {eqc_models-0.14.5 → eqc_models-0.15.1}/test_suite/test_utils.py +0 -0
{eqc_models-0.14.5 → eqc_models-0.15.1}/.gitignore
@@ -1,6 +1,6 @@
  __pycache__
  build
  eqc_models.egg-info
-
+ scripts/test_reservoir.py
  *.c
  *.so
{eqc_models-0.14.5 → eqc_models-0.15.1}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: eqc-models
- Version: 0.14.5
+ Version: 0.15.1
  Summary: Optimization and ML modeling package targeting EQC devices
  Author-email: "Quantum Computing Inc." <support@quantumcomputinginc.com>
  Project-URL: Homepage, https://quantumcomputinginc.com
{eqc_models-0.14.5 → eqc_models-0.15.1}/docs/source/dependencies.rst
@@ -13,5 +13,5 @@ Packages
  - ``pandas >=2.1.0, <3``
  - ``scikit-learn >=1.2.1, <2``
  - ``lightgbm >= 4.6.0, <5``
- - ``xgboost >= 1.7.4, <2``
+ - ``xgboost >= 1.7.4, <4``
  - ``qci-client>=5, <6``
eqc_models-0.15.1/eqc_models/algorithms/__init__.py
@@ -0,0 +1,12 @@
+ # (C) Quantum Computing Inc., 2024.
+ from .penaltymultiplier import PenaltyMultiplierAlgorithm
+ from .alm import (
+     ALMAlgorithm,
+     ConstraintRegistry,
+     ALMConfig,
+     ALMConstraint,
+     ALMBlock
+ )
+
+ __all__ = ["PenaltyMultiplierAlgorithm", "ALMAlgorithm", "ConstraintRegistry",
+            "ALMConfig", "ALMConstraint", "ALMBlock",]
eqc_models-0.15.1/eqc_models/algorithms/alm.py
@@ -0,0 +1,630 @@
+ # (C) Quantum Computing Inc., 2025.
+ from dataclasses import dataclass
+ from typing import Callable, Dict, List, Tuple, Optional, Sequence, Union
+ import numpy as np
+ from collections import defaultdict
+ from eqc_models.base.polynomial import PolynomialModel
+
+ Array = np.ndarray
+ PolyTerm = Tuple[Tuple[int, ...], float]
+
+
+ @dataclass
+ class ALMConstraint:
+     """One constraint family; fun returns a vector; jac returns its Jacobian."""
+     kind: str  # "eq" or "ineq"
+     fun: Callable[[Array], Array]  # h(x) or g(x)
+     jac: Optional[Callable[[Array], Array]] = None
+     name: str = ""
+
+
+ @dataclass
+ class ALMBlock:
+     """Lifted discrete variable block (optional)."""
+     idx: Sequence[int]  # indices of block in the full x
+     levels: Array  # (k,) level values (b_i)
+     enforce_sum_to_one: bool = True  # register as equality via helper
+     enforce_one_hot: bool = True  # ALM linearization with M = 11^T - I
+
+
+ @dataclass
+ class ALMConfig:
+     # penalties
+     rho_h: float = 50.0  # equalities
+     rho_g: float = 50.0  # inequalities / one-hot
+     rho_min: float = 1e-3
+     rho_max: float = 1e3
+     # adaptation toggles
+     adapt: bool = True
+     tau_up_h: float = 0.90
+     tau_down_h: float = 0.50
+     tau_up_g: float = 0.90
+     tau_down_g: float = 0.50
+     gamma_up: float = 2.0
+     gamma_down: float = 1.0
+     # tolerances & loop
+     tol_h: float = 1e-6
+     tol_g: float = 1e-6
+     max_outer: int = 100
+     # stagnation safety net
+     use_stagnation_bump: bool = True
+     patience_h: int = 10
+     patience_g: int = 10
+     stagnation_factor: float = 1e-3
+     # smoothing (optional)
+     ema_alpha: float = 0.3
+     # finite diff (only used if jac=None)
+     fd_eps: float = 1e-6
+     # activation threshold for projected ALM
+     act_tol: float = 1e-10
+
+
+ class ConstraintRegistry:
+     """
+     Holds constraints and block metadata; keeps ALMAlgorithm stateless. Register constraints and
+     (optional) lifted-discrete blocks here.
+     """
+     def __init__(self):
+         self.constraints: List[ALMConstraint] = []
+         self.blocks: List[ALMBlock] = []
+
+     def add_equality(self, fun, jac=None, name=""):
+         self.constraints.append(ALMConstraint("eq", fun, jac, name))
+
+     def add_inequality(self, fun, jac=None, name=""):
+         self.constraints.append(ALMConstraint("ineq", fun, jac, name))
+
+     def add_block(self, idx: Sequence[int], levels: Array, sum_to_one=True, one_hot=True):
+         self.blocks.append(ALMBlock(list(idx), np.asarray(levels, float), sum_to_one, one_hot))
+
+
+ class ALMAlgorithm:
+     """Stateless ALM outer loop. Call `run(model, registry, core, cfg, **core_kwargs)`."""
+
+     # ---- helpers (static) ----
+     @staticmethod
+     def _finite_diff_jac(fun: Callable[[Array], Array], x: Array, eps: float) -> Array:
+         y0 = fun(x)
+         m = int(np.prod(y0.shape))
+         y0 = y0.reshape(-1)
+         n = x.size
+         J = np.zeros((m, n), dtype=float)
+         for j in range(n):
+             xp = x.copy()
+             xp[j] += eps
+             J[:, j] = (fun(xp).reshape(-1) - y0) / eps
+         return J
+
+     @staticmethod
+     def _pairwise_M(k: int) -> Array:
+         return np.ones((k, k), dtype=float) - np.eye(k, dtype=float)
+
+     @staticmethod
+     def _sum_to_one_selector(n: int, idx: Sequence[int]) -> Array:
+         S = np.zeros((1, n), dtype=float)
+         S[0, np.array(list(idx), int)] = 1.0
+         return S
+
+     @staticmethod
+     def _make_sum1_fun(S):
+         return lambda x: S @ x - np.array([1.0])
+
+     @staticmethod
+     def _make_sum1_jac(S):
+         return lambda x: S
+
+     @staticmethod
+     def _make_onehot_fun(sl, M):
+         sl = np.array(sl, int)
+
+         def _f(x):
+             s = x[sl]
+             return np.array([float(s @ (M @ s))])  # shape (1,)
+
+         return _f
+
+     @staticmethod
+     def _make_onehot_jac(sl, M, n):
+         sl = np.array(sl, int)
+
+         def _J(x):
+             s = x[sl]
+             grad_blk = 2.0 * (M @ s)  # (k,)
+             J = np.zeros((1, n), dtype=float)  # shape (1, n)
+             J[0, sl] = grad_blk
+             return J
+
+         return _J
+
+     @staticmethod
+     def _poly_value(poly_terms: List[PolyTerm], x: Array) -> float:
+         val = 0.0
+         for inds, coeff in poly_terms:
+             prod = 1.0
+             for j in inds:
+                 if j == 0:
+                     continue
+                 else:
+                     prod *= x[j - 1]
+             val += coeff * prod
+         return float(val)
+
+     @staticmethod
+     def _merge_poly(poly_terms: Optional[List[PolyTerm]], Q_aug: Optional[Array],
+                     c_aug: Optional[Array]) -> List[PolyTerm]:
+         """
+         Merge ALM's quadratic/linear increments (Q_aug, c_aug) into the base polynomial term list `poly_terms`.
+         If 'poly_terms' is None, then turn x^T Q_aug x + c_aug^T x into polynomial monomials.
+         Terms are of the form:
+             ((0, i), w) for linear, ((i, j), w) for quadratic.
+         """
+         merged = list(poly_terms) if poly_terms is not None else []
+
+         if Q_aug is not None:
+             Qs = 0.5 * (Q_aug + Q_aug.T)
+             n = Qs.shape[0]
+             for i in range(n):
+                 # diagonal contributes Qii * x_i^2
+                 if Qs[i, i] != 0.0:
+                     merged.append(((i + 1, i + 1), float(Qs[i, i])))
+                 for j in range(i + 1, n):
+                     q = 2.0 * Qs[i, j]  # x^T Q x -> sum_{i<j} 2*Q_ij x_i x_j
+                     if q != 0.0:
+                         merged.append(((i + 1, j + 1), float(q)))
+         if c_aug is not None:
+             for i, ci in enumerate(c_aug):
+                 if ci != 0.0:
+                     merged.append(((0, i + 1), float(ci)))
+         return merged
+
+     @staticmethod
+     def _block_offsets(blocks: List[ALMBlock]) -> List[int]:
+         """
+         Return starting offsets (0-based) for each lifted block in the
+         concatenated s-vector.
+         """
+         offs, pos = [], 0
+         for blk in blocks:
+             offs.append(pos)
+             pos += len(blk.levels)  # each block contributes k lifted coordinates
+         return offs
+
+     @staticmethod
+     def lift_Qc_to_poly_terms(
+         Q_native: np.ndarray,  # shape (m, m) over original discrete vars (one block per var)
+         c_native: np.ndarray,  # shape (m,)
+         blocks: List[ALMBlock],  # ALM blocks (in the same order as the rows/cols of Q_native, c_native)
+     ) -> Tuple[List[Tuple[int, ...]], List[float]]:
+         """
+         Expand a quadratic/linear objective over m native discrete variables into the lifted
+         (one-hot) s-space.
+
+         Given `Q_native` and `c_native` over original m discrete vars (one block per var), and
+         `blocks` (`list[ALMBlock]` in the same order as original vars), we enforce:
+         - For variable i with level values b_i (length k_i), we create k_i lifted coords s_{i,a}.
+         - Quadratic term: J_ij = Q_native[i,j] * (b_i b_j^T) contributes to pairs (s_{i,a}, s_{j,b})
+         - Linear term: C_i = c_native[i] * b_i contributes to s_{i,a}
+
+         Returns polynomial (indices, coeffs) over concatenated s variables.
+         """
+         m = len(blocks)
+         assert Q_native.shape == (m, m), "Q_native must match number of blocks"
+         assert c_native.shape == (m,), "c_native must match number of blocks"
+
+         offs = ALMAlgorithm._block_offsets(blocks)
+         terms_acc = defaultdict(float)
+
+         # Quadratic lift: J = kron-expansion
+         for i in range(m):
+             bi = blocks[i].levels[:, None]  # (k_i, 1)
+             oi = offs[i]
+             for j in range(m):
+                 bj = blocks[j].levels[:, None]  # (k_j, 1)
+                 oj = offs[j]
+                 J_ij = Q_native[i, j] * (bi @ bj.T)  # (k_i, k_j)
+                 if np.allclose(J_ij, 0.0):
+                     continue
+                 ki, kj = bi.shape[0], bj.shape[0]
+                 for a in range(ki):
+                     ia = oi + a + 1
+                     for b in range(kj):
+                         jb = oj + b + 1
+                         w = float(J_ij[a, b])
+                         if w == 0.0:
+                             continue
+                         if ia == jb:
+                             terms_acc[(ia, ia)] += w
+                         else:
+                             # NOTE: we store each cross monomial once (i < j); for i==j block pairs,
+                             # double to represent J_ab + J_ba
+                             if i == j:  # intra-block off-diagonal needs 2x
+                                 w *= 2.0
+                             i1, i2 = (ia, jb) if ia < jb else (jb, ia)
+                             terms_acc[(i1, i2)] += w
+
+         # Linear lift: C_i = L_i * b_i
+         for i in range(m):
+             b = blocks[i].levels
+             oi = offs[i]
+             for a, val in enumerate(b):
+                 ia = oi + a + 1
+                 w = float(c_native[i] * val)
+                 if w != 0.0:
+                     terms_acc[(0, ia)] += w
+
+         # Pack to PolynomialModel format
+         indices = [tuple(k) for k in terms_acc.keys()]
+         coeffs = [float(v) for v in terms_acc.values()]
+         return indices, coeffs
+
+     # ---- main entrypoint ----
+     @staticmethod
+     def run(
+         base_model: PolynomialModel,
+         registry: ConstraintRegistry,
+         solver,
+         cfg: ALMConfig = ALMConfig(),
+         x0: Optional[Array] = None,
+         *,
+         parse_output=None,
+         verbose: bool = True,
+         **solver_kwargs,
+     ) -> Dict[str, Union[Array, Dict[int, float], Dict]]:
+         """
+         Solve with ALM. Keep all ALM state local to this call (no global side-effects).
+         Handles three modes:
+         (A) No blocks -> continuous; use base_model as-is
+         (B) Blocks + native base_model -> lift to s-space
+         (C) Blocks + already-lifted base_model -> use as-is (compat)
+
+         Returns:
+             {
+               "x": final iterate,
+               "decoded": {start_idx_of_block: level_value, ...} for lifted blocks,
+               "decoded_debug": {start_idx_of_block: native_device_value, ...},
+               "hist": { "eq_inf": [...], "ineq_inf": [...], "obj": [...], "x": [...] }
+             }
+         """
+         blocks = registry.blocks
+         has_blocks = len(blocks) > 0
+
+         # ---- choose working model + dimension n ----
+         if not has_blocks:
+             # (A) continuous case: use the provided model directly
+             model = base_model
+             # Prefer model.n, fall back to bounds; else infer from polynomial indices
+             n = getattr(model, "n", None)
+             if n is None:
+                 ub = getattr(model, "upper_bound", None)
+                 lb = getattr(model, "lower_bound", None)
+                 if ub is not None:
+                     n = len(ub)
+                 elif lb is not None:
+                     n = len(lb)
+                 else:
+                     # infer from polynomial terms
+                     n = 0
+                     for inds in getattr(model, "indices", getattr(model.polynomial, "indices", [])):
+                         for j in inds:
+                             if max(j) > 0:
+                                 n = max(n, j)
+             lifted_slices: List[List[int]] = []
+
+         else:
+             # (B/C) lifted (discrete) case
+             target_lifted_n = sum(len(blk.levels) for blk in blocks)
+             base_n = getattr(base_model, "n", None)
+
+             # detect "already-lifted" native input (compat path)
+             already_lifted = (base_n == target_lifted_n)
+
+             if already_lifted:
+                 # (C) use provided model directly; assume bounds already sensible
+                 model = base_model
+                 n = target_lifted_n
+             else:
+                 # (B) lift from native space
+                 # base_model must expose coefficients/indices compatible with this call
+                 c_base, Q_base = base_model._quadratic_polynomial_to_qubo_coefficients(
+                     getattr(base_model, "coefficients", getattr(base_model.polynomial, "coefficients", [])),
+                     getattr(base_model, "indices", getattr(base_model.polynomial, "indices", [])),
+                     getattr(base_model, "n")
+                 )
+                 assert Q_base.shape[0] == Q_base.shape[1]
+                 assert c_base.shape[0] == Q_base.shape[0]
+                 indices_lifted, coeffs_lifted = ALMAlgorithm.lift_Qc_to_poly_terms(Q_base, c_base, blocks)
+                 model = PolynomialModel(coeffs_lifted, indices_lifted)
+                 n = target_lifted_n
+                 # set canonical [0,1] bounds for lifted s
+                 setattr(model, "lower_bound", np.zeros(n, float))
+                 setattr(model, "upper_bound", np.ones(n, float))
+
+             # ---- n and lifted_slices ----
+             lifted_slices = []
+             pos = 0
+             for blk in blocks:
+                 k = len(blk.levels)  # number of lifted coords for this block
+                 lifted_slices.append(list(range(pos, pos + k)))  # 0-based in lifted x
+                 pos += k
+
+         # Algorithm initial solution and bounds
+         lb = getattr(model, "lower_bound", None)
+         ub = getattr(model, "upper_bound", None)
+         if x0 is not None:
+             x = np.asarray(x0, float).copy()
+         else:
+             # default init
+             if (lb is not None) and (ub is not None) and np.all(np.isfinite(lb)) and np.all(np.isfinite(ub)):
+                 x = 0.5 * (np.asarray(lb, float) + np.asarray(ub, float))
+             else:
+                 x = np.zeros(n, float)
+
+         # ---- collect constraints ----
+         problem_eqs = [c for c in registry.constraints if c.kind == "eq"]
+         problem_ineqs = [c for c in registry.constraints if c.kind == "ineq"]
+
+         # auto-install sum-to-one and one-hot as equalities
+         # (One-hot: s^T (11^T - I) s = 0))
+         def _install_block_equalities() -> List[ALMConstraint]:
+             if not has_blocks:
+                 return []
+             eqs: List[ALMConstraint] = []
+             for blk, lift_idx in zip(registry.blocks, lifted_slices):
+                 if blk.enforce_sum_to_one:
+                     S = ALMAlgorithm._sum_to_one_selector(n, lift_idx)
+                     eqs.append(ALMConstraint(
+                         "eq",
+                         fun=ALMAlgorithm._make_sum1_fun(S),
+                         jac=ALMAlgorithm._make_sum1_jac(S),
+                         name=f"sum_to_one_block_{lift_idx[0]}",
+                     ))
+                 if blk.enforce_one_hot:
+                     k = len(lift_idx)
+                     M = ALMAlgorithm._pairwise_M(k)
+                     eqs.append(ALMConstraint(
+                         "eq",
+                         fun=ALMAlgorithm._make_onehot_fun(lift_idx, M),
+                         jac=ALMAlgorithm._make_onehot_jac(lift_idx, M, n),
+                         name=f"onehot_block_{lift_idx[0]}",
+                     ))
+             return eqs
+
+         block_eqs = _install_block_equalities()
+
+         # Unified equality list (order is fixed for whole run)
+         full_eqs = problem_eqs + block_eqs
+
+         # Allocate multipliers for every equality in full_eqs
+         lam_eq = []
+         for csp in full_eqs:
+             r0 = csp.fun(x).reshape(-1)
+             lam_eq.append(np.zeros_like(r0, dtype=float))
+
+         # Inequality multipliers per user inequality
+         mu_ineq = []
+         for csp in problem_ineqs:
+             r0 = csp.fun(x).reshape(-1)
+             mu_ineq.append(np.zeros_like(r0, dtype=float))
+
+         # -------- running stats for adaptive penalties --------
+         rho_h, rho_g = cfg.rho_h, cfg.rho_g
+         best_eq, best_ineq = np.inf, np.inf
+         no_imp_eq = no_imp_ineq = 0
+         prev_eq_inf, prev_ineq_inf = np.inf, np.inf
+         eps = 1e-12
+
+         hist = {"eq_inf": [], "ineq_inf": [], "obj": [], "x": [],
+                 # per-iteration logs for parameters/multipliers
+                 "rho_h": [], "rho_g": [],
+                 }
+         for k_idx, csp in enumerate(full_eqs):
+             if csp.kind != "eq":
+                 continue
+             hist[f"lam_eq_max_idx{k_idx}"] = []
+             hist[f"lam_eq_min_idx{k_idx}"] = []
+         for k_idx, csp in enumerate(problem_ineqs):
+             if csp.kind != "ineq":
+                 continue
+             hist[f"mu_ineq_max_idx{k_idx}"] = []
+             hist[f"mu_ineq_min_idx{k_idx}"] = []
+
+         for it in range(cfg.max_outer):
+             # -------- base polynomial (does not include fixed penalties here) --------
+             base_terms: List[PolyTerm] = list(zip(model.polynomial.indices, model.polynomial.coefficients))
+
+             # -------- ALM quadratic/linear pieces (assembled here, kept separate) --------
+             Q_aug = np.zeros((n, n), dtype=float)
+             c_aug = np.zeros(n, dtype=float)
+             have_aug = False
+
+             # (A) Equalities: linearize h near x^t => (rho/2)||A x - b||^2 + lam^T(Ax - b)
+             for k_idx, csp in enumerate(full_eqs):
+                 if csp.kind != "eq":
+                     continue
+                 h = csp.fun(x).reshape(-1)
+                 A = csp.jac(x) if csp.jac is not None else ALMAlgorithm._finite_diff_jac(csp.fun, x, cfg.fd_eps)
+                 A = np.atleast_2d(A)
+                 assert A.shape[1] == n, f"A has {A.shape[1]} cols, expected {n}"
+                 # linearization about current x: residual model r(x) = A x - b, with b = A x - h
+                 b = A @ x - h
+                 Qk = 0.5 * rho_h * (A.T @ A)
+                 ck = (A.T @ lam_eq[k_idx]) - rho_h * (A.T @ b)
+                 Q_aug += Qk
+                 c_aug += ck
+                 have_aug = True
+
+             # (B) Inequalities: projected ALM. Linearize g near x^t.
+             for k_idx, csp in enumerate(problem_ineqs):
+                 if csp.kind != "ineq":
+                     continue
+                 g = csp.fun(x).reshape(-1)
+                 G = csp.jac(x) if csp.jac is not None else ALMAlgorithm._finite_diff_jac(csp.fun, x, cfg.fd_eps)
+                 G = np.atleast_2d(G)
+                 assert G.shape[1] == n, f"G has {G.shape[1]} cols, expected {n}"
+                 d = G @ x - g
+                 # Activation measure at current iterate; meaning, the current violating inequality components:
+                 # g(x) + mu/rho; Powell-Hestenes-Rockafellar shifted residual
+                 y = G @ x - d + mu_ineq[k_idx] / rho_g
+                 active = (y > cfg.act_tol)
+                 if np.any(active):
+                     GA = G[active, :]
+                     muA = mu_ineq[k_idx][active]
+                     gA = g[active]
+                     # Q += (rho/2) * GA^T GA
+                     Qk = 0.5 * rho_g * (GA.T @ GA)
+                     # c += GA^T mu - rho * GA^T (GA x - gA); where GA x - gA is active measures of d = G @ x - g
+                     ck = (GA.T @ muA) - rho_g * (GA.T @ (GA @ x - gA))
+                     Q_aug += Qk
+                     c_aug += ck
+                     have_aug = True
+
+             # -------- build merged polynomial for the core solver --------
+             all_terms = ALMAlgorithm._merge_poly(base_terms, Q_aug if have_aug else None,
+                                                  c_aug if have_aug else None)
+             idxs, coeffs = zip(*[(inds, w) for (inds, w) in all_terms]) if all_terms else ([], [])
+             poly_model = PolynomialModel(list(coeffs), list(idxs))
+             if lb is not None and hasattr(poly_model, "lower_bound"):
+                 poly_model.lower_bound = np.asarray(lb, float)
+             if ub is not None and hasattr(poly_model, "upper_bound"):
+                 poly_model.upper_bound = np.asarray(ub, float)
+
+             x_ws = x.copy()
+
+             # Convention: many cores look for one of these fields if present.
+             # Use one or more to be future-proof; harmless if ignored.
+             setattr(poly_model, "initial_guess", x_ws)
+             setattr(poly_model, "warm_start", x_ws)
+             setattr(poly_model, "x0", x_ws)
+
+             # -------- inner solve --------
+             out = solver.solve(poly_model, **solver_kwargs)
+
+             # -------- parse --------
+             if parse_output:
+                 x = parse_output(out)
+             else:
+                 # default: support (value, x) or `.x` or raw x
+                 if isinstance(out, tuple) and len(out) == 2:
+                     _, x = out
+                 elif isinstance(out, dict) and "results" in out and "solutions" in out["results"]:
+                     x = out["results"]["solutions"][0]
+                 elif isinstance(out, dict) and "x" in out:
+                     x = out["x"]
+                 else:
+                     x = getattr(out, "x", out)
+             x = np.asarray(x, float)
+
+             # -------- residuals + multiplier updates --------
+             eq_infs = []
+             for k_idx, csp in enumerate(full_eqs):
+                 if csp.kind != "eq": continue
+                 r = csp.fun(x).reshape(-1)
+                 lam_eq[k_idx] = lam_eq[k_idx] + rho_h * r
+                 if r.size:
+                     eq_infs.append(np.max(np.abs(r)))
+             eq_inf = float(np.max(eq_infs)) if eq_infs else 0.0
+
+             ineq_infs = []
+             for k_idx, csp in enumerate(problem_ineqs):
+                 if csp.kind != "ineq": continue
+                 r = csp.fun(x).reshape(-1)
+                 mu_ineq[k_idx] = np.maximum(0.0, mu_ineq[k_idx] + rho_g * r)
+                 if r.size:
+                     ineq_infs.append(np.max(np.maximum(0.0, r)))
+             ineq_inf = float(np.max(ineq_infs)) if ineq_infs else 0.0
+
+             assert len(lam_eq) == len(full_eqs)
+             assert len(mu_ineq) == len(problem_ineqs)
+
+             # evaluate base polynomial only (ca add aug value if want to track full L_A)
+             f_val = ALMAlgorithm._poly_value(base_terms, x)
+
+             hist["eq_inf"].append(eq_inf); hist["ineq_inf"].append(ineq_inf)
+             hist["obj"].append(float(f_val)); hist["x"].append(x.copy())
+             # parameter & multiplier tracking
+             hist["rho_h"].append(float(rho_h)); hist["rho_g"].append(float(rho_g))
+             for k_idx, csp in enumerate(full_eqs):
+                 if csp.kind != "eq": continue
+                 hist[f"lam_eq_max_idx{k_idx}"].append(float(np.max(lam_eq[k_idx])))
+                 hist[f"lam_eq_min_idx{k_idx}"].append(float(np.min(lam_eq[k_idx])))
+             for k_idx, csp in enumerate(problem_ineqs):
+                 if csp.kind != "ineq": continue
+                 hist[f"mu_ineq_max_idx{k_idx}"].append(float(np.max(mu_ineq[k_idx])))
+                 hist[f"mu_ineq_min_idx{k_idx}"].append(float(np.min(mu_ineq[k_idx])))
+
+             if verbose:
+                 print(f"[ALM {it:02d}] f={f_val:.6g} | eq_inf={eq_inf:.2e} | ineq_inf={ineq_inf:.2e} "
+                       f"| rho_h={rho_h:.2e} | rho_g={rho_g:.2e}")
+
+             # stopping
+             if eq_inf <= cfg.tol_h and ineq_inf <= cfg.tol_g:
+                 if verbose:
+                     print(f"[ALM] converged at iter {it}")
+                 break
+
+             # EMA smoothing to reduce jitter
+             if it == 0:
+                 eq_inf_smooth = eq_inf
+                 ineq_inf_smooth = ineq_inf
+             else:
+                 eq_inf_smooth = cfg.ema_alpha * eq_inf + (1 - cfg.ema_alpha) * eq_inf_smooth
+                 ineq_inf_smooth = cfg.ema_alpha * ineq_inf + (1 - cfg.ema_alpha) * ineq_inf_smooth
+
+             # -------- Residual-ratio controller --------
+             if cfg.adapt and it > 0:
+                 # Equality group
+                 if eq_inf_smooth > cfg.tau_up_h * max(prev_eq_inf, eps):  # stalled or not shrinking
+                     rho_h = min(cfg.gamma_up * rho_h, cfg.rho_max)
+                 elif eq_inf_smooth < cfg.tau_down_h * max(prev_eq_inf, eps):  # fast progress, allow relaxation
+                     rho_h = max(cfg.gamma_down * rho_h, cfg.rho_min)
+
+                 # Inequality group
+                 if ineq_inf_smooth > cfg.tau_up_g * max(prev_ineq_inf, eps):
+                     rho_g = min(cfg.gamma_up * rho_g, cfg.rho_max)
+                 elif ineq_inf_smooth < cfg.tau_down_g * max(prev_ineq_inf, eps):
+                     rho_g = max(cfg.gamma_down * rho_g, cfg.rho_min)
+
+             # -------- Stagnation bump (safety net) --------
+             if cfg.use_stagnation_bump:
+                 # Equality stagnation
+                 if eq_inf <= best_eq * (1 - cfg.stagnation_factor):
+                     best_eq = eq_inf; no_imp_eq = 0
+                 else:
+                     no_imp_eq += 1
+                     if no_imp_eq >= cfg.patience_h:
+                         rho_h = min(2.0 * rho_h, cfg.rho_max); no_imp_eq = 0
+
+                 # Inequality stagnation
+                 if ineq_inf <= best_ineq * (1 - cfg.stagnation_factor):
+                     best_ineq = ineq_inf; no_imp_ineq = 0
+                 else:
+                     no_imp_ineq += 1
+                     if no_imp_ineq >= cfg.patience_g:
+                         rho_g = min(2.0 * rho_g, cfg.rho_max); no_imp_ineq = 0
+
+             # -------- finalize for next iteration --------
+             prev_eq_inf = max(eq_inf_smooth, eps)
+             prev_ineq_inf = max(ineq_inf_smooth, eps)
+
+         # ---- decoding back to native levels (only if blocks) ----
+         decoded_native: Dict[int, float] = {}  # maps original var anchor -> chosen level value
+         decoded_lifted: Dict[int, int] = {}  # maps lifted start index -> argmax position (optional)
+         if has_blocks:
+             for blk, lift_idx in zip(registry.blocks, lifted_slices):
+                 if not lift_idx:
+                     continue
+                 sl = np.array(lift_idx, int)
+                 if len(sl) == 0:
+                     continue
+                 sblk = x[sl]
+                 j = int(np.argmax(sblk))  # which level got selected in the block
+                 orig_anchor = int(blk.idx[0])  # anchor original var id for this block
+                 decoded_native[orig_anchor] = float(blk.levels[j])
+                 decoded_lifted[sl[0]] = j  # optional: lifted index -> chosen slot
+
+         return {
+             "x": x,
+             "decoded": decoded_native if has_blocks else {},
+             "decoded_debug": decoded_lifted if has_blocks else {},
+             "hist": hist,
+         }
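For orientation, the following is an editorial sketch (not part of the release) of how the ALM outer loop introduced above might be driven end to end, using only the API visible in this diff: `ConstraintRegistry.add_equality`, `ALMConfig`, and `ALMAlgorithm.run`, which accepts any `solver` object exposing `.solve(model, **kwargs)`. The toy grid-search inner solver, the constraint lambdas, and the bound attributes assigned on `PolynomialModel` (mirroring the `setattr` calls inside `run()`) are illustrative assumptions, not package code.

    import numpy as np
    from eqc_models.base.polynomial import PolynomialModel
    from eqc_models.algorithms import ALMAlgorithm, ALMConfig, ConstraintRegistry

    class GridStubSolver:
        """Toy inner solver (illustration only): brute-force search on a [0, 1]^2 grid."""
        def solve(self, model, **kwargs):
            terms = list(zip(model.polynomial.indices, model.polynomial.coefficients))
            grid = np.linspace(0.0, 1.0, 51)
            best_x, best_val = None, np.inf
            for a in grid:
                for b in grid:
                    x = np.array([a, b])
                    val = ALMAlgorithm._poly_value(terms, x)  # reuse the helper from alm.py
                    if val < best_val:
                        best_val, best_x = val, x
            return {"x": best_x}  # dict with "x" is one of the shapes run() parses by default

    # minimize x1^2 + x2^2 subject to x1 + x2 = 1, continuous mode (A): no lifted blocks
    model = PolynomialModel([1.0, 1.0], [(1, 1), (2, 2)])
    model.lower_bound = np.zeros(2)  # assumed settable, as run() itself does via setattr
    model.upper_bound = np.ones(2)

    registry = ConstraintRegistry()
    registry.add_equality(fun=lambda x: np.array([x[0] + x[1] - 1.0]),
                          jac=lambda x: np.array([[1.0, 1.0]]),
                          name="budget")

    result = ALMAlgorithm.run(model, registry, GridStubSolver(),
                              cfg=ALMConfig(rho_h=10.0, max_outer=25), verbose=False)
    print(result["x"], result["hist"]["eq_inf"][-1])

In practice the stub would be replaced by one of the solver classes in `eqc_models.solvers` with a compatible `.solve(model, ...)` signature, with any device options passed through `**solver_kwargs`.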