qmp-kit 0.0.54__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- qmp/__init__.py +7 -0
- qmp/__main__.py +55 -0
- qmp/_hamiltonian.cpp +94 -0
- qmp/_hamiltonian_cpu.cpp +566 -0
- qmp/_hamiltonian_cuda.cu +1018 -0
- qmp/_version.py +34 -0
- qmp/algorithms/chop_imag.py +99 -0
- qmp/algorithms/guide.py +215 -0
- qmp/algorithms/haar.py +526 -0
- qmp/algorithms/pert.py +66 -0
- qmp/algorithms/precompile.py +41 -0
- qmp/algorithms/rldiag.py +223 -0
- qmp/algorithms/vmc.py +141 -0
- qmp/bitspack.py +94 -0
- qmp/common.py +184 -0
- qmp/hamiltonian.py +206 -0
- qmp/losses.py +234 -0
- qmp/model_dict.py +222 -0
- qmp/models/__init__.py +3 -0
- qmp/models/fcidump.py +377 -0
- qmp/models/free_fermion.py +116 -0
- qmp/models/hubbard.py +235 -0
- qmp/models/ising.py +366 -0
- qmp/models/openfermion.py +271 -0
- qmp/networks/__init__.py +3 -0
- qmp/networks/attention.py +884 -0
- qmp/networks/crossmlp.py +163 -0
- qmp/networks/mlp.py +510 -0
- qmp/networks/peps.py +126 -0
- qmp/optimizer.py +99 -0
- qmp/random_engine.py +31 -0
- qmp/subcommand_dict.py +27 -0
- qmp/version.py +11 -0
- qmp_kit-0.0.54.dist-info/METADATA +156 -0
- qmp_kit-0.0.54.dist-info/RECORD +39 -0
- qmp_kit-0.0.54.dist-info/WHEEL +5 -0
- qmp_kit-0.0.54.dist-info/entry_points.txt +2 -0
- qmp_kit-0.0.54.dist-info/licenses/LICENSE.md +675 -0
- qmp_kit-0.0.54.dist-info/top_level.txt +1 -0
qmp/__init__.py
ADDED
qmp/__main__.py
ADDED
|
@@ -0,0 +1,55 @@
|
|
|
1
|
+
"""
|
|
2
|
+
This is the main entry point for the command line application.
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
import pathlib
|
|
6
|
+
import hydra
|
|
7
|
+
import omegaconf
|
|
8
|
+
from .subcommand_dict import subcommand_dict
|
|
9
|
+
from .common import CommonConfig
|
|
10
|
+
from .model_dict import model_dict
|
|
11
|
+
from .models import openfermion as _ # type: ignore[no-redef]
|
|
12
|
+
from .models import fcidump as _ # type: ignore[no-redef]
|
|
13
|
+
from .models import hubbard as _ # type: ignore[no-redef]
|
|
14
|
+
from .models import free_fermion as _ # type: ignore[no-redef]
|
|
15
|
+
from .models import ising as _ # type: ignore[no-redef]
|
|
16
|
+
from .algorithms import guide as _ # type: ignore[no-redef]
|
|
17
|
+
from .algorithms import vmc as _ # type: ignore[no-redef]
|
|
18
|
+
from .algorithms import haar as _ # type: ignore[no-redef]
|
|
19
|
+
from .algorithms import rldiag as _ # type: ignore[no-redef]
|
|
20
|
+
from .algorithms import precompile as _ # type: ignore[no-redef]
|
|
21
|
+
from .algorithms import chop_imag as _ # type: ignore[no-redef]
|
|
22
|
+
from .algorithms import pert as _ # type: ignore[no-redef]
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
@hydra.main(version_base=None, config_path=str(pathlib.Path().resolve()), config_name="config")
def main(config: omegaconf.DictConfig) -> None:
    """
    Entry point for the command line application.

    Resolves the requested subcommand, builds the shared configuration from
    the hydra runtime, instantiates the model and network parameter objects,
    and dispatches to the subcommand's ``main``.
    """
    # Look up the subcommand first, then assemble the shared configuration.
    # Logs are directed at hydra's per-run output directory.
    action = subcommand_dict[config.action.name]
    common = CommonConfig(
        log_path=pathlib.Path(hydra.core.hydra_config.HydraConfig.get().runtime.output_dir),
        model_name=config.model.name,
        network_name=config.network.name,
        **config.common,
    )
    run = action(common=common, **config.action.params)

    # Resolve the model- and network-specific parameter objects from the
    # registry; the available networks depend on the selected model.
    model_t = model_dict[config.model.name]
    model_param = model_t.config_t(**config.model.params)
    network_param = model_t.network_dict[config.network.name](**config.network.params)

    # The "guide" subcommand additionally receives the raw hydra config.
    if config.action.name == "guide":
        run.main(model_param=model_param, network_param=network_param, config=config)  # type: ignore[call-arg]
    else:
        run.main(model_param=model_param, network_param=network_param)  # type: ignore[call-arg]
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
if __name__ == "__main__":
    # Hydra supplies the `config` argument at runtime via the decorator.
    main()  # pylint: disable=no-value-for-parameter
|
qmp/_hamiltonian.cpp
ADDED
|
@@ -0,0 +1,94 @@
|
|
|
1
|
+
#include <pybind11/complex.h>
|
|
2
|
+
#include <torch/extension.h>
|
|
3
|
+
|
|
4
|
+
namespace qmb_hamiltonian {
|
|
5
|
+
|
|
6
|
+
// The `prepare` function is responsible for parsing a raw Python dictionary representing Hamiltonian terms
|
|
7
|
+
// and transforming it into a structured tuple of tensors. This tuple is then stored on the Python side
|
|
8
|
+
// and utilized in subsequent calls to the PyTorch operators for further processing.
|
|
9
|
+
//
|
|
10
|
+
// The function takes a Python dictionary `hamiltonian` as input, where each key-value pair represents a term
|
|
11
|
+
// in the Hamiltonian. The key is a tuple of tuples, where each inner tuple contains two elements:
|
|
12
|
+
// - The first element is an integer representing the site index of the operator.
|
|
13
|
+
// - The second element is an integer representing the type of operator (0 for annihilation, 1 for creation).
|
|
14
|
+
// The value is either a float or a complex number representing the coefficient of the term.
|
|
15
|
+
//
|
|
16
|
+
// The function processes the dictionary and constructs three tensors:
|
|
17
|
+
// - `site`: An int16 tensor of shape [term_number, max_op_number], representing the site indices of the operators for
|
|
18
|
+
// each term.
|
|
19
|
+
// - `kind`: An uint8 tensor of shape [term_number, max_op_number], representing the type of operator for each term.
|
|
20
|
+
// The value are encoded as follows:
|
|
21
|
+
// - 0: Annihilation operator
|
|
22
|
+
// - 1: Creation operator
|
|
23
|
+
// - 2: Empty (identity operator)
|
|
24
|
+
// - `coef`: A float64 tensor of shape [term_number, 2], representing the coefficients of each term, with two elements
|
|
25
|
+
// for real and imaginary parts.
|
|
26
|
+
//
|
|
27
|
+
// The `max_op_number` template argument specifies the maximum number of operators per term, typically set to 4 for
|
|
28
|
+
// 2-body interactions.
|
|
29
|
+
template<std::int64_t max_op_number>
|
|
30
|
+
auto prepare(py::dict hamiltonian) {
|
|
31
|
+
std::int64_t term_number = hamiltonian.size();
|
|
32
|
+
|
|
33
|
+
auto site = torch::empty({term_number, max_op_number}, torch::TensorOptions().dtype(torch::kInt16).device(torch::kCPU));
|
|
34
|
+
// No need to initialize
|
|
35
|
+
auto kind = torch::full({term_number, max_op_number}, 2, torch::TensorOptions().dtype(torch::kUInt8).device(torch::kCPU));
|
|
36
|
+
// Initialize to 2 for identity as default
|
|
37
|
+
auto coef = torch::empty({term_number, 2}, torch::TensorOptions().dtype(torch::kFloat64).device(torch::kCPU));
|
|
38
|
+
// No need to initialize
|
|
39
|
+
|
|
40
|
+
auto site_accessor = site.accessor<std::int16_t, 2>();
|
|
41
|
+
auto kind_accessor = kind.accessor<std::uint8_t, 2>();
|
|
42
|
+
auto coef_accessor = coef.accessor<double, 2>();
|
|
43
|
+
|
|
44
|
+
std::int64_t index = 0;
|
|
45
|
+
for (auto& item : hamiltonian) {
|
|
46
|
+
auto key = item.first.cast<py::tuple>();
|
|
47
|
+
auto value_is_float = py::isinstance<py::float_>(item.second);
|
|
48
|
+
auto value = value_is_float ? std::complex<double>(item.second.cast<double>()) : item.second.cast<std::complex<double>>();
|
|
49
|
+
|
|
50
|
+
std::int64_t op_number = key.size();
|
|
51
|
+
for (std::int64_t i = 0; i < op_number; ++i) {
|
|
52
|
+
auto tuple = key[i].cast<py::tuple>();
|
|
53
|
+
site_accessor[index][i] = tuple[0].cast<std::int16_t>();
|
|
54
|
+
kind_accessor[index][i] = tuple[1].cast<std::uint8_t>();
|
|
55
|
+
}
|
|
56
|
+
|
|
57
|
+
coef_accessor[index][0] = value.real();
|
|
58
|
+
coef_accessor[index][1] = value.imag();
|
|
59
|
+
|
|
60
|
+
++index;
|
|
61
|
+
}
|
|
62
|
+
|
|
63
|
+
return std::make_tuple(site, kind, coef);
|
|
64
|
+
}
|
|
65
|
+
|
|
66
|
+
// Default the compile-time configuration macros so this translation unit can
// also be built without any -D flags (N_QUBYTES == 0 selects the pure
// pybind11 helper module).
#ifndef N_QUBYTES
#define N_QUBYTES 0
#endif
#ifndef PARTICLE_CUT
#define PARTICLE_CUT 0
#endif

#if N_QUBYTES == 0
// Expose the `prepare` function to Python. max_op_number is fixed to 4,
// matching the 2-body interaction case described above.
PYBIND11_MODULE(qmb_hamiltonian, m) {
    m.def("prepare", prepare</*max_op_number=*/4>, py::arg("hamiltonian"));
}
#endif

#if N_QUBYTES != 0
// Build the torch operator-library name qmb_hamiltonian_<N_QUBYTES>_<PARTICLE_CUT>.
// Two-level macro expansion is required so the arguments are expanded before
// token pasting.
#define QMB_LIBRARY_HELPER(x, y) qmb_hamiltonian_##x##_##y
#define QMB_LIBRARY(x, y) QMB_LIBRARY_HELPER(x, y)
// Declare the operator schemas only; no implementations are registered here.
// NOTE(review): implementations presumably live in the sibling
// _hamiltonian_cpu.cpp / _hamiltonian_cuda.cu sources — confirm.
TORCH_LIBRARY_FRAGMENT(QMB_LIBRARY(N_QUBYTES, PARTICLE_CUT), m) {
    m.def("apply_within(Tensor configs_i, Tensor psi_i, Tensor configs_j, Tensor site, Tensor kind, Tensor coef) -> Tensor");
    m.def("find_relative(Tensor configs_i, Tensor psi_i, int count_selected, Tensor site, Tensor kind, Tensor coef, Tensor configs_exclude) -> Tensor");
    m.def("diagonal_term(Tensor configs, Tensor site, Tensor kind, Tensor coef) -> Tensor");
    m.def("single_relative(Tensor configs, Tensor site, Tensor kind, Tensor coef) -> Tensor");
}
#undef QMB_LIBRARY
#undef QMB_LIBRARY_HELPER
#endif
|
|
93
|
+
|
|
94
|
+
} // namespace qmb_hamiltonian
|