featomic-torch 0.6.0__py3-none-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (69)
  1. featomic/torch/__init__.py +39 -0
  2. featomic/torch/_build_versions.py +5 -0
  3. featomic/torch/_c_lib.py +138 -0
  4. featomic/torch/calculator_base.py +176 -0
  5. featomic/torch/calculators.py +52 -0
  6. featomic/torch/clebsch_gordan.py +89 -0
  7. featomic/torch/system.py +106 -0
  8. featomic/torch/torch-2.1/bin/featomic_torch.dll +0 -0
  9. featomic/torch/torch-2.1/include/featomic/torch/autograd.hpp +58 -0
  10. featomic/torch/torch-2.1/include/featomic/torch/calculator.hpp +134 -0
  11. featomic/torch/torch-2.1/include/featomic/torch/exports.h +43 -0
  12. featomic/torch/torch-2.1/include/featomic/torch/system.hpp +112 -0
  13. featomic/torch/torch-2.1/include/featomic/torch.hpp +10 -0
  14. featomic/torch/torch-2.1/lib/cmake/featomic_torch/featomic_torch-config-version.cmake +85 -0
  15. featomic/torch/torch-2.1/lib/cmake/featomic_torch/featomic_torch-config.cmake +28 -0
  16. featomic/torch/torch-2.1/lib/cmake/featomic_torch/featomic_torch-targets-release.cmake +19 -0
  17. featomic/torch/torch-2.1/lib/cmake/featomic_torch/featomic_torch-targets.cmake +108 -0
  18. featomic/torch/torch-2.1/lib/featomic_torch.lib +0 -0
  19. featomic/torch/torch-2.2/bin/featomic_torch.dll +0 -0
  20. featomic/torch/torch-2.2/include/featomic/torch/autograd.hpp +58 -0
  21. featomic/torch/torch-2.2/include/featomic/torch/calculator.hpp +134 -0
  22. featomic/torch/torch-2.2/include/featomic/torch/exports.h +43 -0
  23. featomic/torch/torch-2.2/include/featomic/torch/system.hpp +112 -0
  24. featomic/torch/torch-2.2/include/featomic/torch.hpp +10 -0
  25. featomic/torch/torch-2.2/lib/cmake/featomic_torch/featomic_torch-config-version.cmake +85 -0
  26. featomic/torch/torch-2.2/lib/cmake/featomic_torch/featomic_torch-config.cmake +28 -0
  27. featomic/torch/torch-2.2/lib/cmake/featomic_torch/featomic_torch-targets-release.cmake +19 -0
  28. featomic/torch/torch-2.2/lib/cmake/featomic_torch/featomic_torch-targets.cmake +108 -0
  29. featomic/torch/torch-2.2/lib/featomic_torch.lib +0 -0
  30. featomic/torch/torch-2.3/bin/featomic_torch.dll +0 -0
  31. featomic/torch/torch-2.3/include/featomic/torch/autograd.hpp +58 -0
  32. featomic/torch/torch-2.3/include/featomic/torch/calculator.hpp +134 -0
  33. featomic/torch/torch-2.3/include/featomic/torch/exports.h +43 -0
  34. featomic/torch/torch-2.3/include/featomic/torch/system.hpp +112 -0
  35. featomic/torch/torch-2.3/include/featomic/torch.hpp +10 -0
  36. featomic/torch/torch-2.3/lib/cmake/featomic_torch/featomic_torch-config-version.cmake +85 -0
  37. featomic/torch/torch-2.3/lib/cmake/featomic_torch/featomic_torch-config.cmake +28 -0
  38. featomic/torch/torch-2.3/lib/cmake/featomic_torch/featomic_torch-targets-release.cmake +19 -0
  39. featomic/torch/torch-2.3/lib/cmake/featomic_torch/featomic_torch-targets.cmake +108 -0
  40. featomic/torch/torch-2.3/lib/featomic_torch.lib +0 -0
  41. featomic/torch/torch-2.4/bin/featomic_torch.dll +0 -0
  42. featomic/torch/torch-2.4/include/featomic/torch/autograd.hpp +58 -0
  43. featomic/torch/torch-2.4/include/featomic/torch/calculator.hpp +134 -0
  44. featomic/torch/torch-2.4/include/featomic/torch/exports.h +43 -0
  45. featomic/torch/torch-2.4/include/featomic/torch/system.hpp +112 -0
  46. featomic/torch/torch-2.4/include/featomic/torch.hpp +10 -0
  47. featomic/torch/torch-2.4/lib/cmake/featomic_torch/featomic_torch-config-version.cmake +85 -0
  48. featomic/torch/torch-2.4/lib/cmake/featomic_torch/featomic_torch-config.cmake +28 -0
  49. featomic/torch/torch-2.4/lib/cmake/featomic_torch/featomic_torch-targets-release.cmake +19 -0
  50. featomic/torch/torch-2.4/lib/cmake/featomic_torch/featomic_torch-targets.cmake +108 -0
  51. featomic/torch/torch-2.4/lib/featomic_torch.lib +0 -0
  52. featomic/torch/torch-2.5/bin/featomic_torch.dll +0 -0
  53. featomic/torch/torch-2.5/include/featomic/torch/autograd.hpp +58 -0
  54. featomic/torch/torch-2.5/include/featomic/torch/calculator.hpp +134 -0
  55. featomic/torch/torch-2.5/include/featomic/torch/exports.h +43 -0
  56. featomic/torch/torch-2.5/include/featomic/torch/system.hpp +112 -0
  57. featomic/torch/torch-2.5/include/featomic/torch.hpp +10 -0
  58. featomic/torch/torch-2.5/lib/cmake/featomic_torch/featomic_torch-config-version.cmake +85 -0
  59. featomic/torch/torch-2.5/lib/cmake/featomic_torch/featomic_torch-config.cmake +28 -0
  60. featomic/torch/torch-2.5/lib/cmake/featomic_torch/featomic_torch-targets-release.cmake +19 -0
  61. featomic/torch/torch-2.5/lib/cmake/featomic_torch/featomic_torch-targets.cmake +108 -0
  62. featomic/torch/torch-2.5/lib/featomic_torch.lib +0 -0
  63. featomic/torch/utils.py +19 -0
  64. featomic_torch-0.6.0.dist-info/AUTHORS +6 -0
  65. featomic_torch-0.6.0.dist-info/LICENSE +28 -0
  66. featomic_torch-0.6.0.dist-info/METADATA +36 -0
  67. featomic_torch-0.6.0.dist-info/RECORD +69 -0
  68. featomic_torch-0.6.0.dist-info/WHEEL +5 -0
  69. featomic_torch-0.6.0.dist-info/top_level.txt +2 -0
featomic/torch/__init__.py
@@ -0,0 +1,39 @@
+ import importlib.metadata
+
+
+ __version__ = importlib.metadata.version("featomic-torch")
+
+
+ from ._c_lib import _load_library
+
+
+ _load_library()
+
+ from . import utils  # noqa: E402, F401
+ from .calculator_base import CalculatorModule, register_autograd  # noqa: E402, F401
+
+ # don't forget to also update `featomic/__init__.py` and
+ # `featomic/torch/calculators.py` when modifying this file
+ from .calculators import (  # noqa: E402, F401
+     AtomicComposition,
+     LodeSphericalExpansion,
+     NeighborList,
+     SoapPowerSpectrum,
+     SoapRadialSpectrum,
+     SortedDistances,
+     SphericalExpansion,
+     SphericalExpansionByPair,
+ )
+ from .system import systems_to_torch  # noqa: E402, F401
+
+
+ __all__ = [
+     "AtomicComposition",
+     "LodeSphericalExpansion",
+     "NeighborList",
+     "SoapPowerSpectrum",
+     "SoapRadialSpectrum",
+     "SortedDistances",
+     "SphericalExpansion",
+     "SphericalExpansionByPair",
+ ]
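
For reference, a minimal sketch (not part of the wheel contents) of what this `__init__.py` exposes, assuming featomic-torch is installed. The calculator classes re-exported here are built on both `CalculatorModule` and `torch.nn.Module`:

import torch
import featomic.torch

print(featomic.torch.__version__)
print(featomic.torch.__all__)

# every re-exported calculator is a TorchScript-compatible torch.nn.Module
assert issubclass(featomic.torch.SoapPowerSpectrum, featomic.torch.CalculatorModule)
assert issubclass(featomic.torch.SoapPowerSpectrum, torch.nn.Module)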
featomic/torch/_build_versions.py
@@ -0,0 +1,5 @@
+ # Autogenerated file, do not edit
+
+
+ # version of featomic used when compiling this package
+ BUILD_FEATOMIC_VERSION = '0.6.0'
featomic/torch/_c_lib.py
@@ -0,0 +1,138 @@
+ import glob
+ import os
+ import re
+ import sys
+ from collections import namedtuple
+
+ import metatensor.torch
+ import torch
+
+ import featomic
+
+ from ._build_versions import BUILD_FEATOMIC_VERSION
+
+
+ Version = namedtuple("Version", ["major", "minor", "patch"])
+
+
+ def parse_version(version):
+     match = re.match(r"(\d+)\.(\d+)\.(\d+).*", version)
+     if match:
+         return Version(*map(int, match.groups()))
+     else:
+         raise ValueError("Invalid version string format")
+
+
+ def version_compatible(actual, required):
+     actual = parse_version(actual)
+     required = parse_version(required)
+
+     if actual.major != required.major:
+         return False
+     elif actual.minor != required.minor:
+         return False
+     else:
+         return True
+
+
+ if not version_compatible(featomic.__version__, BUILD_FEATOMIC_VERSION):
+     raise ImportError(
+         f"Trying to load featomic-torch with featomic v{featomic.__version__}, "
+         f"but it was compiled against featomic v{BUILD_FEATOMIC_VERSION}, which "
+         "is not ABI compatible"
+     )
+
+ _HERE = os.path.realpath(os.path.dirname(__file__))
+
+
+ def _lib_path():
+     torch_version = parse_version(torch.__version__)
+     install_prefix = os.path.join(
+         _HERE, f"torch-{torch_version.major}.{torch_version.minor}"
+     )
+
+     if os.path.exists(install_prefix):
+         if sys.platform.startswith("darwin"):
+             path = os.path.join(install_prefix, "lib", "libfeatomic_torch.dylib")
+             windows = False
+         elif sys.platform.startswith("linux"):
+             path = os.path.join(install_prefix, "lib", "libfeatomic_torch.so")
+             windows = False
+         elif sys.platform.startswith("win"):
+             path = os.path.join(install_prefix, "bin", "featomic_torch.dll")
+             windows = True
+         else:
+             raise ImportError("Unknown platform. Please edit this file")
+
+         if os.path.isfile(path):
+             if windows:
+                 _check_dll(path)
+             return path
+         else:
+             raise ImportError("Could not find featomic_torch shared library at " + path)
+
+     # gather which torch version(s) the current install was built
+     # with to create the error message
+     existing_versions = []
+     for prefix in glob.glob(os.path.join(_HERE, "torch-*")):
+         existing_versions.append(os.path.basename(prefix)[6:])
+
+     if len(existing_versions) == 1:
+         raise ImportError(
+             f"Trying to load featomic-torch with torch v{torch.__version__}, "
+             f"but it was compiled against torch v{existing_versions[0]}, which "
+             "is not ABI compatible"
+         )
+     else:
+         all_versions = ", ".join(map(lambda version: f"v{version}", existing_versions))
+         raise ImportError(
+             f"Trying to load featomic-torch with torch v{torch.__version__}, "
+             f"we found builds for torch {all_versions}; which are not ABI compatible.\n"
+             "You can try to re-install from source with "
+             "`pip install featomic-torch --no-binary=featomic-torch`"
+         )
+
+
+ def _check_dll(path):
+     """
+     Check if the DLL pointer size matches Python (32-bit or 64-bit)
+     """
+     import platform
+     import struct
+
+     IMAGE_FILE_MACHINE_I386 = 332
+     IMAGE_FILE_MACHINE_AMD64 = 34404
+
+     machine = None
+     with open(path, "rb") as fd:
+         header = fd.read(2).decode(encoding="utf-8", errors="strict")
+         if header != "MZ":
+             raise ImportError(path + " is not a DLL")
+         else:
+             fd.seek(60)
+             header = fd.read(4)
+             header_offset = struct.unpack("<L", header)[0]
+             fd.seek(header_offset + 4)
+             header = fd.read(2)
+             machine = struct.unpack("<H", header)[0]
+
+     arch = platform.architecture()[0]
+     if arch == "32bit":
+         if machine != IMAGE_FILE_MACHINE_I386:
+             raise ImportError("Python is 32-bit, but this DLL is not")
+     elif arch == "64bit":
+         if machine != IMAGE_FILE_MACHINE_AMD64:
+             raise ImportError("Python is 64-bit, but this DLL is not")
+     else:
+         raise ImportError("Could not determine pointer size of Python")
+
+
+ def _load_library():
+     # Load featomic & metatensor-torch shared library in the process first, to ensure
+     # the featomic_torch shared library can find them
+     metatensor.torch._c_lib._load_library()
+
+     featomic._c_lib._get_library()
+
+     # load the C++ operators and custom classes
+     torch.ops.load_library(_lib_path())
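
A small illustration (assuming featomic-torch is installed) of the compatibility rule implemented by `parse_version` and `version_compatible` above: only the major and minor components have to match, the patch level and any local suffix are ignored:

from featomic.torch._c_lib import parse_version, version_compatible

# the regex keeps the first three numeric components and drops any suffix
assert parse_version("2.4.1+cu121") == (2, 4, 1)

# patch releases are considered compatible, minor or major bumps are not
assert version_compatible("0.6.1", "0.6.0") is True
assert version_compatible("0.7.0", "0.6.0") is False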
featomic/torch/calculator_base.py
@@ -0,0 +1,176 @@
+ from typing import List, Optional, Union
+
+ import torch
+ from metatensor.torch import Labels, TensorMap
+ from metatensor.torch.atomistic import NeighborListOptions
+
+ from .system import System
+
+
+ CalculatorHolder = torch.classes.featomic.CalculatorHolder
+
+
+ def register_autograd(
+     systems: Union[List[System], System],
+     precomputed: TensorMap,
+     forward_gradients: Optional[List[str]] = None,
+ ) -> TensorMap:
+     """
+     Register autograd nodes between ``system.positions`` and ``system.cell`` for each of
+     the systems and the values in the ``precomputed``
+     :py:class:`metatensor.torch.TensorMap`.
+
+     This is an advanced function that most users should not need to use.
+
+     The autograd nodes' ``backward`` function will use the gradients already stored in
+     ``precomputed``, meaning that if any of the system's positions ``requires_grad``,
+     ``precomputed`` must contain ``"positions"`` gradients. Similarly, if any of the
+     system's cell ``requires_grad``, ``precomputed`` must contain ``"cell"`` gradients.
+
+     :param systems: list of systems used to compute ``precomputed``
+     :param precomputed: precomputed :py:class:`metatensor.torch.TensorMap`
+     :param forward_gradients: which gradients to keep in the output, defaults to None
+     """
+     if forward_gradients is None:
+         forward_gradients = []
+
+     if not isinstance(systems, list):
+         systems = [systems]
+
+     return torch.ops.featomic.register_autograd(systems, precomputed, forward_gradients)
+
+
+ class CalculatorModule(torch.nn.Module):
+     """
+     This is the base class for calculators in featomic-torch, providing the
+     :py:meth:`CalculatorModule.compute` function and integration with
+     :py:class:`torch.nn.Module`.
+
+     One can initialize a :py:class:`CalculatorModule` in two ways: either directly with
+     the registered name and JSON parameter string (which are documented in the
+     :ref:`userdoc-calculators`); or through one of the child classes documented below.
+
+     :param name: name used to register this calculator
+     :param parameters: JSON parameter string for the calculator
+     """
+
+     def __init__(self, name: str, parameters: str):
+         """"""
+         # empty docstring here for the docs to render correctly
+         super().__init__()
+         self._c_name = name
+         self._c = CalculatorHolder(name=name, parameters=parameters)
+
+     @property
+     def name(self) -> str:
+         """name of this calculator"""
+         return self._c.name
+
+     @property
+     def c_name(self) -> str:
+         """name used to register & create this calculator"""
+         return self._c_name
+
+     @property
+     def parameters(self) -> str:
+         """parameters (formatted as JSON) used to create this calculator"""
+         return self._c.parameters
+
+     @property
+     def cutoffs(self) -> List[float]:
+         """all the radial cutoffs used by this calculator's neighbors lists"""
+         return self._c.cutoffs
+
+     def requested_neighbor_lists(self) -> List[NeighborListOptions]:
+         options = []
+         for cutoff in self.cutoffs:
+             options.append(
+                 NeighborListOptions(
+                     cutoff=cutoff,
+                     full_list=False,
+                     # we will re-filter the NL when converting to featomic internal
+                     # type, so we don't need the engine to pre-filter it for us
+                     strict=False,
+                     requestor="featomic",
+                 )
+             )
+         return options
+
+     def compute(
+         self,
+         systems: Union[System, List[System]],
+         gradients: Optional[List[str]] = None,
+         use_native_system: bool = True,
+         selected_samples: Optional[Union[Labels, TensorMap]] = None,
+         selected_properties: Optional[Union[Labels, TensorMap]] = None,
+         selected_keys: Optional[Labels] = None,
+     ) -> TensorMap:
+         """Runs a calculation with this calculator on the given ``systems``.
+
+         .. seealso::
+
+             :py:func:`featomic.calculators.CalculatorBase.compute` for more information
+             on the different parameters of this function.
+
+         :param systems: single system or list of systems on which to run the
+             calculation. If any of the systems' ``positions`` or ``cell`` has
+             ``requires_grad`` set to ``True``, then the corresponding gradients are
+             computed and registered as a custom node in the computational graph, to
+             allow backward propagation of the gradients later.
+
+         :param gradients: List of forward gradients to keep in the output. If this is
+             ``None`` or an empty list ``[]``, no gradients are kept in the output. Some
+             gradients might still be computed at runtime to allow for backward
+             propagation.
+
+         :param use_native_system: This can only be ``True``, and is here for
+             compatibility with the same parameter on
+             :py:meth:`featomic.calculators.CalculatorBase.compute`.
+
+         :param selected_samples: Set of samples on which to run the calculation, with
+             the same meaning as in
+             :py:func:`featomic.calculators.CalculatorBase.compute`.
+
+         :param selected_properties: Set of properties to compute, with the same meaning
+             as in :py:func:`featomic.calculators.CalculatorBase.compute`.
+
+         :param selected_keys: Selection for the keys to include in the output, with the
+             same meaning as in :py:func:`featomic.calculators.CalculatorBase.compute`.
+         """
+         if gradients is None:
+             gradients = []
+
+         if not isinstance(systems, list):
+             systems = [systems]
+
+         # We have this parameter to have the same API as featomic.
+         if not use_native_system:
+             raise ValueError("only `use_native_system=True` is supported")
+
+         options = torch.classes.featomic.CalculatorOptions()
+         options.gradients = gradients
+         options.selected_samples = selected_samples
+         options.selected_properties = selected_properties
+         options.selected_keys = selected_keys
+
+         return self._c.compute(systems=systems, options=options)
+
+     def forward(
+         self,
+         systems: List[System],
+         gradients: Optional[List[str]] = None,
+         use_native_system: bool = True,
+         selected_samples: Optional[Union[Labels, TensorMap]] = None,
+         selected_properties: Optional[Union[Labels, TensorMap]] = None,
+         selected_keys: Optional[Labels] = None,
+     ) -> TensorMap:
+         """forward just calls :py:meth:`CalculatorModule.compute`"""
+
+         return self.compute(
+             systems=systems,
+             gradients=gradients,
+             use_native_system=use_native_system,
+             selected_samples=selected_samples,
+             selected_properties=selected_properties,
+             selected_keys=selected_keys,
+         )
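
A hypothetical end-to-end sketch of `compute` with backward propagation. `HYPERS` is a placeholder for a valid SOAP hyper-parameter dictionary (see the featomic documentation for the real schema); everything else follows the API defined above:

import torch
from metatensor.torch.atomistic import System

import featomic.torch

system = System(
    types=torch.tensor([8, 1, 1], dtype=torch.int32),
    positions=torch.tensor(
        [[0.0, 0.0, 0.0], [0.76, 0.59, 0.0], [-0.76, 0.59, 0.0]],
        dtype=torch.float64,
        requires_grad=True,
    ),
    cell=torch.zeros((3, 3), dtype=torch.float64),
    pbc=torch.tensor([False, False, False]),
)

calculator = featomic.torch.SoapPowerSpectrum(**HYPERS)  # noqa: F821, placeholder hypers
descriptor = calculator(system)  # __call__ -> forward -> compute

# positions were created with requires_grad=True, so the features carry a grad_fn
descriptor.block(0).values.sum().backward()
print(system.positions.grad)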
featomic/torch/calculators.py
@@ -0,0 +1,52 @@
+ import importlib
+ import sys
+
+ import featomic.calculators
+
+ from .calculator_base import CalculatorModule
+
+
+ # CAREFUL ADVENTURER, HERE BE DRAGONS!
+ #
+ #                   \||/
+ #                   |  @___oo
+ #         /\  /\   / (__,,,,|
+ #        ) /^\) ^\/ _)
+ #        )   /^\/   _)
+ #        )   _ /  / _)
+ #    /\  )/\/ ||  | )_)
+ #   <  >      |(,,) )__)
+ #    ||      /    \)___)\
+ #    | \____(      )___) )___
+ #     \______(_______;;; __;;;
+ #
+ #
+ # This module tries to re-use code from `featomic.calculators`, which contains a more
+ # user-friendly interface to the different calculators. At the C-API level, calculators
+ # are just defined by a name & JSON parameter string. `featomic.calculators` defines
+ # one class for each name and sets the `__init__` parameters with the top-level keys of
+ # the JSON parameters.
+ #
+ # To achieve this, we import the module in a special mode with `importlib`, defining a
+ # global variable `CalculatorBase` which is pointing to `CalculatorModule`. Then,
+ # `featomic.calculators` checks if `CalculatorBase` is defined and otherwise imports it
+ # from `featomic.calculator_base`.
+ #
+ # This means the same code is used to define two versions of each class: one will be
+ # used in `featomic` and have a base class of `featomic.CalculatorBase`, and one in
+ # `featomic.torch` with base classes `featomic.torch.CalculatorModule` and
+ # `torch.nn.Module`.
+
+
+ spec = importlib.util.spec_from_file_location(
+     # create a module with this name
+     "featomic.torch.calculators",
+     # using the code from there
+     featomic.calculators.__file__,
+ )
+ module = importlib.util.module_from_spec(spec)
+ sys.modules[spec.name] = module
+
+ module.__dict__["CalculatorBase"] = CalculatorModule
+
+ spec.loader.exec_module(module)
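
The comment block above describes re-executing an existing module's source under a new name, with an extra global injected before execution. A self-contained illustration of the same `importlib` mechanism, using `colorsys` from the standard library as a stand-in (all names here are made up for the demonstration):

import importlib.util
import sys

import colorsys

spec = importlib.util.spec_from_file_location(
    # create a module with this name...
    "colorsys_copy",
    # ...from this existing source file
    colorsys.__file__,
)
module = importlib.util.module_from_spec(spec)
sys.modules[spec.name] = module

# globals injected here are visible to the module body when it executes; this is how
# `CalculatorModule` is substituted for `CalculatorBase` in the code above
module.__dict__["INJECTED_BASE"] = object

spec.loader.exec_module(module)

# the copy defines the same functions as the original, but is a distinct module object
assert module.rgb_to_hsv(1.0, 0.0, 0.0) == colorsys.rgb_to_hsv(1.0, 0.0, 0.0)
assert module is not colorsys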
featomic/torch/clebsch_gordan.py
@@ -0,0 +1,89 @@
+ import importlib
+ import os
+ import sys
+ from typing import Any
+
+ import metatensor.torch
+ import torch
+
+ import featomic.utils
+
+ from .calculator_base import CalculatorModule
+ from .system import System
+
+
+ # For details on what is happening here, take a look at `featomic.torch.calculators`.
+
+ # create the `_backend` module as an empty module
+ spec = importlib.util.spec_from_loader(
+     "featomic.torch.clebsch_gordan._backend",
+     loader=None,
+ )
+ module = importlib.util.module_from_spec(spec)
+ # This module only exposes a handful of things, defined here. Any changes here MUST also
+ # be made to the `featomic/clebsch_gordan/_backend.py` file, which is used in
+ # non-TorchScript mode.
+ module.__dict__["BACKEND_IS_METATENSOR_TORCH"] = True
+
+ module.__dict__["Labels"] = metatensor.torch.Labels
+ module.__dict__["TensorBlock"] = metatensor.torch.TensorBlock
+ module.__dict__["TensorMap"] = metatensor.torch.TensorMap
+ module.__dict__["LabelsEntry"] = metatensor.torch.LabelsEntry
+
+ module.__dict__["CalculatorBase"] = CalculatorModule
+ module.__dict__["IntoSystem"] = System
+
+ module.__dict__["TorchTensor"] = torch.Tensor
+ module.__dict__["TorchModule"] = torch.nn.Module
+ module.__dict__["TorchScriptClass"] = torch.ScriptClass
+ module.__dict__["Array"] = torch.Tensor
+ module.__dict__["DType"] = torch.dtype
+ module.__dict__["Device"] = torch.device
+
+ module.__dict__["torch_jit_is_scripting"] = torch.jit.is_scripting
+ module.__dict__["torch_jit_export"] = torch.jit.export
+
+ if os.environ.get("METATENSOR_IMPORT_FOR_SPHINX", "0") == "0":
+     module.__dict__["operations"] = metatensor.torch.operations
+ else:
+     # FIXME: we can remove this hack once metatensor-operations v0.2.4 is released
+     module.__dict__["operations"] = None
+
+
+ def is_labels(obj: Any):
+     return isinstance(obj, metatensor.torch.Labels)
+
+
+ if os.environ.get("FEATOMIC_IMPORT_FOR_SPHINX") is None:
+     is_labels = torch.jit.script(is_labels)
+
+ module.__dict__["is_labels"] = is_labels
+
+
+ def check_isinstance(obj, ty):
+     if isinstance(ty, torch.ScriptClass):
+         # This branch is taken when `ty` is a custom class (TensorMap, …). Since `ty` is
+         # an instance of `torch.ScriptClass` and not a class itself, there is no way to
+         # check if obj is an "instance" of this class, so we always return True and hope
+         # for the best. Most errors should be caught by the TorchScript compiler anyway.
+         return True
+     else:
+         assert isinstance(ty, type)
+         return isinstance(obj, ty)
+
+
+ # register the module in sys.modules, so future imports find it directly
+ sys.modules[spec.name] = module
+
+ # create a module named `featomic.torch.clebsch_gordan` using code from
+ # `featomic.clebsch_gordan`
+ spec = importlib.util.spec_from_file_location(
+     "featomic.torch.clebsch_gordan", featomic.clebsch_gordan.__file__
+ )
+
+ module = importlib.util.module_from_spec(spec)
+
+ # override `featomic.torch.clebsch_gordan` (the module associated with the current file)
+ # with the newly created module
+ sys.modules[spec.name] = module
+ spec.loader.exec_module(module)
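
The `_backend` construction above uses a second `importlib` pattern: a module created from an empty spec (`loader=None`) and populated by hand before being registered. A minimal stand-alone sketch, with illustrative names only:

import importlib.util
import sys

spec = importlib.util.spec_from_loader("demo_backend", loader=None)
backend = importlib.util.module_from_spec(spec)

# fill the synthetic module by hand, then register it so normal imports resolve to it
backend.__dict__["ANSWER"] = 42
sys.modules[spec.name] = backend

import demo_backend  # noqa: E402

assert demo_backend.ANSWER == 42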
featomic/torch/system.py
@@ -0,0 +1,106 @@
+ from typing import List, Optional, Sequence, overload
+
+ import numpy as np
+ import torch
+ from metatensor.torch.atomistic import System
+
+ import featomic
+
+
+ @overload
+ def systems_to_torch(
+     systems: featomic.systems.IntoSystem,
+     positions_requires_grad: Optional[bool] = None,
+     cell_requires_grad: Optional[bool] = None,
+ ) -> System:
+     pass
+
+
+ @overload
+ def systems_to_torch(
+     systems: Sequence[featomic.systems.IntoSystem],
+     positions_requires_grad: Optional[bool] = None,
+     cell_requires_grad: Optional[bool] = None,
+ ) -> List[System]:
+     pass
+
+
+ def systems_to_torch(
+     systems,
+     positions_requires_grad=None,
+     cell_requires_grad=None,
+ ) -> List[System]:
+     """
+     Convert an arbitrary system to metatensor's atomistic
+     :py:class:`metatensor.torch.atomistic.System`, putting all the data in
+     :py:class:`torch.Tensor` and making the overall object compatible with TorchScript.
+
+     :param systems: any system supported by featomic. If this is an iterable of systems,
+         this function converts them all and returns a list of converted systems.
+
+     :param positions_requires_grad: The value of ``requires_grad`` on the output
+         ``positions``. If ``None`` and the positions of the input are already a
+         :py:class:`torch.Tensor`, ``requires_grad`` is kept the same. Otherwise it is
+         initialized to ``False``.
+
+     :param cell_requires_grad: The value of ``requires_grad`` on the output ``cell``. If
+         ``None`` and the cell of the input is already a :py:class:`torch.Tensor`,
+         ``requires_grad`` is kept the same. Otherwise it is initialized to ``False``.
+     """
+
+     try:
+         return _system_to_torch(systems, positions_requires_grad, cell_requires_grad)
+     except TypeError:
+         # try iterating over the systems
+         return [
+             _system_to_torch(system, positions_requires_grad, cell_requires_grad)
+             for system in systems
+         ]
+
+
+ def _system_to_torch(system, positions_requires_grad, cell_requires_grad):
+     if not _is_torch_system(system):
+         system = featomic.systems.wrap_system(system)
+         system = System(
+             types=torch.tensor(system.types()),
+             positions=torch.tensor(system.positions()),
+             cell=torch.tensor(system.cell()),
+             pbc=(
+                 torch.tensor([False, False, False])
+                 if np.all(system.cell() == 0.0)
+                 else torch.tensor([True, True, True])
+             ),
+         )
+
+     if positions_requires_grad is not None:
+         system.positions.requires_grad_(positions_requires_grad)
+
+     if cell_requires_grad is not None:
+         system.cell.requires_grad_(cell_requires_grad)
+
+     return system
+
+
+ def _is_torch_system(system):
+     if not isinstance(system, torch.ScriptObject):
+         return False
+
+     torch_version_tuple = tuple(map(int, torch.__version__.split(".")[:2]))
+     if torch_version_tuple >= (2, 1):
+         return system._type().name() == "System"
+
+     # For older torch versions, we check that we have the right properties
+     properties = system._properties()
+     if len(properties) != 3:
+         return False
+
+     if properties[0].name != "species":
+         return False
+
+     if properties[1].name != "positions":
+         return False
+
+     if properties[2].name != "cell":
+         return False
+
+     return True
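
A usage sketch for `systems_to_torch` (assuming `ase` is installed; featomic accepts several system types, ASE is just one example):

import ase

import featomic.torch

atoms = ase.Atoms(
    "H2O",
    positions=[[0.0, 0.0, 0.0], [0.76, 0.59, 0.0], [-0.76, 0.59, 0.0]],
)

# a single system gives a single metatensor System, with requires_grad set as requested
system = featomic.torch.systems_to_torch(atoms, positions_requires_grad=True)
assert system.positions.requires_grad

# an iterable of systems is converted element by element
systems = featomic.torch.systems_to_torch([atoms, atoms])
assert len(systems) == 2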
featomic/torch/torch-2.1/include/featomic/torch/autograd.hpp
@@ -0,0 +1,58 @@
+ // IWYU pragma: private; include "featomic/torch.hpp"
+
+ #ifndef FEATOMIC_TORCH_AUTOGRAD_HPP
+ #define FEATOMIC_TORCH_AUTOGRAD_HPP
+
+ #include <ATen/core/ivalue.h>
+ #include <torch/autograd.h>
+
+ #include <metatensor/torch.hpp>
+
+ #include "featomic/torch/exports.h"
+
+ namespace featomic_torch {
+
+ /// Custom torch::autograd::Function integrating featomic with torch autograd.
+ ///
+ /// This is a bit more complex than your typical autograd because there is some
+ /// impedance mismatch between featomic and torch. Most of it should be taken
+ /// care of by the `compute` function below.
+ class FEATOMIC_TORCH_EXPORT FeatomicAutograd: public torch::autograd::Function<FeatomicAutograd> {
+ public:
+     /// Register a pseudo node in Torch's computational graph going from
+     /// `all_positions` and `all_cell` to the values in `block`; using the
+     /// pre-computed gradients in `block`.
+     ///
+     /// If `all_positions.requires_grad` is True, `block` must have a
+     /// `"positions"` gradient; and `systems_start` should contain the index of
+     /// the first atom of each system in `all_positions`.
+     ///
+     /// If `all_cells.requires_grad` is True, `block` must have a `"cell"`
+     /// gradient, and the block samples must contain a `"structure"` dimension.
+     ///
+     /// This function returns a vector with one element corresponding to
+     /// `block.values`, which should be left unused. It is only there to make
+     /// sure torch registers a `grad_fn` for the tensors stored inside the
+     /// TensorBlock (the values in the TensorBlock are references to the ones
+     /// returned by this function, so when a `grad_fn` is added to one, it is
+     /// also added to the other).
+     static std::vector<torch::Tensor> forward(
+         torch::autograd::AutogradContext *ctx,
+         torch::Tensor all_positions,
+         torch::Tensor all_cells,
+         torch::IValue systems_start,
+         metatensor_torch::TorchTensorBlock block
+     );
+
+     /// Backward step: get the gradients of some quantity `A` w.r.t. the outputs
+     /// of `forward`; and compute the gradients of the same quantity `A` w.r.t.
+     /// the inputs of `forward` (i.e. cell and positions).
+     static std::vector<torch::Tensor> backward(
+         torch::autograd::AutogradContext *ctx,
+         std::vector<torch::Tensor> grad_outputs
+     );
+ };
+
+ }
+
+ #endif