madspace-0.3.1-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (75)
  1. madspace/__init__.py +1 -0
  2. madspace/_madspace_py.cpython-311-x86_64-linux-gnu.so +0 -0
  3. madspace/_madspace_py.pyi +2189 -0
  4. madspace/_madspace_py_loader.py +111 -0
  5. madspace/include/madspace/constants.h +17 -0
  6. madspace/include/madspace/madcode/function.h +102 -0
  7. madspace/include/madspace/madcode/function_builder_mixin.h +591 -0
  8. madspace/include/madspace/madcode/instruction.h +208 -0
  9. madspace/include/madspace/madcode/opcode_mixin.h +134 -0
  10. madspace/include/madspace/madcode/optimizer.h +31 -0
  11. madspace/include/madspace/madcode/type.h +203 -0
  12. madspace/include/madspace/madcode.h +6 -0
  13. madspace/include/madspace/phasespace/base.h +74 -0
  14. madspace/include/madspace/phasespace/channel_weight_network.h +46 -0
  15. madspace/include/madspace/phasespace/channel_weights.h +51 -0
  16. madspace/include/madspace/phasespace/chili.h +32 -0
  17. madspace/include/madspace/phasespace/cross_section.h +47 -0
  18. madspace/include/madspace/phasespace/cuts.h +34 -0
  19. madspace/include/madspace/phasespace/discrete_flow.h +44 -0
  20. madspace/include/madspace/phasespace/discrete_sampler.h +53 -0
  21. madspace/include/madspace/phasespace/flow.h +53 -0
  22. madspace/include/madspace/phasespace/histograms.h +26 -0
  23. madspace/include/madspace/phasespace/integrand.h +204 -0
  24. madspace/include/madspace/phasespace/invariants.h +26 -0
  25. madspace/include/madspace/phasespace/luminosity.h +41 -0
  26. madspace/include/madspace/phasespace/matrix_element.h +70 -0
  27. madspace/include/madspace/phasespace/mlp.h +37 -0
  28. madspace/include/madspace/phasespace/multichannel.h +49 -0
  29. madspace/include/madspace/phasespace/observable.h +85 -0
  30. madspace/include/madspace/phasespace/pdf.h +78 -0
  31. madspace/include/madspace/phasespace/phasespace.h +67 -0
  32. madspace/include/madspace/phasespace/rambo.h +26 -0
  33. madspace/include/madspace/phasespace/scale.h +52 -0
  34. madspace/include/madspace/phasespace/t_propagator_mapping.h +34 -0
  35. madspace/include/madspace/phasespace/three_particle.h +68 -0
  36. madspace/include/madspace/phasespace/topology.h +116 -0
  37. madspace/include/madspace/phasespace/two_particle.h +63 -0
  38. madspace/include/madspace/phasespace/vegas.h +53 -0
  39. madspace/include/madspace/phasespace.h +27 -0
  40. madspace/include/madspace/runtime/context.h +147 -0
  41. madspace/include/madspace/runtime/discrete_optimizer.h +24 -0
  42. madspace/include/madspace/runtime/event_generator.h +257 -0
  43. madspace/include/madspace/runtime/format.h +68 -0
  44. madspace/include/madspace/runtime/io.h +343 -0
  45. madspace/include/madspace/runtime/lhe_output.h +132 -0
  46. madspace/include/madspace/runtime/logger.h +46 -0
  47. madspace/include/madspace/runtime/runtime_base.h +39 -0
  48. madspace/include/madspace/runtime/tensor.h +603 -0
  49. madspace/include/madspace/runtime/thread_pool.h +101 -0
  50. madspace/include/madspace/runtime/vegas_optimizer.h +26 -0
  51. madspace/include/madspace/runtime.h +12 -0
  52. madspace/include/madspace/umami.h +202 -0
  53. madspace/include/madspace/util.h +142 -0
  54. madspace/lib/libmadspace.so +0 -0
  55. madspace/lib/libmadspace_cpu.so +0 -0
  56. madspace/lib/libmadspace_cpu_avx2.so +0 -0
  57. madspace/lib/libmadspace_cpu_avx512.so +0 -0
  58. madspace/lib/libmadspace_cuda.so +0 -0
  59. madspace/lib/libmadspace_hip.so +0 -0
  60. madspace/madnis/__init__.py +44 -0
  61. madspace/madnis/buffer.py +167 -0
  62. madspace/madnis/channel_grouping.py +85 -0
  63. madspace/madnis/distribution.py +103 -0
  64. madspace/madnis/integrand.py +175 -0
  65. madspace/madnis/integrator.py +973 -0
  66. madspace/madnis/interface.py +191 -0
  67. madspace/madnis/losses.py +186 -0
  68. madspace/torch.py +82 -0
  69. madspace-0.3.1.dist-info/METADATA +71 -0
  70. madspace-0.3.1.dist-info/RECORD +75 -0
  71. madspace-0.3.1.dist-info/WHEEL +6 -0
  72. madspace-0.3.1.dist-info/licenses/LICENSE +21 -0
  73. madspace.libs/libgfortran-83c28eba.so.5.0.0 +0 -0
  74. madspace.libs/libopenblas-r0-11edc3fa.3.15.so +0 -0
  75. madspace.libs/libquadmath-2284e583.so.0.0.0 +0 -0
madspace/madnis/distribution.py
@@ -0,0 +1,103 @@
+import math
+from collections.abc import Callable
+from typing import Literal, Protocol
+
+import numpy as np
+import torch
+import torch.nn as nn
+
+L2PI = -0.5 * math.log(2 * math.pi)
+
+Mapping = Callable[[torch.Tensor, bool], tuple[torch.Tensor, torch.Tensor]]
+
+
+class Distribution(Protocol):
+    """
+    Protocol for a (potentially learnable) distribution that can be used for sampling and
+    density estimation, like a normalizing flow.
+    """
+
+    def sample(
+        self,
+        n: int,
+        c: torch.Tensor | None = None,
+        channel: torch.Tensor | list[int] | int | None = None,
+        return_log_prob: bool = False,
+        return_prob: bool = False,
+        device: torch.device | None = None,
+        dtype: torch.dtype | None = None,
+    ) -> torch.Tensor | tuple[torch.Tensor, ...]:
+        """
+        Draws samples following the distribution.
+
+        Args:
+            n: number of samples
+            c: condition, shape (n, dims_c), or None for an unconditional flow
+            channel: encodes the channel of the samples. It must have one of the following
+                types:
+
+                - ``Tensor``: integer tensor of shape (n, ), containing the channel index for
+                  every input sample;
+                - ``list``: list of integers, specifying the number of samples in each channel;
+                - ``int``: integer specifying a single channel containing all the samples;
+                - ``None``: used in the single-channel case or to indicate that all channels
+                  contain the same number of samples in the multi-channel case.
+            return_log_prob: if True, also return the log-probabilities
+            return_prob: if True, also return the probabilities
+            device: device of the returned tensor. Only required if no condition is given.
+            dtype: dtype of the returned tensor. Only required if no condition is given.
+        Returns:
+            samples with shape (n, dims_in). Depending on the arguments ``return_log_prob``
+            and ``return_prob``, this function should also return the log-probabilities with
+            shape (n, ) and/or the probabilities with shape (n, ).
+        """
+        ...
+
+    def log_prob(
+        self,
+        x: torch.Tensor,
+        c: torch.Tensor | None = None,
+        channel: torch.Tensor | list[int] | int | None = None,
+    ) -> torch.Tensor:
+        """
+        Computes the log-probabilities of the input data.
+
+        Args:
+            x: input data, shape (n, dims_in)
+            c: condition, shape (n, dims_c), or None for an unconditional flow
+            channel: encodes the channel of the samples. It must have one of the following
+                types:
+
+                - ``Tensor``: integer tensor of shape (n, ), containing the channel index for
+                  every input sample;
+                - ``list``: list of integers, specifying the number of samples in each channel;
+                - ``int``: integer specifying a single channel containing all the samples;
+                - ``None``: used in the single-channel case or to indicate that all channels
+                  contain the same number of samples in the multi-channel case.
+        Returns:
+            log-probabilities with shape (n, )
+        """
+        return self.prob(x, c, channel).log()
+
+    def prob(
+        self,
+        x: torch.Tensor,
+        c: torch.Tensor | None = None,
+        channel: torch.Tensor | list[int] | int | None = None,
+    ) -> torch.Tensor:
+        """
+        Computes the probabilities of the input data.
+
+        Args:
+            x: input data, shape (n, dims_in)
+            c: condition, shape (n, dims_c), or None for an unconditional flow
+            channel: encodes the channel of the samples. It must have one of the following
+                types:
+
+                - ``Tensor``: integer tensor of shape (n, ), containing the channel index for
+                  every input sample;
+                - ``list``: list of integers, specifying the number of samples in each channel;
+                - ``int``: integer specifying a single channel containing all the samples;
+                - ``None``: used in the single-channel case or to indicate that all channels
+                  contain the same number of samples in the multi-channel case.
+        Returns:
+            probabilities with shape (n, )
+        """
+        return self.log_prob(x, c, channel).exp()
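
Since ``Distribution`` is a ``Protocol``, any object providing these methods conforms; no inheritance is required. The following is a minimal sketch (not part of the package) of a conforming standard-normal toy distribution. Note that the protocol's default ``log_prob`` and ``prob`` are defined in terms of each other, so a conforming class has to supply at least one of them:

import math

import torch

L2PI = -0.5 * math.log(2 * math.pi)  # same per-dimension constant as in the module above


class UnitGaussian:
    """Toy unconditional, single-channel distribution: a standard normal in dims_in dimensions."""

    def __init__(self, dims_in: int):
        self.dims_in = dims_in

    def sample(
        self,
        n: int,
        c: torch.Tensor | None = None,
        channel: torch.Tensor | list[int] | int | None = None,
        return_log_prob: bool = False,
        return_prob: bool = False,
        device: torch.device | None = None,
        dtype: torch.dtype | None = None,
    ) -> torch.Tensor | tuple[torch.Tensor, ...]:
        # c and channel are ignored: this toy distribution is unconditional and single-channel.
        x = torch.randn(n, self.dims_in, device=device, dtype=dtype)
        out = (x,)
        if return_log_prob:
            out = out + (self.log_prob(x),)
        if return_prob:
            out = out + (self.prob(x),)
        return out[0] if len(out) == 1 else out

    def log_prob(self, x, c=None, channel=None) -> torch.Tensor:
        # log N(x; 0, 1) summed over dimensions: -||x||^2 / 2 + dims_in * L2PI
        return -0.5 * x.square().sum(dim=1) + self.dims_in * L2PI

    def prob(self, x, c=None, channel=None) -> torch.Tensor:
        return self.log_prob(x, c, channel).exp()


x, log_p = UnitGaussian(3).sample(1000, return_log_prob=True)  # shapes (1000, 3) and (1000, )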
madspace/madnis/integrand.py
@@ -0,0 +1,175 @@
+from collections.abc import Callable
+from typing import Literal
+
+import torch
+import torch.nn as nn
+
+from .channel_grouping import ChannelGrouping
+
+
+class Integrand(nn.Module):
+    """
+    Class that wraps an integrand function and the metadata necessary to use advanced MadNIS
+    features like learnable multi-channel weights, grouped channels and channel weight priors.
+    """
+
+    def __init__(
+        self,
+        function: Callable,
+        input_dim: int,
+        bounds: list[list[float]] | None = None,
+        channel_count: int | None = None,
+        remapped_dim: int | None = None,
+        has_channel_weight_prior: bool = False,
+        channel_grouping: ChannelGrouping | None = None,
+        function_includes_sampling: bool = False,
+        update_active_channels_mask: Callable[[torch.Tensor], None] | None = None,
+        discrete_dims: list[int] = [],
+        discrete_dims_position: Literal["first", "last"] = "first",
+        discrete_prior_prob_function: (
+            Callable[[torch.Tensor, int], torch.Tensor] | None
+        ) = None,
+    ):
+        """
+        Args:
+            function: integrand function.
+                The signature depends on the other arguments:
+
+                - single-channel integration, ``channel_count=None``: ``x -> f``
+                - basic multi-channel integration, ``remapped_dim=None``,
+                  ``has_channel_weight_prior=False``: ``(x, c) -> f``
+                - with channel weights, ``remapped_dim=None``, ``has_channel_weight_prior=True``:
+                  ``(x, c) -> (f, alpha)`` (no trainable channel weights possible)
+                - with channel-dependent mapping, ``remapped_dim: int``,
+                  ``has_channel_weight_prior=False``: ``(x, c) -> (f, y)``
+                - all features, ``remapped_dim: int``, ``has_channel_weight_prior=True``:
+                  ``(x, c) -> (f, y, alpha)``
+
+                with the following tensors:
+
+                - ``x`` is a point generated by the importance sampling, shape (n, input_dim),
+                - ``c`` is the channel index, shape (n, ),
+                - ``f`` is the integrand value, shape (n, ),
+                - ``y`` is the point after applying a channel-dependent mapping, shape
+                  (n, remapped_dim),
+                - ``alpha`` is the prior channel weight, shape (n, channel_count).
+            input_dim: dimension of the integration space
+            bounds: list of pairs ``[lower bound, upper bound]`` of the integration interval for
+                all dimensions. The integrand is rescaled so that the MadNIS training can be
+                performed on the unit hypercube. If None, the unit hypercube is used as the
+                integration domain.
+            channel_count: None in the single-channel case; otherwise, the number of channels.
+            remapped_dim: if not None, the dimension of a remapped space, with a
+                channel-dependent mapping computed as part of the integrand function.
+            has_channel_weight_prior: if True, the integrand returns channel weights
+            channel_grouping: ChannelGrouping object, or None if all channels are independent
+        """
+        # TODO: update documentation
+        super().__init__()
+        self.input_dim = input_dim
+        self.remapped_dim = input_dim if remapped_dim is None else remapped_dim
+        self.channel_count = channel_count
+        self.has_channel_weight_prior = has_channel_weight_prior
+        self.channel_grouping = channel_grouping
+        self.function_includes_sampling = function_includes_sampling
+        self.update_active_channels_mask_func = update_active_channels_mask
+
+        self.discrete_dims = discrete_dims
+        self.discrete_dims_position = discrete_dims_position
+        self.discrete_prior_prob_function = discrete_prior_prob_function
+
+        if function_includes_sampling:
+            self.function = function
+        elif channel_count is None:
+            self.function = lambda x, channels: (function(x), None, None)
+        elif remapped_dim is None:
+            if has_channel_weight_prior:
+
+                def func(x, channels):
+                    w, prior = function(x, channels)
+                    return w, None, prior
+
+                self.function = func
+            else:
+                self.function = lambda x, channels: (function(x, channels), None, None)
+
+        elif has_channel_weight_prior:
+            self.function = function
+        else:
+
+            def func(x, channels):
+                w, y = function(x, channels)
+                return w, y, None
+
+            self.function = func
+
+        if bounds is not None:
+            bounds = torch.tensor(bounds)
+            self.register_buffer("scale", bounds[:, 1] - bounds[:, 0])
+            self.register_buffer("offset", bounds[:, 0])
+            self.register_buffer("scale_det", self.scale.prod())
+            old_func = self.function
+
+            def rescaled_func(x, channels):
+                w, y, prior = old_func(self.scale * x + self.offset, channels)
+                return self.scale_det * w, y, prior
+
+            self.function = rescaled_func
+
+        self.register_buffer(
+            "channel_id_map",
+            (
+                None
+                if self.channel_grouping is None
+                else torch.tensor(
+                    [
+                        channel.group.group_index
+                        for channel in self.channel_grouping.channels
+                    ]
+                )
+            ),
+        )
+
+    def unique_channel_count(self) -> int:
+        """
+        Returns the number of channels or, if some channels are grouped together, the number
+        of channel groups.
+        """
+        if self.channel_grouping is None:
+            return self.channel_count
+        else:
+            return len(self.channel_grouping.groups)
+
+    def remap_channels(self, channels: torch.Tensor | int) -> torch.Tensor | int:
+        """
+        Remaps channel indices to the indices of their respective channel groups if a
+        ``ChannelGrouping`` object was provided; otherwise returns the indices unchanged.
+
+        Args:
+            channels: channel indices, tensor with shape (n, ) or integer
+        Returns:
+            remapped channel indices, tensor with shape (n, ) or integer
+        """
+        if self.channel_grouping is None:
+            return channels
+        elif isinstance(channels, int):
+            return self.channel_id_map[channels].item()
+        else:
+            return self.channel_id_map[channels]
+
+    def update_active_channels_mask(self, mask: torch.Tensor) -> None:
+        if self.update_active_channels_mask_func is None:
+            return
+
+        full_mask = mask[
+            self.remap_channels(
+                torch.arange(len(self.channel_grouping.channels), device=mask.device)
+            )
+        ]
+        self.update_active_channels_mask_func(full_mask)
+
+    def forward(
+        self, x: torch.Tensor, channels: torch.Tensor | None
+    ) -> tuple[torch.Tensor, torch.Tensor | None, torch.Tensor | None]:
+        return self.function(x, channels)
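
To make the wrapping logic concrete, here is a hedged usage sketch (not part of the package), assuming ``Integrand`` is importable from ``madspace.madnis.integrand`` as the file list above suggests. With ``bounds`` given, ``forward`` still receives unit-hypercube points: the wrapper maps them into the interval and multiplies the integrand by the Jacobian ``scale_det = prod(upper - lower)``, so a plain mean over uniform samples already estimates the integral:

import torch

from madspace.madnis.integrand import Integrand  # path per the file list above


# Single-channel case, channel_count=None: the function has signature x -> f.
# f(x) = x^2 on [0, 2]; the exact integral is 8/3.
def f(x: torch.Tensor) -> torch.Tensor:
    return x[:, 0] ** 2


integrand = Integrand(f, input_dim=1, bounds=[[0.0, 2.0]])

u = torch.rand(100_000, 1)  # points on the unit hypercube
w, y, alpha = integrand(u, None)  # y and alpha are None in the single-channel case
print(w.mean())  # Monte Carlo estimate, close to 8/3


# Basic multi-channel case, channel_count=2: the function has signature (x, c) -> f.
def g(x: torch.Tensor, c: torch.Tensor) -> torch.Tensor:
    return torch.where(c == 0, x[:, 0], 1.0 - x[:, 0])


mc = Integrand(g, input_dim=1, channel_count=2)
channels = torch.randint(2, (100_000,))
w2, _, _ = mc(u, channels)

The remaining signature variants (``remapped_dim``, ``has_channel_weight_prior``) only change the tuple returned by ``function``; the wrapper normalizes all of them to the ``(f, y, alpha)`` triple returned by ``forward``.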