madspace 0.3.1__cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- madspace/__init__.py +1 -0
- madspace/_madspace_py.cpython-311-x86_64-linux-gnu.so +0 -0
- madspace/_madspace_py.pyi +2189 -0
- madspace/_madspace_py_loader.py +111 -0
- madspace/include/madspace/constants.h +17 -0
- madspace/include/madspace/madcode/function.h +102 -0
- madspace/include/madspace/madcode/function_builder_mixin.h +591 -0
- madspace/include/madspace/madcode/instruction.h +208 -0
- madspace/include/madspace/madcode/opcode_mixin.h +134 -0
- madspace/include/madspace/madcode/optimizer.h +31 -0
- madspace/include/madspace/madcode/type.h +203 -0
- madspace/include/madspace/madcode.h +6 -0
- madspace/include/madspace/phasespace/base.h +74 -0
- madspace/include/madspace/phasespace/channel_weight_network.h +46 -0
- madspace/include/madspace/phasespace/channel_weights.h +51 -0
- madspace/include/madspace/phasespace/chili.h +32 -0
- madspace/include/madspace/phasespace/cross_section.h +47 -0
- madspace/include/madspace/phasespace/cuts.h +34 -0
- madspace/include/madspace/phasespace/discrete_flow.h +44 -0
- madspace/include/madspace/phasespace/discrete_sampler.h +53 -0
- madspace/include/madspace/phasespace/flow.h +53 -0
- madspace/include/madspace/phasespace/histograms.h +26 -0
- madspace/include/madspace/phasespace/integrand.h +204 -0
- madspace/include/madspace/phasespace/invariants.h +26 -0
- madspace/include/madspace/phasespace/luminosity.h +41 -0
- madspace/include/madspace/phasespace/matrix_element.h +70 -0
- madspace/include/madspace/phasespace/mlp.h +37 -0
- madspace/include/madspace/phasespace/multichannel.h +49 -0
- madspace/include/madspace/phasespace/observable.h +85 -0
- madspace/include/madspace/phasespace/pdf.h +78 -0
- madspace/include/madspace/phasespace/phasespace.h +67 -0
- madspace/include/madspace/phasespace/rambo.h +26 -0
- madspace/include/madspace/phasespace/scale.h +52 -0
- madspace/include/madspace/phasespace/t_propagator_mapping.h +34 -0
- madspace/include/madspace/phasespace/three_particle.h +68 -0
- madspace/include/madspace/phasespace/topology.h +116 -0
- madspace/include/madspace/phasespace/two_particle.h +63 -0
- madspace/include/madspace/phasespace/vegas.h +53 -0
- madspace/include/madspace/phasespace.h +27 -0
- madspace/include/madspace/runtime/context.h +147 -0
- madspace/include/madspace/runtime/discrete_optimizer.h +24 -0
- madspace/include/madspace/runtime/event_generator.h +257 -0
- madspace/include/madspace/runtime/format.h +68 -0
- madspace/include/madspace/runtime/io.h +343 -0
- madspace/include/madspace/runtime/lhe_output.h +132 -0
- madspace/include/madspace/runtime/logger.h +46 -0
- madspace/include/madspace/runtime/runtime_base.h +39 -0
- madspace/include/madspace/runtime/tensor.h +603 -0
- madspace/include/madspace/runtime/thread_pool.h +101 -0
- madspace/include/madspace/runtime/vegas_optimizer.h +26 -0
- madspace/include/madspace/runtime.h +12 -0
- madspace/include/madspace/umami.h +202 -0
- madspace/include/madspace/util.h +142 -0
- madspace/lib/libmadspace.so +0 -0
- madspace/lib/libmadspace_cpu.so +0 -0
- madspace/lib/libmadspace_cpu_avx2.so +0 -0
- madspace/lib/libmadspace_cpu_avx512.so +0 -0
- madspace/lib/libmadspace_cuda.so +0 -0
- madspace/lib/libmadspace_hip.so +0 -0
- madspace/madnis/__init__.py +44 -0
- madspace/madnis/buffer.py +167 -0
- madspace/madnis/channel_grouping.py +85 -0
- madspace/madnis/distribution.py +103 -0
- madspace/madnis/integrand.py +175 -0
- madspace/madnis/integrator.py +973 -0
- madspace/madnis/interface.py +191 -0
- madspace/madnis/losses.py +186 -0
- madspace/torch.py +82 -0
- madspace-0.3.1.dist-info/METADATA +71 -0
- madspace-0.3.1.dist-info/RECORD +75 -0
- madspace-0.3.1.dist-info/WHEEL +6 -0
- madspace-0.3.1.dist-info/licenses/LICENSE +21 -0
- madspace.libs/libgfortran-83c28eba.so.5.0.0 +0 -0
- madspace.libs/libopenblas-r0-11edc3fa.3.15.so +0 -0
- madspace.libs/libquadmath-2284e583.so.0.0.0 +0 -0
madspace/madnis/interface.py
ADDED
@@ -0,0 +1,191 @@
from collections.abc import Callable

import torch
import torch.nn as nn

from .. import _madspace_py_loader as ms
from ..torch import FunctionModule
from .channel_grouping import ChannelGrouping
from .distribution import Distribution
from .integrand import Integrand

MADNIS_INTEGRAND_FLAGS = (
    ms.Integrand.sample
    | ms.Integrand.return_latent
    | ms.Integrand.return_channel
    | ms.Integrand.return_chan_weights
    | ms.Integrand.return_cwnet_input
    | ms.Integrand.return_discrete_latent
)


class IntegrandDistribution(nn.Module, Distribution):
    def __init__(
        self,
        channels: list[ms.Integrand],
        channel_remap_function: Callable[[torch.Tensor], torch.Tensor],
        context: ms.Context,
    ):
        super().__init__()
        self.channel_count = len(channels)
        self.channels = channels
        self.context = context
        self.channel_remap_function = channel_remap_function
        self.latent_dims, self.latent_float = channels[0].latent_dims()
        self.integrand_prob = None
        self.update_channel_mask(torch.ones(self.channel_count, dtype=torch.bool))

    def update_channel_mask(self, mask: torch.Tensor) -> None:
        self.channel_mask = mask
        multi_prob = ms.MultiChannelFunction(
            [
                ms.IntegrandProbability(chan)
                for chan, active in zip(self.channels, mask)
                if active
            ]
        )
        func = multi_prob.function()
        if self.integrand_prob is None:
            self.integrand_prob = FunctionModule(func, self.context)
        else:
            self.integrand_prob.runtime = ms.FunctionRuntime(func, self.context)

    def sample(
        self,
        n: int,
        c: torch.Tensor | None = None,
        channel: torch.Tensor | list[int] | int | None = None,
        return_log_prob: bool = False,
        return_prob: bool = False,
        device: torch.device | None = None,
        dtype: torch.dtype | None = None,
    ) -> torch.Tensor | tuple[torch.Tensor, ...]:
        raise NotImplementedError(
            "IntegrandDistribution does not support sampling directly. "
            "Use the underlying ms.Integrand object instead."
        )

    def prob(
        self,
        x: torch.Tensor,
        c: torch.Tensor | None = None,
        channel: torch.Tensor | list[int] | int | None = None,
    ) -> torch.Tensor:
        channel_perm = None
        if isinstance(channel, torch.Tensor):
            channel = self.channel_remap_function(channel)
            channel_perm = torch.argsort(channel)
            x = x[channel_perm]
            channel = channel.bincount(minlength=self.channel_count).to(torch.int32)
        elif channel is None:
            channel = torch.tensor([len(x)], dtype=torch.int32)
        else:
            raise NotImplementedError("channel argument type not supported")
        channel = channel[self.channel_mask]

        prob_args = [
            xi if is_float else xi[:, 0].to(torch.int32)
            for xi, is_float in zip(x.split(self.latent_dims, dim=1), self.latent_float)
        ]
        prob = self.integrand_prob(*prob_args, channel.cpu())
        if channel_perm is None:
            return prob
        else:
            channel_perm_inv = torch.argsort(channel_perm)
            return prob[channel_perm_inv]


class IntegrandFunction:
    def __init__(self, channels: list[ms.Integrand], context: ms.Context):
        self.channel_count = len(channels)
        self.channels = channels
        self.context = context
        self.update_channel_mask(torch.ones(self.channel_count, dtype=torch.bool))

    def update_channel_mask(self, mask: torch.Tensor) -> None:
        self.channel_mask = mask.cpu()
        multi_integrand = ms.MultiChannelIntegrand(
            [chan for chan, active in zip(self.channels, mask) if active]
        )
        self.multi_runtime = ms.FunctionRuntime(
            multi_integrand.function(), self.context
        )

    def __call__(self, channels: torch.Tensor) -> tuple[torch.Tensor, ...]:
        channel_perm = torch.argsort(channels)
        channels = channels.bincount(minlength=self.channel_count).cpu().to(torch.int32)
        channels = channels[self.channel_mask]
        (
            full_weight,
            latent,
            inv_prob,
            chan_index,
            alphas_prior,
            alpha_selected,
            y,
            *rest,
        ) = self.multi_runtime(channels)

        x_parts = [latent, *rest]
        x = torch.cat(
            [xi.double().reshape(latent.shape[0], -1) for xi in x_parts], dim=1
        )
        prob = 1 / inv_prob
        weight = torch.where(
            alpha_selected == 0.0, 0.0, full_weight * prob / alpha_selected
        )
        channel_perm_inv = torch.argsort(channel_perm)
        return (
            x[channel_perm_inv],
            prob[channel_perm_inv],
            weight[channel_perm_inv],
            y[channel_perm_inv],
            alphas_prior[channel_perm_inv],
            chan_index[channel_perm_inv].to(torch.int64),
        )


def build_madnis_integrand(
    channels: list[ms.Integrand],
    cwnet: ms.ChannelWeightNetwork | None = None,
    channel_grouping: ChannelGrouping | None = None,
    context: ms.Context = ms.default_context(),
) -> tuple[Integrand, Distribution, nn.Module | None]:
    device = torch.device("cpu" if context.device() == ms.cpu_device() else "cuda:0")
    if channel_grouping is None:
        remap_channels = lambda channels: channels
        group_indices = torch.arange(len(channels))
    else:
        channel_id_map = torch.tensor(
            [channel.group.group_index for channel in channel_grouping.channels],
            device=device,
        )
        remap_channels = lambda channels: channel_id_map[channels]
        group_indices = torch.tensor(
            [group.target_index for group in channel_grouping.groups], device=device
        )

    integrand_function = IntegrandFunction(channels, context)
    flow = IntegrandDistribution(channels, remap_channels, context)

    def update_mask(mask: torch.Tensor) -> None:
        context.get_global(cwnet.mask_name()).torch()[0, :] = mask.double()
        group_mask = mask[group_indices]
        if torch.any(group_mask.cpu() != integrand_function.channel_mask):
            integrand_function.update_channel_mask(group_mask)
            flow.update_channel_mask(group_mask)

    integrand = Integrand(
        function=integrand_function,
        input_dim=sum(channels[0].latent_dims()[0]),
        channel_count=len(channel_grouping.channels),
        remapped_dim=cwnet.preprocessing().output_dim(),
        has_channel_weight_prior=cwnet is not None,
        channel_grouping=channel_grouping,
        function_includes_sampling=True,
        update_active_channels_mask=update_mask,
    )
    cwnet_module = (
        None if cwnet is None else FunctionModule(cwnet.mlp().function(), context)
    )
    return integrand, flow, cwnet_module
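Both `IntegrandDistribution.prob` and `IntegrandFunction.__call__` above rely on the same sort/inverse-sort idiom: events are reordered with `argsort` so that entries of the same channel are contiguous, per-channel counts from `bincount` are handed to the multi-channel runtime, and the original event order is restored afterwards via the inverse permutation. A minimal self-contained sketch of that idiom (the toy tensors are illustrative only, not part of the package):

```python
import torch

# Toy stand-ins for per-event data and per-event channel indices.
x = torch.arange(12.0).reshape(6, 2)
channels = torch.tensor([2, 0, 1, 0, 2, 1])

# Group events by channel: after sorting, same-channel rows are contiguous.
perm = torch.argsort(channels)
x_sorted = x[perm]

# Per-channel event counts, as passed to the multi-channel runtime above.
counts = channels.bincount(minlength=3).to(torch.int32)  # tensor([2, 2, 2])

# ... the runtime processes x_sorted one channel block at a time ...

# argsort of the permutation is its inverse: restores the original order.
perm_inv = torch.argsort(perm)
assert torch.equal(x_sorted[perm_inv], x)
```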
madspace/madnis/losses.py
ADDED
@@ -0,0 +1,186 @@
"""Implementation of multi-channel loss functions"""

from collections.abc import Callable
from functools import wraps

import torch


def dtype_epsilon(tensor: torch.Tensor) -> float:
    return torch.finfo(tensor.dtype).eps


SingleChannelLoss = Callable[[torch.Tensor, torch.Tensor, torch.Tensor], torch.Tensor]
MultiChannelLoss = Callable[
    [torch.Tensor, torch.Tensor, torch.Tensor | None, torch.Tensor | None], torch.Tensor
]


def multi_channel_loss(loss: SingleChannelLoss) -> MultiChannelLoss:
    """
    Turns a single-channel loss function into a multi-channel loss function by evaluating it for
    each channel separately and then adding them weighted by TODO weighted by what?

    Args:
        loss: single-channel loss function, that expects the integrand value, test probability and
            sampling probability as arguments
    Returns:
        multi-channel loss function, that expects the integrand value, test probability and,
        optionally, sampling probability and channel indices as arguments.
    """

    # TODO: this unfortunately does not yield the correct signature (with the extra channels argument),
    # so it does not show up in the documentation
    @wraps(loss)
    def wrapped_multi(
        f_true: torch.Tensor,
        q_test: torch.Tensor,
        q_sample: torch.Tensor | None = None,
        channels: torch.Tensor | None = None,
    ) -> torch.Tensor:
        if q_sample is None:
            q_sample = q_test
        if channels is None:
            return loss(f_true, q_test, q_sample)

        loss_tot = 0
        for channel in channels.unique():
            mask = channels == channel
            fi, qti, qsi = f_true[mask], q_test[mask], q_sample[mask]
            ni = mask.count_nonzero()
            # loss_tot += ni / q_sample.shape[0] * loss(fi, qti, qsi) if ni > 0 else 0.0
            loss_tot += loss(fi, qti, qsi) if ni > 0 else 0.0
        return loss_tot

    return wrapped_multi


def stratified_variance(
    f_true: torch.Tensor,
    q_test: torch.Tensor,
    q_sample: torch.Tensor | None = None,
    channels: torch.Tensor | None = None,
):
    """
    Computes the stratified variance as introduced in [2311.01548] for two given sets of
    probabilities, ``f_true`` and ``q_test``. It uses importance sampling with a sampling
    probability specified by ``q_sample``.

    Args:
        f_true: normalized integrand values
        q_test: estimated function/probability
        q_sample: sampling probability
        channels: channel indices or None in the single-channel case
    Returns:
        computed stratified variance
    """
    if q_sample is None:
        q_sample = q_test
    if channels is None:
        abs_integral = torch.mean(f_true.detach().abs() / q_sample)
        return _variance(f_true, q_test, q_sample) / abs_integral.square()

    stddev_sum = 0
    abs_integral = 0
    for i in channels.unique():
        mask = channels == i
        fi, qti, qsi = f_true[mask], q_test[mask], q_sample[mask]
        stddev_sum += torch.sqrt(_variance(fi, qti, qsi) + dtype_epsilon(f_true))
        abs_integral += torch.mean(fi.detach().abs() / qsi)
    return (stddev_sum / abs_integral) ** 2

    # variances = []
    # abs_integrals = []
    # for i in channels.unique():
    #     mask = channels == i
    #     fi, qti, qsi = f_true[mask], q_test[mask], q_sample[mask]
    #     variances.append(_variance(fi, qti, qsi) + dtype_epsilon(f_true))
    #     abs_integrals.append(torch.mean(fi.abs() / qsi))
    # abs_integral_tot = sum(abs_integrals)
    # return sum(
    #     abs_integral / abs_integral_tot * variance
    #     for abs_integral, variance in zip(abs_integrals, variances)
    # ) / abs_integral_tot.detach().square()


@multi_channel_loss
def variance(
    f_true: torch.Tensor, q_test: torch.Tensor, q_sample: torch.Tensor
) -> torch.Tensor:
    abs_integral = torch.mean(f_true.detach().abs() / q_sample) + dtype_epsilon(f_true)
    return _variance(f_true, q_test, q_sample) / abs_integral.square()


def _variance(
    f_true: torch.Tensor,
    q_test: torch.Tensor,
    q_sample: torch.Tensor,
) -> torch.Tensor:
    """
    Computes the variance for two given sets of probabilities, ``f_true`` and ``q_test``. It uses
    importance sampling with a sampling probability specified by ``q_sample``.

    Args:
        f_true: normalized integrand values
        q_test: estimated function/probability
        q_sample: sampling probability
    Returns:
        computed variance
    """
    ratio = q_test / q_sample
    mean = torch.mean(f_true / q_sample)
    sq = (f_true / q_test - mean) ** 2
    return (
        torch.mean(sq * ratio)
        if len(f_true) > 0
        else torch.tensor(0.0, device=f_true.device, dtype=f_true.dtype)
    )


@multi_channel_loss
def kl_divergence(
    f_true: torch.Tensor, q_test: torch.Tensor, q_sample: torch.Tensor
) -> torch.Tensor:
    """
    Computes the Kullback-Leibler divergence for two given sets of probabilities, ``f_true`` and
    ``q_test``. It uses importance sampling, i.e. the estimator is divided by an additional factor
    of ``q_sample``.

    Args:
        f_true: normalized integrand values
        q_test: estimated function/probability
        q_sample: sampling probability
        channels: channel indices or None in the single-channel case
    Returns:
        computed KL divergence
    """
    f_true = f_true.detach().abs()
    f_true /= torch.mean(f_true / q_sample)
    log_q = torch.log(q_test)
    log_f = torch.log(f_true + dtype_epsilon(f_true))
    return torch.mean(f_true / q_sample * (log_f - log_q))


@multi_channel_loss
def rkl_divergence(
    f_true: torch.Tensor, q_test: torch.Tensor, q_sample: torch.Tensor
) -> torch.Tensor:
    """
    Computes the reverse Kullback-Leibler divergence for two given sets of probabilities, ``f_true``
    and ``q_test``. It uses importance sampling, i.e. the estimator is divided by an additional
    factor of ``q_sample``.

    Args:
        f_true: normalized integrand values
        q_test: estimated function/probability
        q_sample: sampling probability
        channels: channel indices or None in the single-channel case
    Returns:
        computed KL divergence
    """
    f_true = f_true.detach().abs()
    f_true /= torch.mean(f_true / q_sample)
    ratio = q_test / q_sample
    log_q = torch.log(q_test)
    log_f = torch.log(f_true + dtype_epsilon(f_true))
    return torch.mean(ratio * (log_q - log_f))
madspace/torch.py
ADDED
@@ -0,0 +1,82 @@
import torch
import torch.nn as nn
from torch.autograd.function import FunctionCtx, once_differentiable

from . import _madspace_py_loader as me


class FunctionModule(nn.Module):
    def __init__(
        self,
        function: me.Function,
        context: me.Context = me.default_context(),
    ):
        super().__init__()
        self.global_params = nn.ParameterDict(
            {
                name.replace(".", ":"): nn.Parameter(
                    context.get_global(name).torch(),
                    context.global_requires_grad(name),
                )
                for name in function.globals
            }
        )
        self.runtime = me.FunctionRuntime(function, context)
        self.dummy = torch.zeros(
            1,
            requires_grad=any(
                glob.requires_grad for glob in self.global_params.values()
            ),
        )

    def forward(self, *args: torch.Tensor) -> list[torch.Tensor]:
        if torch.is_grad_enabled():
            return AutogradWrapper.apply(self, self.dummy, *args)
        else:
            outputs = self.runtime.call(args)
            if len(outputs) == 1:
                return torch.from_dlpack(outputs[0])
            else:
                return tuple(torch.from_dlpack(out) for out in outputs)


class AutogradWrapper(torch.autograd.Function):
    @staticmethod
    def forward(
        ctx: FunctionCtx,
        module: FunctionModule,
        dummy: torch.Tensor,
        *args: torch.Tensor,
    ) -> list[torch.Tensor]:
        outputs, local_grads, eval_grad = module.runtime.call_with_grad(
            [arg.detach() for arg in args], [arg.requires_grad for arg in args]
        )
        ctx.module = module
        ctx.eval_grad = eval_grad
        ctx.save_for_backward(
            *(None if grad is None else torch.from_dlpack(grad) for grad in local_grads)
        )
        if len(outputs) == 1:
            return torch.from_dlpack(outputs[0])
        else:
            return tuple(torch.from_dlpack(out) for out in outputs)

    @staticmethod
    @once_differentiable
    def backward(ctx: FunctionCtx, *output_grads: torch.Tensor):
        input_grads, global_grads = ctx.module.runtime.call_backward(
            output_grads, ctx.saved_tensors, ctx.eval_grad
        )
        for name, grad in global_grads:
            if grad is None:
                continue
            param = ctx.module.global_params[name.replace(".", ":")]
            grad_torch = torch.from_dlpack(grad)
            if param.grad is None:
                param.grad = grad_torch
            else:
                param.grad += grad_torch
        input_grads_opt = (
            None if grad is None else torch.from_dlpack(grad) for grad in input_grads
        )
        return None, None, *input_grads_opt
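The `dummy` tensor in `FunctionModule` exists because `torch.autograd.Function.apply` only records a graph node if at least one input requires a gradient; the global parameters live outside the argument list, so a one-element tensor mirroring their `requires_grad` guarantees that `backward` still fires and the hand-accumulated global gradients are computed. A minimal illustration of the pattern, independent of madspace (the names here are invented for the example):

```python
import torch

g = torch.nn.Parameter(torch.tensor(2.0))  # a "global" never passed to apply()

class ScaleByGlobal(torch.autograd.Function):
    @staticmethod
    def forward(ctx, dummy, x):
        ctx.save_for_backward(x)
        return x * g.detach()

    @staticmethod
    def backward(ctx, grad_out):
        (x,) = ctx.saved_tensors
        # Accumulate the global's gradient by hand, as FunctionModule does.
        grad_g = (grad_out * x).sum()
        g.grad = grad_g if g.grad is None else g.grad + grad_g
        return None, grad_out * g.detach()

x = torch.tensor([1.0, 2.0])                # plain data, no grad required
dummy = torch.zeros(1, requires_grad=True)  # forces autograd to record the op
ScaleByGlobal.apply(dummy, x).sum().backward()
print(g.grad)                               # tensor(3.) = sum(x)
```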
madspace-0.3.1.dist-info/METADATA
ADDED
@@ -0,0 +1,71 @@
Metadata-Version: 2.1
Name: madspace
Version: 0.3.1
Summary: Fast and flexible phase space mappings
Author-Email: Theo Heimel <theo.heimel@uclouvain.be>
Classifier: Programming Language :: Python :: 3
Classifier: License :: OSI Approved :: MIT License
Classifier: Operating System :: OS Independent
Requires-Python: >=3.11
Description-Content-Type: text/markdown

<p align="center">
<img src="https://raw.githubusercontent.com/madgraph-ml/madevent7/refs/heads/dev/docs/source/_static/logo-light-madspace.png" width="500" alt="MadSpace">
</p>

### Installation

#### Package

Packages on PyPI are available for Linux and macOS (Apple silicon),
for Python 3.11 to 3.14.

```sh
pip install madspace
```

#### Build

First install `scikit_build_core` with

```sh
pip install scikit_build_core
```

The version of `cmake` pre-installed on macOS is outdated, so you might need to install a
newer one, for example with

```sh
brew install cmake
```

Then check out the `madspace` repository, build it, and install it with

```sh
git clone git@github.com:madgraph-ml/madspace.git
cd madspace
pip install .
```

For a development setup that allows incremental builds, use the following command instead:

```sh
pip install --no-build-isolation -Cbuild-dir=build -Ccmake.build-type=RelWithDebInfo .
```

This creates a directory `build` where you can run `make` directly, which makes development
easier. To update the Python module itself, make sure to also run the `pip install` command
above again; this does not happen automatically, even if you make the installation editable!
The `RelWithDebInfo` build type generates optimized code but includes debug symbols, so you
can use `lldb` or `gdb` to debug the code.

### Tests

To run the tests, you need the `pytest`, `numpy` and `torch` packages installed.
One test optionally requires the `lhapdf` package (which can be installed via conda or built
from source) and the `NNPDF40_nlo_as_01180` PDF set.

Then go to the root directory of the repository and run
```sh
pytest tests
```
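A quick post-install smoke test, based only on the modules visible in this diff (`madspace.torch` and `madspace.madnis` ship with the wheel; `torch` must be installed for these imports to succeed):

```python
# Verify the extension module and the torch/madnis bridges import cleanly.
import madspace
from madspace.torch import FunctionModule
from madspace.madnis.losses import variance, kl_divergence

print(madspace.__name__, FunctionModule, variance, kl_divergence)
```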
madspace-0.3.1.dist-info/RECORD
ADDED
@@ -0,0 +1,75 @@
madspace/__init__.py,sha256=_TJn7-SPJOLeE-6jzggKLKTL9hKfSYj-SM9ikcXbCd0,35
madspace/_madspace_py.cpython-311-x86_64-linux-gnu.so,sha256=cfm94vkyWohw1_1xUwyfFXsqsN9MMziKm8WJoBBmI-0,1926064
madspace/_madspace_py.pyi,sha256=Ghgma25kyma__8vdX4wgfajHT0ilnYuQSoqMiNTGNYc,79564
madspace/_madspace_py_loader.py,sha256=vLGrMuowLhDZWwEs2_ZKAwQo8GQZ1smG9Izdw3vkdiI,3398
madspace/torch.py,sha256=nJmAtTtOX_40GN6HNxPTx0BR16XaD6DtpSjMewm4rH0,2796
madspace/include/madspace/constants.h,sha256=2SEY6x7ODAyDy0TwSlMRDZNxb71ULadBh_HXd7u3XmA,443
madspace/include/madspace/madcode.h,sha256=O7BI2AuMTlpDh2xUfycaIxkpqLvYwenSC-AyxLvbCnM,134
madspace/include/madspace/phasespace.h,sha256=K-rnXoaVqzj5PWkkqLBXlQubPg9HAWyVZYjqbw7ZNZk,890
madspace/include/madspace/runtime.h,sha256=k6gmivKqEcBJvFXcnWdyFx_W1B44G7cE_T79K8Dhnao,330
madspace/include/madspace/umami.h,sha256=mIJ4mxUHsBRl-lRJkI5EHCEWzGOq5aV1dqXZaF_ste4,5601
madspace/include/madspace/util.h,sha256=WbzL0LsMACHiEgGfooC3nVbrr_ucBGqUNMKNYDRAsQc,3927
madspace/include/madspace/madcode/function.h,sha256=gbr1z5saZLZp4BdBbHEFAhbxu6GgeoBghweVFRaPA0w,3114
madspace/include/madspace/madcode/function_builder_mixin.h,sha256=gLbJY8IlaIoLV6APSKDpIyHLqVyYd7QguUukI1QjvcI,21303
madspace/include/madspace/madcode/instruction.h,sha256=Ib0v0RjXbxZvePHtYFHwYcupDbIvHRQUxLpzNw6VC1Y,6664
madspace/include/madspace/madcode/opcode_mixin.h,sha256=Esvkw3qI2iO4GdLNGCBrmc7YwiSZq-LcSROs5m3tawA,2841
madspace/include/madspace/madcode/optimizer.h,sha256=x5gb3o76boojMoe2n_Yq9uyp3esAV6ptANkZ7grggNE,670
madspace/include/madspace/madcode/type.h,sha256=eAIiMQxDKnJOOBWnjlJa_c9yi-FqBM87sFuyVJzwq5w,7323
madspace/include/madspace/phasespace/base.h,sha256=qr0W7_E7sTp6N0HCC8uWKzkI0VvxLxoLIL8IWo6ziLw,2286
madspace/include/madspace/phasespace/channel_weight_network.h,sha256=5P7vanQIOX0dZVq2syT8UPePM1S_PxCCaZ_gbao1Vnc,1259
madspace/include/madspace/phasespace/channel_weights.h,sha256=niMLyoIRtZzxuMJOEsrOgDrfAvzTZKkpZ5tNAWuNg80,1499
madspace/include/madspace/phasespace/chili.h,sha256=PZkKp2-Yd8dk-bfVL8wu29Biq0qJLwfrJ3LsKmkJ1ww,751
madspace/include/madspace/phasespace/cross_section.h,sha256=hoj0vxgHKDYRX9zD3Kh1zz_tl_skAkmXxpffNbfOYoY,1536
madspace/include/madspace/phasespace/cuts.h,sha256=c373HlSJCNHKH8WU9tLb1MTbgy9QNJRxTeB4XRAbJEw,838
madspace/include/madspace/phasespace/discrete_flow.h,sha256=CV3Tr0ad6tVW8qTmCiTU2zeyAlcmaR02DJ9xZ8Fh-rM,1386
madspace/include/madspace/phasespace/discrete_sampler.h,sha256=g2MkMm4FyDwyCtq_4OhftB08NzwDA35hlQKok1RhKak,1550
madspace/include/madspace/phasespace/flow.h,sha256=8WwQwW4SKvk7Y7V2BsZNF4S_X342FewgnxsNvI3oIK4,1539
madspace/include/madspace/phasespace/histograms.h,sha256=hgWqwFKxWn1VqN64lr-nkIX0fh-VmL-vhhebWOG44kk,635
madspace/include/madspace/phasespace/integrand.h,sha256=FUM9OzLj5ucQv0vD2CaH0gDhxE3Tzeuw8wDs8T8XqY4,8020
madspace/include/madspace/phasespace/invariants.h,sha256=W2Y45eAN3hZvzNONyqzqogkKdkJDuBtyybeL_j6qKx8,683
madspace/include/madspace/phasespace/luminosity.h,sha256=YVARtY5x57wA7WMeZHjdTDplB3bXpnrzxFPQwP3V334,1049
madspace/include/madspace/phasespace/matrix_element.h,sha256=dRmR-bRcUp8K0L72xqSwjvCXZtSbcYgC3LWJyGZviPA,2207
madspace/include/madspace/phasespace/mlp.h,sha256=xncDnGM0UIlIcKPaIYZuZChg-F3VdmK-TKSsheYBNzI,1058
madspace/include/madspace/phasespace/multichannel.h,sha256=PK9KKsmJfXuINf3YGK4vyCJ68hpxZO6cYfEIkSHstMw,1275
madspace/include/madspace/phasespace/observable.h,sha256=hq80DwuSM1ngWEKXxKj8Q4ZxFbJRaLprTURRQY_RhOg,2373
madspace/include/madspace/phasespace/pdf.h,sha256=B615sw5L5p62LVs50HMPeiflLk9y-wXhy6_ngMuRQfo,2444
madspace/include/madspace/phasespace/phasespace.h,sha256=jTEHePcsW14RpfSbXqv0GvHXDK-mQNBYPW_AGmq3GEs,2144
madspace/include/madspace/phasespace/rambo.h,sha256=kcplgg0LUvWMEGXJQ90J6VwsU3etULsIAefhCTeB6Jo,645
madspace/include/madspace/phasespace/scale.h,sha256=LMmhwVS_edfShbleAXJlUkx_pr9xa6q5Xdc4hVpW308,1381
madspace/include/madspace/phasespace/t_propagator_mapping.h,sha256=IGQarmhcTGxP5jNjQP-hZbxh57TWi_EiTvDNNydMR00,999
madspace/include/madspace/phasespace/three_particle.h,sha256=mAzC9BYBSCu7luLRWTGqR8KWoSXEoI9AJogwuDRawgU,1954
madspace/include/madspace/phasespace/topology.h,sha256=b1GCgWnv5XDyAj3c72BI7ws57WGgHiQVA2bWp3GTPhc,3728
madspace/include/madspace/phasespace/two_particle.h,sha256=E4nhlcY-inIpRu_RrqU8H1nzx0_otQfxUi_xZkHin1c,1730
madspace/include/madspace/phasespace/vegas.h,sha256=t65pvX7B9RtbIL5AMywyEdAGsiOYLnjrO7Q914B9hNc,1514
madspace/include/madspace/runtime/context.h,sha256=oHD6xaTzt-T9VgxFLuh355CiDHHfAVmwySOcgivB73E,4705
madspace/include/madspace/runtime/discrete_optimizer.h,sha256=UJWGw0te5xXpQmxXQV_C_M7xmD9WT8mW0b2UmqB_I-A,664
madspace/include/madspace/runtime/event_generator.h,sha256=E0lwslt25ogaaY3bojnzMmpsGA-bTUxUOuFLXvwSsMw,8789
madspace/include/madspace/runtime/format.h,sha256=BgSKfcaCXxk7b83GJy0ZJYHZzJjq4wZhQbqGX3lPjNc,1901
madspace/include/madspace/runtime/io.h,sha256=o_zf4EFnVJy-s352U6lKdkBS6a-qtXupTbewoTE6FU4,11231
madspace/include/madspace/runtime/lhe_output.h,sha256=KsqaOPuctdinPKip8M1YQLxDwJBbpIHdgjQfaDkXICc,3654
madspace/include/madspace/runtime/logger.h,sha256=BOzfJM1Nx68Z1a-6yqnVTyi2vu7uhW5HrMs7D_U1010,1396
madspace/include/madspace/runtime/runtime_base.h,sha256=pW4obFKqgWcIiZF8sRHO1ly7JmmdwnjJz7ta-dcQ-Gc,1213
madspace/include/madspace/runtime/tensor.h,sha256=9E4zrohNRCvbEa5UEpfgEYyv5oiyqDqw6g9EmphFH7Y,17464
madspace/include/madspace/runtime/thread_pool.h,sha256=986mqqluJFRfkxIObJ_T2E8bKvxOrtOeHE0mgxwayUY,3150
madspace/include/madspace/runtime/vegas_optimizer.h,sha256=Ot2hg4kWOxQQhT27ua5iYNwxYYTEp_H_PObrisCWhO4,653
madspace/lib/libmadspace.so,sha256=SuUAsi62bHVy3g9pe2Tg5TSMERntD_CqFI3LZ_yLJ0M,2107984
madspace/lib/libmadspace_cpu.so,sha256=9bP4J8q09jfiZxvXGE8XhSUjjyCWpxGd0gRgZpqW9c4,3250889
madspace/lib/libmadspace_cpu_avx2.so,sha256=fdWbPP2u2iqTPdQmaVPuCruoHIFeufyLUXNqc23tMKQ,3787513
madspace/lib/libmadspace_cpu_avx512.so,sha256=BfgTapVD2J6qntfDHmcrLP0Zw64-LXY-2IUQVt3IiFU,3782633
madspace/lib/libmadspace_cuda.so,sha256=-tWaKvtSb4Jm3EIxkFgo1I68lIAc61bZLPVieGelhxM,43864960
madspace/lib/libmadspace_hip.so,sha256=qQW8mYq7lXcZHJbV-Q1x03Rs3J3swPkx1JMbgZvounA,20934944
madspace/madnis/__init__.py,sha256=fqraYGuiFlLs9r8uTTl_1mnx4YOBuTjxv1eOkMCGeSI,1060
madspace/madnis/buffer.py,sha256=ri2TlFkQCp7G_fBa0yTb4f78Ddsv_Bknrp508yoZQdY,6287
madspace/madnis/channel_grouping.py,sha256=ub7qkPH9q5hvTovn8NIKvnrMq4GP3yfufNGNH2szQp8,2791
madspace/madnis/distribution.py,sha256=NjBaGswHZqLpXnFzGI9RPI5_MNp1x-NyzaB561Vbwtg,4381
madspace/madnis/integrand.py,sha256=TU9YVAK0cKdXTuu9PpZqKOIkihyMh_3UEGg5o2-wG_8,7201
madspace/madnis/integrator.py,sha256=UKIQX0C1z-zV0VJqB1jLo7GBBDZDC-Yf0872SALvoTw,40896
madspace/madnis/interface.py,sha256=poRCZDCAZpbtIS9K24aqNJgBfuy00bHjACpvkj1rnlM,7008
madspace/madnis/losses.py,sha256=xln1jvqO28bb6y8mCa-d7VQljIzRXS0H7AfUXftubb0,6477
madspace.libs/libgfortran-83c28eba.so.5.0.0,sha256=5VSHn-5-2oAFW2yhQHpU0ABQh0-DDMhrK_P1DfXHuys,2714697
madspace.libs/libopenblas-r0-11edc3fa.3.15.so,sha256=ttQfy1lpKahm326QdidnjmHyTJTDXtYE5oeghyitWbg,37105401
madspace.libs/libquadmath-2284e583.so.0.0.0,sha256=qTB7uqNTYj-gFOi2gtUMRin15cfWHaqahlL30WXNQho,272193
madspace-0.3.1.dist-info/METADATA,sha256=aP0fzUEbcUrZLsAbFuKdOHEmhKzdYCIC9_Oa9mVfvw8,2050
madspace-0.3.1.dist-info/WHEEL,sha256=ySegkulW2pNmB05QoI2fb4eLl_ZQeVpF0irxyte32ls,157
madspace-0.3.1.dist-info/RECORD,,
madspace-0.3.1.dist-info/licenses/LICENSE,sha256=kKZDz5i69Frn0LWZ9xbuk1g1W1Jco18Dk777rNHG-zw,1074
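Each RECORD row follows the standard wheel format `path,algorithm=urlsafe-b64-digest,size` (PEP 376/627), so an installation can be verified against it. A hedged sketch of such a check (the site-packages root derived from `madspace.__file__` is an assumption about the local layout):

```python
import base64
import csv
import hashlib
import pathlib

import madspace

# RECORD lives in the .dist-info directory next to the installed package.
site = pathlib.Path(madspace.__file__).resolve().parent.parent
record = site / "madspace-0.3.1.dist-info" / "RECORD"

with record.open(newline="") as fh:
    for path, file_hash, size in csv.reader(fh):
        if not file_hash:  # RECORD lists itself without a hash
            continue
        algo, _, expected = file_hash.partition("=")
        digest = hashlib.new(algo, (site / path).read_bytes()).digest()
        actual = base64.urlsafe_b64encode(digest).rstrip(b"=").decode()
        assert actual == expected, f"hash mismatch for {path}"
```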
madspace-0.3.1.dist-info/licenses/LICENSE
ADDED
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2025 The MadGraph Team

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.