pcntoolkit 0.32.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pcntoolkit/__init__.py +4 -0
- pcntoolkit/configs.py +9 -0
- pcntoolkit/dataio/__init__.py +1 -0
- pcntoolkit/dataio/fileio.py +608 -0
- pcntoolkit/model/KnuOp.py +48 -0
- pcntoolkit/model/NP.py +88 -0
- pcntoolkit/model/NPR.py +86 -0
- pcntoolkit/model/SHASH.py +509 -0
- pcntoolkit/model/__init__.py +6 -0
- pcntoolkit/model/architecture.py +219 -0
- pcntoolkit/model/bayesreg.py +585 -0
- pcntoolkit/model/core.21290 +0 -0
- pcntoolkit/model/gp.py +489 -0
- pcntoolkit/model/hbr.py +1584 -0
- pcntoolkit/model/rfa.py +245 -0
- pcntoolkit/normative.py +1647 -0
- pcntoolkit/normative_NP.py +336 -0
- pcntoolkit/normative_model/__init__.py +6 -0
- pcntoolkit/normative_model/norm_base.py +62 -0
- pcntoolkit/normative_model/norm_blr.py +303 -0
- pcntoolkit/normative_model/norm_gpr.py +112 -0
- pcntoolkit/normative_model/norm_hbr.py +752 -0
- pcntoolkit/normative_model/norm_np.py +333 -0
- pcntoolkit/normative_model/norm_rfa.py +109 -0
- pcntoolkit/normative_model/norm_utils.py +29 -0
- pcntoolkit/normative_parallel.py +1420 -0
- pcntoolkit/regression_model/blr/warp.py +1 -0
- pcntoolkit/trendsurf.py +315 -0
- pcntoolkit/util/__init__.py +1 -0
- pcntoolkit/util/bspline.py +149 -0
- pcntoolkit/util/hbr_utils.py +242 -0
- pcntoolkit/util/utils.py +1698 -0
- pcntoolkit-0.32.0.dist-info/LICENSE +674 -0
- pcntoolkit-0.32.0.dist-info/METADATA +134 -0
- pcntoolkit-0.32.0.dist-info/RECORD +37 -0
- pcntoolkit-0.32.0.dist-info/WHEEL +4 -0
- pcntoolkit-0.32.0.dist-info/entry_points.txt +5 -0
pcntoolkit/model/NP.py
ADDED
|
@@ -0,0 +1,88 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
# -*- coding: utf-8 -*-
|
|
3
|
+
"""
|
|
4
|
+
Created on Mon Jun 24 15:06:06 2019
|
|
5
|
+
|
|
6
|
+
@author: seykia
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
import torch
|
|
10
|
+
from torch import nn
|
|
11
|
+
from torch.nn import functional as F
|
|
12
|
+
|
|
13
|
+
##################################### NP Model ################################
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
class NP(nn.Module):
    """Neural Process model.

    Wraps a user-supplied encoder and decoder. The encoder maps (x, y)
    pairs to a deterministic representation r; two linear heads (with
    dropout) map r to the mean and log-variance of the latent z.

    Args:
        encoder: module with attributes r_dim, z_dim, dp_level and a
            forward(x, y) returning a representation of size r_dim.
        decoder: module with forward(z_sample, x) returning predictions.
        args: configuration object; args.device and args.type are read
            ('ST'/'MT' — presumably single-task vs multi-task; confirm).
    """

    def __init__(self, encoder, decoder, args):
        super(NP, self).__init__()
        self.r_dim = encoder.r_dim
        self.z_dim = encoder.z_dim
        self.dp_level = encoder.dp_level
        self.encoder = encoder
        self.decoder = decoder
        # Dropout applied to r before each latent head.
        self.r_to_z_mean_dp = nn.Dropout(p=self.dp_level)
        self.r_to_z_mean = nn.Linear(self.r_dim, self.z_dim)
        self.r_to_z_logvar_dp = nn.Dropout(p=self.dp_level)
        self.r_to_z_logvar = nn.Linear(self.r_dim, self.z_dim)
        self.device = args.device
        self.type = args.type

    def xy_to_z_params(self, x, y):
        """Encode (x, y) into the parameters (mu, logvar) of q(z)."""
        r = self.encoder.forward(x, y)
        mu = self.r_to_z_mean(self.r_to_z_mean_dp(r))
        logvar = self.r_to_z_logvar(self.r_to_z_logvar_dp(r))
        return mu, logvar

    def reparameterise(self, z):
        """Draw one sample from N(mu, exp(logvar)) via the reparameterization trick.

        Args:
            z: tuple (mu, logvar).
        """
        mu, logvar = z
        std = torch.exp(0.5 * logvar)
        eps = torch.randn_like(std)
        # In-place add keeps eps*std as the sample buffer.
        z_sample = eps.mul(std).add_(mu)
        return z_sample

    def forward(self, x_context, y_context, x_all=None, y_all=None, n=10):
        """Run the NP.

        Training: encode all points, sample z once, decode on x_all.
        Evaluation: encode context only, draw n z-samples, decode on
        x_context, and return the mean prediction (plus the per-point
        std across samples when n > 1).

        Returns:
            (y_hat, z_all, z_context, y_sigma); y_sigma is None during
            training or when n <= 1.
        """
        y_sigma = None
        z_context = self.xy_to_z_params(x_context, y_context)
        if self.training:
            z_all = self.xy_to_z_params(x_all, y_all)
            z_sample = self.reparameterise(z_all)
            y_hat = self.decoder.forward(z_sample, x_all)
        else:
            z_all = z_context
            # Samples are accumulated on CPU (presumably to bound GPU
            # memory) and moved to self.device only after aggregation.
            # NOTE(review): if self.type is neither 'ST' nor 'MT', temp is
            # never created and the loop below raises NameError.
            if self.type == 'ST':
                temp = torch.zeros(
                    [n, y_context.shape[0], y_context.shape[2]], device='cpu')
            elif self.type == 'MT':
                temp = torch.zeros([n, y_context.shape[0], 1, y_context.shape[2], y_context.shape[3],
                                    y_context.shape[4]], device='cpu')
            for i in range(n):
                z_sample = self.reparameterise(z_all)
                temp[i, :] = self.decoder.forward(z_sample, x_context)
            y_hat = torch.mean(temp, dim=0).to(self.device)
            if n > 1:
                y_sigma = torch.std(temp, dim=0).to(self.device)
        return y_hat, z_all, z_context, y_sigma
|
|
66
|
+
|
|
67
|
+
###############################################################################
|
|
68
|
+
|
|
69
|
+
|
|
70
|
+
def apply_dropout_test(m):
    """Keep dropout stochastic at test time (MC-dropout style sampling).

    Intended for use as ``model.apply(apply_dropout_test)`` after
    ``model.eval()``: every dropout module is switched back to training
    mode so it keeps sampling masks during inference, while all other
    modules stay in eval mode.

    Args:
        m: a submodule visited by ``nn.Module.apply``.
    """
    # isinstance instead of an exact type(...) == comparison: the intent
    # is "any dropout layer", which should include nn.Dropout subclasses.
    if isinstance(m, nn.Dropout):
        m.train()
|
|
73
|
+
|
|
74
|
+
|
|
75
|
+
def kl_div_gaussians(mu_q, logvar_q, mu_p, logvar_p):
    """Analytic KL divergence KL(q || p) between diagonal Gaussians.

    Both distributions are given by their means and log-variances.

    Args:
        mu_q, logvar_q: parameters of q.
        mu_p, logvar_p: parameters of p.

    Returns:
        Scalar tensor: the KL divergence summed over all elements.
    """
    var_q = torch.exp(logvar_q)
    var_p = torch.exp(logvar_p)
    # Standard closed form: (var_q + (mu_q - mu_p)^2)/var_p - 1 + log(var_p/var_q)
    per_element = (var_q + (mu_q - mu_p) ** 2) / var_p - 1.0 + logvar_p - logvar_q
    return 0.5 * per_element.sum()
|
|
82
|
+
|
|
83
|
+
|
|
84
|
+
def np_loss(y_hat, y, z_all, z_context):
    """Neural-process training objective.

    Reconstruction term plus latent regularizer: binary cross-entropy
    between the prediction and the mean of ``y`` over its sample
    dimension (dim=1), plus KL(q(z | all) || q(z | context)).

    Args:
        y_hat: decoder output.
        y: targets with a sample dimension at dim=1.
        z_all: (mu, logvar) of the latent from all points.
        z_context: (mu, logvar) of the latent from context points.

    Returns:
        Scalar loss tensor.
    """
    target = torch.mean(y, dim=1)
    recon = F.binary_cross_entropy(torch.squeeze(y_hat), target,
                                   reduction="sum")
    kld = kl_div_gaussians(z_all[0], z_all[1], z_context[0], z_context[1])
    return recon + kld
|
pcntoolkit/model/NPR.py
ADDED
|
@@ -0,0 +1,86 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
# -*- coding: utf-8 -*-
|
|
3
|
+
"""
|
|
4
|
+
Created on Fri Nov 22 14:32:37 2019
|
|
5
|
+
|
|
6
|
+
@author: seykia
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
import torch
|
|
10
|
+
from torch import nn
|
|
11
|
+
from torch.nn import functional as F
|
|
12
|
+
|
|
13
|
+
##################################### NP Model ################################
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
class NPR(nn.Module):
    """Neural Process for Regression.

    Variant of the NP model: the two latent heads have no dropout, the
    decoder is conditioned on z only (no x argument), and it returns a
    pair of outputs — a central prediction and an auxiliary output
    (named *_84; presumably an 84th-percentile estimate — confirm
    against np_loss below).

    Args:
        encoder: module with attributes r_dim, z_dim and a
            forward(x, y) returning a representation of size r_dim.
        decoder: module whose forward(z_sample) returns two tensors.
        args: configuration object; args.device is read.
    """

    def __init__(self, encoder, decoder, args):
        super(NPR, self).__init__()
        self.r_dim = encoder.r_dim
        self.z_dim = encoder.z_dim
        self.encoder = encoder
        self.decoder = decoder
        self.r_to_z_mean = nn.Linear(self.r_dim, self.z_dim)
        self.r_to_z_logvar = nn.Linear(self.r_dim, self.z_dim)
        self.device = args.device

    def xy_to_z_params(self, x, y):
        """Encode (x, y) into the parameters (mu, logvar) of q(z)."""
        r = self.encoder.forward(x, y)
        mu = self.r_to_z_mean(r)
        logvar = self.r_to_z_logvar(r)
        return mu, logvar

    def reparameterise(self, z):
        """Draw one sample from N(mu, exp(logvar)) via the reparameterization trick.

        Args:
            z: tuple (mu, logvar).
        """
        mu, logvar = z
        std = torch.exp(0.5 * logvar)
        eps = torch.randn_like(std)
        # In-place add keeps eps*std as the sample buffer.
        z_sample = eps.mul(std).add_(mu)
        return z_sample

    def forward(self, x_context, y_context, x_all=None, y_all=None, n=10):
        """Run the NPR.

        Training: encode all points, sample z once, decode once.
        Evaluation: encode context only, draw n z-samples, decode each,
        and return the mean of both decoder outputs (plus their stds
        across samples when n > 1).

        Returns:
            (y_hat, y_hat_84, z_all, z_context, y_sigma, y_sigma_84);
            the sigmas are None during training or when n <= 1.
        """
        y_sigma = None
        y_sigma_84 = None
        z_context = self.xy_to_z_params(x_context, y_context)
        if self.training:
            z_all = self.xy_to_z_params(x_all, y_all)
            z_sample = self.reparameterise(z_all)
            y_hat, y_hat_84 = self.decoder.forward(z_sample)
        else:
            z_all = z_context
            # Unlike NP.forward, the sample buffers live on self.device.
            temp = torch.zeros(
                [n, y_context.shape[0], y_context.shape[2]], device=self.device)
            temp_84 = torch.zeros(
                [n, y_context.shape[0], y_context.shape[2]], device=self.device)
            for i in range(n):
                z_sample = self.reparameterise(z_all)
                temp[i, :], temp_84[i, :] = self.decoder.forward(z_sample)
            y_hat = torch.mean(temp, dim=0).to(self.device)
            y_hat_84 = torch.mean(temp_84, dim=0).to(self.device)
            if n > 1:
                y_sigma = torch.std(temp, dim=0).to(self.device)
                y_sigma_84 = torch.std(temp_84, dim=0).to(self.device)
        return y_hat, y_hat_84, z_all, z_context, y_sigma, y_sigma_84
|
|
63
|
+
|
|
64
|
+
###############################################################################
|
|
65
|
+
|
|
66
|
+
|
|
67
|
+
def kl_div_gaussians(mu_q, logvar_q, mu_p, logvar_p):
    """Closed-form KL divergence KL(q || p) for diagonal Gaussians.

    Args:
        mu_q, logvar_q: mean and log-variance of q.
        mu_p, logvar_p: mean and log-variance of p.

    Returns:
        Scalar tensor: KL divergence summed over all elements.
    """
    # (var_q + (mu_q - mu_p)^2) / var_p - 1 + log var_p - log var_q, halved.
    ratio = (torch.exp(logvar_q) + (mu_q - mu_p) ** 2) / torch.exp(logvar_p)
    elementwise = ratio + (logvar_p - logvar_q) - 1.0
    return elementwise.sum() * 0.5
|
|
74
|
+
|
|
75
|
+
|
|
76
|
+
def np_loss(y_hat, y_hat_84, y, z_all, z_context):
    """Quantile-aware NPR objective.

    Sum of: (a) BCE between the central prediction and the mean of ``y``
    over its sample dimension, (b) an asymmetrically weighted BCE that
    pushes ``y_hat_84`` toward an upper quantile (0.84/0.16 weighting of
    under- vs over-prediction), and (c) KL(z_all || z_context).

    Args:
        y_hat: central decoder output.
        y_hat_84: auxiliary decoder output (upper-quantile estimate).
        y: targets with a sample dimension at dim=1.
        z_all: (mu, logvar) of the latent from all points.
        z_context: (mu, logvar) of the latent from context points.

    Returns:
        Scalar loss tensor.
    """
    # PBL = pinball_loss(y, y_hat, 0.05)
    BCE = F.binary_cross_entropy(torch.squeeze(
        y_hat), torch.mean(y, dim=1), reduction="sum")
    # Split samples by whether the target lies above or below the
    # quantile prediction; weight the two BCE halves asymmetrically.
    # NOTE(review): the boolean masks index both y_hat_84 and y along
    # dim 0 — assumes matching leading shapes after squeeze; confirm.
    idx1 = (y >= y_hat_84).squeeze()
    idx2 = (y < y_hat_84).squeeze()
    BCE84 = 0.84 * F.binary_cross_entropy(torch.squeeze(y_hat_84[idx1, :]), torch.mean(y[idx1, :], dim=1), reduction="sum") + \
        0.16 * F.binary_cross_entropy(torch.squeeze(
            y_hat_84[idx2, :]), torch.mean(y[idx2, :], dim=1), reduction="sum")
    KLD = kl_div_gaussians(z_all[0], z_all[1], z_context[0], z_context[1])
    return BCE + KLD + BCE84
|
|
@@ -0,0 +1,509 @@
|
|
|
1
|
+
"""
|
|
2
|
+
@author: Stijn de Boer (AuguB)
|
|
3
|
+
See: Jones et al. (2009), Sinh-Arcsinh distributions.
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
from functools import lru_cache
|
|
7
|
+
|
|
8
|
+
import numpy as np
|
|
9
|
+
from pymc import floatX
|
|
10
|
+
from pymc.distributions import Continuous
|
|
11
|
+
from pytensor.tensor import as_tensor_variable
|
|
12
|
+
from pytensor.tensor.elemwise import Elemwise
|
|
13
|
+
from pytensor.tensor.random.op import RandomVariable
|
|
14
|
+
from scipy.special import kv
|
|
15
|
+
|
|
16
|
+
from pcntoolkit.model.KnuOp import knuop
|
|
17
|
+
|
|
18
|
+
##### Constants #####
|
|
19
|
+
|
|
20
|
+
# Prefactor of the Bessel-based moment function P(q): exp(1/4) / sqrt(8*pi)
# (see SHASH.P below, Jones et al.).
CONST1 = np.exp(0.25) / np.power(8.0 * np.pi, 0.5)
# Additive normalization constant of the standard-normal log-density: -log(2*pi)/2.
CONST2 = -np.log(2 * np.pi) / 2
|
|
22
|
+
|
|
23
|
+
##### SHASH Transformations #####
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
def S(x, epsilon, delta):
    """Sinh-arcsinh transformation.

    Args:
        x: input value
        epsilon: parameter for skew
        delta: parameter for kurtosis

    Returns:
        sinh(delta * arcsinh(x) - epsilon)
    """
    warped = np.arcsinh(x) * delta - epsilon
    return np.sinh(warped)
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
def S_inv(x, epsilon, delta):
    """Inverse sinh-arcsinh transformation.

    Args:
        x: input value
        epsilon: parameter for skew
        delta: parameter for kurtosis

    Returns:
        sinh((arcsinh(x) + epsilon) / delta), the inverse of S.
    """
    unwarped = (np.arcsinh(x) + epsilon) / delta
    return np.sinh(unwarped)
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
def C(x, epsilon, delta):
    """Cosh-arcsinh transformation.

    Args:
        x: input value
        epsilon: parameter for skew
        delta: parameter for kurtosis

    Returns:
        cosh(delta * arcsinh(x) - epsilon).

    Note: C(x) = sqrt(1 + S(x)^2).
    """
    warped = np.arcsinh(x) * delta - epsilon
    return np.cosh(warped)
|
|
68
|
+
|
|
69
|
+
|
|
70
|
+
##### SHASH Distributions #####
|
|
71
|
+
|
|
72
|
+
|
|
73
|
+
class SHASHrv(RandomVariable):
    """SHASH RV, described by Jones et al., based on a standard normal distribution."""

    name = "shash"
    # Two scalar parameters (epsilon, delta) -> scalar draw.
    signature = "(),()->()"
    dtype = "floatX"
    _print_name = ("SHASH", "\\operatorname{SHASH}")

    @classmethod
    def rng_fn(cls, rng, epsilon, delta, size=None):
        """Draw random samples from SHASH distribution.

        Args:
            rng: Random number generator
            epsilon: skew parameter
            delta: kurtosis parameter
            size: sample size. Defaults to None.

        Returns:
            Random samples from SHASH distribution
        """
        # Inverse sinh-arcsinh transform of a standard-normal draw
        # (same formula as S_inv above).
        return np.sinh(
            (np.arcsinh(rng.normal(loc=0, scale=1, size=size)) + epsilon) / delta
        )
|
|
97
|
+
|
|
98
|
+
|
|
99
|
+
# Module-level RandomVariable instance used as SHASH.rv_op.
shash = SHASHrv()
|
|
100
|
+
|
|
101
|
+
|
|
102
|
+
class SHASH(Continuous):
    """
    SHASH distribution described by Jones et al., based on a standard normal distribution.
    """

    rv_op = shash

    # Elemwise-wrapped modified Bessel function of the second kind (K_nu),
    # used by the moment helper P below.
    my_K = Elemwise(knuop)

    @staticmethod
    @lru_cache(maxsize=128)
    def P(q):
        """The P function as given in Jones et al.

        Args:
            q: input parameter for the P function

        Returns:
            Result of the P function computation

        Note: lru_cache keys symbolic tensor arguments by object identity,
        so it only avoids rebuilding identical graph fragments.
        """
        K1 = SHASH.my_K((q + 1) / 2, 0.25)
        K2 = SHASH.my_K((q - 1) / 2, 0.25)
        a = (K1 + K2) * CONST1
        return a

    @staticmethod
    def m1(epsilon, delta):
        """The first moment of the SHASH distribution parametrized by epsilon and delta.

        Args:
            epsilon: skew parameter
            delta: kurtosis parameter

        Returns:
            First moment of the SHASH distribution
        """
        return np.sinh(epsilon / delta) * SHASH.P(1 / delta)

    @staticmethod
    def m2(epsilon, delta):
        """The second (raw) moment of the SHASH distribution parametrized by epsilon and delta.

        Args:
            epsilon: skew parameter
            delta: kurtosis parameter

        Returns:
            Second raw moment of the SHASH distribution
        """
        return (np.cosh(2 * epsilon / delta) * SHASH.P(2 / delta) - 1) / 2

    @staticmethod
    def m1m2(epsilon, delta):
        """Compute both first and second moments together to avoid redundant calculations.

        Args:
            epsilon: skew parameter
            delta: kurtosis parameter

        Returns:
            Tuple containing (mean, variance) of the SHASH distribution
        """
        inv_delta = 1.0 / delta
        two_inv_delta = 2.0 * inv_delta

        # Compute P values once
        p1 = SHASH.P(inv_delta)
        p2 = SHASH.P(two_inv_delta)

        # Compute trig terms once
        eps_delta = epsilon / delta
        sinh_eps_delta = np.sinh(eps_delta)
        cosh_2eps_delta = np.cosh(2 * eps_delta)

        # mean = E[X]; var = E[X^2] - E[X]^2
        mean = sinh_eps_delta * p1
        raw_second = (cosh_2eps_delta * p2 - 1) / 2
        var = raw_second - mean**2
        return mean, var

    @classmethod
    def dist(cls, epsilon, delta, **kwargs):
        """Return a SHASH distribution.

        Args:
            epsilon: skew parameter
            delta: kurtosis parameter
            **kwargs: Additional arguments passed to the distribution

        Returns:
            A SHASH distribution
        """
        epsilon = as_tensor_variable(floatX(epsilon))
        delta = as_tensor_variable(floatX(delta))
        return super().dist([epsilon, delta], **kwargs)

    def logp(value, epsilon, delta):
        """Log-probability of the SHASH distribution.

        Args:
            value: value to evaluate the log-probability at
            epsilon: skew parameter
            delta: kurtosis parameter

        Returns:
            Log-probability of the SHASH distribution
        """
        this_S = S(value, epsilon, delta)
        # BUGFIX: these two terms previously used np.log where the Jones &
        # Pewsey density requires squares: S(x)^2 in the exponent/C^2 term
        # and log(1 + x^2)/2 in the Jacobian (cf. the correct SHASHb.logp
        # in this module). np.log(this_S) is also undefined for S <= 0.
        this_S_sqr = np.square(this_S)
        this_C_sqr = 1 + this_S_sqr
        frac2 = np.log(delta) + np.log(this_C_sqr) / 2 - np.log(1 + np.square(value)) / 2
        exp = -this_S_sqr / 2
        return CONST2 + frac2 + exp
|
|
215
|
+
|
|
216
|
+
|
|
217
|
+
class SHASHoRV(RandomVariable):
    """SHASHo Random Variable.

    Samples from a SHASHo distribution, which is a SHASH distribution scaled by sigma and translated by mu.
    """

    name = "shasho"
    # Four scalar parameters (mu, sigma, epsilon, delta) -> scalar draw.
    signature = "(),(),(),()->()"
    dtype = "floatX"
    _print_name = ("SHASHo", "\\operatorname{SHASHo}")

    @classmethod
    def rng_fn(cls, rng, mu, sigma, epsilon, delta, size=None):
        """Draw random samples from a SHASHo distribution.

        Args:
            rng: Random number generator
            mu: location parameter
            sigma: scale parameter
            epsilon: skew parameter
            delta: kurtosis parameter
            size: sample size. Defaults to None.

        Returns:
            Random samples from SHASHo distribution
        """
        # Affine (location-scale) transform of a base SHASH draw.
        s = rng.normal(size=size)
        return np.sinh((np.arcsinh(s) + epsilon) / delta) * sigma + mu
|
|
245
|
+
|
|
246
|
+
|
|
247
|
+
# Module-level RandomVariable instance used as SHASHo.rv_op.
shasho = SHASHoRV()
|
|
248
|
+
|
|
249
|
+
|
|
250
|
+
class SHASHo(Continuous):
    """SHASHo distribution, which is a SHASH distribution scaled by sigma and translated by mu."""

    rv_op = shasho

    @classmethod
    def dist(cls, mu, sigma, epsilon, delta, **kwargs):
        """Return a SHASHo distribution.

        Args:
            mu: location parameter
            sigma: scale parameter
            epsilon: skew parameter
            delta: kurtosis parameter
            **kwargs: Additional arguments passed to the distribution

        Returns:
            A SHASHo distribution
        """
        mu = as_tensor_variable(floatX(mu))
        sigma = as_tensor_variable(floatX(sigma))
        epsilon = as_tensor_variable(floatX(epsilon))
        delta = as_tensor_variable(floatX(delta))
        return super().dist([mu, sigma, epsilon, delta], **kwargs)

    def logp(value, mu, sigma, epsilon, delta):
        """The log-probability of the SHASHo distribution.

        Args:
            value: value to evaluate the log-probability at
            mu: location parameter
            sigma: scale parameter
            epsilon: skew parameter
            delta: kurtosis parameter

        Returns:
            Log-probability of the SHASHo distribution
        """
        # Base SHASH log-density at the standardized value, minus the
        # log of the scale (change-of-variables for the affine transform).
        remapped_value = (value - mu) / sigma
        this_S = S(remapped_value, epsilon, delta)
        # BUGFIX: previously np.log(this_S) and np.log(1 + np.log(...));
        # the density requires S^2 and log(1 + x^2) (cf. SHASHb.logp).
        this_S_sqr = np.square(this_S)
        this_C_sqr = 1 + this_S_sqr
        frac2 = (
            np.log(delta)
            + np.log(this_C_sqr) / 2
            - np.log(1 + np.square(remapped_value)) / 2
        )
        exp = -this_S_sqr / 2
        return CONST2 + frac2 + exp - np.log(sigma)
|
|
299
|
+
|
|
300
|
+
|
|
301
|
+
class SHASHo2RV(RandomVariable):
    """SHASHo2 Random Variable.

    Samples from a SHASHo2 distribution, which is a SHASH distribution scaled by sigma/delta
    and translated by mu. This variant provides an alternative parameterization where the
    scale parameter is adjusted by the kurtosis parameter.
    """

    name = "shasho2"
    # Four scalar parameters (mu, sigma, epsilon, delta) -> scalar draw.
    signature = "(),(),(),()->()"
    dtype = "floatX"
    _print_name = ("SHASHo2", "\\operatorname{SHASHo2}")

    @classmethod
    def rng_fn(cls, rng, mu, sigma, epsilon, delta, size=None):
        """Draw random samples from SHASHo2 distribution.

        Args:
            rng: Random number generator
            mu: location parameter
            sigma: scale parameter
            epsilon: skew parameter
            delta: kurtosis parameter
            size: sample size. Defaults to None.

        Returns:
            Random samples from SHASHo2 distribution
        """
        s = rng.normal(size=size)
        # Effective scale is sigma/delta (the parameterization difference
        # from SHASHo).
        sigma_d = sigma / delta
        return np.sinh((np.arcsinh(s) + epsilon) / delta) * sigma_d + mu
|
|
332
|
+
|
|
333
|
+
|
|
334
|
+
# Module-level RandomVariable instance used as SHASHo2.rv_op.
shasho2 = SHASHo2RV()
|
|
335
|
+
|
|
336
|
+
|
|
337
|
+
class SHASHo2(Continuous):
    """SHASHo2 distribution, which is a SHASH distribution scaled by sigma/delta and translated by mu.

    This distribution provides an alternative parameterization of the SHASH distribution where
    the scale parameter is adjusted by the kurtosis parameter. This can be useful in scenarios
    where the relationship between scale and kurtosis needs to be explicitly modeled.
    """

    rv_op = shasho2

    @classmethod
    def dist(cls, mu, sigma, epsilon, delta, **kwargs):
        """Return a SHASHo2 distribution.

        Args:
            mu: location parameter
            sigma: scale parameter
            epsilon: skew parameter
            delta: kurtosis parameter
            **kwargs: Additional arguments passed to the distribution

        Returns:
            A SHASHo2 distribution
        """
        mu = as_tensor_variable(floatX(mu))
        sigma = as_tensor_variable(floatX(sigma))
        epsilon = as_tensor_variable(floatX(epsilon))
        delta = as_tensor_variable(floatX(delta))
        return super().dist([mu, sigma, epsilon, delta], **kwargs)

    def logp(value, mu, sigma, epsilon, delta):
        """The log-probability of the SHASHo2 distribution.

        Args:
            value: value to evaluate the log-probability at
            mu: location parameter
            sigma: scale parameter
            epsilon: skew parameter
            delta: kurtosis parameter

        Returns:
            Log-probability of the SHASHo2 distribution
        """
        # Effective scale is sigma/delta; otherwise identical to SHASHo.logp.
        sigma_d = sigma / delta
        remapped_value = (value - mu) / sigma_d
        this_S = S(remapped_value, epsilon, delta)
        # BUGFIX: previously np.log(this_S) and np.log(1 + np.log(...));
        # the density requires S^2 and log(1 + x^2) (cf. SHASHb.logp).
        this_S_sqr = np.square(this_S)
        this_C_sqr = 1 + this_S_sqr
        frac2 = (
            np.log(delta)
            + np.log(this_C_sqr) / 2
            - np.log(1 + np.square(remapped_value)) / 2
        )
        exp = -this_S_sqr / 2
        return CONST2 + frac2 + exp - np.log(sigma_d)
|
|
392
|
+
|
|
393
|
+
|
|
394
|
+
class SHASHbRV(RandomVariable):
    """SHASHb Random Variable.

    Samples from a SHASHb distribution, which is a standardized SHASH distribution scaled by sigma
    and translated by mu. This variant provides a standardized version of the SHASH distribution
    where the base distribution is normalized to have zero mean and unit variance before applying
    the location and scale transformations.
    """

    name = "shashb"
    # Four scalar parameters (mu, sigma, epsilon, delta) -> scalar draw.
    signature = "(),(),(),()->()"
    dtype = "floatX"
    _print_name = ("SHASHb", "\\operatorname{SHASHb}")

    @classmethod
    def rng_fn(cls, rng, mu, sigma, epsilon, delta, size=None):
        """Draw random samples from SHASHb distribution.

        Args:
            rng: Random number generator
            mu: location parameter
            sigma: scale parameter
            epsilon: skew parameter
            delta: kurtosis parameter
            size: sample size. Defaults to None.

        Returns:
            Random samples from SHASHb distribution
        """
        s = rng.normal(size=size)

        # Numeric twin of SHASH.P: parameters are concrete numbers here,
        # so scipy's kv is used directly instead of the Elemwise op.
        def P(q):
            K1 = kv((q + 1) / 2, 0.25)
            K2 = kv((q - 1) / 2, 0.25)
            a = (K1 + K2) * CONST1
            return a

        # Numeric twin of SHASH.m1m2: mean and variance of the base SHASH.
        def m1m2(epsilon, delta):
            inv_delta = 1.0 / delta
            two_inv_delta = 2.0 * inv_delta
            p1 = P(inv_delta)
            p2 = P(two_inv_delta)
            eps_delta = epsilon / delta
            sinh_eps_delta = np.sinh(eps_delta)
            cosh_2eps_delta = np.cosh(2 * eps_delta)
            mean = sinh_eps_delta * p1
            raw_second = (cosh_2eps_delta * p2 - 1) / 2
            var = raw_second - mean**2
            return mean, var

        mean, var = m1m2(epsilon, delta)
        # Standardize the base SHASH draw to zero mean / unit variance,
        # then apply the affine location-scale transform.
        out = (
            (np.sinh((np.arcsinh(s) + epsilon) / delta) - mean) / np.sqrt(var)
        ) * sigma + mu
        return out
|
|
449
|
+
|
|
450
|
+
|
|
451
|
+
# Module-level RandomVariable instance used as SHASHb.rv_op.
shashb = SHASHbRV()
|
|
452
|
+
|
|
453
|
+
|
|
454
|
+
class SHASHb(Continuous):
    """SHASHb distribution, which is a standardized SHASH distribution scaled by sigma and translated by mu.

    The base SHASH distribution is first normalized to zero mean and unit
    variance (using the analytic moments from SHASH.m1m2), then shifted by
    mu and scaled by sigma. This decorrelates the parameters, which helps
    MCMC sampling.
    """

    rv_op = shashb

    @classmethod
    def dist(cls, mu, sigma, epsilon, delta, **kwargs):
        """Return a SHASHb distribution.

        Args:
            mu: location parameter
            sigma: scale parameter
            epsilon: skew parameter
            delta: kurtosis parameter
            **kwargs: Additional arguments passed to the distribution

        Returns:
            A SHASHb distribution
        """
        params = [
            as_tensor_variable(floatX(p)) for p in (mu, sigma, epsilon, delta)
        ]
        return super().dist(params, **kwargs)

    def logp(value, mu, sigma, epsilon, delta):
        """The log-probability of the SHASHb distribution.

        Args:
            value: value to evaluate the log-probability at
            mu: location parameter
            sigma: scale parameter
            epsilon: skew parameter
            delta: kurtosis parameter

        Returns:
            Log-probability of the SHASHb distribution
        """
        # Invert the standardization: map value back to base-SHASH space.
        mean, var = SHASH.m1m2(epsilon, delta)
        base = ((value - mu) / sigma) * np.sqrt(var) + mean
        s_sqr = np.square(S(base, epsilon, delta))
        # log|Jacobian| of the sinh-arcsinh transform at the base point.
        log_jac = (
            np.log(delta)
            + np.log(1 + s_sqr) / 2
            - np.log(1 + np.square(base)) / 2
        )
        # Normal log-density term plus the change-of-variables corrections
        # for the standardization (log(var)/2 - log(sigma)).
        return CONST2 + log_jac - s_sqr / 2 + np.log(var) / 2 - np.log(sigma)
|