homa 0.0.1__py3-none-any.whl → 0.0.19__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- homa/activations/__init__.py +2 -0
- homa/activations/classes/APLU.py +48 -0
- homa/activations/classes/GALU.py +51 -0
- homa/activations/classes/MELU.py +50 -0
- homa/activations/classes/PDELU.py +39 -0
- homa/activations/classes/SReLU.py +49 -0
- homa/activations/classes/SmallGALU.py +39 -0
- homa/activations/classes/StochasticActivation.py +20 -0
- homa/activations/classes/WideMELU.py +61 -0
- homa/activations/classes/__init__.py +8 -0
- homa/activations/utils.py +27 -0
- homa/cli/HomaCommand.py +12 -0
- homa/cli/namespaces/CacheNamespace.py +29 -0
- homa/cli/namespaces/MakeNamespace.py +18 -0
- homa/cli/namespaces/__init__.py +2 -0
- homa/ensemble/Ensemble.py +18 -0
- homa/ensemble/__init__.py +1 -0
- homa/ensemble/concerns/CalculatesMetricNecessities.py +20 -0
- homa/ensemble/concerns/HasNetwork.py +5 -0
- homa/ensemble/concerns/HasStateDicts.py +8 -0
- homa/ensemble/concerns/PredictsProbabilities.py +11 -0
- homa/ensemble/concerns/RecordsStateDictionaries.py +23 -0
- homa/ensemble/concerns/ReportsClassificationMetrics.py +13 -0
- homa/ensemble/concerns/ReportsEnsembleAccuracy.py +10 -0
- homa/ensemble/concerns/ReportsEnsembleF1.py +10 -0
- homa/ensemble/concerns/ReportsEnsembleKappa.py +10 -0
- homa/ensemble/concerns/ReportsLogits.py +13 -0
- homa/ensemble/concerns/ReportsSize.py +11 -0
- homa/ensemble/concerns/__init__.py +10 -0
- homa/torch/Module.py +8 -0
- homa/torch/__init__.py +2 -0
- homa/torch/helpers.py +6 -0
- homa/utils.py +2 -0
- homa/vision/Model.py +2 -0
- homa/vision/Resnet.py +12 -0
- homa/vision/StochasticResnet.py +8 -0
- homa/vision/__init__.py +3 -0
- homa/vision/concerns/Trainable.py +26 -0
- homa/vision/concerns/__init__.py +1 -0
- homa/vision/modules/ResnetModule.py +23 -0
- homa/vision/modules/StochasticResnetModule.py +9 -0
- homa/vision/modules/__init__.py +2 -0
- homa/vision/utils.py +21 -0
- homa-0.0.19.dist-info/METADATA +21 -0
- homa-0.0.19.dist-info/RECORD +51 -0
- homa-0.0.19.dist-info/entry_points.txt +2 -0
- homa-0.0.1.dist-info/METADATA +0 -12
- homa-0.0.1.dist-info/RECORD +0 -7
- {homa-0.0.1.dist-info → homa-0.0.19.dist-info}/WHEEL +0 -0
- {homa-0.0.1.dist-info → homa-0.0.19.dist-info}/top_level.txt +0 -0
homa/activations/classes/APLU.py
ADDED
@@ -0,0 +1,48 @@
+import torch
+
+
+class APLU(torch.nn.Module):
+    def __init__(self, max_input: float = 1.0):
+        super(APLU, self).__init__()
+        self.max_input = max_input
+        self.alpha = None
+        self.beta = None
+        self.gamma = None
+        self.xi = None
+        self.psi = None
+        self.mu = None
+        self._num_channels = None
+
+    def _initialize_parameters(self, x):
+        if x.ndim < 2:
+            raise ValueError(
+                f"Input tensor must have at least 2 dimensions (N, C), but got shape {x.shape}"
+            )
+
+        num_channels = x.shape[1]
+        self._num_channels = num_channels
+
+        param_shape = [1] * x.ndim
+        param_shape[1] = num_channels
+
+        self.alpha = torch.nn.Parameter(torch.zeros(param_shape))
+        self.beta = torch.nn.Parameter(torch.zeros(param_shape))
+        self.gamma = torch.nn.Parameter(torch.zeros(param_shape))
+
+        self.xi = torch.nn.Parameter(self.max_input * torch.rand(param_shape))
+        self.psi = torch.nn.Parameter(self.max_input * torch.rand(param_shape))
+        self.mu = torch.nn.Parameter(self.max_input * torch.rand(param_shape))
+
+    def forward(self, x):
+        if self.alpha is None:
+            self._initialize_parameters(x)
+
+        a = torch.relu(x)
+
+        # following are called hinges
+        b = self.alpha * torch.relu(-x + self.xi)
+        c = self.beta * torch.relu(-x + self.psi)
+        d = self.gamma * torch.relu(-x + self.mu)
+        z = a + b + c + d
+
+        return z
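A minimal sketch of the lazy initialization in action (the tensor shapes are illustrative assumptions, importing straight from the module file):

```py
import torch
from homa.activations.classes.APLU import APLU

aplu = APLU(max_input=1.0)
x = torch.randn(8, 16, 32, 32)  # assumed (N, C, H, W) feature maps

out = aplu(x)                   # first call materializes one parameter per channel
print(out.shape)                # torch.Size([8, 16, 32, 32])
print(aplu.alpha.shape)         # torch.Size([1, 16, 1, 1]), broadcast over N, H, W
```

Because the parameters only exist after the first forward pass, an optimizer built from `aplu.parameters()` before that call will not see them; running one dummy batch through the module first is a simple workaround.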
homa/activations/classes/GALU.py
ADDED
@@ -0,0 +1,51 @@
+import torch
+
+
+class GALU(torch.nn.Module):
+    def __init__(self, max_input: float = 1.0):
+        super(GALU, self).__init__()
+        if max_input <= 0:
+            raise ValueError("max_input must be positive.")
+        self.max_input = max_input
+        self.alpha = None
+        self.beta = None
+        self.gamma = None
+        self.delta = None
+        self._num_channels = None
+
+    def _initialize_parameters(self, x):
+        if x.ndim < 2:
+            raise ValueError(
+                f"Input tensor must have at least 2 dimensions (N, C), but got shape {x.shape}"
+            )
+
+        num_channels = x.shape[1]
+        self._num_channels = num_channels
+        param_shape = [1] * x.ndim
+        param_shape[1] = num_channels
+        self.alpha = torch.nn.Parameter(torch.zeros(param_shape))
+        self.beta = torch.nn.Parameter(torch.zeros(param_shape))
+        self.gamma = torch.nn.Parameter(torch.zeros(param_shape))
+        self.delta = torch.nn.Parameter(torch.zeros(param_shape))
+
+    def forward(self, x):
+        if self.alpha is None:
+            self._initialize_parameters(x)
+
+        zero = torch.tensor(0.0, device=x.device, dtype=x.dtype)
+        x_norm = x / self.max_input
+        part_prelu = torch.relu(x_norm) + self.alpha * torch.min(x_norm, zero)
+        part_beta = self.beta * (
+            torch.relu(1.0 - torch.abs(x_norm - 1.0))
+            + torch.min(torch.abs(x_norm - 3.0) - 1.0, zero)
+        )
+        part_gamma = self.gamma * (
+            torch.relu(0.5 - torch.abs(x_norm - 0.5))
+            + torch.min(torch.abs(x_norm - 1.5) - 0.5, zero)
+        )
+        part_delta = self.delta * (
+            torch.relu(0.5 - torch.abs(x_norm - 2.5))
+            + torch.min(torch.abs(x_norm - 3.5) - 0.5, zero)
+        )
+        z = part_prelu + part_beta + part_gamma + part_delta
+        return z * self.max_input
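One property worth noting: all four parameter tensors start at zero, so the only surviving term at initialization is `max_input * relu(x / max_input)`, which equals `relu(x)`. A quick check as a sketch:

```py
import torch
from homa.activations.classes.GALU import GALU

galu = GALU(max_input=2.0)
x = torch.randn(4, 3, 8, 8)

# with alpha = beta = gamma = delta = 0, a freshly constructed GALU is exactly ReLU
assert torch.allclose(galu(x), torch.relu(x))
```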
homa/activations/classes/MELU.py
ADDED
@@ -0,0 +1,50 @@
+import torch
+
+
+class MELU(torch.nn.Module):
+    def __init__(self, maxInput: float = 1.0):
+        super().__init__()
+        self.maxInput = float(maxInput)
+        self.alpha = None
+        self.beta = None
+        self.gamma = None
+        self.delta = None
+        self.xi = None
+        self.psi = None
+        self._initialized = False
+
+    def _initialize_parameters(self, X: torch.Tensor):
+        if X.dim() != 4:
+            raise ValueError(
+                f"Expected 4D input (B, C, H, W), but got {X.dim()}D input."
+            )
+        num_channels = X.shape[1]
+        shape = (1, num_channels, 1, 1)
+        self.alpha = torch.nn.Parameter(torch.zeros(shape))
+        self.beta = torch.nn.Parameter(torch.zeros(shape))
+        self.gamma = torch.nn.Parameter(torch.zeros(shape))
+        self.delta = torch.nn.Parameter(torch.zeros(shape))
+        self.xi = torch.nn.Parameter(torch.zeros(shape))
+        self.psi = torch.nn.Parameter(torch.zeros(shape))
+        self._initialized = True
+
+    def forward(self, X: torch.Tensor) -> torch.Tensor:
+        if not self._initialized:
+            self._initialize_parameters(X)
+        X_norm = X / self.maxInput
+        Y = torch.roll(X_norm, shifts=-1, dims=1)
+        term1 = torch.relu(X_norm)
+        term2 = self.alpha * torch.clamp(X_norm, max=0)
+        dist_sq_beta = (X_norm - 2) ** 2 + (Y - 2) ** 2
+        dist_sq_gamma = (X_norm - 1) ** 2 + (Y - 1) ** 2
+        dist_sq_delta = (X_norm - 1) ** 2 + (Y - 3) ** 2
+        dist_sq_xi = (X_norm - 3) ** 2 + (Y - 1) ** 2
+        dist_sq_psi = (X_norm - 3) ** 2 + (Y - 3) ** 2
+        term3 = self.beta * torch.sqrt(torch.relu(2 - dist_sq_beta))
+        term4 = self.gamma * torch.sqrt(torch.relu(1 - dist_sq_gamma))
+        term5 = self.delta * torch.sqrt(torch.relu(1 - dist_sq_delta))
+        term6 = self.xi * torch.sqrt(torch.relu(1 - dist_sq_xi))
+        term7 = self.psi * torch.sqrt(torch.relu(1 - dist_sq_psi))
+        Z_norm = term1 + term2 + term3 + term4 + term5 + term6 + term7
+        Z = Z_norm * self.maxInput
+        return Z
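The `torch.roll` line is the distinctive part of MELU: each channel is paired with its successor (wrapping around), so the activation is a function of two channels at once rather than pointwise. A short sketch with illustrative shapes:

```py
import torch
from homa.activations.classes.MELU import MELU

melu = MELU(maxInput=1.0)
x = torch.randn(2, 4, 8, 8)   # 4D (B, C, H, W) input is required

out = melu(x)                 # channel c is activated jointly with channel (c + 1) % 4
print(melu.alpha.shape)       # torch.Size([1, 4, 1, 1])
```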
homa/activations/classes/PDELU.py
ADDED
@@ -0,0 +1,39 @@
+import torch
+
+
+class PDELU(torch.nn.Module):
+    def __init__(self, theta: float = 0.5):
+        super(PDELU, self).__init__()
+        if theta == 1.0:
+            raise ValueError(
+                "theta cannot be 1.0, as it would cause a division by zero."
+            )
+        self.theta = theta
+        self._power_val = 1.0 / (1.0 - self.theta)
+        self.alpha = torch.nn.UninitializedParameter()
+        self._num_channels = None
+
+    def _initialize_parameters(self, x: torch.Tensor):
+        if x.ndim < 2:
+            raise ValueError(
+                f"Input tensor must have at least 2 dimensions (N, C), but got shape {x.shape}"
+            )
+
+        num_channels = x.shape[1]
+        self._num_channels = num_channels
+        param_shape = [1] * x.ndim
+        param_shape[1] = num_channels
+        init_tensor = torch.zeros(param_shape) + 0.1
+        self.alpha = torch.nn.Parameter(init_tensor)
+
+    def forward(self, x: torch.Tensor):
+        if isinstance(self.alpha, torch.nn.UninitializedParameter):
+            self._initialize_parameters(x)
+
+        zero = torch.tensor(0.0, device=x.device, dtype=x.dtype)
+        positive_part = torch.relu(x)
+        inner_term = torch.relu(1.0 + (1.0 - self.theta) * x)
+        powered_term = torch.pow(inner_term, self._power_val)
+        subtracted_term = powered_term - 1.0
+        negative_part = self.alpha * torch.min(subtracted_term, zero)
+        return positive_part + negative_part
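The negative branch computes `alpha * min((1 + (1 - theta) * x)^(1 / (1 - theta)) - 1, 0)`, which approaches the ELU form `alpha * (exp(x) - 1)` as theta approaches 1. A small numerical sanity check (shapes and values are illustrative):

```py
import torch
from homa.activations.classes.PDELU import PDELU

pdelu = PDELU(theta=0.999)             # theta near 1 approximates ELU
x = torch.linspace(-3.0, 0.0, 5).view(1, 5)
negative_branch = pdelu(x)             # relu(x) is 0 here, so only the alpha term remains

# alpha is initialized to 0.1, and (1 + (1 - theta) * x)^(1 / (1 - theta)) -> exp(x)
print(negative_branch)
print(0.1 * (torch.exp(x) - 1))        # nearly identical values
```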
homa/activations/classes/SReLU.py
ADDED
@@ -0,0 +1,49 @@
+import torch
+
+
+class SReLU(torch.nn.Module):
+    def __init__(
+        self,
+        alpha_init: float = 0.0,
+        beta_init: float = 0.0,
+        gamma_init: float = 1.0,
+        delta_init: float = 1.0,
+    ):
+        super().__init__()
+        self.alpha_init_val = alpha_init
+        self.beta_init_val = beta_init
+        self.gamma_init_val = gamma_init
+        self.delta_init_val = delta_init
+        self.alpha = torch.nn.UninitializedParameter()
+        self.beta = torch.nn.UninitializedParameter()
+        self.gamma = torch.nn.UninitializedParameter()
+        self.delta = torch.nn.UninitializedParameter()
+
+    def _initialize_parameters(self, x: torch.Tensor):
+        if isinstance(self.alpha, torch.nn.UninitializedParameter):
+            if x.dim() < 2:
+                raise ValueError(
+                    f"Input tensor must have at least 2 dimensions (N, C), but got {x.dim()}"
+                )
+
+            num_channels = x.shape[1]
+            param_shape = [1] * x.dim()
+            param_shape[1] = num_channels
+            self.alpha = torch.nn.Parameter(
+                torch.full(param_shape, self.alpha_init_val)
+            )
+            self.beta = torch.nn.Parameter(torch.full(param_shape, self.beta_init_val))
+            self.gamma = torch.nn.Parameter(
+                torch.full(param_shape, self.gamma_init_val)
+            )
+            self.delta = torch.nn.Parameter(
+                torch.full(param_shape, self.delta_init_val)
+            )
+
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        self._initialize_parameters(x)
+        start = self.beta + self.alpha * (x - self.beta)
+        finish = self.delta + self.gamma * (x - self.delta)
+        out = torch.where(x < self.beta, start, x)
+        out = torch.where(x > self.delta, finish, out)
+        return out
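SReLU is piecewise linear with two learnable thresholds (`beta` below, `delta` above) and two learnable outer slopes (`alpha`, `gamma`); between the thresholds it is the identity. With the default initialization it starts out as plain ReLU, which this sketch verifies:

```py
import torch
from homa.activations.classes.SReLU import SReLU

srelu = SReLU()          # alpha=0, beta=0, gamma=1, delta=1
x = torch.randn(4, 8)

# below beta: beta + alpha * (x - beta) = 0; above delta: delta + gamma * (x - delta) = x
assert torch.allclose(srelu(x), torch.relu(x))
```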
homa/activations/classes/SmallGALU.py
ADDED
@@ -0,0 +1,39 @@
+import torch
+
+
+class SmallGALU(torch.nn.Module):
+    def __init__(self, max_input: float = 1.0):
+        super(SmallGALU, self).__init__()
+        if max_input <= 0:
+            raise ValueError("max_input must be positive.")
+        self.max_input = max_input
+        self.alpha = None
+        self.beta = None
+        self._num_channels = None
+
+    def _initialize_parameters(self, x):
+        if x.ndim < 2:
+            raise ValueError(
+                f"Input tensor must have at least 2 dimensions (N, C), but got shape {x.shape}"
+            )
+
+        num_channels = x.shape[1]
+        self._num_channels = num_channels
+        param_shape = [1] * x.ndim
+        param_shape[1] = num_channels
+        self.alpha = torch.nn.Parameter(torch.zeros(param_shape))
+        self.beta = torch.nn.Parameter(torch.zeros(param_shape))
+
+    def forward(self, x):
+        if self.alpha is None:
+            self._initialize_parameters(x)
+
+        zero = torch.tensor(0.0, device=x.device, dtype=x.dtype)
+        x_norm = x / self.max_input
+        part_prelu = torch.relu(x_norm) + self.alpha * torch.min(x_norm, zero)
+        part_beta = self.beta * (
+            torch.relu(1.0 - torch.abs(x_norm - 1.0))
+            + torch.min(torch.abs(x_norm - 3.0) - 1.0, zero)
+        )
+        z = part_prelu + part_beta
+        return z * self.max_input
homa/activations/classes/StochasticActivation.py
ADDED
@@ -0,0 +1,20 @@
+import torch
+import random
+
+from .APLU import APLU
+from .GALU import GALU
+from .SmallGALU import SmallGALU
+from .MELU import MELU
+from .WideMELU import WideMELU
+from .PDELU import PDELU
+from .SReLU import SReLU
+
+
+class StochasticActivation(torch.nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.gate = random.choice([APLU, GALU, SmallGALU, MELU, WideMELU, PDELU, SReLU])
+        self.gate = self.gate()
+
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        return self.gate(x)
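The random draw happens once, in `__init__`: each `StochasticActivation` instance commits to a single activation class for its lifetime, so a network that swaps many ReLUs for this module ends up with a fixed random mixture rather than per-forward randomness. For instance:

```py
from homa.activations.classes.StochasticActivation import StochasticActivation

gates = [StochasticActivation() for _ in range(5)]
print([type(g.gate).__name__ for g in gates])
# e.g. ['MELU', 'SReLU', 'GALU', 'APLU', 'MELU']; each module is fixed after construction
```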
homa/activations/classes/WideMELU.py
ADDED
@@ -0,0 +1,61 @@
+import torch
+
+
+class WideMELU(torch.nn.Module):
+    def __init__(self, maxInput: float = 1.0):
+        super().__init__()
+        self.maxInput = float(maxInput)
+        self.alpha = None
+        self.beta = None
+        self.gamma = None
+        self.delta = None
+        self.xi = None
+        self.psi = None
+        self.theta = None
+        self.lam = None
+        self._initialized = False
+
+    def _initialize_parameters(self, X: torch.Tensor):
+        if X.dim() != 4:
+            raise ValueError(
+                f"Expected 4D input (B, C, H, W), but got {X.dim()}D input."
+            )
+
+        num_channels = X.shape[1]
+        shape = (1, num_channels, 1, 1)
+
+        self.alpha = torch.nn.Parameter(torch.zeros(shape))
+        self.beta = torch.nn.Parameter(torch.zeros(shape))
+        self.gamma = torch.nn.Parameter(torch.zeros(shape))
+        self.delta = torch.nn.Parameter(torch.zeros(shape))
+        self.xi = torch.nn.Parameter(torch.zeros(shape))
+        self.psi = torch.nn.Parameter(torch.zeros(shape))
+        self.theta = torch.nn.Parameter(torch.zeros(shape))
+        self.lam = torch.nn.Parameter(torch.zeros(shape))
+        self._initialized = True
+
+    def forward(self, X: torch.Tensor) -> torch.Tensor:
+        if not self._initialized:
+            self._initialize_parameters(X)
+        X_norm = X / self.maxInput
+        Y = torch.roll(X_norm, shifts=-1, dims=1)
+        term1 = torch.relu(X_norm)
+        term2 = self.alpha * torch.clamp(X_norm, max=0)
+        dist_sq_beta = (X_norm - 2) ** 2 + (Y - 2) ** 2
+        dist_sq_gamma = (X_norm - 1) ** 2 + (Y - 1) ** 2
+        dist_sq_delta = (X_norm - 1) ** 2 + (Y - 3) ** 2
+        dist_sq_xi = (X_norm - 3) ** 2 + (Y - 1) ** 2
+        dist_sq_psi = (X_norm - 3) ** 2 + (Y - 3) ** 2
+        dist_sq_theta = (X_norm - 1) ** 2 + (Y - 2) ** 2
+        dist_sq_lambda = (X_norm - 3) ** 2 + (Y - 2) ** 2
+
+        term3 = self.beta * torch.sqrt(torch.relu(2 - dist_sq_beta))
+        term4 = self.gamma * torch.sqrt(torch.relu(1 - dist_sq_gamma))
+        term5 = self.delta * torch.sqrt(torch.relu(1 - dist_sq_delta))
+        term6 = self.xi * torch.sqrt(torch.relu(1 - dist_sq_xi))
+        term7 = self.psi * torch.sqrt(torch.relu(1 - dist_sq_psi))
+        term8 = self.theta * torch.sqrt(torch.relu(1 - dist_sq_theta))
+        term9 = self.lam * torch.sqrt(torch.relu(1 - dist_sq_lambda))
+        Z_norm = term1 + term2 + term3 + term4 + term5 + term6 + term7 + term8 + term9
+        Z = Z_norm * self.maxInput
+        return Z
homa/activations/utils.py
ADDED
@@ -0,0 +1,27 @@
+import torch
+
+
+def negative_part(x):
+    return torch.minimum(x, torch.zeros_like(x))
+
+
+def positive_part(x):
+    return torch.maximum(x, torch.zeros_like(x))
+
+
+def as_channel_parameters(parameter: torch.Tensor, x: torch.Tensor):
+    shape = [1] * x.dim()
+    shape[1] = -1
+    return parameter.view(*shape)
+
+
+def device_compatibility_check(model, x: torch.Tensor):
+    for p in model.parameters():
+        if p.device != x.device or p.dtype != x.dtype:
+            p.data = p.data.to(device=x.device, dtype=x.dtype)
+
+
+def phi_hat(x, a, lam):
+    term_pos = torch.maximum(lam - torch.abs(x - a), torch.zeros_like(x))
+    term_neg = torch.minimum(torch.abs(x - (a + 2 * lam)) - lam, torch.zeros_like(x))
+    return term_pos + term_neg
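`phi_hat` is the triangular "hat" bump that the GALU family writes out longhand: a positive triangle of height `lam` peaking at `a`, followed by a mirrored negative triangle dipping at `a + 2 * lam`. A quick equivalence check against the beta term of `GALU.forward` (values are illustrative):

```py
import torch
from homa.activations.utils import phi_hat

x = torch.linspace(0.0, 4.0, 9)

# GALU's beta term, written out longhand, is phi_hat with a=1, lam=1
# (its gamma and delta terms are phi_hat with lam=0.5 at a=0.5 and a=2.5)
beta_term = torch.relu(1.0 - torch.abs(x - 1.0)) + torch.minimum(
    torch.abs(x - 3.0) - 1.0, torch.zeros_like(x)
)
assert torch.allclose(beta_term, phi_hat(x, 1.0, 1.0))
```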
homa/cli/HomaCommand.py
ADDED
homa/cli/namespaces/CacheNamespace.py
ADDED
@@ -0,0 +1,29 @@
+import shutil
+import sys
+from pathlib import Path
+
+
+class CacheNamespace:
+    def clear(self):
+        root = Path.cwd()
+        removed = 0
+        errors: list[str] = []
+
+        for candidate in root.rglob("__pycache__"):
+            if not candidate.is_dir():
+                continue
+            try:
+                shutil.rmtree(candidate)
+                removed += 1
+            except OSError as exc:
+                errors.append(f"{candidate}: {exc}")
+
+        if errors:
+            print("Failed to remove the following paths:", file=sys.stderr)
+            for error in errors:
+                print(f"  - {error}", file=sys.stderr)
+            return 1
+
+        return (
+            f"Removed {removed} __pycache__ director{'ies' if removed != 1 else 'y'}."
+        )
homa/cli/namespaces/MakeNamespace.py
ADDED
@@ -0,0 +1,18 @@
+from pathlib import Path
+
+
+class MakeNamespace:
+    def trait(self, name: str):
+        class_name = name.split(".")[-1]
+        file = name.replace(".", "/") + ".py"
+        path = Path(file)
+        parent = path.parent
+        parent.mkdir(parents=True, exist_ok=True)
+        path.touch()
+
+        # read the trait template and fill in the class name
+        current_path = Path(__file__).parent.parent.resolve()
+        template_path = current_path / "templates" / "trait.txt"
+        content = template_path.read_text()
+        content = content.replace("{{CLASS}}", class_name)
+        path.write_text(content)
homa/ensemble/Ensemble.py
ADDED
@@ -0,0 +1,18 @@
+from .concerns import (
+    ReportsSize,
+    RecordsStateDictionaries,
+    ReportsClassificationMetrics,
+    HasNetwork,
+    PredictsProbabilities,
+)
+
+
+class Ensemble(
+    ReportsSize,
+    ReportsClassificationMetrics,
+    RecordsStateDictionaries,
+    PredictsProbabilities,
+    HasNetwork,
+):
+    def __init__(self):
+        super().__init__()
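A hedged sketch of the intended flow. The bodies of `HasNetwork`, `HasStateDicts`, and `ReportsSize` are not shown in this diff, and `metric_necessities` iterates a `self.models` collection that one of them is presumably responsible for building, so treat this as an assumption-laden illustration rather than confirmed API:

```py
from homa.ensemble import Ensemble

ensemble = Ensemble()
for model in trained_models:          # hypothetical list of trained homa.vision models
    ensemble.record(model)            # stores a deep-copied network plus a state dict

predictions, labels = ensemble.metric_necessities(test_loader)  # test_loader assumed
print(ensemble.accuracy(test_loader), ensemble.f1(test_loader))
```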
homa/ensemble/__init__.py
ADDED
@@ -0,0 +1 @@
+from .Ensemble import Ensemble
homa/ensemble/concerns/CalculatesMetricNecessities.py
ADDED
@@ -0,0 +1,20 @@
+import torch
+
+
+class CalculatesMetricNecessities:
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+
+    def metric_necessities(self, dataloader):
+        all_predictions = []
+        all_labels = []
+        for x, y in dataloader:
+            batch_logits_list = []
+            for model in self.models:
+                batch_logits_list.append(model(x))
+            all_batch_logits = torch.stack(batch_logits_list)
+            avg_logits = torch.mean(all_batch_logits, dim=0)
+            _, preds = torch.max(avg_logits, 1)
+            all_predictions.extend(preds.cpu().numpy())
+            all_labels.extend(y.cpu().numpy())
+        return all_predictions, all_labels
homa/ensemble/concerns/PredictsProbabilities.py
ADDED
@@ -0,0 +1,11 @@
+import torch
+from .ReportsLogits import ReportsLogits
+
+
+class PredictsProbabilities(ReportsLogits):
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+
+    def predict(self, x: torch.Tensor) -> torch.Tensor:
+        logits = self.logits(x)
+        return torch.nn.functional.softmax(logits, dim=1)
homa/ensemble/concerns/RecordsStateDictionaries.py
ADDED
@@ -0,0 +1,23 @@
+from copy import deepcopy
+from .HasStateDicts import HasStateDicts
+from ...vision import Model
+
+
+class RecordsStateDictionaries(HasStateDicts):
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+
+    def record(self, model: Model):
+        if self.network is None:
+            self.network = deepcopy(model.network)
+
+        self.state_dicts.append(model.network.state_dict())
+
+    def push(self, *args, **kwargs):
+        self.record(*args, **kwargs)
+
+    def append(self, *args, **kwargs):
+        self.record(*args, **kwargs)
+
+    def add(self, *args, **kwargs):
+        self.record(*args, **kwargs)
homa/ensemble/concerns/ReportsClassificationMetrics.py
ADDED
@@ -0,0 +1,13 @@
+from .ReportsEnsembleF1 import ReportsEnsembleF1
+from .ReportsEnsembleAccuracy import ReportsEnsembleAccuracy
+from .ReportsEnsembleKappa import ReportsEnsembleKappa
+from .CalculatesMetricNecessities import CalculatesMetricNecessities
+
+
+class ReportsClassificationMetrics(
+    CalculatesMetricNecessities,
+    ReportsEnsembleF1,
+    ReportsEnsembleAccuracy,
+    ReportsEnsembleKappa,
+):
+    pass
homa/ensemble/concerns/ReportsEnsembleAccuracy.py
ADDED
@@ -0,0 +1,10 @@
+from sklearn.metrics import accuracy_score as accuracy
+
+
+class ReportsEnsembleAccuracy:
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+
+    def accuracy(self, dataloader) -> float:
+        predictions, labels = self.metric_necessities(dataloader)
+        return accuracy(labels, predictions)
homa/ensemble/concerns/ReportsEnsembleF1.py
ADDED
@@ -0,0 +1,10 @@
+from sklearn.metrics import f1_score as f1
+
+
+class ReportsEnsembleF1:
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+
+    def f1(self, dataloader) -> float:
+        predictions, labels = self.metric_necessities(dataloader)
+        return f1(labels, predictions, average="weighted")
homa/ensemble/concerns/ReportsEnsembleKappa.py
ADDED
@@ -0,0 +1,10 @@
+from sklearn.metrics import cohen_kappa_score as kappa
+
+
+class ReportsEnsembleKappa:
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+
+    def kappa(self, dataloader) -> float:
+        predictions, labels = self.metric_necessities(dataloader)
+        return kappa(labels, predictions)
homa/ensemble/concerns/ReportsLogits.py
ADDED
@@ -0,0 +1,13 @@
+import torch
+
+
+class ReportsLogits:
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+
+    def logits(self, x: torch.Tensor) -> torch.Tensor:
+        batch_size = x.shape[0]
+        logits = torch.zeros((batch_size, self.num_classes))
+        for model in self.models:
+            logits += model(x)
+        return logits
homa/ensemble/concerns/__init__.py
ADDED
@@ -0,0 +1,10 @@
+from .CalculatesMetricNecessities import CalculatesMetricNecessities
+from .HasNetwork import HasNetwork
+from .PredictsProbabilities import PredictsProbabilities
+from .RecordsStateDictionaries import RecordsStateDictionaries
+from .ReportsClassificationMetrics import ReportsClassificationMetrics
+from .ReportsEnsembleAccuracy import ReportsEnsembleAccuracy
+from .ReportsEnsembleF1 import ReportsEnsembleF1
+from .ReportsEnsembleKappa import ReportsEnsembleKappa
+from .ReportsLogits import ReportsLogits
+from .ReportsSize import ReportsSize
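Every concern's `__init__` forwards to `super().__init__(*args, **kwargs)`; that cooperative pattern is what lets `Ensemble` inherit from five of them at once. A generic illustration (the names here are not from the package):

```py
class HasItems:
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)  # keep the MRO chain going
        self.items = []


class ReportsCount(HasItems):
    def count(self) -> int:
        return len(self.items)


# each __init__ in the method resolution order runs exactly once
print(ReportsCount().count())  # 0
```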
homa/torch/Module.py
ADDED
homa/torch/__init__.py
ADDED
homa/torch/helpers.py
ADDED
homa/utils.py
ADDED
homa/vision/Model.py
ADDED
homa/vision/Resnet.py
ADDED
@@ -0,0 +1,12 @@
+import torch
+from .modules import ResnetModule
+from .Model import Model
+from .concerns import Trainable
+
+
+class Resnet(Model, Trainable):
+    def __init__(self, num_classes: int, lr: float):
+        super().__init__()
+        self.network = ResnetModule(num_classes)
+        self.criterion = torch.nn.CrossEntropyLoss()
+        self.optimizer = torch.optim.SGD(self.network.parameters(), lr=lr, momentum=0.9)
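A sketch of how `Resnet` and the `Trainable` concern fit together, assuming `homa.vision` re-exports `Resnet` (its three-line `__init__.py` is not shown) and that a dataloader and batch tensors exist:

```py
from homa.vision import Resnet

model = Resnet(num_classes=10, lr=0.01)

# Trainable.train dispatches on the argument: a DataLoader runs a full pass,
# a pair of tensors runs a single optimization step
model.train(train_loader)        # train_loader: a hypothetical DataLoader
model.train(x_batch, y_batch)    # or one SGD step on a single batch
```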
homa/vision/__init__.py
ADDED
homa/vision/concerns/Trainable.py
ADDED
@@ -0,0 +1,26 @@
+from torch import Tensor
+from torch.utils.data.dataloader import DataLoader
+from ...device import get_device
+
+
+class Trainable:
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+
+    def train(self, x: Tensor | DataLoader, y: Tensor | None = None):
+        if y is None and isinstance(x, DataLoader):
+            self.train_dataloader(x)
+            return
+        self.train_tensors(x, y)
+
+    def train_tensors(self, x: Tensor, y: Tensor):
+        self.network.train()
+        self.optimizer.zero_grad()
+        loss = self.criterion(self.network(x), y)
+        loss.backward()
+        self.optimizer.step()
+
+    def train_dataloader(self, dataloader: DataLoader):
+        for x, y in dataloader:
+            x, y = x.to(get_device()), y.to(get_device())
+            self.train_tensors(x, y)
homa/vision/concerns/__init__.py
ADDED
@@ -0,0 +1 @@
+from .Trainable import Trainable
homa/vision/modules/ResnetModule.py
ADDED
@@ -0,0 +1,23 @@
+import torch
+from torchvision.models import resnet50
+from torch.nn.init import kaiming_uniform_ as kaiming
+
+
+class ResnetModule(torch.nn.Module):
+    def __init__(self, num_classes: int):
+        super().__init__()
+        self.num_classes = num_classes
+        self._create_encoder()
+        self._create_fc()
+
+    def _create_encoder(self):
+        self.encoder = resnet50(weights="DEFAULT")
+        self.encoder.fc = torch.nn.Identity()
+
+    def _create_fc(self):
+        self.fc = torch.nn.Linear(2048, self.num_classes)
+        kaiming(self.fc.weight, mode="fan_in", nonlinearity="relu")
+
+    def forward(self, images: torch.Tensor):
+        features = self.encoder(images)
+        return self.fc(features)
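The module swaps ResNet-50's classification head for an identity and adds a fresh Kaiming-initialized linear layer, so the forward pass feeds 2048-dimensional encoder features into `fc`. A shape-check sketch (the input size is illustrative; the pretrained weights download on first use):

```py
import torch
from homa.vision.modules import ResnetModule

module = ResnetModule(num_classes=10)
images = torch.randn(2, 3, 224, 224)

logits = module(images)
print(logits.shape)  # torch.Size([2, 10])
```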
homa/vision/modules/StochasticResnetModule.py
ADDED
@@ -0,0 +1,9 @@
+from .ResnetModule import ResnetModule
+from ..utils import replace_relu
+from ...activations import StochasticActivation
+
+
+class StochasticResnetModule(ResnetModule):
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        replace_relu(self, StochasticActivation)
homa/vision/utils.py
ADDED
@@ -0,0 +1,21 @@
+import torch
+
+
+def replace_modules(
+    model: torch.nn.Module, find: list | type, replacement: type
+) -> int:
+    if not isinstance(find, list):
+        find = [find]
+
+    replaced = 0
+    for parent in model.modules():
+        for name, child in list(parent.named_children()):
+            for needle in find:
+                if isinstance(child, needle):
+                    setattr(parent, name, replacement())
+                    replaced += 1
+    return replaced
+
+
+def replace_relu(model: torch.nn.Module, replacement: type):
+    return replace_modules(model, torch.nn.ReLU, replacement)
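`replace_modules` walks every parent module and swaps matching children in place, calling `replacement()` once per match so each site gets an independent instance with its own parameters, which is what makes the stochastic activations vary across a network. A self-contained demo with stock torch modules:

```py
import torch
from homa.vision.utils import replace_modules

net = torch.nn.Sequential(
    torch.nn.Linear(4, 4),
    torch.nn.ReLU(),
    torch.nn.Linear(4, 2),
    torch.nn.ReLU(),
)

count = replace_modules(net, torch.nn.ReLU, torch.nn.SiLU)
print(count)  # 2: both ReLU sites now hold their own SiLU instance
```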
homa-0.0.19.dist-info/METADATA
ADDED
@@ -0,0 +1,21 @@
+Metadata-Version: 2.4
+Name: homa
+Version: 0.0.19
+Summary: A curated list of machine learning and deep learning helpers.
+Author-email: Taha Shieenavaz <tahashieenavaz@gmail.com>
+Requires-Python: >=3.7
+Description-Content-Type: text/markdown
+Requires-Dist: numpy
+Requires-Dist: torch
+Requires-Dist: fire
+
+## Device Management
+
+```py
+from homa import cpu, mps, cuda, device
+
+torch.tensor([1, 2, 3, 4, 5]).to(cpu())
+torch.tensor([1, 2, 3, 4, 5]).to(cuda())
+torch.tensor([1, 2, 3, 4, 5]).to(mps())
+torch.tensor([1, 2, 3, 4, 5]).to(device())
+```
homa-0.0.19.dist-info/RECORD
ADDED
@@ -0,0 +1,51 @@
+homa/__init__.py,sha256=NBYFKizG8UASiz5HLsEBqzXNGlWr78xm4sLr5hxKvjU,46
+homa/device.py,sha256=9kKXfpYfnEk2cFQWPfcJrVloHgC_SSbP4I8IRY9TYk4,343
+homa/settings.py,sha256=CPZDPvs1380O7SY7FcSKol8kBVFVVYFgSJl3YEyJuZ0,263
+homa/utils.py,sha256=dPp6TItJwWxBqxmkMzUuCtX_BzdPT-kMOZyXRGVMCbQ,70
+homa/activations/__init__.py,sha256=zrEyWrYuQAD9R8GJBJUChfkW8_lsMUEwHvAcASoac3k,44
+homa/activations/utils.py,sha256=asXbV7tl48ORLiggXHo6hjYNIjWuHvmuhVv311IJebA,734
+homa/activations/classes/APLU.py,sha256=tjdHYrOqO6vA6qFVAbUuu1O9kB_bPq4AYuiXpbyk2vg,1499
+homa/activations/classes/GALU.py,sha256=LNH-iO89cDXoL6iXtny5DxW3n7sO34s4_GWXcFlzgQM,1864
+homa/activations/classes/MELU.py,sha256=80_0H4pjGXngiGsyT09c4h2MFRkds8Ypn-zXRJ45NQY,2060
+homa/activations/classes/PDELU.py,sha256=tRdxa1RLQuXEjXrSD283qgIulbOPX1mQtV5GGqrVUH0,1418
+homa/activations/classes/SReLU.py,sha256=TxTxcAvRQHYykGq0uXP2QthNIK_BNsLaoqzHPHSFRIY,1828
+homa/activations/classes/SmallGALU.py,sha256=LS92S4lgIOrqbJPMA1pBJSkkm3lIChd1dRE_XV-YRd8,1342
+homa/activations/classes/StochasticActivation.py,sha256=0A2iTQmtvA7Yx9hOf4fuqTvTSi_8bIT3dsKLe--Od3o,515
+homa/activations/classes/WideMELU.py,sha256=7-p-mhOt8kc_qniXqF6wGYVxtLveL7bUIUJSFKQYD0M,2507
+homa/activations/classes/__init__.py,sha256=RBI51Jc9EBPrpwLNCmzLkz3fhJCRRSI46HjsI1CL1Es,238
+homa/cli/HomaCommand.py,sha256=zUWDLpXb6zO6DWZ71MuhPNawjXTqkXJPEwZxWN7Ejx0,226
+homa/cli/namespaces/CacheNamespace.py,sha256=QXGljzj287stzTx0y_MXnqvCgPLqd7WjSPop2WDe14E,784
+homa/cli/namespaces/MakeNamespace.py,sha256=5G6LHk3lDkXROz7uq4jYE0DyO_V7JvnhJ33IFCiqYro,590
+homa/cli/namespaces/__init__.py,sha256=zAKUGPH4wcacxfH5Qvidp-uOuHdfzhan6kvVI6eMKA8,84
+homa/ensemble/Ensemble.py,sha256=gYfQfdKSZdLlWNGMUGCOEv3vQj0nhYYpJmznETizXfA,346
+homa/ensemble/__init__.py,sha256=1pk2W-NbgfDFh9WLKZVLUk2E3PTjVZ5Bap9dQEnrs9o,31
+homa/ensemble/concerns/CalculatesMetricNecessities.py,sha256=hU0Nn-4UYQXUy7gQmWL638w4vZjwigAMkxyFnqRboiM,706
+homa/ensemble/concerns/HasNetwork.py,sha256=WlE-gpt7WlCOW0vIlnqJJsYCi2Ids7uE6CDjolOoElw,227
+homa/ensemble/concerns/HasStateDicts.py,sha256=EAQj01qC_wnJUgQ6ReZI9kc1fFC-tqPrXjeGhE2Ka6M,215
+homa/ensemble/concerns/PredictsProbabilities.py,sha256=qdCpS-NdoYQdtXJXRKz96MY1VW06baJ-pu_7zP3X_JU,330
+homa/ensemble/concerns/RecordsStateDictionaries.py,sha256=FeIyDDxm60p5DCNVjq7xRyuhM04gvJI0ftigtjfXXLA,635
+homa/ensemble/concerns/ReportsClassificationMetrics.py,sha256=YDFH7nMStudtc6pJqBGDNzQmknKIVDLgwCWsr4NFXc4,395
+homa/ensemble/concerns/ReportsEnsembleAccuracy.py,sha256=x_cfPsikXc4dUsta2-gnOUoiSCb5J-gq4ao4ESZPDFs,306
+homa/ensemble/concerns/ReportsEnsembleF1.py,sha256=hdtdCQrWaFJNUn1KP9cAmi_q_EA4FYnpkBMlYLjzRZg,296
+homa/ensemble/concerns/ReportsEnsembleKappa.py,sha256=ZRbtrFCTD84EDql6ZL1xeWtTLFxpO5Y5tQaUlR6_0jw,300
+homa/ensemble/concerns/ReportsLogits.py,sha256=H0AFvCcRDYjGtgrpinVLUyhvumF4Lmo2nyvtNOa_ARM,355
+homa/ensemble/concerns/ReportsSize.py,sha256=6XJYc24isbbnoNV8DJWwaFfhi__qAnYEQZQJMDPL4VA,248
+homa/ensemble/concerns/__init__.py,sha256=UtEqGhxlNypLa29bxw59Z1W4SN3Tju1ynJI72luIiAo,538
+homa/torch/Module.py,sha256=UuRTJXxp7hlQHHL2dBGVlHlyA2OFgJ1Xz9SAff8Iv3c,171
+homa/torch/__init__.py,sha256=Z-sIT4UjMBo2BgGoEPyQ20xJWVeX1W5Q4_C7CDa0SdE,50
+homa/torch/helpers.py,sha256=CLbTCXRrroM0n4PfM-K_xFavs4dCZJEu_L7hdgb1DCI,134
+homa/vision/Model.py,sha256=JIeVpHJwirHfsDfYYbLsu0kt7bGf4nhMQGIOagUDKw4,22
+homa/vision/Resnet.py,sha256=Kh5QLYp8X8o9vFHYqTeOs1uRb6n36FsENdHTIiFZTAs,409
+homa/vision/StochasticResnet.py,sha256=mLgKBfqRJtOgWdgfVHHTnIJVg2SHQDYz4ywXiTWqcIY,241
+homa/vision/__init__.py,sha256=9M65-r8ykfncoPS3UYJC536vi2ytyzHAf4ZZ4ZlMz1g,99
+homa/vision/utils.py,sha256=O58TkXooa31mXJ1JQTykO2OXzmUm9H8Qvvk9lsIZnZ0,623
+homa/vision/concerns/Trainable.py,sha256=pgwFoesMLNuz4ejPyHAoJLO0NPYhRsbBjNbtBYZgiRs,813
+homa/vision/concerns/__init__.py,sha256=UnZfL_YH4IwNqip1wokoWJHe6fn4pE0ePtchTuykxJY,33
+homa/vision/modules/ResnetModule.py,sha256=eFudBnILD6OmgQtcW_CQQ8aZ62NEa4HyZ15-lobTtt0,712
+homa/vision/modules/StochasticResnetModule.py,sha256=zSfx6FW5c5NfHMDGw7MTbaxQ3EmyKV1WVnWqH7G7CDc,298
+homa/vision/modules/__init__.py,sha256=kGlcc0BvYjuT_pBijI54VXLoWluPMfLzif1eO_9RoJc,98
+homa-0.0.19.dist-info/METADATA,sha256=jut1aYkO2ByRxiuJbSLpjT9_F70lleWcUb261_wBhcs,540
+homa-0.0.19.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+homa-0.0.19.dist-info/entry_points.txt,sha256=tJZzjs-f2QvFe3ES8Qta8IE5sAbeE8-cyZ_UtbgqG4s,51
+homa-0.0.19.dist-info/top_level.txt,sha256=tmOfy2tuaAwc3W5-i6j61_vYJsXgR4ivBWkhJ3ZtJDc,5
+homa-0.0.19.dist-info/RECORD,,
homa-0.0.1.dist-info/METADATA
DELETED
@@ -1,12 +0,0 @@
-Metadata-Version: 2.4
-Name: homa
-Version: 0.0.1
-Summary: A curated list of machine learning and deep learning helpers.
-Author-email: Taha Shieenavaz <tahashieenavaz@gmail.com>
-Requires-Python: >=3.7
-Description-Content-Type: text/markdown
-Requires-Dist: numpy
-Requires-Dist: torch
-
-# homa
-A curated collection of machine learning and deep learning helper functions.
homa-0.0.1.dist-info/RECORD
DELETED
@@ -1,7 +0,0 @@
-homa/__init__.py,sha256=NBYFKizG8UASiz5HLsEBqzXNGlWr78xm4sLr5hxKvjU,46
-homa/device.py,sha256=9kKXfpYfnEk2cFQWPfcJrVloHgC_SSbP4I8IRY9TYk4,343
-homa/settings.py,sha256=CPZDPvs1380O7SY7FcSKol8kBVFVVYFgSJl3YEyJuZ0,263
-homa-0.0.1.dist-info/METADATA,sha256=TMXUMO31Dt-5DrlD4maXJTNtWYlzMBFJvg3JONUhXj4,366
-homa-0.0.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-homa-0.0.1.dist-info/top_level.txt,sha256=tmOfy2tuaAwc3W5-i6j61_vYJsXgR4ivBWkhJ3ZtJDc,5
-homa-0.0.1.dist-info/RECORD,,
{homa-0.0.1.dist-info → homa-0.0.19.dist-info}/WHEEL
File without changes
{homa-0.0.1.dist-info → homa-0.0.19.dist-info}/top_level.txt
File without changes