homa 0.2.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release. This version of homa might be problematic; see the registry page for details.
- homa/__init__.py +2 -0
- homa/activations/__init__.py +2 -0
- homa/activations/classes/APLU.py +49 -0
- homa/activations/classes/ActivationFunction.py +6 -0
- homa/activations/classes/CaLU.py +13 -0
- homa/activations/classes/ERF.py +10 -0
- homa/activations/classes/Elliot.py +10 -0
- homa/activations/classes/GCU.py +9 -0
- homa/activations/classes/GaLU.py +11 -0
- homa/activations/classes/GaussianReLU.py +50 -0
- homa/activations/classes/LaLU.py +11 -0
- homa/activations/classes/Logish.py +9 -0
- homa/activations/classes/MeLU.py +11 -0
- homa/activations/classes/MexicanReLU.py +49 -0
- homa/activations/classes/SGELU.py +12 -0
- homa/activations/classes/SReLU.py +37 -0
- homa/activations/classes/SmallGaLU.py +11 -0
- homa/activations/classes/Smish.py +9 -0
- homa/activations/classes/TeLU.py +9 -0
- homa/activations/classes/TripleStateSwish.py +15 -0
- homa/activations/classes/WideMeLU.py +15 -0
- homa/activations/classes/__init__.py +19 -0
- homa/activations/utils.py +22 -0
- homa/cli/HomaCommand.py +12 -0
- homa/cli/namespaces/CacheNamespace.py +29 -0
- homa/cli/namespaces/MakeNamespace.py +18 -0
- homa/cli/namespaces/__init__.py +2 -0
- homa/device.py +25 -0
- homa/ensemble/Ensemble.py +16 -0
- homa/ensemble/__init__.py +1 -0
- homa/ensemble/concerns/CalculatesMetricNecessities.py +24 -0
- homa/ensemble/concerns/PredictsProbabilities.py +15 -0
- homa/ensemble/concerns/ReportsClassificationMetrics.py +13 -0
- homa/ensemble/concerns/ReportsEnsembleAccuracy.py +11 -0
- homa/ensemble/concerns/ReportsEnsembleF1.py +10 -0
- homa/ensemble/concerns/ReportsEnsembleKappa.py +10 -0
- homa/ensemble/concerns/ReportsLogits.py +17 -0
- homa/ensemble/concerns/ReportsSize.py +11 -0
- homa/ensemble/concerns/StoresModels.py +29 -0
- homa/ensemble/concerns/__init__.py +9 -0
- homa/loss/LogitNormLoss.py +12 -0
- homa/loss/Loss.py +2 -0
- homa/loss/__init__.py +2 -0
- homa/settings.py +12 -0
- homa/torch/__init__.py +1 -0
- homa/torch/helpers.py +6 -0
- homa/utils.py +2 -0
- homa/vision/Classifier.py +5 -0
- homa/vision/Model.py +2 -0
- homa/vision/Resnet.py +13 -0
- homa/vision/StochasticClassifier.py +23 -0
- homa/vision/StochasticResnet.py +10 -0
- homa/vision/StochasticSwin.py +10 -0
- homa/vision/Swin.py +12 -0
- homa/vision/__init__.py +5 -0
- homa/vision/concerns/HasLabels.py +13 -0
- homa/vision/concerns/HasLogits.py +12 -0
- homa/vision/concerns/HasProbabilities.py +9 -0
- homa/vision/concerns/ReportsAccuracy.py +27 -0
- homa/vision/concerns/ReportsMetrics.py +6 -0
- homa/vision/concerns/Trainable.py +29 -0
- homa/vision/concerns/__init__.py +6 -0
- homa/vision/modules/ResnetModule.py +23 -0
- homa/vision/modules/SwinModule.py +23 -0
- homa/vision/modules/__init__.py +2 -0
- homa/vision/utils.py +12 -0
- homa-0.2.3.dist-info/METADATA +75 -0
- homa-0.2.3.dist-info/RECORD +71 -0
- homa-0.2.3.dist-info/WHEEL +5 -0
- homa-0.2.3.dist-info/entry_points.txt +2 -0
- homa-0.2.3.dist-info/top_level.txt +1 -0
homa/activations/classes/APLU.py
ADDED
import torch
import torch.nn.functional as F  # needed for F.relu in forward()


class APLU(torch.nn.Module):
    def __init__(
        self, channels: int | None = None, n: int = 2, init_b: str = "linspace"
    ):
        super().__init__()
        self.n = n
        self.init_b = init_b
        if channels is None:
            self.register_parameter("a", None)
            self.register_parameter("b", None)
        else:
            self._init_params(channels, device=None, dtype=None)

    def _init_params(self, channels, device, dtype):
        a = torch.zeros(channels, self.n, device=device, dtype=dtype)
        if self.init_b == "linspace":
            b = (
                torch.linspace(-1.0, 1.0, steps=self.n, device=device, dtype=dtype)
                .expand(channels, -1)
                .contiguous()
            )
        else:
            b = torch.empty(channels, self.n, device=device, dtype=dtype).uniform_(
                -1.0, 1.0
            )
        self.a = torch.nn.Parameter(a)
        self.b = torch.nn.Parameter(b)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        if self.a is None or self.b is None:
            self._init_params(x.shape[1], device=x.device, dtype=x.dtype)

        y = F.relu(x)
        x_exp = x.unsqueeze(-1)
        expand_shape = (
            (
                1,
                x.shape[1],
            )
            + (1,) * (x.dim() - 2)
            + (self.n,)
        )
        a = self.a.view(*expand_shape)
        b = self.b.view(*expand_shape)
        hinges = (-x_exp + b).clamp_max(0.0)
        return y + (a * hinges).sum(dim=-1)
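A minimal usage sketch (not part of the package source; it assumes homa is installed and that `APLU` is re-exported from `homa.activations`): when `channels` is omitted, the `(C, n)` parameters are allocated lazily on the first forward pass.

```py
import torch
from homa.activations import APLU  # assumed export path

act = APLU(n=2)                  # no channels given: params created on first call
x = torch.randn(8, 16, 32, 32)   # (N, C, H, W)
y = act(x)                       # same shape as x
print(act.a.shape, act.b.shape)  # torch.Size([16, 2]) torch.Size([16, 2])
```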
homa/activations/classes/CaLU.py
ADDED
import torch
import math
from .ActivationFunction import ActivationFunction


class CaLU(ActivationFunction):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        a = torch.arctan(x) / math.pi
        b = 0.5
        return x * (a + b)
homa/activations/classes/Elliot.py
ADDED
import torch
from .ActivationFunction import ActivationFunction


class Elliot(ActivationFunction):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return 0.5 + torch.div(0.5 * x, 1 + torch.abs(x))
homa/activations/classes/GaLU.py
ADDED
from .GaussianReLU import GaussianReLU


class GaLU(GaussianReLU):
    def __init__(
        self,
        channels: int | None = None,
        max_input: float = 1.0,
    ):
        self.hats = [(2.0, 2.0), (1.0, 1.0), (3.0, 1.0)]
        super().__init__(self.hats, channels=channels, max_input=max_input)
homa/activations/classes/GaussianReLU.py
ADDED
import torch
from typing import Sequence, Tuple


class GaussianReLU(torch.nn.Module):
    def __init__(
        self,
        alphas_lambdas: Sequence[Tuple[float, float]],
        channels: int | None = None,
        max_input: float = 1.0,
    ):
        super().__init__()
        self.M = float(max_input)
        self.register_buffer(
            "alphas", torch.tensor([a for a, _ in alphas_lambdas], dtype=torch.float32)
        )
        self.register_buffer(
            "lambdas", torch.tensor([l for _, l in alphas_lambdas], dtype=torch.float32)
        )
        self.K = len(alphas_lambdas)

        if channels is None:
            self.register_parameter("c0", None)  # per-channel (PReLU slope)
            self.register_parameter("c", None)  # (C, K) coefficients
        else:
            self._init_params(channels, None, None)

    def _init_params(self, C: int, device, dtype):
        self.c0 = torch.nn.Parameter(torch.zeros(C, device=device, dtype=dtype))
        self.c = torch.nn.Parameter(torch.zeros(C, self.K, device=device, dtype=dtype))

    def _expand_param(self, p: torch.Tensor, x: torch.Tensor, add_K: bool = False):
        # `self` was missing in the original definition even though the method
        # is called as self._expand_param(...)
        shape = (
            (1, x.shape[1]) + (1,) * (x.dim() - 2) + ((p.shape[-1],) if add_K else ())
        )
        return p.view(shape)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        if self.c0 is None or self.c is None:
            self._init_params(x.shape[1], x.device, x.dtype)
        c0 = self._expand_param(self.c0, x)
        y = torch.nn.functional.relu(x) - c0 * torch.nn.functional.relu(-x)
        a = self.alphas.to(x.device, x.dtype).view(*((1,) * x.dim()), -1)
        l = self.lambdas.to(x.device, x.dtype).view(*((1,) * x.dim()), -1)
        xE = x.unsqueeze(-1)
        term1 = (l * self.M - (xE - a * self.M).abs()).clamp_min(0.0)
        term2 = ((xE - a * self.M - 2 * l * self.M).abs() - l * self.M).clamp_max(0.0)
        hats = term1 + term2
        c = self._expand_param(self.c, x, add_K=True)  # (1,C,...,K)
        return y + (c * hats).sum(dim=-1)
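As a reading aid (a sketch under the same export assumption as above): `term1` is a positive triangular bump centered at `alpha * M` and `term2` a matching negative dip centered at `(alpha + 2 * lambda) * M`; with the zero-initialized `c0` and `c`, the module starts out exactly as ReLU.

```py
import torch
from homa.activations import GaLU  # GaussianReLU with three preset hats

act = GaLU()                       # lazy per-channel parameters
x = torch.randn(4, 8, 16, 16)
assert torch.allclose(act(x), torch.relu(x))  # zero-init coefficients => ReLU
```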
homa/activations/classes/LaLU.py
ADDED
import torch
from .ActivationFunction import ActivationFunction


class LaLU(ActivationFunction):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        phi_laplace = torch.where(x >= 0, 1 - 0.5 * torch.exp(-x), 0.5 * torch.exp(x))
        return x * phi_laplace
homa/activations/classes/MeLU.py
ADDED
from .MexicanReLU import MexicanReLU


class MeLU(MexicanReLU):
    def __init__(self, channels: int | None = None, max_input: float = 1.0):
        self.hats = [
            (2.0, 2.0),
            (1.0, 1.0),
            (3.0, 1.0),
        ]
        super().__init__(self.hats, channels=channels, max_input=max_input)
homa/activations/classes/MexicanReLU.py
ADDED
import torch
import torch.nn.functional as F  # needed for F.relu in forward()
from typing import Sequence, Tuple


class MexicanReLU(torch.nn.Module):
    def __init__(
        self,
        alphas_lambdas: Sequence[Tuple[float, float]],
        channels: int | None = None,
        max_input: float = 1.0,
    ):
        super().__init__()
        self.M = float(max_input)
        self.register_buffer(
            "alphas", torch.tensor([a for a, _ in alphas_lambdas], dtype=torch.float32)
        )
        self.register_buffer(
            "lambdas", torch.tensor([l for _, l in alphas_lambdas], dtype=torch.float32)
        )
        self.K = len(alphas_lambdas)

        if channels is None:
            self.register_parameter("c0", None)  # PReLU negative slope (per-channel)
            self.register_parameter("c", None)  # (C, K) coefficients
        else:
            self._init_params(channels, device=None, dtype=None)

    def _init_params(self, C: int, device, dtype):
        self.c0 = torch.nn.Parameter(torch.zeros(C, device=device, dtype=dtype))
        self.c = torch.nn.Parameter(torch.zeros(C, self.K, device=device, dtype=dtype))

    def _expand_param(self, p: torch.Tensor, x: torch.Tensor, n_extra: int = 0):
        # `self` was missing in the original definition even though the method
        # is called as self._expand_param(...)
        shape = (
            (1, x.shape[1]) + (1,) * (x.dim() - 2) + ((p.shape[-1],) if n_extra else ())
        )
        return p.view(shape)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        if self.c0 is None or self.c is None:
            self._init_params(x.shape[1], x.device, x.dtype)
        c0 = self._expand_param(self.c0, x)
        y = F.relu(x) - c0 * F.relu(-x)
        xE = x.unsqueeze(-1)
        cE = self._expand_param(self.c, x, n_extra=1)
        aE = self.alphas.to(x.device, x.dtype).view(*((1,) * x.dim()), -1)  # (..., K)
        lE = self.lambdas.to(x.device, x.dtype).view(*((1,) * x.dim()), -1)  # (..., K)
        hats = (lE * self.M - (xE - aE * self.M).abs()).clamp_min(0.0)
        y = y + (cE * hats).sum(dim=-1)
        return y
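For orientation (sketch, same assumptions): each hat is the triangular bump `max(0, lambda * M - |x - alpha * M|)`, so `MeLU(x) = PReLU(x) + sum_k c_k * hat_k(x)`; as with GaussianReLU, the zero initialization makes the starting function plain ReLU.

```py
import torch
from homa.activations import MeLU

act = MeLU(channels=8)            # eager (C, K) coefficients, K = 3 hats
x = torch.randn(2, 8, 5)
assert torch.allclose(act(x), torch.relu(x))  # zero-init => plain ReLU
```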
homa/activations/classes/SGELU.py
ADDED
import torch
import math
from .ActivationFunction import ActivationFunction


class SGELU(ActivationFunction):
    def __init__(self, alpha: float = 0.1, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.alpha = alpha

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.alpha * x * torch.erf(x / math.sqrt(2))
homa/activations/classes/SReLU.py
ADDED
import torch


class SReLU(torch.nn.Module):
    def __init__(self, channels: int | None = None, max_input: float = 1.0):
        super().__init__()
        self.M = float(max_input)
        if channels is None:
            self.register_parameter("t_l", None)
            self.register_parameter("t_r", None)
            self.register_parameter("a_l", None)
            self.register_parameter("a_r", None)
        else:
            self._init_params(channels, None, None)

    def _init_params(self, C: int, device, dtype):
        self.t_l = torch.nn.Parameter(torch.zeros(C, device=device, dtype=dtype))
        self.t_r = torch.nn.Parameter(
            torch.full((C,), self.M, device=device, dtype=dtype)
        )
        self.a_l = torch.nn.Parameter(torch.zeros(C, device=device, dtype=dtype))
        self.a_r = torch.nn.Parameter(torch.ones(C, device=device, dtype=dtype))

    def _expand_param(self, p: torch.Tensor, x: torch.Tensor):
        # `self` was missing in the original definition even though the method
        # is called as self._expand_param(...)
        return p.view((1, x.shape[1]) + (1,) * (x.dim() - 2))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        if self.t_l is None:
            self._init_params(x.shape[1], x.device, x.dtype)

        t_l = self._expand_param(self.t_l, x)
        t_r = self._expand_param(self.t_r, x)
        a_l = self._expand_param(self.a_l, x)
        a_r = self._expand_param(self.a_r, x)
        y = torch.where(x < t_l, t_l + a_l * (x - t_l), x)
        y = torch.where(x > t_r, t_r + a_r * (x - t_r), y)
        return y
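One consequence of the defaults worth noting (sketch): with `t_l = 0`, `a_l = 0`, `t_r = M`, and `a_r = 1`, the initial SReLU coincides with ReLU, so training starts from a familiar shape and learns the thresholds and slopes from there.

```py
import torch
from homa.activations import SReLU

act = SReLU(channels=3)
x = torch.randn(2, 3, 5)
assert torch.allclose(act(x), torch.relu(x))  # default init behaves like ReLU
```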
homa/activations/classes/SmallGaLU.py
ADDED
from .GaussianReLU import GaussianReLU


class SmallGaLU(GaussianReLU):
    def __init__(
        self,
        channels: int | None = None,
        max_input: float = 1.0,
    ):
        self.hats = [(2.0, 2.0)]
        super().__init__(self.hats, channels=channels, max_input=max_input)
homa/activations/classes/TripleStateSwish.py
ADDED
import torch
from .ActivationFunction import ActivationFunction


class TripleStateSwish(ActivationFunction):
    def __init__(self, alpha: float = 20, beta: float = 40, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.alpha = alpha
        self.beta = beta

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        a = 1 / (1 + torch.exp(-x))
        b = 1 / (1 + torch.exp(-x + self.alpha))
        c = 1 / (1 + torch.exp(-x + self.beta))
        return x * a * (a + b + c)
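Reading the forward pass above: `a`, `b`, and `c` are `sigmoid(x)`, `sigmoid(x - alpha)`, and `sigmoid(x - beta)`, so the extra terms only switch on at large inputs. A quick equivalence check (sketch, assumes homa is installed):

```py
import torch
from homa.activations import TripleStateSwish

act = TripleStateSwish()          # alpha=20, beta=40
x = torch.linspace(-3.0, 3.0, steps=7)
expected = x * torch.sigmoid(x) * (
    torch.sigmoid(x) + torch.sigmoid(x - 20) + torch.sigmoid(x - 40)
)
assert torch.allclose(act(x), expected)
```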
homa/activations/classes/WideMeLU.py
ADDED
from .MexicanReLU import MexicanReLU


class WideMeLU(MexicanReLU):
    def __init__(self, channels: int | None = None, max_input: float = 1.0):
        self.hats = [
            (2.0, 2.0),
            (1.0, 1.0),
            (3.0, 1.0),
            (0.5, 0.5),
            (1.5, 0.5),
            (2.5, 0.5),
            (3.5, 0.5),
        ]
        super().__init__(self.hats, channels=channels, max_input=max_input)
homa/activations/classes/__init__.py
ADDED
from .ActivationFunction import ActivationFunction
from .APLU import APLU
from .CaLU import CaLU
from .Elliot import Elliot
from .ERF import ERF
from .GaLU import GaLU
from .GaussianReLU import GaussianReLU
from .GCU import GCU
from .LaLU import LaLU
from .Logish import Logish
from .MeLU import MeLU
from .MexicanReLU import MexicanReLU
from .SGELU import SGELU
from .SmallGaLU import SmallGaLU
from .Smish import Smish
from .SReLU import SReLU
from .TeLU import TeLU
from .TripleStateSwish import TripleStateSwish
from .WideMeLU import WideMeLU
homa/activations/utils.py
ADDED
import torch
from typing import Type


def replace_layers(
    module: torch.nn.Module,
    target_class: Type[torch.nn.Module],
    replacement_class: Type[torch.nn.Module],
) -> None:
    for name, child in module.named_children():
        if isinstance(child, target_class):
            inplace = getattr(child, "inplace", False)
            try:
                new_layer = replacement_class(inplace=inplace)
            except TypeError:
                try:
                    new_layer = replacement_class()
                except Exception:
                    continue
            setattr(module, name, new_layer)
        else:
            replace_layers(child, target_class, replacement_class)
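A usage sketch for `replace_layers` (hypothetical host model; `resnet18` is only a convenient example): it swaps every `ReLU` for a `SiLU`, carrying the `inplace` flag over when the replacement accepts one.

```py
import torch
from torchvision.models import resnet18
from homa.activations.utils import replace_layers

model = resnet18(weights=None)
replace_layers(model, torch.nn.ReLU, torch.nn.SiLU)
print(model.relu)  # SiLU(inplace=True): the inplace flag carried over
```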
homa/cli/namespaces/CacheNamespace.py
ADDED
import shutil
import sys
from pathlib import Path


class CacheNamespace:
    def clear(self):
        root = Path.cwd()
        removed = 0
        errors: list[str] = []

        for candidate in root.rglob("__pycache__"):
            if not candidate.is_dir():
                continue
            try:
                shutil.rmtree(candidate)
                removed += 1
            except OSError as exc:
                errors.append(f"{candidate}: {exc}")

        if errors:
            print("Failed to remove the following paths:", file=sys.stderr)
            for error in errors:
                print(f"  - {error}", file=sys.stderr)
            return 1

        return (
            f"Removed {removed} __pycache__ director{'ies' if removed != 1 else 'y'}."
        )
homa/cli/namespaces/MakeNamespace.py
ADDED
from pathlib import Path


class MakeNamespace:
    def trait(self, name: str):
        class_name = name.split(".")[-1]
        file = name.replace(".", "/") + ".py"
        path = Path(file)
        parent = path.parent
        parent.mkdir(parents=True, exist_ok=True)
        path.touch()

        # copy the template file and substitute the class name
        current_path = Path(__file__).parent.parent.resolve()
        template_path = current_path / "templates" / "trait.txt"
        content = template_path.read_text()
        content = content.replace("{{CLASS}}", class_name)
        path.write_text(content)
homa/device.py
ADDED
import torch


def get_device():
    if torch.backends.mps.is_available():
        return mps()
    if torch.cuda.is_available():
        return cuda()
    return cpu()


def cpu():
    return torch.device("cpu")


def cuda():
    return torch.device("cuda")


def mps():
    return torch.device("mps")


def device():
    return get_device()
homa/ensemble/Ensemble.py
ADDED
from .concerns import (
    ReportsSize,
    StoresModels,
    ReportsClassificationMetrics,
    PredictsProbabilities,
)


class Ensemble(
    ReportsSize,
    ReportsClassificationMetrics,
    PredictsProbabilities,
    StoresModels,
):
    def __init__(self):
        super().__init__()
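A sketch of the intended flow (hypothetical `train_loader`/`test_loader`; it also assumes `Resnet` satisfies the `Model` check in `StoresModels` below): record snapshots of trained models, then score the logit-averaged ensemble.

```py
from homa.ensemble import Ensemble
from homa.vision import Resnet

ensemble = Ensemble()
for _ in range(3):
    model = Resnet(num_classes=10)
    for epoch in range(10):
        model.train(train_loader)  # train_loader: your DataLoader
    ensemble.record(model)         # stores a deepcopy of model.network

print(ensemble.accuracy(test_loader))
```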
homa/ensemble/__init__.py
ADDED
from .Ensemble import Ensemble
homa/ensemble/concerns/CalculatesMetricNecessities.py
ADDED
import torch
from ...device import get_device


class CalculatesMetricNecessities:
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    @torch.no_grad()
    def metric_necessities(self, dataloader):
        predictions, labels = [], []
        device = get_device()
        for x, y in dataloader:
            x, y = x.to(device), y.to(device)
            sum_logits = None
            for model in self.models:
                model.to(device)
                model.eval()
                logits = model(x)
                sum_logits = logits if sum_logits is None else sum_logits + logits
            batch_predictions = sum_logits.argmax(dim=1)
            predictions.extend(batch_predictions.cpu().numpy())
            labels.extend(y.cpu().numpy())
        return predictions, labels
homa/ensemble/concerns/PredictsProbabilities.py
ADDED
import torch
from .ReportsLogits import ReportsLogits


class PredictsProbabilities(ReportsLogits):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def predict(self, x: torch.Tensor) -> torch.Tensor:
        logits = self.logits(x)
        return torch.nn.functional.softmax(logits, dim=1)

    @torch.no_grad()
    def predict_(self, x: torch.Tensor) -> torch.Tensor:
        return self.predict(x)
homa/ensemble/concerns/ReportsClassificationMetrics.py
ADDED
from .ReportsEnsembleF1 import ReportsEnsembleF1
from .ReportsEnsembleAccuracy import ReportsEnsembleAccuracy
from .ReportsEnsembleKappa import ReportsEnsembleKappa
from .CalculatesMetricNecessities import CalculatesMetricNecessities


class ReportsClassificationMetrics(
    CalculatesMetricNecessities,
    ReportsEnsembleAccuracy,
    ReportsEnsembleF1,
    ReportsEnsembleKappa,
):
    pass
homa/ensemble/concerns/ReportsEnsembleAccuracy.py
ADDED
from sklearn.metrics import accuracy_score as accuracy
from torch.utils.data import DataLoader


class ReportsEnsembleAccuracy:
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def accuracy(self, dataloader: DataLoader) -> float:
        predictions, labels = self.metric_necessities(dataloader)
        return accuracy(labels, predictions)
homa/ensemble/concerns/ReportsEnsembleF1.py
ADDED
from sklearn.metrics import f1_score as f1


class ReportsEnsembleF1:
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def f1(self, dataloader) -> float:
        # metric_necessities requires a dataloader; the original called it
        # without one
        predictions, labels = self.metric_necessities(dataloader)
        return f1(labels, predictions, average="weighted")
homa/ensemble/concerns/ReportsEnsembleKappa.py
ADDED
from sklearn.metrics import cohen_kappa_score as kappa


class ReportsEnsembleKappa:
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def kappa(self, dataloader) -> float:
        # the original named this method `accuracy` (clashing with
        # ReportsEnsembleAccuracy) and passed no dataloader
        predictions, labels = self.metric_necessities(dataloader)
        return kappa(labels, predictions)
homa/ensemble/concerns/ReportsLogits.py
ADDED
import torch


class ReportsLogits:
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def logits(self, x: torch.Tensor) -> torch.Tensor:
        batch_size = x.shape[0]
        # allocate on the input's device; the original allocated on CPU
        logits = torch.zeros((batch_size, self.num_classes), device=x.device)
        for model in self.models:
            logits += model(x)
        return logits

    @torch.no_grad()
    def logits_(self, *args, **kwargs):
        return self.logits(*args, **kwargs)
homa/ensemble/concerns/StoresModels.py
ADDED
import torch
from copy import deepcopy
from typing import List
from ...vision import Model


class StoresModels:
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.models: List[torch.nn.Module] = []

    def record(self, model: Model | torch.nn.Module):
        model_: torch.nn.Module | None = None
        if isinstance(model, Model):
            model_ = deepcopy(model.network)
        elif isinstance(model, torch.nn.Module):
            model_ = deepcopy(model)
        else:
            raise TypeError("Wrong input to ensemble record")
        self.models.append(model_)

    def push(self, *args, **kwargs):
        self.record(*args, **kwargs)

    def append(self, *args, **kwargs):
        self.record(*args, **kwargs)

    def add(self, *args, **kwargs):
        self.record(*args, **kwargs)
homa/ensemble/concerns/__init__.py
ADDED
from .CalculatesMetricNecessities import CalculatesMetricNecessities
from .PredictsProbabilities import PredictsProbabilities
from .ReportsClassificationMetrics import ReportsClassificationMetrics
from .ReportsEnsembleAccuracy import ReportsEnsembleAccuracy
from .ReportsEnsembleF1 import ReportsEnsembleF1
from .ReportsEnsembleKappa import ReportsEnsembleKappa
from .ReportsLogits import ReportsLogits
from .ReportsSize import ReportsSize
from .StoresModels import StoresModels
homa/loss/LogitNormLoss.py
ADDED
import torch
from .Loss import Loss


class LogitNormLoss(Loss):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def forward(self, logits, target):
        norms = torch.norm(logits, p=2, dim=-1, keepdim=True) + 1e-7
        normalized_logits = torch.div(logits, norms)
        return torch.nn.functional.cross_entropy(normalized_logits, target)
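For reference (sketch): the loss is plain cross-entropy on L2-normalized logits, `CE(z / (||z||_2 + 1e-7), y)`, a common recipe against overconfident predictions.

```py
import torch
from homa.loss import LogitNormLoss

criterion = LogitNormLoss()
logits = torch.randn(4, 10)
target = torch.randint(0, 10, (4,))
loss = criterion.forward(logits, target)  # explicit .forward(): Loss may not be an nn.Module
print(loss)
```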
homa/loss/Loss.py
ADDED
homa/loss/__init__.py
ADDED
homa/settings.py
ADDED
homa/torch/__init__.py
ADDED
from .helpers import *
homa/torch/helpers.py
ADDED
homa/utils.py
ADDED
homa/vision/Model.py
ADDED
homa/vision/Resnet.py
ADDED
import torch
from .modules import ResnetModule
from .Classifier import Classifier
from .concerns import Trainable, ReportsMetrics
from ..device import get_device


class Resnet(Classifier, Trainable, ReportsMetrics):
    def __init__(self, num_classes: int, lr: float = 0.001):
        super().__init__()
        self.network = ResnetModule(num_classes).to(get_device())
        self.criterion = torch.nn.CrossEntropyLoss()
        self.optimizer = torch.optim.SGD(self.network.parameters(), lr=lr, momentum=0.9)
homa/vision/StochasticClassifier.py
ADDED
import torch
from ..activations import SGELU, LaLU, CaLU, TripleStateSwish


class StochasticClassifier:
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._activation_pool = [
            torch.nn.ELU,
            torch.nn.PReLU,
            torch.nn.ReLU,
            torch.nn.ReLU6,
            torch.nn.RReLU,
            torch.nn.SELU,
            torch.nn.CELU,
            torch.nn.GELU,
            torch.nn.SiLU,
            torch.nn.Mish,
            SGELU,
            LaLU,
            CaLU,
            TripleStateSwish,
        ]
homa/vision/StochasticResnet.py
ADDED
import torch
from .Resnet import Resnet
from .StochasticClassifier import StochasticClassifier
from .utils import replace_activations


class StochasticResnet(Resnet, StochasticClassifier):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        replace_activations(self.network, torch.nn.ReLU, self._activation_pool)
homa/vision/StochasticSwin.py
ADDED
import torch
from .Swin import Swin
from .StochasticClassifier import StochasticClassifier
from .utils import replace_activations


class StochasticSwin(Swin, StochasticClassifier):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        replace_activations(self.network, torch.nn.GELU, self._activation_pool)
homa/vision/Swin.py
ADDED
import torch
from .Classifier import Classifier
from .concerns import Trainable, ReportsMetrics
from .modules import SwinModule


class Swin(Classifier, Trainable, ReportsMetrics):
    def __init__(self, num_classes: int, lr: float = 0.0001):
        super().__init__()
        self.network = SwinModule(num_classes=num_classes)
        self.optimizer = torch.optim.AdamW(self.network.parameters(), lr=lr)
        self.criterion = torch.nn.CrossEntropyLoss()
homa/vision/concerns/HasLabels.py
ADDED
import torch


class HasLabels:
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def predict(self, x: torch.Tensor):
        return torch.argmax(self.logits(x), dim=1)

    @torch.no_grad()
    def predict_(self, x: torch.Tensor):
        return torch.argmax(self.logits(x), dim=1)
homa/vision/concerns/HasLogits.py
ADDED
import torch


class HasLogits:
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def logits(self, x: torch.Tensor) -> torch.Tensor:
        return self.network(x)

    @torch.no_grad()  # the trailing-underscore variants elsewhere are no-grad
    def logits_(self, x: torch.Tensor) -> torch.Tensor:
        return self.network(x)
homa/vision/concerns/ReportsAccuracy.py
ADDED
from torch import Tensor
from torch.utils.data.dataloader import DataLoader
from ...device import get_device


class ReportsAccuracy:
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def accuracy_tensors(self, x: Tensor, y: Tensor) -> float:
        predictions = self.predict_(x)
        return (predictions == y).float().mean().item()

    def accuracy_dataloader(self, dataloader: DataLoader):
        correct, total = 0, 0
        for x, y in dataloader:
            x, y = x.to(get_device()), y.to(get_device())
            predictions = self.predict_(x)
            correct += (predictions == y).sum().item()
            total += y.numel()
        return correct / total if total > 0 else 0.0

    def accuracy(self, x: Tensor | DataLoader, y: Tensor | None = None) -> float:
        self.network.eval()
        if isinstance(x, DataLoader):
            return self.accuracy_dataloader(x)
        return self.accuracy_tensors(x, y)
homa/vision/concerns/Trainable.py
ADDED
from torch import Tensor
from torch.utils.data.dataloader import DataLoader
from .HasLogits import HasLogits
from .HasProbabilities import HasProbabilities
from .HasLabels import HasLabels
from ...device import get_device


class Trainable(HasLogits, HasProbabilities, HasLabels):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def train(self, x: Tensor | DataLoader, y: Tensor | None = None):
        if y is None and isinstance(x, DataLoader):
            self.train_dataloader(x)
            return
        self.train_tensors(x, y)

    def train_tensors(self, x: Tensor, y: Tensor):
        self.network.train()
        self.optimizer.zero_grad()
        loss = self.criterion(self.network(x).float(), y)
        loss.backward()
        self.optimizer.step()

    def train_dataloader(self, dataloader: DataLoader):
        for x, y in dataloader:
            x, y = x.to(get_device()), y.to(get_device())
            self.train_tensors(x, y)
homa/vision/modules/ResnetModule.py
ADDED
import torch
from torchvision.models import resnet50
from torch.nn.init import kaiming_uniform_ as kaiming


class ResnetModule(torch.nn.Module):
    def __init__(self, num_classes: int):
        super().__init__()
        self.num_classes = num_classes
        self._create_encoder()
        self._create_fc()

    def _create_encoder(self):
        self.encoder = resnet50(weights="DEFAULT")
        self.encoder.fc = torch.nn.Identity()

    def _create_fc(self):
        self.fc = torch.nn.Linear(2048, self.num_classes)
        kaiming(self.fc.weight, mode="fan_in", nonlinearity="relu")

    def forward(self, images: torch.Tensor):
        features = self.encoder(images)
        return self.fc(features)
homa/vision/modules/SwinModule.py
ADDED
import torch
from torchvision.models import swin_v2_b
from torch.nn.init import kaiming_uniform_ as kaiming


class SwinModule(torch.nn.Module):
    def __init__(self, num_classes: int):
        super().__init__()
        self.num_classes = num_classes
        self._create_encoder()
        self._create_fc()

    def _create_encoder(self):
        self.encoder = swin_v2_b(weights="DEFAULT")
        self.encoder.head = torch.nn.Identity()

    def _create_fc(self):
        self.fc = torch.nn.Linear(1024, self.num_classes)
        kaiming(self.fc.weight, mode="fan_in", nonlinearity="relu")

    def forward(self, images: torch.Tensor):
        features = self.encoder(images)
        return self.fc(features)
homa/vision/utils.py
ADDED
import torch
import random


def replace_activations(module, needle: type, candidates: list):
    # the original shadowed `module` with the loop variable and called
    # setattr on the child instead of the parent; replace matches on the
    # parent and recurse into everything else
    for name, child in module.named_children():
        if isinstance(child, needle):
            factory = random.choice(candidates)
            new_module = factory()
            setattr(module, name, new_module)
        else:
            replace_activations(child, needle, candidates)
homa-0.2.3.dist-info/METADATA
ADDED
Metadata-Version: 2.4
Name: homa
Version: 0.2.3
Summary: A curated list of machine learning and deep learning helpers.
Author-email: Taha Shieenavaz <tahashieenavaz@gmail.com>
Requires-Python: >=3.7
Description-Content-Type: text/markdown
Requires-Dist: numpy
Requires-Dist: torch
Requires-Dist: fire

# Core

### Device Management

```py
import torch
from homa import cpu, mps, cuda, device

torch.tensor([1, 2, 3, 4, 5]).to(cpu())
torch.tensor([1, 2, 3, 4, 5]).to(cuda())
torch.tensor([1, 2, 3, 4, 5]).to(mps())
torch.tensor([1, 2, 3, 4, 5]).to(device())
```

# Vision

## Resnet

This is the standard ResNet50 module.

You can train the model with a `DataLoader` object.

```py
from homa.vision import Resnet

model = Resnet(num_classes=10, lr=0.001)
for epoch in range(10):
    model.train(train_dataloader)
```

Alternatively, you can unpack the batches from the `DataLoader` yourself.

```py
from homa.vision import Resnet

model = Resnet(num_classes=10, lr=0.001)
for epoch in range(10):
    for x, y in train_dataloader:
        model.train(x, y)
```

## StochasticResnet

This is a ResNet whose activation functions are randomly replaced from a pool of alternative activations; see the [paper](https://www.mdpi.com/1424-8220/22/16/6129) for details.

You can train the model with a `DataLoader` object.

```py
from homa.vision import StochasticResnet

model = StochasticResnet(num_classes=10, lr=0.001)
for epoch in range(10):
    model.train(train_dataloader)
```

Alternatively, you can unpack the batches from the `DataLoader` yourself.

```py
from homa.vision import StochasticResnet

model = StochasticResnet(num_classes=10, lr=0.001)
for epoch in range(10):
    for x, y in train_dataloader:
        model.train(x, y)
```
homa-0.2.3.dist-info/RECORD
ADDED
homa/__init__.py,sha256=NBYFKizG8UASiz5HLsEBqzXNGlWr78xm4sLr5hxKvjU,46
homa/device.py,sha256=9kKXfpYfnEk2cFQWPfcJrVloHgC_SSbP4I8IRY9TYk4,343
homa/settings.py,sha256=CPZDPvs1380O7SY7FcSKol8kBVFVVYFgSJl3YEyJuZ0,263
homa/utils.py,sha256=dPp6TItJwWxBqxmkMzUuCtX_BzdPT-kMOZyXRGVMCbQ,70
homa/activations/__init__.py,sha256=zrEyWrYuQAD9R8GJBJUChfkW8_lsMUEwHvAcASoac3k,44
homa/activations/utils.py,sha256=4Tw5gdeWcxtOeAHVUb0sH1fjy3E0GPe-Z_2DfQQfNU0,695
homa/activations/classes/APLU.py,sha256=cUf6LUjY8TewXe_V1avO_7IcOtY66Hd6Dyk_1K4R3Ms,1555
homa/activations/classes/ActivationFunction.py,sha256=XUw7Pa5E-CPG6rPL8Us_pDH7xCZqY0c2P9xtnJMyX44,141
homa/activations/classes/CaLU.py,sha256=n0drKwp4GstHql69p4S58KeVctdaQ1B5oK_AIoI_okk,331
homa/activations/classes/ERF.py,sha256=tDgHbo7UNFU93XPlcQCBRRxPMksr-FOE19mlsqfzmU8,252
homa/activations/classes/Elliot.py,sha256=RDxERH9vFh6FYwtZXKHMDmLVG2ia1UfOoW18Gm2_8hM,298
homa/activations/classes/GCU.py,sha256=hXwty6WPovnhPGAxQDd4bIixujdoMOORN-77imVri7s,199
homa/activations/classes/GaLU.py,sha256=5QHnHsUsLAy28s-LTxtwRN-t1hO1tg9xtWmkzE1T7Ck,308
homa/activations/classes/GaussianReLU.py,sha256=ufNeVnod6dxkPLmdd9ye-xt0SIWap2dehX14_YxSZVM,2051
homa/activations/classes/LaLU.py,sha256=UiulXzSTmnoU_Gp8qKigFoL6efonqbldUlsBBlm9mB8,356
homa/activations/classes/Logish.py,sha256=DQzmqSoCN6V1HTbwpWPDnft7EEg1lSU5uCBNQY3J6t8,187
homa/activations/classes/MeLU.py,sha256=f13h2AAQCwp9soR3RWbMAA4Bl38oqRdBAsdzh6Bf4k8,321
homa/activations/classes/MexicanReLU.py,sha256=vfDa1lWI-PgY4ztDY34aeBMaJ2rOyAYt5ifZBG0DS0c,1946
homa/activations/classes/SGELU.py,sha256=AaNmXRoFQ68Xsgt4sSWMZxnSCTR5OD5ZEuqxxg1mvfg,358
homa/activations/classes/SReLU.py,sha256=xyChK3G2HPpM7C8icQNfMzrOm142boDLY31n9yXqPtg,1472
homa/activations/classes/SmallGaLU.py,sha256=ERrK-g3QMZTNFDzUyiSLAovymEpV5h1x1696CN5K4Zg,289
homa/activations/classes/Smish.py,sha256=hsr5FS4KywsCmsuFUKP-4pKoXkJK0hhRVDleq_CFGX0,198
homa/activations/classes/TeLU.py,sha256=qU5x0EskjQs6d5rCtbL91C6cMAm8vjDnjQNMX0LcEt8,180
homa/activations/classes/TripleStateSwish.py,sha256=UG5BGY29wUEJaryClB2rDM90s0jt5vMJF9Kv-5M4Rgo,507
homa/activations/classes/WideMeLU.py,sha256=ieJjTjnK9JJtApPFGpmTynu3G8YlyH5jw6qnhkJkStI,421
homa/activations/classes/__init__.py,sha256=A3tViJPce5NM1sLA6C2_sacUB63_uUsNqVq1vBV2NZ4,547
homa/cli/HomaCommand.py,sha256=zUWDLpXb6zO6DWZ71MuhPNawjXTqkXJPEwZxWN7Ejx0,226
homa/cli/namespaces/CacheNamespace.py,sha256=QXGljzj287stzTx0y_MXnqvCgPLqd7WjSPop2WDe14E,784
homa/cli/namespaces/MakeNamespace.py,sha256=5G6LHk3lDkXROz7uq4jYE0DyO_V7JvnhJ33IFCiqYro,590
homa/cli/namespaces/__init__.py,sha256=zAKUGPH4wcacxfH5Qvidp-uOuHdfzhan6kvVI6eMKA8,84
homa/ensemble/Ensemble.py,sha256=GNkXEV7Nli8lHSTQ3qTTCTeSBwST1PLZS5wxpKpeC5U,290
homa/ensemble/__init__.py,sha256=1pk2W-NbgfDFh9WLKZVLUk2E3PTjVZ5Bap9dQEnrs9o,31
homa/ensemble/concerns/CalculatesMetricNecessities.py,sha256=QccROg_FOp_X2T_lZDg8p1DMZhPYdO-7aEdnebRXMsY,825
homa/ensemble/concerns/PredictsProbabilities.py,sha256=7rmI66DzE7-QGoJgZEk-9fu5YQvJW-4ZnMn_dWEEhqU,440
homa/ensemble/concerns/ReportsClassificationMetrics.py,sha256=bg__cdCKp2U1H9qN1aOJH4BoX98oIvt8XaPDGApJhSM,395
homa/ensemble/concerns/ReportsEnsembleAccuracy.py,sha256=AX5X3VGOm7DfdonW0N7FFgUwEr7wnsojRSVEULEii7c,380
homa/ensemble/concerns/ReportsEnsembleF1.py,sha256=hdtdCQrWaFJNUn1KP9cAmi_q_EA4FYnpkBMlYLjzRZg,296
homa/ensemble/concerns/ReportsEnsembleKappa.py,sha256=ZRbtrFCTD84EDql6ZL1xeWtTLFxpO5Y5tQaUlR6_0jw,300
homa/ensemble/concerns/ReportsLogits.py,sha256=vTGuC9NR4rno3Mkbm0MhL8f7YopuCErGyjIorxamKTM,461
homa/ensemble/concerns/ReportsSize.py,sha256=S7lo_Wu6rDnuqyAcv6AI6jspaBhcpfsirpp9RVD8c20,238
homa/ensemble/concerns/StoresModels.py,sha256=PNoaoAOx4v8rercxXHmf7zqVIPGYM4APzIHHEb3RwT0,850
homa/ensemble/concerns/__init__.py,sha256=X0F_b2Jsv0XpiNhYwJsl-dfPsBOdEeW53LQPE4xQD0w,479
homa/loss/LogitNormLoss.py,sha256=LJMzRA1WoJ7aDYTV-FYGhgo8DMkcpv7e8_74qiJ4zT8,386
homa/loss/Loss.py,sha256=COUr_idShYgAP8xKCxcaXbyUyAoJg7IOON0ARTQykmQ,21
homa/loss/__init__.py,sha256=4mPVzme2_-M64bgBu1cANIfBFAL0voa5I71-ceMr_qk,64
homa/torch/__init__.py,sha256=HTxCVaw1TLgpHMH8guB3hHYQ80cX6_fSEoPT_hz2Y8w,23
homa/torch/helpers.py,sha256=CLbTCXRrroM0n4PfM-K_xFavs4dCZJEu_L7hdgb1DCI,134
homa/vision/Classifier.py,sha256=bAypqREQVuPamnc8hpbLCwmW9Uly3T1rvrlbMxXp1eA,61
homa/vision/Model.py,sha256=JIeVpHJwirHfsDfYYbLsu0kt7bGf4nhMQGIOagUDKw4,22
homa/vision/Resnet.py,sha256=Uitf58bEzIKkZd-F4FTvJ8nmhoFHlzZjJTvBPXEt2Iw,513
homa/vision/StochasticClassifier.py,sha256=dQaN0hYZYRRj5OrSJ-g_gj4pQw-KbjHzoak2sYjs07g,589
homa/vision/StochasticResnet.py,sha256=cEL_wbMAy_TbINOlHGDQIalWfIb_GR4m7s-XkecQknY,353
homa/vision/StochasticSwin.py,sha256=tV6g0NF9aMGEVyFRJjXLGkke4VpzcZjsLMkH9AKPCVM,345
homa/vision/Swin.py,sha256=6_lkPjgNt5d8TtMJ-LFwDtFt5DQkB4hXs3n7SiaglvU,459
homa/vision/__init__.py,sha256=byjM7ZijxY-FzukuohutbZKb2g7qgIMEKXYrtAx0MQU,157
homa/vision/utils.py,sha256=vKpkP_-8VvoxNc8s6oVtdhTM2TScYrj8C25zld3nnko,396
homa/vision/concerns/HasLabels.py,sha256=fM6nHLeQaEaWDlV6R8NQ5hgOSiwspPxOIwj-nvYXbP0,321
homa/vision/concerns/HasLogits.py,sha256=oStX4NCV7zwxI7Vj23M8wQSlY1xoSmAYJ_6cBNJpVCk,290
homa/vision/concerns/HasProbabilities.py,sha256=m1_ObS2BNYO-WVCNVMiHXzC3XAsyb88_0N4BWVDwCw0,221
homa/vision/concerns/ReportsAccuracy.py,sha256=DD0YTr5i8JMllIJTQn88Dn711yjZ2uiecaTi7WqpOEw,986
homa/vision/concerns/ReportsMetrics.py,sha256=93Hw_JBUbwfkrJNJA1xFSQ4cqRwzbSv4nPU524PGF6I,169
homa/vision/concerns/Trainable.py,sha256=SRCW3XpG9_DQgubyqhALlYDHwAWNzVVFjshUv1ecuEQ,988
homa/vision/concerns/__init__.py,sha256=mrw1YvN-GpQPvMwDF00KxnFkksPKo23RWM4KRioURsg,234
homa/vision/modules/ResnetModule.py,sha256=eFudBnILD6OmgQtcW_CQQ8aZ62NEa4HyZ15-lobTtt0,712
homa/vision/modules/SwinModule.py,sha256=h7wq1YdKoN6-7C3FVFA0bpkAET_30002iTRbjZxziFQ,714
homa/vision/modules/__init__.py,sha256=zVMYB9IAO_xZylC1-N3p8ymHgEkAE2sBbuVz8K5Y1kk,74
homa-0.2.3.dist-info/METADATA,sha256=W_8QUBx6Ii1kJf6eMXy9SfkYAW9n6FbYs_q9JuYM_Qc,1759
homa-0.2.3.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
homa-0.2.3.dist-info/entry_points.txt,sha256=tJZzjs-f2QvFe3ES8Qta8IE5sAbeE8-cyZ_UtbgqG4s,51
homa-0.2.3.dist-info/top_level.txt,sha256=tmOfy2tuaAwc3W5-i6j61_vYJsXgR4ivBWkhJ3ZtJDc,5
homa-0.2.3.dist-info/RECORD,,
homa-0.2.3.dist-info/top_level.txt
ADDED
homa