homa-0.0.18.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release. This version of homa might be problematic.

Files changed (57)
  1. homa-0.0.18/PKG-INFO +21 -0
  2. homa-0.0.18/README.md +10 -0
  3. homa-0.0.18/pyproject.toml +28 -0
  4. homa-0.0.18/setup.cfg +4 -0
  5. homa-0.0.18/src/homa/__init__.py +2 -0
  6. homa-0.0.18/src/homa/activations/__init__.py +2 -0
  7. homa-0.0.18/src/homa/activations/classes/APLU.py +48 -0
  8. homa-0.0.18/src/homa/activations/classes/GALU.py +51 -0
  9. homa-0.0.18/src/homa/activations/classes/MELU.py +50 -0
  10. homa-0.0.18/src/homa/activations/classes/PDELU.py +39 -0
  11. homa-0.0.18/src/homa/activations/classes/SReLU.py +49 -0
  12. homa-0.0.18/src/homa/activations/classes/SmallGALU.py +39 -0
  13. homa-0.0.18/src/homa/activations/classes/StochasticActivation.py +20 -0
  14. homa-0.0.18/src/homa/activations/classes/WideMELU.py +61 -0
  15. homa-0.0.18/src/homa/activations/classes/__init__.py +8 -0
  16. homa-0.0.18/src/homa/activations/utils.py +27 -0
  17. homa-0.0.18/src/homa/cli/HomaCommand.py +12 -0
  18. homa-0.0.18/src/homa/cli/namespaces/CacheNamespace.py +29 -0
  19. homa-0.0.18/src/homa/cli/namespaces/MakeNamespace.py +18 -0
  20. homa-0.0.18/src/homa/cli/namespaces/__init__.py +2 -0
  21. homa-0.0.18/src/homa/device.py +25 -0
  22. homa-0.0.18/src/homa/ensemble/Ensemble.py +18 -0
  23. homa-0.0.18/src/homa/ensemble/__init__.py +1 -0
  24. homa-0.0.18/src/homa/ensemble/concerns/CalculatesMetricNecessities.py +20 -0
  25. homa-0.0.18/src/homa/ensemble/concerns/HasNetwork.py +5 -0
  26. homa-0.0.18/src/homa/ensemble/concerns/HasStateDicts.py +8 -0
  27. homa-0.0.18/src/homa/ensemble/concerns/PredictsProbabilities.py +11 -0
  28. homa-0.0.18/src/homa/ensemble/concerns/RecordsStateDictionaries.py +23 -0
  29. homa-0.0.18/src/homa/ensemble/concerns/ReportsClassificationMetrics.py +13 -0
  30. homa-0.0.18/src/homa/ensemble/concerns/ReportsEnsembleAccuracy.py +10 -0
  31. homa-0.0.18/src/homa/ensemble/concerns/ReportsEnsembleF1.py +10 -0
  32. homa-0.0.18/src/homa/ensemble/concerns/ReportsEnsembleKappa.py +10 -0
  33. homa-0.0.18/src/homa/ensemble/concerns/ReportsLogits.py +13 -0
  34. homa-0.0.18/src/homa/ensemble/concerns/ReportsSize.py +11 -0
  35. homa-0.0.18/src/homa/ensemble/concerns/__init__.py +10 -0
  36. homa-0.0.18/src/homa/settings.py +12 -0
  37. homa-0.0.18/src/homa/torch/Module.py +8 -0
  38. homa-0.0.18/src/homa/torch/__init__.py +2 -0
  39. homa-0.0.18/src/homa/torch/helpers.py +6 -0
  40. homa-0.0.18/src/homa/utils.py +2 -0
  41. homa-0.0.18/src/homa/vision/Model.py +2 -0
  42. homa-0.0.18/src/homa/vision/Resnet.py +18 -0
  43. homa-0.0.18/src/homa/vision/StochasticResnet.py +8 -0
  44. homa-0.0.18/src/homa/vision/__init__.py +3 -0
  45. homa-0.0.18/src/homa/vision/modules/ResnetModule.py +23 -0
  46. homa-0.0.18/src/homa/vision/modules/StochasticResnetModule.py +9 -0
  47. homa-0.0.18/src/homa/vision/modules/__init__.py +2 -0
  48. homa-0.0.18/src/homa/vision/utils.py +21 -0
  49. homa-0.0.18/src/homa.egg-info/PKG-INFO +21 -0
  50. homa-0.0.18/src/homa.egg-info/SOURCES.txt +55 -0
  51. homa-0.0.18/src/homa.egg-info/dependency_links.txt +1 -0
  52. homa-0.0.18/src/homa.egg-info/entry_points.txt +2 -0
  53. homa-0.0.18/src/homa.egg-info/requires.txt +3 -0
  54. homa-0.0.18/src/homa.egg-info/top_level.txt +1 -0
  55. homa-0.0.18/tests/test_ensemble.py +28 -0
  56. homa-0.0.18/tests/test_resnet.py +21 -0
  57. homa-0.0.18/tests/test_stochastic_resnet.py +20 -0
homa-0.0.18/PKG-INFO ADDED
@@ -0,0 +1,21 @@
+ Metadata-Version: 2.4
+ Name: homa
+ Version: 0.0.18
+ Summary: A curated list of machine learning and deep learning helpers.
+ Author-email: Taha Shieenavaz <tahashieenavaz@gmail.com>
+ Requires-Python: >=3.7
+ Description-Content-Type: text/markdown
+ Requires-Dist: numpy
+ Requires-Dist: torch
+ Requires-Dist: fire
+
+ ## Device Management
+
+ ```py
+ from homa import cpu, mps, cuda, device
+
+ torch.tensor([1, 2, 3, 4, 5]).to(cpu())
+ torch.tensor([1, 2, 3, 4, 5]).to(cuda())
+ torch.tensor([1, 2, 3, 4, 5]).to(mps())
+ torch.tensor([1, 2, 3, 4, 5]).to(device())
+ ```
homa-0.0.18/README.md ADDED
@@ -0,0 +1,10 @@
+ ## Device Management
+
+ ```py
+ from homa import cpu, mps, cuda, device
+
+ torch.tensor([1, 2, 3, 4, 5]).to(cpu())
+ torch.tensor([1, 2, 3, 4, 5]).to(cuda())
+ torch.tensor([1, 2, 3, 4, 5]).to(mps())
+ torch.tensor([1, 2, 3, 4, 5]).to(device())
+ ```
homa-0.0.18/pyproject.toml ADDED
@@ -0,0 +1,28 @@
+ [build-system]
+ requires = ["setuptools>=61.0"]
+ build-backend = "setuptools.build_meta"
+
+ [project]
+ name = "homa"
+ version = "0.0.18"
+ description = "A curated list of machine learning and deep learning helpers."
+ authors = [
+     { name="Taha Shieenavaz", email="tahashieenavaz@gmail.com" },
+ ]
+ readme = "README.md"
+ requires-python = ">=3.7"
+
+ dependencies = [
+     "numpy",
+     "torch",
+     "fire"
+ ]
+
+ [project.scripts]
+ homa = "homa.cli.HomaCommand:main"
+
+ [tool.setuptools.packages.find]
+ where = ["src"]
+
+ [tool.setuptools.package-data]
+ "homa.cli" = ["*"]
homa-0.0.18/setup.cfg ADDED
@@ -0,0 +1,4 @@
+ [egg_info]
+ tag_build =
+ tag_date = 0
+
homa-0.0.18/src/homa/__init__.py ADDED
@@ -0,0 +1,2 @@
+ from .device import *
+ from .settings import *
homa-0.0.18/src/homa/activations/__init__.py ADDED
@@ -0,0 +1,2 @@
+ from .classes import *
+ from .utils import *
homa-0.0.18/src/homa/activations/classes/APLU.py ADDED
@@ -0,0 +1,48 @@
+ import torch
+
+
+ class APLU(torch.nn.Module):
+     def __init__(self, max_input: float = 1.0):
+         super(APLU, self).__init__()
+         self.max_input = max_input
+         self.alpha = None
+         self.beta = None
+         self.gamma = None
+         self.xi = None
+         self.psi = None
+         self.mu = None
+         self._num_channels = None
+
+     def _initialize_parameters(self, x):
+         if x.ndim < 2:
+             raise ValueError(
+                 f"Input tensor must have at least 2 dimensions (N, C), but got shape {x.shape}"
+             )
+
+         num_channels = x.shape[1]
+         self._num_channels = num_channels
+
+         param_shape = [1] * x.ndim
+         param_shape[1] = num_channels
+
+         self.alpha = torch.nn.Parameter(torch.zeros(param_shape))
+         self.beta = torch.nn.Parameter(torch.zeros(param_shape))
+         self.gamma = torch.nn.Parameter(torch.zeros(param_shape))
+
+         self.xi = torch.nn.Parameter(self.max_input * torch.rand(param_shape))
+         self.psi = torch.nn.Parameter(self.max_input * torch.rand(param_shape))
+         self.mu = torch.nn.Parameter(self.max_input * torch.rand(param_shape))
+
+     def forward(self, x):
+         if self.alpha is None:
+             self._initialize_parameters(x)
+
+         a = torch.relu(x)
+
+         # the following terms are the hinges
+         b = self.alpha * torch.relu(-x + self.xi)
+         c = self.beta * torch.relu(-x + self.psi)
+         d = self.gamma * torch.relu(-x + self.mu)
+         z = a + b + c + d
+
+         return z
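For orientation, a minimal usage sketch of APLU (assuming the package is installed and a 4D NCHW input); the per-channel parameters are created lazily on the first forward pass:

```py
import torch
from homa.activations import APLU

act = APLU(max_input=1.0)
x = torch.randn(8, 16, 32, 32)  # (N, C, H, W)
y = act(x)                      # parameters are created on this first call
print(y.shape)                  # torch.Size([8, 16, 32, 32])
print(act.alpha.shape)          # torch.Size([1, 16, 1, 1]), one value per channel
```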
homa-0.0.18/src/homa/activations/classes/GALU.py ADDED
@@ -0,0 +1,51 @@
+ import torch
+
+
+ class GALU(torch.nn.Module):
+     def __init__(self, max_input: float = 1.0):
+         super(GALU, self).__init__()
+         if max_input <= 0:
+             raise ValueError("max_input must be positive.")
+         self.max_input = max_input
+         self.alpha = None
+         self.beta = None
+         self.gamma = None
+         self.delta = None
+         self._num_channels = None
+
+     def _initialize_parameters(self, x):
+         if x.ndim < 2:
+             raise ValueError(
+                 f"Input tensor must have at least 2 dimensions (N, C), but got shape {x.shape}"
+             )
+
+         num_channels = x.shape[1]
+         self._num_channels = num_channels
+         param_shape = [1] * x.ndim
+         param_shape[1] = num_channels
+         self.alpha = torch.nn.Parameter(torch.zeros(param_shape))
+         self.beta = torch.nn.Parameter(torch.zeros(param_shape))
+         self.gamma = torch.nn.Parameter(torch.zeros(param_shape))
+         self.delta = torch.nn.Parameter(torch.zeros(param_shape))
+
+     def forward(self, x):
+         if self.alpha is None:
+             self._initialize_parameters(x)
+
+         zero = torch.tensor(0.0, device=x.device, dtype=x.dtype)
+         x_norm = x / self.max_input
+         part_prelu = torch.relu(x_norm) + self.alpha * torch.min(x_norm, zero)
+         part_beta = self.beta * (
+             torch.relu(1.0 - torch.abs(x_norm - 1.0))
+             + torch.min(torch.abs(x_norm - 3.0) - 1.0, zero)
+         )
+         part_gamma = self.gamma * (
+             torch.relu(0.5 - torch.abs(x_norm - 0.5))
+             + torch.min(torch.abs(x_norm - 1.5) - 0.5, zero)
+         )
+         part_delta = self.delta * (
+             torch.relu(0.5 - torch.abs(x_norm - 2.5))
+             + torch.min(torch.abs(x_norm - 3.5) - 0.5, zero)
+         )
+         z = part_prelu + part_beta + part_gamma + part_delta
+         return z * self.max_input
homa-0.0.18/src/homa/activations/classes/MELU.py ADDED
@@ -0,0 +1,50 @@
+ import torch
+
+
+ class MELU(torch.nn.Module):
+     def __init__(self, maxInput: float = 1.0):
+         super().__init__()
+         self.maxInput = float(maxInput)
+         self.alpha = None
+         self.beta = None
+         self.gamma = None
+         self.delta = None
+         self.xi = None
+         self.psi = None
+         self._initialized = False
+
+     def _initialize_parameters(self, X: torch.Tensor):
+         if X.dim() != 4:
+             raise ValueError(
+                 f"Expected 4D input (B, C, H, W), but got {X.dim()}D input."
+             )
+         num_channels = X.shape[1]
+         shape = (1, num_channels, 1, 1)
+         self.alpha = torch.nn.Parameter(torch.zeros(shape))
+         self.beta = torch.nn.Parameter(torch.zeros(shape))
+         self.gamma = torch.nn.Parameter(torch.zeros(shape))
+         self.delta = torch.nn.Parameter(torch.zeros(shape))
+         self.xi = torch.nn.Parameter(torch.zeros(shape))
+         self.psi = torch.nn.Parameter(torch.zeros(shape))
+         self._initialized = True
+
+     def forward(self, X: torch.Tensor) -> torch.Tensor:
+         if not self._initialized:
+             self._initialize_parameters(X)
+         X_norm = X / self.maxInput
+         Y = torch.roll(X_norm, shifts=-1, dims=1)
+         term1 = torch.relu(X_norm)
+         term2 = self.alpha * torch.clamp(X_norm, max=0)
+         dist_sq_beta = (X_norm - 2) ** 2 + (Y - 2) ** 2
+         dist_sq_gamma = (X_norm - 1) ** 2 + (Y - 1) ** 2
+         dist_sq_delta = (X_norm - 1) ** 2 + (Y - 3) ** 2
+         dist_sq_xi = (X_norm - 3) ** 2 + (Y - 1) ** 2
+         dist_sq_psi = (X_norm - 3) ** 2 + (Y - 3) ** 2
+         term3 = self.beta * torch.sqrt(torch.relu(2 - dist_sq_beta))
+         term4 = self.gamma * torch.sqrt(torch.relu(1 - dist_sq_gamma))
+         term5 = self.delta * torch.sqrt(torch.relu(1 - dist_sq_delta))
+         term6 = self.xi * torch.sqrt(torch.relu(1 - dist_sq_xi))
+         term7 = self.psi * torch.sqrt(torch.relu(1 - dist_sq_psi))
+         Z_norm = term1 + term2 + term3 + term4 + term5 + term6 + term7
+         Z = Z_norm * self.maxInput
+         return Z
homa-0.0.18/src/homa/activations/classes/PDELU.py ADDED
@@ -0,0 +1,39 @@
+ import torch
+
+
+ class PDELU(torch.nn.Module):
+     def __init__(self, theta: float = 0.5):
+         super(PDELU, self).__init__()
+         if theta == 1.0:
+             raise ValueError(
+                 "theta cannot be 1.0, as it would cause a division by zero."
+             )
+         self.theta = theta
+         self._power_val = 1.0 / (1.0 - self.theta)
+         self.alpha = None  # created lazily on the first forward pass, matching the `is None` check below
+         self._num_channels = None
+
+     def _initialize_parameters(self, x: torch.Tensor):
+         if x.ndim < 2:
+             raise ValueError(
+                 f"Input tensor must have at least 2 dimensions (N, C), but got shape {x.shape}"
+             )
+
+         num_channels = x.shape[1]
+         self._num_channels = num_channels
+         param_shape = [1] * x.ndim
+         param_shape[1] = num_channels
+         init_tensor = torch.zeros(param_shape) + 0.1
+         self.alpha = torch.nn.Parameter(init_tensor)
+
+     def forward(self, x: torch.Tensor):
+         if self.alpha is None:
+             self._initialize_parameters(x)
+
+         zero = torch.tensor(0.0, device=x.device, dtype=x.dtype)
+         positive_part = torch.relu(x)
+         inner_term = torch.relu(1.0 + (1.0 - self.theta) * x)
+         powered_term = torch.pow(inner_term, self._power_val)
+         subtracted_term = powered_term - 1.0
+         negative_part = self.alpha * torch.min(subtracted_term, zero)
+         return positive_part + negative_part
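A similar lazy-initialization sketch for PDELU (hypothetical shapes; with `alpha` starting as `None`, it is created on first use):

```py
import torch
from homa.activations import PDELU

act = PDELU(theta=0.5)
x = torch.randn(2, 3, 8, 8)
print(act(x).shape)     # torch.Size([2, 3, 8, 8])
print(act.alpha.shape)  # torch.Size([1, 3, 1, 1]), initialized to 0.1
```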
homa-0.0.18/src/homa/activations/classes/SReLU.py ADDED
@@ -0,0 +1,49 @@
+ import torch
+
+
+ class SReLU(torch.nn.Module):
+     def __init__(
+         self,
+         alpha_init: float = 0.0,
+         beta_init: float = 0.0,
+         gamma_init: float = 1.0,
+         delta_init: float = 1.0,
+     ):
+         super().__init__()
+         self.alpha_init_val = alpha_init
+         self.beta_init_val = beta_init
+         self.gamma_init_val = gamma_init
+         self.delta_init_val = delta_init
+         self.alpha = torch.nn.UninitializedParameter()
+         self.beta = torch.nn.UninitializedParameter()
+         self.gamma = torch.nn.UninitializedParameter()
+         self.delta = torch.nn.UninitializedParameter()
+
+     def _initialize_parameters(self, x: torch.Tensor):
+         if isinstance(self.alpha, torch.nn.UninitializedParameter):
+             if x.dim() < 2:
+                 raise ValueError(
+                     f"Input tensor must have at least 2 dimensions (N, C), but got {x.dim()}"
+                 )
+
+             num_channels = x.shape[1]
+             param_shape = [1] * x.dim()
+             param_shape[1] = num_channels
+             self.alpha = torch.nn.Parameter(
+                 torch.full(param_shape, self.alpha_init_val)
+             )
+             self.beta = torch.nn.Parameter(torch.full(param_shape, self.beta_init_val))
+             self.gamma = torch.nn.Parameter(
+                 torch.full(param_shape, self.gamma_init_val)
+             )
+             self.delta = torch.nn.Parameter(
+                 torch.full(param_shape, self.delta_init_val)
+             )
+
+     def forward(self, x: torch.Tensor) -> torch.Tensor:
+         self._initialize_parameters(x)
+         start = self.beta + self.alpha * (x - self.beta)
+         finish = self.delta + self.gamma * (x - self.delta)
+         out = torch.where(x < self.beta, start, x)
+         out = torch.where(x > self.delta, finish, out)
+         return out
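SReLU acts as the identity between its two learned thresholds and extrapolates linearly outside them; a small sketch with assumed values:

```py
import torch
from homa.activations import SReLU

srelu = SReLU(alpha_init=0.1, beta_init=-1.0, gamma_init=0.1, delta_init=1.0)
x = torch.linspace(-3.0, 3.0, steps=7).view(1, 7)  # treat the 7 values as channels
print(srelu(x))
# values in [-1, 1] pass through unchanged; outside, slope 0.1 applies
```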
homa-0.0.18/src/homa/activations/classes/SmallGALU.py ADDED
@@ -0,0 +1,39 @@
+ import torch
+
+
+ class SmallGALU(torch.nn.Module):
+     def __init__(self, max_input: float = 1.0):
+         super(SmallGALU, self).__init__()
+         if max_input <= 0:
+             raise ValueError("max_input must be positive.")
+         self.max_input = max_input
+         self.alpha = None
+         self.beta = None
+         self._num_channels = None
+
+     def _initialize_parameters(self, x):
+         if x.ndim < 2:
+             raise ValueError(
+                 f"Input tensor must have at least 2 dimensions (N, C), but got shape {x.shape}"
+             )
+
+         num_channels = x.shape[1]
+         self._num_channels = num_channels
+         param_shape = [1] * x.ndim
+         param_shape[1] = num_channels
+         self.alpha = torch.nn.Parameter(torch.zeros(param_shape))
+         self.beta = torch.nn.Parameter(torch.zeros(param_shape))
+
+     def forward(self, x):
+         if self.alpha is None:
+             self._initialize_parameters(x)
+
+         zero = torch.tensor(0.0, device=x.device, dtype=x.dtype)
+         x_norm = x / self.max_input
+         part_prelu = torch.relu(x_norm) + self.alpha * torch.min(x_norm, zero)
+         part_beta = self.beta * (
+             torch.relu(1.0 - torch.abs(x_norm - 1.0))
+             + torch.min(torch.abs(x_norm - 3.0) - 1.0, zero)
+         )
+         z = part_prelu + part_beta
+         return z * self.max_input
homa-0.0.18/src/homa/activations/classes/StochasticActivation.py ADDED
@@ -0,0 +1,20 @@
+ import torch
+ import random
+
+ from .APLU import APLU
+ from .GALU import GALU
+ from .SmallGALU import SmallGALU
+ from .MELU import MELU
+ from .WideMELU import WideMELU
+ from .PDELU import PDELU
+ from .SReLU import SReLU
+
+
+ class StochasticActivation(torch.nn.Module):
+     def __init__(self):
+         super().__init__()
+         self.gate = random.choice([APLU, GALU, SmallGALU, MELU, WideMELU, PDELU, SReLU])
+         self.gate = self.gate()
+
+     def forward(self, x: torch.Tensor) -> torch.Tensor:
+         return self.gate(x)
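Each StochasticActivation instance draws one of the seven activation classes at construction time, so a network that instantiates it per layer ends up with a random mix; a quick illustration:

```py
from homa.activations import StochasticActivation

for _ in range(3):
    act = StochasticActivation()
    print(type(act.gate).__name__)  # e.g. GALU, SReLU, PDELU (random per instance)
```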
homa-0.0.18/src/homa/activations/classes/WideMELU.py ADDED
@@ -0,0 +1,61 @@
+ import torch
+
+
+ class WideMELU(torch.nn.Module):
+     def __init__(self, maxInput: float = 1.0):
+         super().__init__()
+         self.maxInput = float(maxInput)
+         self.alpha = None
+         self.beta = None
+         self.gamma = None
+         self.delta = None
+         self.xi = None
+         self.psi = None
+         self.theta = None
+         self.lam = None
+         self._initialized = False
+
+     def _initialize_parameters(self, X: torch.Tensor):
+         if X.dim() != 4:
+             raise ValueError(
+                 f"Expected 4D input (B, C, H, W), but got {X.dim()}D input."
+             )
+
+         num_channels = X.shape[1]
+         shape = (1, num_channels, 1, 1)
+
+         self.alpha = torch.nn.Parameter(torch.zeros(shape))
+         self.beta = torch.nn.Parameter(torch.zeros(shape))
+         self.gamma = torch.nn.Parameter(torch.zeros(shape))
+         self.delta = torch.nn.Parameter(torch.zeros(shape))
+         self.xi = torch.nn.Parameter(torch.zeros(shape))
+         self.psi = torch.nn.Parameter(torch.zeros(shape))
+         self.theta = torch.nn.Parameter(torch.zeros(shape))
+         self.lam = torch.nn.Parameter(torch.zeros(shape))
+         self._initialized = True
+
+     def forward(self, X: torch.Tensor) -> torch.Tensor:
+         if not self._initialized:
+             self._initialize_parameters(X)
+         X_norm = X / self.maxInput
+         Y = torch.roll(X_norm, shifts=-1, dims=1)
+         term1 = torch.relu(X_norm)
+         term2 = self.alpha * torch.clamp(X_norm, max=0)
+         dist_sq_beta = (X_norm - 2) ** 2 + (Y - 2) ** 2
+         dist_sq_gamma = (X_norm - 1) ** 2 + (Y - 1) ** 2
+         dist_sq_delta = (X_norm - 1) ** 2 + (Y - 3) ** 2
+         dist_sq_xi = (X_norm - 3) ** 2 + (Y - 1) ** 2
+         dist_sq_psi = (X_norm - 3) ** 2 + (Y - 3) ** 2
+         dist_sq_theta = (X_norm - 1) ** 2 + (Y - 2) ** 2
+         dist_sq_lambda = (X_norm - 3) ** 2 + (Y - 2) ** 2
+
+         term3 = self.beta * torch.sqrt(torch.relu(2 - dist_sq_beta))
+         term4 = self.gamma * torch.sqrt(torch.relu(1 - dist_sq_gamma))
+         term5 = self.delta * torch.sqrt(torch.relu(1 - dist_sq_delta))
+         term6 = self.xi * torch.sqrt(torch.relu(1 - dist_sq_xi))
+         term7 = self.psi * torch.sqrt(torch.relu(1 - dist_sq_psi))
+         term8 = self.theta * torch.sqrt(torch.relu(1 - dist_sq_theta))
+         term9 = self.lam * torch.sqrt(torch.relu(1 - dist_sq_lambda))
+         Z_norm = term1 + term2 + term3 + term4 + term5 + term6 + term7 + term8 + term9
+         Z = Z_norm * self.maxInput
+         return Z
homa-0.0.18/src/homa/activations/classes/__init__.py ADDED
@@ -0,0 +1,8 @@
+ from .APLU import APLU
+ from .GALU import GALU
+ from .SmallGALU import SmallGALU
+ from .MELU import MELU
+ from .WideMELU import WideMELU
+ from .PDELU import PDELU
+ from .SReLU import SReLU
+ from .StochasticActivation import StochasticActivation
homa-0.0.18/src/homa/activations/utils.py ADDED
@@ -0,0 +1,27 @@
+ import torch
+
+
+ def negative_part(x):
+     return torch.minimum(x, torch.zeros_like(x))
+
+
+ def positive_part(x):
+     return torch.maximum(x, torch.zeros_like(x))
+
+
+ def as_channel_parameters(parameter: torch.Tensor, x: torch.Tensor):
+     shape = [1] * x.dim()
+     shape[1] = -1
+     return parameter.view(*shape)
+
+
+ def device_compatibility_check(model, x: torch.Tensor):
+     for p in model.parameters():
+         if p.device != x.device or p.dtype != x.dtype:
+             p.data = p.data.to(device=x.device, dtype=x.dtype)
+
+
+ def phi_hat(x, a, lam):
+     term_pos = torch.maximum(lam - torch.abs(x - a), torch.zeros_like(x))
+     term_neg = torch.minimum(torch.abs(x - (a + 2 * lam)) - lam, torch.zeros_like(x))
+     return term_pos + term_neg
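`phi_hat` builds the triangular "hat" bump (and its mirrored negative dip) that the GALU-style activations are composed of; a small numeric check with assumed inputs:

```py
import torch
from homa.activations.utils import negative_part, phi_hat, positive_part

x = torch.linspace(-1.0, 5.0, steps=7)
print(phi_hat(x, a=1.0, lam=1.0))  # peaks (+1) at x == a, dips (-1) at x == a + 2 * lam
print(torch.equal(positive_part(x) + negative_part(x), x))  # True
```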
homa-0.0.18/src/homa/cli/HomaCommand.py ADDED
@@ -0,0 +1,12 @@
+ import fire
+ from .namespaces import MakeNamespace, CacheNamespace
+
+
+ class HomaCommand:
+     def __init__(self):
+         self.make = MakeNamespace()
+         self.cache = CacheNamespace()
+
+
+ def main():
+     fire.Fire(HomaCommand)
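Fire turns the namespace attributes into subcommands. A programmatic sketch equivalent to the installed `homa` console script (the shell form would be e.g. `homa cache clear` or `homa make trait foo.Bar`):

```py
import fire
from homa.cli.HomaCommand import HomaCommand

# Same dispatch the console script performs; `command` is the argv to parse.
fire.Fire(HomaCommand, command=["cache", "clear"])
```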
homa-0.0.18/src/homa/cli/namespaces/CacheNamespace.py ADDED
@@ -0,0 +1,29 @@
+ import shutil
+ import sys
+ from pathlib import Path
+
+
+ class CacheNamespace:
+     def clear(self):
+         root = Path.cwd()
+         removed = 0
+         errors: list[str] = []
+
+         for candidate in root.rglob("__pycache__"):
+             if not candidate.is_dir():
+                 continue
+             try:
+                 shutil.rmtree(candidate)
+                 removed += 1
+             except OSError as exc:
+                 errors.append(f"{candidate}: {exc}")
+
+         if errors:
+             print("Failed to remove the following paths:", file=sys.stderr)
+             for error in errors:
+                 print(f"  - {error}", file=sys.stderr)
+             return 1
+
+         return (
+             f"Removed {removed} __pycache__ director{'ies' if removed != 1 else 'y'}."
+         )
homa-0.0.18/src/homa/cli/namespaces/MakeNamespace.py ADDED
@@ -0,0 +1,18 @@
+ from pathlib import Path
+
+
+ class MakeNamespace:
+     def trait(self, name: str):
+         class_name = name.split(".")[-1]
+         file = name.replace(".", "/") + ".py"
+         path = Path(file)
+         parent = path.parent
+         parent.mkdir(parents=True, exist_ok=True)
+         path.touch()
+
+         # copy the template into the new file
+         current_path = Path(__file__).parent.parent.resolve()
+         template_path = current_path / "templates" / "trait.txt"
+         content = template_path.read_text()
+         content = content.replace("{{CLASS}}", class_name)
+         path.write_text(content)
homa-0.0.18/src/homa/cli/namespaces/__init__.py ADDED
@@ -0,0 +1,2 @@
+ from .MakeNamespace import MakeNamespace
+ from .CacheNamespace import CacheNamespace
homa-0.0.18/src/homa/device.py ADDED
@@ -0,0 +1,25 @@
+ import torch
+
+
+ def get_device():
+     if torch.backends.mps.is_available():
+         return mps()
+     if torch.cuda.is_available():
+         return cuda()
+     return cpu()
+
+
+ def cpu():
+     return torch.device("cpu")
+
+
+ def cuda():
+     return torch.device("cuda")
+
+
+ def mps():
+     return torch.device("mps")
+
+
+ def device():
+     return get_device()
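`device()` resolves to MPS, then CUDA, then CPU, in that order, so calling code stays device-agnostic; for instance:

```py
import torch
from homa import device

x = torch.randn(4, 3).to(device())  # MPS if available, else CUDA, else CPU
print(x.device)
```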
homa-0.0.18/src/homa/ensemble/Ensemble.py ADDED
@@ -0,0 +1,18 @@
+ from .concerns import (
+     ReportsSize,
+     RecordsStateDictionaries,
+     ReportsClassificationMetrics,
+     HasNetwork,
+     PredictsProbabilities,
+ )
+
+
+ class Ensemble(
+     ReportsSize,
+     ReportsClassificationMetrics,
+     RecordsStateDictionaries,
+     PredictsProbabilities,
+     HasNetwork,
+ ):
+     def __init__(self):
+         super().__init__()
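A minimal sketch of the intended recording flow, assuming the vision models shown further below (note that constructing `Resnet` downloads pretrained resnet50 weights):

```py
from homa.ensemble import Ensemble
from homa.vision import Resnet

ensemble = Ensemble()
for _ in range(3):
    ensemble.record(Resnet(num_classes=10, lr=0.001))  # stores each network's state_dict
print(ensemble.size)  # 3
```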
homa-0.0.18/src/homa/ensemble/__init__.py ADDED
@@ -0,0 +1 @@
+ from .Ensemble import Ensemble
homa-0.0.18/src/homa/ensemble/concerns/CalculatesMetricNecessities.py ADDED
@@ -0,0 +1,20 @@
+ import torch
+
+
+ class CalculatesMetricNecessities:
+     def __init__(self, *args, **kwargs):
+         super().__init__(*args, **kwargs)
+
+     def metric_necessities(self, dataloader):
+         all_predictions = []
+         all_labels = []
+         for x, y in dataloader:
+             batch_logits_list = []
+             for model in self.models:  # assumes the host ensemble populates self.models
+                 batch_logits_list.append(model(x))
+             all_batch_logits = torch.stack(batch_logits_list)
+             avg_logits = torch.mean(all_batch_logits, dim=0)
+             _, preds = torch.max(avg_logits, 1)
+             all_predictions.extend(preds.cpu().numpy())
+             all_labels.extend(y.cpu().numpy())
+         return all_predictions, all_labels
homa-0.0.18/src/homa/ensemble/concerns/HasNetwork.py ADDED
@@ -0,0 +1,5 @@
+ class HasNetwork:
+     def __init__(self, *args, **kwargs):
+         super().__init__(*args, **kwargs)
+         # Filled with a copy of the first model's network that is recorded into the ensemble.
+         self.network = None
homa-0.0.18/src/homa/ensemble/concerns/HasStateDicts.py ADDED
@@ -0,0 +1,8 @@
+ from typing import List
+ from collections import OrderedDict
+
+
+ class HasStateDicts:
+     def __init__(self, *args, **kwargs):
+         super().__init__(*args, **kwargs)
+         self.state_dicts: List[OrderedDict] = []
homa-0.0.18/src/homa/ensemble/concerns/PredictsProbabilities.py ADDED
@@ -0,0 +1,11 @@
+ import torch
+ from .ReportsLogits import ReportsLogits
+
+
+ class PredictsProbabilities(ReportsLogits):
+     def __init__(self, *args, **kwargs):
+         super().__init__(*args, **kwargs)
+
+     def predict(self, x: torch.Tensor) -> torch.Tensor:
+         logits = self.logits(x)
+         return torch.nn.functional.softmax(logits, dim=1)
homa-0.0.18/src/homa/ensemble/concerns/RecordsStateDictionaries.py ADDED
@@ -0,0 +1,23 @@
+ from copy import deepcopy
+ from .HasStateDicts import HasStateDicts
+ from ...vision import Model
+
+
+ class RecordsStateDictionaries(HasStateDicts):
+     def __init__(self, *args, **kwargs):
+         super().__init__(*args, **kwargs)
+
+     def record(self, model: Model):
+         if self.network is None:
+             self.network = deepcopy(model.network)
+
+         self.state_dicts.append(model.network.state_dict())
+
+     def push(self, *args, **kwargs):
+         self.record(*args, **kwargs)
+
+     def append(self, *args, **kwargs):
+         self.record(*args, **kwargs)
+
+     def add(self, *args, **kwargs):
+         self.record(*args, **kwargs)
homa-0.0.18/src/homa/ensemble/concerns/ReportsClassificationMetrics.py ADDED
@@ -0,0 +1,13 @@
+ from .ReportsEnsembleF1 import ReportsEnsembleF1
+ from .ReportsEnsembleAccuracy import ReportsEnsembleAccuracy
+ from .ReportsEnsembleKappa import ReportsEnsembleKappa
+ from .CalculatesMetricNecessities import CalculatesMetricNecessities
+
+
+ class ReportsClassificationMetrics(
+     CalculatesMetricNecessities,
+     ReportsEnsembleF1,
+     ReportsEnsembleAccuracy,
+     ReportsEnsembleKappa,
+ ):
+     pass
homa-0.0.18/src/homa/ensemble/concerns/ReportsEnsembleAccuracy.py ADDED
@@ -0,0 +1,10 @@
+ from sklearn.metrics import accuracy_score as accuracy
+
+
+ class ReportsEnsembleAccuracy:
+     def __init__(self, *args, **kwargs):
+         super().__init__(*args, **kwargs)
+
+     def accuracy(self, dataloader) -> float:
+         predictions, labels = self.metric_necessities(dataloader)
+         return accuracy(labels, predictions)
homa-0.0.18/src/homa/ensemble/concerns/ReportsEnsembleF1.py ADDED
@@ -0,0 +1,10 @@
+ from sklearn.metrics import f1_score as f1
+
+
+ class ReportsEnsembleF1:
+     def __init__(self, *args, **kwargs):
+         super().__init__(*args, **kwargs)
+
+     def f1(self, dataloader) -> float:
+         predictions, labels = self.metric_necessities(dataloader)
+         return f1(labels, predictions, average="weighted")
homa-0.0.18/src/homa/ensemble/concerns/ReportsEnsembleKappa.py ADDED
@@ -0,0 +1,10 @@
+ from sklearn.metrics import cohen_kappa_score as kappa
+
+
+ class ReportsEnsembleKappa:
+     def __init__(self, *args, **kwargs):
+         super().__init__(*args, **kwargs)
+
+     def kappa(self, dataloader) -> float:
+         predictions, labels = self.metric_necessities(dataloader)
+         return kappa(labels, predictions)
homa-0.0.18/src/homa/ensemble/concerns/ReportsLogits.py ADDED
@@ -0,0 +1,13 @@
+ import torch
+
+
+ class ReportsLogits:
+     def __init__(self, *args, **kwargs):
+         super().__init__(*args, **kwargs)
+
+     def logits(self, x: torch.Tensor) -> torch.Tensor:
+         batch_size = x.shape[0]
+         logits = torch.zeros((batch_size, self.num_classes))  # num_classes is expected from the host class
+         for model in self.models:
+             logits += model(x)
+         return logits
homa-0.0.18/src/homa/ensemble/concerns/ReportsSize.py ADDED
@@ -0,0 +1,11 @@
+ class ReportsSize:
+     def __init__(self, *args, **kwargs):
+         super().__init__(*args, **kwargs)
+
+     @property
+     def size(self):
+         return len(self.state_dicts)
+
+     @property
+     def length(self):
+         return len(self.state_dicts)
homa-0.0.18/src/homa/ensemble/concerns/__init__.py ADDED
@@ -0,0 +1,10 @@
+ from .CalculatesMetricNecessities import CalculatesMetricNecessities
+ from .HasNetwork import HasNetwork
+ from .PredictsProbabilities import PredictsProbabilities
+ from .RecordsStateDictionaries import RecordsStateDictionaries
+ from .ReportsClassificationMetrics import ReportsClassificationMetrics
+ from .ReportsEnsembleAccuracy import ReportsEnsembleAccuracy
+ from .ReportsEnsembleF1 import ReportsEnsembleF1
+ from .ReportsEnsembleKappa import ReportsEnsembleKappa
+ from .ReportsLogits import ReportsLogits
+ from .ReportsSize import ReportsSize
homa-0.0.18/src/homa/settings.py ADDED
@@ -0,0 +1,12 @@
+ import json
+
+
+ def settings(key: str, _cache: dict = {}):  # the mutable default intentionally acts as a cache
+     if not _cache:
+         with open("settings.json", "r") as f:
+             _cache.update(json.load(f))
+     return _cache.get(key)
+
+
+ def get_settings(*args, **kwargs):
+     return settings(*args, **kwargs)
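The helper reads `settings.json` from the current working directory once and memoizes it; assuming a file like `{"lr": 0.01}`:

```py
from homa import settings

print(settings("lr"))       # 0.01
print(settings("missing"))  # None; subsequent calls reuse the cached dict
```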
homa-0.0.18/src/homa/torch/Module.py ADDED
@@ -0,0 +1,8 @@
+ import torch
+ from ..device import get_device
+
+
+ class Module(torch.nn.Module):
+     def __init__(self):
+         super(Module, self).__init__()
+         self.to(get_device())
homa-0.0.18/src/homa/torch/__init__.py ADDED
@@ -0,0 +1,2 @@
+ from .Module import Module
+ from .helpers import *
homa-0.0.18/src/homa/torch/helpers.py ADDED
@@ -0,0 +1,6 @@
+ import torch
+ from ..device import get_device
+
+
+ def tensor(*args, **kwargs):
+     return torch.tensor(*args, **kwargs).to(get_device())
homa-0.0.18/src/homa/utils.py ADDED
@@ -0,0 +1,2 @@
+ def invoke(base, *args, **kwargs):
+     return base()(*args, **kwargs)
homa-0.0.18/src/homa/vision/Model.py ADDED
@@ -0,0 +1,2 @@
+ class Model:
+     pass
homa-0.0.18/src/homa/vision/Resnet.py ADDED
@@ -0,0 +1,18 @@
+ import torch
+ from .modules import ResnetModule
+ from .Model import Model
+
+
+ class Resnet(Model):
+     def __init__(self, num_classes: int, lr: float):
+         super().__init__()
+         self.network = ResnetModule(num_classes)
+         self.optimizer = torch.optim.SGD(self.network.parameters(), lr=lr, momentum=0.9)
+         self.criterion = torch.nn.CrossEntropyLoss()
+
+     def train(self, x: torch.Tensor, y: torch.Tensor):
+         self.network.train()
+         self.optimizer.zero_grad()
+         loss = self.criterion(self.network(x), y)  # forward pass was missing; the loss needs logits, not raw images
+         loss.backward()
+         self.optimizer.step()
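A single-batch training sketch under assumed shapes (ImageNet-sized inputs, ten classes):

```py
import torch
from homa.vision import Resnet

model = Resnet(num_classes=10, lr=0.001)  # downloads pretrained resnet50 weights
x = torch.randn(4, 3, 224, 224)
y = torch.randint(0, 10, (4,))
model.train(x, y)                         # one SGD step: forward, loss, backward
```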
homa-0.0.18/src/homa/vision/StochasticResnet.py ADDED
@@ -0,0 +1,8 @@
+ from .modules import StochasticResnetModule
+ from .Resnet import Resnet
+
+
+ class StochasticResnet(Resnet):
+     def __init__(self, **kwargs):
+         super().__init__(**kwargs)
+         self.network = StochasticResnetModule(kwargs["num_classes"])  # overrides the plain ResnetModule set by Resnet
homa-0.0.18/src/homa/vision/__init__.py ADDED
@@ -0,0 +1,3 @@
+ from .Model import Model
+ from .Resnet import Resnet
+ from .StochasticResnet import StochasticResnet
homa-0.0.18/src/homa/vision/modules/ResnetModule.py ADDED
@@ -0,0 +1,23 @@
+ import torch
+ from torchvision.models import resnet50
+ from torch.nn.init import kaiming_uniform_ as kaiming
+
+
+ class ResnetModule(torch.nn.Module):
+     def __init__(self, num_classes: int):
+         super().__init__()
+         self.num_classes = num_classes
+         self._create_encoder()
+         self._create_fc()
+
+     def _create_encoder(self):
+         self.encoder = resnet50(weights="DEFAULT")
+         self.encoder.fc = torch.nn.Identity()
+
+     def _create_fc(self):
+         self.fc = torch.nn.Linear(2048, self.num_classes)
+         kaiming(self.fc.weight, mode="fan_in", nonlinearity="relu")
+
+     def forward(self, images: torch.Tensor):
+         features = self.encoder(images)
+         return self.fc(features)
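A quick shape check of the module on its own (assumed batch of two 224x224 RGB images):

```py
import torch
from homa.vision.modules import ResnetModule

module = ResnetModule(num_classes=5)
logits = module(torch.randn(2, 3, 224, 224))
print(logits.shape)  # torch.Size([2, 5]): 2048-d resnet50 features -> Linear head
```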
homa-0.0.18/src/homa/vision/modules/StochasticResnetModule.py ADDED
@@ -0,0 +1,9 @@
+ from .ResnetModule import ResnetModule
+ from ..utils import replace_relu
+ from ...activations import StochasticActivation
+
+
+ class StochasticResnetModule(ResnetModule):
+     def __init__(self, *args, **kwargs):
+         super().__init__(*args, **kwargs)
+         replace_relu(self, StochasticActivation)
homa-0.0.18/src/homa/vision/modules/__init__.py ADDED
@@ -0,0 +1,2 @@
+ from .ResnetModule import ResnetModule
+ from .StochasticResnetModule import StochasticResnetModule
homa-0.0.18/src/homa/vision/utils.py ADDED
@@ -0,0 +1,21 @@
+ import torch
+
+
+ def replace_modules(
+     model: torch.nn.Module, find, replacement  # find: a module class or a list of module classes
+ ) -> int:
+     if not isinstance(find, list):
+         find = [find]
+
+     replaced = 0
+     for parent in model.modules():
+         for name, child in list(parent.named_children()):
+             for needle in find:
+                 if isinstance(child, needle):
+                     setattr(parent, name, replacement())
+                     replaced += 1
+     return replaced
+
+
+ def replace_relu(model: torch.nn.Module, replacement: torch.nn.Module):
+     return replace_modules(model, torch.nn.ReLU, replacement)
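`replace_modules` swaps matching child modules in place and returns the count; a small self-contained example with a hypothetical toy network:

```py
import torch
from homa.vision.utils import replace_relu

class Toy(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.body = torch.nn.Sequential(torch.nn.Linear(4, 4), torch.nn.ReLU())

    def forward(self, x):
        return self.body(x)

net = Toy()
print(replace_relu(net, torch.nn.Tanh))  # 1: the ReLU becomes a fresh Tanh()
print(net.body[1])                       # Tanh()
```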
homa-0.0.18/src/homa.egg-info/PKG-INFO ADDED
@@ -0,0 +1,21 @@
+ Metadata-Version: 2.4
+ Name: homa
+ Version: 0.0.18
+ Summary: A curated list of machine learning and deep learning helpers.
+ Author-email: Taha Shieenavaz <tahashieenavaz@gmail.com>
+ Requires-Python: >=3.7
+ Description-Content-Type: text/markdown
+ Requires-Dist: numpy
+ Requires-Dist: torch
+ Requires-Dist: fire
+
+ ## Device Management
+
+ ```py
+ from homa import cpu, mps, cuda, device
+
+ torch.tensor([1, 2, 3, 4, 5]).to(cpu())
+ torch.tensor([1, 2, 3, 4, 5]).to(cuda())
+ torch.tensor([1, 2, 3, 4, 5]).to(mps())
+ torch.tensor([1, 2, 3, 4, 5]).to(device())
+ ```
homa-0.0.18/src/homa.egg-info/SOURCES.txt ADDED
@@ -0,0 +1,55 @@
+ README.md
+ pyproject.toml
+ src/homa/__init__.py
+ src/homa/device.py
+ src/homa/settings.py
+ src/homa/utils.py
+ src/homa.egg-info/PKG-INFO
+ src/homa.egg-info/SOURCES.txt
+ src/homa.egg-info/dependency_links.txt
+ src/homa.egg-info/entry_points.txt
+ src/homa.egg-info/requires.txt
+ src/homa.egg-info/top_level.txt
+ src/homa/activations/__init__.py
+ src/homa/activations/utils.py
+ src/homa/activations/classes/APLU.py
+ src/homa/activations/classes/GALU.py
+ src/homa/activations/classes/MELU.py
+ src/homa/activations/classes/PDELU.py
+ src/homa/activations/classes/SReLU.py
+ src/homa/activations/classes/SmallGALU.py
+ src/homa/activations/classes/StochasticActivation.py
+ src/homa/activations/classes/WideMELU.py
+ src/homa/activations/classes/__init__.py
+ src/homa/cli/HomaCommand.py
+ src/homa/cli/namespaces/CacheNamespace.py
+ src/homa/cli/namespaces/MakeNamespace.py
+ src/homa/cli/namespaces/__init__.py
+ src/homa/ensemble/Ensemble.py
+ src/homa/ensemble/__init__.py
+ src/homa/ensemble/concerns/CalculatesMetricNecessities.py
+ src/homa/ensemble/concerns/HasNetwork.py
+ src/homa/ensemble/concerns/HasStateDicts.py
+ src/homa/ensemble/concerns/PredictsProbabilities.py
+ src/homa/ensemble/concerns/RecordsStateDictionaries.py
+ src/homa/ensemble/concerns/ReportsClassificationMetrics.py
+ src/homa/ensemble/concerns/ReportsEnsembleAccuracy.py
+ src/homa/ensemble/concerns/ReportsEnsembleF1.py
+ src/homa/ensemble/concerns/ReportsEnsembleKappa.py
+ src/homa/ensemble/concerns/ReportsLogits.py
+ src/homa/ensemble/concerns/ReportsSize.py
+ src/homa/ensemble/concerns/__init__.py
+ src/homa/torch/Module.py
+ src/homa/torch/__init__.py
+ src/homa/torch/helpers.py
+ src/homa/vision/Model.py
+ src/homa/vision/Resnet.py
+ src/homa/vision/StochasticResnet.py
+ src/homa/vision/__init__.py
+ src/homa/vision/utils.py
+ src/homa/vision/modules/ResnetModule.py
+ src/homa/vision/modules/StochasticResnetModule.py
+ src/homa/vision/modules/__init__.py
+ tests/test_ensemble.py
+ tests/test_resnet.py
+ tests/test_stochastic_resnet.py
homa-0.0.18/src/homa.egg-info/entry_points.txt ADDED
@@ -0,0 +1,2 @@
+ [console_scripts]
+ homa = homa.cli.HomaCommand:main
homa-0.0.18/src/homa.egg-info/requires.txt ADDED
@@ -0,0 +1,3 @@
+ numpy
+ torch
+ fire
homa-0.0.18/src/homa.egg-info/top_level.txt ADDED
@@ -0,0 +1 @@
+ homa
homa-0.0.18/tests/test_ensemble.py ADDED
@@ -0,0 +1,28 @@
+ import pytest
+ from homa.ensemble import Ensemble
+ from homa.vision import Resnet  # the package ships homa.vision, not homa.models
+
+
+ @pytest.fixture
+ def ensemble():
+     return Ensemble()
+
+
+ @pytest.fixture
+ def resnet():
+     return Resnet(lr=0.001, num_classes=10)
+
+
+ def test_ensemble_initialization(ensemble):
+     assert isinstance(ensemble, Ensemble)
+
+
+ def test_ensemble_records_models(ensemble, resnet):
+     assert ensemble.network is None
+     ensemble.record(resnet)
+     assert ensemble.network is not None
+     ensemble.append(resnet)
+     ensemble.push(resnet)
+     ensemble.add(resnet)
+     assert ensemble.size == 4
+     assert ensemble.length == 4
homa-0.0.18/tests/test_resnet.py ADDED
@@ -0,0 +1,21 @@
+ import pytest
+ import torch
+ from homa.vision import Resnet, Model  # the package ships homa.vision, not homa.models
+ from homa.vision.modules import ResnetModule
+
+
+ @pytest.fixture
+ def resnet_model():
+     model = Resnet(num_classes=5, lr=0.001)
+     return model
+
+
+ def test_resnet_initialization(resnet_model):
+     assert isinstance(resnet_model, Resnet)
+     assert isinstance(resnet_model, Model)
+     assert hasattr(resnet_model, "network")
+     assert hasattr(resnet_model, "optimizer")
+     assert hasattr(resnet_model, "criterion")
+     assert isinstance(resnet_model.network, ResnetModule)
+     assert isinstance(resnet_model.optimizer, torch.optim.SGD)
+     assert isinstance(resnet_model.criterion, torch.nn.CrossEntropyLoss)
@@ -0,0 +1,20 @@
1
+ import pytest
2
+ import torch
3
+ from homa.models import StochasticResnet, Model
4
+ from homa.models.modules import ResnetModule
5
+
6
+
7
+ @pytest.fixture
8
+ def stochastic_resnet_model():
9
+ return StochasticResnet(num_classes=5, lr=0.001)
10
+
11
+
12
+ def test_resnet_initialization(stochastic_resnet_model):
13
+ assert isinstance(stochastic_resnet_model, StochasticResnet)
14
+ assert isinstance(stochastic_resnet_model, Model)
15
+ assert hasattr(stochastic_resnet_model, "network")
16
+ assert hasattr(stochastic_resnet_model, "optimizer")
17
+ assert hasattr(stochastic_resnet_model, "criterion")
18
+ assert isinstance(stochastic_resnet_model.network, ResnetModule)
19
+ assert isinstance(stochastic_resnet_model.optimizer, torch.optim.SGD)
20
+ assert isinstance(stochastic_resnet_model.criterion, torch.nn.CrossEntropyLoss)