ilovetools 0.2.34__tar.gz → 0.2.35__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {ilovetools-0.2.34/ilovetools.egg-info → ilovetools-0.2.35}/PKG-INFO +2 -2
- ilovetools-0.2.35/ilovetools/ml/autoencoder.py +668 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35/ilovetools.egg-info}/PKG-INFO +2 -2
- {ilovetools-0.2.34 → ilovetools-0.2.35}/ilovetools.egg-info/SOURCES.txt +2 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/pyproject.toml +2 -2
- {ilovetools-0.2.34 → ilovetools-0.2.35}/setup.py +2 -2
- ilovetools-0.2.35/tests/test_autoencoder.py +363 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/LICENSE +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/MANIFEST.in +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/README.md +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/ilovetools/__init__.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/ilovetools/ai/__init__.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/ilovetools/ai/embeddings.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/ilovetools/ai/inference.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/ilovetools/ai/llm_helpers.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/ilovetools/audio/__init__.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/ilovetools/automation/__init__.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/ilovetools/automation/file_organizer.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/ilovetools/conversion/__init__.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/ilovetools/conversion/config_converter.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/ilovetools/conversion/config_converter_fixed_header.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/ilovetools/data/__init__.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/ilovetools/data/feature_engineering.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/ilovetools/data/preprocessing.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/ilovetools/database/__init__.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/ilovetools/datetime/__init__.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/ilovetools/email/__init__.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/ilovetools/email/template_engine.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/ilovetools/files/__init__.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/ilovetools/image/__init__.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/ilovetools/ml/__init__.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/ilovetools/ml/activations.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/ilovetools/ml/anomaly_detection.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/ilovetools/ml/attention.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/ilovetools/ml/augmentation.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/ilovetools/ml/clustering.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/ilovetools/ml/cnn.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/ilovetools/ml/convolution.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/ilovetools/ml/cross_validation.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/ilovetools/ml/dimensionality.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/ilovetools/ml/dropout.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/ilovetools/ml/embedding.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/ilovetools/ml/ensemble.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/ilovetools/ml/feature_selection.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/ilovetools/ml/gnn.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/ilovetools/ml/gradient_descent.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/ilovetools/ml/imbalanced.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/ilovetools/ml/interpretation.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/ilovetools/ml/loss_functions.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/ilovetools/ml/losses.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/ilovetools/ml/lr_schedulers.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/ilovetools/ml/metrics.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/ilovetools/ml/neural_network.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/ilovetools/ml/normalization.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/ilovetools/ml/normalization_advanced.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/ilovetools/ml/optimizers.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/ilovetools/ml/pipeline.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/ilovetools/ml/pooling.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/ilovetools/ml/positional_encoding.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/ilovetools/ml/recurrent.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/ilovetools/ml/regularization.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/ilovetools/ml/rnn.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/ilovetools/ml/schedulers.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/ilovetools/ml/timeseries.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/ilovetools/ml/tuning.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/ilovetools/ml/weight_init.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/ilovetools/security/__init__.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/ilovetools/security/password_checker.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/ilovetools/text/__init__.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/ilovetools/utils/__init__.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/ilovetools/utils/cache_system.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/ilovetools/utils/logger.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/ilovetools/utils/rate_limiter.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/ilovetools/utils/retry.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/ilovetools/validation/__init__.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/ilovetools/validation/data_validator.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/ilovetools/web/__init__.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/ilovetools/web/scraper.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/ilovetools/web/url_shortener.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/ilovetools.egg-info/dependency_links.txt +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/ilovetools.egg-info/requires.txt +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/ilovetools.egg-info/top_level.txt +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/requirements.txt +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/setup.cfg +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/tests/__init__.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/tests/test_activations.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/tests/test_attention.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/tests/test_augmentation.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/tests/test_cnn.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/tests/test_convolution.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/tests/test_dropout.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/tests/test_embedding.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/tests/test_gnn.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/tests/test_gradient_descent.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/tests/test_loss_functions.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/tests/test_losses.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/tests/test_lr_schedulers.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/tests/test_neural_network.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/tests/test_normalization.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/tests/test_normalization_advanced.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/tests/test_optimizers.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/tests/test_pooling.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/tests/test_positional_encoding.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/tests/test_pypi_installation.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/tests/test_recurrent.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/tests/test_regularization.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/tests/test_rnn.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/tests/test_schedulers.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/tests/test_weight_init.py +0 -0
- {ilovetools-0.2.34 → ilovetools-0.2.35}/tests/verify_positional_encoding.py +0 -0
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: ilovetools
|
|
3
|
-
Version: 0.2.
|
|
3
|
+
Version: 0.2.35
|
|
4
4
|
Summary: A comprehensive Python utility library with modular tools for AI/ML, data processing, and daily programming needs
|
|
5
5
|
Home-page: https://github.com/AliMehdi512/ilovetools
|
|
6
6
|
Author: Ali Mehdi
|
|
@@ -11,7 +11,7 @@ Project-URL: Repository, https://github.com/AliMehdi512/ilovetools
|
|
|
11
11
|
Project-URL: Issues, https://github.com/AliMehdi512/ilovetools/issues
|
|
12
12
|
Project-URL: Bug Reports, https://github.com/AliMehdi512/ilovetools/issues
|
|
13
13
|
Project-URL: Source, https://github.com/AliMehdi512/ilovetools
|
|
14
|
-
Keywords: utilities,tools,ai,ml,data-processing,automation,
|
|
14
|
+
Keywords: utilities,tools,ai,ml,data-processing,automation,autoencoder,variational-autoencoder,vae,denoising-autoencoder,sparse-autoencoder,contractive-autoencoder,encoder-decoder,latent-space,bottleneck,reconstruction-loss,kl-divergence,reparameterization-trick,dimensionality-reduction,anomaly-detection,fraud-detection,feature-learning,unsupervised-learning,generative-models,image-compression,image-denoising,elbo,probabilistic-models,deep-learning,neural-networks,pytorch,tensorflow,keras
|
|
15
15
|
Classifier: Development Status :: 3 - Alpha
|
|
16
16
|
Classifier: Intended Audience :: Developers
|
|
17
17
|
Classifier: Topic :: Software Development :: Libraries :: Python Modules
|
|
@@ -0,0 +1,668 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Autoencoder Architectures Suite
|
|
3
|
+
|
|
4
|
+
This module implements various autoencoder architectures for unsupervised learning.
|
|
5
|
+
Autoencoders learn to compress data into a latent space and reconstruct it, enabling
|
|
6
|
+
dimensionality reduction, anomaly detection, and feature learning.
|
|
7
|
+
|
|
8
|
+
Implemented Autoencoder Types:
|
|
9
|
+
1. VanillaAutoencoder - Basic autoencoder with encoder-decoder
|
|
10
|
+
2. DenoisingAutoencoder - Learns to denoise corrupted inputs
|
|
11
|
+
3. SparseAutoencoder - Enforces sparsity in latent representations
|
|
12
|
+
4. ContractiveAutoencoder - Robust to small input perturbations
|
|
13
|
+
5. VAE - Variational Autoencoder (probabilistic, generative)
|
|
14
|
+
|
|
15
|
+
Key Benefits:
|
|
16
|
+
- Unsupervised feature learning
|
|
17
|
+
- Dimensionality reduction (alternative to PCA)
|
|
18
|
+
- Anomaly detection (reconstruction error)
|
|
19
|
+
- Image compression and denoising
|
|
20
|
+
- Generative modeling (VAE)
|
|
21
|
+
|
|
22
|
+
References:
|
|
23
|
+
- Autoencoder: Hinton & Salakhutdinov, "Reducing the Dimensionality of Data with Neural Networks" (2006)
|
|
24
|
+
- Denoising: Vincent et al., "Extracting and Composing Robust Features with Denoising Autoencoders" (2008)
|
|
25
|
+
- Sparse: Ng, "Sparse Autoencoder" (2011)
|
|
26
|
+
- Contractive: Rifai et al., "Contractive Auto-Encoders" (2011)
|
|
27
|
+
- VAE: Kingma & Welling, "Auto-Encoding Variational Bayes" (2014)
|
|
28
|
+
|
|
29
|
+
Author: Ali Mehdi
|
|
30
|
+
Date: January 31, 2026
|
|
31
|
+
"""
|
|
32
|
+
|
|
33
|
+
import numpy as np
|
|
34
|
+
from typing import Optional, Tuple
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
# ============================================================================
|
|
38
|
+
# VANILLA AUTOENCODER
|
|
39
|
+
# ============================================================================
|
|
40
|
+
|
|
41
|
+
class VanillaAutoencoder:
    """
    Vanilla Autoencoder.

    Plain encoder-decoder network that squeezes its input through a
    low-dimensional bottleneck and reconstructs it. Training (done
    externally) minimises the mean squared reconstruction error:

        L = MSE(x, x_hat) = ||x - x_hat||^2

    Architecture:
        Encoder: input -> hidden layers -> latent (linear bottleneck)
        Decoder: latent -> reversed hidden layers -> sigmoid output

    Args:
        input_dim: Dimensionality of the input vectors.
        latent_dim: Size of the bottleneck (latent) representation.
        hidden_dims: Hidden layer widths (default: [512, 256]).
        activation: Hidden-layer nonlinearity: 'relu', 'sigmoid' or 'tanh';
            any other value falls back to identity (linear).

    Example:
        >>> ae = VanillaAutoencoder(input_dim=784, latent_dim=32)
        >>> x = np.random.randn(100, 784)  # 100 samples
        >>> encoded = ae.encode(x)
        >>> decoded = ae.decode(encoded)
        >>> print(encoded.shape)  # (100, 32)
        >>> print(decoded.shape)  # (100, 784)

    Use Case:
        Dimensionality reduction, feature learning, image compression

    Reference:
        Hinton & Salakhutdinov, "Reducing the Dimensionality of Data" (2006)
    """

    def __init__(self, input_dim: int, latent_dim: int,
                 hidden_dims: Optional[list] = None,
                 activation: str = 'relu'):
        self.input_dim = input_dim
        self.latent_dim = latent_dim
        # NOTE: `or` means an explicitly empty list also falls back to the default.
        self.hidden_dims = hidden_dims or [512, 256]
        self.activation = activation

        # Encoder: input -> hidden widths -> latent bottleneck.
        encoder_dims = [input_dim, *self.hidden_dims, latent_dim]
        self.encoder_weights = self._build_stack(encoder_dims)

        # Decoder mirrors the encoder: latent -> reversed hiddens -> input.
        decoder_dims = [latent_dim, *reversed(self.hidden_dims), input_dim]
        self.decoder_weights = self._build_stack(decoder_dims)

    @staticmethod
    def _build_stack(dims: list) -> list:
        """Return He-initialised (weight, bias) pairs for consecutive widths."""
        stack = []
        for fan_in, fan_out in zip(dims[:-1], dims[1:]):
            weight = np.random.randn(fan_in, fan_out) * np.sqrt(2.0 / fan_in)
            stack.append((weight, np.zeros(fan_out)))
        return stack

    def activate(self, x: np.ndarray) -> np.ndarray:
        """Apply the configured nonlinearity (identity for unknown names)."""
        if self.activation == 'relu':
            return np.maximum(0, x)
        if self.activation == 'sigmoid':
            # Clip to avoid overflow in exp for large-magnitude inputs.
            return 1 / (1 + np.exp(-np.clip(x, -500, 500)))
        if self.activation == 'tanh':
            return np.tanh(x)
        return x

    def encode(self, x: np.ndarray) -> np.ndarray:
        """
        Compress input into the latent space.

        Args:
            x: Input data (batch_size, input_dim)

        Returns:
            Latent representation (batch_size, latent_dim); the final
            bottleneck layer is linear (no activation).
        """
        h = x
        last = len(self.encoder_weights) - 1
        for layer_idx, (weight, bias) in enumerate(self.encoder_weights):
            h = h @ weight + bias
            if layer_idx != last:  # keep the bottleneck linear
                h = self.activate(h)
        return h

    def decode(self, z: np.ndarray) -> np.ndarray:
        """
        Reconstruct an input from its latent code.

        Args:
            z: Latent representation (batch_size, latent_dim)

        Returns:
            Reconstructed output (batch_size, input_dim), squashed into
            (0, 1) by a sigmoid output layer.
        """
        h = z
        last = len(self.decoder_weights) - 1
        for layer_idx, (weight, bias) in enumerate(self.decoder_weights):
            h = h @ weight + bias
            if layer_idx != last:
                h = self.activate(h)
        # Sigmoid output keeps reconstructions in (0, 1); clip avoids overflow.
        return 1 / (1 + np.exp(-np.clip(h, -500, 500)))

    def forward(self, x: np.ndarray) -> np.ndarray:
        """
        Forward pass (encode + decode).

        Args:
            x: Input data (batch_size, input_dim)

        Returns:
            Reconstructed output (batch_size, input_dim)
        """
        return self.decode(self.encode(x))

    def reconstruction_loss(self, x: np.ndarray, x_reconstructed: np.ndarray) -> float:
        """Mean squared error between input and reconstruction."""
        diff = x - x_reconstructed
        return np.mean(diff * diff)

    def __call__(self, x: np.ndarray) -> np.ndarray:
        return self.forward(x)
|
|
199
|
+
|
|
200
|
+
|
|
201
|
+
# ============================================================================
|
|
202
|
+
# DENOISING AUTOENCODER
|
|
203
|
+
# ============================================================================
|
|
204
|
+
|
|
205
|
+
class DenoisingAutoencoder(VanillaAutoencoder):
    """
    Denoising Autoencoder.

    Learns to reconstruct clean data from corrupted inputs. Noise is added
    during training to make the learned features robust.

    Formula:
        x_tilde = corrupt(x)
        L = MSE(x, decode(encode(x_tilde)))

    Args:
        input_dim: Input dimension
        latent_dim: Latent space dimension
        hidden_dims: List of hidden layer dimensions
        noise_factor: Noise level (0.0 to 1.0) (default: 0.3)
        noise_type: Type of noise ('gaussian', 'masking') (default: 'gaussian')
        activation: Hidden-layer nonlinearity forwarded to the base class
            (default: 'relu'). Previously this subclass silently ignored the
            base class's activation choice.

    Example:
        >>> dae = DenoisingAutoencoder(input_dim=784, latent_dim=32, noise_factor=0.3)
        >>> x = np.random.randn(100, 784)
        >>> x_noisy = dae.add_noise(x)
        >>> x_reconstructed = dae.forward(x_noisy)
        >>> print(x_reconstructed.shape)  # (100, 784)

    Use Case:
        Image denoising, robust feature learning, data cleaning

    Reference:
        Vincent et al., "Extracting and Composing Robust Features" (2008)
    """

    def __init__(self, input_dim: int, latent_dim: int,
                 hidden_dims: Optional[list] = None,
                 noise_factor: float = 0.3,
                 noise_type: str = 'gaussian',
                 activation: str = 'relu'):
        # Forward `activation` so the subclass keeps the base class's
        # configurability (backward compatible: default matches old behavior).
        super().__init__(input_dim, latent_dim, hidden_dims, activation)
        self.noise_factor = noise_factor
        self.noise_type = noise_type

    def add_noise(self, x: np.ndarray) -> np.ndarray:
        """
        Corrupt the input according to `noise_type`.

        Args:
            x: Clean input

        Returns:
            Noisy input, clipped to [0, 1].

        NOTE(review): the final clip assumes inputs are normalized,
        image-like data in [0, 1] — confirm before using on other domains,
        as clipping distorts data outside that range.
        """
        if self.noise_type == 'gaussian':
            # Additive zero-mean Gaussian noise scaled by noise_factor.
            x_noisy = x + np.random.randn(*x.shape) * self.noise_factor
        elif self.noise_type == 'masking':
            # Randomly zero out a noise_factor fraction of entries.
            keep = np.random.binomial(1, 1 - self.noise_factor, size=x.shape)
            x_noisy = x * keep
        else:
            # Unknown noise type: pass input through unchanged.
            x_noisy = x

        return np.clip(x_noisy, 0, 1)

    def forward(self, x: np.ndarray, add_noise: bool = True) -> np.ndarray:
        """
        Forward pass with optional corruption.

        Args:
            x: Input data
            add_noise: Whether to corrupt the input first (training mode)

        Returns:
            Reconstructed (clean) output
        """
        corrupted = self.add_noise(x) if add_noise else x
        return super().forward(corrupted)
|
|
285
|
+
|
|
286
|
+
|
|
287
|
+
# ============================================================================
|
|
288
|
+
# SPARSE AUTOENCODER
|
|
289
|
+
# ============================================================================
|
|
290
|
+
|
|
291
|
+
class SparseAutoencoder(VanillaAutoencoder):
    """
    Sparse Autoencoder.

    Enforces sparsity in latent representations, encouraging only a few
    neurons to be active. Uses L1 regularization or a KL-divergence penalty.

    Loss:
        L = MSE(x, x_hat) + lambda * sum(|z_i|)        (L1 regularization)
        or
        L = MSE(x, x_hat) + beta * KL(rho || rho_hat)  (KL divergence)

    Args:
        input_dim: Input dimension
        latent_dim: Latent space dimension
        hidden_dims: List of hidden layer dimensions
        sparsity_weight: Weight for sparsity penalty (default: 0.001)
        sparsity_type: Type of sparsity ('l1', 'kl') (default: 'l1')
        target_sparsity: Target average activation for KL (default: 0.05)
        activation: Hidden-layer nonlinearity forwarded to the base class
            (default: 'relu'). Previously this subclass silently ignored it.

    Example:
        >>> sae = SparseAutoencoder(input_dim=784, latent_dim=64, sparsity_weight=0.001)
        >>> x = np.random.randn(100, 784)
        >>> encoded = sae.encode(x)
        >>> loss = sae.total_loss(x, sae.forward(x), encoded)

    Use Case:
        Feature learning, interpretable representations, classification pretraining

    Reference:
        Ng, "Sparse Autoencoder" (2011)
    """

    def __init__(self, input_dim: int, latent_dim: int,
                 hidden_dims: Optional[list] = None,
                 sparsity_weight: float = 0.001,
                 sparsity_type: str = 'l1',
                 target_sparsity: float = 0.05,
                 activation: str = 'relu'):
        # Forward `activation` so the subclass keeps the base class's
        # configurability (backward compatible: default matches old behavior).
        super().__init__(input_dim, latent_dim, hidden_dims, activation)
        self.sparsity_weight = sparsity_weight
        self.sparsity_type = sparsity_type
        self.target_sparsity = target_sparsity

    def sparsity_penalty(self, z: np.ndarray) -> float:
        """
        Compute the sparsity penalty on a batch of latent codes.

        Args:
            z: Latent representation (batch_size, latent_dim)

        Returns:
            Sparsity penalty (0.0 for unknown sparsity_type).
        """
        if self.sparsity_type == 'l1':
            # L1 regularization: mean absolute activation.
            return np.mean(np.abs(z))

        if self.sparsity_type == 'kl':
            # KL(rho || rho_hat) treats mean activations as Bernoulli
            # probabilities. With a linear/ReLU latent, rho_hat can fall
            # outside (0, 1), which made the original expression take the
            # log of a non-positive number and return NaN — clip into the
            # open unit interval so the penalty stays finite.
            rho_hat = np.clip(np.mean(z, axis=0), 1e-8, 1 - 1e-8)
            rho = self.target_sparsity

            kl = rho * np.log(rho / rho_hat) + \
                 (1 - rho) * np.log((1 - rho) / (1 - rho_hat))

            return np.sum(kl)

        return 0.0

    def total_loss(self, x: np.ndarray, x_reconstructed: np.ndarray,
                   z: np.ndarray) -> float:
        """
        Compute total loss (reconstruction + weighted sparsity penalty).

        Args:
            x: Original input
            x_reconstructed: Reconstructed output
            z: Latent representation

        Returns:
            Total loss
        """
        reconstruction = self.reconstruction_loss(x, x_reconstructed)
        sparsity = self.sparsity_penalty(z)

        return reconstruction + self.sparsity_weight * sparsity
|
|
379
|
+
|
|
380
|
+
|
|
381
|
+
# ============================================================================
|
|
382
|
+
# CONTRACTIVE AUTOENCODER
|
|
383
|
+
# ============================================================================
|
|
384
|
+
|
|
385
|
+
class ContractiveAutoencoder(VanillaAutoencoder):
    """
    Contractive Autoencoder.

    Learns representations robust to small input perturbations by penalizing
    the Frobenius norm of the encoder's Jacobian.

    Loss:
        L = MSE(x, x_hat) + lambda * ||J_f(x)||^2_F

    where J_f(x) is the Jacobian of the encoder.

    Args:
        input_dim: Input dimension
        latent_dim: Latent space dimension
        hidden_dims: List of hidden layer dimensions
        contractive_weight: Weight for contractive penalty (default: 0.1)
        activation: Hidden-layer nonlinearity forwarded to the base class
            (default: 'relu'). Previously this subclass silently ignored it.
        n_jacobian_dims: How many input dimensions to probe when
            approximating the Jacobian (default: 10, matching the previously
            hard-coded value). Raise for a tighter, more expensive estimate.

    Example:
        >>> cae = ContractiveAutoencoder(input_dim=784, latent_dim=32)
        >>> x = np.random.randn(100, 784)
        >>> encoded = cae.encode(x)
        >>> penalty = cae.contractive_penalty(x, encoded)

    Use Case:
        Robust feature learning, invariant representations, classification

    Reference:
        Rifai et al., "Contractive Auto-Encoders" (2011)
    """

    def __init__(self, input_dim: int, latent_dim: int,
                 hidden_dims: Optional[list] = None,
                 contractive_weight: float = 0.1,
                 activation: str = 'relu',
                 n_jacobian_dims: int = 10):
        # Forward `activation` so the subclass keeps the base class's
        # configurability (backward compatible: default matches old behavior).
        super().__init__(input_dim, latent_dim, hidden_dims, activation)
        self.contractive_weight = contractive_weight
        self.n_jacobian_dims = n_jacobian_dims

    def contractive_penalty(self, x: np.ndarray, z: np.ndarray) -> float:
        """
        Approximate the contractive penalty (squared Frobenius norm of the
        encoder Jacobian) using forward finite differences.

        Args:
            x: Input (batch_size, input_dim)
            z: Latent representation encode(x) (batch_size, latent_dim)

        Returns:
            Contractive penalty averaged over the batch.
        """
        epsilon = 1e-4
        batch_size = x.shape[0]

        penalty = 0.0

        # Only probe a subset of input dimensions so the cost scales with
        # n_jacobian_dims rather than input_dim (a partial-sum estimate).
        for dim in range(min(self.n_jacobian_dims, self.input_dim)):
            # Perturb one input coordinate.
            x_perturbed = x.copy()
            x_perturbed[:, dim] += epsilon

            # Finite-difference column of the Jacobian.
            grad = (self.encode(x_perturbed) - z) / epsilon

            penalty += np.sum(grad ** 2)

        return penalty / batch_size

    def total_loss(self, x: np.ndarray, x_reconstructed: np.ndarray,
                   z: np.ndarray) -> float:
        """
        Compute total loss (reconstruction + weighted contractive penalty).

        Args:
            x: Original input
            x_reconstructed: Reconstructed output
            z: Latent representation

        Returns:
            Total loss
        """
        reconstruction = self.reconstruction_loss(x, x_reconstructed)
        contractive = self.contractive_penalty(x, z)

        return reconstruction + self.contractive_weight * contractive
|
|
473
|
+
|
|
474
|
+
|
|
475
|
+
# ============================================================================
|
|
476
|
+
# VARIATIONAL AUTOENCODER (VAE)
|
|
477
|
+
# ============================================================================
|
|
478
|
+
|
|
479
|
+
class VAE:
|
|
480
|
+
"""
|
|
481
|
+
Variational Autoencoder.
|
|
482
|
+
|
|
483
|
+
Probabilistic autoencoder that learns a distribution over latent space.
|
|
484
|
+
Enables generative modeling by sampling from the latent distribution.
|
|
485
|
+
|
|
486
|
+
Formula:
|
|
487
|
+
Encoder: q(z|x) = N(μ(x), σ²(x))
|
|
488
|
+
Decoder: p(x|z)
|
|
489
|
+
Loss: L = -E[log p(x|z)] + KL(q(z|x) || p(z))
|
|
490
|
+
= Reconstruction Loss + KL Divergence
|
|
491
|
+
|
|
492
|
+
Reparameterization Trick:
|
|
493
|
+
z = μ + σ * ε, where ε ~ N(0, 1)
|
|
494
|
+
|
|
495
|
+
Args:
|
|
496
|
+
input_dim: Input dimension
|
|
497
|
+
latent_dim: Latent space dimension
|
|
498
|
+
hidden_dims: List of hidden layer dimensions (default: [512, 256])
|
|
499
|
+
|
|
500
|
+
Example:
|
|
501
|
+
>>> vae = VAE(input_dim=784, latent_dim=32)
|
|
502
|
+
>>> x = np.random.randn(100, 784)
|
|
503
|
+
>>> x_reconstructed, mu, logvar = vae.forward(x)
|
|
504
|
+
>>> loss = vae.loss(x, x_reconstructed, mu, logvar)
|
|
505
|
+
>>> print(f"VAE loss: {loss:.4f}")
|
|
506
|
+
>>> # Generate new samples
|
|
507
|
+
>>> z_sample = np.random.randn(10, 32)
|
|
508
|
+
>>> generated = vae.decode(z_sample)
|
|
509
|
+
|
|
510
|
+
Use Case:
|
|
511
|
+
Generative modeling, image generation, latent space interpolation
|
|
512
|
+
|
|
513
|
+
Reference:
|
|
514
|
+
Kingma & Welling, "Auto-Encoding Variational Bayes" (2014)
|
|
515
|
+
"""
|
|
516
|
+
|
|
517
|
+
def __init__(self, input_dim: int, latent_dim: int,
|
|
518
|
+
hidden_dims: Optional[list] = None):
|
|
519
|
+
self.input_dim = input_dim
|
|
520
|
+
self.latent_dim = latent_dim
|
|
521
|
+
self.hidden_dims = hidden_dims or [512, 256]
|
|
522
|
+
|
|
523
|
+
# Initialize encoder weights (to mu and logvar)
|
|
524
|
+
self.encoder_weights = []
|
|
525
|
+
prev_dim = input_dim
|
|
526
|
+
|
|
527
|
+
for hidden_dim in self.hidden_dims:
|
|
528
|
+
w = np.random.randn(prev_dim, hidden_dim) * np.sqrt(2.0 / prev_dim)
|
|
529
|
+
b = np.zeros(hidden_dim)
|
|
530
|
+
self.encoder_weights.append((w, b))
|
|
531
|
+
prev_dim = hidden_dim
|
|
532
|
+
|
|
533
|
+
# Encoder to mu
|
|
534
|
+
self.w_mu = np.random.randn(prev_dim, latent_dim) * np.sqrt(2.0 / prev_dim)
|
|
535
|
+
self.b_mu = np.zeros(latent_dim)
|
|
536
|
+
|
|
537
|
+
# Encoder to logvar
|
|
538
|
+
self.w_logvar = np.random.randn(prev_dim, latent_dim) * np.sqrt(2.0 / prev_dim)
|
|
539
|
+
self.b_logvar = np.zeros(latent_dim)
|
|
540
|
+
|
|
541
|
+
# Initialize decoder weights
|
|
542
|
+
self.decoder_weights = []
|
|
543
|
+
prev_dim = latent_dim
|
|
544
|
+
|
|
545
|
+
for hidden_dim in reversed(self.hidden_dims):
|
|
546
|
+
w = np.random.randn(prev_dim, hidden_dim) * np.sqrt(2.0 / prev_dim)
|
|
547
|
+
b = np.zeros(hidden_dim)
|
|
548
|
+
self.decoder_weights.append((w, b))
|
|
549
|
+
prev_dim = hidden_dim
|
|
550
|
+
|
|
551
|
+
# Decoder to output
|
|
552
|
+
self.w_out = np.random.randn(prev_dim, input_dim) * np.sqrt(2.0 / prev_dim)
|
|
553
|
+
self.b_out = np.zeros(input_dim)
|
|
554
|
+
|
|
555
|
+
def encode(self, x: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
|
|
556
|
+
"""
|
|
557
|
+
Encode input to latent distribution parameters.
|
|
558
|
+
|
|
559
|
+
Args:
|
|
560
|
+
x: Input data (batch_size, input_dim)
|
|
561
|
+
|
|
562
|
+
Returns:
|
|
563
|
+
Tuple of (mu, logvar)
|
|
564
|
+
"""
|
|
565
|
+
h = x
|
|
566
|
+
|
|
567
|
+
for w, b in self.encoder_weights:
|
|
568
|
+
h = h @ w + b
|
|
569
|
+
h = np.maximum(0, h) # ReLU
|
|
570
|
+
|
|
571
|
+
# Compute mu and logvar
|
|
572
|
+
mu = h @ self.w_mu + self.b_mu
|
|
573
|
+
logvar = h @ self.w_logvar + self.b_logvar
|
|
574
|
+
|
|
575
|
+
return mu, logvar
|
|
576
|
+
|
|
577
|
+
def reparameterize(self, mu: np.ndarray, logvar: np.ndarray) -> np.ndarray:
|
|
578
|
+
"""
|
|
579
|
+
Reparameterization trick: z = μ + σ * ε
|
|
580
|
+
|
|
581
|
+
Args:
|
|
582
|
+
mu: Mean
|
|
583
|
+
logvar: Log variance
|
|
584
|
+
|
|
585
|
+
Returns:
|
|
586
|
+
Sampled latent vector
|
|
587
|
+
"""
|
|
588
|
+
std = np.exp(0.5 * logvar)
|
|
589
|
+
epsilon = np.random.randn(*mu.shape)
|
|
590
|
+
z = mu + std * epsilon
|
|
591
|
+
return z
|
|
592
|
+
|
|
593
|
+
def decode(self, z: np.ndarray) -> np.ndarray:
    """
    Reconstruct inputs from latent codes.

    Passes z through every decoder layer (affine + ReLU) and finishes
    with a sigmoid output layer so reconstructions land in (0, 1).

    Args:
        z: Latent representation (batch_size, latent_dim)

    Returns:
        Reconstructed output (batch_size, input_dim)
    """
    activation = z

    # Decoder trunk mirrors the encoder: affine transform + ReLU.
    for weight, bias in self.decoder_weights:
        activation = np.maximum(activation @ weight + bias, 0)

    # Final affine layer; clip logits before exp for numerical safety.
    logits = activation @ self.w_out + self.b_out
    return 1 / (1 + np.exp(-np.clip(logits, -500, 500)))
|
|
614
|
+
|
|
615
|
+
def forward(self, x: np.ndarray) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
    """
    Full VAE forward pass: encode, sample, decode.

    Args:
        x: Input data

    Returns:
        Tuple of (reconstructed, mu, logvar); mu and logvar are
        returned alongside the reconstruction so the loss can add
        the KL term.
    """
    mu, logvar = self.encode(x)
    latent = self.reparameterize(mu, logvar)
    reconstruction = self.decode(latent)
    return reconstruction, mu, logvar
|
|
630
|
+
|
|
631
|
+
def loss(self, x: np.ndarray, x_reconstructed: np.ndarray,
         mu: np.ndarray, logvar: np.ndarray) -> float:
    """
    Compute the negative ELBO: reconstruction loss + KL divergence.

    The reconstruction term is a per-sample binary cross-entropy
    (inputs are assumed to lie in [0, 1], matching the sigmoid
    decoder); the KL term is the closed-form divergence between
    the diagonal-Gaussian posterior and a standard normal prior.

    Args:
        x: Original input
        x_reconstructed: Reconstructed output
        mu: Mean of latent distribution
        logvar: Log variance of latent distribution

    Returns:
        Total VAE loss (mean over the batch)
    """
    batch_size = x.shape[0]
    eps = 1e-8  # guards log(0)

    # Binary cross-entropy, summed over all elements, averaged per sample.
    bce = x * np.log(x_reconstructed + eps) + (1 - x) * np.log(1 - x_reconstructed + eps)
    reconstruction = -np.sum(bce) / batch_size

    # Closed-form KL(q(z|x) || N(0, I)) for a diagonal Gaussian.
    kl_divergence = -0.5 * np.sum(1 + logvar - mu**2 - np.exp(logvar)) / batch_size

    return reconstruction + kl_divergence
|
|
657
|
+
|
|
658
|
+
def __call__(self, x: np.ndarray) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
    # Convenience alias: calling the model instance delegates to the full
    # forward pass and returns (reconstructed, mu, logvar).
    return self.forward(x)
|
|
660
|
+
|
|
661
|
+
|
|
662
|
+
# Public API of this module: the five autoencoder variants.
__all__ = [
    'VanillaAutoencoder',
    'DenoisingAutoencoder',
    'SparseAutoencoder',
    'ContractiveAutoencoder',
    'VAE',
]
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: ilovetools
|
|
3
|
-
Version: 0.2.34
|
|
3
|
+
Version: 0.2.35
|
|
4
4
|
Summary: A comprehensive Python utility library with modular tools for AI/ML, data processing, and daily programming needs
|
|
5
5
|
Home-page: https://github.com/AliMehdi512/ilovetools
|
|
6
6
|
Author: Ali Mehdi
|
|
@@ -11,7 +11,7 @@ Project-URL: Repository, https://github.com/AliMehdi512/ilovetools
|
|
|
11
11
|
Project-URL: Issues, https://github.com/AliMehdi512/ilovetools/issues
|
|
12
12
|
Project-URL: Bug Reports, https://github.com/AliMehdi512/ilovetools/issues
|
|
13
13
|
Project-URL: Source, https://github.com/AliMehdi512/ilovetools
|
|
14
|
-
Keywords: utilities,tools,ai,ml,data-processing,automation,
|
|
14
|
+
Keywords: utilities,tools,ai,ml,data-processing,automation,autoencoder,variational-autoencoder,vae,denoising-autoencoder,sparse-autoencoder,contractive-autoencoder,encoder-decoder,latent-space,bottleneck,reconstruction-loss,kl-divergence,reparameterization-trick,dimensionality-reduction,anomaly-detection,fraud-detection,feature-learning,unsupervised-learning,generative-models,image-compression,image-denoising,elbo,probabilistic-models,deep-learning,neural-networks,pytorch,tensorflow,keras
|
|
15
15
|
Classifier: Development Status :: 3 - Alpha
|
|
16
16
|
Classifier: Intended Audience :: Developers
|
|
17
17
|
Classifier: Topic :: Software Development :: Libraries :: Python Modules
|