ilovetools 0.2.31__tar.gz → 0.2.32__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {ilovetools-0.2.31/ilovetools.egg-info → ilovetools-0.2.32}/PKG-INFO +2 -2
- ilovetools-0.2.32/ilovetools/ml/recurrent.py +534 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32/ilovetools.egg-info}/PKG-INFO +2 -2
- {ilovetools-0.2.31 → ilovetools-0.2.32}/ilovetools.egg-info/SOURCES.txt +2 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/pyproject.toml +2 -2
- {ilovetools-0.2.31 → ilovetools-0.2.32}/setup.py +2 -2
- ilovetools-0.2.32/tests/test_recurrent.py +400 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/LICENSE +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/MANIFEST.in +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/README.md +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/ilovetools/__init__.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/ilovetools/ai/__init__.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/ilovetools/ai/embeddings.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/ilovetools/ai/inference.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/ilovetools/ai/llm_helpers.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/ilovetools/audio/__init__.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/ilovetools/automation/__init__.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/ilovetools/automation/file_organizer.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/ilovetools/conversion/__init__.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/ilovetools/conversion/config_converter.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/ilovetools/conversion/config_converter_fixed_header.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/ilovetools/data/__init__.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/ilovetools/data/feature_engineering.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/ilovetools/data/preprocessing.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/ilovetools/database/__init__.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/ilovetools/datetime/__init__.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/ilovetools/email/__init__.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/ilovetools/email/template_engine.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/ilovetools/files/__init__.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/ilovetools/image/__init__.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/ilovetools/ml/__init__.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/ilovetools/ml/activations.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/ilovetools/ml/anomaly_detection.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/ilovetools/ml/attention.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/ilovetools/ml/augmentation.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/ilovetools/ml/clustering.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/ilovetools/ml/cnn.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/ilovetools/ml/convolution.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/ilovetools/ml/cross_validation.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/ilovetools/ml/dimensionality.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/ilovetools/ml/dropout.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/ilovetools/ml/ensemble.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/ilovetools/ml/feature_selection.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/ilovetools/ml/gradient_descent.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/ilovetools/ml/imbalanced.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/ilovetools/ml/interpretation.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/ilovetools/ml/loss_functions.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/ilovetools/ml/losses.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/ilovetools/ml/lr_schedulers.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/ilovetools/ml/metrics.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/ilovetools/ml/neural_network.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/ilovetools/ml/normalization.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/ilovetools/ml/normalization_advanced.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/ilovetools/ml/optimizers.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/ilovetools/ml/pipeline.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/ilovetools/ml/pooling.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/ilovetools/ml/positional_encoding.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/ilovetools/ml/regularization.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/ilovetools/ml/rnn.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/ilovetools/ml/schedulers.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/ilovetools/ml/timeseries.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/ilovetools/ml/tuning.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/ilovetools/ml/weight_init.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/ilovetools/security/__init__.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/ilovetools/security/password_checker.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/ilovetools/text/__init__.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/ilovetools/utils/__init__.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/ilovetools/utils/cache_system.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/ilovetools/utils/logger.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/ilovetools/utils/rate_limiter.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/ilovetools/utils/retry.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/ilovetools/validation/__init__.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/ilovetools/validation/data_validator.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/ilovetools/web/__init__.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/ilovetools/web/scraper.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/ilovetools/web/url_shortener.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/ilovetools.egg-info/dependency_links.txt +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/ilovetools.egg-info/requires.txt +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/ilovetools.egg-info/top_level.txt +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/requirements.txt +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/setup.cfg +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/tests/__init__.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/tests/test_activations.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/tests/test_attention.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/tests/test_augmentation.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/tests/test_cnn.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/tests/test_convolution.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/tests/test_dropout.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/tests/test_gradient_descent.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/tests/test_loss_functions.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/tests/test_losses.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/tests/test_lr_schedulers.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/tests/test_neural_network.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/tests/test_normalization.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/tests/test_normalization_advanced.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/tests/test_optimizers.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/tests/test_pooling.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/tests/test_positional_encoding.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/tests/test_pypi_installation.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/tests/test_regularization.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/tests/test_rnn.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/tests/test_schedulers.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/tests/test_weight_init.py +0 -0
- {ilovetools-0.2.31 → ilovetools-0.2.32}/tests/verify_positional_encoding.py +0 -0
{ilovetools-0.2.31/ilovetools.egg-info → ilovetools-0.2.32}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: ilovetools
-Version: 0.2.31
+Version: 0.2.32
 Summary: A comprehensive Python utility library with modular tools for AI/ML, data processing, and daily programming needs
 Home-page: https://github.com/AliMehdi512/ilovetools
 Author: Ali Mehdi
@@ -11,7 +11,7 @@ Project-URL: Repository, https://github.com/AliMehdi512/ilovetools
 Project-URL: Issues, https://github.com/AliMehdi512/ilovetools/issues
 Project-URL: Bug Reports, https://github.com/AliMehdi512/ilovetools/issues
 Project-URL: Source, https://github.com/AliMehdi512/ilovetools
-Keywords: utilities,tools,ai,ml,data-processing,automation,
+Keywords: utilities,tools,ai,ml,data-processing,automation,recurrent-neural-networks,rnn,lstm,gru,bilstm,bigru,long-short-term-memory,gated-recurrent-unit,bidirectional-rnn,bidirectional-lstm,bidirectional-gru,sequence-modeling,sequence-to-sequence,seq2seq,nlp,natural-language-processing,time-series,time-series-forecasting,speech-recognition,machine-translation,text-classification,sentiment-analysis,named-entity-recognition,ner,pos-tagging,vanishing-gradient,exploding-gradient,gates,forget-gate,input-gate,output-gate,update-gate,reset-gate,cell-state,hidden-state,deep-learning,neural-networks,pytorch,tensorflow,keras
 Classifier: Development Status :: 3 - Alpha
 Classifier: Intended Audience :: Developers
 Classifier: Topic :: Software Development :: Libraries :: Python Modules
ilovetools-0.2.32/ilovetools/ml/recurrent.py

@@ -0,0 +1,534 @@
+"""
+Recurrent Layers Suite
+
+This module implements recurrent neural network layers for sequence modeling.
+Recurrent layers process sequential data by maintaining hidden states across time steps.
+
+Implemented Recurrent Types:
+1. RNN - Vanilla Recurrent Neural Network
+2. LSTM - Long Short-Term Memory (mitigates vanishing gradients)
+3. GRU - Gated Recurrent Unit (efficient alternative to LSTM)
+4. BiLSTM - Bidirectional LSTM (context from both directions)
+5. BiGRU - Bidirectional GRU (efficient bidirectional processing)
+
+Key Benefits:
+- Sequence modeling (text, audio, time series)
+- Long-term dependency learning (LSTM/GRU)
+- Mitigates the vanishing gradient problem
+- Bidirectional context understanding
+- Variable-length sequence support
+
+References:
+- RNN: Rumelhart et al., "Learning Internal Representations by Error Propagation" (1986)
+- LSTM: Hochreiter & Schmidhuber, "Long Short-Term Memory" (1997)
+- GRU: Cho et al., "Learning Phrase Representations using RNN Encoder-Decoder" (2014)
+- Bidirectional RNN: Schuster & Paliwal, "Bidirectional Recurrent Neural Networks" (1997)
+
+Author: Ali Mehdi
+Date: January 22, 2026
+"""
+
+import numpy as np
+from typing import Tuple, Optional
+
+
+# ============================================================================
+# VANILLA RNN
+# ============================================================================
+
+class RNN:
+    """
+    Vanilla Recurrent Neural Network.
+
+    Processes sequences by maintaining a hidden state across time steps.
+    Suffers from the vanishing gradient problem on long sequences.
+
+    Formula:
+        h(t) = tanh(W_hh * h(t-1) + W_xh * x(t) + b_h)
+        y(t) = W_hy * h(t) + b_y
+
+    Note: this class returns hidden states only; the output projection
+    y(t) = W_hy * h(t) + b_y is left to downstream layers.
+
+    Args:
+        input_size: Size of input features
+        hidden_size: Size of hidden state
+        bias: Whether to use bias (default: True)
+
+    Example:
+        >>> rnn = RNN(input_size=128, hidden_size=256)
+        >>> x = np.random.randn(32, 10, 128)  # (batch, seq_len, input_size)
+        >>> output, hidden = rnn.forward(x)
+        >>> print(output.shape)  # (32, 10, 256)
+        >>> print(hidden.shape)  # (32, 256)
+
+    Use Case:
+        Short sequences, simple sequence modeling, baseline models
+
+    Reference:
+        Rumelhart et al., "Learning Internal Representations by Error Propagation" (1986)
+    """
+
+    def __init__(self, input_size: int, hidden_size: int, bias: bool = True):
+        self.input_size = input_size
+        self.hidden_size = hidden_size
+        self.use_bias = bias
+
+        # Initialize weights (Xavier initialization)
+        self.W_xh = np.random.randn(hidden_size, input_size) * np.sqrt(2.0 / (input_size + hidden_size))
+        self.W_hh = np.random.randn(hidden_size, hidden_size) * np.sqrt(2.0 / (hidden_size + hidden_size))
+
+        if bias:
+            self.b_h = np.zeros(hidden_size)
+        else:
+            self.b_h = None
+
+        self.cache = None
+
+    def forward(self, x: np.ndarray, h0: Optional[np.ndarray] = None) -> Tuple[np.ndarray, np.ndarray]:
+        """
+        Forward pass.
+
+        Args:
+            x: Input tensor (batch, seq_len, input_size)
+            h0: Initial hidden state (batch, hidden_size). If None, initialized to zeros.
+
+        Returns:
+            Tuple of (output, hidden_state)
+            - output: (batch, seq_len, hidden_size)
+            - hidden_state: (batch, hidden_size) - final hidden state
+        """
+        batch_size, seq_len, input_size = x.shape
+
+        if input_size != self.input_size:
+            raise ValueError(f"Expected input_size {self.input_size}, got {input_size}")
+
+        # Initialize hidden state
+        if h0 is None:
+            h = np.zeros((batch_size, self.hidden_size))
+        else:
+            h = h0
+
+        # Store outputs for each time step
+        outputs = []
+        hidden_states = []
+
+        # Process sequence
+        for t in range(seq_len):
+            x_t = x[:, t, :]  # (batch, input_size)
+
+            # Pre-activation; the bias belongs inside tanh, per the class formula
+            z = np.dot(x_t, self.W_xh.T) + np.dot(h, self.W_hh.T)
+            if self.use_bias:
+                z = z + self.b_h
+
+            # Compute new hidden state
+            h = np.tanh(z)
+
+            outputs.append(h)
+            hidden_states.append(h)
+
+        # Stack outputs
+        output = np.stack(outputs, axis=1)  # (batch, seq_len, hidden_size)
+
+        self.cache = (x, hidden_states)
+        return output, h
+
+    def __call__(self, x: np.ndarray, h0: Optional[np.ndarray] = None) -> Tuple[np.ndarray, np.ndarray]:
+        return self.forward(x, h0)
+
+
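Editorial note (not part of the released diff): a minimal sketch of exercising the RNN class above with toy shapes, assuming the import path that SOURCES.txt records for the new file.

import numpy as np
from ilovetools.ml.recurrent import RNN

rnn = RNN(input_size=8, hidden_size=16)
x = np.random.randn(4, 10, 8)              # (batch=4, seq_len=10, features=8)

out, h_last = rnn(x)                        # __call__ delegates to forward()
print(out.shape)                            # (4, 10, 16): hidden state at every step
print(h_last.shape)                         # (4, 16): final hidden state
print(np.allclose(out[:, -1, :], h_last))   # True: last output step equals the final state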
+# ============================================================================
+# LSTM
+# ============================================================================
+
+class LSTM:
+    """
+    Long Short-Term Memory network.
+
+    Mitigates the vanishing gradient problem using gates and a cell state.
+    Maintains long-term dependencies effectively.
+
+    Gates:
+        - Forget gate:    f(t) = σ(W_f · [h(t-1), x(t)] + b_f)
+        - Input gate:     i(t) = σ(W_i · [h(t-1), x(t)] + b_i)
+        - Output gate:    o(t) = σ(W_o · [h(t-1), x(t)] + b_o)
+        - Cell candidate: c̃(t) = tanh(W_c · [h(t-1), x(t)] + b_c)
+
+    State Updates:
+        - Cell state:   c(t) = f(t) ⊙ c(t-1) + i(t) ⊙ c̃(t)
+        - Hidden state: h(t) = o(t) ⊙ tanh(c(t))
+
+    Args:
+        input_size: Size of input features
+        hidden_size: Size of hidden state
+        bias: Whether to use bias (default: True)
+
+    Example:
+        >>> lstm = LSTM(input_size=128, hidden_size=256)
+        >>> x = np.random.randn(32, 100, 128)  # (batch, seq_len, input_size)
+        >>> output, (hidden, cell) = lstm.forward(x)
+        >>> print(output.shape)  # (32, 100, 256)
+        >>> print(hidden.shape)  # (32, 256)
+        >>> print(cell.shape)    # (32, 256)
+
+    Use Case:
+        Long sequences, NLP, time series, speech recognition
+
+    Reference:
+        Hochreiter & Schmidhuber, "Long Short-Term Memory" (1997)
+    """
+
+    def __init__(self, input_size: int, hidden_size: int, bias: bool = True):
+        self.input_size = input_size
+        self.hidden_size = hidden_size
+        self.use_bias = bias
+
+        # Combined input size (input + hidden)
+        combined_size = input_size + hidden_size
+
+        # Initialize weights for all gates (Xavier initialization)
+        scale = np.sqrt(2.0 / (combined_size + hidden_size))
+
+        self.W_f = np.random.randn(hidden_size, combined_size) * scale  # Forget gate
+        self.W_i = np.random.randn(hidden_size, combined_size) * scale  # Input gate
+        self.W_o = np.random.randn(hidden_size, combined_size) * scale  # Output gate
+        self.W_c = np.random.randn(hidden_size, combined_size) * scale  # Cell candidate
+
+        if bias:
+            self.b_f = np.zeros(hidden_size)
+            self.b_i = np.zeros(hidden_size)
+            self.b_o = np.zeros(hidden_size)
+            self.b_c = np.zeros(hidden_size)
+        else:
+            self.b_f = self.b_i = self.b_o = self.b_c = None
+
+        self.cache = None
+
+    def forward(self, x: np.ndarray,
+                h0: Optional[np.ndarray] = None,
+                c0: Optional[np.ndarray] = None) -> Tuple[np.ndarray, Tuple[np.ndarray, np.ndarray]]:
+        """
+        Forward pass.
+
+        Args:
+            x: Input tensor (batch, seq_len, input_size)
+            h0: Initial hidden state (batch, hidden_size)
+            c0: Initial cell state (batch, hidden_size)
+
+        Returns:
+            Tuple of (output, (hidden_state, cell_state))
+            - output: (batch, seq_len, hidden_size)
+            - hidden_state: (batch, hidden_size)
+            - cell_state: (batch, hidden_size)
+        """
+        batch_size, seq_len, input_size = x.shape
+
+        if input_size != self.input_size:
+            raise ValueError(f"Expected input_size {self.input_size}, got {input_size}")
+
+        # Initialize hidden and cell states
+        if h0 is None:
+            h = np.zeros((batch_size, self.hidden_size))
+        else:
+            h = h0
+
+        if c0 is None:
+            c = np.zeros((batch_size, self.hidden_size))
+        else:
+            c = c0
+
+        # Store outputs
+        outputs = []
+
+        # Process sequence
+        for t in range(seq_len):
+            x_t = x[:, t, :]  # (batch, input_size)
+
+            # Concatenate hidden state and input
+            combined = np.concatenate([h, x_t], axis=1)  # (batch, hidden_size + input_size)
+
+            # Compute gates
+            f_t = self._sigmoid(np.dot(combined, self.W_f.T) + (self.b_f if self.use_bias else 0))  # Forget
+            i_t = self._sigmoid(np.dot(combined, self.W_i.T) + (self.b_i if self.use_bias else 0))  # Input
+            o_t = self._sigmoid(np.dot(combined, self.W_o.T) + (self.b_o if self.use_bias else 0))  # Output
+            c_tilde = np.tanh(np.dot(combined, self.W_c.T) + (self.b_c if self.use_bias else 0))    # Cell candidate
+
+            # Update cell state
+            c = f_t * c + i_t * c_tilde
+
+            # Update hidden state
+            h = o_t * np.tanh(c)
+
+            outputs.append(h)
+
+        # Stack outputs
+        output = np.stack(outputs, axis=1)  # (batch, seq_len, hidden_size)
+
+        return output, (h, c)
+
+    @staticmethod
+    def _sigmoid(x: np.ndarray) -> np.ndarray:
+        """Numerically stable sigmoid activation."""
+        return 1 / (1 + np.exp(-np.clip(x, -500, 500)))
+
+    def __call__(self, x: np.ndarray,
+                 h0: Optional[np.ndarray] = None,
+                 c0: Optional[np.ndarray] = None) -> Tuple[np.ndarray, Tuple[np.ndarray, np.ndarray]]:
+        return self.forward(x, h0, c0)
+
+
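Editorial note: two properties of the LSTM above worth sanity-checking, in a sketch that follows directly from __init__ and forward(). First, the four gate matrices give 4 * hidden * (input + hidden) weights; second, explicit (h0, c0) carry-over lets a sequence be streamed in chunks with no change in result.

import numpy as np
from ilovetools.ml.recurrent import LSTM

lstm = LSTM(input_size=128, hidden_size=256)

# Four gate matrices, each (hidden_size, input_size + hidden_size)
n_weights = sum(W.size for W in (lstm.W_f, lstm.W_i, lstm.W_o, lstm.W_c))
print(n_weights)  # 4 * 256 * (128 + 256) = 393216

# Chunked processing with carried-over states matches one full pass
x = np.random.randn(2, 6, 128)
_, (h_full, c_full) = lstm(x)
_, (h1, c1) = lstm(x[:, :3, :])
_, (h2, c2) = lstm(x[:, 3:, :], h0=h1, c0=c1)
print(np.allclose(h_full, h2), np.allclose(c_full, c2))  # True True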
+# ============================================================================
+# GRU
+# ============================================================================
+
+class GRU:
+    """
+    Gated Recurrent Unit.
+
+    Simplified alternative to the LSTM with fewer parameters.
+    Combines the forget and input gates into a single update gate.
+
+    Gates:
+        - Update gate: z(t) = σ(W_z · [h(t-1), x(t)] + b_z)
+        - Reset gate:  r(t) = σ(W_r · [h(t-1), x(t)] + b_r)
+        - Candidate:   h̃(t) = tanh(W_h · [r(t) ⊙ h(t-1), x(t)] + b_h)
+
+    State Update:
+        h(t) = (1 - z(t)) ⊙ h(t-1) + z(t) ⊙ h̃(t)
+
+    Args:
+        input_size: Size of input features
+        hidden_size: Size of hidden state
+        bias: Whether to use bias (default: True)
+
+    Example:
+        >>> gru = GRU(input_size=128, hidden_size=256)
+        >>> x = np.random.randn(32, 100, 128)  # (batch, seq_len, input_size)
+        >>> output, hidden = gru.forward(x)
+        >>> print(output.shape)  # (32, 100, 256)
+        >>> print(hidden.shape)  # (32, 256)
+
+    Use Case:
+        Efficient sequence modeling, faster training than LSTM
+
+    Reference:
+        Cho et al., "Learning Phrase Representations using RNN Encoder-Decoder" (2014)
+    """
+
+    def __init__(self, input_size: int, hidden_size: int, bias: bool = True):
+        self.input_size = input_size
+        self.hidden_size = hidden_size
+        self.use_bias = bias
+
+        # Combined input size (input + hidden)
+        combined_size = input_size + hidden_size
+
+        # Initialize weights (Xavier initialization)
+        scale = np.sqrt(2.0 / (combined_size + hidden_size))
+
+        self.W_z = np.random.randn(hidden_size, combined_size) * scale  # Update gate
+        self.W_r = np.random.randn(hidden_size, combined_size) * scale  # Reset gate
+        self.W_h = np.random.randn(hidden_size, combined_size) * scale  # Candidate
+
+        if bias:
+            self.b_z = np.zeros(hidden_size)
+            self.b_r = np.zeros(hidden_size)
+            self.b_h = np.zeros(hidden_size)
+        else:
+            self.b_z = self.b_r = self.b_h = None
+
+        self.cache = None
+
+    def forward(self, x: np.ndarray, h0: Optional[np.ndarray] = None) -> Tuple[np.ndarray, np.ndarray]:
+        """
+        Forward pass.
+
+        Args:
+            x: Input tensor (batch, seq_len, input_size)
+            h0: Initial hidden state (batch, hidden_size)
+
+        Returns:
+            Tuple of (output, hidden_state)
+            - output: (batch, seq_len, hidden_size)
+            - hidden_state: (batch, hidden_size)
+        """
+        batch_size, seq_len, input_size = x.shape
+
+        if input_size != self.input_size:
+            raise ValueError(f"Expected input_size {self.input_size}, got {input_size}")
+
+        # Initialize hidden state
+        if h0 is None:
+            h = np.zeros((batch_size, self.hidden_size))
+        else:
+            h = h0
+
+        # Store outputs
+        outputs = []
+
+        # Process sequence
+        for t in range(seq_len):
+            x_t = x[:, t, :]  # (batch, input_size)
+
+            # Concatenate hidden state and input
+            combined = np.concatenate([h, x_t], axis=1)
+
+            # Compute gates
+            z_t = self._sigmoid(np.dot(combined, self.W_z.T) + (self.b_z if self.use_bias else 0))  # Update
+            r_t = self._sigmoid(np.dot(combined, self.W_r.T) + (self.b_r if self.use_bias else 0))  # Reset
+
+            # Compute candidate hidden state
+            combined_reset = np.concatenate([r_t * h, x_t], axis=1)
+            h_tilde = np.tanh(np.dot(combined_reset, self.W_h.T) + (self.b_h if self.use_bias else 0))
+
+            # Update hidden state
+            h = (1 - z_t) * h + z_t * h_tilde
+
+            outputs.append(h)
+
+        # Stack outputs
+        output = np.stack(outputs, axis=1)  # (batch, seq_len, hidden_size)
+
+        return output, h
+
+    @staticmethod
+    def _sigmoid(x: np.ndarray) -> np.ndarray:
+        """Numerically stable sigmoid activation."""
+        return 1 / (1 + np.exp(-np.clip(x, -500, 500)))
+
+    def __call__(self, x: np.ndarray, h0: Optional[np.ndarray] = None) -> Tuple[np.ndarray, np.ndarray]:
+        return self.forward(x, h0)
+
+
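Editorial note: the update gate is what lets a GRU carry state across many steps. With z(t) near 0, h(t) = (1 - z(t)) ⊙ h(t-1) + z(t) ⊙ h̃(t) reduces to a copy. A didactic sketch (the bias surgery below is illustration only, not an intended API):

import numpy as np
from ilovetools.ml.recurrent import GRU

gru = GRU(input_size=4, hidden_size=8)

# Drive the update gate toward 0: z = sigmoid(W_z · combined - 50) ≈ 0,
# so each step leaves the hidden state essentially untouched.
gru.b_z[:] = -50.0

h0 = np.random.randn(1, 8)
x = np.random.randn(1, 20, 4)
_, h_final = gru(x, h0=h0)
print(np.allclose(h_final, h0, atol=1e-6))  # True: state copied through 20 steps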
+# ============================================================================
+# BIDIRECTIONAL LSTM
+# ============================================================================
+
+class BiLSTM:
+    """
+    Bidirectional LSTM.
+
+    Processes the sequence in both forward and backward directions and
+    concatenates the outputs of the two passes for richer context.
+
+    Args:
+        input_size: Size of input features
+        hidden_size: Size of hidden state (per direction)
+        bias: Whether to use bias (default: True)
+
+    Example:
+        >>> bilstm = BiLSTM(input_size=128, hidden_size=256)
+        >>> x = np.random.randn(32, 100, 128)
+        >>> output, ((h_fwd, c_fwd), (h_bwd, c_bwd)) = bilstm.forward(x)
+        >>> print(output.shape)  # (32, 100, 512) - 2 * hidden_size
+
+    Use Case:
+        NLP tasks, named entity recognition, sentiment analysis
+
+    Reference:
+        Schuster & Paliwal, "Bidirectional Recurrent Neural Networks" (1997)
+    """
+
+    def __init__(self, input_size: int, hidden_size: int, bias: bool = True):
+        self.input_size = input_size
+        self.hidden_size = hidden_size
+
+        # Forward and backward LSTMs
+        self.lstm_forward = LSTM(input_size, hidden_size, bias)
+        self.lstm_backward = LSTM(input_size, hidden_size, bias)
+
+    def forward(self, x: np.ndarray) -> Tuple[np.ndarray, Tuple[Tuple, Tuple]]:
+        """
+        Forward pass.
+
+        Args:
+            x: Input tensor (batch, seq_len, input_size)
+
+        Returns:
+            Tuple of (output, ((h_fwd, c_fwd), (h_bwd, c_bwd)))
+            - output: (batch, seq_len, 2 * hidden_size)
+            Note: the backward states are the backward LSTM's final states,
+            i.e. they summarize the sequence from its end back to its start.
+        """
+        # Forward direction
+        output_fwd, (h_fwd, c_fwd) = self.lstm_forward.forward(x)
+
+        # Backward direction (reverse the sequence in time)
+        x_reversed = np.flip(x, axis=1)
+        output_bwd, (h_bwd, c_bwd) = self.lstm_backward.forward(x_reversed)
+        output_bwd = np.flip(output_bwd, axis=1)  # Re-align with original time order
+
+        # Concatenate outputs along the feature axis
+        output = np.concatenate([output_fwd, output_bwd], axis=2)
+
+        return output, ((h_fwd, c_fwd), (h_bwd, c_bwd))
+
+    def __call__(self, x: np.ndarray) -> Tuple[np.ndarray, Tuple[Tuple, Tuple]]:
+        return self.forward(x)
+
+
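Editorial note: a sketch of how the concatenated BiLSTM output is laid out, derived from the flip/run/flip logic above.

import numpy as np
from ilovetools.ml.recurrent import BiLSTM

bilstm = BiLSTM(input_size=16, hidden_size=32)
x = np.random.randn(2, 5, 16)
out, ((h_f, c_f), (h_b, c_b)) = bilstm(x)

print(out.shape)  # (2, 5, 64): forward features in [:32], backward in [32:]
# The forward half at the last step is the forward LSTM's final hidden state;
# the backward half at the first step is the backward LSTM's final hidden state.
print(np.allclose(out[:, -1, :32], h_f))  # True
print(np.allclose(out[:, 0, 32:], h_b))   # True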
+# ============================================================================
+# BIDIRECTIONAL GRU
+# ============================================================================
+
+class BiGRU:
+    """
+    Bidirectional GRU.
+
+    Efficient bidirectional processing with GRU cells.
+
+    Args:
+        input_size: Size of input features
+        hidden_size: Size of hidden state (per direction)
+        bias: Whether to use bias (default: True)
+
+    Example:
+        >>> bigru = BiGRU(input_size=128, hidden_size=256)
+        >>> x = np.random.randn(32, 100, 128)
+        >>> output, (hidden_fwd, hidden_bwd) = bigru.forward(x)
+        >>> print(output.shape)  # (32, 100, 512) - 2 * hidden_size
+
+    Use Case:
+        Efficient bidirectional sequence modeling
+
+    Reference:
+        Cho et al., "Learning Phrase Representations using RNN Encoder-Decoder" (2014)
+    """
+
+    def __init__(self, input_size: int, hidden_size: int, bias: bool = True):
+        self.input_size = input_size
+        self.hidden_size = hidden_size
+
+        # Forward and backward GRUs
+        self.gru_forward = GRU(input_size, hidden_size, bias)
+        self.gru_backward = GRU(input_size, hidden_size, bias)
+
+    def forward(self, x: np.ndarray) -> Tuple[np.ndarray, Tuple[np.ndarray, np.ndarray]]:
+        """
+        Forward pass.
+
+        Args:
+            x: Input tensor (batch, seq_len, input_size)
+
+        Returns:
+            Tuple of (output, (h_fwd, h_bwd))
+            - output: (batch, seq_len, 2 * hidden_size)
+        """
+        # Forward direction
+        output_fwd, h_fwd = self.gru_forward.forward(x)
+
+        # Backward direction (reverse the sequence in time)
+        x_reversed = np.flip(x, axis=1)
+        output_bwd, h_bwd = self.gru_backward.forward(x_reversed)
+        output_bwd = np.flip(output_bwd, axis=1)  # Re-align with original time order
+
+        # Concatenate outputs along the feature axis
+        output = np.concatenate([output_fwd, output_bwd], axis=2)
+
+        return output, (h_fwd, h_bwd)
+
+    def __call__(self, x: np.ndarray) -> Tuple[np.ndarray, Tuple[np.ndarray, np.ndarray]]:
+        return self.forward(x)
+
+
+__all__ = [
+    'RNN',
+    'LSTM',
+    'GRU',
+    'BiLSTM',
+    'BiGRU',
+]
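Editorial note: a hedged end-to-end sketch combining the pieces, using toy dimensions and a hypothetical linear head; mean-pooled BiGRU features are a common fixed-size representation for text classification.

import numpy as np
from ilovetools.ml.recurrent import BiGRU

rng = np.random.default_rng(0)
batch, seq_len, emb_dim, hidden = 8, 12, 32, 64

# Stand-in for token embeddings of a batch of sentences
embeddings = rng.standard_normal((batch, seq_len, emb_dim))

encoder = BiGRU(input_size=emb_dim, hidden_size=hidden)
features, (h_fwd, h_bwd) = encoder(embeddings)        # (8, 12, 128)

# Mean-pool over time, then a toy 3-class linear head (hypothetical)
sentence_vecs = features.mean(axis=1)                 # (8, 128)
W_out = rng.standard_normal((2 * hidden, 3)) * 0.01   # (128, 3)
logits = sentence_vecs @ W_out                        # (8, 3)
print(logits.shape)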
{ilovetools-0.2.31 → ilovetools-0.2.32/ilovetools.egg-info}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: ilovetools
-Version: 0.2.31
+Version: 0.2.32
 Summary: A comprehensive Python utility library with modular tools for AI/ML, data processing, and daily programming needs
 Home-page: https://github.com/AliMehdi512/ilovetools
 Author: Ali Mehdi
@@ -11,7 +11,7 @@ Project-URL: Repository, https://github.com/AliMehdi512/ilovetools
 Project-URL: Issues, https://github.com/AliMehdi512/ilovetools/issues
 Project-URL: Bug Reports, https://github.com/AliMehdi512/ilovetools/issues
 Project-URL: Source, https://github.com/AliMehdi512/ilovetools
-Keywords: utilities,tools,ai,ml,data-processing,automation,
+Keywords: utilities,tools,ai,ml,data-processing,automation,recurrent-neural-networks,rnn,lstm,gru,bilstm,bigru,long-short-term-memory,gated-recurrent-unit,bidirectional-rnn,bidirectional-lstm,bidirectional-gru,sequence-modeling,sequence-to-sequence,seq2seq,nlp,natural-language-processing,time-series,time-series-forecasting,speech-recognition,machine-translation,text-classification,sentiment-analysis,named-entity-recognition,ner,pos-tagging,vanishing-gradient,exploding-gradient,gates,forget-gate,input-gate,output-gate,update-gate,reset-gate,cell-state,hidden-state,deep-learning,neural-networks,pytorch,tensorflow,keras
 Classifier: Development Status :: 3 - Alpha
 Classifier: Intended Audience :: Developers
 Classifier: Topic :: Software Development :: Libraries :: Python Modules
{ilovetools-0.2.31 → ilovetools-0.2.32}/ilovetools.egg-info/SOURCES.txt

@@ -56,6 +56,7 @@ ilovetools/ml/optimizers.py
 ilovetools/ml/pipeline.py
 ilovetools/ml/pooling.py
 ilovetools/ml/positional_encoding.py
+ilovetools/ml/recurrent.py
 ilovetools/ml/regularization.py
 ilovetools/ml/rnn.py
 ilovetools/ml/schedulers.py
@@ -93,6 +94,7 @@ tests/test_optimizers.py
 tests/test_pooling.py
 tests/test_positional_encoding.py
 tests/test_pypi_installation.py
+tests/test_recurrent.py
 tests/test_regularization.py
 tests/test_rnn.py
 tests/test_schedulers.py
{ilovetools-0.2.31 → ilovetools-0.2.32}/pyproject.toml

@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "ilovetools"
-version = "0.2.31"
+version = "0.2.32"
 description = "A comprehensive Python utility library with modular tools for AI/ML, data processing, and daily programming needs"
 readme = "README.md"
 requires-python = ">=3.8"
@@ -12,7 +12,7 @@ license = "MIT"
 authors = [
     {name = "Ali Mehdi", email = "ali.mehdi.dev579@gmail.com"}
 ]
-keywords = ["utilities", "tools", "ai", "ml", "data-processing", "automation", "
+keywords = ["utilities", "tools", "ai", "ml", "data-processing", "automation", "recurrent-neural-networks", "rnn", "lstm", "gru", "bilstm", "bigru", "long-short-term-memory", "gated-recurrent-unit", "bidirectional-rnn", "bidirectional-lstm", "bidirectional-gru", "sequence-modeling", "sequence-to-sequence", "seq2seq", "nlp", "natural-language-processing", "time-series", "time-series-forecasting", "speech-recognition", "machine-translation", "text-classification", "sentiment-analysis", "named-entity-recognition", "ner", "pos-tagging", "vanishing-gradient", "exploding-gradient", "gates", "forget-gate", "input-gate", "output-gate", "update-gate", "reset-gate", "cell-state", "hidden-state", "deep-learning", "neural-networks", "pytorch", "tensorflow", "keras"]
 classifiers = [
     "Development Status :: 3 - Alpha",
     "Intended Audience :: Developers",
{ilovetools-0.2.31 → ilovetools-0.2.32}/setup.py

@@ -5,7 +5,7 @@ with open("README.md", "r", encoding="utf-8") as fh:
 
 setup(
     name="ilovetools",
-    version="0.2.31",
+    version="0.2.32",
     author="Ali Mehdi",
     author_email="ali.mehdi.dev579@gmail.com",
     description="A comprehensive Python utility library with modular tools for AI/ML, data processing, and daily programming needs",
@@ -58,7 +58,7 @@ setup(
             "soundfile>=0.12.0",
         ],
     },
-    keywords="utilities, tools, ai, ml, data-processing, automation, python-library, neural-networks,
+    keywords="utilities, tools, ai, ml, data-processing, automation, python-library, neural-networks, recurrent-neural-networks, rnn, lstm, gru, bilstm, bigru, sequence-modeling, nlp, time-series, speech-recognition, machine-translation, vanishing-gradient, long-short-term-memory, gated-recurrent-unit, bidirectional-rnn, deep-learning, pytorch, tensorflow, keras",
     project_urls={
         "Bug Reports": "https://github.com/AliMehdi512/ilovetools/issues",
         "Source": "https://github.com/AliMehdi512/ilovetools",
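Editorial note: a quick post-upgrade check that the new module ships as packaged (assumes a standard install of 0.2.32; importlib.metadata is available on the declared Python >=3.8 floor):

import importlib.metadata as md
print(md.version("ilovetools"))       # expected: 0.2.32
import ilovetools.ml.recurrent as rec
print(rec.__all__)                    # ['RNN', 'LSTM', 'GRU', 'BiLSTM', 'BiGRU']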