ilovetools 0.2.33__tar.gz → 0.2.34__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (108)
  1. {ilovetools-0.2.33/ilovetools.egg-info → ilovetools-0.2.34}/PKG-INFO +2 -2
  2. ilovetools-0.2.34/ilovetools/ml/gnn.py +621 -0
  3. {ilovetools-0.2.33 → ilovetools-0.2.34/ilovetools.egg-info}/PKG-INFO +2 -2
  4. {ilovetools-0.2.33 → ilovetools-0.2.34}/ilovetools.egg-info/SOURCES.txt +2 -0
  5. {ilovetools-0.2.33 → ilovetools-0.2.34}/pyproject.toml +2 -2
  6. {ilovetools-0.2.33 → ilovetools-0.2.34}/setup.py +2 -2
  7. ilovetools-0.2.34/tests/test_gnn.py +330 -0
  8. {ilovetools-0.2.33 → ilovetools-0.2.34}/LICENSE +0 -0
  9. {ilovetools-0.2.33 → ilovetools-0.2.34}/MANIFEST.in +0 -0
  10. {ilovetools-0.2.33 → ilovetools-0.2.34}/README.md +0 -0
  11. {ilovetools-0.2.33 → ilovetools-0.2.34}/ilovetools/__init__.py +0 -0
  12. {ilovetools-0.2.33 → ilovetools-0.2.34}/ilovetools/ai/__init__.py +0 -0
  13. {ilovetools-0.2.33 → ilovetools-0.2.34}/ilovetools/ai/embeddings.py +0 -0
  14. {ilovetools-0.2.33 → ilovetools-0.2.34}/ilovetools/ai/inference.py +0 -0
  15. {ilovetools-0.2.33 → ilovetools-0.2.34}/ilovetools/ai/llm_helpers.py +0 -0
  16. {ilovetools-0.2.33 → ilovetools-0.2.34}/ilovetools/audio/__init__.py +0 -0
  17. {ilovetools-0.2.33 → ilovetools-0.2.34}/ilovetools/automation/__init__.py +0 -0
  18. {ilovetools-0.2.33 → ilovetools-0.2.34}/ilovetools/automation/file_organizer.py +0 -0
  19. {ilovetools-0.2.33 → ilovetools-0.2.34}/ilovetools/conversion/__init__.py +0 -0
  20. {ilovetools-0.2.33 → ilovetools-0.2.34}/ilovetools/conversion/config_converter.py +0 -0
  21. {ilovetools-0.2.33 → ilovetools-0.2.34}/ilovetools/conversion/config_converter_fixed_header.py +0 -0
  22. {ilovetools-0.2.33 → ilovetools-0.2.34}/ilovetools/data/__init__.py +0 -0
  23. {ilovetools-0.2.33 → ilovetools-0.2.34}/ilovetools/data/feature_engineering.py +0 -0
  24. {ilovetools-0.2.33 → ilovetools-0.2.34}/ilovetools/data/preprocessing.py +0 -0
  25. {ilovetools-0.2.33 → ilovetools-0.2.34}/ilovetools/database/__init__.py +0 -0
  26. {ilovetools-0.2.33 → ilovetools-0.2.34}/ilovetools/datetime/__init__.py +0 -0
  27. {ilovetools-0.2.33 → ilovetools-0.2.34}/ilovetools/email/__init__.py +0 -0
  28. {ilovetools-0.2.33 → ilovetools-0.2.34}/ilovetools/email/template_engine.py +0 -0
  29. {ilovetools-0.2.33 → ilovetools-0.2.34}/ilovetools/files/__init__.py +0 -0
  30. {ilovetools-0.2.33 → ilovetools-0.2.34}/ilovetools/image/__init__.py +0 -0
  31. {ilovetools-0.2.33 → ilovetools-0.2.34}/ilovetools/ml/__init__.py +0 -0
  32. {ilovetools-0.2.33 → ilovetools-0.2.34}/ilovetools/ml/activations.py +0 -0
  33. {ilovetools-0.2.33 → ilovetools-0.2.34}/ilovetools/ml/anomaly_detection.py +0 -0
  34. {ilovetools-0.2.33 → ilovetools-0.2.34}/ilovetools/ml/attention.py +0 -0
  35. {ilovetools-0.2.33 → ilovetools-0.2.34}/ilovetools/ml/augmentation.py +0 -0
  36. {ilovetools-0.2.33 → ilovetools-0.2.34}/ilovetools/ml/clustering.py +0 -0
  37. {ilovetools-0.2.33 → ilovetools-0.2.34}/ilovetools/ml/cnn.py +0 -0
  38. {ilovetools-0.2.33 → ilovetools-0.2.34}/ilovetools/ml/convolution.py +0 -0
  39. {ilovetools-0.2.33 → ilovetools-0.2.34}/ilovetools/ml/cross_validation.py +0 -0
  40. {ilovetools-0.2.33 → ilovetools-0.2.34}/ilovetools/ml/dimensionality.py +0 -0
  41. {ilovetools-0.2.33 → ilovetools-0.2.34}/ilovetools/ml/dropout.py +0 -0
  42. {ilovetools-0.2.33 → ilovetools-0.2.34}/ilovetools/ml/embedding.py +0 -0
  43. {ilovetools-0.2.33 → ilovetools-0.2.34}/ilovetools/ml/ensemble.py +0 -0
  44. {ilovetools-0.2.33 → ilovetools-0.2.34}/ilovetools/ml/feature_selection.py +0 -0
  45. {ilovetools-0.2.33 → ilovetools-0.2.34}/ilovetools/ml/gradient_descent.py +0 -0
  46. {ilovetools-0.2.33 → ilovetools-0.2.34}/ilovetools/ml/imbalanced.py +0 -0
  47. {ilovetools-0.2.33 → ilovetools-0.2.34}/ilovetools/ml/interpretation.py +0 -0
  48. {ilovetools-0.2.33 → ilovetools-0.2.34}/ilovetools/ml/loss_functions.py +0 -0
  49. {ilovetools-0.2.33 → ilovetools-0.2.34}/ilovetools/ml/losses.py +0 -0
  50. {ilovetools-0.2.33 → ilovetools-0.2.34}/ilovetools/ml/lr_schedulers.py +0 -0
  51. {ilovetools-0.2.33 → ilovetools-0.2.34}/ilovetools/ml/metrics.py +0 -0
  52. {ilovetools-0.2.33 → ilovetools-0.2.34}/ilovetools/ml/neural_network.py +0 -0
  53. {ilovetools-0.2.33 → ilovetools-0.2.34}/ilovetools/ml/normalization.py +0 -0
  54. {ilovetools-0.2.33 → ilovetools-0.2.34}/ilovetools/ml/normalization_advanced.py +0 -0
  55. {ilovetools-0.2.33 → ilovetools-0.2.34}/ilovetools/ml/optimizers.py +0 -0
  56. {ilovetools-0.2.33 → ilovetools-0.2.34}/ilovetools/ml/pipeline.py +0 -0
  57. {ilovetools-0.2.33 → ilovetools-0.2.34}/ilovetools/ml/pooling.py +0 -0
  58. {ilovetools-0.2.33 → ilovetools-0.2.34}/ilovetools/ml/positional_encoding.py +0 -0
  59. {ilovetools-0.2.33 → ilovetools-0.2.34}/ilovetools/ml/recurrent.py +0 -0
  60. {ilovetools-0.2.33 → ilovetools-0.2.34}/ilovetools/ml/regularization.py +0 -0
  61. {ilovetools-0.2.33 → ilovetools-0.2.34}/ilovetools/ml/rnn.py +0 -0
  62. {ilovetools-0.2.33 → ilovetools-0.2.34}/ilovetools/ml/schedulers.py +0 -0
  63. {ilovetools-0.2.33 → ilovetools-0.2.34}/ilovetools/ml/timeseries.py +0 -0
  64. {ilovetools-0.2.33 → ilovetools-0.2.34}/ilovetools/ml/tuning.py +0 -0
  65. {ilovetools-0.2.33 → ilovetools-0.2.34}/ilovetools/ml/weight_init.py +0 -0
  66. {ilovetools-0.2.33 → ilovetools-0.2.34}/ilovetools/security/__init__.py +0 -0
  67. {ilovetools-0.2.33 → ilovetools-0.2.34}/ilovetools/security/password_checker.py +0 -0
  68. {ilovetools-0.2.33 → ilovetools-0.2.34}/ilovetools/text/__init__.py +0 -0
  69. {ilovetools-0.2.33 → ilovetools-0.2.34}/ilovetools/utils/__init__.py +0 -0
  70. {ilovetools-0.2.33 → ilovetools-0.2.34}/ilovetools/utils/cache_system.py +0 -0
  71. {ilovetools-0.2.33 → ilovetools-0.2.34}/ilovetools/utils/logger.py +0 -0
  72. {ilovetools-0.2.33 → ilovetools-0.2.34}/ilovetools/utils/rate_limiter.py +0 -0
  73. {ilovetools-0.2.33 → ilovetools-0.2.34}/ilovetools/utils/retry.py +0 -0
  74. {ilovetools-0.2.33 → ilovetools-0.2.34}/ilovetools/validation/__init__.py +0 -0
  75. {ilovetools-0.2.33 → ilovetools-0.2.34}/ilovetools/validation/data_validator.py +0 -0
  76. {ilovetools-0.2.33 → ilovetools-0.2.34}/ilovetools/web/__init__.py +0 -0
  77. {ilovetools-0.2.33 → ilovetools-0.2.34}/ilovetools/web/scraper.py +0 -0
  78. {ilovetools-0.2.33 → ilovetools-0.2.34}/ilovetools/web/url_shortener.py +0 -0
  79. {ilovetools-0.2.33 → ilovetools-0.2.34}/ilovetools.egg-info/dependency_links.txt +0 -0
  80. {ilovetools-0.2.33 → ilovetools-0.2.34}/ilovetools.egg-info/requires.txt +0 -0
  81. {ilovetools-0.2.33 → ilovetools-0.2.34}/ilovetools.egg-info/top_level.txt +0 -0
  82. {ilovetools-0.2.33 → ilovetools-0.2.34}/requirements.txt +0 -0
  83. {ilovetools-0.2.33 → ilovetools-0.2.34}/setup.cfg +0 -0
  84. {ilovetools-0.2.33 → ilovetools-0.2.34}/tests/__init__.py +0 -0
  85. {ilovetools-0.2.33 → ilovetools-0.2.34}/tests/test_activations.py +0 -0
  86. {ilovetools-0.2.33 → ilovetools-0.2.34}/tests/test_attention.py +0 -0
  87. {ilovetools-0.2.33 → ilovetools-0.2.34}/tests/test_augmentation.py +0 -0
  88. {ilovetools-0.2.33 → ilovetools-0.2.34}/tests/test_cnn.py +0 -0
  89. {ilovetools-0.2.33 → ilovetools-0.2.34}/tests/test_convolution.py +0 -0
  90. {ilovetools-0.2.33 → ilovetools-0.2.34}/tests/test_dropout.py +0 -0
  91. {ilovetools-0.2.33 → ilovetools-0.2.34}/tests/test_embedding.py +0 -0
  92. {ilovetools-0.2.33 → ilovetools-0.2.34}/tests/test_gradient_descent.py +0 -0
  93. {ilovetools-0.2.33 → ilovetools-0.2.34}/tests/test_loss_functions.py +0 -0
  94. {ilovetools-0.2.33 → ilovetools-0.2.34}/tests/test_losses.py +0 -0
  95. {ilovetools-0.2.33 → ilovetools-0.2.34}/tests/test_lr_schedulers.py +0 -0
  96. {ilovetools-0.2.33 → ilovetools-0.2.34}/tests/test_neural_network.py +0 -0
  97. {ilovetools-0.2.33 → ilovetools-0.2.34}/tests/test_normalization.py +0 -0
  98. {ilovetools-0.2.33 → ilovetools-0.2.34}/tests/test_normalization_advanced.py +0 -0
  99. {ilovetools-0.2.33 → ilovetools-0.2.34}/tests/test_optimizers.py +0 -0
  100. {ilovetools-0.2.33 → ilovetools-0.2.34}/tests/test_pooling.py +0 -0
  101. {ilovetools-0.2.33 → ilovetools-0.2.34}/tests/test_positional_encoding.py +0 -0
  102. {ilovetools-0.2.33 → ilovetools-0.2.34}/tests/test_pypi_installation.py +0 -0
  103. {ilovetools-0.2.33 → ilovetools-0.2.34}/tests/test_recurrent.py +0 -0
  104. {ilovetools-0.2.33 → ilovetools-0.2.34}/tests/test_regularization.py +0 -0
  105. {ilovetools-0.2.33 → ilovetools-0.2.34}/tests/test_rnn.py +0 -0
  106. {ilovetools-0.2.33 → ilovetools-0.2.34}/tests/test_schedulers.py +0 -0
  107. {ilovetools-0.2.33 → ilovetools-0.2.34}/tests/test_weight_init.py +0 -0
  108. {ilovetools-0.2.33 → ilovetools-0.2.34}/tests/verify_positional_encoding.py +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: ilovetools
- Version: 0.2.33
+ Version: 0.2.34
  Summary: A comprehensive Python utility library with modular tools for AI/ML, data processing, and daily programming needs
  Home-page: https://github.com/AliMehdi512/ilovetools
  Author: Ali Mehdi
@@ -11,7 +11,7 @@ Project-URL: Repository, https://github.com/AliMehdi512/ilovetools
  Project-URL: Issues, https://github.com/AliMehdi512/ilovetools/issues
  Project-URL: Bug Reports, https://github.com/AliMehdi512/ilovetools/issues
  Project-URL: Source, https://github.com/AliMehdi512/ilovetools
- Keywords: utilities,tools,ai,ml,data-processing,automation,embeddings,word-embeddings,word2vec,glove,fasttext,positional-encoding,sinusoidal-encoding,learned-embeddings,token-embeddings,character-embeddings,segment-embeddings,token-type-embeddings,semantic-similarity,cosine-similarity,embedding-space,embedding-matrix,lookup-table,distributed-representations,dense-vectors,nlp,natural-language-processing,transformers,bert,gpt,attention,vocabulary,tokenization,pretrained-embeddings,transfer-learning,deep-learning,neural-networks,pytorch,tensorflow,keras
+ Keywords: utilities,tools,ai,ml,data-processing,automation,graph-neural-networks,gnn,gcn,gat,graphsage,gin,graph-convolutional-network,graph-attention-network,message-passing,neighborhood-aggregation,node-classification,link-prediction,graph-classification,node-embeddings,graph-embeddings,social-networks,knowledge-graphs,molecular-graphs,citation-networks,drug-discovery,molecular-property-prediction,friend-recommendation,product-recommendation,traffic-prediction,relational-data,graph-pooling,spectral-graph-theory,spatial-graph-convolution,attention-mechanism,inductive-learning,graph-isomorphism,deep-learning,neural-networks,pytorch,tensorflow
  Classifier: Development Status :: 3 - Alpha
  Classifier: Intended Audience :: Developers
  Classifier: Topic :: Software Development :: Libraries :: Python Modules
@@ -0,0 +1,621 @@
+ """
+ Graph Neural Networks Suite
+
+ This module implements various Graph Neural Network (GNN) architectures for processing
+ graph-structured data. GNNs learn node embeddings by aggregating information from
+ neighboring nodes through message passing.
+
+ Implemented GNN Types:
+ 1. GCN - Graph Convolutional Network
+ 2. GAT - Graph Attention Network
+ 3. GraphSAGE - Graph Sample and Aggregate
+ 4. GIN - Graph Isomorphism Network
+ 5. MessagePassing - Base message passing framework
+
+ Key Benefits:
+ - Process graph-structured data (social networks, molecules, knowledge graphs)
+ - Node classification, link prediction, graph classification
+ - Capture relational information
+ - Scalable to large graphs
+ - Inductive learning on unseen graphs
+
+ References:
+ - GCN: Kipf & Welling, "Semi-Supervised Classification with Graph Convolutional Networks" (2017)
+ - GAT: Veličković et al., "Graph Attention Networks" (2018)
+ - GraphSAGE: Hamilton et al., "Inductive Representation Learning on Large Graphs" (2017)
+ - GIN: Xu et al., "How Powerful are Graph Neural Networks?" (2019)
+
+ Author: Ali Mehdi
+ Date: January 31, 2026
+ """
+
+ import numpy as np
+ from typing import Optional, Tuple, List
+
+
+ # ============================================================================
+ # GRAPH CONVOLUTIONAL NETWORK (GCN)
+ # ============================================================================
+
+ class GCN:
+     """
+     Graph Convolutional Network.
+
+     Aggregates neighbor features with equal weights using a normalized adjacency matrix.
+
+     Formula:
+         H^(l+1) = σ(D^(-1/2) A D^(-1/2) H^(l) W^(l))
+
+     where:
+         - A: Adjacency matrix with self-loops (A + I)
+         - D: Degree matrix
+         - H^(l): Node features at layer l
+         - W^(l): Learnable weight matrix
+         - σ: Activation function
+
+     Args:
+         in_features: Input feature dimension
+         hidden_features: Hidden feature dimension
+         out_features: Output feature dimension (optional, defaults to hidden_features)
+         num_layers: Number of GCN layers (default: 2)
+         dropout: Dropout rate (default: 0.5)
+
+     Example:
+         >>> gcn = GCN(in_features=128, hidden_features=256, out_features=64)
+         >>> node_features = np.random.randn(100, 128)  # 100 nodes, 128 features
+         >>> adj_matrix = np.random.randint(0, 2, (100, 100))  # Adjacency matrix
+         >>> output = gcn.forward(node_features, adj_matrix)
+         >>> print(output.shape)  # (100, 64)
+
+     Use Case:
+         Node classification, semi-supervised learning, citation networks
+
+     Reference:
+         Kipf & Welling, "Semi-Supervised Classification with GCN" (2017)
+     """
+
+     def __init__(self, in_features: int, hidden_features: int,
+                  out_features: Optional[int] = None,
+                  num_layers: int = 2, dropout: float = 0.5):
+         self.in_features = in_features
+         self.hidden_features = hidden_features
+         self.out_features = out_features or hidden_features
+         self.num_layers = num_layers
+         self.dropout = dropout
+
+         # Initialize weights for each layer
+         self.weights = []
+
+         # First layer
+         self.weights.append(
+             np.random.randn(in_features, hidden_features) * np.sqrt(2.0 / in_features)
+         )
+
+         # Hidden layers
+         for _ in range(num_layers - 2):
+             self.weights.append(
+                 np.random.randn(hidden_features, hidden_features) * np.sqrt(2.0 / hidden_features)
+             )
+
+         # Output layer
+         if num_layers > 1:
+             self.weights.append(
+                 np.random.randn(hidden_features, self.out_features) * np.sqrt(2.0 / hidden_features)
+             )
+
+     def normalize_adjacency(self, adj_matrix: np.ndarray) -> np.ndarray:
+         """
+         Normalize adjacency matrix with self-loops.
+
+         Formula:
+             A_norm = D^(-1/2) (A + I) D^(-1/2)
+         """
+         # Add self-loops
+         adj_with_self_loops = adj_matrix + np.eye(adj_matrix.shape[0])
+
+         # Compute degree matrix
+         degree = np.sum(adj_with_self_loops, axis=1)
+         degree_inv_sqrt = np.power(degree, -0.5)
+         degree_inv_sqrt[np.isinf(degree_inv_sqrt)] = 0.0
+
+         # D^(-1/2)
+         D_inv_sqrt = np.diag(degree_inv_sqrt)
+
+         # Normalize: D^(-1/2) (A + I) D^(-1/2)
+         adj_normalized = D_inv_sqrt @ adj_with_self_loops @ D_inv_sqrt
+
+         return adj_normalized
+
+     def forward(self, node_features: np.ndarray, adj_matrix: np.ndarray,
+                 training: bool = True) -> np.ndarray:
+         """
+         Forward pass.
+
+         Args:
+             node_features: Node feature matrix (num_nodes, in_features)
+             adj_matrix: Adjacency matrix (num_nodes, num_nodes)
+             training: Whether in training mode (apply dropout)
+
+         Returns:
+             Node embeddings (num_nodes, out_features)
+         """
+         # Normalize adjacency matrix
+         adj_norm = self.normalize_adjacency(adj_matrix)
+
+         h = node_features
+
+         # Apply GCN layers
+         for i, weight in enumerate(self.weights):
+             # Graph convolution: A_norm @ H @ W
+             h = adj_norm @ h @ weight
+
+             # Apply ReLU activation (except last layer)
+             if i < len(self.weights) - 1:
+                 h = np.maximum(0, h)  # ReLU
+
+                 # Apply dropout
+                 if training and self.dropout > 0:
+                     mask = np.random.binomial(1, 1 - self.dropout, size=h.shape)
+                     h = h * mask / (1 - self.dropout)
+
+         return h
+
+     def __call__(self, node_features: np.ndarray, adj_matrix: np.ndarray,
+                  training: bool = True) -> np.ndarray:
+         return self.forward(node_features, adj_matrix, training)
+
+
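To see the normalization on a concrete graph, here is a minimal usage sketch (it assumes the module is importable as ilovetools.ml.gnn, the path added in SOURCES.txt below; the 3-node path graph and feature sizes are illustrative only). With self-loops the degrees are [2, 3, 2], so the normalized weight between nodes 0 and 1 is 1 / sqrt(2 * 3) ≈ 0.408:

    import numpy as np
    from ilovetools.ml.gnn import GCN

    # 3-node path graph: 0 - 1 - 2
    adj = np.array([[0., 1., 0.],
                    [1., 0., 1.],
                    [0., 1., 0.]])

    gcn = GCN(in_features=4, hidden_features=8, out_features=2)

    # Entry (0, 1) should be ~0.408, as computed above
    a_norm = gcn.normalize_adjacency(adj)
    print(np.round(a_norm, 3))

    x = np.random.randn(3, 4)                  # 3 nodes, 4 input features
    out = gcn.forward(x, adj, training=False)  # disable dropout at inference
    print(out.shape)                           # (3, 2)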
+ # ============================================================================
+ # GRAPH ATTENTION NETWORK (GAT)
+ # ============================================================================
+
+ class GAT:
+     """
+     Graph Attention Network.
+
+     Uses an attention mechanism to weight neighbor contributions dynamically.
+
+     Formula:
+         α_ij = softmax(LeakyReLU(a^T [W h_i || W h_j]))
+         h_i' = σ(Σ_j α_ij W h_j)
+
+     where:
+         - α_ij: Attention coefficient from node j to node i
+         - W: Learnable weight matrix
+         - a: Attention vector
+         - ||: Concatenation
+
+     Args:
+         in_features: Input feature dimension
+         hidden_features: Hidden feature dimension per head
+         out_features: Output feature dimension (optional)
+         num_heads: Number of attention heads (default: 8)
+         dropout: Dropout rate (default: 0.6)
+         alpha: LeakyReLU negative slope (default: 0.2)
+
+     Example:
+         >>> gat = GAT(in_features=128, hidden_features=256, num_heads=8)
+         >>> node_features = np.random.randn(100, 128)
+         >>> adj_matrix = np.random.randint(0, 2, (100, 100))
+         >>> output = gat.forward(node_features, adj_matrix)
+         >>> print(output.shape)  # (100, 256*8) - concatenated heads
+
+     Use Case:
+         Node classification with varying neighbor importance, citation networks
+
+     Reference:
+         Veličković et al., "Graph Attention Networks" (2018)
+     """
+
+     def __init__(self, in_features: int, hidden_features: int,
+                  out_features: Optional[int] = None,
+                  num_heads: int = 8, dropout: float = 0.6, alpha: float = 0.2):
+         self.in_features = in_features
+         self.hidden_features = hidden_features
+         self.out_features = out_features or (hidden_features * num_heads)
+         self.num_heads = num_heads
+         self.dropout = dropout
+         self.alpha = alpha
+
+         # Initialize weights for each attention head
+         self.W = []
+         self.a = []
+
+         for _ in range(num_heads):
+             # Weight matrix for features
+             self.W.append(
+                 np.random.randn(in_features, hidden_features) * np.sqrt(2.0 / in_features)
+             )
+             # Attention vector
+             self.a.append(
+                 np.random.randn(2 * hidden_features) * 0.01
+             )
+
+     def leaky_relu(self, x: np.ndarray) -> np.ndarray:
+         """LeakyReLU activation."""
+         return np.where(x > 0, x, self.alpha * x)
+
+     def attention(self, h: np.ndarray, adj_matrix: np.ndarray,
+                   W: np.ndarray, a: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
+         """
+         Compute attention coefficients.
+
+         Returns:
+             Tuple of (attention_weights, aggregated_features)
+         """
+         num_nodes = h.shape[0]
+
+         # Transform features: h' = h @ W
+         h_transformed = h @ W  # (num_nodes, hidden_features)
+
+         # Compute attention scores
+         attention_scores = np.zeros((num_nodes, num_nodes))
+
+         for i in range(num_nodes):
+             for j in range(num_nodes):
+                 if adj_matrix[i, j] > 0 or i == j:  # Only for neighbors and self
+                     # Concatenate features
+                     concat = np.concatenate([h_transformed[i], h_transformed[j]])
+                     # Compute attention score
+                     attention_scores[i, j] = np.dot(a, concat)
+
+         # Apply LeakyReLU
+         attention_scores = self.leaky_relu(attention_scores)
+
+         # Mask non-neighbors
+         mask = (adj_matrix + np.eye(num_nodes)) > 0
+         attention_scores = np.where(mask, attention_scores, -1e9)
+
+         # Softmax normalization
+         attention_weights = np.exp(attention_scores - np.max(attention_scores, axis=1, keepdims=True))
+         attention_weights = attention_weights / (np.sum(attention_weights, axis=1, keepdims=True) + 1e-8)
+
+         # Aggregate features
+         aggregated = attention_weights @ h_transformed
+
+         return attention_weights, aggregated
+
+     def forward(self, node_features: np.ndarray, adj_matrix: np.ndarray,
+                 training: bool = True) -> np.ndarray:
+         """
+         Forward pass.
+
+         Args:
+             node_features: Node feature matrix (num_nodes, in_features)
+             adj_matrix: Adjacency matrix (num_nodes, num_nodes)
+             training: Whether in training mode
+
+         Returns:
+             Node embeddings (num_nodes, out_features)
+         """
+         # Multi-head attention
+         head_outputs = []
+
+         for W, a in zip(self.W, self.a):
+             _, aggregated = self.attention(node_features, adj_matrix, W, a)
+
+             # Apply dropout
+             if training and self.dropout > 0:
+                 mask = np.random.binomial(1, 1 - self.dropout, size=aggregated.shape)
+                 aggregated = aggregated * mask / (1 - self.dropout)
+
+             head_outputs.append(aggregated)
+
+         # Concatenate heads
+         output = np.concatenate(head_outputs, axis=1)  # (num_nodes, hidden_features * num_heads)
+
+         # Apply ELU activation
+         output = np.where(output > 0, output, np.exp(output) - 1)
+
+         return output
+
+     def __call__(self, node_features: np.ndarray, adj_matrix: np.ndarray,
+                  training: bool = True) -> np.ndarray:
+         return self.forward(node_features, adj_matrix, training)
+
+
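Because the scores are masked to each neighborhood and softmax-normalized, every row of the attention matrix is a probability distribution over a node's neighbors (including itself). A minimal check of that property, again assuming the module is importable as ilovetools.ml.gnn (the tiny graph and sizes are illustrative):

    import numpy as np
    from ilovetools.ml.gnn import GAT, create_adjacency_matrix

    adj = create_adjacency_matrix([(0, 1), (1, 2), (2, 3)], num_nodes=4)
    gat = GAT(in_features=4, hidden_features=8, num_heads=2)

    x = np.random.randn(4, 4)
    weights, aggregated = gat.attention(x, adj, gat.W[0], gat.a[0])

    # Each row sums to ~1 over the node's neighborhood
    print(np.round(weights.sum(axis=1), 3))  # ~[1. 1. 1. 1.]
    print(aggregated.shape)                  # (4, 8)

    out = gat(x, adj, training=False)
    print(out.shape)                         # (4, 16): 2 heads x 8 features

Note that attention() loops over all node pairs, so each head costs O(N^2) score evaluations; this implementation is best suited to small graphs.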
+ # ============================================================================
+ # GRAPHSAGE
+ # ============================================================================
+
+ class GraphSAGE:
+     """
+     Graph Sample and Aggregate.
+
+     Samples fixed-size neighborhoods and aggregates features for scalability.
+
+     Formula:
+         h_N(v) = AGGREGATE({h_u, ∀u ∈ N(v)})
+         h_v' = σ(W · CONCAT(h_v, h_N(v)))
+
+     where:
+         - N(v): Sampled neighborhood of node v
+         - AGGREGATE: Mean, max, or LSTM aggregation
+         - CONCAT: Concatenation
+
+     Args:
+         in_features: Input feature dimension
+         hidden_features: Hidden feature dimension
+         out_features: Output feature dimension (optional)
+         num_layers: Number of GraphSAGE layers (default: 2)
+         aggregator: Aggregation function ('mean', 'max', 'sum') (default: 'mean')
+         num_samples: Number of neighbors to sample per layer (default: 25)
+
+     Example:
+         >>> sage = GraphSAGE(in_features=128, hidden_features=256, num_samples=10)
+         >>> node_features = np.random.randn(100, 128)
+         >>> adj_matrix = np.random.randint(0, 2, (100, 100))
+         >>> output = sage.forward(node_features, adj_matrix)
+         >>> print(output.shape)  # (100, 256)
+
+     Use Case:
+         Large-scale graphs, inductive learning, unseen nodes
+
+     Reference:
+         Hamilton et al., "Inductive Representation Learning on Large Graphs" (2017)
+     """
+
+     def __init__(self, in_features: int, hidden_features: int,
+                  out_features: Optional[int] = None,
+                  num_layers: int = 2, aggregator: str = 'mean',
+                  num_samples: int = 25):
+         self.in_features = in_features
+         self.hidden_features = hidden_features
+         self.out_features = out_features or hidden_features
+         self.num_layers = num_layers
+         self.aggregator = aggregator
+         self.num_samples = num_samples
+
+         # Initialize weights
+         self.weights = []
+
+         # First layer
+         self.weights.append(
+             np.random.randn(in_features * 2, hidden_features) * np.sqrt(2.0 / (in_features * 2))
+         )
+
+         # Hidden layers
+         for _ in range(num_layers - 2):
+             self.weights.append(
+                 np.random.randn(hidden_features * 2, hidden_features) * np.sqrt(2.0 / (hidden_features * 2))
+             )
+
+         # Output layer
+         if num_layers > 1:
+             self.weights.append(
+                 np.random.randn(hidden_features * 2, self.out_features) * np.sqrt(2.0 / (hidden_features * 2))
+             )
+
+     def sample_neighbors(self, adj_matrix: np.ndarray, node_idx: int) -> np.ndarray:
+         """Sample fixed number of neighbors for a node."""
+         neighbors = np.where(adj_matrix[node_idx] > 0)[0]
+
+         if len(neighbors) == 0:
+             return np.array([node_idx])  # Self-loop if no neighbors
+
+         if len(neighbors) <= self.num_samples:
+             return neighbors
+
+         # Random sampling
+         return np.random.choice(neighbors, size=self.num_samples, replace=False)
+
+     def aggregate(self, neighbor_features: np.ndarray) -> np.ndarray:
+         """Aggregate neighbor features."""
+         if self.aggregator == 'mean':
+             return np.mean(neighbor_features, axis=0)
+         elif self.aggregator == 'max':
+             return np.max(neighbor_features, axis=0)
+         elif self.aggregator == 'sum':
+             return np.sum(neighbor_features, axis=0)
+         else:
+             raise ValueError(f"Unknown aggregator: {self.aggregator}")
+
+     def forward(self, node_features: np.ndarray, adj_matrix: np.ndarray) -> np.ndarray:
+         """
+         Forward pass.
+
+         Args:
+             node_features: Node feature matrix (num_nodes, in_features)
+             adj_matrix: Adjacency matrix (num_nodes, num_nodes)
+
+         Returns:
+             Node embeddings (num_nodes, out_features)
+         """
+         num_nodes = node_features.shape[0]
+         h = node_features
+
+         for weight in self.weights:
+             h_new = np.zeros((num_nodes, weight.shape[1]))
+
+             for i in range(num_nodes):
+                 # Sample neighbors
+                 neighbors = self.sample_neighbors(adj_matrix, i)
+
+                 # Aggregate neighbor features
+                 neighbor_h = h[neighbors]
+                 aggregated = self.aggregate(neighbor_h)
+
+                 # Concatenate self and aggregated neighbor features
+                 concat = np.concatenate([h[i], aggregated])
+
+                 # Transform
+                 h_new[i] = concat @ weight
+
+             # Apply ReLU
+             h = np.maximum(0, h_new)
+
+             # L2 normalization
+             norms = np.linalg.norm(h, axis=1, keepdims=True)
+             h = h / (norms + 1e-8)
+
+         return h
+
+     def __call__(self, node_features: np.ndarray, adj_matrix: np.ndarray) -> np.ndarray:
+         return self.forward(node_features, adj_matrix)
+
+
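GraphSAGE's weights are shaped only by feature dimensions, not by any particular graph, which is what makes it inductive: the same model can embed a graph of a different size. A short sketch of that, under the same assumption that the module is importable as ilovetools.ml.gnn (graphs and sizes illustrative):

    import numpy as np
    from ilovetools.ml.gnn import GraphSAGE, create_adjacency_matrix

    sage = GraphSAGE(in_features=4, hidden_features=8, num_samples=5)

    # A 6-node path graph
    adj_a = create_adjacency_matrix([(0, 1), (1, 2), (2, 3), (3, 4), (4, 5)], num_nodes=6)
    print(sage(np.random.randn(6, 4), adj_a).shape)  # (6, 8)

    # A different, unseen 3-node graph: the same weights still apply
    adj_b = create_adjacency_matrix([(0, 1), (1, 2)], num_nodes=3)
    print(sage(np.random.randn(3, 4), adj_b).shape)  # (3, 8)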
+ # ============================================================================
+ # GRAPH ISOMORPHISM NETWORK (GIN)
+ # ============================================================================
+
+ class GIN:
+     """
+     Graph Isomorphism Network.
+
+     As expressive as the Weisfeiler-Lehman test; uses MLPs to distinguish non-isomorphic graphs.
+
+     Formula:
+         h_v^(k) = MLP^(k)((1 + ε^(k)) · h_v^(k-1) + Σ_{u∈N(v)} h_u^(k-1))
+
+     where:
+         - ε: Learnable parameter or fixed scalar
+         - MLP: Multi-layer perceptron
+         - N(v): Neighbors of node v
+
+     Args:
+         in_features: Input feature dimension
+         hidden_features: Hidden feature dimension
+         out_features: Output feature dimension (optional)
+         num_layers: Number of GIN layers (default: 5)
+         epsilon: Initial epsilon value (default: 0.0)
+         learn_epsilon: Whether epsilon is learnable (default: True)
+
+     Example:
+         >>> gin = GIN(in_features=128, hidden_features=256, num_layers=5)
+         >>> node_features = np.random.randn(100, 128)
+         >>> adj_matrix = np.random.randint(0, 2, (100, 100))
+         >>> output = gin.forward(node_features, adj_matrix)
+         >>> print(output.shape)  # (100, 256)
+
+     Use Case:
+         Graph classification, molecular property prediction, high expressiveness needed
+
+     Reference:
+         Xu et al., "How Powerful are Graph Neural Networks?" (2019)
+     """
+
+     def __init__(self, in_features: int, hidden_features: int,
+                  out_features: Optional[int] = None,
+                  num_layers: int = 5, epsilon: float = 0.0,
+                  learn_epsilon: bool = True):
+         self.in_features = in_features
+         self.hidden_features = hidden_features
+         self.out_features = out_features or hidden_features
+         self.num_layers = num_layers
+         # When learnable, epsilon starts from a small random value
+         self.epsilon = epsilon if not learn_epsilon else np.random.randn() * 0.01
+         self.learn_epsilon = learn_epsilon
+
+         # Initialize MLP weights for each layer
+         self.mlp_weights = []
+
+         for i in range(num_layers):
+             layer_in = in_features if i == 0 else hidden_features
+             layer_out = self.out_features if i == num_layers - 1 else hidden_features
+
+             # Two-layer MLP
+             w1 = np.random.randn(layer_in, hidden_features) * np.sqrt(2.0 / layer_in)
+             w2 = np.random.randn(hidden_features, layer_out) * np.sqrt(2.0 / hidden_features)
+
+             self.mlp_weights.append((w1, w2))
+
+     def mlp(self, x: np.ndarray, w1: np.ndarray, w2: np.ndarray) -> np.ndarray:
+         """Two-layer MLP."""
+         h = np.maximum(0, x @ w1)  # ReLU
+         return h @ w2
+
+     def forward(self, node_features: np.ndarray, adj_matrix: np.ndarray) -> np.ndarray:
+         """
+         Forward pass.
+
+         Args:
+             node_features: Node feature matrix (num_nodes, in_features)
+             adj_matrix: Adjacency matrix (num_nodes, num_nodes)
+
+         Returns:
+             Node embeddings (num_nodes, out_features)
+         """
+         h = node_features
+
+         for idx, (w1, w2) in enumerate(self.mlp_weights):
+             # Aggregate neighbors
+             neighbor_sum = adj_matrix @ h
+
+             # Add self features with epsilon
+             h_new = (1 + self.epsilon) * h + neighbor_sum
+
+             # Apply MLP
+             h = self.mlp(h_new, w1, w2)
+
+             # Apply ReLU (except last layer); comparing the weight tuples with !=
+             # is ambiguous for NumPy arrays, so use the layer index instead
+             if idx < len(self.mlp_weights) - 1:
+                 h = np.maximum(0, h)
+
+         return h
+
+     def __call__(self, node_features: np.ndarray, adj_matrix: np.ndarray) -> np.ndarray:
+         return self.forward(node_features, adj_matrix)
+
+
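With learn_epsilon=False and the default epsilon of 0.0, each layer's update reduces to h + A @ h: a node's own features plus the sum of its neighbors'. A sketch pairing GIN with graph_pooling from the utilities below for a graph-level embedding (same importability assumption; the triangle graph is illustrative):

    import numpy as np
    from ilovetools.ml.gnn import GIN, create_adjacency_matrix, graph_pooling

    # Triangle graph
    adj = create_adjacency_matrix([(0, 1), (1, 2), (2, 0)], num_nodes=3)

    gin = GIN(in_features=4, hidden_features=8, num_layers=3, learn_epsilon=False)

    node_emb = gin(np.random.randn(3, 4), adj)
    print(node_emb.shape)    # (3, 8)

    # Graph classification typically pools node embeddings into one vector;
    # GIN's theory favors sum pooling for maximum expressiveness
    graph_emb = graph_pooling(node_emb, method='sum')
    print(graph_emb.shape)   # (8,)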
558
+
559
+ # ============================================================================
560
+ # UTILITY FUNCTIONS
561
+ # ============================================================================
562
+
563
+ def create_adjacency_matrix(edges: List[Tuple[int, int]], num_nodes: int) -> np.ndarray:
564
+ """
565
+ Create adjacency matrix from edge list.
566
+
567
+ Args:
568
+ edges: List of (source, target) tuples
569
+ num_nodes: Total number of nodes
570
+
571
+ Returns:
572
+ Adjacency matrix (num_nodes, num_nodes)
573
+
574
+ Example:
575
+ >>> edges = [(0, 1), (1, 2), (2, 0)]
576
+ >>> adj = create_adjacency_matrix(edges, num_nodes=3)
577
+ >>> print(adj)
578
+ """
579
+ adj_matrix = np.zeros((num_nodes, num_nodes))
580
+
581
+ for src, tgt in edges:
582
+ adj_matrix[src, tgt] = 1
583
+ adj_matrix[tgt, src] = 1 # Undirected graph
584
+
585
+ return adj_matrix
586
+
587
+
588
+ def graph_pooling(node_embeddings: np.ndarray, method: str = 'mean') -> np.ndarray:
589
+ """
590
+ Pool node embeddings to graph-level representation.
591
+
592
+ Args:
593
+ node_embeddings: Node embeddings (num_nodes, features)
594
+ method: Pooling method ('mean', 'max', 'sum')
595
+
596
+ Returns:
597
+ Graph embedding (features,)
598
+
599
+ Example:
600
+ >>> node_emb = np.random.randn(100, 256)
601
+ >>> graph_emb = graph_pooling(node_emb, method='mean')
602
+ >>> print(graph_emb.shape) # (256,)
603
+ """
604
+ if method == 'mean':
605
+ return np.mean(node_embeddings, axis=0)
606
+ elif method == 'max':
607
+ return np.max(node_embeddings, axis=0)
608
+ elif method == 'sum':
609
+ return np.sum(node_embeddings, axis=0)
610
+ else:
611
+ raise ValueError(f"Unknown pooling method: {method}")
612
+
613
+
614
+ __all__ = [
615
+ 'GCN',
616
+ 'GAT',
617
+ 'GraphSAGE',
618
+ 'GIN',
619
+ 'create_adjacency_matrix',
620
+ 'graph_pooling',
621
+ ]
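As a quick check of the edge-list helper (same importability assumption as above): each (src, tgt) pair is written in both directions, so the resulting matrix is symmetric:

    from ilovetools.ml.gnn import create_adjacency_matrix

    adj = create_adjacency_matrix([(0, 1), (1, 2), (2, 0)], num_nodes=3)
    print(adj)
    # [[0. 1. 1.]
    #  [1. 0. 1.]
    #  [1. 1. 0.]]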
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: ilovetools
- Version: 0.2.33
+ Version: 0.2.34
  Summary: A comprehensive Python utility library with modular tools for AI/ML, data processing, and daily programming needs
  Home-page: https://github.com/AliMehdi512/ilovetools
  Author: Ali Mehdi
@@ -11,7 +11,7 @@ Project-URL: Repository, https://github.com/AliMehdi512/ilovetools
  Project-URL: Issues, https://github.com/AliMehdi512/ilovetools/issues
  Project-URL: Bug Reports, https://github.com/AliMehdi512/ilovetools/issues
  Project-URL: Source, https://github.com/AliMehdi512/ilovetools
- Keywords: utilities,tools,ai,ml,data-processing,automation,embeddings,word-embeddings,word2vec,glove,fasttext,positional-encoding,sinusoidal-encoding,learned-embeddings,token-embeddings,character-embeddings,segment-embeddings,token-type-embeddings,semantic-similarity,cosine-similarity,embedding-space,embedding-matrix,lookup-table,distributed-representations,dense-vectors,nlp,natural-language-processing,transformers,bert,gpt,attention,vocabulary,tokenization,pretrained-embeddings,transfer-learning,deep-learning,neural-networks,pytorch,tensorflow,keras
+ Keywords: utilities,tools,ai,ml,data-processing,automation,graph-neural-networks,gnn,gcn,gat,graphsage,gin,graph-convolutional-network,graph-attention-network,message-passing,neighborhood-aggregation,node-classification,link-prediction,graph-classification,node-embeddings,graph-embeddings,social-networks,knowledge-graphs,molecular-graphs,citation-networks,drug-discovery,molecular-property-prediction,friend-recommendation,product-recommendation,traffic-prediction,relational-data,graph-pooling,spectral-graph-theory,spatial-graph-convolution,attention-mechanism,inductive-learning,graph-isomorphism,deep-learning,neural-networks,pytorch,tensorflow
  Classifier: Development Status :: 3 - Alpha
  Classifier: Intended Audience :: Developers
  Classifier: Topic :: Software Development :: Libraries :: Python Modules
@@ -43,6 +43,7 @@ ilovetools/ml/dropout.py
  ilovetools/ml/embedding.py
  ilovetools/ml/ensemble.py
  ilovetools/ml/feature_selection.py
+ ilovetools/ml/gnn.py
  ilovetools/ml/gradient_descent.py
  ilovetools/ml/imbalanced.py
  ilovetools/ml/interpretation.py
@@ -85,6 +86,7 @@ tests/test_cnn.py
  tests/test_convolution.py
  tests/test_dropout.py
  tests/test_embedding.py
+ tests/test_gnn.py
  tests/test_gradient_descent.py
  tests/test_loss_functions.py
  tests/test_losses.py