createsonline-0.1.26-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- createsonline/__init__.py +46 -0
- createsonline/admin/__init__.py +7 -0
- createsonline/admin/content.py +526 -0
- createsonline/admin/crud.py +805 -0
- createsonline/admin/field_builder.py +559 -0
- createsonline/admin/integration.py +482 -0
- createsonline/admin/interface.py +2562 -0
- createsonline/admin/model_creator.py +513 -0
- createsonline/admin/model_manager.py +388 -0
- createsonline/admin/modern_dashboard.py +498 -0
- createsonline/admin/permissions.py +264 -0
- createsonline/admin/user_forms.py +594 -0
- createsonline/ai/__init__.py +202 -0
- createsonline/ai/fields.py +1226 -0
- createsonline/ai/orm.py +325 -0
- createsonline/ai/services.py +1244 -0
- createsonline/app.py +506 -0
- createsonline/auth/__init__.py +8 -0
- createsonline/auth/management.py +228 -0
- createsonline/auth/models.py +552 -0
- createsonline/cli/__init__.py +5 -0
- createsonline/cli/commands/__init__.py +122 -0
- createsonline/cli/commands/database.py +416 -0
- createsonline/cli/commands/info.py +173 -0
- createsonline/cli/commands/initdb.py +218 -0
- createsonline/cli/commands/project.py +545 -0
- createsonline/cli/commands/serve.py +173 -0
- createsonline/cli/commands/shell.py +93 -0
- createsonline/cli/commands/users.py +148 -0
- createsonline/cli/main.py +2041 -0
- createsonline/cli/manage.py +274 -0
- createsonline/config/__init__.py +9 -0
- createsonline/config/app.py +2577 -0
- createsonline/config/database.py +179 -0
- createsonline/config/docs.py +384 -0
- createsonline/config/errors.py +160 -0
- createsonline/config/orm.py +43 -0
- createsonline/config/request.py +93 -0
- createsonline/config/settings.py +176 -0
- createsonline/data/__init__.py +23 -0
- createsonline/data/dataframe.py +925 -0
- createsonline/data/io.py +453 -0
- createsonline/data/series.py +557 -0
- createsonline/database/__init__.py +60 -0
- createsonline/database/abstraction.py +440 -0
- createsonline/database/assistant.py +585 -0
- createsonline/database/fields.py +442 -0
- createsonline/database/migrations.py +132 -0
- createsonline/database/models.py +604 -0
- createsonline/database.py +438 -0
- createsonline/http/__init__.py +28 -0
- createsonline/http/client.py +535 -0
- createsonline/ml/__init__.py +55 -0
- createsonline/ml/classification.py +552 -0
- createsonline/ml/clustering.py +680 -0
- createsonline/ml/metrics.py +542 -0
- createsonline/ml/neural.py +560 -0
- createsonline/ml/preprocessing.py +784 -0
- createsonline/ml/regression.py +501 -0
- createsonline/performance/__init__.py +19 -0
- createsonline/performance/cache.py +444 -0
- createsonline/performance/compression.py +335 -0
- createsonline/performance/core.py +419 -0
- createsonline/project_init.py +789 -0
- createsonline/routing.py +528 -0
- createsonline/security/__init__.py +34 -0
- createsonline/security/core.py +811 -0
- createsonline/security/encryption.py +349 -0
- createsonline/server.py +295 -0
- createsonline/static/css/admin.css +263 -0
- createsonline/static/css/common.css +358 -0
- createsonline/static/css/dashboard.css +89 -0
- createsonline/static/favicon.ico +0 -0
- createsonline/static/icons/icon-128x128.png +0 -0
- createsonline/static/icons/icon-128x128.webp +0 -0
- createsonline/static/icons/icon-16x16.png +0 -0
- createsonline/static/icons/icon-16x16.webp +0 -0
- createsonline/static/icons/icon-180x180.png +0 -0
- createsonline/static/icons/icon-180x180.webp +0 -0
- createsonline/static/icons/icon-192x192.png +0 -0
- createsonline/static/icons/icon-192x192.webp +0 -0
- createsonline/static/icons/icon-256x256.png +0 -0
- createsonline/static/icons/icon-256x256.webp +0 -0
- createsonline/static/icons/icon-32x32.png +0 -0
- createsonline/static/icons/icon-32x32.webp +0 -0
- createsonline/static/icons/icon-384x384.png +0 -0
- createsonline/static/icons/icon-384x384.webp +0 -0
- createsonline/static/icons/icon-48x48.png +0 -0
- createsonline/static/icons/icon-48x48.webp +0 -0
- createsonline/static/icons/icon-512x512.png +0 -0
- createsonline/static/icons/icon-512x512.webp +0 -0
- createsonline/static/icons/icon-64x64.png +0 -0
- createsonline/static/icons/icon-64x64.webp +0 -0
- createsonline/static/image/android-chrome-192x192.png +0 -0
- createsonline/static/image/android-chrome-512x512.png +0 -0
- createsonline/static/image/apple-touch-icon.png +0 -0
- createsonline/static/image/favicon-16x16.png +0 -0
- createsonline/static/image/favicon-32x32.png +0 -0
- createsonline/static/image/favicon.ico +0 -0
- createsonline/static/image/favicon.svg +17 -0
- createsonline/static/image/icon-128x128.png +0 -0
- createsonline/static/image/icon-128x128.webp +0 -0
- createsonline/static/image/icon-16x16.png +0 -0
- createsonline/static/image/icon-16x16.webp +0 -0
- createsonline/static/image/icon-180x180.png +0 -0
- createsonline/static/image/icon-180x180.webp +0 -0
- createsonline/static/image/icon-192x192.png +0 -0
- createsonline/static/image/icon-192x192.webp +0 -0
- createsonline/static/image/icon-256x256.png +0 -0
- createsonline/static/image/icon-256x256.webp +0 -0
- createsonline/static/image/icon-32x32.png +0 -0
- createsonline/static/image/icon-32x32.webp +0 -0
- createsonline/static/image/icon-384x384.png +0 -0
- createsonline/static/image/icon-384x384.webp +0 -0
- createsonline/static/image/icon-48x48.png +0 -0
- createsonline/static/image/icon-48x48.webp +0 -0
- createsonline/static/image/icon-512x512.png +0 -0
- createsonline/static/image/icon-512x512.webp +0 -0
- createsonline/static/image/icon-64x64.png +0 -0
- createsonline/static/image/icon-64x64.webp +0 -0
- createsonline/static/image/logo-header-h100.png +0 -0
- createsonline/static/image/logo-header-h100.webp +0 -0
- createsonline/static/image/logo-header-h200@2x.png +0 -0
- createsonline/static/image/logo-header-h200@2x.webp +0 -0
- createsonline/static/image/logo.png +0 -0
- createsonline/static/js/admin.js +274 -0
- createsonline/static/site.webmanifest +35 -0
- createsonline/static/templates/admin/base.html +87 -0
- createsonline/static/templates/admin/dashboard.html +217 -0
- createsonline/static/templates/admin/model_form.html +270 -0
- createsonline/static/templates/admin/model_list.html +202 -0
- createsonline/static/test_script.js +15 -0
- createsonline/static/test_styles.css +59 -0
- createsonline/static_files.py +365 -0
- createsonline/templates/404.html +100 -0
- createsonline/templates/admin_login.html +169 -0
- createsonline/templates/base.html +102 -0
- createsonline/templates/index.html +151 -0
- createsonline/templates.py +205 -0
- createsonline/testing.py +322 -0
- createsonline/utils.py +448 -0
- createsonline/validation/__init__.py +49 -0
- createsonline/validation/fields.py +598 -0
- createsonline/validation/models.py +504 -0
- createsonline/validation/validators.py +561 -0
- createsonline/views.py +184 -0
- createsonline-0.1.26.dist-info/METADATA +46 -0
- createsonline-0.1.26.dist-info/RECORD +152 -0
- createsonline-0.1.26.dist-info/WHEEL +5 -0
- createsonline-0.1.26.dist-info/entry_points.txt +2 -0
- createsonline-0.1.26.dist-info/licenses/LICENSE +21 -0
- createsonline-0.1.26.dist-info/top_level.txt +1 -0
createsonline/ml/neural.py
@@ -0,0 +1,560 @@
"""
CREATESONLINE Neural Networks

Feedforward neural network implementation in plain Python and NumPy
(no deep learning framework required).
"""

import numpy as np
from typing import List, Optional, Union, Tuple
import random


class NeuralNetwork:
    """
    Multi-layer Neural Network implementation

    Feedforward neural network with backpropagation, built on NumPy.
    """

    def __init__(
        self,
        hidden_layers: List[int] = [10],
        activation: str = 'relu',
        output_activation: str = 'linear',
        learning_rate: float = 0.01,
        max_iterations: int = 1000,
        tolerance: float = 1e-6,
        random_state: Optional[int] = None,
        batch_size: Optional[int] = None
    ):
        """
        Initialize Neural Network

        Args:
            hidden_layers: List of hidden layer sizes
            activation: Activation function ('relu', 'sigmoid', 'tanh')
            output_activation: Output activation ('linear', 'sigmoid', 'softmax')
            learning_rate: Learning rate for gradient descent
            max_iterations: Maximum training iterations
            tolerance: Convergence tolerance
            random_state: Random seed for reproducibility
            batch_size: Batch size for mini-batch gradient descent (None for full batch)
        """
        self.hidden_layers = hidden_layers
        self.activation = activation
        self.output_activation = output_activation
        self.learning_rate = learning_rate
        self.max_iterations = max_iterations
        self.tolerance = tolerance
        self.random_state = random_state
        self.batch_size = batch_size

        self.weights = []
        self.biases = []
        self.loss_history = []
        self.fitted = False
        self.classes_ = None  # Original class labels, recorded by fit() for softmax output

        # Set random seed
        if self.random_state is not None:
            np.random.seed(self.random_state)
            random.seed(self.random_state)

    def _initialize_weights(self, n_features: int, n_outputs: int):
        """Initialize network weights and biases"""
        layers = [n_features] + self.hidden_layers + [n_outputs]

        self.weights = []
        self.biases = []

        for i in range(len(layers) - 1):
            # Xavier initialization
            limit = np.sqrt(6.0 / (layers[i] + layers[i + 1]))
            weight = np.random.uniform(-limit, limit, (layers[i], layers[i + 1]))
            bias = np.zeros((1, layers[i + 1]))

            self.weights.append(weight)
            self.biases.append(bias)
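
    # Editor's note: the bound above is the Glorot/Xavier uniform limit,
    # limit = sqrt(6 / (fan_in + fan_out)), which keeps activation variance
    # roughly constant across layers. For a layer mapping 3 inputs to 10 units,
    # limit = sqrt(6 / 13) ≈ 0.68, so weights start in [-0.68, 0.68].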

    def _relu(self, x: np.ndarray) -> np.ndarray:
        """ReLU activation function"""
        return np.maximum(0, x)

    def _relu_derivative(self, x: np.ndarray) -> np.ndarray:
        """ReLU derivative"""
        return (x > 0).astype(float)

    def _sigmoid(self, x: np.ndarray) -> np.ndarray:
        """Sigmoid activation function"""
        # Clip to prevent overflow
        x = np.clip(x, -500, 500)
        return 1 / (1 + np.exp(-x))

    def _sigmoid_derivative(self, x: np.ndarray) -> np.ndarray:
        """Sigmoid derivative"""
        s = self._sigmoid(x)
        return s * (1 - s)

    def _tanh(self, x: np.ndarray) -> np.ndarray:
        """Tanh activation function"""
        return np.tanh(x)

    def _tanh_derivative(self, x: np.ndarray) -> np.ndarray:
        """Tanh derivative"""
        return 1 - np.tanh(x) ** 2

    def _softmax(self, x: np.ndarray) -> np.ndarray:
        """Softmax activation function"""
        # Subtract max for numerical stability
        exp_x = np.exp(x - np.max(x, axis=1, keepdims=True))
        return exp_x / np.sum(exp_x, axis=1, keepdims=True)

    def _activation_function(self, x: np.ndarray, activation: str) -> np.ndarray:
        """Apply activation function"""
        if activation == 'relu':
            return self._relu(x)
        elif activation == 'sigmoid':
            return self._sigmoid(x)
        elif activation == 'tanh':
            return self._tanh(x)
        elif activation == 'linear':
            return x
        elif activation == 'softmax':
            return self._softmax(x)
        else:
            raise ValueError(f"Unknown activation: {activation}")

    def _activation_derivative(self, x: np.ndarray, activation: str) -> np.ndarray:
        """Calculate activation derivative"""
        if activation == 'relu':
            return self._relu_derivative(x)
        elif activation == 'sigmoid':
            return self._sigmoid_derivative(x)
        elif activation == 'tanh':
            return self._tanh_derivative(x)
        elif activation == 'linear':
            return np.ones_like(x)
        else:
            raise ValueError(f"Derivative not implemented for: {activation}")
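
    # Editor's note: subtracting the row-wise max makes softmax overflow-safe
    # without changing the result, since softmax(x) == softmax(x - c) for any
    # constant c. For example, softmax([1000, 1001]) would overflow np.exp
    # directly, but after shifting it becomes softmax([-1, 0]) ≈ [0.269, 0.731].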

    def _forward_pass(self, X: np.ndarray) -> Tuple[List[np.ndarray], List[np.ndarray]]:
        """
        Forward pass through the network

        Returns:
            Tuple of (activations, z_values) for each layer
        """
        activations = [X]
        z_values = []

        current_input = X

        for i, (weight, bias) in enumerate(zip(self.weights, self.biases)):
            z = current_input @ weight + bias
            z_values.append(z)

            if i == len(self.weights) - 1:  # Output layer
                activation = self._activation_function(z, self.output_activation)
            else:  # Hidden layers
                activation = self._activation_function(z, self.activation)

            activations.append(activation)
            current_input = activation

        return activations, z_values
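
    # Editor's note, a shape walk-through for hidden_layers=[10] with 3 input
    # features and 1 output: X is (n, 3); the weights are [(3, 10), (10, 1)]
    # and biases [(1, 10), (1, 1)], so z1 = X @ W1 + b1 is (n, 10) and the
    # final activation is (n, 1). `activations` then holds [X, a1, y_hat].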

    def _backward_pass(
        self,
        X: np.ndarray,
        y: np.ndarray,
        activations: List[np.ndarray],
        z_values: List[np.ndarray]
    ) -> Tuple[List[np.ndarray], List[np.ndarray]]:
        """
        Backward pass (backpropagation)

        Returns:
            Tuple of (weight_gradients, bias_gradients)
        """
        m = X.shape[0]  # Number of samples

        weight_gradients = []
        bias_gradients = []

        # Output layer error
        if self.output_activation == 'softmax':
            # For softmax with cross-entropy loss
            delta = activations[-1] - y
        else:
            # For other activations with MSE loss
            output_error = activations[-1] - y
            if self.output_activation != 'linear':
                output_derivative = self._activation_derivative(z_values[-1], self.output_activation)
                delta = output_error * output_derivative
            else:
                delta = output_error

        # Propagate error backwards
        for i in range(len(self.weights) - 1, -1, -1):
            # Calculate gradients
            weight_gradient = activations[i].T @ delta / m
            bias_gradient = np.mean(delta, axis=0, keepdims=True)

            weight_gradients.insert(0, weight_gradient)
            bias_gradients.insert(0, bias_gradient)

            # Calculate error for previous layer
            if i > 0:
                delta = (delta @ self.weights[i].T) * self._activation_derivative(z_values[i - 1], self.activation)

        return weight_gradients, bias_gradients
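
    # Editor's note: the softmax shortcut above works because cross-entropy
    # loss L = -sum(y * log(y_hat)) composed with softmax has the well-known
    # gradient dL/dz = y_hat - y at the output logits, so no separate softmax
    # derivative is needed (which is why _activation_derivative omits it).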

    def _calculate_loss(self, y_true: np.ndarray, y_pred: np.ndarray) -> float:
        """Calculate loss"""
        if self.output_activation == 'softmax':
            # Cross-entropy loss
            epsilon = 1e-15  # Prevent log(0)
            y_pred = np.clip(y_pred, epsilon, 1 - epsilon)
            return -np.mean(np.sum(y_true * np.log(y_pred), axis=1))
        else:
            # Mean squared error
            return np.mean((y_true - y_pred) ** 2)

    def fit(self, X: Union[np.ndarray, list], y: Union[np.ndarray, list]) -> 'NeuralNetwork':
        """
        Fit neural network

        Args:
            X: Training features (n_samples, n_features)
            y: Training targets (n_samples, n_outputs)

        Returns:
            Self for method chaining
        """
        X = np.array(X) if not isinstance(X, np.ndarray) else X
        y = np.array(y) if not isinstance(y, np.ndarray) else y

        if X.ndim == 1:
            X = X.reshape(-1, 1)

        if y.ndim == 1:
            if self.output_activation == 'softmax':
                # One-hot encode for multi-class classification, remembering the
                # label set so predict() can map indices back to original labels
                self.classes_ = np.unique(y)
                y_encoded = np.zeros((len(y), len(self.classes_)))
                for i, cls in enumerate(self.classes_):
                    y_encoded[y == cls, i] = 1
                y = y_encoded
            else:
                y = y.reshape(-1, 1)

        n_samples, n_features = X.shape
        n_outputs = y.shape[1]

        # Initialize weights
        self._initialize_weights(n_features, n_outputs)

        self.loss_history = []

        # Training loop
        for iteration in range(self.max_iterations):
            if self.batch_size is None or self.batch_size >= n_samples:
                # Full batch gradient descent
                batch_X, batch_y = X, y
            else:
                # Mini-batch gradient descent
                batch_indices = np.random.choice(n_samples, self.batch_size, replace=False)
                batch_X, batch_y = X[batch_indices], y[batch_indices]

            # Forward pass
            activations, z_values = self._forward_pass(batch_X)

            # Calculate loss
            loss = self._calculate_loss(batch_y, activations[-1])
            self.loss_history.append(loss)

            # Backward pass
            weight_gradients, bias_gradients = self._backward_pass(batch_X, batch_y, activations, z_values)

            # Update weights and biases
            for i, (w_grad, b_grad) in enumerate(zip(weight_gradients, bias_gradients)):
                self.weights[i] -= self.learning_rate * w_grad
                self.biases[i] -= self.learning_rate * b_grad

            # Check for convergence
            if iteration > 0 and abs(self.loss_history[-2] - self.loss_history[-1]) < self.tolerance:
                break

        self.fitted = True
        return self
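
    # Editor's note: with mini-batches the convergence test above compares
    # losses computed on two different random batches, so it can fire early on
    # noise; a common refinement is to track a moving average of the loss or to
    # evaluate the full-batch loss every few iterations.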

    def predict(self, X: Union[np.ndarray, list]) -> np.ndarray:
        """
        Make predictions

        Args:
            X: Features to predict on (n_samples, n_features)

        Returns:
            Predictions (n_samples, n_outputs)
        """
        if not self.fitted:
            raise RuntimeError("Network must be fitted before making predictions")

        X = np.array(X) if not isinstance(X, np.ndarray) else X
        if X.ndim == 1:
            X = X.reshape(-1, 1)

        activations, _ = self._forward_pass(X)
        predictions = activations[-1]

        # For classification, return class predictions
        if self.output_activation == 'softmax':
            indices = np.argmax(predictions, axis=1)
            # Map column indices back to the labels seen during fit(); fall back
            # to raw indices when y was supplied already one-hot encoded
            return self.classes_[indices] if self.classes_ is not None else indices
        elif predictions.shape[1] == 1:
            return predictions.flatten()
        else:
            return predictions

    def predict_proba(self, X: Union[np.ndarray, list]) -> np.ndarray:
        """
        Predict class probabilities (for classification)

        Args:
            X: Features to predict on (n_samples, n_features)

        Returns:
            Class probabilities (n_samples, n_classes)
        """
        if not self.fitted:
            raise RuntimeError("Network must be fitted before making predictions")

        if self.output_activation not in ['sigmoid', 'softmax']:
            raise ValueError("predict_proba only available for classification tasks")

        X = np.array(X) if not isinstance(X, np.ndarray) else X
        if X.ndim == 1:
            X = X.reshape(-1, 1)

        activations, _ = self._forward_pass(X)
        return activations[-1]

    def score(self, X: Union[np.ndarray, list], y: Union[np.ndarray, list]) -> float:
        """
        Calculate accuracy for classification or R² for regression

        Args:
            X: Features
            y: True targets

        Returns:
            Score
        """
        predictions = self.predict(X)
        y = np.array(y) if not isinstance(y, np.ndarray) else y

        if self.output_activation == 'softmax' or (self.output_activation == 'sigmoid' and len(np.unique(y)) == 2):
            # Classification accuracy; sigmoid outputs are probabilities, so
            # threshold them at 0.5 before comparing against the labels
            if self.output_activation == 'sigmoid':
                predictions = (predictions >= 0.5).astype(y.dtype)
            return np.mean(predictions == y)
        else:
            # Regression R²
            ss_res = np.sum((y - predictions) ** 2)
            ss_tot = np.sum((y - np.mean(y)) ** 2)
            return 1 - (ss_res / ss_tot) if ss_tot != 0 else 0.0
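
# Editor's sketch (illustrative, not part of the published module): a minimal
# regression run with the base class, assuming only the code in this file.
#
#     net = NeuralNetwork(hidden_layers=[8], output_activation='linear',
#                         learning_rate=0.05, max_iterations=500, random_state=0)
#     net.fit([[0.0], [0.5], [1.0]], [0.0, 1.0, 2.0])    # learn y ≈ 2x
#     net.predict([[0.25]])                              # value near 0.5 once trained
#     net.score([[0.0], [0.5], [1.0]], [0.0, 1.0, 2.0])  # R² on the training data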


class MLPClassifier(NeuralNetwork):
    """
    Multi-layer Perceptron Classifier

    Specialized neural network for classification tasks.
    """

    def __init__(
        self,
        hidden_layer_sizes: Tuple[int, ...] = (100,),
        activation: str = 'relu',
        learning_rate: float = 0.001,
        max_iter: int = 200,
        random_state: Optional[int] = None,
        batch_size: Union[int, str, None] = 'auto'
    ):
        """
        Initialize MLP Classifier

        Args:
            hidden_layer_sizes: Sizes of hidden layers
            activation: Activation function
            learning_rate: Learning rate
            max_iter: Maximum iterations
            random_state: Random seed
            batch_size: Batch size ('auto' or int)
        """
        if batch_size == 'auto':
            # 'auto' means min(200, n_samples): use batches of at most 200;
            # fit() already falls back to full batch when the dataset is smaller
            batch_size = 200

        super().__init__(
            hidden_layers=list(hidden_layer_sizes),
            activation=activation,
            output_activation='softmax',
            learning_rate=learning_rate,
            max_iterations=max_iter,
            random_state=random_state,
            batch_size=batch_size
        )


class MLPRegressor(NeuralNetwork):
    """
    Multi-layer Perceptron Regressor

    Specialized neural network for regression tasks.
    """

    def __init__(
        self,
        hidden_layer_sizes: Tuple[int, ...] = (100,),
        activation: str = 'relu',
        learning_rate: float = 0.001,
        max_iter: int = 200,
        random_state: Optional[int] = None,
        batch_size: Union[int, str, None] = 'auto'
    ):
        """
        Initialize MLP Regressor

        Args:
            hidden_layer_sizes: Sizes of hidden layers
            activation: Activation function
            learning_rate: Learning rate
            max_iter: Maximum iterations
            random_state: Random seed
            batch_size: Batch size ('auto' or int)
        """
        if batch_size == 'auto':
            # 'auto' means min(200, n_samples): use batches of at most 200;
            # fit() already falls back to full batch when the dataset is smaller
            batch_size = 200

        super().__init__(
            hidden_layers=list(hidden_layer_sizes),
            activation=activation,
            output_activation='linear',
            learning_rate=learning_rate,
            max_iterations=max_iter,
            random_state=random_state,
            batch_size=batch_size
        )


class Perceptron:
    """
    Simple Perceptron implementation

    Single layer perceptron for binary classification.
    """

    def __init__(self, learning_rate: float = 0.01, max_iter: int = 1000, random_state: Optional[int] = None):
        """
        Initialize Perceptron

        Args:
            learning_rate: Learning rate
            max_iter: Maximum iterations
            random_state: Random seed
        """
        self.learning_rate = learning_rate
        self.max_iter = max_iter
        self.random_state = random_state

        self.weights = None
        self.bias = None
        self.classes_ = None  # Original class labels, recorded by fit()
        self.fitted = False

        if self.random_state is not None:
            np.random.seed(self.random_state)

    def fit(self, X: Union[np.ndarray, list], y: Union[np.ndarray, list]) -> 'Perceptron':
        """
        Fit perceptron

        Args:
            X: Training features (n_samples, n_features)
            y: Training targets (n_samples,) - binary (0, 1) or (-1, 1)

        Returns:
            Self for method chaining
        """
        X = np.array(X) if not isinstance(X, np.ndarray) else X
        y = np.array(y) if not isinstance(y, np.ndarray) else y

        if X.ndim == 1:
            X = X.reshape(-1, 1)

        # Convert labels to -1, 1 format, remembering the original labels
        unique_labels = np.unique(y)
        if len(unique_labels) != 2:
            raise ValueError("Perceptron is for binary classification only")

        self.classes_ = unique_labels
        y_binary = np.where(y == unique_labels[0], -1, 1)

        n_samples, n_features = X.shape

        # Initialize weights and bias
        self.weights = np.random.normal(0, 0.01, n_features)
        self.bias = 0.0

        # Training loop
        for iteration in range(self.max_iter):
            errors = 0

            for i in range(n_samples):
                # Calculate prediction
                linear_output = np.dot(X[i], self.weights) + self.bias
                prediction = 1 if linear_output >= 0 else -1

                # Update weights if prediction is wrong
                if prediction != y_binary[i]:
                    self.weights += self.learning_rate * y_binary[i] * X[i]
                    self.bias += self.learning_rate * y_binary[i]
                    errors += 1

            # Stop if no errors
            if errors == 0:
                break

        self.fitted = True
        return self

    def predict(self, X: Union[np.ndarray, list]) -> np.ndarray:
        """
        Make predictions

        Args:
            X: Features to predict on (n_samples, n_features)

        Returns:
            Binary predictions (n_samples,), in the original label space
        """
        if not self.fitted:
            raise RuntimeError("Perceptron must be fitted before making predictions")

        X = np.array(X) if not isinstance(X, np.ndarray) else X
        if X.ndim == 1:
            X = X.reshape(-1, 1)

        linear_output = X @ self.weights + self.bias
        # Map the sign of the decision function back to the labels seen in fit()
        # (fit() encodes classes_[0] as -1 and classes_[1] as +1)
        return np.where(linear_output >= 0, self.classes_[1], self.classes_[0])

    def score(self, X: Union[np.ndarray, list], y: Union[np.ndarray, list]) -> float:
        """
        Calculate accuracy score

        Args:
            X: Features
            y: True targets

        Returns:
            Accuracy score
        """
        predictions = self.predict(X)
        y = np.array(y) if not isinstance(y, np.ndarray) else y

        return np.mean(predictions == y)
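

# Editor's demo (not part of the published wheel): a runnable smoke test for
# the classes above on tiny synthetic datasets. The data, sizes, and
# hyperparameters are illustrative choices, not values from the package.
if __name__ == "__main__":
    rng = np.random.default_rng(0)

    # Regression: learn y = 2x + 1 with a small MLP (full-batch, since n <= 200)
    X_reg = rng.uniform(-1, 1, size=(200, 1))
    y_reg = 2 * X_reg[:, 0] + 1
    reg = MLPRegressor(hidden_layer_sizes=(16,), learning_rate=0.05,
                       max_iter=500, random_state=0)
    reg.fit(X_reg, y_reg)
    print("MLPRegressor R^2:", reg.score(X_reg, y_reg))

    # Binary classification on two well-separated Gaussian clouds
    X_clf = np.vstack([rng.normal(-2.0, 0.5, size=(50, 2)),
                       rng.normal(2.0, 0.5, size=(50, 2))])
    y_clf = np.array([0] * 50 + [1] * 50)
    clf = Perceptron(learning_rate=0.1, max_iter=100, random_state=0)
    clf.fit(X_clf, y_clf)
    print("Perceptron accuracy:", clf.score(X_clf, y_clf))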