createsonline 0.1.26__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- createsonline/__init__.py +46 -0
- createsonline/admin/__init__.py +7 -0
- createsonline/admin/content.py +526 -0
- createsonline/admin/crud.py +805 -0
- createsonline/admin/field_builder.py +559 -0
- createsonline/admin/integration.py +482 -0
- createsonline/admin/interface.py +2562 -0
- createsonline/admin/model_creator.py +513 -0
- createsonline/admin/model_manager.py +388 -0
- createsonline/admin/modern_dashboard.py +498 -0
- createsonline/admin/permissions.py +264 -0
- createsonline/admin/user_forms.py +594 -0
- createsonline/ai/__init__.py +202 -0
- createsonline/ai/fields.py +1226 -0
- createsonline/ai/orm.py +325 -0
- createsonline/ai/services.py +1244 -0
- createsonline/app.py +506 -0
- createsonline/auth/__init__.py +8 -0
- createsonline/auth/management.py +228 -0
- createsonline/auth/models.py +552 -0
- createsonline/cli/__init__.py +5 -0
- createsonline/cli/commands/__init__.py +122 -0
- createsonline/cli/commands/database.py +416 -0
- createsonline/cli/commands/info.py +173 -0
- createsonline/cli/commands/initdb.py +218 -0
- createsonline/cli/commands/project.py +545 -0
- createsonline/cli/commands/serve.py +173 -0
- createsonline/cli/commands/shell.py +93 -0
- createsonline/cli/commands/users.py +148 -0
- createsonline/cli/main.py +2041 -0
- createsonline/cli/manage.py +274 -0
- createsonline/config/__init__.py +9 -0
- createsonline/config/app.py +2577 -0
- createsonline/config/database.py +179 -0
- createsonline/config/docs.py +384 -0
- createsonline/config/errors.py +160 -0
- createsonline/config/orm.py +43 -0
- createsonline/config/request.py +93 -0
- createsonline/config/settings.py +176 -0
- createsonline/data/__init__.py +23 -0
- createsonline/data/dataframe.py +925 -0
- createsonline/data/io.py +453 -0
- createsonline/data/series.py +557 -0
- createsonline/database/__init__.py +60 -0
- createsonline/database/abstraction.py +440 -0
- createsonline/database/assistant.py +585 -0
- createsonline/database/fields.py +442 -0
- createsonline/database/migrations.py +132 -0
- createsonline/database/models.py +604 -0
- createsonline/database.py +438 -0
- createsonline/http/__init__.py +28 -0
- createsonline/http/client.py +535 -0
- createsonline/ml/__init__.py +55 -0
- createsonline/ml/classification.py +552 -0
- createsonline/ml/clustering.py +680 -0
- createsonline/ml/metrics.py +542 -0
- createsonline/ml/neural.py +560 -0
- createsonline/ml/preprocessing.py +784 -0
- createsonline/ml/regression.py +501 -0
- createsonline/performance/__init__.py +19 -0
- createsonline/performance/cache.py +444 -0
- createsonline/performance/compression.py +335 -0
- createsonline/performance/core.py +419 -0
- createsonline/project_init.py +789 -0
- createsonline/routing.py +528 -0
- createsonline/security/__init__.py +34 -0
- createsonline/security/core.py +811 -0
- createsonline/security/encryption.py +349 -0
- createsonline/server.py +295 -0
- createsonline/static/css/admin.css +263 -0
- createsonline/static/css/common.css +358 -0
- createsonline/static/css/dashboard.css +89 -0
- createsonline/static/favicon.ico +0 -0
- createsonline/static/icons/icon-128x128.png +0 -0
- createsonline/static/icons/icon-128x128.webp +0 -0
- createsonline/static/icons/icon-16x16.png +0 -0
- createsonline/static/icons/icon-16x16.webp +0 -0
- createsonline/static/icons/icon-180x180.png +0 -0
- createsonline/static/icons/icon-180x180.webp +0 -0
- createsonline/static/icons/icon-192x192.png +0 -0
- createsonline/static/icons/icon-192x192.webp +0 -0
- createsonline/static/icons/icon-256x256.png +0 -0
- createsonline/static/icons/icon-256x256.webp +0 -0
- createsonline/static/icons/icon-32x32.png +0 -0
- createsonline/static/icons/icon-32x32.webp +0 -0
- createsonline/static/icons/icon-384x384.png +0 -0
- createsonline/static/icons/icon-384x384.webp +0 -0
- createsonline/static/icons/icon-48x48.png +0 -0
- createsonline/static/icons/icon-48x48.webp +0 -0
- createsonline/static/icons/icon-512x512.png +0 -0
- createsonline/static/icons/icon-512x512.webp +0 -0
- createsonline/static/icons/icon-64x64.png +0 -0
- createsonline/static/icons/icon-64x64.webp +0 -0
- createsonline/static/image/android-chrome-192x192.png +0 -0
- createsonline/static/image/android-chrome-512x512.png +0 -0
- createsonline/static/image/apple-touch-icon.png +0 -0
- createsonline/static/image/favicon-16x16.png +0 -0
- createsonline/static/image/favicon-32x32.png +0 -0
- createsonline/static/image/favicon.ico +0 -0
- createsonline/static/image/favicon.svg +17 -0
- createsonline/static/image/icon-128x128.png +0 -0
- createsonline/static/image/icon-128x128.webp +0 -0
- createsonline/static/image/icon-16x16.png +0 -0
- createsonline/static/image/icon-16x16.webp +0 -0
- createsonline/static/image/icon-180x180.png +0 -0
- createsonline/static/image/icon-180x180.webp +0 -0
- createsonline/static/image/icon-192x192.png +0 -0
- createsonline/static/image/icon-192x192.webp +0 -0
- createsonline/static/image/icon-256x256.png +0 -0
- createsonline/static/image/icon-256x256.webp +0 -0
- createsonline/static/image/icon-32x32.png +0 -0
- createsonline/static/image/icon-32x32.webp +0 -0
- createsonline/static/image/icon-384x384.png +0 -0
- createsonline/static/image/icon-384x384.webp +0 -0
- createsonline/static/image/icon-48x48.png +0 -0
- createsonline/static/image/icon-48x48.webp +0 -0
- createsonline/static/image/icon-512x512.png +0 -0
- createsonline/static/image/icon-512x512.webp +0 -0
- createsonline/static/image/icon-64x64.png +0 -0
- createsonline/static/image/icon-64x64.webp +0 -0
- createsonline/static/image/logo-header-h100.png +0 -0
- createsonline/static/image/logo-header-h100.webp +0 -0
- createsonline/static/image/logo-header-h200@2x.png +0 -0
- createsonline/static/image/logo-header-h200@2x.webp +0 -0
- createsonline/static/image/logo.png +0 -0
- createsonline/static/js/admin.js +274 -0
- createsonline/static/site.webmanifest +35 -0
- createsonline/static/templates/admin/base.html +87 -0
- createsonline/static/templates/admin/dashboard.html +217 -0
- createsonline/static/templates/admin/model_form.html +270 -0
- createsonline/static/templates/admin/model_list.html +202 -0
- createsonline/static/test_script.js +15 -0
- createsonline/static/test_styles.css +59 -0
- createsonline/static_files.py +365 -0
- createsonline/templates/404.html +100 -0
- createsonline/templates/admin_login.html +169 -0
- createsonline/templates/base.html +102 -0
- createsonline/templates/index.html +151 -0
- createsonline/templates.py +205 -0
- createsonline/testing.py +322 -0
- createsonline/utils.py +448 -0
- createsonline/validation/__init__.py +49 -0
- createsonline/validation/fields.py +598 -0
- createsonline/validation/models.py +504 -0
- createsonline/validation/validators.py +561 -0
- createsonline/views.py +184 -0
- createsonline-0.1.26.dist-info/METADATA +46 -0
- createsonline-0.1.26.dist-info/RECORD +152 -0
- createsonline-0.1.26.dist-info/WHEEL +5 -0
- createsonline-0.1.26.dist-info/entry_points.txt +2 -0
- createsonline-0.1.26.dist-info/licenses/LICENSE +21 -0
- createsonline-0.1.26.dist-info/top_level.txt +1 -0
createsonline/ml/regression.py

@@ -0,0 +1,501 @@
"""
CREATESONLINE Regression Algorithms

Pure Python regression implementations.
"""

import numpy as np
from typing import Union

class LinearRegression:
    """
    Linear Regression implementation using normal equation and gradient descent

    Pure Python implementation with numpy for matrix operations.
    """

    def __init__(self, learning_rate: float = 0.01, max_iterations: int = 1000, tolerance: float = 1e-6):
        """
        Initialize Linear Regression

        Args:
            learning_rate: Learning rate for gradient descent
            max_iterations: Maximum iterations for gradient descent
            tolerance: Convergence tolerance
        """
        self.learning_rate = learning_rate
        self.max_iterations = max_iterations
        self.tolerance = tolerance

        self.weights = None
        self.bias = None
        self.cost_history = []
        self.fitted = False

    def fit(self, X: Union[np.ndarray, list], y: Union[np.ndarray, list], method: str = 'normal_equation') -> 'LinearRegression':
        """
        Fit linear regression model

        Args:
            X: Training features (n_samples, n_features)
            y: Training targets (n_samples,)
            method: 'normal_equation' or 'gradient_descent'

        Returns:
            Self for method chaining
        """
        # Convert to numpy arrays
        X = np.array(X) if not isinstance(X, np.ndarray) else X
        y = np.array(y) if not isinstance(y, np.ndarray) else y

        # Ensure X is 2D
        if X.ndim == 1:
            X = X.reshape(-1, 1)

        n_samples, n_features = X.shape

        if method == 'normal_equation':
            # Normal equation: θ = (X^T X)^(-1) X^T y
            try:
                # Add bias column
                X_with_bias = np.column_stack([np.ones(n_samples), X])

                # Calculate weights using normal equation
                XtX = X_with_bias.T @ X_with_bias
                Xty = X_with_bias.T @ y

                # Check if matrix is invertible
                if np.linalg.det(XtX) != 0:
                    weights_with_bias = np.linalg.solve(XtX, Xty)
                    self.bias = weights_with_bias[0]
                    self.weights = weights_with_bias[1:]
                else:
                    # Fallback to gradient descent if matrix is singular
                    return self.fit(X, y, method='gradient_descent')

            except np.linalg.LinAlgError:
                # Fallback to gradient descent
                return self.fit(X, y, method='gradient_descent')

        elif method == 'gradient_descent':
            # Initialize weights and bias
            self.weights = np.random.normal(0, 0.01, n_features)
            self.bias = 0.0
            self.cost_history = []

            # Gradient descent
            for iteration in range(self.max_iterations):
                # Forward pass
                y_pred = X @ self.weights + self.bias

                # Calculate cost (MSE)
                cost = np.mean((y_pred - y) ** 2)
                self.cost_history.append(cost)

                # Calculate gradients
                dw = (2 / n_samples) * X.T @ (y_pred - y)
                db = (2 / n_samples) * np.sum(y_pred - y)

                # Update parameters
                self.weights -= self.learning_rate * dw
                self.bias -= self.learning_rate * db

                # Check for convergence
                if iteration > 0 and abs(self.cost_history[-2] - self.cost_history[-1]) < self.tolerance:
                    break

        else:
            raise ValueError("Method must be 'normal_equation' or 'gradient_descent'")

        self.fitted = True
        return self

    def predict(self, X: Union[np.ndarray, list]) -> np.ndarray:
        """
        Make predictions

        Args:
            X: Features to predict on (n_samples, n_features)

        Returns:
            Predictions (n_samples,)
        """
        if not self.fitted:
            raise RuntimeError("Model must be fitted before making predictions")

        X = np.array(X) if not isinstance(X, np.ndarray) else X
        if X.ndim == 1:
            X = X.reshape(-1, 1)

        return X @ self.weights + self.bias

    def score(self, X: Union[np.ndarray, list], y: Union[np.ndarray, list]) -> float:
        """
        Calculate R-squared score

        Args:
            X: Features
            y: True targets

        Returns:
            R-squared score
        """
        y_pred = self.predict(X)
        y = np.array(y) if not isinstance(y, np.ndarray) else y

        ss_res = np.sum((y - y_pred) ** 2)
        ss_tot = np.sum((y - np.mean(y)) ** 2)

        return 1 - (ss_res / ss_tot) if ss_tot != 0 else 0.0

    def get_params(self) -> dict:
        """Get model parameters"""
        return {
            'weights': self.weights.tolist() if self.weights is not None else None,
            'bias': float(self.bias) if self.bias is not None else None,
            'learning_rate': self.learning_rate,
            'max_iterations': self.max_iterations,
            'tolerance': self.tolerance
        }

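A minimal usage sketch for LinearRegression (an editor's illustration, not part of the released file); it assumes only that the module is importable as createsonline.ml.regression, matching the file list above, and exercises both solver paths.

# Editor's sketch: fit y = 2x + 1 with both solvers.
import numpy as np
from createsonline.ml.regression import LinearRegression

X = np.arange(10, dtype=float)        # 1-D input; fit() reshapes it to 2-D
y = 2.0 * X + 1.0

model = LinearRegression().fit(X, y)  # normal equation by default
print(model.weights, model.bias)      # ~[2.0] and ~1.0 on noiseless data
print(model.score(X, y))              # R^2 of 1.0

gd = LinearRegression(learning_rate=0.01, max_iterations=5000)
gd.fit(X, y, method='gradient_descent')
print(gd.predict([3.0]))              # ~[7.0]
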
class LogisticRegression:
    """
    Logistic Regression implementation using gradient descent

    Pure Python implementation with numpy for matrix operations.
    """

    def __init__(self, learning_rate: float = 0.01, max_iterations: int = 1000, tolerance: float = 1e-6):
        """
        Initialize Logistic Regression

        Args:
            learning_rate: Learning rate for gradient descent
            max_iterations: Maximum iterations for gradient descent
            tolerance: Convergence tolerance
        """
        self.learning_rate = learning_rate
        self.max_iterations = max_iterations
        self.tolerance = tolerance

        self.weights = None
        self.bias = None
        self.cost_history = []
        self.fitted = False

    @staticmethod
    def _sigmoid(z: np.ndarray) -> np.ndarray:
        """Sigmoid activation function"""
        # Clip z to prevent overflow
        z = np.clip(z, -500, 500)
        return 1 / (1 + np.exp(-z))

    def fit(self, X: Union[np.ndarray, list], y: Union[np.ndarray, list]) -> 'LogisticRegression':
        """
        Fit logistic regression model

        Args:
            X: Training features (n_samples, n_features)
            y: Training targets (n_samples,) - binary (0, 1)

        Returns:
            Self for method chaining
        """
        # Convert to numpy arrays
        X = np.array(X) if not isinstance(X, np.ndarray) else X
        y = np.array(y) if not isinstance(y, np.ndarray) else y

        # Ensure X is 2D
        if X.ndim == 1:
            X = X.reshape(-1, 1)

        n_samples, n_features = X.shape

        # Initialize weights and bias
        self.weights = np.random.normal(0, 0.01, n_features)
        self.bias = 0.0
        self.cost_history = []

        # Gradient descent
        for iteration in range(self.max_iterations):
            # Forward pass
            z = X @ self.weights + self.bias
            y_pred = self._sigmoid(z)

            # Calculate cost (cross-entropy)
            # Add small epsilon to prevent log(0)
            epsilon = 1e-15
            y_pred = np.clip(y_pred, epsilon, 1 - epsilon)
            cost = -np.mean(y * np.log(y_pred) + (1 - y) * np.log(1 - y_pred))
            self.cost_history.append(cost)

            # Calculate gradients
            dw = (1 / n_samples) * X.T @ (y_pred - y)
            db = (1 / n_samples) * np.sum(y_pred - y)

            # Update parameters
            self.weights -= self.learning_rate * dw
            self.bias -= self.learning_rate * db

            # Check for convergence
            if iteration > 0 and abs(self.cost_history[-2] - self.cost_history[-1]) < self.tolerance:
                break

        self.fitted = True
        return self

    def predict_proba(self, X: Union[np.ndarray, list]) -> np.ndarray:
        """
        Predict class probabilities

        Args:
            X: Features to predict on (n_samples, n_features)

        Returns:
            Probabilities for class 1 (n_samples,)
        """
        if not self.fitted:
            raise RuntimeError("Model must be fitted before making predictions")

        X = np.array(X) if not isinstance(X, np.ndarray) else X
        if X.ndim == 1:
            X = X.reshape(-1, 1)

        z = X @ self.weights + self.bias
        return self._sigmoid(z)

    def predict(self, X: Union[np.ndarray, list], threshold: float = 0.5) -> np.ndarray:
        """
        Make binary predictions

        Args:
            X: Features to predict on (n_samples, n_features)
            threshold: Decision threshold

        Returns:
            Binary predictions (n_samples,)
        """
        probabilities = self.predict_proba(X)
        return (probabilities >= threshold).astype(int)

    def score(self, X: Union[np.ndarray, list], y: Union[np.ndarray, list]) -> float:
        """
        Calculate accuracy score

        Args:
            X: Features
            y: True targets

        Returns:
            Accuracy score
        """
        y_pred = self.predict(X)
        y = np.array(y) if not isinstance(y, np.ndarray) else y

        return np.mean(y_pred == y)

    def get_params(self) -> dict:
        """Get model parameters"""
        return {
            'weights': self.weights.tolist() if self.weights is not None else None,
            'bias': float(self.bias) if self.bias is not None else None,
            'learning_rate': self.learning_rate,
            'max_iterations': self.max_iterations,
            'tolerance': self.tolerance
        }

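Likewise, a short sketch for LogisticRegression (editor's illustration, same import assumption): a linearly separable 1-D problem with binary labels.

# Editor's sketch: the learned boundary should settle near x = 2.5.
import numpy as np
from createsonline.ml.regression import LogisticRegression

X = np.array([[0.0], [1.0], [2.0], [3.0], [4.0], [5.0]])
y = np.array([0, 0, 0, 1, 1, 1])

clf = LogisticRegression(learning_rate=0.1, max_iterations=10000).fit(X, y)
print(clf.predict_proba([[2.5]]))  # ~0.5 at the midpoint, by symmetry
print(clf.predict(X))              # expected [0 0 0 1 1 1]
print(clf.score(X, y))             # accuracy once separated: 1.0
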
class PolynomialRegression:
    """
    Polynomial Regression implementation

    Uses LinearRegression with polynomial features.
    """

    def __init__(self, degree: int = 2, **linear_kwargs):
        """
        Initialize Polynomial Regression

        Args:
            degree: Degree of polynomial features
            **linear_kwargs: Arguments passed to LinearRegression
        """
        self.degree = degree
        self.linear_model = LinearRegression(**linear_kwargs)
        self.fitted = False

    def _create_polynomial_features(self, X: np.ndarray) -> np.ndarray:
        """Create polynomial features"""
        if X.ndim == 1:
            X = X.reshape(-1, 1)

        n_samples, n_features = X.shape

        # For simplicity, only handle single feature polynomial for now
        if n_features == 1:
            poly_features = []
            for i in range(1, self.degree + 1):
                poly_features.append(X[:, 0] ** i)
            return np.column_stack(poly_features)
        else:
            # For multiple features, just use powers of each feature
            poly_features = [X]
            for degree in range(2, self.degree + 1):
                poly_features.append(X ** degree)
            return np.column_stack(poly_features)

    def fit(self, X: Union[np.ndarray, list], y: Union[np.ndarray, list]) -> 'PolynomialRegression':
        """
        Fit polynomial regression model

        Args:
            X: Training features
            y: Training targets

        Returns:
            Self for method chaining
        """
        X = np.array(X) if not isinstance(X, np.ndarray) else X
        poly_X = self._create_polynomial_features(X)

        self.linear_model.fit(poly_X, y)
        self.fitted = True
        return self

    def predict(self, X: Union[np.ndarray, list]) -> np.ndarray:
        """
        Make predictions

        Args:
            X: Features to predict on

        Returns:
            Predictions
        """
        if not self.fitted:
            raise RuntimeError("Model must be fitted before making predictions")

        X = np.array(X) if not isinstance(X, np.ndarray) else X
        poly_X = self._create_polynomial_features(X)

        return self.linear_model.predict(poly_X)

    def score(self, X: Union[np.ndarray, list], y: Union[np.ndarray, list]) -> float:
        """
        Calculate R-squared score

        Args:
            X: Features
            y: True targets

        Returns:
            R-squared score
        """
        X = np.array(X) if not isinstance(X, np.ndarray) else X
        poly_X = self._create_polynomial_features(X)

        return self.linear_model.score(poly_X, y)

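A sketch for PolynomialRegression (editor's illustration, same import assumption): recovering y = x^2 with degree-2 features.

# Editor's sketch: degree-2 features [x, x^2] make this an exact fit.
import numpy as np
from createsonline.ml.regression import PolynomialRegression

X = np.linspace(-3.0, 3.0, 20)
y = X ** 2

poly = PolynomialRegression(degree=2).fit(X, y)
print(poly.predict([2.0]))  # ~[4.0]
print(poly.score(X, y))     # R^2 close to 1.0
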
class RidgeRegression:
    """
    Ridge Regression (L2 regularization) implementation

    Pure Python implementation with numpy.
    """

    def __init__(self, alpha: float = 1.0, learning_rate: float = 0.01, max_iterations: int = 1000):
        """
        Initialize Ridge Regression

        Args:
            alpha: Regularization strength
            learning_rate: Learning rate for gradient descent
            max_iterations: Maximum iterations
        """
        self.alpha = alpha
        self.learning_rate = learning_rate
        self.max_iterations = max_iterations

        self.weights = None
        self.bias = None
        self.fitted = False

    def fit(self, X: Union[np.ndarray, list], y: Union[np.ndarray, list]) -> 'RidgeRegression':
        """
        Fit ridge regression model

        Args:
            X: Training features
            y: Training targets

        Returns:
            Self for method chaining
        """
        X = np.array(X) if not isinstance(X, np.ndarray) else X
        y = np.array(y) if not isinstance(y, np.ndarray) else y

        if X.ndim == 1:
            X = X.reshape(-1, 1)

        n_samples, n_features = X.shape

        # Try normal equation approach first
        try:
            # Add bias column
            X_with_bias = np.column_stack([np.ones(n_samples), X])

            # Ridge normal equation: θ = (X^T X + αI)^(-1) X^T y
            XtX = X_with_bias.T @ X_with_bias

            # Add regularization (don't regularize bias term)
            reg_matrix = self.alpha * np.eye(n_features + 1)
            reg_matrix[0, 0] = 0  # Don't regularize bias

            XtX_reg = XtX + reg_matrix
            Xty = X_with_bias.T @ y

            weights_with_bias = np.linalg.solve(XtX_reg, Xty)
            self.bias = weights_with_bias[0]
            self.weights = weights_with_bias[1:]

        except np.linalg.LinAlgError:
            # Fallback to gradient descent
            self.weights = np.random.normal(0, 0.01, n_features)
            self.bias = 0.0

            for _ in range(self.max_iterations):
                y_pred = X @ self.weights + self.bias

                # Gradients with L2 regularization
                dw = (2 / n_samples) * X.T @ (y_pred - y) + 2 * self.alpha * self.weights
                db = (2 / n_samples) * np.sum(y_pred - y)

                self.weights -= self.learning_rate * dw
                self.bias -= self.learning_rate * db

        self.fitted = True
        return self

    def predict(self, X: Union[np.ndarray, list]) -> np.ndarray:
        """Make predictions"""
        if not self.fitted:
            raise RuntimeError("Model must be fitted before making predictions")

        X = np.array(X) if not isinstance(X, np.ndarray) else X
        if X.ndim == 1:
            X = X.reshape(-1, 1)

        return X @ self.weights + self.bias

    def score(self, X: Union[np.ndarray, list], y: Union[np.ndarray, list]) -> float:
        """Calculate R-squared score"""
        y_pred = self.predict(X)
        y = np.array(y) if not isinstance(y, np.ndarray) else y

        ss_res = np.sum((y - y_pred) ** 2)
        ss_tot = np.sum((y - np.mean(y)) ** 2)

        return 1 - (ss_res / ss_tot) if ss_tot != 0 else 0.0
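And a sketch for RidgeRegression (editor's illustration, same import assumption): the same line as the LinearRegression example, showing how alpha shrinks the slope toward zero.

# Editor's sketch: larger alpha pulls the slope below the OLS value of 2.0.
import numpy as np
from createsonline.ml.regression import RidgeRegression

X = np.arange(10, dtype=float)
y = 2.0 * X + 1.0

ridge = RidgeRegression(alpha=1.0).fit(X, y)
print(ridge.weights, ridge.bias)  # slope pulled slightly below 2.0; bias compensates

weak = RidgeRegression(alpha=1e-6).fit(X, y)
print(weak.weights)               # ~[2.0] as alpha approaches 0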

createsonline/performance/__init__.py

@@ -0,0 +1,19 @@
# createsonline/performance/__init__.py
"""
CREATESONLINE Performance Module

Ultra-high performance optimizations
"""

__all__ = [
    'create_optimized_app',
    'PerformanceOptimizer',
    'CacheManager',
    'ResponseCompression',
    'ConnectionPool'
]

from .core import create_optimized_app, PerformanceOptimizer
from .cache import CacheManager
from .compression import ResponseCompression
from .connection_pool import ConnectionPool