blindscrambler-0.1.5-cp39-abi3-macosx_11_0_arm64.whl → blindscrambler-0.1.7-cp39-abi3-macosx_11_0_arm64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- blindscrambler/__init__.py +1 -0
- blindscrambler/distributions/cvdistributions.py +3 -9
- blindscrambler/model/__init__.py +2 -0
- blindscrambler/model/regression.py +216 -0
- {blindscrambler-0.1.5.dist-info → blindscrambler-0.1.7.dist-info}/METADATA +4 -1
- {blindscrambler-0.1.5.dist-info → blindscrambler-0.1.7.dist-info}/RECORD +7 -5
- {blindscrambler-0.1.5.dist-info → blindscrambler-0.1.7.dist-info}/WHEEL +1 -1
blindscrambler/__init__.py
CHANGED
blindscrambler/distributions/cvdistributions.py
CHANGED
@@ -46,17 +46,11 @@ def poissondist(lam):
     """
     Params:
     (1) lam : double
-        The number lambda that will be used to generate samples of the
+        The number lambda that will be used to generate samples of the poisson distributions

     Returns:
     (2) x : double
-        A random sample that is
-
-    Function description: Basically, this function performs Inverse Transform Sampling. You take a random number generated between [0, 1]
-    This represets the CDF of some given distribution. Then you apply the inverse CDF function to generate a sample. The given sample will
-    be from the distribution in question.
-
-    NOTE: the inverse CDF used here is from the exponential distribution. Therefore, the generated numbers will be exponentially distributed
+        A random sample that is poisson distributed
     """
     # make cumulative function and k
     cum = 0
@@ -71,7 +65,7 @@ def poissondist(lam):

     # keep adding probabilities unyil condition is met
     while cum < u:
-
+        x += 1
         p *= lam / x
         cum += p

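The docstring removed in this hunk described the strategy: inverse transform sampling draws u uniformly from [0, 1], treats it as a CDF value, and walks the Poisson CDF upward until it passes u. The second hunk also fixes a genuine bug: 0.1.5 had a blank line where the counter increment belonged, so p *= lam / x divided by zero on the first pass. Below is a minimal sketch of the fixed algorithm in full; the hunks do not show the initialization lines, so the u, x, and p setup is our reconstruction from the textbook method, and poissondist_sketch is a hypothetical name, not the package's own function.

import math
import random

def poissondist_sketch(lam: float) -> int:
    # Hypothetical reconstruction -- the initialization is not visible in the
    # hunks above, so these lines follow the textbook inverse-transform
    # sampler for a Poisson(lam) variate rather than the shipped code.
    u = random.random()        # uniform draw in [0, 1), treated as a CDF value
    x = 0                      # candidate sample
    p = math.exp(-lam)         # P(X = 0)
    cum = p                    # running CDF

    # Keep adding probabilities until the condition is met; 0.1.7 adds the
    # x += 1 increment that 0.1.5 was missing.
    while cum < u:
        x += 1
        p *= lam / x           # Poisson recurrence: P(X = x) = P(X = x-1) * lam / x
        cum += p
    return x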
blindscrambler/model/regression.py
ADDED
@@ -0,0 +1,216 @@
+# Author metadata
+
+__Name__ = "Syed Raza"
+__email__ = "sar0033@uah.edu"
+
+# import statements
+import torch
+import torch.nn as nn
+import torch.optim as optim
+import numpy as np
+import matplotlib.pyplot as plt
+from scipy import stats
+from typing import Tuple, Optional
+import warnings
+
+# add a linear Regression class:
+class LinearRegression:
+    """
+    A PyTorch-based Linear Regression implementation for one variable.
+
+    Model: y = w_1 * x + w_0
+    Loss: Mean Squared Error
+
+    Features:
+    - Gradient-based optimization using PyTorch
+    - Confidence intervals for parameters w_1 and w_0
+    - Visualization with confidence bands
+    """
+
+    def __init__(self, learning_rate: float = 0.01, max_epochs: int = 1000,
+                 tolerance: float = 1e-6):
+
+        """
+        """
+
+        # the main variables
+        self.learning_rate = learning_rate
+        self.max_epochs = max_epochs
+        self.tolerance = tolerance
+
+        # Model parameters
+        self.w_0 = nn.Parameter(torch.randn(1, requires_grad=True))  # intercept
+        self.w_1 = nn.Parameter(torch.randn(1, requires_grad=True))  # slope
+
+        # training data storage
+        self.X_train = None
+        self.y_train = None
+
+        # Model statistics for confidence intervals
+        self.n_samples = None
+        self.residual_sum_squares = None
+        self.X_mean = None
+        self.X_var = None
+        self.fitted = False
+
+        # Loss function and optimizer
+        self.criterion = nn.MSELoss()
+        self.optimizer = optim.SGD([self.w_1, self.w_0], lr=self.learning_rate)
+
+        # Training history
+        self.loss_history = []
+        self.w0_history = []
+        self.w1_history = []
+
+    def forward(self, X: torch.tensor) -> torch.tensor:
+        """
+        """
+        return self.w_1 * X + self.w_0
+
+    def fit(self, X: np.ndarray, y: np.ndarray) -> 'LinearRegression':
+        """
+        """
+        # Convert to PyTorch tensors
+        self.X_train = torch.tensor(X, dtype=torch.float32)
+        self.y_train = torch.tensor(y, dtype=torch.float32)
+        self.n_samples = len(X)
+
+        # Store statistics for confidence intervals
+        self.X_mean = float(np.mean(X))
+        self.X_var = float(np.var(X, ddof=1))  # Sample variance
+
+        # Training loop
+        prev_loss = float('inf')
+
+        for epoch in range(self.max_epochs):
+            # Zero gradients
+            self.optimizer.zero_grad()
+
+            # Forward pass
+            y_pred = self.forward(self.X_train)
+
+            # Compute loss
+            loss = self.criterion(y_pred, self.y_train)
+
+            # Backward pass
+            loss.backward()
+
+            # Update parameters
+            self.optimizer.step()
+
+            # Store loss history
+            current_loss = loss.item()
+            self.loss_history.append(current_loss)
+            # Track parameter history (after update)
+            with torch.no_grad():
+                self.w0_history.append(float(self.w_0.item()))
+                self.w1_history.append(float(self.w_1.item()))
+
+            # Check for convergence
+            if abs(prev_loss - current_loss) < self.tolerance:
+                print(f"Converged after {epoch + 1} epochs")
+                break
+
+            prev_loss = current_loss
+
+        # Compute residual sum of squares for confidence intervals
+        with torch.no_grad():
+            y_pred = self.forward(self.X_train)
+            residuals = self.y_train - y_pred
+            self.residual_sum_squares = float(torch.sum(residuals ** 2))
+
+        self.fitted = True
+        return self
+
+    def predict(self, X: np.ndarray) -> np.ndarray:
+        """
+        Make predictions on new data.
+
+        Args:
+            X: Input features of shape (n_samples,)
+
+        Returns:
+            Predictions as numpy array
+        """
+        if not self.fitted:
+            raise ValueError("Model must be fitted before making predictions")
+
+        X_tensor = torch.tensor(X, dtype=torch.float32)
+
+        with torch.no_grad():
+            predictions = self.forward(X_tensor)
+
+        return predictions.numpy()
+
+    def analysis_plot(self, w_0: Optional[float] = None, w_1: Optional[float] = None):
+        """
+        Create a 2x2 analysis figure showing:
+        - Original data and fitted regression line
+        - Training loss over epochs
+        - Intercept (w_0) trajectory over epochs
+        - Slope (w_1) trajectory over epochs
+
+        Args:
+            w_0: Intercept to plot final fit; if None, uses current self.w_0
+            w_1: Slope to plot final fit; if None, uses current self.w_1
+        """
+        if self.X_train is None or self.y_train is None:
+            raise ValueError("No training data found. Fit the model before plotting.")
+
+        # Resolve parameters for plotting
+        if w_0 is None:
+            w_0 = float(self.w_0.detach().cpu().item())
+        if w_1 is None:
+            w_1 = float(self.w_1.detach().cpu().item())
+
+        X_np = self.X_train.detach().cpu().numpy().reshape(-1)
+        y_np = self.y_train.detach().cpu().numpy().reshape(-1)
+
+        # Build line for fit
+        x_line = np.linspace(X_np.min(), X_np.max(), 200)
+        y_line = w_1 * x_line + w_0
+
+        fig, axes = plt.subplots(2, 2, figsize=(12, 8))
+
+        # 1) Data + fit
+        ax = axes[0, 0]
+        ax.scatter(X_np, y_np, color='tab:blue', alpha=0.7, label='Data')
+        ax.plot(x_line, y_line, color='tab:red', label=f'Fit: y={w_1:.3f}x+{w_0:.3f}')
+        ax.set_title('Data and Fitted Line')
+        ax.set_xlabel('X')
+        ax.set_ylabel('y')
+        ax.legend()
+
+        # 2) Loss history
+        ax = axes[0, 1]
+        if len(self.loss_history) > 0:
+            ax.plot(range(1, len(self.loss_history) + 1), self.loss_history, color='tab:green')
+        ax.set_title('Training Loss')
+        ax.set_xlabel('Epoch')
+        ax.set_ylabel('MSE Loss')
+        ax.grid(True, linestyle='--', alpha=0.3)
+
+        # 3) w_0 history
+        ax = axes[1, 0]
+        if len(self.w0_history) > 0:
+            ax.plot(range(1, len(self.w0_history) + 1), self.w0_history, color='tab:purple')
+        ax.axhline(w_0, color='gray', linestyle='--', alpha=0.6, label='Final w_0')
+        ax.set_title('w_0 (Intercept) over Epochs')
+        ax.set_xlabel('Epoch')
+        ax.set_ylabel('w_0')
+        ax.legend()
+        ax.grid(True, linestyle='--', alpha=0.3)
+
+        # 4) w_1 history
+        ax = axes[1, 1]
+        if len(self.w1_history) > 0:
+            ax.plot(range(1, len(self.w1_history) + 1), self.w1_history, color='tab:orange')
+        ax.axhline(w_1, color='gray', linestyle='--', alpha=0.6, label='Final w_1')
+        ax.set_title('w_1 (Slope) over Epochs')
+        ax.set_xlabel('Epoch')
+        ax.set_ylabel('w_1')
+        ax.legend()
+        ax.grid(True, linestyle='--', alpha=0.3)
+
+        plt.tight_layout()
+        return fig, axes
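Because the hunk above contains the entire new module, the public surface is fully visible: fit, predict, and analysis_plot. Below is a minimal usage sketch; the import path blindscrambler.model.regression is inferred from the RECORD entries further down, and the closing confidence-interval lines are our own OLS-style illustration built from the statistics the class stores (n_samples, X_var, residual_sum_squares), since the module records them for confidence intervals but ships no method that computes one.

import numpy as np
from scipy import stats
# Import path inferred from the wheel's RECORD entries, not from docs.
from blindscrambler.model.regression import LinearRegression

# Synthetic data: true slope 2.5, true intercept 1.0
rng = np.random.default_rng(0)
X = rng.uniform(0.0, 10.0, size=50)
y = 2.5 * X + 1.0 + rng.normal(0.0, 1.0, size=50)

model = LinearRegression(learning_rate=0.01, max_epochs=5000, tolerance=1e-9)
model.fit(X, y)
print(model.predict(np.array([0.0, 5.0, 10.0])))

fig, axes = model.analysis_plot()   # 2x2 figure: data + fit, loss, w_0, w_1
fig.savefig("fit_analysis.png")

# Our own illustration, not a package API: a 95% CI for the slope from the
# stored statistics, using the usual OLS formulas (the SGD estimate only
# approximates the OLS solution, so treat the interval as indicative).
n = model.n_samples
s2 = model.residual_sum_squares / (n - 2)        # residual variance estimate
se_w1 = np.sqrt(s2 / ((n - 1) * model.X_var))    # standard error of the slope
t_crit = stats.t.ppf(0.975, df=n - 2)
print(f"w_1 = {float(model.w_1.item()):.3f} +/- {t_crit * se_w1:.3f}")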
{blindscrambler-0.1.5.dist-info → blindscrambler-0.1.7.dist-info}/METADATA
CHANGED
@@ -1,8 +1,11 @@
 Metadata-Version: 2.4
 Name: blindscrambler
-Version: 0.1.5
+Version: 0.1.7
 Requires-Dist: matplotlib>=3.10.6
 Requires-Dist: numpy>=2.3.3
+Requires-Dist: polars>=1.34.0
+Requires-Dist: scikit-learn>=1.7.2
+Requires-Dist: scipy>=1.16.2
 Requires-Dist: torch>=2.8.0
 Requires-Dist: twine>=6.1.0
 Summary: Add your description here
{blindscrambler-0.1.5.dist-info → blindscrambler-0.1.7.dist-info}/RECORD
CHANGED
@@ -1,13 +1,15 @@
-blindscrambler-0.1.
-blindscrambler-0.1.
-blindscrambler/__init__.py,sha256=
+blindscrambler-0.1.7.dist-info/METADATA,sha256=Jnz2SO-4sk-teZcAoH5cHZ4zAMnmDISbMBmK8rhpidc,501
+blindscrambler-0.1.7.dist-info/WHEEL,sha256=vpqC0tRn_8bTHidvtrPbrnFQPZnrhuKzsjDdeKwCd58,102
+blindscrambler/__init__.py,sha256=fSGH3-DvmAl8iABUbfGYKYKfQ025MVuih4VPm_wbUqQ,148
 blindscrambler/_core.abi3.so,sha256=4uKUtCwAO1Hbvzv0FXAt38rEHYbg-Quio8CdkJ_UMrk,440112
 blindscrambler/_core.pyi,sha256=b6oJaUXUzEzqUE5rpqefV06hl8o_JCU8pgKgIIzQgmc,33
 blindscrambler/differential/__init__.py,sha256=INnk5rX2ae6mG5yynAQYKzpQ0BYsHquUhA9ZzbPVLm8,45
 blindscrambler/differential/discrete.py,sha256=mPJg6YrDVuXK-dLXgb_VDqKl1IvKfSKahMA_rRTVKQY,369
 blindscrambler/distributions/__init__.py,sha256=8O4VQvymecRFRP1njwAfbD4yUACA25RcLqn0QtZEjaE,58
-blindscrambler/distributions/cvdistributions.py,sha256=
+blindscrambler/distributions/cvdistributions.py,sha256=lgZnlYdlCJEhk6K4cAkZmtIED81156ZnaJAQQbHx96c,2025
 blindscrambler/matrix/__init__.py,sha256=qlItVU8AVj_mP2NUJ3gor-lsovxk3Wxf5tUfKynoUbg,157
 blindscrambler/matrix/elementary.py,sha256=hArZLiBTA_vW1EZ0RniECf6ybJiJxO7KNuVHb_TZFQU,3987
+blindscrambler/model/__init__.py,sha256=CUXjl7w9exeF60zz0pjhD2SX8BLlH4Q5NXjEx_azznQ,71
+blindscrambler/model/regression.py,sha256=srWs8XueH8oc62k_8jJJtTnBWfH2tq1CSf0iO0j4JUE,7061
 blindscrambler/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-blindscrambler-0.1.
+blindscrambler-0.1.7.dist-info/RECORD,,