radioactiveshrimp 0.1.1__cp39-abi3-manylinux_2_28_x86_64.whl → 0.1.3__cp39-abi3-manylinux_2_28_x86_64.whl

This diff shows the changes between two publicly released versions of this package, as they appear in their public registry. It is provided for informational purposes only.
@@ -1,5 +1,5 @@
  from radioactiveshrimp._core import hello_from_bin
-
+ from radioactiveshrimp.model import LinearRegression

  def hello() -> str:
      return hello_from_bin()
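
With this change, LinearRegression is re-exported at the package root. A minimal sketch of what that enables, assuming the wheel is installed (illustrative only, not shipped with the package):

    import radioactiveshrimp
    from radioactiveshrimp import LinearRegression  # same class as radioactiveshrimp.model.LinearRegression

    print(radioactiveshrimp.hello())  # existing Rust-backed entry point, unchanged
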
@@ -0,0 +1,5 @@
+ """
+ Distribution subpackage containing distribution functions.
+ """
+ from .cvdistributions import uniform, exponentialdist, poissondist
+ __all__ = ['uniform', 'exponentialdist', 'poissondist']
@@ -0,0 +1,111 @@
+ import secrets
+ import numpy as np
+ # import matplotlib.pyplot as plt
+ import math as m
+
+ def uniform(a: float = 0.0, b: float = 1.0) -> float:
+     """
+     Generate a cryptographically secure uniform sample.
+
+     Args:
+         a (float): lower bound (default 0.0)
+         b (float): upper bound (default 1.0)
+
+     Returns:
+         float representing the uniform sample
+
+     Example:
+         >>> uniform()
+         0.3546927444895387
+         >>> uniform()
+         0.21098189931876055
+     """
+     # 53 random bits give a double with full 53-bit precision
+     u = secrets.randbits(53) / (1 << 53)  # in [0, 1)
+     return a + (b - a) * u
+
+ def exponentialdist(lamb):
+     """
+     Generate an exponentially distributed sample via inverse-transform sampling.
+
+     Args:
+         lamb (float or int > 0): lambda (rate) of the exponential distribution
+
+     Returns:
+         x: float representing the exponentially distributed sample
+
+     Raises:
+         ValueError: if lambda is not greater than 0
+
+     Example:
+         >>> exponentialdist(1)
+         0.6839328417240588
+         >>> exponentialdist(1)
+         2.901353101723147
+         >>> exponentialdist(.5)
+         2.451134119865936
+     """
+     if lamb <= 0:
+         raise ValueError('Lambda must be greater than 0')
+
+     y = uniform()
+     x = -(1 / lamb) * np.log(1.0 - y)  # 1 - y is in (0, 1], so log(0) cannot occur
+     return x
+
+
+ def poissondist(lamb):
+     """
+     Generate a Poisson distributed sample by inverting the CDF.
+
+     Args:
+         lamb (int or float > 0): lambda (mean) of the Poisson distribution
+
+     Returns:
+         i: int representing the Poisson distributed sample
+
+     Example:
+         >>> poissondist(1)
+         1
+         >>> poissondist(1)
+         3
+         >>> poissondist(10)
+         9
+     """
+     if lamb <= 0:
+         raise ValueError('Lambda must be greater than 0')
+
+     elamb = np.exp(-lamb)
+     i = 0
+     y = uniform()
+     prob = elamb  # P(X = 0)
+     while y > prob:
+         i = i + 1
+         factorial = m.factorial(i)
+         power = lamb ** i
+         # accumulate the CDF: prob = P(X <= i)
+         prob = prob + (power / factorial) * elamb
+
+     return i
+
+
+ # # draw histogram of 100,000 samples from the exponential distribution
+ # samplexs = []
+ # for _ in range(100000):
+ #     samplexs.append(exponentialdist(1))
+
+ # plt.figure()
+ # hist = plt.hist(samplexs, bins='auto', orientation='horizontal', histtype='bar')
+ # plt.xlabel('count')
+ # plt.ylabel('X')
+ # plt.show()
+
+ # # draw histogram of 100,000 samples from the Poisson distribution
+ # samplexs = []
+ # for _ in range(100000):
+ #     samplexs.append(poissondist(10))
+
+ # plt.figure()
+ # hist = plt.hist(samplexs, bins='auto', histtype='bar')
+ # plt.xlabel('X')
+ # plt.ylabel('count')
+ # plt.show()
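
For orientation, a minimal sketch of how these samplers might be sanity-checked; the sample size and expected means below are illustrative assumptions, not part of the package:

    from radioactiveshrimp.distributions import uniform, exponentialdist, poissondist

    n = 100_000
    exp_mean = sum(exponentialdist(2.0) for _ in range(n)) / n
    pois_mean = sum(poissondist(4.0) for _ in range(n)) / n

    # Exp(lambda) has mean 1/lambda; Poisson(lambda) has mean lambda.
    print(exp_mean)                # expect roughly 0.5
    print(pois_mean)               # expect roughly 4.0
    print(0.0 <= uniform() < 1.0)  # default bounds are [0, 1)
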
@@ -0,0 +1,5 @@
+ """
+ Model subpackage containing model functions.
+ """
+ from .regression import LinearRegression
+ __all__ = ['LinearRegression']
@@ -0,0 +1,468 @@
+ import torch
+ import torch.nn as nn
+ import torch.optim as optim
+ import numpy as np
+ import matplotlib.pyplot as plt
+ from scipy import stats
+ from typing import Tuple, Optional
+ import warnings
+
+ class LinearRegression:
+     """
+     A PyTorch-based linear regression implementation for one variable.
+
+     Model: y = w_1 * x + w_0
+     Loss: mean squared error
+
+     Features:
+     - Gradient-based optimization using PyTorch
+     - Confidence intervals for parameters w_1 and w_0
+     - Visualization with confidence bands
+     """
+
+     def __init__(self, learning_rate: float = 0.01, max_epochs: int = 1000,
+                  tolerance: float = 1e-6):
+         """
+         Initialize the linear regression model.
+
+         Args:
+             learning_rate: Learning rate for gradient descent
+             max_epochs: Maximum number of training epochs
+             tolerance: Convergence tolerance for early stopping
+         """
+         self.learning_rate = learning_rate
+         self.max_epochs = max_epochs
+         self.tolerance = tolerance
+
+         # Model parameters
+         self.w_1 = nn.Parameter(torch.randn(1, requires_grad=True))  # slope
+         self.w_0 = nn.Parameter(torch.randn(1, requires_grad=True))  # intercept
+
+         # Training data storage
+         self.X_train = None
+         self.y_train = None
+
+         # Model statistics for confidence intervals
+         self.n_samples = None
+         self.residual_sum_squares = None
+         self.X_mean = None
+         self.X_var = None
+         self.fitted = False
+
+         # Loss function and optimizer
+         self.criterion = nn.MSELoss()
+         self.optimizer = optim.SGD([self.w_1, self.w_0], lr=self.learning_rate)
+
+         # Training history
+         self.loss_history = []
+
+     def forward(self, X: torch.Tensor) -> torch.Tensor:
+         """
+         Forward pass of the linear model.
+
+         Args:
+             X: Input tensor of shape (n_samples,)
+
+         Returns:
+             Predictions tensor of shape (n_samples,)
+         """
+         return self.w_1 * X + self.w_0
+
+     def fit(self, X: np.ndarray, y: np.ndarray, test_x: np.ndarray = None, test_y: np.ndarray = None) -> 'LinearRegression':
+         """
+         Fit the linear regression model to the training data.
+
+         Args:
+             X: Input features of shape (n_samples,)
+             y: Target values of shape (n_samples,)
+             test_x: Optional held-out features; if given with test_y, test R^2 is printed
+             test_y: Optional held-out targets; if given with test_x, test R^2 is printed
+
+         Returns:
+             self: Returns the fitted model instance
+         """
+         # Convert to PyTorch tensors
+         self.X_train = torch.tensor(X, dtype=torch.float32)
+         self.y_train = torch.tensor(y, dtype=torch.float32)
+         self.n_samples = len(X)
+
+         # Store statistics for confidence intervals (used by the CI methods below)
+         self.X_mean = float(np.mean(X))
+         self.X_var = float(np.var(X, ddof=1))  # sample variance
+
+         # Training loop
+         prev_loss = float('inf')
+
+         for epoch in range(self.max_epochs):
+             # Zero gradients
+             self.optimizer.zero_grad()
+
+             # Forward pass
+             y_pred = self.forward(self.X_train)
+
+             # Compute loss
+             loss = self.criterion(y_pred, self.y_train)
+
+             # Backward pass
+             loss.backward()
+
+             # Update parameters
+             self.optimizer.step()
+
+             # Store loss history
+             current_loss = loss.item()
+             self.loss_history.append(current_loss)
+
+             # Check for convergence
+             if abs(prev_loss - current_loss) < self.tolerance:
+                 print(f"Converged after {epoch + 1} epochs")
+                 break
+
+             prev_loss = current_loss
+
+         # Compute residual sum of squares for confidence intervals
+         with torch.no_grad():
+             y_pred = self.forward(self.X_train)
+             residuals = self.y_train - y_pred
+             self.residual_sum_squares = float(torch.sum(residuals ** 2))
+
+         self.fitted = True
+
+         # Compute R^2 on the optional test data and print the result
+         if (test_x is not None) and (test_y is not None):
+             test_pred = self.predict(np.asarray(test_x, dtype=np.float64))
+             test_y_arr = np.asarray(test_y, dtype=np.float64)
+             ss_res = float(np.sum((test_y_arr - test_pred) ** 2))
+             ss_tot = float(np.sum((test_y_arr - np.mean(test_y_arr)) ** 2))
+             r_squared = 1 - (ss_res / ss_tot)
+             print('R^2 for test data: ', r_squared)
+
+         return self
+
+     def predict(self, X: np.ndarray) -> np.ndarray:
+         """
+         Make predictions on new data.
+
+         Args:
+             X: Input features of shape (n_samples,)
+
+         Returns:
+             Predictions as numpy array
+         """
+         if not self.fitted:
+             raise ValueError("Model must be fitted before making predictions")
+
+         X_tensor = torch.tensor(X, dtype=torch.float32)
+
+         with torch.no_grad():
+             predictions = self.forward(X_tensor)
+
+         return predictions.numpy()
+
+     def get_parameters(self) -> Tuple[float, float]:
+         """
+         Get the fitted parameters.
+
+         Returns:
+             Tuple of (w_1, w_0) - slope and intercept
+         """
+         if not self.fitted:
+             raise ValueError("Model must be fitted before accessing parameters")
+
+         return float(self.w_1.item()), float(self.w_0.item())
+
+     def parameter_confidence_intervals(self, confidence_level: float = 0.95) -> dict:
+         """
+         Compute confidence intervals for parameters w_1 and w_0.
+
+         Args:
+             confidence_level: Confidence level (e.g., 0.95 for 95%)
+
+         Returns:
+             Dictionary containing confidence intervals for both parameters
+         """
+         if not self.fitted:
+             raise ValueError("Model must be fitted before computing confidence intervals")
+
+         # Degrees of freedom
+         df = self.n_samples - 2
+
+         # Critical t-value
+         alpha = 1 - confidence_level
+         t_critical = stats.t.ppf(1 - alpha / 2, df)
+
+         # Standard error of regression
+         mse = self.residual_sum_squares / df
+         se_regression = np.sqrt(mse)
+
+         # Standard error for w_1 (slope)
+         se_w1 = se_regression / np.sqrt(self.n_samples * self.X_var)
+
+         # Standard error for w_0 (intercept)
+         se_w0 = se_regression * np.sqrt(1 / self.n_samples + self.X_mean ** 2 / (self.n_samples * self.X_var))
+
+         # Get current parameter values
+         w_1_val, w_0_val = self.get_parameters()
+
+         # Compute confidence intervals
+         w_1_ci = (
+             w_1_val - t_critical * se_w1,
+             w_1_val + t_critical * se_w1
+         )
+
+         w_0_ci = (
+             w_0_val - t_critical * se_w0,
+             w_0_val + t_critical * se_w0
+         )
+
+         return {
+             'w_1_confidence_interval': w_1_ci,
+             'w_0_confidence_interval': w_0_ci,
+             'confidence_level': confidence_level,
+             'standard_errors': {
+                 'se_w1': se_w1,
+                 'se_w0': se_w0,
+                 'se_regression': se_regression
+             }
+         }
+
+     def plot_regression_with_confidence_band(self, confidence_level: float = 0.95,
+                                              figsize: Tuple[int, int] = (10, 6),
+                                              title: Optional[str] = None) -> plt.Figure:
+         """
+         Plot the fitted regression line with confidence band.
+
+         Args:
+             confidence_level: Confidence level for the band
+             figsize: Figure size tuple
+             title: Optional plot title
+
+         Returns:
+             matplotlib Figure object
+         """
+         if not self.fitted:
+             raise ValueError("Model must be fitted before plotting")
+
+         # Create figure
+         fig, ax = plt.subplots(figsize=figsize)
+
+         # Convert training data to numpy for plotting
+         X_np = self.X_train.numpy()
+         y_np = self.y_train.numpy()
+
+         # Create prediction range
+         X_range = np.linspace(X_np.min(), X_np.max(), 100)
+         y_pred_range = self.predict(X_range)
+
+         # Compute confidence band
+         df = self.n_samples - 2
+         alpha = 1 - confidence_level
+         t_critical = stats.t.ppf(1 - alpha / 2, df)
+
+         mse = self.residual_sum_squares / df
+         se_regression = np.sqrt(mse)
+
+         # Standard error for predictions (confidence band)
+         X_centered = X_range - self.X_mean
+         se_pred = se_regression * np.sqrt(1 / self.n_samples + X_centered ** 2 / (self.n_samples * self.X_var))
+
+         # Confidence band bounds
+         margin_of_error = t_critical * se_pred
+         y_upper = y_pred_range + margin_of_error
+         y_lower = y_pred_range - margin_of_error
+
+         # Plot data points
+         ax.scatter(X_np, y_np, alpha=0.6, color='blue', label='Data points')
+
+         # Plot regression line
+         ax.plot(X_range, y_pred_range, 'r-', linewidth=2, label='Fitted line')
+
+         # Plot confidence band
+         ax.fill_between(X_range, y_lower, y_upper, alpha=0.3, color='red',
+                         label=f'{int(confidence_level*100)}% Confidence band')
+
+         # Get parameter values for display
+         w_1_val, w_0_val = self.get_parameters()
+
+         # Labels and title
+         ax.set_xlabel('X')
+         ax.set_ylabel('y')
+         if title is None:
+             title = f'Linear Regression: y = {w_1_val:.3f}x + {w_0_val:.3f}'
+         ax.set_title(title)
+         ax.legend()
+         ax.grid(True, alpha=0.3)
+
+         plt.tight_layout()
+         return fig
+
+     def summary(self) -> dict:
+         """
+         Provide a summary of the fitted model.
+
+         Returns:
+             Dictionary containing model summary statistics
+         """
+         if not self.fitted:
+             raise ValueError("Model must be fitted before generating summary")
+
+         w_1_val, w_0_val = self.get_parameters()
+
+         # R-squared calculation
+         y_mean = float(torch.mean(self.y_train))
+         ss_tot = float(torch.sum((self.y_train - y_mean) ** 2))
+         r_squared = 1 - (self.residual_sum_squares / ss_tot)
+
+         # Adjusted R-squared
+         adj_r_squared = 1 - ((1 - r_squared) * (self.n_samples - 1) / (self.n_samples - 2))
+
+         # RMSE
+         rmse = np.sqrt(self.residual_sum_squares / self.n_samples)
+
+         return {
+             'parameters': {
+                 'w_1 (slope)': w_1_val,
+                 'w_0 (intercept)': w_0_val
+             },
+             'model_fit': {
+                 'r_squared': r_squared,
+                 'adjusted_r_squared': adj_r_squared,
+                 'rmse': rmse,
+                 'residual_sum_squares': self.residual_sum_squares
+             },
+             'training_info': {
+                 'n_samples': self.n_samples,
+                 'epochs_trained': len(self.loss_history),
+                 'final_loss': self.loss_history[-1] if self.loss_history else None
+             }
+         }
+
+     def analysis_plot(self, figsize: Tuple[int, int] = (10, 6),
+                       title: Optional[str] = None) -> plt.Figure:
+         """
+         Plot the original data, the fitted regression line, and the training loss curve.
+
+         Args:
+             figsize: Figure size tuple
+             title: Optional plot title
+
+         Returns:
+             matplotlib Figure object
+         """
+         if not self.fitted:
+             raise ValueError("Model must be fitted before plotting")
+
+         # Create figure
+         fig, ax = plt.subplots(3, 1, figsize=figsize)
+
+         # Convert training data to numpy for plotting
+         X_np = self.X_train.numpy()
+         y_np = self.y_train.numpy()
+
+         # Create prediction range
+         X_range = np.linspace(X_np.min(), X_np.max(), 100)
+         y_pred_range = self.predict(X_range)
+
+         # Plot data points
+         ax[0].scatter(X_np, y_np, alpha=0.6, color='blue', label='Data points')
+
+         # Plot regression line with original data
+         ax[1].plot(X_range, y_pred_range, 'r-', linewidth=2, label='Fitted line')
+         ax[1].scatter(X_np, y_np, alpha=0.6, color='blue', label='Data points')
+
+         # Get parameter values for display
+         w_1_val, w_0_val = self.get_parameters()
+
+         # Plot loss as training occurred
+         ax[2].plot(self.loss_history, label='Training Loss', color='green')
+
+         # Labels and title
+         ax[0].set_xlabel('X')
+         ax[0].set_ylabel('y')
+         ax[0].set_title("Original Data")
+         ax[0].legend()
+         ax[0].grid(True, alpha=0.3)
+
+         ax[1].set_xlabel('X')
+         ax[1].set_ylabel('y')
+         if title is None:
+             title = f'Linear Regression: y = {w_1_val:.3f}x + {w_0_val:.3f}'
+         ax[1].set_title(title)
+         ax[1].legend()
+         ax[1].grid(True, alpha=0.3)
+
+         ax[2].set_xlabel('Epoch')
+         ax[2].set_ylabel('Loss')
+         ax[2].set_title('Loss Through Training')
+         ax[2].legend()
+         ax[2].grid(True, alpha=0.3)
+
+         plt.tight_layout()
+         plt.show()
+         return fig
+
+
+ #-------------------------------------------------------------------------------------
+
+ # import polars as p
+ # from sklearn.model_selection import train_test_split
+
+ # L = LinearRegression()
+ # data = p.read_csv('/home/radioactiveshrimp/radioactiveshrimp/Hydropower.csv')
+ # x = data.get_column('BCR')
+ # y = data.get_column('AnnualProduction')
+
+ # X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.25)
+
+ # y_pred = L.forward(.9)
+
+ # fittedMod = L.fit(X_train, y_train, X_test, y_test)
+ # # fittedMod = L.fit(X_train, y_train)
+ # print(L.predict(.9))
+ # # print(L.loss_history)
+
+ # fig = L.analysis_plot()
+ # plt.show()
+
+ #----------------------------------------------------------------
+ # np.random.seed(42)
+ # torch.manual_seed(42)
+
+ # n_samples = 100
+ # X = np.random.uniform(-5, 5, n_samples)
+ # true_slope = 2.5
+ # true_intercept = -1.2
+ # noise = np.random.normal(0, 1, n_samples)
+ # y = true_slope * X + true_intercept + noise
+
+ # print("=== Linear Regression with PyTorch ===")
+ # print(f"True parameters: slope={true_slope}, intercept={true_intercept}")
+ # print()
+
+ # # Create and fit model
+ # model = LinearRegression(learning_rate=0.01, max_epochs=10)
+ # model.fit(X, y)
+
+ # # Get fitted parameters
+ # fitted_slope, fitted_intercept = model.get_parameters()
+ # print(f"Fitted parameters: slope={fitted_slope:.4f}, intercept={fitted_intercept:.4f}")
+
+ # # Model summary
+ # summary = model.summary()
+ # print(f"\nModel Summary:")
+ # print(f"R-squared: {summary['model_fit']['r_squared']:.4f}")
+ # print(f"RMSE: {summary['model_fit']['rmse']:.4f}")
+ # print(f"Training epochs: {summary['training_info']['epochs_trained']}")
+
+ # # Confidence intervals
+ # ci_results = model.parameter_confidence_intervals(confidence_level=0.95)
+ # print(f"\n95% Confidence Intervals:")
+ # print(f"w_1 (slope): [{ci_results['w_1_confidence_interval'][0]:.4f}, {ci_results['w_1_confidence_interval'][1]:.4f}]")
+ # print(f"w_0 (intercept): [{ci_results['w_0_confidence_interval'][0]:.4f}, {ci_results['w_0_confidence_interval'][1]:.4f}]")
+
+ # # Plot results
+ # fig = model.plot_regression_with_confidence_band(confidence_level=0.95)
+ # plt.show()
+
+ # # Test different confidence levels
+ # for conf_level in [0.90, 0.95, 0.99]:
+ #     ci = model.parameter_confidence_intervals(confidence_level=conf_level)
+ #     print(f"\n{int(conf_level*100)}% CI for slope: [{ci['w_1_confidence_interval'][0]:.4f}, {ci['w_1_confidence_interval'][1]:.4f}]")
@@ -1,7 +1,11 @@
  Metadata-Version: 2.4
  Name: radioactiveshrimp
- Version: 0.1.1
+ Version: 0.1.3
+ Requires-Dist: matplotlib>=3.10.6
  Requires-Dist: numpy>=2.3.3
+ Requires-Dist: polars>=1.33.1
+ Requires-Dist: scikit-learn>=1.7.2
+ Requires-Dist: scipy>=1.16.2
  Requires-Dist: torch>=2.8.0
  Requires-Dist: twine>=6.1.0
  Summary: Add your description here
@@ -0,0 +1,15 @@
+ radioactiveshrimp-0.1.3.dist-info/METADATA,sha256=cl4YyqMwZB_g-18Sqq8wDLp6YKuNdGhHgjjq_lUTnYc,506
+ radioactiveshrimp-0.1.3.dist-info/WHEEL,sha256=whVUlzvauXZI_AddfJsPvOGSCqk5SsvrB6oOXoKFJ_s,97
+ radioactiveshrimp/__init__.py,sha256=Np_SVIHI6cGztPNmaAjmTydRaDgmbgW5SV8qmGEFMBY,153
+ radioactiveshrimp/_core.abi3.so,sha256=3IMwsP7UrgNWdKdw2Pl11_VUQF8NqPy1xyyPuzkREdA,489672
+ radioactiveshrimp/_core.pyi,sha256=b6oJaUXUzEzqUE5rpqefV06hl8o_JCU8pgKgIIzQgmc,33
+ radioactiveshrimp/differential/__init__.py,sha256=jOhQKr6xM_7TVUt0GWRddtkgBvNn5I873WZcArT-GtE,101
+ radioactiveshrimp/differential/discrete.py,sha256=9-w6Avz4MZfu8gFhJU8kTvryn9oT2xS1TvvYQ_8pmPU,1371
+ radioactiveshrimp/distributions/__init__.py,sha256=hZX2UFt54wJG4zDLuTTTonDSA-anWvfjhDmR-akYcn4,182
+ radioactiveshrimp/distributions/cvdistributions.py,sha256=UEiO_RYFQYqHybI_wcOdpOrxHToXRpLFsB58dwvI4vU,2599
+ radioactiveshrimp/matrix/__init__.py,sha256=EoerrXlfmd2lI9grb3jxqIhHn-bzLtYhRKyxI0RVgaE,166
+ radioactiveshrimp/matrix/elementary.py,sha256=zPiBG24F5zIncrrVWbiXEp1oFnHw2xNKy5quF0GBOuc,6333
+ radioactiveshrimp/model/__init__.py,sha256=OLj9ObpV0i7DFJOb8U12qnJKVAgFb8_hB9yBFV57IZQ,122
+ radioactiveshrimp/model/regression.py,sha256=i2CF6OVOb7eufzckAltRHJaoJlpproxLHqOPYSjpSJI,15685
+ radioactiveshrimp/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ radioactiveshrimp-0.1.3.dist-info/RECORD,,
@@ -1,4 +1,4 @@
  Wheel-Version: 1.0
- Generator: maturin (1.9.4)
+ Generator: maturin (1.9.6)
  Root-Is-Purelib: false
  Tag: cp39-abi3-linux_x86_64
@@ -1,11 +0,0 @@
- radioactiveshrimp-0.1.1.dist-info/METADATA,sha256=4miZ8FtVUImI28hoh1lyXSbo-IuHwLBUmaK838D378Y,378
- radioactiveshrimp-0.1.1.dist-info/WHEEL,sha256=VCoUqQjnl5wExkC-2PG3I2s7-FBXYum-vf1jh8qiYg8,97
- radioactiveshrimp/__init__.py,sha256=QY4ait-Lo2WQdyx7MU8N6a1v2ZYyAmYRqzYqdNbiavI,101
- radioactiveshrimp/_core.abi3.so,sha256=3IMwsP7UrgNWdKdw2Pl11_VUQF8NqPy1xyyPuzkREdA,489672
- radioactiveshrimp/_core.pyi,sha256=b6oJaUXUzEzqUE5rpqefV06hl8o_JCU8pgKgIIzQgmc,33
- radioactiveshrimp/differential/__init.py__,sha256=jOhQKr6xM_7TVUt0GWRddtkgBvNn5I873WZcArT-GtE,101
- radioactiveshrimp/differential/discrete.py,sha256=9-w6Avz4MZfu8gFhJU8kTvryn9oT2xS1TvvYQ_8pmPU,1371
- radioactiveshrimp/matrix/__init.py__,sha256=EoerrXlfmd2lI9grb3jxqIhHn-bzLtYhRKyxI0RVgaE,166
- radioactiveshrimp/matrix/elementary.py,sha256=zPiBG24F5zIncrrVWbiXEp1oFnHw2xNKy5quF0GBOuc,6333
- radioactiveshrimp/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- radioactiveshrimp-0.1.1.dist-info/RECORD,,