rb-deeplearning-lib 0.0.1__tar.gz → 0.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {rb_deeplearning_lib-0.0.1/src/rb_deeplearning_lib.egg-info → rb_deeplearning_lib-0.1.0}/PKG-INFO +2 -2
- {rb_deeplearning_lib-0.0.1 → rb_deeplearning_lib-0.1.0}/pyproject.toml +2 -2
- {rb_deeplearning_lib-0.0.1 → rb_deeplearning_lib-0.1.0}/src/rb_deeplearning_lib/neural_net.py +17 -12
- rb_deeplearning_lib-0.1.0/src/rb_deeplearning_lib/optimizer.py +100 -0
- {rb_deeplearning_lib-0.0.1 → rb_deeplearning_lib-0.1.0/src/rb_deeplearning_lib.egg-info}/PKG-INFO +2 -2
- {rb_deeplearning_lib-0.0.1 → rb_deeplearning_lib-0.1.0}/src/rb_deeplearning_lib.egg-info/SOURCES.txt +1 -0
- {rb_deeplearning_lib-0.0.1 → rb_deeplearning_lib-0.1.0}/LICENSE +0 -0
- {rb_deeplearning_lib-0.0.1 → rb_deeplearning_lib-0.1.0}/README.md +0 -0
- {rb_deeplearning_lib-0.0.1 → rb_deeplearning_lib-0.1.0}/setup.cfg +0 -0
- {rb_deeplearning_lib-0.0.1 → rb_deeplearning_lib-0.1.0}/src/rb_deeplearning_lib/__init__.py +0 -0
- {rb_deeplearning_lib-0.0.1 → rb_deeplearning_lib-0.1.0}/src/rb_deeplearning_lib/autogradient.py +0 -0
- {rb_deeplearning_lib-0.0.1 → rb_deeplearning_lib-0.1.0}/src/rb_deeplearning_lib/sequence.py +0 -0
- {rb_deeplearning_lib-0.0.1 → rb_deeplearning_lib-0.1.0}/src/rb_deeplearning_lib.egg-info/dependency_links.txt +0 -0
- {rb_deeplearning_lib-0.0.1 → rb_deeplearning_lib-0.1.0}/src/rb_deeplearning_lib.egg-info/requires.txt +0 -0
- {rb_deeplearning_lib-0.0.1 → rb_deeplearning_lib-0.1.0}/src/rb_deeplearning_lib.egg-info/top_level.txt +0 -0
{rb_deeplearning_lib-0.0.1/src/rb_deeplearning_lib.egg-info → rb_deeplearning_lib-0.1.0}/PKG-INFO
RENAMED
|
@@ -1,9 +1,9 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: rb-deeplearning-lib
|
|
3
|
-
Version: 0.0.1
|
|
3
|
+
Version: 0.1.0
|
|
4
4
|
Summary: This is a machine learning--more specifically deep learning--library from my independent study on deep learning. This library is both a result of my learning and a tool for AI development.
|
|
5
5
|
License-Expression: MIT
|
|
6
|
-
Project-URL: Homepage, https://github.com/rylan-berry/DeepLearningIndependentStudy/deeplearning_package
|
|
6
|
+
Project-URL: Homepage, https://github.com/rylan-berry/DeepLearningIndependentStudy/tree/main/deeplearning_package
|
|
7
7
|
Requires-Python: >=3.8
|
|
8
8
|
Description-Content-Type: text/markdown
|
|
9
9
|
License-File: LICENSE
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
[project]
|
|
2
2
|
name = "rb-deeplearning-lib"
|
|
3
|
-
version = "0.0.1"
|
|
3
|
+
version = "0.1.0"
|
|
4
4
|
description = "This is a machine learning--more specifically deep learning--library from my independent study on deep learning. This library is both a result of my learning and a tool for AI development."
|
|
5
5
|
readme = "README.md"
|
|
6
6
|
requires-python = ">=3.8"
|
|
@@ -11,7 +11,7 @@ dependencies = [
|
|
|
11
11
|
]
|
|
12
12
|
|
|
13
13
|
[project.urls]
|
|
14
|
-
Homepage = "https://github.com/rylan-berry/DeepLearningIndependentStudy/deeplearning_package"
|
|
14
|
+
Homepage = "https://github.com/rylan-berry/DeepLearningIndependentStudy/tree/main/deeplearning_package"
|
|
15
15
|
|
|
16
16
|
[build-system]
|
|
17
17
|
requires = ["setuptools>=77.0.3"]
|
{rb_deeplearning_lib-0.0.1 → rb_deeplearning_lib-0.1.0}/src/rb_deeplearning_lib/neural_net.py
RENAMED
|
@@ -1,3 +1,6 @@
|
|
|
1
|
+
import optimizer
|
|
2
|
+
import sequence
|
|
3
|
+
|
|
1
4
|
class Layer:
|
|
2
5
|
def __init__(self, input,out,activ="_",rangeW=(-1,1),rangeB=(-1,1)):
|
|
3
6
|
self.weights = Values((rangeW[0]-rangeW[1])*np.random.rand(input,out)+rangeW[1])
|
|
@@ -17,13 +20,7 @@ class Layer:
|
|
|
17
20
|
|
|
18
21
|
def params(self):
|
|
19
22
|
return self.weights, self.bias
|
|
20
|
-
|
|
21
|
-
def updateParams(self, l_rate):
|
|
22
|
-
self.weights.vals = self.weights.vals - l_rate * self.weights.grad
|
|
23
|
-
self.bias.vals = self.bias.vals - l_rate * self.bias.grad
|
|
24
|
-
self.weights.grad = self.weights.grad * 0
|
|
25
|
-
self.bias.grad = self.bias.grad * 0
|
|
26
|
-
|
|
23
|
+
|
|
27
24
|
class Dense:
|
|
28
25
|
def __init__(self, layNum, inL, midL, outL, activ="_",f_activ="_",rangeW=(-0.1,0.1),rangeB=(-0.1,0.1)):
|
|
29
26
|
if layNum < 1:
|
|
@@ -46,8 +43,6 @@ class Dense:
|
|
|
46
43
|
|
|
47
44
|
def params(self):
|
|
48
45
|
return self.seq.params()
|
|
49
|
-
def updateParams(self, l_rate):
|
|
50
|
-
self.seq.updateParams(l_rate)
|
|
51
46
|
|
|
52
47
|
class Dropout:
|
|
53
48
|
def __init__(self, size, chance):
|
|
@@ -81,8 +76,15 @@ def mse_loss(y_true, y_pred):
|
|
|
81
76
|
|
|
82
77
|
|
|
83
78
|
class Model:
|
|
84
|
-
def __init__(self, blocks, regu = "", train = True, loss_fn=None, pen_fn = None):
|
|
79
|
+
def __init__(self, blocks, regu = "", train = True, loss_fn=None, pen_fn = None, optimizer=None):
|
|
85
80
|
self.blocks = Sequence(blocks)
|
|
81
|
+
|
|
82
|
+
# Handle optimizer instantiation
|
|
83
|
+
if optimizer is None:
|
|
84
|
+
self.optimizer = Optimizer()
|
|
85
|
+
else:
|
|
86
|
+
self.optimizer = optimizer
|
|
87
|
+
|
|
86
88
|
self.regu = regu
|
|
87
89
|
self.inTrain = train
|
|
88
90
|
self.train_loss = []
|
|
@@ -103,7 +105,7 @@ class Model:
|
|
|
103
105
|
x_ = x if isinstance(x, Values) else Values(x)
|
|
104
106
|
return self.blocks(x_)
|
|
105
107
|
|
|
106
|
-
def train(self, epochs, x_t, y_t, x_v, y_v,
|
|
108
|
+
def train(self, epochs, x_t, y_t, x_v, y_v, l_rate=0.01, val_run=1, _lambda=0.1, batch_size = None):
|
|
107
109
|
x_trn = x_t if isinstance(x_t, Values) else Values(x_t)
|
|
108
110
|
y_trn = y_t if isinstance(y_t, Values) else Values(y_t)
|
|
109
111
|
x_vl = x_v if isinstance(x_v, Values) else Values(x_v)
|
|
@@ -156,7 +158,10 @@ class Model:
|
|
|
156
158
|
penalized_loss = self.pen_fn(current_loss,self,_lambda)
|
|
157
159
|
penalized_loss.grad = np.ones_like(penalized_loss.vals)
|
|
158
160
|
penalized_loss.backward()
|
|
159
|
-
|
|
161
|
+
# Use the optimizer to step and clear gradients
|
|
162
|
+
# Retrieve parameters within the training loop before each optimization step
|
|
163
|
+
all_params = self.blocks.params()
|
|
164
|
+
self.optimizer.step(all_params, l_rate)
|
|
160
165
|
print("\r", end="")
|
|
161
166
|
|
|
162
167
|
for l in self.blocks.arr:
|
|
@@ -0,0 +1,100 @@
|
|
|
1
|
+
import numpy as np
|
|
2
|
+
import autogradient
|
|
3
|
+
|
|
4
|
+
class Optimizer:
    """Base optimizer implementing plain gradient descent.

    Each parameter object ``p`` is expected to expose ``p.vals`` (the
    current values) and ``p.grad`` (the accumulated gradient) as numpy
    arrays. Subclasses override ``step`` for other update rules.
    """

    def __init__(self):
        # The base optimizer carries no state between steps.
        pass

    def step(self, params, learning_rate):
        """Apply one vanilla gradient-descent update, then zero the grads."""
        for param in params:
            param.vals = param.vals - learning_rate * param.grad
            param.grad = np.zeros_like(param.grad)
|
|
12
|
+
|
|
13
|
+
class Optim_SGD(Optimizer):
    """SGD with a learning rate that decays linearly to a final value.

    For the first ``finitters`` steps the effective learning rate is
    linearly interpolated from the ``learning_rate`` passed to ``step``
    down to ``fin_l_rate``; after that it stays at ``fin_l_rate``.
    """

    def __init__(self, finitters, fin_l_rate):
        self.t = 0                    # number of steps taken so far
        # Bug fix: this was stored as ``self.finitter`` while ``step``
        # read ``self.finitters``, so every call raised AttributeError.
        # The attribute is now consistently named ``finitters``.
        self.finitters = finitters    # iterations over which to decay
        self.fin_l_rate = fin_l_rate  # learning rate once decay finishes

    def step(self, params, learning_rate):
        """Apply one linearly-decayed SGD update, then zero the grads."""
        self.t += 1
        alpha = self.t / self.finitters
        if alpha < 1:
            # Linear interpolation between the initial and final rate.
            l_rate = learning_rate * (1 - alpha) + alpha * self.fin_l_rate
        else:
            l_rate = self.fin_l_rate
        for p in params:
            p.vals = p.vals - l_rate * p.grad
            p.grad = np.zeros_like(p.grad)
|
|
30
|
+
|
|
31
|
+
class Optim_SGD_Momentum(Optimizer):
    """SGD with classical momentum.

    Keeps one velocity buffer per parameter object (keyed by the
    parameter itself) and moves each parameter by its velocity.
    """

    def __init__(self, mom_beta=0.9):
        self.v = {}           # per-parameter velocity buffers
        self.beta = mom_beta  # momentum decay factor

    def step(self, params, learning_rate):
        """v <- beta*v - lr*grad; p <- p + v; then zero the gradients."""
        for p in params:
            velocity = self.v.get(p)
            if velocity is None:
                velocity = np.zeros_like(p.vals)
            velocity = self.beta * velocity - learning_rate * p.grad
            self.v[p] = velocity
            p.vals = p.vals + velocity
            p.grad = np.zeros_like(p.grad)
|
|
45
|
+
|
|
46
|
+
class Optim_AdaGrad(Optimizer):
    """AdaGrad: per-parameter rates from accumulated squared gradients."""

    def __init__(self, gamma=0.0000001):
        self.gamma = gamma  # small constant to avoid division by zero
        self.r = {}         # per-parameter sums of squared gradients

    def step(self, params, l_rate):
        """Accumulate grad**2 and scale the update by 1/sqrt(accumulator)."""
        for p in params:
            accum = self.r.get(p)
            if accum is None:
                accum = np.zeros_like(p.vals)
            accum = accum + p.grad ** 2
            self.r[p] = accum
            p.vals = p.vals - l_rate * p.grad / (self.gamma + accum ** 0.5)
            p.grad = np.zeros_like(p.grad)
|
|
58
|
+
|
|
59
|
+
class Optim_RMSPropclass(Optimizer):
    """RMSProp: exponentially decaying average of squared gradients.

    NOTE(review): the class name looks like "RMSProp" with "class" fused
    on — kept as-is because renaming would break callers.
    """

    def __init__(self, decay_rate, gamma=0.000001):
        self.decay_rate = decay_rate  # how quickly old squared grads decay
        self.gamma = gamma            # small constant to avoid div-by-zero
        self.r = {}                   # per-parameter running averages

    def step(self, params, l_rate):
        """r <- dr*r + (1-dr)*grad**2; update scaled by 1/sqrt(r)."""
        decay = self.decay_rate
        for p in params:
            if p not in self.r:
                self.r[p] = np.zeros_like(p.vals)
            running = decay * self.r[p] + (1 - decay) * p.grad ** 2
            self.r[p] = running
            p.vals = p.vals - l_rate * p.grad / (self.gamma + running ** 0.5)
            p.grad = np.zeros_like(p.grad)
|
|
72
|
+
|
|
73
|
+
class Optim_Adam(Optimizer):
    """Adam: momentum plus RMS scaling with bias correction."""

    def __init__(self, beta1, beta2, gamma=0.000001):
        self.b1 = beta1     # decay for the first-moment (mean) estimate
        self.b2 = beta2     # decay for the second-moment (variance) estimate
        self.gamma = gamma  # small constant to avoid division by zero
        self.r = {}         # second-moment buffers, keyed by parameter
        self.s = {}         # first-moment buffers, keyed by parameter
        self.t = 0          # step counter used for bias correction

    def step(self, params, l_rate):
        """One Adam update with bias-corrected moment estimates."""
        self.t += 1
        step_num = self.t
        b1, b2 = self.b1, self.b2
        for p in params:
            if p not in self.r:
                self.r[p] = np.zeros_like(p.vals)
            if p not in self.s:
                self.s[p] = np.zeros_like(p.vals)

            self.s[p] = b1 * self.s[p] + (1 - b1) * p.grad
            self.r[p] = b2 * self.r[p] + (1 - b2) * p.grad ** 2

            # Bias-correct the running moments (both start at zero).
            s_hat = self.s[p] / (1 - b1 ** step_num)
            r_hat = self.r[p] / (1 - b2 ** step_num)

            p.vals = p.vals - l_rate * s_hat / (self.gamma + r_hat ** 0.5)
            p.grad = np.zeros_like(p.grad)
|
{rb_deeplearning_lib-0.0.1 → rb_deeplearning_lib-0.1.0/src/rb_deeplearning_lib.egg-info}/PKG-INFO
RENAMED
|
@@ -1,9 +1,9 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: rb-deeplearning-lib
|
|
3
|
-
Version: 0.0.1
|
|
3
|
+
Version: 0.1.0
|
|
4
4
|
Summary: This is a machine learning--more specifically deep learning--library from my independent study on deep learning. This library is both a result of my learning and a tool for AI development.
|
|
5
5
|
License-Expression: MIT
|
|
6
|
-
Project-URL: Homepage, https://github.com/rylan-berry/DeepLearningIndependentStudy/deeplearning_package
|
|
6
|
+
Project-URL: Homepage, https://github.com/rylan-berry/DeepLearningIndependentStudy/tree/main/deeplearning_package
|
|
7
7
|
Requires-Python: >=3.8
|
|
8
8
|
Description-Content-Type: text/markdown
|
|
9
9
|
License-File: LICENSE
|
{rb_deeplearning_lib-0.0.1 → rb_deeplearning_lib-0.1.0}/src/rb_deeplearning_lib.egg-info/SOURCES.txt
RENAMED
|
@@ -4,6 +4,7 @@ pyproject.toml
|
|
|
4
4
|
src/rb_deeplearning_lib/__init__.py
|
|
5
5
|
src/rb_deeplearning_lib/autogradient.py
|
|
6
6
|
src/rb_deeplearning_lib/neural_net.py
|
|
7
|
+
src/rb_deeplearning_lib/optimizer.py
|
|
7
8
|
src/rb_deeplearning_lib/sequence.py
|
|
8
9
|
src/rb_deeplearning_lib.egg-info/PKG-INFO
|
|
9
10
|
src/rb_deeplearning_lib.egg-info/SOURCES.txt
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
{rb_deeplearning_lib-0.0.1 → rb_deeplearning_lib-0.1.0}/src/rb_deeplearning_lib/autogradient.py
RENAMED
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|