learngrad 0.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- learngrad-0.1.0/LICENSE +21 -0
- learngrad-0.1.0/PKG-INFO +81 -0
- learngrad-0.1.0/README.md +43 -0
- learngrad-0.1.0/learngrad/__init__.py +1 -0
- learngrad-0.1.0/learngrad/engine.py +109 -0
- learngrad-0.1.0/learngrad/nn.py +69 -0
- learngrad-0.1.0/learngrad/optimizers.py +72 -0
- learngrad-0.1.0/learngrad.egg-info/PKG-INFO +81 -0
- learngrad-0.1.0/learngrad.egg-info/SOURCES.txt +12 -0
- learngrad-0.1.0/learngrad.egg-info/dependency_links.txt +1 -0
- learngrad-0.1.0/learngrad.egg-info/requires.txt +1 -0
- learngrad-0.1.0/learngrad.egg-info/top_level.txt +1 -0
- learngrad-0.1.0/pyproject.toml +26 -0
- learngrad-0.1.0/setup.cfg +4 -0
learngrad-0.1.0/LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2026 Dev Parikh
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
learngrad-0.1.0/PKG-INFO
ADDED
|
@@ -0,0 +1,81 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: learngrad
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: A minimal scalar-valued autograd engine with neural network primitives, built for learning purposes.
|
|
5
|
+
Author-email: Dev Parikh <dev.dpparikh@gmail.com>
|
|
6
|
+
License: MIT License
|
|
7
|
+
|
|
8
|
+
Copyright (c) 2026 Dev Parikh
|
|
9
|
+
|
|
10
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
11
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
12
|
+
in the Software without restriction, including without limitation the rights
|
|
13
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
14
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
15
|
+
furnished to do so, subject to the following conditions:
|
|
16
|
+
|
|
17
|
+
The above copyright notice and this permission notice shall be included in all
|
|
18
|
+
copies or substantial portions of the Software.
|
|
19
|
+
|
|
20
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
21
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
22
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
23
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
24
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
25
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
26
|
+
SOFTWARE.
|
|
27
|
+
|
|
28
|
+
Project-URL: Homepage, https://github.com/devparikh0506/learngrad
|
|
29
|
+
Keywords: autograd,neural-network,deep-learning,education,optimizers
|
|
30
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
31
|
+
Classifier: Programming Language :: Python :: 3
|
|
32
|
+
Classifier: Operating System :: OS Independent
|
|
33
|
+
Requires-Python: >=3.10
|
|
34
|
+
Description-Content-Type: text/markdown
|
|
35
|
+
License-File: LICENSE
|
|
36
|
+
Requires-Dist: numpy>=1.24
|
|
37
|
+
Dynamic: license-file
|
|
38
|
+
|
|
39
|
+
# learngrad
|
|
40
|
+
|
|
41
|
+
A minimal scalar-valued autograd engine with neural network primitives, built for learning purposes. Inspired by [micrograd](https://github.com/karpathy/micrograd).
|
|
42
|
+
|
|
43
|
+
## Install
|
|
44
|
+
|
|
45
|
+
```bash
|
|
46
|
+
pip install learngrad
|
|
47
|
+
```
|
|
48
|
+
|
|
49
|
+
## What's inside
|
|
50
|
+
|
|
51
|
+
- **`Value`** — scalar with automatic differentiation via backprop
|
|
52
|
+
- **`MLP`** — multi-layer perceptron built on top of `Value`
|
|
53
|
+
- **Optimizers** — `SGD`, `SGDMomentum`, `RMSProp`, `Adam`
|
|
54
|
+
|
|
55
|
+
## Quick example
|
|
56
|
+
|
|
57
|
+
```python
|
|
58
|
+
from learngrad.engine import Value
|
|
59
|
+
from learngrad.nn import MLP
|
|
60
|
+
from learngrad.optimizers import Adam
|
|
61
|
+
|
|
62
|
+
# Autograd
|
|
63
|
+
x = Value(2.0)
|
|
64
|
+
y = x ** 2 + x * 3
|
|
65
|
+
y.backward()
|
|
66
|
+
print(x.grad) # dy/dx = 2x + 3 = 7.0
|
|
67
|
+
|
|
68
|
+
# Neural net
|
|
69
|
+
model = MLP(2, [4, 4, 1])
|
|
70
|
+
opt = Adam(model.parameters(), lr=1e-3)
|
|
71
|
+
|
|
72
|
+
x = [Value(1.0), Value(0.5)]
|
|
73
|
+
out = model(x)
|
|
74
|
+
out.backward()
|
|
75
|
+
opt.step()
|
|
76
|
+
```
|
|
77
|
+
|
|
78
|
+
## Requirements
|
|
79
|
+
|
|
80
|
+
- Python >= 3.10
|
|
81
|
+
- numpy
|
|
@@ -0,0 +1,43 @@
|
|
|
1
|
+
# learngrad
|
|
2
|
+
|
|
3
|
+
A minimal scalar-valued autograd engine with neural network primitives, built for learning purposes. Inspired by [micrograd](https://github.com/karpathy/micrograd).
|
|
4
|
+
|
|
5
|
+
## Install
|
|
6
|
+
|
|
7
|
+
```bash
|
|
8
|
+
pip install learngrad
|
|
9
|
+
```
|
|
10
|
+
|
|
11
|
+
## What's inside
|
|
12
|
+
|
|
13
|
+
- **`Value`** — scalar with automatic differentiation via backprop
|
|
14
|
+
- **`MLP`** — multi-layer perceptron built on top of `Value`
|
|
15
|
+
- **Optimizers** — `SGD`, `SGDMomentum`, `RMSProp`, `Adam`
|
|
16
|
+
|
|
17
|
+
## Quick example
|
|
18
|
+
|
|
19
|
+
```python
|
|
20
|
+
from learngrad.engine import Value
|
|
21
|
+
from learngrad.nn import MLP
|
|
22
|
+
from learngrad.optimizers import Adam
|
|
23
|
+
|
|
24
|
+
# Autograd
|
|
25
|
+
x = Value(2.0)
|
|
26
|
+
y = x ** 2 + x * 3
|
|
27
|
+
y.backward()
|
|
28
|
+
print(x.grad) # dy/dx = 2x + 3 = 7.0
|
|
29
|
+
|
|
30
|
+
# Neural net
|
|
31
|
+
model = MLP(2, [4, 4, 1])
|
|
32
|
+
opt = Adam(model.parameters(), lr=1e-3)
|
|
33
|
+
|
|
34
|
+
x = [Value(1.0), Value(0.5)]
|
|
35
|
+
out = model(x)
|
|
36
|
+
out.backward()
|
|
37
|
+
opt.step()
|
|
38
|
+
```
|
|
39
|
+
|
|
40
|
+
## Requirements
|
|
41
|
+
|
|
42
|
+
- Python >= 3.10
|
|
43
|
+
- numpy
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
|
|
@@ -0,0 +1,109 @@
|
|
|
1
|
+
import numpy as np
|
|
2
|
+
class Value:
    """Stores a single scalar value and its gradient.

    Wraps a raw number and records the operation and operands that
    produced it, so that backward() can propagate gradients through the
    resulting expression graph via reverse-mode autodiff.
    """

    def __init__(self, data, children=(), op=''):
        # payload
        self.data = data

        # children and op used to obtain this value (graph bookkeeping)
        self._prev = set(children)
        self._op = op

        # initial grad for a value = 0; _backward applies this node's
        # chain-rule step to its children and is a no-op for leaf nodes
        self.grad = 0
        self._backward = lambda: None

    def __add__(self, other):
        other = other if isinstance(other, Value) else Value(other)
        out = Value(self.data + other.data, (self, other), '+')

        def backward():
            # d(a+b)/da = d(a+b)/db = 1
            self.grad += out.grad
            other.grad += out.grad
        out._backward = backward
        return out

    def __mul__(self, other):
        other = other if isinstance(other, Value) else Value(other)
        out = Value(self.data * other.data, (self, other), '*')

        def backward():
            # d(ab)/da = b, d(ab)/db = a
            self.grad += other.data * out.grad
            other.grad += self.data * out.grad
        out._backward = backward
        return out

    def __neg__(self):
        return self * -1

    def __sub__(self, other):
        return self + (-other)

    def __pow__(self, other):
        assert isinstance(other, (int, float)), "only supporting int/float powers for now"
        out = Value(self.data**other, (self,), f'**{other}')

        def backward():
            # d(x^n)/dx = n * x^(n-1)
            self.grad += out.grad * (other * self.data ** (other - 1))
        out._backward = backward
        return out

    def __truediv__(self, other):
        return self * other**-1

    def __rmul__(self, other):
        return self * other

    def __radd__(self, other):
        return self + other

    def __rsub__(self, other):
        return other + (-self)

    def __rtruediv__(self, other):
        return other * self**-1

    def exp(self):
        """e ** self; d(e^x)/dx = e^x, which is out.data itself."""
        out = Value(np.exp(self.data), (self,), 'exp')

        def backward():
            self.grad += out.data * out.grad
        out._backward = backward
        return out

    def relu(self):
        """Rectified linear unit: max(0, self)."""
        out = Value(0 if self.data < 0 else self.data, (self,), 'ReLU')

        def _backward():
            # gradient flows only where the output was positive
            self.grad += (out.data > 0) * out.grad
        out._backward = _backward
        return out

    def backward(self):
        """Run backpropagation from this node through the whole graph.

        Builds a topological order of the graph, seeds this node's
        gradient with 1, then applies each node's chain-rule step in
        reverse topological order so parents run before children.
        """
        topo = []
        visited = set()

        def build_topo(v):
            if v not in visited:
                visited.add(v)
                for child in v._prev:
                    build_topo(child)
                topo.append(v)
        build_topo(self)

        self.grad = 1
        for val in reversed(topo):
            val._backward()

    # NOTE: the original defined __repr__ twice with identical bodies;
    # the redundant duplicate has been removed.
    def __repr__(self):
        return f"Value(data={self.data}, grad={self.grad})"
|
|
109
|
+
|
|
@@ -0,0 +1,69 @@
|
|
|
1
|
+
import numpy as np
|
|
2
|
+
import random
|
|
3
|
+
from itertools import groupby
|
|
4
|
+
from .engine import Value
|
|
5
|
+
|
|
6
|
+
|
|
7
|
+
class Module:
    """Base class for neural-network components.

    Subclasses override parameters() to expose their trainable Values;
    zero_grad() then works for free.
    """

    def parameters(self):
        """Return this module's trainable parameters (none by default)."""
        return []

    def zero_grad(self):
        """Reset every parameter's gradient to zero before a new backward pass."""
        for param in self.parameters():
            param.grad = 0
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
class Neuron(Module):
    """A single neuron: weighted sum of inputs plus bias, optionally ReLU-activated."""

    def __init__(self, nin, nonlin=True) -> None:
        super().__init__()
        # Weights and bias start uniformly random in [-1, 1].
        self.w = [Value(random.uniform(-1, 1)) for _ in range(nin)]
        self.b = Value(random.uniform(-1, 1))
        self.nonlin = nonlin

    def __call__(self, x):
        """Compute b + w·x, applying ReLU when nonlin is set."""
        total = self.b
        for weight, feature in zip(self.w, x):
            total = total + weight * feature
        return total.relu() if self.nonlin else total

    def parameters(self):
        """All trainable scalars: the weights followed by the bias."""
        return [*self.w, self.b]

    def __repr__(self):
        kind = 'ReLU' if self.nonlin else 'Linear'
        return f"{kind}Neuron({len(self.w)})"
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
class Layer(Module):
    """A fully-connected layer of `nout` independent neurons over `nin` inputs.

    Extra keyword arguments (e.g. nonlin) are forwarded to each Neuron.
    """

    def __init__(self, nin, nout, **kwargs) -> None:
        super().__init__()
        self.neurons = [Neuron(nin, **kwargs) for _ in range(nout)]

    def __call__(self, x):
        """Apply every neuron to x; a single-output layer returns a bare Value."""
        out = [n(x) for n in self.neurons]
        return out[0] if len(out) == 1 else out

    def parameters(self):
        """Flattened parameters of all neurons in the layer."""
        return [p for n in self.neurons for p in n.parameters()]

    def __repr__(self) -> str:
        result = []
        for key, group in groupby(self.neurons, key=str):
            count = sum(1 for _ in group)
            result.append(f"{count} x {key}" if count > 1 else key)
        # Join outside the f-string: reusing the same quote character inside
        # an f-string is a SyntaxError before Python 3.12 (PEP 701), and the
        # package declares support for Python >= 3.10.
        return "Layer({})".format(", ".join(result))
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
class MLP(Module):
    """A multi-layer perceptron: `nin` inputs through layers of sizes `nouts`.

    Every layer is ReLU-activated except the final one, which stays
    linear so outputs are unbounded.
    """

    def __init__(self, nin, nouts) -> None:
        sz = [nin] + nouts
        # nonlin is False only for the last layer.
        self.layers = [
            Layer(nin=sz[i], nout=sz[i + 1], nonlin=i != len(nouts) - 1)
            for i in range(len(nouts))
        ]

    def __call__(self, x):
        """Forward pass: feed x through each layer in order."""
        for layer in self.layers:
            x = layer(x)
        return x

    def parameters(self):
        """Flattened parameters of all layers."""
        return [p for layer in self.layers for p in layer.parameters()]

    def __repr__(self):
        # Join outside the f-string: a backslash inside an f-string
        # replacement field is a SyntaxError before Python 3.12 (PEP 701),
        # but the package declares support for Python >= 3.10.
        body = ",\n".join(str(layer) for layer in self.layers)
        return f"MLP(\n{body}\n)"
|
|
@@ -0,0 +1,72 @@
|
|
|
1
|
+
class SGD:
    """Vanilla stochastic gradient descent: p <- p - lr * grad.

    `parameters` is a list of objects exposing mutable `data` and `grad`
    attributes (e.g. learngrad Values).
    """

    def __init__(self, parameters, lr=0.01):
        self.lr = lr
        self.parameters = parameters

    def step(self):
        """Take one descent step on every parameter."""
        for param in self.parameters:
            param.data = param.data - self.lr * param.grad

    def zero_grad(self):
        """Clear accumulated gradients before the next backward pass."""
        for param in self.parameters:
            param.grad = 0
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
class SGDMomentum:
    """SGD with a momentum buffer: v <- beta*v + grad; p <- p - lr*v."""

    def __init__(self, parameters, lr=0.01, beta=0.9):
        self.lr = lr
        self.parameters = parameters
        self.beta = beta
        # One velocity slot per parameter, starting from rest.
        self.v = [0 for _ in parameters]

    def step(self):
        """Update the velocity for each parameter, then descend along it."""
        for idx, param in enumerate(self.parameters):
            velocity = self.v[idx] * self.beta + param.grad
            self.v[idx] = velocity
            param.data = param.data - self.lr * velocity

    def zero_grad(self):
        """Clear accumulated gradients before the next backward pass."""
        for param in self.parameters:
            param.grad = 0
|
|
30
|
+
|
|
31
|
+
class RMSProp:
    """RMSProp: scale each step by a running RMS of recent gradients.

    s <- beta*s + (1-beta)*grad^2;  p <- p - lr * grad / (sqrt(s) + eps)
    """

    def __init__(self, parameters, lr=0.01, beta=0.9, eps=1e-9) -> None:
        self.parameters = parameters
        self.lr = lr
        self.beta = beta
        self.eps = eps  # guards against division by zero
        # Running second-moment accumulator, one slot per parameter.
        self.s = [0 for _ in parameters]

    def step(self):
        """Update each parameter's accumulator and take a scaled step."""
        for idx, param in enumerate(self.parameters):
            grad = param.grad
            self.s[idx] = self.beta * self.s[idx] + (1 - self.beta) * grad * grad
            param.data = param.data - self.lr * (grad / (self.s[idx] ** 0.5 + self.eps))

    def zero_grad(self):
        """Clear accumulated gradients before the next backward pass."""
        for param in self.parameters:
            param.grad = 0
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
class Adam:
    """Adam optimizer: momentum plus RMS scaling with bias correction.

    Maintains exponentially-decaying first (m) and second (v) moment
    estimates of the gradient, corrects their zero-initialization bias,
    and steps p <- p - lr * m_hat / (sqrt(v_hat) + eps).
    """

    def __init__(self, parameters, lr=1e-3, beta1=0.9, beta2=0.999, eps=1e-8):
        self.parameters = parameters
        self.lr = lr
        self.beta1 = beta1
        self.beta2 = beta2
        self.eps = eps
        # Moment estimates, one slot per parameter, plus a step counter
        # used by the bias-correction terms.
        self.m = [0.0] * len(parameters)
        self.v = [0.0] * len(parameters)
        self.t = 0

    def zero_grad(self):
        """Clear accumulated gradients before the next backward pass."""
        for param in self.parameters:
            param.grad = 0

    def step(self):
        """Advance the step counter and update every parameter."""
        self.t += 1
        for idx, param in enumerate(self.parameters):
            grad = param.grad
            self.m[idx] = self.beta1 * self.m[idx] + (1 - self.beta1) * grad
            self.v[idx] = self.beta2 * self.v[idx] + (1 - self.beta2) * grad ** 2

            # Undo the bias toward zero from initializing m and v at 0.
            m_hat = self.m[idx] / (1 - self.beta1 ** self.t)
            v_hat = self.v[idx] / (1 - self.beta2 ** self.t)

            param.data = param.data - self.lr * m_hat / (v_hat ** 0.5 + self.eps)
|
|
@@ -0,0 +1,81 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: learngrad
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: A minimal scalar-valued autograd engine with neural network primitives, built for learning purposes.
|
|
5
|
+
Author-email: Dev Parikh <dev.dpparikh@gmail.com>
|
|
6
|
+
License: MIT License
|
|
7
|
+
|
|
8
|
+
Copyright (c) 2026 Dev Parikh
|
|
9
|
+
|
|
10
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
11
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
12
|
+
in the Software without restriction, including without limitation the rights
|
|
13
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
14
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
15
|
+
furnished to do so, subject to the following conditions:
|
|
16
|
+
|
|
17
|
+
The above copyright notice and this permission notice shall be included in all
|
|
18
|
+
copies or substantial portions of the Software.
|
|
19
|
+
|
|
20
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
21
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
22
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
23
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
24
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
25
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
26
|
+
SOFTWARE.
|
|
27
|
+
|
|
28
|
+
Project-URL: Homepage, https://github.com/devparikh0506/learngrad
|
|
29
|
+
Keywords: autograd,neural-network,deep-learning,education,optimizers
|
|
30
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
31
|
+
Classifier: Programming Language :: Python :: 3
|
|
32
|
+
Classifier: Operating System :: OS Independent
|
|
33
|
+
Requires-Python: >=3.10
|
|
34
|
+
Description-Content-Type: text/markdown
|
|
35
|
+
License-File: LICENSE
|
|
36
|
+
Requires-Dist: numpy>=1.24
|
|
37
|
+
Dynamic: license-file
|
|
38
|
+
|
|
39
|
+
# learngrad
|
|
40
|
+
|
|
41
|
+
A minimal scalar-valued autograd engine with neural network primitives, built for learning purposes. Inspired by [micrograd](https://github.com/karpathy/micrograd).
|
|
42
|
+
|
|
43
|
+
## Install
|
|
44
|
+
|
|
45
|
+
```bash
|
|
46
|
+
pip install learngrad
|
|
47
|
+
```
|
|
48
|
+
|
|
49
|
+
## What's inside
|
|
50
|
+
|
|
51
|
+
- **`Value`** — scalar with automatic differentiation via backprop
|
|
52
|
+
- **`MLP`** — multi-layer perceptron built on top of `Value`
|
|
53
|
+
- **Optimizers** — `SGD`, `SGDMomentum`, `RMSProp`, `Adam`
|
|
54
|
+
|
|
55
|
+
## Quick example
|
|
56
|
+
|
|
57
|
+
```python
|
|
58
|
+
from learngrad.engine import Value
|
|
59
|
+
from learngrad.nn import MLP
|
|
60
|
+
from learngrad.optimizers import Adam
|
|
61
|
+
|
|
62
|
+
# Autograd
|
|
63
|
+
x = Value(2.0)
|
|
64
|
+
y = x ** 2 + x * 3
|
|
65
|
+
y.backward()
|
|
66
|
+
print(x.grad) # dy/dx = 2x + 3 = 7.0
|
|
67
|
+
|
|
68
|
+
# Neural net
|
|
69
|
+
model = MLP(2, [4, 4, 1])
|
|
70
|
+
opt = Adam(model.parameters(), lr=1e-3)
|
|
71
|
+
|
|
72
|
+
x = [Value(1.0), Value(0.5)]
|
|
73
|
+
out = model(x)
|
|
74
|
+
out.backward()
|
|
75
|
+
opt.step()
|
|
76
|
+
```
|
|
77
|
+
|
|
78
|
+
## Requirements
|
|
79
|
+
|
|
80
|
+
- Python >= 3.10
|
|
81
|
+
- numpy
|
|
@@ -0,0 +1,12 @@
|
|
|
1
|
+
LICENSE
|
|
2
|
+
README.md
|
|
3
|
+
pyproject.toml
|
|
4
|
+
learngrad/__init__.py
|
|
5
|
+
learngrad/engine.py
|
|
6
|
+
learngrad/nn.py
|
|
7
|
+
learngrad/optimizers.py
|
|
8
|
+
learngrad.egg-info/PKG-INFO
|
|
9
|
+
learngrad.egg-info/SOURCES.txt
|
|
10
|
+
learngrad.egg-info/dependency_links.txt
|
|
11
|
+
learngrad.egg-info/requires.txt
|
|
12
|
+
learngrad.egg-info/top_level.txt
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
numpy>=1.24
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
learngrad
|
|
@@ -0,0 +1,26 @@
|
|
|
1
|
+
[build-system]
|
|
2
|
+
requires = ["setuptools>=61", "wheel"]
|
|
3
|
+
build-backend = "setuptools.build_meta"
|
|
4
|
+
|
|
5
|
+
[project]
|
|
6
|
+
name = "learngrad"
|
|
7
|
+
version = "0.1.0"
|
|
8
|
+
description = "A minimal scalar-valued autograd engine with neural network primitives, built for learning purposes."
|
|
9
|
+
readme = "README.md"
|
|
10
|
+
requires-python = ">=3.10"
|
|
11
|
+
license = { file = "LICENSE" }
|
|
12
|
+
authors = [{ name = "Dev Parikh", email = "dev.dpparikh@gmail.com" }]
|
|
13
|
+
dependencies = ["numpy>=1.24"]
|
|
14
|
+
keywords = ["autograd", "neural-network", "deep-learning", "education", "optimizers"]
|
|
15
|
+
|
|
16
|
+
classifiers = [
|
|
17
|
+
"License :: OSI Approved :: MIT License",
|
|
18
|
+
"Programming Language :: Python :: 3",
|
|
19
|
+
"Operating System :: OS Independent",
|
|
20
|
+
]
|
|
21
|
+
|
|
22
|
+
[project.urls]
|
|
23
|
+
Homepage = "https://github.com/devparikh0506/learngrad"
|
|
24
|
+
|
|
25
|
+
[tool.setuptools.packages.find]
|
|
26
|
+
include = ["learngrad*"]
|