froog 0.4.0__py3-none-any.whl → 0.4.2__py3-none-any.whl
This diff shows the content of publicly released package versions as published to a supported registry. It is provided for informational purposes only and reflects the changes between those versions as they appear in the public registry.
- froog/ops.py +74 -0
- froog/optim.py +1 -1
- {froog-0.4.0.dist-info → froog-0.4.2.dist-info}/METADATA +14 -74
- {froog-0.4.0.dist-info → froog-0.4.2.dist-info}/RECORD +7 -7
- {froog-0.4.0.dist-info → froog-0.4.2.dist-info}/WHEEL +1 -1
- {froog-0.4.0.dist-info → froog-0.4.2.dist-info}/LICENSE +0 -0
- {froog-0.4.0.dist-info → froog-0.4.2.dist-info}/top_level.txt +0 -0
froog/ops.py
CHANGED
@@ -9,6 +9,7 @@
 import numpy as np
 from froog.tensor import Function, register
 from froog.utils import im2col, col2im
+from froog.tensor import Tensor
 
 # *****************************************************
 # ____ ___ _____ __________ ____ ____ _____
@@ -142,6 +143,29 @@ class Sigmoid(Function):
     return grad_input
 register("sigmoid", Sigmoid)
 
+# class Dropout(Function):
+#   """
+#   Randomly zeroes some of the elements of the input tensor with probability p during training.
+#   The elements to zero are randomized on every forward call.
+#   During inference, dropout is disabled and the input is scaled by (1-p) to maintain the expected value.
+#   """
+#   @staticmethod
+#   def forward(ctx, input, p=0.5, training=True):
+#     if training:
+#       # Create a binary mask with probability (1-p) of being 1
+#       mask = (np.random.random(input.shape) > p).astype(np.float32)
+#       ctx.save_for_backward(mask)
+#       return input * mask
+#     else:
+#       # during inference, scale the input by (1-p)
+#       return input * (1-p)
+
+#   @staticmethod
+#   def backward(ctx, grad_output):
+#     mask, = ctx.saved_tensors
+#     return grad_output * mask
+# register("dropout", Dropout)
+
 class Reshape(Function):
   @staticmethod
   def forward(ctx, x, shape):
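The commented-out Dropout above uses the classic (non-inverted) scheme: mask activations at train time, then scale by (1-p) at inference so both paths keep the same expected value. A minimal standalone numpy sketch of that equivalence (illustrative shapes; not froog's Function/ctx API):

```python
import numpy as np

# Train-time masking vs. inference-time scaling, as in the commented-out
# Dropout above. E[x * mask] == x * (1 - p) elementwise.
rng = np.random.default_rng(0)
p = 0.5
x = np.ones((1000, 1000), dtype=np.float32)

mask = (rng.random(x.shape) > p).astype(np.float32)  # 1 with prob (1-p)
train_out = x * mask          # training: zero elements with probability p
infer_out = x * (1 - p)       # inference: deterministic scaling

print(train_out.mean())       # ~0.5 (noisy)
print(infer_out.mean())       # exactly 0.5
```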
@@ -358,3 +382,53 @@ class AvgPool2D(Function):
     ret[:, :, Y:my:py, X:mx:px] = grad_output / py / px # divide by avg of pool, e.g. for 2x2 pool /= 4
     return ret
 register('avg_pool2d', AvgPool2D)
+
+# *************************************
+#     _   ___   __   ____  ____  _____
+#    / | / / | / /  / __ \/ __ \/ ___/
+#   /  |/ /  |/ /  / / / / /_/ /\__ \
+#  / /|  / /|  /  / /_/ / ____/___/ /
+# /_/ |_/_/ |_/   \____/_/    /____/
+#
+# ************* nn ops ************
+
+def Linear(*x):
+  # random Glorot initialization
+  ret = np.random.uniform(-1., 1., size=x)/np.sqrt(np.prod(x))
+  return ret.astype(np.float32)
+
+def swish(x):
+  return x.mul(x.sigmoid())
+
+class BatchNorm2D:
+  """
+  __call__ follows the formula from the link below
+  pytorch version: https://pytorch.org/docs/stable/generated/torch.nn.BatchNorm2d.html
+
+  self.weight = γ
+  self.bias = β
+  self.running_mean = E[x]
+  self.running_var = Var[x]
+
+  the reshaping step ensures that each channel of the input has its
+  own separate set of parameters (mean, variance, weight, and bias)
+
+  self.running_mean has shape [num_channels].
+  self.running_mean.reshape(shape=[1, -1, 1, 1]) reshapes it to [1, num_channels, 1, 1]
+  """
+  def __init__(self, sz, eps=0.001):
+    self.eps = eps
+    self.weight = Tensor.zeros(sz)
+    self.bias = Tensor.zeros(sz)
+
+    # TODO: need running_mean and running_var
+    self.running_mean = Tensor.zeros(sz)
+    self.running_var = Tensor.zeros(sz)
+    self.num_batches_tracked = Tensor.zeros(1)
+
+  def __call__(self, x):
+    x = x.sub(self.running_mean.reshape(shape=[1, -1, 1, 1]))
+    x = x.mul(self.weight.reshape(shape=[1, -1, 1, 1]))
+    x = x.div(self.running_var.add(Tensor([self.eps], gpu=x.gpu)).reshape(shape=[1, -1, 1, 1]).sqrt())
+    x = x.add(self.bias.reshape(shape=[1, -1, 1, 1]))
+    return x
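The new nn ops are small: Linear draws uniform values scaled by 1/sqrt(prod(shape)) (a Glorot-style init), swish is x·sigmoid(x) (SiLU), and BatchNorm2D is inference-only, so its __call__ reduces to a per-channel affine transform over the running statistics. A hedged plain-numpy sketch of that broadcast (made-up shapes, outside froog's Tensor API):

```python
import numpy as np

# Per-channel broadcast equivalent to BatchNorm2D.__call__ above.
N, C, H, W = 2, 3, 4, 4
x = np.random.randn(N, C, H, W).astype(np.float32)
running_mean = np.zeros(C, dtype=np.float32)  # E[x]
running_var = np.ones(C, dtype=np.float32)    # Var[x]
weight = np.ones(C, dtype=np.float32)         # gamma
bias = np.zeros(C, dtype=np.float32)          # beta
eps = 0.001

def per_channel(v):
  # [C] -> [1, C, 1, 1] so each channel keeps its own parameters
  return v.reshape(1, -1, 1, 1)

out = (x - per_channel(running_mean)) * per_channel(weight)
out = out / np.sqrt(per_channel(running_var) + eps)
out = out + per_channel(bias)
assert out.shape == x.shape
```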
froog/optim.py
CHANGED
@@ -57,7 +57,7 @@ class RMSprop(Optimizer):
   RMSprop divides the learning rate by an exponentially decaying average of squared gradients.
 
   Notes:
-    The reason RPROP doesn
+    The reason RPROP doesn't work is that it violates the central idea behind stochastic gradient descent,
     which is when we have small enough learning rate, it averages the gradients over successive mini-batches.
   """
   def __init__(self, params, decay=0.9, lr=0.001, eps=1e-8):
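For reference, a hedged sketch of the update that docstring describes, reusing the decay/lr/eps defaults from the __init__ signature above (standalone numpy; froog's actual Optimizer plumbing may differ):

```python
import numpy as np

# One RMSprop step: divide the learning rate by an exponentially
# decaying average of squared gradients.
def rmsprop_step(param, grad, v, decay=0.9, lr=0.001, eps=1e-8):
  v = decay * v + (1 - decay) * grad * grad      # running average of grad^2
  param = param - lr * grad / (np.sqrt(v) + eps)
  return param, v
```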
{froog-0.4.0.dist-info → froog-0.4.2.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: froog
-Version: 0.4.0
+Version: 0.4.2
 Summary: a toy tensor library with opencl support
 Author: Kevin Buhler
 License: MIT
@@ -26,9 +26,10 @@ Requires-Dist: matplotlib
 <br/>
 </div>
 
-```froog``` is an easy-to-read tensor library (<a href="https://www.pepy.tech/projects/froog">25k pip installs!</a>)
+```froog``` is an easy-to-read tensor library (<a href="https://www.pepy.tech/projects/froog">25k pip installs!</a>) with OpenCL support for GPU acceleration. Inspired by pytorch, tinygrad, and micrograd.
 
-
+
+<!-- ```froog``` encapsulates everything from <a href="https://github.com/kevbuh/froog/blob/main/models/linear_regression.py">linear regression</a> to <a href="https://github.com/kevbuh/froog/blob/main/models/efficientnet.py">convolutional neural networks </a> in under 2000 lines. -->
 
 # Installation
 ```bash
@@ -191,9 +192,9 @@ So there are two quick examples to get you up and running. You might have notice
 - ```.max_pool2d()```
 - ```.avg_pool2d()```
 
-
+# GPU Support
 
-Have a GPU and need a speedup? You're in good luck because we have GPU support
+Have a GPU and need a speedup? You're in good luck because we have GPU support via OpenCL for our operations defined in <a href="https://github.com/kevbuh/froog/blob/main/froog/ops_gpu.py">```ops_gpu.py```</a>.
 
 Here's how you can send data to the GPU during a forward pass and bring it back to the CPU.
 
@@ -204,75 +205,19 @@ if GPU:
   out = model.forward(Tensor(img).to_gpu()).cpu()
 ```
 
-
+# EfficientNet in froog!
+
+<img src="assets/efficientnet_pug.png" alt="pug" height="300">
 
 We have a really cool finished implementation of EfficientNet built entirely in ```froog```!
 
 In order to run EfficientNet inference:
 
 ```bash
-VIZ=1
-```
-
-I would recommend checking out the <a href="https://github.com/kevbuh/froog/blob/main/models/efficientnet.py">code</a>, it's highly documented and pretty cool. Here's some of the documentation
-```
-Paper           : https://arxiv.org/abs/1905.11946
-PyTorch version : https://github.com/lukemelas/EfficientNet-PyTorch/blob/master/efficientnet_pytorch/model.py
-
-ConvNets are commonly developed at a fixed resource cost, and then scaled up in order to achieve better accuracy when more resources are made available
-The scaling method was found by performing a grid search to find the relationship between different scaling dimensions of the baseline network under a fixed resource constraint
-"SE" stands for "Squeeze-and-Excitation." Introduced by the "Squeeze-and-Excitation Networks" paper by Jie Hu, Li Shen, and Gang Sun (CVPR 2018).
-
-Environment Variables:
-  VIZ=1 --> plots processed image and output probabilities
-
-How to Run:
-  'VIZ=1 python models/efficientnet.py https://your_image_url'
-
-EfficientNet Hyper-Parameters and Weights:
-url_map = {
-  'efficientnet-b0': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b0-355c32eb.pth',
-  'efficientnet-b1': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b1-f1951068.pth',
-  'efficientnet-b2': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b2-8bb594d6.pth',
-  'efficientnet-b3': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b3-5fb5a3c3.pth',
-  'efficientnet-b4': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b4-6ed6700e.pth',
-  'efficientnet-b5': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b5-b6417697.pth',
-  'efficientnet-b6': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b6-c76e70fd.pth',
-  'efficientnet-b7': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b7-dcc49843.pth',
-}
-
-params_dict = {
-  # Coefficients:   width,depth,res,dropout
-  'efficientnet-b0': (1.0, 1.0, 224, 0.2),
-  'efficientnet-b1': (1.0, 1.1, 240, 0.2),
-  'efficientnet-b2': (1.1, 1.2, 260, 0.3),
-  'efficientnet-b3': (1.2, 1.4, 300, 0.3),
-  'efficientnet-b4': (1.4, 1.8, 380, 0.4),
-  'efficientnet-b5': (1.6, 2.2, 456, 0.4),
-  'efficientnet-b6': (1.8, 2.6, 528, 0.5),
-  'efficientnet-b7': (2.0, 3.1, 600, 0.5),
-  'efficientnet-b8': (2.2, 3.6, 672, 0.5),
-  'efficientnet-l2': (4.3, 5.3, 800, 0.5),
-}
-
-blocks_args = [
-  'r1_k3_s11_e1_i32_o16_se0.25',
-  'r2_k3_s22_e6_i16_o24_se0.25',
-  'r2_k5_s22_e6_i24_o40_se0.25',
-  'r3_k3_s22_e6_i40_o80_se0.25',
-  'r3_k5_s11_e6_i80_o112_se0.25',
-  'r4_k5_s22_e6_i112_o192_se0.25',
-  'r1_k3_s11_e6_i192_o320_se0.25',
-]
+VIZ=1 python3 models/efficientnet.py <https://put_your_image_url_here>
 ```
 
-
-
-Doing linear regression in ```froog``` is pretty easy, check out the entire <a href="https://github.com/kevbuh/froog/blob/main/models/linear_regression.py">code</a>.
-
-```bash
-VIZ=1 python3 linear_regression.py
-```
+I would recommend checking out the <a href="https://github.com/kevbuh/froog/blob/main/models/efficientnet.py">code</a>, it's highly documented and pretty cool.
 
 # Contributing
 <!-- THERES LOT OF STUFF TO WORK ON! VISIT THE <a href="https://github.com/kevbuh/froog/blob/main/docs/bounties.md">BOUNTY SHOP</a> -->
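To make the README's GPU snippet concrete: a hedged sketch of the full round-trip. Tensor(...), .to_gpu(), and .cpu() appear in the diff itself; the GPU flag and IdentityModel are illustrative stand-ins, and this assumes an OpenCL device is available:

```python
import numpy as np
from froog.tensor import Tensor

class IdentityModel:
  def forward(self, x):
    return x  # a real model would chain froog ops here

GPU = True  # stand-in for however the caller detects OpenCL
model = IdentityModel()
img = np.random.randn(1, 3, 224, 224).astype(np.float32)

if GPU:
  out = model.forward(Tensor(img).to_gpu()).cpu()  # device round-trip
else:
  out = model.forward(Tensor(img))                 # stays on the host
```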
@@ -282,12 +227,7 @@ Pull requests will be merged if they:
 * increase functionality
 * increase efficiency
 
-More info on <a href="https://github.com/kevbuh/froog/blob/main/docs/contributing.md">contributing</a>.
-
-# Documentation
-
-Need more information about how ```froog``` works? Visit the <a href="https://github.com/kevbuh/froog/tree/main/docs">documentation</a>.
-
-# Interested in more?
+More info on <a href="https://github.com/kevbuh/froog/blob/main/docs/contributing.md">contributing</a>. Make sure to run ```python -m pytest``` before creating a PR.
 
-
+<!-- # Documentation
+Need more information about how ```froog``` works? Visit the <a href="https://github.com/kevbuh/froog/tree/main/docs">documentation</a>. -->
{froog-0.4.0.dist-info → froog-0.4.2.dist-info}/RECORD
CHANGED
@@ -1,13 +1,13 @@
 froog/__init__.py,sha256=Mzxgj9bA2G4kcmbmY8fY0KCKgimPucn3hTVRWBJ-5_Q,57
 froog/gradcheck.py,sha256=HlA0VDKE-c44o0E73QsUTIVoNs-w_C9FyKFlHfoagIQ,2415
 froog/nn.py,sha256=_5dzIoxz1L4yEnYfONVc8xIs8vqRpUBBwZwHLvBu9yY,2023
-froog/ops.py,sha256=
+froog/ops.py,sha256=1JtzHJf9fMy9ccmVhNIHIbanvoxMYPyZ5WCUliyj8tU,16890
 froog/ops_gpu.py,sha256=ANDJiWS0e1ehcGCSDo_ZOOowaEPZrz2__FkX5z5uYf4,19367
-froog/optim.py,sha256=
+froog/optim.py,sha256=BucVi-j-kphiG4ao7aCMbtxgF6PGcCHITWkgr7Ao0QU,2448
 froog/tensor.py,sha256=Wix4pE5-OIY8Pvv3bqNCSU_-c_wZV2HrmAtBwMPmAfE,7636
 froog/utils.py,sha256=vs9bmBOyfy0_NR8jPl2DMWBCAqIacJ6a75Lbso2MAKs,3347
-froog-0.4.
-froog-0.4.
-froog-0.4.
-froog-0.4.
-froog-0.4.
+froog-0.4.2.dist-info/LICENSE,sha256=k_856uNmcNUoLC_HkI18c1WomqvQ1Ioqk6gwYfWQiaM,31
+froog-0.4.2.dist-info/METADATA,sha256=Z0U4MY_eWhxH2VXnR876fySyTJRRTjp4wKHWSwSVoRY,10442
+froog-0.4.2.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
+froog-0.4.2.dist-info/top_level.txt,sha256=XPz35C_JWu20LlsVxIMdMZn8DD58Ak78LwgWFBGYZwY,6
+froog-0.4.2.dist-info/RECORD,,
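Each RECORD entry above has the form path,sha256=<digest>,<size>, where the digest is an unpadded urlsafe-base64 SHA-256 of the file and the size is in bytes. A small sketch of how such a line is reproduced (standard wheel RECORD convention; the example path assumes a local checkout):

```python
import base64, hashlib

def record_line(path):
  # "path,sha256=<urlsafe base64, '=' padding stripped>,<size in bytes>"
  data = open(path, "rb").read()
  digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest())
  return f"{path},sha256={digest.rstrip(b'=').decode()},{len(data)}"

# record_line("froog/ops.py") should reproduce the entry above:
# froog/ops.py,sha256=1JtzHJf9fMy9ccmVhNIHIbanvoxMYPyZ5WCUliyj8tU,16890
```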
{froog-0.4.0.dist-info → froog-0.4.2.dist-info}/LICENSE
File without changes
{froog-0.4.0.dist-info → froog-0.4.2.dist-info}/top_level.txt
File without changes