froog 0.4.2__py3-none-any.whl → 0.5.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
froog/utils.py CHANGED
@@ -9,8 +9,9 @@
  import numpy as np
  from functools import lru_cache
  import pathlib, hashlib, os, tempfile, urllib
+ from typing import Tuple

- def fetch(url):
+ def fetch(url: str) -> pathlib.Path:
    if url.startswith(("/", ".")): return pathlib.Path(url)
    else: fp = pathlib.Path("_cache_dir") / "froog" / "downloads" / (hashlib.md5(url.encode('utf-8')).hexdigest())
    if not fp.is_file():
@@ -25,7 +26,7 @@ def fetch(url):
      pathlib.Path(f.name).rename(fp)
    return fp

- def fetch_mnist():
+ def fetch_mnist() -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
    import gzip
    parse = lambda file: np.frombuffer(gzip.open(file).read(), dtype=np.uint8).copy()
    BASE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"
@@ -35,13 +36,13 @@ def fetch_mnist():
    Y_test = parse(fetch(f"{BASE_URL}t10k-labels-idx1-ubyte.gz"))[8:].astype(np.int8)
    return X_train, Y_train, X_test, Y_test

- def mask_like(like, mask_inx, mask_value=1.0):
+ def mask_like(like: np.ndarray, mask_inx: np.ndarray, mask_value: float = 1.0) -> np.ndarray:
    mask = np.zeros_like(like).reshape(-1) # flatten
    mask[mask_inx] = mask_value # fill
    return mask.reshape(like.shape)

  @lru_cache
- def get_im2col_index(oy, ox, cin, H, W):
+ def get_im2col_index(oy: int, ox: int, cin: int, H: int, W: int) -> np.ndarray:
    idx_channel = np.tile(np.arange(cin).repeat(H*W), oy*ox)
    idx_y = np.tile(np.arange(H).repeat(W), oy*ox*cin) + np.arange(oy).repeat(ox*cin*H*W)
    idx_x = np.tile(np.arange(W), oy*ox*cin*H) + np.tile(np.arange(ox), oy).repeat(cin*H*W)
@@ -50,7 +51,7 @@ def get_im2col_index(oy, ox, cin, H, W):
    return idx

  @lru_cache
- def rearrange_col2im_index(oy, ox, cin, H, W):
+ def rearrange_col2im_index(oy: int, ox: int, cin: int, H: int, W: int) -> np.ndarray:
    idx = get_im2col_index(oy, ox, cin, H, W)
    r_idx = np.zeros((np.max(idx)+1, H*W), dtype=idx.dtype)-1
    for i,x in enumerate(idx):
@@ -61,7 +62,7 @@ def rearrange_col2im_index(oy, ox, cin, H, W):
    return r_idx

  # im2col convolution helpers
- def im2col(x, H, W):
+ def im2col(x: np.ndarray, H: int, W: int) -> np.ndarray:
    bs, cin, oy, ox = x.shape[0], x.shape[1], x.shape[2]-(H-1), x.shape[3]-(W-1)
    idx = get_im2col_index(oy, ox, cin, H, W)
    tx = x.reshape(bs, -1)[:, idx]
@@ -71,7 +72,7 @@ def im2col(x, H, W):
    tx = tx.ravel()
    return tx.reshape(-1, cin*W*H)

- def col2im(tx, H, W, OY, OX):
+ def col2im(tx: np.ndarray, H: int, W: int, OY: int, OX: int) -> np.ndarray:
    oy, ox = OY-(H-1), OX-(W-1)
    bs = tx.shape[0] // (oy * ox)
    channels_in = tx.shape[1] // (H * W)
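
For reference, a minimal sketch of how these helpers compose (not part of the diff; it assumes `col2im` scatters the patch columns back into a `(bs, cin, OY, OX)` array, consistent with the signature above):

```python
import numpy as np
from froog.utils import im2col, col2im

x = np.arange(2*3*5*5, dtype=np.float32).reshape(2, 3, 5, 5)  # (bs, cin, Y, X)
cols = im2col(x, 3, 3)           # one row per 3x3 patch position: (bs*oy*ox, cin*3*3)
print(cols.shape)                # (18, 27), since oy = ox = 5 - (3-1) = 3
back = col2im(cols, 3, 3, 5, 5)  # scatter the patch columns back to (2, 3, 5, 5)
print(back.shape)
```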
froog-0.5.0.dist-info/METADATA ADDED
@@ -0,0 +1,205 @@
+ Metadata-Version: 2.1
+ Name: froog
+ Version: 0.5.0
+ Summary: tensor library with opencl and metal support
+ Author: Kevin Buhler
+ License: MIT
+ Classifier: Programming Language :: Python :: 3
+ Classifier: License :: OSI Approved :: MIT License
+ Requires-Python: >=3.8
+ Description-Content-Type: text/markdown
+ Requires-Dist: numpy
+ Requires-Dist: requests
+ Requires-Dist: matplotlib
+
+ # froog <img src="https://github.com/kevbuh/froog/actions/workflows/test.yml/badge.svg" alt="unit test badge" > <img src="https://static.pepy.tech/badge/froog" alt="num downloads badge">
+ <div align="center" >
+ <img src="https://raw.githubusercontent.com/kevbuh/froog/main/assets/froog.png" alt="froog the frog" height="200">
+ <br/>
+ froog: fast real-time optimization of gradients
+ <br/>
+ a beautifully compact tensor library
+ <br/>
+ <a href="https://github.com/kevbuh/froog">homepage</a> | <a href="https://github.com/kevbuh/froog/tree/main/DOCS.md">documentation</a> | <a href="https://pypi.org/project/froog/">pip</a>
+ <br/>
+ <br/>
+ </div>
+
+ ```froog``` is an easy-to-read tensor library (<a href="https://www.pepy.tech/projects/froog">25k pip installs!</a>) with OpenCL and Metal support for GPU acceleration. Inspired by pytorch, tinygrad, and micrograd.
+
+ # Installation
+ ```bash
+ pip install froog
+ ```
+
+ # Features
+ - <a href="https://github.com/kevbuh/froog/blob/main/froog/tensor.py">Custom Tensors</a>
+ - Backpropagation
+ - Automatic Differentiation (autograd)
+ - Forward and backward passes
+ - <a href="https://github.com/kevbuh/froog/blob/main/froog/ops.py">ML Operations</a>
+ - 2D Convolutions (im2col)
+ - Numerical gradient checking
+ - Acceleration methods (Adam)
+ - Avg & Max pooling
+ - <a href="https://github.com/kevbuh/froog/blob/main/models/efficientnet.py">EfficientNet</a> inference
+ - <a href="https://github.com/kevbuh/froog/blob/main/froog/gpu/ops_gpu.py">GPU Support</a>
+ - <a href="https://github.com/kevbuh/froog/blob/main/docs/env.md">Configuration via Environment Variables</a>
+ - and a bunch <a href="https://github.com/kevbuh/froog/tree/main/froog">more</a>
+
+ # Example
+
+ Here's how you set up a simple multilayer perceptron for classification on MNIST. Looks pretty similar to pytorch, right?
+
+ ```python
+ from froog.tensor import Tensor
+ from froog.nn import Linear
+ import froog.optim as optim
+
+ class mnistMLP:
+   def __init__(self):
+     self.l1 = Tensor(Linear(784, 128)) # layer 1
+     self.l2 = Tensor(Linear(128, 10))  # layer 2
+
+   def forward(self, x):
+     # forward pass through both layers, with logsoftmax for output log-probabilities
+     return x.dot(self.l1).relu().dot(self.l2).logsoftmax()
+
+ model = mnistMLP() # create model
+ optim = optim.SGD([model.l1, model.l2], lr=0.001) # stochastic gradient descent optimizer
+ ```
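
Continuing from the block above, one training step might look like the following. This is a rough sketch, not code from the package: it assumes `fetch_mnist` from `froog.utils` (shown in the diff above) returns images shaped `(N, 28, 28)`, that the SGD instance exposes a `step()` method, and the usual pairing of a `logsoftmax` output with a one-hot NLL-style loss.

```python
import numpy as np
from froog.tensor import Tensor
from froog.utils import fetch_mnist  # type-annotated in this release

X_train, Y_train, X_test, Y_test = fetch_mnist()

samp = np.random.randint(0, X_train.shape[0], size=(32,))        # batch of 32
x = Tensor(X_train[samp].reshape(-1, 28*28).astype(np.float32))  # flatten images
y = np.zeros((len(samp), 10), dtype=np.float32)
y[range(y.shape[0]), Y_train[samp]] = -1.0  # -1 at each true class
out = model.forward(x)                # log-probabilities from the MLP above
loss = out.mul(Tensor(y)).mean()      # proportional to the negative log-likelihood
loss.backward()                       # backprop through the graph
optim.step()                          # apply the SGD update
```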
+
+ # GPU Support
+
+ froog handles device management transparently, automatically selecting one of ```[METAL, OPENCL, CPU]```. To use the GPU:
+
+ ```python
+ from froog.tensor import Tensor
+ from froog import get_device
+ # Check if GPU is available
+ has_gpu = get_device() is not None and get_device().name != "CPU"
+ # Create a tensor
+ x = Tensor([1, 2, 3])
+ # Push to GPU if available
+ if has_gpu: x = x.to_gpu()
+ # Operations run on GPU automatically
+ y = x + x
+ z = y * y
+ # Bring back to CPU when needed
+ result = z.to_cpu()
+ print(result.data)
+ ```
+
+ You can also check what devices are available:
+
+ ```python
+ from froog import get_available_devices
+ available_devices = get_available_devices()
+ print(f"Available devices: {available_devices}")
+ ```
+
+ Or set a specific device:
+
+ ```python
+ from froog import set_device
+ set_device("METAL") # or "OPENCL"
+ ```
+
+ # EfficientNet in froog!
+
+ <img src="assets/efficientnet_pug.png" alt="pug" height="300">
+
+ We have an implementation of [EfficientNet v2](https://arxiv.org/abs/2104.00298) built entirely in ```froog``` using the official PyTorch weights! Run inference with:
+
+ ```bash
+ python3 models/efficientnet.py <https://optional_image_url>
+
+ ***********output*************
+ inference 4.34 s
+
+ imagenet class: 254
+ prediction : pug, pug-dog
+ probability : 0.8902361
+ ******************************
+ ```
+
+ I would recommend checking out the <a href="https://github.com/kevbuh/froog/blob/main/models/efficientnet.py">code</a>; it's highly documented and pretty cool.
+
+ # Contributing
+ <!-- THERES LOT OF STUFF TO WORK ON! VISIT THE <a href="https://github.com/kevbuh/froog/blob/main/docs/bounties.md">BOUNTY SHOP</a> -->
+
+ Pull requests will be merged if they:
+ * increase simplicity
+ * increase functionality
+ * increase efficiency
+
+ More info on <a href="https://github.com/kevbuh/froog/blob/main/docs/contributing.md">contributing</a>. Make sure to run ```python -m pytest``` before creating a PR.
+
+ # API
+
+ ## Basic Math Operations
+ - ```.add(y)``` - Addition with y
+ - ```.sub(y)``` - Subtraction with y
+ - ```.mul(y)``` - Multiplication with y
+ - ```.div(y)``` - Division by y
+ - ```.pow(y)``` - Power function (raise to power y)
+ - ```.sum()``` - Sum all elements
+ - ```.mean()``` - Mean of all elements
+ - ```.sqrt()``` - Square root
+
+ ## Linear Algebra Operations
+ - ```.dot(y)``` - Matrix multiplication with y
+ - ```.matmul(y)``` - Alias for dot
+
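A quick illustration of a few of these in combination (a sketch, not from the package docs; it assumes the `Tensor` constructor accepts NumPy arrays, in line with the `.data` property below):

```python
import numpy as np
from froog.tensor import Tensor

a = Tensor(np.array([[1., 2.], [3., 4.]], dtype=np.float32))
b = Tensor(np.array([[5., 6.], [7., 8.]], dtype=np.float32))

c = a.add(b)        # elementwise addition
d = a.dot(b)        # 2x2 matrix product
e = d.mul(c).sum()  # elementwise multiply, then reduce to a scalar
print(e.data)
```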
+ ## Neural Network Operations
+ - ```.relu()``` - Rectified Linear Unit activation
+ - ```.sigmoid()``` - Sigmoid activation
+ - ```.dropout(p=0.5, training=True)``` - Dropout regularization
+ - ```.logsoftmax()``` - Log softmax function
+ - ```.swish()``` - Swish activation function (x * sigmoid(x))
+ - ```.conv2d(w, stride=1, groups=1)``` - 2D convolution
+ - ```.im2col2dconv(w)``` - Image to column for convolution
+
+ ## Pooling Operations
+ - ```.max_pool2d(kernel_size=(2,2))``` - 2D max pooling
+ - ```.avg_pool2d(kernel_size=(2,2))``` - 2D average pooling
+
+ ## Tensor Manipulation
+ - ```.reshape(*shape)``` - Change tensor shape
+ - ```.view(*shape)``` - Alternative to reshape
+ - ```.pad2d(padding=None)``` - Pad 2D tensors
+ - ```.flatten()``` - Returns a flattened 1D copy of the tensor
+ - ```.unsqueeze(dim)``` - Add dimension of size 1 at specified position
+ - ```.squeeze(dim=None)``` - Remove dimensions of size 1
+ - ```.detach()``` - Returns a tensor detached from computation graph
+ - ```.assign(x)``` - Assign values from tensor x to this tensor
+
+ ## Tensor Properties
+ - ```.shape``` - The shape of the tensor as a tuple
+ - ```.size``` - Total number of elements in the tensor
+ - ```.ndim``` - Number of dimensions (rank) of the tensor
+ - ```.transpose``` - Transpose of the tensor
+ - ```.dtype``` - Data type of the tensor
+ - ```.is_gpu``` - Whether tensor is on GPU
+ - ```.grad``` - Gradient of tensor with respect to some scalar value
+ - ```.data``` - Underlying NumPy array (or GPU buffer)
+
+ ## Device Management
+ - ```.to_cpu()``` - Moves tensor to CPU
+ - ```.to_gpu()``` - Moves tensor to GPU
+ - ```.gpu_()``` - In-place GPU conversion (modifies tensor)
+
+ ## Data Type Conversion
+ - ```.to_float()``` - Converts tensor to float32 data type
+ - ```.to_int()``` - Converts tensor to int32 data type
+ - ```.to_bool()``` - Converts tensor to boolean data type
+
+ ## Autograd Operations
+ - ```.backward(allow_fill=True)``` - Performs backpropagation
+
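A minimal end-to-end backward pass (illustrative; it assumes gradients land in the `.grad` property listed above, and reduces the output to a scalar before calling `.backward()`):

```python
import numpy as np
from froog.tensor import Tensor

x = Tensor(np.random.randn(3, 3).astype(np.float32))
w = Tensor(np.random.randn(3, 3).astype(np.float32))

out = x.dot(w).relu().sum()  # scalar output
out.backward()               # populates x.grad and w.grad
print(w.grad)                # d(out)/d(w)
```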
+ ## Tensor Creation Methods
+ - ```Tensor.zeros(*shape)``` - Create tensor of zeros
+ - ```Tensor.ones(*shape)``` - Create tensor of ones
+ - ```Tensor.randn(*shape)``` - Create tensor with random normal values
+ - ```Tensor.eye(dim)``` - Create identity matrix
+ - ```Tensor.arange(start, stop=None, step=1)``` - Create tensor with evenly spaced values
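
And a short sketch exercising the creation methods above (illustrative):

```python
from froog.tensor import Tensor

zeros = Tensor.zeros(2, 3)      # 2x3 tensor of zeros
ones  = Tensor.ones(2, 3)       # 2x3 tensor of ones
noise = Tensor.randn(3, 3)      # samples from a standard normal
ident = Tensor.eye(4)           # 4x4 identity matrix
ramp  = Tensor.arange(0, 10, 2) # values [0, 2, 4, 6, 8]
print(ramp.data)
```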
froog-0.5.0.dist-info/RECORD ADDED
@@ -0,0 +1,10 @@
+ froog/__init__.py,sha256=kPFrHYPINIUmboWdWHWziYU7FRZhWCzrMjHKvusfvh0,1004
+ froog/gradient.py,sha256=QeSPpgMSOOSZnVghwYIvTtt1HVVsrc4eDr73uXFkE-M,2626
+ froog/ops.py,sha256=6CJWxZi9ksSVC_AWrXRBz_ndXC_b7lC206AbeaNYsdc,26241
+ froog/optim.py,sha256=rkibUce0rG5O1O4fLsNuS54fd3fzdjQarAyTqK61-1I,5443
+ froog/tensor.py,sha256=jD8rspVMFI__sFMAeKEu_hVhhnfvtgc92oAkNJVKEJg,9688
+ froog/utils.py,sha256=SUXMWOka-yJmib1jeC6KgHI_hOtSzgrakiRA0tCk4vc,3657
+ froog-0.5.0.dist-info/METADATA,sha256=8mSdbDlPXWl3ZzGTtPgTOGM0rwZ7zNlE-hOs1gt4s2c,7342
+ froog-0.5.0.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
+ froog-0.5.0.dist-info/top_level.txt,sha256=XPz35C_JWu20LlsVxIMdMZn8DD58Ak78LwgWFBGYZwY,6
+ froog-0.5.0.dist-info/RECORD,,
froog/nn.py DELETED
@@ -1,60 +0,0 @@
- #  _______  ______    _______  _______  _______
- # |       ||    _ |  |       ||       ||       |
- # |    ___||   | ||  |   _   ||   _   ||    ___|
- # |   |___ |   |_||_ |  | |  ||  | |  ||   | __
- # |    ___||    __  ||  |_|  ||  |_|  ||  ||  |
- # |   |    |   |  | ||       ||       ||   |_| |
- # |___|    |___|  |_||_______||_______||_______|
-
- from froog.tensor import Tensor
- import numpy as np
-
- def Linear(*x):
-   # random Glorot initialization
-   ret = np.random.uniform(-1., 1., size=x)/np.sqrt(np.prod(x))
-   return ret.astype(np.float32)
-
- def swish(x):
-   return x.mul(x.sigmoid())
-
- # *************************************
- #     _   ___   __   ____  ____  _____
- #    / | / / | / /  / __ \/ __ \/ ___/
- #   /  |/ /  |/ /  / / / / /_/ /\__ \
- #  / /|  / /|  /  / /_/ / ____/___/ /
- # /_/ |_/_/ |_/   \____/_/    /____/
- #
- # ************* nn ops ************
-
- class BatchNorm2D:
-   """
-   __call__ follows the formula from the link below
-   pytorch version: https://pytorch.org/docs/stable/generated/torch.nn.BatchNorm2d.html
-
-   self.weight       = γ
-   self.bias         = β
-   self.running_mean = E[x]
-   self.running_var  = Var[x]
-
-   the reshaping step ensures that each channel of the input has its
-   own separate set of parameters (mean, variance, weight, and bias)
-
-   self.running_mean has shape [num_channels].
-   self.running_mean.reshape(shape=[1, -1, 1, 1]) reshapes it to [1, num_channels, 1, 1]
-   """
-   def __init__(self, sz, eps=0.001):
-     self.eps = eps
-     self.weight = Tensor.zeros(sz)
-     self.bias = Tensor.zeros(sz)
-
-     # TODO: need running_mean and running_var
-     self.running_mean = Tensor.zeros(sz)
-     self.running_var = Tensor.zeros(sz)
-     self.num_batches_tracked = Tensor.zeros(1)
-
-   def __call__(self, x):
-     x = x.sub(self.running_mean.reshape(shape=[1, -1, 1, 1]))
-     x = x.mul(self.weight.reshape(shape=[1, -1, 1, 1]))
-     x = x.div(self.running_var.add(Tensor([self.eps], gpu=x.gpu)).reshape(shape=[1, -1, 1, 1]).sqrt())
-     x = x.add(self.bias.reshape(shape=[1, -1, 1, 1]))
-     return x