froog 0.4.2__tar.gz → 0.5.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- froog-0.5.0/PKG-INFO +202 -0
- froog-0.5.0/README.md +191 -0
- froog-0.5.0/froog/__init__.py +36 -0
- froog-0.4.2/froog/gradcheck.py → froog-0.5.0/froog/gradient.py +4 -11
- froog-0.5.0/froog/ops.py +627 -0
- froog-0.5.0/froog/optim.py +145 -0
- froog-0.5.0/froog/tensor.py +236 -0
- {froog-0.4.2 → froog-0.5.0}/froog/utils.py +8 -7
- froog-0.5.0/froog.egg-info/PKG-INFO +202 -0
- {froog-0.4.2 → froog-0.5.0}/froog.egg-info/SOURCES.txt +2 -9
- {froog-0.4.2 → froog-0.5.0}/setup.py +2 -2
- froog-0.4.2/LICENSE +0 -1
- froog-0.4.2/PKG-INFO +0 -230
- froog-0.4.2/README.md +0 -218
- froog-0.4.2/froog/__init__.py +0 -3
- froog-0.4.2/froog/ops.py +0 -434
- froog-0.4.2/froog/ops_gpu.py +0 -598
- froog-0.4.2/froog/optim.py +0 -73
- froog-0.4.2/froog/tensor.py +0 -236
- froog-0.4.2/froog.egg-info/PKG-INFO +0 -230
- froog-0.4.2/tests/test_conv_speed.py +0 -99
- froog-0.4.2/tests/test_models.py +0 -135
- froog-0.4.2/tests/test_ops.py +0 -100
- froog-0.4.2/tests/test_optim.py +0 -65
- froog-0.4.2/tests/test_tensor.py +0 -76
- {froog-0.4.2 → froog-0.5.0}/froog.egg-info/dependency_links.txt +0 -0
- {froog-0.4.2 → froog-0.5.0}/froog.egg-info/requires.txt +0 -0
- {froog-0.4.2 → froog-0.5.0}/froog.egg-info/top_level.txt +0 -0
- {froog-0.4.2 → froog-0.5.0}/setup.cfg +0 -0
froog-0.5.0/PKG-INFO
ADDED
@@ -0,0 +1,202 @@
Metadata-Version: 2.1
Name: froog
Version: 0.5.0
Summary: tensor library with opencl and metal support
Author: Kevin Buhler
License: MIT
Classifier: Programming Language :: Python :: 3
Classifier: License :: OSI Approved :: MIT License
Requires-Python: >=3.8
Description-Content-Type: text/markdown

# froog <img src="https://github.com/kevbuh/froog/actions/workflows/test.yml/badge.svg" alt="unit test badge" > <img src="https://static.pepy.tech/badge/froog" alt="num downloads badge">
<div align="center" >
  <img src="https://raw.githubusercontent.com/kevbuh/froog/main/assets/froog.png" alt="froog the frog" height="200">
  <br/>
  froog: fast real-time optimization of gradients
  <br/>
  a beautifully compact tensor library
  <br/>
  <a href="https://github.com/kevbuh/froog">homepage</a> | <a href="https://github.com/kevbuh/froog/tree/main/DOCS.md">documentation</a> | <a href="https://pypi.org/project/froog/">pip</a>
  <br/>
  <br/>
</div>

```froog``` is an easy-to-read tensor library (<a href="https://www.pepy.tech/projects/froog">25k pip installs!</a>) with OpenCL and Metal support for GPU acceleration. Inspired by PyTorch, tinygrad, and micrograd.

# Installation
```bash
pip install froog
```

# Features
- <a href="https://github.com/kevbuh/froog/blob/main/froog/tensor.py">Custom Tensors</a>
  - Backpropagation
  - Automatic Differentiation (autograd)
  - Forward and backward passes
- <a href="https://github.com/kevbuh/froog/blob/main/froog/ops.py">ML Operations</a>
  - 2D Convolutions (im2col)
  - Numerical gradient checking
  - Acceleration methods (Adam)
  - Avg & Max pooling
- <a href="https://github.com/kevbuh/froog/blob/main/models/efficientnet.py">EfficientNet</a> inference
- <a href="https://github.com/kevbuh/froog/blob/main/froog/gpu/ops_gpu.py">GPU Support</a>
- <a href="https://github.com/kevbuh/froog/blob/main/docs/env.md">Configuration via Environment Variables</a>
- and a bunch <a href="https://github.com/kevbuh/froog/tree/main/froog">more</a>

# Example

Here's how you set up a simple multilayer perceptron for classification on MNIST. Looks pretty similar to PyTorch, right?

```python
from froog.tensor import Tensor
from froog.nn import Linear
import froog.optim as optim

class mnistMLP:
  def __init__(self):
    self.l1 = Tensor(Linear(784, 128)) # layer 1
    self.l2 = Tensor(Linear(128, 10))  # layer 2

  def forward(self, x):
    # forward pass through both layers and softmax for output probabilities
    return x.dot(self.l1).relu().dot(self.l2).logsoftmax()

model = mnistMLP() # create model
optim = optim.SGD([model.l1, model.l2], lr=0.001) # stochastic gradient descent optimizer
```
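
For context, here is a minimal sketch of a single training step built on the model above. It assumes the SGD optimizer exposes a ```step()``` method (see ```froog/optim.py```) and assembles an NLL-style loss from the ```mul```/```mean``` ops listed in the API section below; treat it as illustrative rather than the package's canonical training loop.

```python
import numpy as np
from froog.tensor import Tensor

# x: a batch of flattened MNIST images, y: negative one-hot targets for an NLL-style loss
x = Tensor(np.random.randn(32, 784).astype(np.float32))
targets = np.random.randint(0, 10, size=(32,))
y = np.zeros((32, 10), dtype=np.float32)
y[range(32), targets] = -1.0              # negative one-hot, pairs with logsoftmax output

out = model.forward(x)                    # log-probabilities from the MLP above
loss = out.mul(Tensor(y)).mean()          # scalar NLL-style loss
loss.backward()                           # populate .grad on l1 and l2
optim.step()                              # assumed SGD update; check froog/optim.py
```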

# GPU Support

Device management is handled transparently; froog automatically selects one of ```[METAL, OPENCL, CPU]```. To use the GPU:

```python
from froog.tensor import Tensor
from froog import get_device
# Check if GPU is available
has_gpu = get_device() is not None and get_device().name != "CPU"
# Create a tensor
x = Tensor([1, 2, 3])
# Push to GPU if available
if has_gpu: x = x.to_gpu()
# Operations run on GPU automatically
y = x + x
z = y * y
# Bring back to CPU when needed
result = z.to_cpu()
print(result.data)
```

You can also check which devices are available:

```python
from froog import get_available_devices
available_devices = get_available_devices()
print(f"Available devices: {available_devices}")
```

Or set a specific device:

```python
from froog import set_device
set_device("METAL") # or "OPENCL"
```

# EfficientNet in froog!

<img src="assets/efficientnet_pug.png" alt="pug" height="300">

We have an implementation of [EfficientNet v2](https://arxiv.org/abs/2104.00298) built entirely in ```froog``` using the official PyTorch weights! Run inference with:

```bash
python3 models/efficientnet.py <https://optional_image_url>

***********output*************
inference 4.34 s

imagenet class: 254
prediction    : pug, pug-dog
probability   : 0.8902361
******************************
```

I would recommend checking out the <a href="https://github.com/kevbuh/froog/blob/main/models/efficientnet.py">code</a>; it's heavily documented and pretty cool.

# Contributing
<!-- THERE'S LOTS OF STUFF TO WORK ON! VISIT THE <a href="https://github.com/kevbuh/froog/blob/main/docs/bounties.md">BOUNTY SHOP</a> -->

Pull requests will be merged if they:
* increase simplicity
* increase functionality
* increase efficiency

More info on <a href="https://github.com/kevbuh/froog/blob/main/docs/contributing.md">contributing</a>. Make sure to run ```python -m pytest``` before creating a PR.

# API

## Basic Math Operations
- ```.add(y)``` - Addition with y
- ```.sub(y)``` - Subtraction with y
- ```.mul(y)``` - Multiplication with y
- ```.div(y)``` - Division by y
- ```.pow(y)``` - Power function (raise to power y)
- ```.sum()``` - Sum all elements
- ```.mean()``` - Mean of all elements
- ```.sqrt()``` - Square root

## Linear Algebra Operations
- ```.dot(y)``` - Matrix multiplication with y
- ```.matmul(y)``` - Alias for dot
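
A short sketch of how these compose, using only the ops listed above (values are illustrative):

```python
import numpy as np
from froog.tensor import Tensor

a = Tensor(np.array([[1., 2.], [3., 4.]], dtype=np.float32))
b = Tensor(np.array([[5., 6.], [7., 8.]], dtype=np.float32))

c = a.add(b).mul(b)   # elementwise (a + b) * b
d = a.dot(b)          # 2x2 matrix product (matmul is an alias)
s = c.sum()           # scalar tensor: sum of all elements of c
m = d.mean()          # scalar tensor: mean of all elements of d
print(s.data, m.data)
```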

## Neural Network Operations
- ```.relu()``` - Rectified Linear Unit activation
- ```.sigmoid()``` - Sigmoid activation
- ```.dropout(p=0.5, training=True)``` - Dropout regularization
- ```.logsoftmax()``` - Log softmax function
- ```.swish()``` - Swish activation function (x * sigmoid(x))
- ```.conv2d(w, stride=1, groups=1)``` - 2D convolution
- ```.im2col2dconv(w)``` - Image to column for convolution
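
A sketch of the activation ops above on a small batch (dropout output depends on the ```training``` flag; the softmax axis is assumed to be the class dimension):

```python
import numpy as np
from froog.tensor import Tensor

x = Tensor(np.random.randn(4, 10).astype(np.float32))

h = x.relu()                          # negative entries clamped to 0
p = x.sigmoid()                       # squashed into (0, 1)
sw = x.swish()                        # x * sigmoid(x)
logp = x.logsoftmax()                 # log-probabilities over the class dimension
hd = h.dropout(p=0.5, training=True)  # randomly zeroes activations while training
```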

## Pooling Operations
- ```.max_pool2d(kernel_size=(2,2))``` - 2D max pooling
- ```.avg_pool2d(kernel_size=(2,2))``` - 2D average pooling
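
Pooling works on 4D image-style tensors; this sketch assumes the usual (batch, channels, height, width) layout used by conv/pool ops:

```python
import numpy as np
from froog.tensor import Tensor

imgs = Tensor(np.random.randn(2, 3, 8, 8).astype(np.float32))  # assumed NCHW layout

mp = imgs.max_pool2d(kernel_size=(2, 2))  # 2x2 windows -> roughly (2, 3, 4, 4)
ap = imgs.avg_pool2d(kernel_size=(2, 2))  # 2x2 windows -> roughly (2, 3, 4, 4)
print(mp.shape, ap.shape)
```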

## Tensor Manipulation
- ```.reshape(*shape)``` - Change tensor shape
- ```.view(*shape)``` - Alternative to reshape
- ```.pad2d(padding=None)``` - Pad 2D tensors
- ```.flatten()``` - Returns a flattened 1D copy of the tensor
- ```.unsqueeze(dim)``` - Add dimension of size 1 at specified position
- ```.squeeze(dim=None)``` - Remove dimensions of size 1
- ```.detach()``` - Returns a tensor detached from computation graph
- ```.assign(x)``` - Assign values from tensor x to this tensor
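
A sketch of the shape-manipulation helpers above:

```python
import numpy as np
from froog.tensor import Tensor

t = Tensor(np.arange(12, dtype=np.float32))

a = t.reshape(3, 4)   # 1D -> 3x4
b = a.unsqueeze(0)    # add a leading dim of size 1 -> 1x3x4
c = b.squeeze(0)      # drop it again -> 3x4
f = c.flatten()       # flat 1D copy
d = c.detach()        # same values, detached from the autograd graph
print(a.shape, b.shape, c.shape, f.shape, d.shape)
```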

## Tensor Properties
- ```.shape``` - The shape of the tensor as a tuple
- ```.size``` - Total number of elements in the tensor
- ```.ndim``` - Number of dimensions (rank) of the tensor
- ```.transpose``` - Transpose of the tensor
- ```.dtype``` - Data type of the tensor
- ```.is_gpu``` - Whether tensor is on GPU
- ```.grad``` - Gradient of tensor with respect to some scalar value
- ```.data``` - Underlying NumPy array (or GPU buffer)
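
These are plain attributes, for example:

```python
import numpy as np
from froog.tensor import Tensor

t = Tensor(np.ones((2, 3), dtype=np.float32))
print(t.shape)    # (2, 3)
print(t.size)     # 6
print(t.ndim)     # 2
print(t.dtype)    # float32
print(t.is_gpu)   # False until .to_gpu() is called
print(t.grad)     # empty until backward() has run
```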

## Device Management
- ```.to_cpu()``` - Moves tensor to CPU
- ```.to_gpu()``` - Moves tensor to GPU
- ```.gpu_()``` - In-place GPU conversion (modifies tensor)

## Data Type Conversion
- ```.to_float()``` - Converts tensor to float32 data type
- ```.to_int()``` - Converts tensor to int32 data type
- ```.to_bool()``` - Converts tensor to boolean data type

## Autograd Operations
- ```.backward(allow_fill=True)``` - Performs backpropagation
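
A minimal autograd round trip using the ops documented above (```backward``` is called on a scalar result):

```python
import numpy as np
from froog.tensor import Tensor

x = Tensor(np.random.randn(3, 3).astype(np.float32))
y = x.mul(x).sum()   # scalar: sum of x**2
y.backward()         # backpropagate through the graph
print(x.grad)        # gradient of y with respect to x (elementwise 2*x)
```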

## Tensor Creation Methods
- ```Tensor.zeros(*shape)``` - Create tensor of zeros
- ```Tensor.ones(*shape)``` - Create tensor of ones
- ```Tensor.randn(*shape)``` - Create tensor with random normal values
- ```Tensor.eye(dim)``` - Create identity matrix
- ```Tensor.arange(start, stop=None, step=1)``` - Create tensor with evenly spaced values
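
And a quick tour of the constructors:

```python
from froog.tensor import Tensor

z = Tensor.zeros(2, 3)        # 2x3 of zeros
o = Tensor.ones(2, 3)         # 2x3 of ones
r = Tensor.randn(2, 3)        # 2x3 of standard-normal samples
i = Tensor.eye(3)             # 3x3 identity matrix
a = Tensor.arange(0, 10, 2)   # values 0, 2, 4, 6, 8
print(z.shape, o.shape, r.shape, i.shape, a.data)
```
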
froog-0.5.0/README.md
ADDED

froog-0.5.0/froog/__init__.py
ADDED
@@ -0,0 +1,36 @@
import froog.optim
import froog.tensor
import froog.utils

# Import GPU packages
import froog.gpu.cl.cl_utils

# Try to import Metal utils if available
try:
  import froog.gpu.metal.metal_utils
except ImportError:
  pass

# Import device management functions
from froog.gpu import (
  Device, OpenCLDevice, get_device, set_device,
  upload_tensor, download_tensor, is_buffer,
  allocate_buffer, synchronize, get_available_devices
)

# Try to import Metal device if available
try:
  from froog.gpu.metal import MetalDevice
  __all__ = [
    'Device', 'OpenCLDevice', 'MetalDevice',
    'get_device', 'set_device', 'upload_tensor',
    'download_tensor', 'is_buffer',
    'allocate_buffer', 'synchronize', 'get_available_devices'
  ]
except ImportError:
  __all__ = [
    'Device', 'OpenCLDevice',
    'get_device', 'set_device', 'upload_tensor',
    'download_tensor', 'is_buffer',
    'allocate_buffer', 'synchronize', 'get_available_devices'
  ]

froog-0.4.2/froog/gradcheck.py → froog-0.5.0/froog/gradient.py
RENAMED
@@ -7,16 +7,15 @@
 # |___| |___| |_||_______||_______||_______|

 import numpy as np
+from typing import Callable, Union, Any, Tuple
 from froog.tensor import Tensor
 from froog.utils import mask_like

-def jacobian(model, input):
+def jacobian(model: Callable[[Tensor], Tensor], input: Tensor) -> np.ndarray:
   output = model(input)
-
   ji = input.data.reshape(-1).shape[-1] # jacobian of input
   jo = output.data.reshape(-1).shape[-1] # jacobian of output
   J = np.zeros((jo, ji), dtype=np.float32)
-
   for o in range(jo):
     o_scalar = Tensor(mask_like(output.data, o, 1.)).mul(output).sum()
     o_scalar.backward()
@@ -24,7 +23,7 @@ def jacobian(model, input):
       J[o,i] = grad
   return J

-def numerical_jacobian(model, input, eps = 1e-6):
+def numerical_jacobian(model: Callable[[Tensor], Tensor], input: Tensor, eps: float = 1e-6) -> np.ndarray:
   # """
   # https://timvieira.github.io/blog/post/2017/04/21/how-to-test-gradient-implementations/
   # Computes :
@@ -37,24 +36,18 @@ def numerical_jacobian(model, input, eps = 1e-6):
   # NJ : an approx. of the Jacobian
   # """
   output = model(input)
-
   ji = input.data.reshape(-1).shape[-1]
   jo = output.data.reshape(-1).shape[-1]
   NJ = np.zeros((jo, ji), dtype=np.float32)
-
   for i in range(ji):
     eps_perturb = mask_like(input.data, i, mask_value = eps)
-
     output_perturb_add = model(Tensor(input.data + eps_perturb)).data.reshape(-1)
     output_perturb_sub = model(Tensor(input.data - eps_perturb)).data.reshape(-1)
-
     grad_approx = ((output_perturb_add) - (output_perturb_sub)) / (2*eps) # CDM: (f(x + h) - f(x - h)) / (2 * h)
-
     NJ[:,i] = grad_approx
-
   return NJ

-def gradcheck(model, input, eps = 1e-06, atol = 1e-5, rtol = 0.001):
+def gradcheck(model: Callable[[Tensor], Tensor], input: Tensor, eps: float = 1e-06, atol: float = 1e-5, rtol: float = 0.001) -> bool:
   """
   Checks whether computed gradient is close to numerical approximation of the Jacobian
   Params:
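
For reference, a usage sketch of the renamed module based on the signatures shown above (the model and input here are placeholders, not from the package):

```python
import numpy as np
from froog.tensor import Tensor
from froog.gradient import gradcheck, numerical_jacobian  # module renamed from gradcheck.py in 0.5.0

model = lambda t: t.mul(t).sum()                   # any Tensor -> Tensor callable with a scalar output
x = Tensor(np.random.randn(4).astype(np.float32))

NJ = numerical_jacobian(model, x)                  # central-difference approximation of the Jacobian
ok = gradcheck(model, x, eps=1e-6, atol=1e-5, rtol=0.001)  # True if autograd matches the approximation
print(NJ.shape, ok)
```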