comfi-fast-grnn-torch 0.0.1__tar.gz → 0.0.3__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- comfi_fast_grnn_torch-0.0.3/MANIFEST.in +1 -0
- {comfi_fast_grnn_torch-0.0.1 → comfi_fast_grnn_torch-0.0.3}/PKG-INFO +5 -17
- comfi_fast_grnn_torch-0.0.3/README.md +20 -0
- {comfi_fast_grnn_torch-0.0.1 → comfi_fast_grnn_torch-0.0.3}/pyproject.toml +3 -3
- comfi_fast_grnn_torch-0.0.3/src/comfi_fast_grnn_torch/__init__.py +1 -0
- {comfi_fast_grnn_torch-0.0.1 → comfi_fast_grnn_torch-0.0.3}/src/comfi_fast_grnn_torch.egg-info/PKG-INFO +5 -17
- {comfi_fast_grnn_torch-0.0.1 → comfi_fast_grnn_torch-0.0.3}/src/comfi_fast_grnn_torch.egg-info/SOURCES.txt +1 -0
- comfi_fast_grnn_torch-0.0.1/README.md +0 -32
- comfi_fast_grnn_torch-0.0.1/src/comfi_fast_grnn_torch/__init__.py +0 -0
- {comfi_fast_grnn_torch-0.0.1 → comfi_fast_grnn_torch-0.0.3}/setup.cfg +0 -0
- {comfi_fast_grnn_torch-0.0.1 → comfi_fast_grnn_torch-0.0.3}/src/comfi_fast_grnn_torch/ComfiFastGRNN.py +0 -0
- {comfi_fast_grnn_torch-0.0.1 → comfi_fast_grnn_torch-0.0.3}/src/comfi_fast_grnn_torch.egg-info/dependency_links.txt +0 -0
- {comfi_fast_grnn_torch-0.0.1 → comfi_fast_grnn_torch-0.0.3}/src/comfi_fast_grnn_torch.egg-info/requires.txt +0 -0
- {comfi_fast_grnn_torch-0.0.1 → comfi_fast_grnn_torch-0.0.3}/src/comfi_fast_grnn_torch.egg-info/top_level.txt +0 -0
comfi_fast_grnn_torch-0.0.3/MANIFEST.in
@@ -0,0 +1 @@
+include README.md
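
The new MANIFEST.in line makes the README explicit in source distributions. As a quick sanity check, one could look for the file inside the published sdist; the sketch below is an assumption about the archive name (derived from the package name and version above), not something taken from this diff:

```python
# Sketch: verify that README.md ships inside the 0.0.3 sdist.
# The archive name is assumed from the package name and version shown above.
import tarfile

with tarfile.open("comfi_fast_grnn_torch-0.0.3.tar.gz") as sdist:
    has_readme = any(name.endswith("/README.md") for name in sdist.getnames())
    print(has_readme)  # expected: True
```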
{comfi_fast_grnn_torch-0.0.1 → comfi_fast_grnn_torch-0.0.3}/PKG-INFO
@@ -1,7 +1,7 @@
 Metadata-Version: 2.4
 Name: comfi_fast_grnn_torch
-Version: 0.0.1
-Summary: A PyTorch implementation of
+Version: 0.0.3
+Summary: A PyTorch implementation of Comfi-FastGRNN
 Author-email: Nicolas Arrieta Larraza <NIAL@bang-olufsen.dk>, Niels de Koeijer <NEMK@bang-olufsen.dk>
 License: MIT
 Project-URL: Homepage, https://github.com/narrietal/Fast-ULCNet
@@ -12,8 +12,8 @@ Requires-Dist: libsegmenter==1.0.4
 Requires-Dist: torchinfo==1.8.0
 Requires-Dist: CRM_pytorch==0.1.0
 
-# fast-ulcnet-torch
-Implements FastULCNet and Comfi-FastGRNN in torch.
+# comfi-fast-grnn-torch
+Implements Comfi-FastGRNN in torch.
 
 ## Usage
 
@@ -26,21 +26,9 @@ Here is how to use the layer with default settings in a standard training loop:
 import torch
 from comfi_fast_grnn_torch import ComfiFastGRNN
 
-# 1. Initialize the layer
-# batch_first=True is the default for this implementation
-model = ComfiFastGRNN(
+comfi_fgrnn = ComfiFastGRNN(
     input_size=32,
     hidden_size=64,
     num_layers=1
 )
-
-# 2. Create dummy input: (Batch Size, Sequence Length, Input Size)
-x = torch.randn(10, 50, 32)
-
-# 3. Forward pass
-# Returns output (all timesteps) and final hidden state
-output, h_n = model(x)
-
-print(f"Output shape: {output.shape}")  # torch.Size([10, 50, 64])
-print(f"Hidden state shape: {h_n.shape}")  # torch.Size([1, 10, 64])
 ```
comfi_fast_grnn_torch-0.0.3/README.md
@@ -0,0 +1,20 @@
+# comfi-fast-grnn-torch
+Implements Comfi-FastGRNN in torch.
+
+## Usage
+
+The `ComfiFastGRNN` module is designed to be a drop-in replacement for standard PyTorch RNN layers (like `nn.LSTM` or `nn.GRU`), but with added support for low-rank factorization and complementary filtering.
+
+### Basic Implementation
+Here is how to use the layer with default settings in a standard training loop:
+
+```python
+import torch
+from comfi_fast_grnn_torch import ComfiFastGRNN
+
+comfi_fgrnn = ComfiFastGRNN(
+    input_size=32,
+    hidden_size=64,
+    num_layers=1
+)
+```
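
The 0.0.3 README stops after constructing the layer. The example removed from the 0.0.1 README (see the deletion hunk further down) documented a batch-first input of shape (batch, sequence length, input size) and a return value of (per-timestep output, final hidden state); the sketch below carries those assumptions over and is not confirmed by the new README:

```python
# Sketch of a forward pass, assuming the batch-first semantics and return
# values shown in the 0.0.1 README example.
import torch
from comfi_fast_grnn_torch import ComfiFastGRNN

comfi_fgrnn = ComfiFastGRNN(
    input_size=32,
    hidden_size=64,
    num_layers=1
)

x = torch.randn(10, 50, 32)    # (batch, sequence length, input size)
output, h_n = comfi_fgrnn(x)   # per-timestep output and final hidden state

print(output.shape)  # torch.Size([10, 50, 64]) per the old example
print(h_n.shape)     # torch.Size([1, 10, 64]) per the old example
```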
{comfi_fast_grnn_torch-0.0.1 → comfi_fast_grnn_torch-0.0.3}/pyproject.toml
@@ -4,9 +4,9 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "comfi_fast_grnn_torch"
-version = "0.0.1"
-description = "A PyTorch implementation of
-readme = "README.md"
+version = "0.0.3"
+description = "A PyTorch implementation of Comfi-FastGRNN"
+readme = {file = "README.md", content-type = "text/markdown"}
 requires-python = ">=3.10"
 license = {text = "MIT"}
 authors = [
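
The version bump, the new description, and the explicit readme content type all flow into the generated PKG-INFO shown above. A minimal sketch for checking the installed metadata with the standard library, assuming version 0.0.3 is installed in the current environment:

```python
# Sketch: inspect the installed package metadata via importlib.metadata.
from importlib.metadata import metadata, version

print(version("comfi_fast_grnn_torch"))   # expected: 0.0.3
meta = metadata("comfi_fast_grnn_torch")
print(meta["Summary"])                    # expected: A PyTorch implementation of Comfi-FastGRNN
print(meta["Description-Content-Type"])   # expected: text/markdown
```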
comfi_fast_grnn_torch-0.0.3/src/comfi_fast_grnn_torch/__init__.py
@@ -0,0 +1 @@
+from .ComfiFastGRNN import ComfiFastGRNN
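
This one-line re-export is what lets the README import the class directly from the package. A short sketch; the 0.0.1 behaviour is inferred from the file layout above (empty __init__.py plus a ComfiFastGRNN.py submodule), not stated in this diff:

```python
# Works in 0.0.3 thanks to the re-export in __init__.py:
from comfi_fast_grnn_torch import ComfiFastGRNN

# In 0.0.1, with an empty __init__.py, the class presumably had to be
# imported from the submodule itself:
# from comfi_fast_grnn_torch.ComfiFastGRNN import ComfiFastGRNN
```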
{comfi_fast_grnn_torch-0.0.1 → comfi_fast_grnn_torch-0.0.3}/src/comfi_fast_grnn_torch.egg-info/PKG-INFO
@@ -1,7 +1,7 @@
 Metadata-Version: 2.4
 Name: comfi_fast_grnn_torch
-Version: 0.0.1
-Summary: A PyTorch implementation of
+Version: 0.0.3
+Summary: A PyTorch implementation of Comfi-FastGRNN
 Author-email: Nicolas Arrieta Larraza <NIAL@bang-olufsen.dk>, Niels de Koeijer <NEMK@bang-olufsen.dk>
 License: MIT
 Project-URL: Homepage, https://github.com/narrietal/Fast-ULCNet
@@ -12,8 +12,8 @@ Requires-Dist: libsegmenter==1.0.4
 Requires-Dist: torchinfo==1.8.0
 Requires-Dist: CRM_pytorch==0.1.0
 
-# fast-ulcnet-torch
-Implements FastULCNet and Comfi-FastGRNN in torch.
+# comfi-fast-grnn-torch
+Implements Comfi-FastGRNN in torch.
 
 ## Usage
 
@@ -26,21 +26,9 @@ Here is how to use the layer with default settings in a standard training loop:
 import torch
 from comfi_fast_grnn_torch import ComfiFastGRNN
 
-# 1. Initialize the layer
-# batch_first=True is the default for this implementation
-model = ComfiFastGRNN(
+comfi_fgrnn = ComfiFastGRNN(
     input_size=32,
     hidden_size=64,
     num_layers=1
 )
-
-# 2. Create dummy input: (Batch Size, Sequence Length, Input Size)
-x = torch.randn(10, 50, 32)
-
-# 3. Forward pass
-# Returns output (all timesteps) and final hidden state
-output, h_n = model(x)
-
-print(f"Output shape: {output.shape}")  # torch.Size([10, 50, 64])
-print(f"Hidden state shape: {h_n.shape}")  # torch.Size([1, 10, 64])
 ```
comfi_fast_grnn_torch-0.0.1/README.md
@@ -1,32 +0,0 @@
-# fast-ulcnet-torch
-Implements FastULCNet and Comfi-FastGRNN in torch.
-
-## Usage
-
-The `ComfiFastGRNN` module is designed to be a drop-in replacement for standard PyTorch RNN layers (like `nn.LSTM` or `nn.GRU`), but with added support for low-rank factorization and complementary filtering.
-
-### Basic Implementation
-Here is how to use the layer with default settings in a standard training loop:
-
-```python
-import torch
-from comfi_fast_grnn_torch import ComfiFastGRNN
-
-# 1. Initialize the layer
-# batch_first=True is the default for this implementation
-model = ComfiFastGRNN(
-    input_size=32,
-    hidden_size=64,
-    num_layers=1
-)
-
-# 2. Create dummy input: (Batch Size, Sequence Length, Input Size)
-x = torch.randn(10, 50, 32)
-
-# 3. Forward pass
-# Returns output (all timesteps) and final hidden state
-output, h_n = model(x)
-
-print(f"Output shape: {output.shape}")  # torch.Size([10, 50, 64])
-print(f"Hidden state shape: {h_n.shape}")  # torch.Size([1, 10, 64])
-```
Files without changes (the entries marked +0 -0 above): comfi_fast_grnn_torch-0.0.1/src/comfi_fast_grnn_torch/__init__.py, setup.cfg, src/comfi_fast_grnn_torch/ComfiFastGRNN.py, src/comfi_fast_grnn_torch.egg-info/dependency_links.txt, src/comfi_fast_grnn_torch.egg-info/requires.txt, and src/comfi_fast_grnn_torch.egg-info/top_level.txt.