torch-transform-image 0.0.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- torch_transform_image/__init__.py +18 -0
- torch_transform_image/py.typed +5 -0
- torch_transform_image/transforms_2d.py +36 -0
- torch_transform_image/transforms_3d.py +36 -0
- torch_transform_image-0.0.2.dist-info/METADATA +157 -0
- torch_transform_image-0.0.2.dist-info/RECORD +8 -0
- torch_transform_image-0.0.2.dist-info/WHEEL +4 -0
- torch_transform_image-0.0.2.dist-info/licenses/LICENSE +28 -0
|
@@ -0,0 +1,18 @@
|
|
|
1
|
+
"""Real space transformations of 2D/3D images in PyTorch"""
|
|
2
|
+
|
|
3
|
+
from importlib.metadata import PackageNotFoundError, version
|
|
4
|
+
|
|
5
|
+
try:
|
|
6
|
+
__version__ = version("torch-transform-image")
|
|
7
|
+
except PackageNotFoundError:
|
|
8
|
+
__version__ = "uninstalled"
|
|
9
|
+
__author__ = "Alister Burt"
|
|
10
|
+
__email__ = "alisterburt@gmail.com"
|
|
11
|
+
|
|
12
|
+
from torch_transform_image.transforms_2d import affine_transform_image_2d
|
|
13
|
+
from torch_transform_image.transforms_3d import affine_transform_image_3d
|
|
14
|
+
|
|
15
|
+
__all__ = [
|
|
16
|
+
'affine_transform_image_2d',
|
|
17
|
+
'affine_transform_image_3d',
|
|
18
|
+
]
|
|
@@ -0,0 +1,36 @@
|
|
|
1
|
+
from typing import Literal
|
|
2
|
+
|
|
3
|
+
import einops
|
|
4
|
+
import torch
|
|
5
|
+
from torch_affine_utils import homogenise_coordinates
|
|
6
|
+
from torch_grid_utils import coordinate_grid
|
|
7
|
+
from torch_image_interpolation import sample_image_2d
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
def affine_transform_image_2d(
    image: torch.Tensor,
    matrices: torch.Tensor,
    interpolation: Literal['nearest', 'bilinear', 'bicubic'],
    yx_matrices: bool = False,
) -> torch.Tensor:
    """Apply affine transformation(s) to a 2D image.

    Parameters
    ----------
    image: torch.Tensor
        Image whose last two dimensions are `(h, w)`.
    matrices: torch.Tensor
        `(..., 3, 3)` affine matrices which left-multiply homogeneous pixel
        coordinates (`[y, x, 1]` when `yx_matrices=True`, `[x, y, 1]` otherwise).
    interpolation: Literal['nearest', 'bilinear', 'bicubic']
        Interpolation mode used when sampling the image.
    yx_matrices: bool
        If `False` (default), matrices are assumed to operate on `[x, y, 1]`
        coordinates and are converted internally to the `[y, x, 1]` convention
        used by the coordinate grid.

    Returns
    -------
    torch.Tensor
        Image sampled at the transformed coordinates.
    """
    # grab image dimensions
    h, w = image.shape[-2:]

    if not yx_matrices:
        # Work on a copy: the original code flipped `matrices` in place,
        # silently mutating the caller's tensor.
        matrices = matrices.clone()
        # reorder xy -> yx: flip the linear part and the translation column
        matrices[..., :2, :2] = (
            torch.flip(matrices[..., :2, :2], dims=(-2, -1))
        )
        matrices[..., :2, 2] = torch.flip(matrices[..., :2, 2], dims=(-1,))

    # generate grid of pixel coordinates
    grid = coordinate_grid(image_shape=(h, w), device=image.device)

    # apply matrix to coordinates
    grid = homogenise_coordinates(grid)  # (h, w, yxw)
    grid = einops.rearrange(grid, 'h w yxw -> h w yxw 1')
    # Insert singleton dims so a batched (b, 3, 3) stack broadcasts over the
    # (h, w) grid; for an unbatched (3, 3) matrix the result is unchanged.
    matrices = matrices[..., None, None, :, :]  # (..., 1, 1, 3, 3)
    grid = matrices @ grid
    grid = grid[..., :2, 0]  # dehomogenise coordinates: (..., h, w, yxw, 1) -> (..., h, w, yx)

    # sample image at transformed positions
    result = sample_image_2d(image, coordinates=grid, interpolation=interpolation)
    return result
|
|
@@ -0,0 +1,36 @@
|
|
|
1
|
+
from typing import Literal
|
|
2
|
+
|
|
3
|
+
import einops
|
|
4
|
+
import torch
|
|
5
|
+
from torch_affine_utils import homogenise_coordinates
|
|
6
|
+
from torch_grid_utils import coordinate_grid
|
|
7
|
+
from torch_image_interpolation import sample_image_3d
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
def affine_transform_image_3d(
    image: torch.Tensor,
    matrices: torch.Tensor,
    interpolation: Literal['nearest', 'trilinear'],
    zyx_matrices: bool = False,
) -> torch.Tensor:
    """Apply affine transformation(s) to a 3D image.

    Parameters
    ----------
    image: torch.Tensor
        Image whose last three dimensions are `(d, h, w)`.
    matrices: torch.Tensor
        `(..., 4, 4)` affine matrices which left-multiply homogeneous voxel
        coordinates (`[z, y, x, 1]` when `zyx_matrices=True`, `[x, y, z, 1]`
        otherwise).
    interpolation: Literal['nearest', 'trilinear']
        Interpolation mode used when sampling the image.
    zyx_matrices: bool
        If `False` (default), matrices are assumed to operate on `[x, y, z, 1]`
        coordinates and are converted internally to the `[z, y, x, 1]`
        convention used by the coordinate grid.

    Returns
    -------
    torch.Tensor
        Image sampled at the transformed coordinates.
    """
    # grab image dimensions
    d, h, w = image.shape[-3:]

    if not zyx_matrices:
        # Work on a copy: the original code flipped `matrices` in place,
        # silently mutating the caller's tensor.
        matrices = matrices.clone()
        # reorder xyz -> zyx: flip the linear part and the translation column
        matrices[..., :3, :3] = (
            torch.flip(matrices[..., :3, :3], dims=(-2, -1))
        )
        matrices[..., :3, 3] = torch.flip(matrices[..., :3, 3], dims=(-1,))

    # generate grid of voxel coordinates
    grid = coordinate_grid(image_shape=(d, h, w), device=image.device)

    # apply matrix to coordinates
    grid = homogenise_coordinates(grid)  # (d, h, w, zyxw)
    grid = einops.rearrange(grid, 'd h w zyxw -> d h w zyxw 1')
    # Insert singleton dims so a batched (b, 4, 4) stack broadcasts over the
    # (d, h, w) grid; for an unbatched (4, 4) matrix the result is unchanged.
    matrices = matrices[..., None, None, None, :, :]  # (..., 1, 1, 1, 4, 4)
    grid = matrices @ grid
    grid = grid[..., :3, 0]  # dehomogenise coordinates: (..., d, h, w, zyxw, 1) -> (..., d, h, w, zyx)

    # sample image at transformed positions
    result = sample_image_3d(image, coordinates=grid, interpolation=interpolation)
    return result
|
|
@@ -0,0 +1,157 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: torch-transform-image
|
|
3
|
+
Version: 0.0.2
|
|
4
|
+
Summary: Real space transformations of 2D/3D images in PyTorch
|
|
5
|
+
Project-URL: homepage, https://github.com/teamtomo/torch-transform-image
|
|
6
|
+
Project-URL: repository, https://github.com/teamtomo/torch-transform-image
|
|
7
|
+
Author-email: Alister Burt <alisterburt@gmail.com>
|
|
8
|
+
License: BSD-3-Clause
|
|
9
|
+
License-File: LICENSE
|
|
10
|
+
Classifier: Development Status :: 3 - Alpha
|
|
11
|
+
Classifier: License :: OSI Approved :: BSD License
|
|
12
|
+
Classifier: Programming Language :: Python :: 3
|
|
13
|
+
Classifier: Programming Language :: Python :: 3.9
|
|
14
|
+
Classifier: Programming Language :: Python :: 3.10
|
|
15
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
16
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
17
|
+
Classifier: Programming Language :: Python :: 3.13
|
|
18
|
+
Classifier: Typing :: Typed
|
|
19
|
+
Requires-Python: >=3.9
|
|
20
|
+
Requires-Dist: torch
|
|
21
|
+
Requires-Dist: torch-affine-utils
|
|
22
|
+
Requires-Dist: torch-grid-utils
|
|
23
|
+
Requires-Dist: torch-image-interpolation
|
|
24
|
+
Provides-Extra: dev
|
|
25
|
+
Requires-Dist: ipython; extra == 'dev'
|
|
26
|
+
Requires-Dist: pdbpp; extra == 'dev'
|
|
27
|
+
Requires-Dist: rich; extra == 'dev'
|
|
28
|
+
Provides-Extra: test
|
|
29
|
+
Requires-Dist: pytest; extra == 'test'
|
|
30
|
+
Requires-Dist: pytest-cov; extra == 'test'
|
|
31
|
+
Description-Content-Type: text/markdown
|
|
32
|
+
|
|
33
|
+
# torch-transform-image
|
|
34
|
+
|
|
35
|
+
[](https://github.com/teamtomo/torch-transform-image/raw/main/LICENSE)
|
|
36
|
+
[](https://pypi.org/project/torch-transform-image)
|
|
37
|
+
[](https://python.org)
|
|
38
|
+
[](https://github.com/teamtomo/torch-transform-image/actions/workflows/ci.yml)
|
|
39
|
+
[](https://codecov.io/gh/teamtomo/torch-transform-image)
|
|
40
|
+
|
|
41
|
+
Real space transformations of 2D/3D images in PyTorch
|
|
42
|
+
|
|
43
|
+
## Motivation
|
|
44
|
+
|
|
45
|
+
This package provides a simple, consistent API for applying affine transformations to 2D/3D images in PyTorch.
|
|
46
|
+
It enables efficient, GPU-accelerated geometric transformations of images.
|
|
47
|
+
|
|
48
|
+
## Installation
|
|
49
|
+
|
|
50
|
+
```bash
|
|
51
|
+
pip install torch-transform-image
|
|
52
|
+
```
|
|
53
|
+
|
|
54
|
+
## Features
|
|
55
|
+
|
|
56
|
+
- Apply arbitrary affine transformations to 2D and 3D images
|
|
57
|
+
- Support for various interpolation methods (nearest, bilinear, bicubic for 2D; nearest, trilinear for 3D)
|
|
58
|
+
- Batched operations for efficient processing
|
|
59
|
+
- Fully differentiable operations compatible with PyTorch's autograd
|
|
60
|
+
|
|
61
|
+
## Coordinate System
|
|
62
|
+
|
|
63
|
+
This package uses the same coordinate system as NumPy/PyTorch array indexing:
|
|
64
|
+
- For 2D images: coordinates are ordered as `[y, x]` for dimensions `(height, width)`
|
|
65
|
+
- For 3D images: coordinates are ordered as `[z, y, x]` for dimensions `(depth, height, width)`
|
|
66
|
+
|
|
67
|
+
Transformation matrices left-multiply homogeneous pixel coordinates (`[y, x, 1]` for 2D and `[z, y, x, 1]` for 3D).
|
|
68
|
+
|
|
69
|
+
### Generating Transformation Matrices
|
|
70
|
+
|
|
71
|
+
The companion package [torch-affine-utils](https://github.com/teamtomo/torch-affine-utils) provides convenient functions
|
|
72
|
+
to generate transformation matrices that work with homogeneous pixel coordinates (`yxw`/`zyxw`):
|
|
73
|
+
|
|
74
|
+
```python
|
|
75
|
+
from torch_affine_utils.transforms_2d import R, T, S # Rotation, Translation, Scale for 2D
|
|
76
|
+
from torch_affine_utils.transforms_3d import Rx, Ry, Rz, T, S # Rotation, Translation, Scale for 3D
|
|
77
|
+
```
|
|
78
|
+
|
|
79
|
+
## Usage
|
|
80
|
+
|
|
81
|
+
### 2D Transformations
|
|
82
|
+
|
|
83
|
+
```python
|
|
84
|
+
import torch
|
|
85
|
+
from torch_transform_image import affine_transform_image_2d
|
|
86
|
+
from torch_affine_utils.transforms_2d import R, T, S # Rotation, Translation, Scale
|
|
87
|
+
|
|
88
|
+
# Create a test image (28×28)
|
|
89
|
+
image = torch.zeros((28, 28), dtype=torch.float32)
|
|
90
|
+
image[14, 14] = 1 # Place a dot at the center
|
|
91
|
+
|
|
92
|
+
# Create a transformation matrix to translate coordinates 4 pixels in y direction
|
|
93
|
+
translation = T([4, 0]) # Uses [y, x] coordinate order matching dimensions (h, w)
|
|
94
|
+
|
|
95
|
+
# Apply the transformation
|
|
96
|
+
result = affine_transform_image_2d(
|
|
97
|
+
image=image,
|
|
98
|
+
matrices=translation,
|
|
99
|
+
interpolation='bilinear', # Options: 'nearest', 'bilinear', 'bicubic'
|
|
100
|
+
yx_matrices=True, # The generated translations have [y, x] order
|
|
101
|
+
)
|
|
102
|
+
|
|
103
|
+
# Compose multiple transformations
|
|
104
|
+
# First translate to origin, then rotate, then translate back
|
|
105
|
+
T1 = T([-14, -14]) # Move center to origin
|
|
106
|
+
R1 = R(45, yx=True) # Rotate 45 degrees
|
|
107
|
+
T2 = T([14, 14]) # Move back
|
|
108
|
+
transform = T2 @ R1 @ T1 # Matrix composition (applied right-to-left)
|
|
109
|
+
|
|
110
|
+
# Apply the composed transformation
|
|
111
|
+
rotated = affine_transform_image_2d(
|
|
112
|
+
image=image,
|
|
113
|
+
matrices=transform,
|
|
114
|
+
interpolation='bicubic',
|
|
115
|
+
yx_matrices=True,
|
|
116
|
+
)
|
|
117
|
+
```
|
|
118
|
+
|
|
119
|
+
### 3D Transformations
|
|
120
|
+
|
|
121
|
+
```python
|
|
122
|
+
import torch
|
|
123
|
+
from torch_transform_image import affine_transform_image_3d
|
|
124
|
+
from torch_affine_utils.transforms_3d import Rx, Ry, Rz, T, S  # Rotations, Translation, Scale
|
|
125
|
+
|
|
126
|
+
# Create a test volume (64×64×64)
|
|
127
|
+
volume = torch.zeros((64, 64, 64), dtype=torch.float32)
|
|
128
|
+
volume[32, 32, 32] = 1 # Place a dot at the center
|
|
129
|
+
|
|
130
|
+
# Create a transformation matrix (translate coordinates 5 voxels in z direction)
|
|
131
|
+
translation = T([5, 0, 0]) # Uses [z, y, x] coordinate order matching dimensions (d, h, w)
|
|
132
|
+
|
|
133
|
+
# Apply the transformation
|
|
134
|
+
result = affine_transform_image_3d(
|
|
135
|
+
image=volume,
|
|
136
|
+
matrices=translation,
|
|
137
|
+
interpolation='trilinear', # Options: 'nearest', 'trilinear'
|
|
138
|
+
zyx_matrices=True, # The generated translations have [z, y, x] order
|
|
139
|
+
)
|
|
140
|
+
```
|
|
141
|
+
|
|
142
|
+
## How It Works
|
|
143
|
+
|
|
144
|
+
Under the hood, the package:
|
|
145
|
+
1. Creates a coordinate grid for the input image
|
|
146
|
+
2. Applies the transformation matrix to these coordinates
|
|
147
|
+
3. Samples the original image at the transformed coordinates using the specified interpolation method
|
|
148
|
+
|
|
149
|
+
All operations are performed in PyTorch, making them fully differentiable and GPU-compatible.
|
|
150
|
+
|
|
151
|
+
## License
|
|
152
|
+
|
|
153
|
+
This project is licensed under the BSD 3-Clause License - see the LICENSE file for details.
|
|
154
|
+
|
|
155
|
+
## Contributing
|
|
156
|
+
|
|
157
|
+
Contributions are welcome! Please feel free to submit a Pull Request.
|
|
@@ -0,0 +1,8 @@
|
|
|
1
|
+
torch_transform_image/__init__.py,sha256=CvPvmi8gnRML8YXeBgFn5PCLu3hoFuYDBJ60yADGbhM,534
|
|
2
|
+
torch_transform_image/py.typed,sha256=esB4cHc6c07uVkGtqf8at7ttEnprwRxwk8obY8Qumq4,187
|
|
3
|
+
torch_transform_image/transforms_2d.py,sha256=GBQB4MRpMQTy_RG3M9-7zI7T7U_fGGCEVh23Z_8B1eg,1211
|
|
4
|
+
torch_transform_image/transforms_3d.py,sha256=qM20WNKGGDm_Yy1wCE0b2JfoDXV1Mk4G4YkRwbBi-W0,1227
|
|
5
|
+
torch_transform_image-0.0.2.dist-info/METADATA,sha256=-5g55-h433MtUWl-_3dg3blzD2BPAweX6vpHRKsWQ-s,5968
|
|
6
|
+
torch_transform_image-0.0.2.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
|
|
7
|
+
torch_transform_image-0.0.2.dist-info/licenses/LICENSE,sha256=Kbo_h3sPum8rDAhMerH9fl4hzFn-QUCekJf05zk2epY,1499
|
|
8
|
+
torch_transform_image-0.0.2.dist-info/RECORD,,
|
|
@@ -0,0 +1,28 @@
|
|
|
1
|
+
BSD 3-Clause License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2023, Alister Burt
|
|
4
|
+
|
|
5
|
+
Redistribution and use in source and binary forms, with or without
|
|
6
|
+
modification, are permitted provided that the following conditions are met:
|
|
7
|
+
|
|
8
|
+
1. Redistributions of source code must retain the above copyright notice, this
|
|
9
|
+
list of conditions and the following disclaimer.
|
|
10
|
+
|
|
11
|
+
2. Redistributions in binary form must reproduce the above copyright notice,
|
|
12
|
+
this list of conditions and the following disclaimer in the documentation
|
|
13
|
+
and/or other materials provided with the distribution.
|
|
14
|
+
|
|
15
|
+
3. Neither the name of the copyright holder nor the names of its
|
|
16
|
+
contributors may be used to endorse or promote products derived from
|
|
17
|
+
this software without specific prior written permission.
|
|
18
|
+
|
|
19
|
+
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
|
20
|
+
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
|
21
|
+
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
|
22
|
+
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
|
23
|
+
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
|
24
|
+
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
|
25
|
+
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
|
26
|
+
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
|
27
|
+
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
28
|
+
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|