kuva-reader 1.0.4.tar.gz → 1.1.1.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of kuva-reader might be problematic.
- {kuva_reader-1.0.4 → kuva_reader-1.1.1}/PKG-INFO +5 -7
- {kuva_reader-1.0.4 → kuva_reader-1.1.1}/README.md +2 -2
- {kuva_reader-1.0.4 → kuva_reader-1.1.1}/kuva_reader/__init__.py +2 -8
- kuva_reader-1.1.1/kuva_reader/reader/image.py +28 -0
- {kuva_reader-1.0.4 → kuva_reader-1.1.1}/kuva_reader/reader/level0.py +38 -51
- {kuva_reader-1.0.4 → kuva_reader-1.1.1}/kuva_reader/reader/level1.py +27 -33
- {kuva_reader-1.0.4 → kuva_reader-1.1.1}/kuva_reader/reader/level2.py +15 -17
- {kuva_reader-1.0.4 → kuva_reader-1.1.1}/kuva_reader/reader/product_base.py +6 -7
- {kuva_reader-1.0.4 → kuva_reader-1.1.1}/pyproject.toml +3 -5
- kuva_reader-1.0.4/kuva_reader/reader/image.py +0 -203
- {kuva_reader-1.0.4 → kuva_reader-1.1.1}/kuva_reader/py.typed +0 -0
- {kuva_reader-1.0.4 → kuva_reader-1.1.1}/kuva_reader/reader/__init__.py +0 -0
- {kuva_reader-1.0.4 → kuva_reader-1.1.1}/kuva_reader/reader/py.typed +0 -0
- {kuva_reader-1.0.4 → kuva_reader-1.1.1}/kuva_reader/reader/read.py +0 -0
- {kuva_reader-1.0.4 → kuva_reader-1.1.1}/kuva_reader/reader/utils.py +0 -0
{kuva_reader-1.0.4 → kuva_reader-1.1.1}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: kuva-reader
-Version: 1.0.4
+Version: 1.1.1
 Summary: Manipulate the Kuva Space image and metadata formats
 License: MIT
 Author: Guillem Ballesteros
@@ -11,14 +11,12 @@ Classifier: Programming Language :: Python :: 3
 Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
-Requires-Dist: kuva-geometry
-Requires-Dist: kuva-metadata
+Requires-Dist: kuva-geometry (>=1.0.1,<2.0.0)
+Requires-Dist: kuva-metadata (>=1.1.1,<2.0.0)
 Requires-Dist: numpy (>=1.26.4,<2.0.0)
 Requires-Dist: numpy-quaternion (>=2022.4.4,<2023.0.0)
 Requires-Dist: pint (>=0.22,<0.23)
 Requires-Dist: rasterio (>=1.4.1,<2.0.0)
-Requires-Dist: rioxarray (>=0.12.4,<0.13.0)
-Requires-Dist: xarray (>=2022.12.0,<2023.0.0)
 Description-Content-Type: text/markdown

 <div align="center">
@@ -36,7 +34,7 @@ The Kuva Space images are in GeoTIFF format. The products consist of an image or
 images along with its metadata to give all the necessary information to use the products.
 The metadata lives either in a Kuva Space database, or alternatively in a sidecar JSON file.

-This library allows the reading of the image GeoTIFFs into `
+This library allows the reading of the image GeoTIFFs into `rasterio.DatasetReader` objects that
 allow convenient raster manipulations, along with their `kuva-metadata` metadata objects.

 # Installation
@@ -56,7 +54,7 @@ pip install kuva-reader
 This is a minimal example that allows you to read and print the image shape of a L2 product.

 The result product is in this case an L2A product (as seen from the folder name).
-The loaded product is stored in a `
+The loaded product is stored in a `rasterio.DatasetReader` object, which contains extensive GIS functionalities [(examples for usage)](https://rasterio.readthedocs.io/en/stable/api/rasterio.io.html#rasterio.io.DatasetReader).

 ```python
 from kuva_reader import read_product
{kuva_reader-1.0.4 → kuva_reader-1.1.1}/README.md

@@ -13,7 +13,7 @@ The Kuva Space images are in GeoTIFF format. The products consist of an image or
 images along with its metadata to give all the necessary information to use the products.
 The metadata lives either in a Kuva Space database, or alternatively in a sidecar JSON file.

-This library allows the reading of the image GeoTIFFs into `
+This library allows the reading of the image GeoTIFFs into `rasterio.DatasetReader` objects that
 allow convenient raster manipulations, along with their `kuva-metadata` metadata objects.

 # Installation
@@ -33,7 +33,7 @@ pip install kuva-reader
 This is a minimal example that allows you to read and print the image shape of a L2 product.

 The result product is in this case an L2A product (as seen from the folder name).
-The loaded product is stored in a `
+The loaded product is stored in a `rasterio.DatasetReader` object, which contains extensive GIS functionalities [(examples for usage)](https://rasterio.readthedocs.io/en/stable/api/rasterio.io.html#rasterio.io.DatasetReader).

 ```python
 from kuva_reader import read_product
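For orientation, the minimal example the README refers to would, against this release, look roughly like the sketch below. The product folder name is made up; `read_product` and the `image` attribute come from the code changed in this diff, and the shape is printed from the `rasterio.DatasetReader` attributes (`count`, `height`, `width`) that the new `__repr__` methods use.

```python
from pathlib import Path

from kuva_reader import read_product

# Hypothetical product folder; any unpacked L2A product directory would do.
product = read_product(Path("products/example_L2A"))

# With 1.1.1 the loaded image is a rasterio.DatasetReader, so the shape is
# read from its band/row/column attributes.
print(product.image.count, product.image.height, product.image.width)
```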
{kuva_reader-1.0.4 → kuva_reader-1.1.1}/kuva_reader/__init__.py

@@ -17,17 +17,14 @@ Key Features
 Dependencies
 - kuva-metadata: A specialized library that handles the extraction and
   parsing of metadata associated with Kuva Space products.
--
-  including
+- rasterio: Used for loading image data as arrays with extra functionality,
+  including GIS specific functions and metadata, which are useful for analysis and
   visualization.
 """

 __version__ = "0.1.0"

 from .reader.image import (
-    image_to_dtype_range,
-    image_to_original_range,
-    image_to_uint16_range,
     image_footprint,
 )
 from .reader.level0 import Level0Product
@@ -40,9 +37,6 @@ __all__ = [
     "Level1ABProduct",
     "Level1CProduct",
     "Level2AProduct",
-    "image_to_dtype_range",
-    "image_to_original_range",
-    "image_to_uint16_range",
     "image_footprint",
     "read_product",
 ]
kuva_reader-1.1.1/kuva_reader/reader/image.py

@@ -0,0 +1,28 @@
+"""Utilities to process images related to product processing."""
+
+import rasterio as rio
+from shapely.geometry import box, Polygon
+from rasterio.warp import transform_bounds
+
+
+def image_footprint(image: rio.DatasetReader, crs: str = "") -> Polygon:
+    """Return a product footprint as a shapely polygon
+
+    Parameters
+    ----------
+    image
+        The product image
+    crs, optional
+        CRS to convert to, by default "", keeping the image's CRS
+
+    Returns
+    -------
+    A shapely polygon footprint
+    """
+    if crs:
+        # Transform the bounds to the new CRS using rasterio's built-in function
+        bounds = transform_bounds(image.crs, crs, *image.bounds)
+        footprint = box(*bounds)
+    else:
+        footprint = box(*image.bounds)
+    return footprint
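A possible way to call the new `image_footprint` helper, sketched under the assumption that a plain product GeoTIFF is at hand (the file name here is hypothetical):

```python
import rasterio as rio

from kuva_reader import image_footprint

with rio.open("L2A.tif") as src:  # hypothetical path to a product GeoTIFF
    footprint = image_footprint(src)                     # polygon in the image's own CRS
    footprint_wgs84 = image_footprint(src, "EPSG:4326")  # bounds reprojected via transform_bounds

print(footprint_wgs84.wkt)
```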
{kuva_reader-1.0.4 → kuva_reader-1.1.1}/kuva_reader/reader/level0.py

@@ -2,13 +2,12 @@ from pathlib import Path
 from typing import cast

 import numpy as np
-import
-import xarray
+import rasterio as rio
 from kuva_metadata import MetadataLevel0
 from pint import UnitRegistry
 from shapely import Polygon

-from kuva_reader import
+from kuva_reader import image_footprint

 from .product_base import ProductBase

@@ -39,13 +38,6 @@ class Level0Product(ProductBase[MetadataLevel0]):
     target_ureg, optional
         Pint Unit Registry to swap to. This is only relevant when parsing data from a
         JSON file, which by default uses the kuva-metadata ureg.
-    as_physical_unit
-        Whether to denormalize data from full data type range back to the physical
-        units stored with the data, by default False
-    target_dtype
-        Target data type to normalize data to. This will first denormalize the data
-        to its original range and then normalize to new data type range to keep a
-        scale and offset, by default None

     Attributes
     ----------
@@ -53,10 +45,9 @@ class Level0Product(ProductBase[MetadataLevel0]):
         Path to the folder containing the images.
     metadata: MetadataLevel0
         The metadata associated with the images
-    images: Dict[str,
-
-
-        can be retrieved like so: `ds.rio.get_gcps()`
+    images: Dict[str, rasterio.DatasetReader]
+        A dictionary that maps camera names to their respective Rasterio DatasetReader
+        objects.
     data_tags: Dict[str, Any]
         Tags stored along with the data. These can be used e.g. to check the physical
         units of pixels or normalisation factors.
@@ -67,55 +58,42 @@ class Level0Product(ProductBase[MetadataLevel0]):
         image_path: Path,
         metadata: MetadataLevel0 | None = None,
         target_ureg: UnitRegistry | None = None,
-        as_physical_unit: bool = False,
-        target_dtype: np.dtype | None = None,
     ) -> None:
         super().__init__(image_path, metadata, target_ureg)

         self.images = {
             camera: cast(
-
-
+                rio.DatasetReader,
+                rio.open(
                     self.image_path / (cube.camera.name + ".tif"),
                 ),
             )
             for camera, cube in self.metadata.image.data_cubes.items()  # type: ignore
         }
-        self.crs = self.images[list(self.images.keys())[0]].
+        self.crs = self.images[list(self.images.keys())[0]].crs

         # Read tags for images and denormalize / renormalize if needed
-        self.data_tags = {camera:
-        if as_physical_unit or target_dtype:
-            for camera, img in self.images.items():
-                # Move from normalized full scale back to original data float values.
-                # pop() since values not true anymore after denormalization.
-                norm_img = image_to_original_range(
-                    img,
-                    self.data_tags[camera].pop("data_offset"),
-                    self.data_tags[camera].pop("data_scale"),
-                )
-                self.images[camera] = norm_img
-
-                if target_dtype:
-                    # For algorithm needs, cast and normalize to a specific dtype range
-                    # NOTE: This may remove data precision e.g. uint16 -> uint8
-                    norm_img, offset, scale = image_to_dtype_range(img, target_dtype)
-                    self.data_tags[camera]["data_offset"] = offset
-                    self.data_tags[camera]["data_scale"] = scale
+        self.data_tags = {camera: src.tags() for camera, src in self.images.items()}

     def __repr__(self):
         """Pretty printing of the object with the most important info"""
         if self.images is not None and len(self.images):
+            image_shapes = []
+            for camera_name, image in self.images.items():
+                shape_str = f"({image.count}, {image.height}, {image.width})"
+                image_shapes.append(f"{camera_name.upper()} shape {shape_str}")
+
+            shapes_description = " and ".join(image_shapes)
+
             return (
-                f"{self.__class__.__name__}"
-                f"with
-                f"
-                f"(CRS '{self.crs}'). Loaded from: '{self.image_path}'."
+                f"{self.__class__.__name__} "
+                f"with {shapes_description} and "
+                f"CRS: '{self.crs}'. Loaded from: '{self.image_path}'."
             )
         else:
             return f"{self.__class__.__name__} loaded from '{self.image_path}'."

-    def __getitem__(self, camera: str) ->
+    def __getitem__(self, camera: str) -> rio.DatasetReader:
         """Return the datarray for the chosen camera."""
         return self.images[camera]

@@ -192,7 +170,11 @@ class Level0Product(ProductBase[MetadataLevel0]):
     def read_frame(self, cube: str, band_id: int, frame_idx: int) -> np.ndarray:
         """Extract a specific frame from a cube and band."""
         frame_offset = self.calculate_frame_offset(cube, band_id, frame_idx)
-
+
+        # Rasterio index starts at 1
+        frame_offset += 1
+
+        return self[cube].read(frame_offset)

     def read_band(self, cube: str, band_id: int) -> np.ndarray:
         """Extract a specific band from a cube"""
@@ -201,7 +183,12 @@ class Level0Product(ProductBase[MetadataLevel0]):
         # Calculate the final frame offset for this band and frame
         band_offset_ll = band_offsets[band_id]
         band_offset_ul = band_offset_ll + band_n_frames[band_id]
-
+
+        # Rasterio index starts at 1
+        band_offset_ll += 1
+        band_offset_ul += 1
+
+        return self[cube].read(list(np.arange(band_offset_ll, band_offset_ul)))

     def read_data_units(self) -> np.ndarray:
         """Read unit of product and validate they match between cameras"""
@@ -213,7 +200,7 @@ class Level0Product(ProductBase[MetadataLevel0]):
             e_ = "Cameras have different physical units stored to them."
             raise ValueError(e_)

-    def get_bad_pixel_mask(self, camera: str | None = None) ->
+    def get_bad_pixel_mask(self, camera: str | None = None) -> rio.DatasetReader:
         """Get the bad pixel mask associated to each camera of the L0 product

         Returns
@@ -226,7 +213,7 @@ class Level0Product(ProductBase[MetadataLevel0]):
         bad_pixel_filename = f"{camera}_per_frame_bad_pixel_mask.tif"
         return self._read_array(self.image_path / bad_pixel_filename)

-    def get_cloud_mask(self, camera: str | None = None) ->
+    def get_cloud_mask(self, camera: str | None = None) -> rio.DatasetReader:
         """Get the cloud mask associated to the product.

         Returns
@@ -240,12 +227,12 @@ class Level0Product(ProductBase[MetadataLevel0]):
         return self._read_array(self.image_path / bad_pixel_filename)

     def release_memory(self):
-        """Explicitely
-
-        NOTE: this function is implemented because of a memory leak inside the Rioxarray
-        library that doesn't release memory properly. Only use it when the image data is
-        not needed anymore.
+        """Explicitely closes the Rasterio DatasetReaders and releases the memory of
+        the `images` variable.
         """
+        for k in self.images.keys():
+            self.images[k].close()
+
         del self.images
         self.images = None

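The `+ 1` adjustments in `read_frame` and `read_band` exist because rasterio band indices are 1-based while the frame offsets computed from the product metadata are 0-based. A minimal sketch of that mapping, using a hypothetical cube file with at least three frames:

```python
import numpy as np
import rasterio as rio

with rio.open("vis.tif") as src:  # hypothetical cube, stored as <camera>.tif
    frame_offset = 0                    # 0-based offset derived from the metadata
    frame = src.read(frame_offset + 1)  # rasterio's first band is band 1

    # read_band builds a list of 1-based indices the same way
    band = src.read(list(np.arange(0, 3) + 1))  # frames 0..2 -> bands 1, 2, 3
```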
{kuva_reader-1.0.4 → kuva_reader-1.1.1}/kuva_reader/reader/level1.py

@@ -1,13 +1,12 @@
 from pathlib import Path
 from typing import cast

-import
-import xarray
-from kuva_reader import image_footprint
+import rasterio as rio
 from kuva_metadata import MetadataLevel1AB, MetadataLevel1C
 from pint import UnitRegistry
 from shapely import Polygon
-
+
+from kuva_reader import image_footprint

 from .product_base import ProductBase

@@ -36,10 +35,8 @@ class Level1ABProduct(ProductBase[MetadataLevel1AB]):
         Path to the folder containing the image.
     metadata: MetadataLevel1AB
         The metadata associated with the images
-    image:
-        The
-        them so lots of GIS functionality are available on them. For example, the GCPs
-        if any could be retrieved like so: `ds.rio.get_gcps()`
+    image: rasterio.DatasetReader
+        The Rasterio DatasetReader to open the image and other metadata with.
     data_tags: dict
         Tags saved along with the product. The tag "data_unit" shows what the unit of
         the product actually is.
@@ -54,20 +51,22 @@ class Level1ABProduct(ProductBase[MetadataLevel1AB]):
         super().__init__(image_path, metadata, target_ureg)

         self.image = cast(
-
-
+            rio.DatasetReader,
+            rio.open(self.image_path / "L1B.tif"),
         )
-
+
+        self.data_tags = self.image.tags()
         self.wavelengths = [
             b.wavelength.to("nm").magnitude for b in self.metadata.image.bands
         ]
-        self.crs = self.image.
+        self.crs = self.image.crs

     def __repr__(self):
         """Pretty printing of the object with the most important info"""
         if self.image is not None:
+            shape_str = f"({self.image.count}, {self.image.height}, {self.image.width})"
             return (
-                f"{self.__class__.__name__} with shape {
+                f"{self.__class__.__name__} with shape {shape_str} "
                 f"and wavelengths {self.wavelengths} (CRS: '{self.crs}'). "
                 f"Loaded from: '{self.image_path}'."
             )
@@ -110,7 +109,7 @@ class Level1ABProduct(ProductBase[MetadataLevel1AB]):

     def get_bad_pixel_mask(
         self, camera: str | None = None, per_band: bool = False
-    ) ->
+    ) -> rio.DatasetReader:
         """Get the bad pixel mask associated to each camera of the L0 product
         Returns
         -------
@@ -128,11 +127,8 @@ class Level1ABProduct(ProductBase[MetadataLevel1AB]):
         return self._read_array(self.image_path / bad_pixel_filename)

     def release_memory(self):
-        """Explicitely
-
-        NOTE: this function is implemented because of a memory leak inside the Rioxarray
-        library that doesn't release memory properly. Only use it when the image data is
-        not needed anymore.
+        """Explicitely closes the Rasterio DatasetReader and releases the memory of
+        the `image` variable.
         """
         del self.image
         self.image = None
@@ -159,10 +155,8 @@ class Level1CProduct(ProductBase[MetadataLevel1C]):
         Path to the folder containing the image.
     metadata: MetadataLevel1C
         The metadata associated with the images
-    image:
-        The
-        them so lots of GIS functionality are available on them. For example, the GCPs
-        if any could be retrieved like so: `ds.rio.get_gcps()`
+    image: rio.DatasetReader
+        The Rasterio DatasetReader to open the image and other metadata with.
     data_tags: dict
         Tags saved along with the product. The tag "data_unit" shows what the unit of
         the product actually is.
@@ -177,20 +171,22 @@ class Level1CProduct(ProductBase[MetadataLevel1C]):
         super().__init__(image_path, metadata, target_ureg)

         self.image = cast(
-
-
+            rio.DatasetReader,
+            rio.open(self.image_path / "L1C.tif"),
         )
-        self.data_tags = self.image.
+        self.data_tags = self.image.tags()
+
         self.wavelengths = [
             b.wavelength.to("nm").magnitude for b in self.metadata.image.bands
         ]
-        self.crs = self.image.
+        self.crs = self.image.crs

     def __repr__(self):
         """Pretty printing of the object with the most important info"""
         if self.image is not None:
+            shape_str = f"({self.image.count}, {self.image.height}, {self.image.width})"
             return (
-                f"{self.__class__.__name__} with shape {
+                f"{self.__class__.__name__} with shape {shape_str} "
                 f"and wavelengths {self.wavelengths} (CRS: '{self.crs}'). "
                 f"Loaded from: '{self.image_path}'."
             )
@@ -232,12 +228,10 @@ class Level1CProduct(ProductBase[MetadataLevel1C]):
         return metadata

     def release_memory(self):
-        """Explicitely
-
-        NOTE: this function is implemented because of a memory leak inside the Rioxarray
-        library that doesn't release memory properly. Only use it when the image data is
-        not needed anymore.
+        """Explicitely closes the Rasterio DatasetReader and releases the memory of
+        the `image` variable.
         """
+        self.image.close()
         del self.image
         self.image = None

{kuva_reader-1.0.4 → kuva_reader-1.1.1}/kuva_reader/reader/level2.py

@@ -1,12 +1,12 @@
 from pathlib import Path
 from typing import cast

-import
-from kuva_reader import image_footprint
+import rasterio as rio
 from kuva_metadata import MetadataLevel2A
 from pint import UnitRegistry
 from shapely import Polygon
-
+
+from kuva_reader import image_footprint

 from .product_base import ProductBase

@@ -32,10 +32,8 @@ class Level2AProduct(ProductBase[MetadataLevel2A]):
         Path to the folder containing the image.
     metadata: MetadataLevel2A
         The metadata associated with the images
-    image:
-        The
-        them so lots of GIS functionality are available on them. For example, the GCPs
-        if any could be retrieved like so: `ds.rio.get_gcps()`
+    image: rasterio.DatasetReader
+        The Rasterio DatasetReader to open the image and other metadata with.
     data_tags: dict
         Tags saved along with the product. The tag "data_unit" shows what the unit of
         the product actually is.
@@ -50,20 +48,22 @@ class Level2AProduct(ProductBase[MetadataLevel2A]):
         super().__init__(image_path, metadata, target_ureg)

         self.image = cast(
-
-
+            rio.DatasetReader,
+            rio.open(self.image_path / "L2A.tif"),
         )
-        self.data_tags = self.image.
+        self.data_tags = self.image.tags()
+
         self.wavelengths = [
             b.wavelength.to("nm").magnitude for b in self.metadata.image.bands
         ]
-        self.crs = self.image.
+        self.crs = self.image.crs

     def __repr__(self):
         """Pretty printing of the object with the most important info"""
         if self.image is not None:
+            shape_str = f"({self.image.count}, {self.image.height}, {self.image.width})"
             return (
-                f"{self.__class__.__name__} with shape {
+                f"{self.__class__.__name__} with shape {shape_str} "
                 f"and wavelengths {self.wavelengths} (CRS: '{self.crs}'). "
                 f"Loaded from: '{self.image_path}'."
             )
@@ -105,12 +105,10 @@ class Level2AProduct(ProductBase[MetadataLevel2A]):
         return metadata

     def release_memory(self):
-        """Explicitely
-
-        NOTE: this function is implemented because of a memory leak inside the Rioxarray
-        library that doesn't release memory properly. Only use it when the image data is
-        not needed anymore.
+        """Explicitely closes the Rasterio DatasetReader and releases the memory of
+        the `image` variable.
         """
+        self.image.close()
         del self.image
         self.image = None

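The reworked `release_memory` methods now close the underlying rasterio dataset(s) before dropping the reference, instead of relying on a garbage-collected rioxarray object. The same pattern, sketched outside the product classes with a hypothetical path:

```python
import rasterio as rio

image = rio.open("L2A.tif")  # hypothetical product GeoTIFF
try:
    data = image.read()
finally:
    # What Level1CProduct/Level2AProduct.release_memory (and the per-camera
    # loop in Level0Product) now do: close explicitly, then drop the reference.
    image.close()
    del image
```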
{kuva_reader-1.0.4 → kuva_reader-1.1.1}/kuva_reader/reader/product_base.py

@@ -2,11 +2,10 @@ from abc import ABCMeta, abstractmethod
 from pathlib import Path
 from typing import Generic, TypeVar, cast

-import
+import rasterio as rio
 from kuva_metadata.sections_common import MetadataBase
 from pint import UnitRegistry
 from pydantic import BaseModel
-from xarray import Dataset

 TMetadata = TypeVar("TMetadata", bound=BaseModel)

@@ -66,17 +65,17 @@ class ProductBase(Generic[TMetadata], metaclass=ABCMeta):
         pass

     @staticmethod
-    def _read_array(array_path: Path) ->
+    def _read_array(array_path: Path) -> rio.DatasetReader:
         if array_path.exists():
             return cast(
-
-
+                rio.DatasetReader,
+                rio.open(array_path),
             )
         else:
             e_ = f"Product does not contain the array to be read at '{array_path}'"
             raise ValueError(e_)

-    def get_bad_pixel_mask(self, camera: str | None = None) ->
+    def get_bad_pixel_mask(self, camera: str | None = None) -> rio.DatasetReader:
         """Get the bad pixel mask associated to the product.

         Parameters
@@ -94,7 +93,7 @@ class ProductBase(Generic[TMetadata], metaclass=ABCMeta):
             raise ValueError(e_)
         return self._read_array(self.image_path / "bad_pixel_mask_aggregated.tif")

-    def get_cloud_mask(self, camera: str | None = None) ->
+    def get_cloud_mask(self, camera: str | None = None) -> rio.DatasetReader:
         """Get the cloud mask associated to the product.

         Parameters
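Since `_read_array` and the mask getters now return `rasterio.DatasetReader` objects rather than xarray datasets, the mask pixels are pulled out with `read()`. A sketch under the assumption that the product folder (hypothetical here) contains the mask files; the getters raise `ValueError` when the corresponding file is missing:

```python
from pathlib import Path

from kuva_reader import Level2AProduct

product = Level2AProduct(Path("products/example_L2A"))  # hypothetical folder

# get_cloud_mask()/get_bad_pixel_mask() hand back rasterio datasets in 1.1.1,
# so the actual mask array comes out via read().
cloud_mask = product.get_cloud_mask()
mask = cloud_mask.read(1)
print(mask.shape, mask.dtype)
```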
{kuva_reader-1.0.4 → kuva_reader-1.1.1}/pyproject.toml

@@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api"

 [tool.poetry]
 name = "kuva-reader"
-version = "1.0.4"
+version = "1.1.1"
 description = "Manipulate the Kuva Space image and metadata formats"
 authors = ["Guillem Ballesteros <guillem@kuvaspace.com>" , "Lennert Antson <lennert.antson@kuvaspace.com>", "Arthur Vandenhoeke <arthur.vandenhoeke@kuvaspace.com>", "Olli Eloranta <olli.eloranta@kuvaspace.com>"]
 readme = "README.md"
@@ -28,10 +28,8 @@ numpy = "^1.26.4"
 numpy-quaternion = "^2022.4.4"
 pint = "^0.22"
 rasterio = "^1.4.1"
-
-
-kuva-geometry = "*"
-kuva-metadata = "*"
+kuva-geometry = "^1.0.1"
+kuva-metadata = "^1.1.1"

 [tool.ruff.lint]
 select = [ "E", "F", "A", "DTZ", "NPY", "I", "ISC", "B003", "B004", "B015", "PTH", "D100", "D101", "D102", "D103", "D104", "D105", "D200", "W191", "W291", "W293", "N801", "N804", "N805", "T100", "S105", "S106", "S108", "S604", "S602", "S609", "UP003", "UP005", "UP006", "UP007", "UP008", "UP032", "UP035", "RUF001", "RUF200", "RUF013", "C901", "COM818", "RSE102", "EM101",]
kuva_reader-1.0.4/kuva_reader/reader/image.py

@@ -1,203 +0,0 @@
-"""Utilities to process images related to product processing."""
-
-from typing import cast, overload
-
-import numpy as np
-import xarray
-from shapely.geometry import box
-from pyproj import Transformer
-from shapely import Polygon
-
-# Helper type for image processing purposes. The same operations work both for EO
-# DataArrays and Numpy arrays.
-ImageArray_ = np.ndarray | xarray.DataArray
-
-def image_footprint(image: xarray.DataArray, crs: str = "") -> Polygon:
-    """Return a product footprint as a shapely polygon
-
-    Parameters
-    ----------
-    image
-        The product image
-    crs, optional
-        CRS to convert to, by default "", keeping the image's CRS
-
-    Returns
-    -------
-    A shapely polygon footprint
-    """
-    if crs:
-        transformer = Transformer.from_crs(image.rio.crs, crs, always_xy=True)
-        bounds = image.rio.bounds()
-        minx, miny = transformer.transform(bounds[0], bounds[1])
-        maxx, maxy = transformer.transform(bounds[2], bounds[3])
-        footprint = box(minx, miny, maxx, maxy)
-    else:
-        footprint = box(*image.rio.bounds())
-    return footprint
-
-
-@overload
-def image_to_dtype_range(
-    img: np.ndarray,
-    dtype: np.dtype,
-    offset: float | None = None,
-    scale: float | None = None,
-) -> tuple[xarray.DataArray, float, float]: ...
-
-
-@overload
-def image_to_dtype_range(
-    img: xarray.DataArray,
-    dtype: np.dtype,
-    offset: float | None = None,
-    scale: float | None = None,
-) -> tuple[xarray.DataArray, float, float]: ...
-
-
-def image_to_dtype_range(
-    img: ImageArray_,
-    dtype: np.dtype,
-    offset: float | None = None,
-    scale: float | None = None,
-) -> tuple[ImageArray_, float, float]:
-    """Normalize an image to the bounds of whatever numpy datatype. E.g. np.uint16
-    results in a np.uint16 image with values between entire range [0, 65535]
-
-    Parameters
-    ----------
-    img
-        Image to normalize
-    dtype
-        Target data type, only integer subtypes currently sensible and are supported
-    offset, optional
-        Offset if that was already precomputed. If not, it will be calculated from `arr`
-    scale, optional
-        Scale if that was already precomputed. If not, it will be calculated from `arr`
-
-    Returns
-    -------
-    The normalized image along casted to given data type, along with the offset and
-    scale used to normalize it
-
-    Raises
-    ------
-    ValueError
-        Unsupported data type
-    """
-    if np.issubdtype(dtype, np.integer):
-        type_info = np.iinfo(dtype)
-    else:
-        e_ = f"Unsupported dtype {dtype} for normalization"
-        raise ValueError(e_)
-
-    dtype_min = type_info.min
-    dtype_max = type_info.max
-
-    if offset is None or scale is None:
-        offset_ = cast(float, np.min(img))
-        scale_ = cast(float, np.max(img) - offset_)
-    else:
-        offset_ = offset
-        scale_ = scale
-
-    normed_to_0_1 = (img - offset_) / scale_
-
-    normalized_image = normed_to_0_1 * (dtype_max - dtype_min) + dtype_min
-    normalized_image = normalized_image.astype(dtype)
-
-    return normalized_image, offset_, scale_
-
-
-@overload
-def image_to_uint16_range(img: np.ndarray) -> tuple[np.ndarray, float, float]: ...
-
-
-@overload
-def image_to_uint16_range(
-    img: xarray.DataArray,
-) -> tuple[xarray.DataArray, float, float]: ...
-
-
-def image_to_uint16_range(img: ImageArray_) -> tuple[ImageArray_, float, float]:
-    """Normalise image to bounds of uint16, see above function for details
-
-    Parameters
-    ----------
-    img
-        Image to normalize
-
-    Returns
-    -------
-    The normalized image along casted to given data type, along with the offset and
-    scale used to normalize it
-    """
-    return image_to_dtype_range(img, np.dtype(np.uint16))
-
-
-@overload
-def image_to_original_range(
-    img: np.ndarray,
-    offset: float,
-    scale: float,
-    dtype: np.dtype | None = None,
-) -> xarray.DataArray: ...
-
-
-@overload
-def image_to_original_range(
-    img: xarray.DataArray,
-    offset: float,
-    scale: float,
-    dtype: np.dtype | None = None,
-) -> xarray.DataArray: ...
-
-
-def image_to_original_range(
-    img: ImageArray_,
-    offset: float,
-    scale: float,
-    dtype: np.dtype | None = None,
-) -> ImageArray_:
-    """Revert normalisation applied to an image. The image 'arr' must have the same
-    data type as the result from normalization, or it must be given separately
-
-    Parameters
-    ----------
-    arr
-        Image to revert back to original values
-    offset
-        Offset that was applied to the image
-    scale
-        Scale that was applied to the image
-    dtype, optional
-        The data type that the image was casted to during normalization, by default None
-        where the data type of `arr` will be assumed to be correct.
-
-    Returns
-    -------
-    Image that is back in original range of values before normalization
-
-    Raises
-    ------
-    ValueError
-        Unsupported data type
-    """
-    if not dtype:
-        dtype = img.dtype
-
-    # Check real bounds from numpy data types
-    if np.issubdtype(dtype, np.integer) and isinstance(dtype, np.dtype):
-        type_info = np.iinfo(dtype)
-    else:
-        e_ = f"Unsupported dtype {dtype} for normalization"
-        raise ValueError(e_)
-
-    dtype_min = type_info.min
-    dtype_max = type_info.max
-
-    # Reverse the normalization
-    denormed_to_0_1 = (img - dtype_min) / (dtype_max - dtype_min)
-    original_image = denormed_to_0_1 * scale + offset
-
-    return original_image
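The removed helpers implemented a plain linear scale/offset normalization. For reference, the round trip they performed can be reproduced with numpy alone; the small array below is made up:

```python
import numpy as np

img = np.array([[0.2, 0.5], [0.9, 1.4]])  # made-up radiance-like values

# image_to_dtype_range: stretch to the full uint16 range, remembering offset/scale
offset = float(img.min())
scale = float(img.max() - offset)
lo, hi = np.iinfo(np.uint16).min, np.iinfo(np.uint16).max
normalized = (((img - offset) / scale) * (hi - lo) + lo).astype(np.uint16)

# image_to_original_range: undo the stretch using the stored offset/scale
restored = ((normalized - lo) / (hi - lo)) * scale + offset  # ~= img up to quantization
```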
Files without changes:
- {kuva_reader-1.0.4 → kuva_reader-1.1.1}/kuva_reader/py.typed
- {kuva_reader-1.0.4 → kuva_reader-1.1.1}/kuva_reader/reader/__init__.py
- {kuva_reader-1.0.4 → kuva_reader-1.1.1}/kuva_reader/reader/py.typed
- {kuva_reader-1.0.4 → kuva_reader-1.1.1}/kuva_reader/reader/read.py
- {kuva_reader-1.0.4 → kuva_reader-1.1.1}/kuva_reader/reader/utils.py