processinator 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,20 @@
1
+ Metadata-Version: 2.3
2
+ Name: processinator
3
+ Version: 0.1.0
4
+ Summary: Astronomy image processing library
5
+ Author: Steven
6
+ Author-email: Steven <erewhon@flatland.org>
7
+ Requires-Dist: astropy>=7.0
8
+ Requires-Dist: numpy>=1.26
9
+ Requires-Dist: pillow>=11.0
10
+ Requires-Dist: pytest>=8.0 ; extra == 'dev'
11
+ Requires-Dist: ruff>=0.9 ; extra == 'dev'
12
+ Requires-Python: >=3.12
13
+ Provides-Extra: dev
14
+ Description-Content-Type: text/markdown
15
+
16
+ # processinator
17
+
18
+ Much photo. So process. Wow!
19
+
20
+ This is mostly a library for processing astrophotographs automatically. It has some of its own algorithms, but it will also leverage other tools, including Siril and Seti Astro Pro.
@@ -0,0 +1,5 @@
1
+ # processinator
2
+
3
+ Much photo. So process. Wow!
4
+
5
+ This is mostly a library for processing astrophotographs automatically. It has some of its own algorithms, but it will also leverage other tools, including Siril and Seti Astro Pro.
@@ -0,0 +1,34 @@
1
+ [project]
2
+ name = "processinator"
3
+ version = "0.1.0"
4
+ description = "Astronomy image processing library"
5
+ readme = "README.md"
6
+ authors = [
7
+ { name = "Steven", email = "erewhon@flatland.org" }
8
+ ]
9
+ requires-python = ">=3.12"
10
+ dependencies = [
11
+ "astropy>=7.0",
12
+ "numpy>=1.26",
13
+ "pillow>=11.0",
14
+ ]
15
+
16
+ [project.optional-dependencies]
17
+ dev = [
18
+ "pytest>=8.0",
19
+ "ruff>=0.9",
20
+ ]
21
+
22
+ [build-system]
23
+ requires = ["uv_build>=0.10.9,<0.11.0"]
24
+ build-backend = "uv_build"
25
+
26
+ [tool.ruff]
27
+ line-length = 100
28
+ target-version = "py312"
29
+
30
+ [tool.ruff.lint]
31
+ select = ["E", "F", "I", "UP"]
32
+
33
+ [tool.pytest.ini_options]
34
+ testpaths = ["tests"]
@@ -0,0 +1,10 @@
1
+ """Processinator - astronomy image processing library."""
2
+
3
+ from processinator.stretching import StretchAlgorithm, fits_to_image, read_fits, stretch
4
+
5
+ __all__ = [
6
+ "StretchAlgorithm",
7
+ "fits_to_image",
8
+ "read_fits",
9
+ "stretch",
10
+ ]
File without changes
@@ -0,0 +1,9 @@
1
+ from processinator.stretching.algorithms import StretchAlgorithm, stretch
2
+ from processinator.stretching.fits_io import fits_to_image, read_fits
3
+
4
+ __all__ = [
5
+ "StretchAlgorithm",
6
+ "stretch",
7
+ "read_fits",
8
+ "fits_to_image",
9
+ ]
@@ -0,0 +1,252 @@
1
+ """Image stretching algorithms for astronomy images.
2
+
3
+ Converts linear FITS data (where most detail is in low pixel values) into
4
+ visually useful images by applying nonlinear transfer functions.
5
+ """
6
+
7
+ from enum import Enum
8
+
9
+ import numpy as np
10
+ from numpy.typing import NDArray
11
+
12
+
13
class StretchAlgorithm(Enum):
    """Enumerates the supported stretch algorithms.

    Each member's string value is a stable identifier suitable for use in
    configuration files or command-line flags.
    """

    # Midtones Transfer Function (GraXpert-style). Default. Good all-around choice.
    MTF = "mtf"

    # Inverse hyperbolic sine. Preserves color ratios well.
    ARCSINH = "arcsinh"

    # Logarithmic stretch. Good for high dynamic range images.
    LOG = "log"

    # Simple percentile-based linear stretch.
    LINEAR = "linear"

    # Gamma correction targeting a specific median brightness.
    STATISTICAL = "statistical"
30
+
31
+
32
+ def stretch(
33
+ data: NDArray[np.floating],
34
+ algorithm: StretchAlgorithm = StretchAlgorithm.MTF,
35
+ **kwargs: float,
36
+ ) -> NDArray[np.floating]:
37
+ """Stretch image data from linear to nonlinear for display.
38
+
39
+ Args:
40
+ data: Image array, shape (H, W) or (H, W, 3). Values should be in
41
+ their original FITS range (not pre-normalized).
42
+ algorithm: Which stretch algorithm to use.
43
+ **kwargs: Algorithm-specific parameters (see individual functions).
44
+
45
+ Returns:
46
+ Stretched image normalized to [0.0, 1.0], same shape as input.
47
+ """
48
+ normalized = _normalize_to_01(data)
49
+
50
+ match algorithm:
51
+ case StretchAlgorithm.MTF:
52
+ return _stretch_mtf(normalized, **kwargs)
53
+ case StretchAlgorithm.ARCSINH:
54
+ return _stretch_arcsinh(normalized, **kwargs)
55
+ case StretchAlgorithm.LOG:
56
+ return _stretch_log(normalized, **kwargs)
57
+ case StretchAlgorithm.LINEAR:
58
+ return _stretch_linear(normalized, **kwargs)
59
+ case StretchAlgorithm.STATISTICAL:
60
+ return _stretch_statistical(normalized, **kwargs)
61
+
62
+
63
+ def _normalize_to_01(data: NDArray[np.floating]) -> NDArray[np.floating]:
64
+ """Normalize raw FITS data to [0, 1] range."""
65
+ result = data.astype(np.float64)
66
+ vmin = np.nanmin(result)
67
+ vmax = np.nanmax(result)
68
+ if vmax - vmin == 0:
69
+ return np.zeros_like(result)
70
+ return (result - vmin) / (vmax - vmin)
71
+
72
+
73
+ # ---------------------------------------------------------------------------
74
+ # MTF (Midtones Transfer Function) - adapted from pyscopinator/GraXpert
75
+ # ---------------------------------------------------------------------------
76
+
77
+
78
+ def _mtf(m: float, x: NDArray[np.floating]) -> NDArray[np.floating]:
79
+ """Apply Midtones Transfer Function.
80
+
81
+ MTF(m, x) = (m - 1) * x / ((2m - 1) * x - m)
82
+ """
83
+ numerator = (m - 1.0) * x
84
+ denominator = (2.0 * m - 1.0) * x - m
85
+ # Avoid division by zero
86
+ safe = np.where(np.abs(denominator) < 1e-10, x, numerator / denominator)
87
+ return np.clip(safe, 0.0, 1.0)
88
+
89
+
90
def _stretch_mtf(
    data: NDArray[np.floating],
    bg_percent: float = 0.15,
    sigma: float = 3.0,
) -> NDArray[np.floating]:
    """MTF stretch using background/sigma clipping.

    Each channel is processed independently: the shadow clip point is set
    sigma MADs (scaled to approximate a standard deviation) below the
    channel median, the channel is renormalized between the clip points,
    and a midtone balance is solved so the median lands at ``bg_percent``.

    Args:
        data: Normalized [0, 1] image data.
        bg_percent: Target background level (0-1). Default 0.15.
        sigma: Number of sigma above background for shadow clipping. Default 3.0.
    """

    def transform_channel(channel: NDArray[np.floating]) -> NDArray[np.floating]:
        # Statistics come from strictly interior pixels; fully black or
        # saturated pixels would bias the median/MAD.
        pixels = channel.ravel()
        interior = pixels[(pixels > 0.0) & (pixels < 1.0)]
        if interior.size == 0:
            return channel

        med = np.median(interior)
        mad = np.median(np.abs(interior - med))

        # 1.4826 converts MAD to an estimate of the standard deviation.
        shadow = max(0.0, med - sigma * mad * 1.4826)
        highlight = 1.0

        rescaled = np.clip(channel, shadow, highlight)
        if highlight - shadow > 0:
            rescaled = (rescaled - shadow) / (highlight - shadow)

        # Solve for the midtone that maps the (renormalized) median onto
        # the desired background level.
        med_norm = (med - shadow) / (highlight - shadow)
        if 0 < med_norm < 1 and bg_percent > 0:
            midtone = (
                med_norm
                * (bg_percent - 1.0)
                / (2.0 * bg_percent * med_norm - bg_percent - med_norm)
            )
            midtone = np.clip(midtone, 0.01, 0.99)
        else:
            midtone = 0.5

        return _mtf(midtone, rescaled)

    if data.ndim == 2:
        return transform_channel(data.copy())

    result = data.copy()
    for idx in range(result.shape[2]):
        result[:, :, idx] = transform_channel(result[:, :, idx])
    return result
150
+
151
+
152
+ # ---------------------------------------------------------------------------
153
+ # Arcsinh stretch - adapted from astra
154
+ # ---------------------------------------------------------------------------
155
+
156
+
157
+ def _stretch_arcsinh(
158
+ data: NDArray[np.floating],
159
+ factor: float = 0.15,
160
+ ) -> NDArray[np.floating]:
161
+ """Inverse hyperbolic sine stretch. Preserves color ratios.
162
+
163
+ Args:
164
+ data: Normalized [0, 1] image data.
165
+ factor: Controls stretch aggressiveness. Smaller = more aggressive. Default 0.15.
166
+ """
167
+ scale = 1.0 / factor
168
+ result = np.arcsinh(data * scale) / np.arcsinh(scale)
169
+ return np.clip(result, 0.0, 1.0)
170
+
171
+
172
+ # ---------------------------------------------------------------------------
173
+ # Log stretch - adapted from astra/pyscopinator
174
+ # ---------------------------------------------------------------------------
175
+
176
+
177
+ def _stretch_log(
178
+ data: NDArray[np.floating],
179
+ factor: float = 0.15,
180
+ ) -> NDArray[np.floating]:
181
+ """Logarithmic stretch. Good for high dynamic range.
182
+
183
+ Args:
184
+ data: Normalized [0, 1] image data.
185
+ factor: Controls stretch aggressiveness. Smaller = more aggressive. Default 0.15.
186
+ """
187
+ offset = factor * 0.01
188
+ result = np.log1p(data / offset) / np.log1p(1.0 / offset)
189
+ return np.clip(result, 0.0, 1.0)
190
+
191
+
192
+ # ---------------------------------------------------------------------------
193
+ # Linear stretch
194
+ # ---------------------------------------------------------------------------
195
+
196
+
197
+ def _stretch_linear(
198
+ data: NDArray[np.floating],
199
+ low_percentile: float = 1.0,
200
+ high_percentile: float = 99.0,
201
+ ) -> NDArray[np.floating]:
202
+ """Simple percentile-based linear stretch.
203
+
204
+ Args:
205
+ data: Normalized [0, 1] image data.
206
+ low_percentile: Lower clipping percentile. Default 1.0.
207
+ high_percentile: Upper clipping percentile. Default 99.0.
208
+ """
209
+ vmin = np.percentile(data, low_percentile)
210
+ vmax = np.percentile(data, high_percentile)
211
+ if vmax - vmin == 0:
212
+ return data
213
+ result = (data - vmin) / (vmax - vmin)
214
+ return np.clip(result, 0.0, 1.0)
215
+
216
+
217
+ # ---------------------------------------------------------------------------
218
+ # Statistical stretch (gamma correction) - adapted from astra
219
+ # ---------------------------------------------------------------------------
220
+
221
+
222
+ def _stretch_statistical(
223
+ data: NDArray[np.floating],
224
+ target_median: float = 0.15,
225
+ low_percentile: float = 0.5,
226
+ high_percentile: float = 99.9,
227
+ ) -> NDArray[np.floating]:
228
+ """Stretch using percentile clipping then gamma correction.
229
+
230
+ Clips to percentile range, then applies gamma correction to achieve
231
+ a target median brightness.
232
+
233
+ Args:
234
+ data: Normalized [0, 1] image data.
235
+ target_median: Desired median value after stretch. Default 0.15.
236
+ low_percentile: Black point percentile. Default 0.5.
237
+ high_percentile: White point percentile. Default 99.9.
238
+ """
239
+ vmin = np.percentile(data, low_percentile)
240
+ vmax = np.percentile(data, high_percentile)
241
+ if vmax - vmin == 0:
242
+ return data
243
+
244
+ result = np.clip((data - vmin) / (vmax - vmin), 0.0, 1.0)
245
+
246
+ current_median = np.median(result[result > 0])
247
+ if current_median > 0 and current_median != target_median:
248
+ gamma = np.log(target_median) / np.log(current_median)
249
+ gamma = np.clip(gamma, 0.2, 5.0)
250
+ result = np.power(result, gamma)
251
+
252
+ return np.clip(result, 0.0, 1.0)
@@ -0,0 +1,105 @@
1
+ """FITS file reading and image output."""
2
+
3
+ from pathlib import Path
4
+
5
+ import numpy as np
6
+ from astropy.io import fits
7
+ from numpy.typing import NDArray
8
+ from PIL import Image
9
+
10
+ from processinator.stretching.algorithms import StretchAlgorithm, stretch
11
+
12
+
13
def read_fits(file_path: str | Path) -> tuple[NDArray[np.floating], dict]:
    """Read a FITS file, returning image data and header metadata.

    Handles common FITS layouts:
    - (H, W) grayscale
    - (3, H, W) RGB channels-first (transposed to channels-last)
    - (H, W, 3) RGB channels-last

    Args:
        file_path: Path to the FITS file.

    Returns:
        Tuple of (image_data as float64, header as dict).

    Raises:
        FileNotFoundError: If the file does not exist.
        ValueError: If the FITS file contains no image data.
    """
    path = Path(file_path)
    if not path.exists():
        raise FileNotFoundError(f"FITS file not found: {path}")

    image_data: NDArray[np.floating] | None = None
    header: dict = {}
    with fits.open(path) as hdul:
        # Take the first HDU that carries at least a 2-D image.
        for hdu in hdul:
            if hdu.data is not None and hdu.data.ndim >= 2:
                image_data = hdu.data.astype(np.float64)
                header = dict(hdu.header)
                break

    if image_data is None:
        raise ValueError(f"No image data found in FITS file: {path}")

    # Canonicalize the array layout to (H, W) or (H, W, 3).
    if image_data.ndim == 2:
        return image_data, header
    if image_data.ndim == 3:
        if image_data.shape[0] == 3:
            # (3, H, W) -> (H, W, 3)
            return np.transpose(image_data, (1, 2, 0)), header
        if image_data.shape[2] == 3:
            return image_data, header
        raise ValueError(
            f"Unexpected FITS shape: {image_data.shape}. "
            "Expected (H, W), (3, H, W), or (H, W, 3)."
        )
    raise ValueError(f"Unexpected FITS dimensions: {image_data.ndim}. Expected 2 or 3.")
63
+
64
+
65
def fits_to_image(
    fits_path: str | Path,
    output_path: str | Path | None = None,
    algorithm: StretchAlgorithm = StretchAlgorithm.MTF,
    output_format: str = "PNG",
    **stretch_kwargs: float,
) -> Image.Image:
    """Read a FITS file, apply stretching, and produce a displayable image.

    Args:
        fits_path: Path to the input FITS file.
        output_path: If provided, save the image to this path.
        algorithm: Stretch algorithm to use.
        output_format: Image format for saving ("PNG" or "JPEG").
        **stretch_kwargs: Passed through to the stretch algorithm.

    Returns:
        PIL Image object.
    """
    raw, _ = read_fits(fits_path)
    stretched = stretch(raw, algorithm=algorithm, **stretch_kwargs)

    # Quantize the [0, 1] floats down to 8-bit for display.
    pixels = (stretched * 255.0).clip(0, 255).astype(np.uint8)
    mode = "L" if pixels.ndim == 2 else "RGB"
    image = Image.fromarray(pixels, mode=mode)

    if output_path is not None:
        save_kwargs: dict = {}
        if output_format.upper() == "JPEG":
            save_kwargs["quality"] = 95
            # NOTE(review): JPEG can store grayscale ('L') directly; this
            # RGB conversion looks unnecessary but is kept to preserve
            # existing behavior — confirm before removing.
            if image.mode == "L":
                image = image.convert("RGB")
        image.save(Path(output_path), format=output_format, **save_kwargs)

    return image