setiastrosuitepro-1.8.0.post3-py3-none-any.whl → setiastrosuitepro-1.8.1.post2-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of setiastrosuitepro might be problematic.
- setiastro/saspro/__main__.py +12 -1
- setiastro/saspro/_generated/build_info.py +2 -2
- setiastro/saspro/cosmicclarity_engines/darkstar_engine.py +22 -2
- setiastro/saspro/cosmicclarity_engines/denoise_engine.py +68 -15
- setiastro/saspro/cosmicclarity_engines/satellite_engine.py +7 -3
- setiastro/saspro/cosmicclarity_engines/sharpen_engine.py +371 -98
- setiastro/saspro/cosmicclarity_engines/superres_engine.py +1 -0
- setiastro/saspro/model_manager.py +65 -0
- setiastro/saspro/model_workers.py +58 -24
- setiastro/saspro/ops/settings.py +45 -8
- setiastro/saspro/planetprojection.py +68 -36
- setiastro/saspro/resources.py +18 -14
- setiastro/saspro/runtime_torch.py +571 -127
- setiastro/saspro/star_alignment.py +262 -210
- setiastro/saspro/widgets/spinboxes.py +5 -7
- {setiastrosuitepro-1.8.0.post3.dist-info → setiastrosuitepro-1.8.1.post2.dist-info}/METADATA +1 -1
- {setiastrosuitepro-1.8.0.post3.dist-info → setiastrosuitepro-1.8.1.post2.dist-info}/RECORD +21 -21
- {setiastrosuitepro-1.8.0.post3.dist-info → setiastrosuitepro-1.8.1.post2.dist-info}/WHEEL +0 -0
- {setiastrosuitepro-1.8.0.post3.dist-info → setiastrosuitepro-1.8.1.post2.dist-info}/entry_points.txt +0 -0
- {setiastrosuitepro-1.8.0.post3.dist-info → setiastrosuitepro-1.8.1.post2.dist-info}/licenses/LICENSE +0 -0
- {setiastrosuitepro-1.8.0.post3.dist-info → setiastrosuitepro-1.8.1.post2.dist-info}/licenses/license.txt +0 -0
setiastro/saspro/__main__.py
CHANGED
@@ -538,7 +538,18 @@ def _bootstrap_imports():
     add_runtime_to_sys_path(status_cb=lambda *_: None)
     _ban_shadow_torch_paths(status_cb=lambda *_: None)
     _purge_bad_torch_from_sysmodules(status_cb=lambda *_: None)
-
+    _update_splash(QCoreApplication.translate("Splash", "Preparing AI runtime cache..."), 7)
+    try:
+        from setiastro.saspro.runtime_torch import prewarm_torch_cache
+        prewarm_torch_cache(
+            status_cb=lambda *_: None,  # keep console clean during splash
+            require_torchaudio=True,
+            ensure_venv=True,
+            ensure_numpy=False,
+            validate_marker=True,
+        )
+    except Exception:
+        pass
     _update_splash(QCoreApplication.translate("Splash", "Loading standard libraries..."), 10)
 
     # ----------------------------------------
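The new prewarm block is deliberately defensive: the import happens inside the try, progress output is silenced, and any failure is swallowed so cache warm-up can never block startup. A minimal sketch of that pattern, with a hypothetical warm_cache() standing in for prewarm_torch_cache:

    # Best-effort warm-up during the splash screen; the module and function names are illustrative.
    def warm_optional_runtime(update_splash) -> None:
        update_splash("Preparing AI runtime cache...", 7)
        try:
            from myapp.runtime import warm_cache   # import inside try: module may be absent or broken
            warm_cache(status_cb=lambda *_: None)  # silence progress output during the splash
        except Exception:
            pass                                   # warm-up is optional; the runtime loads lazily later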
setiastro/saspro/_generated/build_info.py
CHANGED
@@ -1,3 +1,3 @@
 # Auto-generated at build time. Do not edit.
-BUILD_TIMESTAMP = "2026-01-
-APP_VERSION = "1.8.
+BUILD_TIMESTAMP = "2026-01-28T14:34:03Z"
+APP_VERSION = "1.8.1.post2"
setiastro/saspro/cosmicclarity_engines/darkstar_engine.py
CHANGED
@@ -7,6 +7,7 @@ from typing import Any, Callable, Optional
 import numpy as np
 
 from setiastro.saspro.resources import get_resources
+from setiastro.saspro.runtime_torch import _user_runtime_dir, _venv_paths, _check_cuda_in_venv
 
 # Optional deps
 try:
@@ -44,13 +45,18 @@ def _autocast_context(torch, device) -> Any:
             major, minor = torch.cuda.get_device_capability()
             cap = float(f"{major}.{minor}")
             if cap >= 8.0:
-                # Preferred API (torch >= 1.10-ish; definitely in 2.x)
                 if hasattr(torch, "amp") and hasattr(torch.amp, "autocast"):
                     return torch.amp.autocast(device_type="cuda")
-                # Fallback for older torch
                 return torch.cuda.amp.autocast()
+
+        elif hasattr(device, "type") and device.type == "mps":
+            # MPS often benefits from autocast in newer torch versions
+            if hasattr(torch, "amp") and hasattr(torch.amp, "autocast"):
+                return torch.amp.autocast(device_type="mps")
+
     except Exception:
         pass
+
     return _nullcontext()
 
 
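This is the only behavioural change in _autocast_context: CUDA autocast stays gated on compute capability 8.0 or newer, and MPS devices now also get torch.amp.autocast when it exists. A self-contained sketch of the same selection logic; the helper name and exact structure are illustrative, not the package's code:

    import contextlib
    import torch

    def pick_autocast(device: torch.device):
        """Return an autocast context for Ampere-or-newer CUDA or for MPS, else a no-op context."""
        try:
            if device.type == "cuda":
                major, minor = torch.cuda.get_device_capability(device)
                if float(f"{major}.{minor}") >= 8.0:
                    return torch.amp.autocast(device_type="cuda")
            elif device.type == "mps" and hasattr(torch.amp, "autocast"):
                return torch.amp.autocast(device_type="mps")
        except Exception:
            pass
        return contextlib.nullcontext()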
@@ -365,7 +371,21 @@ def load_darkstar_models(*, use_gpu: bool, color: bool, status_cb=print) -> Dark
         m = DarkStarModels(device=dev, is_onnx=False, model=net, torch=torch, chunk_size=512)
         _MODELS_CACHE[key] = m
         return m
+    # ---------------- MPS (torch) ----------------
+    if use_gpu and hasattr(torch, "backends") and hasattr(torch.backends, "mps") and torch.backends.mps.is_available():
+        backend_id = "mps"
+        key = (tag, backend_id)
+        if key in _MODELS_CACHE:
+            return _MODELS_CACHE[key]
 
+        dev = torch.device("mps")
+        status_cb("Dark Star: using MPS")
+        Net = _build_darkstar_torch_models(torch)
+        net = Net(pth, None).eval().to(dev)
+
+        m = DarkStarModels(device=dev, is_onnx=False, model=net, torch=torch, chunk_size=512)
+        _MODELS_CACHE[key] = m
+        return m
     # ---------------- DirectML (torch-directml) ----------------
     if use_gpu and is_windows:
         try:
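Taken together with the existing branches, the loader now probes backends in the order CUDA, MPS, DirectML (Windows only), then CPU, caching the built model per (tag, backend). A condensed sketch of that resolution order; the function name and the torch_directml usage are illustrative, not the package's API:

    import platform
    import torch

    def resolve_device(use_gpu: bool):
        """Pick a torch device in the same preference order the engine probes."""
        if use_gpu:
            if torch.cuda.is_available():
                return torch.device("cuda")
            mps = getattr(torch.backends, "mps", None)
            if mps is not None and mps.is_available():
                return torch.device("mps")
            if platform.system() == "Windows":
                try:
                    import torch_directml  # optional dependency on Windows
                    return torch_directml.device()
                except Exception:
                    pass
        return torch.device("cpu")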
setiastro/saspro/cosmicclarity_engines/denoise_engine.py
CHANGED
@@ -12,6 +12,7 @@ import cv2
 
 
 from setiastro.saspro.resources import get_resources
+from setiastro.saspro.runtime_torch import _user_runtime_dir, _venv_paths, _check_cuda_in_venv
 
 warnings.filterwarnings("ignore")
 
@@ -50,15 +51,19 @@ def _autocast_context(torch, device) -> Any:
             major, minor = torch.cuda.get_device_capability()
             cap = float(f"{major}.{minor}")
             if cap >= 8.0:
-                # Preferred API (torch >= 1.10-ish; definitely in 2.x)
                 if hasattr(torch, "amp") and hasattr(torch.amp, "autocast"):
                     return torch.amp.autocast(device_type="cuda")
-                # Fallback for older torch
                 return torch.cuda.amp.autocast()
+
+        elif hasattr(device, "type") and device.type == "mps":
+            # MPS often benefits from autocast in newer torch versions
+            if hasattr(torch, "amp") and hasattr(torch.amp, "autocast"):
+                return torch.amp.autocast(device_type="mps")
+
     except Exception:
         pass
-    return _nullcontext()
 
+    return _nullcontext()
 
 
 # ----------------------------
@@ -148,7 +153,18 @@ def load_models(use_gpu: bool = True, status_cb=print) -> Dict[str, Any]:
                   f"{'onnx' if models['is_onnx'] else 'torch'} / device={models['device']!r}")
         _cached_models[key] = models
         return models
-
+    # >>> ADD THIS BLOCK HERE <<<
+    # 2) MPS (macOS Apple Silicon)
+    if use_gpu and hasattr(torch, "backends") and hasattr(torch.backends, "mps") and torch.backends.mps.is_available():
+        device = torch.device("mps")
+        status_cb("CosmicClarity Denoise: using MPS")
+        mono_model = _load_torch_model(torch, device, R.CC_DENOISE_PTH)
+        models = {"device": device, "is_onnx": False, "mono_model": mono_model, "torch": torch}
+        status_cb(f"Denoise backend resolved: "
+                  f"{'onnx' if models['is_onnx'] else 'torch'} / device={models['device']!r}")
+        _cached_models[key] = models
+        return models
+    # >>> END INSERT <<<
     # 2) Torch-DirectML (Windows)
     if use_gpu and is_windows:
         try:
@@ -320,16 +336,19 @@ def denoise_chroma(cb: np.ndarray,
 def split_image_into_chunks_with_overlap(image, chunk_size, overlap):
     height, width = image.shape[:2]
     chunks = []
-    step_size = chunk_size - overlap
+    step_size = chunk_size - overlap
 
     for i in range(0, height, step_size):
         for j in range(0, width, step_size):
             end_i = min(i + chunk_size, height)
             end_j = min(j + chunk_size, width)
+            if end_i <= i or end_j <= j:
+                continue
             chunk = image[i:end_i, j:end_j]
-            chunks.append((chunk, i, j))
+            chunks.append((chunk, i, j))
     return chunks
 
+
 def blend_images(before, after, amount):
     return (1 - amount) * before + amount * after
 
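As a concrete check of the chunking math: for a 300x300 mono image with chunk_size=256 and overlap=64, step_size is 192, so chunk origins fall at rows and columns 0 and 192 and the edge chunks are clipped to 108 pixels; the new guard only skips chunks whose clipped extent would be empty. A small usage sketch, assuming the module imports cleanly outside the application:

    import numpy as np
    from setiastro.saspro.cosmicclarity_engines.denoise_engine import split_image_into_chunks_with_overlap

    img = np.zeros((300, 300), dtype=np.float32)
    chunks = split_image_into_chunks_with_overlap(img, chunk_size=256, overlap=64)
    for chunk, i, j in chunks:
        print(i, j, chunk.shape)
    # (0, 0, (256, 256)), (0, 192, (256, 108)), (192, 0, (108, 256)), (192, 192, (108, 108))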
@@ -337,29 +356,60 @@ def stitch_chunks_ignore_border(chunks, image_shape, border_size: int = 16):
     """
     chunks: list of (chunk, i, j) or (chunk, i, j, is_edge)
     image_shape: (H,W)
+    Robust to boundary clipping (prevents 256x256 -> 256x0 broadcasts).
     """
     H, W = image_shape
     stitched = np.zeros((H, W), dtype=np.float32)
     weights = np.zeros((H, W), dtype=np.float32)
 
     for entry in chunks:
-        # accept both 3-tuple and 4-tuple
         if len(entry) == 3:
             chunk, i, j = entry
         else:
             chunk, i, j, _ = entry
 
         h, w = chunk.shape[:2]
+        if h <= 0 or w <= 0:
+            continue
+
         bh = min(border_size, h // 2)
         bw = min(border_size, w // 2)
 
+        # inner region in chunk coords
+        y0 = i + bh
+        y1 = i + h - bh
+        x0 = j + bw
+        x1 = j + w - bw
+
+        if y1 <= y0 or x1 <= x0:
+            continue
+
         inner = chunk[bh:h-bh, bw:w-bw]
-
-
+
+        # clip destination to image bounds
+        yy0 = max(0, y0)
+        yy1 = min(H, y1)
+        xx0 = max(0, x0)
+        xx1 = min(W, x1)
+
+        if yy1 <= yy0 or xx1 <= xx0:
+            continue
+
+        # clip source to match clipped destination
+        sy0 = yy0 - y0
+        sy1 = sy0 + (yy1 - yy0)
+        sx0 = xx0 - x0
+        sx1 = sx0 + (xx1 - xx0)
+
+        src = inner[sy0:sy1, sx0:sx1]
+
+        stitched[yy0:yy1, xx0:xx1] += src
+        weights[yy0:yy1, xx0:xx1] += 1.0
 
     stitched /= np.maximum(weights, 1.0)
     return stitched
 
+
 def replace_border(original_image, processed_image, border_size=16):
     # Ensure the dimensions of both images match
     if original_image.shape != processed_image.shape:
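The point of the new index bookkeeping is that the destination window is clipped to the image and the source window is shrunk by exactly the same amount, so the stitched[yy0:yy1, xx0:xx1] += src accumulation always has matching shapes even for ragged edge chunks (the 256x256 vs 256x0 broadcast the docstring mentions). A standalone sketch of just that clipping arithmetic, separate from the package function:

    def clip_span(dst_start, dst_end, limit):
        """Clip [dst_start, dst_end) to [0, limit); return (dst0, dst1, src_offset, length) or None."""
        d0 = max(0, dst_start)
        d1 = min(limit, dst_end)
        if d1 <= d0:
            return None                      # nothing left to paste
        return d0, d1, d0 - dst_start, d1 - d0

    print(clip_span(208, 284, 300))          # (208, 284, 0, 76)  -> fully inside, untouched
    print(clip_span(250, 320, 300))          # (250, 300, 0, 50)  -> destination and source both shortened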
@@ -476,16 +526,19 @@ def denoise_channel(channel: np.ndarray, models: Dict[str, Any], *, progress_cb:
         original_chunk_shape = chunk.shape
 
         if is_onnx:
-
-
-
+            h, w = original_chunk_shape  # <- the real chunk size
+
+            chunk_input = chunk[np.newaxis, np.newaxis, :, :].astype(np.float32)  # (1,1,h,w)
+            chunk_input = np.tile(chunk_input, (1, 3, 1, 1))  # (1,3,h,w)
+
+            if h != chunk_size or w != chunk_size:
                 padded = np.zeros((1, 3, chunk_size, chunk_size), dtype=np.float32)
-                padded[:, :, :
+                padded[:, :, :h, :w] = chunk_input
                 chunk_input = padded
 
             input_name = model.get_inputs()[0].name
-            out = model.run(None, {input_name: chunk_input})[0]
-            denoised_chunk = out[0, 0, :
+            out = model.run(None, {input_name: chunk_input})[0]  # (1,3,256,256) usually
+            denoised_chunk = out[0, 0, :h, :w]
 
         else:
             torch = models["torch"]
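The reworked ONNX branch always feeds the session a fixed (1, 3, chunk_size, chunk_size) tensor and crops the output back to the true chunk height and width, which is what keeps ragged edge chunks from breaking a fixed-shape model. A standalone sketch of that pad-and-crop pattern using the standard onnxruntime session API; the session and chunk here are placeholders:

    import numpy as np

    def run_fixed_size(session, chunk, chunk_size=256):
        """Pad an (h, w) chunk to (1, 3, chunk_size, chunk_size), run the model, crop back to (h, w)."""
        h, w = chunk.shape[:2]
        x = np.tile(chunk[np.newaxis, np.newaxis, :, :].astype(np.float32), (1, 3, 1, 1))
        if h != chunk_size or w != chunk_size:
            padded = np.zeros((1, 3, chunk_size, chunk_size), dtype=np.float32)
            padded[:, :, :h, :w] = x
            x = padded
        input_name = session.get_inputs()[0].name        # onnxruntime InferenceSession input name
        out = session.run(None, {input_name: x})[0]      # (1, 3, chunk_size, chunk_size)
        return out[0, 0, :h, :w]                         # crop back to the true chunk size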
setiastro/saspro/cosmicclarity_engines/satellite_engine.py
CHANGED
@@ -46,15 +46,19 @@ def _autocast_context(torch, device) -> Any:
             major, minor = torch.cuda.get_device_capability()
             cap = float(f"{major}.{minor}")
             if cap >= 8.0:
-                # Preferred API (torch >= 1.10-ish; definitely in 2.x)
                 if hasattr(torch, "amp") and hasattr(torch.amp, "autocast"):
                     return torch.amp.autocast(device_type="cuda")
-                # Fallback for older torch
                 return torch.cuda.amp.autocast()
+
+        elif hasattr(device, "type") and device.type == "mps":
+            # MPS often benefits from autocast in newer torch versions
+            if hasattr(torch, "amp") and hasattr(torch.amp, "autocast"):
+                return torch.amp.autocast(device_type="mps")
+
     except Exception:
         pass
-    return _nullcontext()
 
+    return _nullcontext()
 
 
 # ----------------------------