setiastrosuitepro-1.8.1.post2-py3-none-any.whl → setiastrosuitepro-1.8.3-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of setiastrosuitepro might be problematic.
Files changed (40)
  1. setiastro/images/finderchart.png +0 -0
  2. setiastro/images/magnitude.png +0 -0
  3. setiastro/saspro/__main__.py +29 -38
  4. setiastro/saspro/_generated/build_info.py +2 -2
  5. setiastro/saspro/abe.py +1 -1
  6. setiastro/saspro/backgroundneutral.py +54 -16
  7. setiastro/saspro/blink_comparator_pro.py +3 -1
  8. setiastro/saspro/bright_stars.py +305 -0
  9. setiastro/saspro/continuum_subtract.py +2 -1
  10. setiastro/saspro/cosmicclarity_preset.py +2 -1
  11. setiastro/saspro/doc_manager.py +8 -0
  12. setiastro/saspro/exoplanet_detector.py +22 -17
  13. setiastro/saspro/finder_chart.py +1650 -0
  14. setiastro/saspro/gui/main_window.py +131 -17
  15. setiastro/saspro/gui/mixins/header_mixin.py +40 -15
  16. setiastro/saspro/gui/mixins/menu_mixin.py +3 -0
  17. setiastro/saspro/gui/mixins/toolbar_mixin.py +16 -1
  18. setiastro/saspro/imageops/stretch.py +1 -1
  19. setiastro/saspro/legacy/image_manager.py +18 -4
  20. setiastro/saspro/legacy/xisf.py +3 -3
  21. setiastro/saspro/magnitude_tool.py +1724 -0
  22. setiastro/saspro/main_helpers.py +18 -0
  23. setiastro/saspro/memory_utils.py +18 -14
  24. setiastro/saspro/remove_stars.py +13 -30
  25. setiastro/saspro/resources.py +177 -161
  26. setiastro/saspro/runtime_torch.py +71 -10
  27. setiastro/saspro/sfcc.py +86 -77
  28. setiastro/saspro/stacking_suite.py +4 -3
  29. setiastro/saspro/star_alignment.py +4 -2
  30. setiastro/saspro/texture_clarity.py +1 -1
  31. setiastro/saspro/torch_rejection.py +59 -28
  32. setiastro/saspro/widgets/image_utils.py +12 -4
  33. setiastro/saspro/wimi.py +2 -1
  34. setiastro/saspro/xisf.py +3 -3
  35. {setiastrosuitepro-1.8.1.post2.dist-info → setiastrosuitepro-1.8.3.dist-info}/METADATA +4 -4
  36. {setiastrosuitepro-1.8.1.post2.dist-info → setiastrosuitepro-1.8.3.dist-info}/RECORD +40 -35
  37. {setiastrosuitepro-1.8.1.post2.dist-info → setiastrosuitepro-1.8.3.dist-info}/WHEEL +0 -0
  38. {setiastrosuitepro-1.8.1.post2.dist-info → setiastrosuitepro-1.8.3.dist-info}/entry_points.txt +0 -0
  39. {setiastrosuitepro-1.8.1.post2.dist-info → setiastrosuitepro-1.8.3.dist-info}/licenses/LICENSE +0 -0
  40. {setiastrosuitepro-1.8.1.post2.dist-info → setiastrosuitepro-1.8.3.dist-info}/licenses/license.txt +0 -0
setiastro/saspro/runtime_torch.py CHANGED
@@ -106,6 +106,26 @@ def _user_runtime_dir(status_cb=print) -> Path:
     _rt_dbg(f"_user_runtime_dir() -> {chosen}", status_cb)
     return chosen
 
+def best_device(torch, *, prefer_cuda=True, prefer_dml=False, prefer_xpu=False):
+    if prefer_cuda and getattr(torch, "cuda", None) and torch.cuda.is_available():
+        return torch.device("cuda")
+
+    # DirectML (Windows)
+    if prefer_dml and platform.system() == "Windows":
+        try:
+            import torch_directml
+            d = torch_directml.device()
+            _ = (torch.ones(1, device=d) + 1).item()
+            return d
+        except Exception:
+            pass
+
+    # MPS
+    if getattr(getattr(torch, "backends", None), "mps", None) and torch.backends.mps.is_available():
+        return torch.device("mps")
+
+    return torch.device("cpu")
+
 
 # ──────────────────────────────────────────────────────────────────────────────
 # Shadowing & sanity checks
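
For orientation, a minimal usage sketch of the new best_device() helper added in the hunk above (illustrative only, not part of the diff; import_torch, best_device and add_runtime_to_sys_path are the module's own names, everything else is assumed):

    from setiastro.saspro.runtime_torch import import_torch, best_device, add_runtime_to_sys_path

    add_runtime_to_sys_path(lambda *_: None)           # make the per-user runtime visible
    torch = import_torch(prefer_cuda=True, status_cb=print)
    device = best_device(torch, prefer_cuda=True, prefer_dml=False)

    x = torch.zeros((4, 3), device=device)             # lands on CUDA, DirectML, MPS or CPU
    print("selected device:", device)
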
@@ -228,31 +248,71 @@ def _pip_ok(venv_python: Path, args: list[str], status_cb=print) -> bool:
 
 def _ensure_numpy(venv_python: Path, status_cb=print) -> None:
     """
-    Torch wheels may not pull NumPy; ensure NumPy is present in the SAME venv.
+    Ensure NumPy exists in the runtime venv AND is ABI-compatible with common
+    torch/vision/audio wheels. In practice: enforce numpy<2.
+
     Safe to call repeatedly.
     """
     def _numpy_present() -> bool:
         code = "import importlib.util; print('OK' if importlib.util.find_spec('numpy') else 'MISS')"
         try:
             out = subprocess.check_output([str(venv_python), "-c", code], text=True).strip()
-            return (out == "OK")
+            return out == "OK"
         except Exception:
             return False
 
-    if _numpy_present():
-        return
+    def _numpy_major() -> int | None:
+        code = (
+            "import numpy as np\n"
+            "v = np.__version__.split('+',1)[0]\n"
+            "print(int(v.split('.',1)[0]))\n"
+        )
+        try:
+            out = subprocess.check_output([str(venv_python), "-c", code], text=True).strip()
+            return int(out)
+        except Exception:
+            return None
 
-    # Keep tools fresh, then install a compatible NumPy (Torch 2.x is fine with NumPy 1.26–2.x)
+    # Keep tools fresh
     _pip_ok(venv_python, ["install", "--upgrade", "pip", "setuptools", "wheel"], status_cb=status_cb)
 
-    # Prefer latest available in [1.26, 3.0)
-    if not _pip_ok(venv_python, ["install", "--prefer-binary", "--no-cache-dir", "numpy>=1.26,<3"], status_cb=status_cb):
-        # Final fallback to a broadly available pin
-        _pip_ok(venv_python, ["install", "--prefer-binary", "--no-cache-dir", "numpy==1.26.*"], status_cb=status_cb)
+    # 1) If NumPy missing install safe pin
+    if not _numpy_present():
+        status_cb("[RT] Installing NumPy (pinning to numpy<2 for torch wheel compatibility)…")
+        if not _pip_ok(
+            venv_python,
+            ["install", "--prefer-binary", "--no-cache-dir", "numpy<2"],
+            status_cb=status_cb,
+        ):
+            # last-ditch fallback (very widely available)
+            _pip_ok(
+                venv_python,
+                ["install", "--prefer-binary", "--no-cache-dir", "numpy==1.26.*"],
+                status_cb=status_cb,
+            )
 
-    # Post-install verification
+    # 2) If NumPy present but major>=2 → downgrade to numpy<2
+    maj = _numpy_major()
+    if maj is not None and maj >= 2:
+        status_cb("[RT] NumPy 2.x detected in runtime venv; downgrading to numpy<2…")
+        if not _pip_ok(
+            venv_python,
+            ["install", "--prefer-binary", "--no-cache-dir", "--force-reinstall", "numpy<2"],
+            status_cb=status_cb,
+        ):
+            _pip_ok(
+                venv_python,
+                ["install", "--prefer-binary", "--no-cache-dir", "--force-reinstall", "numpy==1.26.*"],
+                status_cb=status_cb,
+            )
+
+    # Post verification
     if not _numpy_present():
         raise RuntimeError("Failed to install NumPy into the SASpro runtime venv.")
+    maj2 = _numpy_major()
+    if maj2 is not None and maj2 >= 2:
+        raise RuntimeError("NumPy is still 2.x in the SASpro runtime venv after pinning; torch stack may not import.")
+
 
 
 def _is_access_denied(exc: BaseException) -> bool:
@@ -1076,6 +1136,7 @@ def import_torch(
             prefer_dml=prefer_dml,
             status_cb=status_cb,
         )
+        _ensure_numpy(vp, status_cb=status_cb)
     except Exception as e:
         if _is_access_denied(e):
             raise OSError(_access_denied_msg(rt)) from e
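
The hunk above wires _ensure_numpy(vp, ...) in right after the torch install step, so the numpy<2 pin is enforced whenever the runtime venv is prepared. A hypothetical external smoke test for the same invariant (not part of this release; the venv path is assumed):

    import subprocess
    from pathlib import Path

    venv_python = Path.home() / ".saspro_runtime" / "venv" / "bin" / "python"  # assumed location
    code = "import numpy, torch; print(numpy.__version__, torch.__version__)"
    try:
        out = subprocess.check_output([str(venv_python), "-c", code], text=True, timeout=300)
        print("[RT] torch/numpy import OK:", out.strip())
    except Exception as exc:
        print("[RT] torch failed to import against the installed NumPy:", exc)
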
setiastro/saspro/sfcc.py CHANGED
@@ -11,6 +11,8 @@
 
 from __future__ import annotations
 
+from setiastro.saspro.main_helpers import non_blocking_sleep
+
 import os
 import re
 import cv2
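
Several hunks below, plus wimi.py and the Astrometry.net polling methods, replace time.sleep() with this non_blocking_sleep() helper imported from main_helpers. The helper's implementation is not shown in this diff; a minimal sketch of the usual Qt idiom it presumably wraps, a nested event loop driven by a single-shot timer so the GUI stays responsive (assuming PyQt6-style bindings; PySide6 is analogous):

    from PyQt6.QtCore import QEventLoop, QTimer

    def non_blocking_sleep(seconds: float) -> None:
        """Wait without freezing the GUI: spin a local event loop until a timer fires."""
        loop = QEventLoop()
        QTimer.singleShot(int(seconds * 1000), loop.quit)
        loop.exec()
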
@@ -526,7 +528,7 @@ class SFCCDialog(QDialog):
         self.run_spcc_btn.clicked.connect(self.run_spcc)
         row4.addWidget(self.run_spcc_btn)
 
-        self.neutralize_chk = QCheckBox(self.tr("Background Neutralization")); self.neutralize_chk.setChecked(True); row4.addWidget(self.neutralize_chk)
+        self.neutralize_chk = QCheckBox(self.tr("Background Neutralization")); self.neutralize_chk.setChecked(False); row4.addWidget(self.neutralize_chk)
 
         self.run_grad_btn = QPushButton(self.tr("Run Gradient Extraction (Beta)"))
         f3 = self.run_grad_btn.font(); f3.setBold(True); self.run_grad_btn.setFont(f3)
@@ -820,65 +822,6 @@ class SFCCDialog(QDialog):
         idx = self.sens_combo.findText(current_s); self.sens_combo.setCurrentIndex(idx if idx != -1 else 0)
 
     # ── WCS utilities ──────────────────────────────────────────────────
-    def initialize_wcs_from_header(self, header):
-        if header is None:
-            print("No FITS header available; cannot build WCS.")
-            return
-        try:
-            hdr = header.copy()
-
-            # --- normalize deprecated keywords ---
-            if "RADECSYS" in hdr and "RADESYS" not in hdr:
-                radesys_val = str(hdr["RADECSYS"]).strip()
-                hdr["RADESYS"] = radesys_val
-                try:
-                    del hdr["RADECSYS"]
-                except Exception:
-                    pass
-
-                alt_letters = {
-                    k[-1]
-                    for k in hdr.keys()
-                    if re.match(r"^CTYPE[12][A-Z]$", k)
-                }
-                for a in alt_letters:
-                    key = f"RADESYS{a}"
-                    if key not in hdr:
-                        hdr[key] = radesys_val
-
-            if "EPOCH" in hdr and "EQUINOX" not in hdr:
-                hdr["EQUINOX"] = hdr["EPOCH"]
-                try:
-                    del hdr["EPOCH"]
-                except Exception:
-                    pass
-
-            # IMPORTANT: use the normalized hdr, not the original header
-            self.wcs = WCS(hdr, naxis=2, relax=True)
-
-            psm = self.wcs.pixel_scale_matrix
-            self.pixscale = np.hypot(psm[0, 0], psm[1, 0]) * 3600.0
-            self.center_ra, self.center_dec = self.wcs.wcs.crval
-            self.wcs_header = self.wcs.to_header(relax=True)
-
-            # Orientation from normalized header
-            if "CROTA2" in hdr:
-                try:
-                    self.orientation = float(hdr["CROTA2"])
-                except Exception:
-                    self.orientation = None
-            else:
-                self.orientation = self.calculate_orientation(hdr)
-
-            if self.orientation is not None:
-                self.orientation_label.setText(f"Orientation: {self.orientation:.2f}°")
-            else:
-                self.orientation_label.setText("Orientation: N/A")
-
-        except Exception as e:
-            print("WCS initialization error:\n", e)
-
-
     def calculate_orientation(self, header):
         try:
             cd1_1 = float(header.get("CD1_1", 0.0))
@@ -1149,7 +1092,7 @@ class SFCCDialog(QDialog):
                 break
             except Exception:
                 QApplication.processEvents()
-                time.sleep(0.8)
+                non_blocking_sleep(0.8)
 
         if not ok:
             for _ in range(5):
@@ -1159,7 +1102,7 @@ class SFCCDialog(QDialog):
                     break
                 except Exception:
                     QApplication.processEvents()
-                    time.sleep(0.8)
+                    non_blocking_sleep(0.8)
 
         if not ok:
             QMessageBox.critical(self, "SIMBAD Error", "Could not configure SIMBAD votable fields.")
@@ -1178,7 +1121,7 @@ class SFCCDialog(QDialog):
                 break
             except Exception:
                 QApplication.processEvents()
-                time.sleep(1.2)
+                non_blocking_sleep(1.2)
                 result = None
 
         if result is None or len(result) == 0:
@@ -1320,7 +1263,11 @@ class SFCCDialog(QDialog):
         # --- plot / UI feedback (unchanged) ---
         if getattr(self, "figure", None) is not None:
             self.figure.clf()
-
+        doc = self.doc_manager.get_active_document()
+        if doc is not None:
+            meta = dict(doc.metadata or {})
+            meta["SFCC_star_list"] = list(self.star_list) # keep it JSON-ish
+            self.doc_manager.update_active_document(doc.image, metadata=meta, step_name="SFCC Stars Cached", doc=doc)
         if templates_for_hist:
             uniq, cnt = np.unique(templates_for_hist, return_counts=True)
             types_str = ", ".join([str(u) for u in uniq])
@@ -1440,14 +1387,24 @@ class SFCCDialog(QDialog):
             dy = sources["y"] - star["y"]
             j = int(np.argmin(dx * dx + dy * dy))
             if (dx[j] * dx[j] + dy[j] * dy[j]) < (3.0 ** 2):
-                xi, yi = int(round(float(sources["x"][j]))), int(round(float(sources["y"][j])))
-                if 0 <= xi < W and 0 <= yi < H:
-                    raw_matches.append({
-                        "sim_index": i,
-                        "template": star.get("pickles_match") or star["sp_clean"],
-                        "x_pix": xi,
-                        "y_pix": yi
-                    })
+                x_c = float(sources["x"][j])
+                y_c = float(sources["y"][j])
+
+                raw_matches.append({
+                    "sim_index": i,
+                    "template": star.get("pickles_match") or star["sp_clean"],
+                    "src_index": j,
+
+                    # New canonical centroid keys
+                    "x": x_c,
+                    "y": y_c,
+
+                    # Back-compat keys used elsewhere (gradient step, older code paths)
+                    "x_pix": x_c,
+                    "y_pix": y_c,
+
+                    "a": float(sources["a"][j]),
+                })
 
         if not raw_matches:
             QMessageBox.warning(self, "No Matches", "No SIMBAD star matched to SEP detections.")
@@ -1523,14 +1480,65 @@ class SFCCDialog(QDialog):
             except Exception as e:
                 print(f"[SFCC] Warning: failed to load/integrate template {pname}: {e}")
 
+        def measure_star_rgb_aperture(img_rgb_f32: np.ndarray, x: float, y: float, r: float,
+                                      rin: float, rout: float) -> tuple[float, float, float]:
+            # SEP expects float32, C-contiguous, and (x,y) in pixel coords
+            R = np.ascontiguousarray(img_rgb_f32[..., 0], dtype=np.float32)
+            G = np.ascontiguousarray(img_rgb_f32[..., 1], dtype=np.float32)
+            B = np.ascontiguousarray(img_rgb_f32[..., 2], dtype=np.float32)
+
+            # sum_circle returns (flux, fluxerr, flag) when err not provided; handle either form
+            def _sum(ch):
+                out = sep.sum_circle(ch, np.array([x]), np.array([y]), r,
+                                     subpix=5, bkgann=(rin, rout))
+                # Depending on sep version, out can be (flux, fluxerr, flag) or (flux, flag)
+                if len(out) == 3:
+                    flux, _fluxerr, flag = out
+                else:
+                    flux, flag = out
+                return float(flux[0]), int(flag[0])
+
+            fR, flR = _sum(R)
+            fG, flG = _sum(G)
+            fB, flB = _sum(B)
+
+            # If any flags set, you can reject (edge, etc.)
+            if (flR | flG | flB) != 0:
+                return None, None, None
+
+            return fR, fG, fB
+
+
         # ---- Main match loop (measure from 'base' only) ----
         for m in raw_matches:
-            xi, yi, sp = m["x_pix"], m["y_pix"], m["template"]
+            xi = float(m.get("x_pix", m["x"]))
+            yi = float(m.get("y_pix", m["y"]))
+            sp = m["template"]
 
             # measure on the SEP working copy (already BN’d, only one pedestal handling)
-            Rm = float(base[yi, xi, 0])
-            Gm = float(base[yi, xi, 1])
-            Bm = float(base[yi, xi, 2])
+            x = float(m["x"])
+            y = float(m["y"])
+
+            # Aperture radius choice (simple + robust)
+            # sources["a"] is roughly semi-major sigma-ish from SEP; a common quick rule:
+            # r ~ 2.5 * a, with sane clamps.
+            a = float(m.get("a", 1.5))
+            r = float(np.clip(2.5 * a, 2.0, 12.0))
+
+            # Annulus (your “kicker”): inner/outer in pixels
+            rin = float(np.clip(3.0 * r, 6.0, 40.0))
+            rout = float(np.clip(5.0 * r, rin + 2.0, 60.0))
+
+            meas = measure_star_rgb_aperture(base, x, y, r, rin, rout)
+            if meas[0] is None:
+                continue
+            Rm, Gm, Bm = meas
+
+            if Gm <= 0:
+                continue
+            meas_RG = Rm / Gm
+            meas_BG = Bm / Gm
+
             if Gm <= 0:
                 continue
 
@@ -1654,7 +1662,8 @@ class SFCCDialog(QDialog):
         QApplication.processEvents()
 
         eps = 1e-8
-        calibrated = base.copy()
+        #calibrated = base.copy()
+        calibrated = img_float.copy()
 
         R = calibrated[..., 0]
         G = calibrated[..., 1]
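
For context on the measure_star_rgb_aperture() hunk above: it follows the standard SEP background-subtraction plus sum_circle() aperture pattern. A standalone reference sketch of that pattern on synthetic single-channel data (not SASpro code):

    import numpy as np
    import sep

    data = np.random.normal(100.0, 5.0, (256, 256)).astype(np.float32)  # fake sky + noise
    data[120:125, 120:125] += 500.0                                     # fake star

    bkg = sep.Background(data)                      # model and subtract the background
    data_sub = data - bkg
    objects = sep.extract(data_sub, 5.0, err=bkg.globalrms)

    # circular aperture sums with a local background annulus, as in the hunk above
    flux, fluxerr, flag = sep.sum_circle(data_sub, objects["x"], objects["y"], 3.0,
                                         err=bkg.globalrms, bkgann=(6.0, 8.0), subpix=5)
    print(len(objects), "sources; first flux:", float(flux[0]))
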
setiastro/saspro/stacking_suite.py CHANGED
@@ -6728,7 +6728,7 @@ class StackingSuiteDialog(QDialog):
                 s = s[:-1]
             try:
                 return float(s)
-            except Exception:
+            except (ValueError, TypeError):
                 return 2.0
         return 2.0
 
@@ -6737,7 +6737,7 @@
     def _set_drizzle_scale(self, r: float | str) -> None:
         if isinstance(r, str):
            try: r = float(r.rstrip("xX"))
-           except: r = 2.0
+           except ValueError: r = 2.0
         r = float(max(1.0, min(3.0, r)))
         # store as “Nx” so the combo's string stays in sync
         self.settings.setValue("stacking/drizzle_scale", f"{int(r)}x")
@@ -6754,7 +6754,8 @@
         if cb is not None:
             try:
                 return bool(cb.isChecked())
-            except Exception:
+            except (RuntimeError, AttributeError):
+                # Wrapped object might be deleted or invalid
                 pass
         # fallback to settings (headless / older flows)
         return bool(self.settings.value("stacking/drizzle_enabled", False, type=bool))
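
A standalone round-trip of the drizzle-scale handling changed above (illustrative sketch; parse_drizzle_scale is a hypothetical helper, whereas the dialog methods themselves read and write QSettings):

    def parse_drizzle_scale(s) -> float:
        """Parse '2x'/'3X'/floats, fall back to 2.0, clamp to [1.0, 3.0]."""
        if isinstance(s, str):
            try:
                s = float(s.rstrip("xX"))
            except ValueError:
                s = 2.0
        return float(max(1.0, min(3.0, s)))

    assert parse_drizzle_scale("2x") == 2.0
    assert parse_drizzle_scale("10X") == 3.0      # clamped to the 1-3 range
    assert parse_drizzle_scale("garbage") == 2.0  # fallback
    print(f"{int(parse_drizzle_scale('3x'))}x")   # stored back as "3x"
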
@@ -7172,6 +7172,7 @@ class MosaicMasterDialog(QDialog):
         return None
 
     def poll_submission_status(self, subid):
+        from setiastro.saspro.main_helpers import non_blocking_sleep
         url = ASTROMETRY_API_URL + f"submissions/{subid}"
         for attempt in range(90): # up to ~15 minutes
             response = robust_api_request("GET", url)
@@ -7180,11 +7181,12 @@ class MosaicMasterDialog(QDialog):
             if jobs and jobs[0] is not None:
                 return jobs[0]
            print(f"Polling attempt {attempt+1}: Job ID not ready yet.")
-            time.sleep(10)
+            non_blocking_sleep(10)
         QMessageBox.critical(self, "Blind Solve Failed", "Failed to retrieve job ID from Astrometry.net after multiple attempts.")
         return None
 
     def poll_calibration_data(self, job_id):
+        from setiastro.saspro.main_helpers import non_blocking_sleep
         url = ASTROMETRY_API_URL + f"jobs/{job_id}/calibration/"
         for attempt in range(90):
             response = robust_api_request("GET", url)
@@ -7192,7 +7194,7 @@ class MosaicMasterDialog(QDialog):
                 print("Calibration data retrieved:", response)
                 return response
             print(f"Calibration data not available yet (attempt {attempt+1})")
-            time.sleep(10)
+            non_blocking_sleep(10)
         QMessageBox.critical(self, "Blind Solve Failed", "Calibration data did not complete in the expected timeframe.")
         return None
 
setiastro/saspro/texture_clarity.py CHANGED
@@ -154,7 +154,7 @@ def _apply_clarity(image: np.ndarray, amount: float, radius: float) -> np.ndarray:
             print(f"Bilateral Filter failed: {e}")
             try:
                 base = cv2.GaussianBlur(img_f32, (0, 0), sigma_space_target)
-            except:
+            except Exception:
                 return image
     else:
         return image
setiastro/saspro/torch_rejection.py CHANGED
@@ -5,7 +5,7 @@ import numpy as np
 
 # Always route through our runtime shim so ALL GPU users share the same backend.
 # Nothing heavy happens at import; we only resolve Torch when needed.
-from .runtime_torch import import_torch, add_runtime_to_sys_path
+from .runtime_torch import import_torch, add_runtime_to_sys_path, best_device
 
 # Algorithms supported by the GPU path here (names match your UI/CPU counterparts)
 _SUPPORTED = {
@@ -31,47 +31,37 @@ _SUPPORTED = {
 _TORCH = None
 _DEVICE = None
 
-def _get_torch(prefer_cuda: bool = True):
-    """
-    Resolve and cache the torch module via the SAS runtime shim.
-    This may install/repair torch into the per-user runtime if needed.
-    """
+
+
+def _get_torch(prefer_cuda: bool = True, prefer_dml: bool = False):
     global _TORCH, _DEVICE
     if _TORCH is not None:
         return _TORCH
 
-    # In frozen builds, help the process see the runtime site-packages first.
     try:
         add_runtime_to_sys_path(lambda *_: None)
     except Exception:
         pass
 
-    # Import (and if necessary, install) torch using the unified runtime.
-    torch = import_torch(prefer_cuda=prefer_cuda, status_cb=lambda *_: None)
+    # Let runtime_torch install the right stack
+    torch = import_torch(prefer_cuda=prefer_cuda, prefer_dml=prefer_dml, status_cb=lambda *_: None)
+    _DEVICE = best_device(torch, prefer_cuda=True, prefer_dml=prefer_dml)
     _TORCH = torch
     _force_fp32_policy(torch)
 
-    # Choose the best device once; cheap calls, but cached anyway
-    try:
-        if hasattr(torch, "cuda") and torch.cuda.is_available():
-            _DEVICE = torch.device("cuda")
-        elif getattr(getattr(torch, "backends", None), "mps", None) and torch.backends.mps.is_available():
-            _DEVICE = torch.device("mps")
-        else:
-            # Try DirectML for AMD/Intel GPUs on Windows
-            try:
-                import torch_directml
-                dml_device = torch_directml.device()
-                # Quick sanity check
-                _ = (torch.ones(1, device=dml_device) + 1).item()
-                _DEVICE = dml_device
-            except Exception:
-                _DEVICE = torch.device("cpu")
-    except Exception:
+    # Device selection: CUDA first, else CPU.
+    # (If runtime_torch installed DML, then your app should choose DML device elsewhere
+    # OR you add a runtime_torch helper to return it.)
+    if hasattr(torch, "cuda") and torch.cuda.is_available():
+        _DEVICE = torch.device("cuda")
+    elif getattr(getattr(torch, "backends", None), "mps", None) and torch.backends.mps.is_available():
+        _DEVICE = torch.device("mps")
+    else:
         _DEVICE = torch.device("cpu")
 
     return _TORCH
 
+
 def _device():
     if _DEVICE is not None:
         return _DEVICE
@@ -246,11 +236,52 @@ def torch_reduce_tile(
     ts = torch.from_numpy(ts_np).to(dev, dtype=torch.float32, non_blocking=True)
 
     # Weights broadcast to 4D
+    # Weights -> broadcastable 4D tensor (F,1,1,1) or (F,1,1,C) or (F,H,W,1) or (F,H,W,C)
     weights_np = np.asarray(weights_np, dtype=np.float32)
+
+    if weights_np.ndim == 0:
+        # scalar -> per-frame scalar (rare)
+        weights_np = np.full((F,), float(weights_np), dtype=np.float32)
+
     if weights_np.ndim == 1:
-        w = torch.from_numpy(weights_np).to(dev, dtype=torch.float32, non_blocking=True).view(F,1,1,1)
+        if weights_np.shape[0] != F:
+            raise ValueError(f"weights shape {weights_np.shape} does not match F={F}")
+        w_np = weights_np.reshape(F, 1, 1, 1)
+
+    elif weights_np.ndim == 2:
+        # Most important fix: (F,C) per-channel weights
+        if weights_np.shape == (F, C):
+            w_np = weights_np.reshape(F, 1, 1, C)
+        # Sometimes people accidentally pass (F,H) or (F,W) -> reject loudly
+        else:
+            raise ValueError(
+                f"Unsupported 2D weights shape {weights_np.shape}. "
+                f"Expected (F,C)=({F},{C}) for per-channel weights."
+            )
+
+    elif weights_np.ndim == 3:
+        # (F,H,W) -> treat as single-channel weight map
+        if weights_np.shape == (F, H, W):
+            w_np = weights_np[..., None]  # (F,H,W,1)
+        else:
+            raise ValueError(
+                f"Unsupported 3D weights shape {weights_np.shape}. Expected (F,H,W)=({F},{H},{W})."
+            )
+
+    elif weights_np.ndim == 4:
+        if weights_np.shape == (F, H, W, 1) or weights_np.shape == (F, H, W, C):
+            w_np = weights_np
+        else:
+            raise ValueError(
+                f"Unsupported 4D weights shape {weights_np.shape}. "
+                f"Expected (F,H,W,1) or (F,H,W,C)=({F},{H},{W},{C})."
+            )
     else:
-        w = torch.from_numpy(weights_np).to(dev, dtype=torch.float32, non_blocking=True)
+        raise ValueError(f"Unsupported weights ndim={weights_np.ndim} shape={weights_np.shape}")
+
+    # Host -> device
+    w = torch.from_numpy(w_np).to(dev, dtype=torch.float32, non_blocking=False)
+
 
     algo = algo_name
     valid = torch.isfinite(ts)
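
The weight-shape handling above boils down to standard NumPy/Torch broadcasting over a (F, H, W, C) stack; a small self-contained illustration of the accepted shapes (synthetic data, not SASpro code):

    import numpy as np

    F, H, W, C = 6, 32, 32, 3
    stack = np.random.rand(F, H, W, C).astype(np.float32)

    w_frame = np.linspace(0.5, 1.0, F, dtype=np.float32).reshape(F, 1, 1, 1)   # per-frame
    w_chan  = np.random.rand(F, C).astype(np.float32).reshape(F, 1, 1, C)      # per frame+channel
    w_map   = np.random.rand(F, H, W).astype(np.float32)[..., None]            # per-pixel map

    for w in (w_frame, w_chan, w_map):
        mean = (stack * w).sum(axis=0) / w.sum(axis=0)   # weighted mean, broadcast over (H, W, C)
        assert mean.shape == (H, W, C)
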
setiastro/saspro/widgets/image_utils.py CHANGED
@@ -66,7 +66,9 @@ def numpy_to_qimage(arr: np.ndarray, normalize: bool = True) -> QImage:
     if arr.ndim == 2:
         # Grayscale
         h, w = arr.shape
-        return QImage(arr.data, w, h, w, QImage.Format.Format_Grayscale8).copy()
+        img = QImage(arr.data, w, h, w, QImage.Format.Format_Grayscale8)
+        img._buf = arr # Keep alive
+        return img
 
     elif arr.ndim == 3:
         h, w, c = arr.shape
@@ -74,17 +76,23 @@ def numpy_to_qimage(arr: np.ndarray, normalize: bool = True) -> QImage:
         if c == 1:
             # Grayscale with channel dim
             arr = arr.squeeze()
-            return QImage(arr.data, w, h, w, QImage.Format.Format_Grayscale8).copy()
+            img = QImage(arr.data, w, h, w, QImage.Format.Format_Grayscale8)
+            img._buf = arr
+            return img
 
         elif c == 3:
             # RGB
             bytes_per_line = 3 * w
-            return QImage(arr.data, w, h, bytes_per_line, QImage.Format.Format_RGB888).copy()
+            img = QImage(arr.data, w, h, bytes_per_line, QImage.Format.Format_RGB888)
+            img._buf = arr
+            return img
 
         elif c == 4:
            # RGBA
            bytes_per_line = 4 * w
-            return QImage(arr.data, w, h, bytes_per_line, QImage.Format.Format_RGBA8888).copy()
+            img = QImage(arr.data, w, h, bytes_per_line, QImage.Format.Format_RGBA8888)
+            img._buf = arr
+            return img
 
         else:
             raise ValueError(f"Unsupported number of channels: {c}")
setiastro/saspro/wimi.py CHANGED
@@ -28,6 +28,7 @@ import lz4.block
 import zstandard
 import base64
 import ast
+from setiastro.saspro.main_helpers import non_blocking_sleep
 import platform
 from pathlib import Path
 import glob
@@ -6789,7 +6790,7 @@ class WIMIDialog(QDialog):
             except Exception as e:
                 last_err = e
                 if attempt < 4:
-                    time.sleep(1) # or QThread.msleep(1000) if you want less UI freeze
+                    non_blocking_sleep(1)
                 else:
                     # After 5 attempts total, stop with a helpful message
                     err_txt = str(last_err) if last_err is not None else "Unknown error"
setiastro/saspro/xisf.py CHANGED
@@ -1072,7 +1072,7 @@ class XISF:
         }
         try:
             return _dtypes[s]
-        except:
+        except KeyError:
             raise NotImplementedError(f"sampleFormat {s} not implemented")
 
     # Return XISF data type from numpy dtype
@@ -1087,7 +1087,7 @@ class XISF:
         }
         try:
             return _sampleFormats[str(dtype)]
-        except:
+        except KeyError:
             raise NotImplementedError(f"sampleFormat for {dtype} not implemented")
 
     @staticmethod
@@ -1121,7 +1121,7 @@ class XISF:
         }
         try:
             return _dtypes[type_prefix]
-        except:
+        except KeyError:
             raise NotImplementedError(f"data type {type_name} not implemented")
 
     # __/ Auxiliary functions for compression/shuffling \________
{setiastrosuitepro-1.8.1.post2.dist-info → setiastrosuitepro-1.8.3.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: setiastrosuitepro
-Version: 1.8.1.post2
+Version: 1.8.3
 Summary: Seti Astro Suite Pro - Advanced astrophotography toolkit for image calibration, stacking, registration, photometry, and visualization
 License: GPL-3.0
 License-File: LICENSE
@@ -68,9 +68,9 @@ Description-Content-Type: text/markdown
 
 ### Other contributors:
 - [Fabio Tempera](https://github.com/Ft2801) 🥇
-  - Complete code refactoring of `setiastrosuitepro.py` (20,000+ lines), and duplicated code removal across the project
-  - Addition of AstroSpikes tool, secret minigame, system resources monitor, app statistics, and 10+ language translations
-  - Implementation of UI elements, startup window, caching methods, lazy imports, utils functions, better memory management, and other important code optimizations across the entire project
+  - Complete code refactoring of `setiastrosuitepro.py` (20,000+ lines), and duplicated code removal across the entire project
+  - Addition of AstroSpikes tool, Texture and Clarity, secret minigame, system resources monitor, app statistics, and 10+ language translations
+  - Implementation of UI elements, startup optimizations, startup window, caching methods, lazy imports, utils functions, better memory management, and other important code optimizations across the entire project
 - [Joaquin Rodriguez](https://github.com/jrhuerta)
   - Project migration to Poetry
 - [Tim Dicke](https://github.com/dickett)