setiastrosuitepro-1.8.0.post3-py3-none-any.whl → setiastrosuitepro-1.8.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of setiastrosuitepro might be problematic.

Files changed (43)
  1. setiastro/images/finderchart.png +0 -0
  2. setiastro/saspro/__main__.py +41 -39
  3. setiastro/saspro/_generated/build_info.py +2 -2
  4. setiastro/saspro/abe.py +1 -1
  5. setiastro/saspro/blink_comparator_pro.py +3 -1
  6. setiastro/saspro/bright_stars.py +305 -0
  7. setiastro/saspro/continuum_subtract.py +2 -1
  8. setiastro/saspro/cosmicclarity_engines/darkstar_engine.py +22 -2
  9. setiastro/saspro/cosmicclarity_engines/denoise_engine.py +68 -15
  10. setiastro/saspro/cosmicclarity_engines/satellite_engine.py +7 -3
  11. setiastro/saspro/cosmicclarity_engines/sharpen_engine.py +371 -98
  12. setiastro/saspro/cosmicclarity_engines/superres_engine.py +1 -0
  13. setiastro/saspro/cosmicclarity_preset.py +2 -1
  14. setiastro/saspro/doc_manager.py +8 -0
  15. setiastro/saspro/exoplanet_detector.py +22 -17
  16. setiastro/saspro/finder_chart.py +1639 -0
  17. setiastro/saspro/gui/main_window.py +36 -14
  18. setiastro/saspro/gui/mixins/menu_mixin.py +2 -0
  19. setiastro/saspro/gui/mixins/toolbar_mixin.py +9 -1
  20. setiastro/saspro/legacy/image_manager.py +18 -4
  21. setiastro/saspro/legacy/xisf.py +3 -3
  22. setiastro/saspro/main_helpers.py +18 -0
  23. setiastro/saspro/memory_utils.py +18 -14
  24. setiastro/saspro/model_manager.py +65 -0
  25. setiastro/saspro/model_workers.py +58 -24
  26. setiastro/saspro/ops/settings.py +45 -8
  27. setiastro/saspro/planetprojection.py +68 -36
  28. setiastro/saspro/resources.py +193 -175
  29. setiastro/saspro/runtime_torch.py +622 -137
  30. setiastro/saspro/sfcc.py +5 -3
  31. setiastro/saspro/stacking_suite.py +4 -3
  32. setiastro/saspro/star_alignment.py +266 -212
  33. setiastro/saspro/texture_clarity.py +1 -1
  34. setiastro/saspro/widgets/image_utils.py +12 -4
  35. setiastro/saspro/widgets/spinboxes.py +5 -7
  36. setiastro/saspro/wimi.py +2 -1
  37. setiastro/saspro/xisf.py +3 -3
  38. {setiastrosuitepro-1.8.0.post3.dist-info → setiastrosuitepro-1.8.2.dist-info}/METADATA +4 -4
  39. {setiastrosuitepro-1.8.0.post3.dist-info → setiastrosuitepro-1.8.2.dist-info}/RECORD +43 -40
  40. {setiastrosuitepro-1.8.0.post3.dist-info → setiastrosuitepro-1.8.2.dist-info}/WHEEL +0 -0
  41. {setiastrosuitepro-1.8.0.post3.dist-info → setiastrosuitepro-1.8.2.dist-info}/entry_points.txt +0 -0
  42. {setiastrosuitepro-1.8.0.post3.dist-info → setiastrosuitepro-1.8.2.dist-info}/licenses/LICENSE +0 -0
  43. {setiastrosuitepro-1.8.0.post3.dist-info → setiastrosuitepro-1.8.2.dist-info}/licenses/license.txt +0 -0

setiastro/saspro/runtime_torch.py

@@ -16,16 +16,13 @@ from contextlib import contextmanager
 import platform as _plat
 from pathlib import Path as _Path

-def _maybe_find_torch_shm_manager(torch_mod) -> str | None:
-    # Only Linux wheels include/use this helper binary.
-    if _plat.system() != "Linux":
-        return None
+
+def _rt_dbg(msg: str, status_cb=print):
     try:
-        base = _Path(getattr(torch_mod, "__file__", "")).parent
-        p = base / "bin" / "torch_shm_manager"
-        return str(p) if p.exists() else None
+        status_cb(f"[RT] {msg}")
     except Exception:
-        return None
+        print(f"[RT] {msg}", flush=True)
+

 # ──────────────────────────────────────────────────────────────────────────────
 # Paths & runtime selection
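
Note on the hunk above: the Linux-only torch_shm_manager lookup is removed, and a small `_rt_dbg` helper now funnels runtime diagnostics through whatever status callback the caller supplies, falling back to plain printing. A standalone sketch of the same pattern (not an import from the package, just an illustration of how a caller-supplied callback captures the "[RT]" lines):

    # Minimal sketch mirroring the _rt_dbg helper introduced above.
    def _rt_dbg(msg: str, status_cb=print):
        try:
            status_cb(f"[RT] {msg}")
        except Exception:
            print(f"[RT] {msg}", flush=True)

    log: list[str] = []
    _rt_dbg("runtime selection started", status_cb=log.append)
    assert log == ["[RT] runtime selection started"]
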
@@ -67,14 +64,25 @@ def _runtime_base_dir() -> Path:
 def _current_tag() -> str:
     return f"py{sys.version_info.major}{sys.version_info.minor}"

-def _discover_existing_runtime_dir() -> Path | None:
+def _discover_existing_runtime_dir(status_cb=print) -> Path | None:
     """
-    Return the newest existing runtime dir that already has a venv python,
-    using the venv interpreter's REAL version instead of just the folder name.
+    Prefer an existing runtime that MATCHES the current interpreter minor.
+    Only if none exists, fall back to the highest available.
     """
     base = _runtime_base_dir()
     if not base.exists():
         return None
+
+    cur_tag = _current_tag()  # e.g. py311, py312
+    cur_dir = base / cur_tag
+    cur_vpy = cur_dir / "venv" / ("Scripts/python.exe" if platform.system() == "Windows" else "bin/python")
+
+    # 1) If matching-current exists and has a venv python, use it.
+    if cur_vpy.exists():
+        _rt_dbg(f"Found matching runtime for current interpreter: {cur_dir}", status_cb)
+        return cur_dir
+
+    # 2) Otherwise, fall back to "newest existing"
     candidates: list[tuple[int, int, Path]] = []
     for p in base.glob("py*"):
         vpy = p / "venv" / ("Scripts/python.exe" if platform.system() == "Windows" else "bin/python")
@@ -83,22 +91,21 @@ def _discover_existing_runtime_dir() -> Path | None:
         ver = _venv_pyver(vpy)
         if ver:
             candidates.append((ver[0], ver[1], p))
+
     if not candidates:
         return None
-    candidates.sort()  # pick the highest Python (major, minor)
-    return candidates[-1][2]

-def _user_runtime_dir() -> Path:
-    """
-    Use an existing runtime if we find one; otherwise select a directory for the
-    current interpreter version (py310/py311/py312...).
-    """
-    existing = _discover_existing_runtime_dir()
-    return existing or (_runtime_base_dir() / _current_tag())
+    candidates.sort()
+    chosen = candidates[-1][2]
+    _rt_dbg(f"No matching runtime; using newest existing: {chosen}", status_cb)
+    return chosen
+
+def _user_runtime_dir(status_cb=print) -> Path:
+    existing = _discover_existing_runtime_dir(status_cb=status_cb)
+    chosen = existing or (_runtime_base_dir() / _current_tag())
+    _rt_dbg(f"_user_runtime_dir() -> {chosen}", status_cb)
+    return chosen

-# ──────────────────────────────────────────────────────────────────────────────
-# Shadowing & sanity checks
-# ──────────────────────────────────────────────────────────────────────────────

 # ──────────────────────────────────────────────────────────────────────────────
 # Shadowing & sanity checks
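
Note on the two hunks above: runtime discovery now follows a two-step policy: use the runtime folder whose tag matches the running interpreter (for example py312) if its venv python exists, and only otherwise fall back to the newest runtime found under the base directory. A self-contained sketch of that selection order, assuming the same base/pyXY/venv layout; the helper name `pick_runtime` is illustrative, not part of the package:

    import sys
    from pathlib import Path

    def pick_runtime(base: Path, versions: dict[str, tuple[int, int]]) -> Path | None:
        # versions maps folder names (e.g. 'py311') to the real (major, minor) of their venv pythons.
        cur_tag = f"py{sys.version_info.major}{sys.version_info.minor}"
        if cur_tag in versions:
            return base / cur_tag          # 1) prefer the runtime matching the current interpreter
        if not versions:
            return None
        newest = max(versions.items(), key=lambda kv: kv[1])[0]
        return base / newest               # 2) otherwise the highest available Python
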
@@ -172,23 +179,6 @@ def _purge_bad_torch_from_sysmodules(status_cb=print) -> None:
     except Exception:
         pass

-def _torch_stack_sanity_check(status_cb=print) -> None:
-    """
-    Ensure torch imports sanely AND torchvision/torchaudio are importable.
-    (Satellite engine requires torchvision; we install torchaudio too for safety.)
-    """
-    _torch_sanity_check(status_cb=status_cb)
-
-    try:
-        import torchvision  # noqa
-    except Exception as e:
-        raise RuntimeError(f"torchvision import failed: {e}") from e
-
-    try:
-        import torchaudio  # noqa
-    except Exception as e:
-        raise RuntimeError(f"torchaudio import failed: {e}") from e
-

 def _torch_sanity_check(status_cb=print):
     try:
@@ -238,31 +228,71 @@ def _pip_ok(venv_python: Path, args: list[str], status_cb=print) -> bool:

 def _ensure_numpy(venv_python: Path, status_cb=print) -> None:
     """
-    Torch wheels may not pull NumPy; ensure NumPy is present in the SAME venv.
+    Ensure NumPy exists in the runtime venv AND is ABI-compatible with common
+    torch/vision/audio wheels. In practice: enforce numpy<2.
+
     Safe to call repeatedly.
     """
     def _numpy_present() -> bool:
         code = "import importlib.util; print('OK' if importlib.util.find_spec('numpy') else 'MISS')"
         try:
             out = subprocess.check_output([str(venv_python), "-c", code], text=True).strip()
-            return (out == "OK")
+            return out == "OK"
         except Exception:
             return False

-    if _numpy_present():
-        return
+    def _numpy_major() -> int | None:
+        code = (
+            "import numpy as np\n"
+            "v = np.__version__.split('+',1)[0]\n"
+            "print(int(v.split('.',1)[0]))\n"
+        )
+        try:
+            out = subprocess.check_output([str(venv_python), "-c", code], text=True).strip()
+            return int(out)
+        except Exception:
+            return None

-    # Keep tools fresh, then install a compatible NumPy (Torch 2.x is fine with NumPy 1.26–2.x)
+    # Keep tools fresh
     _pip_ok(venv_python, ["install", "--upgrade", "pip", "setuptools", "wheel"], status_cb=status_cb)

-    # Prefer latest available in [1.26, 3.0)
-    if not _pip_ok(venv_python, ["install", "--prefer-binary", "--no-cache-dir", "numpy>=1.26,<3"], status_cb=status_cb):
-        # Final fallback to a broadly available pin
-        _pip_ok(venv_python, ["install", "--prefer-binary", "--no-cache-dir", "numpy==1.26.*"], status_cb=status_cb)
+    # 1) If NumPy missing install safe pin
+    if not _numpy_present():
+        status_cb("[RT] Installing NumPy (pinning to numpy<2 for torch wheel compatibility)…")
+        if not _pip_ok(
+            venv_python,
+            ["install", "--prefer-binary", "--no-cache-dir", "numpy<2"],
+            status_cb=status_cb,
+        ):
+            # last-ditch fallback (very widely available)
+            _pip_ok(
+                venv_python,
+                ["install", "--prefer-binary", "--no-cache-dir", "numpy==1.26.*"],
+                status_cb=status_cb,
+            )
+
+    # 2) If NumPy present but major>=2 → downgrade to numpy<2
+    maj = _numpy_major()
+    if maj is not None and maj >= 2:
+        status_cb("[RT] NumPy 2.x detected in runtime venv; downgrading to numpy<2…")
+        if not _pip_ok(
+            venv_python,
+            ["install", "--prefer-binary", "--no-cache-dir", "--force-reinstall", "numpy<2"],
+            status_cb=status_cb,
+        ):
+            _pip_ok(
+                venv_python,
+                ["install", "--prefer-binary", "--no-cache-dir", "--force-reinstall", "numpy==1.26.*"],
+                status_cb=status_cb,
+            )

-    # Post-install verification
+    # Post verification
     if not _numpy_present():
         raise RuntimeError("Failed to install NumPy into the SASpro runtime venv.")
+    maj2 = _numpy_major()
+    if maj2 is not None and maj2 >= 2:
+        raise RuntimeError("NumPy is still 2.x in the SASpro runtime venv after pinning; torch stack may not import.")
+


 def _is_access_denied(exc: BaseException) -> bool:
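
Note on the hunk above: `_ensure_numpy` now pins the runtime venv to numpy<2 because, per the new docstring, the torch/torchvision/torchaudio wheels it pairs with expect a NumPy 1.x ABI, and an existing NumPy 2.x install is force-downgraded. The version gate can be reproduced in isolation, for example when debugging a user's runtime venv. A sketch under the same assumptions (the helper name `numpy_major_in_venv` is illustrative, not part of the package):

    import subprocess
    from pathlib import Path

    def numpy_major_in_venv(venv_python: Path) -> int | None:
        # Returns NumPy's major version inside the given venv, or None if NumPy is absent.
        code = "import numpy; print(numpy.__version__.split('+')[0].split('.')[0])"
        try:
            out = subprocess.check_output([str(venv_python), "-c", code], text=True).strip()
            return int(out)
        except Exception:
            return None

    # A result >= 2 is the condition that triggers the forced downgrade in the diff.
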
@@ -631,129 +661,518 @@ def _install_torch(venv_python: Path, prefer_cuda: bool, prefer_xpu: bool, prefe
 # Public entry points
 # ──────────────────────────────────────────────────────────────────────────────

-def import_torch(prefer_cuda: bool = True, prefer_xpu: bool = False, prefer_dml: bool = False, status_cb=print):
+def _venv_import_probe(venv_python: Path, modname: str) -> tuple[bool, str]:
     """
-    Ensure a per-user venv exists with torch installed; return the imported module.
-    Hardened against shadow imports, broken wheels, concurrent installs, and partial markers.
+    Try importing a module INSIDE the runtime venv python.
+    Returns (ok, output_or_error_tail).
     """
-    # Before any attempt, demote shadowing paths (CWD / random folders)
-    _ban_shadow_torch_paths(status_cb=status_cb)
-    _purge_bad_torch_from_sysmodules(status_cb=status_cb)
+    code = (
+        "import importlib, sys\n"
+        f"m=importlib.import_module('{modname}')\n"
+        "print('OK', getattr(m,'__version__',None), getattr(m,'__file__',None))\n"
+    )
+    r = subprocess.run([str(venv_python), "-c", code],
+                       stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True)
+    out = (r.stdout or "").strip()
+    if r.returncode == 0 and out.startswith("OK"):
+        return True, out
+    return False, out[-4000:] if out else "no output"
+

-    add_runtime_to_sys_path(status_cb=lambda *_: None)
+def _write_torch_marker(marker: Path, status_cb=print) -> None:
+    """
+    Create torch_installed.json based on runtime venv imports.
+    Safe to call repeatedly.
+    """
+    rt = marker.parent
+    vp = _venv_paths(rt)["python"]
+
+    ok_t, out_t = _venv_import_probe(vp, "torch")
+    ok_v, out_v = _venv_import_probe(vp, "torchvision")
+    ok_a, out_a = _venv_import_probe(vp, "torchaudio")
+
+    payload = {
+        "installed": bool(ok_t),
+        "when": int(time.time()),
+        "python": None,
+        "torch": None,
+        "torchvision": None,
+        "torchaudio": None,
+        "torch_file": None,
+        "torchvision_file": None,
+        "torchaudio_file": None,
+        "probe": {
+            "torch": out_t,
+            "torchvision": out_v,
+            "torchaudio": out_a,
+        }
+    }

-    # Fast path: if torch already importable and sane, use it
+    # get venv python version
     try:
-        import torch  # noqa
-        _torch_stack_sanity_check(status_cb=status_cb)
-        return torch
+        r = subprocess.run([str(vp), "-c", "import sys; print(f'{sys.version_info.major}.{sys.version_info.minor}')"],
+                           stdout=subprocess.PIPE, stderr=subprocess.DEVNULL, text=True)
+        if r.returncode == 0:
+            payload["python"] = (r.stdout or "").strip()
     except Exception:
         pass

-    rt = _user_runtime_dir()
-    vp = _ensure_venv(rt, status_cb=status_cb)
-    site = _site_packages(vp)
-    marker = rt / "torch_installed.json"
+    # parse "OK ver file" lines
+    def _parse_ok(s: str):
+        # format: "OK <ver> <file>"
+        try:
+            parts = s.split(" ", 2)
+            ver = parts[1] if len(parts) > 1 else None
+            f = parts[2] if len(parts) > 2 else None
+            return ver, f
+        except Exception:
+            return None, None
+
+    if ok_t:
+        payload["torch"], payload["torch_file"] = _parse_ok(out_t)
+    if ok_v:
+        payload["torchvision"], payload["torchvision_file"] = _parse_ok(out_v)
+    if ok_a:
+        payload["torchaudio"], payload["torchaudio_file"] = _parse_ok(out_a)

     try:
-        _ensure_numpy(vp, status_cb=status_cb)
-    except Exception:
-        # Non-fatal; we'll try again if torch complains at runtime
-        pass
+        marker.write_text(json.dumps(payload, indent=2), encoding="utf-8")
+        status_cb(f"[RT] Wrote marker: {marker}")
+    except Exception as e:
+        status_cb(f"[RT] Failed to write marker {marker}: {e!r}")
+
+
+def _venv_has_torch_stack(
+    vp: Path,
+    status_cb=print,
+    *,
+    require_torchaudio: bool = True
+) -> tuple[bool, dict]:
+    """
+    Definitive check: can the RUNTIME VENV import torch/torchvision/(torchaudio)?
+    This does NOT use the frozen app interpreter to decide installation state.
+    """
+    ok_t, out_t = _venv_import_probe(vp, "torch")
+    ok_v, out_v = _venv_import_probe(vp, "torchvision")
+    ok_a, out_a = _venv_import_probe(vp, "torchaudio")
+
+    info = {
+        "torch": (ok_t, out_t),
+        "torchvision": (ok_v, out_v),
+        "torchaudio": (ok_a, out_a),
+    }
+
+    ok_all = (ok_t and ok_v and ok_a) if require_torchaudio else (ok_t and ok_v)
+    return ok_all, info
+
+
+def _marker_says_ready(
+    marker: Path,
+    site: Path,
+    venv_ver: tuple[int, int] | None,
+    *,
+    require_torchaudio: bool = True,
+    max_age_days: int = 180,
+) -> bool:
+    """
+    Advisory fast-path gate ONLY.
+
+    Returns True if the marker looks sane enough that an in-process import attempt
+    is worth trying *without* doing the expensive subprocess venv probes.
+
+    IMPORTANT:
+      - This must NOT be used to decide whether to install/uninstall anything.
+      - If this returns True and the in-process import fails, we fall back to the
+        definitive venv probe (_venv_has_torch_stack).
+    """
+    try:
+        if not marker.exists():
+            return False
+
+        raw = marker.read_text(encoding="utf-8", errors="replace")
+        data = json.loads(raw) if raw else {}
+        if not isinstance(data, dict):
+            return False
+
+        if not data.get("installed", False):
+            return False
+
+        # Age gate (advisory only).
+        when = data.get("when")
+        if isinstance(when, (int, float)):
+            age_s = max(0.0, time.time() - float(when))
+            if age_s > (max_age_days * 86400):
+                return False
+
+        # Marker python version should match the RUNTIME VENV python (not the app interpreter).
+        py = data.get("python")
+        if not (isinstance(py, str) and py.strip()):
+            return False

-    # If no marker, perform install under a lock
-    if not marker.exists():
         try:
-            with _install_lock(rt):
-                # Re-check inside lock in case another process finished
-                if not marker.exists():
-                    _install_torch(vp, prefer_cuda=prefer_cuda, prefer_xpu=prefer_xpu, prefer_dml=prefer_dml, status_cb=status_cb)
-        except Exception as e:
-            if _is_access_denied(e):
-                raise OSError(_access_denied_msg(rt)) from e
-            raise
+            maj_s, min_s = py.strip().split(".", 1)
+            marker_ver = (int(maj_s), int(min_s))
+        except Exception:
+            return False

-    # Ensure the venv site is first on sys.path, then demote shadowers again
-    if str(site) not in sys.path:
-        sys.path.insert(0, str(site))
-    _demote_shadow_torch_paths(status_cb=status_cb)
+        # If we can't determine venv version, treat marker as unreliable for fast-path.
+        if venv_ver is None:
+            return False
+
+        if marker_ver != venv_ver:
+            return False
+
+        # Check that recorded files (if present) live under the computed site-packages path.
+        site_s = str(site)
+        tf = data.get("torch_file")
+        tvf = data.get("torchvision_file")
+        taf = data.get("torchaudio_file")
+
+        def _under_site(p: str | None) -> bool:
+            if not p or not isinstance(p, str):
+                return False
+            return site_s in p
+
+        if not _under_site(tf):
+            return False
+        if not _under_site(tvf):
+            return False
+        if require_torchaudio and not _under_site(taf):
+            return False
+
+        return True
+    except Exception:
+        return False
+
+
+def _qt_settings():
+    """
+    Create QSettings without importing PyQt6 at module import time.
+    We only import it inside the function so runtime_torch stays usable
+    in non-GUI contexts.
+    """
+    try:
+        from PyQt6.QtCore import QSettings
+        # Must match what your app sets via QCoreApplication.setOrganizationName / setApplicationName
+        return QSettings()
+    except Exception:
+        return None
+
+
+def _qcache_get():
+    s = _qt_settings()
+    if not s:
+        return None
+    s.beginGroup("runtime_torch")
+    data = {
+        "tag": s.value("tag", "", str),
+        "rt_dir": s.value("rt_dir", "", str),
+        "site": s.value("site", "", str),
+        "python": s.value("python", "", str),
+        "torch": s.value("torch", "", str),
+        "torchvision": s.value("torchvision", "", str),
+        "torchaudio": s.value("torchaudio", "", str),
+        "when": s.value("when", 0, int),
+        "require_torchaudio": s.value("require_torchaudio", True, bool),
+    }
+    s.endGroup()
+    return data
+
+
+def _qcache_set(*, tag: str, rt_dir: Path, site: Path, python_ver: str | None,
+                torch_ver: str | None, tv_ver: str | None, ta_ver: str | None,
+                require_torchaudio: bool):
+    s = _qt_settings()
+    if not s:
+        return
+    s.beginGroup("runtime_torch")
+    s.setValue("tag", tag)
+    s.setValue("rt_dir", str(rt_dir))
+    s.setValue("site", str(site))
+    s.setValue("python", python_ver or "")
+    s.setValue("torch", torch_ver or "")
+    s.setValue("torchvision", tv_ver or "")
+    s.setValue("torchaudio", ta_ver or "")
+    s.setValue("when", int(time.time()))
+    s.setValue("require_torchaudio", bool(require_torchaudio))
+    s.endGroup()
+    s.sync()
+
+
+def _qcache_clear():
+    s = _qt_settings()
+    if not s:
+        return
+    s.beginGroup("runtime_torch")
+    s.remove("")  # remove all keys in group
+    s.endGroup()
+    s.sync()
+
+
+# module-level cache (optional but recommended)
+# module-level cache (optional but recommended)
+_TORCH_CACHED = None
+
+
+def import_torch(
+    prefer_cuda: bool = True,
+    prefer_xpu: bool = False,
+    prefer_dml: bool = False,
+    status_cb=print,
+    *,
+    require_torchaudio: bool = True,
+):
+    """
+    Ensure a per-user venv exists with torch installed; return the imported torch module.
+
+    ULTRA FAST PATH:
+      - Use QSettings cached site-packages (no subprocess at all) and attempt in-process import.
+
+    FAST PATH:
+      - If marker looks valid, compute site-packages (1 subprocess) and try in-process imports.
+      - If that works, skip expensive subprocess probes.
+
+    SLOW PATH:
+      - Probe runtime venv via subprocess (torch/torchvision/torchaudio).
+      - Install only if missing, then re-probe.
+      - Finally import in-process from venv site-packages.

-    # Import + sanity. If broken, force a clean repair (all OSes).
-    def _force_repair():
+    NEW RULES:
+      - Marker/QSettings are advisory only (fast path gates).
+      - If torch/torchvision(/torchaudio) exist in the runtime venv, USE THEM. Do nothing else.
+      - Only if missing in the runtime venv should we install.
+      - NEVER auto-uninstall user torch/torchvision/torchaudio. No automatic repair.
+    """
+    global _TORCH_CACHED
+    if _TORCH_CACHED is not None:
+        return _TORCH_CACHED
+
+    def _write_qcache_best_effort(rt: Path, site: Path, venv_ver: tuple[int,int] | None):
+        """
+        Write QSettings cache only after we have proven imports work in-process.
+        """
         try:
-            status_cb("Detected broken/shadowed Torch import attempting clean repair…")
+            import torch as _t  # noqa
+            import torchvision as _tv  # noqa
+            _ta = None
+            if require_torchaudio:
+                import torchaudio as _ta  # noqa
+
+            _qcache_set(
+                tag=rt.name,  # IMPORTANT: runtime tag, not sys.version_info tag
+                rt_dir=rt,
+                site=site,
+                python_ver=(f"{venv_ver[0]}.{venv_ver[1]}" if venv_ver else ""),
+                torch_ver=getattr(_t, "__version__", None),
+                tv_ver=getattr(_tv, "__version__", None),
+                ta_ver=getattr(_ta, "__version__", None) if _ta else None,
+                require_torchaudio=require_torchaudio,
+            )
         except Exception:
             pass

-        # remove marker so future launches don't skip install
+    _rt_dbg(f"sys.frozen={getattr(sys,'frozen',False)}", status_cb)
+    _rt_dbg(f"sys.executable={sys.executable}", status_cb)
+    _rt_dbg(f"sys.version={sys.version}", status_cb)
+    _rt_dbg(f"current_tag={_current_tag()}", status_cb)
+    _rt_dbg(f"SASPRO_RUNTIME_DIR={os.getenv('SASPRO_RUNTIME_DIR')!r}", status_cb)
+
+    # Remove obvious shadowing paths (repo folders / cwd torch trees)
+    _ban_shadow_torch_paths(status_cb=status_cb)
+    _purge_bad_torch_from_sysmodules(status_cb=status_cb)
+
+    # ------------------------------------------------------------
+    # Choose runtime + ensure venv exists
+    # ------------------------------------------------------------
+    rt = _user_runtime_dir(status_cb=status_cb)
+    vp = _ensure_venv(rt, status_cb=status_cb)
+
+    # ------------------------------------------------------------
+    # ULTRA FAST PATH (runtime-aware): QSettings cache.
+    # Now we can compare the cache tag against the RUNTIME tag, not sys.version_info.
+    # This stays correct for "app python != runtime venv python" cases.
+    # ------------------------------------------------------------
+    try:
+        qc = _qcache_get()
+        if qc:
+            site_s = (qc.get("site") or "").strip()
+            rt_s = (qc.get("rt_dir") or "").strip()
+            req_ta = bool(qc.get("require_torchaudio", True))
+            tag = (qc.get("tag") or "").strip()
+
+            # Accept cache only if it matches this runtime folder tag
+            if (
+                tag == rt.name
+                and site_s and Path(site_s).exists()
+                and rt_s and Path(rt_s).exists()
+                and (req_ta == require_torchaudio)
+            ):
+                status_cb("[RT] QSettings cache hit (runtime tag match); attempting zero-subprocess import.")
+
+                if site_s not in sys.path:
+                    sys.path.insert(0, site_s)
+
+                _demote_shadow_torch_paths(status_cb=status_cb)
+                _purge_bad_torch_from_sysmodules(status_cb=status_cb)
+
+                import torch  # noqa
+                import torchvision  # noqa
+                if require_torchaudio:
+                    import torchaudio  # noqa
+
+                _TORCH_CACHED = torch
+
+                return torch
+
+    except Exception as e:
+        status_cb(f"[RT] QSettings fast-path failed: {type(e).__name__}: {e}. Continuing…")
         try:
-            marker.unlink()
+            _qcache_clear()
         except Exception:
             pass

-        subprocess.run([str(vp), "-m", "pip", "uninstall", "-y",
-                        "torch", "torchvision", "torchaudio"], check=False)
-        subprocess.run([str(vp), "-m", "pip", "cache", "purge"], check=False)
-        with _install_lock(rt):
-            _install_torch(
-                vp,
-                prefer_cuda=prefer_cuda,
-                prefer_xpu=prefer_xpu,
-                prefer_dml=prefer_dml,
-                status_cb=status_cb,
-            )
-        importlib.invalidate_caches()
-        _demote_shadow_torch_paths(status_cb=status_cb)
+    # site-packages path (subprocess but relatively cheap)
+    site = _site_packages(vp)
+    marker = rt / "torch_installed.json"
+    venv_ver = _venv_pyver(vp)

+    _rt_dbg(f"venv_ver={venv_ver}", status_cb)
+    _rt_dbg(f"rt={rt}", status_cb)
+    _rt_dbg(f"venv_python={vp}", status_cb)
+    _rt_dbg(f"marker={marker} exists={marker.exists()}", status_cb)
+    _rt_dbg(f"site={site}", status_cb)

+    # Best-effort ensure numpy in venv (harmless if already there)
     try:
         _ensure_numpy(vp, status_cb=status_cb)
     except Exception:
         pass

+    # ------------------------------------------------------------
+    # FAST PATH: if marker looks valid, try in-process import NOW.
+    # This avoids the 3 subprocess probes on every launch.
+    # ------------------------------------------------------------
     try:
-        import torch  # noqa
-        _torch_stack_sanity_check(status_cb=status_cb)
-        # write/update marker only when sane
-        if not marker.exists():
-            pyver = f"{sys.version_info.major}.{sys.version_info.minor}"
+        if _marker_says_ready(marker, site, venv_ver, require_torchaudio=require_torchaudio):
+            status_cb("[RT] Marker valid; attempting fast in-process import (skipping venv probe).")
+
+            sp = str(site)
+            if sp not in sys.path:
+                sys.path.insert(0, sp)
+
+            _demote_shadow_torch_paths(status_cb=status_cb)
+            _purge_bad_torch_from_sysmodules(status_cb=status_cb)
+
+            import torch  # noqa
+            import torchvision  # noqa
+            if require_torchaudio:
+                import torchaudio  # noqa
+
+            # refresh marker (best-effort)
             try:
-                import torch, torchvision, torchaudio
-                marker.write_text(json.dumps({
-                    "installed": True,
-                    "python": pyver,
-                    "when": int(time.time()),
-                    "torch": getattr(torch, "__version__", None),
-                    "torchvision": getattr(torchvision, "__version__", None),
-                    "torchaudio": getattr(torchaudio, "__version__", None),
-                }), encoding="utf-8")
+                _write_torch_marker(marker, status_cb=status_cb)
             except Exception:
-                marker.write_text(json.dumps({"installed": True, "python": pyver, "when": int(time.time())}), encoding="utf-8")
+                pass

-        return torch
+            _TORCH_CACHED = torch
+            _write_qcache_best_effort(rt, site, venv_ver)
+            return torch
+
+    except Exception as e:
+        status_cb(f"[RT] Marker fast-path failed: {type(e).__name__}: {e}. Falling back to full probe…")
+        # if marker fast path fails, your cached site-packages may also be stale
+        try:
+            _qcache_clear()
+        except Exception:
+            pass
+
+    # ------------------------------------------------------------
+    # SLOW PATH: Probe the runtime venv definitively.
+    # If it has torch stack, we're DONE (no installs, no repair).
+    # ------------------------------------------------------------
+    ok_all, info = _venv_has_torch_stack(vp, status_cb=status_cb, require_torchaudio=require_torchaudio)
+    status_cb(
+        "[RT] venv probe: "
+        f"torch={info['torch'][0]} "
+        f"torchvision={info['torchvision'][0]} "
+        f"torchaudio={info['torchaudio'][0]}"
+    )
+
+    if not ok_all:
+        missing = []
+        if not info["torch"][0]:
+            missing.append("torch")
+        if not info["torchvision"][0]:
+            missing.append("torchvision")
+        if require_torchaudio and (not info["torchaudio"][0]):
+            missing.append("torchaudio")
+
+        status_cb(f"[RT] Missing in runtime venv: {missing}. Installing…")
+
+        try:
+            with _install_lock(rt):
+                _install_torch(
+                    vp,
+                    prefer_cuda=prefer_cuda,
+                    prefer_xpu=prefer_xpu,
+                    prefer_dml=prefer_dml,
+                    status_cb=status_cb,
+                )
+                _ensure_numpy(vp, status_cb=status_cb)
+        except Exception as e:
+            if _is_access_denied(e):
+                raise OSError(_access_denied_msg(rt)) from e
+            raise
+
+        # Re-probe after install
+        ok_all, info = _venv_has_torch_stack(vp, status_cb=status_cb, require_torchaudio=require_torchaudio)
+        status_cb(
+            "[RT] venv re-probe: "
+            f"torch={info['torch'][0]} "
+            f"torchvision={info['torchvision'][0]} "
+            f"torchaudio={info['torchaudio'][0]}"
+        )
+        if not ok_all:
+            msg = "\n".join([f"{k}: ok={ok} :: {out}" for k, (ok, out) in info.items()])
+            raise RuntimeError("Torch stack still not importable in runtime venv after install:\n" + msg)
+
+    # Always write/update marker for convenience, but never trust it for decisions.
+    try:
+        _write_torch_marker(marker, status_cb=status_cb)
     except Exception:
-        _force_repair()
-        _purge_bad_torch_from_sysmodules(status_cb=status_cb)
-        _ban_shadow_torch_paths(status_cb=status_cb)
-        import torch  # retry
-        _torch_stack_sanity_check(status_cb=status_cb)
-        if not marker.exists():
-            pyver = f"{sys.version_info.major}.{sys.version_info.minor}"
-            try:
-                import torch, torchvision, torchaudio
-                marker.write_text(json.dumps({
-                    "installed": True,
-                    "python": pyver,
-                    "when": int(time.time()),
-                    "torch": getattr(torch, "__version__", None),
-                    "torchvision": getattr(torchvision, "__version__", None),
-                    "torchaudio": getattr(torchaudio, "__version__", None),
-                }), encoding="utf-8")
-            except Exception:
-                marker.write_text(json.dumps({"installed": True, "python": pyver, "when": int(time.time())}), encoding="utf-8")
+        pass

+    # ------------------------------------------------------------
+    # Now import torch in-process, but ONLY after putting runtime site first.
+    # ------------------------------------------------------------
+    sp = str(site)
+    if sp not in sys.path:
+        sys.path.insert(0, sp)
+
+    _demote_shadow_torch_paths(status_cb=status_cb)
+    _purge_bad_torch_from_sysmodules(status_cb=status_cb)
+
+    try:
+        import torch  # noqa
+
+        _TORCH_CACHED = torch
+        _write_qcache_best_effort(rt, site, venv_ver)
         return torch

+    except Exception as e:
+        # prevent repeatedly hitting a bad cached site path on next launch
+        try:
+            _qcache_clear()
+        except Exception:
+            pass
+
+        msg = "\n".join([f"{k}: ok={ok} :: {out}" for k, (ok, out) in info.items()])
+        raise RuntimeError(
+            "Runtime venv probe says torch stack exists, but in-process import failed.\n"
+            "This typically indicates a frozen-stdlib / PyInstaller packaging issue, not a bad torch install.\n\n"
+            f"Original error: {type(e).__name__}: {e}\n\n"
+            "Runtime venv probe:\n" + msg
+        ) from e
+
+
 def _find_system_python_cmd() -> list[str]:
     import platform as _plat
     if _plat.system() == "Darwin":
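
Note on the hunk above: `import_torch` remains the public entry point, now with a keyword-only `require_torchaudio` switch, a module-level cache, and the layered QSettings/marker fast paths described in its docstring. A hedged usage sketch; the module path `setiastro.saspro.runtime_torch` is inferred from the file list at the top of this diff, and the log callback is illustrative:

    from setiastro.saspro import runtime_torch

    def load_torch_for_gui(append_log_line):
        # Route the "[RT] ..." progress messages into a GUI log pane instead of stdout.
        return runtime_torch.import_torch(
            prefer_cuda=True,
            prefer_xpu=False,
            prefer_dml=False,
            status_cb=append_log_line,
            require_torchaudio=True,  # pass False for tools that only need torch + torchvision
        )
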
@@ -814,7 +1233,7 @@ def add_runtime_to_sys_path(status_cb=print) -> None:
     """
     Warm up sys.path so a fresh launch can see the runtime immediately.
     """
-    rt = _user_runtime_dir()
+    rt = _user_runtime_dir(status_cb=status_cb)
     p = _venv_paths(rt)
     vpy = p["python"]
     if not vpy.exists():
@@ -837,3 +1256,69 @@ def add_runtime_to_sys_path(status_cb=print) -> None:
         _demote_shadow_torch_paths(status_cb=status_cb)
     except Exception:
         return
+
+def prewarm_torch_cache(
+    status_cb=print,
+    *,
+    require_torchaudio: bool = True,
+    ensure_venv: bool = True,
+    ensure_numpy: bool = False,
+    validate_marker: bool = True,
+) -> None:
+    """
+    Build and persist the QSettings cache early (startup), so the first real
+    import_torch() call can be zero-subprocess.
+
+    By default this does NOT import torch (keeps startup lighter).
+    It only computes runtime rt/vpy/site and writes QSettings.
+    """
+    try:
+        _ban_shadow_torch_paths(status_cb=status_cb)
+        _purge_bad_torch_from_sysmodules(status_cb=status_cb)
+
+        rt = _user_runtime_dir(status_cb=status_cb)
+        p = _venv_paths(rt)
+        vp = p["python"]
+
+        if ensure_venv:
+            vp = _ensure_venv(rt, status_cb=status_cb)
+
+        if not vp.exists():
+            return
+
+        if ensure_numpy:
+            try:
+                _ensure_numpy(vp, status_cb=status_cb)
+            except Exception:
+                pass
+
+        site = _site_packages(vp)
+        marker = rt / "torch_installed.json"
+        venv_ver = _venv_pyver(vp)
+
+        # Optionally only cache if marker looks valid (recommended),
+        # otherwise you may cache a site-packages that doesn't actually contain torch yet.
+        if validate_marker:
+            if not _marker_says_ready(marker, site, venv_ver, require_torchaudio=require_torchaudio):
+                status_cb("[RT] prewarm: marker not valid; skipping QSettings cache write.")
+                return
+
+        # IMPORTANT: use runtime tag, not app interpreter tag, for mixed-version scenarios
+        cache_tag = rt.name  # e.g. "py312"
+
+        _qcache_set(
+            tag=cache_tag,
+            rt_dir=rt,
+            site=site,
+            python_ver=(f"{venv_ver[0]}.{venv_ver[1]}" if venv_ver else ""),
+            torch_ver=None,
+            tv_ver=None,
+            ta_ver=None,
+            require_torchaudio=require_torchaudio,
+        )
+        status_cb("[RT] prewarm: QSettings cache written.")
+    except Exception as e:
+        try:
+            status_cb(f"[RT] prewarm failed: {type(e).__name__}: {e}")
+        except Exception:
+            pass
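
Note on the hunk above: `prewarm_torch_cache` is intended to run once at application startup so the first real `import_torch()` call can take the zero-subprocess QSettings path. A minimal startup sketch; the module path is inferred from the file list, and the organization/application names are placeholders that must match whatever the real app sets before QSettings is created:

    import sys
    from PyQt6.QtCore import QCoreApplication
    from setiastro.saspro import runtime_torch

    app = QCoreApplication(sys.argv)
    QCoreApplication.setOrganizationName("SetiAstro")        # placeholder values, for illustration only
    QCoreApplication.setApplicationName("SetiAstroSuitePro")

    # Warm the QSettings cache early; by default this computes runtime paths
    # and writes QSettings without importing torch.
    runtime_torch.prewarm_torch_cache(status_cb=print, validate_marker=True)
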