canns 0.13.1__py3-none-any.whl → 0.14.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- canns/analyzer/data/__init__.py +5 -1
- canns/analyzer/data/asa/__init__.py +27 -12
- canns/analyzer/data/asa/cohospace.py +336 -10
- canns/analyzer/data/asa/config.py +3 -0
- canns/analyzer/data/asa/embedding.py +48 -45
- canns/analyzer/data/asa/path.py +104 -2
- canns/analyzer/data/asa/plotting.py +88 -19
- canns/analyzer/data/asa/tda.py +11 -4
- canns/analyzer/data/cell_classification/__init__.py +97 -0
- canns/analyzer/data/cell_classification/core/__init__.py +26 -0
- canns/analyzer/data/cell_classification/core/grid_cells.py +633 -0
- canns/analyzer/data/cell_classification/core/grid_modules_leiden.py +288 -0
- canns/analyzer/data/cell_classification/core/head_direction.py +347 -0
- canns/analyzer/data/cell_classification/core/spatial_analysis.py +431 -0
- canns/analyzer/data/cell_classification/io/__init__.py +5 -0
- canns/analyzer/data/cell_classification/io/matlab_loader.py +417 -0
- canns/analyzer/data/cell_classification/utils/__init__.py +39 -0
- canns/analyzer/data/cell_classification/utils/circular_stats.py +383 -0
- canns/analyzer/data/cell_classification/utils/correlation.py +318 -0
- canns/analyzer/data/cell_classification/utils/geometry.py +442 -0
- canns/analyzer/data/cell_classification/utils/image_processing.py +416 -0
- canns/analyzer/data/cell_classification/visualization/__init__.py +19 -0
- canns/analyzer/data/cell_classification/visualization/grid_plots.py +292 -0
- canns/analyzer/data/cell_classification/visualization/hd_plots.py +200 -0
- canns/analyzer/metrics/__init__.py +2 -1
- canns/analyzer/visualization/core/config.py +46 -4
- canns/data/__init__.py +6 -1
- canns/data/datasets.py +154 -1
- canns/data/loaders.py +37 -0
- canns/pipeline/__init__.py +13 -9
- canns/pipeline/__main__.py +6 -0
- canns/pipeline/asa/runner.py +105 -41
- canns/pipeline/asa_gui/__init__.py +68 -0
- canns/pipeline/asa_gui/__main__.py +6 -0
- canns/pipeline/asa_gui/analysis_modes/__init__.py +42 -0
- canns/pipeline/asa_gui/analysis_modes/base.py +39 -0
- canns/pipeline/asa_gui/analysis_modes/batch_mode.py +21 -0
- canns/pipeline/asa_gui/analysis_modes/cohomap_mode.py +56 -0
- canns/pipeline/asa_gui/analysis_modes/cohospace_mode.py +194 -0
- canns/pipeline/asa_gui/analysis_modes/decode_mode.py +52 -0
- canns/pipeline/asa_gui/analysis_modes/fr_mode.py +81 -0
- canns/pipeline/asa_gui/analysis_modes/frm_mode.py +92 -0
- canns/pipeline/asa_gui/analysis_modes/gridscore_mode.py +123 -0
- canns/pipeline/asa_gui/analysis_modes/pathcompare_mode.py +199 -0
- canns/pipeline/asa_gui/analysis_modes/tda_mode.py +112 -0
- canns/pipeline/asa_gui/app.py +29 -0
- canns/pipeline/asa_gui/controllers/__init__.py +6 -0
- canns/pipeline/asa_gui/controllers/analysis_controller.py +59 -0
- canns/pipeline/asa_gui/controllers/preprocess_controller.py +89 -0
- canns/pipeline/asa_gui/core/__init__.py +15 -0
- canns/pipeline/asa_gui/core/cache.py +14 -0
- canns/pipeline/asa_gui/core/runner.py +1936 -0
- canns/pipeline/asa_gui/core/state.py +324 -0
- canns/pipeline/asa_gui/core/worker.py +260 -0
- canns/pipeline/asa_gui/main_window.py +184 -0
- canns/pipeline/asa_gui/models/__init__.py +7 -0
- canns/pipeline/asa_gui/models/config.py +14 -0
- canns/pipeline/asa_gui/models/job.py +31 -0
- canns/pipeline/asa_gui/models/presets.py +21 -0
- canns/pipeline/asa_gui/resources/__init__.py +16 -0
- canns/pipeline/asa_gui/resources/dark.qss +167 -0
- canns/pipeline/asa_gui/resources/light.qss +163 -0
- canns/pipeline/asa_gui/resources/styles.qss +130 -0
- canns/pipeline/asa_gui/utils/__init__.py +1 -0
- canns/pipeline/asa_gui/utils/formatters.py +15 -0
- canns/pipeline/asa_gui/utils/io_adapters.py +40 -0
- canns/pipeline/asa_gui/utils/validators.py +41 -0
- canns/pipeline/asa_gui/views/__init__.py +1 -0
- canns/pipeline/asa_gui/views/help_content.py +171 -0
- canns/pipeline/asa_gui/views/pages/__init__.py +6 -0
- canns/pipeline/asa_gui/views/pages/analysis_page.py +565 -0
- canns/pipeline/asa_gui/views/pages/preprocess_page.py +492 -0
- canns/pipeline/asa_gui/views/panels/__init__.py +1 -0
- canns/pipeline/asa_gui/views/widgets/__init__.py +21 -0
- canns/pipeline/asa_gui/views/widgets/artifacts_tab.py +44 -0
- canns/pipeline/asa_gui/views/widgets/drop_zone.py +80 -0
- canns/pipeline/asa_gui/views/widgets/file_list.py +27 -0
- canns/pipeline/asa_gui/views/widgets/gridscore_tab.py +308 -0
- canns/pipeline/asa_gui/views/widgets/help_dialog.py +27 -0
- canns/pipeline/asa_gui/views/widgets/image_tab.py +50 -0
- canns/pipeline/asa_gui/views/widgets/image_viewer.py +97 -0
- canns/pipeline/asa_gui/views/widgets/log_box.py +16 -0
- canns/pipeline/asa_gui/views/widgets/pathcompare_tab.py +200 -0
- canns/pipeline/asa_gui/views/widgets/popup_combo.py +25 -0
- canns/pipeline/gallery/__init__.py +15 -5
- canns/pipeline/gallery/__main__.py +11 -0
- canns/pipeline/gallery/app.py +705 -0
- canns/pipeline/gallery/runner.py +790 -0
- canns/pipeline/gallery/state.py +51 -0
- canns/pipeline/gallery/styles.tcss +123 -0
- canns/pipeline/launcher.py +81 -0
- {canns-0.13.1.dist-info → canns-0.14.0.dist-info}/METADATA +11 -1
- canns-0.14.0.dist-info/RECORD +163 -0
- canns-0.14.0.dist-info/entry_points.txt +5 -0
- canns/pipeline/_base.py +0 -50
- canns-0.13.1.dist-info/RECORD +0 -89
- canns-0.13.1.dist-info/entry_points.txt +0 -3
- {canns-0.13.1.dist-info → canns-0.14.0.dist-info}/WHEEL +0 -0
- {canns-0.13.1.dist-info → canns-0.14.0.dist-info}/licenses/LICENSE +0 -0
--- /dev/null
+++ b/canns/pipeline/asa_gui/core/state.py
@@ -0,0 +1,324 @@
+"""State management for ASA GUI.
+
+This module provides centralized workflow state management with Qt signals
+for reactive UI updates. All file paths are stored relative to the working
+directory for portability.
+"""
+
+from __future__ import annotations
+
+from copy import deepcopy
+from dataclasses import dataclass, field
+from pathlib import Path
+from typing import TYPE_CHECKING, Any
+
+import numpy as np
+from PySide6.QtCore import QObject, Signal
+
+if TYPE_CHECKING:
+    from numpy.typing import NDArray
+
+
+@dataclass
+class WorkflowState:
+    """Centralized state for ASA analysis workflow.
+
+    All file paths are relative to workdir for portability.
+    """
+
+    # Core paths
+    workdir: Path = field(default_factory=lambda: Path(".").resolve())
+
+    # Input configuration
+    input_mode: str = "asa"  # "asa" | "neuron_traj" | "batch"
+    preset: str = "grid"  # "grid" | "hd" | "none"
+
+    # File paths (relative to workdir)
+    asa_file: Path | None = None
+    neuron_file: Path | None = None
+    traj_file: Path | None = None
+
+    # Preprocessing
+    preprocess_method: str = "none"  # "none" | "embed_spike_trains"
+    preprocess_params: dict[str, Any] = field(default_factory=dict)
+    preclass: str = "none"  # "none" | "grid" | "hd"
+    preclass_params: dict[str, Any] = field(default_factory=dict)
+
+    # Preprocessed data (in-memory)
+    embed_data: NDArray[np.floating] | None = None
+    aligned_pos: dict[str, NDArray[np.floating]] | None = None
+
+    # Analysis configuration
+    analysis_mode: str = "tda"
+    analysis_params: dict[str, Any] = field(default_factory=dict)
+
+    # Results
+    artifacts: dict[str, Path] = field(default_factory=dict)
+
+    # Runtime state
+    is_running: bool = False
+    current_stage: str = ""
+    progress: int = 0
+
+    def copy(self) -> WorkflowState:
+        """Create a shallow copy of the state (excluding large arrays)."""
+        return WorkflowState(
+            workdir=self.workdir,
+            input_mode=self.input_mode,
+            preset=self.preset,
+            asa_file=self.asa_file,
+            neuron_file=self.neuron_file,
+            traj_file=self.traj_file,
+            preprocess_method=self.preprocess_method,
+            preprocess_params=deepcopy(self.preprocess_params),
+            preclass=self.preclass,
+            preclass_params=deepcopy(self.preclass_params),
+            embed_data=None,  # Don't copy large arrays
+            aligned_pos=None,
+            analysis_mode=self.analysis_mode,
+            analysis_params=deepcopy(self.analysis_params),
+            artifacts=deepcopy(self.artifacts),
+            is_running=self.is_running,
+            current_stage=self.current_stage,
+            progress=self.progress,
+        )
+
+
+class StateManager(QObject):
+    """Reactive state manager with Qt signals.
+
+    Emits signals when state changes to enable reactive UI updates.
+    Supports undo/redo through state history.
+    """
+
+    # Signal emitted when any state field changes: (field_name, new_value)
+    state_changed = Signal(str, object)
+
+    # Signal emitted when state is fully replaced (e.g., undo/redo)
+    state_replaced = Signal()
+
+    def __init__(self, parent: QObject | None = None) -> None:
+        super().__init__(parent)
+        self._state = WorkflowState()
+        self._history: list[WorkflowState] = []
+        self._history_index = -1
+        self._max_history = 50
+
+    @property
+    def state(self) -> WorkflowState:
+        """Get current workflow state."""
+        return self._state
+
+    def update(self, **kwargs: Any) -> None:
+        """Update state fields and emit signals.
+
+        Args:
+            **kwargs: Field names and their new values
+        """
+        for key, value in kwargs.items():
+            if hasattr(self._state, key):
+                old_value = getattr(self._state, key)
+                if not self._is_equal(old_value, value):
+                    setattr(self._state, key, value)
+                    self.state_changed.emit(key, value)
+
+    def batch_update(self, **kwargs: Any) -> None:
+        """Update multiple fields without emitting individual signals.
+
+        Emits state_replaced at the end.
+        """
+        for key, value in kwargs.items():
+            if hasattr(self._state, key):
+                setattr(self._state, key, value)
+        self.state_replaced.emit()
+
+    def push_history(self) -> None:
+        """Save current state for undo."""
+        # Truncate forward history
+        self._history = self._history[: self._history_index + 1]
+        # Save state snapshot
+        self._history.append(self._state.copy())
+        # Limit history size
+        if len(self._history) > self._max_history:
+            self._history = self._history[-self._max_history :]
+        self._history_index = len(self._history) - 1
+
+    def undo(self) -> bool:
+        """Restore previous state.
+
+        Returns:
+            True if undo was successful
+        """
+        if self._history_index > 0:
+            self._history_index -= 1
+            self._restore(self._history[self._history_index])
+            return True
+        return False
+
+    def redo(self) -> bool:
+        """Restore next state.
+
+        Returns:
+            True if redo was successful
+        """
+        if self._history_index < len(self._history) - 1:
+            self._history_index += 1
+            self._restore(self._history[self._history_index])
+            return True
+        return False
+
+    def can_undo(self) -> bool:
+        """Check if undo is available."""
+        return self._history_index > 0
+
+    def can_redo(self) -> bool:
+        """Check if redo is available."""
+        return self._history_index < len(self._history) - 1
+
+    def _restore(self, snapshot: WorkflowState) -> None:
+        """Restore state from snapshot."""
+        # Preserve large arrays from current state
+        embed_data = self._state.embed_data
+        aligned_pos = self._state.aligned_pos
+
+        self._state = snapshot.copy()
+        self._state.embed_data = embed_data
+        self._state.aligned_pos = aligned_pos
+
+        self.state_replaced.emit()
+
+    def reset(self) -> None:
+        """Reset state to defaults."""
+        self._state = WorkflowState()
+        self._history.clear()
+        self._history_index = -1
+        self.state_replaced.emit()
+
+    @staticmethod
+    def _is_equal(a: Any, b: Any) -> bool:
+        """Safe equality check that handles numpy arrays and containers."""
+        if a is b:
+            return True
+        if isinstance(a, np.ndarray) or isinstance(b, np.ndarray):
+            if isinstance(a, np.ndarray) and isinstance(b, np.ndarray):
+                return np.array_equal(a, b)
+            return False
+        if isinstance(a, dict) and isinstance(b, dict):
+            if a.keys() != b.keys():
+                return False
+            return all(StateManager._is_equal(a[k], b[k]) for k in a.keys())
+        if isinstance(a, (list, tuple)) and isinstance(b, (list, tuple)):
+            if len(a) != len(b):
+                return False
+            return all(StateManager._is_equal(x, y) for x, y in zip(a, b, strict=False))
+        try:
+            return a == b
+        except Exception:
+            return False
+
+
+# --- Path utilities ---
+
+
+def relative_path(state: WorkflowState, path: Path) -> Path:
+    """Convert absolute path to workdir-relative path.
+
+    Args:
+        state: Current workflow state
+        path: Absolute path to convert
+
+    Returns:
+        Path relative to workdir
+    """
+    try:
+        return path.relative_to(state.workdir)
+    except ValueError:
+        # Path is not relative to workdir, return as-is
+        return path
+
+
+def resolve_path(state: WorkflowState, path: Path | None) -> Path | None:
+    """Convert relative path to absolute path.
+
+    Args:
+        state: Current workflow state
+        path: Relative path to convert
+
+    Returns:
+        Absolute path or None if path is None
+    """
+    if path is None:
+        return None
+
+    if path.is_absolute():
+        return path
+
+    return state.workdir / path
+
+
+# --- Validation utilities ---
+
+
+def validate_files(state: WorkflowState) -> tuple[bool, str]:
+    """Check if required files exist and are valid.
+
+    Args:
+        state: Current workflow state
+
+    Returns:
+        Tuple of (is_valid, error_message)
+    """
+    if state.input_mode == "asa":
+        if state.asa_file is None:
+            return False, "ASA file not selected"
+
+        asa_path = resolve_path(state, state.asa_file)
+        if asa_path is None or not asa_path.exists():
+            return False, f"ASA file not found: {asa_path}"
+
+        # Validate .npz structure
+        try:
+            data = np.load(asa_path, allow_pickle=True)
+            required_keys = ["spike", "t"]
+            missing = [k for k in required_keys if k not in data.files]
+            if missing:
+                return False, f"ASA file missing required keys: {missing}"
+        except Exception as e:
+            return False, f"Failed to load ASA file: {e}"
+
+    elif state.input_mode == "neuron_traj":
+        if state.neuron_file is None:
+            return False, "Neuron file not selected"
+        if state.traj_file is None:
+            return False, "Trajectory file not selected"
+
+        neuron_path = resolve_path(state, state.neuron_file)
+        traj_path = resolve_path(state, state.traj_file)
+
+        if neuron_path is None or not neuron_path.exists():
+            return False, f"Neuron file not found: {neuron_path}"
+        if traj_path is None or not traj_path.exists():
+            return False, f"Trajectory file not found: {traj_path}"
+
+    return True, ""
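
The new state module gives the GUI a single `WorkflowState` dataclass plus a `StateManager` that emits `state_changed` only when a field actually changes and keeps snapshots for undo/redo. A minimal usage sketch (illustration only, not shipped in the wheel; the working directory, file name, and print handlers below are made up):

```python
from pathlib import Path

from PySide6.QtCore import QCoreApplication

from canns.pipeline.asa_gui.core.state import StateManager, resolve_path, validate_files

app = QCoreApplication([])  # Qt context for the QObject-based manager

manager = StateManager()
manager.state_changed.connect(lambda name, value: print(f"{name} -> {value!r}"))

manager.push_history()  # snapshot the defaults
manager.update(
    workdir=Path("/tmp/asa-session"),  # hypothetical working directory
    asa_file=Path("recording.npz"),    # hypothetical input file, stored workdir-relative
)
manager.push_history()  # snapshot after the edits

ok, msg = validate_files(manager.state)  # checks existence and required .npz keys
print("files valid:", ok, msg or "(ok)")
print("resolved:", resolve_path(manager.state, manager.state.asa_file))

if manager.can_undo():
    manager.undo()  # back to the default snapshot; embed_data/aligned_pos are preserved
```
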
--- /dev/null
+++ b/canns/pipeline/asa_gui/core/worker.py
@@ -0,0 +1,260 @@
+"""Async worker infrastructure for ASA GUI.
+
+This module provides QThread-based workers for running analysis
+in the background without blocking the UI.
+"""
+
+from __future__ import annotations
+
+from collections.abc import Callable
+from typing import TYPE_CHECKING, Any
+
+from PySide6.QtCore import QCoreApplication, QObject, QThread, Signal, Slot
+
+if TYPE_CHECKING:
+    pass
+
+
+class AnalysisWorker(QObject):
+    """Background worker for analysis execution.
+
+    Runs analysis in a separate thread and emits signals for
+    progress updates, logging, and completion.
+    """
+
+    # Signals
+    log = Signal(str)  # Log message
+    progress = Signal(int)  # Progress percentage (0-100)
+    finished = Signal(object)  # JobResult on success
+    error = Signal(str)  # Error message on failure
+
+    def __init__(self, parent: QObject | None = None) -> None:
+        super().__init__(parent)
+        self._cancelled = False
+        self._task: Callable[..., Any] | None = None
+        self._args: tuple[Any, ...] = ()
+        self._kwargs: dict[str, Any] = {}
+
+    def setup(
+        self,
+        task: Callable[..., Any],
+        *args: Any,
+        **kwargs: Any,
+    ) -> None:
+        """Configure the task to run.
+
+        Args:
+            task: Callable to execute
+            *args: Positional arguments for task
+            **kwargs: Keyword arguments for task
+        """
+        self._task = task
+        self._args = args
+        self._kwargs = kwargs
+        self._cancelled = False
+
+    @Slot()
+    def run(self) -> None:
+        """Execute the configured task."""
+        if self._task is None:
+            self.error.emit("No task configured")
+            return
+
+        try:
+            # Inject callbacks into kwargs
+            self._kwargs["log_callback"] = self._emit_log
+            self._kwargs["progress_callback"] = self._emit_progress
+            self._kwargs["cancel_check"] = self._is_cancelled
+
+            result = self._task(*self._args, **self._kwargs)
+
+            if self._cancelled:
+                self.error.emit("Cancelled by user")
+            else:
+                self.finished.emit(result)
+
+        except Exception as e:
+            self.error.emit(str(e))
+
+    def request_cancel(self) -> None:
+        """Request cancellation of running task."""
+        self._cancelled = True
+
+    def _is_cancelled(self) -> bool:
+        """Check if cancellation was requested."""
+        return self._cancelled
+
+    def _emit_log(self, msg: str) -> None:
+        """Emit log signal (thread-safe)."""
+        self.log.emit(msg)
+
+    def _emit_progress(self, pct: int) -> None:
+        """Emit progress signal (thread-safe)."""
+        self.progress.emit(max(0, min(100, pct)))
+
+
+class _UiDispatcher(QObject):
+    """Dispatch worker signals onto the UI thread."""
+
+    def __init__(
+        self,
+        manager: WorkerManager,
+        *,
+        on_log: Callable[[str], None] | None,
+        on_progress: Callable[[int], None] | None,
+        on_finished: Callable[[Any], None] | None,
+        on_error: Callable[[str], None] | None,
+    ) -> None:
+        super().__init__()
+        self._manager = manager
+        self._on_log = on_log
+        self._on_progress = on_progress
+        self._on_finished = on_finished
+        self._on_error = on_error
+
+    @Slot(str)
+    def handle_log(self, msg: str) -> None:
+        if self._on_log:
+            self._on_log(msg)
+
+    @Slot(int)
+    def handle_progress(self, pct: int) -> None:
+        if self._on_progress:
+            self._on_progress(pct)
+
+    @Slot(object)
+    def handle_finished(self, result: Any) -> None:
+        try:
+            if self._on_finished:
+                self._on_finished(result)
+        except Exception:
+            import traceback
+
+            traceback.print_exc()
+        finally:
+            self._manager._cleanup()
+
+    @Slot(str)
+    def handle_error(self, msg: str) -> None:
+        try:
+            if self._on_error:
+                self._on_error(msg)
+        except Exception:
+            import traceback
+
+            traceback.print_exc()
+        finally:
+            self._manager._cleanup()
+
+
+class WorkerManager:
+    """Manages worker thread lifecycle.
+
+    Ensures only one worker runs at a time and handles
+    proper cleanup on completion or cancellation.
+    """
+
+    def __init__(self) -> None:
+        self._thread: QThread | None = None
+        self._worker: AnalysisWorker | None = None
+        self._dispatcher: _UiDispatcher | None = None
+        self._on_cleanup: Callable[[], None] | None = None
+
+    def is_running(self) -> bool:
+        """Check if a worker is currently running."""
+        return self._thread is not None and self._thread.isRunning()
+
+    def start(
+        self,
+        task: Callable[..., Any],
+        *args: Any,
+        on_log: Callable[[str], None] | None = None,
+        on_progress: Callable[[int], None] | None = None,
+        on_finished: Callable[[Any], None] | None = None,
+        on_error: Callable[[str], None] | None = None,
+        on_cleanup: Callable[[], None] | None = None,
+        **kwargs: Any,
+    ) -> None:
+        """Start a task in a background thread.
+
+        Args:
+            task: Callable to execute
+            *args: Positional arguments for task
+            on_log: Callback for log messages
+            on_progress: Callback for progress updates
+            on_finished: Callback on successful completion
+            on_error: Callback on error
+            on_cleanup: Callback after thread cleanup
+            **kwargs: Keyword arguments for task
+
+        Raises:
+            RuntimeError: If a task is already running
+        """
+        if self.is_running():
+            raise RuntimeError("A task is already running")
+
+        self._on_cleanup = on_cleanup
+
+        # Create thread and worker
+        self._thread = QThread()
+        self._worker = AnalysisWorker()
+        self._worker.setup(task, *args, **kwargs)
+        self._worker.moveToThread(self._thread)
+
+        # Dispatch signals onto UI thread
+        self._dispatcher = _UiDispatcher(
+            self,
+            on_log=on_log,
+            on_progress=on_progress,
+            on_finished=on_finished,
+            on_error=on_error,
+        )
+        app = QCoreApplication.instance()
+        if app is not None:
+            self._dispatcher.moveToThread(app.thread())
+
+        self._worker.log.connect(self._dispatcher.handle_log)
+        self._worker.progress.connect(self._dispatcher.handle_progress)
+        self._worker.finished.connect(self._dispatcher.handle_finished)
+        self._worker.error.connect(self._dispatcher.handle_error)
+
+        # Start execution
+        self._thread.started.connect(self._worker.run)
+        self._thread.start()
+
+    def request_cancel(self) -> None:
+        """Request cancellation of running task."""
+        if self._worker:
+            self._worker.request_cancel()
+
+    def wait(self, timeout_ms: int = 5000) -> bool:
+        """Wait for worker to finish.
+
+        Args:
+            timeout_ms: Maximum time to wait in milliseconds
+
+        Returns:
+            True if worker finished, False if timeout
+        """
+        if self._thread:
+            return self._thread.wait(timeout_ms)
+        return True
+
+    def _cleanup(self) -> None:
+        """Clean up thread and worker after completion."""
+        if self._thread:
+            self._thread.quit()
+            self._thread.wait(3000)
+            self._thread.deleteLater()
+            self._thread = None
+
+        if self._worker:
+            self._worker.deleteLater()
+            self._worker = None
+        if self._dispatcher:
+            self._dispatcher.deleteLater()
+            self._dispatcher = None
+
+        if self._on_cleanup:
+            self._on_cleanup()
+        self._on_cleanup = None
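
The worker module pairs `AnalysisWorker`, which injects `log_callback`, `progress_callback`, and `cancel_check` keyword arguments into whatever task it runs, with a `WorkerManager` that owns the `QThread` and dispatches results back to the UI thread. A minimal usage sketch (illustration only, not shipped in the wheel; `slow_task` and the handlers below are hypothetical):

```python
import time

from PySide6.QtCore import QCoreApplication

from canns.pipeline.asa_gui.core.worker import WorkerManager


def slow_task(n_steps, *, log_callback, progress_callback, cancel_check):
    """Toy workload that cooperates with the callbacks AnalysisWorker injects."""
    for i in range(n_steps):
        if cancel_check():  # honour cancellation requests
            return None
        time.sleep(0.1)  # stand-in for real analysis work
        log_callback(f"step {i + 1}/{n_steps}")
        progress_callback(int(100 * (i + 1) / n_steps))
    return "done"


app = QCoreApplication([])
manager = WorkerManager()


def on_finished(result):
    print("finished:", result)
    app.quit()


def on_error(message):
    print("error:", message)
    app.quit()


manager.start(
    slow_task,
    10,  # positional argument forwarded to the task
    on_log=print,
    on_progress=lambda pct: print(f"{pct}%"),
    on_finished=on_finished,
    on_error=on_error,
)
app.exec()  # callbacks are delivered on this thread via the _UiDispatcher
```
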