senoquant 1.0.0b3__py3-none-any.whl → 1.0.0b4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -6,6 +6,7 @@ import numpy as np
6
6
  from scipy import ndimage as ndi
7
7
  from skimage.filters import laplace
8
8
  from skimage.morphology import local_maxima
9
+ from skimage.restoration import denoise_wavelet
9
10
  from skimage.segmentation import watershed
10
11
 
11
12
  from ..base import SenoQuantSpotDetector
@@ -15,6 +16,16 @@ from senoquant.tabs.spots.ufish_utils import UFishConfig, enhance_image
15
16
 
16
17
# Threshold applied to the normalized enhanced image when picking peaks.
DEFAULT_THRESHOLD = 0.5
# Whether to run a Laplacian filter before local-maxima detection.
USE_LAPLACE_FOR_PEAKS = False
# Wavelet denoising of the input is off unless the user opts in.
DEFAULT_DENOISE_ENABLED = False
# User-facing spot-size control; 1.0 means no rescaling of the input.
DEFAULT_SPOT_SIZE = 1.0
# Allowed range for the spot-size control (clamped in _clamp_spot_size).
MIN_SPOT_SIZE = 0.25
MAX_SPOT_SIZE = 4.0
# Small positive guard against division by (near-)zero scales.
EPS = 1e-6
# Pixels below background + NOISE_FLOOR_SIGMA * sigma are zeroed before
# scaling the enhanced image.
NOISE_FLOOR_SIGMA = 1.5
# Lower bound on the normalization scale, expressed in noise sigmas.
MIN_SCALE_SIGMA = 5.0
# Quantile of the positive residual used as the normalization scale.
SIGNAL_SCALE_QUANTILE = 99.9
# Percentile window used to clip/normalize the raw input image.
INPUT_LOW_PERCENTILE = 0.05
INPUT_HIGH_PERCENTILE = 99.95
18
29
 
19
30
 
20
31
  def _clamp_threshold(value: float) -> float:
@@ -22,16 +33,175 @@ def _clamp_threshold(value: float) -> float:
22
33
  return float(np.clip(value, 0.0, 1.0))
23
34
 
24
35
 
25
- def _normalize_unit(image: np.ndarray) -> np.ndarray:
26
- """Normalize to float32 in [0, 1] using 0.05/99.95 percentiles."""
36
def _normalize_input_percentile(image: np.ndarray) -> np.ndarray:
    """Normalize input image to [0, 1] via percentile clipping.

    Non-finite pixels are excluded from the percentile estimate and are
    set to 0 in the output. An all-zero image is returned when no finite
    pixels exist or the percentile window is degenerate.
    """
    data = np.asarray(image, dtype=np.float32)
    finite_mask = np.isfinite(data)
    if not finite_mask.any():
        return np.zeros_like(data, dtype=np.float32)

    finite_values = data[finite_mask]
    low, high = (
        float(bound)
        for bound in np.nanpercentile(
            finite_values, [INPUT_LOW_PERCENTILE, INPUT_HIGH_PERCENTILE]
        )
    )
    degenerate = (not np.isfinite(low)) or (not np.isfinite(high)) or high <= low
    if degenerate:
        return np.zeros_like(data, dtype=np.float32)

    # Linear rescale into [0, 1]; non-finite inputs map to 0.
    rescaled = np.clip((data - low) / (high - low), 0.0, 1.0)
    rescaled = np.where(finite_mask, rescaled, 0.0)
    return rescaled.astype(np.float32, copy=False)
54
+
55
+
56
def _normalize_enhanced_unit(image: np.ndarray) -> np.ndarray:
    """Normalize enhanced image to [0, 1] with robust background suppression.

    Estimates the background as the median and the noise as the MAD
    (falling back to the standard deviation), zeroes everything below a
    noise floor, then scales the remaining signal by a high quantile.
    """
    data = np.asarray(image, dtype=np.float32)
    finite_mask = np.isfinite(data)
    if not finite_mask.any():
        return np.zeros_like(data, dtype=np.float32)

    finite_values = data[finite_mask]
    background = float(np.nanmedian(finite_values))
    # MAD scaled by 1.4826 to match a Gaussian sigma.
    sigma = 1.4826 * float(np.nanmedian(np.abs(finite_values - background)))
    if (not np.isfinite(sigma)) or sigma <= EPS:
        sigma = float(np.nanstd(finite_values))
    if (not np.isfinite(sigma)) or sigma <= EPS:
        return np.zeros_like(data, dtype=np.float32)

    # Gate out most background fluctuations before scaling.
    noise_floor = background + NOISE_FLOOR_SIGMA * sigma
    residual = np.where(finite_mask, np.clip(data - noise_floor, 0.0, None), 0.0)

    positive = residual[residual > 0.0]
    if positive.size == 0:
        return np.zeros_like(data, dtype=np.float32)

    high = float(np.nanpercentile(positive, SIGNAL_SCALE_QUANTILE))
    if (not np.isfinite(high)) or high <= EPS:
        high = float(np.nanmax(positive))
    if (not np.isfinite(high)) or high <= EPS:
        return np.zeros_like(data, dtype=np.float32)

    scale = max(high, MIN_SCALE_SIGMA * sigma, EPS)
    return np.clip(residual / scale, 0.0, 1.0).astype(np.float32, copy=False)
89
+
90
+
91
def _clamp_spot_size(value: float) -> float:
    """Clamp the spot-size control into its supported positive range."""
    clipped = np.clip(value, MIN_SPOT_SIZE, MAX_SPOT_SIZE)
    return float(clipped)
94
+
95
+
96
def _spot_size_to_detection_scale(spot_size: float) -> float:
    """Convert the user spot-size control to an internal image scale.

    spot_size > 1 means detect larger spots (zoom out input),
    spot_size < 1 means detect smaller spots (zoom in input).
    """
    clamped = _clamp_spot_size(spot_size)
    return 1.0 / clamped
103
+
104
+
105
+ def _denoise_input(
106
+ image: np.ndarray,
107
+ *,
108
+ enabled: bool,
109
+ ) -> np.ndarray:
110
+ """Optionally denoise image to suppress tiny bright peaks.
111
+
112
+ Uses wavelet denoising with BayesShrink.
113
+ """
114
+ if not enabled:
115
+ return image.astype(np.float32, copy=False)
116
+ data = image.astype(np.float32, copy=False)
117
+ if data.ndim == 2:
118
+ denoised = denoise_wavelet(
119
+ data,
120
+ method="BayesShrink",
121
+ mode="soft",
122
+ rescale_sigma=True,
123
+ channel_axis=None,
124
+ )
125
+ return np.asarray(denoised, dtype=np.float32)
126
+
127
+ denoised = np.empty_like(data, dtype=np.float32)
128
+ for z in range(data.shape[0]):
129
+ denoised[z] = np.asarray(
130
+ denoise_wavelet(
131
+ data[z],
132
+ method="BayesShrink",
133
+ mode="soft",
134
+ rescale_sigma=True,
135
+ channel_axis=None,
136
+ ),
137
+ dtype=np.float32,
138
+ )
139
+ return denoised.astype(np.float32, copy=False)
140
+
141
+
142
+ def _scale_image_for_detection(
143
+ image: np.ndarray,
144
+ scale: float,
145
+ ) -> np.ndarray:
146
+ """Rescale image before U-FISH inference.
147
+
148
+ For 3D stacks, scale is applied to y/x only and z is preserved.
149
+ """
150
+ if abs(scale - 1.0) < 1e-6:
151
+ return image.astype(np.float32, copy=False)
152
+ if image.ndim == 2:
153
+ target_shape = tuple(max(1, int(round(dim * scale))) for dim in image.shape)
154
+ else:
155
+ target_shape = (
156
+ image.shape[0],
157
+ max(1, int(round(image.shape[1] * scale))),
158
+ max(1, int(round(image.shape[2] * scale))),
159
+ )
160
+ zoom_factors = tuple(
161
+ target / source for target, source in zip(target_shape, image.shape)
162
+ )
163
+ scaled = ndi.zoom(
164
+ image.astype(np.float32, copy=False),
165
+ zoom=zoom_factors,
166
+ order=1,
167
+ mode="nearest",
168
+ )
169
+ return scaled.astype(np.float32, copy=False)
170
+
171
+
172
+ def _fit_to_shape(array: np.ndarray, target_shape: tuple[int, ...]) -> np.ndarray:
173
+ """Crop/pad array to exactly match target shape."""
174
+ if array.shape == target_shape:
175
+ return array
176
+
177
+ src_slices = tuple(slice(0, min(src, tgt)) for src, tgt in zip(array.shape, target_shape))
178
+ cropped = array[src_slices]
179
+ if cropped.shape == target_shape:
180
+ return cropped
181
+
182
+ fitted = np.zeros(target_shape, dtype=array.dtype)
183
+ dst_slices = tuple(slice(0, dim) for dim in cropped.shape)
184
+ fitted[dst_slices] = cropped
185
+ return fitted
186
+
187
+ def _restore_image_to_input_scale(
188
+ image: np.ndarray,
189
+ original_shape: tuple[int, ...],
190
+ ) -> np.ndarray:
191
+ """Restore floating-point image to original input scale."""
192
+ if image.shape == original_shape:
193
+ return image.astype(np.float32, copy=False)
194
+ zoom_factors = tuple(
195
+ target / source for target, source in zip(original_shape, image.shape)
196
+ )
197
+ restored = ndi.zoom(
198
+ image.astype(np.float32, copy=False),
199
+ zoom=zoom_factors,
200
+ order=1,
201
+ mode="nearest",
202
+ )
203
+ restored = _fit_to_shape(restored, original_shape)
204
+ return restored.astype(np.float32, copy=False)
35
205
 
36
206
 
37
207
  def _markers_from_local_maxima(
@@ -102,28 +272,56 @@ class UFishDetector(SenoQuantSpotDetector):
102
272
  settings = kwargs.get("settings", {}) or {}
103
273
  threshold = _clamp_threshold(float(settings.get("threshold", DEFAULT_THRESHOLD)))
104
274
  use_laplace = USE_LAPLACE_FOR_PEAKS
275
+ denoise_enabled = bool(settings.get("denoise_enabled", DEFAULT_DENOISE_ENABLED))
276
+ spot_size = _clamp_spot_size(
277
+ float(settings.get("spot_size", DEFAULT_SPOT_SIZE))
278
+ )
279
+ scale = _spot_size_to_detection_scale(spot_size)
105
280
 
106
281
  data = layer_data_asarray(layer)
107
282
  if data.ndim not in (2, 3):
108
283
  raise ValueError("U-FISH detector expects 2D images or 3D stacks.")
109
284
 
110
- # Percentile normalize the data
111
- data = _normalize_unit(data)
285
+ data = _normalize_input_percentile(data)
286
+ denoised = _denoise_input(
287
+ data,
288
+ enabled=denoise_enabled,
289
+ )
290
+ scaled_input = _scale_image_for_detection(denoised, scale)
112
291
 
113
- enhanced = enhance_image(np.asarray(data, dtype=np.float32), config=UFishConfig())
114
- enhanced = np.asarray(enhanced, dtype=np.float32)
292
+ enhanced_raw = enhance_image(
293
+ np.asarray(scaled_input, dtype=np.float32),
294
+ config=UFishConfig(),
295
+ )
296
+ enhanced_raw = np.asarray(enhanced_raw, dtype=np.float32)
115
297
 
116
298
  # Re-normalize after enhancement
117
- enhanced = _normalize_unit(enhanced)
299
+ enhanced_normalized = _normalize_enhanced_unit(enhanced_raw)
300
+
301
+ # Segment in original resolution to avoid blocky label upsampling artifacts.
302
+ enhanced_for_seg = _restore_image_to_input_scale(
303
+ enhanced_normalized,
304
+ data.shape,
305
+ )
118
306
 
119
307
  markers = _markers_from_local_maxima(
120
- enhanced,
308
+ enhanced_for_seg,
121
309
  threshold,
122
310
  use_laplace=use_laplace,
123
311
  )
124
312
  labels = _segment_from_markers(
125
- enhanced,
313
+ enhanced_for_seg,
126
314
  markers,
127
315
  threshold,
128
316
  )
129
- return {"mask": labels}
317
+ # debug_enhanced = _restore_image_to_input_scale(enhanced_raw, data.shape)
318
+ # debug_enhanced_normalized = enhanced_for_seg
319
+ return {
320
+ "mask": labels,
321
+ # "debug_images": {
322
+ # # "debug_normalized_image": normalized.astype(np.float32, copy=False),
323
+ # "debug_denoised_image": denoised.astype(np.float32, copy=False),
324
+ # "debug_enhanced_image": debug_enhanced,
325
+ # "debug_enhanced_image_normalized": debug_enhanced_normalized,
326
+ # },
327
+ }
@@ -20,6 +20,7 @@ Weight loading priority is:
20
20
  from __future__ import annotations
21
21
 
22
22
  from dataclasses import dataclass
23
+ import logging
23
24
  from pathlib import Path
24
25
  import sys
25
26
  from types import MethodType
@@ -92,6 +93,7 @@ class _UFishState:
92
93
 
93
94
  _UFISH_STATE = _UFishState()
94
95
  _UFISH_HF_FILENAME = "ufish.onnx"
96
+ _LOGGER = logging.getLogger(__name__)
95
97
 
96
98
 
97
99
  def _ensure_ufish_available() -> None:
@@ -353,5 +355,33 @@ def enhance_image(
353
355
  model = _get_ufish(config)
354
356
  _ensure_weights(model, config)
355
357
  image = np.asarray(image)
356
- _pred_spots, enhanced = model.predict(image)
358
+ model_any = cast("Any", model)
359
+ predict_chunks = getattr(model_any, "predict_chunks", None)
360
+
361
+ def _run_inference() -> tuple[Any, Any]:
362
+ if callable(predict_chunks):
363
+ return predict_chunks(image)
364
+ return model_any.predict(image)
365
+
366
+ try:
367
+ _pred_spots, enhanced = _run_inference()
368
+ except Exception as exc:
369
+ # If ONNX inference fails on GPU, retry once on CPU provider.
370
+ weight_path = getattr(model_any, "weight_path", None)
371
+ can_retry_on_cpu = (
372
+ ort is not None
373
+ and isinstance(weight_path, str)
374
+ and weight_path.endswith(".onnx")
375
+ and hasattr(model_any, "_load_onnx")
376
+ and "CPUExecutionProvider" in set(ort.get_available_providers())
377
+ )
378
+ if not can_retry_on_cpu:
379
+ raise
380
+ _LOGGER.warning(
381
+ "UFish inference failed; retrying with ONNX CPUExecutionProvider. "
382
+ "Original error: %s",
383
+ exc,
384
+ )
385
+ model_any._load_onnx(weight_path, providers=["CPUExecutionProvider"])
386
+ _pred_spots, enhanced = _run_inference()
357
387
  return np.asarray(enhanced)
@@ -0,0 +1 @@
1
+ """Visualization tab modules."""
@@ -0,0 +1,306 @@
1
+ """Backend logic for the Visualization tab."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from dataclasses import dataclass, field
6
+ from pathlib import Path
7
+ from typing import Iterable
8
+ import shutil
9
+ import tempfile
10
+
11
+ from .plots import PlotConfig
12
+
13
+
14
@dataclass
class PlotExportResult:
    """Metadata describing the files produced by a single plot export.

    Attributes
    ----------
    plot_id : str
        Stable identifier for the exported plot instance.
    plot_type : str
        Plot type name used for routing (e.g., ``"UMAP"``).
    temp_dir : Path
        Temporary directory the plot wrote its outputs into.
    outputs : list of Path
        Explicit file paths returned by the plot processor; defaults to
        an empty list when the plot reported nothing.
    """

    plot_id: str
    plot_type: str
    temp_dir: Path
    outputs: list[Path] = field(default_factory=list)
34
+
35
+
36
@dataclass
class VisualizationResult:
    """Aggregated output information for one visualization run.

    Attributes
    ----------
    input_root : Path
        Root input directory.
    output_root : Path
        Root output directory for the run.
    temp_root : Path
        Temporary root directory used while processing.
    plot_outputs : list of PlotExportResult
        Per-plot export metadata collected during the run.
    """

    input_root: Path
    output_root: Path
    temp_root: Path
    plot_outputs: list[PlotExportResult]
55
+
56
+
57
class VisualizationBackend:
    """Backend orchestrator for visualization exports.

    Notes
    -----
    Feature export routines live with their feature implementations. The
    backend iterates through configured feature contexts, asks each feature
    handler to export into a temporary directory, and then routes (copies)
    those outputs into a final output structure.
    """

    def __init__(self) -> None:
        """Initialize the backend state.

        Attributes
        ----------
        metrics : list
            Placeholder container for computed metrics.
        """
        self.metrics: list[object] = []

    def process(
        self,
        plots: Iterable[object],
        input_path: Path,
        output_path: str,
        output_name: str,
        export_format: str,
        markers: list[str] | None = None,
        thresholds: dict[str, float] | None = None,
        save: bool = True,
        cleanup: bool = True,
    ) -> VisualizationResult:
        """Run plot exports and route their outputs.

        Parameters
        ----------
        plots : iterable of object
            Plot UI contexts with ``state`` and ``plot_handler``.
            Each handler should implement ``plot(temp_dir, input_path, export_format)``.
        input_path : Path
            Path to the input folder containing quantification files.
        output_path : str
            Base output folder path.
        output_name : str
            Base file name applied to routed outputs (not a subfolder).
        export_format : str
            File format requested by the user (``"png"`` or ``"svg"``).
        markers : list of str, optional
            List of selected markers to include.
        thresholds : dict, optional
            Dictionary of {marker_name: threshold_value} for filtering.
        save : bool, optional
            Whether to save (route) the outputs to the final destination immediately.
        cleanup : bool, optional
            Whether to delete temporary export folders after routing.

        Returns
        -------
        VisualizationResult
            Output metadata for the completed run.

        Notes
        -----
        If a plot export does not return explicit output paths, the backend
        routes all files found directly in the plot's temp directory
        (subdirectories are not traversed). Routing copies files; with
        ``cleanup=True`` the temporary tree is removed afterwards.
        """
        input_root = input_path.parent
        # Treat `output_path` as the folder and `output_name` as an optional
        # filename base. Resolve output_root without using output_name so
        # output_name can be applied as a file name rather than a subfolder.
        output_root = self._resolve_output_root(output_path, "")
        output_root.mkdir(parents=True, exist_ok=True)
        # Unique scratch tree; one subdirectory per plot (keyed by plot_id).
        temp_root = Path(tempfile.mkdtemp(prefix="senoquant-plot-"))

        plot_outputs: list[PlotExportResult] = []
        for context in plots:
            plot = getattr(context, "state", None)
            handler = getattr(context, "plot_handler", None)
            # Skip contexts whose state is not a recognized PlotConfig.
            if not isinstance(plot, PlotConfig):
                continue
            print(f"[Backend] Processing plot: {plot.type_name}")
            print(f"[Backend] Handler: {handler}")
            print(f"[Backend] Handler has plot method: {hasattr(handler, 'plot') if handler else False}")
            temp_dir = temp_root / plot.plot_id
            temp_dir.mkdir(parents=True, exist_ok=True)
            outputs: list[Path] = []
            if handler is not None and hasattr(handler, "plot"):
                print(f"[Backend] Calling handler.plot() with input_path={input_path}, format={export_format}")
                # Handler returns an iterable of path-likes; normalize to Path.
                outputs = [
                    Path(path)
                    for path in handler.plot(
                        temp_dir,
                        input_path,
                        export_format,
                        markers=markers,
                        thresholds=thresholds
                    )
                ]
                print(f"[Backend] Handler returned {len(outputs)} outputs: {outputs}")
            else:
                print(f"[Backend] Skipping: handler is None or has no plot method")
            # Record the export even when the handler produced nothing, so
            # callers can see which plots ran.
            plot_outputs.append(
                PlotExportResult(
                    plot_id=plot.plot_id,
                    plot_type=plot.type_name,
                    temp_dir=temp_dir,
                    outputs=outputs,
                )
            )

        if save:
            print(f"[Backend] About to route {len(plot_outputs)} plot outputs")
            self._route_plot_outputs(output_root, plot_outputs, output_name)
        if cleanup:
            # Best-effort removal; routed copies in output_root survive.
            shutil.rmtree(temp_root, ignore_errors=True)
        return VisualizationResult(
            input_root=input_root,
            output_root=output_root,
            temp_root=temp_root,
            plot_outputs=plot_outputs,
        )

    def save_result(
        self,
        result: VisualizationResult,
        output_path: str,
        output_name: str
    ) -> None:
        """Save an existing visualization result to the specified output.

        Copies files from the result's current output locations (or temp
        directories) to the new output path, and mutates
        ``result.output_root`` and each entry's ``outputs`` in place to
        point at the routed files.
        """
        output_root = self._resolve_output_root(output_path, "")
        output_root.mkdir(parents=True, exist_ok=True)
        result.output_root = output_root
        self._route_plot_outputs(output_root, result.plot_outputs, output_name)

    def _resolve_output_root(self, output_path: str, output_name: str) -> Path:
        """Resolve the final output root directory.

        Parameters
        ----------
        output_path : str
            Base output folder path; blank/whitespace falls back to the
            current working directory.
        output_name : str
            Optional subfolder name appended when non-blank.

        Returns
        -------
        Path
            Resolved output directory path.
        """
        if output_path and output_path.strip():
            base = Path(output_path)
        else:
            # Default to repository root (current working directory)
            base = Path.cwd()
        if output_name and output_name.strip():
            return base / output_name
        return base

    def _route_plot_outputs(
        self,
        output_root: Path,
        plot_outputs: Iterable[PlotExportResult],
        output_name: str = "",
    ) -> None:
        """Copy plot outputs from temp folders to the final location.

        Parameters
        ----------
        output_root : Path
            Destination root folder.
        plot_outputs : iterable of PlotExportResult
            Export results to route; each entry's ``outputs`` is rewritten
            in place to the routed destination paths.
        output_name : str, optional
            Base filename applied to routed files; when multiple files are
            routed, a 1-based index suffix avoids collisions.

        Notes
        -----
        When a plot returns no explicit output list, all files present
        directly in the temporary directory are routed instead.
        Subdirectories are not traversed.
        """
        for plot_output in plot_outputs:
            print(f"[Backend] Routing {plot_output.plot_type} to {output_root}")
            final_paths: list[Path] = []
            outputs = plot_output.outputs
            # Choose source list: explicit outputs if provided, otherwise files
            # from the temp directory.
            source_files: list[Path] = []
            if outputs:
                # Silently drop paths the handler reported but never wrote.
                source_files = [p for p in outputs if Path(p).exists()]
            else:
                source_files = [p for p in plot_output.temp_dir.glob("*") if p.is_file()]

            if not source_files:
                print(f"[Backend] No files to route for {plot_output.plot_type}")
                plot_output.outputs = []
                continue

            # If the caller provided output_name, use it as the base filename.
            for idx, src in enumerate(source_files):
                src = Path(src)
                ext = src.suffix
                if output_name and output_name.strip():
                    # If multiple files, append an index to avoid collisions.
                    if len(source_files) == 1:
                        dest_name = f"{output_name}{ext}"
                    else:
                        dest_name = f"{output_name}_{idx+1}{ext}"
                else:
                    # Fallback: prefix with plot type for clarity
                    safe_type = plot_output.plot_type.replace(' ', '_')
                    dest_name = f"{safe_type}_{src.name}"
                dest = output_root / dest_name
                print(f"[Backend] Copying {src} -> {dest}")
                try:
                    # copy2 preserves file metadata (timestamps) alongside content.
                    shutil.copy2(str(src), dest)
                except shutil.SameFileError:
                    print(f"[Backend] Skipping copy: source and destination are the same ({dest})")
                # Destination is recorded even when the copy was skipped as
                # a same-file no-op — the file exists there either way.
                final_paths.append(dest)

            # Update plot_output.outputs to point at final routed files
            plot_output.outputs = final_paths

    def _plot_dir_name(self, plot_output: PlotExportResult) -> str:
        """Build a filesystem-friendly folder name for a plot.

        Parameters
        ----------
        plot_output : PlotExportResult
            Export result metadata.

        Returns
        -------
        str
            Directory name for the plot outputs.

        Notes
        -----
        Non-alphanumeric characters are replaced to avoid filesystem issues.
        NOTE(review): not called anywhere within this class — presumably
        used by external callers or kept for future routing; verify before
        removing.
        """
        name = plot_output.plot_type.strip()
        safe = "".join(
            char if char.isalnum() or char in "-_ " else "_" for char in name
        )
        return safe.replace(" ", "_").lower()