imspy-vis 0.4.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
imspy_vis/__init__.py ADDED
@@ -0,0 +1,57 @@
+ """
+ imspy_vis - Lightweight visualization tools for timsTOF proteomics data.
+
+ This package provides visualization tools using Plotly, Matplotlib, and ipywidgets
+ for interactive exploration of timsTOF mass spectrometry data.
+
+ Features:
+ - Frame rendering with DDA/DIA annotation overlays
+ - Interactive 3D point cloud visualization
+ - Video generation from timsTOF datasets
+ - Jupyter notebook integration
+
+ Example:
+     >>> from imspy_vis import generate_preview_video, DDAFrameRenderer
+     >>> generate_preview_video('/path/to/data.d', '/path/to/output.mp4', mode='dda')
+ """
+
+ __version__ = "0.4.0"
+
+ # Point cloud visualization (Plotly/ipywidgets)
+ from imspy_vis.pointcloud import (
+     ImsPointCloudVisualizer,
+     DDAPrecursorPointCloudVis,
+     calculate_mz_tick_spacing,
+ )
+
+ # Frame rendering and video generation
+ from imspy_vis.frame_rendering import (
+     BaseFrameRenderer,
+     DDAFrameRenderer,
+     DIAFrameRenderer,
+     generate_preview_video,
+     get_frame_matrix,
+     configure_gpu_memory,
+     overlay_precursor,
+     overlay_fragment,
+     overlay_windows,
+ )
+
+ __all__ = [
+     # Version
+     '__version__',
+     # Point cloud visualization
+     'ImsPointCloudVisualizer',
+     'DDAPrecursorPointCloudVis',
+     'calculate_mz_tick_spacing',
+     # Frame rendering
+     'BaseFrameRenderer',
+     'DDAFrameRenderer',
+     'DIAFrameRenderer',
+     'generate_preview_video',
+     'get_frame_matrix',
+     'configure_gpu_memory',
+     'overlay_precursor',
+     'overlay_fragment',
+     'overlay_windows',
+ ]
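All of the names above are re-exported at the package root, so both workflows can be driven without importing the submodules directly. A minimal, hedged sketch (paths and frame IDs are placeholders; a local Bruker `.d` folder is assumed):

```python
from imspy_vis import DDAFrameRenderer, generate_preview_video

# One-call preview of a DDA run (takes a slice from the middle of the run)
generate_preview_video('/path/to/run.d', '/path/to/preview.mp4',
                       mode='dda', max_frames=50, dpi=80)

# Finer control: pick explicit frame IDs and render them straight to video
renderer = DDAFrameRenderer('/path/to/run.d')
renderer.render_to_video('/path/to/selected.mp4',
                         frame_ids=list(range(1000, 1100)), fps=15)
```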
imspy_vis/frame_rendering.py ADDED
@@ -0,0 +1,464 @@
+ import os
+ import io
+ import numpy as np
+ import matplotlib
+ matplotlib.use('Agg')  # Non-interactive backend for direct rendering
+ import matplotlib.pyplot as plt
+ import matplotlib.patches as patches
+ import imageio.v2 as imageio
+ from matplotlib import colors as mcolors
+ from tqdm import tqdm
+ from imspy_core.timstof import TimsDatasetDDA
+ from imspy_core.timstof.dia import TimsDatasetDIA
+ from typing import Optional, List, Union
+
+ # Distinct colors for overlays
+ COLORS = list(mcolors.TABLEAU_COLORS.values()) + [
+     mcolors.CSS4_COLORS[c] for c in [
+         "crimson", "mediumseagreen", "darkorange", "slateblue", "turquoise",
+         "goldenrod", "orchid", "dodgerblue", "tomato", "limegreen"
+     ]
+ ]
+
+ def configure_gpu_memory(limit_gb: float = 4.0):
+     """Configure GPU memory for PyTorch (no-op, PyTorch manages memory dynamically)."""
+     try:
+         import torch
+         if torch.cuda.is_available():
+             # PyTorch manages GPU memory dynamically, no explicit configuration needed
+             pass
+     except ImportError:
+         pass
+
+
+ def get_frame_matrix(handle, frame_id: int,
+                      index_max: int = 1700, scan_max: int = 972) -> np.ndarray:
+     frame = handle[frame_id]
+
+     frame = frame.filter(mz_max=float(index_max))
+
+     return frame.vectorized(0).get_tensor_repr(
+         dense=True, zero_indexed=False, re_index=False,
+         index_max=index_max, scan_max=scan_max
+     )
+
+
+ def choose_color(id_val: int) -> str:
+     return COLORS[int(id_val) % len(COLORS)]
+
+
+ def overlay_precursor(ax, meta_df, image_width: int):
+     """
+     Overlay precursor isolation boxes (DDA): m/z-based windows per precursor.
+     """
+     for _, row in meta_df.iterrows():
+         color = choose_color(row['Frame'])
+         mz_mid = row['IsolationMz']
+         iso_w = row['IsolationWidth']
+         # calculate m/z-based window
+         x = int(np.floor(mz_mid - iso_w/2)) - 2
+         y = row['ScanNumBegin']
+         w = int(np.ceil(iso_w)) + 4
+         h = row['ScanNumEnd'] - y
+         rect = patches.Rectangle((x, y), w, h,
+                                  linewidth=1.0, edgecolor=color,
+                                  facecolor='none', linestyle='-', alpha=0.6)
+         ax.add_patch(rect)
+         lbl = (
+             f"ID:{int(row['Precursor'])}, "
+             f"CE:{row['CollisionEnergy']:.1f}eV, "
+             f"m/z:{mz_mid:.2f}±{iso_w:.2f}"
+         )
+         ax.text(x + 25, y + 10, lbl,
+                 fontsize=8, color='white', backgroundcolor='black',
+                 verticalalignment='top', horizontalalignment='left', alpha=0.9)
+
+
+ def overlay_fragment(ax, meta_df, image_width: int):
+     """
+     Overlay fragment selection windows across the full m/z axis (DDA).
+     """
+     for _, row in meta_df.iterrows():
+         color = choose_color(row['Frame'])
+         y = row['ScanNumBegin']
+         h = row['ScanNumEnd'] - y
+         rect = patches.Rectangle((0, y), image_width, h,
+                                  linewidth=1.0, edgecolor=color,
+                                  facecolor='none', linestyle='--', alpha=0.6)
+         ax.add_patch(rect)
+         lbl = (
+             f"ID:{int(row['Precursor'])}, "
+             f"CE:{row['CollisionEnergy']:.1f}eV, "
+             f"m/z:{row['IsolationMz']:.2f}±{row['IsolationWidth']:.2f}"
+         )
+         ax.text(2, y + 2, lbl,
+                 fontsize=8, color='white', backgroundcolor='black',
+                 verticalalignment='bottom', horizontalalignment='left', alpha=0.9)
+
+
+ def overlay_windows(ax, df_windows, image_width: int,
+                     text_dx: int = 2, text_dy: int = 2,
+                     ha: str = 'left', va: str = 'top'):
+     """
+     Overlay DIA selection windows (m/z-based boxes).
+     """
+     for _, row in df_windows.iterrows():
+         group = int(row['WindowGroup'])
+         color = choose_color(group)
+         mz_mid = row['IsolationMz']
+         iso_w = row['IsolationWidth']
+         x = int(np.floor(mz_mid - iso_w/2)) - 2
+         y = row['ScanNumBegin']
+         w = int(np.ceil(iso_w)) + 4
+         h = row['ScanNumEnd'] - y
+         rect = patches.Rectangle((x, y), w, h,
+                                  linewidth=1.2, edgecolor=color,
+                                  facecolor='none', linestyle='-', alpha=0.8)
+         ax.add_patch(rect)
+         lbl = f"WG:{group} CE:{row['CollisionEnergy']:.1f}eV m/z:{mz_mid:.2f}±{iso_w:.2f}"
+         ax.text(x + text_dx, y + text_dy, lbl,
+                 fontsize=8, color='white', backgroundcolor='black',
+                 horizontalalignment=ha, verticalalignment=va, alpha=0.9)
+
+
+ class BaseFrameRenderer:
+     def __init__(self, memory_limit_gb: float = 4.0):
+         configure_gpu_memory(memory_limit_gb)
+
+     def batch_render(self, out_dir: str, frame_ids=None, **render_kwargs):
+         if frame_ids is None:
+             frame_ids = self._all_frame_ids
+         os.makedirs(out_dir, exist_ok=True)
+         for fid in tqdm(frame_ids, desc='Rendering frames', ncols=80):
+             out_path = os.path.join(out_dir, f"frame_{fid:04d}.png")
+             self._render_frame(fid, save_path=out_path, **render_kwargs)
+
+     def create_video(self, frames_dir: str, output_path: str,
+                      fps: int = 10, ext: str = 'png'):
+         files = sorted(f for f in os.listdir(frames_dir) if f.endswith(f'.{ext}'))
+         if not files:
+             raise RuntimeError(f"No .{ext} frames in {frames_dir}")
+         writer = imageio.get_writer(output_path, fps=fps)
+         for fname in tqdm(files, desc='Creating video', ncols=80):
+             img = imageio.imread(os.path.join(frames_dir, fname))
+             writer.append_data(img)
+         writer.close()
+
+     def render_to_video(
+         self,
+         output_path: str,
+         frame_ids: Optional[List[int]] = None,
+         fps: int = 10,
+         dpi: int = 100,
+         cmap: str = 'inferno',
+         annotate: bool = True,
+         max_frames: Optional[int] = None,
+         codec: str = 'libx264',
+         quality: int = 8,
+         show_progress: bool = True
+     ) -> str:
+         """
+         Render frames directly to video without intermediate files.
+
+         Args:
+             output_path: Path for output video file (.mp4)
+             frame_ids: List of frame IDs to render. If None, uses all frames.
+             fps: Frames per second in output video
+             dpi: Resolution of rendered frames
+             cmap: Matplotlib colormap
+             annotate: Whether to draw annotation overlays
+             max_frames: Limit to N frames taken from the middle of the run (for quick previews)
+             codec: Video codec (libx264, mpeg4, etc.)
+             quality: Video quality (1-10, higher = better)
+             show_progress: Show progress bar
+
+         Returns:
+             Path to the created video file
+         """
+         if frame_ids is None:
+             frame_ids = self._all_frame_ids
+
+         if max_frames is not None and len(frame_ids) > max_frames:
+             # Take frames from the middle of the experiment where peptides are eluting
+             total = len(frame_ids)
+             start = (total - max_frames) // 2
+             frame_ids = frame_ids[start:start + max_frames]
+
+         os.makedirs(os.path.dirname(output_path) or '.', exist_ok=True)
+
+         writer = imageio.get_writer(
+             output_path,
+             fps=fps,
+             codec=codec,
+             quality=quality,
+             pixelformat='yuv420p'
+         )
+
+         iterator = tqdm(frame_ids, desc='Rendering video', ncols=80) if show_progress else frame_ids
+
+         for fid in iterator:
+             # Render frame to memory buffer
+             img_array = self._render_frame_to_array(fid, dpi=dpi, cmap=cmap, annotate=annotate)
+             writer.append_data(img_array)
+
+         writer.close()
+         return output_path
+
+     def _render_frame_to_array(self, frame_id: int, dpi: int = 100,
+                                cmap: str = 'inferno', annotate: bool = True) -> np.ndarray:
+         """Render a single frame to numpy array (RGB)."""
+         raise NotImplementedError("Subclasses must implement _render_frame_to_array")
+
+
+ class DDAFrameRenderer(BaseFrameRenderer):
+     def __init__(self, data_path: str,
+                  memory_limit_gb: float = 4.0,
+                  use_bruker_sdk: bool = True,
+                  in_memory: bool = False):
+         super().__init__(memory_limit_gb)
+         self.handle = TimsDatasetDDA(data_path, use_bruker_sdk=use_bruker_sdk, in_memory=in_memory)
+         self.meta = self.handle._load_pasef_meta_data()
+         diffs = np.diff(self.handle.precursor_frames)
+         self.fragments_map = {f: set(range(f+1, f+d)) for f, d in zip(self.handle.precursor_frames[:-1], diffs)}
+         self.precursor_frames = set(self.handle.precursor_frames)
+         self.fragment_frames = set(self.handle.fragment_frames)
+         self._all_frame_ids = list(self.handle.meta_data.frame_id)
+
+     def _render_frame_to_array(self, frame_id: int, dpi: int = 100,
+                                cmap: str = 'inferno', annotate: bool = True) -> np.ndarray:
+         """Render frame directly to numpy array without saving to disk."""
+         F = get_frame_matrix(self.handle, frame_id, scan_max=self.handle.num_scans)
+         # Use fixed 16:9 aspect ratio for consistent video frames
+         fig, ax = plt.subplots(figsize=(16, 9), dpi=dpi)
+         ax.imshow(np.cbrt(F), cmap=cmap, origin='upper', aspect='auto')
+         ax.set(xlabel='m/z (1 Th bins)', ylabel='Scan Num',
+                title=(f"Frame {frame_id}"
+                       f"{' [Precursor]' if frame_id in self.precursor_frames else ''}"
+                       f"{' [+Fragment]' if frame_id in self.fragment_frames else ''}"))
+         if annotate:
+             if frame_id in self.precursor_frames:
+                 fragments = self.fragments_map.get(frame_id, set())
+                 dfp = self.meta[self.meta['Frame'].isin(fragments)]
+                 overlay_precursor(ax, dfp, F.shape[1])
+             if frame_id in self.fragment_frames:
+                 dff = self.meta[self.meta['Frame'] == frame_id]
+                 overlay_fragment(ax, dff, F.shape[1])
+         plt.tight_layout()
+
+         # Render to buffer
+         fig.canvas.draw()
+         img_array = np.asarray(fig.canvas.buffer_rgba())[:, :, :3].copy()
+         plt.close(fig)
+         return img_array
+
+     def _render_frame(self, frame_id: int, save_path=None,
+                       dpi: int = 150, cmap: str = 'inferno',
+                       annotate: bool = True):
+         F = get_frame_matrix(self.handle, frame_id, scan_max=self.handle.num_scans)
+         # Use fixed 16:9 aspect ratio for consistent video frames
+         fig, ax = plt.subplots(figsize=(16, 9), dpi=dpi)
+         ax.imshow(np.cbrt(F), cmap=cmap, origin='upper', aspect='auto')
+         ax.set(xlabel='m/z (1 Th bins)', ylabel='Scan Num',
+                title=(f"Frame {frame_id}"
+                       f"{' [Precursor]' if frame_id in self.precursor_frames else ''}"
+                       f"{' [+Fragment]' if frame_id in self.fragment_frames else ''}"))
+         if annotate:
+             if frame_id in self.precursor_frames:
+                 fragments = self.fragments_map.get(frame_id, set())
+                 dfp = self.meta[self.meta['Frame'].isin(fragments)]
+                 overlay_precursor(ax, dfp, F.shape[1])
+             if frame_id in self.fragment_frames:
+                 dff = self.meta[self.meta['Frame'] == frame_id]
+                 overlay_fragment(ax, dff, F.shape[1])
+         plt.tight_layout()
+         if save_path:
+             os.makedirs(os.path.dirname(save_path) or '.', exist_ok=True)
+             fig.savefig(save_path)
+             plt.close(fig)
+         else:
+             plt.show()
+
+
+ class DIAFrameRenderer(BaseFrameRenderer):
+     def __init__(self, data_path: str,
+                  memory_limit_gb: float = 4.0,
+                  use_bruker_sdk: bool = True,
+                  in_memory: bool = False):
+         super().__init__(memory_limit_gb)
+         self.handle = TimsDatasetDIA(data_path, use_bruker_sdk=use_bruker_sdk, in_memory=in_memory)
+         self.windows = self.handle.dia_ms_ms_windows.copy()
+         self.frame_to_group = dict(zip(self.handle.dia_ms_ms_info.Frame, self.handle.dia_ms_ms_info.WindowGroup))
+         self.precursor_to_fragments = {}
+         current = None
+         for _, row in self.handle.meta_data.iterrows():
+             idx, typ = row.Id, row.MsMsType
+             if typ == 0:
+                 current = idx
+                 self.precursor_to_fragments[current] = set()
+             else:
+                 self.precursor_to_fragments[current].add(idx)
+         self.precursor_frames = set(self.precursor_to_fragments.keys())
+         self.fragment_frames = set(self.handle.meta_data[self.handle.meta_data.MsMsType == 1].Id)
+         self._all_frame_ids = list(self.handle.meta_data.Id)
+
+     def _render_frame_to_array(self, frame_id: int, dpi: int = 100,
+                                cmap: str = 'inferno', annotate: bool = True) -> np.ndarray:
+         """Render frame directly to numpy array without saving to disk."""
+         F = get_frame_matrix(self.handle, frame_id, scan_max=self.handle.num_scans)
+         # Use fixed 16:9 aspect ratio for consistent video frames
+         fig, ax = plt.subplots(figsize=(16, 9), dpi=dpi)
+         ax.imshow(np.sqrt(F), cmap=cmap, origin='upper', aspect='auto')
+         ax.set(xlabel='m/z (1 Th bins)', ylabel='Scan Num',
+                title=(f"Frame {frame_id}"
+                       f"{' [Precursor]' if frame_id in self.precursor_frames else ''}"
+                       f"{' [+Fragment]' if frame_id in self.fragment_frames else ''}"))
+         if annotate:
+             if frame_id in self.precursor_frames:
+                 df_w = self.windows.copy()
+                 df_w['Frame'] = frame_id
+             else:
+                 grp = self.frame_to_group.get(frame_id)
+                 df_w = self.windows[self.windows.WindowGroup == grp].copy()
+                 df_w['Frame'] = frame_id
+             overlay_windows(ax, df_w, F.shape[1], text_dx=-20, text_dy=-20, ha='right', va='bottom')
+         plt.tight_layout()
+
+         # Render to buffer
+         fig.canvas.draw()
+         img_array = np.asarray(fig.canvas.buffer_rgba())[:, :, :3].copy()
+         plt.close(fig)
+         return img_array
+
+     def _render_frame(self, frame_id: int, save_path=None,
+                       dpi: int = 150, cmap: str = 'inferno',
+                       annotate: bool = True):
+         F = get_frame_matrix(self.handle, frame_id, scan_max=self.handle.num_scans)
+         # Use fixed 16:9 aspect ratio for consistent video frames
+         fig, ax = plt.subplots(figsize=(16, 9), dpi=dpi)
+         ax.imshow(np.sqrt(F), cmap=cmap, origin='upper', aspect='auto')
+         ax.set(xlabel='m/z (1 Th bins)', ylabel='Scan Num',
+                title=(f"Frame {frame_id}"
+                       f"{' [Precursor]' if frame_id in self.precursor_frames else ''}"
+                       f"{' [+Fragment]' if frame_id in self.fragment_frames else ''}"))
+         if annotate:
+             if frame_id in self.precursor_frames:
+                 df_w = self.windows.copy()
+                 df_w['Frame'] = frame_id
+             else:
+                 grp = self.frame_to_group.get(frame_id)
+                 df_w = self.windows[self.windows.WindowGroup == grp].copy()
+                 df_w['Frame'] = frame_id
+             overlay_windows(ax, df_w, F.shape[1], text_dx=-20, text_dy=-20, ha='right', va='bottom')
+         plt.tight_layout()
+         if save_path:
+             os.makedirs(os.path.dirname(save_path) or '.', exist_ok=True)
+             fig.savefig(save_path)
+             plt.close(fig)
+         else:
+             plt.show()
+
+ def generate_preview_video(
+     data_path: str,
+     output_path: str,
+     mode: str = 'dda',
+     max_frames: int = 100,
+     fps: int = 10,
+     dpi: int = 80,
+     annotate: bool = True,
+     use_bruker_sdk: bool = True,
+     show_progress: bool = True
+ ) -> str:
+     """
+     One-call function to generate a preview video from a timsTOF dataset.
+
+     This is designed for quick visual inspection in evaluation loops.
+
+     Args:
+         data_path: Path to .d folder
+         output_path: Path for output video file (.mp4)
+         mode: 'dda' or 'dia'
+         max_frames: Maximum number of frames to include (taken from the middle of the run)
+         fps: Frames per second
+         dpi: Resolution (lower = faster, 80 is good for previews)
+         annotate: Whether to overlay annotation boxes
+         use_bruker_sdk: Use Bruker SDK for reading
+         show_progress: Show progress bar
+
+     Returns:
+         Path to the created video file
+
+     Example:
+         >>> from imspy_vis.frame_rendering import generate_preview_video
+         >>> generate_preview_video(
+         ...     '/path/to/data.d',
+         ...     '/path/to/preview.mp4',
+         ...     mode='dda',
+         ...     max_frames=50
+         ... )
+     """
+     if mode == 'dda':
+         renderer = DDAFrameRenderer(data_path, use_bruker_sdk=use_bruker_sdk)
+     elif mode == 'dia':
+         renderer = DIAFrameRenderer(data_path, use_bruker_sdk=use_bruker_sdk)
+     else:
+         raise ValueError(f"mode must be 'dda' or 'dia', got '{mode}'")
+
+     return renderer.render_to_video(
+         output_path=output_path,
+         max_frames=max_frames,
+         fps=fps,
+         dpi=dpi,
+         annotate=annotate,
+         show_progress=show_progress
+     )
+
+
+ if __name__ == '__main__':
+     import argparse
+     parser = argparse.ArgumentParser(
+         description='Render TIMS frames (DDA or DIA) & build video'
+     )
+     parser.add_argument('mode', choices=['dda', 'dia'],
+                         help='Acquisition mode')
+     parser.add_argument('data_path', help='Path to .d folder')
+     parser.add_argument('--out_dir', default='./frames',
+                         help='Output directory for PNGs (used with --legacy)')
+     parser.add_argument('--frames', type=int, nargs='+',
+                         help='List of frame IDs to render')
+     parser.add_argument('--video_out', help='Path for output video file')
+     parser.add_argument('--fps', type=int, default=10,
+                         help='Frames per second')
+     parser.add_argument('--dpi', type=int, default=100,
+                         help='Resolution for video frames')
+     parser.add_argument('--max_frames', type=int, default=None,
+                         help='Max frames for preview (takes a slice from the middle of the run)')
+     parser.add_argument('--mem', type=float, default=4.0,
+                         help='GPU memory limit (GB)')
+     parser.add_argument('--no-annotate', dest='annotate', action='store_false',
+                         help='Disable drawing of window annotations')
+     parser.add_argument('--legacy', action='store_true',
+                         help='Use legacy two-pass rendering (PNG files + video)')
+     args = parser.parse_args()
+
+     if args.mode == 'dda':
+         renderer = DDAFrameRenderer(args.data_path, memory_limit_gb=args.mem)
+     else:
+         renderer = DIAFrameRenderer(args.data_path, memory_limit_gb=args.mem)
+
+     if args.legacy or not args.video_out:
+         # Legacy mode: render to PNG files, then optionally create video
+         common = {'annotate': args.annotate}
+         renderer.batch_render(args.out_dir, frame_ids=args.frames, **common)
+         if args.video_out:
+             renderer.create_video(args.out_dir, args.video_out, fps=args.fps)
+     else:
+         # Direct mode: render directly to video (no intermediate files)
+         renderer.render_to_video(
+             output_path=args.video_out,
+             frame_ids=args.frames,
+             fps=args.fps,
+             dpi=args.dpi,
+             annotate=args.annotate,
+             max_frames=args.max_frames
+         )
+         print(f"Video saved to: {args.video_out}")
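The three overlay helpers in this module share one piece of geometry: an isolation window given as `IsolationMz ± IsolationWidth/2`, plus a scan range `ScanNumBegin..ScanNumEnd`, becomes a rectangle on the m/z-vs-scan image, padded by two 1 Th bins on each side. The `__main__` block above also makes the module usable as a CLI, e.g. `python -m imspy_vis.frame_rendering dda /path/to/run.d --video_out preview.mp4`. A small worked sketch of the rectangle math, with made-up window values:

```python
import numpy as np

# Hypothetical isolation window: centered at 500.25 Th, 2.0 Th wide, scans 150-300
mz_mid, iso_w = 500.25, 2.0
scan_begin, scan_end = 150, 300

# Same box math as overlay_precursor / overlay_windows:
x = int(np.floor(mz_mid - iso_w / 2)) - 2   # left edge: floor(499.25) - 2 = 497
w = int(np.ceil(iso_w)) + 4                 # width: 2 + 4 = 6 bins
y = scan_begin                              # top edge at the first selected scan
h = scan_end - scan_begin                   # height: 150 scans

print(x, y, w, h)  # 497 150 6 150 -> passed to patches.Rectangle((x, y), w, h)
```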
imspy_vis/pointcloud.py ADDED
@@ -0,0 +1,98 @@
+ import plotly.graph_objects as go
+ from ipywidgets import widgets
+ import numpy as np
+ import abc
+ import plotly.express as px
+
+
+ def calculate_mz_tick_spacing(mz_min, mz_max, num_ticks=10):
+     return np.round((mz_max - mz_min) / num_ticks, 1)
+
+
+ class ImsPointCloudVisualizer(abc.ABC):
+     def __init__(self, data):
+         self.data = data
+         self.__create_widgets()
+
+     def __create_widgets(self):
+         points = np.array([[1.0, 1.0, 1.0, 1.0]])
+         point_cloud = go.Scatter3d(
+             x=points[:, 0],
+             y=points[:, 1],
+             z=points[:, 2],
+             mode='markers',
+             marker=dict(size=5, color=np.log(points[:, 3]), colorscale='inferno', opacity=1))
+
+         self.points_widget = go.FigureWidget(data=[point_cloud])
+         self.points_widget.update_layout(margin=dict(l=0, r=0, b=0, t=0),
+                                          scene={'xaxis': {'title': 'X'},
+                                                 'yaxis': {'title': 'Y'},
+                                                 'zaxis': {'title': 'Z', 'dtick': 1}},
+                                          width=800,   # Set the width here
+                                          height=600,  # Set the height here
+                                          template="plotly_white")
+
+         self.opacity_slider = widgets.FloatSlider(value=0.5, min=0.1, max=1, step=0.1, description='opacity:',
+                                                   continuous_update=False)
+
+         self.point_size_slider = widgets.FloatSlider(value=1, min=0.1, max=5.0, step=0.1, description='point size:',
+                                                      continuous_update=False)
+
+         self.color_scale = widgets.Dropdown(options=sorted(px.colors.named_colorscales()),
+                                             value='inferno', description='color scale:', disabled=False)
+
+         self.point_controls = widgets.HBox(children=[self.opacity_slider, self.point_size_slider, self.color_scale])
+
+         self.update_button = widgets.Button(description='Update')
+         self.update_button.on_click(self.on_update_clicked)
+
+         self.box = widgets.VBox(children=[self.point_controls, self.update_button, self.points_widget])
+
+     @abc.abstractmethod
+     def on_update_clicked(self, change):
+         """
+         Refresh the point cloud from self.data when the Update button is clicked.
+         """
+
+     @abc.abstractmethod
+     def display_widgets(self):
+         """
+         Display the control widgets and the 3D figure.
+         """
+
+
+ class DDAPrecursorPointCloudVis(ImsPointCloudVisualizer, abc.ABC):
+     def __init__(self, data):
+         super().__init__(data)
+         self.opacity_slider.value = 0.3
+         self.point_size_slider.value = 0.5
+
+     def on_update_clicked(self, change):
+
+         points = self.data
+         f = np.sort(np.unique(np.copy(points.frame.values)))   # unique frame IDs, sorted
+         f_idx = dict(np.c_[f, np.arange(f.shape[0])])          # frame ID -> retention-time index
+         rt_indices = [f_idx[x] for x in points.frame.values]
+
+         self.points_widget.data[0].x = rt_indices
+         self.points_widget.data[0].y = points.scan.values
+         self.points_widget.data[0].z = points.mz.values
+         self.points_widget.data[0].marker = dict(size=self.point_size_slider.value,
+                                                  color=np.log(points.intensity.values + 0.0001),
+                                                  colorscale=self.color_scale.value,
+                                                  line=dict(width=0),
+                                                  opacity=self.opacity_slider.value)
+
+         tick_spacing = calculate_mz_tick_spacing(np.min(points.mz.values), np.max(points.mz.values))
+
+         self.points_widget.update_layout(margin=dict(l=0, r=0, b=0, t=0),
+                                          scene={'xaxis': {'title': 'Rt-Index'},
+                                                 'yaxis': {'title': 'Mobility-Index'},
+                                                 'zaxis': {'title': 'm/z', 'dtick': tick_spacing}},
+                                          template="plotly_white")
+
+     def display_widgets(self):
+         try:
+             display(self.box)  # display() is provided by IPython in notebook environments
+         except Exception as e:
+             print(e)
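`DDAPrecursorPointCloudVis` only assumes that `data` exposes `frame`, `scan`, `mz`, and `intensity` columns, since that is all `on_update_clicked` reads. A hedged notebook sketch with a synthetic pandas DataFrame standing in for real precursor points:

```python
import numpy as np
import pandas as pd
from imspy_vis import DDAPrecursorPointCloudVis

# Synthetic stand-in for precursor points (frame, scan, m/z, intensity)
rng = np.random.default_rng(42)
n = 5_000
data = pd.DataFrame({
    'frame': rng.integers(1, 200, n),
    'scan': rng.integers(0, 900, n),
    'mz': rng.uniform(300.0, 1500.0, n),
    'intensity': rng.exponential(1_000.0, n),
})

vis = DDAPrecursorPointCloudVis(data)
vis.display_widgets()         # shows the controls + 3D figure in a Jupyter notebook
vis.on_update_clicked(None)   # pushes the data into the plot (same as clicking 'Update')
```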
imspy_vis-0.4.0.dist-info/METADATA ADDED
@@ -0,0 +1,104 @@
+ Metadata-Version: 2.4
+ Name: imspy-vis
+ Version: 0.4.0
+ Summary: Lightweight visualization tools for timsTOF proteomics data.
+ License-Expression: MIT
+ Author: theGreatHerrLebert
+ Author-email: davidteschner@googlemail.com
+ Requires-Python: >=3.11,<3.14
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Programming Language :: Python :: 3.11
+ Classifier: Programming Language :: Python :: 3.12
+ Classifier: Programming Language :: Python :: 3.13
+ Provides-Extra: notebook
+ Requires-Dist: imageio (>=2.25)
+ Requires-Dist: imageio-ffmpeg (>=0.4)
+ Requires-Dist: imspy-core (>=0.4.0)
+ Requires-Dist: ipywidgets (>=8.0)
+ Requires-Dist: jupyter (>=1.0) ; extra == "notebook"
+ Requires-Dist: matplotlib (>=3.5)
+ Requires-Dist: notebook (>=7.0) ; extra == "notebook"
+ Requires-Dist: numpy (>=1.24)
+ Requires-Dist: pandas (>=2.0)
+ Requires-Dist: plotly (>=5.0)
+ Requires-Dist: tqdm (>=4.66)
+ Description-Content-Type: text/markdown
+
+ # imspy-vis
+
+ Lightweight visualization tools for timsTOF proteomics data.
+
+ ## Installation
+
+ ```bash
+ pip install imspy-vis
+ ```
+
+ For Jupyter notebook support:
+
+ ```bash
+ pip install imspy-vis[notebook]
+ ```
+
+ ## Features
+
+ - **Frame Rendering**: DDA and DIA frame visualization with annotation overlays
+ - **Point Cloud Visualization**: Interactive 3D visualization using Plotly
+ - **Video Generation**: Generate preview videos from timsTOF datasets
+ - **Jupyter Integration**: Interactive widgets for notebook-based exploration
+
+ ## Quick Start
+
+ ### Frame Rendering
+
+ ```python
+ from imspy_vis import DDAFrameRenderer, DIAFrameRenderer, generate_preview_video
+
+ # Generate a quick preview video
+ generate_preview_video(
+     '/path/to/data.d',
+     '/path/to/output.mp4',
+     mode='dda',
+     max_frames=100,
+     fps=10
+ )
+
+ # Or use the renderer directly
+ renderer = DDAFrameRenderer('/path/to/data.d')
+ renderer.render_to_video('/path/to/output.mp4', max_frames=50)
+ ```
+
+ ### Point Cloud Visualization (Jupyter)
+
+ ```python
+ from imspy_vis import DDAPrecursorPointCloudVis
+
+ # In a Jupyter notebook
+ visualizer = DDAPrecursorPointCloudVis(precursor_data)
+ visualizer.display_widgets()
+ ```
+
+ ## Modules
+
+ - **pointcloud**: Interactive 3D point cloud visualization using Plotly
+ - **frame_rendering**: Frame-by-frame rendering and video generation
+
+ ## Dependencies
+
+ - **imspy-core**: Core data structures (required)
+ - **plotly**: Interactive plotting
+ - **matplotlib**: Static plotting and frame rendering
+ - **imageio**: Video generation
+ - **ipywidgets**: Jupyter notebook widgets
+
+ ## Related Packages
+
+ - **imspy-core**: Core data structures and timsTOF readers
+ - **imspy-predictors**: ML-based predictors
+ - **imspy-simulation**: TimSim simulation tools
+ - **imspy-search**: Database search functionality
+
+ ## License
+
+ MIT License - see LICENSE file for details.
+
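The Quick Start above uses the direct-to-video path; the renderers also keep a two-pass workflow (PNG frames first, then a video), which is useful when individual frames need to be inspected. A hedged sketch with placeholder paths and frame IDs:

```python
from imspy_vis import DIAFrameRenderer

renderer = DIAFrameRenderer('/path/to/run.d')

# Pass 1: one PNG per frame (all frames if frame_ids is omitted)
renderer.batch_render('/tmp/dia_frames', frame_ids=[1, 2, 3, 4, 5], annotate=True)

# Pass 2: stitch the PNGs into a video
renderer.create_video('/tmp/dia_frames', '/path/to/dia_frames.mp4', fps=5)
```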
imspy_vis-0.4.0.dist-info/RECORD ADDED
@@ -0,0 +1,6 @@
+ imspy_vis/__init__.py,sha256=X_6tWx_2jFLvFiR5FZ79XgrROUovtXGpyJMHuNSCFV8,1504
+ imspy_vis/frame_rendering.py,sha256=6KjerGAm_24ygsyXQGsh9bOwf8xzNYK41AyNaZ4jghI,19548
+ imspy_vis/pointcloud.py,sha256=POQGW4jdiHiBikROm9lKYhHOkbTAafcOF9GQpoTBE90,4085
+ imspy_vis-0.4.0.dist-info/METADATA,sha256=-O3aSHNvX-uMr6AUdIdlXs-SdE-FHjxD2D6py2R2JNk,2745
+ imspy_vis-0.4.0.dist-info/WHEEL,sha256=kJCRJT_g0adfAJzTx2GUMmS80rTJIVHRCfG0DQgLq3o,88
+ imspy_vis-0.4.0.dist-info/RECORD,,
imspy_vis-0.4.0.dist-info/WHEEL ADDED
@@ -0,0 +1,4 @@
+ Wheel-Version: 1.0
+ Generator: poetry-core 2.3.1
+ Root-Is-Purelib: true
+ Tag: py3-none-any