FlowCyPy 0.7.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- FlowCyPy/__init__.py +13 -0
- FlowCyPy/_version.py +16 -0
- FlowCyPy/acquisition.py +652 -0
- FlowCyPy/classifier.py +208 -0
- FlowCyPy/coupling_mechanism/__init__.py +4 -0
- FlowCyPy/coupling_mechanism/empirical.py +47 -0
- FlowCyPy/coupling_mechanism/mie.py +207 -0
- FlowCyPy/coupling_mechanism/rayleigh.py +116 -0
- FlowCyPy/coupling_mechanism/uniform.py +40 -0
- FlowCyPy/coupling_mechanism.py +205 -0
- FlowCyPy/cytometer.py +314 -0
- FlowCyPy/detector.py +439 -0
- FlowCyPy/directories.py +36 -0
- FlowCyPy/distribution/__init__.py +16 -0
- FlowCyPy/distribution/base_class.py +79 -0
- FlowCyPy/distribution/delta.py +104 -0
- FlowCyPy/distribution/lognormal.py +124 -0
- FlowCyPy/distribution/normal.py +128 -0
- FlowCyPy/distribution/particle_size_distribution.py +132 -0
- FlowCyPy/distribution/uniform.py +117 -0
- FlowCyPy/distribution/weibull.py +115 -0
- FlowCyPy/flow_cell.py +198 -0
- FlowCyPy/helper.py +81 -0
- FlowCyPy/logger.py +136 -0
- FlowCyPy/noises.py +34 -0
- FlowCyPy/particle_count.py +127 -0
- FlowCyPy/peak_locator/__init__.py +4 -0
- FlowCyPy/peak_locator/base_class.py +163 -0
- FlowCyPy/peak_locator/basic.py +108 -0
- FlowCyPy/peak_locator/derivative.py +143 -0
- FlowCyPy/peak_locator/moving_average.py +166 -0
- FlowCyPy/physical_constant.py +19 -0
- FlowCyPy/plottings.py +269 -0
- FlowCyPy/population.py +136 -0
- FlowCyPy/populations_instances.py +65 -0
- FlowCyPy/scatterer_collection.py +306 -0
- FlowCyPy/signal_digitizer.py +90 -0
- FlowCyPy/source.py +249 -0
- FlowCyPy/units.py +30 -0
- FlowCyPy/utils.py +191 -0
- FlowCyPy-0.7.0.dist-info/LICENSE +21 -0
- FlowCyPy-0.7.0.dist-info/METADATA +252 -0
- FlowCyPy-0.7.0.dist-info/RECORD +45 -0
- FlowCyPy-0.7.0.dist-info/WHEEL +5 -0
- FlowCyPy-0.7.0.dist-info/top_level.txt +1 -0
FlowCyPy/__init__.py
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
# FlowCyPy public API: version resolution and top-level re-exports.
try:
    # _version.py is generated by setuptools_scm at build time.
    from ._version import version as __version__  # noqa: F401

except ImportError:
    # Fallback for source checkouts where the generated file is absent.
    __version__ = "0.0.0"

from .cytometer import FlowCytometer
from .scatterer_collection import ScattererCollection, CouplingModel
from .population import Population
from .detector import Detector
from .flow_cell import FlowCell
from .source import GaussianBeam
from .noises import NoiseSetting
|
FlowCyPy/_version.py
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
1
|
+
# file generated by setuptools_scm
# don't change, don't track in version control
# TYPE_CHECKING is a plain constant (not typing.TYPE_CHECKING) so this file
# has no runtime import cost; the annotations below exist for tooling only.
TYPE_CHECKING = False
if TYPE_CHECKING:
    from typing import Tuple, Union
    VERSION_TUPLE = Tuple[Union[int, str], ...]
else:
    VERSION_TUPLE = object

version: str
__version__: str
__version_tuple__: VERSION_TUPLE
version_tuple: VERSION_TUPLE

__version__ = version = '0.7.0'
__version_tuple__ = version_tuple = (0, 7, 0)
|
FlowCyPy/acquisition.py
ADDED
|
@@ -0,0 +1,652 @@
|
|
|
1
|
+
import logging
|
|
2
|
+
from typing import Optional, Union, List
|
|
3
|
+
from MPSPlots.styles import mps
|
|
4
|
+
import pandas as pd
|
|
5
|
+
import numpy as np
|
|
6
|
+
from FlowCyPy import units
|
|
7
|
+
from FlowCyPy.units import Quantity
|
|
8
|
+
from scipy.signal import find_peaks
|
|
9
|
+
import matplotlib.pyplot as plt
|
|
10
|
+
import seaborn as sns
|
|
11
|
+
from tabulate import tabulate
|
|
12
|
+
import warnings
|
|
13
|
+
|
|
14
|
+
class DataAccessor:
    """
    Plain attribute namespace attached to an :class:`Acquisition`.

    Holds the acquisition's dataframes (``continuous``, ``scatterer``, and
    later ``triggered``/``peaks``) as dynamically assigned attributes, plus a
    back-reference to its owner.
    """

    def __init__(self, outer):
        """Keep a back-reference to the owning object."""
        self._outer = outer
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
class Acquisition:
    """
    Represents a flow cytometry experiment, including runtime, dataframes, logging, and visualization.

    Attributes
    ----------
    run_time : Quantity
        Total runtime of the experiment.
    scatterer_dataframe : pd.DataFrame
        DataFrame containing scatterer data, indexed by population and time.
    detector_dataframe : pd.DataFrame
        DataFrame containing detector signal data, indexed by detector and time.
    """

    def __init__(self, run_time: Quantity, cytometer: object, scatterer_dataframe: pd.DataFrame, detector_dataframe: pd.DataFrame):
        """
        Initializes the Acquisition instance.

        Parameters
        ----------
        run_time : Quantity
            Total runtime of the experiment.
        cytometer : object
            The cytometer that produced the data; consulted later for
            detector lookup and digitizer settings during plotting.
        scatterer_dataframe : pd.DataFrame
            DataFrame with scatterer data.
        detector_dataframe : pd.DataFrame
            DataFrame with detector signal data.
        """
        self.cytometer = cytometer

        # Namespaces for raw data access, plotting, and statistics logging.
        # ``data.triggered`` and ``data.peaks`` are added by run_triggering().
        self.data = DataAccessor(self)
        self.plot = self.PlotInterface(self)
        self.logger = self.LoggerInterface(self)

        self.data.continuous = detector_dataframe
        self.data.scatterer = scatterer_dataframe
        self.run_time = run_time
|
|
55
|
+
|
|
56
|
+
@property
|
|
57
|
+
def n_detectors(self) -> int:
|
|
58
|
+
return len(self.data.continuous.index.get_level_values('Detector').unique())
|
|
59
|
+
|
|
60
|
+
def detect_peaks(self, multi_peak_strategy: str = 'max') -> None:
|
|
61
|
+
"""
|
|
62
|
+
Detects peaks for each segment and stores results in a DataFrame.
|
|
63
|
+
|
|
64
|
+
Parameters
|
|
65
|
+
----------
|
|
66
|
+
multi_peak_strategy : str, optional
|
|
67
|
+
Strategy for handling multiple peaks in a segment. Options are:
|
|
68
|
+
- 'mean': Take the average of the peaks in the segment.
|
|
69
|
+
- 'max': Take the maximum peak in the segment.
|
|
70
|
+
- 'sum': Sum all peaks in the segment.
|
|
71
|
+
- 'discard': Remove entries with multiple peaks.
|
|
72
|
+
- 'keep': Keep all peaks without aggregation.
|
|
73
|
+
Default is 'mean'.
|
|
74
|
+
"""
|
|
75
|
+
if multi_peak_strategy not in {'max', }:
|
|
76
|
+
raise ValueError("Invalid multi_peak_strategy. Choose from 'max'.")
|
|
77
|
+
|
|
78
|
+
def process_segment(segment):
|
|
79
|
+
signal = segment['DigitizedSignal'].values
|
|
80
|
+
time = segment['Time'].values
|
|
81
|
+
peaks, properties = find_peaks(signal, width=1)
|
|
82
|
+
|
|
83
|
+
return pd.DataFrame({
|
|
84
|
+
"SegmentID": segment.name[1],
|
|
85
|
+
"Detector": segment.name[0],
|
|
86
|
+
"Height": signal[peaks],
|
|
87
|
+
"Time": time[peaks],
|
|
88
|
+
**{k: v for k, v in properties.items()}
|
|
89
|
+
})
|
|
90
|
+
|
|
91
|
+
# Process peaks for each group
|
|
92
|
+
results = self.data.triggered.groupby(level=['Detector', 'SegmentID']).apply(process_segment)
|
|
93
|
+
results = results.reset_index(drop=True)
|
|
94
|
+
|
|
95
|
+
# Check for multiple peaks and issue a warning
|
|
96
|
+
peak_counts = results.groupby(['Detector', 'SegmentID']).size()
|
|
97
|
+
multiple_peak_segments = peak_counts[peak_counts > 1]
|
|
98
|
+
if not multiple_peak_segments.empty:
|
|
99
|
+
warnings.warn(
|
|
100
|
+
f"Multiple peaks detected in the following segments: {multiple_peak_segments.index.tolist()}",
|
|
101
|
+
UserWarning
|
|
102
|
+
)
|
|
103
|
+
|
|
104
|
+
_temp = results.reset_index()[['Detector', 'SegmentID', 'Height']].pint.dequantify().droplevel('unit', axis=1)
|
|
105
|
+
|
|
106
|
+
self.data.peaks = (
|
|
107
|
+
results.reset_index()
|
|
108
|
+
.loc[_temp.groupby(['Detector', 'SegmentID'])['Height'].idxmax()]
|
|
109
|
+
.set_index(['Detector', 'SegmentID'])
|
|
110
|
+
)
|
|
111
|
+
|
|
112
|
+
def _get_trigger_indices(
|
|
113
|
+
self,
|
|
114
|
+
threshold: units.Quantity,
|
|
115
|
+
trigger_detector_name: str = None,
|
|
116
|
+
pre_buffer: int = 64,
|
|
117
|
+
post_buffer: int = 64) -> tuple[np.ndarray, np.ndarray]:
|
|
118
|
+
"""
|
|
119
|
+
Calculate start and end indices for triggered segments.
|
|
120
|
+
"""
|
|
121
|
+
if trigger_detector_name not in self.data.continuous.index.get_level_values('Detector').unique():
|
|
122
|
+
raise ValueError(f"Detector '{trigger_detector_name}' not found.")
|
|
123
|
+
|
|
124
|
+
signal = self.data.continuous.xs(trigger_detector_name)['Signal']
|
|
125
|
+
trigger_signal = signal > threshold.to(signal.pint.units)
|
|
126
|
+
|
|
127
|
+
crossings = np.where(np.diff(trigger_signal.astype(int)) == 1)[0]
|
|
128
|
+
start_indices = np.clip(crossings - pre_buffer, 0, len(trigger_signal) - 1)
|
|
129
|
+
end_indices = np.clip(crossings + post_buffer, 0, len(trigger_signal) - 1)
|
|
130
|
+
|
|
131
|
+
return start_indices, end_indices
|
|
132
|
+
|
|
133
|
+
def run_triggering(self,
|
|
134
|
+
threshold: units.Quantity,
|
|
135
|
+
trigger_detector_name: str,
|
|
136
|
+
pre_buffer: int = 64,
|
|
137
|
+
post_buffer: int = 64,
|
|
138
|
+
max_triggers: int = None) -> None:
|
|
139
|
+
"""
|
|
140
|
+
Executes the triggered acquisition analysis.
|
|
141
|
+
|
|
142
|
+
Parameters
|
|
143
|
+
----------
|
|
144
|
+
threshold : units.Quantity
|
|
145
|
+
Trigger threshold value.
|
|
146
|
+
trigger_detector_name : str, optional
|
|
147
|
+
Detector used for triggering, by default None.
|
|
148
|
+
custom_trigger : np.ndarray, optional
|
|
149
|
+
Custom trigger array, by default None.
|
|
150
|
+
pre_buffer : int, optional
|
|
151
|
+
Points before trigger, by default 64.
|
|
152
|
+
post_buffer : int, optional
|
|
153
|
+
Points after trigger, by default 64.
|
|
154
|
+
max_triggers : int, optional
|
|
155
|
+
Maximum number of triggers to process, by default None.
|
|
156
|
+
"""
|
|
157
|
+
self.threshold = threshold
|
|
158
|
+
self.trigger_detector_name = trigger_detector_name
|
|
159
|
+
start_indices, end_indices = self._get_trigger_indices(
|
|
160
|
+
threshold, trigger_detector_name, pre_buffer, post_buffer
|
|
161
|
+
)
|
|
162
|
+
|
|
163
|
+
if max_triggers is not None:
|
|
164
|
+
start_indices = start_indices[:max_triggers]
|
|
165
|
+
end_indices = end_indices[:max_triggers]
|
|
166
|
+
|
|
167
|
+
segments = []
|
|
168
|
+
for detector_name in self.data.continuous.index.get_level_values('Detector').unique():
|
|
169
|
+
detector_data = self.data.continuous.xs(detector_name)
|
|
170
|
+
time, digitized, signal = detector_data['Time'], detector_data['DigitizedSignal'], detector_data['Signal']
|
|
171
|
+
|
|
172
|
+
|
|
173
|
+
for idx, (start, end) in enumerate(zip(start_indices, end_indices)):
|
|
174
|
+
|
|
175
|
+
segment = pd.DataFrame({
|
|
176
|
+
'Time': time[start:end + 1],
|
|
177
|
+
'DigitizedSignal': digitized[start:end + 1],
|
|
178
|
+
'Signal': signal[start:end + 1],
|
|
179
|
+
'Detector': detector_name,
|
|
180
|
+
'SegmentID': idx
|
|
181
|
+
})
|
|
182
|
+
segments.append(segment)
|
|
183
|
+
|
|
184
|
+
if len(segments) !=0:
|
|
185
|
+
self.data.triggered = pd.concat(segments).set_index(['Detector', 'SegmentID'])
|
|
186
|
+
else:
|
|
187
|
+
warnings.warn(
|
|
188
|
+
f"No signal were triggered during the run time, try changing the threshold. Signal min-max value is: {self.data.continuous['Signal'].min().to_compact()}, {self.data.continuous['Signal'].max().to_compact()}",
|
|
189
|
+
UserWarning
|
|
190
|
+
)
|
|
191
|
+
|
|
192
|
+
self.detect_peaks()
|
|
193
|
+
|
|
194
|
+
    class LoggerInterface:
        """
        A nested class for logging statistical information about the experiment.

        Methods
        -------
        scatterer()
            Logs statistics about the scatterer populations.
        detector()
            Logs statistics about the detector signals.
        """

        def __init__(self, experiment: object):
            # Back-reference to the owning Acquisition instance.
            self.experiment = experiment

        def scatterer(self, table_format: str = "grid") -> None:
            """
            Logs detailed information about scatterer populations.

            Parameters
            ----------
            table_format : str, optional
                The format for the table display (default: 'grid').
                Options include 'plain', 'github', 'grid', 'fancy_grid', etc.

            Returns
            -------
            None
                Logs scatterer population information, including refractive index, size, particle count,
                number of events, and time statistics.
            """
            logging.info("\n=== Scatterer Population Properties ===")

            # Collect general population data; each groupby item is a
            # (name, sub-dataframe) tuple consumed by the helper below.
            general_table_data = [
                self._get_population_properties(population)
                for population in self.experiment.data.scatterer.groupby("Population")
            ]
            general_headers = [
                "Name",
                "Refractive Index",
                "Medium Refractive Index",
                "Size",
                "Particle Count",
                "Number of Events",
                "Min Time Between Events",
                "Avg Time Between Events",
            ]

            formatted_general_table = tabulate(
                general_table_data, headers=general_headers, tablefmt=table_format, floatfmt=".4f"
            )
            logging.info("\n" + formatted_general_table)

        def _get_population_properties(self, population_group: tuple) -> List[Union[str, float]]:
            """
            Extracts key properties of a scatterer population for the general properties table.

            Parameters
            ----------
            population_group : tuple
                A tuple containing the population name and its corresponding DataFrame.

            Returns
            -------
            list
                List of scatterer properties: [name, refractive index, medium refractive index, size,
                particle count, number of events, min time between events, avg time between events].
            """
            population_name, population_df = population_group

            name = population_name
            refractive_index = f"{population_df['RefractiveIndex'].mean():~P}"
            # NOTE(review): this formats run_time (a duration), not a
            # refractive index — the "Medium Refractive Index" column is
            # therefore wrong. Looks like a placeholder/bug; replace with the
            # actual medium refractive index once its storage location is
            # known. TODO confirm intended source.
            medium_refractive_index = f"{self.experiment.run_time:~P}"  # Replace with actual medium refractive index if stored elsewhere
            size = f"{population_df['Size'].mean():~P}"
            particle_count = len(population_df)
            # Each scatterer row counts as one event.
            num_events = particle_count

            min_delta_position = population_df["Time"].diff().abs().min()
            avg_delta_position = population_df["Time"].diff().mean()

            return [
                name,
                refractive_index,
                medium_refractive_index,
                size,
                particle_count,
                num_events,
                min_delta_position,
                avg_delta_position,
            ]

        def detector(self, table_format: str = "grid", include_totals: bool = True) -> None:
            """
            Logs statistics about detector signals.

            Parameters
            ----------
            table_format : str, optional
                The format for the table display (default: 'grid').
                Options include 'plain', 'github', 'grid', 'fancy_grid', etc.
            include_totals : bool, optional
                If True, logs the total number of events across all detectors (default: True).

            Returns
            -------
            None
                Logs details about detector signals, including event counts
                and timing statistics.
            """
            logging.info("\n=== Detector Signal Statistics ===")

            # Compute statistics for each detector.
            # NOTE(review): index.levels[0] may retain stale categories after
            # filtering; get_level_values('Detector').unique() would be the
            # safer enumeration — confirm the index is never pre-filtered.
            df = self.experiment.data.continuous
            table_data = [
                self._get_detector_stats(detector_name, df.xs(detector_name, level="Detector"))
                for detector_name in df.index.levels[0]
            ]
            headers = [
                "Detector",
                "Number of Acquisition",
                "First Event Time",
                "Last Event Time",
                "Time Between Events",
            ]

            formatted_table = tabulate(table_data, headers=headers, tablefmt=table_format, floatfmt=".3f")
            logging.info("\n" + formatted_table)

            if include_totals:
                # stat[1] is the per-detector acquisition count (see helper).
                total_events = sum(stat[1] for stat in table_data)
                logging.info(f"\nTotal number of events detected across all detectors: {total_events}")

        def _get_detector_stats(self, detector_name: str, group: pd.DataFrame) -> list:
            """
            Computes statistics for a detector.

            Parameters
            ----------
            detector_name : str
                Name of the detector.
            group : pd.DataFrame
                DataFrame containing the detector data.

            Returns
            -------
            list
                List of computed statistics: [detector_name, num_acquisition,
                first_event_time, last_event_time, time_between_events].
            """
            num_acquisition = len(group["Time"])
            first_event_time = group["Time"].min()
            last_event_time = group["Time"].max()

            # Mean spacing between consecutive timestamps.
            time_diffs = group["Time"].diff().dropna()
            time_between_events = time_diffs.mean()

            return [
                detector_name,
                num_acquisition,
                first_event_time,
                last_event_time,
                time_between_events,
            ]
|
|
358
|
+
|
|
359
|
+
|
|
360
|
+
    class PlotInterface:
        """
        A nested class for handling visualization and plotting.

        Methods
        -------
        signals(figure_size=(10, 6), show=True)
            Visualizes raw signals for detector channels and scatterer distributions.
        coupling_distribution(x_detector, y_detector, log_scale=False, show=True, equal_limits=False, save_path=None)
            Plots the density distribution of optical coupling between two detector channels.
        """

        def __init__(self, acquisition: object):
            # Back-reference to the owning Acquisition instance.
            self.acquisition = acquisition
|
|
374
|
+
|
|
375
|
+
        def signals(self, figure_size: tuple = (10, 6), show: bool = True) -> None:
            """
            Visualizes raw signals for all detector channels and the scatterer distribution.

            One stacked subplot per detector shows the digitized signal (left
            axis) and the continuous analog signal (right axis, black line);
            a final half-height subplot marks scatterer event times.

            Parameters
            ----------
            figure_size : tuple, optional
                Size of the plot (default: (10, 6)).
            show : bool, optional
                Displays the plot immediately if True (default: True).
            """
            # One row per detector plus a half-height event-marker row.
            n_plots = self.acquisition.n_detectors + 1

            # Common, human-scale time unit for all x-axes.
            time_units = self.acquisition.data.continuous.Time.max().to_compact().units

            with plt.style.context(mps):
                fig, axes = plt.subplots(
                    ncols=1,
                    nrows=n_plots,
                    figsize=figure_size,
                    sharex=True,
                    height_ratios=[1] * (n_plots - 1) + [0.5],
                )

                for ax, (detector_name, group) in zip(axes[:-1], self.acquisition.data.continuous.groupby("Detector")):
                    detector = self.get_detector(detector_name)

                    ax.step(group["Time"].pint.to(time_units), group["DigitizedSignal"], label="Digitized Signal", where='mid')
                    ax.set_ylabel(detector_name)
                    # NOTE(review): _bit_depth is used directly as the axis
                    # top — presumably it stores the maximum ADC count rather
                    # than the bit width; confirm against SignalDigitizer.
                    ax.set_ylim([0, self.acquisition.cytometer.signal_digitizer._bit_depth])

                    # Secondary axis: continuous analog signal in compact units.
                    ax2 = ax.twinx()
                    ax2_x = self.acquisition.data.continuous.loc[detector_name, 'Time']
                    ax2_y = self.acquisition.data.continuous.loc[detector_name, 'Signal']
                    ax2_y_units = ax2_y.max().to_compact().units
                    ax2.plot(
                        ax2_x.pint.to(time_units),
                        ax2_y.pint.to(ax2_y_units),
                        color='black',
                        linewidth=1,
                        linestyle='-',
                        label='Continuous signal',
                        zorder=0,
                    )

                    # Clamp the analog axis to the detector's saturation range.
                    ax2.set_ylim(detector._saturation_levels)

                self._add_event_to_ax(ax=axes[-1], time_units=time_units)

                axes[-1].set_xlabel(f"Time [{time_units}]")
                if show:
                    plt.show()
|
|
429
|
+
|
|
430
|
+
        def _add_event_to_ax(self, ax: plt.Axes, time_units: units.Quantity, palette: str = 'tab10') -> None:
            """Draw full-height vertical event markers, one color per population."""
            # Deterministic color assignment per population name.
            unique_populations = self.acquisition.data.scatterer.index.get_level_values('Population').unique()
            color_mapping = dict(zip(unique_populations, sns.color_palette(palette, len(unique_populations))))

            for population_name, group in self.acquisition.data.scatterer.groupby('Population'):
                x = group.Time.pint.to(time_units)
                color = color_mapping[population_name]
                # ymin/ymax in axis coordinates so the lines span full height.
                ax.vlines(x, ymin=0, ymax=1, transform=ax.get_xaxis_transform(), label=population_name, color=color)

            # The y axis carries no information on this marker strip.
            ax.tick_params(axis='y', left=False, labelleft=False)

            ax.get_yaxis().set_visible(False)
            ax.set_xlabel(f"Time [{time_units}]")

            ax.legend()
|
|
445
|
+
|
|
446
|
+
        def coupling_distribution(self, x_detector: str, y_detector: str, log_scale: bool = False, show: bool = True, equal_limits: bool = False, save_path: str = None) -> None:
            """
            Plots the density distribution of optical coupling between two detector channels.

            Parameters
            ----------
            x_detector : str
                Scatterer-dataframe column used for the x-axis.
            y_detector : str
                Scatterer-dataframe column used for the y-axis.
            log_scale : bool, optional
                Applies logarithmic scaling to axes if True (default: False).
            show : bool, optional
                Displays the plot immediately if True (default: True).
            equal_limits : bool, optional
                Ensures equal axis limits if True (default: False).
            save_path : str, optional
                Saves the plot to the specified path if provided.
            """
            df = self.acquisition.data.scatterer

            # Convert both channels to compact (human-scale) units for display.
            x_units = df[x_detector].max().to_compact().units
            y_units = df[y_detector].max().to_compact().units
            x = df[x_detector].pint.to(x_units)
            y = df[y_detector].pint.to(y_units)

            with plt.style.context(mps):
                joint_plot = sns.jointplot(data=df, x=x, y=y, hue="Population", alpha=0.8)

                if log_scale:
                    joint_plot.ax_joint.set_xscale("log")
                    joint_plot.ax_joint.set_yscale("log")

                if equal_limits:
                    # Shared limits make the two channels directly comparable.
                    min_limit = min(x.min(), y.min())
                    max_limit = max(x.max(), y.max())
                    joint_plot.ax_joint.set_xlim(min_limit, max_limit)
                    joint_plot.ax_joint.set_ylim(min_limit, max_limit)

                joint_plot.ax_joint.set_xlabel(f"Signal {x_detector} [{x_units}]")
                joint_plot.ax_joint.set_ylabel(f"Signal {y_detector} [{y_units}]")

                plt.tight_layout()

                if save_path:
                    joint_plot.figure.savefig(save_path, dpi=300, bbox_inches="tight")
                    logging.info(f"Plot saved to {save_path}")

                if show:
                    plt.show()
|
|
492
|
+
|
|
493
|
+
        def scatterer(self, show: bool = True, alpha: float = 0.8, bandwidth_adjust: float = 1, log_scale: bool = False, color_palette: Optional[Union[str, dict]] = None) -> None:
            """
            Visualizes the joint distribution of scatterer sizes and refractive indices using a Seaborn jointplot.

            Parameters
            ----------
            show : bool, optional
                If `True`, displays the plot after creation. Default is `True`.
            alpha : float, optional
                Transparency level for the scatter plot points, ranging from 0 (fully transparent) to 1 (fully opaque). Default is 0.8.
            bandwidth_adjust : float, optional
                Bandwidth adjustment factor for the kernel density estimate of the marginal distributions. Higher values produce smoother density estimates. Default is 1.
            log_scale : bool, optional
                If `True`, applies a logarithmic scale to both axes of the joint plot and their marginal distributions. Default is `False`.
            color_palette : str or dict, optional
                The color palette to use for the hue in the scatterplot. Can be a seaborn palette name
                (e.g., 'viridis', 'coolwarm') or a dictionary mapping hue levels to specific colors. Default is None.

            Returns
            -------
            None
                This function does not return any value. It either displays the plot (if `show=True`) or simply creates it for later use.

            Notes
            -----
            This method resets the index of the internal dataframe and extracts units from the 'Size' column.
            The plot uses the specified matplotlib style (`mps`) for consistent styling.
            """
            df_reset = self.acquisition.data.scatterer.reset_index()

            # A joint/KDE plot is meaningless for a single point; skip quietly.
            if len(df_reset) == 1:
                return

            x_unit = df_reset['Size'].pint.units

            with plt.style.context(mps):
                grid = sns.jointplot(
                    data=df_reset,
                    x='Size',
                    y='RefractiveIndex',
                    hue='Population',
                    palette=color_palette,
                    kind='scatter',
                    alpha=alpha,
                    marginal_kws=dict(bw_adjust=bandwidth_adjust)
                )

                grid.ax_joint.set_xlabel(f"Size [{x_unit}]")

                if log_scale:
                    # Scale the joint axes and both marginal axes together.
                    grid.ax_joint.set_xscale('log')
                    grid.ax_joint.set_yscale('log')
                    grid.ax_marg_x.set_xscale('log')
                    grid.ax_marg_y.set_yscale('log')

                plt.tight_layout()

                if show:
                    plt.show()
|
|
555
|
+
|
|
556
|
+
        def peaks(self, x_detector: str, y_detector: str, signal: str = 'Height', bandwidth_adjust: float = 0.8) -> None:
            """
            Plot the joint KDE distribution of the specified signal between two detectors using seaborn,
            overlaying the individual peak points as a scatter.

            Parameters
            ----------
            x_detector : str
                Name of the detector to use for the x-axis.
            y_detector : str
                Name of the detector to use for the y-axis.
            signal : str, optional
                The signal column to plot, by default 'Height'.
            bandwidth_adjust : float, optional
                Bandwidth adjustment factor for KDE, by default 0.8.
            """
            # Select the per-segment peak values for each requested detector.
            # NOTE(review): assumes run_triggering() has already populated
            # self.acquisition.data.peaks — there is no guard here.
            x_data = self.acquisition.data.peaks.loc[x_detector, signal]
            y_data = self.acquisition.data.peaks.loc[y_detector, signal]

            x_units = x_data.pint.units
            y_units = y_data.pint.units

            with plt.style.context(mps):
                # Create joint KDE plot with scatter points overlay
                grid = sns.jointplot(x=x_data, y=y_data, kind='kde', fill=True, cmap="Blues",
                    joint_kws={'bw_adjust': bandwidth_adjust, 'alpha': 0.7}
                )

                grid.ax_joint.scatter(x_data, y_data, color='C1', alpha=0.6)

                grid.set_axis_labels(f"{signal} ({x_detector}) [{x_units}]", f"{signal} ({y_detector}) [{y_units}]", fontsize=12)
                plt.tight_layout()
                plt.show()
|
|
590
|
+
|
|
591
|
+
        def trigger(self, show: bool = True) -> None:
            """Plot detected peaks on signal segments.

            One subplot per detector shows the digitized triggered segments,
            the continuous signal on a secondary axis, peak markers, and —
            for the trigger detector — the threshold line; a final
            half-height subplot marks scatterer event times.
            """
            # One row per detector plus a half-height event-marker row.
            n_plots = self.acquisition.n_detectors + 1
            with plt.style.context(mps):
                _, axes = plt.subplots(
                    nrows=n_plots,
                    ncols=1,
                    height_ratios=[1] * (n_plots - 1) + [0.5],
                    figsize=(10, 6),
                    sharex=True,
                    constrained_layout=True
                )

                time_units = self.acquisition.data.triggered['Time'].max().to_compact().units

                for ax, (detector_name, group) in zip(axes, self.acquisition.data.triggered.groupby(level=['Detector'])):
                    detector = self.get_detector(detector_name)

                    ax.set_ylabel(detector_name)

                    # Draw each segment separately so gaps between segments
                    # are not connected by the step line.
                    for _, sub_group in group.groupby(level=['SegmentID']):
                        x = sub_group['Time'].pint.to(time_units)
                        digitized = sub_group['DigitizedSignal']
                        ax.step(x, digitized, where='mid', linewidth=2)
                        # NOTE(review): _bit_depth used as axis top — presumably
                        # the maximum ADC count, not the bit width; confirm.
                        ax.set_ylim([0, self.acquisition.cytometer.signal_digitizer._bit_depth])

                    # Secondary axis: the full continuous signal for context.
                    ax2 = ax.twinx()
                    ax2_x = self.acquisition.data.continuous.loc[detector_name, 'Time']
                    ax2_y = self.acquisition.data.continuous.loc[detector_name, 'Signal']
                    ax2_y_units = ax2_y.max().to_compact().units
                    ax2.plot(
                        ax2_x.pint.to(time_units),
                        ax2_y.pint.to(ax2_y_units),
                        color='black',
                        linewidth=1,
                        linestyle='-',
                        label='Continuous signal',
                        zorder=0,
                    )

                    if detector_name == self.acquisition.trigger_detector_name:
                        ax2.axhline(y=self.acquisition.threshold.to(ax2_y_units), color='black', linestyle='--', label='Trigger')

                    ax2.set_ylim(detector._saturation_levels)

                    ax2.legend()

                # Overlay detected peak markers on each detector's subplot.
                # NOTE(review): the axis= keyword of groupby is deprecated and
                # removed in recent pandas; axis=0 is the default anyway.
                for ax, (detector_name, group) in zip(axes, self.acquisition.data.peaks.groupby(level=['Detector'], axis=0)):
                    x = group['Time'].pint.to(time_units)
                    y = group['Height']
                    ax.scatter(x, y, color='C1')

                self._add_event_to_ax(ax=axes[-1], time_units=time_units)

                if show:
                    plt.show()
|
|
648
|
+
|
|
649
|
+
def get_detector(self, name: str):
|
|
650
|
+
for detector in self.acquisition.cytometer.detectors:
|
|
651
|
+
if detector.name == name:
|
|
652
|
+
return detector
|