FlowCyPy 0.7.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- FlowCyPy/__init__.py +13 -0
- FlowCyPy/_version.py +16 -0
- FlowCyPy/acquisition.py +652 -0
- FlowCyPy/classifier.py +208 -0
- FlowCyPy/coupling_mechanism/__init__.py +4 -0
- FlowCyPy/coupling_mechanism/empirical.py +47 -0
- FlowCyPy/coupling_mechanism/mie.py +207 -0
- FlowCyPy/coupling_mechanism/rayleigh.py +116 -0
- FlowCyPy/coupling_mechanism/uniform.py +40 -0
- FlowCyPy/coupling_mechanism.py +205 -0
- FlowCyPy/cytometer.py +314 -0
- FlowCyPy/detector.py +439 -0
- FlowCyPy/directories.py +36 -0
- FlowCyPy/distribution/__init__.py +16 -0
- FlowCyPy/distribution/base_class.py +79 -0
- FlowCyPy/distribution/delta.py +104 -0
- FlowCyPy/distribution/lognormal.py +124 -0
- FlowCyPy/distribution/normal.py +128 -0
- FlowCyPy/distribution/particle_size_distribution.py +132 -0
- FlowCyPy/distribution/uniform.py +117 -0
- FlowCyPy/distribution/weibull.py +115 -0
- FlowCyPy/flow_cell.py +198 -0
- FlowCyPy/helper.py +81 -0
- FlowCyPy/logger.py +136 -0
- FlowCyPy/noises.py +34 -0
- FlowCyPy/particle_count.py +127 -0
- FlowCyPy/peak_locator/__init__.py +4 -0
- FlowCyPy/peak_locator/base_class.py +163 -0
- FlowCyPy/peak_locator/basic.py +108 -0
- FlowCyPy/peak_locator/derivative.py +143 -0
- FlowCyPy/peak_locator/moving_average.py +166 -0
- FlowCyPy/physical_constant.py +19 -0
- FlowCyPy/plottings.py +269 -0
- FlowCyPy/population.py +136 -0
- FlowCyPy/populations_instances.py +65 -0
- FlowCyPy/scatterer_collection.py +306 -0
- FlowCyPy/signal_digitizer.py +90 -0
- FlowCyPy/source.py +249 -0
- FlowCyPy/units.py +30 -0
- FlowCyPy/utils.py +191 -0
- FlowCyPy-0.7.0.dist-info/LICENSE +21 -0
- FlowCyPy-0.7.0.dist-info/METADATA +252 -0
- FlowCyPy-0.7.0.dist-info/RECORD +45 -0
- FlowCyPy-0.7.0.dist-info/WHEEL +5 -0
- FlowCyPy-0.7.0.dist-info/top_level.txt +1 -0
FlowCyPy/detector.py
ADDED
|
@@ -0,0 +1,439 @@
|
|
|
1
|
+
import numpy as np
|
|
2
|
+
import pandas as pd
|
|
3
|
+
from typing import Optional, Union
|
|
4
|
+
import matplotlib.pyplot as plt
|
|
5
|
+
from FlowCyPy import units
|
|
6
|
+
from FlowCyPy.units import AU, volt, watt, degree, ampere, coulomb, particle, meter
|
|
7
|
+
from FlowCyPy.utils import PropertiesReport
|
|
8
|
+
from pydantic.dataclasses import dataclass
|
|
9
|
+
from pydantic import field_validator
|
|
10
|
+
import pint_pandas
|
|
11
|
+
from FlowCyPy.physical_constant import PhysicalConstant
|
|
12
|
+
from PyMieSim.units import Quantity
|
|
13
|
+
from FlowCyPy.noises import NoiseSetting
|
|
14
|
+
from FlowCyPy.helper import plot_helper
|
|
15
|
+
from FlowCyPy.peak_locator import BasePeakLocator
|
|
16
|
+
import logging
|
|
17
|
+
from copy import copy
|
|
18
|
+
from FlowCyPy.signal_digitizer import SignalDigitizer
|
|
19
|
+
|
|
20
|
+
# Shared pydantic dataclass configuration: accept arbitrary (pint/PyMieSim)
# types, force keyword-only construction, use __slots__, and reject unknown
# fields at instantiation time.
config_dict = {
    'arbitrary_types_allowed': True,
    'kw_only': True,
    'slots': True,
    'extra': 'forbid',
}
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
@dataclass(config=config_dict, unsafe_hash=True)
class Detector(PropertiesReport):
    """
    A class representing a signal detector used in flow cytometry.

    This class models a photodetector, simulating signal acquisition, noise addition, and signal
    processing for analysis. It can optionally simulate different noise sources: shot noise,
    thermal noise, and dark current noise.

    Parameters
    ----------
    name : str
        The name or identifier of the detector. Defaults to ``str(id(self))`` when omitted.
    phi_angle : Quantity
        The detection angle, in degrees.
    numerical_aperture : Quantity
        The numerical aperture of the detector, a unitless value.
    responsitivity : Quantity
        Detector's responsivity, default is 1 ampere per watt.
    dark_current : Quantity
        The dark current of the detector, default is 0 amperes.
    resistance : Quantity
        Resistance of the detector, used for thermal noise simulation.
    temperature : Quantity
        Temperature of the detector in Kelvin, used for thermal noise simulation.
    """
    # Mandatory optical geometry.
    phi_angle: Quantity
    numerical_aperture: Quantity

    gamma_angle: Optional[Quantity] = Quantity(0, degree)
    sampling: Optional[Quantity] = 100 * AU
    responsitivity: Optional[Quantity] = Quantity(1, ampere / watt)
    baseline_shift: Optional[Quantity] = Quantity(0.0, volt)
    dark_current: Optional[Quantity] = Quantity(0.0, ampere)  # Dark current
    resistance: Optional[Quantity] = Quantity(50.0, 'ohm')  # Resistance for thermal noise
    temperature: Optional[Quantity] = Quantity(0.0, 'kelvin')  # Temperature for thermal noise
    name: Optional[str] = None

    @field_validator('phi_angle', 'gamma_angle')
    def _validate_angles(cls, value):
        """
        Validates that the provided angles are in degrees.

        Parameters
        ----------
        value : Quantity
            The angle value to validate.

        Returns
        -------
        Quantity
            The validated angle.

        Raises
        ------
        ValueError
            If the angle is not in degrees.
        """
        if not value.check('degree'):
            raise ValueError(f"Angle must be in degrees, but got {value.units}")
        return value

    @field_validator('responsitivity')
    def _validate_responsitivity(cls, value):
        """
        Validates that the detector's responsivity is provided in ampere per watt.

        Parameters
        ----------
        value : Quantity
            The responsivity value to validate.

        Returns
        -------
        Quantity
            The validated responsivity.

        Raises
        ------
        ValueError
            If the responsivity is not in ampere per watt.
        """
        # The docstring previously claimed volts per watt; the dimensionality
        # check below (and the A/W default) define the actual contract.
        if not value.check('A / W'):
            raise ValueError(f"Responsitivity must be in ampere per watt, but got {value.units}")
        return value

    @field_validator('baseline_shift')
    def _validate_voltage_attributes(cls, value):
        """
        Validates that voltage-valued attributes (baseline shift) are in volts.

        Parameters
        ----------
        value : Quantity
            The voltage attribute to validate.

        Returns
        -------
        Quantity
            The validated voltage attribute.

        Raises
        ------
        ValueError
            If the attribute is not in volts.
        """
        if not value.check('volt'):
            raise ValueError(f"Voltage attributes must be in volts, but got {value.units}")
        return value

    def __post_init__(self) -> None:
        """
        Finalizes initialization: assigns a unique default name when none is given.
        """
        if self.name is None:
            self.name = str(id(self))

    def _convert_attr_to_SI(self) -> None:
        """Convert every Quantity attribute to base SI units (strips unit prefixes)."""
        for attr_name, attr_value in vars(self).items():
            if isinstance(attr_value, Quantity):
                setattr(self, attr_name, attr_value.to_base_units())

    @property
    def dataframe(self) -> pd.DataFrame:
        # Cross-section of the cytometer-wide dataframe belonging to this detector.
        # Assumes `self.cytometer` has been attached by the owning FlowCytometer
        # — TODO confirm against cytometer.py.
        return self.cytometer.dataframe.xs(self.name)

    def get_initialized_signal(self, signal_digitizer: SignalDigitizer, run_time: Quantity) -> pd.DataFrame:
        """
        Initializes the raw signal for this detector.

        Builds a zero-valued, time-indexed signal placeholder sized to the
        digitizer's sampling frequency over the given run time, and stores the
        digitizer for later noise/bandwidth computations.

        Parameters
        ----------
        signal_digitizer : SignalDigitizer
            Digitizer providing sampling frequency and bandwidth.
        run_time : Quantity
            Total acquisition duration.

        Returns
        -------
        pd.DataFrame
            DataFrame with pint-backed 'Time' (seconds) and 'Signal' (volts) columns.
        """
        self.signal_digitizer = signal_digitizer

        time_points = int(self.signal_digitizer.sampling_freq * run_time)
        time = np.linspace(0, run_time, time_points)

        return pd.DataFrame(
            data=dict(
                Time=pint_pandas.PintArray(time, dtype=units.second),
                Signal=pint_pandas.PintArray(np.zeros_like(time), dtype=units.volt),
            )
        )

    def _add_thermal_noise_to_raw_signal(self, signal: pd.Series) -> np.ndarray:
        r"""
        Adds thermal (Johnson-Nyquist) noise to the signal in place.

        Thermal noise is caused by the thermal agitation of charge carriers:
        \[
            \sigma_{\text{thermal}} = \sqrt{4 k_B T B R}
        \]
        where \( k_B \) is the Boltzmann constant, \( T \) the temperature,
        \( B \) the bandwidth and \( R \) the resistance.

        Returns
        -------
        np.ndarray
            The generated thermal noise values, or None when noise is disabled
            or resistance/temperature is zero.
        """
        # Skip entirely when physically zero or disabled via NoiseSetting flags.
        if self.resistance.magnitude == 0 or self.temperature.magnitude == 0 or not NoiseSetting.include_thermal_noise or not NoiseSetting.include_noises:
            return

        noise_std = np.sqrt(
            4 * PhysicalConstant.kb * self.temperature * self.resistance * self.signal_digitizer.bandwidth
        )

        thermal_noise = np.random.normal(0, noise_std.to(volt).magnitude, size=len(signal)) * volt

        signal += thermal_noise

        return thermal_noise

    def _add_dark_current_noise_to_raw_signal(self, signal: pd.Series) -> np.ndarray:
        r"""
        Adds dark-current shot noise to the signal in place.

        Dark current noise arises from random electron generation in the
        detector even without light:
        \[
            \sigma_{\text{dark current}} = \sqrt{2 e I_{\text{dark}} B}
        \]
        where \( e \) is the elementary charge, \( I_{\text{dark}} \) the dark
        current and \( B \) the bandwidth. The current noise is converted to a
        voltage via the detector resistance.

        Returns
        -------
        np.ndarray
            The generated dark-current voltage noise, or None when disabled.
        """
        if self.dark_current.magnitude == 0 or not NoiseSetting.include_dark_current_noise or not NoiseSetting.include_noises:
            return

        # 1.602176634e-19 C is the elementary charge (exact, SI 2019).
        dark_current_std = np.sqrt(
            2 * 1.602176634e-19 * coulomb * self.dark_current * self.signal_digitizer.bandwidth
        )

        dark_current_noise = np.random.normal(0, dark_current_std.to(ampere).magnitude, size=len(signal)) * ampere

        dark_voltage_noise = dark_current_noise * self.resistance

        signal += dark_voltage_noise

        return dark_voltage_noise

    def _add_optical_power_to_raw_signal(self, signal: pd.Series, optical_power: Quantity, wavelength: Quantity) -> None:
        r"""
        Adds the detected optical power to the signal, with optional photon shot noise.

        When shot noise is disabled, the deterministic voltage
        \( V = P_{\text{opt}} \cdot R_{\text{det}} \cdot R_{\text{load}} \) is added.
        Otherwise photon arrivals are drawn from a Poisson distribution:

        - Photon energy: \( E_{\text{photon}} = h c / \lambda \)
        - Photon rate: \( P_{\text{opt}} / E_{\text{photon}} \)
        - Photocurrent: \( I = R_{\text{det}} \cdot P_{\text{sampled}} \)
        - Voltage: \( V = I \cdot R_{\text{load}} \)

        Parameters
        ----------
        signal : pd.Series
            Signal buffer mutated in place.
        optical_power : Quantity
            The optical power incident on the detector, in watts (W).
        wavelength : Quantity
            Wavelength of the incident light.

        Returns
        -------
        np.ndarray or None
            The shot-noise voltage contribution, or None when shot noise is disabled.
        """
        if not NoiseSetting.include_shot_noise or not NoiseSetting.include_noises:
            # Deterministic path: mean optical power converted directly to volts.
            signal += optical_power * self.responsitivity * self.resistance
            return None

        else:
            # Step 1: Compute photon energy
            energy_photon = PhysicalConstant.h * PhysicalConstant.c / wavelength  # Photon energy (J)

            # Step 2: Compute mean photon count per sampling interval
            photon_rate = optical_power / energy_photon  # Photon rate (photons/s)

            sampling_interval = 1 / self.signal_digitizer.sampling_freq  # Sampling interval (s)
            mean_photon_count = photon_rate * sampling_interval  # Mean photons per sample

            # Step 3: Simulate photon arrivals using Poisson statistics
            photon_counts_distribution = np.random.poisson(mean_photon_count.to('').magnitude, size=len(signal))

            # Step 4: Convert photon counts back to instantaneous optical power
            photon_power_distribution = photon_counts_distribution * energy_photon * self.signal_digitizer.sampling_freq

            photocurrent_distribution = self.responsitivity * photon_power_distribution  # Current (A)
            # Step 5: Convert photocurrent to shot noise voltage
            shot_noise_voltage_distribution = photocurrent_distribution * self.resistance  # Voltage (V)

            # Step 6: Add shot noise voltage to the raw signal
            signal += shot_noise_voltage_distribution

            return shot_noise_voltage_distribution

    def capture_signal(self, signal: pd.Series) -> None:
        """
        Digitizes the analog signal according to the digitizer's saturation levels.

        Builds the discretization bins from either the signal extrema ('auto')
        or an explicit (min, max) tuple, digitizes the signal, and records
        whether any sample fell outside the saturation window.
        """
        if self.signal_digitizer.saturation_levels == 'auto':
            min_level, max_level = signal.min(), signal.max()
        elif isinstance(self.signal_digitizer.saturation_levels, tuple) and len(self.signal_digitizer.saturation_levels) == 2:
            min_level, max_level = self.signal_digitizer.saturation_levels
        else:
            raise ValueError("saturation_levels must be 'auto' or a tuple of two Quantities.")

        self._saturation_levels = min_level, max_level
        # Generate bins for discretization
        # NOTE(review): `_bit_depth` is used directly as the number of bin edges;
        # presumably it already holds 2**bits — confirm against SignalDigitizer.
        bins = np.linspace(
            min_level.to(signal.pint.units).magnitude,
            max_level.to(signal.pint.units).magnitude,
            self.signal_digitizer._bit_depth
        )

        digitized_signal = np.digitize(signal.pint.magnitude, bins, right=True)

        self.is_saturated = np.any((signal < min_level) | (signal > max_level))

        # Throw a warning if saturated
        if self.is_saturated:
            logging.info("Signal values have been clipped to the saturation boundaries.")

        return digitized_signal

    @plot_helper
    def plot_raw(
        self,
        ax: Optional[plt.Axes] = None,
        time_unit: Optional[Union[str, Quantity]] = None,
        signal_unit: Optional[Union[str, Quantity]] = None,
        add_peak_locator: bool = False
    ) -> None:
        """
        Visualizes the signal and optional components (peaks, raw signal) over time.

        Parameters
        ----------
        ax : matplotlib.axes.Axes, optional
            An existing Matplotlib Axes object to plot on. If None, the
            `plot_helper` decorator supplies one.
        time_unit : str or Quantity, optional
            Desired unit for the time axis. If None, defaults to the most compact unit of the `Time` column.
        signal_unit : str or Quantity, optional
            Desired unit for the signal axis. If None, defaults to the most compact unit of the `Signal` column.
        add_peak_locator : bool, optional
            If True, adds the detected peaks (if available) to the plot. Default is False.

        Notes
        -----
        - The `Time` and `Signal` data are automatically converted to the specified units for consistency.
        - A warning is logged (and peak plotting skipped) if no peak locator was set.
        """
        # Set default units if not provided
        signal_unit = signal_unit or self.dataframe['Signal'].max().to_compact().units
        time_unit = time_unit or self.dataframe['Time'].max().to_compact().units

        x = self.dataframe['Time'].pint.to(time_unit)

        ax.plot(x, self.dataframe['Signal'].pint.to(signal_unit), color='C1', linestyle='--', label=f'{self.name}: Raw', linewidth=1)
        ax.legend(loc='upper right')

        # Overlay peak locator positions, if requested
        if add_peak_locator:
            if not hasattr(self, 'algorithm'):
                # Fix: previously the code fell through after this warning and
                # dereferenced the missing attribute, raising AttributeError.
                logging.warning("The detector does not have a peak locator algorithm. Peaks cannot be plotted.")
            else:
                self.algorithm._add_to_ax(ax=ax, signal_unit=signal_unit, time_unit=time_unit)

        # Customize labels
        ax.set_xlabel(f"Time [{time_unit:P}]")
        ax.set_ylabel(f"{self.name} [{signal_unit:P}]")

    def set_peak_locator(self, algorithm: BasePeakLocator, compute_peak_area: bool = True) -> None:
        """
        Assigns a peak detection algorithm to the detector, analyzes the signal,
        and extracts peak features such as height, width, and area.

        Parameters
        ----------
        algorithm : BasePeakLocator
            An instance of a peak detection algorithm derived from BasePeakLocator.
        compute_peak_area : bool, optional
            Whether to compute the area under the detected peaks (default is True).

        Raises
        ------
        TypeError
            If the provided algorithm is not an instance of BasePeakLocator.
        RuntimeError
            If the detector's signal data (dataframe) is not available.

        Notes
        -----
        - The algorithm instance is shallow-copied, so the caller's instance is
          not mutated by the detection run.
        - Peak detection results are stored in the algorithm's `peak_properties` attribute.
        """

        # Ensure the algorithm is an instance of BasePeakLocator
        if not isinstance(algorithm, BasePeakLocator):
            raise TypeError("The algorithm must be an instance of BasePeakLocator.")

        # Ensure the detector has signal data available for analysis
        if not hasattr(self, 'dataframe') or self.dataframe is None:
            raise RuntimeError("The detector does not have signal data available for peak detection.")

        # Set the algorithm and perform peak detection
        self.algorithm = copy(algorithm)
        self.algorithm.init_data(self.dataframe)
        self.algorithm.detect_peaks(compute_area=compute_peak_area)

        # Log the result of peak detection
        peak_count = len(self.algorithm.peak_properties) if hasattr(self.algorithm, 'peak_properties') else 0
        logging.info(f"Detector {self.name}: Detected {peak_count} peaks.")
|
FlowCyPy/directories.py
ADDED
|
@@ -0,0 +1,36 @@
|
|
|
1
|
+
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""Canonical filesystem locations for the FlowCyPy package and its docs."""

from pathlib import Path
import FlowCyPy


__all__ = [
    'root_path',
    'project_path',
    'doc_path',
    'doc_css_path',
    'logo_path'
]

# Root of the installed FlowCyPy package.
root_path = Path(FlowCyPy.__path__[0])

# Repository root: one level above the package directory.
project_path = root_path.parents[0]

example_directory = root_path.joinpath('examples')

doc_path = project_path.joinpath('docs')

doc_css_path = doc_path.joinpath('source/_static/default.css')

logo_path = doc_path.joinpath('images/logo.png')

# NOTE(review): duplicates `example_directory` above; kept so existing callers
# of either name keep working.
examples_path = root_path.joinpath('examples')

if __name__ == '__main__':
    # Sanity check: every exported path must exist on disk.
    for path_name in __all__:
        path = locals()[path_name]
        print(path)
        assert path.exists(), f"Path {path_name} does not exist"

# -
|
|
@@ -0,0 +1,16 @@
|
|
|
1
|
+
from .normal import Normal
from .lognormal import LogNormal
from .uniform import Uniform
from .delta import Delta
from .weibull import Weibull
from .base_class import Base
from .particle_size_distribution import RosinRammler

# Fix: __all__ must contain attribute *names* (strings). Listing the class
# objects themselves breaks `from FlowCyPy.distribution import *` with
# "TypeError: attribute name must be string". Membership is unchanged
# (Base was not exported before and still is not).
__all__ = [
    'Normal',
    'LogNormal',
    'Weibull',
    'Delta',
    'Uniform',
    'RosinRammler',
]
|
|
@@ -0,0 +1,79 @@
|
|
|
1
|
+
import numpy as np
|
|
2
|
+
from typing import Optional, Tuple, Callable
|
|
3
|
+
import matplotlib.pyplot as plt
|
|
4
|
+
from MPSPlots.styles import mps
|
|
5
|
+
from FlowCyPy.units import particle, Quantity
|
|
6
|
+
|
|
7
|
+
# Shared pydantic dataclass configuration for all distribution subclasses:
# allow arbitrary (pint) field types, keyword-only init, __slots__, and
# reject unknown fields.
config_dict = {
    'arbitrary_types_allowed': True,
    'kw_only': True,
    'slots': True,
    'extra': 'forbid',
}
|
|
13
|
+
|
|
14
|
+
class Base:
    """
    Base class for distributions used to define particle sizes in the flow cytometer.

    This class provides a structure for generating random scatterer sizes based on different
    statistical distributions. Each subclass must implement the `generate` method to generate
    a distribution of sizes and `get_pdf` to compute the probability density function (PDF)
    values.

    Parameters
    ----------
    scale_factor : float
        A scaling factor applied to the PDF of the distribution. By default, it is set to 1
        (equal weight).
    """

    scale_factor: Optional[float] = 1.0

    def generate(self, n_samples: int) -> np.ndarray:
        """Generate a distribution of scatterer sizes."""
        raise NotImplementedError("Must be implemented by subclasses")

    def get_pdf(self, x: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
        """Compute the probability density function (PDF) values."""
        raise NotImplementedError("Must be implemented by subclasses")

    def plot(self, n_samples: int = 4000, bins: int = 50) -> None:
        """
        Plots a histogram of the generated particle sizes.

        Parameters
        ----------
        n_samples : int, optional
            The number of particle sizes to generate for the histogram (default is 4000).
        bins : int, optional
            The number of bins in the histogram (default is 50).
        """
        # Delegates to the subclass implementation; the pre_generate decorator
        # expects a Quantity carrying the 'particle' dimension.
        samples = self.generate(Quantity(n_samples, particle))

        # Plotting the histogram using the project's matplotlib style.
        with plt.style.context(mps):
            figure, ax = plt.subplots(1, 1)
            ax.hist(samples, bins=bins, color='blue', edgecolor='black', alpha=0.7)

            ax.set(
                title='Distribution',
                xlabel=f'Distributed parameter [{self._units}]',
                ylabel='Probability Density Function (PDF)'
            )

            plt.show()

    def __str__(self) -> str:
        return self.__repr__()

    def pre_generate(function: Callable) -> Callable:
        """
        Decorator for subclass `generate` implementations.

        Validates that `n_samples` is a pint Quantity with 'particle'
        dimensionality, strips the unit before calling the wrapped
        implementation, and re-attaches the distribution's units
        (`self._units`) to the result.
        """
        def wrapper(self, n_samples: Quantity):
            # Fix: the previous message claimed a *dimensionless* Quantity was
            # required (and misspelled the parameter), while the check demands
            # 'particle' dimensionality.
            if not isinstance(n_samples, Quantity) or not n_samples.check("particle"):
                raise ValueError("n_samples must be a Quantity with 'particle' dimensionality.")

            return function(self=self, n_samples=n_samples.magnitude) * self._units

        return wrapper
|
|
@@ -0,0 +1,104 @@
|
|
|
1
|
+
from FlowCyPy.distribution.base_class import Base, config_dict
|
|
2
|
+
import numpy as np
|
|
3
|
+
from typing import Tuple
|
|
4
|
+
from PyMieSim.units import Quantity
|
|
5
|
+
from pydantic.dataclasses import dataclass
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
@dataclass(config=config_dict)
class Delta(Base):
    r"""
    Represents a delta Dirac distribution for particle sizes.

    In a delta Dirac distribution, all particle sizes are the same, represented by the Dirac delta function:

    .. math::
        f(x) = \delta(x - x_0)

    where:
    - :math:`x_0` is the singular particle size.

    Parameters
    ----------
    position : Quantity
        The particle size for the delta distribution in meters.
    """

    # The singular size at which all probability mass sits.
    position: Quantity

    @property
    def _units(self) -> Quantity:
        # Units of the distributed parameter; Base.pre_generate multiplies the
        # generated magnitudes by this to re-attach units.
        return self.position.units

    def __post_init__(self):
        # NOTE(review): `_main_units` is not a declared dataclass field; with
        # `slots=True` in the shared config this assignment relies on the
        # instance still exposing a __dict__ (e.g. via a non-slotted base) —
        # confirm against the pydantic version in use.
        self._main_units = self.position.units

    @Base.pre_generate
    def generate(self, n_samples: int) -> Quantity:
        r"""
        Generates a singular distribution of scatterer sizes.

        All sizes generated will be exactly the same as `position`.

        Parameters
        ----------
        n_samples : int
            The number of particle sizes to generate. (Callers pass a
            particle-dimensioned Quantity; the `pre_generate` decorator strips
            the unit before this body runs.)

        Returns
        -------
        Quantity
            An array of identical scatterer sizes, in the units of `position`.
        """
        return np.ones(n_samples) * self.position.magnitude

    def _generate_default_x(self, x_min_factor: float = 0.9, x_max_factor: float = 1.1, n_samples: int = 100) -> Quantity:
        """
        Generates a default range of x-values around the `position`.

        Parameters
        ----------
        x_min_factor : float, optional
            Factor for the minimum x-value relative to the `position`. Default is 0.9 (90% of the position).
        x_max_factor : float, optional
            Factor for the maximum x-value relative to the `position`. Default is 1.1 (110% of the position).
        n_samples : int, optional
            Number of points in the generated range. Default is 100.

        Returns
        -------
        Quantity
            A range of x-values with appropriate units.

        Raises
        ------
        ValueError
            If `x_min_factor` is not strictly less than `x_max_factor`.
        """
        if x_min_factor >= x_max_factor:
            raise ValueError("x_min_factor must be less than x_max_factor.")

        x_min = self.position.magnitude * x_min_factor
        x_max = self.position.magnitude * x_max_factor
        return np.linspace(x_min, x_max, n_samples) * self.position.units

    def get_pdf(self, x_min_factor: float = 0.99, x_max_factor: float = 1.01, n_samples: int = 21) -> Tuple[Quantity, np.ndarray]:
        r"""
        Returns the x-values and the scaled PDF values for the singular distribution.

        Parameters
        ----------
        x_min_factor : float, optional
            Lower bound of the sampled window relative to `position` (default 0.99).
        x_max_factor : float, optional
            Upper bound of the sampled window relative to `position` (default 1.01).
        n_samples : int, optional
            Number of x-values in the window (default 21).

        Returns
        -------
        Tuple[Quantity, np.ndarray]
            The input x-values and the corresponding scaled PDF values: all
            zeros except a unit spike at the x-value closest to `position`.
        """
        x = self._generate_default_x(
            x_min_factor=x_min_factor,
            x_max_factor=x_max_factor,
            n_samples=n_samples
        )

        common_units = x.units
        pdf = np.zeros_like(x.magnitude)

        # Find the closest x-value to the delta position
        idx = (np.abs(x.magnitude - self.position.to(common_units).magnitude)).argmin()
        pdf[idx] = 1.0  # Dirac delta spike at the closest x-value
        return x, pdf

    def __repr__(self) -> str:
        return f"Delta(position={self.position:.3f~P})"
|