FlowCyPy 0.5.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44) hide show
  1. FlowCyPy/__init__.py +15 -0
  2. FlowCyPy/_version.py +16 -0
  3. FlowCyPy/classifier.py +196 -0
  4. FlowCyPy/coupling_mechanism/__init__.py +4 -0
  5. FlowCyPy/coupling_mechanism/empirical.py +47 -0
  6. FlowCyPy/coupling_mechanism/mie.py +205 -0
  7. FlowCyPy/coupling_mechanism/rayleigh.py +115 -0
  8. FlowCyPy/coupling_mechanism/uniform.py +39 -0
  9. FlowCyPy/cytometer.py +198 -0
  10. FlowCyPy/detector.py +616 -0
  11. FlowCyPy/directories.py +36 -0
  12. FlowCyPy/distribution/__init__.py +16 -0
  13. FlowCyPy/distribution/base_class.py +59 -0
  14. FlowCyPy/distribution/delta.py +86 -0
  15. FlowCyPy/distribution/lognormal.py +94 -0
  16. FlowCyPy/distribution/normal.py +95 -0
  17. FlowCyPy/distribution/particle_size_distribution.py +110 -0
  18. FlowCyPy/distribution/uniform.py +96 -0
  19. FlowCyPy/distribution/weibull.py +80 -0
  20. FlowCyPy/event_correlator.py +244 -0
  21. FlowCyPy/flow_cell.py +122 -0
  22. FlowCyPy/helper.py +85 -0
  23. FlowCyPy/logger.py +322 -0
  24. FlowCyPy/noises.py +29 -0
  25. FlowCyPy/particle_count.py +102 -0
  26. FlowCyPy/peak_locator/__init__.py +4 -0
  27. FlowCyPy/peak_locator/base_class.py +163 -0
  28. FlowCyPy/peak_locator/basic.py +108 -0
  29. FlowCyPy/peak_locator/derivative.py +143 -0
  30. FlowCyPy/peak_locator/moving_average.py +114 -0
  31. FlowCyPy/physical_constant.py +19 -0
  32. FlowCyPy/plottings.py +270 -0
  33. FlowCyPy/population.py +239 -0
  34. FlowCyPy/populations_instances.py +49 -0
  35. FlowCyPy/report.py +236 -0
  36. FlowCyPy/scatterer.py +373 -0
  37. FlowCyPy/source.py +249 -0
  38. FlowCyPy/units.py +26 -0
  39. FlowCyPy/utils.py +191 -0
  40. FlowCyPy-0.5.0.dist-info/LICENSE +21 -0
  41. FlowCyPy-0.5.0.dist-info/METADATA +252 -0
  42. FlowCyPy-0.5.0.dist-info/RECORD +44 -0
  43. FlowCyPy-0.5.0.dist-info/WHEEL +5 -0
  44. FlowCyPy-0.5.0.dist-info/top_level.txt +1 -0
FlowCyPy/__init__.py ADDED
@@ -0,0 +1,15 @@
1
+ try:
2
+ from ._version import version as __version__ # noqa: F401
3
+
4
+ except ImportError:
5
+ __version__ = "0.0.0"
6
+
7
+ from .units import ureg, watt, meter, second, liter, particle
8
+ from .cytometer import FlowCytometer
9
+ from .event_correlator import EventCorrelator
10
+ from .scatterer import Scatterer, CouplingModel
11
+ from .population import Population
12
+ from .detector import Detector
13
+ from .flow_cell import FlowCell
14
+ from .source import GaussianBeam
15
+ from .noises import NoiseSetting
FlowCyPy/_version.py ADDED
@@ -0,0 +1,16 @@
1
# file generated by setuptools_scm
# don't change, don't track in version control

# During static type checking expose the precise tuple type; at runtime keep
# the module import-light by aliasing it to ``object``.
TYPE_CHECKING = False
if TYPE_CHECKING:
    from typing import Tuple, Union
    VERSION_TUPLE = Tuple[Union[int, str], ...]
else:
    VERSION_TUPLE = object

version: str
__version__: str
__version_tuple__: VERSION_TUPLE
version_tuple: VERSION_TUPLE

# Both spellings are provided so callers can use either convention.
__version__ = version = '0.5.0'
__version_tuple__ = version_tuple = (0, 5, 0)
FlowCyPy/classifier.py ADDED
@@ -0,0 +1,196 @@
1
+ from sklearn.cluster import KMeans
2
+ from sklearn.cluster import DBSCAN
3
+ import pandas as pd
4
+ from typing import List, Dict, Tuple
5
+
6
+
7
class BaseClassifier:
    def filter_dataframe(self, features: list, detectors: list = None) -> object:
        """
        Select the subset of columns matching the requested detectors and features.

        Parameters
        ----------
        features : list
            Feature names to keep (e.g. 'Heights', 'Widths', 'Areas').
        detectors : list, optional
            Detector names to keep. When None, every detector present in the
            dataframe's first column level is used.

        Returns
        -------
        DataFrame
            A view of ``self.dataframe`` restricted to the matching
            (detector, feature) column pairs.

        Raises
        ------
        ValueError
            If no (detector, feature) pair exists in the dataframe.
        """
        # Default to every detector found in the multi-index column header.
        if detectors is None:
            detectors = list(self.dataframe.columns.get_level_values(0).unique())

        # Collect the (detector, feature) pairs that actually exist,
        # detector-major to keep a stable column order.
        wanted = []
        for det in detectors:
            for feat in features:
                column = (det, feat)
                if column in self.dataframe.columns:
                    wanted.append(column)

        if not wanted:
            raise ValueError("No matching features found for the given detectors and features.")

        return self.dataframe[wanted]
44
+
45
+
46
class KmeansClassifier(BaseClassifier):
    def __init__(self, dataframe: object) -> None:
        """
        Initialize the KMeans-based classifier.

        Parameters
        ----------
        dataframe : DataFrame
            The input dataframe with multi-index (detector, feature) columns.
            A 'Label' column is added and initialized to 0.
        """
        self.dataframe = dataframe
        # Every event starts in cluster 0 until run() assigns real labels.
        self.dataframe['Label'] = 0

    def run(self, number_of_cluster: int, features: list = ['Heights'], detectors: list = None, random_state: int = 42) -> None:
        """
        Cluster the selected features with KMeans and store labels in 'Label'.

        Parameters
        ----------
        number_of_cluster : int
            Number of clusters for KMeans.
        features : list
            Feature names to cluster on ('Heights', 'Widths', 'Areas').
        detectors : list, optional
            Detector names to use; all detectors when None.
        random_state : int, optional
            Random state forwarded to KMeans, by default 42.
        """
        data = self.filter_dataframe(features=features, detectors=detectors)

        # Strip pint units if the dataframe carries them.
        if hasattr(data, 'pint'):
            data = data.pint.dequantify()

        model = KMeans(n_clusters=number_of_cluster, random_state=random_state)
        self.dataframe['Label'] = model.fit_predict(data)
86
+
87
class DBScanClassifier(BaseClassifier):
    def __init__(self, dataframe: object) -> None:
        """
        Initialize the DBSCAN-based classifier.

        Parameters
        ----------
        dataframe : DataFrame
            The input dataframe with multi-index (detector, feature) columns.
            A 'Label' column is added and initialized to -1 (DBSCAN noise).
        """
        self.dataframe = dataframe
        # -1 is DBSCAN's noise label; every event starts as noise.
        self.dataframe['Label'] = -1

    def run(self, eps: float = 0.5, min_samples: int = 5, features: list = ['Heights'], detectors: list = None) -> None:
        """
        Cluster the selected features with DBSCAN and store labels in 'Label'.

        Parameters
        ----------
        eps : float, optional
            Maximum neighborhood distance between two samples, by default 0.5.
        min_samples : int, optional
            Minimum neighborhood size for a core point, by default 5.
        features : list
            Feature names to cluster on ('Heights', 'Widths', 'Areas').
        detectors : list, optional
            Detector names to use; all detectors when None.
        """
        data = self.filter_dataframe(features=features, detectors=detectors)

        # Strip pint units if the dataframe carries them.
        if hasattr(data, 'pint'):
            data = data.pint.dequantify()

        # DBSCAN cannot handle NaNs; fill them before converting to ndarray.
        matrix = data.fillna(0).to_numpy()

        model = DBSCAN(eps=eps, min_samples=min_samples)
        self.dataframe['Label'] = model.fit_predict(matrix)
128
+
129
+
130
class RangeClassifier:
    """
    A classifier for assigning population labels based on defined ranges.

    Parameters
    ----------
    dataframe : pd.DataFrame
        The input dataframe whose index values are classified.

    Attributes
    ----------
    dataframe : pd.DataFrame
        The dataframe; ``run`` adds/overwrites a 'Label' column.
    ranges : List[Tuple[float, float, str]]
        The list of (lower, upper, label) ranges applied by the last ``run``.
    """

    def __init__(self, dataframe: pd.DataFrame) -> None:
        """
        Initialize the classifier.

        Parameters
        ----------
        dataframe : pd.DataFrame
            The input dataframe whose index values are classified.
        """
        self.dataframe = dataframe
        self.ranges = []  # To store the ranges and their labels

    def run(self, ranges: Dict[str, Tuple[float, float]]) -> None:
        """
        Assign population labels based on ranges applied to the index.

        Each index value falling in the half-open interval ``[lower, upper)``
        of a range receives that range's label; when ranges overlap, the first
        matching range wins. Index values outside every range are left
        unlabeled (NaN).

        Parameters
        ----------
        ranges : dict
            A dictionary where keys are population names (labels) and values
            are (lower, upper) bound tuples for that population.

        Example
        -------
        >>> ranges = {
        >>>     'Population 0': (0, 100),
        >>>     'Population 1': (100, 150),
        >>>     'Population 2': (150, 200)
        >>> }
        >>> classifier.run(ranges)
        """
        # Record the applied ranges for later inspection.
        self.ranges = [(lower, upper, label) for label, (lower, upper) in ranges.items()]

        # Bug fix: the previous implementation built per-range conditions but
        # never used them, and classified with pd.cut over the upper bounds
        # only — ignoring each range's lower bound, so values below the first
        # range (or in gaps between ranges) were mislabeled. Apply the
        # documented [lower, upper) semantics explicitly instead.
        idx = self.dataframe.index
        assigned = pd.Series(index=idx, dtype=object)
        for label, (lower, upper) in ranges.items():
            in_range = (idx >= lower) & (idx < upper)
            # Only fill still-unassigned rows so the first match wins.
            mask = in_range & assigned.isna().to_numpy()
            assigned.iloc[mask] = label

        self.dataframe['Label'] = assigned
196
+
@@ -0,0 +1,4 @@
1
+ from . import uniform
2
+ from . import rayleigh
3
+ from . import mie
4
+ from . import empirical
@@ -0,0 +1,47 @@
1
+ import numpy as np
2
+ from FlowCyPy import Scatterer, Detector
3
+ from FlowCyPy.source import BaseBeam
4
+ from FlowCyPy.units import watt, meter
5
+
6
+
7
def compute_detected_signal(source: BaseBeam, detector: Detector, scatterer: Scatterer, granularity: float = 1.0, A: float = 1.5, n: float = 2.0) -> float:
    """
    Empirical model for scattering intensity based on particle size, granularity, and detector angle.

    Forward scatter (FSC) is modeled as proportional to the particle's size squared,
    and side scatter (SSC) as proportional to the granularity, modulated by an angular
    dependence sin^n(phi). Granularity is a dimensionless measure of the particle's
    internal complexity or surface irregularities:

    - A default value of 1.0 is used for moderate granularity (e.g., typical white blood cells).
    - Granularity values < 1.0 represent smoother particles with less internal complexity (e.g., bacteria).
    - Granularity values > 1.0 represent particles with higher internal complexity or surface irregularities (e.g., granulocytes).

    Parameters
    ----------
    source : BaseBeam
        The light source (not used by this empirical model; kept for interface parity
        with the other coupling mechanisms).
    detector : Detector
        The detector object; its ``phi_angle`` selects the FSC vs SSC regime.
    scatterer : Scatterer
        The scatterer whose 'Size' column provides the particle sizes (converted to meters).
    granularity : float, optional
        A measure of the particle's internal complexity or surface irregularities (dimensionless).
        Default is 1.0.
    A : float, optional
        Empirical scaling factor for angular dependence. Default is 1.5.
    n : float, optional
        Power of sine function for angular dependence. Default is 2.0.

    Returns
    -------
    float
        The detected scattering intensity (in watts) for the given particles and detector.
    """
    size_list = scatterer.dataframe['Size'].pint.to(meter).values.numpy_data

    # Convert the detector angle once so the branch test and the sine term use
    # the same units. NOTE(review): assumes detector.phi_angle is expressed in
    # degrees, consistent with the original sine computation — confirm.
    phi_rad = np.radians(detector.phi_angle)

    # Forward scatter is proportional to size^2
    fsc_intensity = size_list**2

    # Side scatter is proportional to granularity and modulated by angular dependence
    ssc_intensity = granularity * (1 + A * np.sin(phi_rad)**n) * np.ones_like(size_list)

    # Detectors within 10 degrees of the beam axis are treated as forward scatter.
    # Bug fix: the original compared the raw (degree) angle against
    # np.radians(10) ~= 0.17, so the FSC branch was effectively unreachable.
    return fsc_intensity * watt if phi_rad < np.radians(10) else ssc_intensity * watt
@@ -0,0 +1,205 @@
1
+ import numpy as np
2
+ from FlowCyPy import Scatterer, Detector
3
+ from FlowCyPy.source import BaseBeam
4
+ from PyMieSim.experiment.scatterer import Sphere as PMS_SPHERE
5
+ from PyMieSim.experiment.source import PlaneWave
6
+ from PyMieSim.experiment.detector import Photodiode as PMS_PHOTODIODE
7
+ from PyMieSim.experiment import Setup
8
+ from PyMieSim.units import degree, watt, AU, hertz
9
+ from FlowCyPy.noises import NoiseSetting
10
+
11
+
12
def apply_rin_noise(source: BaseBeam, total_size: int, bandwidth: float) -> np.ndarray:
    r"""
    Apply Relative Intensity Noise (RIN) to the source amplitude, scaled by the
    detection bandwidth.

    RIN quantifies fluctuations of the laser intensity relative to its mean and
    is assumed here to be specified as a power spectral density in dB/Hz. It is
    converted to linear scale,

    .. math::
        \text{RIN (linear)} = 10^{\text{RIN (dB/Hz)} / 10},

    integrated over the detection bandwidth :math:`B`, and the resulting
    amplitude fluctuation

    .. math::
        \sigma_{\text{amplitude}} = \sqrt{\text{RIN (linear)} \cdot B} \cdot \text{Amplitude}

    is added as zero-mean Gaussian noise. Noise is only applied when both
    ``NoiseSetting.include_RIN_noise`` and ``NoiseSetting.include_noises`` are
    enabled; otherwise a constant-amplitude array is returned.

    Parameters
    ----------
    source : BaseBeam
        The light source containing amplitude and RIN information.
    total_size : int
        The number of particles being simulated.
    bandwidth : float
        The detection bandwidth (a pint quantity convertible to Hz).

    Returns
    -------
    np.ndarray
        Array of amplitudes (carrying the source's units) with RIN noise applied.
    """
    amplitudes = np.ones(total_size) * source.amplitude

    # Noise disabled: return the constant-amplitude array untouched.
    if not (NoiseSetting.include_RIN_noise and NoiseSetting.include_noises):
        return amplitudes

    # dB/Hz -> linear power spectral density.
    rin_linear = 10**(source.RIN / 10)

    # Integrate over the detection bandwidth to get the amplitude sigma.
    sigma = np.sqrt(rin_linear * bandwidth.to(hertz).magnitude) * source.amplitude

    # Zero-mean Gaussian fluctuations, re-attached to the amplitude units.
    fluctuations = np.random.normal(
        loc=0,
        scale=sigma.to(source.amplitude.units).magnitude,
        size=total_size
    ) * source.amplitude.units

    return amplitudes + fluctuations
95
+
96
+
97
def initialize_scatterer(scatterer: Scatterer, source: PlaneWave) -> PMS_SPHERE:
    """
    Build the PyMieSim sphere collection for the experiment.

    Parameters
    ----------
    scatterer : Scatterer
        The scatterer object containing particle data ('Size' and
        'RefractiveIndex' columns).
    source : PlaneWave
        The light source for the simulation.

    Returns
    -------
    PMS_SPHERE
        Initialized scatterer for the experiment.

    Raises
    ------
    ValueError
        If the scatterer contains no particles.
    """
    sizes = scatterer.dataframe['Size'].values
    indices = scatterer.dataframe['RefractiveIndex'].values

    if len(sizes) == 0:
        raise ValueError("Scatterer size list is empty.")

    # Re-wrap the pint-pandas arrays as plain pint quantities for PyMieSim.
    sizes = sizes.quantity.magnitude * sizes.units
    indices = indices.quantity.magnitude * indices.units

    # Every particle shares the same surrounding medium index.
    medium = np.ones(len(sizes)) * scatterer.medium_refractive_index

    return PMS_SPHERE(
        diameter=sizes,
        property=indices,
        medium_property=medium,
        source=source
    )
128
+
129
+
130
def initialize_detector(detector: Detector, total_size: int) -> PMS_PHOTODIODE:
    """
    Build the PyMieSim photodiode for the experiment, replicated per particle.

    Parameters
    ----------
    detector : Detector
        The detector object containing configuration data (NA, angles, sampling).
    total_size : int
        The number of particles being simulated; every detector parameter is
        broadcast to this length.

    Returns
    -------
    PMS_PHOTODIODE
        Initialized detector for the experiment.
    """
    ones = np.ones(total_size)

    return PMS_PHOTODIODE(
        NA=ones * detector.numerical_aperture,
        # No caching aperture and no polarization filter (NaN disables it).
        cache_NA=ones * 0 * AU,
        gamma_offset=ones * detector.gamma_angle,
        phi_offset=ones * detector.phi_angle,
        polarization_filter=ones * np.nan * degree,
        sampling=ones * detector.sampling
    )
156
+
157
+
158
def compute_detected_signal(source: BaseBeam, detector: Detector, scatterer: Scatterer, tolerance: float = 1e-5) -> np.ndarray:
    """
    Computes the detected signal by analyzing the scattering properties of particles.

    Sets up a full PyMieSim experiment (plane-wave source with optional RIN
    noise, sphere collection, photodiode) and evaluates the source/scatterer/
    detector coupling for each particle sequentially.

    Parameters
    ----------
    source : BaseBeam
        The light source object containing wavelength, power, and other optical properties.
    detector : Detector
        The detector object containing properties such as numerical aperture and angles.
    scatterer : Scatterer
        The scatterer object containing particle size and refractive index data.
    tolerance : float, optional
        The tolerance for deciding if two values of size and refractive index are
        "close enough" to be cached.
        NOTE(review): this parameter is currently unused — no caching is
        implemented in this function body; confirm intent.

    Returns
    -------
    np.ndarray
        Array of coupling values (in watts) for each particle, based on the detected signal.
    """
    size_list = scatterer.dataframe['Size'].values

    # No particles: return an empty power array rather than running PyMieSim.
    if len(size_list) == 0:
        return np.array([]) * watt

    total_size = len(size_list)
    # Per-particle amplitudes, optionally perturbed by RIN over the detector bandwidth.
    amplitude_with_rin = apply_rin_noise(source, total_size, detector.bandwidth)

    # One source entry per particle; polarization fixed at 0 degrees.
    pms_source = PlaneWave(
        wavelength=np.ones(total_size) * source.wavelength,
        polarization=np.ones(total_size) * 0 * degree,
        amplitude=amplitude_with_rin
    )

    pms_scatterer = initialize_scatterer(scatterer, pms_source)
    pms_detector = initialize_detector(detector, total_size)

    # Configure the detector
    # Mode/rotation are assigned after construction, then __post_init__ is
    # re-invoked so PyMieSim rebuilds its internal state from the new fields.
    pms_detector.mode_number = ['NC00'] * total_size
    pms_detector.rotation = np.ones(total_size) * 0 * degree
    pms_detector.__post_init__()

    # Set up the experiment
    experiment = Setup(source=pms_source, scatterer=pms_scatterer, detector=pms_detector)

    # Compute coupling values
    # squeeze() collapses the singleton experiment axes; atleast_1d keeps the
    # single-particle case an array.
    coupling_value = experiment.get_sequential('coupling').squeeze()
    return np.atleast_1d(coupling_value) * watt
@@ -0,0 +1,115 @@
1
+
2
+ import numpy as np
3
+ from FlowCyPy import Scatterer, Detector
4
+ from FlowCyPy.source import BaseBeam
5
+ from FlowCyPy.units import meter
6
+
7
+
8
def compute_scattering_cross_section(scatterer: Scatterer, source: BaseBeam, detector: Detector) -> np.ndarray:
    r"""
    Computes the Rayleigh scattering cross-section for a spherical particle with angle dependency.

    The Rayleigh scattering cross-section depends on the angle at which the scattered light is observed.
    The total scattering cross-section is modified by a factor of :math:`\sin^2(\phi)`, where :math:`\phi`
    is the angle between the direction of the incident light and the scattered light, as observed by the detector.

    The Rayleigh scattering cross-section is given by the formula:

    .. math::
        \sigma_s(\phi) = \sigma_0 \sin^2(\phi)

    Where:
    - :math:`\sigma_s(\phi)` is the scattering cross-section at angle :math:`\phi` (in m²),
    - :math:`\sigma_0 = \frac{8 \pi}{3} \left( \frac{2 \pi}{\lambda} \right)^4 \left( \frac{n^2 - 1}{n^2 + 2} \right)^2 r^6` is the total Rayleigh scattering cross-section (in m²),
    - :math:`r` is the radius of the particle (in m),
    - :math:`\lambda` is the wavelength of the incident light (in m),
    - :math:`n` is the refractive index of the scatterer (dimensionless),
    - :math:`\phi` is the angle of observation (in degrees — see note below).

    Parameters
    ----------
    scatterer : Scatterer
        An instance of `Scatterer` containing the scatterer properties such as size and refractive index.
    source : BaseBeam
        An instance of `BaseBeam` containing the laser properties, including the wavelength.
    detector : Detector
        An instance of `Detector` that contains the angle of observation (`phi_angle`).
        NOTE(review): the code converts this angle with ``* pi / 180``, i.e. it
        treats ``phi_angle`` as degrees, although earlier docs said radians —
        confirm the detector's angle unit convention.

    Returns
    -------
    np.ndarray
        The angle-dependent Rayleigh scattering cross-section (in square meters, m²).
    """

    size_list = scatterer.dataframe['Size'].pint.to(meter).values.numpy_data
    ri_list = scatterer.dataframe['RefractiveIndex'].values.numpy_data

    # Extract properties
    wavelength = source.wavelength
    phi = detector.phi_angle  # Angle of observation (treated as degrees below)

    # Rayleigh scattering cross-section formula components
    factor_0 = 8 * np.pi / 3
    factor_1 = (2 * np.pi / wavelength) ** 4

    # Clausius-Mossotti-like polarizability term ((n^2-1)/(n^2+2))^2.
    factor_2 = ((ri_list ** 2 - 1) / (ri_list ** 2 + 2)) ** 2

    # Compute the total Rayleigh scattering cross-section (assuming size in meters)
    sigma_0 = factor_0 * factor_1 * factor_2 * size_list ** 6

    # Modify by the angular dependency: sin^2(phi), converting degrees to radians.
    cross_section = sigma_0 * np.sin(phi * np.pi / 180) ** 2

    # NOTE(review): strips whatever units resulted from the computation and
    # re-tags the magnitude as m² — assumes all inputs were in SI; confirm.
    return cross_section.magnitude * meter**2
64
+
65
+
66
def compute_detected_signal(source: BaseBeam, detector: Detector, scatterer: Scatterer) -> float:
    r"""
    Computes the power detected by a detector from a Rayleigh scattering event.

    The power scattered by a particle is proportional to the power density of the incident light
    and the scattering cross-section of the particle:

    .. math::
        P_s = I_{\text{incident}} \cdot \sigma_s

    The power detected by the detector depends on the solid angle subtended by the detector
    and the total solid angle over which the power is scattered (assumed to be \( 4\pi \) steradians):

    .. math::
        P_{\text{detected}} = P_s \cdot \frac{\Omega}{4 \pi} \cdot \eta

    Where:
    - :math:`P_{\text{detected}}` is the power detected by the detector (in watts, W).
    - :math:`\Omega` is the solid angle subtended by the detector (in steradians, sr).
    - :math:`\eta` is the detector efficiency (dimensionless, between 0 and 1).

    Parameters
    ----------
    source : BaseBeam
        An instance of `BaseBeam` containing the laser properties, including the optical power and numerical aperture.
    detector : Detector
        An instance of `Detector` containing the detector properties, including numerical aperture and responsitivity.
    scatterer : Scatterer
        An instance of `Scatterer` providing particle sizes and refractive indices.

    Returns
    -------
    float
        The power detected by the detector (in watts, W).
    """
    cross_section = compute_scattering_cross_section(
        source=source,
        scatterer=scatterer,
        detector=detector
    )

    # Peak power density of a Gaussian beam at its waist.
    power_density = (2 * source.optical_power) / (np.pi * source.waist ** 2)

    # Power scattered by each particle.
    scattered_power = power_density * cross_section

    # Fraction captured by the detector: NA^2 approximates the solid angle,
    # out of the full 4π steradians of scattered light.
    solid_angle = detector.numerical_aperture ** 2
    return scattered_power * (solid_angle / (4 * np.pi))
@@ -0,0 +1,39 @@
1
+ import numpy as np
2
+ from FlowCyPy import Scatterer, Detector, ureg
3
+ from FlowCyPy.source import BaseBeam
4
+
5
+
6
+ def compute_detected_signal(source: BaseBeam, detector: Detector, scatterer: Scatterer) -> np.ndarray:
7
+ r"""
8
+ Computes the power detected by a detector from a uniform distribution.
9
+
10
+ The power scattered by a particle is proportional to the power density of the incident light
11
+ and the scattering cross-section of the particle:
12
+
13
+ .. math::
14
+ P_s = I_{\text{incident}} \cdot \sigma_s
15
+
16
+ The power detected by the detector depends on the solid angle subtended by the detector
17
+ and the total solid angle over which the power is scattered (assumed to be \( 4\pi \) steradians):
18
+
19
+ .. math::
20
+ P_{\text{detected}} = P_s \cdot \frac{\Omega}{4 \pi} \cdot \eta
21
+
22
+ Where:
23
+ - :math:`P_{\text{detected}}` is the power detected by the detector (in watts, W).
24
+ - :math:`\Omega` is the solid angle subtended by the detector (in steradians, sr).
25
+ - :math:`\eta` is the detector efficiency (dimensionless, between 0 and 1).
26
+
27
+ Parameters
28
+ ----------
29
+ source : BaseBeam
30
+ An instance of `BaseBeam` containing the laser properties, including the optical power and numerical aperture.
31
+ detector : Detector
32
+ An instance of `Detector` containing the detector properties, including numerical aperture and responsitivity.
33
+
34
+ Returns
35
+ -------
36
+ float
37
+ The power detected by the detector (in watts, W).
38
+ """
39
+ return np.ones(len(scatterer.dataframe)) * ureg.watt