AMS-BP 0.0.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (55)
  1. AMS_BP/__init__.py +13 -0
  2. AMS_BP/cells/__init__.py +5 -0
  3. AMS_BP/cells/base_cell.py +55 -0
  4. AMS_BP/cells/rectangular_cell.py +82 -0
  5. AMS_BP/cells/rod_cell.py +98 -0
  6. AMS_BP/cells/spherical_cell.py +74 -0
  7. AMS_BP/configio/__init__.py +0 -0
  8. AMS_BP/configio/configmodels.py +93 -0
  9. AMS_BP/configio/convertconfig.py +910 -0
  10. AMS_BP/configio/experiments.py +121 -0
  11. AMS_BP/configio/saving.py +32 -0
  12. AMS_BP/metadata/__init__.py +0 -0
  13. AMS_BP/metadata/metadata.py +87 -0
  14. AMS_BP/motion/__init__.py +4 -0
  15. AMS_BP/motion/condensate_movement.py +356 -0
  16. AMS_BP/motion/movement/__init__.py +10 -0
  17. AMS_BP/motion/movement/boundary_conditions.py +75 -0
  18. AMS_BP/motion/movement/fbm_BP.py +244 -0
  19. AMS_BP/motion/track_gen.py +541 -0
  20. AMS_BP/optics/__init__.py +0 -0
  21. AMS_BP/optics/camera/__init__.py +4 -0
  22. AMS_BP/optics/camera/detectors.py +320 -0
  23. AMS_BP/optics/camera/quantum_eff.py +66 -0
  24. AMS_BP/optics/filters/__init__.py +17 -0
  25. AMS_BP/optics/filters/channels/__init__.py +0 -0
  26. AMS_BP/optics/filters/channels/channelschema.py +27 -0
  27. AMS_BP/optics/filters/filters.py +184 -0
  28. AMS_BP/optics/lasers/__init__.py +28 -0
  29. AMS_BP/optics/lasers/laser_profiles.py +691 -0
  30. AMS_BP/optics/psf/__init__.py +7 -0
  31. AMS_BP/optics/psf/psf_engine.py +215 -0
  32. AMS_BP/photophysics/__init__.py +0 -0
  33. AMS_BP/photophysics/photon_physics.py +181 -0
  34. AMS_BP/photophysics/state_kinetics.py +146 -0
  35. AMS_BP/probabilityfuncs/__init__.py +0 -0
  36. AMS_BP/probabilityfuncs/markov_chain.py +143 -0
  37. AMS_BP/probabilityfuncs/probability_functions.py +350 -0
  38. AMS_BP/run_cell_simulation.py +217 -0
  39. AMS_BP/sample/__init__.py +0 -0
  40. AMS_BP/sample/flurophores/__init__.py +16 -0
  41. AMS_BP/sample/flurophores/flurophore_schema.py +290 -0
  42. AMS_BP/sample/sim_sampleplane.py +334 -0
  43. AMS_BP/sim_config.toml +418 -0
  44. AMS_BP/sim_microscopy.py +453 -0
  45. AMS_BP/utils/__init__.py +0 -0
  46. AMS_BP/utils/constants.py +11 -0
  47. AMS_BP/utils/decorators.py +227 -0
  48. AMS_BP/utils/errors.py +37 -0
  49. AMS_BP/utils/maskMaker.py +12 -0
  50. AMS_BP/utils/util_functions.py +319 -0
  51. ams_bp-0.0.2.dist-info/METADATA +173 -0
  52. ams_bp-0.0.2.dist-info/RECORD +55 -0
  53. ams_bp-0.0.2.dist-info/WHEEL +4 -0
  54. ams_bp-0.0.2.dist-info/entry_points.txt +2 -0
  55. ams_bp-0.0.2.dist-info/licenses/LICENSE +21 -0
@@ -0,0 +1,121 @@
1
+ from dataclasses import dataclass
2
+ from typing import List, Optional, Tuple
3
+
4
+ import numpy as np
5
+
6
+ from ..metadata.metadata import MetaData
7
+ from ..sim_microscopy import VirtualMicroscope
8
+
9
+
10
@dataclass
class BaseExpConfig:
    """Common fields shared by all experiment configurations."""

    # Human-readable identifier for the experiment.
    name: str
    # Free-form description of what the experiment measures/simulates.
    description: str
14
+
15
+
16
@dataclass
class TimeSeriesExpConfig(BaseExpConfig):
    """Configuration for a single-plane time-series experiment.

    The active-laser lists are parallel: entry i of each list refers to the
    same laser. After validation, ``laser_powers`` and ``laser_positions``
    are built as name-keyed lookup dicts.

    Timing must come from the GlobalParameters, so the three optional
    timing fields here must be left as None.

    Raises:
        ValueError: if the active-laser lists differ in length, or if any
            timing field is set.
    """

    z_position: float
    laser_names_active: List[str]
    laser_powers_active: List[float]
    laser_positions_active: List
    xyoffset: Tuple[float, float]

    exposure_time: Optional[int] = None
    interval_time: Optional[int] = None
    duration_time: Optional[int] = None

    def __post_init__(self):
        len_ln = len(self.laser_names_active)
        len_lpow = len(self.laser_powers_active)
        len_lpos = len(self.laser_positions_active)
        if len_ln != len_lpos or len_ln != len_lpow:
            raise ValueError(
                f"Length mismatch among lists: "
                f"laser_names_active({len_ln}), "
                f"laser_powers_active({len_lpow}), "
                f"laser_positions_active({len_lpos})"
            )
        # Compare against None explicitly: the old truthiness check let an
        # explicit value of 0 slip through without raising.
        if any(
            t is not None
            for t in (self.exposure_time, self.interval_time, self.duration_time)
        ):
            raise ValueError(
                "Please do not define exposure_time, interval_time, or duration_time in a time series experiment component. Use the GlobalParameters to set this."
            )
        # Per-laser lookup tables keyed by laser name.
        self.laser_powers = dict(
            zip(self.laser_names_active, self.laser_powers_active)
        )
        self.laser_positions = dict(
            zip(self.laser_names_active, self.laser_positions_active)
        )
51
+
52
+
53
@dataclass
class zStackExpConfig(BaseExpConfig):
    """Configuration for a z-stack experiment.

    The active-laser lists are parallel: entry i of each list refers to the
    same laser. After validation, ``laser_powers`` and ``laser_positions``
    are exposed as name-keyed lookup dicts.
    """

    z_position: List[float]
    laser_names_active: List[str]
    laser_powers_active: List[float]
    laser_positions_active: List
    xyoffset: Tuple[float, float]

    exposure_time: int
    interval_time: int

    def __post_init__(self):
        n_names = len(self.laser_names_active)
        n_powers = len(self.laser_powers_active)
        n_positions = len(self.laser_positions_active)
        # All three parallel lists must agree in length.
        if not (n_names == n_powers == n_positions):
            raise ValueError(
                f"Length mismatch among lists: "
                f"laser_names_active({n_names}), "
                f"laser_powers_active({n_powers}), "
                f"laser_positions_active({n_positions})"
            )
        # Build the per-laser lookup tables keyed by laser name.
        self.laser_powers = dict(
            zip(self.laser_names_active, self.laser_powers_active)
        )
        self.laser_positions = dict(
            zip(self.laser_names_active, self.laser_positions_active)
        )
83
+
84
+
85
def timeseriesEXP(
    microscope: VirtualMicroscope,
    config: TimeSeriesExpConfig,
) -> Tuple[np.ndarray, MetaData]:
    """Run a single time-series acquisition on the virtual microscope.

    Returns:
        Tuple of the captured frames (wrapped in a leading axis of
        length one) and the simulation metadata.
    """
    # Collect the simulation parameters from the config in one place,
    # then hand them to the microscope as keyword arguments.
    sim_kwargs = {
        "z_val": config.z_position,
        "laser_power": config.laser_powers,
        "laser_position": config.laser_positions,
        "xyoffset": config.xyoffset,
        "duration_total": config.duration_time,
        "exposure_time": config.exposure_time,
        "interval_time": config.interval_time,
    }
    frames, metadata = microscope.run_sim(**sim_kwargs)
    return np.array([frames]), metadata
99
+
100
+
101
def zseriesEXP(
    microscope: VirtualMicroscope,
    config: zStackExpConfig,
) -> Tuple[np.ndarray, MetaData]:
    """Acquire one frame per z-position and stack the results.

    Each plane is simulated for exactly one exposure + interval cycle;
    the metadata of the last simulated plane is returned.

    Raises:
        ValueError: if ``config.z_position`` is empty (the metadata
            would otherwise be undefined).
    """
    # Guard against an empty stack: the original code would hit a
    # NameError on the loop variable after a zero-iteration loop.
    if not config.z_position:
        raise ValueError("z_position must contain at least one z value.")
    frames = []
    metadata = None
    for z in config.z_position:
        f, metadata = microscope.run_sim(
            z_val=z,
            laser_power=config.laser_powers,
            laser_position=config.laser_positions,
            xyoffset=config.xyoffset,
            # One full exposure + interval cycle per plane.
            duration_total=config.exposure_time + config.interval_time,
            exposure_time=config.exposure_time,
            interval_time=config.interval_time,
        )
        frames.append(f)
    return np.array(frames), metadata
@@ -0,0 +1,32 @@
1
+ import json
2
+ import os
3
+
4
+ import numpy as np
5
+ from tifffile import TiffWriter
6
+
7
+ from ..configio.configmodels import OutputParameters
8
+ from ..metadata.metadata import MetaData
9
+
10
+
11
def save_config_frames(
    config: MetaData, frames: np.ndarray, outputparams: OutputParameters
) -> None:
    """Write frames as an OME-TIFF and dump the metadata to JSON.

    Parameters:
    -----------
    config : MetaData
        Acquisition metadata; embedded in the TIFF (minus notes) and
        saved in full as ``metadata.json``.
    frames : np.ndarray
        Image data to write.
    outputparams : OutputParameters
        Provides the output directory and base file name.
    """
    cd = outputparams.output_path
    # exist_ok avoids the race between the existence check and creation
    # that the old os.path.exists()/os.makedirs() pair had.
    os.makedirs(cd, exist_ok=True)

    with TiffWriter(
        os.path.join(cd, outputparams.output_name + ".ome" + ".tiff"), bigtiff=True
    ) as f:
        f.write(
            frames,
            # notes may hold arbitrary objects; keep them out of the TIFF tags.
            metadata=config.model_dump(exclude={"notes"}),
        )

    # Serialize the full MetaData next to the image.
    metadata_json = config.model_dump()
    json_path = os.path.join(cd, "metadata.json")
    with open(json_path, "w") as f:
        json.dump(metadata_json, f)
File without changes
@@ -0,0 +1,87 @@
1
+ from typing import Iterator, List, Literal, Union
2
+
3
+ from pydantic import BaseModel
4
+
5
+
6
class StringNode:
    """Tree node holding either a single string (leaf) or child nodes.

    Exactly one of ``value`` (leaf) or ``children`` (internal node) is
    populated; the other is None (or, for a fresh internal node, an
    empty child list).
    """

    def __init__(self, value: Union[str, List["StringNode"], None] = None) -> None:
        """Initialize a node with either a string or a nested list of nodes.

        The annotation is explicitly Optional: the old implicit-Optional
        form (``Union[...] = None``) is rejected by modern type checkers.
        """
        if isinstance(value, str):
            # Leaf node: holds a string, has no children.
            self.value = value
            self.children = None
        elif isinstance(value, list):
            # Internal node seeded with existing children.
            self.value = None
            self.children = value
        elif value is None:
            # Empty internal node; children can be added later.
            self.value = None
            self.children = []
        else:
            raise ValueError("Value must be a string, a list of StringNode, or None.")

    def is_leaf(self) -> bool:
        """Check if this node is a leaf node (contains a string)."""
        return self.value is not None

    def add(self, child: "StringNode") -> None:
        """Add a child node to this node.

        Raises:
            TypeError: if called on a leaf node.
        """
        if self.is_leaf():
            raise TypeError("Cannot add children to a leaf node.")
        self.children.append(child)

    def remove(self, child: "StringNode") -> None:
        """Remove a child node.

        Raises:
            TypeError: if called on a leaf node.
        """
        if self.is_leaf():
            raise TypeError("Cannot remove children from a leaf node.")
        self.children.remove(child)

    def __repr__(self) -> str:
        """Return a string representation of the node."""
        if self.is_leaf():
            return f"StringNode(value={self.value!r})"
        return f"StringNode(children={self.children!r})"

    def __iter__(self) -> Iterator["StringNode"]:
        """Iterate over children if this is not a leaf node.

        Raises:
            TypeError: if called on a leaf node.
        """
        if not self.is_leaf():
            return iter(self.children)
        raise TypeError("Leaf nodes are not iterable.")
48
+
49
+
50
def to_string_node(nested: Union[str, List]) -> StringNode:
    """
    Convert a nested list of strings into a StringNode structure.

    Args:
        nested: A string or a nested list of strings.

    Returns:
        A StringNode representing the nested structure.

    Raises:
        ValueError: if ``nested`` is neither a string nor a list.
    """
    if isinstance(nested, list):
        # Internal node: convert every element recursively into a child.
        return StringNode([to_string_node(item) for item in nested])
    if isinstance(nested, str):
        # Base case: a bare string becomes a leaf node.
        return StringNode(nested)
    raise ValueError("Input must be a string or a nested list of strings.")
69
+
70
+
71
class MetaData(BaseModel):
    """OME-style acquisition metadata for a simulated image stack.

    ``notes`` accepts a string, a nested list of strings, or an existing
    StringNode; after construction it is normalized to a StringNode.
    """

    notes: StringNode | list | str
    axes: str
    TimeIncrement: float
    TimeIncrementUnit: Literal["s", "ms"]
    PhysicalSizeX: float
    PhysicalSizeXUnit: Literal["nm", "m"]
    PhysicalSizeY: float
    PhysicalSizeYUnit: Literal["nm", "m"]
    # Channel: Dict[Literal["Name"], List[str]]

    def model_post_init(self, __context) -> None:
        # BUG FIX: __post_init__ is a dataclasses hook that pydantic's
        # BaseModel never calls, so the notes normalization previously
        # never ran. model_post_init is the pydantic-v2 equivalent
        # (model_dump elsewhere confirms this codebase is on v2).
        if isinstance(self.notes, (list, str)):
            self.notes = to_string_node(self.notes)

    class Config:
        # StringNode is not a pydantic type; allow it as a field value.
        arbitrary_types_allowed = True
@@ -0,0 +1,4 @@
1
+ from .condensate_movement import Condensate, create_condensate_dict
2
+ from .track_gen import Track_generator
3
+
4
+ __all__ = ["Condensate", "create_condensate_dict", "Track_generator"]
@@ -0,0 +1,356 @@
1
+ """
2
+ Contains class for storing condensate data. Condensates are defined as spherical always; defined by a
3
+ center (x,y,z), radius (r), and time (t). The complete description of the condensate at any time (t) is:
4
+ (x,y,z,r,t).
5
+
6
+ Usage:
7
+ ------
8
+ Initialize the class as follows:
9
+ condensate = Condensate(**{
10
+ "initial_position":np.array([0, 0, 0]),
11
+ "initial_time":0,
12
+ "diffusion_coefficient":0,
13
+ "hurst_exponent":0,
14
+ "units_time":'ms',
15
+ "units_position":'um',
16
+ "condensate_id":0,
17
+ "initial_scale":0,
18
+ })
19
+ Call the class object as follows to get the position and scale of the condensate at a given time:
20
+ condensate(times, time_unit) -> dict{"Position":np.ndarray, "Scale":float}
21
+ """
22
+
23
+ import matplotlib.pyplot as plt
24
+ import numpy as np
25
+
26
+ from ..cells.rectangular_cell import RectangularCell
27
+ from ..utils.decorators import cache
28
+ from .track_gen import Track_generator as sf
29
+
30
+
31
def create_condensate_dict(
    initial_centers: np.ndarray,
    initial_scale: np.ndarray,
    diffusion_coefficient: np.ndarray,
    hurst_exponent: np.ndarray,
    cell: RectangularCell,
    **kwargs,
) -> dict:
    """
    Creates a dictionary of condensates for simulation.

    Parameters:
    -----------
    initial_centers : np.ndarray
        Initial center coordinates, one row per condensate.
    initial_scale : np.ndarray
        Initial scales, one entry per condensate.
    diffusion_coefficient : np.ndarray
        Diffusion coefficients, one entry per condensate.
    hurst_exponent : np.ndarray
        Hurst exponents, one entry per condensate.
    cell : RectangularCell
        The rectangular cell that contains the condensates.
    **kwargs : dict
        Optional overrides; ``units_time`` may supply a per-condensate
        list of time units (defaults to "ms" for all).

    Returns:
    --------
    dict
        Mapping of string condensate IDs ("0", "1", ...) to `Condensate`
        objects.
    """
    # The number of condensates is implied by the diffusion-coefficient list.
    num_condensates = len(diffusion_coefficient)
    units_time = kwargs.get("units_time", ["ms"] * num_condensates)
    return {
        str(idx): Condensate(
            initial_position=initial_centers[idx],
            initial_scale=initial_scale[idx],
            diffusion_coefficient=diffusion_coefficient[idx],
            hurst_exponent=hurst_exponent[idx],
            condensate_id=idx,
            units_time=units_time[idx],
            cell=cell,
        )
        for idx in range(num_condensates)
    }
77
+
78
+
79
class Condensate:
    """Condensate class for storing condensate data.

    A condensate is always spherical, fully described by center (x, y, z),
    scale/radius, and time. Calling the instance returns its state at a
    given time, lazily extending the trajectory as needed.

    Parameters:
    -----------
    initial_position: np.ndarray, default [0, 0, 0]
        Initial position of the condensate.
    initial_time: int = 0
        Initial time of the condensate.
    diffusion_coefficient: float = 0
        Diffusion coefficient of the condensate (same units as position/time).
    hurst_exponent: float = 0
        Hurst exponent of the condensate (0 < H < 1).
    units_time: str = 'ms'
        Units of time. In the class reference frame, time starts from 0 and
        advances by 1 per step; e.g. for units_time of "20ms", 1 means 20ms.
    units_position: str = 'um'
        Units of position.
    condensate_id: int = 0
        ID of the condensate.
    initial_scale: float = 0
        Initial scale (radius) of the condensate.
    cell: RectangularCell = None
        The rectangular cell that contains the condensates.
    """

    def __init__(
        self,
        initial_position: np.ndarray = None,
        initial_time: int = 0,
        diffusion_coefficient: float = 0,  # same units as position and time
        hurst_exponent: float = 0,  # 0 < hurst_exponent < 1
        units_time: str = "ms",
        units_position: str = "um",
        condensate_id: int = 0,
        initial_scale: float = 0,
        cell: RectangularCell = None,
    ):
        # BUG FIX: the old default np.array([0, 0, 0]) was a single mutable
        # array object shared by every instance constructed without an
        # explicit position (and stored un-copied because it already was an
        # ndarray). Build a fresh array per call instead.
        if initial_position is None:
            initial_position = np.array([0, 0, 0])
        # asarray leaves ndarrays untouched and converts anything else,
        # matching the old isinstance-guarded conversions.
        self.initial_position = np.asarray(initial_position)
        self.initial_time = int(initial_time)
        self.diffusion_coefficient = np.asarray(diffusion_coefficient)
        self.hurst_exponent = np.asarray(hurst_exponent)
        self.units_time = units_time
        self.units_position = units_position
        self.condensate_id = condensate_id
        self.initial_scale = initial_scale
        if cell is None:
            # NOTE(review): origin is 2D while dimensions is 3D — kept from
            # the original defaults; confirm against RectangularCell.
            cell = RectangularCell(
                origin=np.array([0, 0]), dimensions=np.array([0, 0, 0])
            )
        self.cell = cell
        # Spatial dimensionality, inferred from the initial position.
        self.dim = self.initial_position.shape[0]

        # initialize the history of the condensate
        self._initialize_properties()

    def _initialize_properties(self) -> None:
        """Seed the time/position/scale histories with the initial state."""
        self.times = np.array([self.initial_time])
        self.condensate_positions = np.array([self.initial_position])
        self.scale = np.array([self.initial_scale])

    @property
    def times(self) -> np.ndarray:
        """Times at which the condensate state is recorded."""
        return self._times

    @times.setter
    def times(self, value) -> None:
        # Must be an ndarray so boolean masking in __call__ works.
        if not isinstance(value, np.ndarray):
            raise TypeError("Times must be a numpy array.")
        self._times = value

    @property
    def condensate_positions(self) -> np.ndarray:
        """Recorded positions, one row per entry in `times`."""
        return self._condensate_positions

    @condensate_positions.setter
    def condensate_positions(self, value) -> None:
        if not isinstance(value, np.ndarray):
            raise TypeError("Condensate positions must be a numpy array.")
        # Each row must match the dimensionality of the initial position.
        if value.shape[1] != self.dim:
            raise ValueError(
                "Condensate positions must be the same dimension as the initial position."
            )
        self._condensate_positions = value

    @property
    def scale(self) -> np.ndarray:
        """Recorded scale (radius) history of the condensate."""
        return self._scale

    @scale.setter
    def scale(self, value) -> None:
        self._scale = value

    def add_positions(
        self, time: np.ndarray, position: np.ndarray, scale: np.ndarray
    ) -> None:
        """Append new (time, position, scale) samples to the history.

        Parameters:
        -----------
        time: np.ndarray
            Times at which to add positions.
        position: np.ndarray
            Positions to add to the condensate.
        scale: np.ndarray
            Scale to add to the condensate.
        """
        self.times = np.append(self.times, time)
        self.condensate_positions = np.append(
            self.condensate_positions, position, axis=0
        )
        self.scale = np.append(self.scale, scale)

    @cache
    def __call__(self, time: int, time_unit: str) -> dict:
        """Returns the position and scale of the condensate at a given time.

        Parameters:
        -----------
        time: int
            Time at which to return the state, expressed in the reference
            frame of this condensate (caller converts).
        time_unit: str
            Units of time; must equal this condensate's units_time. Serves
            as a sanity check that the caller did the conversion.

        Returns:
        --------
        dict with keys:
            Position: np.ndarray
                Position of the condensate at the given time.
            Scale: float
                Scale of the condensate at the given time.

        Raises:
        -------
        ValueError: if time_unit does not match units_time.
        """
        if time_unit != self.units_time:
            raise ValueError("Time units do not match to the condensate.")
        # Lazily (re)seed the history if it has not been initialized yet.
        if not hasattr(self, "_condensate_positions"):
            self.times = np.array([self.initial_time])
            self.condensate_positions = np.array([self.initial_position])
            self.scale = np.array([self.initial_scale])
        # Extend the trajectory if the requested time is beyond the history.
        if time > self.times[-1]:
            self.generate_condensate_positions(time)

        return {
            "Position": self.condensate_positions[self.times == time][0],
            "Scale": self.scale[self.times == time][0],
        }

    def generate_condensate_positions(self, time: int) -> None:
        """Generates the condensate positions up to (and including) `time`.

        Parameters:
        -----------
        time: int
            Time up to which to generate the condensate positions.
        """
        # Number of new steps needed beyond the last recorded time.
        time_difference = time - self.times[-1]
        # Times from the step after the last recorded time through `time`.
        time_array = np.arange(self.times[-1] + 1, time + 1)
        # The cell bounds define the allowed space for the track generator.
        min_bound, max_bound = self.cell.get_bounds()
        cell_space = np.array(
            [
                [min_bound[0], max_bound[0]],  # x bounds
                [min_bound[1], max_bound[1]],  # y bounds
            ]
        )
        cell_axial_range = (max_bound[2] - min_bound[2]) / 2.0
        # BUG FIX: `sf` is already the Track_generator class (imported via
        # "from .track_gen import Track_generator as sf"), so the old call
        # `sf.Track_generator(...)` raised AttributeError.
        track_generator = sf(
            cell_space=cell_space,
            cell_axial_range=cell_axial_range,
            cycle_count=500,
            exposure_time=20,
            interval_time=0,
            oversample_motion_time=20,
        )
        track = track_generator.track_generation_no_transition(
            diffusion_coefficient=self.diffusion_coefficient,
            hurst_exponent=self.hurst_exponent,
            track_length=time_difference,
            initials=self.condensate_positions[-1],
            start_time=self.times[-1],
        )
        # All coordinates of the generated steps (key "xy" holds the track
        # coordinates — presumably x, y, z; verify against track_gen).
        track_xyz = track["xy"][:]
        # Scales for the new samples (currently constant; see calculate_scale).
        scales = self.calculate_scale(time_array, track_xyz)
        self.add_positions(time_array, track_xyz, scales)

    def calculate_scale(self, time: np.ndarray, position: np.ndarray) -> np.ndarray:
        """Calculates the scale of the condensate at the given times.

        Currently the scale is constant: the last recorded scale is
        repeated for every requested time.

        Parameters:
        -----------
        time: np.ndarray
            Times at which to calculate the scale.
        position: np.ndarray
            Positions at which to calculate the scale (unused for now).
        """
        last_scale = self.scale[-1]
        return np.full(time.shape, last_scale)

    def plot_condensate(self, ax, **kwargs):
        """
        Plots the condensate trajectory, radii, and endpoints.

        Draws the recorded path, a circle of the recorded scale at each
        position, the initial position in green, and the final position in
        blue. If "save_path" is in kwargs the figure is also saved.

        Parameters:
        -----------
        ax: plt.Axes
            Axes to plot the condensate on.
        **kwargs:
            Keyword arguments to pass to the plot function.
            NOTE(review): kwargs (including "save_path") are forwarded
            verbatim to ax.plot, which rejects unknown keys — confirm
            intended usage.
        """
        # Lazily (re)seed the history if needed (mirrors __call__).
        if not hasattr(self, "_condensate_positions"):
            self.times = np.array([self.initial_time])
            self.condensate_positions = np.array([self.initial_position])
            self.scale = np.array([self.initial_scale])

        # Recorded path in the x-y plane.
        ax.plot(
            self.condensate_positions[:, 0], self.condensate_positions[:, 1], **kwargs
        )

        # A circle at every recorded position with the scale as radius.
        for i in range(len(self.condensate_positions)):
            ax.add_patch(
                plt.Circle(
                    self.condensate_positions[i], self.scale[i], color="r", fill=False
                )
            )

        # Initial position in green, final position in blue.
        ax.scatter(self.initial_position[0], self.initial_position[1], color="g")
        ax.scatter(
            self.condensate_positions[-1][0],
            self.condensate_positions[-1][1],
            color="b",
        )
        if "save_path" in kwargs:
            plt.savefig(kwargs["save_path"])
        return ax
@@ -0,0 +1,10 @@
1
+ from .fbm_BP import FBM_BP
2
+ from .boundary_conditions import _refecting_boundary, _absorbing_boundary
3
+ from ...probabilityfuncs.markov_chain import MCMC_state_selection
4
+
5
+ __all__ = [
6
+ "FBM_BP",
7
+ "_refecting_boundary",
8
+ "_absorbing_boundary",
9
+ "MCMC_state_selection",
10
+ ]
@@ -0,0 +1,75 @@
1
+ import numpy as np
2
+
3
+ from ...utils.decorators import _catch_recursion_error
4
+
5
+ # Reflecting boundary condition which is a recursive function so that even if the first candidate
6
+ # is out of the space limit, the function will keep calling itself until the candidate is within the space limit
7
+
8
+
9
@_catch_recursion_error
def _refecting_boundary(
    fbm_store_last: float, fbm_candidate: float, space_lim: np.ndarray
) -> float:
    """Reflecting boundary condition for the FBM 1D.

    Folds any overshoot past either space limit back into the interval,
    recursing until the candidate lies inside. The recursion is guarded by
    the _catch_recursion_error decorator.

    Parameters:
    -----------
    fbm_store_last : float
        Last value of the FBM
    fbm_candidate : float
        Candidate value of the FBM
    space_lim : np.ndarray
        Space limit (min, max) for the FBM

    Returns:
    --------
    float
        New value of the FBM
    """
    lower, upper = space_lim[0], space_lim[1]
    # Overshoot above the upper bound: reflect the excess back inside.
    if fbm_candidate > upper:
        reflected = upper - np.abs(fbm_candidate - upper)
        return _refecting_boundary(fbm_store_last, reflected, space_lim)
    # Overshoot below the lower bound: reflect the excess back inside.
    if fbm_candidate < lower:
        reflected = lower + np.abs(fbm_candidate - lower)
        return _refecting_boundary(fbm_store_last, reflected, space_lim)
    # Candidate is already within the limits.
    return fbm_candidate
45
+
46
+
47
+ # Boundary condition where the step is set at the boundary limit if the candidate is out of the space limit
48
+
49
+
50
@_catch_recursion_error
def _absorbing_boundary(
    fbm_store_last: float, fbm_candidate: float, space_lim: np.ndarray
) -> float:
    """Absorbing boundary condition for the FBM 1D.

    Clamps the candidate to the nearest space limit when it falls outside
    the interval; otherwise the candidate passes through unchanged.

    Parameters:
    -----------
    fbm_store_last : float
        Last value of the FBM
    fbm_candidate : float
        Candidate value of the FBM
    space_lim : np.ndarray
        Space limit (min, max) for the FBM

    Returns:
    --------
    float
        New value of the FBM
    """
    lower, upper = space_lim[0], space_lim[1]
    # Pin the step at whichever boundary was crossed.
    if fbm_candidate > upper:
        return upper
    if fbm_candidate < lower:
        return lower
    return fbm_candidate