simcats-datasets 2.4.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,110 @@
1
+ """Helper functions for converting lines into different representations
2
+
3
+ @author: f.fuchs
4
+ """
5
+
6
+ from copy import deepcopy
7
+ from typing import List, Union
8
+
9
+ import numpy as np
10
+
11
+
12
+
13
def lines_voltage_to_pixel_space(lines: Union[List[np.ndarray], np.ndarray],
                                 voltage_range_x: np.ndarray,
                                 voltage_range_y: np.ndarray,
                                 image_width: int,
                                 image_height: int,
                                 round_to_int: bool = False) -> np.ndarray:
    """Convert lines from voltage space to image/pixel space.

    The input is copied (``np.array`` already copies), so the original lines are not modified. Integer input
    arrays are promoted to float before conversion — previously the converted float coordinates were assigned
    back into the integer array and silently truncated.

    Args:
        lines: Array or list of lines to convert, shape: (n, 4). \n
            Example: \n
            [[x_start, y_start, x_stop, y_stop], ...]
        voltage_range_x: Voltage range in x direction.
        voltage_range_y: Voltage range in y direction.
        image_width: Width of the image/pixel space.
        image_height: Height of the image/pixel space.
        round_to_int: Toggles if the lines are returned as floats (False) or are rounded and then returned as
            integers (True). Defaults to False.

    Returns:
        Array with rows containing the converted lines.
    """
    # np.array copies the input; promote non-float dtypes so the assignments below don't truncate
    pixel_space = np.array(lines)
    if not np.issubdtype(pixel_space.dtype, np.floating):
        pixel_space = pixel_space.astype(np.float64)
    if pixel_space.size:
        # map x coordinates (columns 0 and 2) onto [0, image_width - 1]
        x_min = voltage_range_x.min()
        x_span = voltage_range_x.max() - x_min
        pixel_space[:, [0, 2]] = (image_width - 1) * (pixel_space[:, [0, 2]] - x_min) / x_span
        # map y coordinates (columns 1 and 3) onto [0, image_height - 1]
        y_min = voltage_range_y.min()
        y_span = voltage_range_y.max() - y_min
        pixel_space[:, [1, 3]] = (image_height - 1) * (pixel_space[:, [1, 3]] - y_min) / y_span
    if round_to_int:
        return pixel_space.round(decimals=0).astype(int)
    return pixel_space
52
+
53
+
54
def lines_pixel_to_voltage_space(lines: Union[List[np.ndarray], np.ndarray],
                                 voltage_range_x: np.ndarray,
                                 voltage_range_y: np.ndarray,
                                 image_width: int,
                                 image_height: int) -> np.ndarray:
    """Map lines given in image/pixel coordinates back into voltage coordinates.

    The supplied lines are deep-copied first, so the caller's data stays untouched.

    Args:
        lines: Array or list of lines to convert, shape: (n, 4). \n
            Example: \n
            [[x_start, y_start, x_stop, y_stop], ...]
        voltage_range_x: Voltage range in x direction.
        voltage_range_y: Voltage range in y direction.
        image_width: Width of the image/pixel space.
        image_height: Height of the image/pixel space.

    Returns:
        Float32 array whose rows are the converted lines.
    """
    voltage_space = deepcopy(np.array(lines)).astype(np.float32)
    # voltage covered by each full axis, used to rescale normalized pixel positions
    span_x = voltage_range_x[1] - voltage_range_x[0]
    span_y = voltage_range_y[1] - voltage_range_y[0]
    for row in voltage_space:
        # x coordinates: pixel -> fraction of the axis -> voltage offset from the range start
        row[0] = (row[0] / (image_width - 1)) * span_x + voltage_range_x[0]
        row[2] = (row[2] / (image_width - 1)) * span_x + voltage_range_x[0]
        # y coordinates, analogous
        row[1] = (row[1] / (image_height - 1)) * span_y + voltage_range_y[0]
        row[3] = (row[3] / (image_height - 1)) * span_y + voltage_range_y[0]
    return voltage_space
83
+
84
+
85
def lines_convert_two_coordinates_to_coordinate_plus_change(lines: Union[List[np.ndarray], np.ndarray]) -> np.ndarray:
    """Change the format from x,y,x,y to x,y,dx,dy.

    The endpoint with the smaller x coordinate (smaller y as a tie-breaker) is used as the base point, so the
    stored delta always points towards the lexicographically larger endpoint.

    Args:
        lines: Array or list of lines to convert, shape: (n, 4). \n
            Example: \n
            [[x_start, y_start, x_stop, y_stop], ...]

    Returns:
        Array with rows of lines in x,y,dx,dy format.
    """
    converted = []
    for line in lines:
        first = (line[0], line[1])
        second = (line[2], line[3])
        # lexicographic order on (x, y) picks the same base point as comparing x first, then y
        if second < first:
            first, second = second, first
        converted.append([first[0], first[1], second[0] - first[0], second[1] - first[1]])
    return np.array(converted)
@@ -0,0 +1,351 @@
1
+ """Data preprocessors to be used with the **Pytorch Dataset class**.
2
+
3
+ Every preprocessor must accept either a single array or a list of arrays as input. Output type should always be the same
4
+ as the input type. Please try to use -=, +=, *=, and /=, as these are way faster than data = data + ... etc.. Avoid
5
+ using map(function, data), as this will return a copy and copying will slow down your code.
6
+ **Please look at example_preprocessor for a reference.**
7
+ """
8
+
9
+ from typing import List, Union, Tuple
10
+
11
+ import numpy as np
12
+ import cv2
13
+ import skimage.restoration
14
+ import bm3d
15
+ from scipy.signal import resample, decimate
16
+
17
+
18
def example_preprocessor(data: Union[np.ndarray, List[np.ndarray]]) -> Union[np.ndarray, List[np.ndarray]]:
    """Example (reference) for preprocessor implementations.

    A list input yields a fresh list holding the same (unmodified) arrays; a single array is returned as-is.

    Args:
        data: Numpy array to be preprocessed (or a list of such).

    Returns:
        Preprocessed numpy array (or a list of such).
    """
    if isinstance(data, list):
        # list inputs are handled element-wise, e.g. via a comprehension
        return [entry for entry in data]
    return data
33
+
34
+
35
def cast_to_float32(data: Union[np.ndarray, List[np.ndarray]]) -> Union[np.ndarray, List[np.ndarray]]:
    """Cast the data to float32. Especially useful to reduce memory usage for preloaded datasets.

    ``astype`` returns a copy, so the input arrays themselves are never modified.

    Args:
        data: Numpy array to be cast to float32 (or a list of such).

    Returns:
        Float32 numpy array (or a list of such).
    """
    if not isinstance(data, list):
        return data.astype(np.float32)
    return [arr.astype(np.float32) for arr in data]
50
+
51
+
52
def cast_to_float16(data: Union[np.ndarray, List[np.ndarray]]) -> Union[np.ndarray, List[np.ndarray]]:
    """Cast the data to float16. Especially useful to reduce memory usage for preloaded datasets.

    ``astype`` returns a copy, so the input arrays themselves are never modified.

    Args:
        data: Numpy array to be cast to float16 (or a list of such).

    Returns:
        Float16 numpy array (or a list of such).
    """
    if not isinstance(data, list):
        return data.astype(np.float16)
    return [arr.astype(np.float16) for arr in data]
67
+
68
+
69
def standardization(data: Union[np.ndarray, List[np.ndarray]]) -> Union[np.ndarray, List[np.ndarray]]:
    """Standardization of the data (mean=0, std=1).

    If a list of data is passed, each array is standardized individually (no global standardization).
    The augmented assignments work in place, so float input arrays are modified and then returned.

    Args:
        data: Numpy array to be standardized (or a list of such).

    Returns:
        Standardized numpy array (or a list of such).
    """
    # unify both cases: treat a single array as a one-element batch
    arrays = data if isinstance(data, list) else [data]
    for arr in arrays:
        arr -= np.mean(arr)
        arr /= np.std(arr)
    return data
88
+
89
+
90
def min_max_0_1(data: Union[np.ndarray, List[np.ndarray]]) -> Union[np.ndarray, List[np.ndarray]]:
    """Min max scaling of the data to [0, 1].

    If a list of data is passed, each array is scaled individually (no global scaling).
    The augmented assignments work in place, so float input arrays are modified and then returned.

    Args:
        data: Numpy array to be scaled (or a list of such).

    Returns:
        Rescaled numpy array (or a list of such).
    """
    # unify both cases: treat a single array as a one-element batch
    arrays = data if isinstance(data, list) else [data]
    for arr in arrays:
        arr -= np.min(arr)
        arr /= np.max(arr)
    return data
109
+
110
+
111
def min_max_minus_one_one(data: Union[np.ndarray, List[np.ndarray]]) -> Union[np.ndarray, List[np.ndarray]]:
    """Min max scaling of the data to [-1, 1].

    Equivalent to first scaling each array to [0, 1] and then mapping that range linearly onto [-1, 1].
    If a list of data is passed, each array is scaled individually (no global scaling). The augmented
    assignments work in place, so float input arrays are modified and then returned.

    Args:
        data: Numpy array to be scaled (or a list of such).

    Returns:
        Rescaled numpy array (or a list of such).
    """
    # unify both cases: treat a single array as a one-element batch
    arrays = data if isinstance(data, list) else [data]
    for arr in arrays:
        # [min, max] -> [0, 1]
        arr -= np.min(arr)
        arr /= np.max(arr)
        # [0, 1] -> [-1, 1]
        arr -= 0.5
        arr *= 2
    return data
131
+
132
+
133
def add_newaxis(data: Union[np.ndarray, List[np.ndarray]]) -> Union[np.ndarray, List[np.ndarray]]:
    """Adds a new leading axis to the data (basically the missing color channel).

    Args:
        data: Numpy array to which the axis will be added (or a list of such).

    Returns:
        Numpy array with additional leading axis (or a list of such).
    """
    if isinstance(data, list):
        # expand_dims(axis=0) is the functional form of arr[np.newaxis, ...]
        return [np.expand_dims(arr, axis=0) for arr in data]
    return np.expand_dims(data, axis=0)
145
+
146
+
147
def only_two_classes(data: Union[np.ndarray, List[np.ndarray]]) -> Union[np.ndarray, List[np.ndarray]]:
    """Sets all mask labels that are larger than or equal 1 to 1 and all other pixels to zero.

    The arrays are modified in place and then returned.

    Args:
        data: Numpy array to be processed (or a list of such).

    Returns:
        Numpy array with only two classes (or a list of such).
    """
    # unify both cases: treat a single array as a one-element batch
    arrays = data if isinstance(data, list) else [data]
    for arr in arrays:
        # compute the foreground mask once and binarize via it and its complement
        foreground = arr >= 1
        arr[foreground] = 1
        arr[~foreground] = 0
    return data
164
+
165
+
166
def _centered_crop_bounds(current_shape: Tuple[int, ...], shape: Tuple[int, int]) -> Tuple[int, int, int, int]:
    """Compute (axis0_start, axis0_stop, axis1_start, axis1_stop) for a centered crop to ``shape``.

    The stop values are negative indices; for an odd size difference the extra row/column is cut at the end.
    """
    axis0_start = (current_shape[0] - shape[0]) // 2
    axis0_stop = -current_shape[0] + shape[0] + axis0_start
    axis1_start = (current_shape[1] - shape[1]) // 2
    axis1_stop = -current_shape[1] + shape[1] + axis1_start
    return axis0_start, axis0_stop, axis1_start, axis1_stop


def shrink_to_shape(data: Union[np.ndarray, List[np.ndarray]], shape: Tuple[int, int]) -> Union[
    np.ndarray, List[np.ndarray]]:
    """Cut off required number of rows/columns of pixels at each edge of the image to get the desired shape.

    **Warning**: This preprocessor can't be used by supplying a string with the name to the class SimcatsDataset from
    the simcats_datasets.pytorch module, as this requires that preprocessors need no additional parameters but only the
    data. If a list of data is passed, it is expected, that all images in the list have the same shape!

    Fix: a list whose images already have the target shape previously fell through to the array branch and raised an
    AttributeError (lists have no ``.shape``); such a list is now returned unchanged. The target shape is assumed to
    be smaller than or equal to the input shape in both axes.

    Args:
        data: Numpy array to be preprocessed (or a list of such).
        shape: The shape to which the data will be cropped.

    Returns:
        Shrinked numpy array (or a list of such).
    """
    if isinstance(data, list):
        if data[0].shape != shape:
            a0_start, a0_stop, a1_start, a1_stop = _centered_crop_bounds(data[0].shape, shape)
            data = [_data[a0_start:a0_stop, a1_start:a1_stop] for _data in data]
    elif data.shape != shape:
        a0_start, a0_stop, a1_start, a1_stop = _centered_crop_bounds(data.shape, shape)
        data = data[a0_start:a0_stop, a1_start:a1_stop]
    return data
194
+
195
+
196
def shrink_to_shape_96x96(data: Union[np.ndarray, List[np.ndarray]]) -> Union[np.ndarray, List[np.ndarray]]:
    """Center-crop the data to shape 96x96 by cutting pixels off at the image edges.

    Parameter-free convenience wrapper around :func:`shrink_to_shape`, usable by name via SimcatsDataset.

    **Warning**: If a list of data is passed, it is expected, that all images in the list have the same shape!

    Args:
        data: Numpy array to be preprocessed (or a list of such).

    Returns:
        Shrinked numpy array (or a list of such).
    """
    return shrink_to_shape(data, (96, 96))
208
+
209
+
210
def resample_image(data: Union[np.ndarray, List[np.ndarray]], target_size: Tuple[int, int]) -> Union[
    np.ndarray, List[np.ndarray]]:
    """Resample an image to target size using scipy.signal.resample.

    Only downsamples: an axis is resampled only if it is larger than the target size, so smaller images are
    returned unchanged along that axis.

    **Warning**: This preprocessor can't be used by supplying a string with the name to the class SimcatsDataset from
    the simcats_datasets.pytorch module, as it requires that preprocessors need no additional parameters but only the
    data.

    Fix: the recursive call for list inputs previously dropped ``target_size``, raising a TypeError for any list.

    Args:
        data: The image to resample (or a list of such).
        target_size: The target size to resample to.

    Returns:
        The resampled image or a list of such.
    """
    if isinstance(data, list):
        # forward target_size into the per-image recursion (was missing before)
        return [resample_image(temp_data, target_size) for temp_data in data]
    if data.shape[0] > target_size[0]:
        data = resample(data, target_size[0], axis=0)
    if data.shape[1] > target_size[1]:
        data = resample(data, target_size[1], axis=1)
    return data
233
+
234
+
235
def decimate_image(data: Union[np.ndarray, List[np.ndarray]], target_size: Tuple[int, int]) -> Union[
    np.ndarray, List[np.ndarray]]:
    """Decimate an image to target size using scipy.signal.decimate.

    Decimation is applied repeatedly until both axes are at most the target size. The per-step factor is capped
    at 13 because scipy's documentation recommends decimating in stages for factors above 13 with an IIR filter.

    **Warning**: This preprocessor can't be used by supplying a string with the name to the class SimcatsDataset from
    the simcats_datasets.pytorch module, as it requires that preprocessors need no additional parameters but only the
    data.

    Fix: the recursive call for list inputs previously dropped ``target_size``, raising a TypeError for any list.

    Args:
        data: The image to decimate (or a list of such).
        target_size: The target size to decimate to.

    Returns:
        The decimated image or a list of such.
    """
    if isinstance(data, list):
        # forward target_size into the per-image recursion (was missing before)
        return [decimate_image(temp_data, target_size) for temp_data in data]
    # remaining downsampling factors per axis; > 1 means the axis is still too large
    q = [data.shape[0] / target_size[0], data.shape[1] / target_size[1]]
    while q[0] > 1 or q[1] > 1:
        if q[0] > 1:
            data = decimate(data.T, min(13, int(np.ceil(q[0]))), axis=1, ftype="iir").T
        if q[1] > 1:
            data = decimate(data.T, min(13, int(np.ceil(q[1]))), axis=0, ftype="iir").T
        q = [data.shape[0] / target_size[0], data.shape[1] / target_size[1]]
    return data
261
+
262
+
263
def standardize_to_dataset(data: Union[np.ndarray, List[np.ndarray]], mean: float, std: float) -> Union[
    np.ndarray, List[np.ndarray]]:
    """Standardization of the data not per image but with dataset-wide statistics.

    **Warning**: This preprocessor can't be used by supplying a string with the name to the class SimcatsDataset from
    the simcats_datasets.pytorch module, as it requires that preprocessors need no additional parameters but only the
    data.

    The augmented assignments work in place, so float input arrays are modified and then returned.

    Args:
        data: Numpy array to be standardized (or a list of such).
        mean: The mean to subtract.
        std: The standard deviation to divide by.

    Returns:
        Standardized numpy array (or a list of such).
    """
    # unify both cases: treat a single array as a one-element batch
    arrays = data if isinstance(data, list) else [data]
    for arr in arrays:
        arr -= mean
        arr /= std
    return data
288
+
289
+
290
def _bm3d_smoothing_single_img(img: np.ndarray) -> np.ndarray:
    """BM3D smoothing helper function, which performs the actual BM3D smoothing in the bm3d_smoothing preprocessor.

    The noise level is estimated from the image itself and damped by a factor of 0.4 before being passed to BM3D.

    Args:
        img: Numpy array to be smoothed.

    Returns:
        Smoothed image.
    """
    noise_estimate = skimage.restoration.estimate_sigma(img, average_sigmas=True)
    return bm3d.bm3d(img, sigma_psd=0.4 * noise_estimate, stage_arg=bm3d.BM3DStages.HARD_THRESHOLDING)
302
+
303
+
304
def bm3d_smoothing(data: Union[np.ndarray, List[np.ndarray]]) -> Union[np.ndarray, List[np.ndarray]]:
    """Smoothing of the data using the BM3D algorithm.

    For list inputs the entries of the supplied list are replaced in place with their smoothed versions.

    Args:
        data: Numpy array to be smoothed (or a list of such).

    Returns:
        BM3D-smoothed numpy array (or a list of such).
    """
    if isinstance(data, list):
        for idx, img in enumerate(data):
            data[idx] = _bm3d_smoothing_single_img(img)
        return data
    return _bm3d_smoothing_single_img(data)
319
+
320
+
321
def _vertical_median_smoothing_single_img(img: np.ndarray) -> np.ndarray:
    """Vertical median smoothing helper, used by the vertical_median_smoothing preprocessor.

    Each column is median-filtered (kernel size 3) independently; the image is modified in place.

    Args:
        img: Numpy array to be smoothed.

    Returns:
        Smoothed image.
    """
    num_cols = img.shape[1]
    for col_idx in range(num_cols):
        smoothed_col = cv2.medianBlur(img[:, col_idx], 3)
        img[:, col_idx] = smoothed_col.flatten()
    return img
333
+
334
+
335
def vertical_median_smoothing(data: Union[np.ndarray, List[np.ndarray]]) -> Union[np.ndarray, List[np.ndarray]]:
    """Median-smoothing of the data, for each vertical column independently.

    The data is cast to float32 first. For list inputs the entries of the supplied list are replaced in place
    with their smoothed versions.

    Args:
        data: Numpy array to be smoothed (or a list of such).

    Returns:
        Smoothed numpy array (or a list of such).
    """
    if isinstance(data, list):
        for idx, img in enumerate(data):
            data[idx] = _vertical_median_smoothing_single_img(img.astype(np.float32))
        return data
    return _vertical_median_smoothing_single_img(data.astype(np.float32))
@@ -0,0 +1,102 @@
1
+ """Functionalities for extracting labeled transition lines from a SimCATS CSD (using the metadata).
2
+
3
+ @author: f.hader
4
+ """
5
+
6
+ from typing import Dict, List, Tuple
7
+
8
+ import numpy as np
9
+ from simcats.ideal_csd import IdealCSDInterface
10
+ from simcats.ideal_csd.geometric import calculate_all_bezier_anchors as calc_anchors
11
+
12
+ from simcats_datasets.support_functions.clip_line_to_rectangle import clip_point_line_to_rectangle, \
13
+ clip_slope_line_to_rectangle, create_rectangle_corners
14
+
15
+
16
def get_lead_transition_labels(sweep_range_g1: np.ndarray,
                               sweep_range_g2: np.ndarray,
                               ideal_csd_config: IdealCSDInterface,
                               lead_transition_mask: np.ndarray) -> Tuple[np.ndarray, List[Dict]]:
    """Function for calculating the line coordinates and labels for all linear parts in a simulated CSD.

    **Warning**: This function expects that IdealCSDGeometric has been used for the simulation. Dot jumps or similar
    distortions are not taken into account in the calculation of the line coordinates. This means, that the returned
    lines are the ideal (undisturbed) lines.

    Args:
        sweep_range_g1: The sweep range for gate 1. Required to know where the boundaries are.
        sweep_range_g2: The sweep range for gate 2. Required to know where the boundaries are.
        ideal_csd_config: The IdealCSDInterface implementation that was used during the simulation. It is
            required to calculate the bezier anchors from the configured TCTs.
        lead_transition_mask: Lead transition mask (TCT mask), used to identify involved TCTs.

    Returns:
        np.ndarray, list[dict]: Array with the line coordinates and list containing dictionaries with corresponding
        labels. Every row of the array represents one line as [x_start, y_start, x_stop, y_stop].

    """
    # retrieve which TCTs are contained in the simulated csd; mask value i marks TCT i.
    # NOTE(review): 0 presumably marks "no transition" background pixels and is therefore dropped — confirm.
    tct_ids = np.unique(lead_transition_mask).astype(int).tolist()
    tct_ids.remove(0)

    # retrieve TCT rotation (used below to rotate the TCT-space slopes into image space)
    rotation = ideal_csd_config.rotation

    # get CSD corner points of the visible sweep window; all lines are clipped against this rectangle
    rect_corners = create_rectangle_corners(x_range=sweep_range_g1, y_range=sweep_range_g2)

    # collectors for the clipped line segments and their {"tct_id", "transition_id"} labels (kept in sync)
    line_points = []
    labels = []

    # for every tct find the linear parts that are included in the csd (to be used as labels for line detection)
    for i in tct_ids:
        # retrieve tct parameters (TCT ids are 1-based, the parameter list is 0-based)
        tct_params = ideal_csd_config.tct_params[i - 1]
        # retrieve all bezier anchors. Linear parts are always bound by anchors of two subsequent triple points,
        # or by one anchor and infinite linear prolongation in the single dot regions.
        # NOTE(review): anchors[i] appears to be indexed as [triple_point, anchor_index, xy] — confirm against
        # simcats.ideal_csd.geometric.calculate_all_bezier_anchors.
        anchors = calc_anchors(tct_params=tct_params, max_peaks=i)

        # iterate all lead transitions / linear parts of the current tct and check if they are in the image
        for trans_id in range(i * 2):
            # the first lead transition only has one bezier anchor, as it is infinitively prolonged in the single dot
            # regime
            if trans_id == 0:
                anchor = anchors[i][trans_id, 0, :]
                # NOTE(review): tct_params[2] presumably holds the slope of the first outer transition — confirm
                slope = tct_params[2]
                # rotate slope into image space
                angle = np.arctan(slope) + rotation
                # arctan returns values in (-pi/2, pi/2); shift negative-slope angles into the correct half-plane
                if slope < 0:
                    angle += np.pi
                slope = np.tan(angle)
                # the line extends from the anchor towards the image border (anchor is the end, not the start)
                clipped_start, clipped_end = clip_slope_line_to_rectangle(slope=slope, point=anchor,
                                                                          rect_corners=rect_corners, is_start=False)
                # only keep transitions that actually intersect the visible rectangle
                if clipped_start is not None and clipped_end is not None:
                    line_points.append(np.array([clipped_start[0], clipped_start[1], clipped_end[0], clipped_end[1]]))
                    labels.append({"tct_id": i, "transition_id": trans_id})
            # the last lead transition only has one bezier anchor, as it is infinitively prolonged in the single dot
            # regime
            elif trans_id == i * 2 - 1:
                anchor = anchors[i][trans_id - 1, 2, :]
                # NOTE(review): tct_params[3] presumably holds the slope of the last outer transition — confirm
                slope = tct_params[3]
                # rotate slope into image space
                angle = np.arctan(slope) + rotation
                # arctan returns values in (-pi/2, pi/2); shift negative-slope angles into the correct half-plane
                if slope < 0:
                    angle += np.pi
                slope = np.tan(angle)
                # here the anchor is the start of the line and it extends towards the image border
                clipped_start, clipped_end = clip_slope_line_to_rectangle(slope=slope, point=anchor,
                                                                          rect_corners=rect_corners, is_start=True)
                # only keep transitions that actually intersect the visible rectangle
                if clipped_start is not None and clipped_end is not None:
                    line_points.append(np.array([clipped_start[0], clipped_start[1], clipped_end[0], clipped_end[1]]))
                    labels.append({"tct_id": i, "transition_id": trans_id})
            # all other transitions are in the double dot regime and have two anchors defining the line
            else:
                # line segment bounded by the anchors of two subsequent triple points
                anchor_start = anchors[i][trans_id - 1, 2, :]
                anchor_stop = anchors[i][trans_id, 0, :]
                clipped_start, clipped_end = clip_point_line_to_rectangle(start=anchor_start, end=anchor_stop,
                                                                          rect_corners=rect_corners)
                # only keep transitions that actually intersect the visible rectangle
                if clipped_start is not None and clipped_end is not None:
                    line_points.append(np.array([clipped_start[0], clipped_start[1], clipped_end[0], clipped_end[1]]))
                    labels.append({"tct_id": i, "transition_id": trans_id})

    # rows are [x_start, y_start, x_stop, y_stop]; labels[k] describes line_points[k]
    return np.array(line_points), labels