celldetective 1.4.2__py3-none-any.whl → 1.5.0b0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- celldetective/__init__.py +25 -0
- celldetective/__main__.py +62 -43
- celldetective/_version.py +1 -1
- celldetective/extra_properties.py +477 -399
- celldetective/filters.py +192 -97
- celldetective/gui/InitWindow.py +541 -411
- celldetective/gui/__init__.py +0 -15
- celldetective/gui/about.py +44 -39
- celldetective/gui/analyze_block.py +120 -84
- celldetective/gui/base/__init__.py +0 -0
- celldetective/gui/base/channel_norm_generator.py +335 -0
- celldetective/gui/base/components.py +249 -0
- celldetective/gui/base/feature_choice.py +92 -0
- celldetective/gui/base/figure_canvas.py +52 -0
- celldetective/gui/base/list_widget.py +133 -0
- celldetective/gui/{styles.py → base/styles.py} +92 -36
- celldetective/gui/base/utils.py +33 -0
- celldetective/gui/base_annotator.py +900 -767
- celldetective/gui/classifier_widget.py +6 -22
- celldetective/gui/configure_new_exp.py +777 -671
- celldetective/gui/control_panel.py +635 -524
- celldetective/gui/dynamic_progress.py +449 -0
- celldetective/gui/event_annotator.py +2023 -1662
- celldetective/gui/generic_signal_plot.py +1292 -944
- celldetective/gui/gui_utils.py +899 -1289
- celldetective/gui/interactions_block.py +658 -0
- celldetective/gui/interactive_timeseries_viewer.py +447 -0
- celldetective/gui/json_readers.py +48 -15
- celldetective/gui/layouts/__init__.py +5 -0
- celldetective/gui/layouts/background_model_free_layout.py +537 -0
- celldetective/gui/layouts/channel_offset_layout.py +134 -0
- celldetective/gui/layouts/local_correction_layout.py +91 -0
- celldetective/gui/layouts/model_fit_layout.py +372 -0
- celldetective/gui/layouts/operation_layout.py +68 -0
- celldetective/gui/layouts/protocol_designer_layout.py +96 -0
- celldetective/gui/pair_event_annotator.py +3130 -2435
- celldetective/gui/plot_measurements.py +586 -267
- celldetective/gui/plot_signals_ui.py +724 -506
- celldetective/gui/preprocessing_block.py +395 -0
- celldetective/gui/process_block.py +1678 -1831
- celldetective/gui/seg_model_loader.py +580 -473
- celldetective/gui/settings/__init__.py +0 -7
- celldetective/gui/settings/_cellpose_model_params.py +181 -0
- celldetective/gui/settings/_event_detection_model_params.py +95 -0
- celldetective/gui/settings/_segmentation_model_params.py +159 -0
- celldetective/gui/settings/_settings_base.py +77 -65
- celldetective/gui/settings/_settings_event_model_training.py +752 -526
- celldetective/gui/settings/_settings_measurements.py +1133 -964
- celldetective/gui/settings/_settings_neighborhood.py +574 -488
- celldetective/gui/settings/_settings_segmentation_model_training.py +779 -564
- celldetective/gui/settings/_settings_signal_annotator.py +329 -305
- celldetective/gui/settings/_settings_tracking.py +1304 -1094
- celldetective/gui/settings/_stardist_model_params.py +98 -0
- celldetective/gui/survival_ui.py +422 -312
- celldetective/gui/tableUI.py +1665 -1701
- celldetective/gui/table_ops/_maths.py +295 -0
- celldetective/gui/table_ops/_merge_groups.py +140 -0
- celldetective/gui/table_ops/_merge_one_hot.py +95 -0
- celldetective/gui/table_ops/_query_table.py +43 -0
- celldetective/gui/table_ops/_rename_col.py +44 -0
- celldetective/gui/thresholds_gui.py +382 -179
- celldetective/gui/viewers/__init__.py +0 -0
- celldetective/gui/viewers/base_viewer.py +700 -0
- celldetective/gui/viewers/channel_offset_viewer.py +331 -0
- celldetective/gui/viewers/contour_viewer.py +394 -0
- celldetective/gui/viewers/size_viewer.py +153 -0
- celldetective/gui/viewers/spot_detection_viewer.py +341 -0
- celldetective/gui/viewers/threshold_viewer.py +309 -0
- celldetective/gui/workers.py +304 -126
- celldetective/log_manager.py +92 -0
- celldetective/measure.py +1895 -1478
- celldetective/napari/__init__.py +0 -0
- celldetective/napari/utils.py +1025 -0
- celldetective/neighborhood.py +1914 -1448
- celldetective/preprocessing.py +1620 -1220
- celldetective/processes/__init__.py +0 -0
- celldetective/processes/background_correction.py +271 -0
- celldetective/processes/compute_neighborhood.py +894 -0
- celldetective/processes/detect_events.py +246 -0
- celldetective/processes/measure_cells.py +565 -0
- celldetective/processes/segment_cells.py +760 -0
- celldetective/processes/track_cells.py +435 -0
- celldetective/processes/train_segmentation_model.py +694 -0
- celldetective/processes/train_signal_model.py +265 -0
- celldetective/processes/unified_process.py +292 -0
- celldetective/regionprops/_regionprops.py +358 -317
- celldetective/relative_measurements.py +987 -710
- celldetective/scripts/measure_cells.py +313 -212
- celldetective/scripts/measure_relative.py +90 -46
- celldetective/scripts/segment_cells.py +165 -104
- celldetective/scripts/segment_cells_thresholds.py +96 -68
- celldetective/scripts/track_cells.py +198 -149
- celldetective/scripts/train_segmentation_model.py +324 -201
- celldetective/scripts/train_signal_model.py +87 -45
- celldetective/segmentation.py +844 -749
- celldetective/signals.py +3514 -2861
- celldetective/tracking.py +30 -15
- celldetective/utils/__init__.py +0 -0
- celldetective/utils/cellpose_utils/__init__.py +133 -0
- celldetective/utils/color_mappings.py +42 -0
- celldetective/utils/data_cleaning.py +630 -0
- celldetective/utils/data_loaders.py +450 -0
- celldetective/utils/dataset_helpers.py +207 -0
- celldetective/utils/downloaders.py +197 -0
- celldetective/utils/event_detection/__init__.py +8 -0
- celldetective/utils/experiment.py +1782 -0
- celldetective/utils/image_augmenters.py +308 -0
- celldetective/utils/image_cleaning.py +74 -0
- celldetective/utils/image_loaders.py +926 -0
- celldetective/utils/image_transforms.py +335 -0
- celldetective/utils/io.py +62 -0
- celldetective/utils/mask_cleaning.py +348 -0
- celldetective/utils/mask_transforms.py +5 -0
- celldetective/utils/masks.py +184 -0
- celldetective/utils/maths.py +351 -0
- celldetective/utils/model_getters.py +325 -0
- celldetective/utils/model_loaders.py +296 -0
- celldetective/utils/normalization.py +380 -0
- celldetective/utils/parsing.py +465 -0
- celldetective/utils/plots/__init__.py +0 -0
- celldetective/utils/plots/regression.py +53 -0
- celldetective/utils/resources.py +34 -0
- celldetective/utils/stardist_utils/__init__.py +104 -0
- celldetective/utils/stats.py +90 -0
- celldetective/utils/types.py +21 -0
- {celldetective-1.4.2.dist-info → celldetective-1.5.0b0.dist-info}/METADATA +1 -1
- celldetective-1.5.0b0.dist-info/RECORD +187 -0
- {celldetective-1.4.2.dist-info → celldetective-1.5.0b0.dist-info}/WHEEL +1 -1
- tests/gui/test_new_project.py +129 -117
- tests/gui/test_project.py +127 -79
- tests/test_filters.py +39 -15
- tests/test_notebooks.py +8 -0
- tests/test_tracking.py +232 -13
- tests/test_utils.py +123 -77
- celldetective/gui/base_components.py +0 -23
- celldetective/gui/layouts.py +0 -1602
- celldetective/gui/processes/compute_neighborhood.py +0 -594
- celldetective/gui/processes/measure_cells.py +0 -360
- celldetective/gui/processes/segment_cells.py +0 -499
- celldetective/gui/processes/track_cells.py +0 -303
- celldetective/gui/processes/train_segmentation_model.py +0 -270
- celldetective/gui/processes/train_signal_model.py +0 -108
- celldetective/gui/table_ops/merge_groups.py +0 -118
- celldetective/gui/viewers.py +0 -1354
- celldetective/io.py +0 -3663
- celldetective/utils.py +0 -3108
- celldetective-1.4.2.dist-info/RECORD +0 -123
- /celldetective/{gui/processes → processes}/downloader.py +0 -0
- {celldetective-1.4.2.dist-info → celldetective-1.5.0b0.dist-info}/entry_points.txt +0 -0
- {celldetective-1.4.2.dist-info → celldetective-1.5.0b0.dist-info}/licenses/LICENSE +0 -0
- {celldetective-1.4.2.dist-info → celldetective-1.5.0b0.dist-info}/top_level.txt +0 -0
|
@@ -1,468 +1,677 @@
|
|
|
1
1
|
import pandas as pd
|
|
2
2
|
import numpy as np
|
|
3
|
-
from celldetective.utils import derivative
|
|
3
|
+
from celldetective.utils.maths import derivative
|
|
4
|
+
from celldetective.utils.data_cleaning import extract_identity_col
|
|
4
5
|
import os
|
|
5
6
|
import subprocess
|
|
6
|
-
|
|
7
|
+
|
|
8
|
+
abs_path = os.sep.join(
|
|
9
|
+
[os.path.split(os.path.dirname(os.path.realpath(__file__)))[0], "celldetective"]
|
|
10
|
+
)
|
|
11
|
+
|
|
7
12
|
|
|
8
13
|
def measure_pairs(pos, neighborhood_protocol):
|
|
9
|
-
|
|
10
|
-
reference_population = neighborhood_protocol['reference']
|
|
11
|
-
neighbor_population = neighborhood_protocol['neighbor']
|
|
12
|
-
neighborhood_type = neighborhood_protocol['type']
|
|
13
|
-
neighborhood_distance = neighborhood_protocol['distance']
|
|
14
|
-
neighborhood_description = neighborhood_protocol['description']
|
|
15
|
-
|
|
16
|
-
relative_measurements = []
|
|
17
|
-
|
|
18
|
-
tab_ref = pos + os.sep.join(['output', 'tables', f'trajectories_{reference_population}.pkl'])
|
|
19
|
-
if os.path.exists(tab_ref):
|
|
20
|
-
df_reference = np.load(tab_ref, allow_pickle=True)
|
|
21
|
-
else:
|
|
22
|
-
df_reference = None
|
|
23
|
-
|
|
24
|
-
if os.path.exists(tab_ref.replace(reference_population, neighbor_population)):
|
|
25
|
-
df_neighbor = np.load(tab_ref.replace(reference_population, neighbor_population), allow_pickle=True)
|
|
26
|
-
else:
|
|
27
|
-
if os.path.exists(tab_ref.replace(reference_population, neighbor_population).replace('.pkl','.csv')):
|
|
28
|
-
df_neighbor = pd.read_csv(tab_ref.replace(reference_population, neighbor_population).replace('.pkl','.csv'))
|
|
29
|
-
else:
|
|
30
|
-
df_neighbor = None
|
|
31
|
-
|
|
32
|
-
if df_reference is None:
|
|
33
|
-
return None
|
|
34
|
-
|
|
35
|
-
assert str(neighborhood_description) in list(df_reference.columns)
|
|
36
|
-
neighborhood = df_reference.loc[:,f'{neighborhood_description}'].to_numpy()
|
|
37
|
-
|
|
38
|
-
ref_id_col = extract_identity_col(df_reference)
|
|
39
|
-
ref_tracked = False
|
|
40
|
-
if ref_id_col is None:
|
|
41
|
-
return None
|
|
42
|
-
elif ref_id_col=='TRACK_ID':
|
|
43
|
-
ref_tracked = True
|
|
44
|
-
neigh_id_col = extract_identity_col(df_neighbor)
|
|
45
|
-
neigh_tracked = False
|
|
46
|
-
if neigh_id_col is None:
|
|
47
|
-
return None
|
|
48
|
-
elif neigh_id_col=='TRACK_ID':
|
|
49
|
-
neigh_tracked = True
|
|
50
|
-
|
|
51
|
-
centre_of_mass_columns = [(c,c.replace('POSITION_X','POSITION_Y')) for c in list(df_neighbor.columns) if c.endswith('centre_of_mass_POSITION_X')]
|
|
52
|
-
centre_of_mass_labels = [c.replace('_centre_of_mass_POSITION_X','') for c in list(df_neighbor.columns) if c.endswith('centre_of_mass_POSITION_X')]
|
|
53
|
-
|
|
54
|
-
for t in np.unique(list(df_reference['FRAME'].unique())+list(df_neighbor['FRAME'])):
|
|
55
|
-
|
|
56
|
-
group_reference = df_reference.loc[df_reference['FRAME']==t,:]
|
|
57
|
-
group_neighbors = df_neighbor.loc[df_neighbor['FRAME']==t, :]
|
|
58
|
-
|
|
59
|
-
for tid, group in group_reference.groupby(ref_id_col):
|
|
60
|
-
|
|
61
|
-
neighborhood = group.loc[: , f'{neighborhood_description}'].to_numpy()[0]
|
|
62
|
-
coords_reference = group[['POSITION_X', 'POSITION_Y']].to_numpy()[0]
|
|
63
|
-
|
|
64
|
-
neighbors = []
|
|
65
|
-
if isinstance(neighborhood, float) or neighborhood!=neighborhood:
|
|
66
|
-
pass
|
|
67
|
-
else:
|
|
68
|
-
for neigh in neighborhood:
|
|
69
|
-
neighbors.append(neigh['id'])
|
|
70
|
-
|
|
71
|
-
unique_neigh = list(np.unique(neighbors))
|
|
72
|
-
print(f'{unique_neigh=}')
|
|
73
|
-
|
|
74
|
-
neighbor_properties = group_neighbors.loc[group_neighbors[neigh_id_col].isin(unique_neigh)]
|
|
75
|
-
|
|
76
|
-
for nc, group_neigh in neighbor_properties.groupby(neigh_id_col):
|
|
77
|
-
|
|
78
|
-
neighbor_vector = np.zeros((2))
|
|
79
|
-
neighbor_vector[:] = np.nan
|
|
80
|
-
mass_displacement_vector = np.zeros((len(centre_of_mass_columns), 2))
|
|
81
|
-
|
|
82
|
-
coords_centre_of_mass = []
|
|
83
|
-
for col in centre_of_mass_columns:
|
|
84
|
-
coords_centre_of_mass.append(group_neigh[[col[0],col[1]]].to_numpy()[0])
|
|
85
|
-
|
|
86
|
-
dot_product_vector = np.zeros((len(centre_of_mass_columns)))
|
|
87
|
-
dot_product_vector[:] = np.nan
|
|
88
|
-
|
|
89
|
-
cosine_dot_vector = np.zeros((len(centre_of_mass_columns)))
|
|
90
|
-
cosine_dot_vector[:] = np.nan
|
|
91
|
-
|
|
92
|
-
coords_neighbor = group_neigh[['POSITION_X', 'POSITION_Y']].to_numpy()[0]
|
|
93
|
-
intersection = np.nan
|
|
94
|
-
if 'intersection' in list(group_neigh.columns):
|
|
95
|
-
intersection = group_neigh['intersection'].values[0]
|
|
96
|
-
|
|
97
|
-
neighbor_vector[0] = coords_neighbor[0] - coords_reference[0]
|
|
98
|
-
neighbor_vector[1] = coords_neighbor[1] - coords_reference[1]
|
|
99
|
-
|
|
100
|
-
if neighbor_vector[0]==neighbor_vector[0] and neighbor_vector[1]==neighbor_vector[1]:
|
|
101
|
-
angle = np.arctan2(neighbor_vector[1], neighbor_vector[0])
|
|
102
|
-
relative_distance = np.sqrt(neighbor_vector[0]**2 + neighbor_vector[1]**2)
|
|
103
|
-
|
|
104
|
-
for z,cols in enumerate(centre_of_mass_columns):
|
|
105
|
-
|
|
106
|
-
mass_displacement_vector[z,0] = coords_centre_of_mass[z][0] - coords_neighbor[0]
|
|
107
|
-
mass_displacement_vector[z,1] = coords_centre_of_mass[z][1] - coords_neighbor[1]
|
|
108
|
-
|
|
109
|
-
dot_product_vector[z] = np.dot(mass_displacement_vector[z], -neighbor_vector)
|
|
110
|
-
cosine_dot_vector[z] = np.dot(mass_displacement_vector[z], -neighbor_vector) / (np.linalg.norm(mass_displacement_vector[z])*np.linalg.norm(-neighbor_vector))
|
|
111
|
-
|
|
112
|
-
relative_measurements.append(
|
|
113
|
-
{'REFERENCE_ID': tid, 'NEIGHBOR_ID': nc,
|
|
114
|
-
'reference_population': reference_population,
|
|
115
|
-
'neighbor_population': neighbor_population,
|
|
116
|
-
'FRAME': t, 'distance': relative_distance, 'intersection': intersection,
|
|
117
|
-
'angle': angle * 180 / np.pi,
|
|
118
|
-
f'status_{neighborhood_description}': 1,
|
|
119
|
-
f'class_{neighborhood_description}': 0,
|
|
120
|
-
'reference_tracked': ref_tracked, 'neighbors_tracked': neigh_tracked,
|
|
121
|
-
})
|
|
122
|
-
for z,lbl in enumerate(centre_of_mass_labels):
|
|
123
|
-
relative_measurements[-1].update({lbl+'_centre_of_mass_dot_product': dot_product_vector[z], lbl+'_centre_of_mass_dot_cosine': cosine_dot_vector[z]})
|
|
124
|
-
|
|
125
|
-
df_pairs = pd.DataFrame(relative_measurements)
|
|
126
|
-
|
|
127
|
-
return df_pairs
|
|
128
|
-
|
|
129
|
-
|
|
130
|
-
|
|
131
|
-
def measure_pair_signals_at_position(pos, neighborhood_protocol, velocity_kwargs={'window': 3, 'mode': 'bi'}):
|
|
132
|
-
|
|
133
|
-
|
|
134
|
-
reference_population = neighborhood_protocol['reference']
|
|
135
|
-
neighbor_population = neighborhood_protocol['neighbor']
|
|
136
|
-
neighborhood_type = neighborhood_protocol['type']
|
|
137
|
-
neighborhood_distance = neighborhood_protocol['distance']
|
|
138
|
-
neighborhood_description = neighborhood_protocol['description']
|
|
139
|
-
|
|
140
|
-
relative_measurements = []
|
|
141
|
-
|
|
142
|
-
tab_ref = pos + os.sep.join(['output', 'tables', f'trajectories_{reference_population}.pkl'])
|
|
143
|
-
if os.path.exists(tab_ref):
|
|
144
|
-
df_reference = np.load(tab_ref, allow_pickle=True)
|
|
145
|
-
else:
|
|
146
|
-
df_reference = None
|
|
147
|
-
|
|
148
|
-
if os.path.exists(tab_ref.replace(reference_population, neighbor_population)):
|
|
149
|
-
df_neighbor = np.load(tab_ref.replace(reference_population, neighbor_population), allow_pickle=True)
|
|
150
|
-
else:
|
|
151
|
-
if os.path.exists(tab_ref.replace(reference_population, neighbor_population).replace('.pkl','.csv')):
|
|
152
|
-
df_neighbor = pd.read_csv(tab_ref.replace(reference_population, neighbor_population).replace('.pkl','.csv'))
|
|
153
|
-
else:
|
|
154
|
-
df_neighbor = None
|
|
155
|
-
|
|
156
|
-
if df_reference is None:
|
|
157
|
-
return None
|
|
158
|
-
|
|
159
|
-
assert str(neighborhood_description) in list(df_reference.columns)
|
|
160
|
-
neighborhood = df_reference.loc[:,f'{neighborhood_description}'].to_numpy()
|
|
161
|
-
|
|
162
|
-
ref_id_col = extract_identity_col(df_reference)
|
|
163
|
-
if ref_id_col is not None:
|
|
164
|
-
df_reference = df_reference.sort_values(by=[ref_id_col, 'FRAME'])
|
|
165
|
-
|
|
166
|
-
ref_tracked = False
|
|
167
|
-
if ref_id_col=='TRACK_ID':
|
|
168
|
-
compute_velocity = True
|
|
169
|
-
ref_tracked = True
|
|
170
|
-
elif ref_id_col=='ID':
|
|
171
|
-
df_pairs = measure_pairs(pos, neighborhood_protocol)
|
|
172
|
-
return df_pairs
|
|
173
|
-
else:
|
|
174
|
-
print('ID or TRACK ID column could not be found in neighbor table. Abort.')
|
|
175
|
-
return None
|
|
176
|
-
|
|
177
|
-
print(f'Measuring pair signals...')
|
|
178
|
-
|
|
179
|
-
neigh_id_col = extract_identity_col(df_neighbor)
|
|
180
|
-
neigh_tracked = False
|
|
181
|
-
if neigh_id_col=='TRACK_ID':
|
|
182
|
-
compute_velocity = True
|
|
183
|
-
neigh_tracked = True
|
|
184
|
-
elif neigh_id_col=='ID':
|
|
185
|
-
df_pairs = measure_pairs(pos, neighborhood_protocol)
|
|
186
|
-
return df_pairs
|
|
187
|
-
else:
|
|
188
|
-
print('ID or TRACK ID column could not be found in neighbor table. Abort.')
|
|
189
|
-
return None
|
|
190
|
-
|
|
191
|
-
try:
|
|
192
|
-
for tid, group in df_reference.groupby(ref_id_col):
|
|
193
|
-
|
|
194
|
-
neighbor_dicts = group.loc[: , f'{neighborhood_description}'].values
|
|
195
|
-
timeline_reference = group['FRAME'].to_numpy()
|
|
196
|
-
coords_reference = group[['POSITION_X', 'POSITION_Y']].to_numpy()
|
|
197
|
-
if "area" in list(group.columns):
|
|
198
|
-
ref_area = group['area'].to_numpy()
|
|
199
|
-
else:
|
|
200
|
-
ref_area = [np.nan]*len(coords_reference)
|
|
201
|
-
|
|
202
|
-
neighbor_ids = []
|
|
203
|
-
neighbor_ids_per_t = []
|
|
204
|
-
intersection_values = []
|
|
205
|
-
|
|
206
|
-
time_of_first_entrance_in_neighborhood = {}
|
|
207
|
-
t_departure={}
|
|
208
|
-
|
|
209
|
-
for t in range(len(timeline_reference)):
|
|
210
|
-
|
|
211
|
-
neighbors_at_t = neighbor_dicts[t]
|
|
212
|
-
neighs_t = []
|
|
213
|
-
if isinstance(neighbors_at_t, float) or neighbors_at_t!=neighbors_at_t:
|
|
214
|
-
pass
|
|
215
|
-
else:
|
|
216
|
-
for neigh in neighbors_at_t:
|
|
217
|
-
if neigh['id'] not in neighbor_ids:
|
|
218
|
-
time_of_first_entrance_in_neighborhood[neigh['id']]=t
|
|
219
|
-
if 'intersection' in neigh:
|
|
220
|
-
intersection_values.append({"frame": t, "neigh_id": neigh['id'], "intersection": neigh['intersection']})
|
|
221
|
-
else:
|
|
222
|
-
intersection_values.append({"frame": t, "neigh_id": neigh['id'], "intersection": np.nan})
|
|
223
|
-
neighbor_ids.append(neigh['id'])
|
|
224
|
-
neighs_t.append(neigh['id'])
|
|
225
|
-
neighbor_ids_per_t.append(neighs_t)
|
|
226
|
-
|
|
227
|
-
intersection_values = pd.DataFrame(intersection_values)
|
|
228
|
-
|
|
229
|
-
#print(neighbor_ids_per_t)
|
|
230
|
-
unique_neigh = list(np.unique(neighbor_ids))
|
|
231
|
-
print(f'Reference cell {tid}: found {len(unique_neigh)} neighbour cells: {unique_neigh}...')
|
|
232
|
-
|
|
233
|
-
neighbor_properties = df_neighbor.loc[df_neighbor[neigh_id_col].isin(unique_neigh)]
|
|
234
|
-
|
|
235
|
-
for nc, group_neigh in neighbor_properties.groupby(neigh_id_col):
|
|
236
|
-
|
|
237
|
-
coords_neighbor = group_neigh[['POSITION_X', 'POSITION_Y']].to_numpy()
|
|
238
|
-
timeline_neighbor = group_neigh['FRAME'].to_numpy()
|
|
239
|
-
if "area" in list(group_neigh.columns):
|
|
240
|
-
neigh_area = group_neigh['area'].to_numpy()
|
|
241
|
-
else:
|
|
242
|
-
neigh_area = [np.nan]*len(timeline_neighbor)
|
|
243
|
-
|
|
244
|
-
|
|
245
|
-
# # Perform timeline matching to have same start-end points and no gaps
|
|
246
|
-
full_timeline, _, _ = timeline_matching(timeline_reference, timeline_neighbor)
|
|
247
|
-
|
|
248
|
-
neighbor_vector = np.zeros((len(full_timeline), 2))
|
|
249
|
-
neighbor_vector[:,:] = np.nan
|
|
250
|
-
|
|
251
|
-
intersection_vector = np.zeros((len(full_timeline)))
|
|
252
|
-
intersection_vector[:] = np.nan
|
|
253
|
-
|
|
254
|
-
centre_of_mass_columns = [(c,c.replace('POSITION_X','POSITION_Y')) for c in list(neighbor_properties.columns) if c.endswith('centre_of_mass_POSITION_X')]
|
|
255
|
-
centre_of_mass_labels = [c.replace('_centre_of_mass_POSITION_X','') for c in list(neighbor_properties.columns) if c.endswith('centre_of_mass_POSITION_X')]
|
|
256
|
-
|
|
257
|
-
mass_displacement_vector = np.zeros((len(centre_of_mass_columns), len(full_timeline), 2))
|
|
258
|
-
mass_displacement_vector[:,:,:] = np.nan
|
|
259
|
-
|
|
260
|
-
dot_product_vector = np.zeros((len(centre_of_mass_columns), len(full_timeline)))
|
|
261
|
-
dot_product_vector[:,:] = np.nan
|
|
262
|
-
|
|
263
|
-
cosine_dot_vector = np.zeros((len(centre_of_mass_columns), len(full_timeline)))
|
|
264
|
-
cosine_dot_vector[:,:] = np.nan
|
|
265
|
-
|
|
266
|
-
coords_centre_of_mass = []
|
|
267
|
-
for col in centre_of_mass_columns:
|
|
268
|
-
coords_centre_of_mass.append(group_neigh[[col[0],col[1]]].to_numpy())
|
|
269
|
-
|
|
270
|
-
# Relative distance
|
|
271
|
-
for t in range(len(full_timeline)):
|
|
272
|
-
|
|
273
|
-
if t in timeline_reference and t in timeline_neighbor: # meaning position exists on both sides
|
|
274
|
-
|
|
275
|
-
idx_reference = list(timeline_reference).index(t) #index_reference[list(full_timeline).index(t)]
|
|
276
|
-
idx_neighbor = list(timeline_neighbor).index(t) #index_neighbor[list(full_timeline).index(t)]
|
|
277
|
-
|
|
278
|
-
neighbor_vector[t, 0] = coords_neighbor[idx_neighbor, 0] - coords_reference[idx_reference, 0]
|
|
279
|
-
neighbor_vector[t, 1] = coords_neighbor[idx_neighbor, 1] - coords_reference[idx_reference, 1]
|
|
280
|
-
|
|
281
|
-
for z,cols in enumerate(centre_of_mass_columns):
|
|
282
|
-
|
|
283
|
-
mass_displacement_vector[z,t,0] = coords_centre_of_mass[z][idx_neighbor, 0] - coords_neighbor[idx_neighbor, 0]
|
|
284
|
-
mass_displacement_vector[z,t,1] = coords_centre_of_mass[z][idx_neighbor, 1] - coords_neighbor[idx_neighbor, 1]
|
|
285
|
-
|
|
286
|
-
dot_product_vector[z,t] = np.dot(mass_displacement_vector[z,t], -neighbor_vector[t])
|
|
287
|
-
cosine_dot_vector[z,t] = np.dot(mass_displacement_vector[z,t], -neighbor_vector[t]) / (np.linalg.norm(mass_displacement_vector[z,t])*np.linalg.norm(-neighbor_vector[t]))
|
|
288
|
-
if tid==44.0 and nc==173.0:
|
|
289
|
-
print(f'{centre_of_mass_columns[z]=} {mass_displacement_vector[z,t]=} {-neighbor_vector[t]=} {dot_product_vector[z,t]=} {cosine_dot_vector[z,t]=}')
|
|
290
|
-
|
|
291
|
-
|
|
292
|
-
angle = np.zeros(len(full_timeline))
|
|
293
|
-
angle[:] = np.nan
|
|
294
|
-
|
|
295
|
-
exclude = neighbor_vector[:,1]!=neighbor_vector[:,1]
|
|
296
|
-
angle[~exclude] = np.arctan2(neighbor_vector[:, 1][~exclude], neighbor_vector[:, 0][~exclude])
|
|
297
|
-
#print(f'Angle before unwrap: {angle}')
|
|
298
|
-
angle[~exclude] = np.unwrap(angle[~exclude])
|
|
299
|
-
#print(f'Angle after unwrap: {angle}')
|
|
300
|
-
relative_distance = np.sqrt(neighbor_vector[:,0]**2 + neighbor_vector[:, 1]**2)
|
|
301
|
-
#print(f'Timeline: {full_timeline}; Distance: {relative_distance}')
|
|
302
|
-
|
|
303
|
-
if compute_velocity:
|
|
304
|
-
rel_velocity = derivative(relative_distance, full_timeline, **velocity_kwargs)
|
|
305
|
-
rel_velocity_long_timescale = derivative(relative_distance, full_timeline, window = 7, mode='bi')
|
|
306
|
-
#rel_velocity = np.insert(rel_velocity, 0, np.nan)[:-1]
|
|
307
|
-
|
|
308
|
-
angular_velocity = np.zeros(len(full_timeline))
|
|
309
|
-
angular_velocity[:] = np.nan
|
|
310
|
-
angular_velocity_long_timescale = np.zeros(len(full_timeline))
|
|
311
|
-
angular_velocity_long_timescale[:] = np.nan
|
|
312
|
-
|
|
313
|
-
angular_velocity[~exclude] = derivative(angle[~exclude], full_timeline[~exclude], **velocity_kwargs)
|
|
314
|
-
angular_velocity_long_timescale[~exclude] = derivative(angle[~exclude], full_timeline[~exclude], window = 7, mode='bi')
|
|
315
|
-
|
|
316
|
-
# angular_velocity = np.zeros(len(full_timeline))
|
|
317
|
-
# angular_velocity[:] = np.nan
|
|
318
|
-
|
|
319
|
-
# for t in range(1, len(relative_angle1)):
|
|
320
|
-
# if not np.isnan(relative_angle1[t]) and not np.isnan(relative_angle1[t - 1]):
|
|
321
|
-
# delta_angle = relative_angle1[t] - relative_angle1[t - 1]
|
|
322
|
-
# delta_time = full_timeline[t] - full_timeline[t - 1]
|
|
323
|
-
# if delta_time != 0:
|
|
324
|
-
# angular_velocity[t] = delta_angle / delta_time
|
|
325
|
-
|
|
326
|
-
duration_in_neigh = list(neighbor_ids).count(nc)
|
|
327
|
-
#print(nc, duration_in_neigh, ' frames')
|
|
328
|
-
|
|
329
|
-
cum_sum = 0
|
|
330
|
-
for t in range(len(full_timeline)):
|
|
331
|
-
|
|
332
|
-
if t in timeline_reference and t in timeline_neighbor: # meaning position exists on both sides
|
|
333
|
-
|
|
334
|
-
idx_reference = list(timeline_reference).index(t)
|
|
335
|
-
idx_neighbor = list(timeline_neighbor).index(t)
|
|
336
|
-
inter = intersection_values.loc[(intersection_values['neigh_id']==nc)&(intersection_values["frame"]==t),"intersection"].values
|
|
337
|
-
if len(inter)==0:
|
|
338
|
-
inter = np.nan
|
|
339
|
-
else:
|
|
340
|
-
inter = inter[0]
|
|
341
|
-
|
|
342
|
-
neigh_inter_fraction = np.nan
|
|
343
|
-
if inter==inter and neigh_area[idx_neighbor]==neigh_area[idx_neighbor]:
|
|
344
|
-
neigh_inter_fraction = inter / neigh_area[idx_neighbor]
|
|
345
|
-
|
|
346
|
-
ref_inter_fraction = np.nan
|
|
347
|
-
if inter==inter and ref_area[idx_reference]==ref_area[idx_reference]:
|
|
348
|
-
ref_inter_fraction = inter / ref_area[idx_reference]
|
|
349
|
-
|
|
350
|
-
if nc in neighbor_ids_per_t[idx_reference]:
|
|
351
|
-
|
|
352
|
-
cum_sum+=1
|
|
353
|
-
relative_measurements.append(
|
|
354
|
-
{'REFERENCE_ID': tid, 'NEIGHBOR_ID': nc,
|
|
355
|
-
'reference_population': reference_population,
|
|
356
|
-
'neighbor_population': neighbor_population,
|
|
357
|
-
'FRAME': t, 'distance': relative_distance[t], 'intersection': inter, 'reference_frac_area_intersection': ref_inter_fraction, 'neighbor_frac_area_intersection': neigh_inter_fraction,
|
|
358
|
-
'velocity': rel_velocity[t],
|
|
359
|
-
'velocity_smooth': rel_velocity_long_timescale[t],
|
|
360
|
-
'angle': angle[t] * 180 / np.pi,
|
|
361
|
-
#'angle-neigh-ref': angle[t] * 180 / np.pi,
|
|
362
|
-
'angular_velocity': angular_velocity[t],
|
|
363
|
-
'angular_velocity_smooth': angular_velocity_long_timescale[t],
|
|
364
|
-
f'status_{neighborhood_description}': 1,
|
|
365
|
-
f'residence_time_in_{neighborhood_description}': cum_sum,
|
|
366
|
-
f'class_{neighborhood_description}': 0,
|
|
367
|
-
f't0_{neighborhood_description}': time_of_first_entrance_in_neighborhood[nc],
|
|
368
|
-
'reference_tracked': ref_tracked, 'neighbors_tracked': neigh_tracked,
|
|
369
|
-
})
|
|
370
|
-
for z,lbl in enumerate(centre_of_mass_labels):
|
|
371
|
-
relative_measurements[-1].update({lbl+'_centre_of_mass_dot_product': dot_product_vector[z,t], lbl+'_centre_of_mass_dot_cosine': cosine_dot_vector[z,t]})
|
|
372
|
-
|
|
373
|
-
else:
|
|
374
|
-
relative_measurements.append(
|
|
375
|
-
{'REFERENCE_ID': tid, 'NEIGHBOR_ID': nc,
|
|
376
|
-
'reference_population': reference_population,
|
|
377
|
-
'neighbor_population': neighbor_population,
|
|
378
|
-
'FRAME': t, 'distance': relative_distance[t], 'intersection': inter, 'reference_frac_area_intersection': ref_inter_fraction, 'neighbor_frac_area_intersection': neigh_inter_fraction,
|
|
379
|
-
'velocity': rel_velocity[t],
|
|
380
|
-
'velocity_smooth': rel_velocity_long_timescale[t],
|
|
381
|
-
'angle': angle[t] * 180 / np.pi,
|
|
382
|
-
#'angle-neigh-ref': angle[t] * 180 / np.pi,
|
|
383
|
-
'angular_velocity': angular_velocity[t],
|
|
384
|
-
'angular_velocity_smooth': angular_velocity_long_timescale[t],
|
|
385
|
-
f'status_{neighborhood_description}': 0,
|
|
386
|
-
f'residence_time_in_{neighborhood_description}': cum_sum,
|
|
387
|
-
f'class_{neighborhood_description}': 0,
|
|
388
|
-
f't0_{neighborhood_description}': time_of_first_entrance_in_neighborhood[nc],
|
|
389
|
-
'reference_tracked': ref_tracked, 'neighbors_tracked': neigh_tracked,
|
|
390
|
-
})
|
|
391
|
-
for z,lbl in enumerate(centre_of_mass_labels):
|
|
392
|
-
relative_measurements[-1].update({lbl+'_centre_of_mass_dot_product': dot_product_vector[z,t], lbl+'_centre_of_mass_dot_cosine': cosine_dot_vector[z,t]})
|
|
393
|
-
|
|
394
|
-
df_pairs = pd.DataFrame(relative_measurements)
|
|
395
|
-
|
|
396
|
-
return df_pairs
|
|
397
|
-
|
|
398
|
-
except KeyError:
|
|
399
|
-
print(f"Neighborhood not found in data frame. Measurements for this neighborhood will not be calculated")
|
|
400
14
|
|
|
15
|
+
reference_population = neighborhood_protocol["reference"]
|
|
16
|
+
neighbor_population = neighborhood_protocol["neighbor"]
|
|
17
|
+
neighborhood_type = neighborhood_protocol["type"]
|
|
18
|
+
neighborhood_distance = neighborhood_protocol["distance"]
|
|
19
|
+
neighborhood_description = neighborhood_protocol["description"]
|
|
20
|
+
|
|
21
|
+
relative_measurements = []
|
|
22
|
+
|
|
23
|
+
tab_ref = pos + os.sep.join(
|
|
24
|
+
["output", "tables", f"trajectories_{reference_population}.pkl"]
|
|
25
|
+
)
|
|
26
|
+
if os.path.exists(tab_ref):
|
|
27
|
+
df_reference = np.load(tab_ref, allow_pickle=True)
|
|
28
|
+
else:
|
|
29
|
+
df_reference = None
|
|
30
|
+
|
|
31
|
+
if os.path.exists(tab_ref.replace(reference_population, neighbor_population)):
|
|
32
|
+
df_neighbor = np.load(
|
|
33
|
+
tab_ref.replace(reference_population, neighbor_population),
|
|
34
|
+
allow_pickle=True,
|
|
35
|
+
)
|
|
36
|
+
else:
|
|
37
|
+
if os.path.exists(
|
|
38
|
+
tab_ref.replace(reference_population, neighbor_population).replace(
|
|
39
|
+
".pkl", ".csv"
|
|
40
|
+
)
|
|
41
|
+
):
|
|
42
|
+
df_neighbor = pd.read_csv(
|
|
43
|
+
tab_ref.replace(reference_population, neighbor_population).replace(
|
|
44
|
+
".pkl", ".csv"
|
|
45
|
+
)
|
|
46
|
+
)
|
|
47
|
+
else:
|
|
48
|
+
df_neighbor = None
|
|
49
|
+
|
|
50
|
+
if df_reference is None:
|
|
51
|
+
return None
|
|
52
|
+
|
|
53
|
+
assert str(neighborhood_description) in list(df_reference.columns)
|
|
54
|
+
neighborhood = df_reference.loc[:, f"{neighborhood_description}"].to_numpy()
|
|
55
|
+
|
|
56
|
+
ref_id_col = extract_identity_col(df_reference)
|
|
57
|
+
ref_tracked = False
|
|
58
|
+
if ref_id_col is None:
|
|
59
|
+
return None
|
|
60
|
+
elif ref_id_col == "TRACK_ID":
|
|
61
|
+
ref_tracked = True
|
|
62
|
+
neigh_id_col = extract_identity_col(df_neighbor)
|
|
63
|
+
neigh_tracked = False
|
|
64
|
+
if neigh_id_col is None:
|
|
65
|
+
return None
|
|
66
|
+
elif neigh_id_col == "TRACK_ID":
|
|
67
|
+
neigh_tracked = True
|
|
68
|
+
|
|
69
|
+
centre_of_mass_columns = [
|
|
70
|
+
(c, c.replace("POSITION_X", "POSITION_Y"))
|
|
71
|
+
for c in list(df_neighbor.columns)
|
|
72
|
+
if c.endswith("centre_of_mass_POSITION_X")
|
|
73
|
+
]
|
|
74
|
+
centre_of_mass_labels = [
|
|
75
|
+
c.replace("_centre_of_mass_POSITION_X", "")
|
|
76
|
+
for c in list(df_neighbor.columns)
|
|
77
|
+
if c.endswith("centre_of_mass_POSITION_X")
|
|
78
|
+
]
|
|
79
|
+
|
|
80
|
+
for t in np.unique(
|
|
81
|
+
list(df_reference["FRAME"].unique()) + list(df_neighbor["FRAME"])
|
|
82
|
+
):
|
|
83
|
+
|
|
84
|
+
group_reference = df_reference.loc[df_reference["FRAME"] == t, :]
|
|
85
|
+
group_neighbors = df_neighbor.loc[df_neighbor["FRAME"] == t, :]
|
|
86
|
+
|
|
87
|
+
for tid, group in group_reference.groupby(ref_id_col):
|
|
88
|
+
|
|
89
|
+
neighborhood = group.loc[:, f"{neighborhood_description}"].to_numpy()[0]
|
|
90
|
+
coords_reference = group[["POSITION_X", "POSITION_Y"]].to_numpy()[0]
|
|
91
|
+
|
|
92
|
+
neighbors = []
|
|
93
|
+
if isinstance(neighborhood, float) or neighborhood != neighborhood:
|
|
94
|
+
pass
|
|
95
|
+
else:
|
|
96
|
+
for neigh in neighborhood:
|
|
97
|
+
neighbors.append(neigh["id"])
|
|
98
|
+
|
|
99
|
+
unique_neigh = list(np.unique(neighbors))
|
|
100
|
+
print(f"{unique_neigh=}")
|
|
101
|
+
|
|
102
|
+
neighbor_properties = group_neighbors.loc[
|
|
103
|
+
group_neighbors[neigh_id_col].isin(unique_neigh)
|
|
104
|
+
]
|
|
105
|
+
|
|
106
|
+
for nc, group_neigh in neighbor_properties.groupby(neigh_id_col):
|
|
107
|
+
|
|
108
|
+
neighbor_vector = np.zeros((2))
|
|
109
|
+
neighbor_vector[:] = np.nan
|
|
110
|
+
mass_displacement_vector = np.zeros((len(centre_of_mass_columns), 2))
|
|
111
|
+
|
|
112
|
+
coords_centre_of_mass = []
|
|
113
|
+
for col in centre_of_mass_columns:
|
|
114
|
+
coords_centre_of_mass.append(
|
|
115
|
+
group_neigh[[col[0], col[1]]].to_numpy()[0]
|
|
116
|
+
)
|
|
117
|
+
|
|
118
|
+
dot_product_vector = np.zeros((len(centre_of_mass_columns)))
|
|
119
|
+
dot_product_vector[:] = np.nan
|
|
120
|
+
|
|
121
|
+
cosine_dot_vector = np.zeros((len(centre_of_mass_columns)))
|
|
122
|
+
cosine_dot_vector[:] = np.nan
|
|
123
|
+
|
|
124
|
+
coords_neighbor = group_neigh[["POSITION_X", "POSITION_Y"]].to_numpy()[
|
|
125
|
+
0
|
|
126
|
+
]
|
|
127
|
+
intersection = np.nan
|
|
128
|
+
if "intersection" in list(group_neigh.columns):
|
|
129
|
+
intersection = group_neigh["intersection"].values[0]
|
|
130
|
+
|
|
131
|
+
neighbor_vector[0] = coords_neighbor[0] - coords_reference[0]
|
|
132
|
+
neighbor_vector[1] = coords_neighbor[1] - coords_reference[1]
|
|
133
|
+
|
|
134
|
+
if (
|
|
135
|
+
neighbor_vector[0] == neighbor_vector[0]
|
|
136
|
+
and neighbor_vector[1] == neighbor_vector[1]
|
|
137
|
+
):
|
|
138
|
+
angle = np.arctan2(neighbor_vector[1], neighbor_vector[0])
|
|
139
|
+
relative_distance = np.sqrt(
|
|
140
|
+
neighbor_vector[0] ** 2 + neighbor_vector[1] ** 2
|
|
141
|
+
)
|
|
142
|
+
|
|
143
|
+
for z, cols in enumerate(centre_of_mass_columns):
|
|
144
|
+
|
|
145
|
+
mass_displacement_vector[z, 0] = (
|
|
146
|
+
coords_centre_of_mass[z][0] - coords_neighbor[0]
|
|
147
|
+
)
|
|
148
|
+
mass_displacement_vector[z, 1] = (
|
|
149
|
+
coords_centre_of_mass[z][1] - coords_neighbor[1]
|
|
150
|
+
)
|
|
151
|
+
|
|
152
|
+
dot_product_vector[z] = np.dot(
|
|
153
|
+
mass_displacement_vector[z], -neighbor_vector
|
|
154
|
+
)
|
|
155
|
+
cosine_dot_vector[z] = np.dot(
|
|
156
|
+
mass_displacement_vector[z], -neighbor_vector
|
|
157
|
+
) / (
|
|
158
|
+
np.linalg.norm(mass_displacement_vector[z])
|
|
159
|
+
* np.linalg.norm(-neighbor_vector)
|
|
160
|
+
)
|
|
161
|
+
|
|
162
|
+
relative_measurements.append(
|
|
163
|
+
{
|
|
164
|
+
"REFERENCE_ID": tid,
|
|
165
|
+
"NEIGHBOR_ID": nc,
|
|
166
|
+
"reference_population": reference_population,
|
|
167
|
+
"neighbor_population": neighbor_population,
|
|
168
|
+
"FRAME": t,
|
|
169
|
+
"distance": relative_distance,
|
|
170
|
+
"intersection": intersection,
|
|
171
|
+
"angle": angle * 180 / np.pi,
|
|
172
|
+
f"status_{neighborhood_description}": 1,
|
|
173
|
+
f"class_{neighborhood_description}": 0,
|
|
174
|
+
"reference_tracked": ref_tracked,
|
|
175
|
+
"neighbors_tracked": neigh_tracked,
|
|
176
|
+
}
|
|
177
|
+
)
|
|
178
|
+
for z, lbl in enumerate(centre_of_mass_labels):
|
|
179
|
+
relative_measurements[-1].update(
|
|
180
|
+
{
|
|
181
|
+
lbl
|
|
182
|
+
+ "_centre_of_mass_dot_product": dot_product_vector[z],
|
|
183
|
+
lbl
|
|
184
|
+
+ "_centre_of_mass_dot_cosine": cosine_dot_vector[z],
|
|
185
|
+
}
|
|
186
|
+
)
|
|
187
|
+
|
|
188
|
+
df_pairs = pd.DataFrame(relative_measurements)
|
|
189
|
+
|
|
190
|
+
return df_pairs
|
|
191
|
+
|
|
192
|
+
|
|
193
|
+
def measure_pair_signals_at_position(
|
|
194
|
+
pos, neighborhood_protocol, velocity_kwargs={"window": 3, "mode": "bi"}
|
|
195
|
+
):
|
|
196
|
+
|
|
197
|
+
reference_population = neighborhood_protocol["reference"]
|
|
198
|
+
neighbor_population = neighborhood_protocol["neighbor"]
|
|
199
|
+
neighborhood_type = neighborhood_protocol["type"]
|
|
200
|
+
neighborhood_distance = neighborhood_protocol["distance"]
|
|
201
|
+
neighborhood_description = neighborhood_protocol["description"]
|
|
202
|
+
|
|
203
|
+
relative_measurements = []
|
|
204
|
+
|
|
205
|
+
tab_ref = pos + os.sep.join(
|
|
206
|
+
["output", "tables", f"trajectories_{reference_population}.pkl"]
|
|
207
|
+
)
|
|
208
|
+
if os.path.exists(tab_ref):
|
|
209
|
+
df_reference = np.load(tab_ref, allow_pickle=True)
|
|
210
|
+
else:
|
|
211
|
+
df_reference = None
|
|
212
|
+
|
|
213
|
+
if os.path.exists(tab_ref.replace(reference_population, neighbor_population)):
|
|
214
|
+
df_neighbor = np.load(
|
|
215
|
+
tab_ref.replace(reference_population, neighbor_population),
|
|
216
|
+
allow_pickle=True,
|
|
217
|
+
)
|
|
218
|
+
else:
|
|
219
|
+
if os.path.exists(
|
|
220
|
+
tab_ref.replace(reference_population, neighbor_population).replace(
|
|
221
|
+
".pkl", ".csv"
|
|
222
|
+
)
|
|
223
|
+
):
|
|
224
|
+
df_neighbor = pd.read_csv(
|
|
225
|
+
tab_ref.replace(reference_population, neighbor_population).replace(
|
|
226
|
+
".pkl", ".csv"
|
|
227
|
+
)
|
|
228
|
+
)
|
|
229
|
+
else:
|
|
230
|
+
df_neighbor = None
|
|
231
|
+
|
|
232
|
+
if df_reference is None:
|
|
233
|
+
return None
|
|
234
|
+
|
|
235
|
+
assert str(neighborhood_description) in list(df_reference.columns)
|
|
236
|
+
neighborhood = df_reference.loc[:, f"{neighborhood_description}"].to_numpy()
|
|
237
|
+
|
|
238
|
+
ref_id_col = extract_identity_col(df_reference)
|
|
239
|
+
if ref_id_col is not None:
|
|
240
|
+
df_reference = df_reference.sort_values(by=[ref_id_col, "FRAME"])
|
|
241
|
+
|
|
242
|
+
ref_tracked = False
|
|
243
|
+
if ref_id_col == "TRACK_ID":
|
|
244
|
+
compute_velocity = True
|
|
245
|
+
ref_tracked = True
|
|
246
|
+
elif ref_id_col == "ID":
|
|
247
|
+
df_pairs = measure_pairs(pos, neighborhood_protocol)
|
|
248
|
+
return df_pairs
|
|
249
|
+
else:
|
|
250
|
+
print("ID or TRACK ID column could not be found in neighbor table. Abort.")
|
|
251
|
+
return None
|
|
252
|
+
|
|
253
|
+
print(f"Measuring pair signals...")
|
|
254
|
+
|
|
255
|
+
neigh_id_col = extract_identity_col(df_neighbor)
|
|
256
|
+
neigh_tracked = False
|
|
257
|
+
if neigh_id_col == "TRACK_ID":
|
|
258
|
+
compute_velocity = True
|
|
259
|
+
neigh_tracked = True
|
|
260
|
+
elif neigh_id_col == "ID":
|
|
261
|
+
df_pairs = measure_pairs(pos, neighborhood_protocol)
|
|
262
|
+
return df_pairs
|
|
263
|
+
else:
|
|
264
|
+
print("ID or TRACK ID column could not be found in neighbor table. Abort.")
|
|
265
|
+
return None
|
|
266
|
+
|
|
267
|
+
try:
|
|
268
|
+
for tid, group in df_reference.groupby(ref_id_col):
|
|
269
|
+
|
|
270
|
+
neighbor_dicts = group.loc[:, f"{neighborhood_description}"].values
|
|
271
|
+
timeline_reference = group["FRAME"].to_numpy()
|
|
272
|
+
coords_reference = group[["POSITION_X", "POSITION_Y"]].to_numpy()
|
|
273
|
+
if "area" in list(group.columns):
|
|
274
|
+
ref_area = group["area"].to_numpy()
|
|
275
|
+
else:
|
|
276
|
+
ref_area = [np.nan] * len(coords_reference)
|
|
277
|
+
|
|
278
|
+
neighbor_ids = []
|
|
279
|
+
neighbor_ids_per_t = []
|
|
280
|
+
intersection_values = []
|
|
281
|
+
|
|
282
|
+
time_of_first_entrance_in_neighborhood = {}
|
|
283
|
+
t_departure = {}
|
|
284
|
+
|
|
285
|
+
for t in range(len(timeline_reference)):
|
|
286
|
+
|
|
287
|
+
neighbors_at_t = neighbor_dicts[t]
|
|
288
|
+
neighs_t = []
|
|
289
|
+
if (
|
|
290
|
+
isinstance(neighbors_at_t, float)
|
|
291
|
+
or neighbors_at_t != neighbors_at_t
|
|
292
|
+
):
|
|
293
|
+
pass
|
|
294
|
+
else:
|
|
295
|
+
for neigh in neighbors_at_t:
|
|
296
|
+
if neigh["id"] not in neighbor_ids:
|
|
297
|
+
time_of_first_entrance_in_neighborhood[neigh["id"]] = t
|
|
298
|
+
if "intersection" in neigh:
|
|
299
|
+
intersection_values.append(
|
|
300
|
+
{
|
|
301
|
+
"frame": t,
|
|
302
|
+
"neigh_id": neigh["id"],
|
|
303
|
+
"intersection": neigh["intersection"],
|
|
304
|
+
}
|
|
305
|
+
)
|
|
306
|
+
else:
|
|
307
|
+
intersection_values.append(
|
|
308
|
+
{
|
|
309
|
+
"frame": t,
|
|
310
|
+
"neigh_id": neigh["id"],
|
|
311
|
+
"intersection": np.nan,
|
|
312
|
+
}
|
|
313
|
+
)
|
|
314
|
+
neighbor_ids.append(neigh["id"])
|
|
315
|
+
neighs_t.append(neigh["id"])
|
|
316
|
+
neighbor_ids_per_t.append(neighs_t)
|
|
317
|
+
|
|
318
|
+
intersection_values = pd.DataFrame(intersection_values)
|
|
319
|
+
|
|
320
|
+
# print(neighbor_ids_per_t)
|
|
321
|
+
unique_neigh = list(np.unique(neighbor_ids))
|
|
322
|
+
print(
|
|
323
|
+
f"Reference cell {tid}: found {len(unique_neigh)} neighbour cells: {unique_neigh}..."
|
|
324
|
+
)
|
|
325
|
+
|
|
326
|
+
neighbor_properties = df_neighbor.loc[
|
|
327
|
+
df_neighbor[neigh_id_col].isin(unique_neigh)
|
|
328
|
+
]
|
|
329
|
+
|
|
330
|
+
for nc, group_neigh in neighbor_properties.groupby(neigh_id_col):
|
|
331
|
+
|
|
332
|
+
coords_neighbor = group_neigh[["POSITION_X", "POSITION_Y"]].to_numpy()
|
|
333
|
+
timeline_neighbor = group_neigh["FRAME"].to_numpy()
|
|
334
|
+
if "area" in list(group_neigh.columns):
|
|
335
|
+
neigh_area = group_neigh["area"].to_numpy()
|
|
336
|
+
else:
|
|
337
|
+
neigh_area = [np.nan] * len(timeline_neighbor)
|
|
338
|
+
|
|
339
|
+
# # Perform timeline matching to have same start-end points and no gaps
|
|
340
|
+
full_timeline, _, _ = timeline_matching(
|
|
341
|
+
timeline_reference, timeline_neighbor
|
|
342
|
+
)
|
|
343
|
+
|
|
344
|
+
neighbor_vector = np.zeros((len(full_timeline), 2))
|
|
345
|
+
neighbor_vector[:, :] = np.nan
|
|
346
|
+
|
|
347
|
+
intersection_vector = np.zeros((len(full_timeline)))
|
|
348
|
+
intersection_vector[:] = np.nan
|
|
349
|
+
|
|
350
|
+
centre_of_mass_columns = [
|
|
351
|
+
(c, c.replace("POSITION_X", "POSITION_Y"))
|
|
352
|
+
for c in list(neighbor_properties.columns)
|
|
353
|
+
if c.endswith("centre_of_mass_POSITION_X")
|
|
354
|
+
]
|
|
355
|
+
centre_of_mass_labels = [
|
|
356
|
+
c.replace("_centre_of_mass_POSITION_X", "")
|
|
357
|
+
for c in list(neighbor_properties.columns)
|
|
358
|
+
if c.endswith("centre_of_mass_POSITION_X")
|
|
359
|
+
]
|
|
360
|
+
|
|
361
|
+
mass_displacement_vector = np.zeros(
|
|
362
|
+
(len(centre_of_mass_columns), len(full_timeline), 2)
|
|
363
|
+
)
|
|
364
|
+
mass_displacement_vector[:, :, :] = np.nan
|
|
365
|
+
|
|
366
|
+
dot_product_vector = np.zeros(
|
|
367
|
+
(len(centre_of_mass_columns), len(full_timeline))
|
|
368
|
+
)
|
|
369
|
+
dot_product_vector[:, :] = np.nan
|
|
370
|
+
|
|
371
|
+
cosine_dot_vector = np.zeros(
|
|
372
|
+
(len(centre_of_mass_columns), len(full_timeline))
|
|
373
|
+
)
|
|
374
|
+
cosine_dot_vector[:, :] = np.nan
|
|
375
|
+
|
|
376
|
+
coords_centre_of_mass = []
|
|
377
|
+
for col in centre_of_mass_columns:
|
|
378
|
+
coords_centre_of_mass.append(
|
|
379
|
+
group_neigh[[col[0], col[1]]].to_numpy()
|
|
380
|
+
)
|
|
381
|
+
|
|
382
|
+
# Relative distance
|
|
383
|
+
for t in range(len(full_timeline)):
|
|
384
|
+
|
|
385
|
+
if (
|
|
386
|
+
t in timeline_reference and t in timeline_neighbor
|
|
387
|
+
): # meaning position exists on both sides
|
|
388
|
+
|
|
389
|
+
idx_reference = list(timeline_reference).index(
|
|
390
|
+
t
|
|
391
|
+
) # index_reference[list(full_timeline).index(t)]
|
|
392
|
+
idx_neighbor = list(timeline_neighbor).index(
|
|
393
|
+
t
|
|
394
|
+
) # index_neighbor[list(full_timeline).index(t)]
|
|
395
|
+
|
|
396
|
+
neighbor_vector[t, 0] = (
|
|
397
|
+
coords_neighbor[idx_neighbor, 0]
|
|
398
|
+
- coords_reference[idx_reference, 0]
|
|
399
|
+
)
|
|
400
|
+
neighbor_vector[t, 1] = (
|
|
401
|
+
coords_neighbor[idx_neighbor, 1]
|
|
402
|
+
- coords_reference[idx_reference, 1]
|
|
403
|
+
)
|
|
404
|
+
|
|
405
|
+
for z, cols in enumerate(centre_of_mass_columns):
|
|
406
|
+
|
|
407
|
+
mass_displacement_vector[z, t, 0] = (
|
|
408
|
+
coords_centre_of_mass[z][idx_neighbor, 0]
|
|
409
|
+
- coords_neighbor[idx_neighbor, 0]
|
|
410
|
+
)
|
|
411
|
+
mass_displacement_vector[z, t, 1] = (
|
|
412
|
+
coords_centre_of_mass[z][idx_neighbor, 1]
|
|
413
|
+
- coords_neighbor[idx_neighbor, 1]
|
|
414
|
+
)
|
|
415
|
+
|
|
416
|
+
dot_product_vector[z, t] = np.dot(
|
|
417
|
+
mass_displacement_vector[z, t], -neighbor_vector[t]
|
|
418
|
+
)
|
|
419
|
+
cosine_dot_vector[z, t] = np.dot(
|
|
420
|
+
mass_displacement_vector[z, t], -neighbor_vector[t]
|
|
421
|
+
) / (
|
|
422
|
+
np.linalg.norm(mass_displacement_vector[z, t])
|
|
423
|
+
* np.linalg.norm(-neighbor_vector[t])
|
|
424
|
+
)
|
|
425
|
+
if tid == 44.0 and nc == 173.0:
|
|
426
|
+
print(
|
|
427
|
+
f"{centre_of_mass_columns[z]=} {mass_displacement_vector[z,t]=} {-neighbor_vector[t]=} {dot_product_vector[z,t]=} {cosine_dot_vector[z,t]=}"
|
|
428
|
+
)
|
|
429
|
+
|
|
430
|
+
angle = np.zeros(len(full_timeline))
|
|
431
|
+
angle[:] = np.nan
|
|
432
|
+
|
|
433
|
+
exclude = neighbor_vector[:, 1] != neighbor_vector[:, 1]
|
|
434
|
+
angle[~exclude] = np.arctan2(
|
|
435
|
+
neighbor_vector[:, 1][~exclude], neighbor_vector[:, 0][~exclude]
|
|
436
|
+
)
|
|
437
|
+
# print(f'Angle before unwrap: {angle}')
|
|
438
|
+
angle[~exclude] = np.unwrap(angle[~exclude])
|
|
439
|
+
# print(f'Angle after unwrap: {angle}')
|
|
440
|
+
relative_distance = np.sqrt(
|
|
441
|
+
neighbor_vector[:, 0] ** 2 + neighbor_vector[:, 1] ** 2
|
|
442
|
+
)
|
|
443
|
+
# print(f'Timeline: {full_timeline}; Distance: {relative_distance}')
|
|
444
|
+
|
|
445
|
+
if compute_velocity:
|
|
446
|
+
rel_velocity = derivative(
|
|
447
|
+
relative_distance, full_timeline, **velocity_kwargs
|
|
448
|
+
)
|
|
449
|
+
rel_velocity_long_timescale = derivative(
|
|
450
|
+
relative_distance, full_timeline, window=7, mode="bi"
|
|
451
|
+
)
|
|
452
|
+
# rel_velocity = np.insert(rel_velocity, 0, np.nan)[:-1]
|
|
453
|
+
|
|
454
|
+
angular_velocity = np.zeros(len(full_timeline))
|
|
455
|
+
angular_velocity[:] = np.nan
|
|
456
|
+
angular_velocity_long_timescale = np.zeros(len(full_timeline))
|
|
457
|
+
angular_velocity_long_timescale[:] = np.nan
|
|
458
|
+
|
|
459
|
+
angular_velocity[~exclude] = derivative(
|
|
460
|
+
angle[~exclude], full_timeline[~exclude], **velocity_kwargs
|
|
461
|
+
)
|
|
462
|
+
angular_velocity_long_timescale[~exclude] = derivative(
|
|
463
|
+
angle[~exclude], full_timeline[~exclude], window=7, mode="bi"
|
|
464
|
+
)
|
|
465
|
+
|
|
466
|
+
# angular_velocity = np.zeros(len(full_timeline))
|
|
467
|
+
# angular_velocity[:] = np.nan
|
|
468
|
+
|
|
469
|
+
# for t in range(1, len(relative_angle1)):
|
|
470
|
+
# if not np.isnan(relative_angle1[t]) and not np.isnan(relative_angle1[t - 1]):
|
|
471
|
+
# delta_angle = relative_angle1[t] - relative_angle1[t - 1]
|
|
472
|
+
# delta_time = full_timeline[t] - full_timeline[t - 1]
|
|
473
|
+
# if delta_time != 0:
|
|
474
|
+
# angular_velocity[t] = delta_angle / delta_time
|
|
475
|
+
|
|
476
|
+
duration_in_neigh = list(neighbor_ids).count(nc)
|
|
477
|
+
# print(nc, duration_in_neigh, ' frames')
|
|
478
|
+
|
|
479
|
+
cum_sum = 0
|
|
480
|
+
for t in range(len(full_timeline)):
|
|
481
|
+
|
|
482
|
+
if (
|
|
483
|
+
t in timeline_reference and t in timeline_neighbor
|
|
484
|
+
): # meaning position exists on both sides
|
|
485
|
+
|
|
486
|
+
idx_reference = list(timeline_reference).index(t)
|
|
487
|
+
idx_neighbor = list(timeline_neighbor).index(t)
|
|
488
|
+
inter = intersection_values.loc[
|
|
489
|
+
(intersection_values["neigh_id"] == nc)
|
|
490
|
+
& (intersection_values["frame"] == t),
|
|
491
|
+
"intersection",
|
|
492
|
+
].values
|
|
493
|
+
if len(inter) == 0:
|
|
494
|
+
inter = np.nan
|
|
495
|
+
else:
|
|
496
|
+
inter = inter[0]
|
|
497
|
+
|
|
498
|
+
neigh_inter_fraction = np.nan
|
|
499
|
+
if (
|
|
500
|
+
inter == inter
|
|
501
|
+
and neigh_area[idx_neighbor] == neigh_area[idx_neighbor]
|
|
502
|
+
):
|
|
503
|
+
neigh_inter_fraction = inter / neigh_area[idx_neighbor]
|
|
504
|
+
|
|
505
|
+
ref_inter_fraction = np.nan
|
|
506
|
+
if (
|
|
507
|
+
inter == inter
|
|
508
|
+
and ref_area[idx_reference] == ref_area[idx_reference]
|
|
509
|
+
):
|
|
510
|
+
ref_inter_fraction = inter / ref_area[idx_reference]
|
|
511
|
+
|
|
512
|
+
if nc in neighbor_ids_per_t[idx_reference]:
|
|
513
|
+
|
|
514
|
+
cum_sum += 1
|
|
515
|
+
relative_measurements.append(
|
|
516
|
+
{
|
|
517
|
+
"REFERENCE_ID": tid,
|
|
518
|
+
"NEIGHBOR_ID": nc,
|
|
519
|
+
"reference_population": reference_population,
|
|
520
|
+
"neighbor_population": neighbor_population,
|
|
521
|
+
"FRAME": t,
|
|
522
|
+
"distance": relative_distance[t],
|
|
523
|
+
"intersection": inter,
|
|
524
|
+
"reference_frac_area_intersection": ref_inter_fraction,
|
|
525
|
+
"neighbor_frac_area_intersection": neigh_inter_fraction,
|
|
526
|
+
"velocity": rel_velocity[t],
|
|
527
|
+
"velocity_smooth": rel_velocity_long_timescale[t],
|
|
528
|
+
"angle": angle[t] * 180 / np.pi,
|
|
529
|
+
#'angle-neigh-ref': angle[t] * 180 / np.pi,
|
|
530
|
+
"angular_velocity": angular_velocity[t],
|
|
531
|
+
"angular_velocity_smooth": angular_velocity_long_timescale[
|
|
532
|
+
t
|
|
533
|
+
],
|
|
534
|
+
f"status_{neighborhood_description}": 1,
|
|
535
|
+
f"residence_time_in_{neighborhood_description}": cum_sum,
|
|
536
|
+
f"class_{neighborhood_description}": 0,
|
|
537
|
+
f"t0_{neighborhood_description}": time_of_first_entrance_in_neighborhood[
|
|
538
|
+
nc
|
|
539
|
+
],
|
|
540
|
+
"reference_tracked": ref_tracked,
|
|
541
|
+
"neighbors_tracked": neigh_tracked,
|
|
542
|
+
}
|
|
543
|
+
)
|
|
544
|
+
for z, lbl in enumerate(centre_of_mass_labels):
|
|
545
|
+
relative_measurements[-1].update(
|
|
546
|
+
{
|
|
547
|
+
lbl
|
|
548
|
+
+ "_centre_of_mass_dot_product": dot_product_vector[
|
|
549
|
+
z, t
|
|
550
|
+
],
|
|
551
|
+
lbl
|
|
552
|
+
+ "_centre_of_mass_dot_cosine": cosine_dot_vector[
|
|
553
|
+
z, t
|
|
554
|
+
],
|
|
555
|
+
}
|
|
556
|
+
)
|
|
557
|
+
|
|
558
|
+
else:
|
|
559
|
+
relative_measurements.append(
|
|
560
|
+
{
|
|
561
|
+
"REFERENCE_ID": tid,
|
|
562
|
+
"NEIGHBOR_ID": nc,
|
|
563
|
+
"reference_population": reference_population,
|
|
564
|
+
"neighbor_population": neighbor_population,
|
|
565
|
+
"FRAME": t,
|
|
566
|
+
"distance": relative_distance[t],
|
|
567
|
+
"intersection": inter,
|
|
568
|
+
"reference_frac_area_intersection": ref_inter_fraction,
|
|
569
|
+
"neighbor_frac_area_intersection": neigh_inter_fraction,
|
|
570
|
+
"velocity": rel_velocity[t],
|
|
571
|
+
"velocity_smooth": rel_velocity_long_timescale[t],
|
|
572
|
+
"angle": angle[t] * 180 / np.pi,
|
|
573
|
+
#'angle-neigh-ref': angle[t] * 180 / np.pi,
|
|
574
|
+
"angular_velocity": angular_velocity[t],
|
|
575
|
+
"angular_velocity_smooth": angular_velocity_long_timescale[
|
|
576
|
+
t
|
|
577
|
+
],
|
|
578
|
+
f"status_{neighborhood_description}": 0,
|
|
579
|
+
f"residence_time_in_{neighborhood_description}": cum_sum,
|
|
580
|
+
f"class_{neighborhood_description}": 0,
|
|
581
|
+
f"t0_{neighborhood_description}": time_of_first_entrance_in_neighborhood[
|
|
582
|
+
nc
|
|
583
|
+
],
|
|
584
|
+
"reference_tracked": ref_tracked,
|
|
585
|
+
"neighbors_tracked": neigh_tracked,
|
|
586
|
+
}
|
|
587
|
+
)
|
|
588
|
+
for z, lbl in enumerate(centre_of_mass_labels):
|
|
589
|
+
relative_measurements[-1].update(
|
|
590
|
+
{
|
|
591
|
+
lbl
|
|
592
|
+
+ "_centre_of_mass_dot_product": dot_product_vector[
|
|
593
|
+
z, t
|
|
594
|
+
],
|
|
595
|
+
lbl
|
|
596
|
+
+ "_centre_of_mass_dot_cosine": cosine_dot_vector[
|
|
597
|
+
z, t
|
|
598
|
+
],
|
|
599
|
+
}
|
|
600
|
+
)
|
|
601
|
+
|
|
602
|
+
df_pairs = pd.DataFrame(relative_measurements)
|
|
603
|
+
|
|
604
|
+
return df_pairs
|
|
605
|
+
|
|
606
|
+
except KeyError:
|
|
607
|
+
print(
|
|
608
|
+
f"Neighborhood not found in data frame. Measurements for this neighborhood will not be calculated"
|
|
609
|
+
)
|
|
401
610
|
|
|
402
|
-
def timeline_matching(timeline1, timeline2):
|
|
403
611
|
|
|
404
|
-
|
|
405
|
-
|
|
406
|
-
|
|
407
|
-
|
|
408
|
-
|
|
409
|
-
|
|
410
|
-
|
|
411
|
-
|
|
412
|
-
|
|
413
|
-
|
|
414
|
-
|
|
415
|
-
|
|
416
|
-
|
|
417
|
-
|
|
418
|
-
|
|
419
|
-
|
|
420
|
-
|
|
421
|
-
|
|
422
|
-
|
|
423
|
-
|
|
424
|
-
|
|
425
|
-
|
|
426
|
-
|
|
427
|
-
|
|
428
|
-
|
|
429
|
-
|
|
430
|
-
|
|
431
|
-
|
|
432
|
-
|
|
433
|
-
|
|
434
|
-
|
|
435
|
-
|
|
436
|
-
|
|
437
|
-
|
|
438
|
-
|
|
439
|
-
|
|
440
|
-
|
|
441
|
-
|
|
442
|
-
|
|
443
|
-
|
|
444
|
-
|
|
445
|
-
|
|
446
|
-
|
|
447
|
-
|
|
448
|
-
|
|
449
|
-
|
|
450
|
-
|
|
451
|
-
|
|
452
|
-
|
|
453
|
-
|
|
612
|
+
def timeline_matching(timeline1, timeline2):
|
|
613
|
+
"""
|
|
614
|
+
Match two timelines and create a unified timeline with corresponding indices.
|
|
615
|
+
|
|
616
|
+
Parameters
|
|
617
|
+
----------
|
|
618
|
+
timeline1 : array-like
|
|
619
|
+
The first timeline to be matched.
|
|
620
|
+
timeline2 : array-like
|
|
621
|
+
The second timeline to be matched.
|
|
622
|
+
|
|
623
|
+
Returns
|
|
624
|
+
-------
|
|
625
|
+
tuple
|
|
626
|
+
A tuple containing:
|
|
627
|
+
|
|
628
|
+
- full_timeline : numpy.ndarray
|
|
629
|
+
The unified timeline spanning from the minimum to the maximum time point in the input timelines.
|
|
630
|
+
- index1 : list of int
|
|
631
|
+
The indices of `timeline1` in the `full_timeline`.
|
|
632
|
+
- index2 : list of int
|
|
633
|
+
The indices of `timeline2` in the `full_timeline`.
|
|
634
|
+
|
|
635
|
+
Examples
|
|
636
|
+
--------
|
|
637
|
+
>>> timeline1 = [1, 2, 5, 6]
|
|
638
|
+
>>> timeline2 = [2, 3, 4, 6]
|
|
639
|
+
>>> full_timeline, index1, index2 = timeline_matching(timeline1, timeline2)
|
|
640
|
+
>>> print(full_timeline)
|
|
641
|
+
[1 2 3 4 5 6]
|
|
642
|
+
>>> print(index1)
|
|
643
|
+
[0, 1, 4, 5]
|
|
644
|
+
>>> print(index2)
|
|
645
|
+
[1, 2, 3, 5]
|
|
646
|
+
|
|
647
|
+
Notes
|
|
648
|
+
-----
|
|
649
|
+
- The function combines the two timelines and generates a continuous range from the minimum to the maximum time point.
|
|
650
|
+
- It then finds the indices of the original timelines in this unified timeline.
|
|
651
|
+
- The function assumes that the input timelines consist of integer values.
|
|
652
|
+
|
|
653
|
+
"""
|
|
654
|
+
|
|
655
|
+
min_t = np.amin(np.concatenate((timeline1, timeline2)))
|
|
656
|
+
max_t = np.amax(np.concatenate((timeline1, timeline2)))
|
|
657
|
+
full_timeline = np.arange(min_t, max_t + 1)
|
|
658
|
+
|
|
659
|
+
index1 = [list(np.where(full_timeline == int(t))[0])[0] for t in timeline1]
|
|
660
|
+
index2 = [list(np.where(full_timeline == int(t))[0])[0] for t in timeline2]
|
|
661
|
+
|
|
662
|
+
return full_timeline, index1, index2
|
|
454
663
|
|
|
455
664
|
|
|
456
665
|
def rel_measure_at_position(pos):
|
|
457
666
|
|
|
458
|
-
|
|
459
|
-
|
|
460
|
-
|
|
461
|
-
|
|
462
|
-
|
|
463
|
-
|
|
464
|
-
|
|
465
|
-
|
|
667
|
+
pos = pos.replace("\\", "/")
|
|
668
|
+
pos = rf"{pos}"
|
|
669
|
+
assert os.path.exists(pos), f"Position {pos} is not a valid path."
|
|
670
|
+
if not pos.endswith("/"):
|
|
671
|
+
pos += "/"
|
|
672
|
+
script_path = os.sep.join([abs_path, "scripts", "measure_relative.py"])
|
|
673
|
+
cmd = f'python "{script_path}" --pos "{pos}"'
|
|
674
|
+
subprocess.call(cmd, shell=True)
|
|
466
675
|
|
|
467
676
|
|
|
468
677
|
# def mcf7_size_model(x,x0,x2):
|
|
@@ -520,265 +729,333 @@ def rel_measure_at_position(pos):
|
|
|
520
729
|
# probs.append(group)
|
|
521
730
|
# return probs
|
|
522
731
|
|
|
732
|
+
|
|
523
733
|
def update_effector_table(df_relative, df_effector):
|
|
524
|
-
|
|
525
|
-
|
|
526
|
-
|
|
527
|
-
|
|
528
|
-
|
|
529
|
-
|
|
530
|
-
|
|
531
|
-
|
|
532
|
-
|
|
533
|
-
|
|
534
|
-
|
|
535
|
-
|
|
536
|
-
|
|
537
|
-
|
|
538
|
-
|
|
539
|
-
|
|
540
|
-
|
|
541
|
-
|
|
542
|
-
|
|
543
|
-
|
|
544
|
-
|
|
545
|
-
|
|
546
|
-
|
|
547
|
-
|
|
548
|
-
|
|
549
|
-
|
|
550
|
-
|
|
551
|
-
|
|
552
|
-
|
|
553
|
-
|
|
554
|
-
|
|
555
|
-
|
|
556
|
-
|
|
557
|
-
|
|
558
|
-
|
|
559
|
-
|
|
560
|
-
|
|
561
|
-
|
|
562
|
-
|
|
563
|
-
|
|
564
|
-
|
|
565
|
-
|
|
566
|
-
|
|
567
|
-
|
|
568
|
-
|
|
569
|
-
|
|
570
|
-
|
|
571
|
-
|
|
572
|
-
|
|
573
|
-
|
|
574
|
-
|
|
575
|
-
|
|
576
|
-
|
|
577
|
-
|
|
578
|
-
|
|
579
|
-
|
|
580
|
-
|
|
581
|
-
|
|
582
|
-
|
|
583
|
-
|
|
584
|
-
|
|
585
|
-
|
|
586
|
-
|
|
587
|
-
|
|
588
|
-
|
|
589
|
-
|
|
590
|
-
|
|
591
|
-
|
|
592
|
-
|
|
593
|
-
|
|
594
|
-
|
|
595
|
-
|
|
596
|
-
|
|
597
|
-
|
|
598
|
-
|
|
599
|
-
|
|
600
|
-
|
|
601
|
-
|
|
602
|
-
|
|
603
|
-
|
|
604
|
-
|
|
605
|
-
|
|
606
|
-
|
|
607
|
-
|
|
608
|
-
|
|
609
|
-
|
|
610
|
-
|
|
611
|
-
|
|
612
|
-
|
|
613
|
-
|
|
614
|
-
|
|
615
|
-
|
|
616
|
-
|
|
617
|
-
|
|
618
|
-
|
|
619
|
-
|
|
620
|
-
|
|
621
|
-
|
|
622
|
-
|
|
623
|
-
|
|
624
|
-
|
|
625
|
-
|
|
626
|
-
|
|
627
|
-
|
|
628
|
-
|
|
629
|
-
|
|
630
|
-
|
|
631
|
-
|
|
632
|
-
|
|
633
|
-
|
|
634
|
-
|
|
635
|
-
|
|
636
|
-
|
|
637
|
-
|
|
638
|
-
|
|
639
|
-
|
|
640
|
-
|
|
641
|
-
|
|
642
|
-
|
|
643
|
-
|
|
644
|
-
|
|
645
|
-
|
|
646
|
-
|
|
647
|
-
|
|
648
|
-
|
|
649
|
-
|
|
650
|
-
|
|
651
|
-
|
|
652
|
-
|
|
653
|
-
|
|
654
|
-
|
|
655
|
-
|
|
656
|
-
|
|
657
|
-
|
|
658
|
-
|
|
659
|
-
|
|
660
|
-
|
|
661
|
-
|
|
662
|
-
|
|
663
|
-
|
|
664
|
-
|
|
665
|
-
|
|
666
|
-
|
|
667
|
-
|
|
668
|
-
|
|
669
|
-
|
|
670
|
-
|
|
671
|
-
|
|
672
|
-
|
|
673
|
-
|
|
674
|
-
|
|
675
|
-
|
|
676
|
-
|
|
677
|
-
|
|
678
|
-
|
|
679
|
-
|
|
680
|
-
|
|
681
|
-
|
|
682
|
-
|
|
683
|
-
|
|
684
|
-
|
|
685
|
-
|
|
686
|
-
|
|
687
|
-
|
|
688
|
-
|
|
689
|
-
|
|
690
|
-
|
|
691
|
-
|
|
692
|
-
|
|
693
|
-
|
|
734
|
+
df_effector["group_neighborhood"] = 1
|
|
735
|
+
effectors = np.unique(df_relative["EFFECTOR_ID"].to_numpy())
|
|
736
|
+
for effector in effectors:
|
|
737
|
+
try:
|
|
738
|
+
# Set group_neighborhood to 0 where TRACK_ID matches effector
|
|
739
|
+
df_effector.loc[
|
|
740
|
+
df_effector["TRACK_ID"] == effector, "group_neighborhood"
|
|
741
|
+
] = 0
|
|
742
|
+
except:
|
|
743
|
+
df_effector.loc[df_effector["ID"] == effector, "group_neighborhood"] = 0
|
|
744
|
+
return df_effector
|
|
745
|
+
|
|
746
|
+
|
|
747
|
+
def extract_neighborhoods_from_pickles(pos, populations=["targets", "effectors"]):
|
|
748
|
+
"""
|
|
749
|
+
Extract neighborhood protocols from pickle files located at a given position.
|
|
750
|
+
|
|
751
|
+
Parameters
|
|
752
|
+
----------
|
|
753
|
+
pos : str
|
|
754
|
+
The base directory path where the pickle files are located.
|
|
755
|
+
|
|
756
|
+
Returns
|
|
757
|
+
-------
|
|
758
|
+
list of dict
|
|
759
|
+
A list of dictionaries, each containing a neighborhood protocol. Each dictionary has the keys:
|
|
760
|
+
|
|
761
|
+
- 'reference' : str
|
|
762
|
+
The reference population ('targets' or 'effectors').
|
|
763
|
+
- 'neighbor' : str
|
|
764
|
+
The neighbor population.
|
|
765
|
+
- 'type' : str
|
|
766
|
+
The type of neighborhood ('circle' or 'contact').
|
|
767
|
+
- 'distance' : float
|
|
768
|
+
The distance parameter for the neighborhood.
|
|
769
|
+
- 'description' : str
|
|
770
|
+
The original neighborhood string.
|
|
771
|
+
|
|
772
|
+
Notes
|
|
773
|
+
-----
|
|
774
|
+
- The function checks for the existence of pickle files containing target and effector trajectory data.
|
|
775
|
+
- If the files exist, it loads the data and extracts columns that start with 'neighborhood'.
|
|
776
|
+
- The neighborhood settings are extracted using the `extract_neighborhood_settings` function.
|
|
777
|
+
- The function assumes the presence of subdirectories 'output/tables' under the provided `pos`.
|
|
778
|
+
|
|
779
|
+
Examples
|
|
780
|
+
--------
|
|
781
|
+
>>> protocols = extract_neighborhoods_from_pickles('/path/to/data')
|
|
782
|
+
>>> for protocol in protocols:
|
|
783
|
+
>>> print(protocol)
|
|
784
|
+
{'reference': 'targets', 'neighbor': 'targets', 'type': 'contact', 'distance': 5.0, 'description': 'neighborhood_self_contact_5_px'}
|
|
785
|
+
|
|
786
|
+
"""
|
|
787
|
+
|
|
788
|
+
neighborhood_protocols = []
|
|
789
|
+
|
|
790
|
+
for pop in populations:
|
|
791
|
+
tab_pop = pos + os.sep.join(["output", "tables", f"trajectories_{pop}.pkl"])
|
|
792
|
+
if os.path.exists(tab_pop):
|
|
793
|
+
df_pop = np.load(tab_pop, allow_pickle=True)
|
|
794
|
+
for column in list(df_pop.columns):
|
|
795
|
+
if column.startswith("neighborhood"):
|
|
796
|
+
neigh_protocol = extract_neighborhood_settings(
|
|
797
|
+
column, population=pop
|
|
798
|
+
)
|
|
799
|
+
neighborhood_protocols.append(neigh_protocol)
|
|
800
|
+
|
|
801
|
+
# tab_tc = pos + os.sep.join(['output', 'tables', 'trajectories_targets.pkl'])
|
|
802
|
+
# if os.path.exists(tab_tc):
|
|
803
|
+
# df_targets = np.load(tab_tc, allow_pickle=True)
|
|
804
|
+
# else:
|
|
805
|
+
# df_targets = None
|
|
806
|
+
# if os.path.exists(tab_tc.replace('targets','effectors')):
|
|
807
|
+
# df_effectors = np.load(tab_tc.replace('targets','effectors'), allow_pickle=True)
|
|
808
|
+
# else:
|
|
809
|
+
# df_effectors = None
|
|
810
|
+
|
|
811
|
+
# neighborhood_protocols=[]
|
|
812
|
+
|
|
813
|
+
# if df_targets is not None:
|
|
814
|
+
# for column in list(df_targets.columns):
|
|
815
|
+
# if column.startswith('neighborhood'):
|
|
816
|
+
# neigh_protocol = extract_neighborhood_settings(column, population='targets')
|
|
817
|
+
# neighborhood_protocols.append(neigh_protocol)
|
|
818
|
+
|
|
819
|
+
# if df_effectors is not None:
|
|
820
|
+
# for column in list(df_effectors.columns):
|
|
821
|
+
# if column.startswith('neighborhood'):
|
|
822
|
+
# neigh_protocol = extract_neighborhood_settings(column, population='effectors')
|
|
823
|
+
# neighborhood_protocols.append(neigh_protocol)
|
|
824
|
+
|
|
825
|
+
return neighborhood_protocols
|
|
826
|
+
|
|
827
|
+
|
|
828
|
+
def extract_neighborhood_settings(neigh_string, population="targets"):
|
|
829
|
+
"""
|
|
830
|
+
Extract neighborhood settings from a given string.
|
|
831
|
+
|
|
832
|
+
Parameters
|
|
833
|
+
----------
|
|
834
|
+
neigh_string : str
|
|
835
|
+
The string describing the neighborhood settings. Must start with 'neighborhood'.
|
|
836
|
+
population : str, optional
|
|
837
|
+
The population type ('targets' by default). Can be either 'targets' or 'effectors'.
|
|
838
|
+
|
|
839
|
+
Returns
|
|
840
|
+
-------
|
|
841
|
+
dict
|
|
842
|
+
A dictionary containing the neighborhood protocol with keys:
|
|
843
|
+
|
|
844
|
+
- 'reference' : str
|
|
845
|
+
The reference population.
|
|
846
|
+
- 'neighbor' : str
|
|
847
|
+
The neighbor population.
|
|
848
|
+
- 'type' : str
|
|
849
|
+
The type of neighborhood ('circle' or 'contact').
|
|
850
|
+
- 'distance' : float
|
|
851
|
+
The distance parameter for the neighborhood.
|
|
852
|
+
- 'description' : str
|
|
853
|
+
The original neighborhood string.
|
|
854
|
+
|
|
855
|
+
Raises
|
|
856
|
+
------
|
|
857
|
+
AssertionError
|
|
858
|
+
If the `neigh_string` does not start with 'neighborhood'.
|
|
859
|
+
|
|
860
|
+
Notes
|
|
861
|
+
-----
|
|
862
|
+
- The function determines the neighbor population based on the given population.
|
|
863
|
+
- The neighborhood type and distance are extracted from the `neigh_string`.
|
|
864
|
+
- The description field in the returned dictionary contains the original neighborhood string.
|
|
865
|
+
|
|
866
|
+
Examples
|
|
867
|
+
--------
|
|
868
|
+
>>> extract_neighborhood_settings('neighborhood_self_contact_5_px', 'targets')
|
|
869
|
+
{'reference': 'targets', 'neighbor': 'targets', 'type': 'contact', 'distance': 5.0, 'description': 'neighborhood_self_contact_5_px'}
|
|
870
|
+
|
|
871
|
+
"""
|
|
872
|
+
|
|
873
|
+
assert neigh_string.startswith("neighborhood")
|
|
874
|
+
print(f"{neigh_string=}")
|
|
875
|
+
|
|
876
|
+
if "_(" in neigh_string and ")_" in neigh_string:
|
|
877
|
+
# determine neigh pop from string
|
|
878
|
+
neighbor_population = neigh_string.split("_(")[-1].split(")_")[0].split("-")[-1]
|
|
879
|
+
print(f"{neighbor_population=}")
|
|
880
|
+
else:
|
|
881
|
+
# old method
|
|
882
|
+
if population == "targets":
|
|
883
|
+
neighbor_population = "effectors"
|
|
884
|
+
elif population == "effectors":
|
|
885
|
+
neighbor_population = "targets"
|
|
886
|
+
|
|
887
|
+
if "self" in neigh_string:
|
|
888
|
+
|
|
889
|
+
if "circle" in neigh_string:
|
|
890
|
+
|
|
891
|
+
distance = float(neigh_string.split("circle_")[1].replace("_px", ""))
|
|
892
|
+
neigh_protocol = {
|
|
893
|
+
"reference": population,
|
|
894
|
+
"neighbor": population,
|
|
895
|
+
"type": "circle",
|
|
896
|
+
"distance": distance,
|
|
897
|
+
"description": neigh_string,
|
|
898
|
+
}
|
|
899
|
+
elif "contact" in neigh_string:
|
|
900
|
+
distance = float(neigh_string.split("contact_")[1].replace("_px", ""))
|
|
901
|
+
neigh_protocol = {
|
|
902
|
+
"reference": population,
|
|
903
|
+
"neighbor": population,
|
|
904
|
+
"type": "contact",
|
|
905
|
+
"distance": distance,
|
|
906
|
+
"description": neigh_string,
|
|
907
|
+
}
|
|
908
|
+
else:
|
|
909
|
+
|
|
910
|
+
if "circle" in neigh_string:
|
|
911
|
+
|
|
912
|
+
distance = float(neigh_string.split("circle_")[1].replace("_px", ""))
|
|
913
|
+
neigh_protocol = {
|
|
914
|
+
"reference": population,
|
|
915
|
+
"neighbor": neighbor_population,
|
|
916
|
+
"type": "circle",
|
|
917
|
+
"distance": distance,
|
|
918
|
+
"description": neigh_string,
|
|
919
|
+
}
|
|
920
|
+
elif "contact" in neigh_string:
|
|
921
|
+
|
|
922
|
+
distance = float(neigh_string.split("contact_")[1].replace("_px", ""))
|
|
923
|
+
neigh_protocol = {
|
|
924
|
+
"reference": population,
|
|
925
|
+
"neighbor": neighbor_population,
|
|
926
|
+
"type": "contact",
|
|
927
|
+
"distance": distance,
|
|
928
|
+
"description": neigh_string,
|
|
929
|
+
}
|
|
930
|
+
|
|
931
|
+
return neigh_protocol
|
|
694
932
|
|
|
695
933
|
|
|
696
934
|
def expand_pair_table(data):
    """
    Expand a pair table by merging reference and neighbor trajectory data.

    For each unique (``reference_population``, ``neighbor_population``) pair
    and each ``position``, the corresponding
    ``<position>/output/tables/trajectories_<population>.csv`` files are read,
    their columns prefixed with ``reference_`` / ``neighbor_`` to avoid name
    collisions, and outer-merged onto the pair rows on
    (``REFERENCE_ID`` / ``NEIGHBOR_ID``, ``FRAME``).

    Parameters
    ----------
    data : pandas.DataFrame
        Pair table. Must contain the columns ``reference_population`` and
        ``neighbor_population`` (asserted), plus ``position``,
        ``REFERENCE_ID``, ``NEIGHBOR_ID`` and ``FRAME`` used below.

    Returns
    -------
    pandas.DataFrame
        Expanded table sorted by position, reference population, neighbor
        population, reference ID, neighbor ID and frame. Rows without values
        in ``REFERENCE_ID``, ``NEIGHBOR_ID``, ``reference_population`` or
        ``neighbor_population`` are dropped. Positions whose trajectory
        tables are missing on disk are skipped; if nothing could be expanded
        an empty DataFrame with the input columns is returned.

    Notes
    -----
    - Merges are outer joins so pair rows are retained even when a cell is
      absent from a trajectory table; invalid pairings are pruned afterwards.
    - The population columns of `data` are cast to ``str`` in place.

    Raises
    ------
    AssertionError
        If ``reference_population`` or ``neighbor_population`` is not found
        in the columns of `data`.
    """

    assert "reference_population" in list(
        data.columns
    ), "Please provide a valid pair table..."
    assert "neighbor_population" in list(
        data.columns
    ), "Please provide a valid pair table..."

    # In-place cast of the population columns to str so the groupby keys are
    # homogeneous; the __dict__ swap keeps the caller's reference valid.
    data.__dict__.update(
        data.astype({"reference_population": str, "neighbor_population": str}).__dict__
    )

    def _merge_columns(df):
        # Tracked data merges on TRACK_ID; untracked data (TRACK_ID absent or
        # all-NaN) falls back to the per-frame detection ID.
        if "TRACK_ID" in df.columns and not df["TRACK_ID"].isnull().all():
            return ["TRACK_ID", "FRAME"]
        return ["ID", "FRAME"]

    expanded_table = []

    for (ref_pop, neigh_pop), group in data.groupby(
        ["reference_population", "neighbor_population"]
    ):

        for pos, pos_group in group.groupby("position"):

            ref_tab = os.sep.join(
                [pos, "output", "tables", f"trajectories_{ref_pop}.csv"]
            )
            neigh_tab = os.sep.join(
                [pos, "output", "tables", f"trajectories_{neigh_pop}.csv"]
            )

            # Bug fix: previously the tables were only read when they existed,
            # leaving df_ref/df_neigh and the merge-column lists either
            # undefined (NameError) or stale from a previous position (silent
            # cross-position data corruption). Skip such positions instead.
            if not os.path.exists(ref_tab) or not os.path.exists(neigh_tab):
                print(f"Missing trajectory table for position {pos}; skipping...")
                continue

            df_ref = pd.read_csv(ref_tab)
            df_neigh = pd.read_csv(neigh_tab)

            ref_merge_cols = ["reference_" + c for c in _merge_columns(df_ref)]
            neigh_merge_cols = ["neighbor_" + c for c in _merge_columns(df_neigh)]

            df_ref = df_ref.add_prefix("reference_", axis=1)
            df_neigh = df_neigh.add_prefix("neighbor_", axis=1)

            merge_ref = pos_group.merge(
                df_ref,
                how="outer",
                left_on=["REFERENCE_ID", "FRAME"],
                right_on=ref_merge_cols,
                suffixes=("", "_reference"),
            )
            merge_neigh = merge_ref.merge(
                df_neigh,
                how="outer",
                left_on=["NEIGHBOR_ID", "FRAME"],
                right_on=neigh_merge_cols,
                suffixes=("_reference", "_neighbor"),
            )
            expanded_table.append(merge_neigh)

    # Robustness: pd.concat raises on an empty list; return an empty frame
    # with the input schema when no position could be expanded.
    if not expanded_table:
        return pd.DataFrame(columns=data.columns)

    df_expanded = pd.concat(expanded_table, axis=0, ignore_index=True)
    df_expanded = df_expanded.sort_values(
        by=[
            "position",
            "reference_population",
            "neighbor_population",
            "REFERENCE_ID",
            "NEIGHBOR_ID",
            "FRAME",
        ]
    )
    df_expanded = df_expanded.dropna(
        axis=0,
        subset=[
            "REFERENCE_ID",
            "NEIGHBOR_ID",
            "reference_population",
            "neighbor_population",
        ],
    )

    return df_expanded