celldetective 1.3.9.post5__py3-none-any.whl → 1.4.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- celldetective/__init__.py +0 -3
- celldetective/_version.py +1 -1
- celldetective/events.py +2 -4
- celldetective/exceptions.py +11 -0
- celldetective/extra_properties.py +132 -0
- celldetective/filters.py +7 -1
- celldetective/gui/InitWindow.py +37 -46
- celldetective/gui/__init__.py +3 -9
- celldetective/gui/about.py +19 -15
- celldetective/gui/analyze_block.py +34 -19
- celldetective/gui/base_annotator.py +786 -0
- celldetective/gui/base_components.py +23 -0
- celldetective/gui/classifier_widget.py +86 -94
- celldetective/gui/configure_new_exp.py +163 -46
- celldetective/gui/control_panel.py +76 -146
- celldetective/gui/{signal_annotator.py → event_annotator.py} +533 -1438
- celldetective/gui/generic_signal_plot.py +11 -13
- celldetective/gui/gui_utils.py +54 -23
- celldetective/gui/help/neighborhood.json +2 -2
- celldetective/gui/json_readers.py +5 -4
- celldetective/gui/layouts.py +265 -31
- celldetective/gui/{signal_annotator2.py → pair_event_annotator.py} +433 -635
- celldetective/gui/plot_measurements.py +21 -17
- celldetective/gui/plot_signals_ui.py +125 -72
- celldetective/gui/process_block.py +283 -188
- celldetective/gui/processes/compute_neighborhood.py +594 -0
- celldetective/gui/processes/downloader.py +37 -34
- celldetective/gui/processes/measure_cells.py +19 -8
- celldetective/gui/processes/segment_cells.py +47 -11
- celldetective/gui/processes/track_cells.py +18 -13
- celldetective/gui/seg_model_loader.py +21 -62
- celldetective/gui/settings/__init__.py +7 -0
- celldetective/gui/settings/_settings_base.py +70 -0
- celldetective/gui/{retrain_signal_model_options.py → settings/_settings_event_model_training.py} +54 -109
- celldetective/gui/{measurement_options.py → settings/_settings_measurements.py} +54 -92
- celldetective/gui/{neighborhood_options.py → settings/_settings_neighborhood.py} +10 -13
- celldetective/gui/settings/_settings_segmentation.py +49 -0
- celldetective/gui/{retrain_segmentation_model_options.py → settings/_settings_segmentation_model_training.py} +38 -92
- celldetective/gui/{signal_annotator_options.py → settings/_settings_signal_annotator.py} +78 -103
- celldetective/gui/{btrack_options.py → settings/_settings_tracking.py} +85 -116
- celldetective/gui/styles.py +2 -1
- celldetective/gui/survival_ui.py +49 -95
- celldetective/gui/tableUI.py +53 -25
- celldetective/gui/table_ops/__init__.py +0 -0
- celldetective/gui/table_ops/merge_groups.py +118 -0
- celldetective/gui/thresholds_gui.py +617 -1221
- celldetective/gui/viewers.py +107 -42
- celldetective/gui/workers.py +8 -4
- celldetective/io.py +137 -57
- celldetective/links/zenodo.json +145 -144
- celldetective/measure.py +94 -53
- celldetective/neighborhood.py +342 -268
- celldetective/preprocessing.py +56 -35
- celldetective/regionprops/_regionprops.py +16 -5
- celldetective/relative_measurements.py +50 -29
- celldetective/scripts/analyze_signals.py +4 -1
- celldetective/scripts/measure_cells.py +5 -5
- celldetective/scripts/measure_relative.py +20 -12
- celldetective/scripts/segment_cells.py +4 -10
- celldetective/scripts/segment_cells_thresholds.py +3 -3
- celldetective/scripts/track_cells.py +10 -8
- celldetective/scripts/train_segmentation_model.py +18 -6
- celldetective/signals.py +29 -14
- celldetective/tracking.py +14 -3
- celldetective/utils.py +91 -62
- {celldetective-1.3.9.post5.dist-info → celldetective-1.4.1.dist-info}/METADATA +24 -16
- celldetective-1.4.1.dist-info/RECORD +123 -0
- {celldetective-1.3.9.post5.dist-info → celldetective-1.4.1.dist-info}/WHEEL +1 -1
- tests/gui/__init__.py +0 -0
- tests/gui/test_new_project.py +228 -0
- tests/gui/test_project.py +99 -0
- tests/test_preprocessing.py +2 -2
- celldetective/models/segmentation_effectors/ricm_bf_all_last/config_input.json +0 -79
- celldetective/models/segmentation_effectors/ricm_bf_all_last/ricm_bf_all_last +0 -0
- celldetective/models/segmentation_effectors/ricm_bf_all_last/training_instructions.json +0 -37
- celldetective/models/segmentation_effectors/test-transfer/config_input.json +0 -39
- celldetective/models/segmentation_effectors/test-transfer/test-transfer +0 -0
- celldetective/models/signal_detection/NucCond/classification_loss.png +0 -0
- celldetective/models/signal_detection/NucCond/classifier.h5 +0 -0
- celldetective/models/signal_detection/NucCond/config_input.json +0 -1
- celldetective/models/signal_detection/NucCond/log_classifier.csv +0 -126
- celldetective/models/signal_detection/NucCond/log_regressor.csv +0 -282
- celldetective/models/signal_detection/NucCond/regression_loss.png +0 -0
- celldetective/models/signal_detection/NucCond/regressor.h5 +0 -0
- celldetective/models/signal_detection/NucCond/scores.npy +0 -0
- celldetective/models/signal_detection/NucCond/test_confusion_matrix.png +0 -0
- celldetective/models/signal_detection/NucCond/test_regression.png +0 -0
- celldetective/models/signal_detection/NucCond/validation_confusion_matrix.png +0 -0
- celldetective/models/signal_detection/NucCond/validation_regression.png +0 -0
- celldetective-1.3.9.post5.dist-info/RECORD +0 -129
- tests/test_qt.py +0 -103
- {celldetective-1.3.9.post5.dist-info → celldetective-1.4.1.dist-info}/entry_points.txt +0 -0
- {celldetective-1.3.9.post5.dist-info → celldetective-1.4.1.dist-info/licenses}/LICENSE +0 -0
- {celldetective-1.3.9.post5.dist-info → celldetective-1.4.1.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,594 @@
|
|
|
1
|
+
from multiprocessing import Process
|
|
2
|
+
import time
|
|
3
|
+
import os
|
|
4
|
+
|
|
5
|
+
from celldetective.io import locate_labels, get_position_table, get_position_pickle
|
|
6
|
+
|
|
7
|
+
from tqdm import tqdm
|
|
8
|
+
import numpy as np
|
|
9
|
+
import pandas as pd
|
|
10
|
+
from art import tprint
|
|
11
|
+
|
|
12
|
+
from celldetective.neighborhood import _fill_distance_neighborhood_at_t, set_live_status, compute_attention_weight, \
|
|
13
|
+
compute_neighborhood_metrics, mean_neighborhood_after_event, \
|
|
14
|
+
mean_neighborhood_before_event, _compute_mask_contact_dist_map, _fill_contact_neighborhood_at_t
|
|
15
|
+
from celldetective.utils import extract_identity_col
|
|
16
|
+
from scipy.spatial.distance import cdist
|
|
17
|
+
|
|
18
|
+
class NeighborhoodProcess(Process):
    """Background worker that computes cell-neighborhood relations for one position.

    Runs in a separate OS process (`multiprocessing.Process`); progress is
    reported to the parent through `self.queue` as
    ``[percent_done, predicted_remaining_seconds]`` pairs.
    """

    def __init__(self, queue=None, process_args=None):
        """Store the progress queue and promote `process_args` to attributes.

        Parameters
        ----------
        queue : multiprocessing queue or None
            Channel used by the computation methods to push progress updates.
        process_args : dict or None
            Arbitrary configuration; every key/value pair becomes an instance
            attribute (`run` expects at least `pos`, `img_shape`, `protocol`).
        """

        super().__init__()

        self.queue = queue

        # Promote every provided argument to an instance attribute.
        if process_args is not None:
            for key, value in process_args.items():
                setattr(self, key, value)

        # NOTE(review): assigned after the loop above, so a `column_labels`
        # entry in `process_args` would be silently overwritten — confirm intended.
        self.column_labels = {'track': "TRACK_ID", 'time': 'FRAME', 'x': 'POSITION_X', 'y': 'POSITION_Y'}

        # ASCII banner in the console (art.tprint).
        tprint("Neighborhood")

        # Progress bookkeeping: percentage completed and start timestamp.
        self.sum_done = 0
        self.t0 = time.time()
|
|
36
|
+
|
|
37
|
+
def mask_contact_neighborhood(self, setA, setB, labelsA, labelsB, distance, mode='two-pop', status=None,
                              not_status_option=None, compute_cum_sum=True,
                              attention_weight=True, symmetrize=True, include_dead_weight=True,
                              column_labels=None):
    """Compute mask-contact neighborhoods between two cell populations.

    For each requested contact distance, builds a per-frame distance/intersection
    map from the segmentation masks and writes the matched neighbors into a new
    object-dtype `neighborhood_*_contact_{d}_px` column of both tables.
    Progress is pushed to `self.queue`.

    Parameters
    ----------
    setA, setB : pandas.DataFrame or None
        Measurement tables of the reference and neighbor populations.
    labelsA, labelsB : sequence of label images (indexed by frame) or None
        `labelsB=None` means the self-neighborhood case (one population).
    distance : number or list of numbers
        Contact distance(s) in pixels.
    mode : {'two-pop', 'self'}
        Naming scheme for the neighborhood column.
    status, not_status_option :
        Passed to `set_live_status` to derive per-cell live/dead status columns.
    compute_cum_sum : bool
        Cumulative neighbor counting; forced off when data is untracked.
    attention_weight, symmetrize, include_dead_weight : bool
        Neighborhood weighting options, forwarded to the fill helpers.
    column_labels : dict or None
        Column naming map; defaults to the TrackMate-style names.

    Returns
    -------
    (setA, setB) with neighborhood columns filled, or (None, None) on error.
    """

    # Avoid the shared-mutable-default pitfall: build the default map per call.
    if column_labels is None:
        column_labels = {'track': "TRACK_ID", 'time': 'FRAME', 'x': 'POSITION_X', 'y': 'POSITION_Y', 'mask_id': 'class_id'}

    if setA is not None and setB is not None:
        setA, setB, status = set_live_status(setA, setB, status, not_status_option)
    else:
        return None, None

    # Normalize the distance option to a list.
    if not isinstance(distance, list):
        distance = [distance]

    # Resolve, per table, which identity column to use (track ID vs detection ID).
    cl = []
    for s in [setA, setB]:

        temp_column_labels = column_labels.copy()

        id_col = extract_identity_col(s)
        temp_column_labels.update({'track': id_col})
        if id_col == 'ID':
            # Untracked data: cumulative counting over time is meaningless.
            compute_cum_sum = False

        cl.append(temp_column_labels)

    # Drop cells without an identity.
    setA = setA.loc[~setA[cl[0]['track']].isnull(), :].copy()
    setB = setB.loc[~setB[cl[1]['track']].isnull(), :].copy()

    # Self-neighborhood: reuse population A's labels frame by frame.
    if labelsB is None:
        labelsB = [None] * len(labelsA)

    for d in distance:
        # loop over each provided distance
        if mode == 'two-pop':
            neigh_col = f'neighborhood_2_contact_{d}_px'
        elif mode == 'self':
            neigh_col = f'neighborhood_self_contact_{d}_px'
        else:
            print("Please provide a valid mode between `two-pop` and `self`...")
            # BUGFIX: return a 2-tuple; callers unpack `setA, setB = ...` and a
            # bare `return None` raised TypeError on unpacking.
            return None, None

        # Object dtype: each cell stores a list of neighbor records.
        setA[neigh_col] = np.nan
        setA[neigh_col] = setA[neigh_col].astype(object)

        setB[neigh_col] = np.nan
        setB[neigh_col] = setB[neigh_col].astype(object)

        # Loop over each available timestep.
        timeline = np.unique(
            np.concatenate([setA[cl[0]['time']].to_numpy(), setB[cl[1]['time']].to_numpy()])).astype(
            int)

        self.sum_done = 0
        self.t0 = time.time()

        for t in tqdm(timeline):

            setA_t = setA.loc[setA[cl[0]['time']] == t, :].copy()
            setB_t = setB.loc[setB[cl[1]['time']] == t, :].copy()

            if len(setA_t) > 0 and len(setB_t) > 0:
                dist_map, intersection_map = _compute_mask_contact_dist_map(setA_t, setB_t, labelsA[t], labelsB[t], distance=d, mode=mode, column_labelsA=cl[0], column_labelsB=cl[1])

                # Large sentinel: contact maps already encode reachability, so
                # the distance filter is effectively disabled.
                d_filter = 1.0E05
                if attention_weight:
                    status_A = setA_t[status[0]].to_numpy()
                    ids_A = setA_t[cl[0]["track"]].to_numpy()
                    weights, closest_A = compute_attention_weight(dist_map, d_filter, status_A, ids_A, axis=1, include_dead_weight=include_dead_weight)
                else:
                    weights = None
                    closest_A = None

                _fill_contact_neighborhood_at_t(t, setA, setB, dist_map, intersection_map=intersection_map, attention_weight=attention_weight, include_dead_weight=include_dead_weight, symmetrize=symmetrize, compute_cum_sum=compute_cum_sum, weights=weights, closest_A=closest_A, neigh_col=neigh_col, column_labelsA=cl[0], column_labelsB=cl[1], statusA=status[0], statusB=status[1], d_filter=d_filter)

            # Progress + naive remaining-time estimate, pushed to the GUI.
            self.sum_done += 1 / len(timeline) * 100
            mean_exec_per_step = (time.time() - self.t0) / (self.sum_done * len(timeline) / 100 + 1)
            pred_time = (len(timeline) - (self.sum_done * len(timeline) / 100 + 1)) * mean_exec_per_step
            self.queue.put([self.sum_done, pred_time])

    return setA, setB
|
|
119
|
+
|
|
120
|
+
def distance_cut_neighborhood(self, setA, setB, distance, mode='two-pop', status=None, not_status_option=None,
                              compute_cum_sum=True,
                              attention_weight=True, symmetrize=True, include_dead_weight=True,
                              column_labels=None):
    """Compute isotropic (circle) neighborhoods between two cell populations.

    For each requested radius, matches cells of `setA` with cells of `setB`
    closer than that radius (Euclidean distance between centroids) and stores
    the neighbor records in a new object-dtype
    `neighborhood_*_circle_{d}_px` column. Progress is pushed to `self.queue`.

    Parameters
    ----------
    setA, setB : pandas.DataFrame or None
        Measurement tables of the reference and neighbor populations.
    distance : number or list of numbers
        Neighborhood radius / radii in pixels.
    mode : {'two-pop', 'self'}
        Naming scheme for the neighborhood column.
    status, not_status_option :
        Passed to `set_live_status` to derive per-cell live/dead status columns.
    compute_cum_sum : bool
        Cumulative neighbor counting; forced off when data is untracked.
    attention_weight, symmetrize, include_dead_weight : bool
        Neighborhood weighting options, forwarded to the fill helper.
    column_labels : dict or None
        Column naming map; defaults to the TrackMate-style names.

    Returns
    -------
    (setA, setB) with neighborhood columns filled, or (None, None) on error.
    """

    # Avoid the shared-mutable-default pitfall: build the default map per call.
    if column_labels is None:
        column_labels = {'track': "TRACK_ID", 'time': 'FRAME', 'x': 'POSITION_X', 'y': 'POSITION_Y'}

    # Check live_status option
    if setA is not None and setB is not None:
        setA, setB, status = set_live_status(setA, setB, status, not_status_option)
    else:
        return None, None

    # Check distance option
    if not isinstance(distance, list):
        distance = [distance]

    for d in distance:
        # loop over each provided distance

        if mode == 'two-pop':
            neigh_col = f'neighborhood_2_circle_{d}_px'
        elif mode == 'self':
            neigh_col = f'neighborhood_self_circle_{d}_px'
        else:
            # BUGFIX: an invalid mode previously fell through and raised
            # NameError on the undefined `neigh_col`; fail explicitly instead.
            print("Please provide a valid mode between `two-pop` and `self`...")
            return None, None

        cl = []
        for s in [setA, setB]:

            # Check whether data can be tracked
            temp_column_labels = column_labels.copy()

            id_col = extract_identity_col(s)
            temp_column_labels.update({'track': id_col})
            if id_col == 'ID':
                compute_cum_sum = False  # if no tracking data then cum_sum is not relevant
            cl.append(temp_column_labels)

            # Remove nan tracks (cells that do not belong to a track)
            s[neigh_col] = np.nan
            s[neigh_col] = s[neigh_col].astype(object)
            s.dropna(subset=[cl[-1]['track']], inplace=True)

        # Loop over each available timestep
        timeline = np.unique(
            np.concatenate([setA[cl[0]['time']].to_numpy(), setB[cl[1]['time']].to_numpy()])).astype(
            int)

        self.sum_done = 0
        self.t0 = time.time()

        for t in tqdm(timeline):

            coordinates_A = setA.loc[setA[cl[0]['time']] == t, [cl[0]['x'], cl[0]['y']]].to_numpy()
            ids_A = setA.loc[setA[cl[0]['time']] == t, cl[0]['track']].to_numpy()
            status_A = setA.loc[setA[cl[0]['time']] == t, status[0]].to_numpy()

            coordinates_B = setB.loc[setB[cl[1]['time']] == t, [cl[1]['x'], cl[1]['y']]].to_numpy()
            ids_B = setB.loc[setB[cl[1]['time']] == t, cl[1]['track']].to_numpy()

            if len(ids_A) > 0 and len(ids_B) > 0:

                # compute distance matrix
                dist_map = cdist(coordinates_A, coordinates_B, metric="euclidean")

                # BUGFIX: `weights`/`closest_A` were undefined (NameError) when
                # attention_weight=False; default them before the conditional.
                weights, closest_A = None, None
                if attention_weight:
                    weights, closest_A = compute_attention_weight(dist_map, d, status_A, ids_A, axis=1,
                                                                  include_dead_weight=include_dead_weight)

                _fill_distance_neighborhood_at_t(t, setA, setB, dist_map,
                                                 attention_weight=attention_weight,
                                                 include_dead_weight=include_dead_weight, symmetrize=symmetrize,
                                                 compute_cum_sum=compute_cum_sum, weights=weights,
                                                 closest_A=closest_A,
                                                 neigh_col=neigh_col, column_labelsA=cl[0], column_labelsB=cl[1],
                                                 statusA=status[0], statusB=status[1], distance=d)

            # Progress + naive remaining-time estimate, pushed to the GUI.
            # (Removed leftover per-frame debug print of sum_done/pred_time.)
            self.sum_done += 1 / len(timeline) * 100
            mean_exec_per_step = (time.time() - self.t0) / (self.sum_done * len(timeline) / 100 + 1)
            pred_time = (len(timeline) - (self.sum_done * len(timeline) / 100 + 1)) * mean_exec_per_step
            self.queue.put([self.sum_done, pred_time])

    return setA, setB
|
|
201
|
+
|
|
202
|
+
|
|
203
|
+
def compute_neighborhood_at_position(self, pos, distance, population=['targets', 'effectors'], theta_dist=None,
                                     img_shape=(2048, 2048), return_tables=False, clear_neigh=False,
                                     event_time_col=None,
                                     neighborhood_kwargs={'mode': 'two-pop', 'status': None,
                                                          'not_status_option': None,
                                                          'include_dead_weight': True, "compute_cum_sum": False,
                                                          "attention_weight": True, 'symmetrize': True}):
    """Compute circle (distance-cut) neighborhoods for one experiment position.

    Loads the population tables of `pos`, merges any previously computed
    neighborhood columns from the pickle files, runs
    `distance_cut_neighborhood`, derives counting metrics and (for tracked
    data) mean-neighborhood-before/after-event statistics, then writes the
    results back (pickle with neighborhood columns, csv without).

    Parameters
    ----------
    pos : str
        Path to the position folder.
    distance : number or list of numbers
        Neighborhood radius / radii in pixels.
    population : str or [str, str]
        Reference and neighbor population names; equal names imply 'self' mode.
    theta_dist : number, list or None
        Edge-exclusion thresholds; defaults to 0.9 * distance (currently unused
        filter, kept for interface stability).
    img_shape : tuple
        Image shape (kept for the commented-out edge filter).
    return_tables : bool
        If True, return the updated (df_A, df_B).
    clear_neigh : bool
        If True, delete previous pickle/pair tables and neighborhood columns.
    event_time_col : str or None
        Event-time column used for before/after-event averaging.
    neighborhood_kwargs : dict
        Options forwarded to `distance_cut_neighborhood`.
    """

    # BUGFIX: `.update(...)` below used to mutate the shared default dict, so
    # the mode chosen on one call leaked into all subsequent calls. Copy first.
    neighborhood_kwargs = dict(neighborhood_kwargs)

    pos = pos.replace('\\', '/')
    pos = rf"{pos}"
    assert os.path.exists(pos), f'Position {pos} is not a valid path.'

    if isinstance(population, str):
        population = [population, population]

    if not isinstance(distance, list):
        distance = [distance]
    if theta_dist is not None and not isinstance(theta_dist, list):
        theta_dist = [theta_dist]

    if theta_dist is None:
        theta_dist = [0.9 * d for d in distance]
    assert len(theta_dist) == len(distance), 'Incompatible number of distances and number of edge thresholds.'

    # Same population on both sides means a self-neighborhood.
    if population[0] == population[1]:
        neighborhood_kwargs.update({'mode': 'self'})
    else:
        neighborhood_kwargs.update({'mode': 'two-pop'})

    df_A, path_A = get_position_table(pos, population=population[0], return_path=True)
    df_B, path_B = get_position_table(pos, population=population[1], return_path=True)
    if df_A is None or df_B is None:
        return None

    if clear_neigh:
        # Wipe previous neighborhood artifacts (pickles + pair table).
        if os.path.exists(path_A.replace('.csv', '.pkl')):
            os.remove(path_A.replace('.csv', '.pkl'))
        if os.path.exists(path_B.replace('.csv', '.pkl')):
            os.remove(path_B.replace('.csv', '.pkl'))
        df_pair, pair_path = get_position_table(pos, population='pairs', return_path=True)
        if df_pair is not None:
            os.remove(pair_path)

    df_A_pkl = get_position_pickle(pos, population=population[0], return_path=False)
    df_B_pkl = get_position_pickle(pos, population=population[1], return_path=False)

    # Recover previously computed neighborhood columns from the pickles.
    if df_A_pkl is not None:
        pkl_columns = np.array(df_A_pkl.columns)
        neigh_columns = np.array([c.startswith('neighborhood') for c in pkl_columns])
        cols = list(pkl_columns[neigh_columns]) + ['FRAME']

        id_col = extract_identity_col(df_A_pkl)
        cols.append(id_col)
        on_cols = [id_col, 'FRAME']

        print(f'Recover {cols} from the pickle file...')
        try:
            df_A = pd.merge(df_A, df_A_pkl.loc[:, cols], how="outer", on=on_cols)
            print(df_A.columns)
        except Exception as e:
            print(f'Failure to merge pickle and csv files: {e}')

    if df_B_pkl is not None and df_B is not None:
        pkl_columns = np.array(df_B_pkl.columns)
        neigh_columns = np.array([c.startswith('neighborhood') for c in pkl_columns])
        cols = list(pkl_columns[neigh_columns]) + ['FRAME']

        id_col = extract_identity_col(df_B_pkl)
        cols.append(id_col)
        on_cols = [id_col, 'FRAME']

        print(f'Recover {cols} from the pickle file...')
        try:
            df_B = pd.merge(df_B, df_B_pkl.loc[:, cols], how="outer", on=on_cols)
        except Exception as e:
            print(f'Failure to merge pickle and csv files: {e}')

    if clear_neigh:
        # Also drop any neighborhood columns that survived the merges.
        unwanted = df_A.columns[df_A.columns.str.contains('neighborhood')]
        df_A = df_A.drop(columns=unwanted)
        unwanted = df_B.columns[df_B.columns.str.contains('neighborhood')]
        df_B = df_B.drop(columns=unwanted)

    df_A, df_B = self.distance_cut_neighborhood(df_A, df_B, distance, **neighborhood_kwargs)

    if df_A is None or df_B is None or len(df_A) == 0:
        return None

    for td, d in zip(theta_dist, distance):

        if neighborhood_kwargs['mode'] == 'two-pop':
            neigh_col = f'neighborhood_2_circle_{d}_px'
        elif neighborhood_kwargs['mode'] == 'self':
            neigh_col = f'neighborhood_self_circle_{d}_px'

        # edge_filter_A = (df_A['POSITION_X'] > td)&(df_A['POSITION_Y'] > td)&(df_A['POSITION_Y'] < (img_shape[0] - td))&(df_A['POSITION_X'] < (img_shape[1] - td))
        # edge_filter_B = (df_B['POSITION_X'] > td)&(df_B['POSITION_Y'] > td)&(df_B['POSITION_Y'] < (img_shape[0] - td))&(df_B['POSITION_X'] < (img_shape[1] - td))
        # df_A.loc[~edge_filter_A, neigh_col] = np.nan
        # df_B.loc[~edge_filter_B, neigh_col] = np.nan

        print('Count neighborhood...')
        df_A = compute_neighborhood_metrics(df_A, neigh_col, metrics=['inclusive', 'exclusive', 'intermediate'],
                                            decompose_by_status=True)
        # if neighborhood_kwargs['symmetrize']:
        #     df_B = compute_neighborhood_metrics(df_B, neigh_col, metrics=['inclusive','exclusive','intermediate'], decompose_by_status=True)
        print('Done...')

        # Before/after-event averaging only makes sense on tracked data.
        if 'TRACK_ID' in list(df_A.columns):
            if not np.all(df_A['TRACK_ID'].isnull()):
                print('Estimate average neighborhood before/after event...')
                df_A = mean_neighborhood_before_event(df_A, neigh_col, event_time_col)
                if event_time_col is not None:
                    df_A = mean_neighborhood_after_event(df_A, neigh_col, event_time_col)
                print('Done...')

    if not population[0] == population[1]:
        # Remove neighborhood column from neighbor table, rename with actual population name
        for td, d in zip(theta_dist, distance):
            if neighborhood_kwargs['mode'] == 'two-pop':
                neigh_col = f'neighborhood_2_circle_{d}_px'
                new_neigh_col = neigh_col.replace('_2_', f'_({population[0]}-{population[1]})_')
                df_A = df_A.rename(columns={neigh_col: new_neigh_col})
            elif neighborhood_kwargs['mode'] == 'self':
                neigh_col = f'neighborhood_self_circle_{d}_px'
            df_B = df_B.drop(columns=[neigh_col])
        df_B.to_pickle(path_B.replace('.csv', '.pkl'))

        cols_to_rename = [c for c in list(df_A.columns) if
                          c.startswith('intermediate_count_') or c.startswith('inclusive_count_') or c.startswith(
                              'exclusive_count_') or c.startswith('mean_count_')]
        new_col_names = [c.replace('_2_', f'_({population[0]}-{population[1]})_') for c in cols_to_rename]
        new_name_map = {}
        for k, c in enumerate(cols_to_rename):
            new_name_map.update({c: new_col_names[k]})
        df_A = df_A.rename(columns=new_name_map)

    # Persist: pickle keeps the raw neighborhood columns, csv drops them.
    df_A.to_pickle(path_A.replace('.csv', '.pkl'))

    unwanted = df_A.columns[df_A.columns.str.startswith('neighborhood_')]
    df_A2 = df_A.drop(columns=unwanted)
    df_A2.to_csv(path_A, index=False)

    if not population[0] == population[1]:
        unwanted = df_B.columns[df_B.columns.str.startswith('neighborhood_')]
        df_B_csv = df_B.drop(unwanted, axis=1, inplace=False)
        df_B_csv.to_csv(path_B, index=False)

    if return_tables:
        return df_A, df_B
|
|
353
|
+
|
|
354
|
+
def compute_contact_neighborhood_at_position(self, pos, distance, population=['targets', 'effectors'], theta_dist=None,
                                             img_shape=(2048, 2048), return_tables=False, clear_neigh=False,
                                             event_time_col=None,
                                             neighborhood_kwargs={'mode': 'two-pop', 'status': None,
                                                                  'not_status_option': None,
                                                                  'include_dead_weight': True,
                                                                  "compute_cum_sum": False,
                                                                  "attention_weight": True, 'symmetrize': True}):
    """Compute mask-contact neighborhoods for one experiment position.

    Mirrors `compute_neighborhood_at_position` but matches cells through their
    segmentation masks (`mask_contact_neighborhood`) instead of a centroid
    distance cut, and therefore also loads the label stacks of both
    populations. Results are written back to pickle (with neighborhood
    columns) and csv (without).

    Parameters
    ----------
    pos : str
        Path to the position folder.
    distance : number or list of numbers
        Contact distance(s) in pixels.
    population : str or [str, str]
        Reference and neighbor population names; equal names imply 'self' mode.
    theta_dist : number, list or None
        Edge-exclusion thresholds; defaults to 0 (edge filter disabled).
    img_shape : tuple
        Image shape (kept for the commented-out edge filter).
    return_tables : bool
        If True, return the updated (df_A, df_B).
    clear_neigh : bool
        If True, delete previous pickle/pair tables and neighborhood columns.
    event_time_col : str or None
        Event-time column used for before/after-event averaging.
    neighborhood_kwargs : dict
        Options forwarded to `mask_contact_neighborhood`.
    """

    # BUGFIX: `.update(...)` below used to mutate the shared default dict, so
    # the mode chosen on one call leaked into all subsequent calls. Copy first.
    neighborhood_kwargs = dict(neighborhood_kwargs)

    pos = pos.replace('\\', '/')
    pos = rf"{pos}"
    assert os.path.exists(pos), f'Position {pos} is not a valid path.'

    if isinstance(population, str):
        population = [population, population]

    if not isinstance(distance, list):
        distance = [distance]
    if theta_dist is not None and not isinstance(theta_dist, list):
        theta_dist = [theta_dist]

    if theta_dist is None:
        theta_dist = [0 for d in distance]  # 0.9*d
    assert len(theta_dist) == len(distance), 'Incompatible number of distances and number of edge thresholds.'

    # Same population on both sides means a self-neighborhood.
    if population[0] == population[1]:
        neighborhood_kwargs.update({'mode': 'self'})
    else:
        neighborhood_kwargs.update({'mode': 'two-pop'})

    df_A, path_A = get_position_table(pos, population=population[0], return_path=True)
    df_B, path_B = get_position_table(pos, population=population[1], return_path=True)
    if df_A is None or df_B is None:
        return None

    if clear_neigh:
        # Wipe previous neighborhood artifacts (pickles + pair table).
        if os.path.exists(path_A.replace('.csv', '.pkl')):
            os.remove(path_A.replace('.csv', '.pkl'))
        if os.path.exists(path_B.replace('.csv', '.pkl')):
            os.remove(path_B.replace('.csv', '.pkl'))
        df_pair, pair_path = get_position_table(pos, population='pairs', return_path=True)
        if df_pair is not None:
            os.remove(pair_path)

    df_A_pkl = get_position_pickle(pos, population=population[0], return_path=False)
    df_B_pkl = get_position_pickle(pos, population=population[1], return_path=False)

    # Recover previously computed neighborhood columns from the pickles.
    if df_A_pkl is not None:
        pkl_columns = np.array(df_A_pkl.columns)
        neigh_columns = np.array([c.startswith('neighborhood') for c in pkl_columns])
        cols = list(pkl_columns[neigh_columns]) + ['FRAME']

        id_col = extract_identity_col(df_A_pkl)
        cols.append(id_col)
        on_cols = [id_col, 'FRAME']

        print(f'Recover {cols} from the pickle file...')
        try:
            df_A = pd.merge(df_A, df_A_pkl.loc[:, cols], how="outer", on=on_cols)
            print(df_A.columns)
        except Exception as e:
            print(f'Failure to merge pickle and csv files: {e}')

    if df_B_pkl is not None and df_B is not None:
        pkl_columns = np.array(df_B_pkl.columns)
        neigh_columns = np.array([c.startswith('neighborhood') for c in pkl_columns])
        cols = list(pkl_columns[neigh_columns]) + ['FRAME']

        id_col = extract_identity_col(df_B_pkl)
        cols.append(id_col)
        on_cols = [id_col, 'FRAME']

        print(f'Recover {cols} from the pickle file...')
        try:
            df_B = pd.merge(df_B, df_B_pkl.loc[:, cols], how="outer", on=on_cols)
        except Exception as e:
            print(f'Failure to merge pickle and csv files: {e}')

    # Mask-contact mode needs the label stacks; self mode reuses A's labels.
    labelsA = locate_labels(pos, population=population[0])
    if population[1] == population[0]:
        labelsB = None
    else:
        labelsB = locate_labels(pos, population=population[1])

    if clear_neigh:
        unwanted = df_A.columns[df_A.columns.str.contains('neighborhood')]
        df_A = df_A.drop(columns=unwanted)
        unwanted = df_B.columns[df_B.columns.str.contains('neighborhood')]
        df_B = df_B.drop(columns=unwanted)

    print(f"Distance: {distance} for mask contact")
    df_A, df_B = self.mask_contact_neighborhood(df_A, df_B, labelsA, labelsB, distance, **neighborhood_kwargs)
    if df_A is None or df_B is None or len(df_A) == 0:
        return None

    for td, d in zip(theta_dist, distance):

        if neighborhood_kwargs['mode'] == 'two-pop':
            neigh_col = f'neighborhood_2_contact_{d}_px'
        elif neighborhood_kwargs['mode'] == 'self':
            neigh_col = f'neighborhood_self_contact_{d}_px'
        else:
            print('Invalid mode...')
            return None

        # Cells without a mask (no class_id) cannot have a contact neighborhood.
        df_A.loc[df_A['class_id'].isnull(), neigh_col] = np.nan

        # edge_filter_A = (df_A['POSITION_X'] > td)&(df_A['POSITION_Y'] > td)&(df_A['POSITION_Y'] < (img_shape[0] - td))&(df_A['POSITION_X'] < (img_shape[1] - td))
        # edge_filter_B = (df_B['POSITION_X'] > td)&(df_B['POSITION_Y'] > td)&(df_B['POSITION_Y'] < (img_shape[0] - td))&(df_B['POSITION_X'] < (img_shape[1] - td))
        # df_A.loc[~edge_filter_A, neigh_col] = np.nan
        # df_B.loc[~edge_filter_B, neigh_col] = np.nan

        df_A = compute_neighborhood_metrics(df_A, neigh_col, metrics=['inclusive', 'intermediate'],
                                            decompose_by_status=True)
        # Before/after-event averaging only makes sense on tracked data.
        if 'TRACK_ID' in list(df_A.columns):
            if not np.all(df_A['TRACK_ID'].isnull()):
                df_A = mean_neighborhood_before_event(df_A, neigh_col, event_time_col,
                                                      metrics=['inclusive', 'intermediate'])
                if event_time_col is not None:
                    df_A = mean_neighborhood_after_event(df_A, neigh_col, event_time_col,
                                                         metrics=['inclusive', 'intermediate'])
                print('Done...')

    if not population[0] == population[1]:
        # Remove neighborhood column from neighbor table, rename with actual population name
        for td, d in zip(theta_dist, distance):
            if neighborhood_kwargs['mode'] == 'two-pop':
                neigh_col = f'neighborhood_2_contact_{d}_px'
                new_neigh_col = neigh_col.replace('_2_', f'_({population[0]}-{population[1]})_')
                df_A = df_A.rename(columns={neigh_col: new_neigh_col})
            elif neighborhood_kwargs['mode'] == 'self':
                neigh_col = f'neighborhood_self_contact_{d}_px'
            else:
                print("Invalid mode...")
                return None
            df_B = df_B.drop(columns=[neigh_col])
        df_B.to_pickle(path_B.replace('.csv', '.pkl'))

        cols_to_rename = [c for c in list(df_A.columns) if
                          c.startswith('intermediate_count_') or c.startswith('inclusive_count_') or c.startswith(
                              'exclusive_count_') or c.startswith('mean_count_')]
        new_col_names = [c.replace('_2_', f'_({population[0]}-{population[1]})_') for c in cols_to_rename]
        new_name_map = {}
        for k, c in enumerate(cols_to_rename):
            new_name_map.update({c: new_col_names[k]})
        df_A = df_A.rename(columns=new_name_map)

    print(f'{df_A.columns=}')
    # Persist: pickle keeps the raw neighborhood columns, csv drops them.
    df_A.to_pickle(path_A.replace('.csv', '.pkl'))

    unwanted = df_A.columns[df_A.columns.str.startswith('neighborhood_')]
    df_A2 = df_A.drop(columns=unwanted)
    df_A2.to_csv(path_A, index=False)

    if not population[0] == population[1]:
        unwanted = df_B.columns[df_B.columns.str.startswith('neighborhood_')]
        df_B_csv = df_B.drop(unwanted, axis=1, inplace=False)
        df_B_csv.to_csv(path_B, index=False)

    if return_tables:
        return df_A, df_B
|
|
515
|
+
|
|
516
|
+
def run(self):
    """Run the neighborhood computation configured in ``self.protocol``.

    Dispatches on ``protocol['neighborhood_type']``:

    * ``"distance_threshold"`` -> ``self.compute_neighborhood_at_position``
    * ``"mask_contact"``       -> ``self.compute_contact_neighborhood_at_position``

    Both computations share the same call signature, with all options read
    from the protocol dictionary. Whatever happens, a ``"finished"`` message
    is pushed on ``self.queue`` at the end so the GUI can unblock, and the
    queue is closed afterwards.
    """

    print("Launching the neighborhood computation...")

    neigh_type = self.protocol['neighborhood_type']
    # The two computation back-ends take identical keyword arguments;
    # build them once instead of duplicating the call in each branch.
    common_kwargs = dict(
        population=self.protocol['population'],
        theta_dist=None,
        img_shape=self.img_shape,
        return_tables=False,
        clear_neigh=self.protocol['clear_neigh'],
        event_time_col=self.protocol['event_time_col'],
        neighborhood_kwargs=self.protocol['neighborhood_kwargs'],
    )

    if neigh_type == "distance_threshold":
        self.compute_neighborhood_at_position(
            self.pos,
            self.protocol['distance'],
            **common_kwargs,
        )
        print("Computation done!")
    elif neigh_type == "mask_contact":
        print("Compute contact neigh!!")
        self.compute_contact_neighborhood_at_position(
            self.pos,
            self.protocol['distance'],
            **common_kwargs,
        )
        print("Computation done!")
    else:
        # Previously an unrecognised type fell through silently and the GUI
        # only saw "finished"; at least say why nothing was computed.
        print(f"Unknown neighborhood type '{neigh_type}'; nothing was computed...")

    # Send end signal
    self.queue.put("finished")
    self.queue.close()
|
586
|
+
def end_process(self):
    """Stop the worker process and report a normal completion on the queue."""
    status = "finished"
    self.terminate()
    self.queue.put(status)
|
591
|
+
def abort_process(self):
    """Stop the worker process and report an error status on the queue."""
    status = "error"
    self.terminate()
    self.queue.put(status)
|
@@ -43,41 +43,44 @@ class DownloadProcess(Process):
|
|
|
43
43
|
self.t0 = time.time()
|
|
44
44
|
|
|
45
45
|
def download_url_to_file(self, url, dst):
    """Download ``url`` to ``dst``, reporting progress on ``self.queue``.

    The payload is streamed into a temporary file created next to ``dst``
    and moved into place only once the download completes, so a partial
    download never clobbers the destination. After each chunk, the running
    completion percentage and a predicted remaining time are pushed on the
    queue as ``[percent_done, predicted_seconds]``.

    Parameters
    ----------
    url : str
        Remote resource to fetch.
    dst : str
        Destination path; ``~`` is expanded.

    Returns
    -------
    None
        Also returns ``None`` (after printing the exception) if anything
        goes wrong during the transfer.
    """
    try:
        file_size = None
        # NOTE(review): this disables certificate verification process-wide;
        # kept from the original, but a dedicated SSLContext would be safer.
        ssl._create_default_https_context = ssl._create_unverified_context
        u = urlopen(url)
        meta = u.info()
        if hasattr(meta, 'getheaders'):
            content_length = meta.getheaders("Content-Length")
        else:
            content_length = meta.get_all("Content-Length")
        if content_length is not None and len(content_length) > 0:
            file_size = int(content_length[0])
        # We deliberately save it in a temp file and move it after
        dst = os.path.expanduser(dst)
        dst_dir = os.path.dirname(dst)
        f = tempfile.NamedTemporaryFile(delete=False, dir=dst_dir)

        try:
            with tqdm(total=file_size, disable=not self.progress,
                      unit='B', unit_scale=True, unit_divisor=1024) as pbar:
                while True:
                    buffer = u.read(8192)  # 8 KiB chunks
                    if len(buffer) == 0:
                        break
                    f.write(buffer)
                    pbar.update(len(buffer))
                    # Fix: only emit progress when the server sent a
                    # Content-Length. The original divided by ``file_size``
                    # unconditionally, raising TypeError (None division)
                    # that was then mislabelled as a connection error.
                    if file_size:
                        self.sum_done += len(buffer) / file_size * 100
                        bytes_done = self.sum_done * file_size / 100 + 1
                        mean_exec_per_step = (time.time() - self.t0) / bytes_done
                        pred_time = (file_size - bytes_done) * mean_exec_per_step
                        self.queue.put([self.sum_done, pred_time])
            # Close before moving so all buffers are flushed to disk.
            f.close()
            shutil.move(f.name, dst)
        finally:
            # Safety net: if the move did not happen, drop the temp file.
            f.close()
            if os.path.exists(f.name):
                os.remove(f.name)
    except Exception as e:
        print("No internet connection: ", e)
        return None
|
81
84
|
|
|
82
85
|
def run(self):
|
|
83
86
|
|