celldetective 1.1.1.post4__py3-none-any.whl → 1.2.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41) hide show
  1. celldetective/__init__.py +2 -1
  2. celldetective/extra_properties.py +62 -34
  3. celldetective/gui/__init__.py +1 -0
  4. celldetective/gui/analyze_block.py +2 -1
  5. celldetective/gui/classifier_widget.py +15 -9
  6. celldetective/gui/control_panel.py +50 -6
  7. celldetective/gui/layouts.py +5 -4
  8. celldetective/gui/neighborhood_options.py +13 -9
  9. celldetective/gui/plot_signals_ui.py +39 -11
  10. celldetective/gui/process_block.py +413 -95
  11. celldetective/gui/retrain_segmentation_model_options.py +17 -4
  12. celldetective/gui/retrain_signal_model_options.py +106 -6
  13. celldetective/gui/signal_annotator.py +29 -9
  14. celldetective/gui/signal_annotator2.py +2708 -0
  15. celldetective/gui/signal_annotator_options.py +3 -1
  16. celldetective/gui/survival_ui.py +15 -6
  17. celldetective/gui/tableUI.py +222 -60
  18. celldetective/io.py +536 -420
  19. celldetective/measure.py +919 -969
  20. celldetective/models/pair_signal_detection/blank +0 -0
  21. celldetective/models/segmentation_effectors/ricm-bimodal/config_input.json +130 -0
  22. celldetective/models/segmentation_effectors/ricm-bimodal/ricm-bimodal +0 -0
  23. celldetective/models/segmentation_effectors/ricm-bimodal/training_instructions.json +37 -0
  24. celldetective/neighborhood.py +428 -354
  25. celldetective/relative_measurements.py +648 -0
  26. celldetective/scripts/analyze_signals.py +1 -1
  27. celldetective/scripts/measure_cells.py +28 -8
  28. celldetective/scripts/measure_relative.py +103 -0
  29. celldetective/scripts/segment_cells.py +5 -5
  30. celldetective/scripts/track_cells.py +4 -1
  31. celldetective/scripts/train_segmentation_model.py +23 -18
  32. celldetective/scripts/train_signal_model.py +33 -0
  33. celldetective/signals.py +405 -8
  34. celldetective/tracking.py +8 -2
  35. celldetective/utils.py +178 -17
  36. {celldetective-1.1.1.post4.dist-info → celldetective-1.2.1.dist-info}/METADATA +8 -8
  37. {celldetective-1.1.1.post4.dist-info → celldetective-1.2.1.dist-info}/RECORD +41 -34
  38. {celldetective-1.1.1.post4.dist-info → celldetective-1.2.1.dist-info}/WHEEL +1 -1
  39. {celldetective-1.1.1.post4.dist-info → celldetective-1.2.1.dist-info}/LICENSE +0 -0
  40. {celldetective-1.1.1.post4.dist-info → celldetective-1.2.1.dist-info}/entry_points.txt +0 -0
  41. {celldetective-1.1.1.post4.dist-info → celldetective-1.2.1.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,648 @@
1
+ import pandas as pd
2
+ import numpy as np
3
+ from celldetective.utils import derivative, extract_identity_col
4
+ import os
5
+ import subprocess
6
+ from math import ceil
7
+ abs_path = os.sep.join([os.path.split(os.path.dirname(os.path.realpath(__file__)))[0], 'celldetective'])
8
+ import random
9
+ from tqdm import tqdm
10
+
11
def measure_pairs(pos, neighborhood_protocol):

	"""
	Measure, frame by frame, the relative position of each neighbor cell with
	respect to each reference cell of a given neighborhood.

	Parameters
	----------
	pos : str
		Path to the position folder (must contain 'output/tables').
	neighborhood_protocol : dict
		Neighborhood description with keys 'reference', 'neighbor', 'type',
		'distance' and 'description'.

	Returns
	-------
	pandas.DataFrame or None
		One row per (reference cell, neighbor cell, frame) with distance,
		angle and centre-of-mass dot products, or None if the trajectory
		tables cannot be loaded or no identity column is found.
	"""

	reference_population = neighborhood_protocol['reference']
	neighbor_population = neighborhood_protocol['neighbor']
	neighborhood_description = neighborhood_protocol['description']

	relative_measurements = []

	# Load the reference table (pickle) and the neighbor table (pickle, with CSV fallback).
	tab_ref = pos + os.sep.join(['output', 'tables', f'trajectories_{reference_population}.pkl'])
	df_reference = pd.read_pickle(tab_ref) if os.path.exists(tab_ref) else None

	tab_neigh = tab_ref.replace(reference_population, neighbor_population)
	if os.path.exists(tab_neigh):
		df_neighbor = pd.read_pickle(tab_neigh)
	elif os.path.exists(tab_neigh.replace('.pkl', '.csv')):
		df_neighbor = pd.read_csv(tab_neigh.replace('.pkl', '.csv'))
	else:
		df_neighbor = None

	# BUG FIX: both tables are required below; the original only guarded df_reference.
	if df_reference is None or df_neighbor is None:
		return None

	assert str(neighborhood_description) in list(df_reference.columns)

	ref_id_col = extract_identity_col(df_reference)
	if ref_id_col is None:
		return None
	ref_tracked = ref_id_col == 'TRACK_ID'

	neigh_id_col = extract_identity_col(df_neighbor)
	if neigh_id_col is None:
		return None
	neigh_tracked = neigh_id_col == 'TRACK_ID'

	# Columns holding centre-of-mass coordinates measured on the neighbor cells.
	centre_of_mass_columns = [(c, c.replace('POSITION_X', 'POSITION_Y')) for c in list(df_neighbor.columns) if c.endswith('centre_of_mass_POSITION_X')]
	centre_of_mass_labels = [c.replace('_centre_of_mass_POSITION_X', '') for c in list(df_neighbor.columns) if c.endswith('centre_of_mass_POSITION_X')]

	for t in np.unique(list(df_reference['FRAME'].unique()) + list(df_neighbor['FRAME'])):

		group_reference = df_reference.loc[df_reference['FRAME'] == t, :]
		group_neighbors = df_neighbor.loc[df_neighbor['FRAME'] == t, :]

		for tid, group in group_reference.groupby(ref_id_col):

			neighborhood = group.loc[:, f'{neighborhood_description}'].to_numpy()[0]
			coords_reference = group[['POSITION_X', 'POSITION_Y']].to_numpy()[0]

			# A float value (NaN) marks "no neighborhood computed" for this cell/frame.
			neighbors = []
			if isinstance(neighborhood, float) or neighborhood != neighborhood:
				pass
			else:
				for neigh in neighborhood:
					neighbors.append(neigh['id'])

			unique_neigh = list(np.unique(neighbors))

			neighbor_properties = group_neighbors.loc[group_neighbors[neigh_id_col].isin(unique_neigh)]

			for nc, group_neigh in neighbor_properties.groupby(neigh_id_col):

				coords_centre_of_mass = []
				for col in centre_of_mass_columns:
					coords_centre_of_mass.append(group_neigh[[col[0], col[1]]].to_numpy()[0])

				mass_displacement_vector = np.full((len(centre_of_mass_columns), 2), np.nan)
				dot_product_vector = np.full((len(centre_of_mass_columns)), np.nan)
				cosine_dot_vector = np.full((len(centre_of_mass_columns)), np.nan)

				coords_neighbor = group_neigh[['POSITION_X', 'POSITION_Y']].to_numpy()[0]
				neighbor_vector = np.full((2,), np.nan)
				neighbor_vector[0] = coords_neighbor[0] - coords_reference[0]
				neighbor_vector[1] = coords_neighbor[1] - coords_reference[1]

				# Record a measurement only when the relative vector is well defined;
				# BUG FIX: `angle`/`relative_distance` were otherwise possibly unbound.
				if neighbor_vector[0] == neighbor_vector[0] and neighbor_vector[1] == neighbor_vector[1]:
					angle = np.arctan2(neighbor_vector[1], neighbor_vector[0])
					relative_distance = np.sqrt(neighbor_vector[0] ** 2 + neighbor_vector[1] ** 2)

					for z in range(len(centre_of_mass_columns)):
						# Displacement of each centre of mass w.r.t. the neighbor centroid,
						# projected on the neighbor-to-reference direction.
						mass_displacement_vector[z, 0] = coords_centre_of_mass[z][0] - coords_neighbor[0]
						mass_displacement_vector[z, 1] = coords_centre_of_mass[z][1] - coords_neighbor[1]

						dot_product_vector[z] = np.dot(mass_displacement_vector[z], -neighbor_vector)
						cosine_dot_vector[z] = dot_product_vector[z] / (np.linalg.norm(mass_displacement_vector[z]) * np.linalg.norm(-neighbor_vector))

					measurement = {'REFERENCE_ID': tid, 'NEIGHBOR_ID': nc,
								   'reference_population': reference_population,
								   'neighbor_population': neighbor_population,
								   'FRAME': t, 'distance': relative_distance,
								   'angle': angle * 180 / np.pi,
								   f'status_{neighborhood_description}': 1,
								   f'class_{neighborhood_description}': 0,
								   'reference_tracked': ref_tracked, 'neighbors_tracked': neigh_tracked,
								   }
					for z, lbl in enumerate(centre_of_mass_labels):
						measurement.update({lbl + '_centre_of_mass_dot_product': dot_product_vector[z], lbl + '_centre_of_mass_dot_cosine': cosine_dot_vector[z]})
					relative_measurements.append(measurement)

	df_pairs = pd.DataFrame(relative_measurements)

	return df_pairs
127
+
128
+
129
+
130
+
131
+
132
+
133
+
134
def measure_pair_signals_at_position(pos, neighborhood_protocol, velocity_kwargs=None):

	"""
	Measure pair signals (relative distance, angle, velocities, centre-of-mass
	dot products, residence time) for every reference-neighbor pair of a given
	neighborhood at a single position.

	Parameters
	----------
	pos : str
		Path to the position folder (must contain 'output/tables').
	neighborhood_protocol : dict
		Neighborhood description with keys 'reference', 'neighbor', 'type',
		'distance' and 'description'.
	velocity_kwargs : dict, optional
		Keyword arguments passed to `derivative` for the velocity estimation.
		Defaults to {'window': 3, 'mode': 'bi'}.

	Returns
	-------
	pandas.DataFrame or None
		One row per (reference, neighbor, frame); falls back to
		`measure_pairs` when either population is untracked; None on failure.
	"""

	if velocity_kwargs is None:
		# BUG FIX: avoid a shared mutable default argument.
		velocity_kwargs = {'window': 3, 'mode': 'bi'}

	reference_population = neighborhood_protocol['reference']
	neighbor_population = neighborhood_protocol['neighbor']
	neighborhood_description = neighborhood_protocol['description']

	relative_measurements = []

	tab_ref = pos + os.sep.join(['output', 'tables', f'trajectories_{reference_population}.pkl'])
	df_reference = pd.read_pickle(tab_ref) if os.path.exists(tab_ref) else None

	tab_neigh = tab_ref.replace(reference_population, neighbor_population)
	if os.path.exists(tab_neigh):
		df_neighbor = pd.read_pickle(tab_neigh)
	elif os.path.exists(tab_neigh.replace('.pkl', '.csv')):
		df_neighbor = pd.read_csv(tab_neigh.replace('.pkl', '.csv'))
	else:
		df_neighbor = None

	# BUG FIX: both tables are required below; the original only guarded df_reference.
	if df_reference is None or df_neighbor is None:
		return None

	assert str(neighborhood_description) in list(df_reference.columns)

	ref_id_col = extract_identity_col(df_reference)
	if ref_id_col is not None:
		df_reference = df_reference.sort_values(by=[ref_id_col, 'FRAME'])

	ref_tracked = False
	if ref_id_col == 'TRACK_ID':
		compute_velocity = True
		ref_tracked = True
	elif ref_id_col == 'ID':
		# Untracked reference: fall back to the per-frame pair measurements.
		return measure_pairs(pos, neighborhood_protocol)
	else:
		print('ID or TRACK ID column could not be found in neighbor table. Abort.')
		return None

	print(f'Measuring pair signals...')

	neigh_id_col = extract_identity_col(df_neighbor)
	neigh_tracked = False
	if neigh_id_col == 'TRACK_ID':
		compute_velocity = True
		neigh_tracked = True
	elif neigh_id_col == 'ID':
		return measure_pairs(pos, neighborhood_protocol)
	else:
		print('ID or TRACK ID column could not be found in neighbor table. Abort.')
		return None

	try:
		for tid, group in df_reference.groupby(ref_id_col):

			neighbor_dicts = group.loc[:, f'{neighborhood_description}'].values
			timeline_reference = group['FRAME'].to_numpy()
			coords_reference = group[['POSITION_X', 'POSITION_Y']].to_numpy()

			neighbor_ids = []
			neighbor_ids_per_t = []
			time_of_first_entrance_in_neighborhood = {}

			for t in range(len(timeline_reference)):

				neighbors_at_t = neighbor_dicts[t]
				neighs_t = []
				# A float value (NaN) marks "no neighborhood computed" at this frame.
				if isinstance(neighbors_at_t, float) or neighbors_at_t != neighbors_at_t:
					pass
				else:
					for neigh in neighbors_at_t:
						if neigh['id'] not in neighbor_ids:
							time_of_first_entrance_in_neighborhood[neigh['id']] = t
						neighbor_ids.append(neigh['id'])
						neighs_t.append(neigh['id'])
				neighbor_ids_per_t.append(neighs_t)

			unique_neigh = list(np.unique(neighbor_ids))
			print(f'Reference cell {tid}: found {len(unique_neigh)} neighbour cells: {unique_neigh}...')

			neighbor_properties = df_neighbor.loc[df_neighbor[neigh_id_col].isin(unique_neigh)]

			for nc, group_neigh in neighbor_properties.groupby(neigh_id_col):

				coords_neighbor = group_neigh[['POSITION_X', 'POSITION_Y']].to_numpy()
				timeline_neighbor = group_neigh['FRAME'].to_numpy()

				# Timeline matching: unified, gap-free timeline for the pair.
				full_timeline, _, _ = timeline_matching(timeline_reference, timeline_neighbor)

				neighbor_vector = np.full((len(full_timeline), 2), np.nan)

				centre_of_mass_columns = [(c, c.replace('POSITION_X', 'POSITION_Y')) for c in list(neighbor_properties.columns) if c.endswith('centre_of_mass_POSITION_X')]
				centre_of_mass_labels = [c.replace('_centre_of_mass_POSITION_X', '') for c in list(neighbor_properties.columns) if c.endswith('centre_of_mass_POSITION_X')]

				mass_displacement_vector = np.full((len(centre_of_mass_columns), len(full_timeline), 2), np.nan)
				dot_product_vector = np.full((len(centre_of_mass_columns), len(full_timeline)), np.nan)
				cosine_dot_vector = np.full((len(centre_of_mass_columns), len(full_timeline)), np.nan)

				coords_centre_of_mass = []
				for col in centre_of_mass_columns:
					coords_centre_of_mass.append(group_neigh[[col[0], col[1]]].to_numpy())

				# Relative position, frame by frame.
				# NOTE(review): `t` is an index into full_timeline but is compared
				# against FRAME values; this assumes frames start at 0 — TODO confirm.
				for t in range(len(full_timeline)):

					if t in timeline_reference and t in timeline_neighbor:  # position exists on both sides

						idx_reference = list(timeline_reference).index(t)
						idx_neighbor = list(timeline_neighbor).index(t)

						neighbor_vector[t, 0] = coords_neighbor[idx_neighbor, 0] - coords_reference[idx_reference, 0]
						neighbor_vector[t, 1] = coords_neighbor[idx_neighbor, 1] - coords_reference[idx_reference, 1]

						for z in range(len(centre_of_mass_columns)):

							mass_displacement_vector[z, t, 0] = coords_centre_of_mass[z][idx_neighbor, 0] - coords_neighbor[idx_neighbor, 0]
							mass_displacement_vector[z, t, 1] = coords_centre_of_mass[z][idx_neighbor, 1] - coords_neighbor[idx_neighbor, 1]

							dot_product_vector[z, t] = np.dot(mass_displacement_vector[z, t], -neighbor_vector[t])
							cosine_dot_vector[z, t] = dot_product_vector[z, t] / (np.linalg.norm(mass_displacement_vector[z, t]) * np.linalg.norm(-neighbor_vector[t]))

				angle = np.full(len(full_timeline), np.nan)
				exclude = neighbor_vector[:, 1] != neighbor_vector[:, 1]
				angle[~exclude] = np.arctan2(neighbor_vector[:, 1][~exclude], neighbor_vector[:, 0][~exclude])
				# Unwrap to keep the angle continuous across the +/- pi boundary.
				angle[~exclude] = np.unwrap(angle[~exclude])
				relative_distance = np.sqrt(neighbor_vector[:, 0] ** 2 + neighbor_vector[:, 1] ** 2)

				if compute_velocity:
					rel_velocity = derivative(relative_distance, full_timeline, **velocity_kwargs)
					rel_velocity_long_timescale = derivative(relative_distance, full_timeline, window=7, mode='bi')

					angular_velocity = np.full(len(full_timeline), np.nan)
					angular_velocity_long_timescale = np.full(len(full_timeline), np.nan)

					angular_velocity[~exclude] = derivative(angle[~exclude], full_timeline[~exclude], **velocity_kwargs)
					angular_velocity_long_timescale[~exclude] = derivative(angle[~exclude], full_timeline[~exclude], window=7, mode='bi')

				cum_sum = 0
				for t in range(len(full_timeline)):

					if t in timeline_reference:

						idx_reference = list(timeline_reference).index(t)
						in_neigh = nc in neighbor_ids_per_t[idx_reference]
						if in_neigh:
							cum_sum += 1

						measurement = {'REFERENCE_ID': tid, 'NEIGHBOR_ID': nc,
									   'reference_population': reference_population,
									   'neighbor_population': neighbor_population,
									   'FRAME': t, 'distance': relative_distance[t],
									   'velocity': rel_velocity[t],
									   'velocity_smooth': rel_velocity_long_timescale[t],
									   'angle': angle[t] * 180 / np.pi,
									   'angular_velocity': angular_velocity[t],
									   'angular_velocity_smooth': angular_velocity_long_timescale[t],
									   f'status_{neighborhood_description}': 1 if in_neigh else 0,
									   f'residence_time_in_{neighborhood_description}': cum_sum,
									   f'class_{neighborhood_description}': 0,
									   f't0_{neighborhood_description}': time_of_first_entrance_in_neighborhood[nc],
									   'reference_tracked': ref_tracked, 'neighbors_tracked': neigh_tracked,
									   }
						for z, lbl in enumerate(centre_of_mass_labels):
							measurement.update({lbl + '_centre_of_mass_dot_product': dot_product_vector[z, t], lbl + '_centre_of_mass_dot_cosine': cosine_dot_vector[z, t]})
						relative_measurements.append(measurement)

		df_pairs = pd.DataFrame(relative_measurements)

		return df_pairs

	except KeyError:
		# BUG FIX: the original referenced an undefined name `description` here (NameError).
		print(f"Neighborhood {neighborhood_description} not found in data frame. Measurements for this neighborhood will not be calculated")
		return None
377
+
378
+
379
def timeline_matching(timeline1, timeline2):

	"""
	Build a unified, gap-free timeline spanning two timelines and locate each
	input time point on it.

	Parameters
	----------
	timeline1 : array-like
		First timeline (integer time points).
	timeline2 : array-like
		Second timeline (integer time points).

	Returns
	-------
	tuple
		``(full_timeline, index1, index2)`` where ``full_timeline`` is a
		``numpy.ndarray`` running from the earliest to the latest time point
		of either input, and ``index1`` / ``index2`` are the positions of
		``timeline1`` / ``timeline2`` within ``full_timeline``.

	Examples
	--------
	>>> full_timeline, index1, index2 = timeline_matching([1, 2, 5, 6], [2, 3, 4, 6])
	>>> print(full_timeline)
	[1 2 3 4 5 6]
	>>> print(index1)
	[0, 1, 4, 5]
	>>> print(index2)
	[1, 2, 3, 5]
	"""

	combined = np.concatenate((timeline1, timeline2))
	full_timeline = np.arange(np.amin(combined), np.amax(combined) + 1)

	# Locate every original time point on the unified timeline.
	index1 = [np.where(full_timeline == int(t))[0][0] for t in timeline1]
	index2 = [np.where(full_timeline == int(t))[0][0] for t in timeline2]

	return full_timeline, index1, index2
429
+
430
+
431
def rel_measure_at_position(pos):

	"""
	Run the `measure_relative.py` script on a position folder in a subprocess.

	Parameters
	----------
	pos : str
		Path to the position folder; must exist. A trailing slash is appended
		if missing.

	Raises
	------
	AssertionError
		If `pos` is not an existing path.
	"""

	pos = pos.replace('\\', '/')
	assert os.path.exists(pos), f'Position {pos} is not a valid path.'
	if not pos.endswith('/'):
		pos += '/'
	script_path = os.sep.join([abs_path, 'scripts', 'measure_relative.py'])
	# SECURITY/ROBUSTNESS FIX: list-form argv with shell=False instead of a
	# shell string built from a path (no quoting/injection issues).
	subprocess.call(['python', script_path, '--pos', pos], shell=False)
441
+
442
+
443
+ # def mcf7_size_model(x,x0,x2):
444
+ # return np.piecewise(x, [x<= x0, (x > x0)*(x<=x2), x > x2], [lambda x: 1, lambda x: -1/(x2-x0)*x + (1+x0/(x2-x0)), 0])
445
+ # def sigmoid(x,x0,k):
446
+ # return 1/(1 + np.exp(-(x-x0)/k))
447
+ # def velocity_law(x):
448
+ # return np.piecewise(x, [x<=-10, x > -10],[lambda x: 0., lambda x: (1*x+10)*(1-sigmoid(x, 1,1))/10])
449
+
450
+
451
+ # def probabilities(pairs,radius_critical=80,radius_max=150):
452
+ # scores = []
453
+ # pair_dico=[]
454
+ # print(f'Found {len(pairs)} TC-NK pairs...')
455
+ # if len(pairs) > 0:
456
+ # unique_tcs = np.unique(pairs['tc'].to_numpy())
457
+ # unique_nks = np.unique(pairs['nk'].to_numpy())
458
+ # matrix = np.zeros((len(unique_tcs), len(unique_nks)))
459
+ # for index, row in pairs.iterrows():
460
+
461
+ # i = np.where(unique_tcs == row['tc'])[0]
462
+ # j = np.where(unique_nks == row['nk'])[0]
463
+
464
+ # d_prob = mcf7_size_model(row['drel'], radius_critical, radius_max)
465
+ # lamp_prob = sigmoid(row['lamp1'], 1.05, 0.01)
466
+ # synapse_prob = row['syn_class']
467
+ # velocity_prob = velocity_law(row['vrel']) # 1-sigmoid(row['vrel'], 1,1)
468
+ # time_prob = row['t_residence_rel']
469
+
470
+ # hypotheses = [d_prob, velocity_prob, lamp_prob, synapse_prob,
471
+ # time_prob] # lamp_prob d_prob, synapse_prob, velocity_prob, lamp_prob
472
+ # s = np.sum(hypotheses) / len(hypotheses)
473
+
474
+ # matrix[i, j] = s # synapse_prob': synapse_prob,
475
+ # pair_dico.append(
476
+ # { 'tc': row['tc'], 'nk': row['nk'], 'synapse_prob': synapse_prob,
477
+ # 'd_prob': d_prob, 'lamp_prob': lamp_prob, 'velocity_prob': velocity_prob, 'time_prob': time_prob})
478
+ # pair_dico = pd.DataFrame(pair_dico)
479
+
480
+ # hypotheses = ['velocity_prob', 'd_prob', 'time_prob', 'lamp_prob', 'synapse_prob']
481
+
482
+ # for i in tqdm(range(2000)):
483
+ # sample = np.array(random.choices(np.linspace(0, 1, 100), k=len(hypotheses)))
484
+ # weights = sample / np.sum(sample)
485
+
486
+ # score_i = {}
487
+ # for k, hyp in enumerate(hypotheses):
488
+ # score_i.update({'w_' + hyp: weights[k]})
489
+ # probs=[]
490
+ # for cells, group in pair_dico.groupby(['tc']):
491
+
492
+ # group['total_prob'] = 0
493
+ # for hyp in hypotheses:
494
+ # group['total_prob'] += group[hyp] * score_i['w_' + hyp]
495
+ # probs.append(group)
496
+ # return probs
497
+
498
def update_effector_table(df_relative, df_effector):

	"""
	Flag effector cells that belong to at least one reference-neighbor pair.

	Adds (or overwrites) a 'group_neighborhood' column on `df_effector`:
	0 for cells whose identity appears in `df_relative['EFFECTOR_ID']`,
	1 otherwise. `df_effector` is modified in place and also returned.

	Parameters
	----------
	df_relative : pandas.DataFrame
		Pair table with an 'EFFECTOR_ID' column.
	df_effector : pandas.DataFrame
		Effector table with a 'TRACK_ID' or 'ID' identity column.

	Returns
	-------
	pandas.DataFrame
		`df_effector` with the updated 'group_neighborhood' column.
	"""

	df_effector['group_neighborhood'] = 1
	# BUG FIX: the original used a bare `except:` to fall back from 'TRACK_ID'
	# to 'ID'; dispatch explicitly on column presence and set all matches at once.
	id_col = 'TRACK_ID' if 'TRACK_ID' in df_effector.columns else 'ID'
	effectors = np.unique(df_relative['EFFECTOR_ID'].to_numpy())
	df_effector.loc[df_effector[id_col].isin(effectors), 'group_neighborhood'] = 0
	return df_effector
508
+
509
def extract_neighborhoods_from_pickles(pos):

	"""
	Extract neighborhood protocols from the trajectory pickle files of a position.

	Parameters
	----------
	pos : str
		The base directory path where the pickle files are located.

	Returns
	-------
	list of dict
		One dict per neighborhood column found, with keys 'reference',
		'neighbor', 'type', 'distance' and 'description' (see
		`extract_neighborhood_settings`).

	Notes
	-----
	- Looks for 'output/tables/trajectories_targets.pkl' and its 'effectors'
	  counterpart under `pos`; missing tables are simply skipped.
	- Any column whose name starts with 'neighborhood' is parsed with
	  `extract_neighborhood_settings`.

	Examples
	--------
	>>> protocols = extract_neighborhoods_from_pickles('/path/to/data')
	>>> for protocol in protocols:
	>>> 	print(protocol)
	{'reference': 'targets', 'neighbor': 'targets', 'type': 'contact', 'distance': 5.0, 'description': 'neighborhood_self_contact_5_px'}
	"""

	neighborhood_protocols = []

	tab_tc = pos + os.sep.join(['output', 'tables', 'trajectories_targets.pkl'])
	# Same extraction for both populations; fold the duplicated branches into one loop.
	for population, table in (('targets', tab_tc), ('effectors', tab_tc.replace('targets', 'effectors'))):
		if not os.path.exists(table):
			continue
		df = pd.read_pickle(table)
		for column in list(df.columns):
			if column.startswith('neighborhood'):
				neighborhood_protocols.append(extract_neighborhood_settings(column, population=population))

	return neighborhood_protocols
574
+
575
def extract_neighborhood_settings(neigh_string, population='targets'):

	"""
	Extract neighborhood settings from a neighborhood column name.

	Parameters
	----------
	neigh_string : str
		The string describing the neighborhood settings. Must start with
		'neighborhood'.
	population : str, optional
		The reference population, either 'targets' (default) or 'effectors'.

	Returns
	-------
	dict
		A dictionary with keys:
		- 'reference' : str, the reference population.
		- 'neighbor' : str, the neighbor population ('self' neighborhoods
		  point back to `population`, otherwise the opposite population).
		- 'type' : str, 'circle' or 'contact'.
		- 'distance' : float, the neighborhood distance in pixels.
		- 'description' : str, the original neighborhood string.

	Raises
	------
	AssertionError
		If `neigh_string` does not start with 'neighborhood'.
	ValueError
		If the population is unknown (and needed) or the neighborhood type
		cannot be parsed. BUG FIX: the original fell through these cases and
		crashed with UnboundLocalError instead.

	Examples
	--------
	>>> extract_neighborhood_settings('neighborhood_self_contact_5_px', 'targets')
	{'reference': 'targets', 'neighbor': 'targets', 'type': 'contact', 'distance': 5.0, 'description': 'neighborhood_self_contact_5_px'}
	"""

	assert neigh_string.startswith('neighborhood')

	# 'self' neighborhoods relate a population to itself; otherwise the
	# neighbor is the opposite population.
	if 'self' in neigh_string:
		neighbor_population = population
	elif population == 'targets':
		neighbor_population = 'effectors'
	elif population == 'effectors':
		neighbor_population = 'targets'
	else:
		raise ValueError(f"Unknown population '{population}'; expected 'targets' or 'effectors'.")

	if 'circle' in neigh_string:
		neigh_type = 'circle'
		distance = float(neigh_string.split('circle_')[1].replace('_px', ''))
	elif 'contact' in neigh_string:
		neigh_type = 'contact'
		distance = float(neigh_string.split('contact_')[1].replace('_px', ''))
	else:
		raise ValueError(f"Could not parse a neighborhood type from '{neigh_string}'.")

	return {'reference': population, 'neighbor': neighbor_population, 'type': neigh_type, 'distance': distance, 'description': neigh_string}
646
+
647
+
648
+
@@ -1,5 +1,5 @@
1
1
  """
2
- Copright © 2022 Laboratoire Adhesion et Inflammation, Authored by Remy Torro.
2
+ Copyright © 2022 Laboratoire Adhesion et Inflammation, Authored by Remy Torro.
3
3
  """
4
4
 
5
5
  import argparse