celldetective 1.3.0__py3-none-any.whl → 1.3.1__py3-none-any.whl

This diff shows the changes between these publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
@@ -0,0 +1,68 @@
+ {
+ "TrackerConfig":
+ {
+ "MotionModel":
+ {
+ "name": "biased_motion",
+ "dt": 1.0,
+ "measurements": 3,
+ "states": 6,
+ "accuracy": 7.5,
+ "prob_not_assign": 0.001,
+ "max_lost": 5,
+ "A": {
+ "matrix": [1,0,0,1,0,0,
+ 0,1,0,0,0,0,
+ 0,0,1,0,0,1,
+ 0,0,0,1,0,0,
+ 0,0,0,0,0,0,
+ 0,0,0,0,0,1]
+ },
+ "H": {
+ "matrix": [1,0,0,0,0,0,
+ 0,1,0,0,0,0,
+ 0,0,1,0,0,0]
+ },
+ "P": {
+ "sigma": 150.0,
+ "matrix": [0.1,0,0,0,0,0,
+ 0,0.1,0,0,0,0,
+ 0,0,0.1,0,0,0,
+ 0,0,0,1,0,0,
+ 0,0,0,0,1,0,
+ 0,0,0,0,0,1]
+ },
+ "G": {
+ "sigma": 15.0,
+ "matrix": [0.5,0.5,0.5,1,1,1]
+
+ },
+ "R": {
+ "sigma": 5.0,
+ "matrix": [1,0,0,
+ 0,1,0,
+ 0,0,1]
+ }
+ },
+ "ObjectModel":
+ {},
+ "HypothesisModel":
+ {
+ "name": "cell_hypothesis",
+ "hypotheses": ["P_FP", "P_init", "P_term", "P_branch","P_link", "P_merge"],
+ "lambda_time": 5.0,
+ "lambda_dist": 3.0,
+ "lambda_link": 10.0,
+ "lambda_branch": 1.0,
+ "eta": 1e-10,
+ "theta_dist": 20.0,
+ "theta_time": 5.0,
+ "dist_thresh": 35.0,
+ "time_thresh": 20.0,
+ "apop_thresh": 10,
+ "segmentation_miss_rate": 0.05,
+ "apoptosis_rate": 0.0005,
+ "relax": true
+ }
+ }
+ }
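
For orientation, each "matrix" above is a row-major flattening of a Kalman-filter matrix: with "states": 6 and "measurements": 3, A and P are 6x6, H is 3x6 and R is 3x3, and (assuming btrack's convention) "sigma" scales the accompanying matrix. A minimal NumPy sketch, not part of the package, to reshape them for inspection (the file path is a placeholder):

```python
import json
import numpy as np

# Placeholder path: the diff does not show where this new config file is installed.
with open("tracker_config.json") as f:
    motion = json.load(f)["TrackerConfig"]["MotionModel"]

n_states, n_meas = motion["states"], motion["measurements"]      # 6, 3
A = np.reshape(motion["A"]["matrix"], (n_states, n_states))      # state-transition matrix
H = np.reshape(motion["H"]["matrix"], (n_meas, n_states))        # measurement matrix
P = motion["P"]["sigma"] * np.reshape(motion["P"]["matrix"], (n_states, n_states))  # initial covariance (sigma-scaled, an assumption)
R = motion["R"]["sigma"] * np.reshape(motion["R"]["matrix"], (n_meas, n_meas))      # measurement noise (sigma-scaled, an assumption)
print(A, H, P, R, sep="\n\n")
```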
@@ -0,0 +1,202 @@
+ {
+ "name": "no_z_motion",
+ "verbose": false,
+ "motion_model": {
+ "measurements": 3,
+ "states": 6,
+ "A": [
+ 1.0,
+ 0.0,
+ 0.0,
+ 1.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ 1.0,
+ 0.0,
+ 0.0,
+ 1.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ 1.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ 1.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.0
+ ],
+ "H": [
+ 1.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ 1.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ 1.0,
+ 0.0,
+ 0.0,
+ 0.0
+ ],
+ "P": [
+ 30.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ 30.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ 30.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ 300.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ 300.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ 300.0
+ ],
+ "R": [
+ 10.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ 10.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ 10.0
+ ],
+ "G": [
+ 15.0,
+ 15.0,
+ 15.0,
+ 30.0,
+ 30.0,
+ 30.0
+ ],
+ "Q": [
+ 225.0,
+ 225.0,
+ 225.0,
+ 450.0,
+ 450.0,
+ 450.0,
+ 225.0,
+ 225.0,
+ 225.0,
+ 450.0,
+ 450.0,
+ 450.0,
+ 225.0,
+ 225.0,
+ 225.0,
+ 450.0,
+ 450.0,
+ 450.0,
+ 450.0,
+ 450.0,
+ 450.0,
+ 900.0,
+ 900.0,
+ 900.0,
+ 450.0,
+ 450.0,
+ 450.0,
+ 900.0,
+ 900.0,
+ 900.0,
+ 450.0,
+ 450.0,
+ 450.0,
+ 900.0,
+ 900.0,
+ 900.0
+ ],
+ "dt": 1.0,
+ "accuracy": 5.0,
+ "max_lost": 5,
+ "prob_not_assign": 0.1,
+ "name": "ricm2"
+ },
+ "object_model": null,
+ "hypothesis_model": {
+ "hypotheses": [
+ "P_FP",
+ "P_init",
+ "P_term",
+ "P_link"
+ ],
+ "lambda_time": 5.0,
+ "lambda_dist": 3.0,
+ "lambda_link": 50.0,
+ "lambda_branch": 50.0,
+ "eta": 1e-10,
+ "theta_dist": 20.0,
+ "theta_time": 5.0,
+ "dist_thresh": 75.0,
+ "time_thresh": 5.0,
+ "apop_thresh": 5,
+ "segmentation_miss_rate": 0.1,
+ "apoptosis_rate": 0.001,
+ "relax": true,
+ "name": "ricm2"
+ },
+ "max_search_radius": 200.0,
+ "return_kalman": false,
+ "store_candidate_graph": false,
+ "volume": null,
+ "update_method": 0,
+ "optimizer_options": {
+ "tm_lim": 60000
+ },
+ "features": [],
+ "tracking_updates": [
+ 1
+ ],
+ "enable_optimisation": true
+ }
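
This second file is the flat export format, with every matrix stored fully flattened in row-major order; the keys mirror btrack's TrackerConfig fields (motion_model, hypothesis_model, max_search_radius, ...), although the diff does not show where celldetective reads it. One property that is easy to verify from the values alone: the stored "Q" is the outer product of "G" with itself. A standalone check (not package code):

```python
import numpy as np

G = np.array([15.0, 15.0, 15.0, 30.0, 30.0, 30.0])
Q = np.outer(G, G)  # 6x6 process-noise covariance, flattened row-major in the config
print(Q[0])   # [225. 225. 225. 450. 450. 450.] -> first six "Q" entries above
print(Q[-1])  # [450. 450. 450. 900. 900. 900.] -> last six "Q" entries above
```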
@@ -6,12 +6,13 @@ from tqdm import tqdm
  import numpy as np
  import os
  from celldetective.io import get_config, get_experiment_wells, interpret_wells_and_positions, extract_well_name_and_number, get_positions_in_well, extract_position_name, get_position_movie_path, load_frames, auto_load_number_of_frames
- from celldetective.utils import estimate_unreliable_edge, unpad, ConfigSectionMap, _extract_channel_indices_from_config, _extract_nbr_channels_from_config, _get_img_num_per_channel
+ from celldetective.utils import interpolate_nan, estimate_unreliable_edge, unpad, ConfigSectionMap, _extract_channel_indices_from_config, _extract_nbr_channels_from_config, _get_img_num_per_channel
  from celldetective.segmentation import filter_image, threshold_image
  from csbdeep.io import save_tiff_imagej_compatible
  from gc import collect
  from lmfit import Parameters, Model
  import tifffile.tifffile as tiff
+ from scipy.ndimage import shift

  def estimate_background_per_condition(experiment, threshold_on_std=1, well_option='*', target_channel="channel_name", frame_range=[0,5], mode="timeseries", activation_protocol=[['gauss',2],['std',4]], show_progress_per_pos=False, show_progress_per_well=True):

@@ -879,7 +880,7 @@ def fit_and_apply_model_background_to_stack(stack_path,

  stack_length_auto = auto_load_number_of_frames(stack_path)
  if stack_length_auto is None and stack_length is None:
- print('stack length not provided')
+ print('Stack length not provided...')
  return None
  if stack_length_auto is not None:
  stack_length = stack_length_auto
@@ -899,6 +900,7 @@ def fit_and_apply_model_background_to_stack(stack_path,

  frames = load_frames(list(np.arange(i,(i+nbr_channels))), stack_path, normalize_input=False).astype(float)
  target_img = frames[:,:,target_channel_index].copy()
+
  correction = field_correction(target_img, threshold_on_std=threshold_on_std, operation=operation, model=model, clip=clip, activation_protocol=activation_protocol)
  frames[:,:,target_channel_index] = correction.copy()

@@ -919,6 +921,7 @@ def fit_and_apply_model_background_to_stack(stack_path,

  frames = load_frames(list(np.arange(i,(i+nbr_channels))), stack_path, normalize_input=False).astype(float)
  target_img = frames[:,:,target_channel_index].copy()
+
  correction = field_correction(target_img, threshold_on_std=threshold_on_std, operation=operation, model=model, clip=clip, activation_protocol=activation_protocol)
  frames[:,:,target_channel_index] = correction.copy()

@@ -981,6 +984,8 @@ def field_correction(img, threshold_on_std=1, operation='divide', model='parabol
  """

  target_copy = img.copy().astype(float)
+ if np.percentile(target_copy.flatten(),99.9)==0.0:
+ return target_copy

  std_frame = filter_image(target_copy,filters=activation_protocol)
  edge = estimate_unreliable_edge(activation_protocol)
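
The added guard skips the background model entirely for frames that are essentially empty: if even the 99.9th percentile of the pixel values is 0, the frame is returned untouched instead of being fitted. A trivial illustration of the test (standalone, not package code):

```python
import numpy as np

blank_frame = np.zeros((256, 256), dtype=float)
skip_fit = np.percentile(blank_frame.flatten(), 99.9) == 0.0
print(skip_fit)  # True -> field_correction returns the frame as-is
```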
@@ -1050,4 +1055,168 @@ def fit_background_model(img, cell_masks=None, model='paraboloid', edge_exclusio
  if bg is not None:
  bg = np.array(bg)

- return bg
+ return bg
+
+
+ def correct_channel_offset(
+ experiment,
+ well_option='*',
+ position_option='*',
+ target_channel="channel_name",
+ correction_horizontal = 0,
+ correction_vertical = 0,
+ show_progress_per_well = True,
+ show_progress_per_pos = False,
+ export = False,
+ return_stacks = False,
+ movie_prefix=None,
+ export_prefix='Corrected',
+ return_stack = True,
+ **kwargs,
+ ):
+
+
+ config = get_config(experiment)
+ wells = get_experiment_wells(experiment)
+ len_movie = float(ConfigSectionMap(config,"MovieSettings")["len_movie"])
+ if movie_prefix is None:
+ movie_prefix = ConfigSectionMap(config,"MovieSettings")["movie_prefix"]
+
+ well_indices, position_indices = interpret_wells_and_positions(experiment, well_option, position_option)
+ channel_indices = _extract_channel_indices_from_config(config, [target_channel])
+ nbr_channels = _extract_nbr_channels_from_config(config)
+ img_num_channels = _get_img_num_per_channel(channel_indices, int(len_movie), nbr_channels)
+
+ stacks = []
+
+ for k, well_path in enumerate(tqdm(wells[well_indices], disable=not show_progress_per_well)):
+
+ well_name, _ = extract_well_name_and_number(well_path)
+ positions = get_positions_in_well(well_path)
+ selection = positions[position_indices]
+ if isinstance(selection[0],np.ndarray):
+ selection = selection[0]
+
+ for pidx,pos_path in enumerate(tqdm(selection, disable=not show_progress_per_pos)):
+
+ stack_path = get_position_movie_path(pos_path, prefix=movie_prefix)
+ print(f'Applying the correction to position {extract_position_name(pos_path)}...')
+ len_movie_auto = auto_load_number_of_frames(stack_path)
+ if len_movie_auto is not None:
+ len_movie = len_movie_auto
+ img_num_channels = _get_img_num_per_channel(channel_indices, int(len_movie), nbr_channels)
+
+ corrected_stack = correct_channel_offset_single_stack(stack_path,
+ target_channel_index=channel_indices[0],
+ nbr_channels=nbr_channels,
+ stack_length=len_movie,
+ correction_vertical=correction_vertical,
+ correction_horizontal=correction_horizontal,
+ export=export,
+ prefix=export_prefix,
+ return_stacks = return_stacks,
+ )
+ print('Correction successful.')
+ if return_stacks:
+ stacks.append(corrected_stack)
+ else:
+ del corrected_stack
+ collect()
+
+ if return_stacks:
+ return stacks
+
+
+ def correct_channel_offset_single_stack(stack_path,
+ target_channel_index=0,
+ nbr_channels=1,
+ stack_length=45,
+ correction_vertical=0,
+ correction_horizontal=0,
+ export=False,
+ prefix="Corrected",
+ return_stacks=True,
+ ):
+
+ assert os.path.exists(stack_path),f"The stack {stack_path} does not exist... Abort."
+
+ stack_length_auto = auto_load_number_of_frames(stack_path)
+ if stack_length_auto is None and stack_length is None:
+ print('Stack length not provided...')
+ return None
+ if stack_length_auto is not None:
+ stack_length = stack_length_auto
+
+ corrected_stack = []
+
+ if export:
+ path,file = os.path.split(stack_path)
+ if prefix is None:
+ newfile = 'temp_'+file
+ else:
+ newfile = '_'.join([prefix,file])
+
+ with tiff.TiffWriter(os.sep.join([path,newfile]),imagej=True) as tif:
+
+ for i in tqdm(range(0,int(stack_length*nbr_channels),nbr_channels)):
+
+ frames = load_frames(list(np.arange(i,(i+nbr_channels))), stack_path, normalize_input=False).astype(float)
+ target_img = frames[:,:,target_channel_index].copy()
+
+ if np.percentile(target_img.flatten(), 99.9)==0.0:
+ correction = target_img
+ elif np.any(target_img.flatten()!=target_img.flatten()):
+ # Routine to interpolate NaN for the spline filter then mask it again
+ target_interp = interpolate_nan(target_img)
+ correction = shift(target_interp, [correction_vertical, correction_horizontal])
+ correction_nan = shift(target_img, [correction_vertical, correction_horizontal], prefilter=False)
+ nan_i, nan_j = np.where(correction_nan!=correction_nan)
+ correction[nan_i, nan_j] = np.nan
+ else:
+ correction = shift(target_img, [correction_vertical, correction_horizontal])
+
+ frames[:,:,target_channel_index] = correction.copy()
+
+ if return_stacks:
+ corrected_stack.append(frames)
+
+ if export:
+ tif.write(np.moveaxis(frames,-1,0).astype(np.dtype('f')), contiguous=True)
+ del frames
+ del target_img
+ del correction
+ collect()
+
+ if prefix is None:
+ os.replace(os.sep.join([path,newfile]), os.sep.join([path,file]))
+ else:
+ for i in tqdm(range(0,int(stack_length*nbr_channels),nbr_channels)):
+
+ frames = load_frames(list(np.arange(i,(i+nbr_channels))), stack_path, normalize_input=False).astype(float)
+ target_img = frames[:,:,target_channel_index].copy()
+
+ if np.percentile(target_img.flatten(), 99.9)==0.0:
+ correction = target_img
+ elif np.any(target_img.flatten()!=target_img.flatten()):
+ # Routine to interpolate NaN for the spline filter then mask it again
+ target_interp = interpolate_nan(target_img)
+ correction = shift(target_interp, [correction_vertical, correction_horizontal])
+ correction_nan = shift(target_img, [correction_vertical, correction_horizontal], prefilter=False)
+ nan_i, nan_j = np.where(correction_nan!=correction_nan)
+ correction[nan_i, nan_j] = np.nan
+ else:
+ correction = shift(target_img, [correction_vertical, correction_horizontal])
+
+ frames[:,:,target_channel_index] = correction.copy()
+
+ corrected_stack.append(frames)
+
+ del frames
+ del target_img
+ del correction
+ collect()
+
+ if return_stacks:
+ return np.array(corrected_stack)
+ else:
+ return None
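
The NaN branch in the new offset-correction loops follows a shift-then-remask pattern: NaNs are first filled so that `scipy.ndimage.shift`'s spline prefilter does not propagate them across the frame, and the NaN mask is recovered by shifting the raw frame with `prefilter=False` and re-flagging the resulting NaNs. A self-contained sketch of that pattern, using `np.nan_to_num` as a stand-in for celldetective's internal `interpolate_nan` helper:

```python
import numpy as np
from scipy.ndimage import shift

def shift_with_nan(img, dy, dx):
    """Shift a 2D frame while preserving (approximately) its NaN mask."""
    if np.any(np.isnan(img)):
        filled = np.nan_to_num(img, nan=float(np.nanmedian(img)))  # stand-in for interpolate_nan
        out = shift(filled, [dy, dx])
        nan_tracker = shift(img, [dy, dx], prefilter=False)  # NaNs mark where the mask lands
        out[np.isnan(nan_tracker)] = np.nan
        return out
    return shift(img, [dy, dx])

frame = np.random.rand(64, 64)
frame[10:12, 20:25] = np.nan
corrected = shift_with_nan(frame, 2, -3)
```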
celldetective/signals.py CHANGED
@@ -3079,7 +3079,7 @@ def mean_signal(df, signal_name, class_col, time_col=None, class_value=[0], retu
  class_value = [class_value]

  if forced_max_duration is None:
- max_duration = ceil(np.amax(df.groupby(['position','TRACK_ID']).size().values))
+ max_duration = int(df['FRAME'].max())+1 #ceil(np.amax(df.groupby(['position','TRACK_ID']).size().values))
  else:
  max_duration = forced_max_duration

@@ -3115,9 +3115,12 @@ def mean_signal(df, signal_name, class_col, time_col=None, class_value=[0], retu
  else:
  signal = track_group[signal_name].to_numpy()

+ if ref_time <=0:
+ ref_time = 0
+
  timeline = track_group['FRAME'].unique().astype(int)
  timeline_shifted = timeline - ref_time + max_duration
- signal_matrix[trackid,timeline_shifted] = signal
+ signal_matrix[trackid,timeline_shifted.astype(int)] = signal
  trackid+=1

  mean_signal, std_signal = columnwise_mean(signal_matrix, min_nbr_values=min_nbr_values)
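
The `max_duration` change swaps the longest-track estimate for the highest frame index plus one. The former undercounts whenever a track has missed detections, which can push `timeline_shifted` past the end of `signal_matrix`; an illustrative toy case (standalone, not package code):

```python
import numpy as np
import pandas as pd

# A track detected on frames 0, 5 and 9: three rows, but it spans ten frames.
df = pd.DataFrame({"position": ["p0"] * 3, "TRACK_ID": [1] * 3,
                   "FRAME": [0, 5, 9], "signal": [1.0, 2.0, 3.0]})

longest_track = int(np.amax(df.groupby(["position", "TRACK_ID"]).size().values))  # 3  (old estimate)
frame_span = int(df["FRAME"].max()) + 1                                           # 10 (new estimate)
print(longest_track, frame_span)
```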
celldetective/tracking.py CHANGED
@@ -609,7 +609,7 @@ def interpolate_time_gaps(trajectories, column_labels={'track': "TRACK_ID", 'tim

  trajectories[column_labels['time']] = pd.to_datetime(trajectories[column_labels['time']], unit='s')
  trajectories.set_index(column_labels['track'], inplace=True)
- trajectories = trajectories.groupby(column_labels['track'], group_keys=True).apply(lambda x: x.set_index(column_labels['time']).resample('1S').asfreq()).reset_index()
+ trajectories = trajectories.groupby(column_labels['track'], group_keys=True).apply(lambda x: x.set_index(column_labels['time']).resample('1s').asfreq()).reset_index()
  trajectories[[column_labels['x'], column_labels['y']]] = trajectories.groupby(column_labels['track'], group_keys=False)[[column_labels['x'], column_labels['y']]].apply(lambda x: x.interpolate(method='linear'))
  trajectories.reset_index(drop=True, inplace=True)
  trajectories[column_labels['time']] = trajectories[column_labels['time']].astype('int64').astype(float) / 10**9
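
The `'1S'` to `'1s'` change in `interpolate_time_gaps` tracks pandas' switch to lowercase frequency aliases (recent pandas releases deprecate the uppercase `'S'` seconds alias in favour of `'s'`). A quick standalone check:

```python
import pandas as pd

idx = pd.to_datetime([0, 2, 5], unit="s")
series = pd.Series([1.0, 2.0, 3.0], index=idx)
print(series.resample("1s").asfreq())  # lowercase 's' avoids the FutureWarning on pandas >= 2.2
```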
@@ -679,7 +679,11 @@ def extrapolate_tracks(trajectories, post=False, pre=False, column_labels={'trac
  extrapolated_positions = pd.DataFrame({column_labels['x']: last_known_position[0][1], column_labels['y']: last_known_position[0][2]}, index=np.arange(last_known_position[0][0] + 1, max_time + 1))
  track_data = extrapolated_frames.join(extrapolated_positions, how="inner", on=column_labels['time'])
  track_data[column_labels['track']] = track_id
- df_extrapolated = pd.concat([df_extrapolated, track_data])
+
+ if len(df_extrapolated)==0:
+ df_extrapolated = track_data
+ elif len(track_data)!=0:
+ df_extrapolated = pd.concat([df_extrapolated, track_data])


  # concatenate the original dataframe and the extrapolated dataframe
@@ -960,7 +964,7 @@ def write_first_detection_class(tab, column_labels={'track': "TRACK_ID", 'time':
  if np.any(detection==detection):
  t_first = timeline[detection==detection][0]
  cclass = 0
- if t_first==0:
+ if t_first<=0:
  t_first = -1
  cclass = 2
  else:
celldetective/utils.py CHANGED
@@ -2347,12 +2347,12 @@ def download_zenodo_file(file, output_dir):
  if len(file_to_rename)>0 and not file_to_rename[0].endswith(os.sep) and not file.startswith('demo'):
  os.rename(file_to_rename[0], os.sep.join([output_dir,file,file]))

- if file.startswith('db_'):
- os.rename(os.sep.join([output_dir,file.replace('db_','')]), os.sep.join([output_dir,file]))
- if file=='db-si-NucPI':
- os.rename(os.sep.join([output_dir,'db2-NucPI']), os.sep.join([output_dir,file]))
- if file=='db-si-NucCondensation':
- os.rename(os.sep.join([output_dir,'db1-NucCondensation']), os.sep.join([output_dir,file]))
+ #if file.startswith('db_'):
+ # os.rename(os.sep.join([output_dir,file.replace('db_','')]), os.sep.join([output_dir,file]))
+ #if file=='db-si-NucPI':
+ # os.rename(os.sep.join([output_dir,'db2-NucPI']), os.sep.join([output_dir,file]))
+ #if file=='db-si-NucCondensation':
+ # os.rename(os.sep.join([output_dir,'db1-NucCondensation']), os.sep.join([output_dir,file]))

  os.remove(path_to_zip_file)

@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: celldetective
- Version: 1.3.0
+ Version: 1.3.1
  Summary: description
  Home-page: http://github.com/remyeltorro/celldetective
  Author: Rémy Torro
@@ -68,7 +68,7 @@ analysis on multimodal time lapse microscopy images.

  ## Overview

- ![Pipeline](https://github.com/remyeltorro/celldetective/raw/main/docs/source/_static/celldetective-blocks.png)
+ ![Pipeline](https://github.com/celldetective/celldetective/raw/main/docs/source/_static/celldetective-blocks.png)


  Celldetective was designed to analyze time-lapse microscopy images in difficult situations: mixed cell populations that are only separable through multimodal information. This software provides a toolkit for the analysis of cell population interactions.
@@ -87,7 +87,7 @@ Instead of reinventing the wheel and out of respect for the amazing work done by

  **Target Audience**: The software is targeted to scientists who are interested in quantifying dynamically (or not) cell populations from microscopy images. Experimental scientists who produce such images can also analyze their data, thanks to the graphical interface, that completely removes the need for coding, and the many helper functions that guide the user in the analysis steps. Finally, the modular structure of Celldetective welcomes users with a partial need.

- ![Signal analysis](https://github.com/remyeltorro/celldetective/raw/main/docs/source/_static/signal-annotator.gif)
+ ![Signal analysis](https://github.com/celldetective/celldetective/raw/main/docs/source/_static/signal-annotator.gif)


  # System requirements