celldetective 1.0.2__py3-none-any.whl → 1.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (56)
  1. celldetective/__main__.py +2 -2
  2. celldetective/events.py +2 -44
  3. celldetective/filters.py +4 -5
  4. celldetective/gui/__init__.py +1 -1
  5. celldetective/gui/analyze_block.py +37 -10
  6. celldetective/gui/btrack_options.py +24 -23
  7. celldetective/gui/classifier_widget.py +62 -19
  8. celldetective/gui/configure_new_exp.py +32 -35
  9. celldetective/gui/control_panel.py +115 -81
  10. celldetective/gui/gui_utils.py +674 -396
  11. celldetective/gui/json_readers.py +7 -6
  12. celldetective/gui/layouts.py +755 -0
  13. celldetective/gui/measurement_options.py +168 -487
  14. celldetective/gui/neighborhood_options.py +322 -270
  15. celldetective/gui/plot_measurements.py +1114 -0
  16. celldetective/gui/plot_signals_ui.py +20 -20
  17. celldetective/gui/process_block.py +449 -169
  18. celldetective/gui/retrain_segmentation_model_options.py +27 -26
  19. celldetective/gui/retrain_signal_model_options.py +25 -24
  20. celldetective/gui/seg_model_loader.py +31 -27
  21. celldetective/gui/signal_annotator.py +2326 -2295
  22. celldetective/gui/signal_annotator_options.py +18 -16
  23. celldetective/gui/styles.py +16 -1
  24. celldetective/gui/survival_ui.py +61 -39
  25. celldetective/gui/tableUI.py +60 -23
  26. celldetective/gui/thresholds_gui.py +68 -66
  27. celldetective/gui/viewers.py +596 -0
  28. celldetective/io.py +234 -23
  29. celldetective/measure.py +37 -32
  30. celldetective/neighborhood.py +495 -27
  31. celldetective/preprocessing.py +683 -0
  32. celldetective/scripts/analyze_signals.py +7 -0
  33. celldetective/scripts/measure_cells.py +12 -0
  34. celldetective/scripts/segment_cells.py +5 -0
  35. celldetective/scripts/track_cells.py +11 -0
  36. celldetective/signals.py +221 -98
  37. celldetective/tracking.py +0 -1
  38. celldetective/utils.py +178 -36
  39. celldetective-1.1.0.dist-info/METADATA +305 -0
  40. celldetective-1.1.0.dist-info/RECORD +80 -0
  41. {celldetective-1.0.2.dist-info → celldetective-1.1.0.dist-info}/top_level.txt +1 -0
  42. tests/__init__.py +0 -0
  43. tests/test_events.py +28 -0
  44. tests/test_filters.py +24 -0
  45. tests/test_io.py +70 -0
  46. tests/test_measure.py +141 -0
  47. tests/test_neighborhood.py +70 -0
  48. tests/test_segmentation.py +93 -0
  49. tests/test_signals.py +135 -0
  50. tests/test_tracking.py +164 -0
  51. tests/test_utils.py +71 -0
  52. celldetective-1.0.2.dist-info/METADATA +0 -192
  53. celldetective-1.0.2.dist-info/RECORD +0 -66
  54. {celldetective-1.0.2.dist-info → celldetective-1.1.0.dist-info}/LICENSE +0 -0
  55. {celldetective-1.0.2.dist-info → celldetective-1.1.0.dist-info}/WHEEL +0 -0
  56. {celldetective-1.0.2.dist-info → celldetective-1.1.0.dist-info}/entry_points.txt +0 -0
celldetective/io.py CHANGED
@@ -19,7 +19,12 @@ from celldetective.utils import ConfigSectionMap, extract_experiment_channels, _
  import json
  import threading
  from skimage.measure import regionprops_table
-
+ from celldetective.utils import _estimate_scale_factor, _extract_channel_indices_from_config, _extract_channel_indices, ConfigSectionMap, _extract_nbr_channels_from_config, _get_img_num_per_channel, normalize_per_channel
+ import matplotlib.pyplot as plt
+ from celldetective.filters import std_filter, median_filter, gauss_filter
+ from stardist import fill_label_holes
+ from celldetective.utils import interpolate_nan
+ from scipy.interpolate import griddata

  def get_experiment_wells(experiment):

@@ -131,15 +136,58 @@ def get_experiment_pharmaceutical_agents(experiment, dtype=str):
  return np.array([dtype(c) for c in pharmaceutical_agents])


- def _interpret_wells_and_positions(experiment, well_option, position_option):
+ def interpret_wells_and_positions(experiment, well_option, position_option):

- wells = get_experiment_wells(experiment)
- nbr_of_wells = len(wells)
+ """
+ Interpret well and position options for a given experiment.
+
+ This function takes an experiment and well/position options to return the selected
+ wells and positions. It supports selection of all wells or specific wells/positions
+ as specified. The well numbering starts from 0 (i.e., Well 0 is W1 and so on).
+
+ Parameters
+ ----------
+ experiment : object
+ The experiment object containing well information.
+ well_option : str, int, or list of int
+ The well selection option:
+ - '*' : Select all wells.
+ - int : Select a specific well by its index.
+ - list of int : Select multiple wells by their indices.
+ position_option : str, int, or list of int
+ The position selection option:
+ - '*' : Select all positions (returns None).
+ - int : Select a specific position by its index.
+ - list of int : Select multiple positions by their indices.
+
+ Returns
+ -------
+ well_indices : numpy.ndarray or list of int
+ The indices of the selected wells.
+ position_indices : numpy.ndarray or list of int or None
+ The indices of the selected positions. Returns None if all positions are selected.
+
+ Examples
+ --------
+ >>> experiment = ... # Some experiment object
+ >>> interpret_wells_and_positions(experiment, '*', '*')
+ (array([0, 1, 2, ..., n-1]), None)
+
+ >>> interpret_wells_and_positions(experiment, 2, '*')
+ ([2], None)
+
+ >>> interpret_wells_and_positions(experiment, [1, 3, 5], 2)
+ ([1, 3, 5], array([2]))
+
+ """

+ wells = get_experiment_wells(experiment)
+ nbr_of_wells = len(wells)
+
  if well_option=='*':
- well_indices = np.arange(len(wells))
- elif isinstance(well_option, int):
- well_indices = np.array([well_option], dtype=int)
+ well_indices = np.arange(nbr_of_wells)
+ elif isinstance(well_option, int) or isinstance(well_option, np.int_):
+ well_indices = [int(well_option)]
  elif isinstance(well_option, list):
  well_indices = well_option

@@ -149,11 +197,43 @@ def _interpret_wells_and_positions(experiment, well_option, position_option):
  position_indices = np.array([position_option], dtype=int)
  elif isinstance(position_option, list):
  position_indices = position_option
-
+
  return well_indices, position_indices

  def extract_well_name_and_number(well):

+ """
+ Extract the well name and number from a given well path.
+
+ This function takes a well path string, splits it by the OS-specific path separator,
+ and extracts the well name and number. The well name is the last component of the path,
+ and the well number is derived by removing the 'W' prefix and converting the remaining
+ part to an integer.
+
+ Parameters
+ ----------
+ well : str
+ The well path string, where the well name is the last component.
+
+ Returns
+ -------
+ well_name : str
+ The name of the well, extracted from the last component of the path.
+ well_number : int
+ The well number, obtained by stripping the 'W' prefix from the well name
+ and converting the remainder to an integer.
+
+ Examples
+ --------
+ >>> well_path = "path/to/W23"
+ >>> extract_well_name_and_number(well_path)
+ ('W23', 23)
+
+ >>> well_path = "another/path/W1"
+ >>> extract_well_name_and_number(well_path)
+ ('W1', 1)
+ """
+
  split_well_path = well.split(os.sep)
  split_well_path = list(filter(None, split_well_path))
  well_name = split_well_path[-1]
@@ -163,6 +243,34 @@ def extract_well_name_and_number(well):

  def extract_position_name(pos):

+ """
+ Extract the position name from a given position path.
+
+ This function takes a position path string, splits it by the OS-specific path separator,
+ filters out any empty components, and extracts the position name, which is the last
+ component of the path.
+
+ Parameters
+ ----------
+ pos : str
+ The position path string, where the position name is the last component.
+
+ Returns
+ -------
+ pos_name : str
+ The name of the position, extracted from the last component of the path.
+
+ Examples
+ --------
+ >>> pos_path = "path/to/position1"
+ >>> extract_position_name(pos_path)
+ 'position1'
+
+ >>> pos_path = "another/path/positionA"
+ >>> extract_position_name(pos_path)
+ 'positionA'
+ """
+
  split_pos_path = pos.split(os.sep)
  split_pos_path = list(filter(None, split_pos_path))
  pos_name = split_pos_path[-1]
@@ -222,18 +330,109 @@ def get_position_table(pos, population, return_path=False):
  else:
  return df_pos

+ def get_position_pickle(pos, population, return_path=False):
+
+ """
+ Retrieves the data table for a specified population at a given position, optionally returning the table's file path.
+
+ This function locates and loads a CSV data table associated with a specific population (e.g., 'targets', 'cells')
+ from a specified position directory. The position directory should contain an 'output/tables' subdirectory where
+ the CSV file named 'trajectories_{population}.csv' is expected to be found. If the file exists, it is loaded into
+ a pandas DataFrame; otherwise, None is returned.
+
+ Parameters
+ ----------
+ pos : str
+ The path to the position directory from which to load the data table.
+ population : str
+ The name of the population for which the data table is to be retrieved. This name is used to construct the
+ file name of the CSV file to be loaded.
+ return_path : bool, optional
+ If True, returns a tuple containing the loaded data table (or None) and the path to the CSV file. If False,
+ only the loaded data table (or None) is returned (default is False).
+
+ Returns
+ -------
+ pandas.DataFrame or None, or (pandas.DataFrame or None, str)
+ If return_path is False, returns the loaded data table as a pandas DataFrame, or None if the table file does
+ not exist. If return_path is True, returns a tuple where the first element is the data table (or None) and the
+ second element is the path to the CSV file.
+
+ Examples
+ --------
+ >>> df_pos = get_position_table('/path/to/position', 'targets')
+ # This will load the 'trajectories_targets.csv' table from the specified position directory into a pandas DataFrame.
+
+ >>> df_pos, table_path = get_position_table('/path/to/position', 'targets', return_path=True)
+ # This will load the 'trajectories_targets.csv' table and also return the path to the CSV file.
+
+ """
+
+ if not pos.endswith(os.sep):
+ table = os.sep.join([pos,'output','tables',f'trajectories_{population}.pkl'])
+ else:
+ table = pos + os.sep.join(['output','tables',f'trajectories_{population}.pkl'])
+
+ if os.path.exists(table):
+ df_pos = np.load(table, allow_pickle=True)
+ else:
+ df_pos = None
+
+ if return_path:
+ return df_pos, table
+ else:
+ return df_pos
+
+
  def get_position_movie_path(pos, prefix=''):

+ """
+ Get the path of the movie file for a given position.
+
+ This function constructs the path to a movie file within a given position directory.
+ It searches for TIFF files that match the specified prefix. If multiple matching files
+ are found, the first one is returned.
+
+ Parameters
+ ----------
+ pos : str
+ The directory path for the position.
+ prefix : str, optional
+ The prefix to filter movie files. Defaults to an empty string.
+
+ Returns
+ -------
+ stack_path : str or None
+ The path to the first matching movie file, or None if no matching file is found.
+
+ Examples
+ --------
+ >>> pos_path = "path/to/position1"
+ >>> get_position_movie_path(pos_path, prefix='experiment_')
+ 'path/to/position1/movie/experiment_001.tif'
+
+ >>> pos_path = "another/path/positionA"
+ >>> get_position_movie_path(pos_path)
+ 'another/path/positionA/movie/001.tif'
+
+ >>> pos_path = "nonexistent/path"
+ >>> get_position_movie_path(pos_path)
+ None
+ """
+
+
  if not pos.endswith(os.sep):
  pos+=os.sep
  movies = glob(pos+os.sep.join(['movie',prefix+'*.tif']))
  if len(movies)>0:
  stack_path = movies[0]
  else:
- stack_path = np.nan
+ stack_path = None

  return stack_path
-
+
+
+
  def load_experiment_tables(experiment, population='targets', well_option='*',position_option='*', return_pos_info=False):


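Note — a minimal usage sketch for the new get_position_pickle helper introduced above (not part of the released diff); the position path and population name are placeholders:

    from celldetective.io import get_position_pickle, get_position_movie_path

    pos = "/path/to/experiment/W1/100/"  # hypothetical position directory
    df, table_path = get_position_pickle(pos, population="targets", return_path=True)
    if df is None:
        print(f"No trajectories_targets.pkl under {table_path}")

    # get_position_movie_path now returns None (instead of NaN) when no TIFF matches
    movie = get_position_movie_path(pos, prefix="Aligned")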
@@ -303,18 +502,19 @@ def load_experiment_tables(experiment, population='targets', well_option='*',pos
  pharmaceutical_agents = get_experiment_pharmaceutical_agents(experiment)
  well_labels = _extract_labels_from_config(config,len(wells))

- well_indices, position_indices = _interpret_wells_and_positions(experiment, well_option, position_option)
+ well_indices, position_indices = interpret_wells_and_positions(experiment, well_option, position_option)

  df = []
  df_pos_info = []
  real_well_index = 0

- for widx, well_path in enumerate(tqdm(wells[well_indices])):
+ for k, well_path in enumerate(tqdm(wells[well_indices])):

  any_table = False # assume no table

- well_index = widx
  well_name, well_number = extract_well_name_and_number(well_path)
+ widx = well_indices[k]
+
  well_alias = well_labels[widx]

  well_concentration = concentrations[widx]
@@ -322,7 +522,7 @@
  well_cell_type = cell_types[widx]
  well_pharmaceutical_agent = pharmaceutical_agents[widx]

- positions = np.array(natsorted(glob(well_path+'*'+os.sep)),dtype=str)
+ positions = get_positions_in_well(well_path)
  if position_indices is not None:
  try:
  positions = positions[position_indices]
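Note — a sketch of the well/position selection that load_experiment_tables forwards to interpret_wells_and_positions (not part of the released diff); the experiment path is a placeholder:

    from celldetective.io import load_experiment_tables

    experiment = "/path/to/experiment_folder"  # hypothetical experiment directory

    # all wells and all positions
    df_all = load_experiment_tables(experiment, population="targets")

    # wells 1 and 3 only (0-based); well metadata now follows the requested
    # indices (widx = well_indices[k]) rather than the enumeration order
    df_sub = load_experiment_tables(experiment, population="targets",
                                    well_option=[1, 3], position_option="*")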
@@ -413,6 +613,9 @@ def locate_stack(position, prefix='Aligned'):

  """

+ if not position.endswith(os.sep):
+ position+=os.sep
+
  stack_path = glob(position+os.sep.join(['movie', f'{prefix}*.tif']))
  assert len(stack_path)>0,f"No movie with prefix {prefix} found..."
  stack = imread(stack_path[0].replace('\\','/'))
@@ -457,7 +660,9 @@ def locate_labels(position, population='target'):

  """

-
+ if not position.endswith(os.sep):
+ position+=os.sep
+
  if population.lower()=="target" or population.lower()=="targets":
  label_path = natsorted(glob(position+os.sep.join(["labels_targets", "*.tif"])))
  elif population.lower()=="effector" or population.lower()=="effectors":
@@ -758,12 +963,16 @@ def get_signal_models_list(return_path=False):
  return available_models, modelpath


- def locate_signal_model(name):
+ def locate_signal_model(name, path=None):

  main_dir = os.sep.join([os.path.split(os.path.dirname(os.path.realpath(__file__)))[0],"celldetective"])
  modelpath = os.sep.join([main_dir, "models", "signal_detection", os.sep])
  print(f'Looking for {name} in {modelpath}')
  models = glob(modelpath+f'*{os.sep}')
+ if path is not None:
+ if not path.endswith(os.sep):
+ path += os.sep
+ models += glob(path+f'*{os.sep}')

  match=None
  for m in models:
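Note — a sketch of the optional extra search directory accepted by locate_signal_model (not part of the released diff); the model name and directory are placeholders:

    from celldetective.io import locate_signal_model

    # searches the built-in models/signal_detection folder, then the user directory
    model_path = locate_signal_model("my_signal_model", path="/home/user/custom_models")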
@@ -1002,7 +1211,7 @@ def view_on_napari_btrack(data,properties,graph,stack=None,labels=None,relabel=T
  del labels
  gc.collect()

- def load_napari_data(position, prefix="Aligned", population="target"):
+ def load_napari_data(position, prefix="Aligned", population="target", return_stack=True):

  """
  Load the necessary data for visualization in napari.
@@ -1035,9 +1244,11 @@ def load_napari_data(position, prefix="Aligned", population="target"):
  data = napari_data.item()['data']
  properties = napari_data.item()['properties']
  graph = napari_data.item()['graph']
-
- stack,labels = locate_stack_and_labels(position, prefix=prefix, population=population)
-
+ if return_stack:
+ stack,labels = locate_stack_and_labels(position, prefix=prefix, population=population)
+ else:
+ labels=locate_labels(position,population=population)
+ stack = None
  return data,properties,graph,labels,stack

  from skimage.measure import label
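Note — a sketch of the new return_stack switch on load_napari_data (not part of the released diff); the position path is a placeholder:

    from celldetective.io import load_napari_data

    pos = "/path/to/experiment/W1/100/"  # hypothetical position directory
    # skip loading the intensity stack; only the labels are located
    data, properties, graph, labels, stack = load_napari_data(
        pos, prefix="Aligned", population="target", return_stack=False)
    assert stack is None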
@@ -1828,11 +2039,11 @@ def load_frames(img_nums, stack_path, scale=None, normalize_input=True, dtype=fl
  channel_axis = np.argmin(frames.shape)
  frames = np.moveaxis(frames, channel_axis, -1)
  if frames.ndim==2:
- frames = frames[:,:,np.newaxis]
+ frames = frames[:,:,np.newaxis].astype(float)
  if normalize_input:
  frames = normalize_multichannel(frames, **normalize_kwargs)
  if scale is not None:
- frames = zoom(frames, [scale,scale,1], order=3)
+ frames = zoom(frames, [scale,scale,1], order=3, prefilter=False)

  # add a fake pixel to prevent auto normalization errors on images that are uniform
  # to revisit
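Note — a minimal illustration of the scipy.ndimage.zoom behaviour behind the prefilter=False change (not part of the released diff); with order=3 and the default spline prefilter, a sharp step can overshoot its original intensity range:

    import numpy as np
    from scipy.ndimage import zoom

    step = np.zeros((8, 8), dtype=float)
    step[:, 4:] = 1000.0

    up_default = zoom(step, 2, order=3)                        # prefilter on: may overshoot [0, 1000]
    up_no_prefilter = zoom(step, 2, order=3, prefilter=False)  # stays within the original range

    print(up_default.min(), up_default.max())
    print(up_no_prefilter.min(), up_no_prefilter.max())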
@@ -1955,7 +2166,7 @@ def get_positions_in_well(well):
  well = well[:-1]

  w_numeric = os.path.split(well)[-1].replace('W','')
- positions = glob(os.sep.join([well,f'{w_numeric}*{os.sep}']))
+ positions = natsorted(glob(os.sep.join([well,f'{w_numeric}*{os.sep}'])))

  return np.array(positions,dtype=str)

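Note — the switch to natsorted matters once a well holds ten or more positions; a minimal illustration with natsort (not part of the released diff):

    from natsort import natsorted

    positions = ["101", "1010", "102", "1011"]
    print(sorted(positions))     # ['101', '1010', '1011', '102']  (lexicographic)
    print(natsorted(positions))  # ['101', '102', '1010', '1011']  (natural order)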
celldetective/measure.py CHANGED
@@ -308,10 +308,11 @@ def drop_tonal_features(features):

  """

+ feat2 = features[:]
  for f in features:
  if 'intensity' in f:
- features.remove(f)
- return features
+ feat2.remove(f)
+ return feat2

  def measure_features(img, label, features=['area', 'intensity_mean'], channels=None,
  border_dist=None, haralick_options=None, verbose=True, normalisation_list=None,
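Note — the drop_tonal_features fix avoids removing items from the list being iterated over, which silently skips elements; a minimal reproduction (not part of the released diff):

    features = ['area', 'intensity_mean', 'intensity_max', 'eccentricity']

    # old behaviour: mutating the iterated list skips the element after each removal
    buggy = features[:]
    for f in buggy:
        if 'intensity' in f:
            buggy.remove(f)
    print(buggy)   # ['area', 'intensity_max', 'eccentricity'] -- 'intensity_max' survives

    # new behaviour: iterate over the original, remove from a copy
    feat2 = features[:]
    for f in features:
        if 'intensity' in f:
            feat2.remove(f)
    print(feat2)   # ['area', 'eccentricity']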
@@ -389,26 +390,20 @@ def measure_features(img, label, features=['area', 'intensity_mean'], channels=N
  columns=['count', 'spot_mean_intensity']).reset_index()
  # Rename columns
  df_spots.columns = ['label', 'spot_count', 'spot_mean_intensity']
+
  if normalisation_list:
  for norm in normalisation_list:
  for index, channel in enumerate(channels):
- if channel == norm['target channel']:
+ if channel == norm['target_channel']:
  ind = index
- if norm['mode'] == 'local':
+ if norm['correction_type'] == 'local':
  normalised_image = normalise_by_cell(img[:, :, ind].copy(), label,
- distance=int(norm['distance']), mode=norm['type'],
- operation=norm['operation'])
+ distance=int(norm['distance']), model=norm['model'],
+ operation=norm['operation'], clip=norm['clip'])
  img[:, :, ind] = normalised_image
  else:
- if norm['operation'] == 'Divide':
- normalised_image, bg = field_normalisation(img[:, :, ind].copy(), threshold=norm['threshold'],
- normalisation_operation=norm['operation'],
- clip=False, mode=norm['type'])
- else:
- normalised_image, bg = field_normalisation(img[:, :, ind].copy(), threshold=norm['threshold'],
- normalisation_operation=norm['operation'],
- clip=norm['clip'], mode=norm['type'])
- img[:, :, ind] = normalised_image
+ corrected_image = field_correction(img[:,:,ind].copy(), threshold_on_std=norm['threshold_on_std'], operation=norm['operation'], model=norm['model'], clip=norm['clip'])
+ img[:, :, ind] = corrected_image

  extra_props = getmembers(extra_properties, isfunction)
  extra_props = [extra_props[i][0] for i in range(len(extra_props))]
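Note — the normalisation entries are now read with the keys target_channel, correction_type, model, operation, clip, plus distance (local) or threshold_on_std (field); a hypothetical example list, with all values for illustration only (not part of the released diff):

    normalisation_list = [
        {   # per-cell background correction via normalise_by_cell
            'target_channel': 'fluo_channel_1',   # placeholder channel name
            'correction_type': 'local',
            'distance': 5,
            'model': 'median',        # or 'mean'
            'operation': 'subtract',  # or 'divide'
            'clip': False,
        },
        {   # any other correction_type falls through to field_correction
            'target_channel': 'fluo_channel_2',   # placeholder channel name
            'correction_type': 'field',
            'threshold_on_std': 1.0,
            'model': 'plane',         # assumed value, mirroring the fit_plane/fit_paraboloid fits below
            'operation': 'divide',
            'clip': False,
        },
    ]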
@@ -725,7 +720,7 @@ def measure_isotropic_intensity(positions, # Dataframe of cell positions @ t

  """

-
+ epsilon = -10000
  assert ((img.ndim==2)|(img.ndim==3)),f'Invalid image shape to compute the Haralick features. Expected YXC, got {img.shape}...'

  if img.ndim==2:
@@ -753,7 +748,7 @@ def measure_isotropic_intensity(positions, # Dataframe of cell positions @ t

  pad_value_x = mask.shape[0]//2 + 1
  pad_value_y = mask.shape[1]//2 + 1
- frame_padded = np.pad(img, [(pad_value_x,pad_value_x),(pad_value_y,pad_value_y),(0,0)])
+ frame_padded = np.pad(img.astype(float), [(pad_value_x,pad_value_x),(pad_value_y,pad_value_y),(0,0)], constant_values=[(epsilon,epsilon),(epsilon,epsilon),(0,0)])

  # Find a way to measure intensity in mask
  for tid,group in positions.groupby(column_labels['track']):
@@ -770,11 +765,17 @@ def measure_isotropic_intensity(positions, # Dataframe of cell positions @ t

  expanded_mask = np.expand_dims(mask, axis=-1) # shape: (X, Y, 1)
  crop = frame_padded[ymin:ymax,xmin:xmax]
- projection = np.multiply(crop, expanded_mask)
+
+ crop_temp = crop.copy()
+ crop_temp[crop_temp==epsilon] = 0.
+ projection = np.multiply(crop_temp, expanded_mask)
+
+ projection[crop==epsilon] = epsilon
+ projection[expanded_mask==0.] = epsilon

  for op in operations:
  func = eval('np.'+op)
- intensity_values = func(projection, axis=(0,1), where=projection!=0.)
+ intensity_values = func(projection, axis=(0,1), where=projection>epsilon)
  for k in range(crop.shape[-1]):
  if isinstance(r,list):
  positions.loc[group.index, f'{channels[k]}_ring_{min(r)}_{max(r)}_{op}'] = intensity_values[k]
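Note — the epsilon sentinel lets genuine zero-intensity pixels contribute to the reduction while padded or out-of-mask pixels are excluded through the where= argument; a minimal numpy illustration (not part of the released diff):

    import numpy as np

    epsilon = -10000

    # old scheme: zero padding masked with != 0 also drops a genuine zero-intensity pixel
    old = np.array([0.0, 5.0, 3.0, 0.0])      # last value is padding
    print(np.mean(old, where=old != 0.))      # 4.0 -- the real 0.0 pixel is ignored

    # new scheme: sentinel padding masked with > epsilon keeps the zero pixel
    new = np.array([0.0, 5.0, 3.0, epsilon])  # last value is padding
    print(np.mean(new, where=new > epsilon))  # 2.67 -- 0.0, 5.0 and 3.0 averaged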
@@ -806,7 +807,7 @@ def measure_isotropic_intensity(positions, # Dataframe of cell positions @ t

  for op in operations:
  func = eval('np.'+op)
- intensity_values = func(projection, axis=(0,1), where=projection!=0.)
+ intensity_values = func(projection, axis=(0,1), where=projection==projection)
  for k in range(crop.shape[-1]):
  positions.loc[group.index, f'{channels[k]}_custom_kernel_{op}'] = intensity_values[k]

@@ -860,7 +861,7 @@ def measure_at_position(pos, mode, return_measurements=False, threads=1):
  return None


- def local_normalisation(image, labels, background_intensity, mode, operation):
+ def local_normalisation(image, labels, background_intensity, model='median', operation='subtract', clip=False):
  """
  Perform local normalization on an image based on labels.

@@ -900,19 +901,23 @@ def local_normalisation(image, labels, background_intensity, mode, operation):
  based on the mode specified.
  - The background intensity values should be provided in the same order as the labels.
  """
+
  for index, cell in enumerate(np.unique(labels)):
  if cell == 0:
  continue
- if operation == 'Subtract':
+ if operation == 'subtract':
  image[np.where(labels == cell)] = image[np.where(labels == cell)].astype(float) - \
- background_intensity[f'intensity_{mode.lower()}'][index-1].astype(float)
- elif operation == 'Divide':
+ background_intensity[f'intensity_{model.lower()}'][index-1].astype(float)
+ elif operation == 'divide':
  image[np.where(labels == cell)] = image[np.where(labels == cell)].astype(float) / \
- background_intensity[f'intensity_{mode.lower()}'][index-1].astype(float)
+ background_intensity[f'intensity_{model.lower()}'][index-1].astype(float)
+ if clip:
+ image[image<=0.] = 0.
+
  return image.astype(float)


- def normalise_by_cell(image, labels, distance, mode, operation):
+ def normalise_by_cell(image, labels, distance=5, model='median', operation='subtract', clip=False):
  """
  Normalize an image based on cell regions.

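Note — a sketch of the lower-case keyword style now expected by normalise_by_cell (not part of the released diff); image and labels are placeholder arrays:

    import numpy as np
    from celldetective.measure import normalise_by_cell

    image = np.random.rand(256, 256).astype(float)  # placeholder intensity image
    labels = np.zeros((256, 256), dtype=int)        # placeholder instance segmentation
    labels[100:140, 100:140] = 1

    corrected = normalise_by_cell(image, labels, distance=5,
                                  model='median', operation='subtract', clip=False)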
@@ -950,17 +955,17 @@ def normalise_by_cell(image, labels, distance, mode, operation):
  - The operation determines whether to subtract or divide the background intensity from the image.
  """
  border = contour_of_instance_segmentation(label=labels, distance=distance * (-1))
- if mode == 'Mean':
+ if model == 'mean':
  background_intensity = regionprops_table(intensity_image=image, label_image=border,
  properties=['intensity_mean'])
- elif mode == 'Median':
+ elif model == 'median':
  median = []
  median.append(getattr(extra_properties, 'intensity_median'))
  background_intensity = regionprops_table(intensity_image=image, label_image=border,
  extra_properties=median)
  normalised_frame = local_normalisation(image=image.astype(float).copy(),
- labels=labels, background_intensity=background_intensity, mode=mode,
- operation=operation)
+ labels=labels, background_intensity=background_intensity, model=model,
+ operation=operation, clip=clip)

  return normalised_frame

@@ -1015,7 +1020,7 @@ def fit_plane(image, cell_masks=None):
  x=xx,
  y=yy,
  weights=weights,
- params=params, max_nfev=100)
+ params=params, max_nfev=3000)
  a = result.params['a'].value
  b = result.params['b'].value
  c = result.params['c'].value
@@ -1069,7 +1074,7 @@ def fit_paraboloid(image, cell_masks=None):
  x=xx,
  y=yy,
  weights=weights,
- params=params, max_nfev=100)
+ params=params, max_nfev=3000)
  a = result.params['a'].value
  b = result.params['b'].value
  c = result.params['c'].value