celldetective 1.3.1__py3-none-any.whl → 1.3.3.post1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- celldetective/_version.py +1 -1
- celldetective/events.py +2 -0
- celldetective/gui/classifier_widget.py +51 -3
- celldetective/gui/control_panel.py +9 -3
- celldetective/gui/generic_signal_plot.py +161 -2
- celldetective/gui/gui_utils.py +90 -1
- celldetective/gui/measurement_options.py +35 -32
- celldetective/gui/plot_signals_ui.py +8 -3
- celldetective/gui/process_block.py +36 -114
- celldetective/gui/retrain_segmentation_model_options.py +3 -1
- celldetective/gui/signal_annotator.py +53 -26
- celldetective/gui/signal_annotator2.py +17 -30
- celldetective/gui/survival_ui.py +7 -3
- celldetective/gui/tableUI.py +300 -183
- celldetective/gui/thresholds_gui.py +195 -199
- celldetective/gui/viewers.py +267 -13
- celldetective/io.py +110 -10
- celldetective/measure.py +128 -88
- celldetective/models/segmentation_effectors/ricm_bf_all_last/config_input.json +79 -0
- celldetective/models/segmentation_effectors/ricm_bf_all_last/ricm_bf_all_last +0 -0
- celldetective/models/segmentation_effectors/ricm_bf_all_last/training_instructions.json +37 -0
- celldetective/models/segmentation_effectors/test-transfer/config_input.json +39 -0
- celldetective/models/segmentation_effectors/test-transfer/test-transfer +0 -0
- celldetective/neighborhood.py +154 -69
- celldetective/relative_measurements.py +128 -4
- celldetective/scripts/measure_cells.py +3 -3
- celldetective/signals.py +207 -213
- celldetective/utils.py +16 -0
- {celldetective-1.3.1.dist-info → celldetective-1.3.3.post1.dist-info}/METADATA +11 -10
- {celldetective-1.3.1.dist-info → celldetective-1.3.3.post1.dist-info}/RECORD +34 -29
- {celldetective-1.3.1.dist-info → celldetective-1.3.3.post1.dist-info}/WHEEL +1 -1
- {celldetective-1.3.1.dist-info → celldetective-1.3.3.post1.dist-info}/LICENSE +0 -0
- {celldetective-1.3.1.dist-info → celldetective-1.3.3.post1.dist-info}/entry_points.txt +0 -0
- {celldetective-1.3.1.dist-info → celldetective-1.3.3.post1.dist-info}/top_level.txt +0 -0
celldetective/measure.py
CHANGED
@@ -15,6 +15,7 @@ import subprocess
 from math import ceil
 
 from skimage.draw import disk as dsk
+from skimage.feature import blob_dog, blob_log
 
 from celldetective.utils import rename_intensity_column, create_patch_mask, remove_redundant_features, \
     remove_trajectory_measurements, contour_of_instance_segmentation, extract_cols_from_query, step_function, interpolate_nan
@@ -24,6 +25,8 @@ from celldetective.extra_properties import *
 from inspect import getmembers, isfunction
 from skimage.morphology import disk
 
+import matplotlib.pyplot as plt
+
 abs_path = os.sep.join([os.path.split(os.path.dirname(os.path.realpath(__file__)))[0], 'celldetective'])
 
 def measure(stack=None, labels=None, trajectories=None, channel_names=None,
@@ -325,12 +328,7 @@ def measure_features(img, label, features=['area', 'intensity_mean'], channels=N
     for index, channel in enumerate(channels):
         if channel == spot_detection['channel']:
             ind = index
-    blobs = blob_detection(img[:, :, ind], label, diameter=spot_detection['diameter'],
-                           threshold=spot_detection['threshold'])
-    df_spots = pd.DataFrame.from_dict(blobs, orient='index',
-                                      columns=['count', 'spot_mean_intensity']).reset_index()
-    # Rename columns
-    df_spots.columns = ['label', 'spot_count', 'spot_mean_intensity']
+    df_spots = blob_detection(img, label, diameter=spot_detection['diameter'],threshold=spot_detection['threshold'], channel_name=spot_detection['channel'], target_channel=ind)
 
 if normalisation_list:
     for norm in normalisation_list:
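For orientation, a minimal sketch of the spot_detection argument consumed by the branch above; the 'channel', 'diameter' and 'threshold' keys are the ones read in this hunk, while the channel name and numeric values are illustrative assumptions, not taken from the diff:

# Hypothetical spot_detection settings for measure_features (example values only).
spot_detection = {
    "channel": "fluo_channel",  # assumed name; must match one entry of `channels`
    "diameter": 5.0,            # expected spot diameter, forwarded to blob_detection
    "threshold": 0.01,          # detection threshold, forwarded to blob_log / blob_dog
}
# df = measure_features(img, label, channels=channels, spot_detection=spot_detection)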
@@ -362,47 +360,27 @@ def measure_features(img, label, features=['area', 'intensity_mean'], channels=N
     props = regionprops_table(label, intensity_image=img, properties=feats, extra_properties=extra_props_list)
     df_props = pd.DataFrame(props)
     if spot_detection is not None:
-
-
-
-
-
-
-        # if spot_detection is not None:
-        #     for index, channel in enumerate(channels):
-        #         if channel == spot_detection['channel']:
-        #             ind = index
-        #     blobs = blob_detection(img[:, :, ind], label, diameter=spot_detection['diameter'],
-        #                            threshold=spot_detection['threshold'])
-        #     df_spots = pd.DataFrame.from_dict(blobs, orient='index', columns=['count', 'spot_mean_intensity']).reset_index()
-        #     # Rename columns
-        #     df_spots.columns = ['label', 'spot_count', 'spot_mean_intensity']
-        #     df_props = df_props.merge(df_spots, how='outer', on='label')
-        #     df_props['spot_count'] = df_props['spot_count'].replace(np.nan, 0)
-        #     df_props['spot_mean_intensity'] = df_props['spot_mean_intensity'].replace(np.nan, 0)
-
+        if df_spots is not None:
+            df_props = df_props.merge(df_spots, how='outer', on='label',suffixes=('_delme', ''))
+            df_props = df_props[[c for c in df_props.columns if not c.endswith('_delme')]]
 
     if border_dist is not None:
         # automatically drop all non intensity features
         intensity_features_test = [('intensity' in s and 'centroid' not in s and 'peripheral' not in s) for s in
                                    features]
         intensity_features = list(np.array(features)[np.array(intensity_features_test)])
-        # intensity_extra = [(s in extra_props_list)for s in intensity_features]
-        # print(intensity_extra)
         intensity_extra = []
         for s in intensity_features:
             if s in extra_props:
                 intensity_extra.append(getattr(extra_properties, s))
                 intensity_features.remove(s)
-
-        # If no intensity feature was passed still measure mean intensity
+
         if len(intensity_features) == 0:
             if verbose:
                 print('No intensity feature was passed... Adding mean intensity for edge measurement...')
             intensity_features = np.append(intensity_features, 'intensity_mean')
         intensity_features = list(np.append(intensity_features, 'label'))
 
-        # Remove extra intensity properties from border measurements
         new_intensity_features = intensity_features.copy()
         for int_feat in intensity_features:
             if int_feat in extra_props:
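The merge above (and the Haralick merge in the next hunk) relies on a small pandas idiom: columns that already exist in df_props receive a '_delme' suffix during the merge and are then dropped, so the freshly computed values win. A self-contained sketch with made-up values:

import pandas as pd

# Columns already present in df_props get the '_delme' suffix; the suffixed
# copies are then dropped, keeping the values coming from df_spots.
df_props = pd.DataFrame({"label": [1, 2], "area": [40, 55], "spot_count": [0, 0]})
df_spots = pd.DataFrame({"label": [1, 2], "spot_count": [3, 1]})

df_props = df_props.merge(df_spots, how="outer", on="label", suffixes=("_delme", ""))
df_props = df_props[[c for c in df_props.columns if not c.endswith("_delme")]]
print(df_props)  # 'spot_count' now holds the values from df_spots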
@@ -439,8 +417,9 @@ def measure_features(img, label, features=['area', 'intensity_mean'], channels=N
     if haralick_options is not None:
         try:
             df_haralick = compute_haralick_features(img, label, channels=channels, **haralick_options)
-
-
+            df_haralick = df_haralick.rename(columns={"cell_id": "label"})
+            df_props = df_props.merge(df_haralick, how='outer', on='label', suffixes=('_delme', ''))
+            df_props = df_props[[c for c in df_props.columns if not c.endswith('_delme')]]
         except Exception as e:
             print(e)
             pass
@@ -916,67 +895,125 @@ def normalise_by_cell(image, labels, distance=5, model='median', operation='subt
     return normalised_frame
 
 
-def blob_detection(image, label, threshold, diameter):
-
-
+def extract_blobs_in_image(image, label, diameter, threshold=0., method="log"):
+
+    if np.percentile(image.flatten(),99.9)==0.0:
+        return None
 
-
-    - image (numpy.ndarray): The input image data.
-    - label (numpy.ndarray): An array specifying labeled regions in the image.
-    - threshold (float): The threshold value for blob detection.
-    - diameter (float): The expected diameter of blobs.
+    dilated_image = ndimage.grey_dilation(label, footprint=disk(10))
 
-
-
+    masked_image = image.copy()
+    masked_image[np.where((dilated_image == 0)|(image!=image))] = 0
+    min_sigma = (1 / (1 + math.sqrt(2))) * diameter
+    max_sigma = math.sqrt(2) * min_sigma
+    if method=="dog":
+        blobs = blob_dog(masked_image, threshold=threshold, min_sigma=min_sigma, max_sigma=max_sigma, overlap=0.75)
+    elif method=="log":
+        blobs = blob_log(masked_image, threshold=threshold, min_sigma=min_sigma, max_sigma=max_sigma, overlap=0.75)
+    # Exclude spots outside of cell masks
+    mask = np.array([label[int(y), int(x)] != 0 for y, x, _ in blobs])
+    if np.any(mask):
+        blobs_filtered = blobs[mask]
+    else:
+        blobs_filtered=[]
 
-
-    and detects blobs within the region using the Difference of Gaussians (DoG) method. Detected blobs are filtered
-    based on the specified threshold and expected diameter. The function returns a dictionary containing the number of
-    detected blobs and their mean intensity for each labeled region.
+    return blobs_filtered
 
-    Example:
-    >>> image = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
-    >>> label = np.array([[0, 1, 1], [2, 2, 0], [3, 3, 0]])
-    >>> threshold = 0.1
-    >>> diameter = 5.0
-    >>> result = blob_detection(image, label, threshold, diameter)
-    >>> print(result)
-    {1: [1, 4.0], 2: [0, nan], 3: [0, nan]}
 
-    (... 34 further removed lines: the remainder of the former blob_detection docstring and body, not rendered in this view ...)
+def blob_detection(image, label, diameter, threshold=0., channel_name=None, target_channel=0, method="log"):
+
+    image = image[:, :, target_channel].copy()
+    if np.percentile(image.flatten(),99.9)==0.0:
+        return None
+
+    detections = []
+    blobs_filtered = extract_blobs_in_image(image, label, diameter, threshold=threshold)
+
+    for lbl in np.unique(label):
+        if lbl>0:
+
+            blob_selection = np.array([label[int(y), int(x)] == lbl for y, x, _ in blobs_filtered])
+            if np.any(blob_selection):
+                # if any spot
+                blobs_in_cell = blobs_filtered[blob_selection]
+                n_spots = len(blobs_in_cell)
+                binary_blobs = np.zeros_like(label)
+                for blob in blobs_in_cell:
+                    y, x, sig = blob
+                    r = np.sqrt(2)*sig
+                    rr, cc = dsk((y, x), r, shape=binary_blobs.shape)
+                    binary_blobs[rr, cc] = 1
+                intensity_mean = np.nanmean(image[binary_blobs==1].flatten())
+            else:
+                n_spots = 0
+                intensity_mean = np.nan
+            detections.append({'label': lbl, f'{channel_name}_spot_count': n_spots, f'{channel_name}_mean_spot_intensity': intensity_mean})
+    detections = pd.DataFrame(detections)
+
+    return detections
+
+
+# def blob_detectionv0(image, label, threshold, diameter):
+#     """
+#     Perform blob detection on an image based on labeled regions.
+
+#     Parameters:
+#     - image (numpy.ndarray): The input image data.
+#     - label (numpy.ndarray): An array specifying labeled regions in the image.
+#     - threshold (float): The threshold value for blob detection.
+#     - diameter (float): The expected diameter of blobs.
+
+#     Returns:
+#     - dict: A dictionary containing information about detected blobs.
+
+#     This function performs blob detection on an image based on labeled regions. It iterates over each labeled region
+#     and detects blobs within the region using the Difference of Gaussians (DoG) method. Detected blobs are filtered
+#     based on the specified threshold and expected diameter. The function returns a dictionary containing the number of
+#     detected blobs and their mean intensity for each labeled region.
+
+#     Example:
+#     >>> image = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
+#     >>> label = np.array([[0, 1, 1], [2, 2, 0], [3, 3, 0]])
+#     >>> threshold = 0.1
+#     >>> diameter = 5.0
+#     >>> result = blob_detection(image, label, threshold, diameter)
+#     >>> print(result)
+#     {1: [1, 4.0], 2: [0, nan], 3: [0, nan]}
+
+#     Note:
+#     - Blobs are detected using the Difference of Gaussians (DoG) method.
+#     - Detected blobs are filtered based on the specified threshold and expected diameter.
+#     - The returned dictionary contains information about the number of detected blobs and their mean intensity
+#       for each labeled region.
+#     """
+#     blob_labels = {}
+#     dilated_image = ndimage.grey_dilation(label, footprint=disk(10))
+#     for mask_index in np.unique(label):
+#         if mask_index == 0:
+#             continue
+#         removed_background = image.copy()
+#         one_mask = label.copy()
+#         one_mask[np.where(label != mask_index)] = 0
+#         dilated_copy = dilated_image.copy()
+#         dilated_copy[np.where(dilated_image != mask_index)] = 0
+#         removed_background[np.where(dilated_copy == 0)] = 0
+#         min_sigma = (1 / (1 + math.sqrt(2))) * diameter
+#         max_sigma = math.sqrt(2) * min_sigma
+#         blobs = blob_dog(removed_background, threshold=threshold, min_sigma=min_sigma,
+#                          max_sigma=max_sigma)
+
+#         mask = np.array([one_mask[int(y), int(x)] != 0 for y, x, r in blobs])
+#         if not np.any(mask):
+#             continue
+#         blobs_filtered = blobs[mask]
+#         binary_blobs = np.zeros_like(label)
+#         for blob in blobs_filtered:
+#             y, x, r = blob
+#             rr, cc = dsk((y, x), r, shape=binary_blobs.shape)
+#             binary_blobs[rr, cc] = 1
+#         spot_intensity = regionprops_table(binary_blobs, removed_background, ['intensity_mean'])
+#         blob_labels[mask_index] = [blobs_filtered.shape[0], spot_intensity['intensity_mean'][0]]
+#     return blob_labels
 
 ### Classification ####
 
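A minimal sketch of how the new per-cell spot counter introduced above could be exercised; the synthetic image, label mask, channel name and parameter values are assumptions for illustration, not part of the package:

import numpy as np
from celldetective.measure import blob_detection  # new function introduced in this hunk

# Synthetic single-channel stack (H, W, C) with one labelled cell and one bright spot.
rng = np.random.default_rng(0)
img = rng.normal(0.1, 0.01, size=(64, 64, 1))
img[30:34, 30:34, 0] += 5.0                      # a bright blob inside the cell
label = np.zeros((64, 64), dtype=int)
label[20:45, 20:45] = 1                          # one cell mask with label 1

df = blob_detection(img, label, diameter=4.0, threshold=0.1,
                    channel_name="fluo_channel", target_channel=0)
print(df)  # columns: label, fluo_channel_spot_count, fluo_channel_mean_spot_intensity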
@@ -1025,6 +1062,7 @@ def estimate_time(df, class_attr, model='step_function', class_of_interest=[2],
 
     df = df.sort_values(by=sort_cols,ignore_index=True)
     df = df.reset_index(drop=True)
+    max_time = df['FRAME'].max()
 
 
     for tid,group in df.loc[df[class_attr].isin(class_of_interest)].groupby(sort_cols):
@@ -1053,6 +1091,8 @@ def estimate_time(df, class_attr, model='step_function', class_of_interest=[2],
 
         if r2 > float(r2_threshold):
             t0 = popt[0]
+            if t0>=max_time:
+                t0 = max_time - 1
             df.loc[indices, class_attr.replace('class','t')] = t0
             df.loc[indices, class_attr] = 0.0
         else:
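A standalone illustration of the clamp added above: an onset time fitted at or beyond the last observed frame is pulled back to max_time - 1 so the estimated event time stays inside the movie. The numbers are made up:

# max_time mirrors df['FRAME'].max() in the real code; t0 mirrors popt[0].
max_time = 120
t0 = 127.4

if t0 >= max_time:
    t0 = max_time - 1   # keep the event time within the observed frame range

print(t0)  # 119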
celldetective/models/segmentation_effectors/ricm_bf_all_last/config_input.json
ADDED
@@ -0,0 +1,79 @@
+{
+  "channels": [
+    "adhesion_channel",
+    "brightfield_channel"
+  ],
+  "diameter": 30.0,
+  "cellprob_threshold": 0.0,
+  "flow_threshold": 0.4,
+  "normalization_percentile": [
+    false,
+    true
+  ],
+  "normalization_clip": [
+    true,
+    true
+  ],
+  "normalization_values": [
+    [
+      0.75,
+      1.25
+    ],
+    [
+      1.0,
+      99.0
+    ]
+  ],
+  "model_type": "cellpose",
+  "spatial_calibration": 0.2,
+  "dataset": {
+    "train": [
+      "/home/limozin/Downloads/dataset-marie-all-objects(1)/dataset-marie-all-objects/Marie-03-03-analysis-2024_404_0066_roi_429_935_159_649.tif",
+      "/home/limozin/Downloads/dataset-marie-all-objects(1)/dataset-marie-all-objects/Marie-20-06-analysis-2024_200_0003_roi_457_991_5_488.tif",
+      "/home/limozin/Downloads/dataset-marie-all-objects(1)/dataset-marie-all-objects/Marie-11-23-analysis-2024_508_0008.tif",
+      "/home/limozin/Downloads/dataset-marie-all-objects(1)/dataset-marie-all-objects/Marie-11-23-analysis-2024_305_0075.tif",
+      "/home/limozin/Downloads/dataset-marie-all-objects(1)/dataset-marie-all-objects/Marie-03-03-analysis-2024_302_0063_roi_405_865_462_937.tif",
+      "/home/limozin/Downloads/dataset-marie-all-objects(1)/dataset-marie-all-objects/Marie-03-03-analysis-2024_506_0066_roi_473_944_494_953.tif",
+      "/home/limozin/Downloads/dataset-marie-all-objects(1)/dataset-marie-all-objects/Marie-20-06-analysis-2024_202_0027_roi_557_998_3_447.tif",
+      "/home/limozin/Downloads/dataset-marie-all-objects(1)/dataset-marie-all-objects/Marie-03-03-analysis-2024_208_0027_roi_600_997_5_387.tif",
+      "/home/limozin/Downloads/dataset-marie-all-objects(1)/dataset-marie-all-objects/Marie-03-03-analysis-2024_206_0023_roi_251_707_249_666.tif",
+      "/home/limozin/Downloads/dataset-marie-all-objects(1)/dataset-marie-all-objects/Marie-20-06-analysis-2024_201_0017_roi_324_961_23_596.tif",
+      "/home/limozin/Downloads/dataset-marie-all-objects(1)/dataset-marie-all-objects/Marie-20-06-analysis-2024_102_0053_roi_511_996_4_433.tif",
+      "/home/limozin/Downloads/dataset-marie-all-objects(1)/dataset-marie-all-objects/Marie-20-06-analysis-2024_403_0009_roi_554_996_460_994.tif",
+      "/home/limozin/Downloads/dataset-marie-all-objects(1)/dataset-marie-all-objects/Marie-11-23-analysis-2024_508_0066.tif",
+      "/home/limozin/Downloads/dataset-marie-all-objects(1)/dataset-marie-all-objects/Marie-03-03-analysis-2024_507_0066_roi_492_998_9_474.tif",
+      "/home/limozin/Downloads/dataset-marie-all-objects(1)/dataset-marie-all-objects/Marie-03-03-analysis-2024_202_0029_roi_177_746_4_533.tif",
+      "/home/limozin/Downloads/dataset-marie-all-objects(1)/dataset-marie-all-objects/Marie-20-06-analysis-2024_203_0060_roi_369_906_119_648.tif",
+      "/home/limozin/Downloads/dataset-marie-all-objects(1)/dataset-marie-all-objects/Marie-20-06-analysis-2024_304_0025.tif",
+      "/home/limozin/Downloads/dataset-marie-all-objects(1)/dataset-marie-all-objects/Marie-20-06-analysis-2024_207_0030_roi_152_661_531_998.tif",
+      "/home/limozin/Downloads/dataset-marie-all-objects(1)/dataset-marie-all-objects/Marie-03-03-analysis-2024_105_0018_roi_510_954_13_422.tif",
+      "/home/limozin/Downloads/dataset-marie-all-objects(1)/dataset-marie-all-objects/Marie-03-03-analysis-2024_507_0016_roi_29_585_462_996.tif",
+      "/home/limozin/Downloads/dataset-marie-all-objects(1)/dataset-marie-all-objects/Marie-03-03-analysis-2024_504_0066_roi_305_866_382_909.tif",
+      "/home/limozin/Downloads/dataset-marie-all-objects(1)/dataset-marie-all-objects/Marie-03-03-analysis-2024_605_0060_roi_6_479_351_847.tif",
+      "/home/limozin/Downloads/dataset-marie-all-objects(1)/dataset-marie-all-objects/Marie-11-23-analysis-2024_403_0008.tif",
+      "/home/limozin/Downloads/dataset-marie-all-objects(1)/dataset-marie-all-objects/Marie-03-03-analysis-2024_203_0029_roi_3_376_622_1000.tif",
+      "/home/limozin/Downloads/dataset-marie-all-objects(1)/dataset-marie-all-objects/Marie-20-06-analysis-2024_102_0094.tif",
+      "/home/limozin/Downloads/dataset-marie-all-objects(1)/dataset-marie-all-objects/Marie-20-06-analysis-2024_202_0055_roi_1_514_68_597.tif",
+      "/home/limozin/Downloads/dataset-marie-all-objects(1)/dataset-marie-all-objects/Marie-20-06-analysis-2024_206_0000.tif",
+      "/home/limozin/Downloads/dataset-marie-all-objects(1)/dataset-marie-all-objects/Marie-03-03-analysis-2024_302_0023_roi_295_768_6_437.tif",
+      "/home/limozin/Downloads/dataset-marie-all-objects(1)/dataset-marie-all-objects/Marie-11-23-analysis-2024_805_0081.tif",
+      "/home/limozin/Downloads/dataset-marie-all-objects(1)/dataset-marie-all-objects/Marie-03-03-analysis-2024_605_0010_roi_341_847_162_673.tif",
+      "/home/limozin/Downloads/dataset-marie-all-objects(1)/dataset-marie-all-objects/Marie-03-03-analysis-2024_205_0002_roi_70_569_6_490.tif",
+      "/home/limozin/Downloads/dataset-marie-all-objects(1)/dataset-marie-all-objects/Marie-20-06-analysis-2024_200_0055_roi_99_778_341_986.tif",
+      "/home/limozin/Downloads/dataset-marie-all-objects(1)/dataset-marie-all-objects/Marie-11-23-analysis-2024_802_0040.tif",
+      "/home/limozin/Downloads/dataset-marie-all-objects(1)/dataset-marie-all-objects/Marie-03-03-analysis-2024_206_0028_roi_2_426_487_995.tif",
+      "/home/limozin/Downloads/dataset-marie-all-objects(1)/dataset-marie-all-objects/Marie-20-06-analysis-2024_706_0042_roi_99_515_11_394.tif"
+    ],
+    "validation": [
+      "/home/limozin/Downloads/dataset-marie-all-objects(1)/dataset-marie-all-objects/Marie-20-06-analysis-2024_201_0003_roi_501_957_576_1000.tif",
+      "/home/limozin/Downloads/dataset-marie-all-objects(1)/dataset-marie-all-objects/Marie-11-23-analysis-2024_603_0032.tif",
+      "/home/limozin/Downloads/dataset-marie-all-objects(1)/dataset-marie-all-objects/Marie-20-06-analysis-2024_706_0030_roi_323_824_488_995.tif",
+      "/home/limozin/Downloads/dataset-marie-all-objects(1)/dataset-marie-all-objects/Marie-03-03-analysis-2024_202_0014_roi_258_711_504_959.tif",
+      "/home/limozin/Downloads/dataset-marie-all-objects(1)/dataset-marie-all-objects/Marie-20-06-analysis-2024_207_0007_roi_119_611_5_451.tif",
+      "/home/limozin/Downloads/dataset-marie-all-objects(1)/dataset-marie-all-objects/Marie-20-06-analysis-2024_206_0030.tif",
+      "/home/limozin/Downloads/dataset-marie-all-objects(1)/dataset-marie-all-objects/Marie-20-06-analysis-2024_207_0055_roi_519_989_6_429.tif",
+      "/home/limozin/Downloads/dataset-marie-all-objects(1)/dataset-marie-all-objects/Marie-03-03-analysis-2024_506_0053_roi_190_724_13_490.tif",
+      "/home/limozin/Downloads/dataset-marie-all-objects(1)/dataset-marie-all-objects/Marie-20-06-analysis-2024_300_0047_roi_416_985_436_972.tif"
+    ]
+  }
+}
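A minimal sketch of reading the config_input.json shown above, assuming a copy of the file in the working directory (the path handling is illustrative; the keys are taken from the file contents):

import json

with open("config_input.json", "r") as f:
    config = json.load(f)

channels = config["channels"]                      # ["adhesion_channel", "brightfield_channel"]
diameter = config["diameter"]                      # 30.0
flow_threshold = config["flow_threshold"]          # 0.4
cellprob_threshold = config["cellprob_threshold"]  # 0.0
norm_values = config["normalization_values"]       # per-channel normalisation ranges
print(channels, diameter, flow_threshold, cellprob_threshold, norm_values)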
celldetective/models/segmentation_effectors/ricm_bf_all_last/training_instructions.json
ADDED
@@ -0,0 +1,37 @@
+{
+  "model_name": "ricm_bf_all_last",
+  "model_type": "cellpose",
+  "pretrained": null,
+  "spatial_calibration": 0.2,
+  "channel_option": [
+    "adhesion_channel",
+    "brightfield_channel"
+  ],
+  "normalization_percentile": [
+    false,
+    true
+  ],
+  "normalization_clip": [
+    true,
+    true
+  ],
+  "normalization_values": [
+    [
+      0.75,
+      1.25
+    ],
+    [
+      1.0,
+      99.0
+    ]
+  ],
+  "ds": [
+    "/home/limozin/Downloads/dataset-marie-all-objects(1)/dataset-marie-all-objects"
+  ],
+  "augmentation_factor": 3.0,
+  "validation_split": 0.2,
+  "learning_rate": 0.008,
+  "batch_size": 8,
+  "epochs": 3000,
+  "target_directory": "/home/limozin/Documents/GitHub/celldetective/celldetective/models/segmentation_effectors"
+}
celldetective/models/segmentation_effectors/test-transfer/config_input.json
ADDED
@@ -0,0 +1,39 @@
+{
+  "channels": [
+    "brightfield_channel",
+    "None"
+  ],
+  "diameter": 30.0,
+  "cellprob_threshold": 0.0,
+  "flow_threshold": 0.4,
+  "normalization_percentile": [
+    true,
+    true
+  ],
+  "normalization_clip": [
+    true,
+    true
+  ],
+  "normalization_values": [
+    [
+      1.0,
+      99.0
+    ],
+    [
+      1.0,
+      99.0
+    ]
+  ],
+  "model_type": "cellpose",
+  "spatial_calibration": 0.36666666666666664,
+  "dataset": {
+    "train": [
+      "/home/torro/Documents/Experiments/Marie-03-03-analysis-2024/annotations_effectors/Marie-03-03-analysis-2024_206_0028_roi_2_426_487_995.tif",
+      "/home/torro/Documents/Experiments/Marie-03-03-analysis-2024/annotations_effectors/Marie-03-03-analysis-2024_203_0029_roi_3_376_622_1000.tif",
+      "/home/torro/Documents/Experiments/Marie-03-03-analysis-2024/annotations_effectors/Marie-03-03-analysis-2024_208_0027_roi_600_997_5_387.tif"
+    ],
+    "validation": [
+      "/home/torro/Documents/Experiments/Marie-03-03-analysis-2024/annotations_effectors/Marie-03-03-analysis-2024_206_0023_roi_251_707_249_666.tif"
+    ]
+  }
+}
Binary file