celldetective 1.1.1__py3-none-any.whl → 1.1.1.post3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- celldetective/extra_properties.py +1 -1
- celldetective/filters.py +39 -11
- celldetective/gui/classifier_widget.py +56 -7
- celldetective/gui/layouts.py +246 -3
- celldetective/gui/retrain_segmentation_model_options.py +66 -164
- celldetective/gui/retrain_signal_model_options.py +18 -164
- celldetective/gui/tableUI.py +175 -61
- celldetective/gui/thresholds_gui.py +44 -5
- celldetective/gui/viewers.py +1 -1
- celldetective/io.py +29 -14
- celldetective/preprocessing.py +23 -11
- celldetective/scripts/segment_cells.py +13 -4
- celldetective/scripts/train_segmentation_model.py +11 -22
- celldetective/segmentation.py +16 -12
- celldetective/utils.py +6 -2
- {celldetective-1.1.1.dist-info → celldetective-1.1.1.post3.dist-info}/METADATA +1 -1
- {celldetective-1.1.1.dist-info → celldetective-1.1.1.post3.dist-info}/RECORD +21 -21
- {celldetective-1.1.1.dist-info → celldetective-1.1.1.post3.dist-info}/WHEEL +1 -1
- {celldetective-1.1.1.dist-info → celldetective-1.1.1.post3.dist-info}/LICENSE +0 -0
- {celldetective-1.1.1.dist-info → celldetective-1.1.1.post3.dist-info}/entry_points.txt +0 -0
- {celldetective-1.1.1.dist-info → celldetective-1.1.1.post3.dist-info}/top_level.txt +0 -0
celldetective/scripts/segment_cells.py
CHANGED

@@ -9,7 +9,7 @@ import json
 from stardist.models import StarDist2D
 from cellpose.models import CellposeModel
 from celldetective.io import locate_segmentation_model, auto_load_number_of_frames, load_frames
-from celldetective.utils import _estimate_scale_factor, _extract_channel_indices_from_config, _extract_channel_indices, ConfigSectionMap, _extract_nbr_channels_from_config, _get_img_num_per_channel, normalize_per_channel
+from celldetective.utils import interpolate_nan, _estimate_scale_factor, _extract_channel_indices_from_config, _extract_channel_indices, ConfigSectionMap, _extract_nbr_channels_from_config, _get_img_num_per_channel, normalize_per_channel
 from pathlib import Path, PurePath
 from glob import glob
 from shutil import rmtree
@@ -121,6 +121,7 @@ if model_type=='cellpose':
 flow_threshold = input_config['flow_threshold']

 scale = _estimate_scale_factor(spatial_calibration, required_spatial_calibration)
+print(f"Scale = {scale}...")

 nbr_channels = _extract_nbr_channels_from_config(config)
 print(f'Number of channels in the input movie: {nbr_channels}')
@@ -140,12 +141,14 @@ with open(pos+f'log_{mode}.json', 'a') as f:

 # Loop over all frames and segment
 def segment_index(indices):
+global scale

 if model_type=='stardist':
 model = StarDist2D(None, name=modelname, basedir=Path(model_complete_path).parent)
 model.config.use_gpu = use_gpu
 model.use_gpu = use_gpu
 print(f"StarDist model {modelname} successfully loaded.")
+scale_model = scale

 elif model_type=='cellpose':

@@ -156,7 +159,11 @@ def segment_index(indices):
 device = torch.device("cuda")

 model = CellposeModel(gpu=use_gpu, device=device, pretrained_model=model_complete_path+modelname, model_type=None, nchan=len(required_channels)) #diam_mean=30.0,
-
+if scale is None:
+scale_model = model.diam_mean / model.diam_labels
+else:
+scale_model = scale * model.diam_mean / model.diam_labels
+print(f"Diam mean: {model.diam_mean}; Diam labels: {model.diam_labels}; Final rescaling: {scale_model}...")
 print(f'Cellpose model {modelname} successfully loaded.')

 for t in tqdm(indices,desc="frame"):
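Note on the hunk above: the rescaling factor now folds the Cellpose model's training diameter into the user-requested scale. A minimal sketch of that arithmetic, with diam_mean and diam_labels passed as plain numbers instead of being read from a loaded CellposeModel (the values below are made up):

# Illustration only: in the script, diam_mean and diam_labels come from the
# loaded Cellpose model, not from function arguments.
def compute_scale_model(scale, diam_mean, diam_labels):
    diameter_ratio = diam_mean / diam_labels  # maps input objects to the diameter the model expects
    if scale is None:
        return diameter_ratio
    return scale * diameter_ratio

print(compute_scale_model(None, 30.0, 17.4))  # spatial calibration already matches
print(compute_scale_model(0.5, 30.0, 17.4))   # extra factor from _estimate_scale_factor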
@@ -172,10 +179,12 @@ def segment_index(indices):
 percentiles.append(None)
 values.append(normalization_values[k])

-f = load_frames(img_num_channels[:,t], file, scale=
+f = load_frames(img_num_channels[:,t], file, scale=scale_model, normalize_input=True, normalize_kwargs={"percentiles": percentiles, 'values': values, 'clip': normalization_clip})
+f = np.moveaxis([interpolate_nan(f[:,:,c].copy()) for c in range(f.shape[-1])],0,-1)

 if np.any(img_num_channels[:,t]==-1):
 f[:,:,np.where(img_num_channels[:,t]==-1)[0]] = 0.
+

 if model_type=="stardist":
 Y_pred, details = model.predict_instances(f, n_tiles=model._guess_n_tiles(f), show_tile_progress=False, verbose=False)
@@ -188,7 +197,7 @@ def segment_index(indices):
 Y_pred = Y_pred.astype(np.uint16)

 if scale is not None:
-Y_pred = zoom(Y_pred, [1./
+Y_pred = zoom(Y_pred, [1./scale_model,1./scale_model],order=0)

 template = load_frames(0,file,scale=1,normalize_input=False)
 if Y_pred.shape != template.shape[:2]:
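The post-processing steps introduced in this file follow a simple recipe: interpolate NaN pixels channel by channel before prediction, then resize the predicted label image back to the input resolution with nearest-neighbour interpolation so integer labels are preserved. A self-contained sketch of that recipe on toy data (the real script uses celldetective.utils.interpolate_nan; the stub below only stands in for it):

import numpy as np
from scipy.ndimage import zoom

def interpolate_nan_stub(channel):
    # Stand-in for celldetective.utils.interpolate_nan: fill NaN with the channel median.
    channel = channel.copy()
    channel[np.isnan(channel)] = np.nanmedian(channel)
    return channel

rng = np.random.default_rng(0)
frame = rng.random((64, 64, 2)).astype(np.float32)  # toy H x W x C frame
frame[10, 10, 0] = np.nan                           # simulate a missing pixel

# Per-channel cleaning, rebuilt into H x W x C exactly as in the hunk above.
frame = np.moveaxis([interpolate_nan_stub(frame[:, :, c]) for c in range(frame.shape[-1])], 0, -1)

scale_model = 1.5
labels = rng.integers(0, 5, size=(96, 96)).astype(np.uint16)  # toy prediction at the rescaled size
# order=0 (nearest neighbour) keeps label values intact when mapping back to 64 x 64.
labels = zoom(labels, [1. / scale_model, 1. / scale_model], order=0)
print(frame.shape, labels.shape, labels.dtype)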
celldetective/scripts/train_segmentation_model.py
CHANGED

@@ -60,17 +60,10 @@ batch_size = training_instructions['batch_size']

 # Load dataset
 print(f'Datasets: {datasets}')
-X,Y = load_image_dataset(datasets, target_channels, train_spatial_calibration=spatial_calibration,
+X,Y,filenames = load_image_dataset(datasets, target_channels, train_spatial_calibration=spatial_calibration,
 mask_suffix='labelled')
 print('Dataset loaded...')

-# Normalize images
-# X = normalize_per_channel(X,
-# normalization_percentile_mode=normalization_percentile,
-# normalization_values=normalization_values,
-# normalization_clipping=normalization_clip
-# )
-
 values = []
 percentiles = []
 for k in range(len(normalization_percentile)):
@@ -88,17 +81,6 @@ for k in range(len(X)):
 x_interp = np.moveaxis([interpolate_nan(x[:,:,c].copy()) for c in range(x.shape[-1])],0,-1)
 X[k] = x_interp

-# for x in X[:10]:
-# plt.imshow(x[:,:,0])
-# plt.colorbar()
-# plt.pause(2)
-# plt.close()
-
-# plt.imshow(x[:,:,1])
-# plt.colorbar()
-# plt.pause(2)
-# plt.close()
-
 Y = [fill_label_holes(y) for y in tqdm(Y)]

 assert len(X) > 1, "not enough training data"
@@ -107,7 +89,11 @@ ind = rng.permutation(len(X))
 n_val = max(1, int(round(validation_split * len(ind))))
 ind_train, ind_val = ind[:-n_val], ind[-n_val:]
 X_val, Y_val = [X[i] for i in ind_val] , [Y[i] for i in ind_val]
-X_trn, Y_trn = [X[i] for i in ind_train], [Y[i] for i in ind_train]
+X_trn, Y_trn = [X[i] for i in ind_train], [Y[i] for i in ind_train]
+
+files_train = [filenames[i] for i in ind_train]
+files_val = [filenames[i] for i in ind_val]
+
 print('number of images: %3d' % len(X))
 print('- training: %3d' % len(X_trn))
 print('- validation: %3d' % len(X_val))
@@ -134,7 +120,10 @@ if model_type=='cellpose':
 import torch

 if not use_gpu:
+print('Using CPU for training...')
 device = torch.device("cpu")
+else:
+print('Using GPU for training...')

 logger, log_file = logger_setup()
 print(f'Pretrained model: ',pretrained)
@@ -163,7 +152,7 @@ if model_type=='cellpose':
 config_inputs = {"channels": target_channels, "diameter": standard_diameter, 'cellprob_threshold': 0., 'flow_threshold': 0.4,
 'normalization_percentile': normalization_percentile, 'normalization_clip': normalization_clip,
 'normalization_values': normalization_values, 'model_type': 'cellpose',
-'spatial_calibration': input_spatial_calibration}
+'spatial_calibration': input_spatial_calibration, 'dataset': {'train': files_train, 'validation': files_val}}
 json_input_config = json.dumps(config_inputs, indent=4)
 with open(os.sep.join([target_directory, model_name, "config_input.json"]), "w") as outfile:
 outfile.write(json_input_config)
@@ -234,7 +223,7 @@ elif model_type=='stardist':

 config_inputs = {"channels": target_channels, 'normalization_percentile': normalization_percentile,
 'normalization_clip': normalization_clip, 'normalization_values': normalization_values,
-'model_type': 'stardist', 'spatial_calibration': spatial_calibration}
+'model_type': 'stardist', 'spatial_calibration': spatial_calibration, 'dataset': {'train': files_train, 'validation': files_val}}

 json_input_config = json.dumps(config_inputs, indent=4)
 with open(os.sep.join([target_directory, model_name, "config_input.json"]), "w") as outfile:
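Both the cellpose and stardist branches now record which files landed in the training and validation splits under a 'dataset' key of config_input.json. Roughly, the written file gains a section like the sketch below (file names and the other values are placeholders, not taken from a real run):

import json

files_train = ["dataset_A/img_001.tif", "dataset_A/img_002.tif"]  # placeholder paths
files_val = ["dataset_A/img_003.tif"]

config_inputs = {
    "model_type": "cellpose",                 # or 'stardist', as in the two hunks above
    "spatial_calibration": 0.325,             # placeholder value
    "dataset": {"train": files_train, "validation": files_val},
}
print(json.dumps(config_inputs, indent=4))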
celldetective/segmentation.py
CHANGED

@@ -116,6 +116,10 @@ def segment(stack, model_name, channels=None, spatial_calibration=None, view_on_

 elif model_type=='cellpose':
 model = CellposeModel(gpu=use_gpu, pretrained_model=model_path+model_path.split('/')[-2], diam_mean=30.0)
+if scale is None:
+scale_model = model.diam_mean / model.diam_labels
+else:
+scale_model = scale * model.diam_mean / model.diam_labels

 labels = []
 if (time_flat_normalization)*normalize:
@@ -133,7 +137,7 @@ def segment(stack, model_name, channels=None, spatial_calibration=None, view_on_
 frame = normalize_multichannel(frame, values=normalization_values)

 if scale is not None:
-frame = [ndi.zoom(frame[:,:,c].copy(), [
+frame = [ndi.zoom(frame[:,:,c].copy(), [scale_model,scale_model], order=3, prefilter=False) for c in range(frame.shape[-1])]

 if model_type=="stardist":

@@ -142,16 +146,11 @@ def segment(stack, model_name, channels=None, spatial_calibration=None, view_on_

 elif model_type=="cellpose":

-
-
-else:
-channels_cp = [[0,1]]
-
-Y_pred, _, _ = model.eval([frame], diameter = diameter, flow_threshold=flow_threshold, channels=channels_cp, normalize=normalize)
-Y_pred = Y_pred[0].astype(np.uint16)
+Y_pred, _, _ = model.eval(frame, diameter = diameter, cellprob_threshold=cellprob_threshold, flow_threshold=flow_threshold, channels=None, normalize=False)
+Y_pred = Y_pred.astype(np.uint16)

 if scale is not None:
-Y_pred = ndi.zoom(Y_pred, [1./
+Y_pred = ndi.zoom(Y_pred, [1./scale_model,1./scale_model],order=0)


 if Y_pred.shape != stack[0].shape[:2]:
@@ -229,7 +228,7 @@ def segment_from_thresholds(stack, target_channel=0, thresholds=None, view_on_na
 return masks

 def segment_frame_from_thresholds(frame, target_channel=0, thresholds=None, equalize_reference=None,
-filters=None, marker_min_distance=30, marker_footprint_size=20, marker_footprint=None, feature_queries=None, channel_names=None):
+filters=None, marker_min_distance=30, marker_footprint_size=20, marker_footprint=None, feature_queries=None, channel_names=None, do_watershed=True):

 """
 Segments objects within a single frame based on intensity thresholds and optional image processing steps.
@@ -276,8 +275,13 @@ def segment_frame_from_thresholds(frame, target_channel=0, thresholds=None, equa
 img = filter_image(img, filters=filters)
 edge = estimate_unreliable_edge(filters)
 binary_image = threshold_image(img, thresholds[0], thresholds[1], fill_holes=True, edge_exclusion=edge)
-
-
+
+if do_watershed:
+coords,distance = identify_markers_from_binary(binary_image, marker_min_distance, footprint_size=marker_footprint_size, footprint=marker_footprint, return_edt=True)
+instance_seg = apply_watershed(binary_image, coords, distance)
+else:
+instance_seg, _ = ndi.label(binary_image.astype(int).copy())
+
 instance_seg = filter_on_property(instance_seg, intensity_image=img_mc, queries=feature_queries, channel_names=channel_names)

 return instance_seg
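The new do_watershed flag keeps marker-based watershed as the default and falls back to plain connected-component labelling when it is turned off, which merges touching objects but skips marker detection entirely. A simplified sketch of the same branching built on scikit-image and scipy primitives rather than celldetective's identify_markers_from_binary / apply_watershed helpers:

import numpy as np
from scipy import ndimage as ndi
from skimage.feature import peak_local_max
from skimage.segmentation import watershed

def label_binary(binary_image, do_watershed=True, marker_min_distance=10):
    if do_watershed:
        # Distance-transform peaks act as markers, mirroring the watershed branch above.
        distance = ndi.distance_transform_edt(binary_image)
        coords = peak_local_max(distance, min_distance=marker_min_distance, labels=binary_image.astype(int))
        markers = np.zeros(binary_image.shape, dtype=int)
        markers[tuple(coords.T)] = np.arange(1, len(coords) + 1)
        return watershed(-distance, markers, mask=binary_image)
    # Fallback: one label per connected component; touching objects stay merged.
    instance_seg, _ = ndi.label(binary_image.astype(int))
    return instance_seg

binary = np.zeros((64, 64), dtype=bool)
binary[10:30, 10:30] = True
binary[25:45, 25:45] = True
print(label_binary(binary, True).max(), label_binary(binary, False).max())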
celldetective/utils.py
CHANGED

@@ -2046,7 +2046,7 @@ def load_image_dataset(datasets, channels, train_spatial_calibration=None, mask_

 assert isinstance(channels, list),'Please provide a list of channels. Abort.'

-X = []; Y = [];
+X = []; Y = []; files = [];

 for ds in datasets:
 print(f'Loading data from dataset {ds}...')
@@ -2101,9 +2101,10 @@

 X.append(image)
 Y.append(mask)
+files.append(im)

 assert len(X)==len(Y),'The number of images does not match with the number of masks... Abort.'
-return X,Y
+return X,Y,files


 def download_url_to_file(url, dst, progress=True):
@@ -2220,6 +2221,9 @@ def interpolate_nan(img, method='nearest'):
 Interpolate NaN on single channel array 2D
 """

+if np.all(img==0):
+return img
+
 if np.any(img.flatten()!=img.flatten()):
 # then need to interpolate
 x_grid, y_grid = np.meshgrid(np.arange(img.shape[1]),np.arange(img.shape[0]))
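Two small utils.py changes work together here: load_image_dataset now also returns the source file names (consumed by the training script above), and interpolate_nan returns immediately on all-zero frames, such as channels zeroed out when img_num_channels is -1. A toy illustration of that guard only, not the actual function body (the real implementation interpolates over a coordinate grid, as the meshgrid line in the hunk shows):

import numpy as np

def interpolate_nan_sketch(img):
    if np.all(img == 0):
        # Guard added in this version: a blank frame has nothing to interpolate.
        return img
    if np.any(img != img):  # NaN is the only value not equal to itself
        img = img.copy()
        img[np.isnan(img)] = np.nanmean(img)  # crude fill, stands in for the grid interpolation
    return img

print(interpolate_nan_sketch(np.zeros((3, 3))))
print(interpolate_nan_sketch(np.array([[1.0, np.nan], [3.0, 4.0]])))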
{celldetective-1.1.1.dist-info → celldetective-1.1.1.post3.dist-info}/RECORD
CHANGED

@@ -1,43 +1,43 @@
 celldetective/__init__.py,sha256=FEZpJKcskBH2IginYzeqPWoR1lVGuyYCXhv7Hnlkoo8,49
 celldetective/__main__.py,sha256=XFAkq_2cBEkWAVXDGSNFagoQBglyl0Y-GOO3KFc8UqM,13888
 celldetective/events.py,sha256=s2pWnR3Z1fcB15sET5WsF2Pi6v6qv16hks_m3WiklNs,3658
-celldetective/extra_properties.py,sha256=
-celldetective/filters.py,sha256=
-celldetective/io.py,sha256=
+celldetective/extra_properties.py,sha256=ZdWV046RR8jrQPYxLSVVb1cXkWYxfrrLj2tXfc358cU,4179
+celldetective/filters.py,sha256=b0qKwHor1fvNA_dHovP17nQz8EsW5YlyhT2TJnayn08,3615
+celldetective/io.py,sha256=ptaX4GadWuf0zOQ3ZWSLCzihEID-YAalPfxW1nqvgIU,80889
 celldetective/measure.py,sha256=HDQZfSRx3daOCV5Snu1paYU5JYkwu8engO2qZqhTAUo,48089
 celldetective/neighborhood.py,sha256=QCuhesMHGyr3c3ys9wWcNR1HM6CHdHe51R8upoolgPw,49514
-celldetective/preprocessing.py,sha256=
-celldetective/segmentation.py,sha256=
+celldetective/preprocessing.py,sha256=dpor6ry6RChzd-rdH98SQc0zvLZv95YZB5KIGym1mEY,37388
+celldetective/segmentation.py,sha256=Hu4-lOJ4UhPw2o0Hn_NPOCHsa6Iwsw5A3PR6X3Tzbb8,29196
 celldetective/signals.py,sha256=P7eiDZGGIAYCFBKjGCBi8gMBvJYywxlxZNzyGgw-26Y,102783
 celldetective/tracking.py,sha256=A0mhdF4uq4m8OX1-rhtuhG69rlh_6Pb34Aebu7hIeKM,37601
-celldetective/utils.py,sha256=
+celldetective/utils.py,sha256=f0VvWv5ZgndZiWsFFDs0O6tJlRXkRtPW0xEKM_R90yo,77581
 celldetective/datasets/segmentation_annotations/blank,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 celldetective/datasets/signal_annotations/blank,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 celldetective/gui/__init__.py,sha256=y2dvrUdJi17QMgPjl8WN3XFHYzJpu2ul4_8y7MQV1Bk,941
 celldetective/gui/about.py,sha256=i-y54Opb10pKTVNUEcJC-D6Cbiqud2EJ3ZLayXqhdqc,1715
 celldetective/gui/analyze_block.py,sha256=WD3JQQylx_dVozFCvNqrOyR6LcNHV7R1_gGh7XqOVeI,25423
 celldetective/gui/btrack_options.py,sha256=eQEf63yTUsPCN-d1LqgAMmUQpfv2FH85FqnOSaR-9Q8,35575
-celldetective/gui/classifier_widget.py,sha256=
+celldetective/gui/classifier_widget.py,sha256=52a3kOfU3uMEzUYMIOHGqiy-Q5itMEvcG-3JYrO34I8,16918
 celldetective/gui/configure_new_exp.py,sha256=ANJ-Zn4sjBphtj_aoJu6m1PFEKyv9gxeh9XqS6xOGjk,18969
 celldetective/gui/control_panel.py,sha256=wcDqe4XaDJRMmPmWKJpxd0D9V4_DrdRGnEH6D7B_IK0,17557
 celldetective/gui/gui_utils.py,sha256=PidFfdc8XASeIzZO5pfKgwqe4vROG7-KpYMcBZ42jdw,22673
 celldetective/gui/json_readers.py,sha256=fTrNrlxv9NCae8ZJexBEHxI3yCLRqt6F0Yo1OeDycfA,3686
-celldetective/gui/layouts.py,sha256=
+celldetective/gui/layouts.py,sha256=sHTWXsKdRoHYYrPELEW7djNetevcQ-RKW9aQQ2FvWoc,37644
 celldetective/gui/measurement_options.py,sha256=i0CdAfupHJAqhOT7RsufEK919sAzQnFBkQO4IAMYZL0,47704
 celldetective/gui/neighborhood_options.py,sha256=sdKxVRliZtuKSpcPfnFxqkW4V8rN2tzjhDxOPVmElyE,20191
 celldetective/gui/plot_measurements.py,sha256=xUoGxV6uXcen-t4yWtAmcGTUayICI-FxTVKPrWMNlfg,51799
 celldetective/gui/plot_signals_ui.py,sha256=TwWU2u3_mkRNsM8er0kI_kwr5EoZ29YEzlr0cQzyW4A,43732
 celldetective/gui/process_block.py,sha256=7n9glZ1ojEi1bObqwIj4giNhrteT69X1EPMQ1hK63aU,53565
-celldetective/gui/retrain_segmentation_model_options.py,sha256
-celldetective/gui/retrain_signal_model_options.py,sha256=
+celldetective/gui/retrain_segmentation_model_options.py,sha256=VixtKZTjoJFJy8X6aDbAf0zgK46aXj3I6oPP_92yqhA,22457
+celldetective/gui/retrain_signal_model_options.py,sha256=gdK1ITuaTPNvnQWXYuUBcP_pH5cvaEEns5XejjTi_Eo,17854
 celldetective/gui/seg_model_loader.py,sha256=uKp8oab-4QdTGqb-tb6bOD-FLD_154GOvgWFYz97BwY,17350
 celldetective/gui/signal_annotator.py,sha256=4ymMpo_GjSBsJSRkyNKrWRLy0EFXHINbFtp9ykDqfGE,84864
 celldetective/gui/signal_annotator_options.py,sha256=-Q7f8eCwniqbgLJqMCa91Wc-V3VHAZidwt7LPd4Z5yU,10879
 celldetective/gui/styles.py,sha256=Vw4wr6MQ4iBwhOY-ZWAxFDZZ3CNohmEnuPPazwhJaho,4129
 celldetective/gui/survival_ui.py,sha256=2JGLC5m6D_gVLwnBAM7uEvuCKw1Cli8nM9i5s7TIpGg,33612
-celldetective/gui/tableUI.py,sha256=
-celldetective/gui/thresholds_gui.py,sha256=
-celldetective/gui/viewers.py,sha256=
+celldetective/gui/tableUI.py,sha256=7QSvkvVpglcluLxqhKjehmYDwHoyZs0R7uLcJK_9fhs,34268
+celldetective/gui/thresholds_gui.py,sha256=b8SkG4DlfxBRjacKTe1NSNkq7rJm8lnSLifH-mg846k,49529
+celldetective/gui/viewers.py,sha256=buBholjAieaVVb6YinVhxPshEHxBEU3er_N0UrkwAew,27734
 celldetective/icons/logo-large.png,sha256=FXSwV3u6zEKcfpuSn4unnqB0oUnN9cHqQ9BCKWytrpg,36631
 celldetective/icons/logo.png,sha256=wV2OS8_dU5Td5cgdPbCOU3JpMpTwNuYLnfVcnQX0tJA,2437
 celldetective/icons/signals_icon.png,sha256=vEiKoqWTtN0-uJgVqtAlwCuP-f4QeWYOlO3sdp2tg2w,3969
@@ -60,10 +60,10 @@ celldetective/models/tracking_configs/ricm.json,sha256=L-vmwCR1f89U-qnH2Ms0cBfPF
 celldetective/models/tracking_configs/ricm2.json,sha256=DDjJ6ScYcDWvlsy7ujPID8v8H28vcNcMuZmNR8XmGxo,2718
 celldetective/scripts/analyze_signals.py,sha256=23TXGNw-j5xT3ss4mXlnKdBgFLnQ50JUEQOC6_H7Q_0,2203
 celldetective/scripts/measure_cells.py,sha256=4uRG6Dg0WsO-N8ZaBJ4loWOvX6FdHaCblIFXq6Dtirc,11000
-celldetective/scripts/segment_cells.py,sha256=
+celldetective/scripts/segment_cells.py,sha256=55hM3JbMoW5TB43uENgtvADL8BziI17B3GjG4axsj6s,8291
 celldetective/scripts/segment_cells_thresholds.py,sha256=GbWXa6xoO8s4PinJPZIxAuosw4vpzyJ7FiFYpSURojk,4998
 celldetective/scripts/track_cells.py,sha256=AaNiYEW4osYKKR2kbdVLOUnQEBbcZIA-D0mkhcxPWTY,7985
-celldetective/scripts/train_segmentation_model.py,sha256=
+celldetective/scripts/train_segmentation_model.py,sha256=rw5LD70PrE_SCBvaFt0ri1uSa-YI5DM5exH_BGVlw5Y,8515
 celldetective/scripts/train_signal_model.py,sha256=9-dmPCLKJ9ypjsV9AwFd-Sb6B6YaHS0QGT218H5hUPo,1861
 tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 tests/test_events.py,sha256=eLFwwEEJfQAdwhews3-fn1HSvzozcNNFN_Qn0gOvQkE,685
@@ -76,9 +76,9 @@ tests/test_segmentation.py,sha256=-3b7o_fUVMYxfVwX5VHFqRF0dDXObSTtylf5XQGcq1A,34
 tests/test_signals.py,sha256=No4cah6KxplhDcKXnU8RrA7eDla4hWw6ccf7xGnBokU,3599
 tests/test_tracking.py,sha256=8hebWSqEIuttD1ABn-6dKCT7EXKRR7-4RwyFWi1WPFo,8800
 tests/test_utils.py,sha256=NKRCAC1d89aBK5cWjTb7-pInYow901RrT-uBlIdz4KI,3692
-celldetective-1.1.1.dist-info/LICENSE,sha256=OXLcl0T2SZ8Pmy2_dmlvKuetivmyPd5m1q-Gyd-zaYY,35149
-celldetective-1.1.1.dist-info/METADATA,sha256=
-celldetective-1.1.1.dist-info/WHEEL,sha256=
-celldetective-1.1.1.dist-info/entry_points.txt,sha256=2NU6_EOByvPxqBbCvjwxlVlvnQreqZ3BKRCVIKEv3dg,62
-celldetective-1.1.1.dist-info/top_level.txt,sha256=6rsIKKfGMKgud7HPuATcpq6EhdXwcg_yknBVWn9x4C4,20
-celldetective-1.1.1.dist-info/RECORD,,
+celldetective-1.1.1.post3.dist-info/LICENSE,sha256=OXLcl0T2SZ8Pmy2_dmlvKuetivmyPd5m1q-Gyd-zaYY,35149
+celldetective-1.1.1.post3.dist-info/METADATA,sha256=r2-IaTUiO4VxRyIFdQJbsYx_MJltOPt76Emkf8XqAtk,12418
+celldetective-1.1.1.post3.dist-info/WHEEL,sha256=y4mX-SOX4fYIkonsAGA5N0Oy-8_gI4FXw5HNI1xqvWg,91
+celldetective-1.1.1.post3.dist-info/entry_points.txt,sha256=2NU6_EOByvPxqBbCvjwxlVlvnQreqZ3BKRCVIKEv3dg,62
+celldetective-1.1.1.post3.dist-info/top_level.txt,sha256=6rsIKKfGMKgud7HPuATcpq6EhdXwcg_yknBVWn9x4C4,20
+celldetective-1.1.1.post3.dist-info/RECORD,,
{celldetective-1.1.1.dist-info → celldetective-1.1.1.post3.dist-info}/LICENSE
File without changes

{celldetective-1.1.1.dist-info → celldetective-1.1.1.post3.dist-info}/entry_points.txt
File without changes

{celldetective-1.1.1.dist-info → celldetective-1.1.1.post3.dist-info}/top_level.txt
File without changes