spacr 0.3.22__py3-none-any.whl → 0.3.31__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- spacr/app_annotate.py +1 -2
- spacr/deep_spacr.py +131 -227
- spacr/gui.py +1 -0
- spacr/gui_core.py +13 -4
- spacr/gui_elements.py +72 -49
- spacr/gui_utils.py +33 -44
- spacr/io.py +4 -4
- spacr/measure.py +1 -38
- spacr/plot.py +0 -2
- spacr/settings.py +50 -5
- spacr/utils.py +383 -28
- {spacr-0.3.22.dist-info → spacr-0.3.31.dist-info}/METADATA +1 -1
- {spacr-0.3.22.dist-info → spacr-0.3.31.dist-info}/RECORD +17 -17
- {spacr-0.3.22.dist-info → spacr-0.3.31.dist-info}/LICENSE +0 -0
- {spacr-0.3.22.dist-info → spacr-0.3.31.dist-info}/WHEEL +0 -0
- {spacr-0.3.22.dist-info → spacr-0.3.31.dist-info}/entry_points.txt +0 -0
- {spacr-0.3.22.dist-info → spacr-0.3.31.dist-info}/top_level.txt +0 -0
spacr/gui_utils.py
CHANGED
@@ -77,7 +77,7 @@ def load_app(root, app_name, app_func):
     else:
         proceed_with_app(root, app_name, app_func)

-def parse_list(value):
+def parse_list_v1(value):
     """
     Parses a string representation of a list and returns the parsed list.

@@ -98,6 +98,34 @@ def parse_list(value):
                 return parsed_value
             elif all(isinstance(item, str) for item in parsed_value):
                 return parsed_value
+            elif all(isinstance(item, float) for item in parsed_value):
+                return parsed_value
+            else:
+                raise ValueError("List contains mixed types or unsupported types")
+        else:
+            raise ValueError(f"Expected a list but got {type(parsed_value).__name__}")
+    except (ValueError, SyntaxError) as e:
+        raise ValueError(f"Invalid format for list: {value}. Error: {e}")
+
+def parse_list(value):
+    """
+    Parses a string representation of a list and returns the parsed list.
+
+    Args:
+        value (str): The string representation of the list.
+
+    Returns:
+        list: The parsed list, which can contain integers, floats, or strings.
+
+    Raises:
+        ValueError: If the input value is not a valid list format or contains mixed types or unsupported types.
+    """
+    try:
+        parsed_value = ast.literal_eval(value)
+        if isinstance(parsed_value, list):
+            # Check if all elements are homogeneous (either all int, float, or str)
+            if all(isinstance(item, (int, float, str)) for item in parsed_value):
+                return parsed_value
             else:
                 raise ValueError("List contains mixed types or unsupported types")
         else:
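For reference, a minimal sketch of how the reworked parse_list behaves (illustrative inputs; the function lives in spacr.gui_utils and relies on ast.literal_eval as shown above):

from spacr.gui_utils import parse_list

parse_list("[1, 2, 3]")        # -> [1, 2, 3]
parse_list("[0.1, 0.2]")       # -> [0.1, 0.2]
parse_list("['r', 'g', 'b']")  # -> ['r', 'g', 'b']
parse_list("[1, 'a']")         # accepted: the new check only requires int/float/str items
parse_list("{'a': 1}")         # raises ValueError (not a list)
parse_list("not a list")       # raises ValueError (invalid format)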
@@ -225,7 +253,7 @@ def annotate(settings):

     root = tk.Tk()
     root.geometry(settings['geom'])
-    app = AnnotateApp(root, db, src, image_type=settings['image_type'], channels=settings['channels'], image_size=settings['img_size'], grid_rows=settings['rows'], grid_cols=settings['columns'], annotation_column=settings['annotation_column'], normalize=settings['normalize'], percentiles=settings['percentiles'], measurement=settings['measurement'], threshold=settings['threshold'])
+    app = AnnotateApp(root, db, src, image_type=settings['image_type'], channels=settings['channels'], image_size=settings['img_size'], grid_rows=settings['rows'], grid_cols=settings['columns'], annotation_column=settings['annotation_column'], normalize=settings['normalize'], percentiles=settings['percentiles'], measurement=settings['measurement'], threshold=settings['threshold'], normalize_channels=settings['normalize_channels'])
     next_button = tk.Button(root, text="Next", command=app.next_page)
     next_button.grid(row=app.grid_rows, column=app.grid_cols - 1)
     back_button = tk.Button(root, text="Back", command=app.previous_page)
@@ -256,7 +284,6 @@ def generate_annotate_fields(frame):
     # Arrange input fields and labels
     for row, (name, data) in enumerate(vars_dict.items()):
         tk.Label(frame, text=f"{name.replace('_', ' ').capitalize()}:", bg=style_out['bg_color'], fg=style_out['fg_color'], font=font_loader.get_font(size=font_size)).grid(row=row, column=0)
-        #ttk.Label(frame, text=f"{name.replace('_', ' ').capitalize()}:", background="black", foreground="white").grid(row=row, column=0)
         if isinstance(data['value'], list):
             # Convert lists to comma-separated strings
             data['entry'].insert(0, ','.join(map(str, data['value'])))
@@ -272,6 +299,7 @@ def run_annotate_app(vars_dict, parent_frame):
     settings['img_size'] = list(map(int, settings['img_size'].split(','))) # Convert string to list of integers
     settings['percentiles'] = list(map(int, settings['percentiles'].split(','))) # Convert string to list of integers
     settings['normalize'] = settings['normalize'].lower() == 'true'
+    settings['normalize_channels'] = settings['channels'].split(',')
     settings['rows'] = int(settings['rows'])
     settings['columns'] = int(settings['columns'])
     settings['measurement'] = settings['measurement'].split(',')
@@ -292,6 +320,7 @@ def annotate_app(parent_frame, settings):
     global global_image_refs
     global_image_refs.clear()
     root = parent_frame.winfo_toplevel()
+    print('annotate_app',settings)
     annotate_with_image_refs(settings, root, lambda: load_next_app(root))

 def load_next_app(root):
@@ -313,46 +342,6 @@ def load_next_app(root):
         new_root.title("SpaCr Application")
         next_app_func(new_root, *next_app_args)

-def annotate_with_image_refs(settings, root, shutdown_callback):
-    #from .gui_utils import proceed_with_app
-    from .gui import gui_app
-    from .settings import set_annotate_default_settings
-
-    settings = set_annotate_default_settings(settings)
-    src = settings['src']
-
-    db = os.path.join(src, 'measurements/measurements.db')
-    conn = sqlite3.connect(db)
-    c = conn.cursor()
-    c.execute('PRAGMA table_info(png_list)')
-    cols = c.fetchall()
-    if settings['annotation_column'] not in [col[1] for col in cols]:
-        c.execute(f"ALTER TABLE png_list ADD COLUMN {settings['annotation_column']} integer")
-    conn.commit()
-    conn.close()
-
-    app = AnnotateApp(root, db, src, image_type=settings['image_type'], channels=settings['channels'], image_size=settings['img_size'], grid_rows=settings['rows'], grid_cols=settings['columns'], annotation_column=settings['annotation_column'], normalize=settings['normalize'], percentiles=settings['percentiles'], measurement=settings['measurement'], threshold=settings['threshold'])
-
-    # Set the canvas background to black
-    root.configure(bg='black')
-
-    next_button = tk.Button(root, text="Next", command=app.next_page, background='black', foreground='white')
-    next_button.grid(row=app.grid_rows, column=app.grid_cols - 1)
-    back_button = tk.Button(root, text="Back", command=app.previous_page, background='black', foreground='white')
-    back_button.grid(row=app.grid_rows, column=app.grid_cols - 2)
-    exit_button = tk.Button(root, text="Exit", command=lambda: [app.shutdown(), shutdown_callback()], background='black', foreground='white')
-    exit_button.grid(row=app.grid_rows, column=app.grid_cols - 3)
-
-    #app.load_images()
-
-    # Store the shutdown function and next app details in the root
-    root.current_app_exit_func = lambda: [app.shutdown(), shutdown_callback()]
-    root.next_app_func = proceed_with_app
-    root.next_app_args = ("Main App", gui_app)
-
-    # Call load_images after setting up the root window
-    app.load_images()
-
 def annotate_with_image_refs(settings, root, shutdown_callback):
     from .settings import set_annotate_default_settings

@@ -373,7 +362,7 @@ def annotate_with_image_refs(settings, root, shutdown_callback):
     screen_height = root.winfo_screenheight()
     root.geometry(f"{screen_width}x{screen_height}")

-    app = AnnotateApp(root, db, src, image_type=settings['image_type'], channels=settings['channels'], image_size=settings['img_size'], annotation_column=settings['annotation_column'], normalize=settings['normalize'], percentiles=settings['percentiles'], measurement=settings['measurement'], threshold=settings['threshold'])
+    app = AnnotateApp(root, db, src, image_type=settings['image_type'], channels=settings['channels'], image_size=settings['img_size'], annotation_column=settings['annotation_column'], normalize=settings['normalize'], percentiles=settings['percentiles'], measurement=settings['measurement'], threshold=settings['threshold'], normalize_channels=settings['normalize_channels'])

     # Set the canvas background to black
     root.configure(bg='black')
spacr/io.py
CHANGED
@@ -2861,10 +2861,10 @@ def generate_dataset(settings={}):
     date_name = datetime.date.today().strftime('%y%m%d')
     if len(settings['src']) > 1:
         date_name = f"{date_name}_combined"
-    if not settings['file_metadata'] is None:
-        tar_name = f"{date_name}_{settings['experiment']}_{settings['file_metadata']}.tar"
-    else:
-        tar_name = f"{date_name}_{settings['experiment']}.tar"
+    #if not settings['file_metadata'] is None:
+    #    tar_name = f"{date_name}_{settings['experiment']}_{settings['file_metadata']}.tar"
+    #else:
+    tar_name = f"{date_name}_{settings['experiment']}.tar"
     tar_name = os.path.join(dst, tar_name)
     if os.path.exists(tar_name):
         number = random.randint(1, 100)
spacr/measure.py
CHANGED
@@ -652,43 +652,6 @@ def img_list_to_grid(grid, titles=None):
     plt.tight_layout(pad=0.1)
     return fig

-def filepaths_to_database(img_paths, settings, source_folder, crop_mode):
-    from. utils import _map_wells_png
-    png_df = pd.DataFrame(img_paths, columns=['png_path'])
-
-    png_df['file_name'] = png_df['png_path'].apply(lambda x: os.path.basename(x))
-
-    parts = png_df['file_name'].apply(lambda x: pd.Series(_map_wells_png(x, timelapse=settings['timelapse'])))
-
-    columns = ['plate', 'row', 'col', 'field']
-
-    if settings['timelapse']:
-        columns = columns + ['time_id']
-
-    columns = columns + ['prcfo']
-
-    if crop_mode == 'cell':
-        columns = columns + ['cell_id']
-
-    if crop_mode == 'nucleus':
-        columns = columns + ['nucleus_id']
-
-    if crop_mode == 'pathogen':
-        columns = columns + ['pathogen_id']
-
-    if crop_mode == 'cytoplasm':
-        columns = columns + ['cytoplasm_id']
-
-    png_df[columns] = parts
-
-    try:
-        conn = sqlite3.connect(f'{source_folder}/measurements/measurements.db', timeout=5)
-        png_df.to_sql('png_list', conn, if_exists='append', index=False)
-        conn.commit()
-    except sqlite3.OperationalError as e:
-        print(f"SQLite error: {e}", flush=True)
-        traceback.print_exc()
-
 #@log_function_call
 def _measure_crop_core(index, time_ls, file, settings):

@@ -711,7 +674,7 @@ def _measure_crop_core(index, time_ls, file, settings):
     """

     from .plot import _plot_cropped_arrays
-    from .utils import _merge_overlapping_objects, _filter_object, _relabel_parent_with_child_labels, _exclude_objects, normalize_to_dtype
+    from .utils import _merge_overlapping_objects, _filter_object, _relabel_parent_with_child_labels, _exclude_objects, normalize_to_dtype, filepaths_to_database
     from .utils import _merge_and_save_to_database, _crop_center, _find_bounding_box, _generate_names, _get_percentiles

     figs = {}
spacr/plot.py
CHANGED
@@ -1020,8 +1020,6 @@ def _plot_recruitment_v2(df, df_type, channel_of_interest, columns=[], figuresiz
         None
     """

-    from .plot import spacrGraph
-
     color_list = [(55/255, 155/255, 155/255),
                   (155/255, 55/255, 155/255),
                   (55/255, 155/255, 255/255),
spacr/settings.py
CHANGED
@@ -246,7 +246,7 @@ def get_measure_crop_settings(settings={}):
     settings.setdefault('normalize_by','png')
     settings.setdefault('crop_mode',['cell'])
     settings.setdefault('dialate_pngs', False)
-    settings.setdefault('dialate_png_ratios', [0.2,
+    settings.setdefault('dialate_png_ratios', [0.2,0.2])

     # Timelapsed settings
     settings.setdefault('timelapse', False)
@@ -859,7 +859,7 @@ expected_types = {
     'dataset':str,
     'score_threshold':float,
     'sample':None,
-    'file_metadata':None,
+    'file_metadata':(str, type(None), list),
     'apply_model_to_dataset':False,
     "train":bool,
     "test":bool,
@@ -880,6 +880,11 @@ expected_types = {
     "generate_training_dataset":bool,
     "segmentation_mode":str,
     "train_DL_model":bool,
+    "normalize":bool,
+    "overlay":bool,
+    "correlate":bool,
+    "target_layer":str,
+    "normalize_input":bool,
 }

 categories = {"Paths":[ "src", "grna", "barcodes", "custom_model_path", "dataset","model_path","grna_csv","row_csv","column_csv"],
@@ -889,18 +894,19 @@ categories = {"Paths":[ "src", "grna", "barcodes", "custom_model_path", "dataset
              "Nucleus": ["nucleus_intensity_range", "nucleus_size_range", "nucleus_chann_dim", "nucleus_channel", "nucleus_background", "nucleus_Signal_to_noise", "nucleus_CP_prob", "nucleus_FT", "remove_background_nucleus", "nucleus_min_size", "nucleus_mask_dim", "nucleus_loc"],
              "Pathogen": ["pathogen_intensity_range", "pathogen_size_range", "pathogen_chann_dim", "pathogen_channel", "pathogen_background", "pathogen_Signal_to_noise", "pathogen_CP_prob", "pathogen_FT", "pathogen_model", "remove_background_pathogen", "pathogen_min_size", "pathogen_mask_dim", "pathogens", "pathogen_loc", "pathogen_types", "pathogen_plate_metadata", ],
              "Measurements": ["remove_image_canvas", "remove_highly_correlated", "homogeneity", "homogeneity_distances", "radial_dist", "calculate_correlation", "manders_thresholds", "save_measurements", "tables", "image_nr", "dot_size", "filter_by", "remove_highly_correlated_features", "remove_low_variance_features", "channel_of_interest"],
-             "Object Image": ["save_png", "dialate_pngs", "dialate_png_ratios", "png_size", "png_dims", "save_arrays", "normalize_by", "
+             "Object Image": ["save_png", "dialate_pngs", "dialate_png_ratios", "png_size", "png_dims", "save_arrays", "normalize_by", "crop_mode", "dialate_pngs", "normalize", "use_bounding_box"],
              "Sequencing": ["signal_direction","mode","comp_level","comp_type","save_h5","expected_end","offset","target_sequence","regex", "highlight"],
              "Generate Dataset":["file_metadata","class_metadata", "annotation_column","annotated_classes", "dataset_mode", "metadata_type_by","custom_measurement", "sample", "size"],
              "Hyperparamiters (Training)": ["png_type", "score_threshold","file_type", "train_channels", "epochs", "loss_type", "optimizer_type","image_size","val_split","learning_rate","weight_decay","dropout_rate", "init_weights", "train", "classes", "augment", "amsgrad","use_checkpoint","gradient_accumulation","gradient_accumulation_steps","intermedeate_save","pin_memory"],
              "Hyperparamiters (Embedding)": ["visualize","n_neighbors","min_dist","metric","resnet_features","reduction_method","embedding_by_controls","col_to_compare","log_data"],
              "Hyperparamiters (Clustering)": ["eps","min_samples","analyze_clusters","clustering","remove_cluster_noise"],
              "Hyperparamiters (Regression)":["cov_type", "class_1_threshold", "plate", "other", "fraction_threshold", "alpha", "random_row_column_effects", "regression_type", "min_cell_count", "agg_type", "transform", "dependent_variable"],
+             "Hyperparamiters (Activation)":["cam_type", "normalize", "overlay", "correlation", "target_layer", "normalize_input"],
              "Annotation": ["nc_loc", "pc_loc", "nc", "pc", "cell_plate_metadata","treatment_plate_metadata", "metadata_types", "cell_types", "target","positive_control","negative_control", "location_column", "treatment_loc", "channel_of_interest", "measurement", "treatments", "um_per_pixel", "nr_imgs", "exclude", "exclude_conditions", "mix", "pos", "neg"],
              "Plot": ["plot", "plot_control", "plot_nr", "examples_to_plot", "normalize_plots", "cmap", "figuresize", "plot_cluster_grids", "img_zoom", "row_limit", "color_by", "plot_images", "smooth_lines", "plot_points", "plot_outlines", "black_background", "plot_by_cluster", "heatmap_feature","grouping","min_max","cmap","save_figure"],
              "Test": ["test_mode", "test_images", "random_test", "test_nr", "test", "test_split"],
              "Timelapse": ["timelapse", "fps", "timelapse_displacement", "timelapse_memory", "timelapse_frame_limits", "timelapse_remove_transient", "timelapse_mode", "timelapse_objects", "compartments"],
-             "Advanced": ["target_intensity_min", "cells_per_well", "nuclei_limit", "pathogen_limit", "uninfected", "backgrounds", "schedule", "test_size","exclude","n_repeats","top_features", "model_type_ml", "model_type","minimum_cell_count","n_estimators","preprocess", "remove_background", "normalize", "lower_percentile", "merge_pathogens", "batch_size", "filter", "save", "masks", "verbose", "randomize", "n_jobs"],
+             "Advanced": ["shuffle", "target_intensity_min", "cells_per_well", "nuclei_limit", "pathogen_limit", "uninfected", "backgrounds", "schedule", "test_size","exclude","n_repeats","top_features", "model_type_ml", "model_type","minimum_cell_count","n_estimators","preprocess", "remove_background", "normalize", "lower_percentile", "merge_pathogens", "batch_size", "filter", "save", "masks", "verbose", "randomize", "n_jobs"],
              "Miscellaneous": ["all_to_mip", "pick_slice", "skip_mode", "upscale", "upscale_factor"]
              }

@@ -949,6 +955,14 @@ def check_settings(vars_dict, expected_types, q=None):
                 settings[key] = float(value) if '.' in value else int(value)
             elif expected_type == (str, type(None)):
                 settings[key] = str(value) if value else None
+            elif expected_type == (str, type(None), list):
+                if isinstance(value, list):
+                    settings[key] = parse_list(value) if value else None
+                elif isinstance(value, str):
+                    settings[key] = str(value)
+                else:
+                    settings[key] = None
+
             elif expected_type == dict:
                 try:
                     # Ensure that the value is a string that can be converted to a dictionary
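A standalone sketch of how a setting typed as (str, type(None), list), such as file_metadata, can end up as a string, a list, or None (coerce_str_none_list is an illustrative helper, not part of spacr, and is a simplification of the branch added above):

def coerce_str_none_list(value):
    # keep non-empty lists and strings; anything else becomes None
    if isinstance(value, list):
        return value if value else None
    if isinstance(value, str):
        return value
    return None

coerce_str_none_list(['plate1', 'plate2'])  # -> ['plate1', 'plate2']
coerce_str_none_list('plate1')              # -> 'plate1'
coerce_str_none_list(None)                  # -> None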
@@ -1206,7 +1220,7 @@ def generate_fields(variables, scrollable_frame):
        "dataset": "str - file name of the tar file with image dataset",
        "score_threshold": "float - threshold for classification",
        "sample": "str - number of images to sample for tar dataset (including both classes). Default: None",
-       "file_metadata": "str - string that must be present in image path to be included in the dataset",
+       "file_metadata": "str or list of strings - string(s) that must be present in image path to be included in the dataset",
        "apply_model_to_dataset": "bool - whether to apply model to the dataset",
        "train_channels": "list - channels to use for training",
        "dataset_mode": "str - How to generate train/test dataset.",
@@ -1247,6 +1261,13 @@ def generate_fields(variables, scrollable_frame):
        "mode": "(str) - Mode to use for sequence analysis (either single for R1 or R2 fastq files or paired for the combination of R1 and R2).",
        "signal_direction": "(str) - Direction of fastq file (R1 or R2). only relevent when mode is single.",
        "custom_model_path": "(str) - Path to the custom model to finetune.",
+       "cam_type": "(str) - Choose between: gradcam, gradcam_pp, saliency_image, saliency_channel to generate activateion maps of DL models",
+       "target_layer": "(str) - Only used for gradcam and gradcam_pp. The layer to use for the activation map.",
+       "normalize": "(bool) - Normalize images before overlayng the activation maps.",
+       "overlay": "(bool) - Overlay activation maps on the images.",
+       "shuffle": "(bool) - Shuffle the dataset bufore generating the activation maps",
+       "correlation": "(bool) - Calculate correlation between image channels and activation maps. Data is saved to .db.",
+       "normalize_input": "(bool) - Normalize the input images before passing them to the model.",
    }

    for key, (var_type, options, default_value) in variables.items():
@@ -1282,6 +1303,8 @@ descriptions = {

    'regression': "Perform regression analysis on your data. Function: regression_tools from spacr.analysis.\n\nKey Features:\n- Statistical Analysis: Conduct various types of regression analysis to identify relationships within your data.\n- Flexible Options: Supports multiple regression models and configurations.\n- Data Insight: Gain deeper insights into your dataset through advanced regression techniques.",

+   'activation': "",
+
    'recruitment': "Analyze recruitment data to understand sample recruitment dynamics. Function: recruitment_analysis_tools from spacr.analysis.\n\nKey Features:\n- Recruitment Analysis: Investigate and analyze the recruitment of samples over time or conditions.\n- Visualization: Generate visualizations to represent recruitment trends and patterns.\n- Integration: Utilize data from various sources for a comprehensive recruitment analysis."
    }

@@ -1292,6 +1315,7 @@ def set_annotate_default_settings(settings):
     settings.setdefault('img_size', 200)
     settings.setdefault('annotation_column', 'test')
     settings.setdefault('normalize', 'False')
+    settings.setdefault('normalize_channels', "r,g,b")
     settings.setdefault('percentiles', [2, 98])
     settings.setdefault('measurement', '')#'cytoplasm_channel_3_mean_intensity,pathogen_channel_3_mean_intensity')
     settings.setdefault('threshold', '')#'2')
@@ -1314,4 +1338,25 @@ def set_default_generate_barecode_mapping(settings={}):
     settings.setdefault('mode', 'paired')
     settings.setdefault('single_direction', 'R1')
     settings.setdefault('test', False)
+    return settings
+
+def get_default_generate_activation_map_settings(settings):
+    settings.setdefault('dataset', 'path')
+    settings.setdefault('model_type', 'maxvit')
+    settings.setdefault('model_path', 'path')
+    settings.setdefault('image_size', 224)
+    settings.setdefault('batch_size', 64)
+    settings.setdefault('normalize', True)
+    settings.setdefault('cam_type', 'gradcam')
+    settings.setdefault('target_layer', None)
+    settings.setdefault('plot', False)
+    settings.setdefault('save', True)
+    settings.setdefault('normalize_input', True)
+    settings.setdefault('channels', [1,2,3])
+    settings.setdefault('overlay', True)
+    settings.setdefault('shuffle', True)
+    settings.setdefault('correlation', True)
+    settings.setdefault('manders_thresholds', [15,50, 75])
+    settings.setdefault('n_jobs', None)
+
     return settings