spacr 0.0.36__py3-none-any.whl → 0.0.61__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
spacr/sim.py CHANGED
@@ -397,29 +397,6 @@ def classifier_v2(positive_mean, positive_variance, negative_mean, negative_vari
      df['score'] = df['is_active'].apply(lambda is_active: np.random.beta(a1, b1) if is_active else np.random.beta(a2, b2))
      return df

- def classifier_v1(positive_mean, positive_variance, negative_mean, negative_variance, df):
-     """
-     Classifies the data in the DataFrame based on the given parameters.
-
-     Args:
-         positive_mean (float): The mean of the positive distribution.
-         positive_variance (float): The variance of the positive distribution.
-         negative_mean (float): The mean of the negative distribution.
-         negative_variance (float): The variance of the negative distribution.
-         df (pandas.DataFrame): The DataFrame containing the data to be classified.
-
-     Returns:
-         pandas.DataFrame: The DataFrame with an additional 'score' column containing the classification scores.
-     """
-     # alpha and beta for positive distribution
-     a1 = positive_mean*(positive_mean*(1-positive_mean)/positive_variance - 1)
-     b1 = a1*(1-positive_mean)/positive_mean
-     # alpha and beta for negative distribution
-     a2 = negative_mean*(negative_mean*(1-negative_mean)/negative_variance - 1)
-     b2 = a2*(1-negative_mean)/negative_mean
-     df['score'] = df['is_active'].apply(lambda is_active: np.random.beta(a1, b1) if is_active else np.random.beta(a2, b2))
-     return df
-
  def compute_roc_auc(cell_scores):
      """
      Compute the Receiver Operating Characteristic (ROC) Area Under the Curve (AUC) for cell scores.
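
classifier_v2 (and the classifier_v1 it replaces) draws per-cell scores from two Beta distributions whose shape parameters are recovered from a requested mean and variance by the method of moments. A minimal sketch of that conversion, independent of the spacr code (the helper name and example numbers are illustrative; it assumes the mean lies in (0, 1) and the variance is small enough to yield positive parameters):

import numpy as np

def beta_params_from_moments(mean, variance):
    # Method of moments for Beta(a, b): mean = a/(a+b), variance = mean*(1-mean)/(a+b+1)
    a = mean * (mean * (1 - mean) / variance - 1)
    b = a * (1 - mean) / mean
    if a <= 0 or b <= 0:
        raise ValueError("variance too large for the given mean")
    return a, b

a, b = beta_params_from_moments(0.8, 0.01)    # e.g. the positive (active) class
scores = np.random.beta(a, b, size=1000)      # simulated classifier scores in (0, 1)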
@@ -1326,25 +1303,6 @@ def generate_floats(start, stop, step):

      return floats_list

- def remove_columns_with_single_value_v1(df):
-     """
-     Removes columns from the DataFrame that have the same value in all rows.
-
-     Args:
-         df (pandas.DataFrame): The original DataFrame.
-
-     Returns:
-         pandas.DataFrame: A DataFrame with the columns removed that contained only one unique value.
-     """
-
-     df=df.copy()
-
-     for column in df.columns:
-         if len(df[column].unique()) == 1:
-             df.drop(column, axis=1, inplace=True)
-
-     return df
-
  def remove_columns_with_single_value(df):
      """
      Removes columns from the DataFrame that have the same value in all rows.
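
Both the deleted remove_columns_with_single_value_v1 and the surviving remove_columns_with_single_value drop constant columns by looping over df.columns and testing unique(). A vectorized equivalent (a sketch, not part of spacr; nunique with dropna=False counts NaN as a distinct value):

import pandas as pd

def drop_constant_columns(df):
    # Keep only columns with more than one distinct value
    return df.loc[:, df.nunique(dropna=False) > 1]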
spacr/timelapse.py CHANGED
@@ -3,7 +3,6 @@ import numpy as np
  import pandas as pd
  from collections import defaultdict
  import matplotlib.pyplot as plt
- from matplotlib.animation import FuncAnimation
  from IPython.display import display
  from IPython.display import Image as ipyimage
  import trackpy as tp
spacr/train.py CHANGED
@@ -6,6 +6,7 @@ from torch.autograd import grad
  from torch.optim.lr_scheduler import StepLR
  import torch.nn.functional as F
  from IPython.display import display, clear_output
+ import difflib

  from .logger import log_function_call

@@ -233,17 +234,17 @@ def train_test_model(src, settings, custom_model=False, custom_model_path=None):

      if settings['test']:
          test, _, plate_names_test = generate_loaders(src,
- train_mode=settings['train_mode'],
- mode='test',
- image_size=settings['image_size'],
- batch_size=settings['batch_size'],
- classes=settings['classes'],
- num_workers=settings['num_workers'],
- validation_split=0.0,
- pin_memory=settings['pin_memory'],
- normalize=settings['normalize'],
- channels=settings['channels'],
- verbose=settings['verbose'])
+ train_mode=settings['train_mode'],
+ mode='test',
+ image_size=settings['image_size'],
+ batch_size=settings['batch_size'],
+ classes=settings['classes'],
+ num_workers=settings['num_workers'],
+ validation_split=0.0,
+ pin_memory=settings['pin_memory'],
+ normalize=settings['normalize'],
+ channels=settings['channels'],
+ verbose=settings['verbose'])
      if model == None:
          model_path = pick_best_model(src+'/model')
          print(f'Best model: {model_path}')
@@ -334,7 +335,7 @@ def train_model(dst, model_type, train_loaders, train_loader_names, train_mode='
      """

      from .io import _save_model, _save_progress
-     from .utils import compute_irm_penalty, calculate_loss, choose_model #evaluate_model_performance,
+     from .utils import compute_irm_penalty, calculate_loss, choose_model

      print(f'Train batches:{len(train_loaders)}, Validation batches:{len(val_loaders)}')

@@ -505,4 +506,162 @@ def train_model(dst, model_type, train_loaders, train_loader_names, train_mode='
      _save_progress(dst, results_df, train_metrics_df)
      _save_model(model, model_type, results_df, dst, epoch, epochs, intermedeate_save=[0.99,0.98,0.95,0.94])
      print(f'Saved model: {dst}')
-     return
+     return
+
+ def get_submodules(model, prefix=''):
+     submodules = []
+     for name, module in model.named_children():
+         full_name = prefix + ('.' if prefix else '') + name
+         submodules.append(full_name)
+         submodules.extend(get_submodules(module, full_name))
+     return submodules
+
+ def visualize_model_attention_v2(src, model_type='maxvit', model_path='', image_size=224, channels=[1,2,3], normalize=True, class_names=None, save_saliency=False, save_dir='saliency_maps'):
+     import torch
+     import os
+     from spacr.utils import SaliencyMapGenerator, preprocess_image
+     import matplotlib.pyplot as plt
+     import numpy as np
+     from PIL import Image
+
+     use_cuda = torch.cuda.is_available()
+     device = torch.device("cuda" if use_cuda else "cpu")
+
+     # Load the entire model object
+     model = torch.load(model_path)
+     model.to(device)
+
+     # Create directory for saving saliency maps if it does not exist
+     if save_saliency and not os.path.exists(save_dir):
+         os.makedirs(save_dir)
+
+     # Collect all images and their tensors
+     images = []
+     input_tensors = []
+     filenames = []
+     for file in os.listdir(src):
+         image_path = os.path.join(src, file)
+         image, input_tensor = preprocess_image(image_path, normalize=normalize, image_size=image_size, channels=channels)
+         images.append(image)
+         input_tensors.append(input_tensor)
+         filenames.append(file)
+
+     input_tensors = torch.cat(input_tensors).to(device)
+     class_labels = torch.zeros(input_tensors.size(0), dtype=torch.long).to(device) # Replace with actual class labels if available
+
+     # Generate saliency maps
+     cam_generator = SaliencyMapGenerator(model)
+     saliency_maps = cam_generator.compute_saliency_maps(input_tensors, class_labels)
+
+     # Plot images, saliency maps, and overlays
+     saliency_maps = saliency_maps.cpu().numpy()
+     N = len(images)
+
+     dst = os.path.join(src, 'saliency_maps')
+     os.makedirs(dst, exist_ok=True)
+
+     for i in range(N):
+         fig, axes = plt.subplots(1, 3, figsize=(15, 5))
+
+         # Original image
+         axes[0].imshow(images[i])
+         axes[0].axis('off')
+         if class_names:
+             axes[0].set_title(class_names[class_labels[i].item()])
+
+         # Saliency map
+         axes[1].imshow(saliency_maps[i], cmap=plt.cm.hot)
+         axes[1].axis('off')
+
+         # Overlay
+         overlay = np.array(images[i])
+         axes[2].imshow(overlay)
+         axes[2].imshow(saliency_maps[i], cmap='jet', alpha=0.5)
+         axes[2].axis('off')
+
+         plt.tight_layout()
+         plt.show()
+
+         # Save the saliency map if required
+         if save_saliency:
+             saliency_image = Image.fromarray((saliency_maps[i] * 255).astype(np.uint8))
+             saliency_image.save(os.path.join(dst, f'saliency_{filenames[i]}'))
+
+ def visualize_model_attention(src, model_type='maxvit', model_path='', image_size=224, channels=[1,2,3], normalize=True, class_names=None, save_saliency=False, save_dir='saliency_maps'):
+     import torch
+     import os
+     from spacr.utils import SaliencyMapGenerator, preprocess_image
+     import matplotlib.pyplot as plt
+     import numpy as np
+     from PIL import Image
+
+     use_cuda = torch.cuda.is_available()
+     device = torch.device("cuda" if use_cuda else "cpu")
+
+     # Load the entire model object
+     model = torch.load(model_path)
+     model.to(device)
+
+     # Create directory for saving saliency maps if it does not exist
+     if save_saliency and not os.path.exists(save_dir):
+         os.makedirs(save_dir)
+
+     # Collect all images and their tensors
+     images = []
+     input_tensors = []
+     filenames = []
+     for file in os.listdir(src):
+         if not file.endswith('.png'):
+             continue
+         image_path = os.path.join(src, file)
+         image, input_tensor = preprocess_image(image_path, normalize=normalize, image_size=image_size, channels=channels)
+         images.append(image)
+         input_tensors.append(input_tensor)
+         filenames.append(file)
+
+     input_tensors = torch.cat(input_tensors).to(device)
+     class_labels = torch.zeros(input_tensors.size(0), dtype=torch.long).to(device) # Replace with actual class labels if available
+
+     # Generate saliency maps
+     cam_generator = SaliencyMapGenerator(model)
+     saliency_maps = cam_generator.compute_saliency_maps(input_tensors, class_labels)
+
+     # Convert saliency maps to numpy arrays
+     saliency_maps = saliency_maps.cpu().numpy()
+
+     N = len(images)
+
+     dst = os.path.join(src, 'saliency_maps')
+
+     for i in range(N):
+         fig, axes = plt.subplots(1, 3, figsize=(20, 5))
+
+         # Original image
+         axes[0].imshow(images[i])
+         axes[0].axis('off')
+         if class_names:
+             axes[0].set_title(f"Class: {class_names[class_labels[i].item()]}")
+
+         # Saliency Map
+         axes[1].imshow(saliency_maps[i, 0], cmap='hot')
+         axes[1].axis('off')
+         axes[1].set_title("Saliency Map")
+
+         # Overlay
+         overlay = np.array(images[i])
+         overlay = overlay / overlay.max()
+         saliency_map_rgb = np.stack([saliency_maps[i, 0]] * 3, axis=-1) # Convert saliency map to RGB
+         overlay = (overlay * 0.5 + saliency_map_rgb * 0.5).clip(0, 1)
+         axes[2].imshow(overlay)
+         axes[2].axis('off')
+         axes[2].set_title("Overlay")
+
+         plt.tight_layout()
+         plt.show()
+
+         # Save the saliency map if required
+         if save_saliency:
+             os.makedirs(dst, exist_ok=True)
+             saliency_image = Image.fromarray((saliency_maps[i, 0] * 255).astype(np.uint8))
+             saliency_image.save(os.path.join(dst, f'saliency_{filenames[i]}'))
+
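
The new visualize_model_attention walks a folder of .png images, loads a full model object with torch.load (so the model's class must be importable when the checkpoint is loaded), and plots the image, its saliency map, and an overlay for each file via SaliencyMapGenerator from spacr.utils. A hypothetical call under those assumptions; the paths and class names below are placeholders, and class_labels default to zeros unless the function is edited to supply real labels:

from spacr.train import visualize_model_attention

visualize_model_attention(
    src='/data/test_crops',                  # folder of .png crops (placeholder path)
    model_path='/models/maxvit_best.pth',    # whole model saved with torch.save(model, path) (placeholder path)
    image_size=224,
    channels=[1, 2, 3],
    normalize=True,
    class_names=['class_0', 'class_1'],      # illustrative labels; titles show class_0 since labels default to 0
    save_saliency=True)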