microlive-1.0.11-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,252 @@
+ import torch
+ import torch.nn as nn
+ import torch.optim as optim
+ from torch.utils.data import DataLoader, Dataset
+ from PIL import Image
+ import os
+ import numpy as np
+ import random
+
+ RESHAPE_IMAGE_SIZE = 64
+ perform_validation = False
+
+ def random_rotate_image(image):
+     # Randomly choose 0, 1, 2, or 3 quarter-turns (0, 90, 180, or 270 degrees)
+     k = random.choice([0, 1, 2, 3])
+     if k > 0:
+         # np.rot90 accepts PIL Images via the array interface and returns a NumPy array
+         rotated_image = np.rot90(image, k)
+     else:
+         rotated_image = image
+     return rotated_image
+
+
+ def load_model(model, path):
+     # Pick the target device
+     device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+     try:
+         state_dict = torch.load(path, map_location=device)
+     except Exception:
+         # Loading on the target device failed; retry on CPU
+         state_dict = torch.load(path, map_location='cpu')
+     model.load_state_dict(state_dict)
+     model.to(device)
+     model.eval()
+
+
+ def normalize_crop_return_list(array_crops_YXC, crop_size, selected_color_channel=0, normalize_to_255=False):
+     # Split a stack of square crops (concatenated along Y) and percentile-normalize each one
+     list_crops = []
+     number_crops = array_crops_YXC.shape[0] // crop_size
+     for crop_id in range(number_crops):
+         crop = array_crops_YXC[crop_id * crop_size:(crop_id + 1) * crop_size, :, selected_color_channel]
+         crop = crop - np.percentile(crop, 0.01)
+         crop = crop / np.percentile(crop, 99.95)
+         crop = np.clip(crop, 0, 1)
+         if normalize_to_255:
+             crop = (crop * 255).astype(np.uint8)
+         list_crops.append(crop)
+     return list_crops
+
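+ # Illustrative usage, a sketch: `array_crops_YXC` is assumed to be N square crops stacked
+ # along the Y axis, e.g. shape (N * crop_size, crop_size, C):
+ #     crops = normalize_crop_return_list(array_crops_YXC, crop_size=32, normalize_to_255=True)
+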
+ def standardize_spot_return_list(array_crops_YXC, crop_size, selected_color_channel=0):
+     # Split a stack of square crops (concatenated along Y) and z-score each one
+     list_crops = []
+     number_crops = array_crops_YXC.shape[0] // crop_size
+     for crop_id in range(number_crops):
+         crop = array_crops_YXC[crop_id * crop_size:(crop_id + 1) * crop_size, :, selected_color_channel]
+         crop = (crop - np.mean(crop)) / np.std(crop)
+         list_crops.append(crop)
+     return list_crops
+
+ def standarize_crop(crop):
+     # Z-score a single crop
+     return (crop - np.mean(crop)) / np.std(crop)
+
+ def normalize_crop(crop):
+     # Min-max normalize a single crop to [0, 1]
+     return (crop - np.min(crop)) / (np.max(crop) - np.min(crop))
+
+
+ def predict_crops(model, list_crops, threshold=0.5):
+     model.eval()
+     if torch.cuda.is_available():
+         device = torch.device('cuda')
+     elif hasattr(torch.backends, 'mps') and torch.backends.mps.is_available():
+         device = torch.device('mps')
+     else:
+         device = torch.device('cpu')
+     model.to(device)
+
+     flag_vector = []
+     ml_prediction_values = []
+
+     for crop in list_crops:
+         # Resize and rescale the 8-bit crop to the range [0, 1]
+         crop = np.array(Image.fromarray(crop).resize((RESHAPE_IMAGE_SIZE, RESHAPE_IMAGE_SIZE))).astype(np.float32) / 255.0
+         crop_tensor = torch.tensor(crop).unsqueeze(0).unsqueeze(0).to(device)  # Add batch and channel dimensions; move to the model's device
+
+         with torch.no_grad():  # Disable gradient computation
+             output = model(crop_tensor)
+             # The model's forward pass already applies a sigmoid, so the output is a probability
+             probability = output.item()
+             prediction = probability > threshold
+
+         flag_vector.append(int(prediction))
+         ml_prediction_values.append(probability)
+
+     return np.array(flag_vector), np.array(ml_prediction_values)
+
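+ # Illustrative usage, a sketch (here `crops` is assumed to be a list of 2-D uint8 arrays):
+ #     model = ParticleDetectionCNN()
+ #     load_model(model, 'particle_detection_model.pth')
+ #     flags, probabilities = predict_crops(model, crops, threshold=0.5)
+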
+ def save_model(model, path='particle_detection_model.pth'):
+     torch.save(model.state_dict(), path)
+     print(f"Model state dictionary saved to {path}")
+
+ class ParticleDetectionCNN(nn.Module):
+     def __init__(self):
+         super(ParticleDetectionCNN, self).__init__()
+         self.conv1 = nn.Conv2d(in_channels=1, out_channels=32, kernel_size=3, stride=1, padding=1)
+         # 64 filters (the RESHAPE_IMAGE_SIZE constant doubles as the channel count here)
+         self.conv2 = nn.Conv2d(in_channels=32, out_channels=RESHAPE_IMAGE_SIZE, kernel_size=3, stride=1, padding=1)
+         self.pool = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
+         # Two 2x2 poolings reduce a 64x64 input to 16x16, so the flattened size is fixed
+         self.fc1 = nn.Linear(RESHAPE_IMAGE_SIZE * 16 * 16, 128)
+         self.fc2 = nn.Linear(128, 1)  # Binary classification (particle or not)
+
+     def forward(self, x):
+         x = self.pool(torch.relu(self.conv1(x)))
+         x = self.pool(torch.relu(self.conv2(x)))
+         x = x.view(x.size(0), -1)  # Flatten the tensor, keeping the batch dimension
+         x = torch.relu(self.fc1(x))
+         x = torch.sigmoid(self.fc2(x))  # Sigmoid activation for binary classification
+         return x
+
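+ # Shape check, a sketch: a (batch, 1, 64, 64) input yields a (batch, 1) probability,
+ # which is what the RESHAPE_IMAGE_SIZE * 16 * 16 input size of fc1 assumes:
+ #     probs = ParticleDetectionCNN()(torch.zeros(1, 1, RESHAPE_IMAGE_SIZE, RESHAPE_IMAGE_SIZE))
+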
+ class ParticleDataset(Dataset):
+     def __init__(self, images_dir, subset='train', use_transform=False):
+         self.images_dir = images_dir
+         self.use_transform = use_transform
+         images = [os.path.join(images_dir, img) for img in os.listdir(images_dir) if img.endswith('.png')]
+         # Filenames containing 'no_particle' are negatives; everything else is a positive
+         labels = [0 if 'no_particle' in img else 1 for img in images]
+         # Shuffle and split the data with a fixed seed for reproducibility
+         combined = list(zip(images, labels))
+         random.seed(42)
+         random.shuffle(combined)
+         images, labels = zip(*combined)
+         total_images = len(images)
+         train_end = int(total_images * 0.8)
+         valid_end = total_images  # Validation takes the remaining 20%; the test split is empty
+         if subset == 'train':
+             self.images = images[:train_end]
+             self.labels = labels[:train_end]
+         elif subset == 'valid':
+             self.images = images[train_end:valid_end]
+             self.labels = labels[train_end:valid_end]
+         elif subset == 'test':
+             self.images = images[valid_end:]
+             self.labels = labels[valid_end:]
+
+     def __len__(self):
+         return len(self.images)
+
+     def __getitem__(self, idx):
+         img_path = self.images[idx]
+         image = Image.open(img_path).convert('L')
+         label = self.labels[idx]
+         image = image.resize((RESHAPE_IMAGE_SIZE, RESHAPE_IMAGE_SIZE))
+         if self.use_transform:
+             # Randomly rotate the image by 0, 90, 180, or 270 degrees
+             image = random_rotate_image(image)
+         image = np.array(image, dtype=np.float32) / 255.0  # Normalize the image to the range [0, 1]
+         image = torch.tensor(image).unsqueeze(0)  # Convert to tensor and add a channel dimension
+
+         return image, label
+
+
+ def validate(model, loader, criterion, device):
+     model.eval()
+     validation_loss = 0.0
+     with torch.no_grad():
+         for inputs, labels in loader:
+             inputs = inputs.float().to(device)  # Move inputs to the selected device
+             labels = labels.unsqueeze(1).float().to(device)  # Move labels to the selected device
+             outputs = model(inputs)
+             loss = criterion(outputs, labels)
+             validation_loss += loss.item()
+     return validation_loss / len(loader)
+
+
+ def run_network(image_dir='training_crops', num_epochs=10000, learning_rate=0.0000005, batch_size=256, perform_validation=perform_validation):
+     train_dataset = ParticleDataset(image_dir, subset='train', use_transform=True)
+     train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
+     if perform_validation:
+         valid_dataset = ParticleDataset(image_dir, subset='valid', use_transform=False)  # No augmentation for validation
+         valid_loader = DataLoader(valid_dataset, batch_size=batch_size, shuffle=False)
+
+     # Select the best available device: CUDA, Apple MPS, or CPU
+     if torch.cuda.is_available():
+         device = torch.device('cuda')
+     elif hasattr(torch.backends, 'mps') and torch.backends.mps.is_available():
+         device = torch.device('mps')
+     else:
+         device = torch.device('cpu')
+     model = ParticleDetectionCNN().to(device)
+     criterion = nn.BCELoss()
+     optimizer = optim.Adam(model.parameters(), lr=learning_rate)
+     training_losses = []
+     validation_losses = []
+     for epoch in range(num_epochs):
+         model.train()  # Re-enter train mode each epoch; validate() switches the model to eval mode
+         running_loss = 0.0
+         for inputs, labels in train_loader:
+             inputs = inputs.float().to(device)  # Move inputs to the selected device
+             labels = labels.unsqueeze(1).float().to(device)  # Move labels to the selected device
+             optimizer.zero_grad()
+             outputs = model(inputs)
+             loss = criterion(outputs, labels)
+             loss.backward()
+             optimizer.step()
+             running_loss += loss.item()
+         training_losses.append(running_loss / len(train_loader))
+         if perform_validation:
+             validation_loss = validate(model, valid_loader, criterion, device)
+         else:
+             validation_loss = 0
+         validation_losses.append(validation_loss)
+         # Log at the first epoch and then every `batch_size` epochs
+         if (epoch == 0) or (epoch % batch_size == 0):
+             print(f"Epoch {epoch}/{num_epochs}, Training Loss: {running_loss / len(train_loader)}, Validation Loss: {validation_loss}")
+
+     return model, training_losses, validation_losses
+
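+ # Illustrative usage, a sketch (num_epochs is overridden here purely for the example):
+ #     model, train_losses, val_losses = run_network(image_dir='training_crops', num_epochs=100)
+ #     save_model(model, 'particle_detection_model.pth')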
@@ -0,0 +1,17 @@
+ """Analysis pipeline modules for MicroLive.
+
+ These pipelines provide automated batch processing workflows
+ for common microscopy analysis tasks.
+
+ Available pipelines:
+ - pipeline_particle_tracking: Full particle tracking workflow
+ - pipeline_FRAP: FRAP (Fluorescence Recovery After Photobleaching) analysis
+ - pipeline_folding_efficiency: Protein folding efficiency quantification
+ - pipeline_spot_detection_no_tracking: Spot detection without linking
+ """
+
+ # Note: Imports are intentionally lazy to avoid circular imports
+ # and speed up package loading. Import pipelines directly when needed:
+ #
+ # from microlive.pipelines.pipeline_particle_tracking import pipeline_particle_tracking
+ # from microlive.pipelines.pipeline_FRAP import run_frap_pipeline
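+ #
+ # A minimal sketch of the lazy pattern described above (module paths taken from the
+ # examples; the wrapper below is purely illustrative, not part of the package API):
+ #
+ # def track_particles(*args, **kwargs):
+ #     # Importing inside the function defers loading the heavy pipeline module
+ #     from microlive.pipelines.pipeline_particle_tracking import pipeline_particle_tracking
+ #     return pipeline_particle_tracking(*args, **kwargs)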