spacr 0.3.22__py3-none-any.whl → 0.3.31__py3-none-any.whl
This diff compares the contents of two publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the packages exactly as they appear in their public registry.
- spacr/app_annotate.py +1 -2
- spacr/deep_spacr.py +131 -227
- spacr/gui.py +1 -0
- spacr/gui_core.py +13 -4
- spacr/gui_elements.py +72 -49
- spacr/gui_utils.py +33 -44
- spacr/io.py +4 -4
- spacr/measure.py +1 -38
- spacr/plot.py +0 -2
- spacr/settings.py +50 -5
- spacr/utils.py +383 -28
- {spacr-0.3.22.dist-info → spacr-0.3.31.dist-info}/METADATA +1 -1
- {spacr-0.3.22.dist-info → spacr-0.3.31.dist-info}/RECORD +17 -17
- {spacr-0.3.22.dist-info → spacr-0.3.31.dist-info}/LICENSE +0 -0
- {spacr-0.3.22.dist-info → spacr-0.3.31.dist-info}/WHEEL +0 -0
- {spacr-0.3.22.dist-info → spacr-0.3.31.dist-info}/entry_points.txt +0 -0
- {spacr-0.3.22.dist-info → spacr-0.3.31.dist-info}/top_level.txt +0 -0
spacr/utils.py
CHANGED
@@ -1,4 +1,4 @@
-import os, re, sqlite3, torch, torchvision, random, string, shutil, cv2, tarfile, glob, psutil, platform, gzip, subprocess, time, requests, ast
+import os, re, sqlite3, torch, torchvision, random, string, shutil, cv2, tarfile, glob, psutil, platform, gzip, subprocess, time, requests, ast, traceback
 
 import numpy as np
 import pandas as pd
@@ -12,6 +12,7 @@ from skimage.transform import resize as resizescikit
 from skimage.morphology import dilation, square
 from skimage.measure import find_contours
 from skimage.segmentation import clear_border
+from scipy.stats import pearsonr
 
 from collections import defaultdict, OrderedDict
 from PIL import Image
@@ -67,6 +68,192 @@ from huggingface_hub import list_repo_files
 import umap.umap_ as umap
 #import umap
 
+def filepaths_to_database(img_paths, settings, source_folder, crop_mode):
+
+    png_df = pd.DataFrame(img_paths, columns=['png_path'])
+
+    png_df['file_name'] = png_df['png_path'].apply(lambda x: os.path.basename(x))
+
+    parts = png_df['file_name'].apply(lambda x: pd.Series(_map_wells_png(x, timelapse=settings['timelapse'])))
+
+    columns = ['plate', 'row', 'col', 'field']
+
+    if settings['timelapse']:
+        columns = columns + ['time_id']
+
+    columns = columns + ['prcfo']
+
+    if crop_mode == 'cell':
+        columns = columns + ['cell_id']
+
+    if crop_mode == 'nucleus':
+        columns = columns + ['nucleus_id']
+
+    if crop_mode == 'pathogen':
+        columns = columns + ['pathogen_id']
+
+    if crop_mode == 'cytoplasm':
+        columns = columns + ['cytoplasm_id']
+
+    png_df[columns] = parts
+
+    try:
+        conn = sqlite3.connect(f'{source_folder}/measurements/measurements.db', timeout=5)
+        png_df.to_sql('png_list', conn, if_exists='append', index=False)
+        conn.commit()
+    except sqlite3.OperationalError as e:
+        print(f"SQLite error: {e}", flush=True)
+        traceback.print_exc()
+
+def activation_maps_to_database(img_paths, source_folder, settings):
+    from .io import _create_database
+
+    png_df = pd.DataFrame(img_paths, columns=['png_path'])
+    png_df['file_name'] = png_df['png_path'].apply(lambda x: os.path.basename(x))
+    parts = png_df['file_name'].apply(lambda x: pd.Series(_map_wells_png(x, timelapse=False)))
+    columns = ['plate', 'row', 'col', 'field', 'prcfo', 'object']
+    png_df[columns] = parts
+
+    dataset_name = os.path.splitext(os.path.basename(settings['dataset']))[0]
+    database_name = f"{source_folder}/measurements/{dataset_name}.db"
+
+    if not os.path.exists(database_name):
+        _create_database(database_name)
+
+    try:
+        conn = sqlite3.connect(database_name, timeout=5)
+        png_df.to_sql(f"{settings['cam_type']}_list", conn, if_exists='append', index=False)
+        conn.commit()
+    except sqlite3.OperationalError as e:
+        print(f"SQLite error: {e}", flush=True)
+        traceback.print_exc()
+
+def activation_correlations_to_database(df, img_paths, source_folder, settings):
+    from .io import _create_database
+
+    png_df = pd.DataFrame(img_paths, columns=['png_path'])
+    png_df['file_name'] = png_df['png_path'].apply(lambda x: os.path.basename(x))
+    parts = png_df['file_name'].apply(lambda x: pd.Series(_map_wells_png(x, timelapse=False)))
+    columns = ['plate', 'row', 'col', 'field', 'prcfo', 'object']
+    png_df[columns] = parts
+
+    # Align both DataFrames by file_name
+    png_df.set_index('file_name', inplace=True)
+    df.set_index('file_name', inplace=True)
+
+    merged_df = pd.concat([png_df, df], axis=1)
+    merged_df.reset_index(inplace=True)
+
+    dataset_name = os.path.splitext(os.path.basename(settings['dataset']))[0]
+    database_name = f"{source_folder}/measurements/{dataset_name}.db"
+
+    if not os.path.exists(database_name):
+        _create_database(database_name)
+
+    try:
+        conn = sqlite3.connect(database_name, timeout=5)
+        merged_df.to_sql(f"{settings['cam_type']}_correlations", conn, if_exists='append', index=False)
+        conn.commit()
+    except sqlite3.OperationalError as e:
+        print(f"SQLite error: {e}", flush=True)
+        traceback.print_exc()
+
+def calculate_activation_correlations(inputs, activation_maps, file_names, manders_thresholds=[15, 50, 75]):
+    """
+    Calculates Pearson and Manders correlations between input image channels and activation map channels.
+
+    Args:
+        inputs: A batch of input images, Tensor of shape (batch_size, channels, height, width)
+        activation_maps: A batch of activation maps, Tensor of shape (batch_size, channels, height, width)
+        file_names: List of file names corresponding to each image in the batch.
+        manders_thresholds: List of intensity percentiles to calculate Manders correlation.
+
+    Returns:
+        df_correlations: A DataFrame with columns for pairwise correlations (Pearson and Manders)
+        between input channels and activation map channels.
+    """
+
+    # Ensure tensors are detached and moved to CPU before converting to numpy
+    inputs = inputs.detach().cpu()
+    activation_maps = activation_maps.detach().cpu()
+
+    batch_size, in_channels, height, width = inputs.shape
+
+    if activation_maps.dim() == 3:
+        # If activation maps have no channels, add a dummy channel dimension
+        activation_maps = activation_maps.unsqueeze(1)  # Now shape is (batch_size, 1, height, width)
+
+    _, act_channels, act_height, act_width = activation_maps.shape
+
+    # Ensure that the inputs and activation maps are the same size
+    if (height != act_height) or (width != act_width):
+        activation_maps = torch.nn.functional.interpolate(activation_maps, size=(height, width), mode='bilinear')
+
+    # Dictionary to collect correlation results
+    correlations_dict = {'file_name': []}
+
+    # Initialize correlation columns based on input channels and activation map channels
+    for in_c in range(in_channels):
+        for act_c in range(act_channels):
+            correlations_dict[f'channel_{in_c}_activation_{act_c}_pearsons'] = []
+            for threshold in manders_thresholds:
+                correlations_dict[f'channel_{in_c}_activation_{act_c}_{threshold}_M1'] = []
+                correlations_dict[f'channel_{in_c}_activation_{act_c}_{threshold}_M2'] = []
+
+    # Loop over the batch
+    for b in range(batch_size):
+        input_img = inputs[b]  # Input image channels (C, H, W)
+        activation_map = activation_maps[b]  # Activation map channels (C, H, W)
+
+        # Add the file name to the current row
+        correlations_dict['file_name'].append(file_names[b])
+
+        # Calculate correlations for each channel pair
+        for in_c in range(in_channels):
+            input_channel = input_img[in_c].flatten().numpy()  # Flatten the input image channel
+            input_channel = input_channel[np.isfinite(input_channel)]  # Remove NaN or inf values
+
+            for act_c in range(act_channels):
+                activation_channel = activation_map[act_c].flatten().numpy()  # Flatten the activation map channel
+                activation_channel = activation_channel[np.isfinite(activation_channel)]  # Remove NaN or inf values
+
+                # Check if there are valid (non-empty) arrays left to calculate the Pearson correlation
+                if input_channel.size > 0 and activation_channel.size > 0:
+                    pearson_corr, _ = pearsonr(input_channel, activation_channel)
+                else:
+                    pearson_corr = np.nan  # Assign NaN if there are no valid data points
+                correlations_dict[f'channel_{in_c}_activation_{act_c}_pearsons'].append(pearson_corr)
+
+                # Compute Manders correlations for each threshold
+                for threshold in manders_thresholds:
+                    # Get the top percentile pixels based on intensity in both channels
+                    if input_channel.size > 0 and activation_channel.size > 0:
+                        input_threshold = np.percentile(input_channel, threshold)
+                        activation_threshold = np.percentile(activation_channel, threshold)
+
+                        # Mask the pixels above the threshold
+                        mask = (input_channel >= input_threshold) & (activation_channel >= activation_threshold)
+
+                        # If we have enough pixels, calculate Manders correlation
+                        if np.sum(mask) > 0:
+                            manders_corr_M1 = np.sum(input_channel[mask] * activation_channel[mask]) / np.sum(input_channel[mask] ** 2)
+                            manders_corr_M2 = np.sum(activation_channel[mask] * input_channel[mask]) / np.sum(activation_channel[mask] ** 2)
+                        else:
+                            manders_corr_M1 = np.nan
+                            manders_corr_M2 = np.nan
+                    else:
+                        manders_corr_M1 = np.nan
+                        manders_corr_M2 = np.nan
+
+                    # Store the Manders correlation for this threshold
+                    correlations_dict[f'channel_{in_c}_activation_{act_c}_{threshold}_M1'].append(manders_corr_M1)
+                    correlations_dict[f'channel_{in_c}_activation_{act_c}_{threshold}_M2'].append(manders_corr_M2)
+
+    # Convert the dictionary to a DataFrame
+    df_correlations = pd.DataFrame(correlations_dict)
+
+    return df_correlations
+
 def load_settings(csv_file_path, show=False, setting_key='setting_key', setting_value='setting_value'):
     """
     Convert a CSV file with 'settings_key' and 'settings_value' columns into a dictionary.
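The bulk of this hunk is calculate_activation_correlations. As a quick orientation, here is a minimal usage sketch; the batch shape and file names are illustrative assumptions, not values taken from spacr:

    # Hedged usage sketch; shapes and file names are made up for illustration.
    import torch
    from spacr.utils import calculate_activation_correlations

    inputs = torch.rand(4, 3, 224, 224)        # 4 images, 3 input channels
    activation_maps = torch.rand(4, 224, 224)  # one map per image; a channel dim is added internally
    file_names = [f"img_{i}.png" for i in range(4)]  # placeholder names

    df = calculate_activation_correlations(inputs, activation_maps, file_names)
    # One row per image: a Pearson column per (input channel, activation channel) pair,
    # plus M1/M2 Manders columns at the default 15th/50th/75th intensity percentiles.
    print(df.shape)  # (4, 22): 1 file_name column + 3 pairs * (1 Pearson + 3 thresholds * 2)

Note that each Manders pair is computed only over pixels above both percentile thresholds, so higher thresholds restrict the comparison to the brightest co-occurring pixels.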
@@ -892,7 +1079,7 @@ def _map_wells_png(file_name, timelapse=False):
         print(f"Error: {e}")
         plate, row, column, field, object_id, prcfo = 'error', 'error', 'error', 'error', 'error', 'error'
     if timelapse:
-        return plate, row, column, field, timeid, prcfo, object_id
+        return plate, row, column, field, timeid, prcfo, object_id
     else:
         return plate, row, column, field, prcfo, object_id
@@ -3097,46 +3284,176 @@ class SaliencyMapGenerator:
         saliency = X.grad.abs()
 
         return saliency, predictions
-
-    def 
+
+    def plot_activation_grid(self, X, saliency, predictions, overlay=True, normalize=False):
         N = X.shape[0]
-        rows = (N + 7) // 8
+        rows = (N + 7) // 8
         fig, axs = plt.subplots(rows, 8, figsize=(16, rows * 2))
 
         for i in range(N):
             ax = axs[i // 8, i % 8]
+            saliency_map = saliency[i].cpu().numpy()  # Move to CPU and convert to numpy
 
-            if 
-            saliency_map = 
-
+            if saliency_map.shape[0] == 3:  # Channels first, reshape to (H, W, 3)
+                saliency_map = np.transpose(saliency_map, (1, 2, 0))
+
+            # Normalize image channels to 2nd and 98th percentiles
+            if overlay:
+                img_np = X[i].permute(1, 2, 0).detach().cpu().numpy()
+                if normalize:
+                    img_np = self.percentile_normalize(img_np)
+                ax.imshow(img_np)
             ax.imshow(saliency_map, cmap='jet', alpha=0.5)
 
-
-
-
-
-
-
+            # Add class label in the top-left corner
+            ax.text(5, 25, str(predictions[i].item()), fontsize=12, color='white', weight='bold',
+                    bbox=dict(facecolor='black', alpha=0.7, boxstyle='round,pad=0.2'))
+            ax.axis('off')
+
+        plt.tight_layout(pad=0)
+        return fig
+
+    def percentile_normalize(self, img, lower_percentile=2, upper_percentile=98):
+        """
+        Normalize each channel of the image to the given percentiles.
+        Args:
+            img: Input image as numpy array with shape (H, W, C)
+            lower_percentile: Lower percentile for normalization (default 2)
+            upper_percentile: Upper percentile for normalization (default 98)
+        Returns:
+            img: Normalized image
+        """
+        img_normalized = np.zeros_like(img)
 
-
-
-
-
-
-
-
+        for c in range(img.shape[2]):  # Iterate over each channel
+            low = np.percentile(img[:, :, c], lower_percentile)
+            high = np.percentile(img[:, :, c], upper_percentile)
+            img_normalized[:, :, c] = np.clip((img[:, :, c] - low) / (high - low), 0, 1)
+
+        return img_normalized
+
+
+class GradCAMGenerator:
+    def __init__(self, model, target_layer, cam_type='gradcam'):
+        self.model = model
+        self.model.eval()
+        self.target_layer = target_layer
+        self.cam_type = cam_type
+        self.gradients = None
+        self.activations = None
+
+        # Hook the target layer
+        self.target_layer_module = self.get_layer(self.model, self.target_layer)
+        self.hook_layers()
+
+    def hook_layers(self):
+        # Forward hook to get activations
+        def forward_hook(module, input, output):
+            self.activations = output
+
+        # Backward hook to get gradients
+        def backward_hook(module, grad_input, grad_output):
+            self.gradients = grad_output[0]
+
+        self.target_layer_module.register_forward_hook(forward_hook)
+        self.target_layer_module.register_backward_hook(backward_hook)
+
+    def get_layer(self, model, target_layer):
+        # Recursively find the layer specified in target_layer
+        modules = target_layer.split('.')
+        layer = model
+        for module in modules:
+            layer = getattr(layer, module)
+        return layer
+
+    def compute_gradcam_maps(self, X, y):
+        X.requires_grad_()
 
-
+        # Forward pass
+        scores = self.model(X).squeeze()
+
+        # Perform backward pass
+        target_scores = scores * (2 * y - 1)
+        self.model.zero_grad()
+        target_scores.backward(torch.ones_like(target_scores))
+
+        # Compute GradCAM
+        pooled_gradients = torch.mean(self.gradients, dim=[0, 2, 3])
+        for i in range(self.activations.size(1)):
+            self.activations[:, i, :, :] *= pooled_gradients[i]
+
+        gradcam = torch.mean(self.activations, dim=1).squeeze()
+        gradcam = F.relu(gradcam)
+        gradcam = F.interpolate(gradcam.unsqueeze(0).unsqueeze(0), size=X.shape[2:], mode='bilinear')
+        gradcam = gradcam.squeeze().cpu().detach().numpy()
+        gradcam = (gradcam - gradcam.min()) / (gradcam.max() - gradcam.min())
+
+        return gradcam
+
+    def compute_gradcam_and_predictions(self, X):
+        self.model.eval()
+        X.requires_grad_()
+
+        # Forward pass to get predictions (logits)
+        scores = self.model(X).squeeze()
+
+        # Get predicted class (0 or 1 for binary classification)
+        predictions = (scores > 0).long()
+
+        # Compute gradcam maps
+        gradcam_maps = []
+        for i in range(X.size(0)):
+            gradcam_map = self.compute_gradcam_maps(X[i].unsqueeze(0), predictions[i])
+            gradcam_maps.append(gradcam_map)
+
+        return torch.tensor(gradcam_maps), predictions
+
+    def plot_activation_grid(self, X, gradcam, predictions, overlay=True, normalize=False):
+        N = X.shape[0]
+        rows = (N + 7) // 8
+        fig, axs = plt.subplots(rows, 8, figsize=(16, rows * 2))
+
+        for i in range(N):
+            ax = axs[i // 8, i % 8]
+            gradcam_map = gradcam[i].cpu().numpy()
+
+            # Normalize image channels to 2nd and 98th percentiles
+            if overlay:
+                img_np = X[i].permute(1, 2, 0).detach().cpu().numpy()
+                if normalize:
+                    img_np = self.percentile_normalize(img_np)
+                ax.imshow(img_np)
+            ax.imshow(gradcam_map, cmap='jet', alpha=0.5)
+
+            #ax.imshow(X[i].permute(1, 2, 0).detach().cpu().numpy()) # Original image
+            #ax.imshow(gradcam_map, cmap='jet', alpha=0.5) # Overlay the gradcam map
+
+            # Add class label in the top-left corner
             ax.text(5, 25, str(predictions[i].item()), fontsize=12, color='white', weight='bold',
                     bbox=dict(facecolor='black', alpha=0.7, boxstyle='round,pad=0.2'))
             ax.axis('off')
 
-            # Turn off unused axes
-            for j in range(N, rows * 8):
-                fig.delaxes(axs[j // 8, j % 8])
-
         plt.tight_layout(pad=0)
-
+        return fig
+
+    def percentile_normalize(self, img, lower_percentile=2, upper_percentile=98):
+        """
+        Normalize each channel of the image to the given percentiles.
+        Args:
+            img: Input image as numpy array with shape (H, W, C)
+            lower_percentile: Lower percentile for normalization (default 2)
+            upper_percentile: Upper percentile for normalization (default 98)
+        Returns:
+            img: Normalized image
+        """
+        img_normalized = np.zeros_like(img)
+
+        for c in range(img.shape[2]):  # Iterate over each channel
+            low = np.percentile(img[:, :, c], lower_percentile)
+            high = np.percentile(img[:, :, c], upper_percentile)
+            img_normalized[:, :, c] = np.clip((img[:, :, c] - low) / (high - low), 0, 1)
+
+        return img_normalized
 
 def preprocess_image(image_path, normalize=True, image_size=224, channels=[1,2,3]):
     preprocess = transforms.Compose([
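For orientation, a sketch of how the new GradCAMGenerator might be driven end to end. The ResNet backbone, single-logit head, and 'layer4' target are assumptions chosen to fit the class's binary (scores > 0) logic, not models shipped with spacr:

    # Hedged usage sketch; the model and target layer below are assumptions.
    import torch
    import torchvision
    from spacr.utils import GradCAMGenerator

    model = torchvision.models.resnet50(weights=None)
    model.fc = torch.nn.Linear(model.fc.in_features, 1)  # single logit, matching (scores > 0)

    generator = GradCAMGenerator(model, target_layer='layer4', cam_type='gradcam')

    # Batch of 16 so plt.subplots returns a 2-D axes array, which the
    # axs[i // 8, i % 8] indexing in plot_activation_grid assumes.
    X = torch.rand(16, 3, 224, 224)
    gradcam_maps, predictions = generator.compute_gradcam_and_predictions(X)

    fig = generator.plot_activation_grid(X, gradcam_maps, predictions, overlay=True, normalize=True)
    fig.savefig('gradcam_grid.png')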
@@ -3677,8 +3994,37 @@ def plot_grid(cluster_images, colors, figuresize, black_background, verbose):
     plt.show()
     return grid_fig
 
-def 
+def generate_path_list_from_db_v1(db_path, file_metadata):
+
+    all_paths = []
+
+    # Connect to the database and retrieve the image paths
+    print(f"Reading DataBase: {db_path}")
+    try:
+        with sqlite3.connect(db_path) as conn:
+            cursor = conn.cursor()
+            if file_metadata:
+                if isinstance(file_metadata, str):
+                    cursor.execute("SELECT png_path FROM png_list WHERE png_path LIKE ?", (f"%{file_metadata}%",))
+            else:
+                cursor.execute("SELECT png_path FROM png_list")
 
+            while True:
+                rows = cursor.fetchmany(1000)
+                if not rows:
+                    break
+                all_paths.extend([row[0] for row in rows])
+
+    except sqlite3.Error as e:
+        print(f"Database error: {e}")
+        return
+    except Exception as e:
+        print(f"Error: {e}")
+        return
+
+    return all_paths
+
+def generate_path_list_from_db(db_path, file_metadata):
     all_paths = []
 
     # Connect to the database and retrieve the image paths
@@ -3686,10 +4032,19 @@ def generate_path_list_from_db(db_path, file_metadata):
     try:
         with sqlite3.connect(db_path) as conn:
             cursor = conn.cursor()
+
             if file_metadata:
                 if isinstance(file_metadata, str):
+                    # If file_metadata is a single string
                     cursor.execute("SELECT png_path FROM png_list WHERE png_path LIKE ?", (f"%{file_metadata}%",))
+                elif isinstance(file_metadata, list):
+                    # If file_metadata is a list of strings
+                    query = "SELECT png_path FROM png_list WHERE " + " OR ".join(
+                        ["png_path LIKE ?" for _ in file_metadata])
+                    params = [f"%{meta}%" for meta in file_metadata]
+                    cursor.execute(query, params)
             else:
+                # If file_metadata is None or empty
                 cursor.execute("SELECT png_path FROM png_list")
 
             while True:
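In short, file_metadata may now be a single substring or a list of substrings that are OR-ed into one parameterized LIKE query. A brief sketch; the database path and substrings below are placeholders, not paths shipped with spacr:

    # Hedged usage sketch; path and metadata substrings are placeholders.
    from spacr.utils import generate_path_list_from_db

    db_path = 'plate1/measurements/measurements.db'

    paths = generate_path_list_from_db(db_path, file_metadata='C01')           # single LIKE clause
    paths = generate_path_list_from_db(db_path, file_metadata=['C01', 'C02'])  # LIKE ... OR LIKE ...

Because the clauses stay parameterized, the list form adds no string interpolation into the SQL itself.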
{spacr-0.3.22.dist-info → spacr-0.3.31.dist-info}/RECORD
CHANGED
@@ -1,6 +1,6 @@
 spacr/__init__.py,sha256=3TNo4PgxHZTHOhyPc8AORvG3tzdPFEc30KAtsOou174,1618
 spacr/__main__.py,sha256=bkAJJD2kjIqOP-u1kLvct9jQQCeUXzlEjdgitwi1Lm8,75
-spacr/app_annotate.py,sha256=
+spacr/app_annotate.py,sha256=XxGpapg71099aVjb5ZEqgpKTBc_1OIb6_y8yzWfqrsM,2252
 spacr/app_classify.py,sha256=urTP_wlZ58hSyM5a19slYlBxN0PdC-9-ga0hvq8CGWc,165
 spacr/app_make_masks.py,sha256=pqDhRpluiHZz-kPX2Zh_KbYe4TsU43qYBa_7f-rsjpw,1694
 spacr/app_mask.py,sha256=l-dBY8ftzCMdDe6-pXc2Nh_u-idNL9G7UOARiLJBtds,153
@@ -9,25 +9,25 @@ spacr/app_sequencing.py,sha256=DjG26jy4cpddnV8WOOAIiExtOe9MleVMY4MFa5uTo5w,157
 spacr/app_umap.py,sha256=ZWAmf_OsIKbYvolYuWPMYhdlVe-n2CADoJulAizMiEo,153
 spacr/cellpose.py,sha256=zv4BzhaP2O-mtQ-pUfYvpOyxgn1ke_bDWgdHD5UWm9I,13942
 spacr/core.py,sha256=G_x-w7FRIHNfSOoPaIZPSf_A7mVj7PA7o9HQZ4nIu5o,48231
-spacr/deep_spacr.py,sha256=
-spacr/gui.py,sha256=
-spacr/gui_core.py,sha256=
-spacr/gui_elements.py,sha256=
-spacr/gui_utils.py,sha256=
-spacr/io.py,sha256=
+spacr/deep_spacr.py,sha256=HdOcNU8cHcE_19nP7_5uTz-ih3E169ffr2Hm--NvMvA,43255
+spacr/gui.py,sha256=ARyn9Q_g8HoP-cXh1nzMLVFCKqthY4v2u9yORyaQqQE,8230
+spacr/gui_core.py,sha256=LV_HX5zreu3Bye6sQFDbOuk8Dfj4StMoohy6hsrDEXA,41363
+spacr/gui_elements.py,sha256=puDqf7PJJ_UMA01fjqODk-zsfSmvzVXpvaZ1BYV988w,136554
+spacr/gui_utils.py,sha256=TFY3zNyTk-FkJ0mjSrWsE2DpHWBbGuQoi1rh2AkXQyQ,45007
+spacr/io.py,sha256=AARmqn1fMmTgVDwWy8bEYK6SjH-6DZIulgCSPdBTyf0,143370
 spacr/logger.py,sha256=lJhTqt-_wfAunCPl93xE65Wr9Y1oIHJWaZMjunHUeIw,1538
-spacr/measure.py,sha256=
+spacr/measure.py,sha256=BThn_sALgKrwGKnLOGpT4FyoJeRVoTZoP9SXbCtCMRw,54857
 spacr/mediar.py,sha256=FwLvbLQW5LQzPgvJZG8Lw7GniA2vbZx6Jv6vIKu7I5c,14743
 spacr/ml.py,sha256=3XiQUfhhseCz9cZXhaVkCCv_qfqoZCdXGnO_p3ulwo4,47131
 spacr/openai.py,sha256=5vBZ3Jl2llYcW3oaTEXgdyCB2aJujMUIO5K038z7w_A,1246
-spacr/plot.py,sha256=
+spacr/plot.py,sha256=Lv-QFD_NwP9pdsUIiJ--XHJN-jQBkFz_AI9y4i36jEA,105506
 spacr/sequencing.py,sha256=t18mgpK6rhWuB1LtFOsPxqgpFXxuUmrD06ecsaVQ0Gw,19655
-spacr/settings.py,sha256=
+spacr/settings.py,sha256=uTTR6pmwBHbZ_uLLWE4cXplGK7q6K_OmZnsXH-HAFW0,75828
 spacr/sim.py,sha256=1xKhXimNU3ukzIw-3l9cF3Znc_brW8h20yv8fSTzvss,71173
 spacr/submodules.py,sha256=AB7s6-cULsaqz-haAaCtXfGEIi8uPZGT4xoCslUJC3Y,18391
 spacr/timelapse.py,sha256=FSYpUtAVy6xc3lwprRYgyDTT9ysUhfRQ4zrP9_h2mvg,39465
 spacr/toxo.py,sha256=us3pQyULtMTyfTq0MWPn4QJTTmQ6BwAJKChNf75jo3I,10082
-spacr/utils.py,sha256=
+spacr/utils.py,sha256=w4Cht32Mhep7jfXKm5CSpyFLB3lOxiBCQI6PnaYcI3Q,213360
 spacr/version.py,sha256=axH5tnGwtgSnJHb5IDhiu4Zjk5GhLyAEDRe-rnaoFOA,409
 spacr/resources/MEDIAR/.gitignore,sha256=Ff1q9Nme14JUd-4Q3jZ65aeQ5X4uttptssVDgBVHYo8,152
 spacr/resources/MEDIAR/LICENSE,sha256=yEj_TRDLUfDpHDNM0StALXIt6mLqSgaV2hcCwa6_TcY,1065
@@ -150,9 +150,9 @@ spacr/resources/icons/umap.png,sha256=dOLF3DeLYy9k0nkUybiZMe1wzHQwLJFRmgccppw-8b
 spacr/resources/images/plate1_E01_T0001F001L01A01Z01C02.tif,sha256=Tl0ZUfZ_AYAbu0up_nO0tPRtF1BxXhWQ3T3pURBCCRo,7958528
 spacr/resources/images/plate1_E01_T0001F001L01A02Z01C01.tif,sha256=m8N-V71rA1TT4dFlENNg8s0Q0YEXXs8slIn7yObmZJQ,7958528
 spacr/resources/images/plate1_E01_T0001F001L01A03Z01C03.tif,sha256=Pbhk7xn-KUP6RSIhJsxQcrHFImBm3GEpLkzx7WOc-5M,7958528
-spacr-0.3.
-spacr-0.3.
-spacr-0.3.
-spacr-0.3.
-spacr-0.3.
-spacr-0.3.
+spacr-0.3.31.dist-info/LICENSE,sha256=SR-2MeGc6SCM1UORJYyarSWY_A-JaOMFDj7ReSs9tRM,1083
+spacr-0.3.31.dist-info/METADATA,sha256=-U4SqumPkRAW6fWg7hsKsgq7tTVPvwtH8um0ZEXca1c,5949
+spacr-0.3.31.dist-info/WHEEL,sha256=HiCZjzuy6Dw0hdX5R3LCFPDmFS4BWl8H-8W39XfmgX4,91
+spacr-0.3.31.dist-info/entry_points.txt,sha256=BMC0ql9aNNpv8lUZ8sgDLQMsqaVnX5L535gEhKUP5ho,296
+spacr-0.3.31.dist-info/top_level.txt,sha256=GJPU8FgwRXGzKeut6JopsSRY2R8T3i9lDgya42tLInY,6
+spacr-0.3.31.dist-info/RECORD,,
{spacr-0.3.22.dist-info → spacr-0.3.31.dist-info}/LICENSE
File without changes
{spacr-0.3.22.dist-info → spacr-0.3.31.dist-info}/WHEEL
File without changes
{spacr-0.3.22.dist-info → spacr-0.3.31.dist-info}/entry_points.txt
File without changes
{spacr-0.3.22.dist-info → spacr-0.3.31.dist-info}/top_level.txt
File without changes