semapp-1.0.5-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- semapp/Layout/__init__.py +26 -0
- semapp/Layout/create_button.py +1248 -0
- semapp/Layout/main_window_att.py +54 -0
- semapp/Layout/settings.py +170 -0
- semapp/Layout/styles.py +152 -0
- semapp/Layout/toast.py +157 -0
- semapp/Plot/__init__.py +8 -0
- semapp/Plot/frame_attributes.py +690 -0
- semapp/Plot/overview_window.py +355 -0
- semapp/Plot/styles.py +55 -0
- semapp/Plot/utils.py +295 -0
- semapp/Processing/__init__.py +4 -0
- semapp/Processing/detection.py +513 -0
- semapp/Processing/klarf_reader.py +461 -0
- semapp/Processing/processing.py +686 -0
- semapp/Processing/rename_tif.py +498 -0
- semapp/Processing/split_tif.py +323 -0
- semapp/Processing/threshold.py +777 -0
- semapp/__init__.py +10 -0
- semapp/asset/icon.png +0 -0
- semapp/main.py +103 -0
- semapp-1.0.5.dist-info/METADATA +300 -0
- semapp-1.0.5.dist-info/RECORD +27 -0
- semapp-1.0.5.dist-info/WHEEL +5 -0
- semapp-1.0.5.dist-info/entry_points.txt +2 -0
- semapp-1.0.5.dist-info/licenses/LICENSE +674 -0
- semapp-1.0.5.dist-info/top_level.txt +1 -0
semapp/Processing/threshold.py
@@ -0,0 +1,777 @@
"""
SEM Image Thresholding and Analysis Module

This module provides functionality for batch processing of SEM images,
including thresholding, particle detection, image fusion, and data visualization.
"""

import os
import cv2
import numpy as np
from pathlib import Path
import matplotlib.pyplot as plt
import csv
import pandas as pd
from scipy.interpolate import griddata
from typing import Optional, List, Dict, Tuple, Union
from PIL import Image


class SEMThresholdProcessor:
    """
    A comprehensive processor for SEM image thresholding and analysis.

    This class provides methods for:
    - Single image thresholding and particle detection
    - Batch processing of image directories
    - Image fusion from multiple detectors
    - CSV data consolidation and visualization
    """

    def __init__(self,
                 threshold: int = 128,
                 min_size: int = 50,
                 image_size_um: float = 5.0,
                 save_results: bool = True,
                 verbose: bool = False):
        """
        Initialize the SEM threshold processor.

        Args:
            threshold: Threshold value for binarization (0-255)
            min_size: Minimum particle size in pixels²
            image_size_um: Image size in micrometers (for area calculations)
            save_results: Whether to save CSV results
            verbose: Whether to print progress information
        """
        self.threshold = threshold
        self.min_size = min_size
        self.image_size_um = image_size_um
        self.save_results = save_results
        self.verbose = verbose

        # Validate parameters
        if not 0 <= threshold <= 255:
            raise ValueError("Threshold must be between 0 and 255")
        if min_size < 0:
            raise ValueError("Minimum size must be non-negative")
        if image_size_um <= 0:
            raise ValueError("Image size must be positive")

    def _log(self, message: str) -> None:
        """Print message if verbose mode is enabled."""
        if self.verbose:
            print(message)

    def process_single_image(self,
                             image_path: str,
                             show_result: bool = False) -> Optional[Dict]:
        """
        Apply thresholding to a single SEM image and detect particles.

        Args:
            image_path: Path to the image file
            show_result: Whether to display the result

        Returns:
            Dictionary containing processing results or None if error
        """
        # Validate threshold
        if not 0 <= self.threshold <= 255:
            self._log("Error: Threshold must be between 0 and 255")
            return None

        # Load image
        try:
            image = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
            if image is None:
                self._log(f"Error: Could not load image {image_path}")
                return None
        except Exception as e:
            self._log(f"Error loading image: {e}")
            return None

        # Apply smoothing
        kernel_size = 1
        image_smooth = cv2.GaussianBlur(image, (kernel_size, kernel_size), 0)
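        # Note: a 1x1 Gaussian kernel leaves the image effectively unchanged;
        # an odd kernel_size > 1 (e.g. 3 or 5) is needed for actual smoothing.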

        # Apply thresholding
        _, binary_image = cv2.threshold(image_smooth, self.threshold, 255, cv2.THRESH_BINARY)

        # Find contours
        contours, _ = cv2.findContours(binary_image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

        # Filter contours by size
        detected_particles = []
        for contour in contours:
            area = cv2.contourArea(contour)
            if area > self.min_size:
                detected_particles.append({
                    'area': area,
                    'contour': contour
                })

        # Create visualization
        colored_image = binary_image.copy()
        original_with_contours = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)

        # Fill detected areas
        for particle in detected_particles:
            cv2.fillPoly(original_with_contours, [particle['contour']], (0, 255, 0))

        # Display results if requested
        if show_result:
            self._display_results(image, image_smooth, binary_image,
                                  colored_image, original_with_contours, detected_particles)

        # # Save contour image
        # base_name = os.path.splitext(os.path.basename(image_path))[0]
        # contour_filename = f"{base_name}_contours.tiff"
        # contour_path = os.path.join(os.path.dirname(image_path), contour_filename)
        # cv2.imwrite(contour_path, original_with_contours)

        # Save CSV results if requested
        if self.save_results:
            self._save_csv_results(image_path, detected_particles, binary_image)

        return {
            'binary_image': binary_image,
            'colored_image': colored_image,
            'detected_particles': detected_particles,
            'contour_image': original_with_contours,
        }

    def _display_results(self, original, smooth, binary, colored, contours, particles):
        """Display processing results in a matplotlib figure."""
        fig, axes = plt.subplots(2, 4, figsize=(24, 10))

        # Original image
        axes[0, 0].imshow(original, cmap='gray')
        axes[0, 0].set_title(f'Original Image\nSize: {original.shape}')
        axes[0, 0].axis('off')

        # Smoothed image
        axes[0, 1].imshow(smooth, cmap='gray')
        axes[0, 1].set_title(f'Smoothed Image\n(Gaussian {1}x{1})')
        axes[0, 1].axis('off')

        # Binary image
        axes[0, 2].imshow(binary, cmap='gray')
        axes[0, 2].set_title(f'Binary Image (threshold={self.threshold})')
        axes[0, 2].axis('off')

        # Mask
        axes[0, 3].imshow(colored, cmap='gray')
        axes[0, 3].set_title(f'Mask: pixels > threshold\n({len(particles)} particles)')
        axes[0, 3].axis('off')

        # Binary with contours
        verification_image = cv2.cvtColor(binary, cv2.COLOR_GRAY2RGB)
        for particle in particles:
            cv2.drawContours(verification_image, [particle['contour']], -1, (0, 255, 0), 3)
        axes[1, 0].imshow(verification_image)
        axes[1, 0].set_title('Verification: white areas + green contours')
        axes[1, 0].axis('off')

        # Original with filled areas
        axes[1, 1].imshow(contours)
        axes[1, 1].set_title('Filled areas on original image')
        axes[1, 1].axis('off')

        # Histogram comparison
        axes[1, 2].hist(original.ravel(), bins=256, range=[0, 256], alpha=0.5,
                        color='blue', label='Original')
        axes[1, 2].set_title('Intensity Histogram')
        axes[1, 2].set_xlabel('Intensity')
        axes[1, 2].set_ylabel('Frequency')
        axes[1, 2].legend()

        # Statistics
        axes[1, 3].text(0.1, 0.9, f'Particles detected: {len(particles)}',
                        transform=axes[1, 3].transAxes, fontsize=12)
        axes[1, 3].text(0.1, 0.8, f'Threshold: {self.threshold}',
                        transform=axes[1, 3].transAxes, fontsize=12)
        axes[1, 3].text(0.1, 0.7, f'Min size: {self.min_size} px²',
                        transform=axes[1, 3].transAxes, fontsize=12)
        axes[1, 3].axis('off')

        plt.tight_layout()
        plt.show()

    def _save_csv_results(self, image_path: str, particles: List[Dict], binary_image: np.ndarray) -> None:
        """Save processing results to CSV file."""
        try:
            # Create mask for detected particles
            mask_particles = np.zeros_like(binary_image, dtype=np.uint8)
            for particle in particles:
                cv2.fillPoly(mask_particles, [particle['contour']], 200)

            # Calculate statistics
            num_particles = len(particles)
            total_area_pixels = int(np.count_nonzero(mask_particles == 200))
            avg_area_pixels = total_area_pixels / num_particles if num_particles > 0 else 0

            # Calculate areas in µm²
            height, width = binary_image.shape
            pixel_area_um2 = (self.image_size_um ** 2) / (height * width)
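            # e.g. a 5 µm x 5 µm field captured at 1024 x 1024 pixels gives
            # 25 / 1048576 ≈ 2.4e-5 µm² per pixel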

            total_area_um2 = total_area_pixels * pixel_area_um2
            avg_area_um2 = avg_area_pixels * pixel_area_um2
            total_area_percentage = (total_area_pixels / (height * width)) * 100

            # Calculate density (particles/µm²)
            surface_area_um2 = self.image_size_um ** 2
            density = num_particles / surface_area_um2 if surface_area_um2 > 0 else 0

            # Create CSV filename
            base_name = os.path.splitext(os.path.basename(image_path))[0]
            csv_filename = f"{base_name}_results.csv"
            csv_path = os.path.join(os.path.dirname(image_path), csv_filename)

            # Write CSV
            with open(csv_path, 'w', newline='', encoding='utf-8') as csvfile:
                fieldnames = ['Filename', 'Density', 'Avg_area_um2',
                              'Total_area_percentage', 'Num_particles', 'Total_area_um2']
                writer = csv.DictWriter(csvfile, fieldnames=fieldnames)

                writer.writeheader()
                writer.writerow({
                    'Filename': os.path.basename(image_path),
                    'Density': density,
                    'Avg_area_um2': avg_area_um2,
                    'Total_area_percentage': total_area_percentage,
                    'Num_particles': num_particles,
                    'Total_area_um2': total_area_um2
                })

            self._log(f"Results saved to: {csv_filename}")

        except Exception as e:
            self._log(f"Error saving CSV: {e}")

    def process_merged_tiff_directory(self,
                                      parent_directory: str,
                                      coordinates_df: pd.DataFrame,
                                      image_type: int,
                                      number_type: int,
                                      scale: str,
                                      show_results: bool = False) -> List[str]:
        """
        Process specific images in a merged TIFF file based on coordinates and settings.

        This function finds the merged TIFF file, opens it, and processes only
        the pages that correspond to the coordinates and image type settings,
        using the same logic as the on_click functionality.

        Args:
            parent_directory: Path to directory containing the merged TIFF
            coordinates_df: DataFrame with X, Y coordinates (same as used in on_click)
            image_type: Image type index (same as used in on_click)
            number_type: Number of image types (same as used in on_click)
            scale: Scale label used as the prefix of the saved page filenames
            show_results: Whether to display results for each image

        Returns:
            List of processed filenames
        """
        all_results = []

        self._log(f"Scanning directory for merged TIFF: {parent_directory}")

        # Find merged TIFF file
        merged_tiff_files = []
        for root, dirs, files in os.walk(parent_directory):
            # Look for TIFF files that might be merged (not starting with specific patterns)
            tiff_files = [f for f in files if f.lower().endswith(('.tiff', '.tif'))]

            for file in tiff_files:

                # Check if it's a potential merged file
                file_path = os.path.join(root, file)
                try:
                    # Try to open and check if it has multiple pages
                    with Image.open(file_path) as img:
                        if hasattr(img, 'n_frames') and img.n_frames > 1:
                            merged_tiff_files.append(file_path)
                            self._log(f"Found merged TIFF with {img.n_frames} pages: {file}")
                except Exception as e:
                    self._log(f"Could not check file {file}: {e}")
                    continue

        if not merged_tiff_files:
            self._log("No merged TIFF files found in directory.")
            return all_results

        # Process each merged TIFF file
        for merged_file_path in merged_tiff_files:
            self._log(f"\n{'='*60}")
            self._log(f"PROCESSING MERGED TIFF: {merged_file_path}")
            self._log(f"{'='*60}")

            try:
                with Image.open(merged_file_path) as img:
                    total_pages = img.n_frames if hasattr(img, 'n_frames') else 1
                    self._log(f"Total pages in merged TIFF: {total_pages}")

                    directory_results = []

                    # Calculate which pages to process based on coordinates and settings
                    # Same logic as in on_click: result = self.image_type + (closest_idx * self.number_type)
                    pages_to_process = []

                    for idx in range(len(coordinates_df)):
                        # Calculate the page index using the same formula as on_click
                        page_index = image_type + (idx * number_type)
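                        # e.g. with number_type = 2 frames per site and image_type = 1,
                        # site idx 0 -> page 1, idx 1 -> page 3, idx 2 -> page 5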

                        # Check if this page index is valid for the TIFF
                        if 0 <= page_index < total_pages:
                            pages_to_process.append({
                                'page_index': page_index,
                                'coordinate_idx': idx,
                                'x': coordinates_df.iloc[idx]['X'],
                                'y': coordinates_df.iloc[idx]['Y']
                            })

                    self._log(f"Will process {len(pages_to_process)} pages out of {total_pages} total pages")

                    # Process only the calculated pages
                    for page_info in pages_to_process:
                        page_index = page_info['page_index']
                        coord_idx = page_info['coordinate_idx']
                        x_coord = page_info['x']
                        y_coord = page_info['y']

                        try:
                            # Seek to the specific page
                            img.seek(page_index)

                            # Convert PIL image to numpy array for processing
                            img_array = np.array(img)

                            self._log(f"Processing page {page_index + 1}/{total_pages} (coord {coord_idx}: {x_coord:.1f}, {y_coord:.1f})")

                            # Process the image using the same method as process_single_image
                            result = self._process_image_array(img_array, show_result=show_results)

                            if result is not None:
                                particles = result['detected_particles']
                                binary_image = result['binary_image']

                                # Calculate statistics for this page
                                mask_particles = np.zeros_like(binary_image, dtype=np.uint8)
                                for particle in particles:
                                    cv2.fillPoly(mask_particles, [particle['contour']], 200)

                                num_particles = len(particles)
                                total_area_pixels = int(np.count_nonzero(mask_particles == 200))
                                avg_area_pixels = total_area_pixels / num_particles if num_particles > 0 else 0

                                # Calculate areas in µm²
                                height, width = binary_image.shape
                                pixel_area_um2 = (self.image_size_um ** 2) / (height * width)

                                total_area_um2 = total_area_pixels * pixel_area_um2
                                avg_area_um2 = avg_area_pixels * pixel_area_um2
                                total_area_percentage = (total_area_pixels / (height * width)) * 100

                                # Calculate density
                                surface_area_um2 = self.image_size_um ** 2
                                density = num_particles / surface_area_um2 if surface_area_um2 > 0 else 0

                                # Generate filename for this page with scale and coordinates
                                page_filename = f"{scale}_{x_coord:.1f}_{y_coord:.1f}.tiff"

                                # Save processed image
                                if self.save_results:
                                    output_dir = os.path.dirname(merged_file_path)
                                    processed_path = os.path.join(output_dir, page_filename)

                                    # Save the processed image (with contours)
                                    processed_img = Image.fromarray(result['processed_image'])
                                    processed_img.save(processed_path)

                                    self._log(f"Processed image saved: {processed_path}")

                                # Add to directory results
                                directory_results.append({
                                    'Filename': page_filename,
                                    'Page_Index': page_index + 1,
                                    'Density': density,
                                    'Avg_area_um2': avg_area_um2,
                                    'Total_area_percentage': total_area_percentage,
                                    'Num_particles': num_particles,
                                    'Total_area_um2': total_area_um2
                                })

                                all_results.append(page_filename)

                        except Exception as e:
                            self._log(f"Error processing page {page_index + 1}: {e}")
                            continue

                    # Save consolidated CSV for this merged TIFF
                    if directory_results:
                        output_dir = os.path.dirname(merged_file_path)
                        csv_path = os.path.join(output_dir, "merged_threshold_results.csv")

                        with open(csv_path, 'w', newline='', encoding='utf-8') as csvfile:
                            fieldnames = ['Filename', 'Page_Index', 'Density', 'Avg_area_um2',
                                          'Total_area_percentage', 'Num_particles', 'Total_area_um2']
                            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)

                            writer.writeheader()
                            for result in directory_results:
                                writer.writerow(result)

                        self._log(f"Consolidated CSV saved: {csv_path}")

                    self._log(f"Pages processed from merged TIFF: {len(directory_results)}")

            except Exception as e:
                self._log(f"Error processing merged TIFF {merged_file_path}: {e}")
                continue

        self._log(f"\n{'='*60}")
        self._log(f"MERGED TIFF PROCESSING SUMMARY")
        self._log(f"{'='*60}")
        self._log(f"Total pages processed: {len(all_results)}")

        return all_results

    def _process_image_array(self, img_array: np.ndarray, show_result: bool = False) -> Optional[Dict]:
        """
        Process a numpy array image using the same method as process_single_image.

        Args:
            img_array: Numpy array of the image
            show_result: Whether to display results

        Returns:
            Dictionary with processing results or None if failed
        """
        try:
            # Convert to grayscale if needed (same as process_single_image)
            if len(img_array.shape) == 3:
                if img_array.shape[2] == 4:  # RGBA
                    gray = cv2.cvtColor(img_array, cv2.COLOR_RGBA2GRAY)
                elif img_array.shape[2] == 3:  # RGB
                    gray = cv2.cvtColor(img_array, cv2.COLOR_RGB2GRAY)
                else:
                    gray = img_array[:, :, 0]
            else:
                gray = img_array

            # Apply smoothing
            image_smooth = cv2.GaussianBlur(gray, (3, 3), 0)

            # Apply thresholding
            _, binary_image = cv2.threshold(image_smooth, self.threshold, 255, cv2.THRESH_BINARY)

            # Find contours
            contours, _ = cv2.findContours(binary_image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

            # Filter contours by size
            detected_particles = []
            for contour in contours:
                area = cv2.contourArea(contour)
                if area > self.min_size:
                    detected_particles.append({
                        'area': area,
                        'contour': contour
                    })

            # Create visualization: show contours on original image
            original_with_contours = cv2.cvtColor(gray, cv2.COLOR_GRAY2RGB)

            # Draw contours on original image
            for particle in detected_particles:
                cv2.fillPoly(original_with_contours, [particle['contour']], (0, 255, 0))  # Green fill

            # Show result if requested
            if show_result:
                cv2.imshow('Processed Image', original_with_contours)
                cv2.waitKey(0)
                cv2.destroyAllWindows()

            return {
                'detected_particles': detected_particles,
                'binary_image': binary_image,
                'processed_image': original_with_contours
            }

        except Exception as e:
            self._log(f"Error processing image array: {e}")
            return None

    def consolidate_csv_files(self, parent_directory: str) -> Optional[str]:
        """
        Consolidate all 'results.csv' files found in parent directory and subdirectories
        into a single consolidated CSV file.

        Args:
            parent_directory: Path to parent directory

        Returns:
            Path to created consolidated CSV file or None if error
        """
        self._log(f"Consolidating CSV files in: {parent_directory}")

        # List to store all results
        all_results = []

        # Walk through all subdirectories
        for root, dirs, files in os.walk(parent_directory):
            self._log(f"\nScanning directory: {root}")

            # Look for 'results.csv' files
            for file in files:
                if file.endswith("results.csv"):
                    csv_path = os.path.join(root, file)
                    self._log(f"  Found: {csv_path}")

                    try:
                        # Read CSV file
                        with open(csv_path, 'r', encoding='utf-8') as csvfile:
                            reader = csv.DictReader(csvfile)

                            # Add each row to global result
                            for row in reader:
                                # Split filename into X and Y
                                filename = row['Filename']
                                if '_' in filename and filename.endswith('.tiff'):
                                    # Remove .tiff extension
                                    name_without_ext = filename.replace('.tiff', '')
                                    # Split on underscores
                                    parts = name_without_ext.split('_')
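                                    # e.g. a name like "5um_12.5_-3.0.tiff" (the pattern written by
                                    # process_merged_tiff_directory) yields X="12.5", Y="-3.0"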
                                    if len(parts) == 3:
                                        row['X'] = parts[1]
                                        row['Y'] = parts[2]
                                    else:
                                        row['X'] = name_without_ext
                                        row['Y'] = ''
                                else:
                                    row['X'] = filename
                                    row['Y'] = ''

                                all_results.append(row)

                    except Exception as e:
                        self._log(f"  Error reading {csv_path}: {e}")

        if not all_results:
            self._log("No 'results.csv' files found!")
            return None

        # Create consolidated CSV file
        consolidated_filename = "consolidated_results.csv"
        consolidated_path = os.path.join(parent_directory, consolidated_filename)

        try:
            with open(consolidated_path, 'w', newline='', encoding='utf-8') as csvfile:
                # Define columns (with X and Y separated)
                fieldnames = ['X', 'Y', 'Density', 'Avg_area_um2',
                              'Total_area_percentage', 'Num_particles', 'Total_area_um2']

                writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
                writer.writeheader()

                # Write all data
                for result in all_results:
                    # Create new dictionary with only desired columns
                    clean_row = {}
                    for field in fieldnames:
                        clean_row[field] = result.get(field, '')
                    writer.writerow(clean_row)

            self._log(f"\n{'='*60}")
            self._log(f"CONSOLIDATION COMPLETE")
            self._log(f"{'='*60}")
            self._log(f"Consolidated file created: {consolidated_path}")
            self._log(f"Total rows: {len(all_results)}")

            return consolidated_path

        except Exception as e:
            self._log(f"Error creating consolidated file: {e}")
            return None

    def plot_sem_data(self,
                      parent_directory: str,
                      show_results: bool = True) -> List[str]:
        """
        Plot density and total area data from consolidated CSV files
        found in parent directory and subdirectories.

        Args:
            parent_directory: Path to parent directory
            show_results: Whether to display plots with matplotlib

        Returns:
            List of paths to processed CSV files
        """
        processed_files = []

        self._log(f"\n{'='*60}")
        self._log(f"SEM DATA PLOTTING")
        self._log(f"{'='*60}")
        self._log(f"Parent directory: {parent_directory}")

        # Walk through all subdirectories
        for root, dirs, files in os.walk(parent_directory):
            # Look for 'consolidated_results.csv' files
            for file in files:
                if file == "consolidated_results.csv":
                    csv_path = os.path.join(root, file)
                    self._log(f"\nProcessing: {csv_path}")

                    try:
                        # Load data
                        data_frame = pd.read_csv(csv_path)
                        self._log(f"  Data loaded: {len(data_frame)} points")

                        # Check that required columns exist
                        required_columns = ['X', 'Y', 'Density', 'Total_area_percentage']
                        missing_columns = [col for col in required_columns if col not in data_frame.columns]

                        if missing_columns:
                            self._log(f"  ⚠️ Missing columns: {missing_columns}")
                            continue

                        # Create Mapping directory if it doesn't exist
                        mapping_dir = os.path.join(root, "Mapping")
                        os.makedirs(mapping_dir, exist_ok=True)

                        # Grid parameters
                        step = 0.5
                        x_min, x_max = data_frame['X'].min(), data_frame['X'].max()
                        y_min, y_max = data_frame['Y'].min(), data_frame['Y'].max()

                        # Extend limits slightly for better interpolation
                        x_range = x_max - x_min
                        y_range = y_max - y_min
                        x_min -= x_range * 0.1
                        x_max += x_range * 0.1
                        y_min -= y_range * 0.1
                        y_max += y_range * 0.1

                        # Generate regular grid
                        x = np.arange(x_min, x_max + step, step)
                        y = np.arange(y_min, y_max + step, step)
                        grid_x, grid_y = np.meshgrid(x, y)

                        # Columns to plot
                        plot_columns = ['Density', 'Total_area_percentage']
                        file_names = ['Density', 'Total_area_percentage']

                        # Process each column
                        for i, column in enumerate(plot_columns):
                            self._log(f"  Plotting {column}...")

                            # Interpolate on grid
                            grid_z = griddata(
                                (data_frame['X'], data_frame['Y']),
                                data_frame[column],
                                (grid_x, grid_y),
                                method='linear',
                                fill_value=np.nan
                            )
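                            # Linear interpolation onto the regular 0.5 µm grid; grid points outside
                            # the convex hull of the measured sites stay NaN and render as blank areas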

                            # Create DataFrame to save grid
                            grid_data = pd.DataFrame({
                                'X': grid_x.flatten(),
                                'Y': grid_y.flatten(),
                                'Z': grid_z.flatten()
                            })

                            # Pivot for display
                            grid_z_pivot = grid_data.pivot(index='Y', columns='X', values='Z')

                            # Save grid
                            grid_path = os.path.join(mapping_dir, f'{file_names[i]}_grid.csv')
                            grid_z_pivot.to_csv(grid_path)

                            # Create plot
                            fig, ax = plt.subplots(figsize=(10, 8))

                            # Plot with interpolation
                            img = ax.imshow(
                                grid_z_pivot,
                                extent=(x_min, x_max, y_min, y_max),
                                origin='lower',
                                cmap='Spectral_r',
                                aspect='equal'
                            )

                            # Add original data points
                            scatter = ax.scatter(
                                data_frame['X'],
                                data_frame['Y'],
                                c=data_frame[column],
                                s=50,
                                edgecolors='black',
                                linewidth=0.5,
                                cmap='Spectral_r',
                                alpha=0.8
                            )

                            # Customize appearance
                            cbar = plt.colorbar(img, ax=ax, shrink=1)
                            cbar.ax.tick_params(labelsize=20)

                            ax.set_xlabel('X (µm)', fontsize=24)
                            ax.set_ylabel('Y (µm)', fontsize=24)
                            ax.tick_params(axis='both', labelsize=20)

                            # Title
                            column_title = "Density (particles/µm²)" if column == "Density" else "Total Area (%)"
                            ax.set_title(f"{column_title} - {os.path.basename(root)}", fontsize=16)

                            # Grid
                            ax.grid(True, alpha=0.3)

                            # Save plot
                            plot_path = os.path.join(mapping_dir, f"{file_names[i]}.png")
                            plt.savefig(plot_path, bbox_inches='tight', dpi=300)

                            if show_results:
                                plt.show()
                            else:
                                plt.close(fig)

                            self._log(f"  Saved: {plot_path}")

                        processed_files.append(csv_path)

                    except Exception as e:
                        self._log(f"  ❌ Error processing {csv_path}: {e}")
                        continue

        self._log(f"\n{'='*60}")
        self._log(f"PLOTTING COMPLETE")
        self._log(f"{'='*60}")
        self._log(f"Files processed: {len(processed_files)}")

        return processed_files


# Example usage
if __name__ == "__main__":
    # Initialize processor
    processor = SEMThresholdProcessor(
        threshold=200,
        min_size=0,
        image_size_um=5.0,
        save_results=True,
        verbose=True
    )

    # Example directory
    parent_dir = r"C:\Users\TM273821\Desktop\SEM\TEst_brute\1"

    # Process directory
    # results = processor.process_directory(parent_dir, show_results=False)

    # Consolidate CSV files
    # consolidated_path = processor.consolidate_csv_files(parent_dir)

    # Plot data
    # plot_files = processor.plot_sem_data(parent_dir, show_results=False)