drap 0.0.2.post1__py3-none-any.whl → 0.0.3.post0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- drap/automation.py +1 -2
- drap/gui.py +2 -3
- drap/terminal_interface.py +1 -2
- drap/utils.py +1622 -0
- {drap-0.0.2.post1.dist-info → drap-0.0.3.post0.dist-info}/METADATA +15 -1
- drap-0.0.3.post0.dist-info/RECORD +12 -0
- drap-0.0.2.post1.dist-info/RECORD +0 -11
- {drap-0.0.2.post1.dist-info → drap-0.0.3.post0.dist-info}/WHEEL +0 -0
- {drap-0.0.2.post1.dist-info → drap-0.0.3.post0.dist-info}/entry_points.txt +0 -0
- {drap-0.0.2.post1.dist-info → drap-0.0.3.post0.dist-info}/licenses/LICENSE +0 -0
- {drap-0.0.2.post1.dist-info → drap-0.0.3.post0.dist-info}/top_level.txt +0 -0
drap/utils.py
ADDED
@@ -0,0 +1,1622 @@
|
|
1
|
+
import cv2
|
2
|
+
from fabio.edfimage import EdfImage
|
3
|
+
import tkinter as tk
|
4
|
+
from tkinter import filedialog
|
5
|
+
from pathlib import Path
|
6
|
+
import numpy
|
7
|
+
import matplotlib.pyplot as plt
|
8
|
+
import string
|
9
|
+
from PIL import Image
|
10
|
+
from reportlab.lib.pagesizes import A4, letter
|
11
|
+
from reportlab.platypus import SimpleDocTemplate, Table, TableStyle
|
12
|
+
from reportlab.pdfgen import canvas
|
13
|
+
from reportlab.lib import colors
|
14
|
+
import os
|
15
|
+
import copy
|
16
|
+
import time as timelib
|
17
|
+
from datetime import datetime
|
18
|
+
from datetime import timedelta
|
19
|
+
import numpy as np
|
20
|
+
import sys
|
21
|
+
from PyQt5.QtWidgets import QApplication, QMainWindow, QLabel, QVBoxLayout, QWidget, QPushButton, QFileDialog, QMessageBox, QLineEdit, QHBoxLayout, QGroupBox, QCheckBox
|
22
|
+
from PyQt5.QtGui import QPixmap, QPainter, QPen, QImage, QMouseEvent, QColor
|
23
|
+
from PyQt5.QtCore import Qt, QPoint, QRect, QFileInfo
|
24
|
+
import re
|
25
|
+
import argparse
|
26
|
+
import cv2
|
27
|
+
import matplotlib
|
28
|
+
|
29
|
+
import matplotlib.pyplot as plt
|
30
|
+
|
31
|
+
matplotlib.use('Agg')
|
32
|
+
|
33
|
+
|
34
|
+
from PyQt5.QtWidgets import QApplication
|
35
|
+
import sys
|
36
|
+
|
37
|
+
|
38
|
+
|
39
|
+
|
40
|
+
|
41
|
+
class ImageCropper(QMainWindow):
    """Main PyQt5 window: load a video, draw a crop rectangle on its first
    frame, and launch the droplet-size analysis / EDF sorting workflows.

    NOTE(review): the analysis itself is delegated to ``conc_scat_video`` and
    ``concatene_files_scat_back`` defined elsewhere in this module.
    """

    def __init__(self):
        super().__init__()
        self.initUI()

        # Drawing state for the rubber-band rectangle.
        self.drawing = False
        self.rect_start = QPoint()
        self.current_rect = QRect()
        self.image = None           # current (possibly annotated) QImage
        self.pixmap = None          # pixmap shown in image_label
        self.original_image = None  # pristine copy used to redraw the rectangle
        self.result_image = None    # analysis result pixmap

    def initUI(self):
        """Build the widget tree and wire up all signals."""

        # self.test = True
        self.test = False  # when True, a hard-coded test video and inputs are used
        print("teste")

        self.setWindowTitle('Analysis droplet parameters.')
        self.setGeometry(100, 100, 1200, 600)

        # Central widget with a horizontal split: images (left) / controls (right).
        self.central_widget = QWidget()
        self.setCentralWidget(self.central_widget)
        self.main_layout = QHBoxLayout()
        self.central_widget.setLayout(self.main_layout)

        # Layout for images
        self.image_layout = QVBoxLayout()
        self.main_layout.addLayout(self.image_layout)

        # Layout for controls
        self.controls_layout = QVBoxLayout()
        self.main_layout.addLayout(self.controls_layout)

        # Create a QLabel to display the image
        self.image_label = QLabel(self)
        self.image_layout.addWidget(self.image_label)

        # Create a QLabel to display the resulting image
        self.result_label = QLabel(self)
        self.image_layout.addWidget(self.result_label)

        # Create a button to load the image
        self.load_button = QPushButton('Load Video', self)
        self.controls_layout.addWidget(self.load_button)
        self.load_button.clicked.connect(self.load_image)

        # Create a button to crop the image
        self.crop_button = QPushButton('Cut Image', self)
        self.controls_layout.addWidget(self.crop_button)
        self.crop_button.clicked.connect(self.crop_image)

        # Create a button to display image information
        self.info_button = QPushButton('Information of Video', self)
        self.controls_layout.addWidget(self.info_button)
        self.info_button.clicked.connect(self.show_image_info)

        # Input group: concentration (mg/ml)
        self.int_group4 = QGroupBox("C (mg/ml)")
        self.int_layout4 = QVBoxLayout()
        self.int_group4.setLayout(self.int_layout4)
        self.int_input_label4 = QLabel('Type the value of concentration (mg/ml)', self)
        self.int_input4 = QLineEdit(self)
        self.int_output4 = QLabel('', self)
        self.int_layout4.addWidget(self.int_input_label4)
        self.int_layout4.addWidget(self.int_input4)
        self.int_layout4.addWidget(self.int_output4)
        self.controls_layout.addWidget(self.int_group4)

        # Input group: pixel-to-millimetre scale
        self.int_group1 = QGroupBox("Pixel/mm")
        self.int_layout1 = QVBoxLayout()
        self.int_group1.setLayout(self.int_layout1)
        self.int_input_label1 = QLabel('Type the value of pixel/mm:', self)
        self.int_input1 = QLineEdit(self)
        self.int_output1 = QLabel('', self)
        self.int_layout1.addWidget(self.int_input_label1)
        self.int_layout1.addWidget(self.int_input1)
        self.int_layout1.addWidget(self.int_output1)
        self.controls_layout.addWidget(self.int_group1)

        # Input group: frame sampling interval
        self.int_group2 = QGroupBox("Interval")
        self.int_layout2 = QVBoxLayout()
        self.int_group2.setLayout(self.int_layout2)
        self.int_input_label2 = QLabel('Type the value of frame interval:', self)
        self.int_input2 = QLineEdit(self)
        self.int_output2 = QLabel('', self)
        self.int_layout2.addWidget(self.int_input_label2)
        self.int_layout2.addWidget(self.int_input2)
        self.int_layout2.addWidget(self.int_output2)
        self.controls_layout.addWidget(self.int_group2)

        # Input group: analysis time limit (seconds)
        self.int_group3 = QGroupBox("Time limit")
        self.int_layout3 = QVBoxLayout()
        self.int_group3.setLayout(self.int_layout3)
        self.int_input_label3 = QLabel('Type the value of time limit (s):', self)
        self.int_input3 = QLineEdit(self)
        self.int_output3 = QLabel('', self)
        self.int_layout3.addWidget(self.int_input_label3)
        self.int_layout3.addWidget(self.int_input3)
        self.int_layout3.addWidget(self.int_output3)
        self.controls_layout.addWidget(self.int_group3)

        # Button that launches the droplet analysis
        self.process_button = QPushButton('Analysis droplet parameters', self)
        self.controls_layout.addWidget(self.process_button)
        self.process_button.clicked.connect(self.calcule_size_drop)

        # Input group: root name used for output files
        self.str_group4 = QGroupBox("Root Name File")
        self.str_layout4 = QVBoxLayout()
        self.str_group4.setLayout(self.str_layout4)
        self.str_input_label4 = QLabel('Type the name of root files:', self)
        self.str_input4 = QLineEdit(self)
        self.str_output4 = QLabel('', self)
        self.str_layout4.addWidget(self.str_input_label4)
        self.str_layout4.addWidget(self.str_input4)
        self.str_layout4.addWidget(self.str_output4)
        self.controls_layout.addWidget(self.str_group4)

        # NOTE(review): the next three buttons all reuse the attribute name
        # `load_directory_button`; only the last assignment survives as an
        # attribute, but each button stays alive in the layout so the wiring
        # still works.
        self.load_directory_button = QPushButton('Sort edf files by time', self)
        self.controls_layout.addWidget(self.load_directory_button)
        self.load_directory_button.clicked.connect(self.sort_images_edf_time)

        self.load_directory_button = QPushButton('Sort edf files by size droplet ', self)
        self.controls_layout.addWidget(self.load_directory_button)
        self.load_directory_button.clicked.connect(self.sort_images_edf_size_drop)

        self.load_directory_button = QPushButton('Concatene edf files samples and background ', self)
        self.controls_layout.addWidget(self.load_directory_button)
        self.load_directory_button.clicked.connect(self.contate_sort_images_edf_size_drop)

        # Optional PDF report with the processed images
        self.check_option = QCheckBox("Print PDF with images", self)
        self.check_option.setChecked(False)  # unchecked by default
        self.controls_layout.addWidget(self.check_option)

        # Hook mouse events so the rectangle can be drawn on the image label
        self.image_label.installEventFilter(self)

        # In test mode, preload a known video and fill the inputs.
        if self.test:
            self.load_image()
            self.int_input1.setText("45.")
            self.int_input2.setText("5")
            self.int_input3.setText("50")
            self.int_input4.setText("2.0")

        self.show()

    def load_image(self):
        """Pick a video, grab its first frame to data/sample_frame.jpg and show it."""

        if self.test:
            self.file_path = "/home/standard02/Documents/programming/python/bolhas/2024-07-12-NW-usAg-GSH-2mgml.flv"
        else:
            self.file_path, _ = QFileDialog.getOpenFileName(self, 'Open Video', '', 'Videos (*.avi *.mp4 *.mov *.mkv *.wmv *.flv *.mpg *.mpeg *.3gp *.ogv .webm)')

        if not self.file_path:
            QMessageBox.warning(self, 'Warning', 'No video selected.')
            return

        # NOTE(review): fileName() strips the directory, then resolve() rebuilds
        # the path relative to the CWD -- presumably intentional so the video is
        # expected next to the working directory; confirm against callers.
        self.file_path = os.path.normpath(self.file_path)
        self.file_path = QFileInfo(self.file_path).fileName();
        self.file_path = Path(self.file_path);
        self.file_path = self.file_path.resolve();
        self.file_path = os.path.normpath(self.file_path);

        video = cv2.VideoCapture(self.file_path);

        if not video.isOpened():
            QMessageBox.critical(self, 'Error', f'Could not open video:\n{self.file_path}')
            return

        rval, frame = video.read();

        if not rval or frame is None:
            QMessageBox.critical(self, 'Error', 'Could not read first frame from the video.')
            video.release()
            return

        # Persist the first frame under data/ so QPixmap can load it.
        file_path = 'data/';
        file_path = Path(file_path);
        file_path = file_path.resolve();
        os.makedirs(file_path, exist_ok=True)
        file_path = os.path.normpath(file_path);
        file_path = os.path.join(file_path, 'sample_frame.jpg');
        file_path = os.path.normpath(file_path);

        ok = cv2.imwrite(file_path,frame);
        if not ok:
            QMessageBox.critical(self, 'Error', f'Failed to save first frame to:\n{file_path}')
            video.release()
            return

        if file_path:
            self.pixmap = QPixmap(file_path)
            if self.pixmap.isNull():
                QMessageBox.critical(self, 'Error', f'Failed to load image into QPixmap:\n{file_path}')
                video.release()
                return
            self.original_image = self.pixmap.toImage()
            self.image = self.pixmap.toImage()
            self.image_label.setPixmap(self.pixmap)
            self.image_label.setScaledContents(True)
            # Cache video metadata used later by show_image_info / analysis.
            self.fps = video.get(cv2.CAP_PROP_FPS);
            self.total_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
            self.max_frames = self.total_frames;
        video.release();

    def sort_images_edf_time(self):
        """Sort EDF files in a chosen directory by acquisition time (option 1)."""

        self.root_file_gui = str(self.str_input4.text());

        if not self.root_file_gui:
            QMessageBox.warning(self, 'Warning', 'No root files name was filled:')
            return

        directory = QFileDialog.getExistingDirectory(self, 'Choose Directory with EDF Images');
        directory = directory + "/";
        directory = Path(directory);
        directory = directory.resolve();
        directory = os.path.normpath(directory);
        #file_path, _ = QFileDialog.getOpenFileName(self, 'Choose file with polynomio data', directory, 'Data files (*.dat *.txt *.csv)')
        if directory:

            set_file_1 = conc_scat_video(4,path = directory, root_name = self.root_file_gui);
            set_file_1.read_setfiles_edf(option = 1);

    def sort_images_edf_size_drop(self):
        """Sort EDF files by droplet size using a polynomial fit file (option 0)."""

        self.root_file_gui = str(self.str_input4.text());

        if not self.root_file_gui:
            QMessageBox.warning(self, 'Warning', 'No root files name was filled:')
            return

        directory = QFileDialog.getExistingDirectory(self, 'Choose Directory with EDF Images');
        directory = directory + "/"

        directory = os.path.normpath(directory);
        file_path, _ = QFileDialog.getOpenFileName(self, 'Choose file with polynomio data', directory, 'Data files (*.dat *.txt *.csv)')
        file_path = Path(file_path);
        file_path = file_path.resolve();
        file_path = os.path.normpath(file_path);

        if directory:
            set_file_1 = conc_scat_video(5,path = directory, root_name = self.root_file_gui, input_file = file_path);
            set_file_1.read_setfiles_edf(option = 0);

    def contate_sort_images_edf_size_drop(self):
        """Pair sample and background EDF sets and concatenate them (option 2)."""

        directory = QFileDialog.getExistingDirectory(self, 'Choose (Samples) Directory with EDF Images');
        directory = directory + "/";
        directory = Path(directory);
        directory = directory.resolve();
        directory = os.path.normpath(directory);

        file_path, _ = QFileDialog.getOpenFileName(self, 'Choose (Samples) file with polynomio data', directory, 'Data files (*.dat *.txt *.csv)');

        file_path = Path(file_path);
        file_path = file_path.resolve();
        file_path = os.path.normpath(file_path);

        if directory:

            set_file_1 = conc_scat_video(5,path = directory, input_file = file_path);
            set_file_1.read_setfiles_edf(option = 2);

            directory = QFileDialog.getExistingDirectory(self, 'Choose (Background) Directory with EDF Images');
            directory = directory + "/";

            directory = Path(directory);
            directory = directory.resolve();
            directory = os.path.normpath(directory);

            file_path, _ = QFileDialog.getOpenFileName(self, 'Choose (Background) file with polynomio data', directory, 'Data files (*.dat *.txt *.csv)');

            file_path = Path(file_path);
            file_path = file_path.resolve();
            file_path = os.path.normpath(file_path);

            if directory:

                set_file_2 = conc_scat_video(5,path = directory, input_file = file_path);
                set_file_2.read_setfiles_edf(option = 2);

                concatene_files_scat_back(set_file_1, set_file_2);

    def eventFilter(self, obj, event):
        """Track mouse press/move/release on the image label to draw the crop rectangle."""
        if obj == self.image_label:
            if event.type() == QMouseEvent.MouseButtonPress:
                if event.button() == Qt.LeftButton:
                    self.drawing = True
                    self.rect_start = event.pos()
            elif event.type() == QMouseEvent.MouseMove:
                if self.drawing:
                    # normalized() guarantees top-left/bottom-right ordering
                    self.current_rect = QRect(self.rect_start, event.pos()).normalized()
                    self.update_image()
            elif event.type() == QMouseEvent.MouseButtonRelease:
                if event.button() == Qt.LeftButton:
                    self.drawing = False
                    if not self.current_rect.isNull():
                        self.update_image()
        return super().eventFilter(obj, event)

    def crop_image(self):
        """Crop the drawn rectangle, save it to data/image_cuted.png and record its coordinates."""

        if self.current_rect.isNull() or not self.original_image:
            QMessageBox.warning(self, 'Warning', 'No rectangle drawn for cropping. Please draw rectangle first.')
            return

        # Crop the image
        cropped_image = self.original_image.copy(self.current_rect)

        # Saving the cropped image
        save_path = 'data/';
        save_path = Path(save_path);
        save_path = save_path.resolve();
        save_path = os.path.normpath(save_path);
        save_path = os.path.join(save_path, 'image_cuted.png');
        save_path = os.path.normpath(save_path);
        os.makedirs(os.path.dirname(save_path), exist_ok=True)
        if save_path:
            cropped_image.save(save_path)
            self.save_rectangle_coordinates(self.current_rect)

    def show_image_info(self):
        """Pop up a dialog with dimensions, format, fps and frame count of the loaded video."""

        # NOTE(review): assumes load_image() ran successfully; self.pixmap /
        # self.fps are unset otherwise -- confirm the button cannot be pressed
        # before loading.
        width = self.pixmap.width()
        height = self.pixmap.height()
        format_str = self.image.format()
        depth = self.image.depth()

        info = (f"Dimensions: {width}x{height}\n"
                f"Format: {format_str}\n"
                f"Color Depth: {depth} bits\n"
                f"Frame Rate: {self.fps} f/s\n"
                f"Number Total of Frames: {self.total_frames}\n"
                f"Time Total (s): {round(self.total_frames/self.fps)} ")

        QMessageBox.information(self, 'Information of Image', info)

    def calcule_size_drop(self):
        """Validate the form inputs and run the droplet-parameter analysis on the video."""

        if self.check_option.isChecked():
            print_pdf = True
        else:
            print_pdf = False

        # A crop rectangle must have been saved first (sets self.ret).
        if hasattr(self, 'ret'):
            pass;
        else:
            QMessageBox.warning(self, '', 'No rectangle drawn for cropping. Please draw rectangle first and cut the image.')
            return;

        if (self.int_input1.text() and self.int_input2.text() and self.int_input3.text() and self.int_input4.text()):
            numbers = [(input_field.text()) for input_field in [self.int_input1, self.int_input2, self.int_input3, self.int_input4]];

            for output_field, number in zip([self.int_output1, self.int_output2, self.int_output3], numbers):
                output_field.setText(f'Número: {number}')

            # numbers: [pixel/mm, frame interval, time limit (s), concentration Co]
            set_file_1 = conc_scat_video(3, file_video =self.file_path, px_mm = float(numbers[0]) , step = int(numbers[1]), time_limit = int(numbers[2]), Co = float(numbers[3]) , retangulo = self.ret, print_pdf = print_pdf);
            result_image = set_file_1.read_video();
            if result_image:
                self.result_image = QPixmap(result_image)
                self.original_image = self.result_image.toImage()
                self.image = self.result_image.toImage()
                self.result_label.setPixmap(QPixmap.fromImage(self.original_image))
                self.result_label.setScaledContents(True)
        else:
            QMessageBox.warning(self, 'Warning', 'Please Fill the forms.')
            return;

    def update_image(self):
        """Redraw the pristine frame with the current selection rectangle overlaid."""

        if self.original_image and self.pixmap:
            self.image = self.original_image.copy() # Restore the original image
            painter = QPainter(self.image)
            painter.setPen(QPen(Qt.red, 2, Qt.SolidLine))
            if not self.current_rect.isNull():
                painter.drawRect(self.current_rect)
            painter.end()
            self.pixmap = QPixmap.fromImage(self.image)
            self.image_label.setPixmap(self.pixmap)

    def save_rectangle_coordinates(self, rect):
        """Store the crop rectangle as self.ret = [x1, x2, y1, y2] and log it to a text file."""

        # Save vertex coordinates to a file
        x1, y1 = rect.topLeft().x(), rect.topLeft().y()
        x2, y2 = rect.bottomRight().x(), rect.bottomRight().y()
        self.ret = [x1, x2, y1,y2]
        coordinates = f"Vértices do Retângulo: ({x1}, {y1}), ({x2}, {y2})"

        # Save the coordinates to a text file
        save_path = "data/";
        save_path = Path(save_path);
        save_path = save_path.resolve();
        save_path = os.path.normpath(save_path);
        save_path = os.path.join(save_path, 'data_image_cuted.txt');
        os.makedirs(os.path.dirname(save_path), exist_ok=True)
        if save_path:
            with open(save_path, 'w') as file:
                file.write(coordinates)
            QMessageBox.information(self, 'Save', f'Coordenates saved in: {save_path}')
|
471
|
+
class Data_edf:
    """Lightweight record pairing an EDF file name with its acquisition metadata."""

    def __init__(self, file_in, date_in, ExposureTime_in):
        # Attribute names are part of the public surface; keep them unchanged.
        self.file_ = file_in
        self.Date = date_in
        self.ExposureTime = ExposureTime_in

    def get_infos(self):
        """Return the stored (file name, date, exposure time) triple."""
        return self.file_, self.Date, self.ExposureTime
489
|
+
|
490
|
+
def delete_value_extrem(data_in):
    """Remove rows whose width (col 1) or height (col 2) deviates by more than
    two standard deviations from the mean of the preceding 10 rows.

    The first 10 rows are always kept (no trailing window exists for them), and
    window statistics are computed over the raw data, including rows that were
    themselves flagged earlier. Returns a new (kept_rows x 9) array with the
    first nine columns of the surviving rows.
    """
    n_rows = len(data_in)
    window = 10
    widths = data_in[:, 1]
    heights = data_in[:, 2]
    keep = numpy.ones(n_rows, dtype=bool)

    for idx in range(window, n_rows):
        w_val = data_in[idx][1]
        h_val = data_in[idx][2]

        w_prev = widths[idx - window:idx]
        h_prev = heights[idx - window:idx]

        # |x - mean| > 2*std is the same test as the original pair of
        # one-sided comparisons (strict on both sides).
        if (abs(w_val - numpy.mean(w_prev)) > 2 * numpy.std(w_prev)
                or abs(h_val - numpy.mean(h_prev)) > 2 * numpy.std(h_prev)):
            keep[idx] = False

    # Copy columns 0..8 of the surviving rows, via the same transposed
    # intermediate shape the original used.
    data_out = numpy.zeros(shape=(len(data_in[0]), keep.sum()))
    for col in range(9):
        data_out[col, :] = data_in[keep, col]

    return numpy.transpose(data_out)
|
+
|
526
|
+
def plot_data(data_abs,data_1, data_2, data_3, data_4, data_5, coef_pol_w_1, coef_pol_h_1, coef_pol_area_1, coef_pol_conc_1, name_file):
    """Plot the droplet time series and their polynomial fits, saving to *name_file*.

    Parameters
    ----------
    data_abs : time axis (s)
    data_1, data_2 : width and height semi-axes (mm)
    data_3 : surface area (mm^2)
    data_4 : relative concentration (%)
    data_5 : volume (uL)
    coef_pol_*_1 : numpy.polyfit coefficient arrays evaluated over the time axis
    name_file : output image path passed to savefig
    """

    plt.clf()
    # Evaluate each fitted polynomial on an evenly spaced time grid.
    x_adj_1 = numpy.linspace(min(data_abs), max(data_abs), len(data_abs))
    y_w_adj_1 = numpy.polyval(coef_pol_w_1, x_adj_1)
    y_h_adj_1 = numpy.polyval(coef_pol_h_1, x_adj_1)
    y_area_adj_1 = numpy.polyval(coef_pol_area_1, x_adj_1)
    y_conc_adj_1 = numpy.polyval(coef_pol_conc_1, x_adj_1)

    fig, (ax2, ax1) = plt.subplots(2);

    # Bottom panel: semi-axes + concentration (twin y-axis).
    ax1.set_xlabel('Time (s)')
    ax1.set_ylabel('Semi-axes (mm)')
    ax1.grid(True)

    ax1.plot(data_abs, data_1,"bo", label='width');
    ax1.plot(data_abs,data_2, "ro", label='height');
    ax1.plot(x_adj_1, y_w_adj_1, color='darkmagenta', label='Adjusted polynomio w');
    ax1.plot(x_adj_1, y_h_adj_1, color='g', label='Adjusted polynomio h');

    ax1_conc = ax1.twinx()
    ax1_conc.set_ylabel("Relative Concentration (%)")
    ax1_conc.plot(data_abs, data_4, "go", label="concentration")
    ax1_conc.plot(x_adj_1, y_conc_adj_1, color='m', label='Adjusted polynomio C');

    ax1.legend(loc="upper right")

    # Top panel: surface area + volume (twin y-axis).
    ax2.set_xlabel('Time (s)')
    ax2.set_ylabel('Surface ($mm^2$)')
    ax2.grid(True)

    ax2.plot(data_abs,data_3, "mo", label='Surface');
    ax2.plot(x_adj_1, y_area_adj_1, color='k', label='Adjusted polynomio S');

    ax2_vol = ax2.twinx()
    ax2_vol.set_ylabel("Volume (\u03bcL)")
    ax2_vol.plot(data_abs, data_5, "o", color="gray", label="volume")

    # Merge the legends of the twinned axes into one box.
    handles1, labels1 = ax2.get_legend_handles_labels()
    handles2, labels2 = ax2_vol.get_legend_handles_labels()
    all_handles = handles1 + handles2
    all_labels = labels1 + labels2

    ax2.legend(all_handles, all_labels, loc="best")

    plt.tight_layout()

    plt.savefig(name_file)
    # FIX: close the figure -- every call created a fresh figure via
    # plt.subplots(), and the originals were never released, leaking memory
    # when plotting many frames.
    plt.close(fig)
|
+
|
592
|
+
def plot_data_adjus(data_abs, coef_pol_w_1, coef_pol_h_1, coef_pol_area_1, name_file):
    """Plot only the fitted polynomials (semi-axes and surface) and save to *name_file*.

    Parameters
    ----------
    data_abs : time axis (s) used to build the evaluation grid
    coef_pol_w_1, coef_pol_h_1, coef_pol_area_1 : numpy.polyfit coefficients
    name_file : output image path passed to savefig
    """

    plt.clf()
    x_adj_1 = numpy.linspace(min(data_abs), max(data_abs), len(data_abs))
    y_w_adj_1 = numpy.polyval(coef_pol_w_1, x_adj_1)
    y_h_adj_1 = numpy.polyval(coef_pol_h_1, x_adj_1)
    y_area_adj_1 = numpy.polyval(coef_pol_area_1, x_adj_1)

    fig, (ax2, ax1) = plt.subplots(2);

    ax1.set_xlabel('Time (s)')
    ax1.set_ylabel('Semi-axes (mm)')
    ax1.grid(True)

    ax1.plot(x_adj_1, y_w_adj_1, color='darkmagenta', label='Adjusted polynomio w1');
    ax1.plot(x_adj_1, y_h_adj_1, color='g', label='Adjusted polynomio h1');

    ax1.legend(loc="upper right")

    ax2.set_xlabel('Time (s)')
    ax2.set_ylabel('Surface ($mm^2$)')
    ax2.grid(True)

    ax2.plot(x_adj_1, y_area_adj_1, color='k', label='Adjusted polynomio area 1');

    ax2.legend(loc="upper right")

    plt.legend()
    # FIX: removed the original plt.show() -- the module selects the
    # non-interactive Agg backend at import time, so show() displayed nothing,
    # and the sibling plot_data keeps it commented out for the same reason.

    plt.savefig(name_file)
    # FIX: close the figure to avoid accumulating one leaked figure per call.
    plt.close(fig)
|
630
|
+
def concatene_files_scat_back(set_file_1, set_file_2):
    """Pair each sample EDF file with a background EDF file of similar droplet
    size and write the pairing list to data/FINAL_data_scat_back.lis.

    Matching strategy:
      1. First pass: accept a background whose small AND big droplet areas are
         both within 15% of the sample's areas.
      2. Fallback pass: samples still unmatched get the background whose
         average area is closest.

    assumes each ``info_files_edf`` entry is a dict with keys 'file',
    'area_small', 'area_big' -- TODO confirm against
    conc_scat_video.read_setfiles_edf.
    """

    list_scat_back = numpy.empty((len(set_file_1.info_files_edf) , 2), dtype=object) ;
    list_back_size_avg_drop = numpy.empty((len(set_file_2.info_files_edf) , 2), dtype=object) ;

    # Pass 1: area-tolerance matching.
    i_file = 0; factor = 0.15;
    temp_info_files_edf = [];  # samples without an acceptable background yet
    for _date_1 in set_file_1.info_files_edf:
        find_back_drop = False;
        for _date_2 in set_file_2.info_files_edf:
            if (abs(_date_1['area_small'] - _date_2['area_small']) < (_date_1['area_small'] * factor)) and (abs(_date_1['area_big'] - _date_2['area_big']) < (_date_1['area_big'] * factor) ):
                list_scat_back[i_file,0] = _date_1['file'];
                list_scat_back[i_file,1] = _date_2['file'];
                find_back_drop = True;
                break;
        if not find_back_drop:
            temp_info_files_edf.append(set_file_1.info_files_edf[i_file]);
            list_scat_back[i_file,0] = _date_1['file'];
        i_file = i_file + 1;

    # Average droplet area per background file, used by the fallback pass.
    i_file = 0;
    for _date_2 in set_file_2.info_files_edf:
        list_back_size_avg_drop[i_file,0] = _date_2['file'];
        list_back_size_avg_drop[i_file,1] = (_date_2['area_big'] + _date_2['area_small']) / 2. ;
        i_file = i_file + 1;

    # Pass 2: nearest-average-area background for the leftover samples.
    # NOTE(review): this block raises ValueError (empty max()) when every
    # sample matched in pass 1 -- presumably the data always leaves leftovers;
    # confirm before relying on it.
    factor = 0.15;
    max_area = max(_date_1["area_big"] for _date_1 in temp_info_files_edf)
    temp_name_file = numpy.array(list_scat_back[:,0]);
    for _date_1 in temp_info_files_edf:
        area_avg_1 = (_date_1['area_big'] + _date_1['area_small']) / 2. ;

        min_diff = max_area;

        for i_file_back in range(0, len(list_back_size_avg_drop)):
            if abs(area_avg_1 - float(list_back_size_avg_drop[i_file_back,1])) <= (min_diff):
                min_diff = abs(area_avg_1 - float(list_back_size_avg_drop[i_file_back,1] ) );
                result = numpy.where(temp_name_file == _date_1['file']);
                i_file= result[0].tolist()
                list_scat_back[i_file,1] = list_back_size_avg_drop[i_file_back,0]

    # Write the pairing list (.RAD/.RDS names derived from the .edf names).
    i_files = 0;
    path_ = Path('data/');
    path_ = Path(path_);
    path_ = path_.resolve();
    path_ = os.path.normpath(path_);
    # FIX: the original called os.makedirs(os.path.dirname(path_)), which
    # creates only the *parent* of data/ -- the open() below then failed with
    # FileNotFoundError whenever data/ itself did not exist yet.
    os.makedirs(path_, exist_ok=True)

    with open(os.path.join(path_, 'FINAL_data_scat_back.lis'), 'w') as w_file:
        w_file.write('FAKELD.RAD\n');
        # Header line: first sample's stem up to '_0_' plus the fixed RDN suffix.
        pos = list_scat_back[i_files,0].find('_0_');
        str_ = list_scat_back[i_files,0];
        str_ = str_[0:pos];
        str_ = str_ +'_0_00002.RDN';
        w_file.write(str_ + '\n');

        # NOTE(review): loop variable `row` is unused; indexing relies on
        # i_files, which only advances for non-None entries. Preserved as-is.
        for row in range(0, len(list_scat_back)):
            if list_scat_back[i_files,0] is not None:
                w_file.write('1\n');
                list_scat_back[i_files,0] = list_scat_back[i_files,0].replace('.edf', '.RAD')
                list_scat_back[i_files,1] = list_scat_back[i_files,1].replace('.edf', '.RAD')
                str_ = f"{list_scat_back[i_files,0]}"
                w_file.write(str_ + '\n');
                str_ = f"{list_scat_back[i_files,1]}"
                w_file.write(str_ + '\n');
                w_file.write('1.00000000\n');
                list_scat_back[i_files,0] = list_scat_back[i_files,0].replace('.RAD', '.RDS');
                str_ = f"{list_scat_back[i_files,0]}"
                w_file.write(str_ + '\n');
                i_files = i_files + 1;
705
|
+
def calcule_concentration(edge_1, edge_2, Co, Vo):
    """Concentration after a volume change: C = Co * Vo / V_drop.

    V_drop is the spheroid volume for semi-axes (edge_1, edge_2) divided by
    1000 (unit conversion). Co and Vo are the initial concentration and volume.
    """
    scaled_volume = calcule_vol_spheroide(edge_1, edge_2) / 1000.
    return Co * Vo / scaled_volume
+
|
713
|
+
def calcule_vol_spheroide(edge_1, edge_2):
    """Volume of a spheroid with equatorial semi-axis *edge_1* and polar
    semi-axis *edge_2*: V = (4/3) * pi * edge_1**2 * edge_2.

    The original special-cased edge_1 == edge_2 (a sphere), but that branch
    evaluated to the same value the general formula gives, so the duplicate
    branch was removed.
    """
    return (4 / 3) * np.pi * (edge_1**2) * edge_2
|
+
|
727
|
+
|
728
|
+
def choose_funtion():
    """Interactively prompt for one of the four program modes.

    Prints the menu, then loops until the user types an integer between 1 and
    4, and returns that integer.
    """
    options = {
        1: "Option 1: Analysis droplet parameters",
        2: "Option 2: Sort edf files by time",
        3: "Option 3: Sort edf files by size drop",
        4: "Option 4: Concatene edf samples and background files"
    }

    print("\nChoose one of the following options:\n")
    for key, description in options.items():
        print(f"{key}: {description}")

    while True:
        try:
            choice = int(input("\nEnter the number of your choice (1-4): "))
        except ValueError:
            print("Invalid input. Please enter a number.")
            continue
        if choice in options:
            print(f"\nYou chose, {options[choice]}")
            return choice
        print("Invalid choice. Please enter a number between 1 and 4.")
+
|
753
|
+
def cv_imshow_safe(img_name, img):
    """Show the image if HighGUI is available; otherwise silently skip it."""
    try:
        cv2.imshow(img_name, img)
        cv2.waitKey(1)
    except cv2.error:
        pass # headless build: no window support
761
|
+
def cv_destroy_all_windows_safe():
    """Close all OpenCV windows if supported; otherwise silently skip it."""
    try:
        cv2.destroyAllWindows()
    except cv2.error:
        pass # headless build: no window support
+
|
769
|
+
def get_dir_paths( **kwargs):
    """Ask the user (Tk file dialogs and stdin) for the working locations.

    Keyword Args:
        get_file_path (bool): when True, also ask for a file holding the
            fitted polynomial ("polynomio") data.
        get_file_root (bool): when True, also ask for the root name used to
            build output file names (dots are stripped from it).

    Returns:
        tuple: ``(directory, root_file, file_path)`` as normalized path
        strings; ``root_file`` / ``file_path`` stay ``''`` when not requested.
    """
    # Create a hidden Tkinter window
    root = tk.Tk()
    root.withdraw() # Hide the main window

    # Prompt the user to choose the directory; loop until one is selected.
    print('Choose Directory with EDF Images');
    directory = filedialog.askdirectory(title="\nChoose the directory to read edf files: ")

    while not directory:
        print("\nNo directory selected. Try again.")
        directory = filedialog.askdirectory(title="\nChoose the directory to read edf files: ")

    directory = Path(directory);
    directory = os.path.normpath(directory);

    get_file_path = False;
    file_path= '';
    if 'get_file_path' in kwargs:
        get_file_path = kwargs['get_file_path'];
    if get_file_path:
        print('Choose file with polynomio data');
        file_path = filedialog.askopenfilename(title="\nSelect a file with polynomio data: ")
        while not file_path:
            print("\nNo file name provided. Try again.")
            file_path = filedialog.askopenfilename(title="\nSelect a file with polynomio data: ")
        file_path = Path(file_path);
        file_path = file_path.resolve();
        file_path = os.path.normpath(file_path);

    get_file_root = False;
    root_file= '';
    if 'get_file_root' in kwargs:
        get_file_root = kwargs['get_file_root'];
    if get_file_root:
        root_file = str(input("\nType root output file name: "))
        # Dots are removed so the root cannot be mistaken for a file extension.
        root_file = root_file.replace('.', '')
        while not root_file:
            print("\n No file name provided. Try again.")
            root_file = str(input("\nType root output file name: "))
            root_file = root_file.replace('.', '')

    return directory, root_file, file_path
|
813
|
+
|
814
|
+
|
815
|
+
|
816
|
+
def get_info_video():
    """Collect the video-analysis parameters interactively from the user.

    Asks seven questions (frame step, crop rectangle in X/Y, pixel-per-mm
    scale and maximum analysis time) and validates that each answer is an
    integer greater than zero.

    Returns:
        numpy.ndarray: the seven answers, in question order, as ints.
    """
    questions = [
        "Type the interval between frames to get the drop size: ",
        "Type the START pixel value, in the image in X, to select the drop region: ",
        "Type the END pixel value, in the image in X, to select the drop region: ",
        "Type the START pixel value, in the image in Y, to select the drop region: ",
        "Type the END pixel value, in the image in Y, to select the drop region: ",
        "Type the value of pixel by millimeters: ",
        "Type the maximum video analysis time (s): ",
    ]

    # Answers are kept both keyed by question text and as a flat int array.
    answers = {}
    answers_out = numpy.zeros(shape=(7), dtype=int)

    print("\n Answer the following questions with integers greater than 0:")

    for idx, question in enumerate(questions):
        while True:
            try:
                value = int(input(f"\n{idx + 1}. {question} "))
            except ValueError:
                print("\nInvalid input. Please enter an integer.")
                continue
            if value > 0:
                answers[question] = value
                answers_out[idx] = value
                break
            print("\nThe answer must be an integer greater than 0. Please try again.")

    return answers_out
|
852
|
+
|
853
|
+
def get_video_file():
    """List video files with a user-given extension in the CWD and let the user pick one.

    Returns:
        str | None: the chosen file name, or ``None`` when no file with that
        extension exists (the final ``print`` falls through without a return).
    """
    file_extension = input("\nType the video file format (.avi .mp4 .mov .mkv .wmv .flv .mpg .mpeg .3gp .ogv .webm): ").strip()

    # Accept the extension with or without the leading dot.
    if not file_extension.startswith('.'):
        file_extension = '.' + file_extension

    # Get the list of files in the current directory
    current_directory = os.getcwd()
    files_in_directory = os.listdir(current_directory)

    # Filter files by the given extension
    filtered_files = [file for file in files_in_directory if file.endswith(file_extension)]

    # Creates a dictionary where the key is a number and the value is the file name
    files_dict = {i+1: file for i, file in enumerate(filtered_files)}

    # Print the list of filtered files with numbers
    if files_dict:
        print(f"\nFiles with the extension '{file_extension}' in the directory'{current_directory}'\n:")
        for number, file in files_dict.items():
            print(f"{number}: {file}")

        # Prompt the user to choose a file; loop until a listed number is typed.
        while True:
            try:
                choice = int(input("\nEnter the desired file number: "))
                if choice in files_dict:
                    # NOTE(review): message below is Portuguese for "You chose the file".
                    print(f"Você escolheu o arquivo: {files_dict[choice]}")
                    return files_dict[choice]
                else:
                    print("\nInvalid number. Please enter a number from the list.")
            except ValueError:
                print("\nInvalid input. Please enter a number.")
    else:
        print(f"\nThere are no files with the extension '{file_extension}' in the directory '{current_directory}'.")
|
889
|
+
|
890
|
+
|
891
|
+
class conc_scat_video:
    """Drop-evaporation video / EDF-frame analysis.

    Extracts the drop size over time from a video, fits 12th-order
    polynomials to width / height / surface / relative concentration, and
    can map EDF scattering frames onto those fitted curves.

    NOTE(review): this block was recovered from a whitespace-mangled source;
    the nesting of a few statements is a best-effort reconstruction and
    should be verified against the original repository.
    """

    def __init__(self, option, **kwargs):
        """Build the analysis configuration.

        option 1 / 2: read the ``*_1`` / ``*_2`` suffixed keys from the text
            file given as ``kwargs['file_in']``.
        option 3: take the parameters directly from kwargs
            (file_video, Co, px_mm, step, time_limit, retangulo, print_pdf).
        option 4 / 5: EDF post-processing setup (Co, path, root_name);
            option 5 additionally loads polynomial coefficients from
            ``kwargs['input_file']`` via read_file_video().
        """
        if 'file_in' in kwargs:
            file_in = kwargs['file_in'];

        if option == 1:

            f_open = open(file_in, 'r');
            text = f_open.readlines();

            for line in text:
                if line.find('file_video_1:') != -1:
                    # Keep text after ':' up to a 4-char extension after the first '.'.
                    file_video = str(line[line.index(':')+1:line.index('.')+5]);
                    self.file_video = file_video.translate({ord(c): None for c in string.whitespace})
                    self.name_file = os.path.basename(self.file_video);
                    self.name_file = self.name_file[0:self.name_file.index('.')];
                    self.root_file = self.name_file;
                if line.find('Co_1:') != -1:
                    self.Co = (float(str(line[line.index(':')+1:])));
                if line.find('step_1:') != -1:
                    self.step = round(float(str(line[line.index(':')+1:])));
                if line.find('start pixel x_1:') != -1:
                    self.start_x = round(float(str(line[line.index(':')+1:])));
                if line.find('end pixel x_1:') != -1:
                    self.end_x = round(float(str(line[line.index(':')+1:])));
                if line.find('start pixel y_1:') != -1:
                    self.start_y = round(float(str(line[line.index(':')+1:])));
                if line.find('end pixel y_1:') != -1:
                    self.end_y = round(float(str(line[line.index(':')+1:])));
                if line.find('pixel/mm_1:') != -1:
                    self.px_mm = (float(str(line[line.index(':')+1:])));
                    self.px_mm_inv = 1. / self.px_mm
                if line.find('directory_path_1:') != -1:
                    self.path = str(line[line.index(':')+1:]);
                    self.path = self.path.translate({ord(c): None for c in string.whitespace})
                    self.path = Path(self.path);
                    self.path = self.path.resolve();
                    self.path = os.path.normpath(self.path);
                if line.find('time_limit_1:') != -1:
                    self.time_limit = float(str(line[line.index(':')+1:]));
                if line.find('print_pdf_1:') != -1:
                    temp = str(line[line.index(':')+1:])
                    temp = temp.strip()
                    temp = temp.lower()
                    if temp == "y" or temp == "yes" :
                        self.print_pdf = True
                    else:
                        self.print_pdf = False
            f_open.close();

        elif option == 2:

            f_open = open(file_in, 'r');
            text = f_open.readlines();

            for line in text:
                if line.find('file_video_2:') != -1:
                    file_video = str(line[line.index(':')+1:line.index('.')+5]);
                    self.file_video = file_video.translate({ord(c): None for c in string.whitespace})
                    self.name_file = os.path.basename(self.file_video);
                    self.name_file = self.name_file[0:self.name_file.index('.')];
                    self.root_file = self.name_file;
                if line.find('Co_2:') != -1:
                    self.Co = (float(str(line[line.index(':')+1:])));
                if line.find('step_2:') != -1:
                    self.step = round(float(str(line[line.index(':')+1:])));
                if line.find('start pixel x_2:') != -1:
                    self.start_x = round(float(str(line[line.index(':')+1:])));
                if line.find('end pixel x_2:') != -1:
                    self.end_x = round(float(str(line[line.index(':')+1:])));
                if line.find('start pixel y_2:') != -1:
                    self.start_y = round(float(str(line[line.index(':')+1:])));
                if line.find('end pixel y_2:') != -1:
                    self.end_y = round(float(str(line[line.index(':')+1:])));
                if line.find('pixel/mm_2:') != -1:
                    self.px_mm = (float(str(line[line.index(':')+1:])));
                    self.px_mm_inv = 1. / self.px_mm
                if line.find('directory_path_2:') != -1:
                    self.path = str(line[line.index(':')+1:]);
                    self.path = self.path.translate({ord(c): None for c in string.whitespace})
                    self.path = Path(self.path);
                    self.path = self.path.resolve();
                    self.path = os.path.normpath(self.path);
                if line.find('time_limit_2:') != -1:
                    self.time_limit = float(str(line[line.index(':')+1:]));
                if line.find('print_pdf_2:') != -1:
                    temp = str(line[line.index(':')+1:])
                    temp = temp.strip()
                    temp = temp.lower()
                    if temp == "y" or temp == "yes" :
                        self.print_pdf = True
                    else:
                        self.print_pdf = False

            f_open.close();
            # Fail fast when the configured video file is missing.
            if hasattr(self, 'file_video') and self.file_video is not None:
                if not os.path.exists(self.file_video):
                    # NOTE(review): message is missing its leading "V" in the source.
                    print("ideo File 2 not found or not loaded:", self.file_video)
                    exit(0);
            else:
                print("Video File 2 not found or not loaded.")
                exit(0);

        elif option == 3:

            if 'file_video' in kwargs:
                self.file_video = kwargs['file_video'];
                self.name_file = os.path.basename(self.file_video);
                self.name_file = self.name_file[0:self.name_file.index('.')];
            if 'Co' in kwargs:
                self.Co = float(kwargs['Co']);
            if 'px_mm' in kwargs:
                self.px_mm = float(kwargs['px_mm']);
                self.px_mm_inv = 1. / self.px_mm
            if 'step' in kwargs:
                self.step = kwargs['step'];
            if 'time_limit' in kwargs:
                self.time_limit = kwargs['time_limit'];
            if 'retangulo' in kwargs:
                # crop rectangle as (start_x, end_x, start_y, end_y)
                ret = kwargs['retangulo'];
                self.start_x = ret[0];
                self.end_x = ret[1];
                self.start_y = ret[2];
                self.end_y= ret[3];
            if 'print_pdf' in kwargs:
                self.print_pdf = kwargs['print_pdf'];
            else:
                self.print_pdf = False;

        elif option == 4 or option == 5:
            if 'Co' in kwargs:
                self.Co = float(kwargs['Co']);
            if 'path' in kwargs:
                self.path = Path(kwargs['path']);
                self.path = self.path.resolve();
                self.path = os.path.normpath(self.path);
            if 'root_name' in kwargs:
                self.root_file = kwargs['root_name'];
            if 'input_file' in kwargs and option == 5:
                input_file = kwargs['input_file'];
                self.coef_pol_w, self.coef_pol_h, self.coef_pol_area, self.coef_pol_conc, _ = read_file_video(input_file);


    def print_frames_pdf(self, path_dir_imgs, file_data_imgs):
        """Collect the per-frame JPEGs listed in *file_data_imgs* into a PDF.

        Each line of *file_data_imgs* is "<image path> <rest of line>".
        Images are laid out in a 4x5 grid per A4 page and deleted from disk
        after being embedded.
        """
        with open(file_data_imgs, mode='r') as file_data:
            lines = file_data.readlines();
            list_data_imgs= [["" for _ in range(2)] for _ in range(len(lines))]
            i_row = 0;
            for row in lines:
                # Split each line at the first space: [image path, remainder].
                index = row.find(' ');
                list_data_imgs[i_row][0] = row[0:index];
                list_data_imgs[i_row][1] = row[index+1:];
                i_row = i_row + 1;

        # NOTE(review): n_col / n_row / n_pages are never used below.
        n_col = 4;
        n_row = 5;
        n_pages = 5;

        pdf_path = self.name_file+"_resume_imgs.pdf";
        pdf_path = os.path.join(path_dir_imgs, pdf_path);
        # Create pdf file
        pdf = canvas.Canvas(pdf_path, pagesize=A4)
        page_width, page_height = A4

        margin = 30
        available_width = page_width - 2 * margin
        available_height = page_height - 2 * margin
        cell_width = available_width / 4
        cell_height = available_height / 5

        # Iterate over the images and add them to the PDF.
        num_imagens = len(list_data_imgs)
        num_paginas = (num_imagens + 19) // 20 # 20 images per page (4x5)

        for pagina in range(num_paginas):

            # Add a new page.
            # NOTE(review): showPage() before any drawing leaves the first page blank.
            pdf.showPage()

            # Starting position of the grid on this page.
            y_inicio = page_height - margin
            x_inicio = margin

            # Draw the table (4 columns x 5 rows)
            for linha in range(5):
                for coluna in range(4):
                    indice_imagem = pagina * 20 + linha * 4 + coluna
                    if indice_imagem < num_imagens:
                        x = x_inicio + coluna * cell_width
                        y = y_inicio - linha * cell_height - cell_height
                        caminho_imagem = list_data_imgs[indice_imagem][0]
                        imagem = Image.open(caminho_imagem)
                        largura_imagem, altura_imagem = imagem.size
                        # Fit the image into its cell preserving aspect ratio.
                        proporcao = largura_imagem / altura_imagem
                        largura_final = cell_width
                        altura_final = largura_final / proporcao
                        if altura_final > cell_height:
                            altura_final = cell_height
                            largura_final = altura_final * proporcao
                        pdf.drawImage(caminho_imagem, x, y, largura_final, altura_final)
                        try:
                            # Release the PIL handle, then remove the JPEG from disk.
                            del imagem
                            os.remove(caminho_imagem)
                        except FileNotFoundError:
                            print(f"O arquivo não foi encontrado: {caminho_imagem}")

        pdf.save()


    def read_setfiles_edf(self, **kwargs):
        """Read every .edf file in ``self.path`` and annotate it with drop data.

        Files are sorted by their header 'Date'. For each file a start/end
        time relative to the first acquisition is computed; unless
        option == 1, drop size / area / concentration are estimated from the
        previously fitted polynomial coefficients. When option is 0 or 1 the
        result is also written to a CSV via save_data_edf().

        Keyword Args:
            option (int): 0 -> size-sorted CSV, 1 -> time CSV with zeroed
                size columns, anything else (default 2) -> no CSV output.
        """
        path_dir_imgs = Path('data/');
        path_dir_imgs = path_dir_imgs.resolve();
        path_dir_imgs = os.path.normpath(path_dir_imgs);

        # Create the output directory; ignore "already exists".
        try :
            os.makedirs(path_dir_imgs);
        except :
            pass;

        if 'option' in kwargs:
            option = kwargs['option'];
        else:
            option = 2;

        # name_file is only defined (and later used) for option 0 or 1.
        if option == 0:
            name_file = self.root_file+'_EDF_data_Size_Drop.csv'
        elif option == 1:
            name_file = self.root_file+'_EDF_data_Time.csv'

        _files = os.listdir(self.path);
        format_time = '%Y-%m-%d %H:%M:%S'

        # Filter only files
        self._files_edf = [item for item in _files if os.path.isfile(os.path.join(self.path, item)) and item.lower().endswith('.edf')]

        self.info_files_edf = [];
        info_files_edf = [];

        i_file = 0;  # NOTE(review): unused counter.
        for _file in self._files_edf:

            # Read only the EDF header: acquisition date and exposure time.
            edf_img = EdfImage().read(os.path.join(self.path, _file))
            header = edf_img.header;

            data = {'file': _file, 'date': datetime.strptime(header['Date'],format_time), 'time': header['ExposureTime'], 'start_time': 0 , 'end_time': 0, 'area_small': 0 ,
                    'area_big': 0, 'area':0, 'concentration':0, 'dropDX': 0, 'dropDY': 0}
            info_files_edf.append(data)

        # Chronological order by acquisition date.
        self.info_files_edf = sorted(info_files_edf, key=lambda x: x['date'])

        date_time_0 = self.info_files_edf[0]['date'];
        for _date in self.info_files_edf:
            # Times relative to the first acquisition; end adds the exposure.
            diff = _date['date'] - date_time_0;
            _date['start_time'] = diff.total_seconds();
            _date['end_time'] = diff.total_seconds() + float(_date['time']);
            if option == 1:
                _date['area_small'] = 0;
                _date['area_big'] = 0;
                _date['dropDX'] = 0.;
                _date['dropDY'] = 0;
            else:
                # Evaluate the fitted polynomials at start/end and average.
                _date['area_small'] = numpy.polyval(self.coef_pol_area, _date['start_time']);
                _date['area_big'] = numpy.polyval(self.coef_pol_area, _date['end_time']);
                _date['area'] = abs(_date['area_big'] + _date['area_small'])/ 2.;
                _date['concentration'] = abs(numpy.polyval(self.coef_pol_conc, _date['start_time']) + numpy.polyval(self.coef_pol_conc, _date['end_time']) )/ 2.;
                _date['dropDX'] = abs(numpy.polyval(self.coef_pol_w, _date['start_time']) + numpy.polyval(self.coef_pol_w, _date['end_time']) )/ 2.;
                _date['dropDY'] = abs(numpy.polyval(self.coef_pol_h, _date['start_time']) + numpy.polyval(self.coef_pol_h, _date['end_time'])) / 2.;

        if option == 0 or option == 1:
            save_data_edf(self.info_files_edf, os.path.join(path_dir_imgs, name_file), option);


    def read_video(self):
        """Measure the drop size frame-by-frame and fit the evolution curves.

        Every ``self.step``-th frame (up to ``self.time_limit`` seconds) is
        cropped to the drop region, blurred, edge-detected, and the bounding
        box of all contours taken as the drop size. The crop window shrinks
        adaptively as the drop evaporates, and small drops are upscaled
        (factor 12) before analysis. Results are saved as CSV + PNG plot,
        and optionally each annotated frame goes into a resume PDF.

        Returns:
            str: path of the last output file written (the sizes PNG).

        NOTE(review): the loop is highly stateful — ``width``/``height``/
        ``x_start``/``y_start`` carry over between iterations — so the
        statement order below must not be changed casually.
        """
        # Video modification time anchors the absolute timestamps per frame.
        self.video_m = os.path.getmtime(self.file_video);

        # read video
        video = cv2.VideoCapture(self.file_video)
        # Supported containers: .avi .mp4 .mov .mkv .wmv .flv .mpg .mpeg .3gp .ogv .webm

        if video.isOpened():
            rval , frame = video.read()
        else:
            rval = False

        # get Information about video
        fps = video.get(cv2.CAP_PROP_FPS);
        total_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
        max_frames = total_frames;

        # Number of sampled frames; a time limit of 0 means "whole video".
        size_data = int(total_frames / self.step) + 1;
        if self.time_limit != 0:
            size_data = int(self.time_limit*fps / self.step) + 1;
        else:
            self.time_limit = total_frames / fps;

        # set data matrix store data
        new_size_data = size_data;
        data_time_size = numpy.zeros(shape=(size_data, 9), dtype = float );
        temp_size_window = numpy.zeros(shape=(size_data, 2), dtype = float );

        frame_count = 0
        saved_frame_count = 0
        data_i = 0;

        # set name and directory output files
        path_dir_imgs = "data/";
        path_dir_imgs = Path(path_dir_imgs);
        path_dir_imgs = path_dir_imgs.resolve();
        path_dir_imgs = os.path.normpath(path_dir_imgs);

        file_image_str = os.path.join(path_dir_imgs, self.name_file+"_data_images.dat")
        os.makedirs(os.path.dirname(file_image_str), exist_ok=True)
        file_data_imgs = open(file_image_str, "w", encoding='utf-8');

        # Save the first frame as a reference snapshot.
        has_frame, frame = video.read();
        file_img = os.path.join(path_dir_imgs,self.name_file+'_sample_frame.jpg');
        cv2.imwrite(file_img,frame);

        # crop image to restrict background
        new_start_x = self.start_x;
        new_start_y = self.start_y;
        new_end_x = self.end_x;
        new_end_y = self.end_y;
        ref_width = abs(self.end_x - self.start_x);
        ref_height = abs(self.end_y - self.start_y);

        amplie = False;
        factor = 1;
        start_time = timelib.time()

        while has_frame: # iterate frames until the end of the video

            if (frame_count / fps) > self.time_limit: break;
            img_h, img_w = frame.shape[:2];

            if frame_count % self.step == 0:
                time = frame_count / fps;
                time_str = f"{time:.4f}";
                time_str = time_str.replace('.', '_')

                if data_i >= 1:
                    # Shrink the crop window to reduce background noise once
                    # a previous bounding box (width/height/x_start/y_start)
                    # is available from the last processed frame.
                    area = abs(new_end_x - new_start_x) * abs(new_end_y - new_start_y);
                    x_start = self.start_x + abs(self.start_x - new_start_x) + (x_start/factor);
                    y_start = self.start_y + abs(self.start_y - new_start_y) + (y_start/factor);
                    x_center = int((x_start + (x_start + width))/ 2.)
                    y_center = int((y_start + (y_start + height))/ 2.)

                    # When the drop got small relative to the crop, re-center
                    # the window on a moving average of recent sizes.
                    if ( (width * height) < 0.2 * area or (ref_width - width) < 0.15 * ref_width or (ref_height - height) < 0.15 * ref_height ):
                        if data_i > 200:
                            window = 200;
                        else:
                            window = data_i;

                        _w = temp_size_window[data_i-window:data_i,0]; _h = temp_size_window[data_i-window:data_i,1];
                        avg_w = numpy.mean(_w) ;
                        avg_h = numpy.mean(_h) ;
                        # Never let the window fall below 15% of the user crop.
                        if avg_w < 0.15* abs(self.end_x - self.start_x): avg_w = 0.15 * abs(self.end_x - self.start_x);
                        if avg_h < 0.15* abs(self.end_y - self.start_y): avg_h =0.15 *abs(self.end_y - self.start_y);
                        factor_exp = 0.15;
                        new_start_x = int(( x_center - avg_w/2) - (factor_exp * avg_w));
                        new_end_x = int(( x_center + avg_w/2) + (factor_exp * avg_w));
                        if new_start_x < self.start_x: new_start_x = self.start_x;
                        if new_end_x > self.end_x: new_end_x = self.end_x;
                        ref_width = abs(new_end_x - new_start_x);
                        new_start_y = int(( y_center - avg_h/2) - (factor_exp * avg_h));
                        new_end_y = int(( y_center + avg_h/2) + (factor_exp * avg_h));
                        if new_start_y < self.start_y: new_start_y = self.start_y;
                        if new_end_y > self.end_y: new_end_y = self.end_y;
                        ref_height = abs(new_end_y - new_start_y);
                        amplie = True;

                # crop image
                imagem = frame[new_start_y:new_end_y, new_start_x:new_end_x];

                img_h, img_w = imagem.shape[:2];
                if data_i >= 1 or amplie:
                    # NOTE(review): nesting of the resize under this width
                    # test is reconstructed — confirm against the repo.
                    if (width ) < 0.7:
                        factor = 12;
                        new_w = int(img_w * factor)
                        new_h = int(img_h * factor)

                        imagem = cv2.resize(imagem, (new_w, new_h), interpolation=cv2.INTER_LINEAR)

                # Morphological analysis: grayscale -> blur -> Otsu threshold,
                # then a morphological gradient to highlight the drop edge.
                imagem_cinza = cv2.cvtColor(imagem, cv2.COLOR_BGR2GRAY)

                imagem_suavizada = cv2.GaussianBlur(imagem_cinza, (7, 7), 0);

                # Apply thresholding to segment the figure
                ret, imagem_binaria = cv2.threshold(imagem_suavizada, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU);

                # Apply morphological operations to remove noise
                kernel = numpy.ones((3, 3), numpy.uint8)
                # (Alternative ERODE/DILATE/OPEN/CLOSE/TOPHAT/BLACKHAT
                #  experiments removed; only the gradient is used.)
                imagem_binaria5 = cv2.morphologyEx(imagem_suavizada, cv2.MORPH_GRADIENT, kernel, iterations=2)
                # The gradient highlights the edges of objects.

                imagem_binaria = imagem_binaria5
                edges = cv2.Canny(imagem_binaria, 50, 150)

                # Find contours in binary image
                contours, _ = cv2.findContours(edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE);

                # Annotation settings for the optional per-frame overlay.
                text_position = (50, 50)
                font = cv2.FONT_HERSHEY_SIMPLEX
                scale_font = 1
                text_color = (125, 125, 125)  # gray
                line_thickness = 2

                if contours:

                    pontos = numpy.vstack(contours).squeeze() # Combine all contour points into a single list
                    x_start, y_start, width, height = cv2.boundingRect(pontos) # Find the bounding rectangle that groups all contours
                    imagem_temp = imagem.copy()
                    cv2.rectangle(imagem, (x_start, y_start), (x_start + width, y_start + height), (0, 255, 0), 2)
                    # Undo the upscaling so sizes are in original pixels.
                    width = int( width / factor )
                    height = int( height / factor)
                    temp_size_window[data_i][0] = width
                    temp_size_window[data_i][1] = height

                    # save data row: [time_s, semi-w mm, semi-h mm, abs timestamp,
                    #                time_min, frame, surface, rel. conc., volume]
                    data_time_size[data_i][0] = time; # time
                    data_time_size[data_i][1] = (width / 2.) / (self.px_mm ); # width -> semi axes
                    data_time_size[data_i][2] = (height/ 2.) / (self.px_mm); # height -> semi axes
                    if data_i == 0:
                        self.Vo = calcule_vol_spheroide(data_time_size[data_i][1], data_time_size[data_i][2] ) / 1000. # use in mL
                    temp = datetime.fromtimestamp(self.video_m) + timedelta(seconds=time);
                    data_time_size[data_i][3] = temp.timestamp()
                    data_time_size[data_i][4] = data_time_size[data_i][0] / 60.
                    data_time_size[data_i][5] = frame_count;
                    data_time_size[data_i][6] = calcule_surface_spheroide( data_time_size[data_i][2], data_time_size[data_i][1]); # calcule area
                    data_time_size[data_i][7] = calcule_concentration( data_time_size[data_i][2], data_time_size[data_i][1], self.Co, self.Vo) / self.Co # calcule concentration
                    data_time_size[data_i][8] = calcule_vol_spheroide(data_time_size[data_i][2], data_time_size[data_i][1])

                    file_img = os.path.join(path_dir_imgs,self.name_file+'_img_'+str(data_i) + '.jpg');
                    file_data_imgs.write(file_img+" "+"{:7.3f}".format(time)+" s "+"{:8.4e}".format(data_time_size[data_i][6])+" mg/ml \n");
                    if self.print_pdf:
                        cv2.putText(imagem,str(time), text_position, font, scale_font, text_color, line_thickness, cv2.LINE_AA)
                        cv2.imwrite(file_img,imagem);

                    data_i = data_i +1;

            frame_count += 1
            elapsed_time = timelib.time() - start_time;
            print(f"Iteration {frame_count + 1}/{(self.time_limit*fps)}, Elapsed time: {elapsed_time:.2f} seconds", end='\r')

            has_frame, frame = video.read()

        file_data_imgs.close();

        # Drop outliers, then fit 12th-order polynomials to each observable.
        new_data_time_size = delete_value_extrem(data_time_size);
        self.coef_pol_w = numpy.polyfit(new_data_time_size[:, 0],new_data_time_size[:, 1],12);
        self.coef_pol_h = numpy.polyfit(new_data_time_size[:, 0],new_data_time_size[:, 2],12);
        self.coef_pol_area = numpy.polyfit(new_data_time_size[:, 0],new_data_time_size[:, 6],12);
        self.coef_pol_conc = numpy.polyfit(new_data_time_size[:, 0],new_data_time_size[:, 7],12);

        file_out = os.path.join(path_dir_imgs,self.name_file+'_Video_time_size.csv');
        file_out = os.path.normpath(file_out);
        save_data_video(new_data_time_size,self.coef_pol_w, self.coef_pol_h, self.coef_pol_area, self.coef_pol_conc, file_out);

        file_out = os.path.join(path_dir_imgs,self.name_file+'_sizes.png');
        file_out = os.path.normpath(file_out);
        plot_data(new_data_time_size[:, 0],new_data_time_size[:, 1], new_data_time_size[:, 2], new_data_time_size[:, 6], new_data_time_size[:,7], new_data_time_size[:,8], self.coef_pol_w, self.coef_pol_h, self.coef_pol_area, self.coef_pol_conc, file_out);

        video.release();
        cv_destroy_all_windows_safe();

        if self.print_pdf:
            self.print_frames_pdf(path_dir_imgs, file_image_str)

        return file_out
|
1496
|
+
|
1497
|
+
|
1498
|
+
def menu():
    """Print the interactive top-level menu options to stdout."""
    options = (
        "1. Video analysis",
        "2. Frame analysis",
        "3. Join frames and videos",
        "4. Create data treatment list",
        "5. Exit",
    )
    print("\n Options:")
    for option in options:
        print(option)
|
1505
|
+
|
1506
|
+
|
1507
|
+
|
1508
|
+
def draw_square(event, x, y, flags, param, imagem):
    """OpenCV mouse callback: collect two clicked corners and draw the
    rectangle they span onto *imagem*.

    Parameters follow the ``cv2.setMouseCallback`` convention
    (event, x, y, flags, param), with the target image passed explicitly.

    Bug fixes vs. the original:
    - the clicked corners are now kept between callback invocations (the
      original re-created the list on every call, so the second corner was
      lost and the rectangle could never be drawn);
    - *imagem* is no longer clobbered by re-reading a hard-coded
      ``'sample.jpg'`` — the image supplied by the caller is drawn on.
    """
    # Persistent click buffer shared across invocations of this callback.
    vertices = draw_square.__dict__.setdefault('_vertices', [])

    if event == cv2.EVENT_LBUTTONDOWN:
        vertices.append((x, y))

        if len(vertices) == 2:
            # Two corners collected: draw the rectangle and reset the
            # buffer so the next pair of clicks starts a new rectangle.
            cv2.rectangle(imagem, vertices[0], vertices[1], (255, 0, 0), 5)  # blue, thickness 5
            cv_imshow_safe("Imagem", imagem)
            vertices.clear()

    # Echo any corner(s) still pending (at most one after the block above).
    for i, vertice in enumerate(vertices):
        print(f"Vértice {i + 1}: {vertice}")
|
1527
|
+
|
1528
|
+
def save_data_video(data_in, coef_w, coef_h, coef_area, coef_conc, output_file):
    """Write the per-frame droplet measurements and the fitted polynomial
    coefficients to a CSV-style text file.

    Parameters
    ----------
    data_in : numpy.ndarray
        2-D array of per-frame rows.  Columns used here: 0=time(s),
        1=dropDX, 2=dropDY, 3=timestamp (epoch seconds), 4=frame/time(min),
        6=surface, 7=relative concentration, 8=volume.
        (Column meaning inferred from the header written below —
        TODO confirm against the caller that fills this array.)
    coef_w, coef_h, coef_area, coef_conc : sequence of float
        Polynomial coefficients for width, height, area and concentration.
    output_file : str
        Destination path; overwritten if it exists.
    """
    # NOTE: the "Coeficient ..." prefixes are parsed back by
    # read_file_video() via fixed-length slicing — do not reword them.
    # `with` guarantees the handle is closed even if a row fails to format
    # (the original left the file open on exceptions).
    with open(output_file, "w", encoding='utf-8') as file_op:
        file_op.write(f"Coeficient width: {', '.join([f'{i_coef:.7e}' for i_coef in coef_w])}\n")
        file_op.write(f"Coeficient height: {', '.join([f'{i_coef:.7e}' for i_coef in coef_h])}\n")
        file_op.write(f"Coeficient area: {', '.join([f'{i_coef:.7e}' for i_coef in coef_area])}\n")
        file_op.write(f"Coeficient concentration: {', '.join([f'{i_coef:.7e}' for i_coef in coef_conc])}\n")
        file_op.write("Frame,dropDX(mm),dropDY(mm),surface(mm^2),Volume(\u03bcL),RelativeConcentration(%),date,time(s),time(min)\n")

        # Iterate rows directly instead of indexing by range(len(...)).
        for row in data_in:
            file_op.write(
                f"{int(row[4]):>5d}, {row[1]:.2f}, {row[2]:.3e}, {row[6]:.3e}, "
                f"{row[8]:.3e}, {row[7]:.3e}, "
                f"{datetime.fromtimestamp(row[3]).strftime('%Y-%m-%d %H:%M:%S')}, "
                f"{row[0]:.2f}, {row[4]:.2f} \n"
            )
|
1545
|
+
|
1546
|
+
def save_data_edf(data_in, output_file, option):
    """Write EDF frame metadata to a CSV-style text file.

    Parameters
    ----------
    data_in : iterable of dict
        Each dict needs keys ``'file'``, ``'date'`` and ``'start_time'``;
        when ``option == 0`` it must also provide ``'dropDX'``,
        ``'dropDY'``, ``'area'`` and ``'concentration'``.
    output_file : str
        Destination path; overwritten if it exists.
    option : int
        0 -> full measurement row per frame; any other value -> only the
        frame name, date and start time.
    """
    # NOTE(review): with option == 0 the header announces Volume and
    # time(min) columns that the rows below never write — confirm the
    # intended column set with whatever consumes this file.
    # (Removed the unused local `format_time`; `with` replaces the
    # manual open/close so the handle is released on exceptions too.)
    with open(output_file, "w", encoding='utf-8') as file_op:
        if option == 0:
            file_op.write("Frame, dropDX(mm), dropDY(mm), surface(mm^2), Volume(\u03bcL),RelativeConcentration(%), date, time(s), time(min)\n")
        else:
            file_op.write("Frame, date, time(s) \n")

        for i_data in data_in:
            if option == 0:
                str_ = f"{i_data['file']}, {float(i_data['dropDX']):.8e}, {float(i_data['dropDY']):.8e}, {float(i_data['area']):.8e}, {float(i_data['concentration']):.8e}, {i_data['date']}, {float(i_data['start_time']):.2f} \n"
            else:
                str_ = f"{i_data['file']}, {i_data['date']}, {float(i_data['start_time']):.2f} \n"
            file_op.write(str_)
|
1565
|
+
|
1566
|
+
def read_file_video(input_file):
    """Parse a file produced by ``save_data_video``.

    Parameters
    ----------
    input_file : str
        Path to the '_Video_time_size.csv' file.

    Returns
    -------
    tuple
        ``(coef_w, coef_h, coef_area, coef_conc, data)`` where each
        ``coef_*`` is a list of floats (polynomial coefficients) and
        ``data`` is a list of per-frame dicts keyed as written below.
    """
    def _parse_coefs(line, prefix):
        # Coefficients are stored as "<prefix> v1, v2, ..." on one line;
        # the prefix is stripped by fixed length, matching the writer.
        return [float(value.strip()) for value in line.strip()[len(prefix):].split(',')]

    with open(input_file, 'r', encoding='utf-8') as file:
        lines = file.readlines()

    coef_w = _parse_coefs(lines[0], "Coeficient width:")
    coef_h = _parse_coefs(lines[1], "Coeficient height:")
    coef_area = _parse_coefs(lines[2], "Coeficient area:")
    coef_conc = _parse_coefs(lines[3], "Coeficient concentration:")

    # lines[4] is the column header; data rows start at lines[5].
    data = []
    for line in lines[5:]:
        if not line.strip():
            # Tolerate trailing blank lines instead of crashing on unpack.
            continue
        frame, dropDX, dropDY, area, volume, concentration, date_str, time_s, time_min = line.split(', ')
        date_time = datetime.strptime(date_str, '%Y-%m-%d %H:%M:%S')
        # volume and time_min are read for the unpack but intentionally
        # not stored, matching the original record layout.
        data.append({
            'Frame': int(frame),
            'dropDX(mm)': float(dropDX),
            'dropDY(mm)': float(dropDY),
            'area(mm^2)': float(area),
            'concentration(mg/ml)': float(concentration),
            'date': date_time,
            'time(s)': float(time_s.strip())
        })

    return coef_w, coef_h, coef_area, coef_conc, data
|
1604
|
+
|
1605
|
+
|
1606
|
+
def calcule_surface_spheroide(edge_1, edge_2):
    """Return the surface area of a spheroid with semi-axes
    (edge_1, edge_1, edge_2).

    Three closed-form cases: sphere (axes numerically equal), oblate
    spheroid (edge_1 > edge_2) and prolate spheroid (edge_2 > edge_1).
    """
    if np.isclose(edge_1, edge_2):
        # Degenerate case: a plain sphere of radius edge_1.
        return 4. * np.pi * edge_1**2

    equatorial_sq = edge_1 * edge_1

    if edge_1 > edge_2:
        # Oblate (flattened) spheroid: eccentricity of the meridian ellipse.
        e = np.sqrt(1.0 - (edge_2 * edge_2) / equatorial_sq)  # 0 < e < 1
        # atanh(e) expressed via logarithms: 0.5 * ln((1+e)/(1-e)).
        atanh_e = 0.5 * np.log((1.0 + e) / (1.0 - e))
        correction = ((1.0 - e * e) / e) * atanh_e
    else:
        # Prolate (elongated) spheroid: edge_2 is the long polar axis.
        e = np.sqrt(1.0 - equatorial_sq / (edge_2 * edge_2))  # 0 < e < 1
        correction = (edge_2 / (edge_1 * e)) * np.arcsin(e)

    return 2.0 * np.pi * equatorial_sq * (1.0 + correction)
|
1621
|
+
|
1622
|
+
|