q3dviewer 1.1.6__py3-none-any.whl → 1.1.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- q3dviewer/.vscode/c_cpp_properties.json +30 -0
- q3dviewer/.vscode/settings.json +10 -0
- q3dviewer/base_glwidget.py +56 -1
- q3dviewer/custom_items/__init__.py +1 -0
- q3dviewer/custom_items/cloud_io_item.py +16 -1
- q3dviewer/custom_items/cloud_item.py +37 -22
- q3dviewer/custom_items/text3d_item.py +120 -0
- q3dviewer/custom_items/text_item.py +19 -4
- q3dviewer/gau_io.py +0 -168
- q3dviewer/glwidget.py +23 -3
- q3dviewer/shaders/cloud_frag.glsl +1 -1
- q3dviewer/shaders/cloud_vert.glsl +13 -4
- q3dviewer/shaders/gau_frag.glsl +1 -1
- q3dviewer/shaders/gau_prep.glsl +1 -1
- q3dviewer/shaders/gau_vert.glsl +1 -1
- q3dviewer/shaders/sort_by_key.glsl +1 -1
- q3dviewer/test/test_interpolation.py +58 -0
- q3dviewer/test/test_rendering.py +73 -0
- q3dviewer/tools/cinematographer.py +367 -0
- q3dviewer/tools/cloud_viewer.py +79 -3
- q3dviewer/tools/example_viewer.py +8 -28
- q3dviewer/tools/film_maker.py +1 -1
- q3dviewer/tools/lidar_cam_calib.py +10 -11
- q3dviewer/utils/convert_ros_msg.py +49 -6
- q3dviewer/viewer.py +4 -1
- {q3dviewer-1.1.6.dist-info → q3dviewer-1.1.8.dist-info}/METADATA +4 -3
- q3dviewer-1.1.8.dist-info/RECORD +50 -0
- {q3dviewer-1.1.6.dist-info → q3dviewer-1.1.8.dist-info}/WHEEL +1 -1
- q3dviewer/basic_window.py +0 -228
- q3dviewer/cloud_viewer.py +0 -74
- q3dviewer/custom_items/camera_frame_item.py +0 -173
- q3dviewer/custom_items/trajectory_item.py +0 -79
- q3dviewer/utils.py +0 -71
- q3dviewer-1.1.6.dist-info/RECORD +0 -49
- {q3dviewer-1.1.6.dist-info → q3dviewer-1.1.8.dist-info}/LICENSE +0 -0
- {q3dviewer-1.1.6.dist-info → q3dviewer-1.1.8.dist-info}/entry_points.txt +0 -0
- {q3dviewer-1.1.6.dist-info → q3dviewer-1.1.8.dist-info}/top_level.txt +0 -0
q3dviewer/shaders/cloud_vert.glsl CHANGED

@@ -1,9 +1,9 @@
+#version 330 core
 /*
 Copyright 2024 Panasonic Advanced Technology Development Co.,Ltd. (Liu Yang)
 Distributed under MIT license. See LICENSE for more information.
 */
 
-#version 330 core
 
 layout (location = 0) in vec3 position;
 layout (location = 1) in uint value;
@@ -17,7 +17,7 @@ uniform float vmin = 0;
 uniform float vmax = 255;
 uniform float focal = 1000;
 uniform int point_type = 0; // 0 pixel, 1 flat square, 2 sphere
-uniform
+uniform int point_size = 1; // World size for each point (pixel or cm)
 out vec4 color;
 
 vec3 getRainbowColor(uint value_raw) {
@@ -47,9 +47,9 @@ void main()
 
     // Calculate point size in pixels based on distance
     if (point_type == 0)
-        gl_PointSize =
+        gl_PointSize = float(point_size);
     else
-        gl_PointSize = point_size / gl_Position.w * focal;
+        gl_PointSize = (float(point_size) * 0.01) / gl_Position.w * focal;
     vec3 c = vec3(1.0, 1.0, 1.0);
     if (color_mode == 1)
     {
@@ -62,6 +62,15 @@ void main()
         c.y = float((value & uint(0x0000FF00)) >> 8)/255.;
         c.x = float((value & uint(0x00FF0000)) >> 16)/255.;
     }
+    else if(color_mode == 3)
+    {
+        uint intensity = value >> 24;
+        float range = vmax - vmin;
+        float value = 1.0 - (float(intensity) - vmin) / range;
+        c.z = value;
+        c.y = value;
+        c.x = value;
+    }
     else
     {
         c.z = float( uint(flat_rgb) & uint(0x000000FF))/255.;
q3dviewer/shaders/gau_frag.glsl CHANGED

q3dviewer/shaders/gau_prep.glsl CHANGED
@@ -1,10 +1,10 @@
+#version 430 core
 /*
 We proprocess gaussian using compute shader.
 this file is modified from GaussianSplattingViewer licensed under the MIT License.
 see https://github.com/limacv/GaussianSplattingViewer/blob/main/shaders/gau_vert.glsl
 */
 
-#version 430 core
 
 layout(local_size_x = 256, local_size_y = 1, local_size_z = 1) in;
 
q3dviewer/shaders/gau_vert.glsl CHANGED
@@ -1,3 +1,4 @@
+#version 430 core
 /*
 Copyright 2024 Panasonic Advanced Technology Development Co.,Ltd. (Liu Yang)
 Distributed under MIT license. See LICENSE for more information.
@@ -5,7 +6,6 @@ Distributed under MIT license. See LICENSE for more information.
 
 //draw 2d gaussian using proprocess data.
 
-#version 430 core
 
 #define OFFSET_PREP_U 0
 #define OFFSET_PREP_COVINV 3
q3dviewer/shaders/sort_by_key.glsl CHANGED

@@ -1,3 +1,4 @@
+#version 430 core
 /*
 Copyright 2024 Panasonic Advanced Technology Development Co.,Ltd. (Liu Yang)
 Distributed under MIT license. See LICENSE for more information.
@@ -8,7 +9,6 @@ opengl compute shader.
 sort guassian by depth using bitonic sorter
 */
 
-#version 430 core
 
 layout(local_size_x = 256, local_size_y = 1, local_size_z = 1) in;
 
q3dviewer/test/test_interpolation.py ADDED

@@ -0,0 +1,58 @@
+#!/usr/bin/env python3
+
+"""
+Copyright 2024 Panasonic Advanced Technology Development Co.,Ltd. (Liu Yang)
+Distributed under MIT license. See LICENSE for more information.
+"""
+
+"""
+this script tests interpolation of 3D poses.
+"""
+
+
+import numpy as np
+from q3dviewer.utils.maths import expSO3, logSO3, makeT, makeRt
+
+
+def interpolate_pose(T1, T2, v_max, omega_max, dt=0.1):
+    R1, t1 = makeRt(T1)
+    R2, t2 = makeRt(T2)
+
+    # Get transform time based on linear velocity
+    d = np.linalg.norm(t2 - t1)
+    t_lin = d / v_max
+
+    # Get transform time based on angular velocity
+    omega = logSO3(R2 @ R1.T)
+    theta = np.linalg.norm(omega)
+    t_ang = theta / omega_max
+
+    # Get total time based on the linear and angular time
+    t_total = max(t_lin, t_ang)
+    num_steps = int(np.ceil(t_total / dt))
+
+    # Generate interpolated transforms
+    interpolated_Ts = []
+    for i in range(num_steps + 1):
+        s = i / num_steps
+        t_interp = (1 - s) * t1 + s * t2
+        # Interpolate rotation using SO3
+        R_interp = expSO3(s * omega) @ R1
+        T_interp = makeT(R_interp, t_interp)
+        interpolated_Ts.append(T_interp)
+
+    return interpolated_Ts
+
+if __name__ == "__main__":
+    T1 = np.eye(4)  # Identity transformation
+    T2 = np.array([[0, -1, 0, 1], [1, 0, 0, 2], [0, 0, 1, 3], [0, 0, 0, 1]])  # Target transformation
+
+    v_max = 1.0  # Maximum linear velocity (m/s)
+    omega_max = np.pi / 4  # Maximum angular velocity (rad/s)
+
+    # Perform interpolation
+    interpolated_poses = interpolate_pose(T1, T2, v_max, omega_max)
+    for i, T in enumerate(interpolated_poses):
+        print(f"Step {i}:\n{T}\n")
+
+
q3dviewer/test/test_rendering.py ADDED

@@ -0,0 +1,73 @@
+#!/usr/bin/env python3
+
+"""
+Copyright 2024 Panasonic Advanced Technology Development Co.,Ltd. (Liu Yang)
+Distributed under MIT license. See LICENSE for more information.
+"""
+
+"""
+this script tests the rendering of a cloud in a camera frame based on the
+camera pose and intrinsic matrix
+"""
+
+import numpy as np
+import q3dviewer as q3d
+import cv2
+from q3dviewer.utils.cloud_io import load_pcd
+
+cloud, _ = load_pcd('/home/liu/lab.pcd')
+
+
+Tcw = np.array([[7.07106781e-01, 7.07106781e-01, 0.00000000e+00,
+                 0.00000000e+00],
+                [-3.53553391e-01, 3.53553391e-01, 8.66025404e-01,
+                 3.55271368e-15],
+                [6.12372436e-01, -6.12372436e-01, 5.00000000e-01,
+                 -4.00000000e+01],
+                [0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
+                 1.00000000e+00]])
+# convert the opengl camera coordinate to the opencv camera coordinate
+Tconv = np.array([[1, 0, 0, 0],
+                  [0, -1, 0, 0],
+                  [0, 0, -1, 0],
+                  [0, 0, 0, 1]])
+
+Tcw = Tconv @ Tcw
+
+K = np.array([[1.64718029e+03, 0.00000000e+00, 9.51000000e+02],
+              [0.00000000e+00, 1.64718036e+03, 5.31000000e+02],
+              [0.00000000e+00, 0.00000000e+00, 1.00000000e+00]])
+
+
+def render_frame(cloud, Tcw, K, width, height):
+    image = np.zeros((height, width, 3), dtype=np.uint8)
+    Rcw, tcw = Tcw[:3, :3], Tcw[:3, 3]
+    pc = (Rcw @ cloud['xyz'].T).T + tcw
+    uv = (K @ pc.T).T
+    uv = uv[:, :2] / uv[:, 2][:, np.newaxis]
+    mask = (pc[:, 2] > 0) & (uv[:, 0] > 0) & (
+        uv[:, 0] < width) & (uv[:, 1] > 0) & (uv[:, 1] < height)
+    uv = uv[mask]
+    u = uv[:, 0].astype(int)
+    v = uv[:, 1].astype(int)
+    rgb = cloud['irgb'][mask]
+    r = rgb >> 16 & 0xff
+    g = rgb >> 8 & 0xff
+    b = rgb & 0xff
+
+    # Sort by depth to ensure front points are drawn first
+    depth = pc[mask, 2]
+    sorted_indices = np.argsort(depth)
+    u = u[sorted_indices]
+    v = v[sorted_indices]
+    r = r[sorted_indices]
+    g = g[sorted_indices]
+    b = b[sorted_indices]
+
+    image[v, u] = np.stack([b, g, r], axis=1)
+    return image
+
+
+image = render_frame(cloud, Tcw, K, 1902, 1062)
+cv2.imshow('image', image)
+cv2.waitKey(0)
q3dviewer/tools/cinematographer.py ADDED

@@ -0,0 +1,367 @@
+#!/usr/bin/env python3
+
+"""
+Copyright 2024 Panasonic Advanced Technology Development Co.,Ltd. (Liu Yang)
+Distributed under MIT license. See LICENSE for more information.
+"""
+
+import numpy as np
+import q3dviewer as q3d
+from PySide6.QtWidgets import QVBoxLayout, QListWidget, QListWidgetItem, QPushButton, QDoubleSpinBox, QCheckBox, QLineEdit, QMessageBox, QLabel, QHBoxLayout
+from PySide6.QtCore import QTimer
+from cloud_viewer import ProgressDialog, FileLoaderThread
+from PySide6 import QtCore
+from PySide6.QtGui import QKeyEvent
+from q3dviewer import GLWidget
+import imageio.v2 as imageio
+import os
+
+
+class KeyFrame:
+    def __init__(self, Twc):
+        self.Twc = Twc
+        self.linear_velocity = 10
+        self.angular_velocity = 1
+        self.stop_time = 0
+        self.item = q3d.FrameItem(Twc, width=3, color='#0000FF')
+
+
+class CustomGLWidget(GLWidget):
+    def __init__(self, viewer):
+        super().__init__()
+        self.viewer = viewer  # Add a viewer handle
+
+    def keyPressEvent(self, ev: QKeyEvent):
+        if ev.key() == QtCore.Qt.Key_Space:
+            self.viewer.add_key_frame()
+        elif ev.key() == QtCore.Qt.Key_Delete:
+            self.viewer.del_key_frame()
+        super().keyPressEvent(ev)
+
+class CMMViewer(q3d.Viewer):
+    """
+    This class is a subclass of Viewer, which is used to create a cloud movie maker.
+    """
+    def __init__(self, **kwargs):
+        self.key_frames = []
+        self.video_path = os.path.join(os.path.expanduser("~"), "output.mp4")
+        super().__init__(**kwargs, gl_widget_class=lambda: CustomGLWidget(self))
+        # for drop cloud file
+        self.setAcceptDrops(True)
+
+    def add_control_panel(self, main_layout):
+        """
+        Add a control panel to the viewer.
+        """
+        # Create a vertical layout for the settings
+        setting_layout = QVBoxLayout()
+
+        # Buttons to add and delete key frames
+        add_button = QPushButton("Add Key Frame")
+        add_button.clicked.connect(self.add_key_frame)
+        setting_layout.addWidget(add_button)
+        del_button = QPushButton("Delete Key Frame")
+        del_button.clicked.connect(self.del_key_frame)
+        setting_layout.addWidget(del_button)
+
+        # Add play/stop button
+        self.play_button = QPushButton("Play")
+        self.play_button.clicked.connect(self.toggle_playback)
+        setting_layout.addWidget(self.play_button)
+
+        # add a timer to play the frames
+        self.timer = QTimer()
+        self.timer.timeout.connect(self.play_frames)
+        self.current_frame_index = 0
+        self.is_playing = False
+        self.is_recording = False
+
+        # Add record checkbox
+        self.record_checkbox = QCheckBox("Record")
+        self.record_checkbox.stateChanged.connect(self.toggle_recording)
+        setting_layout.addWidget(self.record_checkbox)
+
+        # Add video path setting
+        video_path_layout = QHBoxLayout()
+        label_video_path = QLabel("Video Path:")
+        video_path_layout.addWidget(label_video_path)
+        self.video_path_edit = QLineEdit()
+        self.video_path_edit.setText(self.video_path)
+        self.video_path_edit.textChanged.connect(self.update_video_path)
+        video_path_layout.addWidget(self.video_path_edit)
+        setting_layout.addLayout(video_path_layout)
+
+        # Add a list of key frames
+        self.frame_list = QListWidget()
+        setting_layout.addWidget(self.frame_list)
+        self.frame_list.itemSelectionChanged.connect(self.on_select_frame)
+        self.installEventFilter(self)
+
+        # Add spin boxes for linear / angular velocity and stop time
+        self.lin_vel_spinbox = QDoubleSpinBox()
+        self.lin_vel_spinbox.setPrefix("Linear Velocity (m/s): ")
+        self.lin_vel_spinbox.setRange(0, 100)
+        self.lin_vel_spinbox.valueChanged.connect(self.set_frame_lin_vel)
+        setting_layout.addWidget(self.lin_vel_spinbox)
+
+        self.lin_ang_spinbox = QDoubleSpinBox()
+        self.lin_ang_spinbox.setPrefix("Angular Velocity (rad/s): ")
+        self.lin_ang_spinbox.setRange(0, 100)
+        self.lin_ang_spinbox.valueChanged.connect(self.set_frame_ang_vel)
+        setting_layout.addWidget(self.lin_ang_spinbox)
+
+        self.stop_time_spinbox = QDoubleSpinBox()
+        self.stop_time_spinbox.setPrefix("Stop Time: ")
+        self.stop_time_spinbox.setRange(0, 100)
+        self.stop_time_spinbox.valueChanged.connect(self.set_frame_stop_time)
+        setting_layout.addWidget(self.stop_time_spinbox)
+
+        setting_layout.setAlignment(QtCore.Qt.AlignTop)
+        main_layout.addLayout(setting_layout)
+
+
+    def update_video_path(self, path):
+        self.video_path = path
+
+    def add_key_frame(self):
+        view_matrix = self.glwidget.view_matrix
+        # Get camera pose in world frame
+        Twc = np.linalg.inv(view_matrix)
+        # Add the key frame to the list
+        key_frame = KeyFrame(Twc)
+        current_index = self.frame_list.currentRow()
+        self.key_frames.insert(current_index + 1, key_frame)
+        # visualize this key frame using FrameItem
+        self.glwidget.add_item(key_frame.item)
+        # move the camera back to 0.5 meter, let the user see the frame
+        self.glwidget.update_dist(0.5)
+        # Add the key frame to the Qt ListWidget
+        item = QListWidgetItem(f"Frame {current_index + 2}")
+        self.frame_list.insertItem(current_index + 1, item)
+        self.frame_list.setCurrentRow(current_index + 1)
+        # Update frame labels
+        for i in range(len(self.key_frames)):
+            self.frame_list.item(i).setText(f"Frame {i + 1}")
+
+    def del_key_frame(self):
+        current_index = self.frame_list.currentRow()
+        if current_index >= 0:
+            self.glwidget.remove_item(self.key_frames[current_index].item)
+            self.key_frames.pop(current_index)
+            self.frame_list.itemSelectionChanged.disconnect(self.on_select_frame)
+            self.frame_list.takeItem(current_index)
+            self.frame_list.itemSelectionChanged.connect(self.on_select_frame)
+            self.on_select_frame()
+            # Update frame labels
+            for i in range(len(self.key_frames)):
+
+                self.frame_list.item(i).setText(f"Frame {i + 1}")
+
+    def on_select_frame(self):
+        current = self.frame_list.currentRow()
+        for i, frame in enumerate(self.key_frames):
+            if i == current:
+                # Highlight the selected frame
+                frame.item.set_color('#FF0000')
+                frame.item.set_line_width(5)
+                # show current frame's parameters in the spinboxes
+                self.lin_vel_spinbox.setValue(frame.linear_velocity)
+                self.lin_ang_spinbox.setValue(frame.angular_velocity)
+                self.stop_time_spinbox.setValue(frame.stop_time)
+            else:
+                frame.item.set_color('#0000FF')
+                frame.item.set_line_width(3)
+
+    def set_frame_lin_vel(self, value):
+        current_index = self.frame_list.currentRow()
+        if current_index >= 0:
+            self.key_frames[current_index].linear_velocity = value
+
+    def set_frame_ang_vel(self, value):
+        current_index = self.frame_list.currentRow()
+        if current_index >= 0:
+            self.key_frames[current_index].angular_velocity = value
+
+    def set_frame_stop_time(self, value):
+        current_index = self.frame_list.currentRow()
+        if current_index >= 0:
+            self.key_frames[current_index].stop_time = value
+
+    def create_frames(self):
+        """
+        Create the frames for playback by interpolating between key frames.
+        """
+        self.frames = []
+        dt = 1 / float(self.update_interval)
+        for i in range(len(self.key_frames) - 1):
+            current_frame = self.key_frames[i]
+            if current_frame.stop_time > 0:
+                num_steps = int(current_frame.stop_time / dt)
+                for j in range(num_steps):
+                    self.frames.append(current_frame.Twc)
+            next_frame = self.key_frames[i + 1]
+            Ts = q3d.interpolate_pose(current_frame.Twc, next_frame.Twc,
+                                      current_frame.linear_velocity,
+                                      current_frame.angular_velocity,
+                                      dt)
+            self.frames.extend(Ts)
+
+        print(f"Total frames: {len(self.frames)}")
+        print(f"Total time: {len(self.frames) * dt:.2f} seconds")
+
+    def toggle_playback(self):
+        if self.is_playing:
+            self.stop_playback()
+        else:
+            self.start_playback()
+
+    def start_playback(self):
+        if self.key_frames:
+            self.create_frames()
+            self.current_frame_index = 0
+            self.timer.start(self.update_interval)  # Adjust the interval as needed
+            self.is_playing = True
+            self.play_button.setStyleSheet("")
+            self.play_button.setText("Stop")
+            self.record_checkbox.setEnabled(False)
+            if self.is_recording is True:
+                self.start_recording()
+
+    def stop_playback(self):
+        self.timer.stop()
+        self.is_playing = False
+        self.play_button.setStyleSheet("")
+        self.play_button.setText("Play")
+        self.record_checkbox.setEnabled(True)
+        if self.is_recording:
+            self.stop_recording()
+
+    def play_frames(self):
+        """
+        callback function for the timer to play the frames
+        """
+        # play the frames
+        if self.current_frame_index < len(self.frames):
+            self.glwidget.set_view_matrix(np.linalg.inv(self.frames[self.current_frame_index]))
+            self.current_frame_index += 1
+            if self.is_recording:
+                self.record_frame()
+        else:
+            self.stop_playback()
+
+    def toggle_recording(self, state):
+        if state == 2:
+            self.is_recording = True
+        else:
+            self.is_recording = False
+
+    def start_recording(self):
+        self.is_recording = True
+        self.frames_to_record = []
+        video_path = self.video_path_edit.text()
+        self.play_button.setStyleSheet("background-color: red")
+        self.play_button.setText("Recording")
+        self.writer = imageio.get_writer(video_path, fps=self.update_interval,
+                                         codec="libx264", bitrate="5M", quality=10)
+        # disable the all the frame_item while recording
+        for frame in self.key_frames:
+            frame.item.hide()
+
+    def stop_recording(self, save_movie=True):
+        self.is_recording = False
+        self.record_checkbox.setChecked(False)
+        # enable the all the frame_item after recording
+        for frame in self.key_frames:
+            frame.item.show()
+        if hasattr(self, 'writer') and save_movie:
+            self.writer.close()
+            self.show_save_message()
+
+    def show_save_message(self):
+        msg_box = QMessageBox()
+        msg_box.setIcon(QMessageBox.Information)
+        msg_box.setWindowTitle("Video Saved")
+        msg_box.setText(f"Video saved to {self.video_path_edit.text()}")
+        msg_box.setStandardButtons(QMessageBox.Ok)
+        msg_box.exec()
+
+    def record_frame(self):
+        frame = self.glwidget.capture_frame()
+        # make sure the frame size is multiple of 16
+        height, width, _ = frame.shape
+        if height % 16 != 0 or width % 16 != 0:
+            frame = frame[:-(height % 16), :-(width % 16), :]
+            frame = np.ascontiguousarray(frame)
+        self.frames_to_record.append(frame)
+        try:
+            self.writer.append_data(frame)
+        except Exception as e:
+            print("Don't change the window size during recording.")
+            self.stop_recording(False)  # Stop recording without saving
+            self.stop_playback()
+
+    def eventFilter(self, obj, event):
+        if event.type() == QtCore.QEvent.KeyPress:
+            if event.key() == QtCore.Qt.Key_Delete:
+                self.del_key_frame()
+                return True
+        return super().eventFilter(obj, event)
+
+    def dragEnterEvent(self, event):
+        if event.mimeData().hasUrls():
+            event.accept()
+        else:
+            event.ignore()
+
+    def dropEvent(self, event):
+        """
+        Overwrite the drop event to open the cloud file.
+        """
+        self.progress_dialog = ProgressDialog(self)
+        self.progress_dialog.show()
+        files = event.mimeData().urls()
+        self.progress_thread = FileLoaderThread(self, files)
+        self['cloud'].load(files[0].toLocalFile(), append=False)
+        self.progress_thread.progress.connect(self.file_loading_progress)
+        self.progress_thread.finished.connect(self.file_loading_finished)
+        self.progress_thread.start()
+
+    def file_loading_progress(self, value):
+        self.progress_dialog.set_value(value)
+
+    def file_loading_finished(self):
+        self.progress_dialog.close()
+
+    def open_cloud_file(self, file, append=False):
+        cloud_item = self['cloud']
+        if cloud_item is None:
+            print("Can't find clouditem.")
+            return
+        cloud = cloud_item.load(file, append=append)
+        center = np.nanmean(cloud['xyz'].astype(np.float64), axis=0)
+        self.glwidget.set_cam_position(pos=center)
+
+def main():
+    import argparse
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--path", help="the cloud file path")
+    args = parser.parse_args()
+    app = q3d.QApplication(['Cloud Movie Maker'])
+    viewer = CMMViewer(name='Cloud Movie Maker', update_interval=30)
+    cloud_item = q3d.CloudIOItem(size=1, alpha=0.1)
+    axis_item = q3d.AxisItem(size=0.5, width=5)
+    grid_item = q3d.GridItem(size=1000, spacing=20)
+
+    viewer.add_items(
+        {'cloud': cloud_item, 'grid': grid_item, 'axis': axis_item})
+
+    if args.path:
+        pcd_fn = args.path
+        viewer.open_cloud_file(pcd_fn)
+
+    viewer.show()
+    app.exec()
+
+
+if __name__ == '__main__':
+    main()