robits 0.5.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (134) hide show
  1. robits/__init__.py +0 -0
  2. robits/agents/__init__.py +0 -0
  3. robits/agents/base_agent.py +63 -0
  4. robits/agents/mello_agent.py +72 -0
  5. robits/agents/sam2act_agent.py +29 -0
  6. robits/app/__init__.py +13 -0
  7. robits/app/extrinsics_app.py +339 -0
  8. robits/app/misc/slider_widget.py +88 -0
  9. robits/audio/__init__.py +0 -0
  10. robits/audio/audio.py +99 -0
  11. robits/audio/cache_utils.py +45 -0
  12. robits/audio/speech.py +96 -0
  13. robits/audio/utils.py +53 -0
  14. robits/cli/__init__.py +26 -0
  15. robits/cli/agents/mello_cli.py +57 -0
  16. robits/cli/agents/sam2act_cli.py +151 -0
  17. robits/cli/base_cli.py +75 -0
  18. robits/cli/cli_options.py +26 -0
  19. robits/cli/cli_utils.py +71 -0
  20. robits/cli/commands/config_cli.py +150 -0
  21. robits/cli/commands/dataset_cli.py +199 -0
  22. robits/cli/commands/move_cli.py +140 -0
  23. robits/cli/commands/scene_visu.py +34 -0
  24. robits/cli/commands/service_cli.py +88 -0
  25. robits/cli/commands/speech_cli.py +76 -0
  26. robits/cli/devices/camera_cli.py +305 -0
  27. robits/cli/devices/gripper_cli.py +61 -0
  28. robits/cli/devices/panda_cli.py +58 -0
  29. robits/cli/devices/robot_cli.py +63 -0
  30. robits/cli/main.py +23 -0
  31. robits/core/__init__.py +3 -0
  32. robits/core/abc/__init__.py +0 -0
  33. robits/core/abc/audio.py +66 -0
  34. robits/core/abc/camera.py +99 -0
  35. robits/core/abc/control.py +292 -0
  36. robits/core/abc/gripper.py +66 -0
  37. robits/core/abc/robot.py +269 -0
  38. robits/core/abc/speech.py +19 -0
  39. robits/core/compat.py +14 -0
  40. robits/core/config.py +261 -0
  41. robits/core/config_manager.py +321 -0
  42. robits/core/data_model/__init__.py +0 -0
  43. robits/core/data_model/action.py +165 -0
  44. robits/core/data_model/camera_capture.py +81 -0
  45. robits/core/data_model/dataset.py +50 -0
  46. robits/core/factory.py +197 -0
  47. robits/core/utils.py +111 -0
  48. robits/dataset/__init__.py +0 -0
  49. robits/dataset/camera.py +82 -0
  50. robits/dataset/io/__init__.py +0 -0
  51. robits/dataset/io/reader.py +161 -0
  52. robits/dataset/io/recorder.py +160 -0
  53. robits/dataset/io/stats_writer.py +105 -0
  54. robits/dataset/io/writer.py +185 -0
  55. robits/dataset/robot.py +105 -0
  56. robits/real/__init__.py +18 -0
  57. robits/real/franka/__init__.py +1 -0
  58. robits/real/franka/control.py +191 -0
  59. robits/real/franka/franka_web_client.py +94 -0
  60. robits/real/franka/gripper.py +143 -0
  61. robits/real/franka/robot.py +223 -0
  62. robits/real/realsense_camera.py +238 -0
  63. robits/real/robotiq_gripper.py +154 -0
  64. robits/remote/__init__.py +0 -0
  65. robits/remote/client/__init__.py +0 -0
  66. robits/remote/client/camera.py +45 -0
  67. robits/remote/client/client_base.py +47 -0
  68. robits/remote/client/gripper.py +31 -0
  69. robits/remote/client/robot.py +94 -0
  70. robits/remote/server/__init__.py +0 -0
  71. robits/remote/server/camera.py +11 -0
  72. robits/remote/server/gripper.py +11 -0
  73. robits/remote/server/robot.py +11 -0
  74. robits/remote/server/server_base.py +101 -0
  75. robits/sim/__init__.py +0 -0
  76. robits/sim/blueprints.py +138 -0
  77. robits/sim/camera.py +75 -0
  78. robits/sim/control.py +285 -0
  79. robits/sim/env.py +141 -0
  80. robits/sim/env_client.py +118 -0
  81. robits/sim/env_design.py +134 -0
  82. robits/sim/gripper.py +87 -0
  83. robits/sim/model_factory.py +212 -0
  84. robits/sim/robot.py +225 -0
  85. robits/sim/utils.py +119 -0
  86. robits/tools/visualize_dataset.py +160 -0
  87. robits/utils/__init__.py +0 -0
  88. robits/utils/process_utils.py +76 -0
  89. robits/utils/service_launcher.py +50 -0
  90. robits/utils/system_utils.py +41 -0
  91. robits/utils/transform_utils.py +20 -0
  92. robits/utils/vision_utils.py +106 -0
  93. robits/vis/scene_visualizer.py +188 -0
  94. robits/vlm/__init__.py +0 -0
  95. robits/vlm/openai_vlm.py +67 -0
  96. robits/vlm/vlm_cli.py +73 -0
  97. robits-0.5.0.dist-info/LICENSE.md +0 -0
  98. robits-0.5.0.dist-info/METADATA +219 -0
  99. robits-0.5.0.dist-info/RECORD +134 -0
  100. robits-0.5.0.dist-info/WHEEL +4 -0
  101. robits-0.5.0.dist-info/entry_points.txt +7 -0
  102. robits_config/__init__.py +0 -0
  103. robits_config/additional_config/__init__.py +0 -0
  104. robits_config/additional_config/remote_config/__init__.py +1 -0
  105. robits_config/additional_config/remote_config/camera_client.json +6 -0
  106. robits_config/additional_config/remote_config/gripper_client.json +6 -0
  107. robits_config/additional_config/remote_config/robot_client.json +6 -0
  108. robits_config/additional_config/sim/__init__.py +1 -0
  109. robits_config/additional_config/sim/gripper_xarm_sim.json +14 -0
  110. robits_config/additional_config/sim/robot_g1_sim.json +69 -0
  111. robits_config/additional_config/sim/robot_gen3_sim.json +18 -0
  112. robits_config/additional_config/sim/robot_h1_sim.json +49 -0
  113. robits_config/additional_config/sim/robot_iiwa14_sim.json +36 -0
  114. robits_config/additional_config/sim/robot_spot_sim.json +22 -0
  115. robits_config/additional_config/sim/robot_xarm_sim.json +50 -0
  116. robits_config/audio/__init__.py +0 -0
  117. robits_config/audio/audio_whisper.json +4 -0
  118. robits_config/camera/__init__.py +0 -0
  119. robits_config/camera/camera_front_real.json +8 -0
  120. robits_config/camera/camera_front_sim.json +7 -0
  121. robits_config/camera_data/__init__.py +0 -0
  122. robits_config/camera_data/calibration_front_camera.json +49 -0
  123. robits_config/gripper/__init__.py +0 -0
  124. robits_config/gripper/gripper_panda_real.json +5 -0
  125. robits_config/gripper/gripper_panda_sim.json +12 -0
  126. robits_config/gripper/gripper_robotiq_real.json +4 -0
  127. robits_config/gripper/gripper_robotiq_sim.json +12 -0
  128. robits_config/robot/__init__.py +0 -0
  129. robits_config/robot/robot_panda_real.json +16 -0
  130. robits_config/robot/robot_panda_sim.json +27 -0
  131. robits_config/robot/robot_ur10e_sim.json +28 -0
  132. robits_config/speech/__init__.py +0 -0
  133. robits_config/speech/speech_espeek.json +5 -0
  134. robits_config/speech/speech_openai.json +4 -0
robits/__init__.py ADDED
File without changes
File without changes
@@ -0,0 +1,63 @@
1
+ from abc import ABC
2
+ from abc import abstractmethod
3
+
4
+ import logging
5
+
6
+ import numpy as np
7
+
8
+
9
+ logger = logging.getLogger(__name__)
10
+
11
+
12
class BaseAgent(ABC):
    """Common base class for policy agents acting on robot observations."""

    # Human-readable identifier of the concrete agent implementation.
    agent_name: str

    # Natural-language task instruction fed to the language encoder.
    lang_goal: str

    def prepare_observation(self, obs, i, episode_length):
        """Turn a raw observation dict into model-ready tensors, in place.

        Tokenizes the language goal, assembles the low-dimensional state
        vector, and moves every numpy entry onto ``self.device`` as a
        tensor with an extra leading batch dimension.

        :param obs: mutable observation dictionary (modified in place)
        :param i: index of the current step within the episode
        :param episode_length: total number of steps in the episode
        :returns: the same dictionary with tensorized values
        """
        import torch
        from clip import tokenize

        logger.info("Using language goal %s", self.lang_goal)
        obs["lang_goal_tokens"] = tokenize([self.lang_goal])[0].numpy()

        # Remaining episode progress mapped onto [-1, 1].
        # NOTE(review): raises ZeroDivisionError when episode_length == 1.
        progress = (1.0 - (i / float(episode_length - 1))) * 2.0 - 1.0
        # gripper is normalized here
        finger_state = obs["gripper_joint_positions"][0:1]
        logger.info("timestamp: %.2f", progress)

        obs["ignore_collisions"] = np.array([0])
        obs["low_dim_state"] = np.concatenate(
            [finger_state, finger_state, finger_state, progress],
            axis=None,
        )

        for key, value in obs.items():
            if value is None:
                logger.info("No values for key %s", key)
                continue

            if not isinstance(value, np.ndarray):
                logger.debug("Key %s is not a numpy array", key)
            else:
                value = value.astype(np.float32)
                logger.debug("Item %s has shape %s", key, value.shape)

            obs[key] = torch.tensor([value], device=self.device).unsqueeze(0)

        return obs

    @abstractmethod
    def get_action(self, step, observation):
        """Return the next action for ``step`` given ``observation``."""
@@ -0,0 +1,72 @@
1
+ from typing import Optional
2
+ import logging
3
+ import time
4
+ import requests
5
+
6
+ from dataclasses import dataclass
7
+
8
+ import numpy as np
9
+
10
+
11
+ logger = logging.getLogger(__name__)
12
+
13
+
14
@dataclass(frozen=True)
class MelloConfig:
    """Configuration for :class:`MelloAgent`.

    NOTE(review): currently unused -- ``MelloAgent.__init__`` hard-codes
    ``max_delta = 0.25`` instead of reading it from this config; confirm
    and wire it through.
    """

    # Maximum per-step joint delta (radians) applied to the robot.
    max_delta: float = 0.25

    # Optional joint pose the robot should start from; None means unset.
    initial_joint_positions: Optional[np.ndarray] = None
20
+
21
+
22
class MelloAgent:
    """Teleoperation agent that mirrors a Mello controller onto a robot.

    The Mello device reports joint angles and velocities in degrees over
    HTTP; they are converted to radians and sign-flipped to match the
    robot's joint convention.
    """

    def __init__(self, device_addr, robot):
        """
        :param device_addr: HTTP endpoint of the Mello device
        :param robot: robot providing proprioception and receiving targets
        """
        self.device_addr = device_addr
        self.robot = robot
        # Maximum allowed per-joint delta in radians per step.
        self.max_delta = 0.25

    def _fetch(self):
        """Return the raw JSON payload from the Mello device."""
        return requests.get(self.device_addr).json()

    @staticmethod
    def _to_robot_convention(values_deg):
        """Convert Mello degrees to robot radians (sign-flipped)."""
        return -np.deg2rad(np.array(values_deg))

    def get_mello_data(self):
        """Fetch joint positions and velocities (radians, robot sign).

        :returns: tuple of (joint_positions, joint_velocities) arrays
        """
        data = self._fetch()
        joint_positions = self._to_robot_convention(data["joint_positions"])
        joint_velocities = self._to_robot_convention(data["joint_velocities"])
        return joint_positions, joint_velocities

    def get_mello_joint_positions(self):
        """Fetch only the joint positions (radians, robot sign)."""
        return self._to_robot_convention(self._fetch()["joint_positions"])

    def get_action(self) -> np.ndarray:
        """Return the next joint target, clamped to ``max_delta`` per joint."""
        mello_joint_positions = self.get_mello_joint_positions()
        robot_joint_positions = self.robot.get_proprioception_data(False, False)[
            "joint_positions"
        ]
        delta = mello_joint_positions - robot_joint_positions
        # Limit the commanded step so the robot never jumps toward the device.
        delta = np.clip(delta, -self.max_delta, self.max_delta)
        return robot_joint_positions + delta

    def wait_for_pose(self):
        """Block until the Mello pose is within ``max_delta`` of the robot.

        Polls the device every 0.25 s and logs while the pose difference is
        still too large.
        """
        while True:
            mello_joint_positions = self.get_mello_joint_positions()
            # NOTE(review): get_action() reads the same value via
            # get_proprioception_data() -- confirm the two robot APIs agree.
            robot_joint_positions = self.robot.get_joint_obs(False, False)[
                "joint_positions"
            ]
            delta = np.abs(mello_joint_positions - robot_joint_positions)
            if np.all(delta < self.max_delta):
                break
            logger.info(
                "Delta between robot and mello is too large %s. Should be within %s",
                np.rad2deg(delta),
                np.rad2deg(self.max_delta),
            )
            time.sleep(0.25)
        logger.info("Done.")
@@ -0,0 +1,29 @@
1
+ import logging
2
+
3
+ import torch
4
+
5
+ from sam2act.eval import load_agent
6
+ from robits.agents.base_agent import BaseAgent
7
+
8
+ logger = logging.getLogger(__name__)
9
+
10
+
11
class SAM2Act(BaseAgent):
    """Agent wrapping a pre-trained SAM2Act policy for evaluation."""

    def __init__(
        self,
        model_path,
        device="cuda:0",
        lang_goal="push the buttons in the following order: red, green, blue",
    ):
        """Load the SAM2Act checkpoint and prepare it for inference.

        :param model_path: path to the SAM2Act checkpoint
        :param device: torch device string the agent runs on
        :param lang_goal: natural-language task instruction
        """
        self.device = torch.device(device)

        self.agent = load_agent(model_path)
        self.agent.load_clip()
        self.agent.eval()
        self.lang_goal = lang_goal

    def get_action(self, step, observation):
        """Run the policy on a prepared observation.

        :param step: current timestep index
        :param observation: prepared observation dict; falsy input is skipped
        :returns: the policy's act result, or ``None`` when there is nothing
            to act on
        """
        if not observation:
            logger.error("Nothing todo.")
            return None

        # Disable the TorchScript optimizing executor for the forward pass.
        with torch.jit.optimized_execution(False):
            return self.agent.act(step, observation, deterministic=True)
robits/app/__init__.py ADDED
@@ -0,0 +1,13 @@
1
+ """
2
+ Interactive applications for the RoBits framework.
3
+
4
+ This package provides user interface applications for interacting with
5
+ robots, cameras, and other components of the RoBits system. These applications
6
+ use terminal-based UI libraries to provide graphical interfaces that work
7
+ in both terminal and GUI environments.
8
+
9
+ Components:
10
+
11
+ - robot_app: An interactive TUI for controlling robot arms
12
+ - extrinsics_app: An application for calibrating camera extrinsics
13
+ """
@@ -0,0 +1,339 @@
1
+ #!/usr/bin/env python3
2
+
3
+ """
4
+ App for extrinsic calibration
5
+ """
6
+ import sys
7
+ import os
8
+
9
+ import time
10
+ from threading import Thread
11
+
12
+ from datetime import datetime
13
+
14
+
15
+ import numpy as np
16
+ from scipy.spatial.transform import Rotation as R
17
+ import open3d as o3d
18
+
19
+ from textual.app import App
20
+ from textual.app import ComposeResult
21
+ from textual.containers import Vertical
22
+ from textual.containers import Horizontal
23
+ from textual.widgets import Label
24
+ from textual.widgets import Button
25
+ from textual_slider import Slider
26
+ from textual.widgets import Select
27
+
28
+
29
+ from robits.core.config_manager import config_manager
30
+ from robits.core.factory import CameraFactory
31
+ from robits.core.config import CameraCalibration
32
+ from robits.utils import vision_utils
33
+
34
+
35
class SimplePCDViewer:
    """Minimal Open3D viewer showing a live point cloud plus reference geometry."""

    def __init__(self) -> None:
        # Flag polled by the render loop; stop() flips it to False.
        self.viewer_running = True
        self.table = None

        self.point_cloud = o3d.geometry.PointCloud()
        # Last transformation applied; undone before the next one is applied.
        self.prev_transform = np.identity(4)

    def apply_transformation(self, transformation: np.ndarray) -> None:
        """
        Apply a transformation matrix to the current point cloud.

        The previous transformation is undone first so that successive
        calls do not accumulate.

        :param transformation: the transformation to apply
        """
        self.point_cloud.transform(np.linalg.inv(self.prev_transform))
        self.point_cloud.transform(transformation)
        self.prev_transform = transformation

    def update_point_cloud(self, camera) -> None:
        """
        Get a new point cloud from the camera

        :param camera: the camera to retrieve the point cloud from
        """
        camera_data, _ = camera.get_camera_data()
        pcd = vision_utils.depth_to_pcd(camera_data, camera)
        self.point_cloud.points = pcd.points
        self.point_cloud.colors = pcd.colors

    def show_table(self):
        """Placeholder; table rendering currently happens in :meth:`run`."""

    def get_table(self):
        """Return a box mesh approximating the work-table surface."""
        table = o3d.geometry.TriangleMesh.create_box(width=0.6, height=1.5, depth=0.01)
        table = table.translate(np.array([0.3, 0.0 - 1.5 / 2, 0.295 - 0.01 / 2.0]))
        return table

    def run(self) -> None:
        """
        Start the Open3D viewer in a separate thread.

        .. todo:: this breaks compatibility to macOS as the gui is supposed to run in the main thread.
        """
        vis = o3d.visualization.Visualizer()
        vis.create_window(window_name="Point Cloud Viewer")

        coordinate_system = o3d.geometry.TriangleMesh.create_coordinate_frame(
            size=0.4, origin=[0, 0, 0]
        )
        vis.add_geometry(coordinate_system)
        vis.add_geometry(self.get_table())
        vis.add_geometry(self.point_cloud)

        # Thin colored boxes act as visual reference planes for judging the
        # calibration: red, green, and a blue plane offset along x.
        vis.add_geometry(
            o3d.geometry.TriangleMesh.create_box(
                width=1.0, height=0.001, depth=1.0
            ).paint_uniform_color([1.0, 0.0, 0.0])
        )
        vis.add_geometry(
            o3d.geometry.TriangleMesh.create_box(
                width=1.0, height=1.0, depth=0.001
            ).paint_uniform_color([0.0, 1.0, 0.0])
        )
        vis.add_geometry(
            o3d.geometry.TriangleMesh.create_box(width=0.001, height=1.0, depth=1.0)
            .paint_uniform_color([0.0, 0.0, 1.0])
            .translate(np.array([0.4687, 0.0, 0.0]))
        )

        # Render loop at roughly 20 Hz until stop() is called.
        while self.viewer_running:
            vis.update_geometry(self.point_cloud)
            vis.poll_events()
            vis.update_renderer()
            time.sleep(0.05)

        vis.destroy_window()

    def stop(self) -> None:
        """
        Stop the Open3D viewer.
        """
        self.viewer_running = False
119
+
120
+
121
class CameraCalibrationApp(App):
    """Textual TUI for interactively tuning a camera's extrinsic calibration.

    Sliders edit a camera-to-world transform; the linked
    :class:`SimplePCDViewer` shows the transformed point cloud live.
    """

    def __init__(self, viewer: SimplePCDViewer):
        """
        :param viewer: the Open3D viewer that mirrors the slider state
        """
        super().__init__()
        self.viewer = viewer

    def compose(self) -> ComposeResult:
        """Build the widget tree: camera selector, XYZ translation sliders,
        roll/pitch/yaw rotation sliders, and the action buttons."""
        yield Vertical(
            Label("current transform", id="label_transform"),
            Horizontal(
                Label("Select Camera"),
                Select.from_values(
                    values=config_manager.available_cameras, id="camera_select"
                ),
                Button(label="Connect", id="connect"),
            ),
            Label("Camera Calibration - Translation Sliders"),
            Horizontal(
                Vertical(
                    Label("Translation X"),
                    Slider(min=-3.0, max=3.0, value=0.0, step=0.0025, id="trans_x"),
                    Label(id="label_x"),
                ),
                Vertical(
                    Label("Translation Y"),
                    Slider(min=-3.0, max=3.0, value=0.0, step=0.0025, id="trans_y"),
                    Label(id="label_y"),
                ),
                Vertical(
                    Label("Translation Z"),
                    Slider(min=-3.0, max=3.0, value=0.0, step=0.0025, id="trans_z"),
                    Label(id="label_z"),
                ),
            ),
            Label("Camera Calibration - Rotation Sliders"),
            Horizontal(
                Vertical(
                    Label("Rotation Roll"),
                    Slider(min=-180, max=180, value=0, step=1, id="rot_roll"),
                    Label(id="label_roll"),
                ),
                Vertical(
                    Label("Rotation Pitch"),
                    Slider(min=-180, max=180, value=0, step=1, id="rot_pitch"),
                    Label(id="label_pitch"),
                ),
                Vertical(
                    Label("Rotation Yaw"),
                    Slider(min=-180, max=180, value=0, step=1, id="rot_yaw"),
                    Label(id="label_yaw"),
                ),
            ),
            Label("Actions"),
            Horizontal(
                Button(label="Save", id="save"),
                Button(label="Refine", id="refine"),
                Button(label="Exit", id="exit"),
            ),
        )

    def on_mount(self):
        # Start the Open3D viewer in a separate thread. Does not work for macOS
        Thread(target=self.viewer.run, daemon=True).start()

    def on_button_pressed(self, message: Button.Pressed) -> None:
        """Dispatch the Exit / Connect / Save / Refine buttons by widget id."""
        if message.button.id == "exit":
            sys.exit(0)
        elif message.button.id == "connect":
            selected_camera = self.query_one("#camera_select", Select).value
            if selected_camera:
                camera = CameraFactory(selected_camera).build()
                self.viewer.update_point_cloud(camera)
                # Sliders represent the inverse of the stored extrinsics
                # (mirrored by the inversion in save_camera_calibration).
                transformation = np.linalg.inv(camera.calibration.extrinsics)
                self.update_sliders(transformation)
                self.viewer.apply_transformation(self.get_transformation())
        elif message.button.id == "save":
            selected_camera = self.query_one("#camera_select", Select).value
            self.save_camera_calibration(selected_camera)
        elif message.button.id == "refine":
            self.estimate_transformation()

    def update_sliders(self, extrinsics):
        """Set slider positions from a 4x4 transform.

        :param extrinsics: 4x4 matrix; translation from the last column,
            rotation decomposed as intrinsic ZYX Euler angles (degrees)
        """
        rpy = R.from_matrix(extrinsics[:3, :3]).as_euler("ZYX", degrees=True)
        self.query_one("#trans_x", Slider).value = extrinsics[0, 3]
        self.query_one("#trans_y", Slider).value = extrinsics[1, 3]
        self.query_one("#trans_z", Slider).value = extrinsics[2, 3]
        # as_euler("ZYX") returns [yaw, pitch, roll]; map accordingly.
        self.query_one("#rot_roll", Slider).value = rpy[2]
        self.query_one("#rot_pitch", Slider).value = rpy[1]
        self.query_one("#rot_yaw", Slider).value = rpy[0]

    def draw_registration_result(self, source, target, transformation):
        """Show source (yellow) and target (blue) clouds in a blocking window.

        NOTE(review): ``transformation`` is currently ignored -- the
        transform call below is commented out; confirm whether that is
        intentional.
        """
        import copy

        source_temp = copy.deepcopy(source)
        target_temp = copy.deepcopy(target)
        source_temp.paint_uniform_color([1, 0.706, 0])
        target_temp.paint_uniform_color([0, 0.651, 0.929])
        # source_temp.transform(transformation)
        coordinate_system = o3d.geometry.TriangleMesh.create_coordinate_frame(
            size=0.4, origin=[0, 0, 0]
        )
        o3d.visualization.draw_geometries([source_temp, target_temp, coordinate_system])

    def estimate_transformation(self):
        """
        .. todo:: not implemented yet
        """

        pcd = self.viewer.point_cloud
        # The string literal below holds disabled plane-segmentation code.
        """
        if False:
            plane_model, inliers = pcd.segment_plane(
                distance_threshold=0.03, ransac_n=5000, num_iterations=1000
            )
            inlier_cloud = pcd.select_by_index(inliers)
            inlier_cloud.paint_uniform_color([1.0, 0, 0])
            outlier_cloud = pcd.select_by_index(inliers, invert=True)
            extracted_table_pcd = inlier_cloud

        """

        extracted_table_pcd = pcd

        # Synthetic flat target: a 100x100 grid of points at table height.
        table_height = 0.25
        x = np.linspace(0, 0.5, 100)
        y = np.linspace(-0.7, 0.7, 100)
        mesh_x, mesh_y = np.meshgrid(x, y)
        xyz = np.zeros((100 * 100, 3))
        xyz[:, 0] = mesh_x.reshape(-1)
        xyz[:, 1] = mesh_y.reshape(-1)
        xyz[:, 2] = table_height

        target = o3d.geometry.PointCloud()
        target.points = o3d.utility.Vector3dVector(xyz)
        target.paint_uniform_color([1.0, 0.0, 0.0])

        # Refine the current slider transform with point-to-point ICP
        # against the synthetic table plane.
        trans_init = self.get_transformation()
        threshold = 0.02
        self.draw_registration_result(extracted_table_pcd, target, trans_init)
        reg_p2p = o3d.pipelines.registration.registration_icp(
            extracted_table_pcd,
            target,
            threshold,
            trans_init,
            o3d.pipelines.registration.TransformationEstimationPointToPoint(),
        )
        # self.update_sliders(reg_p2p.transformation)
        self.draw_registration_result(
            extracted_table_pcd, target, reg_p2p.transformation
        )

    def get_transformation(self) -> np.ndarray:
        """Build a 4x4 transform from the sliders and refresh the value labels.

        NOTE(review): this getter also updates UI labels as a side effect;
        callers rely on that, so keep them together.

        :returns: 4x4 homogeneous transformation matrix
        """
        trans_x = self.query_one("#trans_x", Slider).value
        trans_y = self.query_one("#trans_y", Slider).value
        trans_z = self.query_one("#trans_z", Slider).value
        roll = self.query_one("#rot_roll", Slider).value
        pitch = self.query_one("#rot_pitch", Slider).value
        yaw = self.query_one("#rot_yaw", Slider).value

        self.query_one("#label_x", Label).update(content=f"{trans_x:.2f}")
        self.query_one("#label_y", Label).update(content=f"{trans_y:.2f}")
        self.query_one("#label_z", Label).update(content=f"{trans_z:.2f}")

        self.query_one("#label_roll", Label).update(content=f"{roll:.2f}")
        self.query_one("#label_pitch", Label).update(content=f"{pitch:.2f}")
        self.query_one("#label_yaw", Label).update(content=f"{yaw:.2f}")

        # Create transformation matrix
        transformation = np.identity(4)
        transformation[:3, 3] = [trans_x, trans_y, trans_z]
        transformation[:3, :3] = R.from_euler(
            "ZYX", [yaw, pitch, roll], degrees=True
        ).as_matrix()
        # transformation = np.linalg.inv(transformation)

        self.query_one("#label_transform", Label).update(content=f"{transformation}")

        return transformation

    def on_slider_changed(self, message: Slider.Changed) -> None:
        """React to any slider change by re-transforming the point cloud."""
        transformation = self.get_transformation()

        # Apply transformation to the point cloud
        self.viewer.apply_transformation(transformation)

    def on_shutdown(self):
        # Stop the Open3D viewer when the app is closed
        self.viewer.stop()

    def save_camera_calibration(self, selected_camera):
        """Persist the current slider transform as the camera's calibration.

        Writes ``calibration_<camera>_camera.json`` into the user config
        directory. The slider transform is inverted back into extrinsics
        (inverse of the mapping used on Connect).

        :param selected_camera: config name of the camera; no-op when falsy
        """
        if not selected_camera:
            return

        user_config_dir = config_manager.get_user_config_dir()
        extrinsics = np.linalg.inv(self.get_transformation())

        camera = CameraFactory(selected_camera).build()
        config_dict = camera.calibration.to_dict()
        config_dict["extrinsics"] = extrinsics.tolist()
        config_dict["date_updated"] = datetime.now().isoformat()
        new_calibration = CameraCalibration(**config_dict)
        config_path = os.path.join(
            user_config_dir, f"calibration_{camera.camera_name}_camera.json"
        )
        new_calibration.save_config(config_path)
327
+
328
+
329
if __name__ == "__main__":
    # Build the shared viewer first; the Textual app starts its render
    # loop in a background thread from on_mount().
    viewer = SimplePCDViewer()
    print("Viewer initialized")
    print("Creating app")
    app = CameraCalibrationApp(viewer)
    print("Running app")
    app.run()
@@ -0,0 +1,88 @@
1
+ from textual.widget import Widget
2
+ from textual.reactive import reactive
3
+ from textual.events import Key
4
+ from textual.message import Message
5
+ from textual.containers import Horizontal
6
+ from textual.containers import Vertical
7
+
8
+ from textual.widgets import Button, Static
9
+ from textual.app import ComposeResult
10
+
11
+
12
class Slider(Widget):
    """A simple horizontal slider widget with + and - buttons."""

    # Current slider value; reactive so watch_value() keeps the UI in sync.
    value = reactive(0.0)

    def __init__(self, min=0.0, max=1.0, value=0.0, step=0.1, id=None, name=None):
        """
        :param min: lower bound of the slider range
        :param max: upper bound of the slider range
        :param value: initial value
        :param step: increment applied by the buttons and arrow keys
        :param id: widget id
        :param name: widget name
        """
        # Set before super().__init__() so a watch_value() fired during
        # initialization does not query children that are not mounted yet.
        self._mounted = False
        super().__init__(id=id, name=name)
        self.min = min
        self.max = max
        self.step = step
        self.bar_width = 30
        self.value = value

    def compose(self) -> ComposeResult:
        decrease_btn = Button("-", id="decrease")
        increase_btn = Button("+", id="increase")

        # Force the buttons into a fixed 3x3 cell.
        for btn in (decrease_btn, increase_btn):
            btn.styles.width = 3
            btn.styles.height = 3
            btn.styles.min_width = 3
            btn.styles.min_height = 3
            btn.styles.max_width = 3
            btn.styles.max_height = 3

        # Bar and label
        bar = Static(self._render_bar(), id="bar", expand=True)
        value_label = Static(f"{self.value:.3f}", id="value_label", expand=True)
        bar.styles.text_align = "center"
        value_label.styles.text_align = "center"

        center_column = Vertical(value_label, bar, id="slider-column")
        center_column.styles.align_horizontal = "center"

        yield Horizontal(decrease_btn, center_column, increase_btn, id="slider-row")

    def on_mount(self) -> None:
        self._mounted = True
        self.styles.width = self.bar_width + 10

    def _render_bar(self) -> str:
        """Render the filled/empty bar for the current value."""
        span = self.max - self.min
        # Guard against a degenerate range (min == max) to avoid ZeroDivisionError.
        normalized = 0.0 if span == 0 else (self.value - self.min) / span
        filled_chars = int(normalized * self.bar_width)
        empty_chars = self.bar_width - filled_chars
        return f"[{'█' * filled_chars}{' ' * empty_chars}]"

    def on_button_pressed(self, event: Button.Pressed) -> None:
        if event.button.id == "decrease":
            self.decrease()
        elif event.button.id == "increase":
            self.increase()

    def decrease(self):
        """Step the value down, clamped to ``min``, and notify listeners."""
        self.value = max(self.min, self.value - self.step)
        self.post_message(Slider.Changed(self))

    def increase(self):
        """Step the value up, clamped to ``max``, and notify listeners."""
        self.value = min(self.max, self.value + self.step)
        self.post_message(Slider.Changed(self))

    def on_key(self, event: Key) -> None:
        if event.key == "left":
            self.decrease()
        elif event.key == "right":
            self.increase()

    def watch_value(self, old: float, new: float) -> None:
        # Only touch children once mounted; use the same .3f format as compose().
        if self._mounted:
            self.query_one("#bar", Static).update(self._render_bar())
            self.query_one("#value_label", Static).update(f"{new:.3f}")

    class Changed(Message):
        """Message sent when the slider value changes."""

        def __init__(self, sender: "Slider"):
            super().__init__()
            # Keep a reference so handlers can identify the originating slider.
            self.slider = sender
File without changes