pyorbbec-1.0.1.7-py3-none-any.whl → pyorbbec-1.0.1.8-py3-none-any.whl
This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in their public registries.
- {pyorbbec-1.0.1.7.dist-info → pyorbbec-1.0.1.8.dist-info}/METADATA +1 -1
- pyorbbec-1.0.1.8.dist-info/RECORD +59 -0
- pyorbbecsdk/examples/.gitkeep +0 -0
- pyorbbecsdk/examples/OrbbecSDK.dll +0 -0
- pyorbbecsdk/examples/OrbbecSDK.lib +0 -0
- pyorbbecsdk/examples/README.md +26 -0
- pyorbbecsdk/examples/__pycache__/utils.cpython-313.pyc +0 -0
- pyorbbecsdk/examples/callback.py +303 -0
- pyorbbecsdk/examples/color.py +64 -0
- pyorbbecsdk/examples/coordinate_transform.py +184 -0
- pyorbbecsdk/examples/depth.py +107 -0
- pyorbbecsdk/examples/depth_work_mode.py +50 -0
- pyorbbecsdk/examples/device_firmware_update.py +155 -0
- pyorbbecsdk/examples/device_optional_depth_presets_update.py +142 -0
- pyorbbecsdk/examples/enumerate.py +118 -0
- pyorbbecsdk/examples/extensions/depthengine/depthengine.dll +0 -0
- pyorbbecsdk/examples/extensions/depthengine/depthengine.lib +0 -0
- pyorbbecsdk/examples/extensions/filters/FilterProcessor.dll +0 -0
- pyorbbecsdk/examples/extensions/filters/ob_priv_filter.dll +0 -0
- pyorbbecsdk/examples/extensions/firmwareupdater/firmwareupdater.dll +0 -0
- pyorbbecsdk/examples/extensions/frameprocessor/ob_frame_processor.dll +0 -0
- pyorbbecsdk/examples/hdr.py +216 -0
- pyorbbecsdk/examples/hot_plug.py +160 -0
- pyorbbecsdk/examples/hw_d2c_align.py +135 -0
- pyorbbecsdk/examples/imu.py +60 -0
- pyorbbecsdk/examples/infrared.py +115 -0
- pyorbbecsdk/examples/logger.py +55 -0
- pyorbbecsdk/examples/metadata.py +64 -0
- pyorbbecsdk/examples/multi_device.py +169 -0
- pyorbbecsdk/examples/multi_streams.py +219 -0
- pyorbbecsdk/examples/net_device.py +158 -0
- pyorbbecsdk/examples/playback.py +277 -0
- pyorbbecsdk/examples/point_cloud.py +90 -0
- pyorbbecsdk/examples/post_processing.py +119 -0
- pyorbbecsdk/examples/preset.py +67 -0
- pyorbbecsdk/examples/pyorbbecsdk.cp313-win_amd64.pyd +0 -0
- pyorbbecsdk/examples/quick_start.py +90 -0
- pyorbbecsdk/examples/recorder.py +236 -0
- pyorbbecsdk/examples/requirements.txt +9 -0
- pyorbbecsdk/examples/save_image_to_disk.py +106 -0
- pyorbbecsdk/examples/sync_align.py +109 -0
- pyorbbecsdk/examples/two_devices_sync.py +233 -0
- pyorbbecsdk/examples/utils.py +127 -0
- pyorbbec-1.0.1.7.dist-info/RECORD +0 -18
- {pyorbbec-1.0.1.7.dist-info → pyorbbec-1.0.1.8.dist-info}/WHEEL +0 -0
- {pyorbbec-1.0.1.7.dist-info → pyorbbec-1.0.1.8.dist-info}/licenses/LICENSE +0 -0
- {pyorbbec-1.0.1.7.dist-info → pyorbbec-1.0.1.8.dist-info}/licenses/NOTICE +0 -0
- {pyorbbec-1.0.1.7.dist-info → pyorbbec-1.0.1.8.dist-info}/top_level.txt +0 -0
pyorbbecsdk/examples/playback.py (new file)
@@ -0,0 +1,277 @@
```python
# ******************************************************************************
# Copyright (c) 2024 Orbbec 3D Technology, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************

import cv2
import numpy as np
from pyorbbecsdk import *
from utils import frame_to_bgr_image

# cached frames for better visualization
cached_frames = {
    'color': None,
    'depth': None,
    'left_ir': None,
    'right_ir': None,
    'ir': None
}

def setup_camera(playback):
    """Setup camera and stream configuration"""
    pipeline = Pipeline(playback)
    config = Config()
    device = pipeline.get_device()

    # Try to enable all possible sensors
    video_sensors = [
        OBSensorType.COLOR_SENSOR,
        OBSensorType.DEPTH_SENSOR,
        OBSensorType.IR_SENSOR,
        OBSensorType.LEFT_IR_SENSOR,
        OBSensorType.RIGHT_IR_SENSOR,
        OBSensorType.ACCEL_SENSOR,
        OBSensorType.GYRO_SENSOR,
    ]
    enabled_sensor_types = []

    sensor_list = device.get_sensor_list()
    for sensor in range(len(sensor_list)):
        try:
            sensor_type = sensor_list[sensor].get_type()
            if sensor_type in video_sensors:
                config.enable_stream(sensor_type)
                enabled_sensor_types.append(sensor_type)
        except:
            continue
    return pipeline, config, enabled_sensor_types

def process_color(frame):
    """Process color image"""
    frame = frame if frame else cached_frames['color']
    cached_frames['color'] = frame
    return frame_to_bgr_image(frame) if frame else None


def process_depth(frame):
    """Process depth image"""
    frame = frame if frame else cached_frames['depth']
    cached_frames['depth'] = frame
    if not frame:
        return None
    try:
        depth_data = np.frombuffer(frame.get_data(), dtype=np.uint16)
        depth_data = depth_data.reshape(frame.get_height(), frame.get_width())
        depth_image = cv2.normalize(depth_data, None, 0, 255, cv2.NORM_MINMAX, dtype=cv2.CV_8U)
        return cv2.applyColorMap(depth_image, cv2.COLORMAP_JET)
    except ValueError:
        return None


def process_ir(ir_frame, key):
    """Process IR frame (left, right, or mono) with cache"""
    ir_frame = ir_frame if ir_frame else cached_frames[key]
    cached_frames[key] = ir_frame
    if ir_frame is None:
        return None

    ir_frame = ir_frame.as_video_frame()
    ir_data = np.asanyarray(ir_frame.get_data())
    width = ir_frame.get_width()
    height = ir_frame.get_height()
    ir_format = ir_frame.get_format()

    if ir_format == OBFormat.Y8:
        ir_data = np.resize(ir_data, (height, width, 1))
        data_type = np.uint8
        image_dtype = cv2.CV_8UC1
        max_data = 255
    elif ir_format == OBFormat.MJPG:
        ir_data = cv2.imdecode(ir_data, cv2.IMREAD_UNCHANGED)
        data_type = np.uint8
        image_dtype = cv2.CV_8UC1
        max_data = 255
        if ir_data is None:
            print("decode mjpeg failed")
            return None
        ir_data = np.resize(ir_data, (height, width, 1))
    else:
        ir_data = np.frombuffer(ir_data, dtype=np.uint16)
        data_type = np.uint16
        image_dtype = cv2.CV_16UC1
        max_data = 255
        ir_data = np.resize(ir_data, (height, width, 1))

    cv2.normalize(ir_data, ir_data, 0, max_data, cv2.NORM_MINMAX, dtype=image_dtype)
    ir_data = ir_data.astype(data_type)
    return cv2.cvtColor(ir_data, cv2.COLOR_GRAY2RGB)

def get_imu_text(frame, name):
    """Format IMU data"""
    if not frame:
        return []
    return [
        f"{name} x: {frame.get_x():.2f}",
        f"{name} y: {frame.get_y():.2f}",
        f"{name} z: {frame.get_z():.2f}"
    ]


def create_display(frames, enabled_sensor_types, width=1280, height=720):
    """Create display window with correct dynamic layout"""
    sensor_type_to_name = {
        OBSensorType.COLOR_SENSOR: 'color',
        OBSensorType.DEPTH_SENSOR: 'depth',
        OBSensorType.LEFT_IR_SENSOR: 'left_ir',
        OBSensorType.RIGHT_IR_SENSOR: 'right_ir',
        OBSensorType.IR_SENSOR: 'ir'
    }
    video_keys = []
    for sensor_type in enabled_sensor_types:
        if sensor_type in sensor_type_to_name:
            video_keys.append(sensor_type_to_name[sensor_type])

    video_frames = [frames.get(k) for k in video_keys]
    has_imu = 'imu' in frames
    num_videos = len(video_frames)

    total_elements = num_videos + (1 if has_imu else 0)

    if total_elements == 1:
        grid_cols, grid_rows = 1, 1
    elif total_elements <= 2:
        grid_cols, grid_rows = 2, 1
    elif total_elements <= 4:
        grid_cols, grid_rows = 2, 2
    elif total_elements <= 5:
        grid_cols, grid_rows = 2, 3
    else:
        raise ValueError("Too many elements! Maximum supported is 5.")

    cell_w = width // grid_cols
    cell_h = height // grid_rows

    display = np.zeros((cell_h * grid_rows, cell_w * grid_cols, 3), dtype=np.uint8)

    for idx, frame in enumerate(video_frames):
        row = idx // grid_cols
        col = idx % grid_cols
        x_start = col * cell_w
        y_start = row * cell_h
        if frame is not None:
            if total_elements == 1:
                resized = cv2.resize(frame, (width, height))
                display = resized
            else:
                resized = cv2.resize(frame, (cell_w, cell_h))
                display[y_start:y_start + cell_h, x_start:x_start + cell_w] = resized
        else:
            cv2.rectangle(display, (x_start, y_start), (x_start + cell_w, y_start + cell_h), (0, 0, 0), -1)

    if has_imu:
        imu_idx = num_videos
        row = imu_idx // grid_cols
        col = imu_idx % grid_cols
        x_start = col * cell_w
        y_start = row * cell_h
        cv2.rectangle(display, (x_start, y_start), (x_start + cell_w, y_start + cell_h), (50, 50, 50), -1)

        y_offset = y_start + 30
        for data_type in ['accel', 'gyro']:
            text_lines = get_imu_text(frames['imu'].get(data_type), data_type.title())
            for i, line in enumerate(text_lines):
                cv2.putText(display, line, (x_start + 10, y_offset + i * 20),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 1, cv2.LINE_AA)
            y_offset += 100

    return display


def main():
    # Window settings
    WINDOW_NAME = "MultiStream Playback(.bag) Viewer"
    file_path = input("Enter output filename (.bag) and press Enter to start playbacking: ")

    DISPLAY_WIDTH = 1280
    DISPLAY_HEIGHT = 720
    # initialize playback
    playback = PlaybackDevice(file_path)
    # Initialize camera
    pipeline, config, enabled_sensor_types = setup_camera(playback)
    device = pipeline.get_device()

    def on_status_change(status):
        print(f"[Callback] status changed: {status}")
        if status == PlaybackStatus.Stopped:
            pipeline.stop()
            pipeline.start(config)
    playback.set_playback_status_change_callback(on_status_change)

    pipeline.start(config)

    cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_NORMAL)
    cv2.resizeWindow(WINDOW_NAME, DISPLAY_WIDTH, DISPLAY_HEIGHT)
    processed_frames = {}
    while True:
        # Get all frames
        frames = pipeline.wait_for_frames(100)
        if not frames:
            continue

        # Process color image
        color_frame = frames.get_frame(OBFrameType.COLOR_FRAME)
        if color_frame:
            processed_frames['color'] = process_color(color_frame.as_video_frame())
        # Process depth image
        depth_frame = frames.get_frame(OBFrameType.DEPTH_FRAME)
        if depth_frame:
            processed_frames['depth'] = process_depth(depth_frame.as_video_frame())
        # Process left IR
        left_ir_frame = frames.get_frame(OBFrameType.LEFT_IR_FRAME)
        processed_frames['left_ir'] = process_ir(left_ir_frame, 'left_ir')

        # Process right IR
        right_ir_frame = frames.get_frame(OBFrameType.RIGHT_IR_FRAME)
        processed_frames['right_ir'] = process_ir(right_ir_frame, 'right_ir')

        # Process mono IR
        ir_frame = frames.get_ir_frame()
        processed_frames['ir'] = process_ir(ir_frame, 'ir')

        # Process IMU data
        accel = frames.get_frame(OBFrameType.ACCEL_FRAME)
        gyro = frames.get_frame(OBFrameType.GYRO_FRAME)
        if accel and gyro:
            processed_frames['imu'] = {
                'accel': accel.as_accel_frame(),
                'gyro': gyro.as_gyro_frame()
            }

        # create display
        display = create_display(processed_frames, enabled_sensor_types, DISPLAY_WIDTH, DISPLAY_HEIGHT)
        cv2.imshow(WINDOW_NAME, display)

        # check exit key
        key = cv2.waitKey(1) & 0xFF
        if key in (ord('q'), 27):
            break

    pipeline.stop()
    playback = None
    cv2.destroyAllWindows()


if __name__ == "__main__":
    main()
```
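As a minimal usage sketch (assuming the same pyorbbecsdk API used in playback.py above), the playback flow can also be driven without the OpenCV viewer. The snippet reuses only calls that appear in the example — `PlaybackDevice`, `Pipeline`, `Config`, `wait_for_frames`, `get_frame` — and the `.bag` path is a placeholder.

```python
# Minimal headless playback sketch (assumed API, mirroring playback.py above).
from pyorbbecsdk import *

def count_color_frames(bag_path="recording.bag", max_reads=300):
    """Replay a recorded .bag and count reads that contain a color frame."""
    playback = PlaybackDevice(bag_path)   # bag_path is a placeholder
    pipeline = Pipeline(playback)
    config = Config()
    config.enable_stream(OBSensorType.COLOR_SENSOR)
    pipeline.start(config)
    count = 0
    for _ in range(max_reads):
        frames = pipeline.wait_for_frames(100)
        if frames and frames.get_frame(OBFrameType.COLOR_FRAME):
            count += 1
    pipeline.stop()
    return count
```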
pyorbbecsdk/examples/point_cloud.py (new file)
@@ -0,0 +1,90 @@
```python
# ******************************************************************************
# Copyright (c) 2024 Orbbec 3D Technology, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************

import os
import numpy as np

from pyorbbecsdk import *

save_points_dir = os.path.join(os.getcwd(), "point_clouds")
if not os.path.exists(save_points_dir):
    os.mkdir(save_points_dir)

def main():
    pipeline = Pipeline()
    config = Config()

    # Configure depth stream
    depth_profile_list = pipeline.get_stream_profile_list(OBSensorType.DEPTH_SENSOR)
    if depth_profile_list is None:
        print("No proper depth profile, cannot generate point cloud")
        return
    depth_profile = depth_profile_list.get_default_video_stream_profile()
    config.enable_stream(depth_profile)

    has_color_sensor = False
    try:
        # Configure color stream if available
        profile_list = pipeline.get_stream_profile_list(OBSensorType.COLOR_SENSOR)
        if profile_list is not None:
            color_profile = profile_list.get_default_video_stream_profile()
            config.enable_stream(color_profile)
            has_color_sensor = True
    except OBError as e:
        print(e)

    pipeline.enable_frame_sync()
    pipeline.start(config)

    # camera_param = pipeline.get_camera_param()
    align_filter = AlignFilter(align_to_stream=OBStreamType.COLOR_STREAM)
    point_cloud_filter = PointCloudFilter()
    # point_cloud_filter.set_camera_param(camera_param)

    while True:
        frames = pipeline.wait_for_frames(100)
        if frames is None:
            continue

        depth_frame = frames.get_depth_frame()
        if depth_frame is None:
            continue

        color_frame = frames.get_color_frame()
        if has_color_sensor and color_frame is None:
            continue

        frame = align_filter.process(frames)
        # scale = depth_frame.get_depth_scale()
        # point_cloud_filter.set_position_data_scaled(scale)

        point_format = OBFormat.RGB_POINT if has_color_sensor and color_frame is not None else OBFormat.POINT
        point_cloud_filter.set_create_point_format(point_format)

        point_cloud_frame = point_cloud_filter.process(frame)
        if point_cloud_frame is None:
            continue
        # save point cloud
        save_point_cloud_to_ply(os.path.join(save_points_dir, "point_cloud.ply"), point_cloud_frame)
        # save mesh to point cloud
        # save_point_cloud_to_ply(os.path.join(save_points_dir, "point_cloud.ply"), point_cloud_frame, False, True, 50)
        break
    print("stop pipeline")
    pipeline.stop()


if __name__ == "__main__":
    main()
```
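To sanity-check the `point_cloud.ply` file the example writes, a short sketch that only reads that file; it assumes Open3D is installed (Open3D is not listed in the example requirements).

```python
# Sketch: inspect the PLY written by point_cloud.py (assumes the open3d package).
import numpy as np
import open3d as o3d

pcd = o3d.io.read_point_cloud("point_clouds/point_cloud.ply")
points = np.asarray(pcd.points)  # N x 3 XYZ (units follow the device's depth scale)
print(f"loaded {len(points)} points")
if len(points) > 0:
    print("bounds:", points.min(axis=0), points.max(axis=0))
    o3d.visualization.draw_geometries([pcd])  # interactive viewer
```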
pyorbbecsdk/examples/post_processing.py (new file)
@@ -0,0 +1,119 @@
```python
# ******************************************************************************
# Copyright (c) 2024 Orbbec 3D Technology, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
import sys

import cv2
import numpy as np

from pyorbbecsdk import *

ESC_KEY = 27
PRINT_INTERVAL = 1  # seconds
MIN_DEPTH = 20  # 20mm
MAX_DEPTH = 10000  # 10000mm

def main():
    pipeline = Pipeline()
    config = Config()
    try:
        profile_list = pipeline.get_stream_profile_list(OBSensorType.DEPTH_SENSOR)
        depth_profile = profile_list.get_default_video_stream_profile()
        config.enable_stream(depth_profile)
    except Exception as e:
        print(e)
        return

    try:
        pipeline.enable_frame_sync()
    except Exception as e:
        print(e)

    try:
        pipeline.start(config)
    except Exception as e:
        print(e)
        return

    device = pipeline.get_device()
    assert device is not None
    depth_sensor = device.get_sensor(OBSensorType.DEPTH_SENSOR)
    assert depth_sensor is not None
    filter_list = depth_sensor.get_recommended_filters()
    assert filter_list is not None
    # print filter list
    for i in range(len(filter_list)):
        post_filter = filter_list[i]
        if post_filter:
            print("filter name: ", post_filter.get_name())
            print("filter is enabled: ", post_filter.is_enabled())

    while True:
        try:
            frames = pipeline.wait_for_frames(100)
            if not frames:
                continue
            depth_frame = frames.get_depth_frame()
            if not depth_frame:
                continue
            for i in range(len(filter_list)):
                post_filter = filter_list[i]
                if post_filter and post_filter.is_enabled() and depth_frame:
                    depth_data_size = depth_frame.get_data()
                    if len(depth_data_size) < (depth_frame.get_width() * depth_frame.get_height() * 2):
                        # print("depth data is not complete")
                        continue

                    new_depth_frame = post_filter.process(depth_frame)
                    depth_frame = new_depth_frame.as_depth_frame()
            # for Y16 format depth frame, print the distance of the center pixel every 30 frames
            width = depth_frame.get_width()
            height = depth_frame.get_height()
            scale = depth_frame.get_depth_scale()
            depth_format = depth_frame.get_format()
            if depth_format != OBFormat.Y16:
                print("depth format is not Y16")
                continue
            try:
                depth_data = np.frombuffer(depth_frame.get_data(), dtype=np.uint16)
                depth_data = depth_data.reshape((height, width))
            except ValueError:
                print("Failed to reshape depth data")
                continue

            depth_data = depth_data.astype(np.float32) * scale
            depth_data = np.where((depth_data > MIN_DEPTH) & (depth_data < MAX_DEPTH), depth_data, 0)
            depth_data = depth_data.astype(np.uint16)
            if depth_frame.get_format() == OBFormat.Y16 and depth_frame.get_index() % 30 == 0:
                # print the distance of the center pixel
                center_y = int(height / 2)
                center_x = int(width / 2)
                center_distance = depth_data[center_y, center_x]
                print("center distance: ", center_distance)
            depth_image = cv2.normalize(depth_data, None, 0, 255, cv2.NORM_MINMAX, dtype=cv2.CV_8U)
            depth_image = cv2.applyColorMap(depth_image, cv2.COLORMAP_JET)

            cv2.imshow("Depth Viewer", depth_image)
            key = cv2.waitKey(1)
            if key == ord('q') or key == ESC_KEY:
                break
        except KeyboardInterrupt:
            break
    cv2.destroyAllWindows()
    pipeline.stop()


if __name__ == "__main__":
    main()
```
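The core of the loop above is the raw Y16 → millimeter conversion and the MIN_DEPTH/MAX_DEPTH mask. A tiny self-contained sketch of just that step, on synthetic data with an assumed depth scale of 1.0:

```python
# Sketch: the same raw-Y16 -> millimeter conversion and range mask, on synthetic data.
import numpy as np

MIN_DEPTH, MAX_DEPTH = 20, 10000   # mm, as in the example above
scale = 1.0                        # stands in for depth_frame.get_depth_scale()
raw = np.array([[0, 15, 500], [2000, 12000, 65535]], dtype=np.uint16)

depth_mm = raw.astype(np.float32) * scale
depth_mm = np.where((depth_mm > MIN_DEPTH) & (depth_mm < MAX_DEPTH), depth_mm, 0)
print(depth_mm.astype(np.uint16))
# [[   0    0  500]
#  [2000    0    0]]
```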
pyorbbecsdk/examples/preset.py (new file)
@@ -0,0 +1,67 @@
```python
# ******************************************************************************
# Copyright (c) 2024 Orbbec 3D Technology, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************

import pyorbbecsdk as ob

def main():
    # Create a pipeline with default device
    pipe = ob.Pipeline()

    # Get the device from the pipeline
    device = pipe.get_device()

    try:
        while True:
            # Get preset list from device
            preset_list = device.get_available_preset_list()
            if len(preset_list) == 0:
                print("The current device does not support preset mode")
                break

            print("\nAvailable Presets:")
            for index in range(len(preset_list)):
                print(f" - {index}. {preset_list[index]}")

            # Print current preset name
            print(f"\nCurrent PresetName: {device.get_current_preset_name()}")

            # Select preset to load
            try:
                input_option = int(input("\nEnter index of preset to load (or -1 to exit): "))
                if input_option == -1:
                    break
                if input_option < 0 or input_option >= len(preset_list):
                    raise ValueError("Invalid index")
            except ValueError:
                print("Invalid input. Please enter a valid index.")
                continue

            preset_name = preset_list[input_option]

            # Load preset
            device.load_preset(preset_name)

            # Print current preset name
            print(f"\nPreset loaded. Current PresetName: {device.get_current_preset_name()}")

    except ob.OBError as e:
        print(f"Error: {str(e)}")
    finally:
        # Stop Pipeline
        pipe.stop()

if __name__ == "__main__":
    main()
```
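A non-interactive variant of the same preset flow, reusing only the calls shown in preset.py above (the available preset names come from the connected device):

```python
# Sketch: load the first available preset without the interactive prompt.
import pyorbbecsdk as ob

pipe = ob.Pipeline()
device = pipe.get_device()
presets = device.get_available_preset_list()
if len(presets) > 0:
    device.load_preset(presets[0])  # pick the first preset as an example
    print("Current preset:", device.get_current_preset_name())
pipe.stop()
```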
pyorbbecsdk/examples/pyorbbecsdk.cp313-win_amd64.pyd: binary file (contents not shown)
pyorbbecsdk/examples/quick_start.py (new file)
@@ -0,0 +1,90 @@
```python
# ******************************************************************************
# Copyright (c) 2024 Orbbec 3D Technology, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************

import cv2
import numpy as np
import time

from pyorbbecsdk import *
from utils import frame_to_bgr_image

ESC_KEY = 27
MIN_DEPTH = 20  # 20mm
MAX_DEPTH = 10000  # 10000mm

def main():
    pipeline = Pipeline()

    pipeline.start()
    print("Pipeline started successfully. Press 'q' or ESC to exit.")

    # Set window size
    window_width = 1280
    window_height = 720
    cv2.namedWindow("QuickStart Viewer", cv2.WINDOW_NORMAL)
    cv2.resizeWindow("QuickStart Viewer", window_width, window_height)

    while True:
        try:
            frames = pipeline.wait_for_frames(100)
            if frames is None:
                continue

            # Get color frame
            color_frame = frames.get_color_frame()
            if color_frame is None:
                continue
            color_image = frame_to_bgr_image(color_frame)

            # Get depth frame
            depth_frame = frames.get_depth_frame()
            if depth_frame is None:
                continue
            if depth_frame.get_format() != OBFormat.Y16:
                print("Depth format is not Y16")
                continue

            # Process depth data
            width = depth_frame.get_width()
            height = depth_frame.get_height()
            scale = depth_frame.get_depth_scale()

            depth_data = np.frombuffer(depth_frame.get_data(), dtype=np.uint16).reshape((height, width))
            depth_data = depth_data.astype(np.float32) * scale
            depth_data = np.where((depth_data > MIN_DEPTH) & (depth_data < MAX_DEPTH), depth_data, 0).astype(np.uint16)

            # Create depth visualization
            depth_image = cv2.normalize(depth_data, None, 0, 255, cv2.NORM_MINMAX, dtype=cv2.CV_8U)
            depth_image = cv2.applyColorMap(depth_image, cv2.COLORMAP_JET)

            # Resize and combine images
            color_image_resized = cv2.resize(color_image, (window_width // 2, window_height))
            depth_image_resized = cv2.resize(depth_image, (window_width // 2, window_height))
            combined_image = np.hstack((color_image_resized, depth_image_resized))

            cv2.imshow("QuickStart Viewer", combined_image)

            if cv2.waitKey(1) in [ord('q'), ESC_KEY]:
                break
        except KeyboardInterrupt:
            break

    cv2.destroyAllWindows()
    pipeline.stop()
    print("Pipeline stopped and all windows closed.")

if __name__ == "__main__":
    main()
```
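Instead of stacking color and depth side by side, the two images from the loop above can be alpha-blended. A sketch using standard OpenCV (`cv2.addWeighted`), assuming `color_image` and `depth_image` as defined in quick_start.py:

```python
# Sketch: overlay the depth colormap on the color image instead of stacking them.
# Assumes color_image (BGR) and depth_image (colormapped) from the loop above.
import cv2

def overlay_depth(color_image, depth_image, alpha=0.5):
    # Resize depth to the color image's resolution, then blend the two.
    depth_resized = cv2.resize(depth_image, (color_image.shape[1], color_image.shape[0]))
    return cv2.addWeighted(color_image, 1.0 - alpha, depth_resized, alpha, 0)

# e.g. inside the loop: cv2.imshow("QuickStart Viewer", overlay_depth(color_image, depth_image))
```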