pyorbbec 1.0.1.6__py3-none-any.whl → 1.0.1.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {pyorbbec-1.0.1.6.dist-info → pyorbbec-1.0.1.8.dist-info}/METADATA +1 -1
- pyorbbec-1.0.1.8.dist-info/RECORD +59 -0
- pyorbbecsdk/OrbbecSDKConfig.xml +2332 -0
- pyorbbecsdk/examples/.gitkeep +0 -0
- pyorbbecsdk/examples/OrbbecSDK.dll +0 -0
- pyorbbecsdk/examples/OrbbecSDK.lib +0 -0
- pyorbbecsdk/examples/README.md +26 -0
- pyorbbecsdk/examples/__pycache__/utils.cpython-313.pyc +0 -0
- pyorbbecsdk/examples/callback.py +303 -0
- pyorbbecsdk/examples/color.py +64 -0
- pyorbbecsdk/examples/coordinate_transform.py +184 -0
- pyorbbecsdk/examples/depth.py +107 -0
- pyorbbecsdk/examples/depth_work_mode.py +50 -0
- pyorbbecsdk/examples/device_firmware_update.py +155 -0
- pyorbbecsdk/examples/device_optional_depth_presets_update.py +142 -0
- pyorbbecsdk/examples/enumerate.py +118 -0
- pyorbbecsdk/examples/extensions/depthengine/depthengine.dll +0 -0
- pyorbbecsdk/examples/extensions/depthengine/depthengine.lib +0 -0
- pyorbbecsdk/examples/extensions/filters/FilterProcessor.dll +0 -0
- pyorbbecsdk/examples/extensions/filters/ob_priv_filter.dll +0 -0
- pyorbbecsdk/examples/extensions/firmwareupdater/firmwareupdater.dll +0 -0
- pyorbbecsdk/examples/extensions/frameprocessor/ob_frame_processor.dll +0 -0
- pyorbbecsdk/examples/hdr.py +216 -0
- pyorbbecsdk/examples/hot_plug.py +160 -0
- pyorbbecsdk/examples/hw_d2c_align.py +135 -0
- pyorbbecsdk/examples/imu.py +60 -0
- pyorbbecsdk/examples/infrared.py +115 -0
- pyorbbecsdk/examples/logger.py +55 -0
- pyorbbecsdk/examples/metadata.py +64 -0
- pyorbbecsdk/examples/multi_device.py +169 -0
- pyorbbecsdk/examples/multi_streams.py +219 -0
- pyorbbecsdk/examples/net_device.py +158 -0
- pyorbbecsdk/examples/playback.py +277 -0
- pyorbbecsdk/examples/point_cloud.py +90 -0
- pyorbbecsdk/examples/post_processing.py +119 -0
- pyorbbecsdk/examples/preset.py +67 -0
- pyorbbecsdk/examples/pyorbbecsdk.cp313-win_amd64.pyd +0 -0
- pyorbbecsdk/examples/quick_start.py +90 -0
- pyorbbecsdk/examples/recorder.py +236 -0
- pyorbbecsdk/examples/requirements.txt +9 -0
- pyorbbecsdk/examples/save_image_to_disk.py +106 -0
- pyorbbecsdk/examples/sync_align.py +109 -0
- pyorbbecsdk/examples/two_devices_sync.py +233 -0
- pyorbbecsdk/examples/utils.py +127 -0
- pyorbbec-1.0.1.6.dist-info/RECORD +0 -17
- {pyorbbec-1.0.1.6.dist-info → pyorbbec-1.0.1.8.dist-info}/WHEEL +0 -0
- {pyorbbec-1.0.1.6.dist-info → pyorbbec-1.0.1.8.dist-info}/licenses/LICENSE +0 -0
- {pyorbbec-1.0.1.6.dist-info → pyorbbec-1.0.1.8.dist-info}/licenses/NOTICE +0 -0
- {pyorbbec-1.0.1.6.dist-info → pyorbbec-1.0.1.8.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,216 @@
|
|
1
|
+
# ******************************************************************************
|
2
|
+
# Copyright (c) 2024 Orbbec 3D Technology, Inc
|
3
|
+
#
|
4
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
5
|
+
# you may not use this file except in compliance with the License.
|
6
|
+
# You may obtain a copy of the License at
|
7
|
+
#
|
8
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
9
|
+
#
|
10
|
+
# Unless required by applicable law or agreed to in writing, software
|
11
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
12
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13
|
+
# See the License for the specific language governing permissions and
|
14
|
+
# limitations under the License.
|
15
|
+
# ******************************************************************************
|
16
|
+
import sys
|
17
|
+
import cv2
|
18
|
+
import numpy as np
|
19
|
+
from pyorbbecsdk import *
|
20
|
+
|
21
|
+
ESC_KEY = 27
|
22
|
+
PRINT_INTERVAL = 1 # seconds
|
23
|
+
MIN_DEPTH = 20 # 20mm
|
24
|
+
MAX_DEPTH = 10000 # 10000mm
|
25
|
+
|
26
|
+
|
27
|
+
def add_text_to_image(image, text, position):
    """Draw *text* onto *image* at *position*, over a black backing box.

    Args:
        image: Image to annotate (modified in place by the cv2 calls).
        text: Text to add.
        position: Tuple of (x, y) coordinates for the text origin.

    Returns:
        The annotated image.
    """
    typeface = cv2.FONT_HERSHEY_SIMPLEX
    scale = 0.8
    weight = 2
    white = (255, 255, 255)
    x, y = position[0], position[1]

    # A filled black rectangle behind the text keeps it readable on busy frames.
    (box_w, box_h), _ = cv2.getTextSize(text, typeface, scale, weight)
    cv2.rectangle(image, (x, y - box_h - 5), (x + box_w, y + 5), (0, 0, 0), -1)

    return cv2.putText(image, text, position, typeface, scale, white, weight)
|
51
|
+
|
52
|
+
|
53
|
+
def enhance_contrast(image, clip_limit=3.0, tile_grid_size=(8, 8)):
    """Boost local contrast with CLAHE.

    Single-channel images are equalized directly; 3-channel (BGR) images are
    equalized on the L channel in LAB space so colors are preserved.
    """
    equalizer = cv2.createCLAHE(clipLimit=clip_limit, tileGridSize=tile_grid_size)
    if len(image.shape) != 3:
        # Grayscale input: apply CLAHE directly.
        return equalizer.apply(image)
    lab = cv2.cvtColor(image, cv2.COLOR_BGR2LAB)
    lightness, chroma_a, chroma_b = cv2.split(lab)
    lab = cv2.merge((equalizer.apply(lightness), chroma_a, chroma_b))
    return cv2.cvtColor(lab, cv2.COLOR_LAB2BGR)
|
67
|
+
|
68
|
+
|
69
|
+
def main(argv):
    """Run the HDR merge demo.

    Streams depth plus left/right IR, applies the device-side HDR exposure
    bracketing and an HDRMergeFilter, and shows a 2x2 comparison view until
    'q' or ESC is pressed.
    """
    pipeline = Pipeline()
    device = pipeline.get_device()
    # HDR merge needs device-side support for the HDR exposure config property.
    is_support_hdr = device.is_property_supported(OBPropertyID.OB_STRUCT_DEPTH_HDR_CONFIG,
                                                  OBPermissionType.PERMISSION_READ_WRITE)
    if not is_support_hdr:  # fixed: was the anti-idiom `== False`
        print("Current default device does not support HDR merge")
        return
    config = Config()

    try:
        # Enable depth stream
        profile_list = pipeline.get_stream_profile_list(OBSensorType.DEPTH_SENSOR)
        depth_profile = profile_list.get_default_video_stream_profile()
        config.enable_stream(depth_profile)

        # Enable IR streams
        left_profile_list = pipeline.get_stream_profile_list(OBSensorType.LEFT_IR_SENSOR)
        right_profile_list = pipeline.get_stream_profile_list(OBSensorType.RIGHT_IR_SENSOR)
        config.enable_stream(left_profile_list.get_default_video_stream_profile())
        config.enable_stream(right_profile_list.get_default_video_stream_profile())
        # Only deliver frame sets that contain every enabled stream.
        config.set_frame_aggregate_output_mode(OBFrameAggregateOutputMode.FULL_FRAME_REQUIRE)
    except Exception as e:
        print(e)
        return

    try:
        pipeline.enable_frame_sync()
    except Exception as e:
        print(e)

    try:
        pipeline.start(config)
    except Exception as e:
        print(e)
        return

    # Configure the two HDR exposure/gain brackets on the device.
    # Fixed: use a distinct name instead of re-binding `config` (the original
    # shadowed the stream Config with the OBHdrConfig), and reuse the `device`
    # handle fetched above instead of calling pipeline.get_device() again.
    hdr_config = OBHdrConfig()
    hdr_config.enable = True
    hdr_config.exposure_1 = 7500
    hdr_config.gain_1 = 24
    hdr_config.exposure_2 = 50
    hdr_config.gain_2 = 16
    device.set_hdr_config(hdr_config)
    hdr_filter = HDRMergeFilter()

    # Create window for visualization
    cv2.namedWindow("HDR Merge Viewer", cv2.WINDOW_NORMAL)
    cv2.resizeWindow("HDR Merge Viewer", 1280, 960)  # Adjusted for 2x2 layout

    while True:
        try:
            frames = pipeline.wait_for_frames(100)
            if not frames:
                print("No frames received")
                continue

            # Get all frames
            depth_frame = frames.get_depth_frame()
            left_ir_frame = frames.get_frame(OBFrameType.LEFT_IR_FRAME)
            right_ir_frame = frames.get_frame(OBFrameType.RIGHT_IR_FRAME)
            if not all([depth_frame, left_ir_frame, right_ir_frame]):
                print("Not All frames received")
                continue

            # Process with HDR merge
            merged_frame = hdr_filter.process(frames)
            if not merged_frame:
                continue
            merged_depth_frame = merged_frame.as_frame_set().get_depth_frame()

            # Convert frames to displayable images
            depth_image = create_depth_image(depth_frame)
            merged_depth_image = create_depth_image(merged_depth_frame)
            ir_left_image = create_ir_image(left_ir_frame)
            ir_right_image = create_ir_image(right_ir_frame)

            # Enhance contrast for all images
            depth_image = enhance_contrast(depth_image, clip_limit=4.0)
            ir_left_image = enhance_contrast(ir_left_image, clip_limit=4.0)
            ir_right_image = enhance_contrast(ir_right_image, clip_limit=4.0)
            merged_depth_image = enhance_contrast(merged_depth_image, clip_limit=4.0)

            # Resize everything to the depth image's size so the tiles align.
            h, w = depth_image.shape[:2]
            ir_left_image = cv2.resize(ir_left_image, (w, h))
            ir_right_image = cv2.resize(ir_right_image, (w, h))
            merged_depth_image = cv2.resize(merged_depth_image, (w, h))

            # Add text annotations to images
            ir_left_image = add_text_to_image(ir_left_image, "Left IR (HDR)", (10, 30))
            ir_right_image = add_text_to_image(ir_right_image, "Right IR (HDR)", (10, 30))
            depth_image = add_text_to_image(depth_image, "Original Depth (HDR)", (10, 30))
            merged_depth_image = add_text_to_image(merged_depth_image, "HDR Merged Depth", (10, 30))

            # 2x2 layout: IR pair on top, original vs merged depth below.
            top_row = np.hstack((ir_left_image, ir_right_image))
            bottom_row = np.hstack((depth_image, merged_depth_image))
            display_image = np.vstack((top_row, bottom_row))

            cv2.imshow("HDR Merge Viewer", display_image)
            key = cv2.waitKey(1)
            if key == ord('q') or key == ESC_KEY:
                break

        except KeyboardInterrupt:
            break

    cv2.destroyAllWindows()
    pipeline.stop()
|
184
|
+
|
185
|
+
|
186
|
+
def create_depth_image(depth_frame):
    """Colorize a depth frame as a JET-colormapped 8-bit image.

    Raw uint16 values are scaled by the frame's depth scale, values outside
    (MIN_DEPTH, MAX_DEPTH) are zeroed, and the result is min-max normalized
    before the color map is applied.
    """
    h = depth_frame.get_height()
    w = depth_frame.get_width()
    units_to_mm = depth_frame.get_depth_scale()

    raw = np.frombuffer(depth_frame.get_data(), dtype=np.uint16).reshape((h, w))
    depth_mm = raw.astype(np.float32) * units_to_mm
    in_range = (depth_mm > MIN_DEPTH) & (depth_mm < MAX_DEPTH)
    depth_mm = np.where(in_range, depth_mm, 0).astype(np.uint16)

    normalized = cv2.normalize(depth_mm, None, 0, 255, cv2.NORM_MINMAX, dtype=cv2.CV_8U)
    return cv2.applyColorMap(normalized, cv2.COLORMAP_JET)
|
200
|
+
|
201
|
+
|
202
|
+
def create_ir_image(ir_frame):
    """Convert an IR frame to a normalized 3-channel displayable image.

    # assumes the IR payload is 8-bit per pixel (Y8) — TODO confirm for
    # devices that emit 16-bit IR formats.
    """
    ir_frame = ir_frame.as_video_frame()
    h = ir_frame.get_height()
    w = ir_frame.get_width()

    raw = np.frombuffer(ir_frame.get_data(), dtype=np.uint8).reshape((h, w))
    stretched = cv2.normalize(raw, None, 0, 255, cv2.NORM_MINMAX, dtype=cv2.CV_8U)
    return cv2.cvtColor(stretched, cv2.COLOR_GRAY2BGR)
|
213
|
+
|
214
|
+
|
215
|
+
# Script entry point: forward the CLI arguments (minus the program name).
if __name__ == "__main__":
    main(sys.argv[1:])
|
@@ -0,0 +1,160 @@
|
|
1
|
+
# ******************************************************************************
|
2
|
+
# Copyright (c) 2024 Orbbec 3D Technology, Inc
|
3
|
+
#
|
4
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
5
|
+
# you may not use this file except in compliance with the License.
|
6
|
+
# You may obtain a copy of the License at
|
7
|
+
#
|
8
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
9
|
+
#
|
10
|
+
# Unless required by applicable law or agreed to in writing, software
|
11
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
12
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13
|
+
# See the License for the specific language governing permissions and
|
14
|
+
# limitations under the License.
|
15
|
+
# ******************************************************************************
|
16
|
+
|
17
|
+
import threading
|
18
|
+
import time
|
19
|
+
from typing import Optional
|
20
|
+
|
21
|
+
from pyorbbecsdk import *
|
22
|
+
|
23
|
+
# Global variables
|
24
|
+
device: Optional[Device] = None
|
25
|
+
pipeline: Optional[Pipeline] = None
|
26
|
+
device_lock = threading.Lock()
|
27
|
+
|
28
|
+
def start_stream(device: Device):
    """Starts the stream for both color and depth sensors."""
    global pipeline
    if device is None:
        print("No device connected")
        return

    config = Config()
    print("Try to reset pipeline")
    pipeline = Pipeline(device)
    print("Try to enable color stream")

    # Enable color, then depth; failure on either is reported but non-fatal,
    # matching the original per-stream try/except blocks.
    for sensor_type, label in ((OBSensorType.COLOR_SENSOR, "color"),
                               (OBSensorType.DEPTH_SENSOR, "depth")):
        try:
            profiles = pipeline.get_stream_profile_list(sensor_type)
            config.enable_stream(profiles.get_default_video_stream_profile())
        except Exception as e:
            print(f"Failed to enable {label} stream: {e}")

    print("Starting the stream...")
    pipeline.start(config)
|
57
|
+
|
58
|
+
def stop_stream():
    """Stop the shared pipeline when one is active; report otherwise."""
    global pipeline
    if pipeline is not None:
        # Stop first, then clear the global so a failed stop leaves state intact.
        pipeline.stop()
        pipeline = None
    else:
        print("Pipeline is not started")
|
66
|
+
|
67
|
+
def on_device_connected_callback(device_list: DeviceList):
    """Callback when a new device is connected.

    Adopts the first reported device and starts streaming, unless a device
    is already held.
    """
    global device
    if len(device_list) < 1:
        return

    print("Device connected")
    with device_lock:
        # Hold exactly one device at a time.
        if device is not None:
            print("Device is already connected")
            return

        print("Try to get device")
        device = device_list[0]
        print("Try to start stream")
        start_stream(device)
        print("Start stream successfully")
|
85
|
+
|
86
|
+
def on_device_disconnected_callback(device_list: DeviceList):
    """Callback when a device is disconnected; clears the shared device handle."""
    global device, pipeline
    if len(device_list) < 1:
        return

    print("Device disconnected")
    try:
        with device_lock:
            print("reset device ...")
            # The main loop checks `device is not None` before using the
            # pipeline, so clearing the handle is enough to pause capture.
            device = None
            print("reset device successfully")
    except OBError as e:
        print(e)
    print("Device disconnected successfully")
|
101
|
+
|
102
|
+
def on_new_frame_callback(frame: Frame):
    """Log basic metadata for a newly captured frame; ignores None."""
    if frame is not None:
        print(f"{frame.get_type()} frame, width={frame.get_width()}, height={frame.get_height()}, format={frame.get_format()}, timestamp={frame.get_timestamp_us()}us")
|
107
|
+
|
108
|
+
def on_device_changed_callback(disconn_device_list: DeviceList, conn_device_list: DeviceList):
    """Handles device changes by invoking appropriate connect/disconnect callbacks."""
    # Connections are handled before disconnections; both helpers ignore
    # empty lists, so passing each list through unconditionally is safe.
    on_device_connected_callback(conn_device_list)
    on_device_disconnected_callback(disconn_device_list)
|
112
|
+
|
113
|
+
def main():
    """Main program loop to handle device connection and frame processing.

    Registers hot-plug callbacks, adopts any already-connected device, then
    polls frames until Ctrl-C. The pipeline is stopped on exit.
    """
    ctx = Context()

    # Set callback for device changes (connect/disconnect)
    ctx.set_device_changed_callback(on_device_changed_callback)

    # Check for currently connected devices
    device_list = ctx.query_devices()
    on_device_connected_callback(device_list)

    global pipeline, device

    while True:
        try:
            with device_lock:
                if pipeline is not None and device is not None:
                    # Wait for a new set of frames
                    frames: FrameSet = pipeline.wait_for_frames(100)
                else:
                    # Fixed: the original `continue`d here with no sleep,
                    # busy-spinning on the lock while no device was connected.
                    frames = None
            if frames is None:
                time.sleep(0.001)  # Avoid busy waiting
                continue

            # Get color and depth frames
            color_frame = frames.get_color_frame()
            depth_frame = frames.get_depth_frame()

            # Process each frame
            on_new_frame_callback(color_frame)
            on_new_frame_callback(depth_frame)
        except KeyboardInterrupt:
            break
        except OBError as e:
            print(f"Error during frame capture: {e}")
            continue

    # Stop the pipeline on exit
    print("Stopping the pipeline...")
    try:
        if pipeline is not None:
            pipeline.stop()
    except OBError as e:
        print(f"Error during pipeline stop: {e}")
|
158
|
+
|
159
|
+
# Script entry point.
if __name__ == "__main__":
    main()
|
@@ -0,0 +1,135 @@
|
|
1
|
+
# ******************************************************************************
|
2
|
+
# Copyright (c) 2024 Orbbec 3D Technology, Inc
|
3
|
+
#
|
4
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
5
|
+
# you may not use this file except in compliance with the License.
|
6
|
+
# You may obtain a copy of the License at
|
7
|
+
#
|
8
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
9
|
+
#
|
10
|
+
# Unless required by applicable law or agreed to in writing, software
|
11
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
12
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13
|
+
# See the License for the specific language governing permissions and
|
14
|
+
# limitations under the License.
|
15
|
+
# ******************************************************************************
|
16
|
+
|
17
|
+
from pyorbbecsdk import *
|
18
|
+
import cv2
|
19
|
+
import numpy as np
|
20
|
+
from utils import frame_to_bgr_image
|
21
|
+
|
22
|
+
def get_stream_config(pipeline: Pipeline):
    """
    Build a Config pairing an RGB color profile with a hardware-aligned
    depth profile (HW D2C).

    Args:
        pipeline (Pipeline): The pipeline object.

    Returns:
        Config: Configuration with both streams enabled and hardware
        alignment set, or None when no suitable profile pair exists or an
        error occurs.
    """
    config = Config()
    try:
        # All color profiles offered by the device.
        profile_list = pipeline.get_stream_profile_list(OBSensorType.COLOR_SENSOR)
        assert profile_list is not None

        for idx in range(len(profile_list)):
            color_profile = profile_list[idx]

            # Only RGB color profiles are considered for alignment.
            if color_profile.get_format() != OBFormat.RGB:
                continue

            # Depth profiles the hardware can align to this color profile.
            hw_d2c_profile_list = pipeline.get_d2c_depth_profile_list(color_profile, OBAlignMode.HW_MODE)
            if len(hw_d2c_profile_list) == 0:
                continue

            # Take the first compatible depth profile.
            hw_d2c_profile = hw_d2c_profile_list[0]
            print("hw_d2c_profile: ", hw_d2c_profile)

            config.enable_stream(hw_d2c_profile)
            config.enable_stream(color_profile)
            config.set_align_mode(OBAlignMode.HW_MODE)
            return config
    except Exception as e:
        print(e)
        return None
    # No RGB profile had a hardware-alignable depth counterpart.
    return None
|
66
|
+
|
67
|
+
def main():
    """Stream hardware-aligned depth + color, blend them, and display.

    Runs until 'q' or ESC is pressed, then stops the pipeline.
    """
    # Create a pipeline object
    pipeline = Pipeline()

    # Get the stream configuration
    config = get_stream_config(pipeline)
    if config is None:
        return

    # Start the pipeline
    pipeline.start(config)

    # Set the depth range
    min_depth = 20  # Minimum depth value, keep closer depths
    max_depth = 10000  # Maximum depth value, allow far depths to be lost

    # Fixed: create and size the display window once, before the capture
    # loop — the original recreated it on every frame.
    cv2.namedWindow("HW D2C Align Viewer", cv2.WINDOW_NORMAL)
    cv2.resizeWindow("HW D2C Align Viewer", 640, 480)

    while True:
        # Wait for frames
        frames = pipeline.wait_for_frames(100)
        if frames is None:
            continue

        # Get the color and depth frames
        color_frame = frames.get_color_frame()
        depth_frame = frames.get_depth_frame()
        if not color_frame or not depth_frame:
            continue
        if depth_frame.get_format() != OBFormat.Y16:
            print("depth format is not Y16")
            continue

        # Convert the color frame to a BGR image
        color_image = frame_to_bgr_image(color_frame)
        if color_image is None:
            print("Failed to convert frame to image")
            continue

        # Depth payload is uint16; scale to millimeters via the depth scale.
        depth_data = np.frombuffer(depth_frame.get_data(), dtype=np.uint16).reshape(
            (depth_frame.get_height(), depth_frame.get_width()))
        depth_data = depth_data.astype(np.float32) * depth_frame.get_depth_scale()

        # Apply custom depth range, clip depth data
        depth_data = np.clip(depth_data, min_depth, max_depth)

        # Normalize depth data for display
        depth_image = cv2.normalize(depth_data, None, 0, 255, cv2.NORM_MINMAX)
        depth_image = cv2.applyColorMap(depth_image.astype(np.uint8), cv2.COLORMAP_JET)

        # Blend the depth and color images
        blended_image = cv2.addWeighted(color_image, 0.5, depth_image, 0.5, 0)

        # Display the result
        cv2.imshow("HW D2C Align Viewer", blended_image)
        if cv2.waitKey(1) in [ord('q'), 27]:  # 27 is the ESC key
            break

    # Stop the pipeline
    pipeline.stop()
|
133
|
+
|
134
|
+
# Script entry point.
if __name__ == "__main__":
    main()
|
@@ -0,0 +1,60 @@
|
|
1
|
+
# ******************************************************************************
|
2
|
+
# Copyright (c) 2024 Orbbec 3D Technology, Inc
|
3
|
+
#
|
4
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
5
|
+
# you may not use this file except in compliance with the License.
|
6
|
+
# You may obtain a copy of the License at
|
7
|
+
#
|
8
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
9
|
+
#
|
10
|
+
# Unless required by applicable law or agreed to in writing, software
|
11
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
12
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13
|
+
# See the License for the specific language governing permissions and
|
14
|
+
# limitations under the License.
|
15
|
+
# ******************************************************************************
|
16
|
+
import time
|
17
|
+
import cv2
|
18
|
+
import numpy as np
|
19
|
+
from pyorbbecsdk import *
|
20
|
+
|
21
|
+
ESC_KEY = 27
|
22
|
+
|
23
|
+
def main():
    """Stream accelerometer and gyroscope frames, printing every 50th sample.

    Runs until 'q'/ESC or Ctrl-C, then stops the pipeline.
    """
    config = Config()
    pipeline = Pipeline()
    config.enable_accel_stream()
    config.enable_gyro_stream()
    # Only deliver frame sets that contain both IMU streams.
    config.set_frame_aggregate_output_mode(OBFrameAggregateOutputMode.FULL_FRAME_REQUIRE)
    pipeline.start(config)
    while True:
        try:
            frames = pipeline.wait_for_frames(100)
            if frames is None:
                continue

            accel_frame = frames.get_frame(OBFrameType.ACCEL_FRAME)
            # Fixed: guard before converting — the original called
            # as_accel_frame() on a possibly-None result (AttributeError).
            if accel_frame is not None:
                accel_frame = accel_frame.as_accel_frame()

            if accel_frame is not None:
                accel_index = accel_frame.get_index()
                if accel_index % 50 == 0:
                    print("AccelFrame: ts={}".format(accel_frame.get_timestamp()))
                    print("AccelFrame: x={}, y={}, z={}".format(accel_frame.get_x(), accel_frame.get_y(), accel_frame.get_z()))

            gyro_frame = frames.get_frame(OBFrameType.GYRO_FRAME)
            # Same guard for the gyro frame.
            if gyro_frame is not None:
                gyro_frame = gyro_frame.as_gyro_frame()
            if gyro_frame is not None:
                gyro_index = gyro_frame.get_index()
                if gyro_index % 50 == 0:
                    print("GyroFrame: ts={}".format(gyro_frame.get_timestamp()))
                    print("GyroFrame: x={}, y={}, z={}".format(gyro_frame.get_x(), gyro_frame.get_y(), gyro_frame.get_z()))

            key = cv2.waitKey(1)
            if key == ord('q') or key == ESC_KEY:
                break
        except KeyboardInterrupt:
            break

    # Fixed: release the device streams on exit — the original never
    # stopped the pipeline.
    pipeline.stop()
|
58
|
+
|
59
|
+
# Script entry point.
if __name__ == "__main__":
    main()
|
@@ -0,0 +1,115 @@
|
|
1
|
+
# ******************************************************************************
|
2
|
+
# Copyright (c) 2024 Orbbec 3D Technology, Inc
|
3
|
+
#
|
4
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
5
|
+
# you may not use this file except in compliance with the License.
|
6
|
+
# You may obtain a copy of the License at
|
7
|
+
#
|
8
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
9
|
+
#
|
10
|
+
# Unless required by applicable law or agreed to in writing, software
|
11
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
12
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13
|
+
# See the License for the specific language governing permissions and
|
14
|
+
# limitations under the License.
|
15
|
+
# ******************************************************************************
|
16
|
+
|
17
|
+
import cv2
|
18
|
+
import numpy as np
|
19
|
+
from pyorbbecsdk import *
|
20
|
+
|
21
|
+
ESC_KEY = 27
|
22
|
+
|
23
|
+
|
24
|
+
def process_ir_frame(ir_frame):
    """Convert an IR frame (Y8, MJPG, or 16-bit) into a 3-channel image.

    Returns None when the input is None or MJPG decoding fails. The output
    is min-max normalized and expanded from grayscale to 3 channels.
    """
    if ir_frame is None:
        return None
    ir_frame = ir_frame.as_video_frame()
    ir_data = np.asanyarray(ir_frame.get_data())
    width = ir_frame.get_width()
    height = ir_frame.get_height()
    ir_format = ir_frame.get_format()

    if ir_format == OBFormat.Y8:
        # 8-bit grayscale: just reshape to (H, W, 1).
        ir_data = np.resize(ir_data, (height, width, 1))
        data_type = np.uint8
        image_dtype = cv2.CV_8UC1
        max_data = 255
    elif ir_format == OBFormat.MJPG:
        # Compressed stream: decode first, then reshape.
        ir_data = cv2.imdecode(ir_data, cv2.IMREAD_UNCHANGED)
        data_type = np.uint8
        image_dtype = cv2.CV_8UC1
        max_data = 255
        if ir_data is None:
            print("decode mjpeg failed")
            return None
        ir_data = np.resize(ir_data, (height, width, 1))
    else:
        # Fallback: treat the payload as 16-bit samples (e.g. Y16).
        ir_data = np.frombuffer(ir_data, dtype=np.uint16)
        data_type = np.uint16
        image_dtype = cv2.CV_16UC1
        max_data = 65535
        ir_data = np.resize(ir_data, (height, width, 1))

    # In-place min-max stretch to the full range of the pixel type.
    cv2.normalize(ir_data, ir_data, 0, max_data, cv2.NORM_MINMAX, dtype=image_dtype)
    ir_data = ir_data.astype(data_type)
    return cv2.cvtColor(ir_data, cv2.COLOR_GRAY2RGB)
|
57
|
+
|
58
|
+
|
59
|
+
def main():
    """Display the IR stream(s): dual left/right IR when available, else single IR."""
    config = Config()
    pipeline = Pipeline()
    device = pipeline.get_device()
    sensor_list = device.get_sensor_list()

    # Dual-IR devices expose distinct left/right IR sensors.
    dual_ir_types = (OBSensorType.LEFT_IR_SENSOR, OBSensorType.RIGHT_IR_SENSOR)
    has_dual_ir = any(
        sensor_list[i].get_type() in dual_ir_types for i in range(len(sensor_list))
    )

    if has_dual_ir:
        config.enable_video_stream(OBSensorType.LEFT_IR_SENSOR)
        config.enable_video_stream(OBSensorType.RIGHT_IR_SENSOR)
    else:
        config.enable_video_stream(OBSensorType.IR_SENSOR)

    pipeline.start(config)

    while True:
        try:
            frames = pipeline.wait_for_frames(100)
            if frames is None:
                continue

            if has_dual_ir:
                left_image = process_ir_frame(frames.get_frame(OBFrameType.LEFT_IR_FRAME))
                right_image = process_ir_frame(frames.get_frame(OBFrameType.RIGHT_IR_FRAME))
                if left_image is None or right_image is None:
                    continue
                # Side-by-side view of the stereo IR pair.
                cv2.imshow("Dual IR", np.hstack((left_image, right_image)))
            else:
                ir_image = process_ir_frame(frames.get_frame(OBFrameType.IR_FRAME))
                if ir_image is not None:
                    cv2.imshow("IR", ir_image)

            key = cv2.waitKey(1)
            if key == ord('q') or key == ESC_KEY:
                break

        except KeyboardInterrupt:
            break

    pipeline.stop()
|
112
|
+
|
113
|
+
|
114
|
+
# Script entry point.
if __name__ == "__main__":
    main()
|