pyorbbec 1.0.1.33__cp311-cp311-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- OrbbecSDK.dll +0 -0
- OrbbecSDK.lib +0 -0
- OrbbecSDKConfig.xml +2332 -0
- extensions/depthengine/depthengine.dll +0 -0
- extensions/depthengine/depthengine.lib +0 -0
- extensions/filters/FilterProcessor.dll +0 -0
- extensions/filters/ob_priv_filter.dll +0 -0
- extensions/firmwareupdater/firmwareupdater.dll +0 -0
- extensions/frameprocessor/ob_frame_processor.dll +0 -0
- pyorbbec-1.0.1.33.dist-info/METADATA +11 -0
- pyorbbec-1.0.1.33.dist-info/RECORD +49 -0
- pyorbbec-1.0.1.33.dist-info/WHEEL +5 -0
- pyorbbec-1.0.1.33.dist-info/licenses/LICENSE +202 -0
- pyorbbec-1.0.1.33.dist-info/top_level.txt +1 -0
- pyorbbecsdk/config/OrbbecSDKConfig.md +222 -0
- pyorbbecsdk/config/OrbbecSDKConfig.xml +2332 -0
- pyorbbecsdk/config/multi_device_sync_config.json +28 -0
- pyorbbecsdk/examples/.gitkeep +0 -0
- pyorbbecsdk/examples/README.md +26 -0
- pyorbbecsdk/examples/callback.py +303 -0
- pyorbbecsdk/examples/color.py +64 -0
- pyorbbecsdk/examples/coordinate_transform.py +184 -0
- pyorbbecsdk/examples/depth.py +107 -0
- pyorbbecsdk/examples/depth_work_mode.py +59 -0
- pyorbbecsdk/examples/device_firmware_update.py +155 -0
- pyorbbecsdk/examples/device_optional_depth_presets_update.py +142 -0
- pyorbbecsdk/examples/enumerate.py +118 -0
- pyorbbecsdk/examples/hdr.py +216 -0
- pyorbbecsdk/examples/hot_plug.py +160 -0
- pyorbbecsdk/examples/hw_d2c_align.py +135 -0
- pyorbbecsdk/examples/imu.py +60 -0
- pyorbbecsdk/examples/infrared.py +148 -0
- pyorbbecsdk/examples/logger.py +55 -0
- pyorbbecsdk/examples/metadata.py +64 -0
- pyorbbecsdk/examples/multi_device.py +169 -0
- pyorbbecsdk/examples/multi_streams.py +219 -0
- pyorbbecsdk/examples/net_device.py +177 -0
- pyorbbecsdk/examples/playback.py +277 -0
- pyorbbecsdk/examples/point_cloud.py +90 -0
- pyorbbecsdk/examples/post_processing.py +119 -0
- pyorbbecsdk/examples/preset.py +67 -0
- pyorbbecsdk/examples/quick_start.py +90 -0
- pyorbbecsdk/examples/recorder.py +238 -0
- pyorbbecsdk/examples/requirements.txt +8 -0
- pyorbbecsdk/examples/save_image_to_disk.py +106 -0
- pyorbbecsdk/examples/sync_align.py +109 -0
- pyorbbecsdk/examples/two_devices_sync.py +233 -0
- pyorbbecsdk/examples/utils.py +127 -0
- pyorbbecsdk.cp311-win_amd64.pyd +0 -0
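
For orientation, the sketch below shows the Pipeline/Config pattern that the packaged examples use (see the infrared, logger and multi-device hunks that follow). It is a minimal illustration, not taken verbatim from any one example: the install command assumes the wheel file is available locally, and the depth-only stream choice and 100 ms timeout are illustrative.

    # Install the wheel locally, then run against a connected Orbbec device:
    #   pip install pyorbbec-1.0.1.33-cp311-cp311-win_amd64.whl
    from pyorbbecsdk import Pipeline, Config, OBSensorType

    pipeline = Pipeline()
    config = Config()

    # Enable the default depth profile, as the packaged logger example does.
    depth_profiles = pipeline.get_stream_profile_list(OBSensorType.DEPTH_SENSOR)
    config.enable_stream(depth_profiles.get_default_video_stream_profile())

    pipeline.start(config)
    try:
        frames = pipeline.wait_for_frames(100)  # 100 ms timeout
        if frames is not None:
            depth = frames.get_depth_frame()
            if depth is not None:
                print("depth frame:", depth.get_width(), "x", depth.get_height())
    finally:
        pipeline.stop()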
@@ -0,0 +1,148 @@
# ******************************************************************************
# Copyright (c) 2024 Orbbec 3D Technology, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************

import cv2
import numpy as np
from pyorbbecsdk import *

ESC_KEY = 27


def process_ir_frame(ir_frame, is_dual_ir=False):
    if ir_frame is None:
        return None
    ir_frame = ir_frame.as_video_frame()
    ir_data = np.asanyarray(ir_frame.get_data())
    width = ir_frame.get_width()
    height = ir_frame.get_height()
    ir_format = ir_frame.get_format()

    if ir_format == OBFormat.Y8:
        ir_data = np.resize(ir_data, (height, width, 1))
        data_type = np.uint8
        image_dtype = cv2.CV_8UC1
        max_data = 255
    elif ir_format == OBFormat.MJPG:
        ir_data = cv2.imdecode(ir_data, cv2.IMREAD_UNCHANGED)
        data_type = np.uint8
        image_dtype = cv2.CV_8UC1
        max_data = 255
        if ir_data is None:
            print("decode mjpeg failed")
            return None
        ir_data = np.resize(ir_data, (height, width, 1))
    else:
        ir_data = np.frombuffer(ir_data, dtype=np.uint16)
        data_type = np.uint16
        image_dtype = cv2.CV_16UC1
        max_data = 65535
        ir_data = np.resize(ir_data, (height, width, 1))

    cv2.normalize(ir_data, ir_data, 0, max_data, cv2.NORM_MINMAX, dtype=image_dtype)
    ir_data = ir_data.astype(data_type)
    result = cv2.cvtColor(ir_data, cv2.COLOR_GRAY2RGB)

    if is_dual_ir:
        # Scale image to 640x400
        target_width = 640
        target_height = 400

        aspect_ratio = width / height
        target_ratio = target_width / target_height

        # Determine how to scale based on aspect ratio
        if aspect_ratio > target_ratio:
            new_width = target_width
            new_height = int(target_width / aspect_ratio)
        else:
            new_height = target_height
            new_width = int(target_height * aspect_ratio)

        # Determine whether to enlarge or reduce, and choose the appropriate interpolation method
        if width > new_width or height > new_height:
            interpolation = cv2.INTER_AREA
        else:
            interpolation = cv2.INTER_CUBIC

        # Scale
        scaled = cv2.resize(result, (new_width, new_height), interpolation=interpolation)

        # Create a black background, place the scaled image on a black background
        result = np.zeros((target_height, target_width, 3), dtype=np.uint8)
        x_offset = (target_width - new_width) // 2
        y_offset = (target_height - new_height) // 2
        result[y_offset:y_offset + new_height, x_offset:x_offset + new_width] = scaled

    return result


def main():
    config = Config()
    pipeline = Pipeline()
    device = pipeline.get_device()
    sensor_list = device.get_sensor_list()

    has_dual_ir = False
    for sensor in range(len(sensor_list)):
        if (sensor_list[sensor].get_type() == OBSensorType.LEFT_IR_SENSOR or
                sensor_list[sensor].get_type() == OBSensorType.RIGHT_IR_SENSOR):
            has_dual_ir = True
            break

    if has_dual_ir:
        config.enable_video_stream(OBSensorType.LEFT_IR_SENSOR)
        config.enable_video_stream(OBSensorType.RIGHT_IR_SENSOR)
    else:
        config.enable_video_stream(OBSensorType.IR_SENSOR)

    pipeline.start(config)

    while True:
        try:
            frames = pipeline.wait_for_frames(100)
            if frames is None:
                continue

            if has_dual_ir:
                left_ir_frame = frames.get_frame(OBFrameType.LEFT_IR_FRAME)
                right_ir_frame = frames.get_frame(OBFrameType.RIGHT_IR_FRAME)

                left_image = process_ir_frame(left_ir_frame, True)
                right_image = process_ir_frame(right_ir_frame, True)

                if left_image is None or right_image is None:
                    continue

                combined_ir = np.hstack((left_image, right_image))
                cv2.imshow("Dual IR", combined_ir)
            else:
                ir_frame = frames.get_frame(OBFrameType.IR_FRAME)
                ir_image = process_ir_frame(ir_frame)
                if ir_image is not None:
                    cv2.imshow("IR", ir_image)

            key = cv2.waitKey(1)
            if key == ord('q') or key == ESC_KEY:
                break

        except KeyboardInterrupt:
            break

    pipeline.stop()


if __name__ == "__main__":
    main()
@@ -0,0 +1,55 @@
# ******************************************************************************
# Copyright (c) 2024 Orbbec 3D Technology, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************

import os
from pyorbbecsdk import *
import time

def main():
    # Set console logger (INFO level)
    # If you do NOT want to see log messages in the console, set the log level to OBLogLevel.NONE
    Context.set_logger_to_console(OBLogLevel.INFO)

    # Set file logger (INFO level)
    log_path = "Log/Custom/"
    os.makedirs(log_path, exist_ok=True)  # Ensure log directory exists
    Context.set_logger_to_file(OBLogLevel.INFO, log_path)


    # Configure streams
    config = Config()
    pipeline = Pipeline()
    # Get and enable depth stream configuration
    depth_profiles = pipeline.get_stream_profile_list(OBSensorType.DEPTH_SENSOR)
    depth_profile = depth_profiles.get_default_video_stream_profile()
    config.enable_stream(depth_profile)

    # Get and enable color stream configuration
    color_profiles = pipeline.get_stream_profile_list(OBSensorType.COLOR_SENSOR)
    color_profile = color_profiles.get_default_video_stream_profile()
    config.enable_stream(color_profile)

    # Start pipeline
    pipeline.start(config)
    time.sleep(1)
    # Stop pipeline
    pipeline.stop()

    print("\nPress any key to exit.")
    input()  # Wait for user input to exit

if __name__ == "__main__":
    main()
@@ -0,0 +1,64 @@
# ******************************************************************************
# Copyright (c) 2024 Orbbec 3D Technology, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************

from pyorbbecsdk import *

ESC_KEY = 27

def main():
    # Initialize Pipeline
    pipeline = Pipeline()
    # Start Pipeline
    pipeline.start()
    print("Pipeline started. Press Ctrl+C to exit.")

    frame_counter = 0  # Add frame counter

    while True:
        try:
            # Get frameSet from Pipeline
            frame_set = pipeline.wait_for_frames(1000)
            if frame_set is None:
                continue

            frame_counter += 1  # Increment counter

            # Only print metadata every 30 frames
            if frame_counter % 30 == 0:
                for i in range(len(frame_set)):
                    frame = frame_set[i]

                    # Print frame metadata
                    print(f"Frame type: {frame.get_type()}")
                    metadata_types = [getattr(OBFrameMetadataType, attr) for attr in dir(OBFrameMetadataType)
                                      if not attr.startswith('__') and isinstance(getattr(OBFrameMetadataType, attr), OBFrameMetadataType)]

                    for metadata_type in metadata_types:
                        if frame.has_metadata(metadata_type):
                            metadata_value = frame.get_metadata_value(metadata_type)
                            print(f"  Metadata type: {metadata_type.name}, value: {metadata_value}")

        except KeyboardInterrupt:
            break
        except Exception as e:
            print("An error occurred:", e)
            break

    pipeline.stop()
    print("Pipeline stopped.")

if __name__ == "__main__":
    main()
@@ -0,0 +1,169 @@
# ******************************************************************************
# Copyright (c) 2024 Orbbec 3D Technology, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************

from queue import Queue
from typing import List

import cv2
import numpy as np

from pyorbbecsdk import *
from utils import frame_to_bgr_image

MAX_DEVICES = 2
curr_device_cnt = 0

MAX_QUEUE_SIZE = 5
ESC_KEY = 27

color_frames_queue: List[Queue] = [Queue() for _ in range(MAX_DEVICES)]
depth_frames_queue: List[Queue] = [Queue() for _ in range(MAX_DEVICES)]
has_color_sensor: List[bool] = [False for _ in range(MAX_DEVICES)]
stop_rendering = False


def on_new_frame_callback(frames: FrameSet, index: int):
    global color_frames_queue, depth_frames_queue
    global MAX_QUEUE_SIZE
    assert index < MAX_DEVICES
    color_frame = frames.get_color_frame()
    depth_frame = frames.get_depth_frame()
    if color_frame is not None:
        if color_frames_queue[index].qsize() >= MAX_QUEUE_SIZE:
            color_frames_queue[index].get()
        color_frames_queue[index].put(color_frame)
    if depth_frame is not None:
        if depth_frames_queue[index].qsize() >= MAX_QUEUE_SIZE:
            depth_frames_queue[index].get()
        depth_frames_queue[index].put(depth_frame)


def rendering_frames():
    global color_frames_queue, depth_frames_queue
    global curr_device_cnt
    global stop_rendering
    while not stop_rendering:
        for i in range(curr_device_cnt):
            color_frame = None
            depth_frame = None
            if not color_frames_queue[i].empty():
                color_frame = color_frames_queue[i].get()
            if not depth_frames_queue[i].empty():
                depth_frame = depth_frames_queue[i].get()
            if color_frame is None and depth_frame is None:
                continue
            color_image = None
            depth_image = None
            color_width, color_height = 0, 0
            if color_frame is not None:
                color_width, color_height = color_frame.get_width(), color_frame.get_height()
                color_image = frame_to_bgr_image(color_frame)
            if depth_frame is not None:
                width = depth_frame.get_width()
                height = depth_frame.get_height()
                scale = depth_frame.get_depth_scale()
                depth_format = depth_frame.get_format()
                if depth_format != OBFormat.Y16:
                    print("depth format is not Y16")
                    continue

                try:
                    depth_data = np.frombuffer(depth_frame.get_data(), dtype=np.uint16)
                    depth_data = depth_data.reshape((height, width))
                except ValueError:
                    print("Failed to reshape depth data")
                    continue

                depth_data = depth_data.astype(np.float32) * scale

                depth_image = cv2.normalize(depth_data, None, 0, 255, cv2.NORM_MINMAX,
                                            dtype=cv2.CV_8U)
                depth_image = cv2.applyColorMap(depth_image, cv2.COLORMAP_JET)

            if color_image is not None and depth_image is not None:
                window_size = (color_width // 2, color_height // 2)
                color_image = cv2.resize(color_image, window_size)
                depth_image = cv2.resize(depth_image, window_size)
                image = np.hstack((color_image, depth_image))
            elif depth_image is not None and not has_color_sensor[i]:
                image = depth_image
            else:
                continue
            cv2.imshow("Device {}".format(i), image)
            key = cv2.waitKey(1)
            if key == ord('q') or key == ESC_KEY:
                stop_rendering = True
                break
    cv2.destroyAllWindows()


def start_streams(pipelines: List[Pipeline], configs: List[Config]):
    index = 0
    for pipeline, config in zip(pipelines, configs):
        print("Starting device {}".format(index))
        pipeline.start(config, lambda frame_set, curr_index=index: on_new_frame_callback(frame_set,
                                                                                         curr_index))
        index += 1


def stop_streams(pipelines: List[Pipeline]):
    for pipeline in pipelines:
        pipeline.stop()


def main():
    ctx = Context()
    device_list = ctx.query_devices()
    global curr_device_cnt
    curr_device_cnt = device_list.get_count()
    if curr_device_cnt == 0:
        print("No device connected")
        return
    if curr_device_cnt > MAX_DEVICES:
        print("Too many devices connected")
        return
    pipelines: List[Pipeline] = []
    configs: List[Config] = []
    global has_color_sensor
    for i in range(device_list.get_count()):
        device = device_list.get_device_by_index(i)
        pipeline = Pipeline(device)
        config = Config()
        try:
            profile_list = pipeline.get_stream_profile_list(OBSensorType.COLOR_SENSOR)
            color_profile: VideoStreamProfile = profile_list.get_default_video_stream_profile()
            config.enable_stream(color_profile)
            has_color_sensor[i] = True
        except OBError as e:
            print(e)
            has_color_sensor[i] = False
        profile_list = pipeline.get_stream_profile_list(OBSensorType.DEPTH_SENSOR)
        depth_profile = profile_list.get_default_video_stream_profile()
        config.enable_stream(depth_profile)
        pipelines.append(pipeline)
        configs.append(config)
    global stop_rendering
    start_streams(pipelines, configs)
    try:
        rendering_frames()
    except KeyboardInterrupt:
        stop_rendering = True
    finally:
        stop_streams(pipelines)
    cv2.destroyAllWindows()

if __name__ == "__main__":
    main()
@@ -0,0 +1,219 @@
# ******************************************************************************
# Copyright (c) 2024 Orbbec 3D Technology, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************

import cv2
import numpy as np
from pyorbbecsdk import *
from utils import frame_to_bgr_image

# cached frames for better visualization
cached_frames = {
    'color': None,
    'depth': None,
    'left_ir': None,
    'right_ir': None,
    'ir': None
}

def setup_camera():
    """Setup camera and stream configuration"""
    pipeline = Pipeline()
    config = Config()
    device = pipeline.get_device()

    # Try to enable all possible sensors
    video_sensors = [
        OBSensorType.COLOR_SENSOR,
        OBSensorType.DEPTH_SENSOR,
        OBSensorType.IR_SENSOR,
        OBSensorType.LEFT_IR_SENSOR,
        OBSensorType.RIGHT_IR_SENSOR
    ]
    sensor_list = device.get_sensor_list()
    for sensor in range(len(sensor_list)):
        try:
            sensor_type = sensor_list[sensor].get_type()
            if sensor_type in video_sensors:
                config.enable_stream(sensor_type)
        except:
            continue

    pipeline.start(config)
    return pipeline

def setup_imu():
    """Setup IMU configuration"""
    pipeline = Pipeline()
    config = Config()
    config.enable_accel_stream()
    config.enable_gyro_stream()
    pipeline.start(config)
    return pipeline

def process_color(frame):
    """Process color image"""
    frame = frame if frame else cached_frames['color']
    cached_frames['color'] = frame
    return frame_to_bgr_image(frame) if frame else None


def process_depth(frame):
    """Process depth image"""
    frame = frame if frame else cached_frames['depth']
    cached_frames['depth'] = frame
    if not frame:
        return None
    try:
        depth_data = np.frombuffer(frame.get_data(), dtype=np.uint16)
        depth_data = depth_data.reshape(frame.get_height(), frame.get_width())
        depth_image = cv2.normalize(depth_data, None, 0, 255, cv2.NORM_MINMAX, dtype=cv2.CV_8U)
        return cv2.applyColorMap(depth_image, cv2.COLORMAP_JET)
    except ValueError:
        return None


def process_ir(ir_frame):
    """Process IR frame (left or right) to RGB image"""
    if ir_frame is None:
        return None
    ir_frame = ir_frame.as_video_frame()
    ir_data = np.asanyarray(ir_frame.get_data())
    width = ir_frame.get_width()
    height = ir_frame.get_height()
    ir_format = ir_frame.get_format()

    if ir_format == OBFormat.Y8:
        ir_data = np.resize(ir_data, (height, width, 1))
        data_type = np.uint8
        image_dtype = cv2.CV_8UC1
        max_data = 255
    elif ir_format == OBFormat.MJPG:
        ir_data = cv2.imdecode(ir_data, cv2.IMREAD_UNCHANGED)
        data_type = np.uint8
        image_dtype = cv2.CV_8UC1
        max_data = 255
        if ir_data is None:
            print("decode mjpeg failed")
            return None
        ir_data = np.resize(ir_data, (height, width, 1))
    else:
        ir_data = np.frombuffer(ir_data, dtype=np.uint16)
        data_type = np.uint16
        image_dtype = cv2.CV_16UC1
        max_data = 65535  # full range for 16-bit IR data
        ir_data = np.resize(ir_data, (height, width, 1))

    cv2.normalize(ir_data, ir_data, 0, max_data, cv2.NORM_MINMAX, dtype=image_dtype)
    ir_data = ir_data.astype(data_type)
    return cv2.cvtColor(ir_data, cv2.COLOR_GRAY2RGB)

def get_imu_text(frame, name):
    """Format IMU data"""
    if not frame:
        return []
    return [
        f"{name} x: {frame.get_x():.2f}",
        f"{name} y: {frame.get_y():.2f}",
        f"{name} z: {frame.get_z():.2f}"
    ]


def create_display(frames, width=1280, height=720):
    """Create display window"""
    display = np.zeros((height, width, 3), dtype=np.uint8)
    h, w = height // 2, width // 2

    # Process video frames
    if 'color' in frames and frames['color'] is not None:
        display[0:h, 0:w] = cv2.resize(frames['color'], (w, h))

    if 'depth' in frames and frames['depth'] is not None:
        display[0:h, w:] = cv2.resize(frames['depth'], (w, h))

    if 'ir' in frames and frames['ir'] is not None:
        display[h:, 0:w] = cv2.resize(frames['ir'], (w, h))

    # Display IMU data
    if 'imu' in frames:
        y_offset = h + 20
        for data_type in ['accel', 'gyro']:
            text_lines = get_imu_text(frames['imu'].get(data_type), data_type.title())
            for i, line in enumerate(text_lines):
                cv2.putText(display, line, (w + 10, y_offset + i * 20),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
            y_offset += 80

    return display


def main():
    # Window settings
    WINDOW_NAME = "MultiStream Viewer"
    DISPLAY_WIDTH = 1280
    DISPLAY_HEIGHT = 720

    # Initialize camera
    pipeline = setup_camera()
    imu_pipeline = setup_imu()
    cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_NORMAL)
    cv2.resizeWindow(WINDOW_NAME, DISPLAY_WIDTH, DISPLAY_HEIGHT)
    while True:
        # Get all frames
        frames = pipeline.wait_for_frames(100)
        if not frames:
            continue
        # Process different frame types
        processed_frames = {'color': process_color(frames.get_color_frame()),
                            'depth': process_depth(frames.get_depth_frame())}

        # Process IR image: try stereo IR first, fall back to mono if unavailable
        try:
            left = process_ir(frames.get_frame(OBFrameType.LEFT_IR_FRAME).as_video_frame())
            right = process_ir(frames.get_frame(OBFrameType.RIGHT_IR_FRAME).as_video_frame())
            if left is not None and right is not None:
                processed_frames['ir'] = np.hstack((left, right))
        except:
            ir_frame = frames.get_ir_frame()
            if ir_frame:
                processed_frames['ir'] = process_ir(ir_frame.as_video_frame())

        # Process IMU data
        imu_frames = imu_pipeline.wait_for_frames(100)
        if not imu_frames:
            continue
        accel = imu_frames.get_frame(OBFrameType.ACCEL_FRAME)
        gyro = imu_frames.get_frame(OBFrameType.GYRO_FRAME)
        if accel and gyro:
            processed_frames['imu'] = {
                'accel': accel.as_accel_frame(),
                'gyro': gyro.as_gyro_frame()
            }

        # create display
        display = create_display(processed_frames, DISPLAY_WIDTH, DISPLAY_HEIGHT)
        cv2.imshow(WINDOW_NAME, display)

        # check exit key
        key = cv2.waitKey(1) & 0xFF
        if key in [ord('q'), 27]:  # q or ESC
            break

    pipeline.stop()
    imu_pipeline.stop()  # stop the IMU pipeline started in setup_imu()
    cv2.destroyAllWindows()


if __name__ == "__main__":
    main()