pyorbbec 1.0.1.33__cp312-cp312-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- OrbbecSDK.dll +0 -0
- OrbbecSDK.lib +0 -0
- OrbbecSDKConfig.xml +2332 -0
- extensions/depthengine/depthengine.dll +0 -0
- extensions/depthengine/depthengine.lib +0 -0
- extensions/filters/FilterProcessor.dll +0 -0
- extensions/filters/ob_priv_filter.dll +0 -0
- extensions/firmwareupdater/firmwareupdater.dll +0 -0
- extensions/frameprocessor/ob_frame_processor.dll +0 -0
- pyorbbec-1.0.1.33.dist-info/METADATA +11 -0
- pyorbbec-1.0.1.33.dist-info/RECORD +49 -0
- pyorbbec-1.0.1.33.dist-info/WHEEL +5 -0
- pyorbbec-1.0.1.33.dist-info/licenses/LICENSE +202 -0
- pyorbbec-1.0.1.33.dist-info/top_level.txt +1 -0
- pyorbbecsdk/config/OrbbecSDKConfig.md +222 -0
- pyorbbecsdk/config/OrbbecSDKConfig.xml +2332 -0
- pyorbbecsdk/config/multi_device_sync_config.json +28 -0
- pyorbbecsdk/examples/.gitkeep +0 -0
- pyorbbecsdk/examples/README.md +26 -0
- pyorbbecsdk/examples/callback.py +303 -0
- pyorbbecsdk/examples/color.py +64 -0
- pyorbbecsdk/examples/coordinate_transform.py +184 -0
- pyorbbecsdk/examples/depth.py +107 -0
- pyorbbecsdk/examples/depth_work_mode.py +59 -0
- pyorbbecsdk/examples/device_firmware_update.py +155 -0
- pyorbbecsdk/examples/device_optional_depth_presets_update.py +142 -0
- pyorbbecsdk/examples/enumerate.py +118 -0
- pyorbbecsdk/examples/hdr.py +216 -0
- pyorbbecsdk/examples/hot_plug.py +160 -0
- pyorbbecsdk/examples/hw_d2c_align.py +135 -0
- pyorbbecsdk/examples/imu.py +60 -0
- pyorbbecsdk/examples/infrared.py +148 -0
- pyorbbecsdk/examples/logger.py +55 -0
- pyorbbecsdk/examples/metadata.py +64 -0
- pyorbbecsdk/examples/multi_device.py +169 -0
- pyorbbecsdk/examples/multi_streams.py +219 -0
- pyorbbecsdk/examples/net_device.py +177 -0
- pyorbbecsdk/examples/playback.py +277 -0
- pyorbbecsdk/examples/point_cloud.py +90 -0
- pyorbbecsdk/examples/post_processing.py +119 -0
- pyorbbecsdk/examples/preset.py +67 -0
- pyorbbecsdk/examples/quick_start.py +90 -0
- pyorbbecsdk/examples/recorder.py +238 -0
- pyorbbecsdk/examples/requirements.txt +8 -0
- pyorbbecsdk/examples/save_image_to_disk.py +106 -0
- pyorbbecsdk/examples/sync_align.py +109 -0
- pyorbbecsdk/examples/two_devices_sync.py +233 -0
- pyorbbecsdk/examples/utils.py +127 -0
- pyorbbecsdk.cp312-win_amd64.pyd +0 -0
@@ -0,0 +1,177 @@
|
|
1
|
+
# ******************************************************************************
|
2
|
+
# Copyright (c) 2024 Orbbec 3D Technology, Inc
|
3
|
+
#
|
4
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
5
|
+
# you may not use this file except in compliance with the License.
|
6
|
+
# You may obtain a copy of the License at
|
7
|
+
#
|
8
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
9
|
+
#
|
10
|
+
# Unless required by applicable law or agreed to in writing, software
|
11
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
12
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13
|
+
# See the License for the specific language governing permissions and
|
14
|
+
# limitations under the License.
|
15
|
+
# ******************************************************************************
|
16
|
+
|
17
|
+
import platform
|
18
|
+
import cv2
|
19
|
+
import numpy as np
|
20
|
+
import av
|
21
|
+
import io
|
22
|
+
import threading
|
23
|
+
import time
|
24
|
+
import pygame
|
25
|
+
import os
|
26
|
+
from pyorbbecsdk import (Pipeline, Context, Config, OBSensorType, OBFormat, OBError)
|
27
|
+
from utils import frame_to_bgr_image
|
28
|
+
|
29
|
+
ESC_KEY = 27
|
30
|
+
# Gemini 335Le
|
31
|
+
GEMINI_335LE_PRODUCT_ID = 0x080E
|
32
|
+
# Gemini 435Le
|
33
|
+
GEMINI_435LE_PRODUCT_ID = 0x0815
|
34
|
+
|
35
|
+
def get_stream_profile(pipeline, sensor_type, width, height, fmt, fps):
    """Return the video stream profile matching the requested parameters.

    Falls back to the sensor's default profile when the exact
    width/height/format/fps combination is not supported.
    """
    profiles = pipeline.get_stream_profile_list(sensor_type)
    try:
        return profiles.get_video_stream_profile(width, height, fmt, fps)
    except OBError:
        return profiles.get_default_video_stream_profile()
|
42
|
+
|
43
|
+
def decode_h26x_frame(decoder, byte_data):
    """Decode one H.264/H.265/MJPG packet and return the first decoded
    frame as a BGR ndarray, or None on failure or when the packet yields
    no frame.
    """
    try:
        for decoded in decoder.decode(av.Packet(byte_data)):
            return decoded.to_ndarray(format='bgr24')
    except av.AVError as e:
        print(f"Decoding error: {e}")
    return None
|
52
|
+
|
53
|
+
class FrameProcessor(threading.Thread):
    """Background worker that decodes incoming video packets and keeps the
    most recent frame resized and converted for display.

    Only the latest submitted packet is retained: if decoding falls behind
    the stream, intermediate packets are dropped rather than queued.

    Fix vs. original: the decode + resize work used to run while holding
    ``self.lock``, which blocked ``update_frame``/``get_processed_frame``
    for the whole duration of a decode. The packet is now taken out of
    ``latest_frame`` under the lock, but the heavy work runs outside it.
    """

    def __init__(self, decoder, display_width, display_height):
        super().__init__()
        self.decoder = decoder  # av.CodecContext used for decoding
        self.latest_frame = None  # newest undecoded packet bytes (or None)
        self.processed_frame = None  # last successfully decoded RGB image
        self.lock = threading.Lock()  # guards latest_frame / processed_frame
        self.running = True
        self.daemon = True  # do not keep the process alive on exit
        self.display_width = display_width
        self.display_height = display_height

    def run(self):
        """Decode loop: grab the newest packet, decode, resize, store RGB."""
        while self.running:
            # Swap the pending packet out under the lock, decode outside it.
            with self.lock:
                pending, self.latest_frame = self.latest_frame, None
            if pending is not None:
                color_image = decode_h26x_frame(self.decoder, pending)
                if color_image is not None:
                    # Resize to the display resolution, then convert BGR -> RGB
                    # (pygame expects RGB ordering).
                    resized_image = cv2.resize(color_image, (self.display_width, self.display_height))
                    rgb_image = cv2.cvtColor(resized_image, cv2.COLOR_BGR2RGB)
                    with self.lock:
                        self.processed_frame = rgb_image
            time.sleep(0.001)

    def update_frame(self, frame):
        """Submit a new encoded packet, replacing any not-yet-decoded one."""
        with self.lock:
            self.latest_frame = frame

    def get_processed_frame(self):
        """Return the most recently decoded RGB frame (or None)."""
        with self.lock:
            return self.processed_frame

    def stop(self):
        """Ask the worker loop to exit after its current iteration."""
        self.running = False
|
88
|
+
|
89
|
+
def main():
    """Stream color frames from a network device and show them with pygame.

    Prompts for the device IP, picks a capture profile based on the product
    id, decodes the packets on a worker thread, and displays the result in
    a 720p window until ESC is pressed or the window is closed.
    """
    ctx = Context()
    ip = input("Enter the IP address of the device (default: 192.168.1.10): ") or "192.168.1.10"
    device = ctx.create_net_device(ip, 8090)
    if device is None:
        print("Failed to create net device")
        return

    config = Config()
    pipeline = Pipeline(device)

    device_info = device.get_device_info()
    # Gemini 335Le, Gemini 435Le
    SUPPORTED_PIDS = {GEMINI_435LE_PRODUCT_ID, GEMINI_335LE_PRODUCT_ID}
    if device_info.get_pid() in SUPPORTED_PIDS:
        # These models stream MJPG at 1280x800.
        print("Current device is GEMINI 435Le or GEMINI 335Le, use OBFormat.MJPG")
        color_profile = get_stream_profile(pipeline, OBSensorType.COLOR_SENSOR, 1280, 800, OBFormat.MJPG, 10)
    else:
        # Everything else gets a 4K H.264 stream.
        color_profile = get_stream_profile(pipeline, OBSensorType.COLOR_SENSOR, 3840, 2160, OBFormat.H264, 25)

    config.enable_stream(color_profile)
    pipeline.start(config)

    # Pick the decoder matching the negotiated stream format.
    stream_format = color_profile.get_format()
    if stream_format == OBFormat.H264:
        color_codec_name = 'h264'
    elif stream_format == OBFormat.MJPG:
        color_codec_name = 'mjpeg'
    else:
        color_codec_name = 'hevc'

    try:
        decoder = av.codec.CodecContext.create(color_codec_name, 'r')
    except av.AVError as e:
        print(f"Failed to create decoder for {color_codec_name}: {e}")
        pipeline.stop()
        return

    # Display at 720p regardless of the capture resolution.
    display_width, display_height = 1280, 720
    frame_processor = FrameProcessor(decoder, display_width, display_height)
    frame_processor.start()

    pygame.init()
    screen = pygame.display.set_mode((display_width, display_height))
    pygame.display.set_caption("4K Net Device Viewer (720p Display)")
    clock = pygame.time.Clock()

    running = True
    try:
        while running:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    running = False
                elif event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:
                    running = False
            if not running:
                break

            frames = pipeline.wait_for_frames(100)
            if frames:
                color_frame = frames.get_color_frame()
                if color_frame:
                    byte_data = color_frame.get_data()
                    if len(byte_data) > 0:
                        frame_processor.update_frame(byte_data)

            processed_frame = frame_processor.get_processed_frame()
            if processed_frame is not None:
                # pygame surfaces are column-major, hence the axis swap.
                surf = pygame.surfarray.make_surface(processed_frame.swapaxes(0, 1))
                screen.blit(surf, (0, 0))
                pygame.display.flip()

            clock.tick(30)  # cap the UI loop at 30 FPS
    finally:
        print("Stopping frame processor...")
        frame_processor.stop()
        print("Stopping pipeline...")
        pipeline.stop()
        print("Exiting the program...")
        os._exit(0)


if __name__ == "__main__":
    main()
|
@@ -0,0 +1,277 @@
|
|
1
|
+
# ******************************************************************************
|
2
|
+
# Copyright (c) 2024 Orbbec 3D Technology, Inc
|
3
|
+
#
|
4
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
5
|
+
# you may not use this file except in compliance with the License.
|
6
|
+
# You may obtain a copy of the License at
|
7
|
+
#
|
8
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
9
|
+
#
|
10
|
+
# Unless required by applicable law or agreed to in writing, software
|
11
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
12
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13
|
+
# See the License for the specific language governing permissions and
|
14
|
+
# limitations under the License.
|
15
|
+
# ******************************************************************************
|
16
|
+
|
17
|
+
import cv2
|
18
|
+
import numpy as np
|
19
|
+
from pyorbbecsdk import *
|
20
|
+
from utils import frame_to_bgr_image
|
21
|
+
|
22
|
+
# cached frames for better visualization
|
23
|
+
# Cache of the last frame seen per stream, so the display keeps showing the
# previous image when a stream momentarily yields no new frame.
cached_frames = dict.fromkeys(('color', 'depth', 'left_ir', 'right_ir', 'ir'))
|
30
|
+
|
31
|
+
def setup_camera(playback):
    """Create a pipeline on *playback* and enable every supported stream.

    Returns a tuple ``(pipeline, config, enabled_sensor_types)`` where
    ``enabled_sensor_types`` lists the sensor types actually enabled.
    """
    pipeline = Pipeline(playback)
    config = Config()
    device = pipeline.get_device()

    # Sensor types we try to enable when the device exposes them.
    video_sensors = [
        OBSensorType.COLOR_SENSOR,
        OBSensorType.DEPTH_SENSOR,
        OBSensorType.IR_SENSOR,
        OBSensorType.LEFT_IR_SENSOR,
        OBSensorType.RIGHT_IR_SENSOR,
        OBSensorType.ACCEL_SENSOR,
        OBSensorType.GYRO_SENSOR,
    ]
    enabled_sensor_types = []

    sensor_list = device.get_sensor_list()
    for idx in range(len(sensor_list)):
        # Skip sensors that fail to report a type or cannot be enabled.
        # Fix vs. original: the bare ``except:`` also swallowed
        # KeyboardInterrupt/SystemExit; narrowed to Exception.
        try:
            sensor_type = sensor_list[idx].get_type()
            if sensor_type in video_sensors:
                config.enable_stream(sensor_type)
                enabled_sensor_types.append(sensor_type)
        except Exception:
            continue
    return pipeline, config, enabled_sensor_types
|
59
|
+
|
60
|
+
def process_color(frame):
    """Convert a color frame to a BGR image, falling back to the cached
    frame when no new one arrived; returns None if neither is available."""
    if not frame:
        frame = cached_frames['color']
    cached_frames['color'] = frame
    if not frame:
        return None
    return frame_to_bgr_image(frame)
|
65
|
+
|
66
|
+
|
67
|
+
def process_depth(frame):
    """Colorize a depth frame with the JET colormap, falling back to the
    cached frame; returns None when no frame is available or data is bad."""
    if not frame:
        frame = cached_frames['depth']
    cached_frames['depth'] = frame
    if not frame:
        return None
    try:
        raw = np.frombuffer(frame.get_data(), dtype=np.uint16)
        raw = raw.reshape(frame.get_height(), frame.get_width())
        gray = cv2.normalize(raw, None, 0, 255, cv2.NORM_MINMAX, dtype=cv2.CV_8U)
        return cv2.applyColorMap(gray, cv2.COLORMAP_JET)
    except ValueError:
        # Data size did not match width*height — drop the frame.
        return None
|
80
|
+
|
81
|
+
|
82
|
+
def process_ir(ir_frame, key):
    """Normalize an IR frame (left, right, or mono) into an RGB image.

    Falls back to the cached frame under *key* when no new frame arrived;
    returns None when nothing is available or MJPG decoding fails.
    """
    if not ir_frame:
        ir_frame = cached_frames[key]
    cached_frames[key] = ir_frame
    if ir_frame is None:
        return None

    video = ir_frame.as_video_frame()
    data = np.asanyarray(video.get_data())
    width = video.get_width()
    height = video.get_height()
    fmt = video.get_format()

    max_value = 255  # normalization ceiling for every format
    if fmt == OBFormat.Y8:
        data = np.resize(data, (height, width, 1))
        out_dtype, cv_dtype = np.uint8, cv2.CV_8UC1
    elif fmt == OBFormat.MJPG:
        data = cv2.imdecode(data, cv2.IMREAD_UNCHANGED)
        out_dtype, cv_dtype = np.uint8, cv2.CV_8UC1
        if data is None:
            print("decode mjpeg failed")
            return None
        data = np.resize(data, (height, width, 1))
    else:
        # Presumably a 16-bit format such as Y16 — TODO confirm.
        data = np.frombuffer(data, dtype=np.uint16)
        out_dtype, cv_dtype = np.uint16, cv2.CV_16UC1
        data = np.resize(data, (height, width, 1))

    cv2.normalize(data, data, 0, max_value, cv2.NORM_MINMAX, dtype=cv_dtype)
    data = data.astype(out_dtype)
    return cv2.cvtColor(data, cv2.COLOR_GRAY2RGB)
|
119
|
+
|
120
|
+
def get_imu_text(frame, name):
    """Return formatted x/y/z readout lines for an IMU frame.

    Returns an empty list when *frame* is falsy.
    """
    if not frame:
        return []
    values = (frame.get_x(), frame.get_y(), frame.get_z())
    return [f"{name} {axis}: {value:.2f}" for axis, value in zip("xyz", values)]
|
129
|
+
|
130
|
+
|
131
|
+
|
132
|
+
def create_display(frames, enabled_sensor_types, width=1280, height=720):
    """Compose all enabled video streams (plus IMU text) into one image.

    Streams are placed on a grid sized by the number of elements
    (1 -> 1x1, 2 -> 2x1, 3-4 -> 2x2, 5 -> 3x2); missing frames render as
    black cells, IMU data renders as text on a gray cell.
    """
    sensor_type_to_name = {
        OBSensorType.COLOR_SENSOR: 'color',
        OBSensorType.DEPTH_SENSOR: 'depth',
        OBSensorType.LEFT_IR_SENSOR: 'left_ir',
        OBSensorType.RIGHT_IR_SENSOR: 'right_ir',
        OBSensorType.IR_SENSOR: 'ir'
    }
    video_keys = [sensor_type_to_name[t] for t in enabled_sensor_types
                  if t in sensor_type_to_name]

    video_frames = [frames.get(k) for k in video_keys]
    has_imu = 'imu' in frames
    num_videos = len(video_frames)
    total_elements = num_videos + (1 if has_imu else 0)

    if total_elements == 1:
        grid_cols, grid_rows = 1, 1
    elif total_elements <= 2:
        grid_cols, grid_rows = 2, 1
    elif total_elements <= 4:
        grid_cols, grid_rows = 2, 2
    elif total_elements <= 5:
        grid_cols, grid_rows = 2, 3
    else:
        raise ValueError("Too many elements! Maximum supported is 5.")

    cell_w = width // grid_cols
    cell_h = height // grid_rows

    display = np.zeros((cell_h * grid_rows, cell_w * grid_cols, 3), dtype=np.uint8)

    for idx, frame in enumerate(video_frames):
        x_start = (idx % grid_cols) * cell_w
        y_start = (idx // grid_cols) * cell_h
        if frame is None:
            # No frame yet for this stream — draw a black cell.
            cv2.rectangle(display, (x_start, y_start),
                          (x_start + cell_w, y_start + cell_h), (0, 0, 0), -1)
        elif total_elements == 1:
            # Single stream: fill the whole requested area.
            display = cv2.resize(frame, (width, height))
        else:
            display[y_start:y_start + cell_h, x_start:x_start + cell_w] = \
                cv2.resize(frame, (cell_w, cell_h))

    if has_imu:
        # The IMU panel occupies the cell after the last video stream.
        x_start = (num_videos % grid_cols) * cell_w
        y_start = (num_videos // grid_cols) * cell_h
        cv2.rectangle(display, (x_start, y_start),
                      (x_start + cell_w, y_start + cell_h), (50, 50, 50), -1)

        y_offset = y_start + 30
        for data_type in ['accel', 'gyro']:
            text_lines = get_imu_text(frames['imu'].get(data_type), data_type.title())
            for i, line in enumerate(text_lines):
                cv2.putText(display, line, (x_start + 10, y_offset + i * 20),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 1, cv2.LINE_AA)
            y_offset += 100

    return display
|
200
|
+
|
201
|
+
|
202
|
+
|
203
|
+
def main():
    """Play back a recorded .bag file and display all streams in one window."""
    WINDOW_NAME = "MultiStream Playback(.bag) Viewer"
    file_path = input("Enter output filename (.bag) and press Enter to start playbacking: ")

    DISPLAY_WIDTH = 1280
    DISPLAY_HEIGHT = 720
    # Open the recording and build a pipeline with every available stream.
    playback = PlaybackDevice(file_path)
    pipeline, config, enabled_sensor_types = setup_camera(playback)
    device = pipeline.get_device()

    def on_status_change(status):
        # Restart the pipeline when playback reaches the end so the
        # recording loops continuously.
        print(f"[Callback] status changed: {status}")
        if status == PlaybackStatus.Stopped:
            pipeline.stop()
            pipeline.start(config)

    playback.set_playback_status_change_callback(on_status_change)

    pipeline.start(config)

    cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_NORMAL)
    cv2.resizeWindow(WINDOW_NAME, DISPLAY_WIDTH, DISPLAY_HEIGHT)
    processed_frames = {}
    while True:
        frames = pipeline.wait_for_frames(100)
        if not frames:
            continue

        color_frame = frames.get_frame(OBFrameType.COLOR_FRAME)
        if color_frame:
            processed_frames['color'] = process_color(color_frame.as_video_frame())

        depth_frame = frames.get_frame(OBFrameType.DEPTH_FRAME)
        if depth_frame:
            processed_frames['depth'] = process_depth(depth_frame.as_video_frame())

        # IR variants fall back to their cached images inside process_ir.
        processed_frames['left_ir'] = process_ir(frames.get_frame(OBFrameType.LEFT_IR_FRAME), 'left_ir')
        processed_frames['right_ir'] = process_ir(frames.get_frame(OBFrameType.RIGHT_IR_FRAME), 'right_ir')
        processed_frames['ir'] = process_ir(frames.get_ir_frame(), 'ir')

        accel = frames.get_frame(OBFrameType.ACCEL_FRAME)
        gyro = frames.get_frame(OBFrameType.GYRO_FRAME)
        if accel and gyro:
            processed_frames['imu'] = {
                'accel': accel.as_accel_frame(),
                'gyro': gyro.as_gyro_frame()
            }

        display = create_display(processed_frames, enabled_sensor_types, DISPLAY_WIDTH, DISPLAY_HEIGHT)
        cv2.imshow(WINDOW_NAME, display)

        key = cv2.waitKey(1) & 0xFF
        if key in (ord('q'), 27):  # 'q' or ESC exits
            break

    pipeline.stop()
    playback = None  # release the playback device before tearing down the UI
    cv2.destroyAllWindows()


if __name__ == "__main__":
    main()
|
@@ -0,0 +1,90 @@
|
|
1
|
+
# ******************************************************************************
|
2
|
+
# Copyright (c) 2024 Orbbec 3D Technology, Inc
|
3
|
+
#
|
4
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
5
|
+
# you may not use this file except in compliance with the License.
|
6
|
+
# You may obtain a copy of the License at
|
7
|
+
#
|
8
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
9
|
+
#
|
10
|
+
# Unless required by applicable law or agreed to in writing, software
|
11
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
12
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13
|
+
# See the License for the specific language governing permissions and
|
14
|
+
# limitations under the License.
|
15
|
+
# ******************************************************************************
|
16
|
+
|
17
|
+
import os
|
18
|
+
import numpy as np
|
19
|
+
|
20
|
+
from pyorbbecsdk import *
|
21
|
+
|
22
|
+
# Directory where captured point clouds are written.
save_points_dir = os.path.join(os.getcwd(), "point_clouds")
# makedirs with exist_ok avoids the check-then-create race of
# os.path.exists() followed by os.mkdir().
os.makedirs(save_points_dir, exist_ok=True)
|
25
|
+
|
26
|
+
def main():
    """Capture one frame set, build a point cloud, and save it as PLY."""
    pipeline = Pipeline()
    config = Config()

    # Depth stream is mandatory for point-cloud generation.
    depth_profile_list = pipeline.get_stream_profile_list(OBSensorType.DEPTH_SENSOR)
    if depth_profile_list is None:
        print("No proper depth profile, cannot generate point cloud")
        return
    config.enable_stream(depth_profile_list.get_default_video_stream_profile())

    has_color_sensor = False
    try:
        # Color is optional; when present it enables RGB point clouds.
        profile_list = pipeline.get_stream_profile_list(OBSensorType.COLOR_SENSOR)
        if profile_list is not None:
            config.enable_stream(profile_list.get_default_video_stream_profile())
            has_color_sensor = True
    except OBError as e:
        print(e)

    pipeline.enable_frame_sync()
    pipeline.start(config)

    align_filter = AlignFilter(align_to_stream=OBStreamType.COLOR_STREAM)
    point_cloud_filter = PointCloudFilter()

    while True:
        frames = pipeline.wait_for_frames(100)
        if frames is None:
            continue

        depth_frame = frames.get_depth_frame()
        if depth_frame is None:
            continue

        color_frame = frames.get_color_frame()
        if has_color_sensor and color_frame is None:
            continue

        aligned = align_filter.process(frames)

        if has_color_sensor and color_frame is not None:
            point_format = OBFormat.RGB_POINT
        else:
            point_format = OBFormat.POINT
        point_cloud_filter.set_create_point_format(point_format)

        point_cloud_frame = point_cloud_filter.process(aligned)
        if point_cloud_frame is None:
            continue
        # Save one point cloud and exit the capture loop.
        save_point_cloud_to_ply(os.path.join(save_points_dir, "point_cloud.ply"), point_cloud_frame)
        break
    print("stop pipeline")
    pipeline.stop()


if __name__ == "__main__":
    main()
|
@@ -0,0 +1,119 @@
|
|
1
|
+
# ******************************************************************************
|
2
|
+
# Copyright (c) 2024 Orbbec 3D Technology, Inc
|
3
|
+
#
|
4
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
5
|
+
# you may not use this file except in compliance with the License.
|
6
|
+
# You may obtain a copy of the License at
|
7
|
+
#
|
8
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
9
|
+
#
|
10
|
+
# Unless required by applicable law or agreed to in writing, software
|
11
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
12
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13
|
+
# See the License for the specific language governing permissions and
|
14
|
+
# limitations under the License.
|
15
|
+
# ******************************************************************************
|
16
|
+
import sys
|
17
|
+
|
18
|
+
import cv2
|
19
|
+
import numpy as np
|
20
|
+
|
21
|
+
from pyorbbecsdk import *
|
22
|
+
|
23
|
+
ESC_KEY = 27  # ASCII code for the Escape key
PRINT_INTERVAL = 1  # seconds
MIN_DEPTH = 20  # minimum valid depth, millimetres
MAX_DEPTH = 10000  # maximum valid depth, millimetres
|
27
|
+
|
28
|
+
def main():
    """Apply the sensor's recommended post-processing filters to the depth
    stream and display the filtered depth as a colorized image."""
    pipeline = Pipeline()
    config = Config()
    try:
        profile_list = pipeline.get_stream_profile_list(OBSensorType.DEPTH_SENSOR)
        config.enable_stream(profile_list.get_default_video_stream_profile())
    except Exception as e:
        print(e)
        return

    try:
        pipeline.enable_frame_sync()
    except Exception as e:
        # Frame sync is best-effort; continue without it.
        print(e)

    try:
        pipeline.start(config)
    except Exception as e:
        print(e)
        return

    device = pipeline.get_device()
    assert device is not None
    depth_sensor = device.get_sensor(OBSensorType.DEPTH_SENSOR)
    assert depth_sensor is not None
    filter_list = depth_sensor.get_recommended_filters()
    assert filter_list is not None
    # List the recommended filters and whether each is enabled.
    for i in range(len(filter_list)):
        recommended = filter_list[i]
        if recommended:
            print("filter name: ", recommended.get_name())
            print("filter is enabled: ", recommended.is_enabled())

    while True:
        try:
            frames = pipeline.wait_for_frames(100)
            if not frames:
                continue
            depth_frame = frames.get_depth_frame()
            if not depth_frame:
                continue

            # Run every enabled recommended filter over the frame in sequence.
            for i in range(len(filter_list)):
                post_filter = filter_list[i]
                if not (post_filter and post_filter.is_enabled() and depth_frame):
                    continue
                raw = depth_frame.get_data()
                if len(raw) < (depth_frame.get_width() * depth_frame.get_height() * 2):
                    # Incomplete frame data; skip this filter pass.
                    continue
                depth_frame = post_filter.process(depth_frame).as_depth_frame()

            width = depth_frame.get_width()
            height = depth_frame.get_height()
            scale = depth_frame.get_depth_scale()
            if depth_frame.get_format() != OBFormat.Y16:
                print("depth format is not Y16")
                continue
            try:
                depth_data = np.frombuffer(depth_frame.get_data(), dtype=np.uint16)
                depth_data = depth_data.reshape((height, width))
            except ValueError:
                print("Failed to reshape depth data")
                continue

            # Convert to millimetres and zero out values outside the valid range.
            depth_data = depth_data.astype(np.float32) * scale
            depth_data = np.where((depth_data > MIN_DEPTH) & (depth_data < MAX_DEPTH), depth_data, 0)
            depth_data = depth_data.astype(np.uint16)
            if depth_frame.get_format() == OBFormat.Y16 and depth_frame.get_index() % 30 == 0:
                # Report the distance of the center pixel every 30 frames.
                center_distance = depth_data[int(height / 2), int(width / 2)]
                print("center distance: ", center_distance)
            depth_image = cv2.normalize(depth_data, None, 0, 255, cv2.NORM_MINMAX, dtype=cv2.CV_8U)
            depth_image = cv2.applyColorMap(depth_image, cv2.COLORMAP_JET)

            cv2.imshow("Depth Viewer", depth_image)
            key = cv2.waitKey(1)
            if key == ord('q') or key == ESC_KEY:
                break
        except KeyboardInterrupt:
            break
    cv2.destroyAllWindows()
    pipeline.stop()


if __name__ == "__main__":
    main()
|