Created
March 3, 2020 15:05
-
-
Save superseppl/50d3828f2334ef034b058d612f47aa07 to your computer and use it in GitHub Desktop.
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
## License: Apache 2.0. See LICENSE file in root directory.
## Copyright(c) 2015-2017 Intel Corporation. All Rights Reserved.
###############################################
##      Open CV and Numpy integration        ##
###############################################
import pyrealsense2 as rs
import numpy as np
import cv2

# Configure depth and color streams: 640x480 @ 30 FPS.
pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)

# Align depth frames to the color stream's viewpoint so per-pixel
# depth lookups correspond to the color image.
align = rs.align(rs.stream.color)

# Loop invariants hoisted out of the frame loop: the point-cloud helper
# and the display window only need to be created once, not per frame.
pc = rs.pointcloud()
cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)

# Start streaming
pipeline.start(config)
try:
    while True:
        # Wait for a coherent pair of frames: depth and color
        frames = pipeline.wait_for_frames()
        aligned_frames = align.process(frames)

        # Get aligned frames; either may be missing on a dropped frame.
        aligned_depth_frame = aligned_frames.get_depth_frame()  # 640x480 depth image
        color_frame = aligned_frames.get_color_frame()
        if not aligned_depth_frame or not color_frame:
            continue
        depth_frame = aligned_depth_frame

        # Convert images to numpy arrays
        depth_image = np.asanyarray(depth_frame.get_data())
        color_image = np.asanyarray(color_frame.get_data())

        # Distance in meters of the pixel at (x=100, y=100).
        pixel_distance_in_meters = depth_frame.get_distance(100, 100)
        print(pixel_distance_in_meters)

        # Compute a point cloud from the depth frame, textured with color.
        points = pc.calculate(depth_frame)
        pc.map_to(color_frame)

        # Apply colormap on depth image (image must be converted to
        # 8-bit per pixel first).
        depth_colormap = cv2.applyColorMap(
            cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)

        # Stack both images horizontally and show them.
        images = np.hstack((color_image, depth_colormap))
        cv2.imshow('RealSense', images)

        # Clean exit on 'q' or Esc instead of requiring Ctrl+C;
        # the original discarded waitKey's return value.
        key = cv2.waitKey(1) & 0xFF
        if key in (ord('q'), 27):
            break
finally:
    # Stop streaming and release the display window.
    pipeline.stop()
    cv2.destroyAllWindows()
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment