Stereo Depth custom Mesh¶
This example shows how you can load a custom mesh to the device and use it for depth calculation. In this example, the mesh files are generated from the camera calibration data, but you can also use your own mesh files.
By default, StereoDepth uses the same logic as the getMesh() function below to calculate the mesh whenever the horizontal FOV is larger than 90°. You can also force the mesh calculation with:
stereo = pipeline.create(dai.node.StereoDepth)
# Enable mesh calculation to correct distortion:
stereo.enableDistortionCorrection(True)
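To check whether your camera crosses that 90° threshold, you can read the horizontal FOV from the device calibration. A minimal sketch, assuming calibData.getFov() reports the horizontal FOV of the given camera socket in degrees:
import depthai as dai
with dai.Device() as device:
    calibData = device.readCalibration()
    hfov = calibData.getFov(dai.CameraBoardSocket.CAM_B)  # left mono camera
    print(f"Left camera horizontal FOV: {hfov:.1f} deg")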
The StereoDepth node also allows you to load mesh files directly from a file path:
stereo = pipeline.create(dai.node.StereoDepth)
stereo.loadMeshFiles('path/to/left_mesh', 'path/to/right_mesh')
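Since the mesh files produced by saveMeshFiles() in the example below are raw dumps of the mesh arrays, you can also read them back yourself and pass the data to loadMeshData(). A minimal sketch, assuming the files were written by this example:
import numpy as np
leftMesh = np.fromfile('path/to/left_mesh.calib', dtype=np.float32)
rightMesh = np.fromfile('path/to/right_mesh.calib', dtype=np.float32)
stereo.loadMeshData(list(leftMesh.tobytes()), list(rightMesh.tobytes()))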
Demo¶
On the image above you can see that the rectified frame doesn't have as wide a FOV as the original one. That's because distortion correction is applied (in this case via custom mesh files), so that disparity matching can be performed correctly.
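If you want to reproduce such a comparison yourself, you can show the right and rectifiedRight streams from the example below side by side. A small sketch, reusing the queues the example creates and assuming both frames have the same resolution (they do, since both come from the same mono camera):
qRight = device.getOutputQueue("right", 8, blocking=False)
qRectified = device.getOutputQueue("rectifiedRight", 8, blocking=False)
while True:
    # Stack the original and rectified right frames horizontally for comparison
    combined = np.hstack((qRight.get().getCvFrame(), qRectified.get().getCvFrame()))
    cv2.imshow("right | rectifiedRight", combined)
    if cv2.waitKey(1) == ord("q"):
        break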
Setup¶
Please run the install script to download all required dependencies. Please note that this script must be run from the git context, so you have to download the depthai-python repository first and then run the script:
git clone https://github.com/luxonis/depthai-python.git
cd depthai-python/examples
python3 install_requirements.py
For additional information, please follow the installation guide.
Source code¶
Also available on GitHub
#!/usr/bin/env python3
import cv2
import numpy as np
import depthai as dai
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-res", "--resolution", type=str, default="720",
                    help="Sets the resolution on mono cameras. Options: 800 | 720 | 400")
parser.add_argument("-md", "--mesh_dir", type=str, default=None,
                    help="Output directory for mesh files. If not specified mesh files won't be saved")
parser.add_argument("-lm", "--load_mesh", default=False, action="store_true",
                    help="Read camera intrinsics, generate mesh files and load them into the stereo node.")
args = parser.parse_args()

meshDirectory = args.mesh_dir  # Output dir for mesh files
generateMesh = args.load_mesh  # Load mesh files

RES_MAP = {
    '800': {'w': 1280, 'h': 800, 'res': dai.MonoCameraProperties.SensorResolution.THE_800_P },
    '720': {'w': 1280, 'h': 720, 'res': dai.MonoCameraProperties.SensorResolution.THE_720_P },
    '400': {'w': 640,  'h': 400, 'res': dai.MonoCameraProperties.SensorResolution.THE_400_P }
}
if args.resolution not in RES_MAP:
    exit("Unsupported resolution!")
resolution = RES_MAP[args.resolution]
def getMesh(calibData):
    M1 = np.array(calibData.getCameraIntrinsics(dai.CameraBoardSocket.CAM_B, resolution['w'], resolution['h']))
    d1 = np.array(calibData.getDistortionCoefficients(dai.CameraBoardSocket.CAM_B))
    R1 = np.array(calibData.getStereoLeftRectificationRotation())
    M2 = np.array(calibData.getCameraIntrinsics(dai.CameraBoardSocket.CAM_C, resolution['w'], resolution['h']))
    d2 = np.array(calibData.getDistortionCoefficients(dai.CameraBoardSocket.CAM_C))
    R2 = np.array(calibData.getStereoRightRectificationRotation())
    # M2 is passed as the new camera matrix for both maps, so both rectified images share the same intrinsics
    mapXL, mapYL = cv2.initUndistortRectifyMap(M1, d1, R1, M2, (resolution['w'], resolution['h']), cv2.CV_32FC1)
    mapXR, mapYR = cv2.initUndistortRectifyMap(M2, d2, R2, M2, (resolution['w'], resolution['h']), cv2.CV_32FC1)

    meshCellSize = 16
    meshLeft = []
    meshRight = []

    # Sample the dense remap tables every meshCellSize pixels; each mesh row stores
    # interleaved (y, x) warp coordinates, clamped at the bottom/right image borders
    for y in range(mapXL.shape[0] + 1):
        if y % meshCellSize == 0:
            rowLeft = []
            rowRight = []
            for x in range(mapXL.shape[1] + 1):
                if x % meshCellSize == 0:
                    if y == mapXL.shape[0] and x == mapXL.shape[1]:
                        rowLeft.append(mapYL[y - 1, x - 1])
                        rowLeft.append(mapXL[y - 1, x - 1])
                        rowRight.append(mapYR[y - 1, x - 1])
                        rowRight.append(mapXR[y - 1, x - 1])
                    elif y == mapXL.shape[0]:
                        rowLeft.append(mapYL[y - 1, x])
                        rowLeft.append(mapXL[y - 1, x])
                        rowRight.append(mapYR[y - 1, x])
                        rowRight.append(mapXR[y - 1, x])
                    elif x == mapXL.shape[1]:
                        rowLeft.append(mapYL[y, x - 1])
                        rowLeft.append(mapXL[y, x - 1])
                        rowRight.append(mapYR[y, x - 1])
                        rowRight.append(mapXR[y, x - 1])
                    else:
                        rowLeft.append(mapYL[y, x])
                        rowLeft.append(mapXL[y, x])
                        rowRight.append(mapYR[y, x])
                        rowRight.append(mapXR[y, x])
            # Pad the row to an even number of cells
            if (mapXL.shape[1] % meshCellSize) % 2 != 0:
                rowLeft.append(0)
                rowLeft.append(0)
                rowRight.append(0)
                rowRight.append(0)

            meshLeft.append(rowLeft)
            meshRight.append(rowRight)

    meshLeft = np.array(meshLeft)
    meshRight = np.array(meshRight)

    return meshLeft, meshRight
def saveMeshFiles(meshLeft, meshRight, outputPath):
    print("Saving mesh to:", outputPath)
    meshLeft.tofile(outputPath + "/left_mesh.calib")
    meshRight.tofile(outputPath + "/right_mesh.calib")
def create_pipeline(device: dai.Device) -> dai.Pipeline:
    calibData = device.readCalibration()
    print("Creating Stereo Depth pipeline")
    pipeline = dai.Pipeline()

    camLeft = pipeline.create(dai.node.MonoCamera)
    camLeft.setBoardSocket(dai.CameraBoardSocket.LEFT)

    camRight = pipeline.create(dai.node.MonoCamera)
    camRight.setBoardSocket(dai.CameraBoardSocket.RIGHT)

    xoutRight = pipeline.create(dai.node.XLinkOut)
    xoutRight.setStreamName("right")
    camRight.out.link(xoutRight.input)

    for monoCam in (camLeft, camRight):  # Common config
        monoCam.setResolution(resolution['res'])
        # monoCam.setFps(20.0)

    stereo = pipeline.create(dai.node.StereoDepth)
    camLeft.out.link(stereo.left)
    camRight.out.link(stereo.right)
    stereo.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY)
    stereo.setRectifyEdgeFillColor(0)  # Black, to better see the cutout
    stereo.setLeftRightCheck(True)
    stereo.setExtendedDisparity(True)

    xoutDisparity = pipeline.create(dai.node.XLinkOut)
    xoutDisparity.setStreamName("disparity")
    stereo.disparity.link(xoutDisparity.input)

    xoutRectifRight = pipeline.create(dai.node.XLinkOut)
    xoutRectifRight.setStreamName("rectifiedRight")
    stereo.rectifiedRight.link(xoutRectifRight.input)

    # Create custom meshes from calibration data. Here you could also
    # load your own mesh files, or generate them in any other way.
    leftMesh, rightMesh = getMesh(calibData)
    if generateMesh:
        meshLeft = list(leftMesh.tobytes())
        meshRight = list(rightMesh.tobytes())
        # Load mesh data to the StereoDepth node
        stereo.loadMeshData(meshLeft, meshRight)

    if meshDirectory is not None:
        saveMeshFiles(leftMesh, rightMesh, meshDirectory)
    return pipeline
with dai.Device() as device:
    device.startPipeline(create_pipeline(device))

    # Create a receive queue for each stream
    qList = [device.getOutputQueue(stream, 8, blocking=False) for stream in ['right', 'rectifiedRight', 'disparity']]

    while True:
        for q in qList:
            name = q.getName()
            frame = q.get().getCvFrame()
            cv2.imshow(name, frame)
        if cv2.waitKey(1) == ord("q"):
            break
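To generate the meshes from your device's calibration, load them into the StereoDepth node and also save them to disk in one go, you can combine the flags defined above. The script name here is only a placeholder, use whatever name you saved the example under:
python3 stereo_depth_custom_mesh.py -res 720 -lm -md ./meshes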