ToF depth¶
This is a sample code that showcases how to use the ToF sensor. The ToF node converts raw data from the ToF sensor into a depth map.
Demo¶
This demo was recorded using the OAK-D SR PoE, that’s why we selected CAM_A port on the ToF sensor.
With keyboard you can configure ToF settings:
FPPN Correction: Turn on/off with f. It’s a process that corrects the fixed pattern noise (FPN) of the ToF sensor. Should be enabled.
Wiggle Correction: Turn on/off with w. It’s a process that corrects the wiggle effect of the ToF sensor. Should be enabled.
Temperature Correction: Turn on/off with t. It’s a process that corrects the temperature effect of the ToF sensor. Should be enabled.
Optical Correction: Turn on/off with o. It’s a process that corrects the optical effect (On -> ToF returns distance represented by Green Line), so it matches stereo depth reporting.
Phase Unwrapping - Process that corrects the phase wrapping effect of the ToF sensor. The higher the number, the longer the ToF range, but it also increases the noise.
0 - Disabled, up to ~1.87 meters
1 - Up to ~3 meters
2 - Up to ~4.5 meters
3 - Up to ~6 meters
4 - Up to ~7.5 meters
Setup¶
Please run the install script to download all required dependencies. Please note that this script must be run from a git context, so you have to clone the depthai-python repository first and then run the script
git clone https://github.com/luxonis/depthai-python.git
cd depthai-python/examples
python3 install_requirements.py
For additional information, please follow the installation guide
Source code¶
Also available on GitHub
#!/usr/bin/env python3
import time
import cv2
import depthai as dai
import numpy as np

print(dai.__version__)

# 256-entry JET colormap lookup table used to colorize the depth frames.
# Entry 0 is forced to black so that invalid/zero depth renders as black.
cvColorMap = cv2.applyColorMap(np.arange(256, dtype=np.uint8), cv2.COLORMAP_JET)
cvColorMap[0] = [0, 0, 0]
def create_pipeline():
    """Assemble a DepthAI pipeline with a ToF node fed from CAM_A.

    Returns:
        tuple: ``(pipeline, tofConfig)`` — the built pipeline and the ToF
        configuration object (a fresh copy of the node's initial config),
        which the caller can modify and resend at runtime.
    """
    pipeline = dai.Pipeline()

    tof_node = pipeline.create(dai.node.ToF)

    # Start from the node's default configuration and adjust it.
    cfg = tof_node.initialConfig.get()
    # Optional. Best accuracy, but adds motion blur.
    # See the ToF node docs on how to reduce/eliminate motion blur.
    cfg.enableOpticalCorrection = True
    cfg.enablePhaseShuffleTemporalFilter = True
    cfg.phaseUnwrappingLevel = 4
    cfg.phaseUnwrapErrorThreshold = 300
    cfg.enableTemperatureCorrection = False  # Not yet supported

    # Host -> device stream for reconfiguring the ToF node at runtime.
    config_in = pipeline.create(dai.node.XLinkIn)
    config_in.setStreamName("tofConfig")
    config_in.out.link(tof_node.inputConfig)

    tof_node.initialConfig.set(cfg)

    # Camera feeding raw modulation frames into the ToF node.
    cam = pipeline.create(dai.node.Camera)
    cam.setFps(60)  # ToF node will produce depth frames at /2 of this rate
    cam.setBoardSocket(dai.CameraBoardSocket.CAM_A)
    cam.raw.link(tof_node.input)

    # Device -> host stream carrying the computed depth frames.
    depth_out = pipeline.create(dai.node.XLinkOut)
    depth_out.setStreamName("depth")
    tof_node.depth.link(depth_out.input)

    return pipeline, tof_node.initialConfig.get()
if __name__ == '__main__':
    pipeline, tofConfig = create_pipeline()

    # Median filter modes cycled by the 'm' key.
    # Hoisted here so the list is not rebuilt on every keypress.
    MEDIAN_SETTINGS = [
        dai.MedianFilter.MEDIAN_OFF,
        dai.MedianFilter.KERNEL_3x3,
        dai.MedianFilter.KERNEL_5x5,
        dai.MedianFilter.KERNEL_7x7,
    ]

    with dai.Device(pipeline) as device:
        print('Connected cameras:', device.getConnectedCameraFeatures())
        qDepth = device.getOutputQueue(name="depth")
        tofConfigInQueue = device.getInputQueue("tofConfig")

        while True:
            key = cv2.waitKey(1)
            if key == ord('q'):
                break
            elif key == ord('f'):
                # Toggle fixed-pattern-noise correction.
                tofConfig.enableFPPNCorrection = not tofConfig.enableFPPNCorrection
                tofConfigInQueue.send(tofConfig)
            elif key == ord('o'):
                # Toggle optical correction (matches stereo depth reporting).
                tofConfig.enableOpticalCorrection = not tofConfig.enableOpticalCorrection
                tofConfigInQueue.send(tofConfig)
            elif key == ord('w'):
                # Toggle wiggle-effect correction.
                tofConfig.enableWiggleCorrection = not tofConfig.enableWiggleCorrection
                tofConfigInQueue.send(tofConfig)
            elif key == ord('t'):
                # Toggle temperature correction.
                tofConfig.enableTemperatureCorrection = not tofConfig.enableTemperatureCorrection
                tofConfigInQueue.send(tofConfig)
            elif ord('0') <= key <= ord('5'):
                # Digit keys select the phase-unwrapping level; 0 disables it.
                # (waitKey returns -1 when no key is pressed, which fails this range test.)
                level = key - ord('0')
                tofConfig.enablePhaseUnwrapping = level > 0
                tofConfig.phaseUnwrappingLevel = level
                tofConfigInQueue.send(tofConfig)
            elif key == ord('m'):
                # Advance to the next median filter mode, wrapping around.
                currentMedian = tofConfig.median
                nextIndex = (MEDIAN_SETTINGS.index(currentMedian) + 1) % len(MEDIAN_SETTINGS)
                nextMedian = MEDIAN_SETTINGS[nextIndex]
                print(f"Changing median to {nextMedian.name} from {currentMedian.name}")
                tofConfig.median = nextMedian
                tofConfigInQueue.send(tofConfig)

            imgFrame = qDepth.get()  # blocking call, waits until new data has arrived
            depth_map = imgFrame.getFrame()

            # Colorize: higher unwrapping levels extend the representable range
            # (100 MHz modulation freq. -> each wrap adds ~1.5 m).
            max_depth = (tofConfig.phaseUnwrappingLevel + 1) * 1500
            depth_colorized = np.interp(depth_map, (0, max_depth), (0, 255)).astype(np.uint8)
            depth_colorized = cv2.applyColorMap(depth_colorized, cvColorMap)
            cv2.imshow("Colorized depth", depth_colorized)
        # The `with` block closes the device on exit; no explicit device.close() needed.