Feature Detector
This example demonstrates the detection capability of the FeatureTracker node: it only detects features on each frame, without tracking them. The Feature Tracker example builds on it and also tracks the detected features across frames.
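At its core, the example creates a FeatureTracker node and disables its motion estimator, so the node only runs corner detection on each incoming frame. A condensed sketch of the relevant configuration, taken from the full source below:

import depthai as dai

pipeline = dai.Pipeline()
monoLeft = pipeline.create(dai.node.MonoCamera)
monoLeft.setCamera("left")

featureTracker = pipeline.create(dai.node.FeatureTracker)
# With the motion estimator disabled, the node detects corners
# but does not track them across frames
featureTracker.initialConfig.setMotionEstimator(False)

monoLeft.out.link(featureTracker.inputImage)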
Demo

Setup
Please run the install script to download all required dependencies. Note that this script must be run from within the repository, so you have to clone the depthai-python repository first and then run the script:
git clone https://github.com/luxonis/depthai-python.git
cd depthai-python/examples
python3 install_requirements.py
For additional information, please follow the installation guide.
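To check that the dependencies installed correctly, you can verify that the library imports (a quick sanity check, not part of the official guide):

python3 -c "import depthai as dai; print(dai.__version__)"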
Source code
Also available on GitHub
#!/usr/bin/env python3
import cv2
import depthai as dai
# Create pipeline
pipeline = dai.Pipeline()
# Define sources and outputs
monoLeft = pipeline.create(dai.node.MonoCamera)
monoRight = pipeline.create(dai.node.MonoCamera)
featureTrackerLeft = pipeline.create(dai.node.FeatureTracker)
featureTrackerRight = pipeline.create(dai.node.FeatureTracker)
xoutPassthroughFrameLeft = pipeline.create(dai.node.XLinkOut)
xoutTrackedFeaturesLeft = pipeline.create(dai.node.XLinkOut)
xoutPassthroughFrameRight = pipeline.create(dai.node.XLinkOut)
xoutTrackedFeaturesRight = pipeline.create(dai.node.XLinkOut)
xinTrackedFeaturesConfig = pipeline.create(dai.node.XLinkIn)
xoutPassthroughFrameLeft.setStreamName("passthroughFrameLeft")
xoutTrackedFeaturesLeft.setStreamName("trackedFeaturesLeft")
xoutPassthroughFrameRight.setStreamName("passthroughFrameRight")
xoutTrackedFeaturesRight.setStreamName("trackedFeaturesRight")
xinTrackedFeaturesConfig.setStreamName("trackedFeaturesConfig")
# Properties
monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
monoLeft.setCamera("left")
monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
monoRight.setCamera("right")
# Disable optical flow
featureTrackerLeft.initialConfig.setMotionEstimator(False)
featureTrackerRight.initialConfig.setMotionEstimator(False)
# Linking
monoLeft.out.link(featureTrackerLeft.inputImage)
featureTrackerLeft.passthroughInputImage.link(xoutPassthroughFrameLeft.input)
featureTrackerLeft.outputFeatures.link(xoutTrackedFeaturesLeft.input)
xinTrackedFeaturesConfig.out.link(featureTrackerLeft.inputConfig)
monoRight.out.link(featureTrackerRight.inputImage)
featureTrackerRight.passthroughInputImage.link(xoutPassthroughFrameRight.input)
featureTrackerRight.outputFeatures.link(xoutTrackedFeaturesRight.input)
xinTrackedFeaturesConfig.out.link(featureTrackerRight.inputConfig)
featureTrackerConfig = featureTrackerRight.initialConfig.get()
print("Press 's' to switch between Harris and Shi-Thomasi corner detector!")
# Connect to device and start pipeline
with dai.Device(pipeline) as device:

    # Output queues used to receive the results
    passthroughImageLeftQueue = device.getOutputQueue("passthroughFrameLeft", 8, False)
    outputFeaturesLeftQueue = device.getOutputQueue("trackedFeaturesLeft", 8, False)
    passthroughImageRightQueue = device.getOutputQueue("passthroughFrameRight", 8, False)
    outputFeaturesRightQueue = device.getOutputQueue("trackedFeaturesRight", 8, False)

    inputFeatureTrackerConfigQueue = device.getInputQueue("trackedFeaturesConfig")

    leftWindowName = "left"
    rightWindowName = "right"

    def drawFeatures(frame, features):
        pointColor = (0, 0, 255)
        circleRadius = 2
        for feature in features:
            cv2.circle(frame, (int(feature.position.x), int(feature.position.y)), circleRadius, pointColor, -1, cv2.LINE_AA, 0)

    while True:
        inPassthroughFrameLeft = passthroughImageLeftQueue.get()
        passthroughFrameLeft = inPassthroughFrameLeft.getFrame()
        leftFrame = cv2.cvtColor(passthroughFrameLeft, cv2.COLOR_GRAY2BGR)

        inPassthroughFrameRight = passthroughImageRightQueue.get()
        passthroughFrameRight = inPassthroughFrameRight.getFrame()
        rightFrame = cv2.cvtColor(passthroughFrameRight, cv2.COLOR_GRAY2BGR)

        trackedFeaturesLeft = outputFeaturesLeftQueue.get().trackedFeatures
        drawFeatures(leftFrame, trackedFeaturesLeft)

        trackedFeaturesRight = outputFeaturesRightQueue.get().trackedFeatures
        drawFeatures(rightFrame, trackedFeaturesRight)

        # Show the frame
        cv2.imshow(leftWindowName, leftFrame)
        cv2.imshow(rightWindowName, rightFrame)

        key = cv2.waitKey(1)
        if key == ord('q'):
            break
        elif key == ord('s'):
            if featureTrackerConfig.cornerDetector.type == dai.FeatureTrackerConfig.CornerDetector.Type.HARRIS:
                featureTrackerConfig.cornerDetector.type = dai.FeatureTrackerConfig.CornerDetector.Type.SHI_THOMASI
                print("Switching to Shi-Tomasi")
            else:
                featureTrackerConfig.cornerDetector.type = dai.FeatureTrackerConfig.CornerDetector.Type.HARRIS
                print("Switching to Harris")

            cfg = dai.FeatureTrackerConfig()
            cfg.set(featureTrackerConfig)
            inputFeatureTrackerConfigQueue.send(cfg)
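Beyond drawing circles, each returned feature can be inspected directly inside the loop above. A minimal sketch (position and id are fields of dai.TrackedFeature in the depthai Python API; the printing is only for illustration):

for feature in trackedFeaturesLeft:
    # Each detected feature carries a sub-pixel position and a numeric id
    print(f"feature {feature.id} at ({feature.position.x:.1f}, {feature.position.y:.1f})")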
Also available on GitHub
#include <iostream>
// Common includes for developing with the depthai library
#include "depthai/depthai.hpp"

#include <deque>
#include <unordered_map>
#include <unordered_set>
static void drawFeatures(cv::Mat& frame, std::vector<dai::TrackedFeature>& features) {
    static const auto pointColor = cv::Scalar(0, 0, 255);
    static const int circleRadius = 2;
    for(auto& feature : features) {
        cv::circle(frame, cv::Point(feature.position.x, feature.position.y), circleRadius, pointColor, -1, cv::LINE_AA, 0);
    }
}
int main() {
    using namespace std;

    // Create pipeline
    dai::Pipeline pipeline;

    // Define sources and outputs
    auto monoLeft = pipeline.create<dai::node::MonoCamera>();
    auto monoRight = pipeline.create<dai::node::MonoCamera>();
    auto featureTrackerLeft = pipeline.create<dai::node::FeatureTracker>();
    auto featureTrackerRight = pipeline.create<dai::node::FeatureTracker>();

    auto xoutPassthroughFrameLeft = pipeline.create<dai::node::XLinkOut>();
    auto xoutTrackedFeaturesLeft = pipeline.create<dai::node::XLinkOut>();
    auto xoutPassthroughFrameRight = pipeline.create<dai::node::XLinkOut>();
    auto xoutTrackedFeaturesRight = pipeline.create<dai::node::XLinkOut>();
    auto xinTrackedFeaturesConfig = pipeline.create<dai::node::XLinkIn>();

    xoutPassthroughFrameLeft->setStreamName("passthroughFrameLeft");
    xoutTrackedFeaturesLeft->setStreamName("trackedFeaturesLeft");
    xoutPassthroughFrameRight->setStreamName("passthroughFrameRight");
    xoutTrackedFeaturesRight->setStreamName("trackedFeaturesRight");
    xinTrackedFeaturesConfig->setStreamName("trackedFeaturesConfig");

    // Properties
    monoLeft->setResolution(dai::MonoCameraProperties::SensorResolution::THE_720_P);
    monoLeft->setCamera("left");
    monoRight->setResolution(dai::MonoCameraProperties::SensorResolution::THE_720_P);
    monoRight->setCamera("right");

    // Disable optical flow
    featureTrackerLeft->initialConfig.setMotionEstimator(false);
    featureTrackerRight->initialConfig.setMotionEstimator(false);

    // Linking
    monoLeft->out.link(featureTrackerLeft->inputImage);
    featureTrackerLeft->passthroughInputImage.link(xoutPassthroughFrameLeft->input);
    featureTrackerLeft->outputFeatures.link(xoutTrackedFeaturesLeft->input);
    xinTrackedFeaturesConfig->out.link(featureTrackerLeft->inputConfig);

    monoRight->out.link(featureTrackerRight->inputImage);
    featureTrackerRight->passthroughInputImage.link(xoutPassthroughFrameRight->input);
    featureTrackerRight->outputFeatures.link(xoutTrackedFeaturesRight->input);
    xinTrackedFeaturesConfig->out.link(featureTrackerRight->inputConfig);

    auto featureTrackerConfig = featureTrackerRight->initialConfig.get();

    printf("Press 's' to switch between Harris and Shi-Tomasi corner detector!\n");

    // Connect to device and start pipeline
    dai::Device device(pipeline);

    // Output queues used to receive the results
    auto passthroughImageLeftQueue = device.getOutputQueue("passthroughFrameLeft", 8, false);
    auto outputFeaturesLeftQueue = device.getOutputQueue("trackedFeaturesLeft", 8, false);
    auto passthroughImageRightQueue = device.getOutputQueue("passthroughFrameRight", 8, false);
    auto outputFeaturesRightQueue = device.getOutputQueue("trackedFeaturesRight", 8, false);

    auto inputFeatureTrackerConfigQueue = device.getInputQueue("trackedFeaturesConfig");

    const auto leftWindowName = "left";
    const auto rightWindowName = "right";

    while(true) {
        auto inPassthroughFrameLeft = passthroughImageLeftQueue->get<dai::ImgFrame>();
        cv::Mat passthroughFrameLeft = inPassthroughFrameLeft->getFrame();
        cv::Mat leftFrame;
        cv::cvtColor(passthroughFrameLeft, leftFrame, cv::COLOR_GRAY2BGR);

        auto inPassthroughFrameRight = passthroughImageRightQueue->get<dai::ImgFrame>();
        cv::Mat passthroughFrameRight = inPassthroughFrameRight->getFrame();
        cv::Mat rightFrame;
        cv::cvtColor(passthroughFrameRight, rightFrame, cv::COLOR_GRAY2BGR);

        auto trackedFeaturesLeft = outputFeaturesLeftQueue->get<dai::TrackedFeatures>()->trackedFeatures;
        drawFeatures(leftFrame, trackedFeaturesLeft);

        auto trackedFeaturesRight = outputFeaturesRightQueue->get<dai::TrackedFeatures>()->trackedFeatures;
        drawFeatures(rightFrame, trackedFeaturesRight);

        // Show the frame
        cv::imshow(leftWindowName, leftFrame);
        cv::imshow(rightWindowName, rightFrame);

        int key = cv::waitKey(1);
        if(key == 'q') {
            break;
        } else if(key == 's') {
            if(featureTrackerConfig.cornerDetector.type == dai::FeatureTrackerConfig::CornerDetector::Type::HARRIS) {
                featureTrackerConfig.cornerDetector.type = dai::FeatureTrackerConfig::CornerDetector::Type::SHI_THOMASI;
                printf("Switching to Shi-Tomasi\n");
            } else {
                featureTrackerConfig.cornerDetector.type = dai::FeatureTrackerConfig::CornerDetector::Type::HARRIS;
                printf("Switching to Harris\n");
            }
            auto cfg = dai::FeatureTrackerConfig();
            cfg.set(featureTrackerConfig);
            inputFeatureTrackerConfigQueue->send(cfg);
        }
    }
    return 0;
}
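The same runtime-reconfiguration path (filling a dai.FeatureTrackerConfig and sending it over the XLinkIn queue) can carry settings other than the corner-detector type. A hedged sketch in Python, reusing the variables from the Python example above; it assumes the cornerDetector.numTargetFeatures field of the raw config, and the value 512 is purely illustrative:

# Request more corners per frame at runtime
# (numTargetFeatures is an assumed field; 512 is an illustrative value)
featureTrackerConfig.cornerDetector.numTargetFeatures = 512
cfg = dai.FeatureTrackerConfig()
cfg.set(featureTrackerConfig)
inputFeatureTrackerConfigQueue.send(cfg)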