Feature Tracker¶
This example demonstrates the capabilities of the FeatureTracker node. It detects features and tracks them between consecutive frames using optical flow, assigning a unique ID to each matched feature. In contrast, the Feature Detector example only detects features, without tracking them across frames.
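The core wiring can be condensed to a minimal sketch that uses the same API as the full example below; the stream name "features" and the single-camera setup are illustrative simplifications:

import depthai as dai

pipeline = dai.Pipeline()

mono = pipeline.create(dai.node.MonoCamera)
mono.setCamera("left")

tracker = pipeline.create(dai.node.FeatureTracker)
xout = pipeline.create(dai.node.XLinkOut)
xout.setStreamName("features")  # illustrative stream name

mono.out.link(tracker.inputImage)
tracker.outputFeatures.link(xout.input)

with dai.Device(pipeline) as device:
    queue = device.getOutputQueue("features", maxSize=8, blocking=False)
    while True:
        # Each TrackedFeature carries a persistent .id and a .position
        for feature in queue.get().trackedFeatures:
            print(feature.id, feature.position.x, feature.position.y)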
Demo¶
Setup¶
Please run the install script to download all required dependencies. Please note that this script must be run from inside the cloned repository, so you have to clone the depthai-python repository first and then run the script:
git clone https://github.com/luxonis/depthai-python.git
cd depthai-python/examples
python3 install_requirements.py
For additional information, please follow the installation guide.
Source code¶
Also available on GitHub
#!/usr/bin/env python3
import cv2
import depthai as dai
from collections import deque
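
# Helper class that remembers the recent positions of each tracked feature ID
# and draws them as motion trails on top of the camera preview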
class FeatureTrackerDrawer:

    lineColor = (200, 0, 200)
    pointColor = (0, 0, 255)
    circleRadius = 2
    maxTrackedFeaturesPathLength = 30
    # for how many frames the feature is tracked
    trackedFeaturesPathLength = 10

    trackedIDs = None
    trackedFeaturesPath = None

    def onTrackBar(self, val):
        FeatureTrackerDrawer.trackedFeaturesPathLength = val
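
    # Update the per-ID path history: append the newest position of every feature
    # that is still tracked and drop the IDs that disappeared in this frame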
    def trackFeaturePath(self, features):

        newTrackedIDs = set()
        for currentFeature in features:
            currentID = currentFeature.id
            newTrackedIDs.add(currentID)

            if currentID not in self.trackedFeaturesPath:
                self.trackedFeaturesPath[currentID] = deque()

            path = self.trackedFeaturesPath[currentID]

            path.append(currentFeature.position)
            while len(path) > max(1, FeatureTrackerDrawer.trackedFeaturesPathLength):
                path.popleft()
            self.trackedFeaturesPath[currentID] = path

        featuresToRemove = set()
        for oldId in self.trackedIDs:
            if oldId not in newTrackedIDs:
                featuresToRemove.add(oldId)

        for id in featuresToRemove:
            self.trackedFeaturesPath.pop(id)

        self.trackedIDs = newTrackedIDs
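
    # Draw every stored path as a polyline and mark its most recent point with a filled circle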
    def drawFeatures(self, img):

        cv2.setTrackbarPos(self.trackbarName, self.windowName, FeatureTrackerDrawer.trackedFeaturesPathLength)

        for featurePath in self.trackedFeaturesPath.values():
            path = featurePath

            for j in range(len(path) - 1):
                src = (int(path[j].x), int(path[j].y))
                dst = (int(path[j + 1].x), int(path[j + 1].y))
                cv2.line(img, src, dst, self.lineColor, 1, cv2.LINE_AA, 0)
            j = len(path) - 1
            cv2.circle(img, (int(path[j].x), int(path[j].y)), self.circleRadius, self.pointColor, -1, cv2.LINE_AA, 0)
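
    # Create the preview window and a trackbar that controls the trail length (in frames)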
    def __init__(self, trackbarName, windowName):
        self.trackbarName = trackbarName
        self.windowName = windowName
        cv2.namedWindow(windowName)
        cv2.createTrackbar(trackbarName, windowName, FeatureTrackerDrawer.trackedFeaturesPathLength, FeatureTrackerDrawer.maxTrackedFeaturesPathLength, self.onTrackBar)
        self.trackedIDs = set()
        self.trackedFeaturesPath = dict()
# Create pipeline
pipeline = dai.Pipeline()
# Define sources and outputs
monoLeft = pipeline.create(dai.node.MonoCamera)
monoRight = pipeline.create(dai.node.MonoCamera)
featureTrackerLeft = pipeline.create(dai.node.FeatureTracker)
featureTrackerRight = pipeline.create(dai.node.FeatureTracker)
xoutPassthroughFrameLeft = pipeline.create(dai.node.XLinkOut)
xoutTrackedFeaturesLeft = pipeline.create(dai.node.XLinkOut)
xoutPassthroughFrameRight = pipeline.create(dai.node.XLinkOut)
xoutTrackedFeaturesRight = pipeline.create(dai.node.XLinkOut)
xinTrackedFeaturesConfig = pipeline.create(dai.node.XLinkIn)
xoutPassthroughFrameLeft.setStreamName("passthroughFrameLeft")
xoutTrackedFeaturesLeft.setStreamName("trackedFeaturesLeft")
xoutPassthroughFrameRight.setStreamName("passthroughFrameRight")
xoutTrackedFeaturesRight.setStreamName("trackedFeaturesRight")
xinTrackedFeaturesConfig.setStreamName("trackedFeaturesConfig")
# Properties
monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P)
monoLeft.setCamera("left")
monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P)
monoRight.setCamera("right")
# Linking
monoLeft.out.link(featureTrackerLeft.inputImage)
featureTrackerLeft.passthroughInputImage.link(xoutPassthroughFrameLeft.input)
featureTrackerLeft.outputFeatures.link(xoutTrackedFeaturesLeft.input)
xinTrackedFeaturesConfig.out.link(featureTrackerLeft.inputConfig)
monoRight.out.link(featureTrackerRight.inputImage)
featureTrackerRight.passthroughInputImage.link(xoutPassthroughFrameRight.input)
featureTrackerRight.outputFeatures.link(xoutTrackedFeaturesRight.input)
xinTrackedFeaturesConfig.out.link(featureTrackerRight.inputConfig)
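
# Both trackers share the same XLinkIn config stream, so a single message reconfigures both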
# By default the least amount of resources is allocated;
# increasing it improves performance when optical flow is enabled
numShaves = 2
numMemorySlices = 2
featureTrackerLeft.setHardwareResources(numShaves, numMemorySlices)
featureTrackerRight.setHardwareResources(numShaves, numMemorySlices)
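
# Take a copy of the initial config; it is edited and re-sent at runtime to switch motion estimators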
featureTrackerConfig = featureTrackerRight.initialConfig.get()
print("Press 's' to switch between Lucas-Kanade optical flow and hardware accelerated motion estimation!")
# Connect to device and start pipeline
with dai.Device(pipeline) as device:

    # Output queues used to receive the results
    passthroughImageLeftQueue = device.getOutputQueue("passthroughFrameLeft", 8, False)
    outputFeaturesLeftQueue = device.getOutputQueue("trackedFeaturesLeft", 8, False)
    passthroughImageRightQueue = device.getOutputQueue("passthroughFrameRight", 8, False)
    outputFeaturesRightQueue = device.getOutputQueue("trackedFeaturesRight", 8, False)

    inputFeatureTrackerConfigQueue = device.getInputQueue("trackedFeaturesConfig")

    leftWindowName = "left"
    leftFeatureDrawer = FeatureTrackerDrawer("Feature tracking duration (frames)", leftWindowName)

    rightWindowName = "right"
    rightFeatureDrawer = FeatureTrackerDrawer("Feature tracking duration (frames)", rightWindowName)
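
    # Host-side loop: fetch passthrough frames and tracked features, draw the trails, then display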
    while True:
        inPassthroughFrameLeft = passthroughImageLeftQueue.get()
        passthroughFrameLeft = inPassthroughFrameLeft.getFrame()
        leftFrame = cv2.cvtColor(passthroughFrameLeft, cv2.COLOR_GRAY2BGR)

        inPassthroughFrameRight = passthroughImageRightQueue.get()
        passthroughFrameRight = inPassthroughFrameRight.getFrame()
        rightFrame = cv2.cvtColor(passthroughFrameRight, cv2.COLOR_GRAY2BGR)

        trackedFeaturesLeft = outputFeaturesLeftQueue.get().trackedFeatures
        leftFeatureDrawer.trackFeaturePath(trackedFeaturesLeft)
        leftFeatureDrawer.drawFeatures(leftFrame)

        trackedFeaturesRight = outputFeaturesRightQueue.get().trackedFeatures
        rightFeatureDrawer.trackFeaturePath(trackedFeaturesRight)
        rightFeatureDrawer.drawFeatures(rightFrame)

        # Show the frame
        cv2.imshow(leftWindowName, leftFrame)
        cv2.imshow(rightWindowName, rightFrame)

        key = cv2.waitKey(1)
        if key == ord('q'):
            break
        elif key == ord('s'):
            if featureTrackerConfig.motionEstimator.type == dai.FeatureTrackerConfig.MotionEstimator.Type.LUCAS_KANADE_OPTICAL_FLOW:
                featureTrackerConfig.motionEstimator.type = dai.FeatureTrackerConfig.MotionEstimator.Type.HW_MOTION_ESTIMATION
                print("Switching to hardware accelerated motion estimation")
            else:
                featureTrackerConfig.motionEstimator.type = dai.FeatureTrackerConfig.MotionEstimator.Type.LUCAS_KANADE_OPTICAL_FLOW
                print("Switching to Lucas-Kanade optical flow")

            cfg = dai.FeatureTrackerConfig()
            cfg.set(featureTrackerConfig)
            inputFeatureTrackerConfigQueue.send(cfg)
Also available on GitHub
#include <iostream>
// Includes common necessary includes for development using depthai library
#include "depthai/depthai.hpp"
#include "deque"
#include "unordered_map"
#include "unordered_set"
static const auto lineColor = cv::Scalar(200, 0, 200);
static const auto pointColor = cv::Scalar(0, 0, 255);
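
// Helper class that keeps a rolling history of positions for each tracked
// feature ID and draws those paths as motion trails on the preview frames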
class FeatureTrackerDrawer {
   private:
    static const int circleRadius = 2;
    static const int maxTrackedFeaturesPathLength = 30;
    // for how many frames the feature is tracked
    static int trackedFeaturesPathLength;

    using featureIdType = decltype(dai::TrackedFeature::id);

    std::unordered_set<featureIdType> trackedIDs;
    std::unordered_map<featureIdType, std::deque<dai::Point2f>> trackedFeaturesPath;

    std::string trackbarName;
    std::string windowName;
   public:
    void trackFeaturePath(std::vector<dai::TrackedFeature>& features) {
        std::unordered_set<featureIdType> newTrackedIDs;
        for(auto& currentFeature : features) {
            auto currentID = currentFeature.id;
            newTrackedIDs.insert(currentID);

            if(!trackedFeaturesPath.count(currentID)) {
                trackedFeaturesPath.insert({currentID, std::deque<dai::Point2f>()});
            }
            std::deque<dai::Point2f>& path = trackedFeaturesPath.at(currentID);

            path.push_back(currentFeature.position);
            while(path.size() > std::max<unsigned int>(1, trackedFeaturesPathLength)) {
                path.pop_front();
            }
        }

        std::unordered_set<featureIdType> featuresToRemove;
        for(auto& oldId : trackedIDs) {
            if(!newTrackedIDs.count(oldId)) {
                featuresToRemove.insert(oldId);
            }
        }

        for(auto& id : featuresToRemove) {
            trackedFeaturesPath.erase(id);
        }

        trackedIDs = newTrackedIDs;
    }
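
    // Draw each stored path as a polyline and mark the most recent position with a filled circle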
    void drawFeatures(cv::Mat& img) {
        cv::setTrackbarPos(trackbarName.c_str(), windowName.c_str(), trackedFeaturesPathLength);

        for(auto& featurePath : trackedFeaturesPath) {
            std::deque<dai::Point2f>& path = featurePath.second;
            unsigned int j = 0;
            for(j = 0; j < path.size() - 1; j++) {
                auto src = cv::Point(path[j].x, path[j].y);
                auto dst = cv::Point(path[j + 1].x, path[j + 1].y);
                cv::line(img, src, dst, lineColor, 1, cv::LINE_AA, 0);
            }

            cv::circle(img, cv::Point(path[j].x, path[j].y), circleRadius, pointColor, -1, cv::LINE_AA, 0);
        }
    }
    FeatureTrackerDrawer(std::string trackbarName, std::string windowName) : trackbarName(trackbarName), windowName(windowName) {
        cv::namedWindow(windowName.c_str());
        cv::createTrackbar(trackbarName.c_str(), windowName.c_str(), &trackedFeaturesPathLength, maxTrackedFeaturesPathLength, nullptr);
    }
};
int FeatureTrackerDrawer::trackedFeaturesPathLength = 10;
int main() {
    using namespace std;

    // Create pipeline
    dai::Pipeline pipeline;

    // Define sources and outputs
    auto monoLeft = pipeline.create<dai::node::MonoCamera>();
    auto monoRight = pipeline.create<dai::node::MonoCamera>();
    auto featureTrackerLeft = pipeline.create<dai::node::FeatureTracker>();
    auto featureTrackerRight = pipeline.create<dai::node::FeatureTracker>();

    auto xoutPassthroughFrameLeft = pipeline.create<dai::node::XLinkOut>();
    auto xoutTrackedFeaturesLeft = pipeline.create<dai::node::XLinkOut>();
    auto xoutPassthroughFrameRight = pipeline.create<dai::node::XLinkOut>();
    auto xoutTrackedFeaturesRight = pipeline.create<dai::node::XLinkOut>();
    auto xinTrackedFeaturesConfig = pipeline.create<dai::node::XLinkIn>();

    xoutPassthroughFrameLeft->setStreamName("passthroughFrameLeft");
    xoutTrackedFeaturesLeft->setStreamName("trackedFeaturesLeft");
    xoutPassthroughFrameRight->setStreamName("passthroughFrameRight");
    xoutTrackedFeaturesRight->setStreamName("trackedFeaturesRight");
    xinTrackedFeaturesConfig->setStreamName("trackedFeaturesConfig");

    // Properties
    monoLeft->setResolution(dai::MonoCameraProperties::SensorResolution::THE_720_P);
    monoLeft->setCamera("left");
    monoRight->setResolution(dai::MonoCameraProperties::SensorResolution::THE_720_P);
    monoRight->setCamera("right");
    // Linking
    monoLeft->out.link(featureTrackerLeft->inputImage);
    featureTrackerLeft->passthroughInputImage.link(xoutPassthroughFrameLeft->input);
    featureTrackerLeft->outputFeatures.link(xoutTrackedFeaturesLeft->input);
    xinTrackedFeaturesConfig->out.link(featureTrackerLeft->inputConfig);

    monoRight->out.link(featureTrackerRight->inputImage);
    featureTrackerRight->passthroughInputImage.link(xoutPassthroughFrameRight->input);
    featureTrackerRight->outputFeatures.link(xoutTrackedFeaturesRight->input);
    xinTrackedFeaturesConfig->out.link(featureTrackerRight->inputConfig);
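
    // Both trackers share the same XLinkIn config stream, so a single message reconfigures both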
    // By default the least amount of resources is allocated;
    // increasing it improves performance when optical flow is enabled
    auto numShaves = 2;
    auto numMemorySlices = 2;
    featureTrackerLeft->setHardwareResources(numShaves, numMemorySlices);
    featureTrackerRight->setHardwareResources(numShaves, numMemorySlices);

    auto featureTrackerConfig = featureTrackerRight->initialConfig.get();

    printf("Press 's' to switch between Lucas-Kanade optical flow and hardware accelerated motion estimation!\n");
    // Connect to device and start pipeline
    dai::Device device(pipeline);

    // Output queues used to receive the results
    auto passthroughImageLeftQueue = device.getOutputQueue("passthroughFrameLeft", 8, false);
    auto outputFeaturesLeftQueue = device.getOutputQueue("trackedFeaturesLeft", 8, false);
    auto passthroughImageRightQueue = device.getOutputQueue("passthroughFrameRight", 8, false);
    auto outputFeaturesRightQueue = device.getOutputQueue("trackedFeaturesRight", 8, false);

    auto inputFeatureTrackerConfigQueue = device.getInputQueue("trackedFeaturesConfig");

    const auto leftWindowName = "left";
    auto leftFeatureDrawer = FeatureTrackerDrawer("Feature tracking duration (frames)", leftWindowName);

    const auto rightWindowName = "right";
    auto rightFeatureDrawer = FeatureTrackerDrawer("Feature tracking duration (frames)", rightWindowName);
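
    // Main loop: fetch passthrough frames and tracked features, draw the trails, then display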
    while(true) {
        auto inPassthroughFrameLeft = passthroughImageLeftQueue->get<dai::ImgFrame>();
        cv::Mat passthroughFrameLeft = inPassthroughFrameLeft->getFrame();
        cv::Mat leftFrame;
        cv::cvtColor(passthroughFrameLeft, leftFrame, cv::COLOR_GRAY2BGR);

        auto inPassthroughFrameRight = passthroughImageRightQueue->get<dai::ImgFrame>();
        cv::Mat passthroughFrameRight = inPassthroughFrameRight->getFrame();
        cv::Mat rightFrame;
        cv::cvtColor(passthroughFrameRight, rightFrame, cv::COLOR_GRAY2BGR);

        auto trackedFeaturesLeft = outputFeaturesLeftQueue->get<dai::TrackedFeatures>()->trackedFeatures;
        leftFeatureDrawer.trackFeaturePath(trackedFeaturesLeft);
        leftFeatureDrawer.drawFeatures(leftFrame);

        auto trackedFeaturesRight = outputFeaturesRightQueue->get<dai::TrackedFeatures>()->trackedFeatures;
        rightFeatureDrawer.trackFeaturePath(trackedFeaturesRight);
        rightFeatureDrawer.drawFeatures(rightFrame);

        // Show the frame
        cv::imshow(leftWindowName, leftFrame);
        cv::imshow(rightWindowName, rightFrame);

        int key = cv::waitKey(1);
        if(key == 'q') {
            break;
        } else if(key == 's') {
            if(featureTrackerConfig.motionEstimator.type == dai::FeatureTrackerConfig::MotionEstimator::Type::LUCAS_KANADE_OPTICAL_FLOW) {
                featureTrackerConfig.motionEstimator.type = dai::FeatureTrackerConfig::MotionEstimator::Type::HW_MOTION_ESTIMATION;
                printf("Switching to hardware accelerated motion estimation\n");
            } else {
                featureTrackerConfig.motionEstimator.type = dai::FeatureTrackerConfig::MotionEstimator::Type::LUCAS_KANADE_OPTICAL_FLOW;
                printf("Switching to Lucas-Kanade optical flow\n");
            }
            auto cfg = dai::FeatureTrackerConfig();
            cfg.set(featureTrackerConfig);
            inputFeatureTrackerConfigQueue->send(cfg);
        }
    }
    return 0;
}