RGB & MobilenetSSD¶
This example shows how to run MobileNetv2SSD on the RGB input frames, and how to display both the RGB preview and the detection metadata produced by the network overlaid on that preview.
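Conceptually, the pipeline is small: the color camera's 300x300 preview feeds a MobileNetDetectionNetwork node, which decodes the SSD output on-device, and both the frames and the parsed detections are streamed back to the host over XLink. The snippet below is a minimal sketch of that wiring (the blob filename is assumed to be the one that install_requirements.py downloads); the complete, annotated example is listed under Source code.

import cv2
import depthai as dai

pipeline = dai.Pipeline()

# Color camera preview sized to the network input
camRgb = pipeline.create(dai.node.ColorCamera)
camRgb.setPreviewSize(300, 300)
camRgb.setInterleaved(False)

# Detection network that decodes MobileNetSSD output on-device
nn = pipeline.create(dai.node.MobileNetDetectionNetwork)
nn.setConfidenceThreshold(0.5)
nn.setBlobPath("mobilenet-ssd_openvino_2021.4_6shave.blob")  # assumed path to the downloaded blob
camRgb.preview.link(nn.input)

# Send the frames that went through the network, plus the detections, to the host
xoutRgb = pipeline.create(dai.node.XLinkOut)
xoutRgb.setStreamName("rgb")
nn.passthrough.link(xoutRgb.input)
xoutNn = pipeline.create(dai.node.XLinkOut)
xoutNn.setStreamName("nn")
nn.out.link(xoutNn.input)

with dai.Device(pipeline) as device:
    qRgb = device.getOutputQueue("rgb", maxSize=4, blocking=False)
    qDet = device.getOutputQueue("nn", maxSize=4, blocking=False)
    while True:
        frame = qRgb.get().getCvFrame()
        for det in qDet.get().detections:
            print(det.label, f"{det.confidence:.2f}")
        cv2.imshow("rgb", frame)
        if cv2.waitKey(1) == ord('q'):
            break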
Demo¶
Setup¶
Please run the install script to download all required dependencies. Please note that this script must be run from within the cloned repository, so you have to download the depthai-python repository first and then run the script:
git clone https://github.com/luxonis/depthai-python.git
cd depthai-python/examples
python3 install_requirements.py
For additional information, please follow the installation guide.
This example script requires external file(s) to run. If you are using:
- depthai-python: run python3 examples/install_requirements.py to download the required file(s)
- depthai-core: the required file(s) will be downloaded automatically when the example is built
Source code¶
Python (also available on GitHub)
#!/usr/bin/env python3

from pathlib import Path
import cv2
import depthai as dai
import numpy as np
import time
import argparse

nnPathDefault = str((Path(__file__).parent / Path('../models/mobilenet-ssd_openvino_2021.4_6shave.blob')).resolve().absolute())
parser = argparse.ArgumentParser()
parser.add_argument('nnPath', nargs='?', help="Path to mobilenet detection network blob", default=nnPathDefault)
parser.add_argument('-s', '--sync', action="store_true", help="Sync RGB output with NN output", default=False)
args = parser.parse_args()

if not Path(nnPathDefault).exists():
    import sys
    raise FileNotFoundError(f'Required file/s not found, please run "{sys.executable} install_requirements.py"')

# MobilenetSSD label texts
labelMap = ["background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow",
            "diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"]

# Create pipeline
pipeline = dai.Pipeline()

# Define sources and outputs
camRgb = pipeline.create(dai.node.ColorCamera)
nn = pipeline.create(dai.node.MobileNetDetectionNetwork)
xoutRgb = pipeline.create(dai.node.XLinkOut)
nnOut = pipeline.create(dai.node.XLinkOut)
nnNetworkOut = pipeline.create(dai.node.XLinkOut)

xoutRgb.setStreamName("rgb")
nnOut.setStreamName("nn")
nnNetworkOut.setStreamName("nnNetwork")

# Properties
camRgb.setPreviewSize(300, 300)
camRgb.setInterleaved(False)
camRgb.setFps(40)
# Define a neural network that will make predictions based on the source frames
nn.setConfidenceThreshold(0.5)
nn.setBlobPath(args.nnPath)
nn.setNumInferenceThreads(2)
nn.input.setBlocking(False)

# Linking
if args.sync:
    nn.passthrough.link(xoutRgb.input)
else:
    camRgb.preview.link(xoutRgb.input)

camRgb.preview.link(nn.input)
nn.out.link(nnOut.input)
nn.outNetwork.link(nnNetworkOut.input)

# Connect to device and start pipeline
with dai.Device(pipeline) as device:

    # Output queues will be used to get the rgb frames and nn data from the outputs defined above
    qRgb = device.getOutputQueue(name="rgb", maxSize=4, blocking=False)
    qDet = device.getOutputQueue(name="nn", maxSize=4, blocking=False)
    qNN = device.getOutputQueue(name="nnNetwork", maxSize=4, blocking=False)

    frame = None
    detections = []
    startTime = time.monotonic()
    counter = 0
    color2 = (255, 255, 255)

    # nn data (bounding box locations) are in <0..1> range - they need to be normalized with frame width/height
    def frameNorm(frame, bbox):
        normVals = np.full(len(bbox), frame.shape[0])
        normVals[::2] = frame.shape[1]
        return (np.clip(np.array(bbox), 0, 1) * normVals).astype(int)

    def displayFrame(name, frame):
        color = (255, 0, 0)
        for detection in detections:
            bbox = frameNorm(frame, (detection.xmin, detection.ymin, detection.xmax, detection.ymax))
            cv2.putText(frame, labelMap[detection.label], (bbox[0] + 10, bbox[1] + 20), cv2.FONT_HERSHEY_TRIPLEX, 0.5, color)
            cv2.putText(frame, f"{int(detection.confidence * 100)}%", (bbox[0] + 10, bbox[1] + 40), cv2.FONT_HERSHEY_TRIPLEX, 0.5, color)
            cv2.rectangle(frame, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color, 2)
        # Show the frame
        cv2.imshow(name, frame)

    printOutputLayersOnce = True

    while True:
        if args.sync:
            # Use blocking get() call to catch frame and inference result synced
            inRgb = qRgb.get()
            inDet = qDet.get()
            inNN = qNN.get()
        else:
            # Instead of get (blocking), we use tryGet (non-blocking) which will return the available data or None otherwise
            inRgb = qRgb.tryGet()
            inDet = qDet.tryGet()
            inNN = qNN.tryGet()

        if inRgb is not None:
            frame = inRgb.getCvFrame()
            cv2.putText(frame, "NN fps: {:.2f}".format(counter / (time.monotonic() - startTime)),
                        (2, frame.shape[0] - 4), cv2.FONT_HERSHEY_TRIPLEX, 0.4, color2)

        if inDet is not None:
            detections = inDet.detections
            counter += 1

        if printOutputLayersOnce and inNN is not None:
            toPrint = 'Output layer names:'
            for ten in inNN.getAllLayerNames():
                toPrint = f'{toPrint} {ten},'
            print(toPrint)
            printOutputLayersOnce = False

        # If the frame is available, draw bounding boxes on it and show the frame
        if frame is not None:
            displayFrame("rgb", frame)

        if cv2.waitKey(1) == ord('q'):
            break
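The bounding boxes that the detection network returns are normalized to the <0..1> range, which is why frameNorm above multiplies the x coordinates by the frame width and the y coordinates by the frame height before drawing. Below is a standalone sketch of that conversion, using made-up detection values rather than real network output:

import numpy as np

def frameNorm(frame, bbox):
    # Start with the frame height everywhere, then overwrite the even (x) indices with the width
    normVals = np.full(len(bbox), frame.shape[0])
    normVals[::2] = frame.shape[1]
    return (np.clip(np.array(bbox), 0, 1) * normVals).astype(int)

preview = np.zeros((300, 300, 3), dtype=np.uint8)    # same shape as the 300x300 RGB preview
bbox = frameNorm(preview, (0.10, 0.20, 0.55, 0.80))  # hypothetical (xmin, ymin, xmax, ymax)
print(bbox)                                          # [ 30  60 165 240] -> pixel coordinates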
C++ (also available on GitHub)
#include <atomic>
#include <chrono>
#include <cstdio>
#include <iomanip>
#include <iostream>
#include <sstream>

#include "utility.hpp"

// Includes common necessary includes for development using depthai library
#include "depthai/depthai.hpp"

// MobilenetSSD label texts
static const std::vector<std::string> labelMap = {"background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus",
                                                  "car", "cat", "chair", "cow", "diningtable", "dog", "horse",
                                                  "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"};

static std::atomic<bool> syncNN{true};

int main(int argc, char** argv) {
    using namespace std;
    using namespace std::chrono;

    // Default blob path provided by Hunter private data download
    // Applicable for easier example usage only
    std::string nnPath(BLOB_PATH);

    // If path to blob specified, use that
    if(argc > 1) {
        nnPath = std::string(argv[1]);
    }

    // Print which blob we are using
    printf("Using blob at path: %s\n", nnPath.c_str());

    // Create pipeline
    dai::Pipeline pipeline;

    // Define sources and outputs
    auto camRgb = pipeline.create<dai::node::ColorCamera>();
    auto nn = pipeline.create<dai::node::MobileNetDetectionNetwork>();
    auto xoutRgb = pipeline.create<dai::node::XLinkOut>();
    auto nnOut = pipeline.create<dai::node::XLinkOut>();
    auto nnNetworkOut = pipeline.create<dai::node::XLinkOut>();

    xoutRgb->setStreamName("rgb");
    nnOut->setStreamName("nn");
    nnNetworkOut->setStreamName("nnNetwork");

    // Properties
    camRgb->setPreviewSize(300, 300);  // NN input
    camRgb->setInterleaved(false);
    camRgb->setFps(40);
    // Define a neural network that will make predictions based on the source frames
    nn->setConfidenceThreshold(0.5);
    nn->setBlobPath(nnPath);
    nn->setNumInferenceThreads(2);
    nn->input.setBlocking(false);

    // Linking
    if(syncNN) {
        nn->passthrough.link(xoutRgb->input);
    } else {
        camRgb->preview.link(xoutRgb->input);
    }

    camRgb->preview.link(nn->input);
    nn->out.link(nnOut->input);
    nn->outNetwork.link(nnNetworkOut->input);

    // Connect to device and start pipeline
    dai::Device device(pipeline);

    // Output queues will be used to get the rgb frames and nn data from the outputs defined above
    auto qRgb = device.getOutputQueue("rgb", 4, false);
    auto qDet = device.getOutputQueue("nn", 4, false);
    auto qNN = device.getOutputQueue("nnNetwork", 4, false);

    cv::Mat frame;
    std::vector<dai::ImgDetection> detections;
    auto startTime = steady_clock::now();
    int counter = 0;
    float fps = 0;
    auto color2 = cv::Scalar(255, 255, 255);

    // Add bounding boxes and text to the frame and show it to the user
    auto displayFrame = [](std::string name, cv::Mat frame, std::vector<dai::ImgDetection>& detections) {
        auto color = cv::Scalar(255, 0, 0);
        // nn data, being the bounding box locations, are in <0..1> range - they need to be normalized with frame width/height
        for(auto& detection : detections) {
            int x1 = detection.xmin * frame.cols;
            int y1 = detection.ymin * frame.rows;
            int x2 = detection.xmax * frame.cols;
            int y2 = detection.ymax * frame.rows;

            uint32_t labelIndex = detection.label;
            std::string labelStr = to_string(labelIndex);
            if(labelIndex < labelMap.size()) {
                labelStr = labelMap[labelIndex];
            }
            cv::putText(frame, labelStr, cv::Point(x1 + 10, y1 + 20), cv::FONT_HERSHEY_TRIPLEX, 0.5, color);
            std::stringstream confStr;
            confStr << std::fixed << std::setprecision(2) << detection.confidence * 100;
            cv::putText(frame, confStr.str(), cv::Point(x1 + 10, y1 + 40), cv::FONT_HERSHEY_TRIPLEX, 0.5, color);
            cv::rectangle(frame, cv::Rect(cv::Point(x1, y1), cv::Point(x2, y2)), color, 1);
        }
        // Show the frame
        cv::imshow(name, frame);
    };

    bool printOutputLayersOnce = true;

    while(true) {
        std::shared_ptr<dai::ImgFrame> inRgb;
        std::shared_ptr<dai::ImgDetections> inDet;
        std::shared_ptr<dai::NNData> inNN;

        if(syncNN) {
            inRgb = qRgb->get<dai::ImgFrame>();
            inDet = qDet->get<dai::ImgDetections>();
            inNN = qNN->get<dai::NNData>();
        } else {
            inRgb = qRgb->tryGet<dai::ImgFrame>();
            inDet = qDet->tryGet<dai::ImgDetections>();
            inNN = qNN->tryGet<dai::NNData>();
        }

        counter++;
        auto currentTime = steady_clock::now();
        auto elapsed = duration_cast<duration<float>>(currentTime - startTime);
        if(elapsed > seconds(1)) {
            fps = counter / elapsed.count();
            counter = 0;
            startTime = currentTime;
        }

        if(inRgb) {
            frame = inRgb->getCvFrame();
            std::stringstream fpsStr;
            fpsStr << "NN fps: " << std::fixed << std::setprecision(2) << fps;
            cv::putText(frame, fpsStr.str(), cv::Point(2, inRgb->getHeight() - 4), cv::FONT_HERSHEY_TRIPLEX, 0.4, color2);
        }

        if(inDet) {
            detections = inDet->detections;
        }

        if(printOutputLayersOnce && inNN) {
            std::cout << "Output layer names: ";
            for(const auto& ten : inNN->getAllLayerNames()) {
                std::cout << ten << ", ";
            }
            std::cout << std::endl;
            printOutputLayersOnce = false;
        }

        if(!frame.empty()) {
            displayFrame("video", frame, detections);
        }

        int key = cv::waitKey(1);
        if(key == 'q' || key == 'Q') {
            return 0;
        }
    }
    return 0;
}
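Both listings hide the same design choice behind a flag (args.sync in Python, syncNN in C++): in synced mode the preview stream is fed from the detection network's passthrough output and read with blocking get() calls, so every displayed frame is exactly the frame the overlaid detections were computed on, at the cost of the preview running at the inference rate rather than the camera's 40 FPS. In unsynced mode the preview is linked straight from the camera and read with non-blocking tryGet(), so it stays smooth while detections update whenever a new result arrives and may lag by a frame or two. A condensed sketch of the two wirings, reusing the node names from the Python listing (the blob path is assumed):

import depthai as dai

sync = True  # corresponds to -s / --sync in the Python listing and syncNN in the C++ one

pipeline = dai.Pipeline()
camRgb = pipeline.create(dai.node.ColorCamera)
camRgb.setPreviewSize(300, 300)
camRgb.setInterleaved(False)
nn = pipeline.create(dai.node.MobileNetDetectionNetwork)
nn.setBlobPath("mobilenet-ssd_openvino_2021.4_6shave.blob")  # assumed blob path
xoutRgb = pipeline.create(dai.node.XLinkOut)
xoutRgb.setStreamName("rgb")
camRgb.preview.link(nn.input)

if sync:
    # Frames are re-emitted by the NN after inference, so frame and detections always match
    nn.passthrough.link(xoutRgb.input)
else:
    # Frames go to the host directly at full camera FPS; detections arrive on their own schedule
    camRgb.preview.link(xoutRgb.input)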