RGB Encoding & Mono & MobilenetSSD¶
This example shows how to configure the DepthAI video encoder to encode the RGB camera input in H.265 at Full-HD (1080p) resolution and 30 FPS, and to transfer the encoded video over XLink to the host, where it is saved to disk as a video file. At the same time, a MobileNetv2SSD network is run on frames from the right grayscale camera.
Pressing Ctrl+C stops the recording. The resulting raw H.265 stream can then be converted into a playable .mp4 with ffmpeg (the example prints the exact command, ffmpeg -framerate 30 -i video.h265 -c copy video.mp4, on exit). Note that ffmpeg must be installed and runnable for the conversion to mp4 to succeed.
Be careful: this example saves the encoded video to your host's storage, so if you leave it running it can eventually fill up your disk.
It’s a combination of RGB Encoding and Mono & MobilenetSSD.
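The key difference from running the two examples separately is that both data paths live in a single pipeline. The linking excerpt below is taken from the full source further down (node creation and the XLinkOut preview outputs are omitted) and shows how the two halves are wired together:

# Excerpt from the full example below: how the encoder path and the detection path
# share one pipeline (node creation and XLinkOut setup omitted)
camRgb.video.link(videoEncoder.input)        # 1080p RGB frames -> H.265 encoder
videoEncoder.bitstream.link(videoOut.input)  # encoded bitstream -> host ("h265" stream)
monoRight.out.link(manip.inputImage)         # right mono frames -> ImageManip (300x300 BGR)
manip.out.link(nn.input)                     # preprocessed frames -> MobileNetSSD network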
Demo¶
Setup¶
Please run the install script to download all required dependencies. Please note that this script must be run from within the cloned repository, so you have to download the depthai-python repository first and then run the script:
git clone https://github.com/luxonis/depthai-python.git
cd depthai-python/examples
python3 install_requirements.py
For additional information, please follow the installation guide.
This example script requires external file(s) to run. If you are using:
- depthai-python: run python3 examples/install_requirements.py to download the required file(s)
- depthai-core: the required file(s) are downloaded automatically when building the example
Source code¶
Also available on GitHub
#!/usr/bin/env python3

from pathlib import Path
import sys
import cv2
import depthai as dai
import numpy as np

# Get argument first
nnPath = str((Path(__file__).parent / Path('../models/mobilenet-ssd_openvino_2021.4_6shave.blob')).resolve().absolute())
if len(sys.argv) > 1:
    nnPath = sys.argv[1]

if not Path(nnPath).exists():
    import sys
    raise FileNotFoundError(f'Required file/s not found, please run "{sys.executable} install_requirements.py"')

# MobilenetSSD label texts
labelMap = ["background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow",
            "diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"]

# Create pipeline
pipeline = dai.Pipeline()

# Define sources and outputs
camRgb = pipeline.create(dai.node.ColorCamera)
monoRight = pipeline.create(dai.node.MonoCamera)
videoEncoder = pipeline.create(dai.node.VideoEncoder)
nn = pipeline.create(dai.node.MobileNetDetectionNetwork)
manip = pipeline.create(dai.node.ImageManip)

videoOut = pipeline.create(dai.node.XLinkOut)
xoutRight = pipeline.create(dai.node.XLinkOut)
manipOut = pipeline.create(dai.node.XLinkOut)
nnOut = pipeline.create(dai.node.XLinkOut)

videoOut.setStreamName('h265')
xoutRight.setStreamName("right")
manipOut.setStreamName("manip")
nnOut.setStreamName("nn")

# Properties
camRgb.setBoardSocket(dai.CameraBoardSocket.CAM_A)
camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)
monoRight.setCamera("right")
monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P)

videoEncoder.setDefaultProfilePreset(30, dai.VideoEncoderProperties.Profile.H265_MAIN)

nn.setConfidenceThreshold(0.5)
nn.setBlobPath(nnPath)
nn.setNumInferenceThreads(2)
nn.input.setBlocking(False)

# The NN model expects BGR input. By default ImageManip output type would be same as input (gray in this case)
manip.initialConfig.setFrameType(dai.ImgFrame.Type.BGR888p)
manip.initialConfig.setResize(300, 300)

# Linking
camRgb.video.link(videoEncoder.input)
videoEncoder.bitstream.link(videoOut.input)
monoRight.out.link(manip.inputImage)
manip.out.link(nn.input)
monoRight.out.link(xoutRight.input)
manip.out.link(manipOut.input)
nn.out.link(nnOut.input)

# Connect to device and start pipeline
with dai.Device(pipeline) as device:

    # Queues
    queue_size = 8
    qRight = device.getOutputQueue("right", queue_size)
    qManip = device.getOutputQueue("manip", queue_size)
    qDet = device.getOutputQueue("nn", queue_size)
    qRgbEnc = device.getOutputQueue('h265', maxSize=30, blocking=True)

    frame = None
    frameManip = None
    detections = []
    offsetX = (monoRight.getResolutionWidth() - monoRight.getResolutionHeight()) // 2
    color = (255, 0, 0)
    croppedFrame = np.zeros((monoRight.getResolutionHeight(), monoRight.getResolutionHeight()))

    def frameNorm(frame, bbox):
        normVals = np.full(len(bbox), frame.shape[0])
        normVals[::2] = frame.shape[1]
        return (np.clip(np.array(bbox), 0, 1) * normVals).astype(int)

    videoFile = open('video.h265', 'wb')
    cv2.namedWindow("right", cv2.WINDOW_NORMAL)
    cv2.namedWindow("manip", cv2.WINDOW_NORMAL)

    while True:
        inRight = qRight.tryGet()
        inManip = qManip.tryGet()
        inDet = qDet.tryGet()

        while qRgbEnc.has():
            qRgbEnc.get().getData().tofile(videoFile)

        if inRight is not None:
            frame = inRight.getCvFrame()

        if inManip is not None:
            frameManip = inManip.getCvFrame()

        if inDet is not None:
            detections = inDet.detections

        if frame is not None:
            for detection in detections:
                bbox = frameNorm(croppedFrame, (detection.xmin, detection.ymin, detection.xmax, detection.ymax))
                bbox[::2] += offsetX
                cv2.putText(frame, labelMap[detection.label], (bbox[0] + 10, bbox[1] + 20), cv2.FONT_HERSHEY_TRIPLEX, 0.5, color)
                cv2.putText(frame, f"{int(detection.confidence * 100)}%", (bbox[0] + 10, bbox[1] + 40), cv2.FONT_HERSHEY_TRIPLEX, 0.5, color)
                cv2.rectangle(frame, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color, 2)
            # Show the frame
            cv2.imshow("right", frame)

        if frameManip is not None:
            for detection in detections:
                bbox = frameNorm(frameManip, (detection.xmin, detection.ymin, detection.xmax, detection.ymax))
                cv2.putText(frameManip, labelMap[detection.label], (bbox[0] + 10, bbox[1] + 20), cv2.FONT_HERSHEY_TRIPLEX, 0.5, color)
                cv2.putText(frameManip, f"{int(detection.confidence * 100)}%", (bbox[0] + 10, bbox[1] + 40), cv2.FONT_HERSHEY_TRIPLEX, 0.5, color)
                cv2.rectangle(frameManip, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color, 2)
            # Show the frame
            cv2.imshow("manip", frameManip)

        if cv2.waitKey(1) == ord('q'):
            break

    print("To view the encoded data, convert the stream file (.h265) into a video file (.mp4) using a command below:")
    print("ffmpeg -framerate 30 -i video.h265 -c copy video.mp4")
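A note on the bounding-box math in the Python version above: the detections come back normalized to the square region the network actually saw, so frameNorm scales them onto a height x height square (croppedFrame) and offsetX then shifts the x coordinates back into the full-width mono frame. A small worked example, assuming the 1280x720 (THE_720_P) mono resolution configured in the pipeline:

# Worked example of the detection-to-frame mapping used above,
# assuming 1280x720 mono input (getResolutionWidth() = 1280, getResolutionHeight() = 720)
xmin, ymin = 0.5, 0.25               # example normalized detection coordinates
offsetX = (1280 - 720) // 2          # = 280, horizontal offset of the square crop
x = int(xmin * 720) + offsetX        # = 640 in the full-width "right" frame
y = int(ymin * 720)                  # = 180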
Also available on GitHub
#include <iostream>

// Includes common necessary includes for development using depthai library
#include "depthai/depthai.hpp"

// MobilenetSSD label texts
static const std::vector<std::string> labelMap = {"background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus",
                                                  "car", "cat", "chair", "cow", "diningtable", "dog", "horse",
                                                  "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"};

int main(int argc, char** argv) {
    using namespace std;
    // Default blob path provided by Hunter private data download
    // Applicable for easier example usage only
    std::string nnPath(BLOB_PATH);

    // If path to blob specified, use that
    if(argc > 1) {
        nnPath = std::string(argv[1]);
    }

    // Print which blob we are using
    printf("Using blob at path: %s\n", nnPath.c_str());

    // Create pipeline
    dai::Pipeline pipeline;

    // Define sources and outputs
    auto camRgb = pipeline.create<dai::node::ColorCamera>();
    auto monoRight = pipeline.create<dai::node::MonoCamera>();
    auto videoEncoder = pipeline.create<dai::node::VideoEncoder>();
    auto nn = pipeline.create<dai::node::MobileNetDetectionNetwork>();
    auto manip = pipeline.create<dai::node::ImageManip>();

    auto videoOut = pipeline.create<dai::node::XLinkOut>();
    auto xoutRight = pipeline.create<dai::node::XLinkOut>();
    auto manipOut = pipeline.create<dai::node::XLinkOut>();
    auto nnOut = pipeline.create<dai::node::XLinkOut>();

    videoOut->setStreamName("h265");
    xoutRight->setStreamName("right");
    manipOut->setStreamName("manip");
    nnOut->setStreamName("nn");

    // Properties
    camRgb->setBoardSocket(dai::CameraBoardSocket::CAM_A);
    camRgb->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P);
    monoRight->setCamera("right");
    monoRight->setResolution(dai::MonoCameraProperties::SensorResolution::THE_720_P);

    videoEncoder->setDefaultProfilePreset(30, dai::VideoEncoderProperties::Profile::H265_MAIN);

    nn->setConfidenceThreshold(0.5);
    nn->setBlobPath(nnPath);
    nn->setNumInferenceThreads(2);
    nn->input.setBlocking(false);

    // The NN model expects BGR input. By default ImageManip output type would be same as input (gray in this case)
    manip->initialConfig.setFrameType(dai::ImgFrame::Type::BGR888p);
    manip->initialConfig.setResize(300, 300);

    // Linking
    camRgb->video.link(videoEncoder->input);
    videoEncoder->bitstream.link(videoOut->input);
    monoRight->out.link(manip->inputImage);
    manip->out.link(nn->input);
    monoRight->out.link(xoutRight->input);
    manip->out.link(manipOut->input);
    nn->out.link(nnOut->input);

    // Connect to device and start pipeline
    dai::Device device(pipeline);

    // Queues
    int queueSize = 8;
    auto qRight = device.getOutputQueue("right", queueSize);
    auto qManip = device.getOutputQueue("manip", queueSize);
    auto qDet = device.getOutputQueue("nn", queueSize);
    auto qRgbEnc = device.getOutputQueue("h265", 30, true);

    cv::Mat frame;
    cv::Mat frameManip;
    std::vector<dai::ImgDetection> detections;
    int offsetX = (monoRight->getResolutionWidth() - monoRight->getResolutionHeight()) / 2;
    auto color = cv::Scalar(255, 0, 0);

    auto videoFile = std::ofstream("video.h265", std::ios::binary);
    cv::namedWindow("right", cv::WINDOW_NORMAL);
    cv::namedWindow("manip", cv::WINDOW_NORMAL);

    while(true) {
        auto inRight = qRight->tryGet<dai::ImgFrame>();
        auto inManip = qManip->tryGet<dai::ImgFrame>();
        auto inDet = qDet->tryGet<dai::ImgDetections>();

        auto out1 = qRgbEnc->get<dai::ImgFrame>();
        videoFile.write((char*)out1->getData().data(), out1->getData().size());

        if(inRight) {
            frame = inRight->getCvFrame();
        }

        if(inManip) {
            frameManip = inManip->getCvFrame();
        }

        if(inDet) {
            detections = inDet->detections;
        }

        if(!frame.empty()) {
            for(auto& detection : detections) {
                int x1 = detection.xmin * monoRight->getResolutionHeight() + offsetX;
                int y1 = detection.ymin * monoRight->getResolutionHeight();
                int x2 = detection.xmax * monoRight->getResolutionHeight() + offsetX;
                int y2 = detection.ymax * monoRight->getResolutionHeight();

                uint32_t labelIndex = detection.label;
                std::string labelStr = to_string(labelIndex);
                if(labelIndex < labelMap.size()) {
                    labelStr = labelMap[labelIndex];
                }
                cv::putText(frame, labelStr, cv::Point(x1 + 10, y1 + 20), cv::FONT_HERSHEY_TRIPLEX, 0.5, color);
                std::stringstream confStr;
                confStr << std::fixed << std::setprecision(2) << detection.confidence * 100;
                cv::putText(frame, confStr.str(), cv::Point(x1 + 10, y1 + 40), cv::FONT_HERSHEY_TRIPLEX, 0.5, color);
                cv::rectangle(frame, cv::Rect(cv::Point(x1, y1), cv::Point(x2, y2)), color, cv::FONT_HERSHEY_SIMPLEX);
            }
            // Show the frame
            cv::imshow("right", frame);
        }

        if(!frameManip.empty()) {
            for(auto& detection : detections) {
                int x1 = detection.xmin * frameManip.cols;
                int y1 = detection.ymin * frameManip.rows;
                int x2 = detection.xmax * frameManip.cols;
                int y2 = detection.ymax * frameManip.rows;

                uint32_t labelIndex = detection.label;
                std::string labelStr = to_string(labelIndex);
                if(labelIndex < labelMap.size()) {
                    labelStr = labelMap[labelIndex];
                }
                cv::putText(frameManip, labelStr, cv::Point(x1 + 10, y1 + 20), cv::FONT_HERSHEY_TRIPLEX, 0.5, color);
                std::stringstream confStr;
                confStr << std::fixed << std::setprecision(2) << detection.confidence * 100;
                cv::putText(frameManip, confStr.str(), cv::Point(x1 + 10, y1 + 40), cv::FONT_HERSHEY_TRIPLEX, 0.5, color);
                cv::rectangle(frameManip, cv::Rect(cv::Point(x1, y1), cv::Point(x2, y2)), color, cv::FONT_HERSHEY_SIMPLEX);
            }
            // Show the frame
            cv::imshow("manip", frameManip);
        }

        int key = cv::waitKey(1);
        if(key == 'q' || key == 'Q') {
            break;
        }
    }

    cout << "To view the encoded data, convert the stream file (.h265) into a video file (.mp4), using a command below:" << endl;
    cout << "ffmpeg -framerate 30 -i video.h265 -c copy video.mp4" << endl;
    return 0;
}
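One design detail worth noting in both versions: the preview and detection streams are read with non-blocking tryGet() calls (so the display loop never stalls waiting for a frame), while the encoded bitstream queue is created as blocking with a deeper buffer and emptied on every iteration, so no encoded packets are lost before being written to disk. The relevant excerpt from the Python version:

# Excerpt from the Python example above: blocking queue for the encoded stream,
# fully drained each loop iteration and appended to video.h265
qRgbEnc = device.getOutputQueue('h265', maxSize=30, blocking=True)
while qRgbEnc.has():
    qRgbEnc.get().getData().tofile(videoFile)

The C++ version achieves the same effect with a blocking get() on the "h265" queue created as device.getOutputQueue("h265", 30, true).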