Queue add callback¶
This example shows how to use queue callbacks. It sends both mono frames and color frames from the device to the
host via a single XLinkOut
node. In the callback function newFrame()
we decode which camera the frame came from, so we can later show the frame to the user with the correct window title.
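The key pieces are DataOutputQueue.addCallback(), which invokes your function from a separate thread whenever a new message arrives on the queue, and ImgFrame.getInstanceNum(), which tells you which camera socket produced the frame. A minimal sketch of just this mechanism is shown here (it assumes a pipeline variable already set up with the three cameras linked to an XLinkOut stream named "frames", exactly as in the full source code below):

# Minimal sketch of the callback mechanism (assumes `pipeline` already
# contains the cameras linked to an XLinkOut stream named "frames")
import depthai as dai

def newFrame(inFrame):
    # The instance number identifies the source camera:
    # 0 -> color, 1 -> left mono, 2 -> right mono
    num = inFrame.getInstanceNum()
    print("Got frame from camera instance", num)

with dai.Device(pipeline) as device:
    # newFrame() is called from a separate thread for every new message on "frames"
    device.getOutputQueue(name="frames", maxSize=4, blocking=False).addCallback(newFrame)

The full, runnable example that also displays the frames follows in the Source code section.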
Demo¶
Setup¶
Please run the install script to download all required dependencies. Please note that this script must be run from within the git repository, so you have to download the depthai-python repository first and then run the script:
git clone https://github.com/luxonis/depthai-python.git
cd depthai-python/examples
python3 install_requirements.py
For additional information, please follow the installation guide
Source code¶
Also available on GitHub
#!/usr/bin/env python3

import cv2
import depthai as dai
import queue

# Create pipeline
pipeline = dai.Pipeline()

# Add all three cameras
camRgb = pipeline.create(dai.node.ColorCamera)
left = pipeline.create(dai.node.MonoCamera)
right = pipeline.create(dai.node.MonoCamera)

# Create XLink output
xout = pipeline.create(dai.node.XLinkOut)
xout.setStreamName("frames")

# Properties
camRgb.setPreviewSize(300, 300)
left.setCamera("left")
left.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
right.setCamera("right")
right.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)

# Stream all the camera streams through the same XLink node
camRgb.preview.link(xout.input)
left.out.link(xout.input)
right.out.link(xout.input)

q = queue.Queue()

def newFrame(inFrame):
    global q
    # Get "stream name" from the instance number
    num = inFrame.getInstanceNum()
    name = "color" if num == 0 else "left" if num == 1 else "right"
    frame = inFrame.getCvFrame()
    # This is a different thread and you could use it to
    # run image processing algorithms here
    q.put({"name": name, "frame": frame})

# Connect to device and start pipeline
with dai.Device(pipeline) as device:
    # Add callback to the output queue "frames" for all newly arrived frames (color, left, right)
    device.getOutputQueue(name="frames", maxSize=4, blocking=False).addCallback(newFrame)

    while True:
        # You could also get the data as non-blocking (block=False)
        data = q.get(block=True)
        cv2.imshow(data["name"], data["frame"])

        if cv2.waitKey(1) == ord('q'):
            break
Also available on GitHub
#include <iostream>
#include <mutex>
#include <queue>

// Includes common necessary includes for development using depthai library
#include "depthai/depthai.hpp"

struct callbackType {
    std::string name;
    cv::Mat frame;
};

int main() {
    // Create pipeline
    dai::Pipeline pipeline;

    // Add all three cameras
    auto camRgb = pipeline.create<dai::node::ColorCamera>();
    auto left = pipeline.create<dai::node::MonoCamera>();
    auto right = pipeline.create<dai::node::MonoCamera>();

    // Create XLink output
    auto xout = pipeline.create<dai::node::XLinkOut>();
    xout->setStreamName("frames");

    // Properties
    camRgb->setPreviewSize(300, 300);
    left->setCamera("left");
    left->setResolution(dai::MonoCameraProperties::SensorResolution::THE_400_P);
    right->setCamera("right");
    right->setResolution(dai::MonoCameraProperties::SensorResolution::THE_400_P);

    // Stream all the camera streams through the same XLink node
    camRgb->preview.link(xout->input);
    left->out.link(xout->input);
    right->out.link(xout->input);

    auto queue = std::queue<callbackType>();
    std::mutex queueMtx;

    // Connect to device and start pipeline
    dai::Device device(pipeline);

    auto newFrame = [&queueMtx, &queue](std::shared_ptr<dai::ADatatype> callback) {
        if(dynamic_cast<dai::ImgFrame*>(callback.get()) != nullptr) {
            std::unique_lock<std::mutex> lock(queueMtx);
            callbackType cb;
            dai::ImgFrame* imgFrame = static_cast<dai::ImgFrame*>(callback.get());
            auto num = imgFrame->getInstanceNum();
            cb.name = num == 0 ? "color" : (num == 1 ? "left" : "right");
            cb.frame = imgFrame->getCvFrame();
            queue.push(cb);
        }
    };

    // Add callback to the output queue "frames" for all newly arrived frames (color, left, right)
    device.getOutputQueue("frames", 4, false)->addCallback(newFrame);

    while(true) {
        callbackType data;
        {
            std::unique_lock<std::mutex> lock(queueMtx);
            if(!queue.empty()) {
                data = queue.front();
                queue.pop();
            }
        }

        if(!data.frame.empty()) {
            cv::imshow(data.name.c_str(), data.frame);
        }

        int key = cv::waitKey(1);
        if(key == 'q' || key == 'Q') {
            return 0;
        }
    }
    return 0;
}