I would like to open a video stream with OpenCV and push frames one by one into a DeepStream pipeline, so that TensorRT can run inference with a YOLOv3 model, but I do not know how to make it work.
I'm trying to follow the directions I found here, but still nothing works.
This is my code :
#include <gst/gst.h>
#include <gst/app/gstappsrc.h>
#include <gst/app/gstappsink.h>
#include <opencv2/core/core.hpp>
#include <opencv2/core/types_c.h>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
static GMainLoop *loop;
/* need-data callback for appsrc: reads one image, converts it to the RGB
 * layout advertised in the appsrc caps, wraps it in a GstBuffer and pushes
 * it into the pipeline.  Quits the main loop on any error. */
static void
cb_need_data (GstElement *appsrc,
guint unused_size,
gpointer user_data)
{
/* Running timestamp for outgoing buffers; static so it advances across
 * successive callbacks. */
static GstClockTime timestamp = 0;
GstFlowReturn ret;
GstMapInfo map;
/* NOTE(review): decoding the file on every need-data call is wasteful;
 * in production load it once and reuse the Mat. */
cv::Mat frame = cv::imread ("cat.jpg", cv::IMREAD_COLOR);
if (frame.empty ()) {
  g_printerr ("could not read cat.jpg\n");
  g_main_loop_quit (loop);
  return;
}
/* The appsrc caps declare video/x-raw,format=RGB, so convert OpenCV's
 * BGR to RGB here — the original converted to YUV, contradicting the
 * caps and corrupting the downstream interpretation of the data. */
cv::cvtColor (frame, frame, cv::COLOR_BGR2RGB);
/* cv::Mat rows can be padded; force a contiguous layout so the memcpy
 * below copies exactly rows*cols*channels bytes. */
if (!frame.isContinuous ())
  frame = frame.clone ();
guint size = (guint) (frame.total () * frame.elemSize ());
g_print ("frame_height: %d \n", frame.rows);
g_print ("frame_width: %d \n", frame.cols);
g_print ("frame_channels: %d \n", frame.channels ());
g_print ("frame_size: %u \n", size);
GstBuffer *buffer = gst_buffer_new_allocate (NULL, size, NULL);
gst_buffer_map (buffer, &map, GST_MAP_WRITE);
memcpy (map.data, frame.data, size);
/* Every gst_buffer_map() must be paired with an unmap — the original
 * leaked the mapping. */
gst_buffer_unmap (buffer, &map);
GST_BUFFER_PTS (buffer) = timestamp;
GST_BUFFER_DURATION (buffer) = gst_util_uint64_scale_int (1, GST_SECOND, 1);
timestamp += GST_BUFFER_DURATION (buffer);
g_signal_emit_by_name (appsrc, "push-buffer", buffer, &ret);
/* The "push-buffer" action signal does NOT take ownership of the buffer
 * (unlike gst_app_src_push_buffer()); drop our ref or leak every frame. */
gst_buffer_unref (buffer);
if (ret != GST_FLOW_OK) {
  g_print ("quit");
  /* something went wrong downstream, stop pushing */
  g_main_loop_quit (loop);
}
}
/* Builds and runs: appsrc -> videoconvert -> nvstreammux -> nveglglessink.
 * Returns 0 on clean shutdown, -1 on setup failure. */
gint
main (gint argc,
gchar *argv[])
{
GstElement *pipeline, *appsrc, *conv, *streammux, *sink;
/* init GStreamer */
gst_init (&argc, &argv);
loop = g_main_loop_new (NULL, FALSE);
/* setup pipeline */
pipeline = gst_pipeline_new ("pipeline");
appsrc = gst_element_factory_make ("appsrc", "source");
conv = gst_element_factory_make ("videoconvert", "conv");
streammux = gst_element_factory_make ("nvstreammux", "stream-muxer");
sink = gst_element_factory_make ("nveglglessink", "nvvideo-renderer");
if (!pipeline || !appsrc || !conv || !streammux || !sink) {
  g_printerr ("One element could not be created. Exiting.\n");
  return -1;
}
/* appsrc caps: must match the data cb_need_data actually pushes (RGB) */
g_object_set (G_OBJECT (appsrc), "caps",
    gst_caps_new_simple ("video/x-raw",
        "format", G_TYPE_STRING, "RGB",
        "width", G_TYPE_INT, 640,
        "height", G_TYPE_INT, 360,
        "framerate", GST_TYPE_FRACTION, 1, 1,
        NULL), NULL);
/* nvstreammux refuses to work without an output resolution and a
 * batch-size — the original never set them. */
g_object_set (G_OBJECT (streammux),
    "width", 640,
    "height", 360,
    "batch-size", 1, NULL);
gst_bin_add_many (GST_BIN (pipeline), appsrc, conv, streammux, sink, NULL);
/* nvstreammux only has request sink pads (sink_%u), so the original
 * gst_element_link_many() chain silently failed to link into it.
 * Request sink_0 explicitly and pad-link videoconvert to it. */
if (!gst_element_link (appsrc, conv)) {
  g_printerr ("Failed to link appsrc -> videoconvert. Exiting.\n");
  return -1;
}
{
  GstPad *muxsink = gst_element_get_request_pad (streammux, "sink_0");
  GstPad *convsrc = gst_element_get_static_pad (conv, "src");
  if (!muxsink || !convsrc ||
      gst_pad_link (convsrc, muxsink) != GST_PAD_LINK_OK) {
    g_printerr ("Failed to link videoconvert -> nvstreammux. Exiting.\n");
    return -1;
  }
  gst_object_unref (convsrc);
  gst_object_unref (muxsink);
}
if (!gst_element_link (streammux, sink)) {
  g_printerr ("Failed to link nvstreammux -> sink. Exiting.\n");
  return -1;
}
/* setup appsrc: live stream semantics, timestamps in GST_FORMAT_TIME */
g_object_set (G_OBJECT (appsrc),
    "stream-type", 0,
    "format", GST_FORMAT_TIME, NULL);
g_signal_connect (appsrc, "need-data", G_CALLBACK (cb_need_data), NULL);
/* play */
gst_element_set_state (pipeline, GST_STATE_PLAYING);
g_main_loop_run (loop);
/* clean up */
gst_element_set_state (pipeline, GST_STATE_NULL);
gst_object_unref (GST_OBJECT (pipeline));
g_main_loop_unref (loop);
return 0;
}
I am an absolute beginner, so if someone could show some example code, that would be much appreciated.
Thanks.
You need to create a pipeline as follows:
"appsrc" takes your frame as input
"nvvideoconvert" does format conversion
"nvstreammux" multiplexes streams in case of multiple sources
"nvinfer" does inferencing on the input stream
"nvvideoconvert" converts frame to RGBA now
"nvdsosd" draws bounding boxes on the frame
"nveglglessink" displays the frame
To run inference with your model, you need to set the path to your model's config file, and the path of the image/video you want to run inference on.
To run this on an H.264-encoded video, just change
`#define RUN_VIDEO 0`
to `#define RUN_VIDEO 1`.