I want to detect objects with YOLOv4 through the OpenCV DNN module on the ROS platform, but it does not work. The same code runs fine with the OpenCV DNN module and YOLOv4 outside of ROS; it only fails when built and run as a ROS node.
CMakeLists.txt (this is where I linked OpenCV):
cmake_minimum_required(VERSION 3.0.2)
project(test_opencv)
set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
add_compile_options(-g3)
set(OpenCV_DIR "/home/usernamepc/opencv_build/opencv/build")
find_package(OpenCV 4.4.0 REQUIRED)
My code is below. It works without ROS but fails when run as a ROS node.
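(For completeness: the file also contains the usual includes and the detection constants used further down. The values shown here are representative placeholders rather than the exact ones from my project, since they are not important for the problem.)

#include <ros/ros.h>
#include <opencv2/opencv.hpp>
#include <opencv2/dnn.hpp>
#include <fstream>
#include <iostream>
#include <sstream>
#include <iomanip>
#include <chrono>
#include <vector>
#include <string>

using namespace std;
using namespace cv;
using namespace cv::dnn;

// Placeholder values -- adjust to your own model/configuration.
constexpr int NUM_CLASSES = 1;                 // number of classes in obj_license.names
constexpr float CONFIDENCE_THRESHOLD = 0.5f;   // minimum class score to keep a box
constexpr float NMS_THRESHOLD = 0.4f;          // IoU threshold for non-maximum suppression
constexpr int NUM_COLORS = 1;
const Scalar colors[NUM_COLORS] = { Scalar(0, 255, 0) };
bool detected_control = false;                 // set when at least one detection passes the threshold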
int main(int argc, char **argv){
ros::init(argc, argv, "test_opencv");
int inpWidth = 608;
int inpHeight = 608;
int count = 0;
std::cout << "OpenCV version : " << CV_VERSION << std::endl;
std::cout << "Major version : " << CV_MAJOR_VERSION << std::endl;
std::cout << "Minor version : " << CV_MINOR_VERSION << std::endl;
std::cout << "Subminor version : " << CV_SUBMINOR_VERSION << std::endl;
vector<string> class_names;
{
ifstream class_file("/home/USERNAMEPC/people_detection_ws/src/test_opencv/input/data/obj_license.names");
if (!class_file)
{
cerr << "failed to open classes.txt\n";
return 0;
}
string line;
while (getline(class_file, line))
{
class_names.push_back(line);
cout << "Data: " << line << endl;
}
}
string video = "/home/USERNAMEPC/people_detection_ws/src/test_opencv/input/plate/1.mp4";
VideoCapture cap(video);
auto net = readNetFromDarknet("/home/USERNAMEPC/people_detection_ws/src/test_opencv/input/cfg/yolov4_obj_license.cfg",
"/home/USERNAMEPC/people_detection_ws/src/test_opencv/input/custom_license.weights");
auto output_names = net.getUnconnectedOutLayersNames();
double inference_fps = 0;
double total_fps = 0;
Mat frame, blob;
vector<Mat> detections;
while (waitKey(1) < 1)
{
cap >> frame;
if (frame.empty())
{
waitKey();
break;
}
auto total_start = chrono::steady_clock::now();
imshow("frame", frame);
waitKey(1);
cv::dnn::blobFromImage(frame, blob, 0.00392, Size(inpWidth, inpHeight), Scalar(), true, false, CV_32F);
if (blob.empty())
{
std::cout << "blob is empty" << std::endl;
return -1;
}
net.setInput(blob);
auto dnn_start = chrono::steady_clock::now();
cout << "6 " << endl;
net.forward(detections, output_names);
cout << "7 " << endl;
auto dnn_end = chrono::steady_clock::now();
vector<int> indices[NUM_CLASSES];
vector<Rect> boxes[NUM_CLASSES];
vector<float> scores[NUM_CLASSES];
for (auto &output : detections)
{
const auto num_boxes = output.rows;
for (int i = 0; i < num_boxes; i++)
{
auto x = output.at<float>(i, 0) * frame.cols;
auto y = output.at<float>(i, 1) * frame.rows;
auto width = output.at<float>(i, 2) * frame.cols;
auto height = output.at<float>(i, 3) * frame.rows;
Rect rect(x - width / 2, y - height / 2, width, height);
for (int c = 0; c < NUM_CLASSES; c++)
{
auto confidence = *output.ptr<float>(i, 5 + c);
if (confidence >= CONFIDENCE_THRESHOLD)
{
boxes[c].push_back(rect);
scores[c].push_back(confidence);
detected_control = true;
}
}
}
}
for (int c = 0; c < NUM_CLASSES; c++)
{
NMSBoxes(boxes[c], scores[c], 0.0, NMS_THRESHOLD, indices[c]);
}
for (int c = 0; c < NUM_CLASSES; c++)
{
for (size_t i = 0; i < indices[c].size(); ++i)
{
const auto color = colors[c % NUM_COLORS];
auto idx = indices[c][i];
auto &rect = boxes[c][idx];
rectangle(frame, Point(rect.x, rect.y), Point(rect.x + rect.width, rect.y + rect.height), color, 3);
ostringstream label_ss;
label_ss << class_names[c] << ": " << fixed << setprecision(2) << scores[c][idx];
auto label = label_ss.str();
int baseline;
auto label_bg_sz = getTextSize(label.c_str(), FONT_HERSHEY_COMPLEX_SMALL, 1, 1, &baseline);
rectangle(frame, Point(rect.x, rect.y - label_bg_sz.height - baseline - 10), Point(rect.x + label_bg_sz.width, rect.y), color, FILLED);
putText(frame, label.c_str(), Point(rect.x, rect.y - baseline - 5), FONT_HERSHEY_COMPLEX_SMALL, 1, Scalar(0, 0, 0));
}
}
auto total_end = chrono::steady_clock::now();
inference_fps = 1000.0 / chrono::duration_cast<chrono::milliseconds>(dnn_end - dnn_start).count();
total_fps = 1000.0 / chrono::duration_cast<chrono::milliseconds>(total_end - total_start).count();
ostringstream stats_ss;
stats_ss << fixed << setprecision(2);
stats_ss << "Inference FPS: " << inference_fps << ", Total FPS: " << total_fps;
auto stats = stats_ss.str();
int baseline;
auto stats_bg_sz = getTextSize(stats.c_str(), FONT_HERSHEY_COMPLEX_SMALL, 1, 1, &baseline);
rectangle(frame, Point(0, 0), Point(stats_bg_sz.width, stats_bg_sz.height + 10), Scalar(0, 0, 0), FILLED);
putText(frame, stats.c_str(), Point(0, stats_bg_sz.height + 5), FONT_HERSHEY_COMPLEX_SMALL, 1, Scalar(255, 255, 255));
// namedWindow("output");
count++;
}
cout << "Inference FPS: " << inference_fps << ", Total FPS: " << total_fps << endl;
ros::spin();
return 0;
}
I get an error when execution reaches this line:
net.forward(detections, output_names);
The error is:
OpenCV Error: Assertion failed (dims <= 2) in reshape, file /build/opencv-L2vuMj/opencv-3.2.0+dfsg/modules/core/src/matrix.cpp, line 1032
terminate called after throwing an instance of 'cv::Exception'
  what():  /build/opencv-L2vuMj/opencv-3.2.0+dfsg/modules/core/src/matrix.cpp:1032: error: (-215) dims <= 2 in function reshape
When I run it under the GDB debugger, I get the following backtrace:
(gdb) bt
#0  0x00007ffff61b5e87 in __GI_raise (sig=sig@entry=6) at ../sysdeps/unix/sysv/linux/raise.c:51
#1 0x00007ffff61b77f1 in __GI_abort () at abort.c:79
#2 0x00007ffff680c957 in () at /usr/lib/x86_64-linux-gnu/libstdc++.so.6
#3 0x00007ffff6812ae6 in () at /usr/lib/x86_64-linux-gnu/libstdc++.so.6
#4 0x00007ffff6812b21 in () at /usr/lib/x86_64-linux-gnu/libstdc++.so.6
#5 0x00007ffff6812d54 in () at /usr/lib/x86_64-linux-gnu/libstdc++.so.6
#6 0x00007ffff77c38a2 in cv::error(cv::Exception const&) () at /usr/lib/x86_64-linux-gnu/libopencv_core.so.3.2
#7 0x00007ffff77c39bf in cv::error(int, cv::String const&, char const*, char const*, int) () at /usr/lib/x86_64-linux-gnu/libopencv_core.so.3.2
#8 0x00007ffff7734b1c in cv::Mat::reshape(int, int) const () at /usr/lib/x86_64-linux-gnu/libopencv_core.so.3.2
#9 0x00007ffff6efb344 in cv::dnn::ConvolutionLayerImpl::finalize(cv::_InputArray const&, cv::_OutputArray const&) () at /home/USERNAMEPC/opencv_build/opencv/build/lib/libopencv_dnn.so.4.4
#10 0x00007ffff6eb12d7 in cv::dnn::dnn4_v20200609::Layer::finalize(std::vector<cv::Mat, std::allocator<cv::Mat> > const&, std::vector<cv::Mat, std::allocator<cv::Mat> >&) () at /home/USERNAMEPC/opencv_build/opencv/build/lib/libopencv_dnn.so.4.4
#11 0x00007ffff6ed1ff3 in cv::dnn::dnn4_v20200609::Net::Impl::allocateLayer(int, std::map<int, cv::dnn::dnn4_v20200609::(anonymous namespace)::LayerShapes, std::less, std::allocator<std::pair<int const, cv::dnn::dnn4_v20200609::(anonymous namespace)::LayerShapes> > > const&) () at /home/USERNAMEPC/opencv_build/opencv/build/lib/libopencv_dnn.so.4.4
#12 0x00007ffff6ed3ff2 in cv::dnn::dnn4_v20200609::Net::Impl::allocateLayers(std::vector<cv::dnn::dnn4_v20200609::LayerPin, std::allocator<cv::dnn::dnn4_v20200609::LayerPin> > const&) () at /home/USERNAMEPC/opencv_build/opencv/build/lib/libopencv_dnn.so.4.4
#13 0x00007ffff6ed7675 in cv::dnn::dnn4_v20200609::Net::Impl::setUpNet(std::vector<cv::dnn::dnn4_v20200609::LayerPin, std::allocator<cv::dnn::dnn4_v20200609::LayerPin> > const&) () at /home/USERNAMEPC/opencv_build/opencv/build/lib/libopencv_dnn.so.4.4
#14 0x00007ffff6ed8ac3 in cv::dnn::dnn4_v20200609::Net::forward(cv::_OutputArray const&, std::vector<std::__cxx11::basic_string<char, std::char_traits, std::allocator >, std::allocator<std::__cxx11::basic_string<char, std::char_traits, std::allocator > > > const&) () at /home/USERNAMEPC/opencv_build/opencv/build/lib/libopencv_dnn.so.4.4
#15 0x0000555555561c0e in main(int, char**) (argc=1, argv=0x7fffffffd918) at /home/USERNAMEPC/people_detection_ws/src/test_opencv/src/test_opencv.cpp:147
How can I solve this problem?
The backtrace shows frames from libopencv_core.so.3.2 (the system OpenCV that ships with ROS) and from libopencv_dnn.so.4.4 (my custom build) in the same process, so two OpenCV versions were being mixed; the ROS binary cv_bridge package links against the system OpenCV 3.2 and was pulling it in. I removed the binary cv_bridge package and built cv_bridge from its source on GitHub against OpenCV 4.4. After that, the code runs.
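For anyone hitting the same symptom, a quick way to check which libopencv_core a binary actually resolves at run time is a small dladdr lookup. This is a minimal sketch, not part of the original code, assuming Linux/glibc and a position-independent (PIE) executable, which is the default with recent Ubuntu compilers; link with -ldl if your glibc is older than 2.34.

#include <dlfcn.h>
#include <iostream>
#include <opencv2/core.hpp>

int main()
{
    // Take the address of a non-overloaded OpenCV core function and ask the
    // dynamic linker which shared object provides it.
    Dl_info info;
    void *addr = reinterpret_cast<void *>(&cv::getBuildInformation);
    if (dladdr(addr, &info) != 0 && info.dli_fname != nullptr)
        std::cout << "cv::getBuildInformation resolved from: " << info.dli_fname << std::endl;
    else
        std::cout << "could not resolve the symbol's location" << std::endl;
    return 0;
}

If the printed path still points at /usr/lib/x86_64-linux-gnu/libopencv_core.so.3.2 while the DNN module comes from the 4.4 build, the version mix is still present.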