OpenCV Exception: Assertion failed in cv::DescriptorMatcher::checkMasks

66 views Asked by At

I’m encountering an OpenCV exception with the following error: “OpenCV Exception: Assertion failed in cv::DescriptorMatcher::checkMasks”. I’m using OpenCV version 4.7.0-dev. The error occurs when I’m trying to compute matches between image descriptors and patch descriptors using the cv::BFMatcher. The assertion failure specifically mentions “masks.size() == imageCount”.

Here are the steps I have followed so far:

Checked that the corrupted image and patches are correctly loaded. Extracted SIFT features from the image and patches. Converted the descriptors to the appropriate type. Tried computing matches using cv::BFMatcher and encountered the assertion failure.

Operating System: Windows 10. Programming language: C++. Here is the code:

#include <iostream>
#include <vector>
#include <opencv2/opencv.hpp>

void loadCorruptedImage(const std::string & imagePath, cv::Mat & image) {
    // Read the corrupted input image from disk as a 3-channel BGR image.
    // On failure `image` is left empty and an error is reported on stderr;
    // callers must check image.empty() before using it.
    image = cv::imread(imagePath, cv::IMREAD_COLOR);
    if (!image.empty())
        return;
    std::cerr << "Failed to load image: " << imagePath << std::endl;
}

void loadPatches(const std::string& patchesPath, std::vector<cv::Mat>& patches) {
    // Load the patches from file
    // You can implement the specific loading logic based on your dataset format

    // Example: Loading all images from a directory
    cv::String path(patchesPath + "/*.jpg");
    std::vector<cv::String> fileNames;
    cv::glob(path, fileNames);

    for (const auto& fileName : fileNames) {
        cv::Mat patch = cv::imread(fileName, cv::IMREAD_COLOR);

        if (!patch.empty()) {
            patches.push_back(patch);
        }
    }
}

void extractSiftFeatures(const cv::Mat& image, std::vector<cv::KeyPoint>& keypoints, cv::Mat& descriptors) {
    // Detect SIFT keypoints and compute their descriptors in a single pass.
    // No detection mask is supplied, so the whole image is searched.
    const cv::Ptr<cv::Feature2D> detector = cv::SIFT::create();
    detector->detectAndCompute(image, cv::noArray(), keypoints, descriptors);
}

void extractSiftFeaturesFromPatches(const std::vector<cv::Mat>& patches, std::vector<cv::KeyPoint>& keypoints, std::vector<cv::Mat>& descriptors) {
    // Extract SIFT keypoints and descriptors from every patch.
    // Keypoints from all patches are flattened into a single vector, while
    // descriptors are kept as one cv::Mat per patch.
    //
    // Fix: patches that yield no keypoints used to push an EMPTY descriptor
    // Mat into `descriptors`. Empty entries in a train-descriptor collection
    // trip assertions inside cv::DescriptorMatcher later (the imageCount /
    // checkMasks failures); such patches are now skipped entirely.
    //
    // NOTE(review): because keypoints are flattened but descriptors stay
    // per-patch, a DMatch::trainIdx from a collection-based match is local
    // to DMatch::imgIdx and does NOT index this flattened keypoint vector —
    // confirm how the caller pairs them up.
    cv::Ptr<cv::Feature2D> sift = cv::SIFT::create();

    for (const auto& patch : patches) {
        std::vector<cv::KeyPoint> patchKeypoints;
        cv::Mat patchDescriptors;

        sift->detectAndCompute(patch, cv::noArray(), patchKeypoints, patchDescriptors);

        if (patchDescriptors.empty())
            continue;  // featureless patch — nothing usable for matching

        keypoints.insert(keypoints.end(), patchKeypoints.begin(), patchKeypoints.end());
        descriptors.push_back(patchDescriptors);
    }
}

void convertDescriptorsType(const cv::Mat& inputDescriptors, cv::Mat& outputDescriptors) {
    // Convert descriptors to 32-bit float for use with cv::BFMatcher.
    // Fix: this previously converted to CV_32S, but a BFMatcher created
    // with cv::NORM_L2 requires CV_32F descriptors — CV_32S trips a type
    // assertion inside the matcher. Every other call site in this file
    // already converts to CV_32F; this now matches them.
    inputDescriptors.convertTo(outputDescriptors, CV_32F);
}

void computeMatches(const cv::Mat& imageDescriptors, const std::vector<cv::Mat>& patchDescriptors, std::vector<std::vector<cv::DMatch>>& matches) {
    // For every image descriptor, find its 2 nearest neighbours across ALL
    // patch descriptor sets (two neighbours are needed for the ratio test).
    //
    // Fixes versus the original (this is where the reported
    // "Assertion failed in cv::DescriptorMatcher::checkMasks" came from):
    //  * The hand-built masks were 1 x trainRows, but DescriptorMatcher
    //    requires each per-image mask to be queryRows x trainRows — exactly
    //    the size check that was firing. An all-ones mask filters nothing,
    //    so the masks are dropped altogether.
    //  * To match against a COLLECTION of train Mats, the descriptors must
    //    be registered via add() and the add()-based knnMatch overload used;
    //    DMatch::imgIdx then identifies which patch each match came from.
    //  * Empty descriptor Mats (patches with no keypoints) are skipped;
    //    empty train entries also trip matcher assertions.

    // Convert the query descriptors to CV_32F as required by NORM_L2.
    cv::Mat queryDescriptors;
    imageDescriptors.convertTo(queryDescriptors, CV_32F);

    // Convert the train (patch) descriptors, discarding empty Mats.
    std::vector<cv::Mat> trainDescriptors;
    trainDescriptors.reserve(patchDescriptors.size());
    for (const auto& patchDescriptor : patchDescriptors) {
        if (patchDescriptor.empty())
            continue;
        cv::Mat converted;
        patchDescriptor.convertTo(converted, CV_32F);
        trainDescriptors.push_back(converted);
    }

    matches.clear();
    if (queryDescriptors.empty() || trainDescriptors.empty())
        return;  // nothing to match — leave `matches` empty

    cv::BFMatcher matcher(cv::NORM_L2);
    matcher.add(trainDescriptors);
    matcher.train();  // no-op for BFMatcher; kept for matcher-agnostic code
    matcher.knnMatch(queryDescriptors, matches, 2);
}

void refineMatches(const std::vector<std::vector<cv::DMatch>>& matches, float ratio, std::vector<cv::DMatch>& refinedMatches) {
    // Lowe's ratio test: keep a best match only when it is clearly better
    // than the runner-up, i.e. distance(best) < ratio * distance(second).
    // Entries with fewer than two candidates cannot be tested and are dropped.
    for (const auto& candidates : matches) {
        if (candidates.size() >= 2) {
            const cv::DMatch& best = candidates[0];
            const cv::DMatch& runnerUp = candidates[1];
            if (best.distance < ratio * runnerUp.distance) {
                refinedMatches.push_back(best);
            }
        }
    }
}

cv::Mat findHomographyRANSAC(const std::vector<cv::KeyPoint>& imageKeypoints, const std::vector<cv::KeyPoint>& patchKeypoints,
    const std::vector<cv::DMatch>& matches, std::vector<cv::DMatch>& inliers) {
    // Estimate the patch -> image homography with RANSAC and collect the
    // matches RANSAC kept as inliers. Returns an empty cv::Mat on failure.
    //
    // Fix: cv::findHomography needs at least 4 point correspondences and
    // throws (or returns empty) below that — bail out early instead.
    if (matches.size() < 4) {
        return cv::Mat();
    }

    // NOTE(review): match.trainIdx is assumed to index the flattened
    // patchKeypoints vector; if matching was done against a per-patch
    // collection, trainIdx is local to DMatch::imgIdx — confirm upstream.
    std::vector<cv::Point2f> imagePoints, patchPoints;
    for (const auto& match : matches) {
        imagePoints.push_back(imageKeypoints[match.queryIdx].pt);
        patchPoints.push_back(patchKeypoints[match.trainIdx].pt);
    }

    // RANSAC with a 3-pixel reprojection threshold; `mask` flags inliers.
    std::vector<uchar> mask;
    cv::Mat homography = cv::findHomography(patchPoints, imagePoints, cv::RANSAC, 3.0, mask);

    // Keep only the matches RANSAC marked as inliers.
    for (size_t i = 0; i < mask.size(); ++i) {
        if (mask[i])
            inliers.push_back(matches[i]);
    }

    return homography;
}

void overlayPatches(cv::Mat& image, const std::vector<cv::Mat>& patches, const std::vector<cv::KeyPoint>& imageKeypoints,
    const std::vector<cv::KeyPoint>& patchKeypoints, const std::vector<cv::DMatch>& inliers,
    const cv::Mat& homography) {
    // Warp the first patch into the image plane and copy it over the image
    // around every inlier correspondence.
    // NOTE(review): only patches[0] is ever used — presumably the pipeline
    // is being developed with a single patch; confirm before adding more.

    if (image.empty() || patches.empty() || homography.empty())
        return;  // nothing to overlay

    cv::Mat warpedImage;
    cv::warpPerspective(patches[0], warpedImage, homography, image.size());

    const cv::Rect imageBounds(0, 0, image.cols, image.rows);

    for (const auto& inlier : inliers) {
        const cv::KeyPoint& imageKeyPoint = imageKeypoints[inlier.queryIdx];
        const cv::KeyPoint& patchKeyPoint = patchKeypoints[inlier.trainIdx];

        const cv::Point2f patchPoint = patchKeyPoint.pt;
        const cv::Point2f imagePoint = imageKeyPoint.pt;

        // Fix: cv::findHomography returns a CV_64F matrix, while
        // cv::Mat(cv::Point3f) is CV_32F — multiplying them throws a type
        // assertion. Build the homogeneous point explicitly as CV_64F.
        cv::Mat warpedPatchPoint = homography *
            (cv::Mat_<double>(3, 1) << patchPoint.x, patchPoint.y, 1.0);
        warpedPatchPoint /= warpedPatchPoint.at<double>(2);  // de-homogenize

        const int overlayX = static_cast<int>(imagePoint.x - warpedPatchPoint.at<double>(0));
        const int overlayY = static_cast<int>(imagePoint.y - warpedPatchPoint.at<double>(1));

        // Fix: the ROI was previously taken without bounds checking; an
        // overlay rectangle extending past the image border throws. Clip it
        // to the image and skip correspondences that fall fully outside.
        cv::Rect overlayRect(overlayX, overlayY, patches[0].cols, patches[0].rows);
        overlayRect &= imageBounds;
        if (overlayRect.empty())
            continue;

        // Crop the source patch to the clipped region so that source and
        // destination ROIs keep identical sizes (required by copyTo).
        const cv::Rect srcRect(overlayRect.x - overlayX, overlayRect.y - overlayY,
                               overlayRect.width, overlayRect.height);
        // Overlay the patch, masked by the warped patch content.
        patches[0](srcRect).copyTo(image(overlayRect), warpedImage(overlayRect));
    }
}

int main() {
    // End-to-end pipeline: load a corrupted image and a set of repair
    // patches, match SIFT features between them, estimate a homography,
    // and overlay the patches onto the corrupted regions.
    // Provide the paths to the corrupted image and patches dataset
    std::string imagePath = "C:/Users/Nomad/source/repos/Project6/image_to_complete.jpg";
    std::string patchesPath = "C:/Users/Nomad/source/repos/Project6/Patch";


        // Load the corrupted image
        cv::Mat corruptedImage;
        loadCorruptedImage(imagePath, corruptedImage);
        // NOTE(review): corruptedImage.empty() is not checked here; a bad
        // path makes detectAndCompute below throw. Consider bailing out.

        // Load the patches
        std::vector<cv::Mat> patches;
        loadPatches(patchesPath, patches);

        // Extract SIFT features from the image
        std::vector<cv::KeyPoint> imageKeypoints;
        cv::Mat imageDescriptors;
        extractSiftFeatures(corruptedImage, imageKeypoints, imageDescriptors);

        // Extract SIFT features from the patches
        // NOTE(review): patchKeypoints is a single flattened vector while
        // patchDescriptors stays one Mat per patch — trainIdx from a
        // collection-based match will NOT index patchKeypoints correctly.
        std::vector<cv::KeyPoint> patchKeypoints;
        std::vector<cv::Mat> patchDescriptors;
        extractSiftFeaturesFromPatches(patches, patchKeypoints, patchDescriptors);

        // Convert image descriptors type
        // (BFMatcher with NORM_L2 requires CV_32F descriptors.)
        cv::Mat convertedImageDescriptors;
        imageDescriptors.convertTo(convertedImageDescriptors, CV_32F);

        // Convert patch descriptors type
        // NOTE(review): empty Mats (patches with no keypoints) are kept;
        // empty train descriptors trip DescriptorMatcher assertions.
        std::vector<cv::Mat> convertedPatchDescriptors;
        for (const auto& patchDescriptor : patchDescriptors) {
            cv::Mat convertedPatchDescriptor;
            patchDescriptor.convertTo(convertedPatchDescriptor, CV_32F);
            convertedPatchDescriptors.push_back(convertedPatchDescriptor);
        }

        try {
            // Create a BFMatcher object
            cv::BFMatcher matcher(cv::NORM_L2);

            // Compute matches between the image and patch descriptors
            // NOTE(review): to match against a collection of train Mats,
            // the matcher API expects add(convertedPatchDescriptors) +
            // knnMatch(query, matches, 2). Passing the vector directly as
            // the second argument binds the InputArray overload meant for a
            // single train Mat — this is the likely source of the
            // checkMasks / imageCount assertion failures. Verify against
            // the cv::DescriptorMatcher documentation.
            std::vector<std::vector<cv::DMatch>> matches;
            matcher.knnMatch(convertedImageDescriptors, convertedPatchDescriptors, matches, 2);

            // Refine matches based on ratio test (Lowe ratio 0.8)
            std::vector<cv::DMatch> refinedMatches;
            refineMatches(matches, 0.8f, refinedMatches);

            // Find homography using RANSAC
            // NOTE(review): needs >= 4 refined matches or findHomography fails.
            std::vector<cv::DMatch> inliers;
            cv::Mat homography = findHomographyRANSAC(imageKeypoints, patchKeypoints, refinedMatches, inliers);

            // Overlay the patches on the image
            overlayPatches(corruptedImage, patches, imageKeypoints, patchKeypoints, inliers, homography);

            // Display the resulting image with fixed corrupted regions
            cv::imshow("Fixed Image", corruptedImage);
            cv::waitKey(0);
        }
        catch (const cv::Exception& e) {
            // OpenCV assertion/exception path — the reported error lands here.
            std::cerr << "OpenCV Exception: " << e.what() << std::endl;
        }
        catch (const std::exception& e) {
            std::cerr << "Exception: " << e.what() << std::endl;
        }

        return 0;
    }

I would greatly appreciate any guidance or suggestions to resolve this issue. Thank you in advance for your help!

I have tried the following variants of `computeMatches`:

// Attempt 1 (quoted from the question): knnMatch called with the vector of
// patch descriptor Mats passed directly as the train argument.
// NOTE(review): this binds the InputArray overload intended for a SINGLE
// train Mat; to match against a collection, the matcher expects
// add(convertedPatchDescriptors) followed by knnMatch(query, matches, 2) —
// presumably why the matcher's internal checks still fire. Confirm against
// the cv::DescriptorMatcher documentation.
void computeMatches(const cv::Mat& imageDescriptors, const std::vector<cv::Mat>& patchDescriptors, std::vector<std::vector<cv::DMatch>>& matches) {
    // Convert descriptors type (BFMatcher/NORM_L2 requires CV_32F)
    cv::Mat convertedImageDescriptors;
    imageDescriptors.convertTo(convertedImageDescriptors, CV_32F);

    std::vector<cv::Mat> convertedPatchDescriptors;
    for (const auto& patchDescriptor : patchDescriptors) {
        cv::Mat convertedPatchDescriptor;
        patchDescriptor.convertTo(convertedPatchDescriptor, CV_32F);
        convertedPatchDescriptors.push_back(convertedPatchDescriptor);
    }

    // Compute matches between the image and patch descriptors
    cv::BFMatcher matcher(cv::NORM_L2);
    matcher.knnMatch(convertedImageDescriptors, convertedPatchDescriptors, matches, 2);
}
// Attempt 2 (quoted from the question): same as attempt 1 but with
// hand-built all-ones masks.
// NOTE(review): two problems — (a) a per-image mask must be sized
// queryRows x trainRows, not 1 x trainRows, which is exactly the
// checkMasks assertion reported; (b) an all-ones mask filters nothing,
// so masks should simply be omitted here.
void computeMatches(const cv::Mat& imageDescriptors, const std::vector<cv::Mat>& patchDescriptors, std::vector<std::vector<cv::DMatch>>& matches) {
    // Convert descriptors type (BFMatcher/NORM_L2 requires CV_32F)
    cv::Mat convertedImageDescriptors;
    imageDescriptors.convertTo(convertedImageDescriptors, CV_32F);

    std::vector<cv::Mat> convertedPatchDescriptors;
    for (const auto& patchDescriptor : patchDescriptors) {
        cv::Mat convertedPatchDescriptor;
        patchDescriptor.convertTo(convertedPatchDescriptor, CV_32F);
        convertedPatchDescriptors.push_back(convertedPatchDescriptor);
    }

    // Compute matches between the image and patch descriptors
    cv::BFMatcher matcher(cv::NORM_L2);

    // Prepare masks for each image being matched
    // BUG(review): wrong mask shape — see note above.
    std::vector<cv::Mat> masks;
    for (size_t i = 0; i < patchDescriptors.size(); ++i) {
        cv::Mat mask = cv::Mat::ones(1, patchDescriptors[i].rows, CV_8U);
        masks.push_back(mask);
    }

    matcher.knnMatch(convertedImageDescriptors, convertedPatchDescriptors, matches, 2, masks);
}
// Attempt 3 (quoted from the question): masks filled with cv::noArray().
// NOTE(review): cv::noArray() returns an _InputOutputArray, not a cv::Mat,
// so the vector construction below should not even compile; and even with
// empty Mats, the correct fix is to drop the masks argument entirely.
void computeMatches(const cv::Mat& imageDescriptors, const std::vector<cv::Mat>& patchDescriptors, std::vector<std::vector<cv::DMatch>>& matches) {
    // Convert descriptors type (BFMatcher/NORM_L2 requires CV_32F)
    cv::Mat convertedImageDescriptors;
    imageDescriptors.convertTo(convertedImageDescriptors, CV_32F);

    std::vector<cv::Mat> convertedPatchDescriptors;
    for (const auto& patchDescriptor : patchDescriptors) {
        cv::Mat convertedPatchDescriptor;
        patchDescriptor.convertTo(convertedPatchDescriptor, CV_32F);
        convertedPatchDescriptors.push_back(convertedPatchDescriptor);
    }

    // Compute matches between the image and patch descriptors
    cv::BFMatcher matcher(cv::NORM_L2);

    // Prepare masks for each image being matched
    // BUG(review): cv::noArray() is not convertible to cv::Mat — see note above.
    std::vector<cv::Mat> masks(patchDescriptors.size(), cv::noArray());

    matcher.knnMatch(convertedImageDescriptors, convertedPatchDescriptors, matches, 2, masks);
}
0

There are 0 answers