Python OpenCV: find correspondences between 2 images with Harris Corner Detection


My teacher gave us the following exercise:

Exercise

At the moment the only progress I have made is getting the Harris corners of both images using cv2.cornerHarris() and placing the pictures next to each other.

Now I have no idea how to get the corners themselves and an area around them to generate a template which could be used for template matching.

I hope that once I understand this trick I will be able to solve the rest of the exercise. Maybe some of you could help me? A short explanation of how it works would be very kind, so that I can learn a bit more :)

Here is my current code:

import cv2
import numpy as np

churchLeft = cv2.imread("./Church/church_left.png")
churchRight = cv2.imread("./Church/church_right.png")


def doHarris(img):
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    gray = np.float32(gray)

    dst = cv2.cornerHarris(gray, 2, 3, 0.01)


    # result is dilated for marking the corners, not important
    dst = cv2.dilate(dst, None)

    # Threshold for an optimal value, it may vary depending on the image.
    img[dst > 0.01 * dst.max()] = [0, 0, 255]


    return img


churchLeftHarris = doHarris(churchLeft)
churchRightHarris = doHarris(churchRight)

hor = np.hstack((churchLeftHarris, churchRightHarris))

cv2.imshow('test', hor)
while (1):
    k = cv2.waitKey(1) & 0xFF
    if k == 27:
        break
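
Would something along the lines of the following untested sketch be the right direction? It thresholds the Harris response, takes the coordinates of the strong pixels and crops a small window around each one. The 1% threshold and the window half-size of 7 are arbitrary guesses on my part:

import cv2
import numpy as np

def get_corner_templates(img, half_size=7):
    gray = np.float32(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY))
    dst = cv2.cornerHarris(gray, 2, 3, 0.01)

    # Coordinates of every pixel whose response is above 1% of the maximum.
    ys, xs = np.where(dst > 0.01 * dst.max())

    h, w = gray.shape
    templates = []
    for x, y in zip(xs, ys):
        # Skip corners whose window would fall outside the image.
        if x - half_size < 0 or y - half_size < 0 or x + half_size >= w or y + half_size >= h:
            continue
        patch = img[y - half_size:y + half_size + 1, x - half_size:x + half_size + 1]
        templates.append(((x, y), patch))
    return templates

templates_left = get_corner_templates(cv2.imread("./Church/church_left.png"))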

1 Answer

Answered by Moe:

You can try my code below:

import numpy as np
import cv2
from matplotlib import pyplot as plt

"""
Difference between goodFeaturesToTrack and Harrisdetector:
The main difference with the Harris algorithm is that you should
specify the minimum distance between each point, the quality level
and the number of corners to detect.

"""
# You can use this method to detect the Harris corners instead of goodFeaturesToTrack:

#dst1 = cv2.cornerHarris(gray1, 5, 7, 0.04)
#ret1, dst1 = cv2.threshold(dst1, 0.1 * dst1.max(), 255, 0)
#dst1 = np.uint8(dst1)
#ret1, labels1, stats1, centroids1 = cv2.connectedComponentsWithStats(dst1)
#criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.001)
#corners1 = cv2.cornerSubPix(gray1, np.float32(centroids1), (5, 5), (-1, -1), criteria)
#corners1 = np.intp(corners1)


def correlation_coefficient(window1, window2):
    product = np.mean((window1 - window1.mean()) * (window2 - window2.mean()))
    stds = window1.std() * window2.std()
    if stds == 0:
        return 0
    else:
        product /= stds
        return product


window_size_width = 7
window_size_height = 7
lineThickness = 2

img1 = cv2.imread('church_left.png')
img2 = cv2.imread('church_right.png')
height, width, ch = img1.shape  # shape is (rows, cols, channels)
img2_copy = img2.copy()
gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)

corners1 = cv2.goodFeaturesToTrack(gray1, 30, 0.01, 5)
corners1 = np.intp(corners1)  # np.intp instead of the np.int0 alias removed in NumPy 2.0

corners2 = cv2.goodFeaturesToTrack(gray2, 30, 0.01, 5)
corners2 = np.intp(corners2)

corners_windows1 = []

for i in corners1:
    x, y = i.ravel()
    cv2.circle(img1, (x, y), 3, 255, -1)

corners_windows2 = []
for i in corners2:
    x, y = i.ravel()
    cv2.circle(img2, (x, y), 3, 255, -1)

plt.imshow(img1), plt.show()

methods = ['SSD', 'NCC']
for method in methods:
    matches = []
    for id1, i in enumerate(corners1):
        x1, y1 = i.ravel()
        if y1 - window_size_height < 0 or y1 + window_size_height > height or x1 - window_size_width < 0 or x1 + window_size_width > width:
            continue
        pt1 = (x1, y1)
        print("pt1: ", pt1)
        template = img1[y1 - window_size_height:y1 + window_size_height, x1 - window_size_width:x1 + window_size_width]
        best_score = np.inf if method == 'SSD' else -np.inf
        pt2 = None
        for id2, j in enumerate(corners2):
            x2, y2 = j.ravel()

            if y2 - window_size_height < 0 or y2 + window_size_height > height or x2 - window_size_width < 0 or x2 + window_size_width > width:
                continue
            window2 = img2[y2 - window_size_height:y2 + window_size_height,
                           x2 - window_size_width:x2 + window_size_width]
            if method == 'SSD':
                # Cast to float first so the subtraction of uint8 images cannot wrap around.
                score = np.sum((np.float32(template) - np.float32(window2)) ** 2)
                better = score < best_score  # SSD: smaller is better
            else:  # NCC
                score = correlation_coefficient(template, window2)
                better = score > best_score  # NCC: larger is better
            if better:
                best_score = score
                # Shift x2 by the width of img1 so the point lands on the right half of the stacked image.
                pt2 = (x2 + width, y2)
        if pt2 is not None:
            matches.append((pt1, pt2))
    stacked_img = np.hstack((img1, img2))
    #show the first 15 matches
    for match in matches[:15]:
        cv2.line(stacked_img, match[0], match[1], (0, 255, 0), lineThickness)
    matches = []
    plt.imshow(stacked_img), plt.show()
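
If you would rather let OpenCV do the patch comparison, the same idea can also be written with cv2.matchTemplate instead of the hand-written SSD/NCC loop. The sketch below is only meant as a starting point; it assumes the same church_left.png / church_right.png images, and cv2.TM_CCOEFF_NORMED with the 0.8 threshold are arbitrary choices:

import cv2
import numpy as np

img1 = cv2.imread('church_left.png')
img2 = cv2.imread('church_right.png')
gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)

half = 7
corners1 = np.intp(cv2.goodFeaturesToTrack(gray1, 30, 0.01, 5))

matches = []
for c in corners1:
    x, y = c.ravel()
    if x - half < 0 or y - half < 0 or x + half >= gray1.shape[1] or y + half >= gray1.shape[0]:
        continue
    template = gray1[y - half:y + half, x - half:x + half]
    # Slide the patch over the whole right image and keep the best normalized score.
    res = cv2.matchTemplate(gray2, template, cv2.TM_CCOEFF_NORMED)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
    # max_loc is the top-left corner of the best match; shift it to the patch centre.
    matches.append(((x, y), (max_loc[0] + half, max_loc[1] + half), max_val))

# Keep only reasonably confident matches.
good_matches = [m for m in matches if m[2] > 0.8]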