第一张图是search图,第二张图是template图。两张图的大小都是512*500,并且图片内的黑色线条相同。但是search图中黑线的水平宽度像素为24,白线为22;template图中黑线的水平宽度像素为31,白线为28。也就是说两图的视野并不相同。现在我的问题是:应该怎么将这两张图进行配准,然后将两张图片相减,得出template图中短路的部分。我贴上我的代码,主要思想是使用旋转和缩放,再通过边缘点使用cv2.matchTemplate进行配准,但效果极差,没有达到想要的结果。
import numpy as np
import argparse
import imutils
import glob
import cv2
import datetime
from skimage.metrics import structural_similarity as compare_ssim
from skimage.metrics import peak_signal_noise_ratio as compare_psnr
# --- Load and preprocess the template image ---
# cv2.imread returns None on a missing/unreadable file; without this check
# the failure surfaces later as an opaque cv2.cvtColor assertion.
template = cv2.imread("template3_bina.bmp")
if template is None:
    raise FileNotFoundError("cannot read template image: template3_bina.bmp")
# Convert to grayscale.
template = cv2.cvtColor(template, cv2.COLOR_BGR2GRAY)
# Matching is done on Canny edge maps, so the different line widths of the
# two images matter less than matching on raw intensities would.
template = cv2.Canny(template, 50, 200)
(tH, tW) = template.shape[:2]
# cv2.imshow("Template", template)

# --- Load the search image and convert to grayscale ---
image = cv2.imread("Search3_bina_512500.bmp")
if image is None:
    raise FileNotFoundError("cannot read search image: Search3_bina_512500.bmp")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# Best match seen so far: (score, location, resize ratio, angle, scale).
found = None
# NOTE(review): this offset is never reassigned anywhere in the script, so it
# stays (0, 0) forever and any later additions with it are no-ops.
highest_matching_value = (0, 0)
# Rotation sweep range in degrees.
start_angle = 1.0
end_angle = 5.0
step = 0.1
# Scale sweep: 50 evenly spaced values in [0.9, 2.0] plus a few ad-hoc extras
# (2.2 intentionally extends beyond the linspace range).
scale_array = np.linspace(0.9, 2.0, 50)
scale_array = np.append(scale_array, (1.6, 2.2, 1.0))
# Exhaustive search over scale (largest first) and rotation angle, keeping
# the location with the highest normalized cross-correlation on edge maps.
for scale in scale_array[::-1]:
    # Resize the search image for the current scale.
    resized = imutils.resize(gray, width=int(gray.shape[1] * scale))
    r = gray.shape[1] / float(resized.shape[1])
    # Once the resized image is smaller than the template no match is
    # possible at this or any smaller scale — stop.
    if resized.shape[0] < tH or resized.shape[1] < tW:
        break
    for angle in np.arange(start_angle, end_angle, step):
        # Rotate the resized image about its center (no extra scaling).
        center = (resized.shape[1] // 2, resized.shape[0] // 2)
        rotation_matrix = cv2.getRotationMatrix2D(center, angle, 1.0)
        rotated_image = cv2.warpAffine(
            resized, rotation_matrix, (resized.shape[1], resized.shape[0]))
        # Edge-detect, then template-match edge map against edge map.
        edged = cv2.Canny(rotated_image, 50, 200)
        result = cv2.matchTemplate(edged, template, cv2.TM_CCOEFF_NORMED)
        (_, maxVal, _, maxLoc) = cv2.minMaxLoc(result)
        # BUGFIX: crop the EDGE map for the quality metrics. The original
        # cropped the grayscale image and compared it against the edge
        # template, so SSIM/PSNR compared incommensurable signals.
        match_crop = edged[maxLoc[1]:maxLoc[1] + tH, maxLoc[0]:maxLoc[0] + tW]
        ssim = compare_ssim(match_crop, template)
        psnr = compare_psnr(match_crop, template)
        # Record and visualize whenever a new best correlation is found.
        if found is None or maxVal > found[0]:
            found = (maxVal, maxLoc, r, angle, scale)
            print(maxVal, maxLoc, scale, angle, ssim, psnr)
            # Draw the best bounding box on a BGR copy of the edge map.
            vis = np.dstack([edged, edged, edged])
            cv2.rectangle(vis, (maxLoc[0], maxLoc[1]),
                          (maxLoc[0] + tW, maxLoc[1] + tH), (0, 0, 255), 2)
            cv2.imwrite("Visualize.bmp", vis)
# Re-create the winning scale/rotation on the ORIGINAL (color) image so the
# final crop and visualization match what was searched.
(maxVal, maxLoc, r, angle, scale) = found
resized = imutils.resize(image, width=int(image.shape[1] * scale))
center = (resized.shape[1] // 2, resized.shape[0] // 2)
rotation_matrix = cv2.getRotationMatrix2D(center, angle, 1.0)
rotated_image = cv2.warpAffine(
    resized, rotation_matrix, (resized.shape[1], resized.shape[0]))
# Top-left / bottom-right corners of the matched template region.
# (The original added `highest_matching_value` to every coordinate, but that
# offset is always (0, 0) — a dead addition removed here; values unchanged.)
(startX, startY) = (maxLoc[0], maxLoc[1])
(endX, endY) = (maxLoc[0] + tW, maxLoc[1] + tH)
print(maxVal, scale, angle)
# Save the matched crop, then the full image with the bounding box drawn.
c = rotated_image[startY:endY, startX:endX]
cv2.imwrite('matching4.bmp', c)
cv2.rectangle(rotated_image, (startX, startY), (endX, endY), (0, 0, 255), 2)
cv2.imwrite('matching_rect4.bmp', rotated_image)
# NOTE(review): the trailing cv2.waitKey(0) was removed — no window is ever
# created with cv2.imshow, so blocking on a keypress served no purpose.