import cv2  # OpenCV loads images in BGR channel order
import numpy as np
import matplotlib.pyplot as plt  # Matplotlib expects RGB
%matplotlib inline
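Because the two libraries use different channel orders, an OpenCV image should be converted before it is handed to Matplotlib. A minimal sketch (it simply reuses the lena.jpg file used later in this notebook):
# Sketch: convert BGR -> RGB before plotting an OpenCV image with Matplotlib
img_bgr = cv2.imread('lena.jpg')                    # loaded in BGR order
img_show = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB) # reorder channels for Matplotlib
plt.imshow(img_show)
plt.axis('off')
plt.show()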
Template matching
- Template matching works much like convolution: the template slides over the source image starting from the origin, and at each position we compute how different the template is from the region of the image it currently covers. OpenCV provides six ways to measure this difference. Each score is written into a result matrix, which is returned as the output. If the source image is A×B and the template is a×b, the result matrix has size (A-a+1)×(B-b+1) (a small sketch verifying this follows below).
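To make the output-size rule concrete, here is a small sketch with synthetic arrays; the sizes A, B, a, b are arbitrary values chosen just for illustration:
# Sketch: matchTemplate returns an (A-a+1) x (B-b+1) result
A, B = 100, 80                                  # source size (rows x cols)
a, b = 30, 20                                   # template size
src = np.random.rand(A, B).astype(np.float32)   # random "image"
tpl = src[10:10+a, 5:5+b].copy()                # cut a patch out of the source as the template
res = cv2.matchTemplate(src, tpl, cv2.TM_SQDIFF)
print(res.shape)                                # (71, 61) == (A - a + 1, B - b + 1)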
Input images: face.jpg (template) and lena.jpg (source).
# Template matching
img = cv2.imread('lena.jpg',0)
template = cv2.imread('face.jpg',0)
h,w = template.shape[:2]
Check the relevant shapes:
img.shape
template.shape
- TM_SQDIFF: squared difference; the smaller the value, the better the match
- TM_CCORR: cross-correlation; the larger the value, the better the match
- TM_CCOEFF: correlation coefficient; the larger the value, the better the match
- TM_SQDIFF_NORMED: normalized squared difference; the closer to 0, the better the match
- TM_CCORR_NORMED: normalized cross-correlation; the closer to 1, the better the match
- TM_CCOEFF_NORMED: normalized correlation coefficient; the closer to 1, the better the match
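To see what one of these scores actually measures, the squared-difference score at a single position can be reproduced by hand with NumPy. A sketch using the img and template loaded above (the position (0, 0) is an arbitrary example):
# Sketch: TM_SQDIFF at (x, y) is the sum of squared pixel differences over the template
x, y = 0, 0
patch = img[y:y+h, x:x+w].astype(np.float64)
manual = np.sum((patch - template.astype(np.float64)) ** 2)
res = cv2.matchTemplate(img, template, cv2.TM_SQDIFF)
print(manual, res[y, x])   # the two values should agree up to floating-point rounding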
methods = ['cv2.TM_CCOEFF', 'cv2.TM_CCOEFF_NORMED', 'cv2.TM_CCORR',
           'cv2.TM_CCORR_NORMED', 'cv2.TM_SQDIFF', 'cv2.TM_SQDIFF_NORMED']
res = cv2.matchTemplate(img, template, cv2.TM_SQDIFF)
res.shape
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
min_val, max_val, min_loc, max_loc
for meth in methods:
    img2 = img.copy()

    # eval converts the method name string into the actual OpenCV constant
    method = eval(meth)
    print(method)

    res = cv2.matchTemplate(img, template, method)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)

    # For TM_SQDIFF and TM_SQDIFF_NORMED the best match is the minimum; otherwise it is the maximum
    if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
        top_left = min_loc
    else:
        top_left = max_loc
    bottom_right = (top_left[0] + w, top_left[1] + h)

    # Draw a rectangle around the detected region
    cv2.rectangle(img2, top_left, bottom_right, 255, 2)

    plt.subplot(121), plt.imshow(res, cmap='gray')
    plt.xticks([]), plt.yticks([])  # hide the axis ticks
    plt.subplot(122), plt.imshow(img2, cmap='gray')
    plt.xticks([]), plt.yticks([])
    plt.suptitle(meth)
    plt.show()
The results:
Matching multiple objects
Input images: mario_coin.jpg (template) and mario.jpg (source).
img_rgb = cv2.imread('mario.jpg')
img_gray = cv2.cvtColor(img_rgb,cv2.COLOR_BGR2GRAY)
template = cv2.imread('mario_coin.jpg',0)
h, w = template.shape[:2]
res = cv2.matchTemplate(img_gray, template, cv2.TM_CCOEFF_NORMED)
threshold = 0.8
# keep only the locations whose match score is at least 80%
loc = np.where(res >= threshold)
for pt in zip(*loc[::-1]):  # * unpacks the index arrays; [::-1] swaps (row, col) into (x, y)
    bottom_right = (pt[0] + w, pt[1] + h)
    cv2.rectangle(img_rgb, pt, bottom_right, (0, 0, 255), 2)
cv2.imshow('img_rgb', img_rgb)
cv2.waitKey(0)
Result:
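A note on the indexing in the loop above: np.where returns a tuple of (row, column) index arrays, while cv2.rectangle expects (x, y) points, so loc[::-1] swaps the two arrays and zip(*...) pairs them up into points. A toy sketch (the scores array is made up just for illustration):
# Toy example of how (row, col) indices become (x, y) points
scores = np.array([[0.10, 0.90],
                   [0.95, 0.20]])
loc = np.where(scores >= 0.8)    # (rows, cols) -> (array([0, 1]), array([1, 0]))
print(list(zip(*loc[::-1])))     # (x, y) pairs: (1, 0) and (0, 1)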