
WeChat has released its QR code detection and recognition algorithm (based on C++) and the OpenCV 4.5+ version has integrated this algorithm (the cv::wechat_qrcode::WeChatQRCode class), which supports QR code detection, positioning and decoding. Since loading the model file is required during the initialization of this class (which takes a long time), using the singleton pattern (Singleton) can ensure that only one instance is created globally, avoiding the repeated loading of the model and improving efficiency.
Version requirements: OpenCV 4.5+ (opencv-python / opencv-contrib-python 4.5+)

pip install numpy==1.24.3 -i https://mirrors.aliyun.com/pypi/simple/
pip install opencv-python==4.5.5.64 opencv-contrib-python==4.5.5.64 -i https://mirrors.aliyun.com/pypi/simple/

1. Similarly, read the image, perform some preprocessing at different scales, and enhance the image quality.
# 递归遍历文件夹
for root, dirs, files in os.walk(folder_path):
for filename in files:
if filename.lower().endswith('.jpg'):
paper_num += 1
file_path = os.path.join(root, filename)
ori_img = cv2.imread(file_path, cv2.IMREAD_ANYCOLOR) # 目标图像
if len(ori_img.shape) == 3 and ori_img.shape[2] == 3:
gray_img = cv2.cvtColor(ori_img, cv2.COLOR_BGR2GRAY)
elif len(ori_img.shape) == 2:
gray_img = ori_img.copy()
else:
print('error: bad image')
return
ori_bar_area = ori_img[bar_range[1]:bar_range[3], bar_range[0]:bar_range[2]]
bar_area = gray_img[bar_range[1]:bar_range[3], bar_range[0]:bar_range[2]]
find = False
lt_sacle = [1.0, 1.25, 1.5, 2.0, 2.25, 2.5, 3.0]
for scale_factor in lt_sacle:
resized_image = cv2.resize(bar_area, None, fx=scale_factor, fy=scale_factor, interpolation=cv2.INTER_LINEAR)
blurred_image = cv2.GaussianBlur(resized_image, (7, 7), 0)
# blurred_image = cv2.medianBlur(blurred_image, 5)
#g_threshold, binary = cv2.threshold(resized_image, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
2.Model files: The WeChat algorithm requires 4 pre-trained model files (which can be obtained from the OpenCV source code or official channels):
detect.prototxt
detect.caffemodel
sr.prototxt
sr.caffemodel

3.Using the model file, create a singleton for the detector.
# 使用
detector = get_wechat_qrcode_detector("./model/detect.prototxt",'./model/detect.caffemodel','./model/sr.prototxt', './model/sr.caffemodel')
def get_wechat_qrcode_detector(detect_prototxt, detect_caffemodel, sr_prototxt, sr_caffemodel):
global _detector
if _detector is None:
_detector = cv2.wechat_qrcode_WeChatQRCode(
detect_prototxt, detect_caffemodel,
sr_prototxt, sr_caffemodel
)
return _detector

4. Use the detector to identify and visualize the results.
results, points = detector.detectAndDecode(blurred_image)
if len(points) > 0: # 若检测到二维码
for i in range(len(points)):
# points[i] 是当前二维码的4个顶点坐标(形状:(4, 2))
# 将坐标转为整数并reshape为适合polylines的格式
pts = np.int32(points[i]).reshape(-1, 1, 2)
# 绘制闭合轮廓(绿色,线宽2)
cv2.polylines( ori_bar_area, [pts], isClosed=True, color=(0, 255, 0), thickness=2)
if results:
x, y = (10,140)
cv2.putText( ori_bar_area,results[i], (x, y), cv2.FONT_HERSHEY_SIMPLEX,0.6,(0, 0, 255), 1 )
show_image(ori_bar_area)
find = True
break

5.Statistical results of recognition accuracy and the time consumption

Compare with pyzbar, the same group of imgs:

import cv2
import os
import numpy as np
import time
# Module-level variable: the single, process-wide detector instance.
# Left as None until get_wechat_qrcode_detector() lazily creates it
# (loading the Caffe models is slow, so it is done at most once).
_detector = None
# Helper: display an image in a blocking preview window.
def show_image(img):
    """Pop up a blocking preview window for *img*; any key press closes it."""
    window_name = "1"
    cv2.namedWindow(window_name, cv2.WINDOW_AUTOSIZE)
    cv2.imshow(window_name, img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def get_wechat_qrcode_detector(detect_prototxt, detect_caffemodel, sr_prototxt, sr_caffemodel):
    """Return the process-wide WeChat QR-code detector, creating it on first call.

    Model loading is expensive, so the instance is cached in the
    module-level ``_detector`` variable (lazy singleton): later calls
    ignore the arguments and return the already-built detector.
    """
    global _detector
    if _detector is not None:
        return _detector
    _detector = cv2.wechat_qrcode_WeChatQRCode(
        detect_prototxt, detect_caffemodel, sr_prototxt, sr_caffemodel
    )
    return _detector
def _load_gray(file_path):
    """Read an image; return ``(color_img, gray_img)`` or ``(None, None)`` on failure.

    BUGFIX: ``cv2.imread`` returns None for unreadable files — the original
    code dereferenced ``.shape`` unconditionally and would crash.
    """
    ori_img = cv2.imread(file_path, cv2.IMREAD_ANYCOLOR)  # target image
    if ori_img is None:
        return None, None
    if len(ori_img.shape) == 3 and ori_img.shape[2] == 3:
        return ori_img, cv2.cvtColor(ori_img, cv2.COLOR_BGR2GRAY)
    if len(ori_img.shape) == 2:
        return ori_img, ori_img.copy()
    # Unexpected channel layout (e.g. 4-channel): treat as unreadable.
    return None, None


def _try_decode(detector, bar_area, ori_bar_area):
    """Try decoding ``bar_area`` at several upscale factors.

    Draws the detected QR outline(s) and decoded text onto ``ori_bar_area``
    (in place) and returns True on the first scale that yields a detection;
    returns False if no scale works.
    """
    scale_factors = [1.0, 1.25, 1.5, 2.0, 2.25, 2.5, 3.0]
    for scale_factor in scale_factors:
        resized_image = cv2.resize(bar_area, None, fx=scale_factor, fy=scale_factor,
                                   interpolation=cv2.INTER_LINEAR)
        # Light denoising before detection; thresholding was tried and disabled.
        blurred_image = cv2.GaussianBlur(resized_image, (7, 7), 0)
        blurred_image = cv2.medianBlur(blurred_image, 5)
        results, points = detector.detectAndDecode(blurred_image)
        if len(points) > 0:  # a QR code was detected at this scale
            for i in range(len(points)):
                # points[i] holds the 4 corner coordinates, shape (4, 2);
                # cast to int and reshape for cv2.polylines.
                pts = np.int32(points[i]).reshape(-1, 1, 2)
                # Draw the closed outline (green, thickness 2).
                cv2.polylines(ori_bar_area, [pts], isClosed=True,
                              color=(0, 255, 0), thickness=2)
                # BUGFIX: guard the index — detectAndDecode can return fewer
                # decoded strings than detected point sets.
                if i < len(results):
                    cv2.putText(ori_bar_area, results[i], (10, 140),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 1)
            return True
    return False


def main():
    """Batch-decode QR codes from every .jpg under ``./imgs8``.

    For each file, crops a fixed template region, attempts multi-scale
    detection, and prints running accuracy plus per-file / total timings.
    """
    folder_path = './imgs8'
    # Template anchor region containing the QR code: (x1, y1, x2, y2).
    bar_range = (1377, 152, 1536, 293)
    paper_num = 0
    error_paper_num = 0
    detector = get_wechat_qrcode_detector(
        "./model/detect.prototxt", './model/detect.caffemodel',
        './model/sr.prototxt', './model/sr.caffemodel')
    start = time.perf_counter()
    # Recursively walk the folder.
    for root, dirs, files in os.walk(folder_path):
        for filename in files:
            if not filename.lower().endswith('.jpg'):
                continue
            paper_num += 1
            file_path = os.path.join(root, filename)
            ori_img, gray_img = _load_gray(file_path)
            if ori_img is None:
                # BUGFIX: skip the bad file and count it as a failure instead
                # of returning and aborting the whole batch.
                print('error: bad image')
                error_paper_num += 1
                continue
            ori_bar_area = ori_img[bar_range[1]:bar_range[3], bar_range[0]:bar_range[2]]
            bar_area = gray_img[bar_range[1]:bar_range[3], bar_range[0]:bar_range[2]]
            single_start = time.perf_counter()
            if not _try_decode(detector, bar_area, ori_bar_area):
                error_paper_num += 1
            # Running statistics, printed after every file (progress report).
            print('识别总份数: ', paper_num)
            print('未识别到二维码份数: ', error_paper_num)
            print('二维码识别率: ', str(1 - error_paper_num / paper_num))
            single_end = time.perf_counter()
            print(f"单份识别耗时耗时:{single_end - single_start:.6f} 秒")
            print()
    end = time.perf_counter()
    print(f"总耗时:{end - start:.6f} 秒")
if __name__ == '__main__':
    main()

原创声明:本文系作者授权腾讯云开发者社区发表,未经许可,不得转载。
如有侵权,请联系 cloudcommunity@tencent.com 删除。
原创声明:本文系作者授权腾讯云开发者社区发表,未经许可,不得转载。
如有侵权,请联系 cloudcommunity@tencent.com 删除。