When doing template matching, Code 1 recognizes the digits accurately, but with the camera angle and camera parameters unchanged and the car not moved, Code 2 fails to detect them in the image; the camera has to be lifted up before the digit templates are recognized.
Code 1
# This work is licensed under the MIT license.
# Copyright (c) 2013-2023 OpenMV LLC. All rights reserved.
# https://github.com/openmv/openmv/blob/master/LICENSE
#
# Template Matching Example - Normalized Cross Correlation (NCC)
#
# This example shows off how to use the NCC feature of your OpenMV Cam to match
# image patches to parts of an image... except for extremely controlled environments
# NCC is not all that useful.
#
# WARNING: NCC support needs to be reworked! As of right now this feature needs
# a lot of work to be made into something useful. This script will remain to show
# that the functionality exists, but, in its current state is inadequate.

import time
import sensor
import image
from image import SEARCH_EX

# from image import SEARCH_DS

# Reset sensor
'''sensor.reset()
sensor.set_vflip(True)
sensor.set_hmirror(True)

# Set sensor settings
sensor.set_contrast(1)
sensor.set_gainceiling(16)
# Max resolution for template matching with SEARCH_EX is QQVGA
sensor.set_framesize(sensor.QQVGA)
# You can set windowing to reduce the search image.
# sensor.set_windowing(((640-80)//2, (480-60)//2, 80, 60))
sensor.set_pixformat(sensor.GRAYSCALE)'''

sensor.reset()
sensor.set_vflip(True)
sensor.set_hmirror(True)
sensor.set_framesize(sensor.QQVGA)
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.skip_frames(50)
sensor.set_contrast(0)
sensor.set_auto_whitebal(False)  # disable auto white balance
sensor.set_auto_gain(False)
sensor.set_gainceiling(16)

# Load templates.
# Templates should be small (e.g. 32x32 pixels) grayscale images.
template = image.Image("32.pgm")
template1 = image.Image("31.pgm")

clock = time.clock()

# Run template matching
while True:
    clock.tick()
    img = sensor.snapshot()

    # find_template(template, threshold, [roi, step, search])
    # ROI: The region of interest tuple (x, y, w, h).
    # Step: The loop step used (y+=step, x+=step); use a bigger step to make it faster.
    # Search is either image.SEARCH_EX for exhaustive search or image.SEARCH_DS for diamond search.
    #
    # Note1: ROI has to be smaller than the image and bigger than the template.
    # Note2: In diamond search, step and ROI are both ignored.
    r = img.find_template(template, 0.70, step=4, search=SEARCH_EX)  # , roi=(10, 0, 60, 60))
    if r:
        img.draw_rectangle(r)
        print(r[2], r[3])

    r = img.find_template(template1, 0.70, step=4, search=SEARCH_EX)  # , roi=(10, 0, 60, 60))
    if r:
        img.draw_rectangle(r)
        print(22)

    print(clock.fps())
Code 2
import sensor, image, time, pyb
import display
from image import SEARCH_EX, SEARCH_DS
from pyb import UART

# ------------------ Hardware initialization ------------------
sensor.reset()
#sensor.set_vflip(True)
#sensor.set_hmirror(True)
sensor.set_auto_gain(False)
sensor.set_auto_whitebal(False)

lcd = display.SPIDisplay()  # LCD display

# ------------------ Test ------------------

# ------------------ Mode definitions ------------------
MODE_INIT = 0       # initial state
MODE_LINE = 1       # line-following mode
MODE_NUM_RECOG = 2  # number-recognition mode
current_mode = MODE_INIT

# ------------------ Communication protocol ------------------
HEADER = 0xAA
FOOTER = 0x55
CMD_LINE_DATA = 0x01
CMD_TURN_LEFT = 0x02
CMD_TURN_RIGHT = 0x03
CMD_RECOGNIZING = 0x04

# ------------------ Globals ------------------
uart = pyb.UART(3, 115200, timeout_char=1000)
global target_num
target_num = 0  # target ward number

# 7 line-detection regions (x, y, w, h)
line_regions = [
    # original QVGA (320x240) → new QQVGA (160x120)
    (28, 0, 15, 10),   # originally (55, 35, 30, 20)
    (43, 0, 15, 10),   # originally (85, 35, 30, 20)
    (58, 0, 15, 10),   # originally (115, 35, 30, 20)
    (73, 0, 15, 10),   # originally (145, 35, 30, 20)
    (88, 0, 15, 10),   # originally (175, 35, 30, 20)
    (103, 0, 15, 10),  # originally (205, 35, 30, 20)
    (118, 0, 15, 10)   # originally (235, 35, 30, 20)
]

# Red threshold (LAB color space)
# Tune these values for the red line (L: 0-100, A: -128-127, B: -128-127)
RED_THRESHOLD = (20, 100,   # L range
                 10, 100,   # A range (positive means red)
                 -80, 100)  # B range

# ------------------ Template loading ------------------
def load_templates():
    global templates
    global template1, template2, template3, template4
    global template5, template7, template6, template8
    template1 = image.Image("1.pgm")
    template2 = image.Image("2.pgm")
    template3 = image.Image("3.pgm")
    template4 = image.Image("4.pgm")
    template5 = image.Image("5.pgm")
    template7 = image.Image("7.pgm")
    template6 = image.Image("6.pgm")
    template8 = image.Image("8.pgm")
    '''global templates1, templates2
    templates1 = ["1L.pgm", "1R.pgm"]  # multiple templates per digit
    templates2 = ["2L.pgm", "2R.pgm"]  # multiple templates per digit'''
    global templates5, templates6, templates7, templates8, templates3, templates4
    #templates3 = ["3L.pgm", "3R.pgm", "3LL.pgm", "3RR.pgm", "33.pgm"]  # multiple templates per digit
    templates3 = ["31.pgm", "32.pgm", "33.pgm"]                     # multiple templates per digit
    templates4 = ["41.pgm", "42.pgm", "43.pgm", "44.pgm", "4.pgm"]  # multiple templates per digit
    templates5 = ["5.pgm", "5.pgm", "5.pgm", "5.pgm", "5.pgm"]      # multiple templates per digit
    templates6 = ["6.pgm", "6.pgm", "6.pgm", "6.pgm", "6.pgm"]      # multiple templates per digit
    templates7 = ["7.pgm", "7.pgm", "7.pgm", "7.pgm", "7.pgm"]      # multiple templates per digit
    templates8 = ["81.pgm", "82.pgm", "8.pgm", "8.pgm", "8.pgm"]    # multiple templates per digit

# ------------------ Per-mode sensor configuration ------------------
def set_sensor_config(mode):
    if mode == MODE_LINE:
        # Line-following configuration
        #sensor.reset()
        #sensor.set_vflip(True)
        #sensor.set_hmirror(True)
        sensor.set_pixformat(sensor.RGB565)
        #sensor.set_framesize(sensor.QQVGA)  # 320x240
        #sensor.skip_frames(time=50)
        sensor.set_contrast(-1)  # contrast -1 (range -3..+3, 0 is default; -1 boosts dark detail)
        sensor.set_auto_whitebal(False)                     # disable auto white balance
        sensor.set_auto_gain(False)                         # disable auto gain
        sensor.set_auto_exposure(False, exposure_us=25000)  # manual exposure time
        #sensor.set_windowing(0, 10, 150, 100)
    elif mode == MODE_NUM_RECOG:
        # Number-recognition configuration
        sensor.reset()
        sensor.set_vflip(True)
        sensor.set_hmirror(True)
        sensor.set_framesize(sensor.QQVGA)
        sensor.set_pixformat(sensor.GRAYSCALE)
        sensor.skip_frames(50)
        sensor.set_contrast(0)
        sensor.set_auto_whitebal(False)  # disable auto white balance
        sensor.set_auto_gain(False)
        sensor.set_gainceiling(16)

# ------------------ Core functions ------------------
def initial_target_recognition():
    """Initial target recognition (full-frame search)."""
    set_sensor_config(MODE_NUM_RECOG)
    timeout = time.ticks_ms()
    clock = time.clock()
    uart = UART(3, 115200, timeout_char=1000)
    # Run template matching
    best_num = 0
    while True:
        clock.tick()
        img = sensor.snapshot()
        lcd.write(img)
        if best_num == 0:
            clock.tick()
            img = sensor.snapshot()
            r1 = img.find_template(template1, 0.70, step=4, search=SEARCH_EX)  # , roi=(10, 0, 60, 60))
            if r1:
                img.draw_rectangle(r1)
                lcd.write(img)
                best_num = 1
            r2 = img.find_template(template2, 0.6, step=4, search=SEARCH_EX)  # , roi=(10, 0, 60, 60))
            if r2:
                img.draw_rectangle(r2)
                lcd.write(img)
                best_num = 2
            r3 = img.find_template(template3, 0.73, step=4, search=SEARCH_EX)  # , roi=(10, 0, 60, 60))
            if r3:
                img.draw_rectangle(r3)
                lcd.write(img)
                best_num = 3
            r4 = img.find_template(template4, 0.7, step=4, search=SEARCH_EX)  # , roi=(10, 0, 60, 60))
            if r4:
                img.draw_rectangle(r4)
                lcd.write(img)
                best_num = 4
            r5 = img.find_template(template5, 0.7, step=4, search=SEARCH_EX)  # , roi=(10, 0, 60, 60))
            if r5:
                img.draw_rectangle(r5)
                lcd.write(img)
                best_num = 5
            r6 = img.find_template(template6, 0.7, step=3, search=SEARCH_EX)  # , roi=(10, 0, 60, 60))
            if r6:
                img.draw_rectangle(r6)
                lcd.write(img)
                best_num = 6
            r7 = img.find_template(template7, 0.7, step=4, search=SEARCH_EX)  # , roi=(10, 0, 60, 60))
            if r7:
                img.draw_rectangle(r7)
                lcd.write(img)
                best_num = 7
            r8 = img.find_template(template8, 0.7, step=4, search=SEARCH_EX)  # , roi=(10, 0, 60, 60))
            if r8:
                img.draw_rectangle(r8)
                lcd.write(img)
                best_num = 8
        if best_num != 0:
            uart.write(bytearray([HEADER, CMD_RECOGNIZING, best_num, FOOTER]))
            return best_num
        if time.ticks_diff(time.ticks_ms(), timeout) > 10000:  # 10 s timeout
            return 0

def line_follow_process():
    set_sensor_config(MODE_LINE)
    img = sensor.snapshot()  # capture the frame to scan
    bits = []
    line_detected = False

    # Check the 7 line-detection regions
    for idx, roi in enumerate(line_regions):
        #lcd.write(img)
        # Look for the red line inside the ROI
        # Adjusted parameters (lower sensitivity)
        blobs = img.find_blobs([RED_THRESHOLD], roi=roi, merge=True,
                               pixels_threshold=10,  # raised minimum pixel count (was 50)
                               area_threshold=50,    # raised minimum blob area (was 50)
                               margin=10)            # merge margin (was 10)
        detected = 1 if blobs else 0
        bits.append(detected)

        # Draw the detection region and its state
        color = (0, 255, 0) if detected else (255, 0, 0)
        img.draw_rectangle(roi, color=color)
        # Show the detection state at the region center
        text_x = roi[0] + roi[2] // 2 - 5
        text_y = roi[1] + roi[3] // 2 - 5
        img.draw_string(text_x, text_y, str(detected), color=(255, 255, 255))
        lcd.write(img)

        if detected:
            line_detected = True

    # Send the line-following data
    if line_detected:
        dec_val = int(''.join(map(str, bits)), 2)
        if dec_val > 0:
            uart.write(bytearray([HEADER, CMD_LINE_DATA, dec_val, FOOTER]))
    else:
        dec_val = 0
        uart.write(bytearray([HEADER, CMD_LINE_DATA, dec_val, FOOTER]))
    print(dec_val)

    # Junction detected (line seen in every region)
    if sum(bits) == 7:
        global current_mode
        current_mode = MODE_NUM_RECOG

def number_recognize():
    """Number-recognition mode handler."""
    set_sensor_config(MODE_NUM_RECOG)
    #sensor.set_windowing(0, 0, 160, 60)
    img = sensor.snapshot()
    #img = sensor.snapshot().binary([(0, 64)])
    # Recognize the digit in the left/right regions
    r = None
    if target_num > 2:
        while r is None:
            clock.tick()
            img = sensor.snapshot()
            lcd.write(img)
            '''if target_num == 1:
                for t in templates1:
                    template = image.Image(t)
                    r = img.find_template(template, 0.70, step=4, search=SEARCH_EX)
                    if r:
                        img.draw_rectangle(r)
                        lcd.write(img)
            elif target_num == 2:
                for t in templates2:
                    template = image.Image(t)
                    r = img.find_template(template, 0.40, step=4, search=SEARCH_EX)
                    if r:
                        img.draw_rectangle(r)
                        lcd.write(img)'''
            if target_num == 3:
                for t in templates3:
                    template = image.Image(t)
                    r = img.find_template(template, 0.70, step=4, search=SEARCH_EX)
                    '''rr1 = img.find_template(templates3[0], 0.70, step=4, search=SEARCH_EX)  # , roi=(10, 0, 60, 60))
                    if rr1:
                        img.draw_rectangle(rr1)
                    rr2 = img.find_template(templates3[1], 0.7, step=4, search=SEARCH_EX)  # , roi=(10, 0, 60, 60))
                    if rr2:
                        img.draw_rectangle(rr2)'''
                    if r:
                        print(r[2], r[3])
                        img.draw_rectangle(r)
                        lcd.write(img)
            elif target_num == 4:
                for t in templates4:
                    template = image.Image(t)
                    r = img.find_template(template, 0.70, step=4, search=SEARCH_EX)
                    if r:
                        print(44444)
                        img.draw_rectangle(r)
                        lcd.write(img)
            elif target_num == 5:
                for t in templates5:
                    template = image.Image(t)
                    r = img.find_template(template, 0.72, step=4, search=SEARCH_EX)
                    if r:
                        img.draw_rectangle(r)
                        lcd.write(img)
            elif target_num == 6:
                for t in templates6:
                    template = image.Image(t)
                    r = img.find_template(template, 0.70, step=4, search=SEARCH_EX)
                    if r:
                        img.draw_rectangle(r)
                        lcd.write(img)
            elif target_num == 7:
                for t in templates7:
                    template = image.Image(t)
                    r = img.find_template(template, 0.70, step=4, search=SEARCH_EX)
                    if r:
                        img.draw_rectangle(r)
                        lcd.write(img)
            elif target_num == 8:
                for t in templates8:
                    template = image.Image(t)
                    r = img.find_template(template, 0.70, step=4, search=SEARCH_EX)
                    if r:
                        img.draw_rectangle(r)
                        lcd.write(img)
            elif target_num == 0:
                r = None

    # Send the turn command
    #if target_num in found_nums:
    #turn_cmd = CMD_TURN_LEFT if found_nums.index(target_num) == 0 else CMD_TURN_RIGHT
    if r is not None:  # only decide when a match succeeded
        print(r[0])
        if r[0] >= 65:
            turn_cmd = CMD_TURN_RIGHT  # right
        elif 0 < r[0] < 65:
            turn_cmd = CMD_TURN_LEFT   # left
        else:
            turn_cmd = CMD_LINE_DATA
    elif target_num == 1:
        turn_cmd = CMD_TURN_LEFT   # left
    elif target_num == 2:
        turn_cmd = CMD_TURN_RIGHT  # right
    uart.write(bytearray([HEADER, turn_cmd, 0, FOOTER]))

    # Return to line-following mode
    global current_mode
    current_mode = MODE_LINE
    set_sensor_config(MODE_LINE)

# ------------------ Main program initialization ------------------
templates = load_templates()
set_sensor_config(MODE_LINE)

# ------------------ Main loop ------------------
clock = time.clock()
while True:
    clock.tick()

    # State machine
    if current_mode == MODE_INIT:
        target_num = initial_target_recognition()
        if target_num > 0:
            current_mode = MODE_LINE
            set_sensor_config(MODE_LINE)
        else:
            current_mode = MODE_INIT
    elif current_mode == MODE_LINE:
        line_follow_process()
    elif current_mode == MODE_NUM_RECOG:
        number_recognize()

    # Show the frame rate
    img = sensor.snapshot()
    img.draw_string(5, 20, "FPS:%.1f" % clock.fps(), color=(255, 0, 0))
    # Show the current mode
    mode_text = "LINE" if current_mode == MODE_LINE else "NUMBER"
    img.draw_string(250, 5, mode_text, color=(0, 255, 0))

    # Short delay
    pyb.delay(10)
    print(clock.fps())
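For reference, here is a minimal isolation sketch (not part of either listing above): it applies the same sensor settings Code 2 uses in MODE_NUM_RECOG, loads the same "31.pgm"/"32.pgm" templates that Code 1 matches reliably, and sweeps the NCC threshold downward. The threshold ladder is an arbitrary diagnostic choice: if the templates only start matching well below 0.70 under this configuration (or only once the camera is lifted), the lost correlation comes from the sensor setup and viewing distance rather than from the state-machine logic in Code 2.

# Diagnostic sketch: Code 2's MODE_NUM_RECOG sensor settings, Code 1's templates,
# with a descending NCC threshold sweep to gauge how close each template gets to 0.70.
import time
import sensor
import image
from image import SEARCH_EX

sensor.reset()
sensor.set_vflip(True)
sensor.set_hmirror(True)
sensor.set_framesize(sensor.QQVGA)      # SEARCH_EX template matching needs QQVGA or smaller
sensor.set_pixformat(sensor.GRAYSCALE)  # NCC works on grayscale images
sensor.skip_frames(50)
sensor.set_contrast(0)
sensor.set_auto_whitebal(False)
sensor.set_auto_gain(False)
sensor.set_gainceiling(16)

# Template files assumed to be the ones already on the camera's flash (from Code 1).
templates = {"31.pgm": image.Image("31.pgm"),
             "32.pgm": image.Image("32.pgm")}

clock = time.clock()
while True:
    clock.tick()
    img = sensor.snapshot()
    for name, t in templates.items():
        # Try progressively looser thresholds and report the first one that matches.
        for thr in (0.70, 0.60, 0.50, 0.40):
            r = img.find_template(t, thr, step=4, search=SEARCH_EX)
            if r:
                img.draw_rectangle(r)
                print(name, "matched at threshold", thr, r)
                break
    print(clock.fps())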