Why does this code throw an error when it runs?
# Snapshot Example
#
# Note: You will need an SD card to run this example.
#
# You can use your OpenMV Cam to save image files.

import sensor, image, pyb, time, lcd, os
from pyb import Pin, LED

KEY1 = Pin('P4', Pin.IN, Pin.PULL_UP)

RED_LED_PIN = 1
GREEN_LED_PIN = 2
BLUE_LED_PIN = 3

NUM_SUBJECTS = 4        # number of different subjects in the image library
NUM_SUBJECTS_IMGS = 20  # 20 sample images per subject

sensor.reset()                          # Initialize the camera sensor.
sensor.set_pixformat(sensor.GRAYSCALE)  # or sensor.RGB565
sensor.set_framesize(sensor.QQVGA)      # or others
#sensor.set_framesize(sensor.QQVGA2)
sensor.set_windowing((128, 160))        # Special 128x160 framesize for LCD Shield.
sensor.skip_frames(10)                  # Let new settings take effect.
sensor.skip_frames(time = 2000)
lcd.init()                              # Initialize the lcd screen.
#sensor.set_contrast(3)
#sensor.set_gainceiling(16)
#sensor.set_framesize(sensor.VGA)
#sensor.set_windowing((320, 240))
#sensor.set_pixformat(sensor.GRAYSCALE)
lcd.display(sensor.snapshot())

num = 1  # Index of the person being photographed: person 1's images go to folder s1,
         # person 2's to s2, and so on. Change this value each time the subject changes.
n = 20   # Number of pictures to take per person.
# Take n pictures in a row, one every 3 seconds.

def min(pmin, a, s):  # note: this shadows the built-in min()
    global b
    if a < pmin:
        pmin = a
        b = s
    return pmin

while(True):
    if KEY1.value() == 1:
        #print(KEY1.value())
        while(n):
            # Red LED on
            pyb.LED(RED_LED_PIN).on()
            sensor.skip_frames(time = 3000)  # Give the user 3 s to get ready.
            # Red LED off, blue LED on
            pyb.LED(RED_LED_PIN).off()
            pyb.LED(BLUE_LED_PIN).on()
            # Save the captured image to the SD card
            print(n)
            sensor.snapshot().save("singtown/s%s/%s.pgm" % (num, n))  # or "example.bmp" (or others)
            lcd.display(sensor.snapshot())
            n -= 1
        pyb.LED(BLUE_LED_PIN).off()
        print("Done! Reset the camera to see the saved image.")
        #num += 1

    if KEY1.value() == 0:
        pyb.LED(BLUE_LED_PIN).on()
        # Load the Haar cascade.
        # By default this uses all stages; fewer stages are faster but less accurate.
        face_cascade = image.HaarCascade("frontalface", stages=25)
        # image.HaarCascade(path, stages=Auto) loads a Haar cascade (a binary file).
        # For a custom model, pass the file path; built-in models such as "frontalface"
        # (faces) or "eye" (eyes) can also be used. If stages is not given, the default
        # is used; a smaller stages value speeds up matching but lowers accuracy.
        print(face_cascade)

        # FPS clock
        clock = time.clock()
        while (True):
            clock.tick()
            img = sensor.snapshot()  # Take a picture.
            # Find objects.
            # Note: Lower scale factor scales-down the image more and detects smaller objects.
            # Higher threshold results in a higher detection rate, with more false positives.
            objects = img.find_features(face_cascade, threshold=0.75, scale=1.35)
            # image.find_features(cascade, threshold=0.5, scale=1.5): a larger threshold
            # makes matching faster but raises the error rate; scale scales the size of
            # the matched features.

            # Draw a rectangle around each detected face.
            for r in objects:
                img.draw_rectangle(r)

            d0 = img.find_lbp((0, 0, img.width(), img.height()))
            # d0 is the LBP descriptor of the current face.
            img = None
            pmin = 999999
            b = 0
            for s in range(1, NUM_SUBJECTS+1):
                dist = 0
                for i in range(2, NUM_SUBJECTS_IMGS+1):
                    img = image.Image("singtown/s%d/%d.pgm" % (s, i))
                    d1 = img.find_lbp((0, 0, img.width(), img.height()))
                    # d1 is the LBP descriptor of image i in folder s.
                    dist += image.match_descriptor(d0, d1)  # distance between the sample image and the detected face
                print("Average dist for subject %d: %d" % (s, dist/NUM_SUBJECTS_IMGS))
                pmin = min(pmin, dist/NUM_SUBJECTS_IMGS, s)  # the smaller the distance, the closer the match
                print(pmin)
            print(num)

            if pmin <= 20000:
                pyb.LED(BLUE_LED_PIN).off()
                pyb.LED(GREEN_LED_PIN).on()
                img.draw_string(64, 144, "unlock")
                time.sleep(5)
                pyb.LED(RED_LED_PIN).on()
                img.draw_string(64, 144, "lock")
            else:
                pyb.LED(RED_LED_PIN).on()
                img.draw_string(64, 144, "lock")
You did not follow the steps in the video: you never collected the sample images.
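In other words, the recognition branch calls image.Image("singtown/s%d/%d.pgm" % (s, i)) for every sample picture; if those pictures were never captured to the SD card, the file does not exist and the call raises an OSError. Below is a minimal sketch, assuming the same singtown/s1 ... s4 folder layout as the script above; samples_ready() is a hypothetical helper, not part of the original code, that checks the SD card before recognition runs:

import os

NUM_SUBJECTS = 4        # same values as in the script above (assumed)
NUM_SUBJECTS_IMGS = 20

def samples_ready():
    # Return True only if every subject folder exists and holds enough sample images.
    try:
        for s in range(1, NUM_SUBJECTS + 1):
            files = os.listdir("singtown/s%d" % s)
            if len(files) < NUM_SUBJECTS_IMGS:
                return False
        return True
    except OSError:
        # Folder is missing: the capture step was never run for this subject.
        return False

print("samples ready:", samples_ready())

If this prints False, press KEY1 to run the capture branch first, repeating it for each subject with the appropriate num value, before trying recognition.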