@kidswong999 I followed the steps in the video.
pt6u (@pt6u)
Reputation: 0 | Posts: 7 | Profile views: 52 | Followers: 0 | Following: 0
Posts made by pt6u
-
Help needed with a face recognition problem
Hi, could someone tell me why this face recognition code never outputs the value of num? Nothing shows up on the serial terminal. Many thanks.
import sensor, time, image, pyb

sensor.reset()                          # Initialize the camera sensor.
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.B128X128)
sensor.set_windowing((92, 112))
sensor.skip_frames(10)                  # Let new settings take effect.
sensor.skip_frames(time = 5000)         # Wait 5 s.

num = 0

def min(pmin, a, s):                    # Shadows the built-in min(); records which subject is closest.
    global num
    print(00000000)
    if a < pmin:
        pmin = a
        num = s
    return pmin

sensor.reset()                          # Initialize the camera sensor (again).
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.B128X128)
sensor.set_windowing((92, 112))
sensor.skip_frames(10)                  # Let new settings take effect.
sensor.skip_frames(time = 5000)         # Wait 5 s.

NUM_SUBJECTS = 2                        # Number of subjects in the database.
NUM_SUBJECTS_IMGS = 20                  # Number of images per subject.

img = sensor.snapshot()
d0 = img.find_lbp((0, 0, img.width(), img.height()))
img = None
pmin = 999999
num = 0

for s in range(1, NUM_SUBJECTS + 1):
    dist = 0
    for i in range(2, NUM_SUBJECTS_IMGS + 1):
        img = image.Image("singtown/s%d/%d.pgm" % (s, i))
        d1 = img.find_lbp((0, 0, img.width(), img.height()))
        dist += image.match_descriptor(d0, d1)
    print("Average dist for subject %d: %d" % (s, dist / NUM_SUBJECTS_IMGS))
    pmin = min(pmin, dist / NUM_SUBJECTS_IMGS, s)
    print(pmin)
    if dist / NUM_SUBJECTS_IMGS == pmin:
        num = s

print(num)
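A minimal sketch of the matching loop, not a diagnosis of the script above: it assumes the singtown/sN/*.pgm sample images from the SingTown face-recognition tutorial are present on the SD card, tracks the best match with a plain comparison instead of a helper that shadows the built-in min(), and prints num exactly once after all subjects have been scored.

import sensor, image

sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.B128X128)
sensor.set_windowing((92, 112))
sensor.skip_frames(time = 5000)              # let the sensor settle, then grab the probe image

NUM_SUBJECTS = 2                             # subjects in the singtown/ database (from the post)
NUM_SUBJECTS_IMGS = 20                       # images per subject (from the post)

img = sensor.snapshot()
d0 = img.find_lbp((0, 0, img.width(), img.height()))

pmin = 999999                                # smallest average distance seen so far
num = 0                                      # subject index that produced it

for s in range(1, NUM_SUBJECTS + 1):
    dist = 0
    for i in range(2, NUM_SUBJECTS_IMGS + 1):
        img = image.Image("singtown/s%d/%d.pgm" % (s, i))
        d1 = img.find_lbp((0, 0, img.width(), img.height()))
        dist += image.match_descriptor(d0, d1)
    avg = dist / NUM_SUBJECTS_IMGS
    print("Average dist for subject %d: %d" % (s, avg))
    if avg < pmin:                           # keep the closest subject, no shadowed min() needed
        pmin = avg
        num = s

print(num)                                   # prints once, after every subject has been compared

If nothing at all appears on the serial terminal, it is also worth checking in the IDE whether the script stops with an OSError, which is what happens when the singtown/ image folders are missing from the SD card.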
-
A question about connecting OpenMV to OneNET?
I'd like to capture a single image on the OpenMV and upload it to the OneNET platform. Can this be done on the OpenMV side? If so, can an STM32 be the one that sends the command to trigger the capture?
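A minimal sketch of the OpenMV side only, assuming the STM32 is wired to UART 3 at 115200 baud and requests a frame by sending the single ASCII byte 'S'; the trigger byte and the 4-byte length prefix are assumptions for illustration, not part of any OneNET protocol. The OpenMV replies with a JPEG-compressed snapshot, which the STM32 (or a network module behind it) would then upload to OneNET.

import sensor, ustruct
from pyb import UART

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.skip_frames(time = 2000)

uart = UART(3, 115200)                               # UART 3 (P4/P5 on most OpenMV Cams)

while(True):
    if uart.any() and uart.readchar() == ord('S'):   # 'S' = "send one picture" (assumed command)
        img = sensor.snapshot().compress(quality=50) # JPEG-compress so the transfer stays small
        uart.write(ustruct.pack("<I", img.size()))   # 4-byte little-endian length header
        uart.write(img)                              # then the JPEG bytes themselves

On the STM32 side you would read the 4-byte length first, then that many JPEG bytes, and hand them to whatever module performs the HTTP/MQTT upload to OneNET.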
-
A question about sending data from the OpenMV over the serial port
I want to send the QR-code information that the OpenMV recognizes to an STM32 over the serial port, but only the four x/y/w/h coordinates, nothing else. With the code I modified, all nine fields of the qrcode object are still sent to the serial debug assistant. How should I change it so that only part of the find_qrcodes() result is sent to the other MCU? Thanks!
import sensor, image, time
from pyb import UART
import json

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.skip_frames(time = 2000)
sensor.set_auto_gain(False)  # must turn this off to prevent image washout...
clock = time.clock()

uart = UART(3, 115200)

while(True):
    clock.tick()
    img = sensor.snapshot()
    img.lens_corr(1.8)  # strength of 1.8 is good for the 2.8mm lens.
    for code in img.find_qrcodes():
        img.draw_rectangle(code.rect(), color = (255, 0, 0))
        output_str = json.dumps(code)
        print('you send:', output_str)
        uart.write(output_str + '\n')
    else:
        print('not found!')
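To send only the rectangle, the key point is that json.dumps(code) serializes the whole qrcode object; dumping code.rect() instead (or packing code.x(), code.y(), code.w(), code.h() yourself) sends just the four coordinates. A minimal sketch, assuming the STM32 stays on UART 3 at 115200 baud and that a newline-terminated JSON array is an acceptable framing:

import sensor, image, time, json
from pyb import UART

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.skip_frames(time = 2000)
sensor.set_auto_gain(False)              # keep gain fixed so the QR code stays readable

uart = UART(3, 115200)

while(True):
    img = sensor.snapshot()
    img.lens_corr(1.8)                   # undo lens distortion for the 2.8mm lens
    for code in img.find_qrcodes():
        img.draw_rectangle(code.rect(), color = (255, 0, 0))
        # code.rect() is just the (x, y, w, h) tuple -- dump that, not the whole object.
        output_str = json.dumps(code.rect())
        uart.write(output_str + '\n')    # e.g. "[57, 43, 120, 118]\n"

If the STM32 side prefers fixed-size binary instead of JSON text, ustruct.pack("<4H", code.x(), code.y(), code.w(), code.h()) would send the same four values as eight bytes.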
-
Human-body detection problem: does anyone know how to solve it?
This was adapted from the face-tracking example. It used to work, but now it only detects the body and then gets stuck at the detection step; nothing is printed below and it no longer tracks. The code:

import sensor, time, image

sensor.reset()
sensor.set_contrast(3)
sensor.set_gainceiling(16)
sensor.set_framesize(sensor.VGA)
sensor.set_windowing((320, 240))
sensor.set_pixformat(sensor.RGB565)

# Skip a few frames to allow the sensor to settle down.
sensor.skip_frames(time = 2000)

# Load Haar cascade.
# By default this will use all stages; fewer stages is faster but less accurate.
body_cascade = image.HaarCascade("haarcascade_fullbody.cascade", stages=25)
print(body_cascade)

# First set of keypoints.
kpts1 = None

# Find a body!
while (kpts1 == None):
    img = sensor.snapshot()
    img.draw_string(0, 0, "Looking for a body...")
    # Find bodies.
    objects = img.find_features(body_cascade, threshold=0.5, scale=1.25)
    if objects:
        # Expand the ROI by 31 pixels in every direction.
        body = (objects[0][0]-31, objects[0][1]-31, objects[0][2]+31*2, objects[0][3]+31*2)
        # Extract keypoints using the detected body size as the ROI.
        kpts1 = img.find_keypoints(threshold=10, scale_factor=1.1, max_keypoints=100, roi=body)
        # Draw a rectangle around the first body.
        img.draw_rectangle(objects[0])

# Draw keypoints.
print(kpts1)
img.draw_keypoints(kpts1, size=24)
img = sensor.snapshot()
time.sleep(2000)

# FPS clock.
clock = time.clock()

while (True):
    clock.tick()
    img = sensor.snapshot()
    # Extract keypoints from the whole frame.
    kpts2 = img.find_keypoints(threshold=10, scale_factor=1.1, max_keypoints=100, normalized=True)
    if (kpts2):
        # Match the first set of keypoints with the second one.
        c = image.match_descriptor(kpts1, kpts2, threshold=85)
        match = c[6]  # c[6] contains the number of matches.
        if (match > 5):
            img.draw_rectangle(c[2:6])
            img.draw_cross(c[0], c[1], size=10)
            print(kpts2, "matched:%d dt:%d" % (match, c[7]))

    # Draw FPS.
    img.draw_string(0, 0, "FPS:%.2f" % (clock.fps()))
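A minimal sketch of just the detection stage, not a diagnosis of the script above: it keeps the sensor setup of the original keypoints face-tracking example (GRAYSCALE, small frame size), which makes find_features far cheaper than RGB565 at VGA and keeps the loop visibly running. The cascade file name and threshold come from the post; note that newer firmware names the scaling argument scale_factor, where older releases call it scale.

import sensor, time, image

sensor.reset()
sensor.set_contrast(3)
sensor.set_gainceiling(16)
sensor.set_pixformat(sensor.GRAYSCALE)   # Haar detection and keypoints run fastest on grayscale
sensor.set_framesize(sensor.QVGA)        # much less work per frame than VGA
sensor.skip_frames(time = 2000)

body_cascade = image.HaarCascade("haarcascade_fullbody.cascade", stages=17)
print(body_cascade)

clock = time.clock()
while(True):
    clock.tick()
    img = sensor.snapshot()
    objects = img.find_features(body_cascade, threshold=0.5, scale_factor=1.25)
    for r in objects:
        img.draw_rectangle(r)            # draw every detection so progress stays visible
    print("bodies: %d  FPS: %.2f" % (len(objects), clock.fps()))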
-
A question about body detection and tracking with OpenMV?
If I want to use a detector trained in OpenCV, and I have already converted the .xml file into a .cascade file with the Python script and updated the main code accordingly, what should I do next to get the script running on the camera, and where should the converted .cascade file be placed?
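A minimal sketch of one common arrangement, not a confirmed answer for this particular setup: copy the converted cascade into the root of the flash drive / SD card that appears when the OpenMV is plugged in over USB, then load it by path with image.HaarCascade(). The file name fullbody.cascade below is a placeholder.

import sensor, image, time

sensor.reset()
sensor.set_contrast(3)
sensor.set_gainceiling(16)
sensor.set_pixformat(sensor.GRAYSCALE)   # Haar cascades operate on grayscale images
sensor.set_framesize(sensor.QVGA)
sensor.skip_frames(time = 2000)

# "/fullbody.cascade" is a placeholder path: the converted file sits in the root
# of the camera's USB drive / SD card. Fewer stages is faster but less accurate.
body_cascade = image.HaarCascade("/fullbody.cascade", stages=17)
print(body_cascade)                      # prints the cascade's size and stage count if it loaded

clock = time.clock()
while(True):
    clock.tick()
    img = sensor.snapshot()
    for r in img.find_features(body_cascade, threshold=0.5, scale_factor=1.25):
        img.draw_rectangle(r)
    print(clock.fps())

Running the script from the OpenMV IDE is enough to test it; to make it run standalone, save it as main.py on the same drive, since the camera executes main.py at power-up.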