导航

    • 登录
    • 搜索
    • 版块
    • 产品
    • 教程
    • 论坛
    • 淘宝
    1. 主页
    2. rb35
    • 举报资料
    • 资料
    • 关注
    • 粉丝
    • 屏蔽
    • 帖子
    • 楼层
    • 最佳
    • 群组

    rb35

    @rb35

    0
    声望
    3
    楼层
    795
    资料浏览
    0
    粉丝
    1
    关注
    注册时间 最后登录

    rb35 关注

    rb35 发布的帖子

    • 代码整合问题,如何将人脸识别和人脸追踪的代码整合到一起?人脸的代码也不知道对不对?大佬求助

      这个是人脸追踪代码

      import sensor, image, time

      from pid import PID
      from pyb import Servo

      # Pan/tilt gimbal servos on pyb servo channels 1 and 2.
      pan_servo=Servo(1)
      tilt_servo=Servo(2)
      # NOTE(review): appears unused in this script — likely left over from a color-tracking example.
      red_threshold = (13, 49, 18, 61, 6, 47)
      pan_pid = PID(p=0.07, i=0, imax=90) # Use this PID when running offline or with image transfer disabled.
      tilt_pid = PID(p=0.05, i=0, imax=90) # Use this PID when running offline or with image transfer disabled.

      #pan_pid = PID(p=0.1, i=0, imax=90)# Use this PID when tuning online (IDE connected).

      #tilt_pid = PID(p=0.1, i=0, imax=90)# Use this PID when tuning online (IDE connected).
      sensor.reset() # Initialize the camera sensor.
      sensor.set_contrast(1)
      sensor.set_gainceiling(16)
      sensor.set_pixformat(sensor.GRAYSCALE) # Grayscale (the original comment said RGB565, but the code sets GRAYSCALE).
      sensor.set_framesize(sensor.QQVGA) # use QQVGA for speed.

      sensor.skip_frames(10) # Let new settings take effect.

      sensor.set_auto_whitebal(False) # turn this off.

      clock = time.clock() # Tracks FPS.
      face_cascade = image.HaarCascade("frontalface", stages=25)

      def find_max(blobs):
          """Return the detection rectangle (x, y, w, h) with the largest area.

          The pasted original had its body flattened to the def's column (a
          syntax error) and left max_blob unbound when no blob had positive
          area. Returns None for an empty/zero-area input instead of raising
          UnboundLocalError.
          """
          max_blob = None
          max_size = 0
          for blob in blobs:
              area = blob[2] * blob[3]  # width * height
              if area > max_size:
                  max_blob = blob
                  max_size = area
          return max_blob
      

      # Main tracking loop: detect the largest face and steer the gimbal
      # toward it. The pasted original had the loop body flattened to the
      # while's column, which is a syntax error in Python; structure restored.
      while(True):
          clock.tick() # Track elapsed milliseconds between snapshots().
          img = sensor.snapshot() # Take a picture and return the image.

          # Haar-cascade face detection; returns a list of (x, y, w, h) rects.
          blobs = img.find_features(face_cascade, threshold=0.75, scale=1.35)

          if blobs:
              max_blob = find_max(blobs)
              # Error terms: offset of the face centre from the image centre.
              pan_error = max_blob[0]+max_blob[2]/2-img.width()/2
              tilt_error = max_blob[1]+max_blob[3]/2-img.height()/2

              print("pan_error: ", pan_error)

              img.draw_rectangle(max_blob) # rect
              img.draw_cross(int(max_blob[0]+max_blob[2]/2), int(max_blob[1]+max_blob[3]/2)) # cx, cy

              # PID outputs; pan is halved to damp the faster horizontal axis.
              pan_output = pan_pid.get_pid(pan_error,1)/2
              tilt_output = tilt_pid.get_pid(tilt_error,1)
              print("pan_output",pan_output)
              # Drive servos incrementally; tilt axis direction is mirrored.
              pan_servo.angle(pan_servo.angle()+pan_output)
              tilt_servo.angle(tilt_servo.angle()-tilt_output)

      这个是人脸识别代码

      import sensor, image, time

      from pid import PID
      from pyb import Servo

      # Pan/tilt gimbal servos on pyb servo channels 1 and 2.
      pan_servo=Servo(1)
      tilt_servo=Servo(2)
      # NOTE(review): appears unused in this script — likely left over from a color-tracking example.
      red_threshold = (13, 49, 18, 61, 6, 47)
      pan_pid = PID(p=0.07, i=0, imax=90) # Use this PID when running offline or with image transfer disabled.
      tilt_pid = PID(p=0.05, i=0, imax=90) # Use this PID when running offline or with image transfer disabled.

      #pan_pid = PID(p=0.1, i=0, imax=90)# Use this PID when tuning online (IDE connected).

      #tilt_pid = PID(p=0.1, i=0, imax=90)# Use this PID when tuning online (IDE connected).
      sensor.reset() # Initialize the camera sensor.
      sensor.set_contrast(1)
      sensor.set_gainceiling(16)
      sensor.set_pixformat(sensor.GRAYSCALE) # Grayscale (the original comment said RGB565, but the code sets GRAYSCALE).
      sensor.set_framesize(sensor.QQVGA) # use QQVGA for speed.

      sensor.skip_frames(10) # Let new settings take effect.
      sensor.skip_frames(time = 500) # NOTE(review): redundant with the line above — both just discard frames.

      sensor.set_auto_whitebal(False) # turn this off.

      clock = time.clock() # Tracks FPS.
      face_cascade = image.HaarCascade("frontalface", stages=25)

      NUM_SUBJECT = 6        # number of enrolled subjects
      NUM_SUBJECT_IMGS = 10  # images stored per subject

      # Reference LBP descriptor computed from one startup snapshot.
      imgs = sensor.snapshot()
      d0 = imgs.find_lbp((0,0,imgs.width(),imgs.height()))

      def min(pmin, a, s):
          """Return the smaller of pmin and a; record subject index s in the
          global `num` when a is the new minimum.

          NOTE(review): shadows the builtin min(); kept under this name so the
          existing caller in the recognition loop still works. The pasted
          original had its body flattened to the def's column (syntax error);
          structure restored.
          """
          global num
          if a < pmin:
              pmin = a
              num = s
          return pmin

      imgs = None       # release the startup snapshot reference
      pmin = 999999     # running minimum match distance
      num = 0           # subject index of the best match so far

      def find_max(blobs):
          """Return the detection rectangle (x, y, w, h) with the largest area.

          The pasted original had its body flattened to the def's column (a
          syntax error) and left max_blob unbound when no blob had positive
          area. Returns None for an empty/zero-area input instead of raising
          UnboundLocalError.
          """
          max_blob = None
          max_size = 0
          for blob in blobs:
              area = blob[2] * blob[3]  # width * height
              if area > max_size:
                  max_blob = blob
                  max_size = area
          return max_blob
      

      # Main loop: track the largest face with the gimbal, then try to
      # recognise it against the stored subject images via LBP matching.
      # Fixes applied to the pasted original:
      #   - `while():` is a syntax error -> `while(True):` (matches the tracking script)
      #   - loop body indentation restored (it was flattened)
      #   - inner loop iterated subjects (NUM_SUBJECT) instead of images -> NUM_SUBJECT_IMGS
      #   - ".pmg" extension -> ".pgm"
      #   - `pmin.dist` attribute typo and undefined NUM_SUBJECTS_IMGS -> proper 3-arg call
      #   - "wellcome!" -> "welcome!"
      while(True):
          clock.tick() # Track elapsed milliseconds between snapshots().
          img = sensor.snapshot() # Take a picture and return the image.

          # Haar-cascade face detection; returns a list of (x, y, w, h) rects.
          blobs = img.find_features(face_cascade, threshold=0.75, scale=1.35)

          if blobs:
              max_blob = find_max(blobs)
              # Error terms: offset of the face centre from the image centre.
              pan_error = max_blob[0]+max_blob[2]/2-img.width()/2
              tilt_error = max_blob[1]+max_blob[3]/2-img.height()/2

              print("pan_error: ", pan_error)

              img.draw_rectangle(max_blob) # rect
              img.draw_cross(int(max_blob[0]+max_blob[2]/2), int(max_blob[1]+max_blob[3]/2)) # cx, cy

              pan_output = pan_pid.get_pid(pan_error,1)/2
              tilt_output = tilt_pid.get_pid(tilt_error,1)
              print("pan_output",pan_output)
              pan_servo.angle(pan_servo.angle()+pan_output)
              tilt_servo.angle(tilt_servo.angle()-tilt_output)

              # Compare the reference descriptor d0 against every stored image
              # of every subject; keep the smallest average distance.
              # NOTE(review): ranges start at 2 as in the original — subject 1
              # and image 1 are skipped; confirm against the stored dataset.
              for s in range(2, NUM_SUBJECT+1):
                  dist = 0
                  for i in range(2, NUM_SUBJECT_IMGS+1):
                      imgs = image.Image("singtown/s%d/%d.pgm"%(s,i))
                      d1 = imgs.find_lbp((0,0,imgs.width(),imgs.height()))
                      dist += image.match_descriptor(d0,d1)
                  pmin = min(pmin, dist/NUM_SUBJECT_IMGS, s)
              if pmin < 7000:
                  print("welcome!")
              else:
                  print("检测到陌生人")
          else:
              print("未检测到人脸")
      发布在 OpenMV Cam
      rb35
    • RE: openmv人脸追踪与wifiAP自建设点传送实时图像如何同时运行?人脸追踪时通过浏览器就能看到wifi实时传送的图像

      是在两段代码后面直接加上去吗

      发布在 OpenMV Cam
      rb35
    • openmv人脸追踪与wifiAP自建设点传送实时图像如何同时运行?人脸追踪时通过浏览器就能看到wifi实时传送的图像

      这个是人脸追踪的代码:

      
      import sensor, image, time
      
      from pid import PID
      from pyb import Servo
      
      # Pan/tilt gimbal servos on pyb servo channels 1 and 2.
      pan_servo=Servo(1)
      tilt_servo=Servo(2)
      # NOTE(review): appears unused in this script — likely left over from a color-tracking example.
      red_threshold  = (13, 49, 18, 61, 6, 47)
      pan_pid = PID(p=0.07, i=0, imax=90) # Use this PID when running offline or with image transfer disabled.
      tilt_pid = PID(p=0.05, i=0, imax=90) # Use this PID when running offline or with image transfer disabled.
      
      #pan_pid = PID(p=0.1, i=0, imax=90)# Use this PID when tuning online (IDE connected).
      
      #tilt_pid = PID(p=0.1, i=0, imax=90)# Use this PID when tuning online (IDE connected).
      sensor.reset() # Initialize the camera sensor.
      sensor.set_contrast(1)
      sensor.set_gainceiling(16)
      sensor.set_pixformat(sensor.GRAYSCALE) # Grayscale (the original comment said RGB565, but the code sets GRAYSCALE).
      sensor.set_framesize(sensor.QQVGA) # use QQVGA for speed.
      
      sensor.skip_frames(10) # Let new settings take effect.
      
      sensor.set_auto_whitebal(False) # turn this off.
      
      clock = time.clock() # Tracks FPS.
      face_cascade = image.HaarCascade("frontalface", stages=25)
      
      
      def find_max(blobs):
          """Return the detection rectangle (x, y, w, h) with the largest area.

          The original left max_blob unbound (UnboundLocalError) when every
          blob had zero area; returns None in that case instead.
          """
          max_blob = None
          max_size = 0
          for blob in blobs:
              area = blob[2] * blob[3]  # width * height
              if area > max_size:
                  max_blob = blob
                  max_size = area
          return max_blob
      
      
      
      
      
      # Main tracking loop: find the largest face each frame and nudge the
      # pan/tilt servos so the face centre converges on the image centre.
      while True:
          clock.tick()                 # track elapsed ms between snapshots
          img = sensor.snapshot()      # grab the current frame
      
          # Haar-cascade detection; yields (x, y, w, h) rectangles.
          faces = img.find_features(face_cascade, threshold=0.75, scale=1.35)
          if not faces:
              continue                 # nothing detected this frame
      
          face = find_max(faces)
      
          # Offsets of the face centre from the image centre.
          pan_error = face[0] + face[2] / 2 - img.width() / 2
          tilt_error = face[1] + face[3] / 2 - img.height() / 2
      
          print("pan_error: ", pan_error)
      
          # Visual feedback: bounding box plus a cross on the face centre.
          img.draw_rectangle(face)
          img.draw_cross(int(face[0] + face[2] / 2), int(face[1] + face[3] / 2))
      
          # PID outputs; pan is halved, tilt direction is mirrored.
          pan_output = pan_pid.get_pid(pan_error, 1) / 2
          tilt_output = tilt_pid.get_pid(tilt_error, 1)
          print("pan_output", pan_output)
          pan_servo.angle(pan_servo.angle() + pan_output)
          tilt_servo.angle(tilt_servo.angle() - tilt_output)
      
      

      这个是wifi模块的代码:

      MJPEG Streaming AP.

      这个例子展示了如何在AccessPoint模式下进行MJPEG流式传输。

      Android上的Chrome,Firefox和MJpegViewer App已经过测试。

      连接到OPENMV_AP并使用此URL:http://192.168.1.1:8080查看流。

      # MJPEG-over-HTTP streaming in AccessPoint mode. Connect to the
      # OPENMV_AP network and open http://192.168.1.1:8080 to view the stream.
      # The pasted original lost the '#' on several comment lines ("Reset
      # sensor", "Set sensor settings", ...), making it invalid Python;
      # restored as comments here.
      import sensor, image, time, network, usocket, sys

      SSID ='OPENMV_AP' # Network SSID
      KEY ='1234567890' # Network key (must be 10 chars)
      HOST = '' # Use first available interface
      PORT = 8080 # Arbitrary non-privileged port

      # Reset sensor
      sensor.reset()

      # Set sensor settings
      sensor.set_contrast(1)
      sensor.set_brightness(1)
      sensor.set_saturation(1)
      sensor.set_gainceiling(16)
      sensor.set_framesize(sensor.QQVGA)
      sensor.set_pixformat(sensor.GRAYSCALE)

      # Start the wlan module in AP mode.
      wlan = network.WINC(mode=network.WINC.MODE_AP)
      wlan.start_ap(SSID, key=KEY, security=wlan.WEP, channel=2)

      # You can block waiting for a client to connect
      #print(wlan.wait_for_sta(10000))

      def start_streaming(s):
          """Accept one client on listening socket s and stream MJPEG frames
          to it until a socket error propagates to the caller.

          The pasted original had the function body flattened to the def's
          column (a syntax error); structure restored, comments translated.
          """
          print ('Waiting for connections..')
          client, addr = s.accept()
          # Set client socket timeout to 2 seconds.
          client.settimeout(2.0)
          print ('Connected to ' + addr[0] + ':' + str(addr[1]))

          # Read the request from the client.
          data = client.recv(1024)
          # The client request should be parsed here.

          # Send the multipart header.
          client.send("HTTP/1.1 200 OK\r\n" \
                      "Server: OpenMV\r\n" \
                      "Content-Type: multipart/x-mixed-replace;boundary=openmv\r\n" \
                      "Cache-Control: no-cache\r\n" \
                      "Pragma: no-cache\r\n\r\n")

          # FPS clock
          clock = time.clock()

          # Start streaming images.
          # NOTE: disable the IDE preview to increase streaming FPS.
          while (True):
              clock.tick() # Track elapsed milliseconds between snapshots().
              frame = sensor.snapshot()
              cframe = frame.compressed(quality=35)
              header = "\r\n--openmv\r\n" \
                       "Content-Type: image/jpeg\r\n"\
                       "Content-Length:"+str(cframe.size())+"\r\n\r\n"
              client.send(header)
              client.send(cframe)
              print(clock.fps())
      

      # Server loop: (re)create the listening socket, serve one client, and
      # recreate the socket on any OSError. The pasted original had the try
      # body flattened to the try's column (a syntax error); restored.
      while (True):
          # Create server socket
          s = usocket.socket(usocket.AF_INET, usocket.SOCK_STREAM)
          try:
              # Bind and listen
              s.bind([HOST, PORT])
              s.listen(5)

              # Set server socket timeout.
              # NOTE: due to a WINC FW bug, if a client disconnects the server
              # socket must be closed and reopened. The timeout here is used
              # to close and recreate the socket.
              s.settimeout(3)
              start_streaming(s)
          except OSError as e:
              s.close()
              print("socket error: ", e)
              #sys.print_exception(e)
      发布在 OpenMV Cam
      rb35