  • Running the WiFi module and face-detection recording at the same time: why does the stream viewed from a phone browser show only one frame?



    • MJPEG Streaming AP.

      This example shows how to do MJPEG streaming in AccessPoint mode.

      Chrome and Firefox on Android and the MJpegViewer app have been tested.

      Connect to OPENMV_AP and use this URL to view the stream: http://192.168.1.1:8080

      import sensor, image, time, network, usocket, sys, mjpeg, pyb

      SSID = 'wlan work,face detection error' # Network SSID
      KEY = '1234567890' # Network key (must be 10 chars)
      HOST = '' # Use first available interface
      PORT = 8080 # Arbitrary non-privileged port
      RED_LED_PIN = 1
      BLUE_LED_PIN = 3

      # Reset sensor
      sensor.reset()

      # Set sensor settings
      sensor.set_contrast(1)
      sensor.set_brightness(1)
      sensor.set_saturation(1)
      sensor.set_gainceiling(16)
      sensor.set_framesize(sensor.QQVGA)
      sensor.set_pixformat(sensor.GRAYSCALE)
      sensor.skip_frames(time = 2000) # Let new settings take effect.
      face_cascade = image.HaarCascade("frontalface", stages=25)

      # Start the wlan module in AP mode.
      wlan = network.WINC(mode=network.WINC.MODE_AP)
      wlan.start_ap(SSID, key=KEY, security=wlan.WEP, channel=2)

      # You can block waiting for a client to connect
      # print(wlan.wait_for_sta(10000))

      def start_streaming(s):
          print('Waiting for connections..')
          client, addr = s.accept()
          # Set client socket timeout to 2 seconds
          client.settimeout(2.0)
          print('Connected to ' + addr[0] + ':' + str(addr[1]))

          # Read request from client
          data = client.recv(1024)
          # Should parse the client request here

          # Send multipart header
          client.send("HTTP/1.1 200 OK\r\n" \
                      "Server: OpenMV\r\n" \
                      "Content-Type: multipart/x-mixed-replace;boundary=openmv\r\n" \
                      "Cache-Control: no-cache\r\n" \
                      "Pragma: no-cache\r\n\r\n")

          # FPS clock
          clock = time.clock()

          # Start streaming images
          # NOTE: Disable the IDE preview to increase streaming FPS.
          while (True):
              clock.tick() # Track elapsed milliseconds between snapshots().
              frame = sensor.snapshot()
              cframe = frame.compressed(quality=35)
              header = "\r\n--openmv\r\n" \
                       "Content-Type: image/jpeg\r\n"\
                       "Content-Length:"+str(cframe.size())+"\r\n\r\n"
              client.send(header)
              client.send(cframe)
              print(clock.fps())
              print("123")
              pyb.LED(RED_LED_PIN).on()
              print("About to start detecting faces...")
              sensor.skip_frames(time = 2000) # Give the user time to get ready.

              pyb.LED(RED_LED_PIN).off()
              print("Now detecting faces!")
              pyb.LED(BLUE_LED_PIN).on()

              diff = 10 # We'll say we detected a face after 10 frames.
              while(diff):
                  img = sensor.snapshot()
                  # threshold is a value between 0.0 and 1.0; lower values raise both the
                  # detection rate and the false-positive rate, while higher values lower both.
                  # scale_factor is a float that must be greater than 1.0. Higher scale factors
                  # run faster but match the image correspondingly worse. Ideal values are
                  # between 1.35 and 1.5.

                  faces = img.find_features(face_cascade, threshold=0.5, scale_factor=1.5)

                  if faces:
                      diff -= 1
                      for r in faces:
                          img.draw_rectangle(r)

              m = mjpeg.Mjpeg("example4-%d.mjpeg" % pyb.rng())

              clock = time.clock() # Tracks FPS.
              print("You're on camera!")
              for i in range(200):
                  clock.tick()
                  m.add_frame(sensor.snapshot())
                  print(clock.fps())

              m.close(clock.fps())
              pyb.LED(BLUE_LED_PIN).off()
              print("Restarting...")


      while (True):
          print("456")
          # Create server socket
          s = usocket.socket(usocket.AF_INET, usocket.SOCK_STREAM)
          try:
              # Bind and listen
              s.bind([HOST, PORT])
              s.listen(5)

              # Set server socket timeout
              # NOTE: Because of a WINC FW bug, if the client disconnects the server socket
              # must be closed and reopened. Use the timeout here to close and re-create
              # the socket.
              s.settimeout(3)
              start_streaming(s)
          except OSError as e:
              s.close()
              print("socket error: ", e)
              #sys.print_exception(e)


    • The code is wrong.

      You don't need to call sensor.snapshot() multiple times; as written, the frame you send and the frame you run detection on are not the same image.

      skip_frames isn't needed either.



    • def start_streaming(s):
          print('Waiting for connections..')
          client, addr = s.accept()
          # Set client socket timeout to 2 seconds
          client.settimeout(2.0)
          print('Connected to ' + addr[0] + ':' + str(addr[1]))
      
          # Read request from client
          data = client.recv(1024)
          # Should parse the client request here
      
          # Send multipart header
          client.send("HTTP/1.1 200 OK\r\n" \
                      "Server: OpenMV\r\n" \
                      "Content-Type: multipart/x-mixed-replace;boundary=openmv\r\n" \
                      "Cache-Control: no-cache\r\n" \
                      "Pragma: no-cache\r\n\r\n")
      
          # FPS clock
          clock = time.clock()
      
          # Start streaming images
          # NOTE: Disable the IDE preview to increase streaming FPS.
          while (True):
              clock.tick() # Track elapsed milliseconds between snapshots().
              frame = sensor.snapshot()
              faces = frame.find_features(face_cascade, threshold=0.5, scale_factor=1.5)
              for r in faces:
                  frame.draw_rectangle(r)
              cframe = frame.compressed(quality=35)
              header = "\r\n--openmv\r\n" \
                       "Content-Type: image/jpeg\r\n"\
                       "Content-Length:"+str(cframe.size())+"\r\n\r\n"
              client.send(header)
              client.send(cframe)
      


    • @kidswong999 It still doesn't work. What I want is a live WiFi video feed plus face-detection recording at the same time, but the live view through the WiFi module still shows only a single image; it stutters and then the picture stops updating.
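
      For reference, below is a minimal sketch (not a verified fix) of how the streaming loop in the corrected start_streaming() could also record to an MJPEG file in the same pass, so streaming and face-detection recording share one sensor.snapshot() per iteration. It assumes the same setup as above: the client socket already accepted, the multipart HTTP header already sent, and face_cascade loaded with image.HaarCascade("frontalface"). The helper name stream_and_record, the file name "stream_record.mjpeg", and the 200-frame limit are made up for illustration. This sketch does not by itself cure the freeze on disconnect; the outer server loop with its socket timeout (the WINC firmware workaround in the original example) is still needed to close and re-create the socket.

      import sensor, time, mjpeg

      # Hypothetical helper: call it inside start_streaming() in place of the
      # streaming while-loop, after the multipart header has been sent.
      def stream_and_record(client, face_cascade, record_frames=200):
          m = mjpeg.Mjpeg("stream_record.mjpeg")   # hypothetical file name
          recorded = 0
          clock = time.clock()
          while (True):
              clock.tick()
              frame = sensor.snapshot()            # one snapshot per iteration
              faces = frame.find_features(face_cascade, threshold=0.5, scale_factor=1.5)
              for r in faces:
                  frame.draw_rectangle(r)          # annotate the frame that will be sent

              # Stream the annotated frame first; compressed() returns a copy,
              # leaving the frame usable for recording afterwards.
              cframe = frame.compressed(quality=35)
              header = "\r\n--openmv\r\n" \
                       "Content-Type: image/jpeg\r\n" \
                       "Content-Length:" + str(cframe.size()) + "\r\n\r\n"
              client.send(header)
              client.send(cframe)

              # Then append the same frame to the MJPEG file, up to the limit.
              if recorded < record_frames:
                  m.add_frame(frame)
                  recorded += 1
                  if recorded == record_frames:
                      m.close(clock.fps())
                      print("Recording finished, still streaming")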