• The free, easy-to-use SingTown AI cloud service is now online! Simple annotation, cloud training, supports the OpenMV H7 and OpenMV H7 Plus. It can replace Edge Impulse. https://forum.singtown.com/topic/9519
  • We only handle problems with the official, genuine OpenMV (STM32); the various forks have many compatibility issues that we cannot resolve.
  • For hardware faults, such as a board that will not power on, the forum is of little help; please contact after-sales support for repair directly.
  • Before posting, please make sure you have watched all the video tutorials at https://singtown.com/learn/ and read all the getting-started tutorials at http://book.openmv.cc/
  • Start a separate new thread for each new question.
  • A post needs a goal: what are you trying to do?
  • If code is involved, post the error message and the complete code as text; please do not post screenshots of code.
  • Must read: the forum guide post (玩转星瞳论坛) explains image uploads, code formatting, and related issues.
  • UART communication problem (the problem is laid out clearly; thanks in advance for answers)



    • 1. My goal is to use the keypoint-save and keypoint-detection example scripts to recognize each of 4 objects separately.
      2. Send the object's x, y coordinates over UART 3 to another STM32 (a PC host program would also be fine).
      3. I also want to send a character from the host to switch which object is being recognized.
      4. The problem is that after adding the UART output, the value assigned from the keypoint-scanning function, kpts10, is unstable; it sometimes comes back as 0, i.e. kpts10 = 0.
      5. After adding the UART input, the program only finds the keypoints once; that one result is correct and reaches the host, but nothing follows, even though the image keeps updating.
      6. Two code listings are attached below: the first is usable and produces output, but in some cases it locks up (kpts10 = 0). The second is the intended version, and it does not work.

      # Object tracking with keypoints example.
      # Show the camera an object and then run the script. A set of keypoints will be extracted
      # once and then tracked in the following frames. If you want a new set of keypoints re-run
      # the script. NOTE: see the docs for arguments to tune find_keypoints and match_keypoints.
      import sensor, time, image
      from pyb import UART
      import json
      # Reset sensor
      sensor.reset()
      
      # Sensor settings
      sensor.set_contrast(3)
      sensor.set_gainceiling(16)
      sensor.set_framesize(sensor.VGA)
      sensor.set_windowing((320, 240))
      sensor.set_pixformat(sensor.GRAYSCALE)
      
      sensor.skip_frames(time = 2000)
      sensor.set_auto_gain(False, value=100)
      uart = UART(3, 115200)
      def draw_keypoints(img, kpts):
          if kpts:
              print(kpts)
              img.draw_keypoints(kpts)
              img = sensor.snapshot()
              time.sleep(1000)
      imgtarget=2
      kpts1 = None
      # NOTE: uncomment to load a keypoints descriptor from file
      kpts1 = image.load_descriptor("/yxxjqr1.orb")
      kpts2 = image.load_descriptor("/yxxjqr2.orb")
      kpts3 = image.load_descriptor("/yxxjqr3.orb")
      kpts4 = image.load_descriptor("/yxxjqr4.orb")
      img = sensor.snapshot()
      #draw_keypoints(img, kpts1)
      uart = UART(3, 115200)
      clock = time.clock()
      while (True):
          clock.tick()
          img = sensor.snapshot()
          if (kpts1 == None):
              # NOTE: By default find_keypoints returns multi-scale keypoints extracted from an image pyramid.
              kpts1 = img.find_keypoints(max_keypoints=150, threshold=10, scale_factor=1.2)
              draw_keypoints(img, kpts1)
          else:
              # NOTE: When extracting keypoints to match the first descriptor, we use normalized=True to extract
              # keypoints from the first scale only, which will match one of the scales in the first descriptor.
              kpts100 = img.find_keypoints(max_keypoints=150, threshold=10, normalized=1)
              if (kpts100):
                  if (imgtarget==1) :
                      match = image.match_descriptor(kpts1, kpts100, threshold=85)
                  elif (imgtarget==2) :
                      match = image.match_descriptor(kpts2, kpts100, threshold=85)
                  elif (imgtarget==3) :
                      match = image.match_descriptor(kpts3, kpts100, threshold=85)
                  elif (imgtarget==4) :
                      match = image.match_descriptor(kpts4, kpts100, threshold=85)
                  if (match.count()>10):
                      # If we have at least n "good matches"
                      # Draw bounding rectangle and cross.
                      #img.draw_rectangle(match.rect())
                      img.draw_cross(match.cx(), match.cy(), size=10)
                      output_str="[%d,%d]" % (match.cx(),match.cy())
                      uart.write(output_str+'\r\n')
                      print(kpts100, "matched:%d dt:%d"%(match.count(), match.theta()))
                  # NOTE: uncomment if you want to draw the keypoints
                  #img.draw_keypoints(kpts2, size=KEYPOINTS_SIZE, matched=True)
      
          # Draw FPS
          img.draw_string(0, 0, "FPS:%.2f"%(clock.fps()))
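
      A note on point 4: img.find_keypoints() returns None when it cannot extract any keypoints from the current frame, so the variable coming back as 0/None on some frames is an expected outcome rather than a fault by itself; the loop just has to skip matching on those frames. A minimal single-object sketch of that guard is below (it reuses the /yxxjqr2.orb descriptor from the listing above; everything else is unchanged):

      import sensor, time, image
      from pyb import UART

      sensor.reset()
      sensor.set_contrast(3)
      sensor.set_gainceiling(16)
      sensor.set_framesize(sensor.VGA)
      sensor.set_windowing((320, 240))
      sensor.set_pixformat(sensor.GRAYSCALE)
      sensor.skip_frames(time=2000)
      sensor.set_auto_gain(False, value=100)

      uart = UART(3, 115200)
      kpts_ref = image.load_descriptor("/yxxjqr2.orb")  # reference descriptor saved earlier

      while True:
          img = sensor.snapshot()
          # find_keypoints() returns None when no keypoints are found in this frame;
          # treat that as "nothing to match" and simply move on to the next frame.
          kpts_now = img.find_keypoints(max_keypoints=150, threshold=10, normalized=True)
          if kpts_now is None:
              continue
          match = image.match_descriptor(kpts_ref, kpts_now, threshold=85)
          if match.count() > 10:
              img.draw_cross(match.cx(), match.cy(), size=10)
              uart.write("[%d,%d]\r\n" % (match.cx(), match.cy()))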
      
      
      # Object tracking with keypoints example.
      # Show the camera an object and then run the script. A set of keypoints will be extracted
      # once and then tracked in the following frames. If you want a new set of keypoints re-run
      # the script. NOTE: see the docs for arguments to tune find_keypoints and match_keypoints.
      import sensor, time, image
      from pyb import UART
      import json
      # Reset sensor
      sensor.reset()
      
      # Sensor settings
      sensor.set_contrast(3)
      sensor.set_gainceiling(16)
      sensor.set_framesize(sensor.VGA)
      sensor.set_windowing((320, 240))
      sensor.set_pixformat(sensor.GRAYSCALE)
      
      sensor.skip_frames(time = 2000)
      sensor.set_auto_gain(False, value=100)
      def draw_keypoints(img, kpts):
          if kpts:
              print(kpts)
              img.draw_keypoints(kpts)
              img = sensor.snapshot()
              time.sleep(1000)
      imgtarget='2'
      kpts1 = None
      # NOTE: uncomment to load a keypoints descriptor from file
      kpts1 = image.load_descriptor("/yxxjqr1.orb")
      kpts2 = image.load_descriptor("/yxxjqr2.orb")
      kpts3 = image.load_descriptor("/yxxjqr3.orb")
      kpts4 = image.load_descriptor("/yxxjqr4.orb")
      img = sensor.snapshot()
      #draw_keypoints(img, kpts1)
      uart = UART(3, 115200)
      clock = time.clock()
      while (True):
          clock.tick()
          img = sensor.snapshot()
          if (kpts1 == None):
              # NOTE: By default find_keypoints returns multi-scale keypoints extracted from an image pyramid.
              kpts1 = img.find_keypoints(max_keypoints=150, threshold=10, scale_factor=1.2)
              draw_keypoints(img, kpts1)
          else:
              # NOTE: When extracting keypoints to match the first descriptor, we use normalized=True to extract
              # keypoints from the first scale only, which will match one of the scales in the first descriptor.
              kpts100 = img.find_keypoints(max_keypoints=150, threshold=10, normalized=1)
              if (kpts100):
                  if (imgtarget=='1') :
                      match = image.match_descriptor(kpts1, kpts100, threshold=85)
                  elif (imgtarget=='2') :
                      match = image.match_descriptor(kpts2, kpts100, threshold=85)
                  elif (imgtarget=='3') :
                      match = image.match_descriptor(kpts3, kpts100, threshold=85)
                  elif (imgtarget=='4') :
                      match = image.match_descriptor(kpts4, kpts100, threshold=85)
                  if (match.count()>10):
                      # If we have at least n "good matches"
                      # Draw bounding rectangle and cross.
                      #img.draw_rectangle(match.rect())
                      img.draw_cross(match.cx(), match.cy(), size=10)
                      output_str="[%d,%d]" % (match.cx(),match.cy())
                      uart.write(output_str+'\r\n')
                      print(kpts100, "matched:%d dt:%d"%(match.count(), match.theta()))
                  # NOTE: uncomment if you want to draw the keypoints
                  #img.draw_keypoints(kpts2, size=KEYPOINTS_SIZE, matched=True)
          #imgtarget=uart.readchar()
          # Draw FPS
          if uart.any():
              #uart.write("132")
              #time.sleep(10)
              #print(uart.read(1))
              imgtarget=uart.read(1)
          img.draw_string(0, 0, "FPS:%.2f"%(clock.fps()))
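
      On point 5 and the second listing: uart.read(1) returns a bytes object such as b'1', which never compares equal to the string '1', so once the first character arrives from the host none of the match_descriptor branches is taken any more, and no further coordinates are sent even though frames keep coming. A minimal sketch of the receive handling, assuming the host sends the ASCII digits '1'..'4' to pick the target:

      from pyb import UART
      import time

      uart = UART(3, 115200)
      imgtarget = 2                      # default target object

      while True:
          if uart.any():
              c = uart.readchar()        # returns the received byte as an int, or -1 on timeout
              if 0x31 <= c <= 0x34:      # ASCII '1'..'4'
                  imgtarget = c - 0x30   # now a plain int 1..4, so comparisons like imgtarget == 1 work
          # ... grab a frame and match against descriptor number `imgtarget` here ...
          time.sleep_ms(10)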
      
      


    • @yuan @网红小智智



    • There is no variable called kpts10 in your code? What do you mean?



    • @kidswong999 It's in the else branch. Is there an example program where an external device sends to the OpenMV over UART, the OpenMV receives the data, and it can then process the characters?
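
      For reference, a minimal sketch of the OpenMV side receiving and processing characters sent by an external device over UART 3 (the line-based framing and the echo reply are only assumptions for illustration):

      from pyb import UART
      import time

      uart = UART(3, 115200, timeout_char=10)
      buf = b''

      while True:
          if uart.any():
              buf += uart.read(uart.any())       # append whatever bytes have arrived
              idx = buf.find(b'\n')
              if idx >= 0:                       # a complete line has been received
                  line = buf[:idx].strip()       # e.g. b'1'
                  buf = buf[idx + 1:]
                  uart.write(b'got: ' + line + b'\r\n')  # process the command; here, just echo it back
          time.sleep_ms(5)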



    • Could you send the coordinate data directly to the STM32 over UART? The UART tests on my OpenMV and on my STM32 each pass on their own, but data from the OpenMV never gets through to the STM32. Could it be related to the frame header? Also, how is the json library supposed to be used?
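
      On the frame-header question: if the STM32 parser waits for specific header bytes before accepting data, the OpenMV side has to send exactly that layout, otherwise the STM32 drops everything it receives. Below is a minimal sketch of both a binary framed packet and a JSON text line; the 0xAA 0x55 header, the int16 fields, and the key names are assumptions for illustration, not a fixed protocol, so they must be matched to whatever the STM32 firmware actually parses:

      import ustruct, json
      from pyb import UART

      uart = UART(3, 115200)

      def send_xy(x, y):
          # header 0xAA 0x55, x and y as little-endian int16, then CR LF as a trailer
          uart.write(ustruct.pack("<BBhhBB", 0xAA, 0x55, x, y, 0x0D, 0x0A))

      def send_xy_json(x, y):
          # human-readable alternative; json.dumps works on OpenMV's MicroPython
          uart.write(json.dumps({"x": x, "y": y}) + "\r\n")

      send_xy(160, 120)
      send_xy_json(160, 120)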